From 235caffef9353c9a17f04acf1525310a6e9821ec Mon Sep 17 00:00:00 2001 From: Joe Shimkus Date: Thu, 8 Aug 2024 08:56:13 -0400 Subject: [PATCH 1/9] refactor: add EDA_MQ_REDIS_HA_CLUSTER_HOSTS to compose files --- tools/docker/docker-compose-dev-redis-tls.yaml | 1 + tools/docker/docker-compose-dev.yaml | 4 +++- tools/docker/docker-compose-mac.yml | 4 +++- tools/docker/docker-compose-stage.yaml | 4 +++- 4 files changed, 10 insertions(+), 3 deletions(-) diff --git a/tools/docker/docker-compose-dev-redis-tls.yaml b/tools/docker/docker-compose-dev-redis-tls.yaml index 7907f9497..09bf9afa7 100644 --- a/tools/docker/docker-compose-dev-redis-tls.yaml +++ b/tools/docker/docker-compose-dev-redis-tls.yaml @@ -8,6 +8,7 @@ x-environment: - EDA_MQ_CLIENT_CERT_PATH=${EDA_MQ_CLIENT_CERT_PATH:-/var/lib/eda/redis-tls/client/client.crt} - EDA_MQ_CLIENT_KEY_PATH=${EDA_MQ_CLIENT_KEY_PATH:-/var/lib/eda/redis-tls/client/client.key} - EDA_MQ_CLIENT_CACERT_PATH=${EDA_MQ_CLIENT_CACERT_PATH:-/var/lib/eda/redis-tls/ca.crt} + - EDA_MQ_REDIS_HA_CLUSTER_HOSTS=${EDA_MQ_REDIS_HA_CLUSTER_HOSTS:-} - EDA_DB_HOST=postgres - EDA_DB_PASSWORD=${EDA_DB_PASSWORD:-'secret'} - EDA_ALLOWED_HOSTS=['*'] diff --git a/tools/docker/docker-compose-dev.yaml b/tools/docker/docker-compose-dev.yaml index 3f5904bb7..64c80a97c 100644 --- a/tools/docker/docker-compose-dev.yaml +++ b/tools/docker/docker-compose-dev.yaml @@ -1,6 +1,8 @@ x-environment: &common-env EDA_DB_HOST: postgres - EDA_MQ_HOST: redis + EDA_MQ_HOST: ${EDA_MQ_HOST:-redis} + EDA_MQ_PORT: ${EDA_MQ_PORT:-6379} + EDA_MQ_REDIS_HA_CLUSTER_HOSTS: ${EDA_MQ_REDIS_HA_CLUSTER_HOSTS:-} DJANGO_SETTINGS_MODULE: ${DJANGO_SETTINGS_MODULE:-aap_eda.settings.development} EDA_ALLOWED_HOSTS: '*' EDA_DEPLOYMENT_TYPE: ${EDA_DEPLOYMENT_TYPE:-podman} diff --git a/tools/docker/docker-compose-mac.yml b/tools/docker/docker-compose-mac.yml index 892a2e822..87d8bab46 100644 --- a/tools/docker/docker-compose-mac.yml +++ b/tools/docker/docker-compose-mac.yml @@ -1,7 +1,9 @@ x-environment: &common-env - EDA_DB_HOST=postgres - - EDA_MQ_HOST=redis + - EDA_MQ_HOST=${EDA_MQ_HOST:-redis} + - EDA_MQ_PORT=${EDA_MQ_PORT:-6379} + - EDA_MQ_REDIS_HA_CLUSTER_HOSTS=${EDA_MQ_REDIS_HA_CLUSTER_HOSTS:-} - DJANGO_SETTINGS_MODULE=aap_eda.settings.default - EDA_DB_PASSWORD=secret - EDA_SECRET_KEY=secret diff --git a/tools/docker/docker-compose-stage.yaml b/tools/docker/docker-compose-stage.yaml index e699e99fd..7bc710581 100644 --- a/tools/docker/docker-compose-stage.yaml +++ b/tools/docker/docker-compose-stage.yaml @@ -1,7 +1,9 @@ x-environment: &common-env - EDA_DB_HOST=postgres - - EDA_MQ_HOST=redis + - EDA_MQ_HOST=${EDA_MQ_HOST:-redis} + - EDA_MQ_PORT=${EDA_MQ_PORT:-6379} + - EDA_MQ_REDIS_HA_CLUSTER_HOSTS=${EDA_MQ_REDIS_HA_CLUSTER_HOSTS:-} - DJANGO_SETTINGS_MODULE=${DJANGO_SETTINGS_MODULE:-aap_eda.settings.default} - EDA_DB_PASSWORD=secret - EDA_SECRET_KEY=secret From c298567cdcc1d5c204549e130aad769c05c8e147 Mon Sep 17 00:00:00 2001 From: Joe Shimkus Date: Fri, 9 Aug 2024 14:21:55 -0400 Subject: [PATCH 2/9] refactor: add support for Redis HA cluster --- src/aap_eda/core/tasking/__init__.py | 53 +++++++++++++++++++++----- src/aap_eda/settings/default.py | 26 +++++++++++-- tests/integration/core/test_tasking.py | 2 +- 3 files changed, 68 insertions(+), 13 deletions(-) diff --git a/src/aap_eda/core/tasking/__init__.py b/src/aap_eda/core/tasking/__init__.py index b1b328a9e..1fd7bac1b 100644 --- a/src/aap_eda/core/tasking/__init__.py +++ b/src/aap_eda/core/tasking/__init__.py @@ -8,7 +8,6 @@ import redis import rq -import 
rq_scheduler
 from ansible_base.lib.redis.client import (
     DABRedis,
     DABRedisCluster,
@@ -25,6 +24,7 @@
 )
 from rq.job import Job as _Job, JobStatus
 from rq.serializers import JSONSerializer
+from rq_scheduler import Scheduler as _Scheduler

 from aap_eda.settings import default

@@ -73,9 +73,48 @@ def get_redis_client(**kwargs):
     DAB will return an appropriate client for HA based on the passed
     parameters.
     """
+    # HA cluster does not support an alternate redis db and will
+    # generate an exception if we pass a value (even the default).
+    # If we're in that situation we drop the db and, if the db
+    # is anything other than the default, log an informational
+    # message.
+    db = kwargs.get("db", None)
+    if (db is not None) and kwargs.get("clustered", False):
+        del kwargs["db"]
+        if db != default.DEFAULT_REDIS_DB:
+            logger.info(
+                f"clustered redis supports only the default db"
+                f"; db specified: {db}"
+            )
+
     return _get_redis_client(_create_url_from_parameters(**kwargs), **kwargs)


+class Scheduler(_Scheduler):
+    """Custom scheduler class."""
+
+    def __init__(
+        self,
+        queue_name="default",
+        queue=None,
+        interval=60,
+        connection=None,
+        job_class=None,
+        queue_class=None,
+        name=None,
+    ):
+        connection = _get_necessary_client_connection(connection)
+        super().__init__(
+            queue_name=queue_name,
+            queue=queue,
+            interval=interval,
+            connection=connection,
+            job_class=job_class,
+            queue_class=queue_class,
+            name=name,
+        )
+
+
 def enable_redis_prefix():
     redis_prefix = settings.RQ_REDIS_PREFIX

@@ -102,16 +141,12 @@ def enable_redis_prefix():
         f"{redis_prefix}:canceled:{0}"
     )

-    rq_scheduler.Scheduler.redis_scheduler_namespace_prefix = (
+    Scheduler.redis_scheduler_namespace_prefix = (
         f"{redis_prefix}:scheduler_instance:"
     )
-    rq_scheduler.Scheduler.scheduler_key = f"{redis_prefix}:scheduler"
-    rq_scheduler.Scheduler.scheduler_lock_key = (
-        f"{redis_prefix}:scheduler_lock"
-    )
-    rq_scheduler.Scheduler.scheduled_jobs_key = (
-        f"{redis_prefix}:scheduler:scheduled_jobs"
-    )
+    Scheduler.scheduler_key = f"{redis_prefix}:scheduler"
+    Scheduler.scheduler_lock_key = f"{redis_prefix}:scheduler_lock"
+    Scheduler.scheduled_jobs_key = f"{redis_prefix}:scheduler:scheduled_jobs"

     def eda_get_key(job_id):
         return f"{redis_prefix}:results:{job_id}"
diff --git a/src/aap_eda/settings/default.py b/src/aap_eda/settings/default.py
index a08292852..af0c58942 100644
--- a/src/aap_eda/settings/default.py
+++ b/src/aap_eda/settings/default.py
@@ -358,8 +358,9 @@ def _get_databases_settings() -> dict:
 # TASKING SETTINGS
 # ---------------------------------------------------------
 RQ = {
-    "QUEUE_CLASS": "aap_eda.core.tasking.Queue",
     "JOB_CLASS": "aap_eda.core.tasking.Job",
+    "QUEUE_CLASS": "aap_eda.core.tasking.Queue",
+    "SCHEDULER_CLASS": "aap_eda.core.tasking.Scheduler",
 }

 REDIS_UNIX_SOCKET_PATH = settings.get("MQ_UNIX_SOCKET_PATH", None)
@@ -370,9 +371,18 @@
 REDIS_CLIENT_CACERT_PATH = settings.get("MQ_CLIENT_CACERT_PATH", None)
 REDIS_CLIENT_CERT_PATH = settings.get("MQ_CLIENT_CERT_PATH", None)
 REDIS_CLIENT_KEY_PATH = settings.get("MQ_CLIENT_KEY_PATH", None)
-REDIS_DB = settings.get("MQ_DB", 0)
+DEFAULT_REDIS_DB = 0
+REDIS_DB = settings.get("MQ_DB", DEFAULT_REDIS_DB)
 RQ_REDIS_PREFIX = settings.get("RQ_REDIS_PREFIX", "eda-rq")

+# The HA cluster hosts value is a string of <host>:<port>[,<host>:<port>]+
+# and is exhaustive; i.e., not in addition to REDIS_HOST:REDIS_PORT.
+# EDA does not validate the content, but relies on DAB to do so.
+# +# In establishing an HA Cluster Redis client connection DAB ignores +# the host and port kwargs. +REDIS_HA_CLUSTER_HOSTS = settings.get("REDIS_HA_CLUSTER_HOSTS", "").strip() + def _rq_common_parameters(): params = { @@ -403,7 +413,7 @@ def _rq_redis_client_additional_parameters(): return params -def rq_redis_client_instantiation_parameters(): +def rq_standalone_redis_client_instantiation_parameters(): params = _rq_common_parameters() | _rq_redis_client_additional_parameters() # Convert to lowercase for use in instantiating a redis client. @@ -411,6 +421,16 @@ def rq_redis_client_instantiation_parameters(): return params +def rq_redis_client_instantiation_parameters(): + params = rq_standalone_redis_client_instantiation_parameters() + + # Include the HA cluster parameters. + params["clustered"] = bool(REDIS_HA_CLUSTER_HOSTS) + params["clustered_hosts"] = REDIS_HA_CLUSTER_HOSTS + + return params + + # A list of queues to be used in multinode mode # If the list is empty, use the default singlenode queue name RULEBOOK_WORKER_QUEUES = settings.get("RULEBOOK_WORKER_QUEUES", []) diff --git a/tests/integration/core/test_tasking.py b/tests/integration/core/test_tasking.py index fe18f15a8..c48ea1db5 100644 --- a/tests/integration/core/test_tasking.py +++ b/tests/integration/core/test_tasking.py @@ -111,7 +111,7 @@ def test_worker_dab_client(default_queue: Queue): worker = DefaultWorker( [default_queue], connection=redis.Redis( - **default.rq_redis_client_instantiation_parameters() + **default.rq_standalone_redis_client_instantiation_parameters() ), ) assert isinstance(worker.connection, (DABRedis, DABRedisCluster)) From 1597162ba057ebe27fcb184d2c1e1c28174becca Mon Sep 17 00:00:00 2001 From: Joe Shimkus Date: Fri, 9 Aug 2024 14:23:30 -0400 Subject: [PATCH 3/9] refactor: add MQ to REDIS_HA_CLUSTER_HOSTS for a consistent look --- src/aap_eda/settings/default.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aap_eda/settings/default.py b/src/aap_eda/settings/default.py index af0c58942..d4125e877 100644 --- a/src/aap_eda/settings/default.py +++ b/src/aap_eda/settings/default.py @@ -381,7 +381,7 @@ def _get_databases_settings() -> dict: # # In establishing an HA Cluster Redis client connection DAB ignores # the host and port kwargs. 
-REDIS_HA_CLUSTER_HOSTS = settings.get("REDIS_HA_CLUSTER_HOSTS", "").strip()
+REDIS_HA_CLUSTER_HOSTS = settings.get("MQ_REDIS_HA_CLUSTER_HOSTS", "").strip()


 def _rq_common_parameters():

From f2f5b62a943ee98b4589cf0f2c356b9aab71d409 Mon Sep 17 00:00:00 2001
From: Joe Shimkus
Date: Mon, 12 Aug 2024 08:24:34 -0400
Subject: [PATCH 4/9] refactor: add support for HA Redis cluster

---
 src/aap_eda/core/tasking/__init__.py | 153 +++++++++++++++++++++++++--
 src/aap_eda/settings/default.py      |   1 +
 src/aap_eda/tasks/orchestrator.py    |   3 +-
 tools/docker/docker-compose-dev.yaml |   2 +
 4 files changed, 146 insertions(+), 13 deletions(-)

diff --git a/src/aap_eda/core/tasking/__init__.py b/src/aap_eda/core/tasking/__init__.py
index 1fd7bac1b..45e1763f9 100644
--- a/src/aap_eda/core/tasking/__init__.py
+++ b/src/aap_eda/core/tasking/__init__.py
@@ -4,7 +4,16 @@
 import logging
 from datetime import datetime, timedelta
 from types import MethodType
-from typing import Any, Callable, Iterable, Optional, Protocol, Type, Union
+from typing import (
+    Any,
+    Callable,
+    Iterable,
+    List,
+    Optional,
+    Protocol,
+    Type,
+    Union,
+)

 import redis
 import rq
@@ -23,6 +32,7 @@
     DEFAULT_WORKER_TTL,
 )
 from rq.job import Job as _Job, JobStatus
+from rq.registry import StartedJobRegistry
 from rq.serializers import JSONSerializer
 from rq_scheduler import Scheduler as _Scheduler

@@ -73,11 +83,10 @@ def get_redis_client(**kwargs):
     DAB will return an appropriate client for HA based on the passed
     parameters.
     """
-    # HA cluster does not support an alternate redis db and will
-    # generate an exception if we pass a value (even the default).
-    # If we're in that situation we drop the db and, if the db
-    # is anything other than the default, log an informational
-    # message.
+    # HA cluster does not support an alternate redis db and will generate an
+    # exception if we pass a value (even the default). If we're in that
+    # situation we drop the db and, if the db is anything other than the
+    # default, log an informational message.
     db = kwargs.get("db", None)
     if (db is not None) and kwargs.get("clustered", False):
         del kwargs["db"]
@@ -203,7 +212,7 @@ def __init__(
         super().__init__(
             name=name,
             default_timeout=default_timeout,
-            connection=connection,
+            connection=_get_necessary_client_connection(connection),
             is_async=is_async,
             job_class=job_class,
             serializer=serializer,
@@ -225,6 +234,7 @@ def __init__(
     ):
         if serializer is None:
             serializer = JSONSerializer
+        connection = _get_necessary_client_connection(connection)

         super().__init__(id, connection, serializer)

@@ -242,7 +252,130 @@ def _get_necessary_client_connection(connection: Connection) -> Connection:
     return connection


-class DefaultWorker(_Worker):
+class Worker(_Worker):
+    """Custom worker class.
+
+    Provides establishment of DAB Redis client and workarounds for various
+    DABRedisCluster issues.
+ """ + + def __init__( + self, + queues: Iterable[Union[Queue, str]], + name: Optional[str] = None, + default_result_ttl: int = DEFAULT_RESULT_TTL, + connection: Optional[Connection] = None, + exc_handler: Any = None, + exception_handlers: _ErrorHandlersArgType = None, + default_worker_ttl: int = DEFAULT_WORKER_TTL, + job_class: Type[_Job] = None, + queue_class: Type[_Queue] = None, + log_job_description: bool = True, + job_monitoring_interval: int = DEFAULT_JOB_MONITORING_INTERVAL, + disable_default_exception_handler: bool = False, + prepare_for_work: bool = True, + serializer: Optional[SerializerProtocol] = None, + ): + connection = _get_necessary_client_connection(connection) + super().__init__( + queues=queues, + name=name, + default_result_ttl=default_result_ttl, + connection=connection, + exc_handler=exc_handler, + exception_handlers=exception_handlers, + default_worker_ttl=default_worker_ttl, + job_class=job_class, + queue_class=queue_class, + log_job_description=log_job_description, + job_monitoring_interval=job_monitoring_interval, + disable_default_exception_handler=disable_default_exception_handler, # noqa: E501 + prepare_for_work=prepare_for_work, + serializer=JSONSerializer, + ) + + def _set_connection( + self, + connection: Union[DABRedis, DABRedisCluster], + ) -> Union[DABRedis, DABRedisCluster]: + # A DABRedis connection doesn't need intervention. + if isinstance(connection, DABRedis): + return super()._set_connection(connection) + + try: + connection_pool = connection.connection_pool + current_socket_timeout = connection_pool.connection_kwargs.get( + "socket_timeout" + ) + if current_socket_timeout is None: + timeout_config = {"socket_timeout": self.connection_timeout} + connection_pool.connection_kwargs.update(timeout_config) + except AttributeError: + nodes = connection.get_nodes() + for node in nodes: + connection_pool = node.redis_connection.connection_pool + current_socket_timeout = connection_pool.connection_kwargs.get( + "socket_timeout" + ) + if current_socket_timeout is None: + timeout_config = { + "socket_timeout": self.connection_timeout + } + connection_pool.connection_kwargs.update(timeout_config) + return connection + + @classmethod + def all( + cls, + connection: Optional[Union[DABRedis, DABRedisCluster]] = None, + job_class: Optional[Type[Job]] = None, + queue_class: Optional[Type[Queue]] = None, + queue: Optional[Queue] = None, + serializer=None, + ) -> List[Worker]: + # If we don't have a queue (whose connection would be used) make + # certain that we have an appropriate connection and pass it + # to the superclass. + if queue is None: + connection = _get_necessary_client_connection(connection) + return super().all( + connection, + job_class, + queue_class, + queue, + serializer, + ) + + def handle_job_success( + self, job: Job, queue: Queue, started_job_registry: StartedJobRegistry + ): + # A DABRedis connection doesn't need intervention. + if isinstance(self.connection, DABRedis): + return super().handle_job_success(job, queue, started_job_registry) + + # For DABRedisCluster perform success handling. + # DABRedisCluster doesn't provide the watch, multi, etc. methods + # necessary for the superclass implementation, but we don't need + # them as there's no dependencies in how we use the jobs. 
+ with self.connection.pipeline() as pipeline: + self.set_current_job_id(None, pipeline=pipeline) + self.increment_successful_job_count(pipeline=pipeline) + self.increment_total_working_time( + job.ended_at - job.started_at, + pipeline, + ) + + result_ttl = job.get_result_ttl(self.default_result_ttl) + if result_ttl != 0: + job._handle_success(result_ttl, pipeline=pipeline) + + job.cleanup(result_ttl, pipeline=pipeline, remove_from_queue=False) + started_job_registry.remove(job, pipeline=pipeline) + + pipeline.execute() + + +class DefaultWorker(Worker): """Custom default worker class used for non-activation tasks. Uses JSONSerializer as a default one. @@ -269,7 +402,6 @@ def __init__( job_class = Job if queue_class is None: queue_class = Queue - connection = _get_necessary_client_connection(connection) super().__init__( queues=queues, @@ -289,7 +421,7 @@ def __init__( ) -class ActivationWorker(_Worker): +class ActivationWorker(Worker): """Custom worker class used for activation related tasks. Uses JSONSerializer as a default one. @@ -316,7 +448,6 @@ def __init__( job_class = Job if queue_class is None: queue_class = Queue - connection = _get_necessary_client_connection(connection) queue_name = settings.RULEBOOK_QUEUE_NAME super().__init__( diff --git a/src/aap_eda/settings/default.py b/src/aap_eda/settings/default.py index d4125e877..4c9d07471 100644 --- a/src/aap_eda/settings/default.py +++ b/src/aap_eda/settings/default.py @@ -361,6 +361,7 @@ def _get_databases_settings() -> dict: "JOB_CLASS": "aap_eda.core.tasking.Job", "QUEUE_CLASS": "aap_eda.core.tasking.Queue", "SCHEDULER_CLASS": "aap_eda.core.tasking.Scheduler", + "WORKER_CLASS": "aap_eda.core.tasking.Worker", } REDIS_UNIX_SOCKET_PATH = settings.get("MQ_UNIX_SOCKET_PATH", None) diff --git a/src/aap_eda/tasks/orchestrator.py b/src/aap_eda/tasks/orchestrator.py index f1b58bd3a..9b71c6e37 100644 --- a/src/aap_eda/tasks/orchestrator.py +++ b/src/aap_eda/tasks/orchestrator.py @@ -21,7 +21,6 @@ from django.conf import settings from django.core.exceptions import ObjectDoesNotExist from django_rq import get_queue -from rq import Worker import aap_eda.tasks.activation_request_queue as requests_queue from aap_eda.core import models @@ -31,7 +30,7 @@ ProcessParentType, ) from aap_eda.core.models import Activation, ActivationRequestQueue, EventStream -from aap_eda.core.tasking import unique_enqueue +from aap_eda.core.tasking import Worker, unique_enqueue from aap_eda.services.activation import exceptions from aap_eda.services.activation.activation_manager import ( ActivationManager, diff --git a/tools/docker/docker-compose-dev.yaml b/tools/docker/docker-compose-dev.yaml index 64c80a97c..f13f89ee7 100644 --- a/tools/docker/docker-compose-dev.yaml +++ b/tools/docker/docker-compose-dev.yaml @@ -2,6 +2,8 @@ x-environment: &common-env EDA_DB_HOST: postgres EDA_MQ_HOST: ${EDA_MQ_HOST:-redis} EDA_MQ_PORT: ${EDA_MQ_PORT:-6379} + EDA_MQ_USER: ${EDA_MQ_USER:-} + EDA_MQ_USER_PASSWORD: ${EDA_MQ_USER_PASSWORD:-} EDA_MQ_REDIS_HA_CLUSTER_HOSTS: ${EDA_MQ_REDIS_HA_CLUSTER_HOSTS:-} DJANGO_SETTINGS_MODULE: ${DJANGO_SETTINGS_MODULE:-aap_eda.settings.development} EDA_ALLOWED_HOSTS: '*' From 7545acf338326020482172a4bc5cf0dcc2bdd8bf Mon Sep 17 00:00:00 2001 From: Madhu Kanoor Date: Mon, 12 Aug 2024 13:43:51 -0400 Subject: [PATCH 5/9] feat: AAP-28915 option to skip audit events (#998) --- src/aap_eda/api/serializers/activation.py | 7 +++++++ .../0045_activation_skip_audit_events.py | 19 +++++++++++++++++++ src/aap_eda/core/models/activation.py | 8 ++++++++ 
tests/integration/api/test_activation.py | 18 ++++++++++++++++++ tests/integration/conftest.py | 6 ++++++ 5 files changed, 58 insertions(+) create mode 100644 src/aap_eda/core/migrations/0045_activation_skip_audit_events.py diff --git a/src/aap_eda/api/serializers/activation.py b/src/aap_eda/api/serializers/activation.py index d8ddf75f3..207f5b082 100644 --- a/src/aap_eda/api/serializers/activation.py +++ b/src/aap_eda/api/serializers/activation.py @@ -291,6 +291,7 @@ class Meta: "eda_credentials", "log_level", "webhooks", + "skip_audit_events", ] read_only_fields = [ "id", @@ -357,6 +358,7 @@ class Meta: "k8s_service_name", "webhooks", "source_mappings", + "skip_audit_events", ] read_only_fields = ["id", "created_at", "modified_at"] @@ -409,6 +411,7 @@ def to_representation(self, activation): "k8s_service_name": activation.k8s_service_name, "webhooks": webhooks, "source_mappings": activation.source_mappings, + "skip_audit_events": activation.skip_audit_events, } @@ -433,6 +436,7 @@ class Meta: "eda_credentials", "k8s_service_name", "source_mappings", + "skip_audit_events", ] organization_id = serializers.IntegerField( @@ -636,6 +640,7 @@ class Meta: "k8s_service_name", "webhooks", "source_mappings", + "skip_audit_events", ] read_only_fields = ["id", "created_at", "modified_at", "restarted_at"] @@ -727,6 +732,7 @@ def to_representation(self, activation): "k8s_service_name": activation.k8s_service_name, "webhooks": webhooks, "source_mappings": activation.source_mappings, + "skip_audit_events": activation.skip_audit_events, } @@ -782,6 +788,7 @@ class Meta: "eda_credentials", "k8s_service_name", "source_mappings", + "skip_audit_events", ] read_only_fields = [ "id", diff --git a/src/aap_eda/core/migrations/0045_activation_skip_audit_events.py b/src/aap_eda/core/migrations/0045_activation_skip_audit_events.py new file mode 100644 index 000000000..7cd7e1257 --- /dev/null +++ b/src/aap_eda/core/migrations/0045_activation_skip_audit_events.py @@ -0,0 +1,19 @@ +# Generated by Django 4.2.7 on 2024-08-09 20:37 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("core", "0044_remove_activation_swap_single_source_and_more"), + ] + + operations = [ + migrations.AddField( + model_name="activation", + name="skip_audit_events", + field=models.BooleanField( + default=False, help_text="Skip audit events for activation" + ), + ), + ] diff --git a/src/aap_eda/core/models/activation.py b/src/aap_eda/core/models/activation.py index 88c09d4de..90ffffaa0 100644 --- a/src/aap_eda/core/models/activation.py +++ b/src/aap_eda/core/models/activation.py @@ -141,6 +141,14 @@ class Meta: blank=True, help_text="Mapping between sources and event streams", ) + skip_audit_events = models.BooleanField( + default=False, + help_text=("Skip audit events for activation"), + ) def get_parent_type(self) -> str: return ProcessParentType.ACTIVATION + + def _get_skip_audit_events(self) -> bool: + """Activation can optionally skip audit events.""" + return self.skip_audit_events diff --git a/tests/integration/api/test_activation.py b/tests/integration/api/test_activation.py index fc15041b4..a216edb63 100644 --- a/tests/integration/api/test_activation.py +++ b/tests/integration/api/test_activation.py @@ -70,6 +70,7 @@ def test_create_activation( assert activation.status_message == ( "Wait for a worker to be available to start activation" ) + assert not activation.skip_audit_events @pytest.mark.django_db @@ -808,3 +809,20 @@ def test_create_activation_with_awx_token( 
f"{api_url_v1}/activations/", data=activation_payload ) assert response.status_code == status.HTTP_201_CREATED + + +@pytest.mark.django_db +@patch.object(settings, "RULEBOOK_WORKER_QUEUES", []) +def test_create_activation_with_skip_audit_events( + admin_awx_token: models.AwxToken, + activation_payload_skip_audit_events: Dict[str, Any], + default_rulebook: models.Rulebook, + admin_client: APIClient, +): + response = admin_client.post( + f"{api_url_v1}/activations/", data=activation_payload_skip_audit_events + ) + assert response.status_code == status.HTTP_201_CREATED + data = response.data + activation = models.Activation.objects.filter(id=data["id"]).first() + assert activation.skip_audit_events diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index d5eb51a76..71fe170b1 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -1160,3 +1160,9 @@ def default_webhook( url=DUMMY_URL, eda_credential=default_hmac_credential, ) + + +@pytest.fixture +def activation_payload_skip_audit_events(activation_payload: dict) -> dict: + activation_payload["skip_audit_events"] = True + return activation_payload From dc66c3f994bb3db1533c8aa697343e681df5a9f1 Mon Sep 17 00:00:00 2001 From: Joe Shimkus Date: Mon, 12 Aug 2024 18:19:05 -0400 Subject: [PATCH 6/9] fix: use the same db in EDA unit tests This is made as a pragmatic choice to keep the development effort moving forward. --- tests/conftest.py | 23 ++++++++++++++++------- tests/integration/conftest.py | 21 +++++++++++++-------- 2 files changed, 29 insertions(+), 15 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 2bee39549..1b51e316e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -14,7 +14,6 @@ import pytest -from aap_eda.core.tasking import get_redis_client from aap_eda.settings import default @@ -26,12 +25,22 @@ def redis_parameters() -> dict: """Provide redis parameters based on settings values.""" params = default.rq_redis_client_instantiation_parameters() - # We try to avoid conflicting with a deployed environment by using a + # TODO: figure out the db oddity described here-in. + # + # There's an oddity with non-HA unit tests and the use of + # an alternate db. + # + # For pragmatism we've removed the code here which attempted + # to avoid conflicting with a deployed environment by using a # different database from that of the settings. - # This is not guaranteed as the deployed environment could be differently - # configured from the default, but in development it should be fine. - client = get_redis_client(**params) - max_dbs = int(client.config_get("databases")["databases"]) - params["db"] = (params["db"] + 1) % max_dbs + # + # + # One constant is that DAB RedisCluster, which does not support + # alternate dbs passes the EDA unit tests (which are part of + # CI processing) and that by using only the 0 db + # the same unit tests pass for non-HA whereas using an alternate + # db as this code previously did results in non-HA unit tests + # failing. + # return params diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 71fe170b1..f07e6b694 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import copy
 import logging
 import uuid
 from typing import Any, Dict, List
@@ -1059,14 +1060,18 @@ def redis_external(redis_parameters):


 @pytest.fixture
 def test_queue_name(redis_parameters):
-    # Use a separately named copy of the default queue to prevent
-    # cross-environment issues. Using the eda-server default queue results in
-    # tasks run by tests to execute within eda-server context, if the
-    # eda-server default worker is running, rather than the test context.
-    settings.RQ_QUEUES["test-default"] = settings.RQ_QUEUES["default"]
-
-    # The redis parameters provide the DB to use in an effort to avoid
-    # stepping on a deployed environment.
+    # Use a separately named deep copy of the default queue to prevent
+    # cross-environment issues. Without a deep copy the same queue entry
+    # is used as the value for both queues, and modifying either affects
+    # the other.
+    # Using the eda-server default queue results in tasks run by tests
+    # executing within the eda-server context, if the eda-server default
+    # worker is running, rather than the test context.
+    settings.RQ_QUEUES["test-default"] = copy.deepcopy(
+        settings.RQ_QUEUES["default"]
+    )
+
+    # The redis parameters provide the DB to use.
     settings.RQ_QUEUES["test-default"]["DB"] = redis_parameters["db"]

     return "test-default"

From 9444336dee8326ef9f024c1afcb7ce9c7857a7cd Mon Sep 17 00:00:00 2001
From: Doston <31990136+Dostonbek1@users.noreply.github.com>
Date: Mon, 12 Aug 2024 15:05:31 -0400
Subject: [PATCH 7/9] fix(AAP-29090): remove member_organization permission
 from org admin role (#999)

* remove member_organization permission from org admin role

* add tests for role user and team assignments for managed org roles
---
 .../commands/create_initial_data.py        |  2 +-
 tests/integration/conftest.py              | 15 +++++
 .../core/test_create_initial_data.py       |  6 ++
 .../dab_rbac/test_managed_roles.py         | 58 +++++++++++++++++++
 4 files changed, 80 insertions(+), 1 deletion(-)
 create mode 100644 tests/integration/dab_rbac/test_managed_roles.py

diff --git a/src/aap_eda/core/management/commands/create_initial_data.py b/src/aap_eda/core/management/commands/create_initial_data.py
index 5464c5c8e..8ab529680 100644
--- a/src/aap_eda/core/management/commands/create_initial_data.py
+++ b/src/aap_eda/core/management/commands/create_initial_data.py
@@ -53,7 +53,7 @@
     ],
     "rulebook_process": ["view"],
     "audit_rule": ["view"],
-    "organization": ["view", "change", "delete", "member"],
+    "organization": ["view", "change", "delete"],
     "team": CRUD + ["member"],
     "project": CRUD + ["sync"],
     "rulebook": ["view"],
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
index f07e6b694..efffe5b28 100644
--- a/tests/integration/conftest.py
+++ b/tests/integration/conftest.py
@@ -27,6 +27,7 @@
 from rest_framework.test import APIClient

 from aap_eda.core import enums, models
+from aap_eda.core.management.commands import create_initial_data
 from aap_eda.core.management.commands.create_initial_data import (
     CREDENTIAL_TYPES,
     populate_credential_types,
@@ -1047,6 +1048,20 @@ def new_team(default_organization: models.Organization) -> models.Team:
     )


+# TODO(doston): creating managed roles should be exported to its own
+# management command
+@pytest.fixture
+def create_initial_data_command():
+    """Create all managed roles using create_initial_data command."""
+    return create_initial_data.Command()
+
+
+@pytest.fixture
+def create_managed_org_roles(create_initial_data_command):
+    """Create managed org roles using create_initial_data command."""
create_initial_data_command._create_org_roles() + + ################################################################# # Redis ################################################################# diff --git a/tests/integration/core/test_create_initial_data.py b/tests/integration/core/test_create_initial_data.py index 7c9c73664..59f6b9351 100644 --- a/tests/integration/core/test_create_initial_data.py +++ b/tests/integration/core/test_create_initial_data.py @@ -24,6 +24,9 @@ from aap_eda.core.utils.credentials import inputs_from_store +################################################################# +# Roles +################################################################# @pytest.mark.django_db def test_create_all_roles(): assert RoleDefinition.objects.count() == 0 @@ -86,6 +89,9 @@ def test_remove_extra_permission(): assert perm not in auditor_role.permissions.all() +################################################################# +# Credentials +################################################################# def create_old_registry_credential(): credential = models.Credential.objects.create( name="registry cred", diff --git a/tests/integration/dab_rbac/test_managed_roles.py b/tests/integration/dab_rbac/test_managed_roles.py new file mode 100644 index 000000000..5e361548b --- /dev/null +++ b/tests/integration/dab_rbac/test_managed_roles.py @@ -0,0 +1,58 @@ +# Copyright 2024 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest +from ansible_base.rbac.models import RoleDefinition +from rest_framework import status + +from aap_eda.core.management.commands.create_initial_data import ORG_ROLES +from tests.integration.constants import api_url_v1 + + +@pytest.mark.django_db +def test_org_role_team_assignments( + admin_client, default_organization, default_team, create_managed_org_roles +): + for org_role in ORG_ROLES: + # ignore Org Member role as it is not assignable to teams + if org_role["name"] != "Organization Member": + role = RoleDefinition.objects.get(name=org_role["name"]) + post_data = { + "object_id": default_organization.id, + "role_definition": role.id, + "team": default_team.id, + } + response = admin_client.post( + f"{api_url_v1}/role_team_assignments/", data=post_data + ) + assert ( + response.status_code == status.HTTP_201_CREATED + ), response.data + + +@pytest.mark.django_db +def test_org_role_user_assignments( + admin_client, default_organization, default_user, create_managed_org_roles +): + for org_role in ORG_ROLES: + role = RoleDefinition.objects.get(name=org_role["name"]) + post_data = { + "object_id": default_organization.id, + "role_definition": role.id, + "user": default_user.id, + } + response = admin_client.post( + f"{api_url_v1}/role_user_assignments/", data=post_data + ) + assert response.status_code == status.HTTP_201_CREATED, response.data From 577a8bd1325f32a806ecef732762a2797595ba23 Mon Sep 17 00:00:00 2001 From: Joe Shimkus Date: Tue, 13 Aug 2024 07:48:57 -0400 Subject: [PATCH 8/9] fix: use tests default queue connection type as check --- tests/integration/core/test_tasking.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/tests/integration/core/test_tasking.py b/tests/integration/core/test_tasking.py index c48ea1db5..c96ab6947 100644 --- a/tests/integration/core/test_tasking.py +++ b/tests/integration/core/test_tasking.py @@ -19,8 +19,6 @@ import redis from aap_eda.core.tasking import ( - DABRedis, - DABRedisCluster, DefaultWorker, Queue, _create_url_from_parameters, @@ -102,19 +100,22 @@ def test_rediss_dab_url(): def test_worker_dab_client(default_queue: Queue): """Test that workers end up with a DABRedis client connection.""" - # Verify if given no connection the worker gets DABRedis/DABRedisCluster. + # The default queue has a Redis connection based on the running + # configuration. We use that to check the connection type. + + # Verify if given no connection the worker gets the expected type. worker = DefaultWorker([default_queue]) - assert isinstance(worker.connection, (DABRedis, DABRedisCluster)) + assert isinstance(worker.connection, type(default_queue.connection)) - # Verify if given a redis.Redis connection the worker gets a - # DABRedis/DABRedisCluster. + # Verify if given a redis.Redis connection the worker gets the + # expected type. worker = DefaultWorker( [default_queue], connection=redis.Redis( **default.rq_standalone_redis_client_instantiation_parameters() ), ) - assert isinstance(worker.connection, (DABRedis, DABRedisCluster)) + assert isinstance(worker.connection, type(default_queue.connection)) # Verify if given a DABRedis/DABRedisCluster connection the worker uses it. 
connection = get_redis_client( From a938d7cd3648ed4343aeff1cd59045e21fa0651c Mon Sep 17 00:00:00 2001 From: Joe Shimkus Date: Tue, 13 Aug 2024 08:22:08 -0400 Subject: [PATCH 9/9] refactor: adjust to DAB's parameter changes --- poetry.lock | 4 ++-- src/aap_eda/core/tasking/__init__.py | 2 +- src/aap_eda/settings/default.py | 5 +++-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/poetry.lock b/poetry.lock index f6a28fb05..a7734bf1e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -610,7 +610,7 @@ bcrypt = ["bcrypt"] [[package]] name = "django-ansible-base" -version = "2024.8.5.0.dev7+g708f5c5" +version = "2024.8.13.0.dev2+g4cddc10" description = "A Django app used by ansible services" optional = false python-versions = ">=3.9" @@ -642,7 +642,7 @@ testing = ["cryptography", "pytest", "pytest-django"] type = "git" url = "https://github.com/ansible/django-ansible-base.git" reference = "devel" -resolved_reference = "708f5c59f7b240ea78fa9480b518b3484aeedbad" +resolved_reference = "4cddc10c2e529d9619f49998c6f47ac3fd9eba6b" [[package]] name = "django-crum" diff --git a/src/aap_eda/core/tasking/__init__.py b/src/aap_eda/core/tasking/__init__.py index 45e1763f9..78603301d 100644 --- a/src/aap_eda/core/tasking/__init__.py +++ b/src/aap_eda/core/tasking/__init__.py @@ -88,7 +88,7 @@ def get_redis_client(**kwargs): # situation we drop the db and, if the db is anything other than the # default log an informational message. db = kwargs.get("db", None) - if (db is not None) and kwargs.get("clustered", False): + if (db is not None) and (kwargs.get("mode", "") == "cluster"): del kwargs["db"] if db != default.DEFAULT_REDIS_DB: logger.info( diff --git a/src/aap_eda/settings/default.py b/src/aap_eda/settings/default.py index 4c9d07471..bc5e3f5e1 100644 --- a/src/aap_eda/settings/default.py +++ b/src/aap_eda/settings/default.py @@ -426,8 +426,9 @@ def rq_redis_client_instantiation_parameters(): params = rq_standalone_redis_client_instantiation_parameters() # Include the HA cluster parameters. - params["clustered"] = bool(REDIS_HA_CLUSTER_HOSTS) - params["clustered_hosts"] = REDIS_HA_CLUSTER_HOSTS + if REDIS_HA_CLUSTER_HOSTS: + params["mode"] = "cluster" + params["redis_hosts"] = REDIS_HA_CLUSTER_HOSTS return params
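
Taken together, patches 2, 4, and 9 funnel Redis connection setup through a single parameter dict: the settings helper layers mode="cluster" and redis_hosts onto the standalone parameters when EDA_MQ_REDIS_HA_CLUSTER_HOSTS is set, and get_redis_client() drops the unsupported db kwarg before the kwargs reach the DAB client factory. The standalone sketch below condenses that flow for reference; it is not part of the series itself. The hard-coded settings values are illustrative assumptions, and a plain dict is returned where the real code would instantiate a DAB client.

    import logging

    logger = logging.getLogger(__name__)

    DEFAULT_REDIS_DB = 0

    # Illustrative stand-ins for the dynaconf-backed settings; in the real
    # code these come from the EDA_MQ_* environment variables wired up in
    # the compose files of patch 1.
    REDIS_HOST = "redis"
    REDIS_PORT = 6379
    REDIS_DB = DEFAULT_REDIS_DB
    REDIS_HA_CLUSTER_HOSTS = "redis-1:6379,redis-2:6379,redis-3:6379"


    def rq_redis_client_instantiation_parameters() -> dict:
        # Mirrors the patched settings helper: start from the standalone
        # parameters, then layer on the cluster parameters (renamed to
        # mode/redis_hosts in patch 9) when cluster hosts are configured.
        params = {"host": REDIS_HOST, "port": REDIS_PORT, "db": REDIS_DB}
        if REDIS_HA_CLUSTER_HOSTS:
            params["mode"] = "cluster"
            params["redis_hosts"] = REDIS_HA_CLUSTER_HOSTS
        return params


    def get_redis_client(**kwargs) -> dict:
        # Mirrors the patched tasking helper: a clustered client does not
        # accept an alternate db, so drop it and log when it differs from
        # the default. The real code builds a DAB client from these kwargs;
        # here they are simply returned for inspection.
        db = kwargs.get("db", None)
        if (db is not None) and (kwargs.get("mode", "") == "cluster"):
            del kwargs["db"]
            if db != DEFAULT_REDIS_DB:
                logger.info(
                    "clustered redis supports only the default db"
                    "; db specified: %s",
                    db,
                )
        return kwargs


    if __name__ == "__main__":
        # With cluster hosts set, "db" is dropped while mode and
        # redis_hosts pass through untouched.
        print(get_redis_client(**rq_redis_client_instantiation_parameters()))

Run as-is, the sketch prints a dict containing host, port, mode, and redis_hosts but no db; with REDIS_HA_CLUSTER_HOSTS cleared, host, port, and db pass through unchanged, matching the standalone path.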