From 952997949f1e3f12691ae2718983c7f43a8ae1cb Mon Sep 17 00:00:00 2001 From: Sourour Benzarti Date: Thu, 12 Dec 2024 10:23:20 +0100 Subject: [PATCH 01/11] add get_connection function --- postgrestq/task_queue.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/postgrestq/task_queue.py b/postgrestq/task_queue.py index abdb41c..2666342 100644 --- a/postgrestq/task_queue.py +++ b/postgrestq/task_queue.py @@ -14,7 +14,7 @@ Sequence, ) -from psycopg import sql, connect +from psycopg import sql, connect, Connection # supported only from 3.11 onwards: # from datetime import UTC @@ -69,19 +69,28 @@ def __init__( # called when ttl <= 0 for a task self.ttl_zero_callback = ttl_zero_callback - self.connect() + self.conn = self.connect() if create_table: self._create_queue_table() if reset: self._reset() + def get_connection(self): + connection = connect(self._dsn) + with connection.cursor() as cur: + cur.execute("SELECT 1+1") + cur.fetchone() + + return connection + def connect(self) -> None: """ Establish a connection to Postgres. If a connection already exists, it's overwritten. """ - self.conn = connect(self._dsn) + if self.conn is None or self.conn.closed: + self.conn = self.get_connection() def _create_queue_table(self) -> None: """ From 75434722bb488d597df79fec92e49150ba51d64b Mon Sep 17 00:00:00 2001 From: Sourour Benzarti Date: Sun, 5 Jan 2025 22:07:56 +0100 Subject: [PATCH 02/11] manage connections using a Connection pool --- postgrestq/task_queue.py | 615 ++++++++++++++++++++------------------- pyproject.toml | 1 + 2 files changed, 309 insertions(+), 307 deletions(-) diff --git a/postgrestq/task_queue.py b/postgrestq/task_queue.py index 2666342..1c6fb53 100644 --- a/postgrestq/task_queue.py +++ b/postgrestq/task_queue.py @@ -14,7 +14,8 @@ Sequence, ) -from psycopg import sql, connect, Connection +from psycopg import sql +from psycopg_pool import ConnectionPool # supported only from 3.11 onwards: # from datetime import UTC @@ -68,29 +69,24 @@ def __init__( # called when ttl <= 0 for a task self.ttl_zero_callback = ttl_zero_callback - - self.conn = self.connect() + self.connect() if create_table: self._create_queue_table() if reset: self._reset() - def get_connection(self): - connection = connect(self._dsn) - with connection.cursor() as cur: - cur.execute("SELECT 1+1") - cur.fetchone() - - return connection - def connect(self) -> None: """ Establish a connection to Postgres. If a connection already exists, it's overwritten. 
""" - if self.conn is None or self.conn.closed: - self.conn = self.get_connection() + # if self.conn is None or self.conn.closed: + # self.conn = self.get_connection() + self.pool = ConnectionPool(self._dsn, open=True, min_size=1, max_size=1) + # This will block the use of the pool until min_size connections + # have been acquired + self.pool.wait() def _create_queue_table(self) -> None: """ @@ -98,52 +94,54 @@ def _create_queue_table(self) -> None: """ # TODO: check if the table already exist # whether it has the same schema - with self.conn.cursor() as cur: - cur.execute( - sql.SQL( - """CREATE TABLE IF NOT EXISTS {} ( - id UUID PRIMARY KEY, - queue_name TEXT NOT NULL, - task JSONB NOT NULL, - ttl SMALLINT NOT NULL, - can_start_at TIMESTAMPTZ NOT NULL - DEFAULT CURRENT_TIMESTAMP, - lease_timeout FLOAT, - started_at TIMESTAMPTZ, - completed_at TIMESTAMPTZ - )""" - ).format(sql.Identifier(self._table_name)) - ) - cur.execute( - sql.SQL( - """CREATE INDEX IF NOT EXISTS - task_queue_queue_name_can_start_at_idx - ON {} (queue_name, can_start_at) - """ - ).format(sql.Identifier(self._table_name)) - ) - self.conn.commit() + with self.pool.connection() as conn: + with conn.cursor() as cur: + cur.execute( + sql.SQL( + """CREATE TABLE IF NOT EXISTS {} ( + id UUID PRIMARY KEY, + queue_name TEXT NOT NULL, + task JSONB NOT NULL, + ttl SMALLINT NOT NULL, + can_start_at TIMESTAMPTZ NOT NULL + DEFAULT CURRENT_TIMESTAMP, + lease_timeout FLOAT, + started_at TIMESTAMPTZ, + completed_at TIMESTAMPTZ + )""" + ).format(sql.Identifier(self._table_name)) + ) + cur.execute( + sql.SQL( + """CREATE INDEX IF NOT EXISTS + task_queue_queue_name_can_start_at_idx + ON {} (queue_name, can_start_at) + """ + ).format(sql.Identifier(self._table_name)) + ) + conn.commit() def __len__(self) -> int: """ Returns the length of processing or to be processed tasks """ - with self.conn.cursor() as cursor: - cursor.execute( - sql.SQL( - """ - SELECT count(*) as count - FROM {} - WHERE queue_name = %s - AND completed_at IS NULL - """ - ).format(sql.Identifier(self._table_name)), - (self._queue_name,), - ) - row = cursor.fetchone() - count: int = row[0] if row else 0 - self.conn.commit() - return count + with self.pool.connection() as conn: + with conn.cursor() as cursor: + cursor.execute( + sql.SQL( + """ + SELECT count(*) as count + FROM {} + WHERE queue_name = %s + AND completed_at IS NULL + """ + ).format(sql.Identifier(self._table_name)), + (self._queue_name,), + ) + row = cursor.fetchone() + count: int = row[0] if row else 0 + conn.commit() + return count def add( self, @@ -180,29 +178,29 @@ def add( id_ = str(uuid4()) serialized_task = self._serialize(task) - - with self.conn.cursor() as cursor: - # store the task + metadata and put task-id into the task queue - cursor.execute( - sql.SQL( - """ - INSERT INTO {} ( - id, - queue_name, - task, - ttl, - lease_timeout, - can_start_at + with self.pool.connection() as conn: + with conn.cursor() as cursor: + # store the task + metadata and put task-id into the task queue + cursor.execute( + sql.SQL( + """ + INSERT INTO {} ( + id, + queue_name, + task, + ttl, + lease_timeout, + can_start_at + ) + VALUES (%s, %s, %s, %s, %s, COALESCE(%s, current_timestamp)) + """ + ).format(sql.Identifier(self._table_name)), + ( + id_, self._queue_name, serialized_task, + ttl, lease_timeout, can_start_at + ), ) - VALUES (%s, %s, %s, %s, %s, COALESCE(%s, current_timestamp)) - """ - ).format(sql.Identifier(self._table_name)), - ( - id_, self._queue_name, serialized_task, - ttl, lease_timeout, can_start_at 
- ), - ) - self.conn.commit() + conn.commit() return id_ def add_many( @@ -243,35 +241,36 @@ def add_many( # into problems later when we calculate the actual deadline lease_timeout = float(lease_timeout) ret_ids = [] - with self.conn.cursor() as cursor: - for task in tasks: - id_ = str(uuid4()) - - serialized_task = self._serialize(task) - - cursor.execute( - sql.SQL( - """ - INSERT INTO {} ( - id, - queue_name, - task, - ttl, - lease_timeout, - can_start_at - ) - VALUES ( - %s, %s, %s, %s, %s, COALESCE(%s, current_timestamp) + with self.pool.connection() as conn: + with conn.cursor() as cursor: + for task in tasks: + id_ = str(uuid4()) + + serialized_task = self._serialize(task) + + cursor.execute( + sql.SQL( + """ + INSERT INTO {} ( + id, + queue_name, + task, + ttl, + lease_timeout, + can_start_at + ) + VALUES ( + %s, %s, %s, %s, %s, COALESCE(%s, current_timestamp) + ) + """ + ).format(sql.Identifier(self._table_name)), + ( + id_, self._queue_name, serialized_task, + ttl, lease_timeout, can_start_at + ), ) - """ - ).format(sql.Identifier(self._table_name)), - ( - id_, self._queue_name, serialized_task, - ttl, lease_timeout, can_start_at - ), - ) - ret_ids.append(id_) - self.conn.commit() + ret_ids.append(id_) + conn.commit() return ret_ids def get(self) -> Tuple[ @@ -314,41 +313,41 @@ def get(self) -> Tuple[ empty """ - conn = self.conn - - with conn.cursor() as cur: - cur.execute( - sql.SQL( - """ - UPDATE {} - SET started_at = current_timestamp - WHERE id = ( - SELECT id - FROM {} - WHERE completed_at IS NULL - AND started_at IS NULL - AND queue_name = %s - AND ttl > 0 - AND can_start_at <= current_timestamp - ORDER BY can_start_at - FOR UPDATE SKIP LOCKED - LIMIT 1 + with self.pool.connection() as conn: + with conn.cursor() as cur: + cur.execute( + sql.SQL( + """ + UPDATE {} + SET started_at = current_timestamp + WHERE id = ( + SELECT id + FROM {} + WHERE completed_at IS NULL + AND started_at IS NULL + AND queue_name = %s + AND ttl > 0 + AND can_start_at <= current_timestamp + ORDER BY can_start_at + FOR UPDATE SKIP LOCKED + LIMIT 1 + ) + RETURNING id, task;""" + ).format( + sql.Identifier(self._table_name), + sql.Identifier(self._table_name), + ), + (self._queue_name,), ) - RETURNING id, task;""" - ).format( - sql.Identifier(self._table_name), - sql.Identifier(self._table_name), - ), - (self._queue_name,), - ) - row = cur.fetchone() - conn.commit() - if row is None: - return None, None, None - task_id, task = row - logger.info(f"Got task with id {task_id}") - return task, task_id, self._queue_name + row = cur.fetchone() + conn.commit() + if row is None: + return None, None, None + + task_id, task = row + logger.info(f"Got task with id {task_id}") + return task, task_id, self._queue_name def get_many(self, amount: int) -> Sequence[ Tuple[Optional[Dict[str, Any]], Optional[UUID], Optional[str]], @@ -368,39 +367,38 @@ def get_many(self, amount: int) -> Sequence[ The tasks and their IDs, and the queue_name """ - conn = self.conn - - with conn.cursor() as cur: - cur.execute( - sql.SQL( - """ - UPDATE {} - SET started_at = current_timestamp - WHERE id IN ( - SELECT id - FROM {} - WHERE completed_at IS NULL - AND started_at IS NULL - AND queue_name = %s - AND ttl > 0 - AND can_start_at <= current_timestamp - ORDER BY can_start_at - FOR UPDATE SKIP LOCKED - LIMIT %s + with self.pool.connection() as conn: + with conn.cursor() as cur: + cur.execute( + sql.SQL( + """ + UPDATE {} + SET started_at = current_timestamp + WHERE id IN ( + SELECT id + FROM {} + WHERE completed_at IS NULL + AND 
started_at IS NULL + AND queue_name = %s + AND ttl > 0 + AND can_start_at <= current_timestamp + ORDER BY can_start_at + FOR UPDATE SKIP LOCKED + LIMIT %s + ) + RETURNING task, id;""" + ).format( + sql.Identifier(self._table_name), + sql.Identifier(self._table_name), + ), + (self._queue_name, amount), ) - RETURNING task, id;""" - ).format( - sql.Identifier(self._table_name), - sql.Identifier(self._table_name), - ), - (self._queue_name, amount), - ) - ret = [] - for task, task_id in cur.fetchall(): - logger.info(f"Got task with id {task_id}") - ret.append((task, task_id, self._queue_name,)) - conn.commit() + ret = [] + for task, task_id in cur.fetchall(): + logger.info(f"Got task with id {task_id}") + ret.append((task, task_id, self._queue_name,)) + conn.commit() return ret def complete(self, task_id: Optional[UUID]) -> int: @@ -424,24 +422,24 @@ def complete(self, task_id: Optional[UUID]) -> int: """ logger.info(f"Marking task {task_id} as completed") - conn = self.conn count = 0 - with conn.cursor() as cur: - cur.execute( - sql.SQL( - """ - UPDATE {} - SET completed_at = current_timestamp - WHERE id = %s - AND completed_at is NULL""" - ).format(sql.Identifier(self._table_name)), - (task_id,), - ) - count = cur.rowcount - if count == 0: - logger.info(f"Task {task_id} was already completed") + with self.pool.connection() as conn: + with conn.cursor() as cur: + cur.execute( + sql.SQL( + """ + UPDATE {} + SET completed_at = current_timestamp + WHERE id = %s + AND completed_at is NULL""" + ).format(sql.Identifier(self._table_name)), + (task_id,), + ) + count = cur.rowcount + if count == 0: + logger.info(f"Task {task_id} was already completed") - conn.commit() + conn.commit() return count def is_empty(self) -> bool: @@ -479,55 +477,57 @@ def check_expired_leases(self) -> None: """ # goes through all the tasks that are marked as started # and check the ones with expired timeout - with self.conn.cursor() as cur: - cur.execute( - sql.SQL( - """ - SELECT id - FROM {} - WHERE completed_at IS NULL - AND started_at IS NOT NULL - AND queue_name = %s - AND ( - started_at + (lease_timeout || ' seconds')::INTERVAL - ) < current_timestamp - ORDER BY can_start_at; - """ - ).format(sql.Identifier(self._table_name)), - (self._queue_name,), - ) - expired_tasks = cur.fetchall() - self.conn.commit() - logger.debug(f"Expired tasks {expired_tasks}") - for row in expired_tasks: - task_id: UUID = row[0] - logger.debug(f"Got expired task with id {task_id}") - task, ttl = self.get_updated_expired_task(task_id) - - if ttl is None: - # race condition! between the time we got `key` from the - # set of tasks (this outer loop) and the time we tried - # to get that task from the queue, it has been completed - # and therefore deleted from the queue. In this case - # tasks is None and we can continue - logger.info( - f"Task {task_id} was marked completed while we " - "checked for expired leases, nothing to do." 
+ with self.pool.connection() as conn: + with conn.cursor() as cur: + cur.execute( + sql.SQL( + """ + SELECT id + FROM {} + WHERE completed_at IS NULL + AND started_at IS NOT NULL + AND queue_name = %s + AND ( + started_at + (lease_timeout || ' seconds')::INTERVAL + ) < current_timestamp + ORDER BY can_start_at; + """ + ).format(sql.Identifier(self._table_name)), + (self._queue_name,), ) - continue + expired_tasks = cur.fetchall() + conn.commit() + logger.debug(f"Expired tasks {expired_tasks}") + + for row in expired_tasks: + task_id: UUID = row[0] + logger.debug(f"Got expired task with id {task_id}") + task, ttl = self.get_updated_expired_task(task_id) + + if ttl is None: + # race condition! between the time we got `key` from the + # set of tasks (this outer loop) and the time we tried + # to get that task from the queue, it has been completed + # and therefore deleted from the queue. In this case + # tasks is None and we can continue + logger.info( + f"Task {task_id} was marked completed while we " + "checked for expired leases, nothing to do." + ) + continue - if ttl <= 0: - logger.error( - f"Job {task} with id {task_id} " - "failed too many times, marking it as completed." - ) - # # here committing to release the previous update lock - self.conn.commit() - self.complete(task_id) + if ttl <= 0: + logger.error( + f"Job {task} with id {task_id} " + "failed too many times, marking it as completed." + ) + # # here committing to release the previous update lock + conn.commit() + self.complete(task_id) - if self.ttl_zero_callback: - self.ttl_zero_callback(task_id, task) - self.conn.commit() + if self.ttl_zero_callback: + self.ttl_zero_callback(task_id, task) + conn.commit() def get_updated_expired_task( self, task_id: UUID @@ -545,42 +545,43 @@ def get_updated_expired_task( task_id after it's rescheduled """ - with self.conn.cursor() as cur: - cur.execute( - sql.SQL( - """ - UPDATE {} - SET ttl = ttl - 1, - started_at = NULL - WHERE id = ( - SELECT id - FROM {} - WHERE completed_at IS NULL - AND started_at IS NOT NULL - AND queue_name = %s - AND id = %s - FOR UPDATE SKIP LOCKED - LIMIT 1 + with self.pool.connection() as conn: + with conn.cursor() as cur: + cur.execute( + sql.SQL( + """ + UPDATE {} + SET ttl = ttl - 1, + started_at = NULL + WHERE id = ( + SELECT id + FROM {} + WHERE completed_at IS NULL + AND started_at IS NOT NULL + AND queue_name = %s + AND id = %s + FOR UPDATE SKIP LOCKED + LIMIT 1 + ) + RETURNING task, ttl; + """ + ).format( + sql.Identifier(self._table_name), + sql.Identifier(self._table_name), + ), + ( + self._queue_name, + task_id, + ), ) - RETURNING task, ttl; - """ - ).format( - sql.Identifier(self._table_name), - sql.Identifier(self._table_name), - ), - ( - self._queue_name, - task_id, - ), - ) - updated_row = cur.fetchone() + updated_row = cur.fetchone() + conn.commit() + if updated_row is None: + return None, None - if updated_row is None: - return None, None - - task, ttl = updated_row - task = self._serialize(task) - return task, ttl + task, ttl = updated_row + task = self._serialize(task) + return task, ttl def _serialize(self, task: Any) -> str: return json.dumps(task, sort_keys=True) @@ -626,45 +627,44 @@ def reschedule( if decrease_ttl: decrease_ttl_sql = "ttl = ttl - 1," - conn = self.conn - with conn.cursor() as cur: - cur.execute( - sql.SQL( - """ - UPDATE {} - SET {} started_at = NULL - WHERE id = ( - SELECT id - FROM {} - WHERE started_at IS NOT NULL - AND id = %s - FOR UPDATE SKIP LOCKED + with self.pool.connection() as conn: + with conn.cursor() as 
cur: + cur.execute( + sql.SQL( + """ + UPDATE {} + SET {} started_at = NULL + WHERE id = ( + SELECT id + FROM {} + WHERE started_at IS NOT NULL + AND id = %s + FOR UPDATE SKIP LOCKED + ) + RETURNING id;""" + ).format( + sql.Identifier(self._table_name), + sql.SQL(decrease_ttl_sql), + sql.Identifier(self._table_name), + ), + (task_id,), ) - RETURNING id;""" - ).format( - sql.Identifier(self._table_name), - sql.SQL(decrease_ttl_sql), - sql.Identifier(self._table_name), - ), - (task_id,), - ) - found = cur.fetchone() - conn.commit() - if found is None: - raise ValueError(f"Task {task_id} does not exist.") + found = cur.fetchone() + conn.commit() + if found is None: + raise ValueError(f"Task {task_id} does not exist.") def _reset(self) -> None: """Delete all tasks in the DB with our queue name.""" - with self.conn.cursor() as cursor: - cursor.execute( + with self.pool.connection() as conn: + conn.execute( sql.SQL("DELETE FROM {} WHERE queue_name = %s ").format( sql.Identifier(self._table_name) ), (self._queue_name,), ) - - self.conn.commit() + conn.commit() def prune_completed_tasks(self, before: int) -> None: """Delete all completed tasks older than the given number of seconds. @@ -680,21 +680,22 @@ def prune_completed_tasks(self, before: int) -> None: logger.info(f"Pruning all tasks completed more than " f"{before} second(s) ago.") - with self.conn.cursor() as cursor: - cursor.execute( - sql.SQL( - """ - DELETE FROM {} - WHERE queue_name = %s - AND completed_at IS NOT NULL - AND completed_at < current_timestamp - CAST( - %s || ' seconds' AS INTERVAL); - """ - ).format(sql.Identifier(self._table_name)), - (self._queue_name, before), - ) + with self.pool.connection() as conn: + with conn.cursor() as cursor: + cursor.execute( + sql.SQL( + """ + DELETE FROM {} + WHERE queue_name = %s + AND completed_at IS NOT NULL + AND completed_at < current_timestamp - CAST( + %s || ' seconds' AS INTERVAL); + """ + ).format(sql.Identifier(self._table_name)), + (self._queue_name, before), + ) - self.conn.commit() + conn.commit() def __iter__( self, diff --git a/pyproject.toml b/pyproject.toml index f471261..8e6aa1f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,6 +25,7 @@ authors = [ ] dependencies = [ "psycopg[binary]>=3.1.12", + "psycopg-pool>=3.1.12", ] requires-python = ">=3.9" readme = "README.md" From 7fc4250c61439bbef0f9226923c6a228f0022b1e Mon Sep 17 00:00:00 2001 From: Sourour Benzarti Date: Sun, 5 Jan 2025 22:12:33 +0100 Subject: [PATCH 03/11] remove max_size --- postgrestq/task_queue.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgrestq/task_queue.py b/postgrestq/task_queue.py index 1c6fb53..a82f0f7 100644 --- a/postgrestq/task_queue.py +++ b/postgrestq/task_queue.py @@ -83,7 +83,7 @@ def connect(self) -> None: """ # if self.conn is None or self.conn.closed: # self.conn = self.get_connection() - self.pool = ConnectionPool(self._dsn, open=True, min_size=1, max_size=1) + self.pool = ConnectionPool(self._dsn, open=True, min_size=1) # This will block the use of the pool until min_size connections # have been acquired self.pool.wait() From b820c8d09834aaa6f3ffc007ea2fdebcff1dd60d Mon Sep 17 00:00:00 2001 From: Sourour Benzarti Date: Mon, 6 Jan 2025 18:36:21 +0100 Subject: [PATCH 04/11] fix unit tests --- tests/test_task_queue.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_task_queue.py b/tests/test_task_queue.py index 98a277e..e9fca75 100644 --- a/tests/test_task_queue.py +++ b/tests/test_task_queue.py @@ -82,7 +82,7 @@ def 
test_is_empty(task_queue: TaskQueue):
 
 def test_complete(task_queue: TaskQueue):
     # boring case
-    task_queue.add({"foo": 1}, LEASE_TIMEOUT, ttl=1)
+    task_queue.add({"foo": 1}, LEASE_TIMEOUT + 0.1, ttl=1)
     _, id_, qname = task_queue.get()
     assert not task_queue.is_empty()
     assert qname == "test_queue"
@@ -100,7 +100,7 @@ def test_complete(task_queue: TaskQueue):
 
 
 def test_expired(task_queue: TaskQueue):
-    task_queue.add({"foo": 1}, LEASE_TIMEOUT, ttl=1)
+    task_queue.add({"foo": 1}, LEASE_TIMEOUT + 0.1, ttl=1)
     task_queue.get()
     assert not task_queue.is_empty()
     time.sleep(LEASE_TIMEOUT + 0.1)
@@ -113,7 +113,7 @@ def test_expired(task_queue: TaskQueue):
     while not task_queue.is_empty():
         task_queue.get()
     tend = time.time()
-    assert tend - tstart > LEASE_TIMEOUT
+    assert tend - tstart > LEASE_TIMEOUT * 5
 
 
 def test_ttl(task_queue: TaskQueue, caplog: LogCaptureFixture):

From 66f494c490be755446b172aa740cf5bb69059b43 Mon Sep 17 00:00:00 2001
From: Sourour Benzarti
Date: Mon, 6 Jan 2025 18:40:33 +0100
Subject: [PATCH 05/11] remove unnecessary conn.commit() calls

---
 postgrestq/task_queue.py | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/postgrestq/task_queue.py b/postgrestq/task_queue.py
index a82f0f7..7969516 100644
--- a/postgrestq/task_queue.py
+++ b/postgrestq/task_queue.py
@@ -78,15 +78,14 @@ def __init__(
 
     def connect(self) -> None:
         """
-        Establish a connection to Postgres.
-        If a connection already exists, it's overwritten.
+        Creates a ConnectionPool and waits until a connection to Postgres is
+        established.
         """
-        # if self.conn is None or self.conn.closed:
-        #     self.conn = self.get_connection()
-        self.pool = ConnectionPool(self._dsn, open=True, min_size=1)
+        self.pool = ConnectionPool(self._dsn, open=True, min_size=2)
         # This will block the use of the pool until min_size connections
         # have been acquired
         self.pool.wait()
+        logger.info("ConnectionPool is ready")
 
     def _create_queue_table(self) -> None:
         """
@@ -439,7 +438,7 @@ def complete(self, task_id: Optional[UUID]) -> int:
                 if count == 0:
                     logger.info(f"Task {task_id} was already completed")
 
-                conn.commit()
+            conn.commit()
         return count
 
     def is_empty(self) -> bool:
@@ -575,7 +574,6 @@ def get_updated_expired_task(
                     ),
                 )
                 updated_row = cur.fetchone()
-                conn.commit()
 
                 if updated_row is None:
                     return None, None

From 2ac20934c002d10bbd5b478b1df95dd518d5e652 Mon Sep 17 00:00:00 2001
From: Sourour Benzarti
Date: Mon, 6 Jan 2025 18:54:33 +0100
Subject: [PATCH 06/11] fix test_expired

---
 tests/test_task_queue.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/test_task_queue.py b/tests/test_task_queue.py
index e9fca75..f07f672 100644
--- a/tests/test_task_queue.py
+++ b/tests/test_task_queue.py
@@ -113,7 +113,7 @@ def test_expired(task_queue: TaskQueue):
     while not task_queue.is_empty():
         task_queue.get()
     tend = time.time()
-    assert tend - tstart > LEASE_TIMEOUT * 5
+    assert tend - tstart > LEASE_TIMEOUT
 
 
 def test_ttl(task_queue: TaskQueue, caplog: LogCaptureFixture):

From c882348c5cafc64f240d2e56a1a54df8d4ee0c52 Mon Sep 17 00:00:00 2001
From: Sourour Benzarti
Date: Mon, 6 Jan 2025 18:59:27 +0100
Subject: [PATCH 07/11] linting

---
 postgrestq/task_queue.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/postgrestq/task_queue.py b/postgrestq/task_queue.py
index 7969516..e5641e9 100644
--- a/postgrestq/task_queue.py
+++ b/postgrestq/task_queue.py
@@ -191,7 +191,8 @@ def add(
                         lease_timeout,
                         can_start_at
                     )
-                    VALUES (%s, %s, %s, %s, %s, COALESCE(%s, current_timestamp))
+                    VALUES (%s, %s, %s, %s, %s,
+                    COALESCE(%s,
current_timestamp))
                     """
                 ).format(sql.Identifier(self._table_name)),
                 (
@@ -487,7 +488,8 @@ def check_expired_leases(self) -> None:
                         AND started_at IS NOT NULL
                         AND queue_name = %s
                         AND (
-                            started_at + (lease_timeout || ' seconds')::INTERVAL
+                            started_at +
+                            (lease_timeout || ' seconds')::INTERVAL
                         ) < current_timestamp
                         ORDER BY can_start_at;
                     """

From 4738b7e6eaf4a66e04080795f547009a2572f4a0 Mon Sep 17 00:00:00 2001
From: Sourour Benzarti
Date: Wed, 8 Jan 2025 12:09:58 +0100
Subject: [PATCH 08/11] increase the LEASE_TIMEOUT in the tests

---
 tests/test_task_queue.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/tests/test_task_queue.py b/tests/test_task_queue.py
index f07f672..69868c7 100644
--- a/tests/test_task_queue.py
+++ b/tests/test_task_queue.py
@@ -21,7 +21,10 @@
 POSTGRES_DSN = os.environ.get(
     "POSTGRES_DSN", "postgresql://postgres:password@localhost:15432/postgres"
 ) # noqa
-LEASE_TIMEOUT = 0.1
+# We set the lease timeout to 2 seconds because, if the database is slow,
+# the timeout would be reached while we are still getting the tasks and
+# the tests would fail.
+LEASE_TIMEOUT = 2
 
 logger = logging.getLogger(__name__)
 
@@ -82,7 +85,7 @@ def test_is_empty(task_queue: TaskQueue):
 
 def test_complete(task_queue: TaskQueue):
     # boring case
-    task_queue.add({"foo": 1}, LEASE_TIMEOUT + 0.1, ttl=1)
+    task_queue.add({"foo": 1}, LEASE_TIMEOUT, ttl=1)
     _, id_, qname = task_queue.get()
     assert not task_queue.is_empty()
     assert qname == "test_queue"
@@ -100,7 +103,7 @@ def test_complete(task_queue: TaskQueue):
 
 
 def test_expired(task_queue: TaskQueue):
-    task_queue.add({"foo": 1}, LEASE_TIMEOUT + 0.1, ttl=1)
+    task_queue.add({"foo": 1}, LEASE_TIMEOUT, ttl=1)
     task_queue.get()
     assert not task_queue.is_empty()
     time.sleep(LEASE_TIMEOUT + 0.1)

From a85e2fac9cbc82917dd88f796576da97ebb6dffa Mon Sep 17 00:00:00 2001
From: Sourour Benzarti
Date: Wed, 8 Jan 2025 12:11:41 +0100
Subject: [PATCH 09/11] update supported Python versions: drop 3.8, add 3.12

---
 .github/workflows/python-package.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/python-package.yaml b/.github/workflows/python-package.yaml
index f7d3523..9396bda 100644
--- a/.github/workflows/python-package.yaml
+++ b/.github/workflows/python-package.yaml
@@ -26,11 +26,11 @@ jobs:
       fail-fast: false
       matrix:
         python-version:
-          - 3.8
           - 3.9
           # YAML, yay https://github.com/actions/runner/issues/1989
           - '3.10'
           - 3.11
+          - 3.12
 
     steps:
       - uses: actions/checkout@v3

From 0d78c1e0418c0e307996806f178c35415f239fcd Mon Sep 17 00:00:00 2001
From: Sourour Benzarti
Date: Wed, 8 Jan 2025 12:12:16 +0100
Subject: [PATCH 10/11] update pdm.lock

---
 pdm.lock | 153 ++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 90 insertions(+), 63 deletions(-)

diff --git a/pdm.lock b/pdm.lock
index 854ca0d..e66b6f5 100644
--- a/pdm.lock
+++ b/pdm.lock
@@ -3,9 +3,12 @@
 
 [metadata]
 groups = ["default", "lint", "mypy", "test"]
-strategy = ["cross_platform"]
-lock_version = "4.4.2"
-content_hash = "sha256:4f11b411e3dd76b58bce4754af1344f6cf39cfe87bb0d5c28598e43f223e1e93"
+strategy = []
+lock_version = "4.5.0"
+content_hash = "sha256:0f3d39ed32ccb1f17df827641c31dceaa26bb034fbb04c60dc0bbbd36c4b4fec"
+
+[[metadata.targets]]
+requires_python = ">=3.9"
 
 [[package]]
 name = "colorama"
@@ -129,83 +132,107 @@ files = [
 
 [[package]]
 name = "psycopg"
-version = "3.1.18"
-requires_python = ">=3.7"
+version = "3.2.3"
+requires_python = ">=3.8"
 summary = "PostgreSQL database adapter for Python"
 dependencies = [
"backports-zoneinfo>=0.2.0; python_version < \"3.9\"", + "typing-extensions>=4.6; python_version < \"3.13\"", "tzdata; sys_platform == \"win32\"", ] files = [ - {file = "psycopg-3.1.18-py3-none-any.whl", hash = "sha256:4d5a0a5a8590906daa58ebd5f3cfc34091377354a1acced269dd10faf55da60e"}, - {file = "psycopg-3.1.18.tar.gz", hash = "sha256:31144d3fb4c17d78094d9e579826f047d4af1da6a10427d91dfcfb6ecdf6f12b"}, + {file = "psycopg-3.2.3-py3-none-any.whl", hash = "sha256:644d3973fe26908c73d4be746074f6e5224b03c1101d302d9a53bf565ad64907"}, + {file = "psycopg-3.2.3.tar.gz", hash = "sha256:a5764f67c27bec8bfac85764d23c534af2c27b893550377e37ce59c12aac47a2"}, ] [[package]] name = "psycopg-binary" -version = "3.1.18" -requires_python = ">=3.7" +version = "3.2.3" +requires_python = ">=3.8" summary = "PostgreSQL database adapter for Python -- C optimisation distribution" files = [ - {file = "psycopg_binary-3.1.18-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5c323103dfa663b88204cf5f028e83c77d7a715f9b6f51d2bbc8184b99ddd90a"}, - {file = "psycopg_binary-3.1.18-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:887f8d856c91510148be942c7acd702ccf761a05f59f8abc123c22ab77b5a16c"}, - {file = "psycopg_binary-3.1.18-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d322ba72cde4ca2eefc2196dad9ad7e52451acd2f04e3688d590290625d0c970"}, - {file = "psycopg_binary-3.1.18-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:489aa4fe5a0b653b68341e9e44af247dedbbc655326854aa34c163ef1bcb3143"}, - {file = "psycopg_binary-3.1.18-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55ff0948457bfa8c0d35c46e3a75193906d1c275538877ba65907fd67aa059ad"}, - {file = "psycopg_binary-3.1.18-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b15e3653c82384b043d820fc637199b5c6a36b37fa4a4943e0652785bb2bad5d"}, - {file = "psycopg_binary-3.1.18-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f8ff3bc08b43f36fdc24fedb86d42749298a458c4724fb588c4d76823ac39f54"}, - {file = "psycopg_binary-3.1.18-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:1729d0e3dfe2546d823841eb7a3d003144189d6f5e138ee63e5227f8b75276a5"}, - {file = "psycopg_binary-3.1.18-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:13bcd3742112446037d15e360b27a03af4b5afcf767f5ee374ef8f5dd7571b31"}, - {file = "psycopg_binary-3.1.18-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:320047e3d3554b857e16c2b6b615a85e0db6a02426f4d203a4594a2f125dfe57"}, - {file = "psycopg_binary-3.1.18-cp310-cp310-win_amd64.whl", hash = "sha256:888a72c2aca4316ca6d4a619291b805677bae99bba2f6e31a3c18424a48c7e4d"}, - {file = "psycopg_binary-3.1.18-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4e4de16a637ec190cbee82e0c2dc4860fed17a23a35f7a1e6dc479a5c6876722"}, - {file = "psycopg_binary-3.1.18-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6432047b8b24ef97e3fbee1d1593a0faaa9544c7a41a2c67d1f10e7621374c83"}, - {file = "psycopg_binary-3.1.18-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d684227ef8212e27da5f2aff9d4d303cc30b27ac1702d4f6881935549486dd5"}, - {file = "psycopg_binary-3.1.18-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:67284e2e450dc7a9e4d76e78c0bd357dc946334a3d410defaeb2635607f632cd"}, - {file = "psycopg_binary-3.1.18-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c9b6bd7fb5c6638cb32469674707649b526acfe786ba6d5a78ca4293d87bae4"}, - {file = 
"psycopg_binary-3.1.18-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7121acc783c4e86d2d320a7fb803460fab158a7f0a04c5e8c5d49065118c1e73"}, - {file = "psycopg_binary-3.1.18-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e28ff8f3de7b56588c2a398dc135fd9f157d12c612bd3daa7e6ba9872337f6f5"}, - {file = "psycopg_binary-3.1.18-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c84a0174109f329eeda169004c7b7ca2e884a6305acab4a39600be67f915ed38"}, - {file = "psycopg_binary-3.1.18-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:531381f6647fc267383dca88dbe8a70d0feff433a8e3d0c4939201fea7ae1b82"}, - {file = "psycopg_binary-3.1.18-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b293e01057e63c3ac0002aa132a1071ce0fdb13b9ee2b6b45d3abdb3525c597d"}, - {file = "psycopg_binary-3.1.18-cp311-cp311-win_amd64.whl", hash = "sha256:780a90bcb69bf27a8b08bc35b958e974cb6ea7a04cdec69e737f66378a344d68"}, - {file = "psycopg_binary-3.1.18-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:87dd9154b757a5fbf6d590f6f6ea75f4ad7b764a813ae04b1d91a70713f414a1"}, - {file = "psycopg_binary-3.1.18-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f876ebbf92db70125f6375f91ab4bc6b27648aa68f90d661b1fc5affb4c9731c"}, - {file = "psycopg_binary-3.1.18-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:258d2f0cb45e4574f8b2fe7c6d0a0e2eb58903a4fd1fbaf60954fba82d595ab7"}, - {file = "psycopg_binary-3.1.18-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd27f713f2e5ef3fd6796e66c1a5203a27a30ecb847be27a78e1df8a9a5ae68c"}, - {file = "psycopg_binary-3.1.18-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c38a4796abf7380f83b1653c2711cb2449dd0b2e5aca1caa75447d6fa5179c69"}, - {file = "psycopg_binary-3.1.18-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2f7f95746efd1be2dc240248cc157f4315db3fd09fef2adfcc2a76e24aa5741"}, - {file = "psycopg_binary-3.1.18-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4085f56a8d4fc8b455e8f44380705c7795be5317419aa5f8214f315e4205d804"}, - {file = "psycopg_binary-3.1.18-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:2e2484ae835dedc80cdc7f1b1a939377dc967fed862262cfd097aa9f50cade46"}, - {file = "psycopg_binary-3.1.18-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:3c2b039ae0c45eee4cd85300ef802c0f97d0afc78350946a5d0ec77dd2d7e834"}, - {file = "psycopg_binary-3.1.18-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f54978c4b646dec77fefd8485fa82ec1a87807f334004372af1aaa6de9539a5"}, - {file = "psycopg_binary-3.1.18-cp312-cp312-win_amd64.whl", hash = "sha256:9ffcbbd389e486d3fd83d30107bbf8b27845a295051ccabde240f235d04ed921"}, - {file = "psycopg_binary-3.1.18-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eea5f14933177ffe5c40b200f04f814258cc14b14a71024ad109f308e8bad414"}, - {file = "psycopg_binary-3.1.18-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:824a1bfd0db96cc6bef2d1e52d9e0963f5bf653dd5bc3ab519a38f5e6f21c299"}, - {file = "psycopg_binary-3.1.18-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a87e9eeb80ce8ec8c2783f29bce9a50bbcd2e2342a340f159c3326bf4697afa1"}, - {file = "psycopg_binary-3.1.18-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:91074f78a9f890af5f2c786691575b6b93a4967ad6b8c5a90101f7b8c1a91d9c"}, - {file = "psycopg_binary-3.1.18-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e05f6825f8db4428782135e6986fec79b139210398f3710ed4aa6ef41473c008"}, - {file = 
"psycopg_binary-3.1.18-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f68ac2364a50d4cf9bb803b4341e83678668f1881a253e1224574921c69868c"}, - {file = "psycopg_binary-3.1.18-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7ac1785d67241d5074f8086705fa68e046becea27964267ab3abd392481d7773"}, - {file = "psycopg_binary-3.1.18-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:cd2a9f7f0d4dacc5b9ce7f0e767ae6cc64153264151f50698898c42cabffec0c"}, - {file = "psycopg_binary-3.1.18-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:3e4b0bb91da6f2238dbd4fbb4afc40dfb4f045bb611b92fce4d381b26413c686"}, - {file = "psycopg_binary-3.1.18-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:74e498586b72fb819ca8ea82107747d0cb6e00ae685ea6d1ab3f929318a8ce2d"}, - {file = "psycopg_binary-3.1.18-cp39-cp39-win_amd64.whl", hash = "sha256:d4422af5232699f14b7266a754da49dc9bcd45eba244cf3812307934cd5d6679"}, + {file = "psycopg_binary-3.2.3-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:965455eac8547f32b3181d5ec9ad8b9be500c10fe06193543efaaebe3e4ce70c"}, + {file = "psycopg_binary-3.2.3-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:71adcc8bc80a65b776510bc39992edf942ace35b153ed7a9c6c573a6849ce308"}, + {file = "psycopg_binary-3.2.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f73adc05452fb85e7a12ed3f69c81540a8875960739082e6ea5e28c373a30774"}, + {file = "psycopg_binary-3.2.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8630943143c6d6ca9aefc88bbe5e76c90553f4e1a3b2dc339e67dc34aa86f7e"}, + {file = "psycopg_binary-3.2.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bffb61e198a91f712cc3d7f2d176a697cb05b284b2ad150fb8edb308eba9002"}, + {file = "psycopg_binary-3.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc4fa2240c9fceddaa815a58f29212826fafe43ce80ff666d38c4a03fb036955"}, + {file = "psycopg_binary-3.2.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:192a5f8496e6e1243fdd9ac20e117e667c0712f148c5f9343483b84435854c78"}, + {file = "psycopg_binary-3.2.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:64dc6e9ec64f592f19dc01a784e87267a64a743d34f68488924251253da3c818"}, + {file = "psycopg_binary-3.2.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:79498df398970abcee3d326edd1d4655de7d77aa9aecd578154f8af35ce7bbd2"}, + {file = "psycopg_binary-3.2.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:949551752930d5e478817e0b49956350d866b26578ced0042a61967e3fcccdea"}, + {file = "psycopg_binary-3.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:80a2337e2dfb26950894c8301358961430a0304f7bfe729d34cc036474e9c9b1"}, + {file = "psycopg_binary-3.2.3-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:6d8f2144e0d5808c2e2aed40fbebe13869cd00c2ae745aca4b3b16a435edb056"}, + {file = "psycopg_binary-3.2.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:94253be2b57ef2fea7ffe08996067aabf56a1eb9648342c9e3bad9e10c46e045"}, + {file = "psycopg_binary-3.2.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fda0162b0dbfa5eaed6cdc708179fa27e148cb8490c7d62e5cf30713909658ea"}, + {file = "psycopg_binary-3.2.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2c0419cdad8c70eaeb3116bb28e7b42d546f91baf5179d7556f230d40942dc78"}, + {file = "psycopg_binary-3.2.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:74fbf5dd3ef09beafd3557631e282f00f8af4e7a78fbfce8ab06d9cd5a789aae"}, + {file = 
"psycopg_binary-3.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d784f614e4d53050cbe8abf2ae9d1aaacf8ed31ce57b42ce3bf2a48a66c3a5c"}, + {file = "psycopg_binary-3.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4e76ce2475ed4885fe13b8254058be710ec0de74ebd8ef8224cf44a9a3358e5f"}, + {file = "psycopg_binary-3.2.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5938b257b04c851c2d1e6cb2f8c18318f06017f35be9a5fe761ee1e2e344dfb7"}, + {file = "psycopg_binary-3.2.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:257c4aea6f70a9aef39b2a77d0658a41bf05c243e2bf41895eb02220ac6306f3"}, + {file = "psycopg_binary-3.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:06b5cc915e57621eebf2393f4173793ed7e3387295f07fed93ed3fb6a6ccf585"}, + {file = "psycopg_binary-3.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:09baa041856b35598d335b1a74e19a49da8500acedf78164600694c0ba8ce21b"}, + {file = "psycopg_binary-3.2.3-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:48f8ca6ee8939bab760225b2ab82934d54330eec10afe4394a92d3f2a0c37dd6"}, + {file = "psycopg_binary-3.2.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:5361ea13c241d4f0ec3f95e0bf976c15e2e451e9cc7ef2e5ccfc9d170b197a40"}, + {file = "psycopg_binary-3.2.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb987f14af7da7c24f803111dbc7392f5070fd350146af3345103f76ea82e339"}, + {file = "psycopg_binary-3.2.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0463a11b1cace5a6aeffaf167920707b912b8986a9c7920341c75e3686277920"}, + {file = "psycopg_binary-3.2.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8b7be9a6c06518967b641fb15032b1ed682fd3b0443f64078899c61034a0bca6"}, + {file = "psycopg_binary-3.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64a607e630d9f4b2797f641884e52b9f8e239d35943f51bef817a384ec1678fe"}, + {file = "psycopg_binary-3.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:fa33ead69ed133210d96af0c63448b1385df48b9c0247eda735c5896b9e6dbbf"}, + {file = "psycopg_binary-3.2.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:1f8b0d0e99d8e19923e6e07379fa00570be5182c201a8c0b5aaa9a4d4a4ea20b"}, + {file = "psycopg_binary-3.2.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:709447bd7203b0b2debab1acec23123eb80b386f6c29e7604a5d4326a11e5bd6"}, + {file = "psycopg_binary-3.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5e37d5027e297a627da3551a1e962316d0f88ee4ada74c768f6c9234e26346d9"}, + {file = "psycopg_binary-3.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:261f0031ee6074765096a19b27ed0f75498a8338c3dcd7f4f0d831e38adf12d1"}, + {file = "psycopg_binary-3.2.3-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:41fdec0182efac66b27478ac15ef54c9ebcecf0e26ed467eb7d6f262a913318b"}, + {file = "psycopg_binary-3.2.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:07d019a786eb020c0f984691aa1b994cb79430061065a694cf6f94056c603d26"}, + {file = "psycopg_binary-3.2.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c57615791a337378fe5381143259a6c432cdcbb1d3e6428bfb7ce59fff3fb5c"}, + {file = "psycopg_binary-3.2.3-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8eb9a4e394926b93ad919cad1b0a918e9b4c846609e8c1cfb6b743683f64da0"}, + {file = "psycopg_binary-3.2.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5905729668ef1418bd36fbe876322dcb0f90b46811bba96d505af89e6fbdce2f"}, + {file = 
"psycopg_binary-3.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd65774ed7d65101b314808b6893e1a75b7664f680c3ef18d2e5c84d570fa393"}, + {file = "psycopg_binary-3.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:700679c02f9348a0d0a2adcd33a0275717cd0d0aee9d4482b47d935023629505"}, + {file = "psycopg_binary-3.2.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:96334bb64d054e36fed346c50c4190bad9d7c586376204f50bede21a913bf942"}, + {file = "psycopg_binary-3.2.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:9099e443d4cc24ac6872e6a05f93205ba1a231b1a8917317b07c9ef2b955f1f4"}, + {file = "psycopg_binary-3.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1985ab05e9abebfbdf3163a16ebb37fbc5d49aff2bf5b3d7375ff0920bbb54cd"}, + {file = "psycopg_binary-3.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:e90352d7b610b4693fad0feea48549d4315d10f1eba5605421c92bb834e90170"}, + {file = "psycopg_binary-3.2.3-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:9994f7db390c17fc2bd4c09dca722fd792ff8a49bb3bdace0c50a83f22f1767d"}, + {file = "psycopg_binary-3.2.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1303bf8347d6be7ad26d1362af2c38b3a90b8293e8d56244296488ee8591058e"}, + {file = "psycopg_binary-3.2.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:842da42a63ecb32612bb7f5b9e9f8617eab9bc23bd58679a441f4150fcc51c96"}, + {file = "psycopg_binary-3.2.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2bb342a01c76f38a12432848e6013c57eb630103e7556cf79b705b53814c3949"}, + {file = "psycopg_binary-3.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd40af959173ea0d087b6b232b855cfeaa6738f47cb2a0fd10a7f4fa8b74293f"}, + {file = "psycopg_binary-3.2.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9b60b465773a52c7d4705b0a751f7f1cdccf81dd12aee3b921b31a6e76b07b0e"}, + {file = "psycopg_binary-3.2.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:fc6d87a1c44df8d493ef44988a3ded751e284e02cdf785f746c2d357e99782a6"}, + {file = "psycopg_binary-3.2.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:f0b018e37608c3bfc6039a1dc4eb461e89334465a19916be0153c757a78ea426"}, + {file = "psycopg_binary-3.2.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2a29f5294b0b6360bfda69653697eff70aaf2908f58d1073b0acd6f6ab5b5a4f"}, + {file = "psycopg_binary-3.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:e56b1fd529e5dde2d1452a7d72907b37ed1b4f07fdced5d8fb1e963acfff6749"}, +] + +[[package]] +name = "psycopg-pool" +version = "3.2.4" +requires_python = ">=3.8" +summary = "Connection Pool for Psycopg" +dependencies = [ + "typing-extensions>=4.6", +] +files = [ + {file = "psycopg_pool-3.2.4-py3-none-any.whl", hash = "sha256:f6a22cff0f21f06d72fb2f5cb48c618946777c49385358e0c88d062c59cbd224"}, + {file = "psycopg_pool-3.2.4.tar.gz", hash = "sha256:61774b5bbf23e8d22bedc7504707135aaf744679f8ef9b3fe29942920746a6ed"}, ] [[package]] name = "psycopg" -version = "3.1.18" +version = "3.2.3" extras = ["binary"] -requires_python = ">=3.7" +requires_python = ">=3.8" summary = "PostgreSQL database adapter for Python" dependencies = [ - "psycopg-binary==3.1.18; implementation_name != \"pypy\"", - "psycopg==3.1.18", + "psycopg-binary==3.2.3; implementation_name != \"pypy\"", + "psycopg==3.2.3", ] files = [ - {file = "psycopg-3.1.18-py3-none-any.whl", hash = "sha256:4d5a0a5a8590906daa58ebd5f3cfc34091377354a1acced269dd10faf55da60e"}, - {file = "psycopg-3.1.18.tar.gz", hash = 
"sha256:31144d3fb4c17d78094d9e579826f047d4af1da6a10427d91dfcfb6ecdf6f12b"}, + {file = "psycopg-3.2.3-py3-none-any.whl", hash = "sha256:644d3973fe26908c73d4be746074f6e5224b03c1101d302d9a53bf565ad64907"}, + {file = "psycopg-3.2.3.tar.gz", hash = "sha256:a5764f67c27bec8bfac85764d23c534af2c27b893550377e37ce59c12aac47a2"}, ] [[package]] @@ -268,10 +295,10 @@ files = [ [[package]] name = "tzdata" -version = "2023.3" +version = "2024.2" requires_python = ">=2" summary = "Provider of IANA time zone data" files = [ - {file = "tzdata-2023.3-py2.py3-none-any.whl", hash = "sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda"}, - {file = "tzdata-2023.3.tar.gz", hash = "sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a"}, + {file = "tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"}, + {file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"}, ] From 4f59051399b934b6dc04e2c65f51c2e68bf22c0f Mon Sep 17 00:00:00 2001 From: Sourour Benzarti Date: Wed, 8 Jan 2025 12:15:02 +0100 Subject: [PATCH 11/11] update the changelog + increase the version --- CHANGELOG.md | 4 ++++ pyproject.toml | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7fd81af..69448e5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ # Changelog +## 1.2.1 - 2025-01-08 + +* Use a ConnectionPool to connect to the Database instead of a single connection. + ## 1.2.0 - 2024-07-25 * The `reschedule()` method has an optional parameter to decrease the TTL if set to True (False by default). diff --git a/pyproject.toml b/pyproject.toml index 8e6aa1f..7ac51ea 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,7 +18,7 @@ log_cli_date_format = "%Y-%m-%d %H:%M:%S" [project] name = "postgres-tq" -version = "1.2.0" +version = "1.2.1" description = "Postgres Based Task Queue" authors = [ {name = "FlixTech", email = "open-source@flixbus.com"},