Phase 1: Add pgqueuer infrastructure
- Add pgqueuer dependency to pyproject.toml
- Create worker.py with schema installation and job handler registration
- Add `make worker` command to Makefile
- Update `make dev` to run the worker alongside backend/frontend
- Use has_table() check for idempotent schema installation
- Register 'random_number' job handler (placeholder that logs for now)
parent 15bae15731
commit 10c0316603

4 changed files with 55 additions and 44 deletions
@@ -11,4 +11,4 @@ Use the `TEST` variable to select specific tests:
 - Backend: `make test-backend TEST="tests/test_booking.py"` or `TEST="tests/test_booking.py::TestClass::test_method"`
 - Frontend: `make test-frontend TEST="app/login"` (file pattern)
 - E2E: `make test-e2e TEST="auth"` (matches e2e/auth.spec.ts)
-- Don't do `2>&1 | tail`. Let the output hit the console when running the tests.
+- Don't use `tail`. Let the output hit the console when running the tests.
Makefile

@@ -48,8 +48,8 @@ db-seed: db-ready
 dev:
 	$(MAKE) db-seed
 	cd backend && uv run uvicorn main:app --reload & \
-	cd frontend && npm run dev & \
+	cd backend && uv run python worker.py & \
+	cd frontend && npm run dev & \
 	wait
-
 
 # TEST variable can be used to select specific tests:
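The `make worker` command mentioned in the commit message is not visible in this hunk. A minimal recipe consistent with the `dev` target above might look like this (a sketch, not the committed version; the `db-ready` prerequisite is borrowed from the hunk context):

worker: db-ready
	cd backend && uv run python worker.py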
pyproject.toml

@@ -11,7 +11,7 @@ dependencies = [
     "python-jose[cryptography]>=3.3.0",
     "email-validator>=2.0.0",
     "bech32>=1.2.0",
-    "pgqueuer>=0.1.0",
+    "pgqueuer>=0.14.0",
 ]
 
 [dependency-groups]
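After this bump, `uv sync` re-resolves the dependency. A generic stdlib check (not part of this commit) confirms which pgqueuer version actually landed:

cd backend && uv run python -c "from importlib.metadata import version; print(version('pgqueuer'))"

The output should be 0.14.0 or newer.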
worker.py

@@ -1,62 +1,73 @@
-"""Worker process for processing async jobs using pgqueuer."""
+"""Background job worker using pgqueuer."""
 
 import asyncio
 import logging
 import os
 
 import asyncpg
-from pgqueuer import AsyncpgDriver, PgQueuer, Queries
+from pgqueuer import Job, QueueManager
+from pgqueuer.db import AsyncpgDriver
+from pgqueuer.queries import Queries
 
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
+logging.basicConfig(
+    level=logging.INFO,
+    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
+)
+logger = logging.getLogger("worker")
 
-# Get DATABASE_URL and convert from SQLAlchemy format (postgresql+asyncpg://)
-# to asyncpg format (postgresql://)
-_raw_url = os.getenv(
+# SQLAlchemy uses postgresql+asyncpg://, but asyncpg needs postgresql://
+_raw_db_url = os.getenv(
     "DATABASE_URL", "postgresql+asyncpg://postgres:postgres@localhost:5432/arbret"
 )
-DATABASE_URL = _raw_url.replace("postgresql+asyncpg://", "postgresql://")
+DATABASE_URL = _raw_db_url.replace("postgresql+asyncpg://", "postgresql://")
 
 
-async def main() -> None:
-    """Main worker loop."""
-    logger.info("Starting worker...")
-
-    # Connect to database independently
+async def install_schema() -> None:
+    """Install pgqueuer schema if not already present."""
     conn = await asyncpg.connect(DATABASE_URL)
-    driver = AsyncpgDriver(conn)
-
-    # Install pgqueuer schema (creates tables if they don't exist)
-    queries = Queries.from_asyncpg_connection(conn)
     try:
-        await queries.install()
-        logger.info("pgqueuer schema installed")
-    except Exception as e:
-        # Schema might already exist, which is fine
-        if "already exists" in str(e).lower():
-            logger.info("pgqueuer schema already exists")
-        else:
-            raise
-
-    # Initialize pgqueuer
-    pgq = PgQueuer(connection=driver)
-
-    # Register job handlers using entrypoint decorator
-    @pgq.entrypoint("dummy")  # type: ignore[type-var]
-    async def dummy_job_handler(payload: dict) -> None:
-        """Dummy job handler for testing."""
-        logger.info(f"Processing dummy job with payload: {payload}")
-
-    logger.info("Worker started, waiting for jobs...")
-
-    # Start consuming jobs
-    try:
-        await pgq.run()
-    except KeyboardInterrupt:
-        logger.info("Worker shutting down...")
+        queries = Queries.from_asyncpg_connection(conn)
+        # Check if schema is already installed by looking for the main table
+        if not await queries.has_table("pgqueuer"):
+            await queries.install()
+            logger.info("pgqueuer schema installed")
+        else:
+            logger.info("pgqueuer schema already exists")
     finally:
         await conn.close()
 
 
+def register_job_handlers(qm: QueueManager) -> None:
+    """Register all job handlers with the queue manager."""
+
+    @qm.entrypoint("random_number")
+    async def process_random_number(job: Job) -> None:
+        """Process a random number job (placeholder - just logs for now)."""
+        payload_str = job.payload.decode() if job.payload else ""
+        logger.info(f"Processing random_number job {job.id}: {payload_str}")
+
+
+async def main() -> None:
+    """Main worker entry point."""
+    logger.info("Installing pgqueuer schema...")
+    await install_schema()
+
+    logger.info("Connecting to database...")
+    conn = await asyncpg.connect(DATABASE_URL)
+
+    try:
+        driver = AsyncpgDriver(conn)
+        qm = QueueManager(driver)
+
+        # Register job handlers
+        register_job_handlers(qm)
+
+        logger.info("Worker started, waiting for jobs...")
+        await qm.run()
+    finally:
+        await conn.close()
+        logger.info("Worker stopped")
+
+
 if __name__ == "__main__":
     asyncio.run(main())
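One apparent design choice: install_schema() opens and closes its own short-lived connection before main() opens the long-lived one for the QueueManager, so a failed install never leaves the consumer holding a half-initialized session. Nothing in this commit enqueues jobs yet, so the 'random_number' handler sits idle. A throwaway producer sketch (not part of the commit; it assumes pgqueuer's Queries.enqueue(entrypoint, payload) API and hardcodes the default database URL from worker.py):

"""Producer sketch: enqueue one job for the 'random_number' handler."""

import asyncio

import asyncpg
from pgqueuer.queries import Queries

# Default from worker.py, already in asyncpg (not SQLAlchemy) form.
DATABASE_URL = "postgresql://postgres:postgres@localhost:5432/arbret"


async def enqueue_one() -> None:
    conn = await asyncpg.connect(DATABASE_URL)
    try:
        queries = Queries.from_asyncpg_connection(conn)
        # Payloads travel as raw bytes; the worker decodes them before logging.
        await queries.enqueue("random_number", b'{"max": 100}')
    finally:
        await conn.close()


if __name__ == "__main__":
    asyncio.run(enqueue_one())

With the worker running under `make dev`, the job should then show up in the worker log as something like: Processing random_number job <id>: {"max": 100}.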