LiKenun committed on
Commit ce323cf · 1 Parent(s): fafc982

Introduce dependency injection

src/ctp_slack_bot/api/main.py CHANGED
@@ -1,14 +1,15 @@
 from contextlib import asynccontextmanager
-from fastapi import FastAPI, HTTPException
+from fastapi import FastAPI, HTTPException, Depends
 from loguru import logger
-from typing import AsyncGenerator, Never
+from typing import AsyncGenerator
+from dependency_injector.wiring import inject, Provide
 
 from ctp_slack_bot.api.routes import router
-from ctp_slack_bot.core.config import Settings, settings
+from ctp_slack_bot.core.config import Settings
 from ctp_slack_bot.core.logging import setup_logging
 from ctp_slack_bot.core.response_rendering import PrettyJSONResponse
 from ctp_slack_bot.tasks.scheduler import start_scheduler, stop_scheduler
-
+from ctp_slack_bot.containers import Container
 
 @asynccontextmanager
 async def lifespan(app: FastAPI) -> AsyncGenerator:
@@ -16,20 +17,31 @@ async def lifespan(app: FastAPI) -> AsyncGenerator:
     Lifespan context manager for FastAPI application.
     Handles startup and shutdown events.
     """
-    # Setup logging
-    setup_logging()
+    # Initialize container and wire the container to modules that need dependency injection.
+    container = Container()
+    app.container = container
+    container.wire(
+        modules=[
+            "ctp_slack_bot.api.routes",
+            "ctp_slack_bot.services",
+            "ctp_slack_bot.tasks"
+        ]
+    )
+
+    # Setup logging.
+    setup_logging(container)
     logger.info("Starting application")
-
-    # Start scheduler
-    #scheduler = start_scheduler()
-    #logger.info("Started scheduler")
-
-    yield
-
-    # Shutdown
+
+    # Start the scheduler.
+    scheduler = start_scheduler(container)
+    logger.info("Started scheduler")
+
+    yield # control to FastAPI until shutdown.
+
+    # Shutdown.
     logger.info("Shutting down application")
-    #stop_scheduler(scheduler)
-    #logger.info("Stopped scheduler")
+    stop_scheduler(scheduler)
+    logger.info("Stopped scheduler")
 
 
 app = FastAPI(
@@ -39,30 +51,27 @@ app = FastAPI(
     lifespan=lifespan,
 )
 
-# Include routers
+# Include routers.
 app.include_router(router)
 
+# Provide a minimalist health check endpoint for clients to detect availability.
 @app.get("/health")
-async def health() -> dict[str, str]:
+async def get_health() -> dict[str, str]:
     """Health check"""
     return {
         "status": "healthy"
     }
 
-@app.get("/env", response_class=PrettyJSONResponse)
-async def env() -> Settings:
-    """Server-internal environment variables"""
-    if not settings.DEBUG:
-        raise HTTPException(status_code=404)
-    return settings
-
-
-if __name__ == "__main__":
+# Alternate starting path for development
+def run() -> None:
     import uvicorn
-
+    settings = Settings() # type: ignore
     uvicorn.run(
         "main:app",
         host=settings.API_HOST,
         port=settings.API_PORT,
         reload=settings.DEBUG
     )
+
+if __name__ == "__main__":
+    run()
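
For reference, a minimal sketch of how the wiring introduced in lifespan behaves once startup has run. It assumes the Container and routes from this commit; the TestClient usage itself is illustrative and not part of the change:

# Illustrative only: drives the lifespan wiring shown above via FastAPI's test client.
from fastapi.testclient import TestClient

from ctp_slack_bot.api.main import app

with TestClient(app) as client:  # entering the context runs the lifespan startup branch
    assert client.get("/health").json() == {"status": "healthy"}
    # The container attached in lifespan resolves Settings as a singleton.
    assert app.container.settings() is app.container.settings()
# Leaving the context runs the shutdown branch (stop_scheduler is called).
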
src/ctp_slack_bot/api/routes.py CHANGED
@@ -1,13 +1,20 @@
 from fastapi import APIRouter, Depends, HTTPException, status
+from dependency_injector.wiring import inject, Provide
 from loguru import logger
 
+from ctp_slack_bot.containers import Container
 #from ctp_slack_bot.api.dependencies import get_slack_service, get_transcript_service
+from ctp_slack_bot.core.config import Settings
 #from ctp_slack_bot.models.transcript import TranscriptRequest, TranscriptResponse
 #from ctp_slack_bot.services.slack_service import SlackService
 #from ctp_slack_bot.services.transcript_service import TranscriptService
 
 router = APIRouter(prefix="/api/v1")
 
+@router.get("/env", response_model=Settings)
+@inject
+async def get_env(settings: Settings = Depends(Provide[Container.settings])) -> Settings:
+    return settings
 
 # @router.post("/transcripts/analyze", response_model=TranscriptResponse)
 # async def analyze_transcript(
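
The same @inject/Provide pattern would extend to the service providers that are still commented out. A hypothetical route, assuming Container gained a slack_service provider (it has none in this commit):

# Hypothetical sketch: a wired route depending on a provider other than settings.
from dependency_injector.wiring import Provide, inject
from fastapi import APIRouter, Depends

from ctp_slack_bot.containers import Container

example_router = APIRouter(prefix="/api/v1")

@example_router.get("/slack/ping")
@inject
async def ping_slack(slack_service=Depends(Provide[Container.slack_service])):  # assumed provider
    # dependency_injector resolves the provider per request once the module is wired.
    return {"ok": slack_service is not None}
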
src/ctp_slack_bot/containers.py ADDED
@@ -0,0 +1,59 @@
+from dependency_injector.containers import DeclarativeContainer
+from dependency_injector.providers import Configuration, Factory, Singleton
+#from pymongo import MongoClient
+#from langchain.embeddings import HuggingFaceEmbeddings
+#from transformers import AutoTokenizer, AutoModel
+
+from ctp_slack_bot.core.config import Settings
+#from ctp_slack_bot.db.connection import get_database
+#from ctp_slack_bot.services.vectorization_service import VectorizationService
+
+class Container(DeclarativeContainer):
+    # Core dependencies
+    settings = Singleton(Settings)
+
+    # db_client = Singleton(
+    #     MongoClient,
+    #     host=config.MONGODB_URI.get_secret_value
+    # )
+
+    # db = Singleton(
+    #     get_database,
+    #     client=db_client,
+    #     db_name=config.MONGODB_NAME
+    # )
+
+    # Machine-learning models
+    # tokenizer = Singleton(
+    #     AutoTokenizer.from_pretrained,
+    #     config.EMBEDDING_MODEL
+    # )
+
+    # model = Singleton(
+    #     AutoModel.from_pretrained,
+    #     config.EMBEDDING_MODEL
+    # )
+
+    # embeddings = Singleton(
+    #     HuggingFaceEmbeddings,
+    #     model_name=config.EMBEDDING_MODEL
+    # )
+
+    # Repositories
+    # transcript_repository = Factory(
+    #     # Your transcript repository class
+    #     db=db
+    # )
+
+    # Services
+    # vectorization_service = Factory(
+    #     VectorizationService,
+    #     embeddings=embeddings,
+    #     chunk_size=config.CHUNK_SIZE,
+    #     chunk_overlap=config.CHUNK_OVERLAP,
+    #     vector_dimension=config.VECTOR_DIMENSION
+    # )
+
+    # Add other services here
+    # transcript_service = providers.Factory(...)
+    # slack_service = providers.Factory(...)
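
One way the commented-out providers could be revived without the undefined config provider they reference is to read fields off the settings singleton via dependency_injector's .provided attribute access. A sketch, with the MongoDB field names taken from the commented block and otherwise unverified here:

# Sketch only: wiring a client from the Settings singleton instead of a Configuration provider.
from dependency_injector.containers import DeclarativeContainer
from dependency_injector.providers import Singleton
from pymongo import MongoClient

from ctp_slack_bot.core.config import Settings

class SketchContainer(DeclarativeContainer):
    settings = Singleton(Settings)

    # .provided defers attribute access and method calls until the provider is resolved.
    db_client = Singleton(
        MongoClient,
        host=settings.provided.MONGODB_URI.get_secret_value.call(),
    )
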
src/ctp_slack_bot/core/config.py CHANGED
@@ -1,9 +1,6 @@
-from functools import lru_cache
-from typing import Literal, Optional
-
 from pydantic import Field, MongoDsn, NonNegativeFloat, NonNegativeInt, PositiveInt, SecretStr
 from pydantic_settings import BaseSettings, SettingsConfigDict
-
+from typing import Literal, Optional
 
 class Settings(BaseSettings): # TODO: Strong guarantees of validity, because garbage in = garbage out, and settings flow into all the nooks and crannies
     """
@@ -54,14 +51,3 @@ class Settings(BaseSettings): # TODO: Strong guarantees of validity, because gar
         env_file_encoding="utf-8",
         case_sensitive=True,
     )
-
-
-@lru_cache
-def get_settings() -> Settings:
-    """
-    Get cached settings instance.
-    """
-    return Settings() # type: ignore
-
-
-settings = get_settings()
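
For context on the Settings class that now backs the container's singleton provider: pydantic-settings builds each instance from the environment and the configured .env file, so every Settings() call (the Singleton provider, run() in main.py) reads the same sources. A minimal standalone sketch; the field names and defaults below are illustrative stand-ins, not the project's full schema:

# Minimal sketch of the BaseSettings pattern used by ctp_slack_bot.core.config.Settings.
from pydantic_settings import BaseSettings, SettingsConfigDict

class DemoSettings(BaseSettings):
    API_HOST: str = "127.0.0.1"  # illustrative defaults; the real Settings declares its own fields
    API_PORT: int = 8000
    DEBUG: bool = False

    model_config = SettingsConfigDict(
        env_file=".env",
        env_file_encoding="utf-8",
        case_sensitive=True,
    )

settings = DemoSettings()  # values come from the process environment or .env, else the defaults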
 
 
 
 
 
 
 
 
 
 
 
src/ctp_slack_bot/core/logging.py CHANGED
@@ -1,13 +1,12 @@
-import logging
-import sys
-from typing import Dict, Union
-
+from logging import __file__ as logging_file, basicConfig, currentframe, getLogger, Handler, INFO, LogRecord
 from loguru import logger
+from sys import stderr
+from typing import Dict, Union
 
-from ctp_slack_bot.core.config import settings
+from ctp_slack_bot.containers import Container
 
 
-class InterceptHandler(logging.Handler):
+class InterceptHandler(Handler):
     """
     Intercept standard logging messages toward Loguru.
 
@@ -15,7 +14,7 @@ class InterceptHandler(logging.Handler):
     to Loguru, allowing unified logging across the application.
     """
 
-    def emit(self, record: logging.LogRecord) -> None:
+    def emit(self, record: LogRecord) -> None:
         # Get corresponding Loguru level if it exists
         try:
             level = logger.level(record.levelname).name
@@ -23,8 +22,8 @@ class InterceptHandler(logging.Handler):
             level = record.levelno
 
         # Find caller from where the logged message originated
-        frame, depth = logging.currentframe(), 2
-        while frame and frame.f_code.co_filename == logging.__file__:
+        frame, depth = currentframe(), 2
+        while frame and frame.f_code.co_filename == logging_file:
            frame = frame.f_back
            depth += 1
 
@@ -33,7 +32,7 @@ class InterceptHandler(logging.Handler):
        )
 
 
-def setup_logging() -> None:
+def setup_logging(container: Container) -> None:
     """
     Configure logging with Loguru.
 
@@ -41,9 +40,11 @@ def setup_logging() -> None:
     configures the log format based on settings, and intercepts
     standard logging messages.
     """
+    settings = container.settings() if container else Provide[Container.settings]
+
     # Remove default loguru handler
     logger.remove()
-
+
     # Determine log format
     if settings.LOG_FORMAT == "json":
         log_format = {
@@ -62,17 +63,17 @@ def setup_logging() -> None:
            "<cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - "
            "<level>{message}</level>"
        )
-
+
    # Add console handler
    logger.add(
-        sys.stderr,
+        stderr,
        format=format_string,
        level=settings.LOG_LEVEL,
        serialize=(settings.LOG_FORMAT == "json"),
        backtrace=True,
        diagnose=True,
    )
-
+
    # Add file handler for non-DEBUG environments
    if settings.LOG_LEVEL != "DEBUG":
        logger.add(
@@ -84,19 +85,12 @@ def setup_logging() -> None:
            level=settings.LOG_LEVEL,
            serialize=(settings.LOG_FORMAT == "json"),
        )
-
+
    # Intercept standard logging messages
-    logging.basicConfig(handlers=[InterceptHandler()], level=0, force=True)
-
+    basicConfig(handlers=[InterceptHandler()], level=0, force=True)
+
    # Update logging levels for some noisy libraries
-    for logger_name in [
-        "uvicorn",
-        "uvicorn.error",
-        "fastapi",
-        "httpx",
-        "apscheduler",
-        "pymongo",
-    ]:
-        logging.getLogger(logger_name).setLevel(logging.INFO)
-
+    for logger_name in ("uvicorn", "uvicorn.error", "fastapi", "httpx", "apscheduler", "pymongo"):
+        getLogger(logger_name).setLevel(INFO)
+
    logger.info(f"Logging configured with level {settings.LOG_LEVEL}")
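
A short sketch of what InterceptHandler provides once basicConfig installs it: records emitted through the standard logging module are re-routed into Loguru's sinks, so third-party libraries and the application log through one pipeline. The module name below is illustrative:

# Illustrative: stdlib logging records funneled into Loguru by InterceptHandler.
from logging import basicConfig, getLogger

from loguru import logger

from ctp_slack_bot.core.logging import InterceptHandler

basicConfig(handlers=[InterceptHandler()], level=0, force=True)

getLogger("some.third.party.lib").warning("sent via the standard logging module")
logger.warning("sent via Loguru directly")
# Both messages land in the same Loguru sinks with the same format.
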
src/ctp_slack_bot/tasks/scheduler.py CHANGED
@@ -1,28 +1,28 @@
-from datetime import datetime
-from typing import Optional
-
 from apscheduler.schedulers.asyncio import AsyncIOScheduler
 from apscheduler.triggers.cron import CronTrigger
+from datetime import datetime
+from dependency_injector.wiring import inject, Provide
 from loguru import logger
 from pytz import timezone
+from typing import Optional
 
-from ctp_slack_bot.core.config import settings
+from ctp_slack_bot.containers import Container
 #from ctp_slack_bot.tasks.error_report import send_error_report
 #from ctp_slack_bot.tasks.transcript_cleanup import cleanup_old_transcripts
 
-
-def start_scheduler() -> AsyncIOScheduler:
+@inject
+def start_scheduler(container: Container) -> AsyncIOScheduler:
     """
     Start and configure the APScheduler instance.
 
     Returns:
         AsyncIOScheduler: Configured scheduler instance
     """
-    scheduler = AsyncIOScheduler(timezone=timezone(settings.SCHEDULER_TIMEZONE))
+    settings = container.settings() if container else Provide[Container.settings]
+    zone = settings.SCHEDULER_TIMEZONE
+    scheduler = AsyncIOScheduler(timezone=timezone(zone))
 
-    # Add jobs to the scheduler
-
-    # Daily error report at 7 AM
+    # Add jobs to the scheduler.
    # scheduler.add_job(
    #     send_error_report,
    #     CronTrigger(hour=7, minute=0),
@@ -30,8 +30,6 @@ def start_scheduler() -> AsyncIOScheduler:
    #     name="Daily Error Report",
    #     replace_existing=True,
    # )
-
-    # Weekly transcript cleanup on Sundays at 1 AM
    # scheduler.add_job(
    #     cleanup_old_transcripts,
    #     CronTrigger(day_of_week="sun", hour=1, minute=0),
@@ -40,25 +38,25 @@ def start_scheduler() -> AsyncIOScheduler:
    #     replace_existing=True,
    # )
 
-    # Start the scheduler
+    # Start the scheduler.
     scheduler.start()
     logger.info("Scheduler started with timezone: {}", settings.SCHEDULER_TIMEZONE)
-    logger.info("Next run for error report: {}",
-                scheduler.get_job("daily_error_report").next_run_time)
-    logger.info("Next run for transcript cleanup: {}",
-                scheduler.get_job("weekly_transcript_cleanup").next_run_time)
+    # logger.info("Next run for error report: {}",
+    #             scheduler.get_job("daily_error_report").next_run_time)
+    # logger.info("Next run for transcript cleanup: {}",
+    #             scheduler.get_job("weekly_transcript_cleanup").next_run_time)
 
     return scheduler
 
 
-def stop_scheduler(scheduler: Optional[AsyncIOScheduler] = None) -> None:
+def stop_scheduler(scheduler: AsyncIOScheduler) -> None:
     """
     Shutdown the scheduler gracefully.
 
     Args:
         scheduler: The scheduler instance to shut down
     """
-    if scheduler is not None and scheduler.running:
+    if scheduler.running:
         logger.info("Shutting down scheduler")
         scheduler.shutdown(wait=False)
         logger.info("Scheduler shutdown complete")
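
When the commented-out jobs are eventually enabled, they would be registered on the scheduler returned by start_scheduler. A sketch using the job id referenced by the commented-out logging; send_error_report itself does not exist yet in this commit, so treat it as hypothetical:

# Sketch: registering the daily error report once its task function exists.
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.triggers.cron import CronTrigger

def add_daily_error_report(scheduler: AsyncIOScheduler, send_error_report) -> None:
    scheduler.add_job(
        send_error_report,                 # the task callable (not yet implemented here)
        CronTrigger(hour=7, minute=0),     # daily at 07:00 in the scheduler's timezone
        id="daily_error_report",
        name="Daily Error Report",
        replace_existing=True,
    )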