# llmgaurdrails/endpoints/api_models.py
from pydantic import BaseModel


class OutputGuardrailsConfig(BaseModel):
    """Flags controlling which output guardrails are applied."""
    contextual_grounding: bool = True
    toxicity: bool = True
    # Extend with more flags for additional guardrails.
# The input that was sent to the LLM and the response it produced.
class LLMResponse(BaseModel):
    question: str
    answer: str
    context: str
# Guardrail check request model.
class CheckRequest(BaseModel):
    llm_response: LLMResponse
    config: OutputGuardrailsConfig = OutputGuardrailsConfig()  # Default config if not provided
# Guardrail check response model.
class CheckResponse(BaseModel):
    grounded: bool
    details: dict
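

# Usage sketch (illustrative only, not part of the original module): how a
# caller might construct a CheckRequest and parse a CheckResponse. The example
# values and the Pydantic v2 serialization methods (model_dump_json,
# model_dump) are assumptions; adjust if the project pins Pydantic v1.
if __name__ == "__main__":
    request = CheckRequest(
        llm_response=LLMResponse(
            question="What is the capital of France?",
            answer="Paris is the capital of France.",
            context="France's capital city is Paris.",
        ),
        config=OutputGuardrailsConfig(contextual_grounding=True, toxicity=False),
    )
    # Serialize to JSON for transport to the guardrail check endpoint.
    print(request.model_dump_json(indent=2))

    # A hypothetical response payload, validated into the response model.
    response = CheckResponse(grounded=True, details={"toxicity": "skipped"})
    print(response.model_dump())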