adrienbrdne committed · Commit b121bbc · verified · 1 Parent(s): 86e77ee

Update api.py

Files changed (1): api.py (+44 -4)
api.py CHANGED
@@ -66,9 +66,6 @@ class ProblemDescriptionRequest(BaseModel):
     descriptions: List[str]
     technical_topic: str
 
-class ProblemDescriptionResponse(BaseModel):
-    problem_description: str
-
 # Format KI
 
 class FormattedKeyIssue(BaseModel):
@@ -96,6 +93,9 @@ class ProblemDescriptionItem(BaseModel):
     problematic: str
     score: float
 
+class EvaluateProblemDescriptionsRequest(BaseModel):
+    problem_descriptions: List[str]
+
 # --- Global Variables / State ---
 # Keep the graph instance global for efficiency if desired,
 # but consider potential concurrency issues if graph/LLMs have state.
@@ -697,9 +697,49 @@ async def create_several_probdesc_hardcoded(request: CreateSeveralProbDescReques
             'score': 0.624
         }
     ]
-
     return hardcoded_response
 
+@app.post("/evaluate_problem_descriptions", response_model=List[ProblemDescriptionItem])
+async def evaluate_problem_descriptions_endpoint(request: EvaluateProblemDescriptionsRequest):
+    """
+    Evaluates a list of problem descriptions by generating problematics and scoring them.
+    """
+    logger.info(f"Received request to evaluate {len(request.problem_descriptions)} problem descriptions.")
+
+    if not request.problem_descriptions:
+        # Although Pydantic might catch empty list if min_items=1 is set, explicit check is good.
+        raise HTTPException(status_code=400, detail="The 'problem_descriptions' list cannot be empty.")
+
+    evaluated_results = generate_problematics_and_scores(request.problem_descriptions)
+
+    # Validate consistency of lengths
+    if len(request.problem_descriptions) != len(evaluated_results):
+        logger.error(
+            f"Mismatch between the number of input problem descriptions ({len(request.problem_descriptions)}) "
+            f"and the number of evaluated results ({len(evaluated_results)})."
+        )
+        raise HTTPException(
+            status_code=500,
+            detail="Internal error: Mismatch occurred while processing problem descriptions."
+        )
+
+    response_items: List[ProblemDescriptionItem] = []
+    for i in range(len(request.problem_descriptions)):
+        problem_description = request.problem_descriptions[i]
+        problematic = evaluated_results[i].problematic
+        score = round(evaluated_results[i].score, 3)
+
+        response_items.append(
+            ProblemDescriptionItem(
+                problem_description=problem_description,
+                problematic=problematic,
+                score=score
+            )
+        )
+
+    logger.info(f"Successfully evaluated and prepared {len(response_items)} problem descriptions.")
+    return response_items
+
 
 # --- How to Run ---
 if __name__ == "__main__":
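
For reference, a minimal client sketch for the endpoint added by this commit. This is an illustration, not part of the commit: the base URL/port and the sample descriptions are assumptions for local testing, and `requests` is used purely for demonstration.

# Hypothetical client for the new /evaluate_problem_descriptions endpoint.
# BASE_URL and the example payload are assumptions; they do not appear in
# this commit.
import requests

BASE_URL = "http://localhost:8000"  # assumed host/port for a local run

payload = {
    "problem_descriptions": [
        "Battery-powered sensors in remote locations drain too quickly.",
        "Optical networks reroute traffic too slowly after a fiber cut.",
    ]
}

resp = requests.post(f"{BASE_URL}/evaluate_problem_descriptions", json=payload)
resp.raise_for_status()

# The response mirrors List[ProblemDescriptionItem]: each item carries
# problem_description, problematic, and a score rounded to 3 decimals.
for item in resp.json():
    print(f"{item['score']:.3f}  {item['problematic']}")

Note that an empty `problem_descriptions` list is rejected with HTTP 400 by the handler's explicit check, and a length mismatch between the input and the results of `generate_problematics_and_scores` yields HTTP 500.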