Ibraaheem committed a763397 (verified) · 1 parent: 02b6053

Update private_gpt/server/chat/chat_router.py

private_gpt/server/chat/chat_router.py CHANGED
@@ -2,6 +2,7 @@ from fastapi import APIRouter, Depends, Request
 from llama_index.llms import ChatMessage, MessageRole
 from pydantic import BaseModel
 from starlette.responses import StreamingResponse
+import logging
 
 from private_gpt.open_ai.extensions.context_filter import ContextFilter
 from private_gpt.open_ai.openai_models import (
@@ -107,6 +108,8 @@ class ChatBody(BaseModel):
 # completion.response, completion.sources if body.include_sources else None
 # )
 
+logger = logging.getLogger(__name__)
+
 @chat_router.post(
     "/chat/completions",
     response_model=None,
@@ -118,16 +121,19 @@ def chat_completion(
 ) -> OpenAICompletion | StreamingResponse:
     """Given a list of messages comprising a conversation, return a response."""
     try:
+        logger.info("Received chat completion request with body: %s", body.json())
         service = request.state.injector.get(ChatService)
         all_messages = [
             ChatMessage(content=m.content, role=MessageRole(m.role)) for m in body.messages
         ]
+        logger.info("Constructed all_messages: %s", all_messages)
         if body.stream:
             completion_gen = service.stream_chat(
                 messages=all_messages,
                 use_context=body.use_context,
                 context_filter=body.context_filter,
             )
+            logger.info("Streaming response initialized")
             return StreamingResponse(
                 to_openai_sse_stream(
                     completion_gen.response,
@@ -141,11 +147,11 @@ def chat_completion(
             use_context=body.use_context,
             context_filter=body.context_filter,
         )
+        logger.info("Completed chat request: %s", completion.response)
         return to_openai_response(
             completion.response, completion.sources if body.include_sources else None
         )
     except Exception as e:
-        # Log the exception details for debugging
-        print(f"Error processing chat completion: {e}")
+        logger.error("Error processing chat completion: %s", str(e), exc_info=True)
         return {"error": {"message": "Internal server error"}}
 
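Note: the new logger calls only produce output once the standard logging package has a handler configured. PrivateGPT normally wires this up at application startup; the snippet below is only a minimal, self-contained sketch of such a configuration (the format string and the demo exception are illustrative, not part of this commit):

import logging

# Minimal illustrative setup: send INFO-and-above records to stderr with
# timestamps. A real deployment configures this once at startup, not in the
# router module itself.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(name)s: %(message)s",
)

logger = logging.getLogger("private_gpt.server.chat.chat_router")

try:
    raise ValueError("demo failure")  # stand-in for an error inside chat_completion
except Exception as e:
    # exc_info=True attaches the full traceback to the log record, which is
    # what the commit's logger.error(...) call relies on.
    logger.error("Error processing chat completion: %s", str(e), exc_info=True)

One design note: logging the full request body and the constructed messages at INFO level puts user content into the logs, so these are the kind of lines often demoted to DEBUG later.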
 
 
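For reference, a request that exercises the patched endpoint and triggers the new INFO logs could look like the sketch below; the base URL and the /v1 prefix are assumptions about a local PrivateGPT deployment, not something this diff shows:

import requests

# Hypothetical local instance; adjust host, port, and route prefix to your setup.
resp = requests.post(
    "http://localhost:8001/v1/chat/completions",
    json={
        "messages": [{"role": "user", "content": "Hello"}],
        "stream": False,  # False takes the non-streaming to_openai_response() branch
        "use_context": False,
        "include_sources": False,
    },
    timeout=60,
)
print(resp.status_code, resp.json())

Because the except branch returns a plain dict instead of raising, error responses still arrive with HTTP 200, so a client has to check the payload for an "error" key.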