burhan112 committed on
Commit
ca2f154
·
verified ·
1 Parent(s): 4f9a700

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -38
app.py CHANGED
@@ -45,14 +45,7 @@ def retrieve_docs(query, k=5):
45
  return retrieved_docs
46
 
47
  # RAG pipeline integrated into respond function
48
- def respond(
49
- message,
50
- history: list[tuple[str, str]],
51
- system_message,
52
- max_tokens,
53
- temperature,
54
- top_p, # Keeping top_p as an input, though Gemini doesn’t use it directly
55
- ):
56
  # Preprocess the user message
57
  preprocessed_query = preprocess_text(message)
58
 
@@ -60,17 +53,13 @@ def respond(
60
  retrieved_docs = retrieve_docs(preprocessed_query, k=5)
61
  context = "\n".join(retrieved_docs['text'].tolist())
62
 
63
- # Construct the prompt with system message, history, and RAG context
64
  prompt = f"{system_message}\n\n"
65
- for user_msg, assistant_msg in history:
66
- if user_msg:
67
- prompt += f"User: {user_msg}\n"
68
- if assistant_msg:
69
- prompt += f"Assistant: {assistant_msg}\n"
70
  prompt += (
71
  f"Query: {message}\n"
72
  f"Relevant Context: {context}\n"
73
- f"Generate a short, concise, and to-the-point response to the query based only on the provided context."
 
74
  )
75
 
76
  # Generate response with Gemini
@@ -88,37 +77,27 @@ def respond(
88
  answer = answer[:last_period + 1]
89
  else:
90
  answer += "."
 
 
91
 
92
- # Format the output with Gradio markdown for better readability
93
- formatted_answer = f"""
94
- <div style='background-color:#f0f0f0; padding: 10px; border-radius: 5px;'>
95
- <h3 style='color:#333; font-weight:bold;'>Assistant's Response:</h3>
96
- <p style='color:#555;'>{answer}</p>
97
- </div>
98
- """
99
- # Yield the formatted response
100
- yield formatted_answer
101
 
102
- # Gradio Chat Interface
103
- demo = gr.ChatInterface(
104
- respond,
105
- additional_inputs=[
106
  gr.Textbox(
107
  value="You are a medical AI assistant diagnosing patients based on their query, using relevant context from past records of other patients.",
108
- label="System message"
109
  ),
110
- gr.Slider(minimum=1, maximum=2048, value=150, step=1, label="Max new tokens"),
111
  gr.Slider(minimum=0.1, maximum=4.0, value=0.75, step=0.1, label="Temperature"),
112
- gr.Slider(
113
- minimum=0.1,
114
- maximum=1.0,
115
- value=0.95,
116
- step=0.05,
117
- label="Top-p (nucleus sampling)", # Included but not used by Gemini
118
- ),
119
  ],
 
120
  title="🏥 Medical Chat Assistant",
121
- description="A chat-based medical assistant that diagnoses patient queries using AI and past records."
122
  )
123
 
124
  if __name__ == "__main__":
 
45
  return retrieved_docs
46
 
47
  # RAG pipeline integrated into respond function
48
+ def respond(message, system_message, max_tokens, temperature):
 
 
 
 
 
 
 
49
  # Preprocess the user message
50
  preprocessed_query = preprocess_text(message)
51
 
 
53
  retrieved_docs = retrieve_docs(preprocessed_query, k=5)
54
  context = "\n".join(retrieved_docs['text'].tolist())
55
 
56
+ # Construct the prompt with system message and RAG context, asking for structured response
57
  prompt = f"{system_message}\n\n"
 
 
 
 
 
58
  prompt += (
59
  f"Query: {message}\n"
60
  f"Relevant Context: {context}\n"
61
+ f"Generate a short, concise response to the query based only on the provided context. "
62
+ f"Format the response as a structured list (e.g., bullet points or numbered items) instead of a paragraph."
63
  )
64
 
65
  # Generate response with Gemini
 
77
  answer = answer[:last_period + 1]
78
  else:
79
  answer += "."
80
+
81
+ return answer
82
 
83
+ # Simple Gradio Interface
84
+ def chatbot_interface(message, system_message, max_tokens, temperature):
85
+ return respond(message, system_message, max_tokens, temperature)
 
 
 
 
 
 
86
 
87
+ demo = gr.Interface(
88
+ fn=chatbot_interface,
89
+ inputs=[
90
+ gr.Textbox(label="Your Query", placeholder="Enter your medical question here..."),
91
  gr.Textbox(
92
  value="You are a medical AI assistant diagnosing patients based on their query, using relevant context from past records of other patients.",
93
+ label="System Message"
94
  ),
95
+ gr.Slider(minimum=1, maximum=2048, value=150, step=1, label="Max Tokens"),
96
  gr.Slider(minimum=0.1, maximum=4.0, value=0.75, step=0.1, label="Temperature"),
 
 
 
 
 
 
 
97
  ],
98
+ outputs=gr.Textbox(label="Response"),
99
  title="🏥 Medical Chat Assistant",
100
+ description="A simple medical assistant that diagnoses patient queries using AI and past records, providing structured responses."
101
  )
102
 
103
  if __name__ == "__main__":