Yashvj123 committed on
Commit 13e89b3 · verified · 1 Parent(s): 26344cf

Update app.py

Files changed (1)
  1. app.py +40 -40
app.py CHANGED
@@ -76,19 +76,19 @@ def set_background(image_file):
     )
 
 # Split large response into smaller chunks (for translation)
-def split_text_into_chunks(text, max_length=450):
-    lines = text.split('\n')
-    chunks = []
-    current = ""
-    for line in lines:
-        if len(current) + len(line) + 1 <= max_length:
-            current += line + '\n'
-        else:
-            chunks.append(current.strip())
-            current = line + '\n'
-    if current:
-        chunks.append(current.strip())
-    return chunks
+# def split_text_into_chunks(text, max_length=450):
+#     lines = text.split('\n')
+#     chunks = []
+#     current = ""
+#     for line in lines:
+#         if len(current) + len(line) + 1 <= max_length:
+#             current += line + '\n'
+#         else:
+#             chunks.append(current.strip())
+#             current = line + '\n'
+#     if current:
+#         chunks.append(current.strip())
+#     return chunks
 
 
 def save_text_as_image(text, file_path):
@@ -209,8 +209,8 @@ if uploaded_file:
     prompt = PromptTemplate(input_variables=["prescription_text"], template=template)
 
     llm_model = HuggingFaceEndpoint(
-        repo_id="deepseek-ai/DeepSeek-R1-0528-Qwen3-8B",
-        provider="novita",
+        repo_id="Qwen/Qwen3-235B-A22B",
+        provider="nebius",
         temperature=0.6,
         max_new_tokens=300,
         task="conversational"
@@ -218,8 +218,8 @@ if uploaded_file:
 
     llm = ChatHuggingFace(
         llm=llm_model,
-        repo_id="deepseek-ai/DeepSeek-R1-0528-Qwen3-8B",
-        provider="novita",
+        repo_id="Qwen/Qwen3-235B-A22B",
+        provider="nebius",
         temperature=0.6,
         max_new_tokens=300,
         task="conversational"
@@ -234,7 +234,7 @@ if uploaded_file:
     with st.spinner("Analyzing with LLM..."):
        response = chain.run(prescription_text=text)
     st.markdown("#### 💡 AI-based Medicine Analysis")
-    st.text_area("LLM Output", response, height=600)
+    st.text_area("LLM Output", response, height=300)
 
     # Save txt and image
     txt_path = "medicine_analysis.txt"
@@ -252,31 +252,31 @@ if uploaded_file:
         with open(img_path, "rb") as img_file:
             st.download_button("🖼️ English Image", data=img_file, file_name="medicine_analysis.png", mime="image/png")\
 
-    if response and st.button("🌐 Translate to Hindi"):
-        with st.spinner("Translating to Hindi..."):
-            chunks = split_text_into_chunks(response, max_length=100)
-            hindi_chunks = []
-            for chunk in chunks:
-                try:
-                    translated = GoogleTranslator(source='auto', target='hi').translate(chunk)
-                    hindi_chunks.append(translated)
-                except Exception as e:
-                    hindi_chunks.append("[Error translating chunk]")
-            hindi_text = "\n\n".join(hindi_chunks)
+    # if response and st.button("🌐 Translate to Hindi"):
+    #     with st.spinner("Translating to Hindi..."):
+    #         chunks = split_text_into_chunks(response, max_length=100)
+    #         hindi_chunks = []
+    #         for chunk in chunks:
+    #             try:
+    #                 translated = GoogleTranslator(source='auto', target='hi').translate(chunk)
+    #                 hindi_chunks.append(translated)
+    #             except Exception as e:
+    #                 hindi_chunks.append("[Error translating chunk]")
+    #         hindi_text = "\n\n".join(hindi_chunks)
 
-            st.markdown("#### 🌐 Hindi Translation")
-            st.text_area("Translated Output (Hindi)", hindi_text, height=600)
+    #         st.markdown("#### 🌐 Hindi Translation")
+    #         st.text_area("Translated Output (Hindi)", hindi_text, height=600)
 
-            hindi_img_path = "hindi_output.png"
-            save_text_as_image(hindi_text, hindi_img_path)
+    #         hindi_img_path = "hindi_output.png"
+    #         save_text_as_image(hindi_text, hindi_img_path)
 
-            st.markdown("#### 📥 Download (Hindi)")
-            col3, col4 = st.columns(2)
-            with col3:
-                st.download_button("⬇️ Hindi TXT", data=hindi_text.encode(), file_name="hindi_medicine_analysis.txt")
-            with col4:
-                with open(hindi_img_path, "rb") as img_file:
-                    st.download_button("🖼️ Hindi Image", data=img_file, file_name="hindi_medicine_analysis.png", mime="image/png")
+    #         st.markdown("#### 📥 Download (Hindi)")
+    #         col3, col4 = st.columns(2)
+    #         with col3:
+    #             st.download_button("⬇️ Hindi TXT", data=hindi_text.encode(), file_name="hindi_medicine_analysis.txt")
+    #         with col4:
+    #             with open(hindi_img_path, "rb") as img_file:
+    #                 st.download_button("🖼️ Hindi Image", data=img_file, file_name="hindi_medicine_analysis.png", mime="image/png")
 
     try:
         os.remove(orig_path)
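
For context, the sketch below shows how the updated endpoint configuration from this commit could be wired into the prompt/chain flow visible in the diff. The repo_id, provider, temperature, max_new_tokens, and task values mirror the new side of the diff; the imports, the template text, the use of LLMChain (inferred from chain.run), and the sample input are assumptions, not part of app.py.

```python
# Sketch only: model wiring after this commit, with the Streamlit UI and OCR
# steps from app.py omitted. Values marked "from the diff" mirror the commit;
# everything else is illustrative.
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint

# Hypothetical template; app.py defines its own `template` string elsewhere.
template = "Explain the following prescription text:\n\n{prescription_text}"
prompt = PromptTemplate(input_variables=["prescription_text"], template=template)

# From the diff: Qwen/Qwen3-235B-A22B served via the nebius inference provider.
# The `provider` argument assumes a recent langchain-huggingface / huggingface_hub.
llm_model = HuggingFaceEndpoint(
    repo_id="Qwen/Qwen3-235B-A22B",
    provider="nebius",
    temperature=0.6,
    max_new_tokens=300,
    task="conversational",
)

# The commit also repeats repo_id/provider/temperature on ChatHuggingFace;
# wrapping the configured endpoint is the essential part.
llm = ChatHuggingFace(llm=llm_model)

chain = LLMChain(llm=llm, prompt=prompt)
response = chain.run(prescription_text="Tab. Paracetamol 500 mg, twice daily")  # hypothetical input
print(response)
```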