hosseinhimself committed
Commit 332053a · verified · 1 Parent(s): 0971287

Update app.py

Files changed (1)
  1. app.py +7 -5
app.py CHANGED
@@ -166,12 +166,14 @@ llm_model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.4, top_p=0.
 prompt = PromptTemplate(input_variables=['task_type', 'task_number', 'question', 'content', 'description'], template=initial_prompt)
 
 # Define the LLM chain
-chain = LLMChain(
-    llm=llm_model,
-    prompt=prompt,
-)
+chain = LLMChain(llm=llm_model, prompt=prompt)
 
 def evaluate(task_type, task_number, question, image):
+    # Ensure the image is in the correct format
+    if isinstance(image, str):
+        # Load the image if it's a URL or path
+        image = Image.open(image)
+
     # Process the image to extract text
     text_content = ocr_pipe(image)
     content = text_content[0]['generated_text']
@@ -237,4 +239,4 @@ with gr.Blocks() as demo:
     gr.HTML(footer_html_with_analytics)
 
 # Launch the interface
-demo.launch(share=True, debug=True)
+demo.launch(share=True, debug=True)
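
For context, here is a minimal sketch of how the updated evaluate() plausibly fits together after this commit, assuming ocr_pipe (the OCR pipeline) and chain (the single-line LLMChain above) are defined earlier in app.py as the diff context indicates; the chain.run call, the empty description value, and the return are illustrative assumptions, not part of the commit.

from PIL import Image

def evaluate(task_type, task_number, question, image):
    # Gradio can pass either a file path (str) or an already-loaded PIL.Image,
    # depending on how the gr.Image input is configured, so normalise first.
    if isinstance(image, str):
        image = Image.open(image)

    # Run OCR on the image and take the recognised text.
    text_content = ocr_pipe(image)
    content = text_content[0]['generated_text']

    # Feed the extracted text plus the task metadata through the LLM chain.
    # 'description' is a hypothetical placeholder here; app.py supplies its own value.
    return chain.run(
        task_type=task_type,
        task_number=task_number,
        question=question,
        content=content,
        description="",
    )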