juancho72h committed (verified)
Commit 79041c3 · 1 Parent(s): 1a7916d

Upload app.py

Files changed (1):
  1. app.py +3 -2
app.py CHANGED
@@ -21,7 +21,7 @@ load_dotenv()
 # Access Pinecone and OpenAI API keys from environment variables
 pinecone_api_key = os.getenv("PINECONE_API_KEY")
 openai.api_key = os.getenv("OPENAI_API_KEY")
-index_name = "amtrak-rmm-image-text"
+index_name = "amtrak-acela-ai-demo"

 # Initialize Pinecone using a class-based method
 pc = Pinecone(api_key=pinecone_api_key)
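For orientation, the class-based Pinecone client this file uses (pinecone-client v3+) would connect to the renamed index roughly as below. This is a minimal sketch, assuming a query call the commit itself does not show; `query_embedding`, `top_k=5`, and the 1536 dimension are placeholders, not values taken from app.py.

```python
import os
from pinecone import Pinecone

# Same class-based initialization as in the hunk above.
pc = Pinecone(api_key=os.getenv("PINECONE_API_KEY"))
index = pc.Index("amtrak-acela-ai-demo")

# Hypothetical query: the commit does not show how the embedding is built,
# so this placeholder vector stands in for whatever app.py computes.
query_embedding = [0.0] * 1536  # assumed dimension
results = index.query(vector=query_embedding, top_k=5, include_metadata=True)
for result in results.matches:
    print(result.id, result.score, result.metadata)
```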
@@ -66,7 +66,7 @@ def get_model_response(human_input):
         image_url = flatten_to_string(result.get('metadata', {}).get('image_path', None))
         figure_desc = flatten_to_string(result.get('metadata', {}).get('figure_description', ''))

-        context_list.append(f"Document {ind+1}: {document_content}")
+        context_list.append(f"Relevant information: {document_content}")
         if image_url and figure_desc:
             images.append((figure_desc, image_url))

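`flatten_to_string` is called in this hunk but not defined in it. Given that Pinecone metadata values can come back as strings or lists of strings, a plausible reading is the hypothetical helper below; the actual implementation in app.py may differ.

```python
def flatten_to_string(value):
    # Hypothetical stand-in for the helper used above: normalize a metadata
    # field that may be missing, a plain string, or a list of strings.
    if value is None:
        return ""
    if isinstance(value, (list, tuple)):
        return " ".join(str(item) for item in value)
    return str(value)
```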
@@ -108,6 +108,7 @@ def get_model_response(human_input):
 def get_model_response_with_images(human_input, history=None):
     output_text, images = get_model_response(human_input)
     if images:
+        # Append images in Markdown format for Gradio to render
         image_output = "".join([f"\n\n**{figure_desc}**\n![{figure_desc}]({image_path})" for figure_desc, image_path in images])
         return output_text + image_output
     return output_text
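Because `get_model_response_with_images(human_input, history=None)` returns a Markdown string, it slots directly into `gr.ChatInterface`, which passes `(message, history)` to its function and renders the returned Markdown, so the `![...](...)` lines appended above display as inline images. A minimal sketch of the likely wiring (the launch code itself is not part of this diff):

```python
import gradio as gr

# Assumed wiring: ChatInterface calls the function with (message, history)
# and renders its Markdown return value, including the appended images.
demo = gr.ChatInterface(fn=get_model_response_with_images)
demo.launch()
```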
 