ziyadsuper2017 committed
Commit 4fa0fd6 · 1 Parent(s): bdedc35

Update app.py

Files changed (1)
  1. app.py +14 -41
app.py CHANGED
@@ -37,14 +37,24 @@ for message in chat_history:
     r, t = message["role"], message["parts"][0]["text"]
     st.markdown(f"**{r.title()}:** {t}")
 
-# Use text_area for multiline input
-user_input = st.text_area("", height=5)
+# Use file_uploader for text and image input
+user_input = st.file_uploader("Upload a text or an image file here", type=["txt", "png", "jpg", "jpeg"])
 if user_input:
-    chat_history.append({"role": role, "parts": [{"text": user_input}]})
+    # Check the file type and create the prompt parts accordingly
+    if user_input.type == "text/plain":
+        prompt_parts = [user_input.getvalue().decode("utf-8")]
+        model_name = "gemini-pro" # Use the text-only model
+    else:
+        prompt_parts = [{
+            "mime_type": user_input.type,
+            "data": user_input.read()
+        }]
+        model_name = "gemini-pro-vision" # Use the multimodal model
+
+    chat_history.append({"role": role, "parts": prompt_parts})
     if role == "user":
 
         # Model code
-        model_name = "gemini-pro"
         model = genai.GenerativeModel(
             model_name=model_name,
             generation_config=generation_config,
@@ -74,40 +84,3 @@ for message in chat_history:
         conn.commit()
 
         conn.close()
-
-# Separate section for image uploading
-st.title("Image Description Generator")
-
-# Change the file_uploader to accept multiple files
-uploaded_files = st.file_uploader("Upload one or more images here", type=["png", "jpg", "jpeg"], accept_multiple_files=True)
-
-# Text input for asking questions about the images
-image_question = st.text_input("Ask something about the images:")
-
-# Check if the user has entered a question
-if image_question:
-    # Create a list of image parts from the uploaded files
-    image_parts = []
-    for uploaded_file in uploaded_files:
-        image_parts.append({
-            "mime_type": uploaded_file.type,
-            "data": uploaded_file.read()
-        })
-
-    # Create a prompt parts list with the question and the image parts
-    prompt_parts = [image_question] + image_parts
-
-    # Use the gemini-pro-vision model to generate a response
-    model = genai.GenerativeModel(
-        model_name="gemini-pro-vision",
-        generation_config=generation_config,
-        safety_settings=safety_settings
-    )
-
-    response = model.generate_content(prompt_parts)
-    st.markdown(f"**Model's answer:** {response.text}")
-
-    # Loop through the uploaded files and display them
-    for uploaded_file in uploaded_files:
-        # Display the image
-        st.image(uploaded_file)
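A minimal, self-contained sketch of the branching the first hunk introduces: a single upload is routed to gemini-pro for plain text and gemini-pro-vision for images. The st.secrets key name is an assumption, and generation_config, safety_settings, chat_history persistence, and the SQLite code are omitted here because they appear in the diff only as unchanged context.

import streamlit as st
import google.generativeai as genai

genai.configure(api_key=st.secrets["GOOGLE_API_KEY"])  # assumed secret name

user_input = st.file_uploader("Upload a text or an image file here",
                              type=["txt", "png", "jpg", "jpeg"])
if user_input:
    if user_input.type == "text/plain":
        # Text upload: decode the bytes and use the text-only model
        prompt_parts = [user_input.getvalue().decode("utf-8")]
        model_name = "gemini-pro"
    else:
        # Image upload: pass raw bytes plus MIME type to the multimodal model
        prompt_parts = [{"mime_type": user_input.type, "data": user_input.read()}]
        model_name = "gemini-pro-vision"

    model = genai.GenerativeModel(model_name=model_name)
    response = model.generate_content(prompt_parts)
    st.markdown(f"**Model:** {response.text}")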
 
37
  r, t = message["role"], message["parts"][0]["text"]
38
  st.markdown(f"**{r.title()}:** {t}")
39
 
40
+ # Use file_uploader for text and image input
41
+ user_input = st.file_uploader("Upload a text or an image file here", type=["txt", "png", "jpg", "jpeg"])
42
  if user_input:
43
+ # Check the file type and create the prompt parts accordingly
44
+ if user_input.type == "text/plain":
45
+ prompt_parts = [user_input.getvalue().decode("utf-8")]
46
+ model_name = "gemini-pro" # Use the text-only model
47
+ else:
48
+ prompt_parts = [{
49
+ "mime_type": user_input.type,
50
+ "data": user_input.read()
51
+ }]
52
+ model_name = "gemini-pro-vision" # Use the multimodal model
53
+
54
+ chat_history.append({"role": role, "parts": prompt_parts})
55
  if role == "user":
56
 
57
  # Model code
 
58
  model = genai.GenerativeModel(
59
  model_name=model_name,
60
  generation_config=generation_config,
 
84
  conn.commit()
85
 
86
  conn.close()
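The removed second hunk relied on the same parts convention as the new code: generate_content accepts a mixed list of plain strings and {"mime_type", "data"} dicts, so a question plus several images could be sent in one call. A hedged, stand-alone illustration of that pattern (the file names and API key are placeholders):

import google.generativeai as genai

genai.configure(api_key="YOUR_API_KEY")  # placeholder
model = genai.GenerativeModel(model_name="gemini-pro-vision")

image_parts = []
for path in ["cat.png", "dog.jpg"]:  # hypothetical local files
    mime = "image/png" if path.endswith(".png") else "image/jpeg"
    with open(path, "rb") as f:
        image_parts.append({"mime_type": mime, "data": f.read()})

# Text question first, then the image parts, as the removed section did
prompt_parts = ["What do these images have in common?"] + image_parts
response = model.generate_content(prompt_parts)
print(response.text)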