ziyadsuper2017 committed
Commit 5cc2255 · 1 Parent(s): 37ac5ae

Update app.py

Files changed (1)
  1. app.py (+23 −20)
app.py CHANGED
@@ -8,21 +8,21 @@ genai.configure(api_key="AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNM")
 generation_config = {
     "temperature": 0.9,
     "top_p": 1,
-    "top_k": 1,
+    "top_k": 1,
     "max_output_tokens": 2048
 }

 safety_settings = [
     {
         "category": "HARM_CATEGORY_HARASSMENT",
-        "threshold": "BLOCK_MEDIUM_AND_ABOVE"
+        "threshold": "BLOCK_MEDIUM_AND_ABOVE"
     },
-    {
+    {
         "category": "HARM_CATEGORY_HATE_SPEECH",
         "threshold": "BLOCK_MEDIUM_AND_ABOVE"
     },
     {
-        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
         "threshold": "BLOCK_MEDIUM_AND_ABOVE"
     },
     {
@@ -32,46 +32,49 @@ safety_settings = [
 ]

 model = genai.GenerativeModel(
-    model_name="gemini-pro",
-    generation_config=generation_config,
-    safety_settings=safety_settings
+    model_name="gemini-pro",
+    generation_config=generation_config,
+    safety_settings=safety_settings
 )

 # Chatbot interface
-st.title("Gemini API Chatbot")
+st.title("Gemini API Chatbot")

-chat_history = st.session_state.get("chat_history", [])
+chat_history = st.session_state.get("chat_history", [])

-chat_container = st.container()
+chat_container = st.container()

 for message in chat_history:
     # Display message
-    role = message["role"]
-    content = message["content"]
+    role = message["role"]
+    content = message["content"]

     with chat_container.empty():
         st.markdown(f"**{role}:** {content}")

-# Get user input
-user_input = st.text_input("Ask the assistant")
+# Get user input
+user_input = st.text_input("Ask the assistant")

 if user_input:

-    # Process input
+    # Process input
     chat_history.append({"role": "user", "content": user_input})
-
+
+    # Display input
     with chat_container.empty():
         st.markdown(f"**user:** {user_input}")

     # Get response
     with st.spinner("Thinking..."):
-        response = model.ask(user_input, history=chat_history)
+        convo = model.start_chat(history=chat_history)
+        response = convo.last.text

-    # Process response
-    chat_history.append({"role": "assistant", "content": response })
+    # Process response
+    chat_history.append({"role": "assistant", "content": response})

+    # Display response
     with chat_container.empty():
         st.markdown(f"**assistant:** {response}")
-
+
     # Update session state
     st.session_state["chat_history"] = chat_history
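
The substantive change in this commit replaces response = model.ask(user_input, history=chat_history) (a method the google-generativeai SDK does not appear to provide) with model.start_chat(history=chat_history) followed by a read of convo.last.text. Note that no message is sent on the new chat session before convo.last is read, so as committed the model is never actually queried with the latest input. A minimal sketch of one request/response turn, assuming the SDK's ChatSession API (start_chat / send_message / response.text) and a valid key:

import google.generativeai as genai

genai.configure(api_key="YOUR_API_KEY")  # placeholder key, for illustration only

model = genai.GenerativeModel(model_name="gemini-pro")

chat = model.start_chat(history=[])          # earlier turns could be passed here
response = chat.send_message("Hello there")  # sends the new user turn to the API
print(response.text)                         # text of the model's reply
print(len(chat.history))                     # the session now holds both turns

In the app's terms, the reply the UI displays would come from something like convo.send_message(user_input).text rather than from convo.last.text.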
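
A related detail: the app stores turns as {"role": ..., "content": ...} dicts and labels the model's turns "assistant", while start_chat's history (as far as I know) expects entries with a "parts" field and the role names "user" / "model". A small, purely illustrative converter bridging the two shapes; to_genai_history is a hypothetical helper, not something in this repo:

def to_genai_history(chat_history):
    # Convert the app's {"role", "content"} dicts into the {"role", "parts"}
    # shape accepted by GenerativeModel.start_chat (assumption about the SDK).
    converted = []
    for message in chat_history:
        role = "model" if message["role"] == "assistant" else "user"
        converted.append({"role": role, "parts": [message["content"]]})
    return converted

It could be passed as model.start_chat(history=to_genai_history(chat_history)) in place of the bare chat_history argument.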
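
On the Streamlit side, the script renders each message through a per-message chat_container.empty() placeholder and writes chat_history back into st.session_state at the end of the run. That pattern works, but newer Streamlit releases ship dedicated chat elements; a sketch of the same display loop using them, assuming st.chat_message and st.chat_input are available in the installed version:

import streamlit as st

st.title("Gemini API Chatbot")

# Keep the conversation across reruns, under the same key the diff uses.
if "chat_history" not in st.session_state:
    st.session_state["chat_history"] = []

# Replay earlier turns with Streamlit's chat elements.
for message in st.session_state["chat_history"]:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

user_input = st.chat_input("Ask the assistant")
if user_input:
    st.session_state["chat_history"].append({"role": "user", "content": user_input})
    with st.chat_message("user"):
        st.markdown(user_input)
    # ...call the model here, then append and display the assistant reply...

Because the list is mutated directly on st.session_state, the explicit st.session_state["chat_history"] = chat_history write at the end of the script is no longer needed in this variant.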
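
Finally, the first hunk header shows genai.configure being called with an API key written directly into app.py. A common alternative is to load the key from Streamlit's secrets store or an environment variable so it never lands in the repository; the "GOOGLE_API_KEY" name below is an assumption, not something defined by this project:

import streamlit as st
import google.generativeai as genai

# "GOOGLE_API_KEY" would live in .streamlit/secrets.toml (or be read from the
# environment with os.environ) rather than being hard-coded in the source file.
genai.configure(api_key=st.secrets["GOOGLE_API_KEY"])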