JeCabrera committed on
Commit
3706199
·
verified ·
1 Parent(s): 1b764c7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -10
app.py CHANGED
@@ -11,6 +11,7 @@ if not GOOGLE_API_KEY:
11
 
12
  genai.configure(api_key=GOOGLE_API_KEY)
13
 
 
14
  def transform_history(history):
15
  new_history = []
16
  for chat in history:
@@ -18,22 +19,43 @@ def transform_history(history):
18
  new_history.append({"parts": [{"text": chat[1]}], "role": "model"})
19
  return new_history
20
 
21
- def response(message, history):
 
 
 
 
 
 
 
22
  chat_history = transform_history(history)
23
-
24
  response = genai.ChatCompletion.create(
25
- model="gemini-1.5-flash", # Asegúrate de usar el modelo correcto
26
- messages=chat_history + [{"role": "user", "content": message}],
27
  max_tokens=150,
28
  )
29
-
30
  reply = response['choices'][0]['message']['content']
31
 
32
- # Mostrar la respuesta personaje por personaje
33
  for i in range(len(reply)):
34
  time.sleep(0.05)
35
- yield reply[:i + 1]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
 
37
- gr.ChatInterface(response,
38
- title='Gemini Chat',
39
- textbox=gr.Textbox(placeholder="Pregunta a Gemini")).launch(debug=True)
 
11
 
12
  genai.configure(api_key=GOOGLE_API_KEY)
13
 
14
+ # Transformar el historial a formato Gemini
15
  def transform_history(history):
16
  new_history = []
17
  for chat in history:
 
19
  new_history.append({"parts": [{"text": chat[1]}], "role": "model"})
20
  return new_history
21
 
22
def add_message(history, message):
    """Push the user's multimodal submission into the chat history.

    Each uploaded file becomes its own user turn (as a file-path payload),
    followed by the typed text, if any. Returns the updated history and a
    cleared, temporarily disabled input box (re-enabled after the bot runs).
    """
    for file_path in message["files"]:
        history.append({"role": "user", "content": {"path": file_path}})

    text = message["text"]
    if text is not None:
        history.append({"role": "user", "content": text})

    # Clear the textbox and lock it while the bot generates its reply.
    return history, gr.MultimodalTextbox(value=None, interactive=False)
28
+
29
def bot(history):
    """Generate a Gemini reply to the latest user message and stream it.

    `history` is a Gradio "messages"-format list of {"role", "content"}
    dicts (content may be a str or a {"path": ...} file dict). Yields the
    full history with the assistant reply growing character by character.
    """
    # Convert the messages-format history (minus the message being answered)
    # into the Gemini content format: {"role": "user"/"model", "parts": [...]}.
    # File-upload turns are skipped — only text is forwarded to the model.
    contents = []
    for msg in history[:-1]:
        if isinstance(msg["content"], str):
            role = "model" if msg["role"] == "assistant" else "user"
            contents.append({"role": role, "parts": [{"text": msg["content"]}]})

    # BUG FIX: google-generativeai has no `ChatCompletion.create` (that is the
    # OpenAI SDK); use GenerativeModel.generate_content. Also fixed
    # `history[-1][0]`, which raised KeyError on dict entries — the last user
    # message lives under the "content" key.
    last_message = history[-1]["content"]
    model = genai.GenerativeModel("gemini-1.5-flash")
    response = model.generate_content(
        contents + [{"role": "user", "parts": [{"text": last_message}]}],
        generation_config={"max_output_tokens": 150},
    )
    reply = response.text

    # Stream the reply progressively, one character per tick.
    for i in range(len(reply)):
        time.sleep(0.05)
        yield history + [{"role": "assistant", "content": reply[:i + 1]}]
43
+
44
# --- Gradio UI wiring (flat script; builds and launches the app at import time) ---
with gr.Blocks() as demo:
    # Messages-format chatbot: history entries are {"role", "content"} dicts.
    chatbot = gr.Chatbot(elem_id="chatbot", bubble_full_width=False, type="messages")

    # Multimodal input: free text plus microphone recordings and file uploads.
    chat_input = gr.MultimodalTextbox(
        interactive=True,
        file_count="multiple",
        placeholder="Escribe un mensaje o sube un archivo...",
        show_label=False,
        sources=["microphone", "upload"],
    )

    # Submit chain: (1) append the user's message and lock the input,
    # (2) stream the bot's reply into the chatbot, (3) re-enable the
    # (now cleared) input box once generation finishes.
    chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
    bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name="bot_response")
    bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])

    # Log like/dislike feedback (index, value, liked flag) to stdout.
    chatbot.like(lambda x: print(x.index, x.value, x.liked), None, None, like_user_message=True)

demo.launch()