JeCabrera committed on
Commit
a292f1b
verified
1 Parent(s): d3d1e52

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +65 -44
app.py CHANGED
@@ -1,46 +1,67 @@
1
- from huggingface_hub import InferenceClient
2
  import gradio as gr
 
 
 
3
 
4
# Shared Hugging Face Inference API client (no explicit model; the model is
# chosen per call below).
client = InferenceClient()


def respond(
    prompt: str,
    history,
):
    """Stream a chat reply for *prompt* into the conversation *history*.

    Generator used as a Gradio event handler: it first yields the history
    with the user's message appended (so the UI shows it immediately), then
    repeatedly yields the history plus the partially streamed assistant
    message as tokens arrive.

    Args:
        prompt: The user's new chat message.
        history: List of ``{"role": ..., "content": ...}`` message dicts,
            or a falsy value on the first turn.
    """
    if not history:
        # First turn: seed the conversation with a system prompt.
        history = [{"role": "system", "content": "You are a friendly chatbot"}]
    history.append({"role": "user", "content": prompt})

    # Show the user's message in the chatbot before the model answers.
    yield history

    response = {"role": "assistant", "content": ""}
    for message in client.chat_completion(
        history,
        temperature=0.95,
        top_p=0.9,
        max_tokens=512,
        stream=True,
        model="HuggingFaceH4/zephyr-7b-beta"
    ):
        # Each streamed chunk carries a delta; it may be None at end of stream.
        response["content"] += message.choices[0].delta.content or ""

        # NOTE(review): yield inside the loop follows the standard Gradio
        # streaming pattern; the diff source does not preserve indentation,
        # so confirm against the original file.
        yield history + [response]
28
-
29
-
30
# UI: a chatbot pane plus a single-line prompt box.
with gr.Blocks() as demo:
    # NOTE(review): "馃" looks like a mojibake'd emoji (UTF-8 bytes decoded
    # with the wrong codec) — the intended character can't be recovered here.
    gr.Markdown("# Chat with Hugging Face Zephyr 7b 馃")
    chatbot = gr.Chatbot(
        label="Agent",
        type="messages",  # expects openai-style {"role", "content"} dicts
        avatar_images=(
            None,  # user: default avatar
            "https://em-content.zobj.net/source/twitter/376/hugging-face_1f917.png",
        ),
    )
    prompt = gr.Textbox(max_lines=1, label="Chat Message")
    # On submit: stream respond()'s partial histories into the chatbot...
    prompt.submit(respond, [prompt, chatbot], [chatbot])
    # ...and clear the textbox via a second handler on the same event.
    prompt.submit(lambda: "", None, [prompt])


if __name__ == "__main__":
    demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
+ import google.generativeai as genai
3
+ import os
4
+ from pypdf import PdfReader
5
 
6
# Gemini API key is read from the environment; fail fast at startup so a
# missing key surfaces as a clear configuration error instead of a failure
# on the first request. (Error message intentionally in Spanish to match
# the app's UI language; mojibake "est谩" repaired to "está".)
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")
if not GOOGLE_API_KEY:
    raise ValueError("La variable de entorno GOOGLE_API_KEY no está definida.")

genai.configure(api_key=GOOGLE_API_KEY)
# Two models: plain text prompts vs. prompts that include an image part.
model_text = genai.GenerativeModel('gemini-pro')
model_vision = genai.GenerativeModel('gemini-pro-vision')
14
+
15
def read_text_file(filepath):
    """Return the entire contents of the UTF-8 text file at *filepath*."""
    with open(filepath, 'r', encoding='utf-8') as handle:
        contents = handle.read()
    return contents
18
+
19
def read_pdf_file(filepath):
    """Extract and concatenate the text of every page of a PDF file.

    Each page's text is followed by a newline. Pages for which pypdf
    cannot extract any text (``extract_text()`` returns None, e.g.
    scanned/image-only pages) contribute an empty string — the original
    ``text += page.extract_text() + "\\n"`` raised TypeError on those.

    Args:
        filepath: Path to the PDF file on disk.

    Returns:
        The concatenated page texts as a single string.
    """
    with open(filepath, 'rb') as f:
        reader = PdfReader(f)
        # str.join avoids the quadratic cost of += in a loop.
        return "".join(
            (page.extract_text() or "") + "\n" for page in reader.pages
        )
26
+
27
def process_input(instruction, text_file, pdf_file, image_file):
    """Build a Gemini prompt from the UI inputs and return the model's reply.

    Args:
        instruction: Free-form user instruction (may be empty/None).
        text_file: Optional Gradio file object for a .txt upload.
        pdf_file: Optional Gradio file object for a .pdf upload.
        image_file: Optional uploaded image; when present the request is
            routed to the vision model instead of the text model.

    Returns:
        The model's text response, or a Spanish help message when no input
        was provided at all. (Mojibake "instrucci贸n" in that message is
        repaired to "instrucción".)
    """
    prompt_parts = []

    if instruction:
        prompt_parts.append(instruction)

    if text_file:
        file_content = read_text_file(text_file.name)
        prompt_parts.append(f"Contenido del archivo de texto:\n{file_content}")

    if pdf_file:
        file_content = read_pdf_file(pdf_file.name)
        prompt_parts.append(f"Contenido del archivo PDF:\n{file_content}")

    if image_file:
        # The image is passed as one of the prompt parts to the vision model.
        # NOTE(review): genai accepts PIL.Image parts — confirm the gr.Image
        # component is configured with type="pil".
        prompt_parts.append(image_file)
        response = model_vision.generate_content(prompt_parts)
        return response.text

    if prompt_parts:
        prompt = "\n\n".join(prompt_parts)
        response = model_text.generate_content(prompt)
        return response.text

    # Nothing was provided at all: return a user-facing hint.
    return "Por favor, proporciona alguna instrucción o archivo."
53
+
54
# Gradio UI: one text instruction plus optional .txt / .pdf / image uploads.
# Mojibake in title/description ("Interact煤a", "im谩genes") repaired.
iface = gr.Interface(
    fn=process_input,
    inputs=[
        gr.Textbox(label="Instrucciones"),
        gr.File(label="Archivo de Texto (.txt)"),
        gr.File(label="Archivo PDF (.pdf)"),
        # type="pil": google-generativeai accepts PIL.Image prompt parts;
        # Gradio's default output for gr.Image is a numpy array, which the
        # vision model call would reject.
        gr.Image(type="pil", label="Imagen"),
    ],
    outputs=gr.Textbox(label="Respuesta de Gemini"),
    title="Interactúa con Gemini",
    description="Sube archivos de texto, PDF o imágenes y proporciona instrucciones para que Gemini los procese.",
)

iface.launch()