mateoluksenberg committed on
Commit
a4331e1
verified
1 Parent(s): abf3837

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -25
app.py CHANGED
@@ -5,6 +5,8 @@ import spaces
5
  from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
6
  import os
7
  from threading import Thread
 
 
8
 
9
  import pymupdf
10
  import docx
@@ -33,15 +35,11 @@ h1 {
33
  }
34
  """
35
 
36
-
37
  tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
38
 
39
-
40
-
41
  def extract_text(path):
42
  return open(path, 'r').read()
43
 
44
-
45
  def extract_pdf(path):
46
  doc = pymupdf.open(path)
47
  text = ""
@@ -49,7 +47,6 @@ def extract_pdf(path):
49
  text += page.get_text()
50
  return text
51
 
52
-
53
  def extract_docx(path):
54
  doc = docx.Document(path)
55
  data = []
@@ -58,7 +55,6 @@ def extract_docx(path):
58
  content = '\n\n'.join(data)
59
  return content
60
 
61
-
62
  def extract_pptx(path):
63
  prs = Presentation(path)
64
  text = ""
@@ -68,7 +64,6 @@ def extract_pptx(path):
68
  text += shape.text + "\n"
69
  return text
70
 
71
-
72
  def mode_load(path):
73
  choice = ""
74
  file_type = path.split(".")[-1]
@@ -85,20 +80,15 @@ def mode_load(path):
85
  choice = "doc"
86
  print(content[:100])
87
  return choice, content[:5000]
88
-
89
-
90
  elif file_type in ["png", "jpg", "jpeg", "bmp", "tiff", "webp"]:
91
  content = Image.open(path).convert('RGB')
92
  choice = "image"
93
  return choice, content
94
-
95
  else:
96
  raise gr.Error("Oops, unsupported files.")
97
 
98
-
99
  @spaces.GPU()
100
  def stream_chat(message, history: list, temperature: float, max_length: int, top_p: float, top_k: int, penalty: float):
101
-
102
  model = AutoModelForCausalLM.from_pretrained(
103
  MODEL_ID,
104
  torch_dtype=torch.bfloat16,
@@ -136,7 +126,6 @@ def stream_chat(message, history: list, temperature: float, max_length: int, top
136
  choice = ""
137
  conversation.append({"role": "user", "image": "", "content": message['text']})
138
 
139
-
140
  if choice == "image":
141
  conversation.append({"role": "user", "image": contents, "content": message['text']})
142
  elif choice == "doc":
@@ -168,18 +157,11 @@ def stream_chat(message, history: list, temperature: float, max_length: int, top
168
  buffer += new_text
169
  yield buffer
170
 
171
-
172
- chatbot = gr.Chatbot(
173
- #rtl=True,
174
- )
175
  chat_input = gr.MultimodalTextbox(
176
  interactive=True,
177
  placeholder="Enter message or upload a file ...",
178
  show_label=False,
179
- #rtl=True,
180
-
181
-
182
-
183
  )
184
 
185
  EXAMPLES = [
@@ -189,14 +171,21 @@ EXAMPLES = [
189
  [{"text": "Quiero armar un JSON, solo el JSON sin texto, que contenga los datos de la primera mitad de la tabla de la imagen (las primeras 10 jurisdicciones 901-910). Ten en cuenta que los valores numéricos son decimales de cuatro dígitos. La tabla contiene las siguientes columnas: Codigo, Nombre, Fecha Inicio, Fecha Cese, Coeficiente Ingresos, Coeficiente Gastos y Coeficiente Unificado. La tabla puede contener valores vacíos, en ese caso dejarlos como null. Cada fila de la tabla representa una jurisdicción con sus respectivos valores.", }]
190
  ]
191
 
 
 
 
 
 
 
 
 
 
192
  with gr.Blocks(css=CSS, theme="soft", fill_height=True) as demo:
193
  gr.HTML(TITLE)
194
  gr.HTML(DESCRIPTION)
195
  gr.ChatInterface(
196
  fn=stream_chat,
197
  multimodal=True,
198
-
199
-
200
  textbox=chat_input,
201
  chatbot=chatbot,
202
  fill_height=True,
@@ -247,5 +236,17 @@ with gr.Blocks(css=CSS, theme="soft", fill_height=True) as demo:
247
  gr.Examples(EXAMPLES, [chat_input])
248
 
249
  if __name__ == "__main__":
250
-
251
- demo.queue(api_open=False).launch(show_api=False, share=False, )#server_name="0.0.0.0", )
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
6
  import os
7
  from threading import Thread
8
+ from fastapi import FastAPI
9
+ import uvicorn
10
 
11
  import pymupdf
12
  import docx
 
35
  }
36
  """
37
 
 
38
  tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
39
 
 
 
40
  def extract_text(path):
41
  return open(path, 'r').read()
42
 
 
43
  def extract_pdf(path):
44
  doc = pymupdf.open(path)
45
  text = ""
 
47
  text += page.get_text()
48
  return text
49
 
 
50
  def extract_docx(path):
51
  doc = docx.Document(path)
52
  data = []
 
55
  content = '\n\n'.join(data)
56
  return content
57
 
 
58
  def extract_pptx(path):
59
  prs = Presentation(path)
60
  text = ""
 
64
  text += shape.text + "\n"
65
  return text
66
 
 
67
  def mode_load(path):
68
  choice = ""
69
  file_type = path.split(".")[-1]
 
80
  choice = "doc"
81
  print(content[:100])
82
  return choice, content[:5000]
 
 
83
  elif file_type in ["png", "jpg", "jpeg", "bmp", "tiff", "webp"]:
84
  content = Image.open(path).convert('RGB')
85
  choice = "image"
86
  return choice, content
 
87
  else:
88
  raise gr.Error("Oops, unsupported files.")
89
 
 
90
  @spaces.GPU()
91
  def stream_chat(message, history: list, temperature: float, max_length: int, top_p: float, top_k: int, penalty: float):
 
92
  model = AutoModelForCausalLM.from_pretrained(
93
  MODEL_ID,
94
  torch_dtype=torch.bfloat16,
 
126
  choice = ""
127
  conversation.append({"role": "user", "image": "", "content": message['text']})
128
 
 
129
  if choice == "image":
130
  conversation.append({"role": "user", "image": contents, "content": message['text']})
131
  elif choice == "doc":
 
157
  buffer += new_text
158
  yield buffer
159
 
160
+ chatbot = gr.Chatbot()
 
 
 
161
  chat_input = gr.MultimodalTextbox(
162
  interactive=True,
163
  placeholder="Enter message or upload a file ...",
164
  show_label=False,
 
 
 
 
165
  )
166
 
167
  EXAMPLES = [
 
171
  [{"text": "Quiero armar un JSON, solo el JSON sin texto, que contenga los datos de la primera mitad de la tabla de la imagen (las primeras 10 jurisdicciones 901-910). Ten en cuenta que los valores numéricos son decimales de cuatro dígitos. La tabla contiene las siguientes columnas: Codigo, Nombre, Fecha Inicio, Fecha Cese, Coeficiente Ingresos, Coeficiente Gastos y Coeficiente Unificado. La tabla puede contener valores vacíos, en ese caso dejarlos como null. Cada fila de la tabla representa una jurisdicción con sus respectivos valores.", }]
172
  ]
173
 
174
+ app = FastAPI()
175
+
176
+ def test():
177
  + return "Función test llamada con éxito"
178
+
179
+ @app.get("/test")
180
+ def call_test():
181
+ return {"message": test()}
182
+
183
  with gr.Blocks(css=CSS, theme="soft", fill_height=True) as demo:
184
  gr.HTML(TITLE)
185
  gr.HTML(DESCRIPTION)
186
  gr.ChatInterface(
187
  fn=stream_chat,
188
  multimodal=True,
 
 
189
  textbox=chat_input,
190
  chatbot=chatbot,
191
  fill_height=True,
 
236
  gr.Examples(EXAMPLES, [chat_input])
237
 
238
  if __name__ == "__main__":
239
+ def run_fastapi():
240
+ uvicorn.run(app, host="0.0.0.0", port=8000)
241
+
242
+ def run_gradio():
243
+ demo.queue(api_open=False).launch(show_api=False, share=False)
244
+
245
+ fastapi_thread = Thread(target=run_fastapi)
246
+ fastapi_thread.start()
247
+
248
+ gradio_thread = Thread(target=run_gradio)
249
+ gradio_thread.start()
250
+
251
+ fastapi_thread.join()
252
+ gradio_thread.join()