CamiloVega committed on
Commit
654a56c
verified
1 Parent(s): a62e63a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +98 -98
app.py CHANGED
@@ -4,24 +4,24 @@ import whisper
4
  import tempfile
5
  import gradio as gr
6
  from pydub import AudioSegment
7
- import fitz # PyMuPDF para manejar PDFs
8
- import docx # Para manejar archivos .docx
9
- import pandas as pd # Para manejar archivos .xlsx y .csv
10
- # from google.colab import userdata # Importa userdata de google.colab
11
  import requests
12
  from bs4 import BeautifulSoup
13
 
14
- # Configura tu clave API de OpenAI usando Google Colab userdata
15
  # openai.api_key = userdata.get('OPENAI_API_KEY')
16
 
17
- # Cargar las variables de entorno desde el entorno de Hugging Face
18
  openai.api_key = os.getenv("OPENAI_API_KEY")
19
 
20
- # Cargar el modelo Whisper de mayor calidad una vez
21
  model = whisper.load_model("large")
22
 
23
  def preprocess_audio(audio_file):
24
- """Preprocesa el archivo de audio para mejorar la calidad."""
25
  try:
26
  audio = AudioSegment.from_file(audio_file)
27
  audio = audio.apply_gain(-audio.dBFS + (-20))
@@ -29,150 +29,150 @@ def preprocess_audio(audio_file):
29
  audio.export(temp_file.name, format="mp3")
30
  return temp_file.name
31
  except Exception as e:
32
- return f"Error al preprocesar el archivo de audio: {str(e)}"
33
 
34
- def transcribir_audio(audio_file):
35
- """Transcribe un archivo de audio."""
36
  try:
37
- archivo_path = preprocess_audio(audio_file) if isinstance(audio_file, str) else preprocess_audio(tempfile.NamedTemporaryFile(delete=False, suffix=".mp3", mode='w+b').name)
38
- resultado = model.transcribe(archivo_path)
39
- return resultado.get("text", "Error en la transcripción")
40
  except Exception as e:
41
- return f"Error al procesar el archivo de audio: {str(e)}"
42
 
43
- def leer_documento(documento_path):
44
- """Lee el contenido de un documento PDF, DOCX, XLSX o CSV."""
45
  try:
46
- if documento_path.endswith(".pdf"):
47
- doc = fitz.open(documento_path)
48
- return "\n".join([pagina.get_text() for pagina in doc])
49
- elif documento_path.endswith(".docx"):
50
- doc = docx.Document(documento_path)
51
- return "\n".join([parrafo.text for parrafo in doc.paragraphs])
52
- elif documento_path.endswith(".xlsx"):
53
- return pd.read_excel(documento_path).to_string()
54
- elif documento_path.endswith(".csv"):
55
- return pd.read_csv(documento_path).to_string()
56
  else:
57
- return "Tipo de archivo no soportado. Por favor suba un documento PDF, DOCX, XLSX o CSV."
58
  except Exception as e:
59
- return f"Error al leer el documento: {str(e)}"
60
 
61
- def leer_url(url):
62
- """Lee el contenido de una URL."""
63
  try:
64
  response = requests.get(url)
65
  response.raise_for_status()
66
  soup = BeautifulSoup(response.content, 'html.parser')
67
  return soup.get_text()
68
  except Exception as e:
69
- return f"Error al leer la URL: {str(e)}"
70
 
71
- def generar_noticia(instrucciones, hechos, tamaño, tono, urls, *args):
72
- """Genera una noticia a partir de instrucciones, hechos, URLs, documentos y transcripciones."""
73
- base_de_conocimiento = {"instrucciones": instrucciones, "hechos": hechos, "contenido_documentos": [], "audio_data": [], "contenido_urls": []}
74
- num_audios = 5 * 3 # 5 audios * 3 campos (audio, nombre, cargo)
75
  audios = args[:num_audios]
76
- documentos = args[num_audios:]
77
 
78
  for url in urls.split():
79
  if url:
80
- base_de_conocimiento["contenido_urls"].append(leer_url(url))
81
 
82
- for documento in documentos:
83
- if documento is not None:
84
- base_de_conocimiento["contenido_documentos"].append(leer_documento(documento.name))
85
 
86
  for i in range(0, len(audios), 3):
87
- audio_file, nombre, cargo = audios[i:i+3]
88
  if audio_file is not None:
89
- base_de_conocimiento["audio_data"].append({"audio": audio_file, "nombre": nombre, "cargo": cargo})
90
 
91
- transcripciones_texto, transcripciones_brutas, total_citas_directas = "", "", 0
92
 
93
- for idx, data in enumerate(base_de_conocimiento["audio_data"]):
94
  if data["audio"] is not None:
95
- transcripcion = transcribir_audio(data["audio"])
96
- transcripcion_texto = f'"{transcripcion}" - {data["nombre"]}, {data["cargo"]}'
97
- transcripcion_bruta = f'[Audio {idx + 1}]: "{transcripcion}" - {data["nombre"]}, {data["cargo"]}'
98
- if total_citas_directas < len(base_de_conocimiento["audio_data"]) * 0.8:
99
- transcripciones_texto += transcripcion_texto + "\n"
100
- total_citas_directas += 1
101
  else:
102
- transcripciones_texto += f'{data["nombre"]} mencionó que {transcripcion}' + "\n"
103
- transcripciones_brutas += transcripcion_bruta + "\n\n"
104
-
105
- contenido_documentos = "\n\n".join(base_de_conocimiento["contenido_documentos"])
106
- contenido_urls = "\n\n".join(base_de_conocimiento["contenido_urls"])
107
-
108
- prompt_interno = """
109
- Instrucciones para el modelo:
110
- - Debes seguir los principios de una noticia: es decir, procura siempre responder las 5 W de una noticia en el primer párrafo (Who?, What?, When?, Where?, Why?).
111
- - Asegúrate de que al menos el 80% de las citas sean directas y estén entrecomilladas.
112
- - El 20% restante puede ser citas indirectas.
113
- - No inventes información nueva.
114
- - Sé riguroso con los hechos proporcionados.
115
- - Al procesar los documentos cargados, extrae y resalta citas importantes y testimonios textuales de las fuentes.
116
- - Al procesar los documentos cargados, extrae y resalta cifras clave.
117
  """
118
 
119
  prompt = f"""
120
- {prompt_interno}
121
- Escribe una noticia con la siguiente información, incluyendo un título, un gancho de 15 palabras (el gancho es lo que se conoce en inglés como hook, información adicional que complementa el título), y el cuerpo del contenido cuyo tamaño es {tamaño} palabras. El tono debe ser {tono}.
122
- Instrucciones: {base_de_conocimiento["instrucciones"]}
123
- Hechos: {base_de_conocimiento["hechos"]}
124
- Contenido adicional de los documentos: {contenido_documentos}
125
- Contenido adicional de las URLs: {contenido_urls}
126
- Utiliza las siguientes transcripciones como citas directas e indirectas (sin cambiar ni inventar contenido):
127
- {transcripciones_texto}
128
  """
129
 
130
  try:
131
- respuesta = openai.ChatCompletion.create(
132
  model="gpt-3.5-turbo",
133
  messages=[{"role": "user", "content": prompt}],
134
  temperature=0.1
135
  )
136
- noticia = respuesta['choices'][0]['message']['content']
137
- return noticia, transcripciones_brutas
138
  except Exception as e:
139
- return f"Error al generar la noticia: {str(e)}", ""
140
 
141
  with gr.Blocks() as demo:
142
- gr.Markdown("## Generador noticias todo en uno")
143
  with gr.Row():
144
  with gr.Column(scale=2):
145
- instrucciones = gr.Textbox(label="Instrucciones para la noticia", lines=2)
146
- hechos = gr.Textbox(label="Describe los hechos de la noticia", lines=4)
147
- tamaño = gr.Number(label="Tamaño del cuerpo de la noticia (en palabras)", value=100)
148
- tono = gr.Dropdown(label="Tono de la noticia", choices=["serio", "neutral", "divertido"], value="neutral")
149
- urls = gr.Textbox(label="URLs (separadas por espacio)", lines=2)
150
  with gr.Column(scale=3):
151
- inputs_list = [instrucciones, hechos, tama帽o, tono, urls]
152
  with gr.Tabs():
153
  for i in range(1, 6):
154
  with gr.TabItem(f"Audio {i}"):
155
  audio = gr.Audio(type="filepath", label=f"Audio {i}")
156
- nombre = gr.Textbox(label="Nombre", scale=1)
157
- cargo = gr.Textbox(label="Cargo", scale=1)
158
- inputs_list.extend([audio, nombre, cargo])
159
  for i in range(1, 6):
160
- with gr.TabItem(f"Documento {i}"):
161
- documento = gr.File(label=f"Documento {i}", type="filepath", file_count="single")
162
- inputs_list.append(documento)
163
 
164
- gr.Markdown("---") # Separador visual
165
 
166
  with gr.Row():
167
- transcripciones_output = gr.Textbox(label="Transcripciones", lines=10)
168
 
169
- gr.Markdown("---") # Separador visual
170
 
171
  with gr.Row():
172
- generar = gr.Button("Generar borrador")
173
  with gr.Row():
174
- noticia_output = gr.Textbox(label="Borrador generado", lines=20)
175
 
176
- generar.click(fn=generar_noticia, inputs=inputs_list, outputs=[noticia_output, transcripciones_output])
177
 
178
- demo.launch(share=True)
 
4
  import tempfile
5
  import gradio as gr
6
  from pydub import AudioSegment
7
+ import fitz # PyMuPDF for handling PDFs
8
+ import docx # For handling .docx files
9
+ import pandas as pd # For handling .xlsx and .csv files
10
+ # from google.colab import userdata # Import userdata from google.colab
11
  import requests
12
  from bs4 import BeautifulSoup
13
 
14
+ # Configure your OpenAI API key using Google Colab userdata
15
  # openai.api_key = userdata.get('OPENAI_API_KEY')
16
 
17
+ # Load environment variables from the Hugging Face environment
18
  openai.api_key = os.getenv("OPENAI_API_KEY")
19
 
20
+ # Load the highest quality Whisper model once
21
  model = whisper.load_model("large")
22
 
23
  def preprocess_audio(audio_file):
24
+ """Preprocess the audio file to improve quality."""
25
  try:
26
  audio = AudioSegment.from_file(audio_file)
27
  audio = audio.apply_gain(-audio.dBFS + (-20))
 
29
  audio.export(temp_file.name, format="mp3")
30
  return temp_file.name
31
  except Exception as e:
32
+ return f"Error preprocessing the audio file: {str(e)}"
33
 
34
+ def transcribe_audio(audio_file):
35
+ """Transcribe an audio file."""
36
  try:
37
+ file_path = preprocess_audio(audio_file) if isinstance(audio_file, str) else preprocess_audio(tempfile.NamedTemporaryFile(delete=False, suffix=".mp3", mode='w+b').name)
38
+ result = model.transcribe(file_path)
39
+ return result.get("text", "Error in transcription")
40
  except Exception as e:
41
+ return f"Error processing the audio file: {str(e)}"
42
 
43
+ def read_document(document_path):
44
+ """Read the content of a PDF, DOCX, XLSX or CSV document."""
45
  try:
46
+ if document_path.endswith(".pdf"):
47
+ doc = fitz.open(document_path)
48
+ return "\n".join([page.get_text() for page in doc])
49
+ elif document_path.endswith(".docx"):
50
+ doc = docx.Document(document_path)
51
+ return "\n".join([paragraph.text for paragraph in doc.paragraphs])
52
+ elif document_path.endswith(".xlsx"):
53
+ return pd.read_excel(document_path).to_string()
54
+ elif document_path.endswith(".csv"):
55
+ return pd.read_csv(document_path).to_string()
56
  else:
57
+ return "Unsupported file type. Please upload a PDF, DOCX, XLSX or CSV document."
58
  except Exception as e:
59
+ return f"Error reading the document: {str(e)}"
60
 
61
+ def read_url(url):
62
+ """Read the content of a URL."""
63
  try:
64
  response = requests.get(url)
65
  response.raise_for_status()
66
  soup = BeautifulSoup(response.content, 'html.parser')
67
  return soup.get_text()
68
  except Exception as e:
69
+ return f"Error reading the URL: {str(e)}"
70
 
71
+ def generate_news(instructions, facts, size, tone, urls, *args):
72
+ """Generate a news article based on instructions, facts, URLs, documents, and transcriptions."""
73
+ knowledge_base = {"instructions": instructions, "facts": facts, "document_content": [], "audio_data": [], "url_content": []}
74
+ num_audios = 5 * 3 # 5 audios * 3 fields (audio, name, position)
75
  audios = args[:num_audios]
76
+ documents = args[num_audios:]
77
 
78
  for url in urls.split():
79
  if url:
80
+ knowledge_base["url_content"].append(read_url(url))
81
 
82
+ for document in documents:
83
+ if document is not None:
84
+ knowledge_base["document_content"].append(read_document(document.name))
85
 
86
  for i in range(0, len(audios), 3):
87
+ audio_file, name, position = audios[i:i+3]
88
  if audio_file is not None:
89
+ knowledge_base["audio_data"].append({"audio": audio_file, "name": name, "position": position})
90
 
91
+ transcriptions_text, raw_transcriptions, total_direct_quotes = "", "", 0
92
 
93
+ for idx, data in enumerate(knowledge_base["audio_data"]):
94
  if data["audio"] is not None:
95
+ transcription = transcribe_audio(data["audio"])
96
+ transcription_text = f'"{transcription}" - {data["name"]}, {data["position"]}'
97
+ raw_transcription = f'[Audio {idx + 1}]: "{transcription}" - {data["name"]}, {data["position"]}'
98
+ if total_direct_quotes < len(knowledge_base["audio_data"]) * 0.8:
99
+ transcriptions_text += transcription_text + "\n"
100
+ total_direct_quotes += 1
101
  else:
102
+ transcriptions_text += f'{data["name"]} mentioned that {transcription}' + "\n"
103
+ raw_transcriptions += raw_transcription + "\n\n"
104
+
105
+ document_content = "\n\n".join(knowledge_base["document_content"])
106
+ url_content = "\n\n".join(knowledge_base["url_content"])
107
+
108
+ internal_prompt = """
109
+ Instructions for the model:
110
+ - Follow the principles of news writing: always try to answer the 5 Ws of a news story in the first paragraph (Who?, What?, When?, Where?, Why?).
111
+ - Ensure that at least 80% of the quotes are direct and in quotation marks.
112
+ - The remaining 20% can be indirect quotes.
113
+ - Do not invent new information.
114
+ - Be rigorous with the provided facts.
115
+ - When processing uploaded documents, extract and highlight important quotes and verbatim testimonies from sources.
116
+ - When processing uploaded documents, extract and highlight key figures.
117
  """
118
 
119
  prompt = f"""
120
+ {internal_prompt}
121
+ Write a news article with the following information, including a title, a 15-word hook (additional information that complements the title), and the body content with a size of {size} words. The tone should be {tone}.
122
+ Instructions: {knowledge_base["instructions"]}
123
+ Facts: {knowledge_base["facts"]}
124
+ Additional content from documents: {document_content}
125
+ Additional content from URLs: {url_content}
126
+ Use the following transcriptions as direct and indirect quotes (without changing or inventing content):
127
+ {transcriptions_text}
128
  """
129
 
130
  try:
131
+ response = openai.ChatCompletion.create(
132
  model="gpt-3.5-turbo",
133
  messages=[{"role": "user", "content": prompt}],
134
  temperature=0.1
135
  )
136
+ news_article = response['choices'][0]['message']['content']
137
+ return news_article, raw_transcriptions
138
  except Exception as e:
139
+ return f"Error generating the news article: {str(e)}", ""
140
 
141
  with gr.Blocks() as demo:
142
+ gr.Markdown("## All-in-One News Generator")
143
  with gr.Row():
144
  with gr.Column(scale=2):
145
+ instructions = gr.Textbox(label="Instructions for the news article", lines=2)
146
+ facts = gr.Textbox(label="Describe the facts of the news", lines=4)
147
+ size = gr.Number(label="Size of the news body (in words)", value=100)
148
+ tone = gr.Dropdown(label="Tone of the news", choices=["serious", "neutral", "lighthearted"], value="neutral")
149
+ urls = gr.Textbox(label="URLs (separated by space)", lines=2)
150
  with gr.Column(scale=3):
151
+ inputs_list = [instructions, facts, size, tone, urls]
152
  with gr.Tabs():
153
  for i in range(1, 6):
154
  with gr.TabItem(f"Audio {i}"):
155
  audio = gr.Audio(type="filepath", label=f"Audio {i}")
156
+ name = gr.Textbox(label="Name", scale=1)
157
+ position = gr.Textbox(label="Position", scale=1)
158
+ inputs_list.extend([audio, name, position])
159
  for i in range(1, 6):
160
+ with gr.TabItem(f"Document {i}"):
161
+ document = gr.File(label=f"Document {i}", type="filepath", file_count="single")
162
+ inputs_list.append(document)
163
 
164
+ gr.Markdown("---") # Visual separator
165
 
166
  with gr.Row():
167
+ transcriptions_output = gr.Textbox(label="Transcriptions", lines=10)
168
 
169
+ gr.Markdown("---") # Visual separator
170
 
171
  with gr.Row():
172
+ generate = gr.Button("Generate draft")
173
  with gr.Row():
174
+ news_output = gr.Textbox(label="Generated draft", lines=20)
175
 
176
+ generate.click(fn=generate_news, inputs=inputs_list, outputs=[news_output, transcriptions_output])
177
 
178
+ demo.launch(share=True)