import os
import openai
import whisper
import tempfile
import gradio as gr
from pydub import AudioSegment
import fitz # PyMuPDF for handling PDFs
import docx # For handling .docx files
import pandas as pd # For handling .xlsx and .csv files
# from google.colab import userdata # Import userdata from google.colab
import requests
from bs4 import BeautifulSoup

# Configure your OpenAI API key using Google Colab userdata
# openai.api_key = userdata.get('OPENAI_API_KEY')

# Load environment variables from the Hugging Face environment
openai.api_key = os.getenv("OPENAI_API_KEY")

# Load the highest quality Whisper model once
model = whisper.load_model("large")
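# Assumption: the Space has enough memory for the "large" checkpoint (roughly
# 10 GB of VRAM on GPU); a smaller model such as "medium" can be swapped in here.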

def preprocess_audio(audio_file):
    """Preprocess the audio file to improve quality."""
    try:
        audio = AudioSegment.from_file(audio_file)
        # Normalize loudness to -20 dBFS so quiet recordings transcribe better
        audio = audio.apply_gain(-audio.dBFS + (-20))
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as temp_file:
            audio.export(temp_file.name, format="mp3")
            return temp_file.name
    except Exception as e:
        return f"Error preprocessing the audio file: {str(e)}"

def transcribe_audio(audio_file):
    """Transcribe an audio file."""
    try:
        # gr.Audio(type="filepath") hands over a path string
        file_path = preprocess_audio(audio_file)
        result = model.transcribe(file_path)
        return result.get("text", "Error in transcription")
    except Exception as e:
        return f"Error processing the audio file: {str(e)}"

def read_document(document_path):
    """Read the content of a PDF, DOCX, XLSX or CSV document."""
    try:
        # Match extensions case-insensitively so ".PDF" uploads work too
        path = document_path.lower()
        if path.endswith(".pdf"):
            doc = fitz.open(document_path)
            return "\n".join([page.get_text() for page in doc])
        elif path.endswith(".docx"):
            doc = docx.Document(document_path)
            return "\n".join([paragraph.text for paragraph in doc.paragraphs])
        elif path.endswith(".xlsx"):
            return pd.read_excel(document_path).to_string()
        elif path.endswith(".csv"):
            return pd.read_csv(document_path).to_string()
        else:
            return "Unsupported file type. Please upload a PDF, DOCX, XLSX or CSV document."
    except Exception as e:
        return f"Error reading the document: {str(e)}"

def read_url(url):
    """Read the content of a URL."""
    try:
        # A timeout keeps one dead link from hanging the whole request
        response = requests.get(url, timeout=15)
        response.raise_for_status()
        soup = BeautifulSoup(response.content, 'html.parser')
        return soup.get_text()
    except Exception as e:
        return f"Error reading the URL: {str(e)}"

def generate_news(instructions, facts, size, tone, urls, *args):
    """Generate a news article based on instructions, facts, URLs, documents, and transcriptions."""
    knowledge_base = {"instructions": instructions, "facts": facts, "document_content": [], "audio_data": [], "url_content": []}
    # *args arrives in the same order as inputs_list below: five (audio, name,
    # position) triplets from the audio tabs, followed by five document files
    num_audios = 5 * 3  # 5 audios * 3 fields (audio, name, position)
    audios = args[:num_audios]
    documents = args[num_audios:]

    for url in urls.split():
        if url:
            knowledge_base["url_content"].append(read_url(url))

    for document in documents:
        if document is not None:
            # gr.File(type="filepath") delivers a plain path string, not a file object
            knowledge_base["document_content"].append(read_document(document))

    for i in range(0, len(audios), 3):
        audio_file, name, position = audios[i:i+3]
        if audio_file is not None:
            knowledge_base["audio_data"].append({"audio": audio_file, "name": name, "position": position})

    transcriptions_text, raw_transcriptions, total_direct_quotes = "", "", 0
    for idx, data in enumerate(knowledge_base["audio_data"]):
        if data["audio"] is not None:
            transcription = transcribe_audio(data["audio"])
            transcription_text = f'"{transcription}" - {data["name"]}, {data["position"]}'
            raw_transcription = f'[Audio {idx + 1}]: "{transcription}" - {data["name"]}, {data["position"]}'
            # Keep roughly 80% of the quotes direct; render the rest indirectly
            if total_direct_quotes < len(knowledge_base["audio_data"]) * 0.8:
                transcriptions_text += transcription_text + "\n"
                total_direct_quotes += 1
            else:
                transcriptions_text += f'{data["name"]} mentioned that {transcription}' + "\n"
            raw_transcriptions += raw_transcription + "\n\n"

    document_content = "\n\n".join(knowledge_base["document_content"])
    url_content = "\n\n".join(knowledge_base["url_content"])

    internal_prompt = """
    Instructions for the model:
    - Follow the principles of news writing: always try to answer the 5 Ws of a news story in the first paragraph (Who?, What?, When?, Where?, Why?).
    - Ensure that at least 80% of the quotes are direct and in quotation marks.
    - The remaining 20% can be indirect quotes.
    - Do not invent new information.
    - Be rigorous with the provided facts.
    - When processing uploaded documents, extract and highlight important quotes and verbatim testimonies from sources.
    - When processing uploaded documents, extract and highlight key figures.
    """

    prompt = f"""
    {internal_prompt}
    Write a news article with the following information, including a title, a 15-word hook (additional information that complements the title), and the body content with a size of {size} words. The tone should be {tone}.
    Instructions: {knowledge_base["instructions"]}
    Facts: {knowledge_base["facts"]}
    Additional content from documents: {document_content}
    Additional content from URLs: {url_content}
    Use the following transcriptions as direct and indirect quotes (without changing or inventing content):
    {transcriptions_text}
    """

    try:
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.1
        )
        news_article = response['choices'][0]['message']['content']
        return news_article, raw_transcriptions
    except Exception as e:
        return f"Error generating the news article: {str(e)}", ""

with gr.Blocks() as demo:
    gr.Markdown("## All-in-One News Generator")
    with gr.Row():
        with gr.Column(scale=2):
            instructions = gr.Textbox(label="Instructions for the news article", lines=2)
            facts = gr.Textbox(label="Describe the facts of the news", lines=4)
            # precision=0 keeps the word count an integer in the prompt
            size = gr.Number(label="Size of the news body (in words)", value=100, precision=0)
            tone = gr.Dropdown(label="Tone of the news", choices=["serious", "neutral", "lighthearted"], value="neutral")
            urls = gr.Textbox(label="URLs (separated by space)", lines=2)
        with gr.Column(scale=3):
            inputs_list = [instructions, facts, size, tone, urls]
            with gr.Tabs():
                for i in range(1, 6):
                    with gr.TabItem(f"Audio {i}"):
                        audio = gr.Audio(type="filepath", label=f"Audio {i}")
                        name = gr.Textbox(label="Name", scale=1)
                        position = gr.Textbox(label="Position", scale=1)
                        inputs_list.extend([audio, name, position])
                for i in range(1, 6):
                    with gr.TabItem(f"Document {i}"):
                        document = gr.File(label=f"Document {i}", type="filepath", file_count="single")
                        inputs_list.append(document)

    gr.Markdown("---")  # Visual separator
    with gr.Row():
        transcriptions_output = gr.Textbox(label="Transcriptions", lines=10)

    gr.Markdown("---")  # Visual separator
    with gr.Row():
        generate = gr.Button("Generate draft")
    with gr.Row():
        news_output = gr.Textbox(label="Generated draft", lines=20)

    generate.click(fn=generate_news, inputs=inputs_list, outputs=[news_output, transcriptions_output])

demo.launch(share=True)