Umar Majeed committed: Update app.py

app.py CHANGED
@@ -1,17 +1,18 @@
 import streamlit as st
+import zipfile
+import tempfile
 import requests
 import pdfplumber
 import torch
 from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
+import os
+import warnings
 from reportlab.lib.pagesizes import letter
 from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
 from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
-import os

-#
-
-pdf_path = "./form.pdf"  # Temporary path for uploaded files
-output_pdf_path = "./response_output.pdf"  # Path to save the PDF
+# Suppress warnings
+warnings.filterwarnings("ignore")

 # Setup models
 device = "cuda:0" if torch.cuda.is_available() else "cpu"
@@ -30,11 +31,12 @@ whisper_pipe = pipeline(
     device=device
 )

+# IBM Granite API URL and Headers
 granite_url = "https://us-south.ml.cloud.ibm.com/ml/v1/text/generation?version=2023-05-29"
 granite_headers = {
     "Accept": "application/json",
     "Content-Type": "application/json",
-    "Authorization": "Bearer
+    "Authorization": "Bearer YOUR_API_KEY_HERE"  # Replace with your actual API key
 }

 # Function to transcribe audio files
@@ -68,7 +70,7 @@ def generate_form_data(text, questions):
             "repetition_penalty": 1.05
         },
         "model_id": "ibm/granite-13b-chat-v2",
-        "project_id": "
+        "project_id": "YOUR_PROJECT_ID",  # Replace with your actual project ID
         "moderations": {
             "hap": {
                 "input": {
@@ -117,36 +119,39 @@ def save_responses_to_pdf(responses, output_pdf_path):

     document.build(content)

-# Streamlit
-st.title("Audio to Form
+# Streamlit Interface
+st.title("Audio to Form Filling")

-
-
-uploaded_pdf = st.file_uploader("Upload PDF File", type=["pdf"])
+zip_file = st.file_uploader("Upload ZIP File with Audio Files", type="zip")
+pdf_file = st.file_uploader("Upload PDF Form", type="pdf")

-if
-
-
-
+if zip_file and pdf_file:
+    with tempfile.TemporaryDirectory() as tmp_dir:
+        with zipfile.ZipFile(zip_file, 'r') as zip_ref:
+            zip_ref.extractall(tmp_dir)

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        responses = []
+        for filename in os.listdir(tmp_dir):
+            if filename.endswith((".wav", ".mp3")):
+                file_path = os.path.join(tmp_dir, filename)
+                # Transcribe audio
+                transcribed_text = transcribe_audio(file_path)
+                # Extract text and form fields from PDF
+                pdf_text, pdf_questions = extract_text_from_pdf(pdf_file)
+                # Generate form data
+                form_data = generate_form_data(transcribed_text, pdf_questions)
+                responses.append(form_data)
+                st.write(f"File {len(responses)}:\n{form_data}\n")  # Display the extracted form data with numbering
+
+        # Save all responses to a PDF
+        output_pdf_path = tempfile.NamedTemporaryFile(delete=False, suffix=".pdf").name
+        save_responses_to_pdf(responses, output_pdf_path)
+
+        # Provide a download button for the generated PDF
+        with open(output_pdf_path, "rb") as f:
+            st.download_button(
+                label="Download Processed PDF",
+                data=f,
+                file_name="processed_output.pdf",
+                mime="application/pdf"
+            )
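
The diff only touches a few hunks of app.py, so the helper functions it calls are defined outside the changed regions. The sketches below reconstruct what those pieces plausibly look like, under stated assumptions; they are not quotes of the actual file. The hunk at line 30 shows only the tail of the whisper_pipe = pipeline(...) call. A minimal sketch of how such a pipeline is usually built from the imported AutoModelForSpeechSeq2Seq, AutoProcessor and pipeline helpers follows; the checkpoint name "openai/whisper-large-v3" is an assumption, since the diff does not show which model the Space loads.

import torch
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline

device = "cuda:0" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

# Assumed checkpoint; the diff does not reveal the actual model id used by the Space
model_id = "openai/whisper-large-v3"

model = AutoModelForSpeechSeq2Seq.from_pretrained(
    model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
)
model.to(device)
processor = AutoProcessor.from_pretrained(model_id)

whisper_pipe = pipeline(
    "automatic-speech-recognition",
    model=model,
    tokenizer=processor.tokenizer,
    feature_extractor=processor.feature_extractor,
    torch_dtype=torch_dtype,
    device=device
)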
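The new Streamlit block calls transcribe_audio(file_path), whose body sits outside the diff. A minimal sketch, assuming it simply wraps the whisper_pipe defined above:

def transcribe_audio(file_path):
    # Run the ASR pipeline on a single audio file and return the recognized text
    result = whisper_pipe(file_path)
    return result["text"]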
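extract_text_from_pdf(pdf_file) is expected to return both the raw PDF text and the list of form questions. A sketch using the imported pdfplumber; the question-detection heuristic (every non-empty line) is an assumption, not the Space's actual logic:

def extract_text_from_pdf(pdf_file):
    # Collect the text of every page with pdfplumber
    text = ""
    with pdfplumber.open(pdf_file) as pdf:
        for page in pdf.pages:
            text += (page.extract_text() or "") + "\n"
    # Assumed heuristic: treat each non-empty line as a potential form field/question
    questions = [line.strip() for line in text.splitlines() if line.strip()]
    return text, questions

Since the new loop calls this once per audio file on the same uploaded pdf_file, hoisting the call out of the loop (or seeking the uploaded file back to the start before each read) would avoid re-parsing or exhausting the file object.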
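The hunk at line 68 shows only fragments of the watsonx.ai request body (repetition_penalty, model_id, project_id and the start of the moderations.hap block). A sketch of how generate_form_data might assemble and send that request; the prompt text, decoding_method, max_new_tokens and the moderation settings are assumptions:

def generate_form_data(text, questions):
    # Assumed prompt; the real prompt used by the Space is not visible in the diff
    prompt = (
        "Using the transcript below, provide an answer for each form field.\n\n"
        f"Transcript:\n{text}\n\nForm fields:\n" + "\n".join(questions)
    )
    body = {
        "input": prompt,
        "parameters": {
            "decoding_method": "greedy",
            "max_new_tokens": 500,
            "repetition_penalty": 1.05
        },
        "model_id": "ibm/granite-13b-chat-v2",
        "project_id": "YOUR_PROJECT_ID",  # placeholder, as in the diff
        "moderations": {
            "hap": {
                "input": {"enabled": True, "threshold": 0.75},
                "output": {"enabled": True, "threshold": 0.75}
            }
        }
    }
    response = requests.post(granite_url, headers=granite_headers, json=body)
    response.raise_for_status()
    # The watsonx.ai text generation endpoint returns the completion under results[0].generated_text
    return response.json()["results"][0]["generated_text"]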
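Finally, save_responses_to_pdf is visible only through its closing document.build(content) line. A sketch consistent with the reportlab imports at the top of the file; the exact styles and layout are assumptions:

def save_responses_to_pdf(responses, output_pdf_path):
    document = SimpleDocTemplate(output_pdf_path, pagesize=letter)
    styles = getSampleStyleSheet()
    # Assumed custom style; the diff only shows that ParagraphStyle is imported
    response_style = ParagraphStyle(name="Response", parent=styles["Normal"], spaceAfter=12)

    content = []
    for idx, response in enumerate(responses, start=1):
        content.append(Paragraph(f"File {idx}:", styles["Heading2"]))
        # Paragraph interprets its text as mini-HTML, so encode newlines as <br/>
        content.append(Paragraph(str(response).replace("\n", "<br/>"), response_style))
        content.append(Spacer(1, 12))

    document.build(content)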