Y4SH's picture
Update app.py
70b60fa verified
raw
history blame
3.75 kB
import os
import streamlit as st
import requests
import json
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
from io import BytesIO
# Load API tokens from environment variables (configured in the deployment
# environment, e.g. HF Spaces secrets — never hard-coded).
HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
GROQ_TOKEN = os.getenv("GROQ_TOKEN")
# Hugging Face Inference API endpoints:
#   - mbart-50 many-to-one: translates many languages (incl. Tamil) to English
#   - stable-diffusion-v1-4: text-to-image generation
TRANSLATE_API_URL = "https://api-inference.huggingface.co/models/facebook/mbart-large-50-many-to-one-mmt"
IMAGE_API_URL = "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"
# Shared auth header for both Hugging Face endpoints.
headers = {"Authorization": f"Bearer {HUGGINGFACE_TOKEN}"}
# Function to translate Tamil to English
def translate_tamil_to_english(text):
    """Translate Tamil text to English via the Hugging Face Inference API.

    Args:
        text: Tamil source text to translate.

    Returns:
        The translated English string on success, or an
        ``"Error <status>: <body>"`` string on failure (callers in this
        file distinguish success from failure by inspecting the string).
    """
    payload = {"inputs": text}
    # Timeout keeps the Streamlit worker from hanging forever on a stalled
    # request (model cold-starts on the free API can be slow but bounded).
    response = requests.post(
        TRANSLATE_API_URL, headers=headers, json=payload, timeout=120
    )
    if response.status_code == 200:
        result = response.json()
        first = result[0]
        # Translation-task models return "translation_text"; fall back to
        # "generated_text" in case the endpoint serves the generic
        # text-generation schema. The original hard-coded "generated_text"
        # and raised KeyError on the standard translation response.
        return first.get("translation_text", first.get("generated_text", ""))
    else:
        return f"Error {response.status_code}: {response.text}"
# Function to generate an image from a prompt
def generate_image(prompt):
    """Generate an image from an English prompt via Stable Diffusion v1.4.

    Args:
        prompt: English text prompt for the image model.

    Returns:
        A ``PIL.Image.Image`` on success, or an ``"Error <status>: <body>"``
        string on failure (the UI checks the return type with isinstance).
    """
    data = {"inputs": prompt}
    # Image generation can take a while on the free inference API, but a
    # timeout still prevents the Streamlit worker from hanging indefinitely.
    response = requests.post(IMAGE_API_URL, headers=headers, json=data, timeout=180)
    if response.status_code == 200:
        # Successful responses carry raw image bytes in the body.
        image = Image.open(BytesIO(response.content))
        return image
    else:
        return f"Error {response.status_code}: {response.text}"
# Function to generate text using Groq
def generate_text(prompt, max_tokens, temperature):
    """Generate a short story from a prompt using Groq's chat API.

    Args:
        prompt: English text to seed the story with.
        max_tokens: Upper bound on generated tokens.
        temperature: Sampling temperature (0.0 = deterministic, 1.0 = creative).

    Returns:
        The generated story string on success, or an
        ``"Error <status>: <body>"`` string on failure.
    """
    messages = [{"role": "user", "content": prompt}]
    payload = {
        "model": "mixtral-8x7b-32768",
        "messages": messages,
        "max_tokens": max_tokens,
        "temperature": temperature,
    }
    # Timeout prevents the UI from blocking forever on a stalled request.
    response = requests.post(
        "https://api.groq.com/openai/v1/chat/completions",
        headers={"Authorization": f"Bearer {GROQ_TOKEN}"},
        json=payload,
        timeout=120,
    )
    if response.status_code == 200:
        result = response.json()
        # OpenAI-compatible response shape: first choice's message content.
        return result["choices"][0]["message"]["content"]
    else:
        return f"Error {response.status_code}: {response.text}"
# Process input data
def process_input(tamil_text, max_tokens, temperature):
    """Run the full pipeline for one submission.

    Translates the Tamil input to English, then uses the English text both
    as an image prompt and as a story seed.

    Returns:
        A ``(english_text, image, story)`` tuple; each element may be an
        error string if the corresponding API call failed.
    """
    translated = translate_tamil_to_english(tamil_text)
    picture = generate_image(translated)
    story = generate_text(translated, max_tokens, temperature)
    return translated, picture, story
# Streamlit UI: sidebar collects input/settings; main area shows results.
st.markdown("<h1 style='text-align: center;'>Tamil to English Translation and Story Generation App</h1>", unsafe_allow_html=True)
st.sidebar.header("Settings")
tamil_text_input = st.sidebar.text_area("Enter Tamil Text")
max_tokens_input = st.sidebar.slider("Max Tokens", min_value=50, max_value=200, value=100, step=10)
temperature_input = st.sidebar.slider("Temperature", min_value=0.0, max_value=1.0, value=0.7, step=0.1)
if st.sidebar.button("Submit"):
    if not tamil_text_input.strip():
        # Guard against firing three API calls on an empty prompt.
        st.warning("Please enter some Tamil text before submitting.")
    else:
        english_text, image, generated_story = process_input(tamil_text_input, max_tokens_input, temperature_input)
        st.subheader("Translated English Text")
        st.write(english_text)
        st.subheader("Generated Image")
        # generate_image returns a PIL image on success, an error string on failure.
        if isinstance(image, Image.Image):
            # Render through an explicit figure: st.pyplot(plt) with the global
            # pyplot module is deprecated/removed in current Streamlit.
            fig, ax = plt.subplots()
            ax.imshow(np.array(image))
            ax.axis('off')
            st.pyplot(fig)
        else:
            st.error(image)
        st.subheader("Generated Story")
        st.write(generated_story)
# Guide section: shows usage instructions in the sidebar when clicked.
# NOTE(review): st.sidebar.button reruns the script on click, so this text
# only stays visible until the next interaction — confirm this is intended.
if st.sidebar.button("How to Use"):
    st.sidebar.write("""
    ### How to Use This App
    1. **Enter Tamil Text**: Type or paste the Tamil text you want to translate.
    2. **Adjust Settings**: Use the sliders to set the maximum tokens for text generation and the temperature for creativity.
    3. **Submit**: Click the Submit button to generate translations, images, and stories.
    4. **View Results**: Check the translated text, generated image, and story in the main area.
    """)