Update app.py

app.py CHANGED
@@ -6,41 +6,86 @@ import matplotlib.pyplot as plt
 import numpy as np
 from io import BytesIO

+# Hugging Face API URLs and token
+TRANSLATE_API_URL = "https://api-inference.huggingface.co/models/facebook/mbart-large-50-many-to-one-mmt"
+IMAGE_API_URL = "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"
+HUGGINGFACE_TOKEN = "YOUR_HUGGINGFACE_TOKEN"  # Replace with your actual token
+headers = {"Authorization": f"Bearer {HUGGINGFACE_TOKEN}"}
+
+# Function to translate Tamil to English
+def translate_tamil_to_english(text):
+    payload = {"inputs": text}
+    response = requests.post(TRANSLATE_API_URL, headers=headers, json=payload)
+    if response.status_code == 200:
+        result = response.json()
+        return result[0]['generated_text']
+    else:
+        return f"Error {response.status_code}: {response.text}"
+
+# Function to generate an image from a prompt
+def generate_image(prompt):
+    data = {"inputs": prompt}
+    response = requests.post(IMAGE_API_URL, headers=headers, json=data)
+    if response.status_code == 200:
+        image = Image.open(BytesIO(response.content))
+        return image
+    else:
+        return f"Error {response.status_code}: {response.text}"
+
+# Function to generate text using Groq
+def generate_text(prompt, max_tokens, temperature):
+    messages = [{"role": "user", "content": prompt}]
+    payload = {
+        "model": "mixtral-8x7b-32768",
+        "messages": messages,
+        "max_tokens": max_tokens,
+        "temperature": temperature,
     }
+    response = requests.post("https://api.groq.com/openai/v1/chat/completions", headers={"Authorization": "Bearer YOUR_GROQ_TOKEN"}, json=payload)
+    if response.status_code == 200:
+        result = response.json()
+        return result["choices"][0]["message"]["content"]
+    else:
+        return f"Error {response.status_code}: {response.text}"

+# Process input data
+def process_input(tamil_text, max_tokens, temperature):
+    english_text = translate_tamil_to_english(tamil_text)
+    image = generate_image(english_text)
+    generated_story = generate_text(english_text, max_tokens, temperature)
+    return english_text, image, generated_story
+
+# Streamlit UI
 st.markdown("<h1 style='text-align: center;'>Tamil to English Translation and Story Generation App</h1>", unsafe_allow_html=True)
+
+st.sidebar.header("Settings")
+tamil_text_input = st.sidebar.text_area("Enter Tamil Text")
+max_tokens_input = st.sidebar.slider("Max Tokens", min_value=50, max_value=200, value=100, step=10)
+temperature_input = st.sidebar.slider("Temperature", min_value=0.0, max_value=1.0, value=0.7, step=0.1)
+
+if st.sidebar.button("Submit"):
+    english_text, image, generated_story = process_input(tamil_text_input, max_tokens_input, temperature_input)
+
+    st.subheader("Translated English Text")
+    st.write(english_text)
+
+    st.subheader("Generated Image")
+    if isinstance(image, Image.Image):
+        plt.imshow(np.array(image))
+        plt.axis('off')
+        st.pyplot(plt)
+    else:
+        st.error(image)
+
+    st.subheader("Generated Story")
+    st.write(generated_story)
+
+# Guide section
+if st.sidebar.button("How to Use"):
+    st.sidebar.write("""
+    ### How to Use This App
+    1. **Enter Tamil Text**: Type or paste the Tamil text you want to translate.
+    2. **Adjust Settings**: Use the sliders to set the maximum tokens for text generation and the temperature for creativity.
+    3. **Submit**: Click the Submit button to generate translations, images, and stories.
+    4. **View Results**: Check the translated text, generated image, and story in the main area.
+    """)
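Note on the tokens: the committed code keeps `YOUR_HUGGINGFACE_TOKEN` and `YOUR_GROQ_TOKEN` hard-coded as placeholders. A common alternative on Hugging Face Spaces is to store the real tokens as Space secrets (which are exposed to the app as environment variables) and read them at startup. The sketch below is a minimal, illustrative version of that pattern; the secret names `HUGGINGFACE_TOKEN` and `GROQ_TOKEN` are assumptions, not part of this commit.

import os

# Illustrative only: assumes secrets named HUGGINGFACE_TOKEN and GROQ_TOKEN are
# configured in the Space settings and exposed as environment variables.
HUGGINGFACE_TOKEN = os.environ.get("HUGGINGFACE_TOKEN", "")
GROQ_TOKEN = os.environ.get("GROQ_TOKEN", "")

# Headers built from the environment instead of literals committed to the repo.
headers = {"Authorization": f"Bearer {HUGGINGFACE_TOKEN}"}
groq_headers = {"Authorization": f"Bearer {GROQ_TOKEN}"}

With that in place, the Groq request in generate_text could pass groq_headers instead of the inline "Bearer YOUR_GROQ_TOKEN" literal, and no token would need to appear in the repository.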