Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,5 +1,4 @@
|
|
1 |
import os
|
2 |
-
import concurrent.futures
|
3 |
from huggingface_hub import login
|
4 |
from transformers import MarianMTModel, MarianTokenizer, pipeline
|
5 |
import requests
|
@@ -7,87 +6,107 @@ import io
|
|
7 |
from PIL import Image
|
8 |
import gradio as gr
|
9 |
|
10 |
-
#
|
11 |
-
hf_token = os.getenv("HUGGINGFACE_API_KEY")
|
12 |
-
if hf_token:
|
13 |
-
|
14 |
-
else:
|
15 |
-
raise ValueError("Hugging Face token not found in environment variables.")
|
16 |
|
17 |
-
#
|
18 |
-
|
19 |
-
model_name = f"Helsinki-NLP/opus-mt-{src_lang}-{tgt_lang}"
|
20 |
-
tokenizer = MarianTokenizer.from_pretrained(model_name)
|
21 |
-
model = MarianMTModel.from_pretrained(model_name)
|
22 |
-
translator = pipeline("translation", model=model, tokenizer=tokenizer)
|
23 |
-
return translator
|
24 |
|
25 |
-
#
|
26 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
27 |
try:
|
28 |
-
|
29 |
-
translation = translator(
|
30 |
-
|
|
|
31 |
except Exception as e:
|
32 |
return f"An error occurred: {str(e)}"
|
33 |
|
34 |
-
#
|
35 |
flux_API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
|
36 |
flux_headers = {"Authorization": f"Bearer {hf_token}"}
|
|
|
|
|
37 |
def generate_image(prompt):
|
38 |
try:
|
39 |
response = requests.post(flux_API_URL, headers=flux_headers, json={"inputs": prompt})
|
40 |
if response.status_code == 200:
|
41 |
-
|
42 |
-
image =
|
43 |
return image
|
44 |
else:
|
|
|
45 |
return None
|
46 |
except Exception as e:
|
47 |
-
print(f"
|
48 |
return None
|
49 |
|
50 |
-
#
|
51 |
mistral_API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-v0.1"
|
52 |
mistral_headers = {"Authorization": f"Bearer {hf_token}"}
|
|
|
53 |
def generate_creative_text(translated_text):
|
54 |
try:
|
55 |
-
response = requests.post(mistral_API_URL, headers=mistral_headers, json={"inputs": translated_text
|
56 |
if response.status_code == 200:
|
57 |
-
|
|
|
58 |
else:
|
59 |
return "Error generating creative text"
|
60 |
except Exception as e:
|
61 |
-
|
62 |
-
return None
|
63 |
|
64 |
-
#
|
65 |
-
def translate_generate_image_and_text(
|
66 |
-
|
67 |
-
|
68 |
-
image_future = executor.submit(generate_image, translated_text)
|
69 |
-
creative_text_future = executor.submit(generate_creative_text, translated_text)
|
70 |
-
image = image_future.result()
|
71 |
-
creative_text = creative_text_future.result()
|
72 |
-
return translated_text, creative_text, image
|
73 |
|
74 |
-
#
|
75 |
-
|
76 |
-
|
77 |
-
|
|
|
78 |
|
79 |
-
|
|
|
|
|
80 |
interface = gr.Interface(
|
81 |
fn=translate_generate_image_and_text,
|
82 |
inputs=[
|
83 |
-
gr.Textbox(label="Enter text"),
|
84 |
-
gr.Dropdown(choices=list(language_codes.keys()), label="Source Language"
|
85 |
-
|
|
|
|
|
|
|
|
|
86 |
],
|
87 |
-
|
88 |
-
|
89 |
-
description="Translate text between languages, generate images based on translation, and create creative text.",
|
90 |
)
|
91 |
|
92 |
-
# Launch Gradio app
|
93 |
interface.launch()
|
|
|
1 |
import os
|
|
|
2 |
from huggingface_hub import login
|
3 |
from transformers import MarianMTModel, MarianTokenizer, pipeline
|
4 |
import requests
|
|
|
6 |
from PIL import Image
|
7 |
import gradio as gr
|
# Read the Hugging Face API key from the environment; fail fast at startup
# if it is missing so the Space never runs half-configured.
hf_token = os.getenv("HUGGINGFACE_API_KEY")
if hf_token is None:
    raise ValueError("Hugging Face API key not found in environment variables.")

# Authenticate this process with the Hugging Face Hub using the token.
login(token=hf_token)
# Human-readable language names mapped to their ISO 639-1 codes.
# Keys feed the Gradio dropdown; values become Marian ``>>lang<<`` tokens.
language_codes = {
    "French": "fr",
    "Spanish": "es",
    "German": "de",
    "Tamil": "ta",
    "Hindi": "hi",
    "Chinese": "zh",
    "Russian": "ru",
    "Japanese": "ja",
    "Korean": "ko",
    "Arabic": "ar",
    "Portuguese": "pt",
    "Italian": "it"
}

# Load the MarianMT model once at startup (model name indicates
# multilingual -> English) and wrap it in a reusable translation pipeline.
model_name = "Helsinki-NLP/opus-mt-mul-en"
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)
translator = pipeline("translation", model=model, tokenizer=tokenizer)
# Function for translation
def translate_text(input_text, src_lang, max_length=40):
    """Translate *input_text* to English with the shared MarianMT pipeline.

    Parameters:
        input_text (str): Text to translate.
        src_lang (str): ISO 639-1 code (e.g. "fr") injected as a Marian
            ``>>lang<<`` token.  NOTE(review): for a mul->en model these
            tokens normally select the *target* language — confirm the
            prefix is actually required here.
        max_length (int): Maximum length of the generated translation.
            Defaults to 40, preserving the previously hard-coded value.

    Returns:
        str: The translated text, or an "An error occurred: ..." message
        if translation fails for any reason.
    """
    try:
        src_prefix = f">>{src_lang}<< " + input_text
        translation = translator(src_prefix, max_length=max_length)
        return translation[0]['translation_text']
    except Exception as e:
        # Surface failures as text so the Gradio UI stays responsive
        # instead of raising into the framework.
        return f"An error occurred: {str(e)}"
# Serverless Inference API endpoint and auth header for the FLUX.1-dev
# image-generation model.
flux_API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
flux_headers = {"Authorization": f"Bearer {hf_token}"}
# Function to generate image based on prompt
def generate_image(prompt, timeout=120):
    """Generate an image for *prompt* via the FLUX inference endpoint.

    Parameters:
        prompt (str): Text prompt sent as the model input.
        timeout (float): Seconds to wait for the HTTP response
            (default 120).  The previous version had no timeout, so a
            stuck request could hang the Gradio worker indefinitely.

    Returns:
        PIL.Image.Image | None: The decoded image on success, or None on
        any failure (non-200 status, network error, undecodable payload).
    """
    try:
        response = requests.post(
            flux_API_URL,
            headers=flux_headers,
            json={"inputs": prompt},
            timeout=timeout,  # bound the wait instead of blocking forever
        )
        if response.status_code == 200:
            # A successful response carries the raw image bytes in the body.
            return Image.open(io.BytesIO(response.content))
        print(f"Failed to get image: Status code {response.status_code}")
        return None
    except Exception as e:
        print(f"An error occurred: {e}")
        return None
# Serverless Inference API endpoint and auth header for the Mistral-7B
# text-generation model.
mistral_API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-v0.1"
mistral_headers = {"Authorization": f"Bearer {hf_token}"}
def generate_creative_text(translated_text, timeout=60):
    """Generate creative text from *translated_text* via Mistral-7B.

    Parameters:
        translated_text (str): Prompt sent as the model input.
        timeout (float): Seconds to wait for the HTTP response
            (default 60).  The previous version had no timeout, so a
            stuck request could hang the Gradio worker indefinitely.

    Returns:
        str: The generated text on success, the fixed string
        "Error generating creative text" on a non-200 response, or an
        "An error occurred: ..." message on exceptions.
    """
    try:
        response = requests.post(
            mistral_API_URL,
            headers=mistral_headers,
            json={"inputs": translated_text},
            timeout=timeout,  # bound the wait instead of blocking forever
        )
        if response.status_code == 200:
            # The API returns a JSON list of generations; use the first.
            return response.json()[0]['generated_text']
        return "Error generating creative text"
    except Exception as e:
        return f"An error occurred: {str(e)}"
# Main function to handle full workflow
def translate_generate_image_and_text(input_text, src_lang):
    """Full pipeline: translate the input, then derive an image and creative text.

    Returns a (translated_text, creative_text, image) tuple in the order
    expected by the Gradio outputs.
    """
    # Resolve the human-readable language name to its ISO code, then translate.
    translated = translate_text(input_text, language_codes[src_lang])

    # Fire the two downstream generators in the same order as before:
    # image request first, then creative text.
    rendered_image = generate_image(translated)
    story = generate_creative_text(translated)

    return translated, story, rendered_image
# Gradio interface wiring: two inputs feed the pipeline function, and the
# three outputs mirror its (translated, creative, image) return tuple.
interface = gr.Interface(
    fn=translate_generate_image_and_text,
    inputs=[
        gr.Textbox(label="Enter text for translation"),
        gr.Dropdown(choices=list(language_codes.keys()), label="Source Language")
    ],
    outputs=[
        gr.Textbox(label="Translated Text"),
        gr.Textbox(label="Creative Text"),
        gr.Image(label="Generated Image")
    ],
    title="Multilingual Translation, Image, and Creative Text Generator",
    description="Translates text from multiple languages to English, generates images, and creates creative text."
)

# Launch the Gradio app
interface.launch()