gokilashree committed
Commit 37409eb · verified · 1 Parent(s): b7c9ef1

Update app.py

Files changed (1)
app.py +26 -19
app.py CHANGED
@@ -1,30 +1,37 @@
- from transformers import MBartForConditionalGeneration, MBart50Tokenizer, pipeline
+ import openai
+ from transformers import MBartForConditionalGeneration, MBart50Tokenizer
import gradio as gr
import requests
import io
from PIL import Image
import os
- import torch
+
+ # Set up your OpenAI API key (make sure it's stored as an environment variable)
+ openai.api_key = os.getenv("OPENAI_API_KEY")

# Load the translation model and tokenizer
model_name = "facebook/mbart-large-50-many-to-one-mmt"
tokenizer = MBart50Tokenizer.from_pretrained(model_name)
model = MBartForConditionalGeneration.from_pretrained(model_name)

- # Use a more powerful text generation model, e.g., GPT-J-6B
- text_gen_model = "EleutherAI/gpt-j-6B" # Or use 'EleutherAI/gpt-neox-20b' for better results
- pipe = pipeline(
-     "text-generation",
-     model=text_gen_model,
-     torch_dtype=torch.float32,
-     device_map="auto"
- )
-
# Use the Hugging Face API key from environment variables for text-to-image model
API_URL = "https://api-inference.huggingface.co/models/ZB-Tech/Text-to-Image"
headers = {"Authorization": f"Bearer {os.getenv('full_token')}"}

- # Define the translation, text generation, and image generation function
+ # Define the OpenAI GPT-3 text generation function
+ def generate_with_gpt3(prompt, max_tokens=150, temperature=0.7):
+     response = openai.Completion.create(
+         engine="text-davinci-003", # You can also use "text-davinci-002" or "curie"
+         prompt=prompt,
+         max_tokens=max_tokens,
+         temperature=temperature,
+         top_p=0.9,
+         frequency_penalty=0.0,
+         presence_penalty=0.0
+     )
+     return response.choices[0].text.strip()
+
+ # Define the translation, GPT-3 text generation, and image generation function
def translate_and_generate_image(tamil_text):
    # Step 1: Translate Tamil text to English using mbart-large-50
    tokenizer.src_lang = "ta_IN"
@@ -32,10 +39,10 @@ def translate_and_generate_image(tamil_text):
    translated_tokens = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"])
    translated_text = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0]

-     # Step 2: Generate high-quality English text using GPT-J
-     prompt = f"Create a detailed description based on the following text: {translated_text}"
-     generated_text = pipe(prompt, max_length=150, temperature=0.7, top_p=0.9, top_k=50, truncation=True)[0]['generated_text']
-
+     # Step 2: Generate high-quality descriptive text using OpenAI's GPT-3
+     prompt = f"Create a detailed and creative description based on the following text: {translated_text}"
+     generated_text = generate_with_gpt3(prompt, max_tokens=150, temperature=0.7)
+
    # Step 3: Use the generated English text to create an image
    def query(payload):
        response = requests.post(API_URL, headers=headers, json=payload)
@@ -54,9 +61,9 @@ iface = gr.Interface(
    outputs=[gr.Textbox(label="Translated English Text"),
             gr.Textbox(label="Generated Descriptive Text"),
             gr.Image(label="Generated Image")],
-     title="Tamil to English Translation, Text Generation, and Image Creation",
-     description="Translate Tamil text to English using Facebook's mbart-large-50 model, generate high-quality text using GPT-J, and create an image using the generated text.",
+     title="Tamil to English Translation, GPT-3 Text Generation, and Image Creation",
+     description="Translate Tamil text to English using Facebook's mbart-large-50 model, generate high-quality text using GPT-3, and create an image using the generated text.",
)

- # Launch Gradio app
+ # Launch Gradio app without `share=True`
iface.launch()
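
For reference, a minimal, self-contained sketch that exercises the two pieces this commit touches, the mbart-large-50 translation step and the new GPT-3 helper, without launching the Gradio app. It assumes the legacy openai Python SDK (pre-1.0, which still exposes openai.Completion as used in app.py) and an OPENAI_API_KEY set in the environment; the Tamil sample string is purely illustrative:

import os

import openai
from transformers import MBartForConditionalGeneration, MBart50Tokenizer

openai.api_key = os.getenv("OPENAI_API_KEY")

# Step 1: Tamil -> English, same model and settings as app.py
model_name = "facebook/mbart-large-50-many-to-one-mmt"
tokenizer = MBart50Tokenizer.from_pretrained(model_name)
model = MBartForConditionalGeneration.from_pretrained(model_name)

tokenizer.src_lang = "ta_IN"
inputs = tokenizer("ஒரு அழகான சூரிய அஸ்தமனம்", return_tensors="pt")  # illustrative input ("a beautiful sunset")
tokens = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"])
translated = tokenizer.batch_decode(tokens, skip_special_tokens=True)[0]
print("Translated:", translated)

# Step 2: expand the translation with the legacy Completion endpoint,
# mirroring generate_with_gpt3() from the updated app.py
response = openai.Completion.create(
    engine="text-davinci-003",
    prompt=f"Create a detailed and creative description based on the following text: {translated}",
    max_tokens=150,
    temperature=0.7,
)
print("Generated:", response.choices[0].text.strip())

Note that OpenAI has since retired text-davinci-003, so the engine value in generate_with_gpt3 may need to point at a completion model that is still available on the account.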