adi-123 committed · Commit a5e8b58 · verified · 1 Parent(s): 8738846

Update app.py

Files changed (1):
  1. app.py +1 -67
app.py CHANGED
@@ -4,74 +4,8 @@ import requests
 from transformers import pipeline
 from typing import Dict
 from together import Together
+from utils import img2txt, txt2story, txt2speech, get_user_preferences
 
-# Image-to-text
-def img2txt(url: str) -> str:
-    print("Initializing captioning model...")
-    captioning_model = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
-
-    print("Generating text from the image...")
-    text = captioning_model(url, max_new_tokens=20)[0]["generated_text"]
-
-    print(text)
-    return text
-
-# Text-to-story generation with LLM model
-def txt2story(prompt: str, top_k: int, top_p: float, temperature: float) -> str:
-    # Load the Together API client
-    client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
-
-    # Modify the prompt based on user inputs and ensure a 250-word limit
-    story_prompt = f"Write a short story of no more than 250 words based on the following prompt: {prompt}"
-
-    # Call the LLM model
-    stream = client.chat.completions.create(
-        model="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
-        messages=[
-            {"role": "system", "content": '''As an experienced short story writer, write a meaningful story influenced by the provided prompt.
-                Ensure the story does not exceed 250 words.'''},
-            {"role": "user", "content": story_prompt}
-        ],
-        top_k=top_k,
-        top_p=top_p,
-        temperature=temperature,
-        stream=True
-    )
-
-    # Concatenate story chunks
-    story = ''
-    for chunk in stream:
-        story += chunk.choices[0].delta.content
-
-    return story
-
-# Text-to-speech
-def txt2speech(text: str) -> None:
-    print("Initializing text-to-speech conversion...")
-    API_URL = "https://api-inference.huggingface.co/models/espnet/kan-bayashi_ljspeech_vits"
-    headers = {"Authorization": f"Bearer {os.environ['HUGGINGFACEHUB_API_TOKEN']}"}
-    payloads = {'inputs': text}
-
-    response = requests.post(API_URL, headers=headers, json=payloads)
-
-    with open('audio_story.mp3', 'wb') as file:
-        file.write(response.content)
-
-# Get user preferences for the story
-def get_user_preferences() -> Dict[str, str]:
-    preferences = {}
-
-    preferences['continent'] = st.selectbox("Continent", ["North America", "Europe", "Asia", "Africa", "Australia"])
-    preferences['genre'] = st.selectbox("Genre", ["Science Fiction", "Fantasy", "Mystery", "Romance"])
-    preferences['setting'] = st.selectbox("Setting", ["Future", "Medieval times", "Modern day", "Alternate reality"])
-    preferences['plot'] = st.selectbox("Plot", ["Hero's journey", "Solving a mystery", "Love story", "Survival"])
-    preferences['tone'] = st.selectbox("Tone", ["Serious", "Light-hearted", "Humorous", "Dark"])
-    preferences['theme'] = st.selectbox("Theme", ["Self-discovery", "Redemption", "Love", "Justice"])
-    preferences['conflict'] = st.selectbox("Conflict Type", ["Person vs. Society", "Internal struggle", "Person vs. Nature", "Person vs. Person"])
-    preferences['twist'] = st.selectbox("Mystery/Twist", ["Plot twist", "Hidden identity", "Unexpected ally/enemy", "Time paradox"])
-    preferences['ending'] = st.selectbox("Ending", ["Happy", "Bittersweet", "Open-ended", "Tragic"])
-
-    return preferences
 
 # Main function
 def main():
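
For context, here is a hypothetical sketch of how main() in app.py might wire the newly imported helpers together after this refactor. The body of main() sits outside this hunk, so only the four function names and their signatures (visible in the removed code above) come from the commit; the Streamlit widget labels, file names, and sampling values below are illustrative assumptions, not code from the repository.

# Hypothetical wiring of main() around the imported helpers; only the utils
# functions and their signatures are taken from the diff above.
import streamlit as st

from utils import img2txt, txt2story, txt2speech, get_user_preferences


def main():
    st.title("Image-to-Audio Story")  # assumed title, not from the repo

    uploaded_image = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
    preferences = get_user_preferences()  # Streamlit selectboxes defined in utils

    if uploaded_image is not None and st.button("Generate story"):
        # Persist the upload so the captioning pipeline can read it from disk.
        with open("uploaded_image.jpg", "wb") as f:
            f.write(uploaded_image.getvalue())

        # Image -> caption -> story prompt enriched with the user's preferences.
        caption = img2txt("uploaded_image.jpg")
        prompt = (
            f"{caption}. Genre: {preferences['genre']}; setting: {preferences['setting']}; "
            f"tone: {preferences['tone']}; ending: {preferences['ending']}."
        )

        # Sampling parameters are illustrative defaults, not values from the repo.
        story = txt2story(prompt, top_k=50, top_p=0.9, temperature=0.7)
        st.write(story)

        # txt2speech writes audio_story.mp3 into the working directory.
        txt2speech(story)
        st.audio("audio_story.mp3")


if __name__ == "__main__":
    main()

The apparent point of the commit is this split: app.py keeps the thin Streamlit entry point while the model-facing helpers live in utils.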