File size: 3,097 Bytes
5350da8
 
 
 
 
 
 
8738846
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5d9279e
 
 
 
 
 
 
 
 
 
8738846
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
import os
import streamlit as st
import requests
from transformers import pipeline
from typing import Dict
from together import Together

# Image-to-text
def img2txt(url: str) -> str:
    """Generate a short caption for the image at *url*.

    Args:
        url: Path or URL of the image, as accepted by the transformers
            image-to-text pipeline.

    Returns:
        The generated caption text (capped at 20 new tokens).
    """
    # Cache the heavy BLIP pipeline on the function object so repeated calls
    # (e.g. Streamlit reruns) don't reload the model from disk every time.
    if not hasattr(img2txt, "_captioner"):
        print("Initializing captioning model...")
        img2txt._captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")

    print("Generating text from the image...")
    text = img2txt._captioner(url, max_new_tokens=20)[0]["generated_text"]

    print(text)
    return text

# Text-to-story generation with LLM model
def txt2story(prompt: str, top_k: int, top_p: float, temperature: float) -> str:
    """Generate a short story (<= 250 words) from *prompt* via the Together API.

    Args:
        prompt: Seed text (typically an image caption) the story is based on.
        top_k: Sampling top-k passed to the model.
        top_p: Sampling top-p (nucleus) passed to the model.
        temperature: Sampling temperature passed to the model.

    Returns:
        The full story text assembled from the streamed response.
    """
    # Load the Together API client (key comes from the environment).
    client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))

    # Modify the prompt based on user inputs and ensure a 250-word limit
    story_prompt = f"Write a short story of no more than 250 words based on the following prompt: {prompt}"

    # Call the LLM model with streaming enabled.
    stream = client.chat.completions.create(
        model="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
        messages=[
            {"role": "system", "content": '''As an experienced short story writer, write a meaningful story influenced by the provided prompt.
            Ensure the story does not exceed 250 words.'''},
            {"role": "user", "content": story_prompt}
        ],
        top_k=top_k,
        top_p=top_p,
        temperature=temperature,
        stream=True
    )

    # Collect story chunks. Streamed deltas can carry content == None
    # (e.g. the final chunk), which made the original `story += ...`
    # raise TypeError; skip those. join() also avoids quadratic +=.
    parts = []
    for chunk in stream:
        content = chunk.choices[0].delta.content
        if content:
            parts.append(content)

    return ''.join(parts)

# Text-to-speech
def txt2speech(text: str) -> None:
    """Convert *text* to speech and save it as ``audio_story.wav``.

    Args:
        text: The story text to synthesize.

    Side effects:
        Writes ``audio_story.wav`` in the current working directory.
    """
    print("Initializing MeloTTS text-to-speech conversion...")
    pipe = pipeline("text-to-speech", model="myshell-ai/MeloTTS-English")

    # Generate audio from the text.
    audio = pipe(text)

    # The transformers TTS pipeline returns {"audio": ndarray, "sampling_rate": int},
    # not raw WAV bytes — the original `audio["wav"]` raised KeyError, and dumping
    # bare samples without a RIFF header would be unplayable anyway. Write a
    # proper WAV file instead.
    import scipy.io.wavfile  # local import: only needed for this one write
    scipy.io.wavfile.write("audio_story.wav", audio["sampling_rate"], audio["audio"])

# Get user preferences for the story
def get_user_preferences() -> Dict[str, str]:
    """Render the Streamlit selectors and return the chosen story options.

    Returns:
        Mapping of preference key (e.g. ``'genre'``) to the selected label.
    """
    # Declarative table of (widget label, choices) per preference key;
    # insertion order determines on-screen widget order.
    option_table = {
        'continent': ("Continent", ["North America", "Europe", "Asia", "Africa", "Australia"]),
        'genre': ("Genre", ["Science Fiction", "Fantasy", "Mystery", "Romance"]),
        'setting': ("Setting", ["Future", "Medieval times", "Modern day", "Alternate reality"]),
        'plot': ("Plot", ["Hero's journey", "Solving a mystery", "Love story", "Survival"]),
        'tone': ("Tone", ["Serious", "Light-hearted", "Humorous", "Dark"]),
        'theme': ("Theme", ["Self-discovery", "Redemption", "Love", "Justice"]),
        'conflict': ("Conflict Type", ["Person vs. Society", "Internal struggle", "Person vs. Nature", "Person vs. Person"]),
        'twist': ("Mystery/Twist", ["Plot twist", "Hidden identity", "Unexpected ally/enemy", "Time paradox"]),
        'ending': ("Ending", ["Happy", "Bittersweet", "Open-ended", "Tragic"]),
    }

    return {key: st.selectbox(label, choices) for key, (label, choices) in option_table.items()}