import os
import streamlit as st
import requests
from transformers import pipeline
from typing import Dict
from together import Together
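
# Required environment variables (both are read below):
#   TOGETHER_API_KEY          - Together API key, used for story generation
#   HUGGINGFACEHUB_API_TOKEN  - Hugging Face token, used for text-to-speech inference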
# Image-to-text
def img2txt(url: str) -> str:
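    """Generate a caption for the image at `url` with the BLIP image-captioning pipeline."""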
print("Initializing captioning model...")
captioning_model = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
print("Generating text from the image...")
text = captioning_model(url, max_new_tokens=20)[0]["generated_text"]
print(text)
return text
# Text-to-story generation with LLM model
def txt2story(prompt: str, top_k: int, top_p: float, temperature: float) -> str:
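    """Generate a short story (at most 250 words) from `prompt` via the Together chat completions API."""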
    # Create the Together API client
    client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))

    # Wrap the prompt with an explicit 250-word limit
    story_prompt = f"Write a short story of no more than 250 words based on the following prompt: {prompt}"

    # Call the LLM with streaming enabled
    stream = client.chat.completions.create(
        model="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
        messages=[
            {"role": "system", "content": '''As an experienced short story writer, write a meaningful story influenced by the provided prompt.
            Ensure the story does not exceed 250 words.'''},
            {"role": "user", "content": story_prompt}
        ],
        top_k=top_k,
        top_p=top_p,
        temperature=temperature,
        stream=True
    )

    # Concatenate the streamed story chunks, skipping empty deltas
    story = ''
    for chunk in stream:
        if chunk.choices and chunk.choices[0].delta.content:
            story += chunk.choices[0].delta.content
    return story
# Text-to-speech
def txt2speech(text: str) -> None:
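    """Convert `text` to speech with the Hugging Face Inference API and save the result to audio_story.mp3."""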
print("Initializing text-to-speech conversion...")
API_URL = "https://api-inference.huggingface.co/models/espnet/kan-bayashi_ljspeech_vits"
headers = {"Authorization": f"Bearer {os.environ['HUGGINGFACEHUB_API_TOKEN']}"}
payloads = {'inputs': text}
response = requests.post(API_URL, headers=headers, json=payloads)
with open('audio_story.mp3', 'wb') as file:
file.write(response.content)
# Get user preferences for the story
def get_user_preferences() -> Dict[str, str]:
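    """Collect the user's story preferences (genre, setting, tone, etc.) from Streamlit select boxes."""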
    preferences = {}
    preferences['continent'] = st.selectbox("Continent", ["North America", "Europe", "Asia", "Africa", "Australia"])
    preferences['genre'] = st.selectbox("Genre", ["Science Fiction", "Fantasy", "Mystery", "Romance"])
    preferences['setting'] = st.selectbox("Setting", ["Future", "Medieval times", "Modern day", "Alternate reality"])
    preferences['plot'] = st.selectbox("Plot", ["Hero's journey", "Solving a mystery", "Love story", "Survival"])
    preferences['tone'] = st.selectbox("Tone", ["Serious", "Light-hearted", "Humorous", "Dark"])
    preferences['theme'] = st.selectbox("Theme", ["Self-discovery", "Redemption", "Love", "Justice"])
    preferences['conflict'] = st.selectbox("Conflict Type", ["Person vs. Society", "Internal struggle", "Person vs. Nature", "Person vs. Person"])
    preferences['twist'] = st.selectbox("Mystery/Twist", ["Plot twist", "Hidden identity", "Unexpected ally/enemy", "Time paradox"])
    preferences['ending'] = st.selectbox("Ending", ["Happy", "Bittersweet", "Open-ended", "Tragic"])
    return preferences
# Main function
def main():
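    """Streamlit entry point: upload an image, caption it, generate a story, and narrate it as audio."""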
    st.set_page_config(page_title="🎨 Image-to-Audio Story 🎧", page_icon="🖼️")
    st.title("Turn the Image into Audio Story")

    # Allow users to upload an image file
    uploaded_file = st.file_uploader("# 📷 Upload an image...", type=["jpg", "jpeg", "png"])

    # Parameters for the LLM (in the sidebar)
    st.sidebar.markdown("# LLM Inference Configuration Parameters")
    top_k = st.sidebar.number_input("Top-K", min_value=1, max_value=100, value=5)
    top_p = st.sidebar.number_input("Top-P", min_value=0.0, max_value=1.0, value=0.8)
    temperature = st.sidebar.number_input("Temperature", min_value=0.1, max_value=2.0, value=1.5)

    # Get user preferences for the story
    st.markdown("## Story Preferences")
    preferences = get_user_preferences()

    if uploaded_file is not None:
        # Read and save the uploaded image file
        bytes_data = uploaded_file.read()
        with open("uploaded_image.jpg", "wb") as file:
            file.write(bytes_data)

        st.image(uploaded_file, caption='🖼️ Uploaded Image', use_column_width=True)

        # Run the AI pipeline: caption the image, generate a story, narrate it
        with st.spinner("## 🤖 AI is at Work! "):
            scenario = img2txt("uploaded_image.jpg")  # Generate a caption from the image

            # Fold the user preferences into the story prompt
            prompt = f"Based on the image description: '{scenario}', create a {preferences['genre']} story set in {preferences['setting']} in {preferences['continent']}. " \
                     f"The story should have a {preferences['tone']} tone and explore the theme of {preferences['theme']}. " \
                     f"The main conflict should be {preferences['conflict']}. " \
                     f"The story should have a {preferences['twist']} and end with a {preferences['ending']} ending."

            story = txt2story(prompt, top_k, top_p, temperature)  # Generate the story from the caption, LLM params, and preferences
            txt2speech(story)  # Convert the story to audio

        st.markdown("---")
        st.markdown("## 📝 Image Caption")
        st.write(scenario)

        st.markdown("---")
        st.markdown("## 📖 Story")
        st.write(story)

        st.markdown("---")
        st.markdown("## 🎧 Audio Story")
        st.audio("audio_story.mp3")
if __name__ == '__main__':
    main()
# Credits
st.markdown("### Credits")
st.caption('''
Made with β€οΈ by @Aditya-Neural-Net-Ninja\n
Utilizes Image-to-Text, Text Generation, Text-to-Speech Transformer Models\n
Gratitude to Streamlit, π€ Spaces for Deployment & Hosting
''') |