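# Story Teller: turn an uploaded image into a narrated short story.
# Pipeline: BLIP captions the image, Falcon-7B-Instruct (via LangChain's
# HuggingFaceHub wrapper) expands the caption into a story, and a VITS
# model on the Hugging Face Inference API reads the story aloud.
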
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration
from langchain import HuggingFaceHub, LLMChain, PromptTemplate
import gradio as gr
import requests
import os

# Load image captioning model
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
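# Note: the first run downloads the BLIP weights (roughly 1 GB) from the Hub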

def generate_caption_from_image(image_path):
    # Process the image and generate caption
    raw_image = Image.open(image_path).convert("RGB")
    inputs = processor(raw_image, return_tensors="pt")
    out = model.generate(**inputs)
    caption = processor.decode(out[0], skip_special_tokens=True)
    return caption
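
# Example (illustrative): for a photo of a dog on a beach, BLIP typically
# returns a short caption along the lines of "a dog running on the beach".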

def generate_story_from_caption(caption):
    # Generate a short story based on the caption
    llm = HuggingFaceHub(huggingfacehub_api_token=os.getenv('HUGGING_FACE'),
                         repo_id="tiiuae/falcon-7b-instruct",
                         verbose=False,
                         model_kwargs={"temperature": 0.2, "max_new_tokens": 4000})
    template = """You are a story teller.
    You get a scenario as an input text, and generate a short story out of it.
    Context: {scenario}
    Story:"""
    prompt = PromptTemplate(template=template, input_variables=["scenario"])
    # Create the LLM chain and run it on the caption
    chain = LLMChain(prompt=prompt, llm=llm)
    story = chain.run(caption)
    # The model echoes the prompt back, so keep only the text after "Story:"
    start_index = story.find("Story:") + len("Story:")
    story = story[start_index:].strip()
    return story

def text_to_speech(text):
    # Send the story text to a hosted VITS TTS model on the Inference API
    headers = {"Authorization": f"Bearer {os.getenv('HUGGING_FACE')}"}
    payload = {"inputs": text}
    API_URL = "https://api-inference.huggingface.co/models/espnet/kan-bayashi_ljspeech_vits"
    response = requests.post(API_URL, headers=headers, json=payload)

    if response.status_code != 200:
        raise RuntimeError(f"Text-to-speech request failed: {response.text}")
    # The API returns raw audio bytes (FLAC by default); save them to a file
    with open("output.flac", "wb") as f:
        f.write(response.content)
    return "output.flac"

def generate_story_from_image(image_input):
    # Gradio passes the image as a numpy array; save it to disk for BLIP
    input_image = Image.fromarray(image_input)
    input_image.save("input_image.jpg")
    caption = generate_caption_from_image("input_image.jpg")
    story = generate_story_from_caption(caption)
    audio = text_to_speech(story)
    return audio

# Define the input and output components
inputs = gr.Image(label="Image")
outputs = gr.Audio(label="Story Audio")

# Create the Gradio interface
gr.Interface(fn=generate_story_from_image, inputs=inputs, outputs=outputs, title="Story Teller").launch(debug=True)
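
# To run: set the HUGGING_FACE environment variable to a Hugging Face API
# token with Inference API access, then launch the script, e.g.
# `python app.py` (filename assumed); the interface opens in the browser.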