Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -22,16 +22,22 @@ def generate_caption_from_image(image_path):
|
|
22 |
|
23 |
def generate_story_from_caption(caption):
|
24 |
# Generate story based on caption
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
35 |
return story
|
36 |
|
37 |
def text_to_speech(text):
|
|
|
22 |
|
23 |
def generate_story_from_caption(caption):
    """Generate a short story from an image caption via a HuggingFace-hosted LLM.

    Parameters
    ----------
    caption : str
        Scenario text (e.g. an image caption) used as the story prompt context.

    Returns
    -------
    str
        The generated story with the prompt echo stripped off.
    """
    # Fix: the original line ended with stray tokens `}"},` after os.getenv(...),
    # which is a SyntaxError and would crash the module on import.
    llm = HuggingFaceHub(
        huggingfacehub_api_token=os.getenv('HUGGING_FACE'),
        repo_id="tiiuae/falcon-7b-instruct",
        verbose=False,
        model_kwargs={"temperature": 0.2, "max_new_tokens": 4000},
    )
    template = """You are a story teller.
    You get a scenario as an input text, and generates a short story out of it.
    Context: {scenario}
    Story:
    """
    prompt = PromptTemplate(template=template, input_variables=["scenario"])
    # Let's create our LLM chain now
    chain = LLMChain(prompt=prompt, llm=llm)
    story = chain.run(caption)
    # The model echoes the prompt; extract only the text after "Story:".
    # NOTE(review): if "Story:" is absent, find() returns -1 and the slice
    # starts at len("Story:")-1 — presumably acceptable for this model's output,
    # but worth confirming.
    start_index = story.find("Story:") + len("Story:")
    story = story[start_index:].strip()
    return story
|
42 |
|
43 |
def text_to_speech(text):
|