Mr-Vicky-01 committed on
Commit
3e473f0
·
verified ·
1 Parent(s): bf0d8d4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -10
app.py CHANGED
@@ -22,16 +22,22 @@ def generate_caption_from_image(image_path):
22
 
23
def generate_story_from_caption(caption):
    """Generate a short story (30-80 words) from an image caption.

    Parameters:
        caption: Scene description text produced by the captioning step.

    Returns:
        The story text generated by the Google PaLM chat model.
    """
    palm_key = os.getenv("GOOGLE_API")
    story_template = """You are a story teller;
You can generate a short story based on a simple narrative, the story should between 30 to 80 words;
CONTEXT: {scenario}
Story: """
    story_prompt = PromptTemplate(template=story_template, input_variables=["scenario"])
    # Wire the prompt and the PaLM model into a single runnable chain.
    chain = LLMChain(
        prompt=story_prompt,
        llm=GooglePalm(google_api_key=palm_key, temperature=0.8),
    )
    # The caption is the {scenario} slot of the prompt.
    return chain.run(caption)
36
 
37
  def text_to_speech(text):
 
22
 
23
def generate_story_from_caption(caption):
    """Generate a short story from an image caption via a Falcon-7B-Instruct LLM.

    Parameters:
        caption: Scene description text produced by the captioning step.

    Returns:
        The generated story text, with any echoed prompt preamble up to and
        including "Story:" stripped off, and surrounding whitespace removed.
    """
    # FIX: removed the stray `}"},` that followed os.getenv(...) in the
    # original and made this call a syntax error.
    llm = HuggingFaceHub(huggingfacehub_api_token=os.getenv('HUGGING_FACE'),
                         repo_id="tiiuae/falcon-7b-instruct",
                         verbose=False,
                         model_kwargs={"temperature": 0.2, "max_new_tokens": 4000})
    template = """You are a story teller.
You get a scenario as an input text, and generates a short story out of it.
Context: {scenario}
Story:
"""
    prompt = PromptTemplate(template=template, input_variables=["scenario"])
    # Let's create our LLM chain now
    chain = LLMChain(prompt=prompt, llm=llm)
    story = chain.run(caption)
    # Hub models often echo the prompt; keep only the text after "Story:".
    # FIX: the original did `find(...) + len(...)` unconditionally, so a
    # missing marker (find -> -1) silently chopped the first 5 characters.
    marker = "Story:"
    marker_pos = story.find(marker)
    if marker_pos != -1:
        story = story[marker_pos + len(marker):]
    return story.strip()
42
 
43
  def text_to_speech(text):