Krzysztof Krystian Jankowski committed
Commit 5b8c66e · Parent(s): 8a41d71
updated prompt
app.py CHANGED

@@ -2,22 +2,22 @@ import streamlit as st
 from langchain.prompts import PromptTemplate
 from langchain_community.llms import CTransformers
 
+model="TheBloke/TinyLlama-1.1B-Chat-v0.3-GGUF"
+config = {'max_new_tokens': 128,
+          'repetition_penalty': 1.1,
+          'temperature':0.6}
+
 def getLlamaResponse(input_text, no_words, blog_style):
-    llm=CTransformers(model=
+    llm=CTransformers(model=model,
                        model_type='llama',
-                       config=
-                       'repetition_penalty': 1.1,
-                       'temperature':0.6})
+                       config=config)
 
     # create a prompt
 
     template="""
-
-
-
-    Write a blog post about the topic: {input_text} in {blog_style} style. The blog should be {no_words} words long.
-    <|im_end|>
-    <|im_start|>assistant
+    You are a ghost writer for writing posts for a weblog.
+    Write a blog post about the topic: {input_text} in {blog_style} style.
+    The blog should be {no_words} words long.
     """
     prompt=PromptTemplate(input_variables=["blog_style", "input_text", "no_words"],
                           template=template)
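
The hunk ends at the PromptTemplate, so neither the rest of getLlamaResponse nor the Streamlit front-end that imports streamlit as st is visible in this commit. Below is a minimal sketch of how the full app.py could plausibly look after this change, assuming the formatted prompt is passed directly to the CTransformers instance and the three arguments come from simple Streamlit widgets; the llm(...) call, the widget labels, and the page layout are assumptions, not part of the diff.

    # Sketch only: a plausible continuation of app.py beyond the hunk shown above.
    # Everything after the PromptTemplate is an assumption, not part of this commit.
    import streamlit as st
    from langchain.prompts import PromptTemplate
    from langchain_community.llms import CTransformers

    model = "TheBloke/TinyLlama-1.1B-Chat-v0.3-GGUF"
    config = {'max_new_tokens': 128,
              'repetition_penalty': 1.1,
              'temperature': 0.6}

    def getLlamaResponse(input_text, no_words, blog_style):
        # Load the GGUF checkpoint through the CTransformers wrapper
        llm = CTransformers(model=model,
                            model_type='llama',
                            config=config)

        # Create the prompt (as in the commit above)
        template = """
        You are a ghost writer for writing posts for a weblog.
        Write a blog post about the topic: {input_text} in {blog_style} style.
        The blog should be {no_words} words long.
        """
        prompt = PromptTemplate(input_variables=["blog_style", "input_text", "no_words"],
                                template=template)

        # Assumed continuation: fill the template and run the model
        response = llm(prompt.format(blog_style=blog_style,
                                     input_text=input_text,
                                     no_words=no_words))
        return response

    # Assumed Streamlit wiring (not shown in the hunk)
    st.set_page_config(page_title="Blog generator", layout="centered")
    st.header("Generate a blog post")

    input_text = st.text_input("Blog topic")
    col1, col2 = st.columns(2)
    with col1:
        no_words = st.text_input("Number of words", value="100")
    with col2:
        blog_style = st.selectbox("Writing style", ("casual", "technical", "formal"))

    if st.button("Generate"):
        st.write(getLlamaResponse(input_text, no_words, blog_style))

Keeping model and config at module level, as this commit does, makes it easy to swap in a different GGUF checkpoint or adjust generation settings without touching the body of getLlamaResponse.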
|