Spaces:
Sleeping
Sleeping
Krzysztof Krystian Jankowski
committed on
Commit
·
e3f833e
1
Parent(s):
70576b6
better prompt
Browse files
app.py
CHANGED
@@ -1,33 +1,7 @@
|
|
1 |
import streamlit as st
|
2 |
-
from
|
3 |
-
|
4 |
-
|
5 |
-
model="TheBloke/TinyLlama-1.1B-Chat-v0.3-GGUF"
|
6 |
-
config = {'max_new_tokens': 128,
|
7 |
-
'repetition_penalty': 1.1,
|
8 |
-
'temperature':0.4,
|
9 |
-
'top_k':50,
|
10 |
-
'top_p':0.9}
|
11 |
-
|
12 |
-
def getLlamaResponse(input_text, no_words, blog_style):
|
13 |
-
llm=CTransformers(model=model,
|
14 |
-
model_type='llama',
|
15 |
-
config=config)
|
16 |
-
|
17 |
-
# create a prompt
|
18 |
-
|
19 |
-
template="""
|
20 |
-
You are a ghost writer helping writing posts for a weblog. Do not provide any instructions just write the post.
|
21 |
-
The post should be {no_words} words long.
|
22 |
-
Write a blog post about the topic: {input_text} in {blog_style} style.
|
23 |
-
"""
|
24 |
-
prompt=PromptTemplate(input_variables=["blog_style", "input_text", "no_words"],
|
25 |
-
template=template)
|
26 |
-
|
27 |
-
# generate the response
|
28 |
-
response=llm.invoke(prompt.format(blog_style=blog_style, input_text=input_text, no_words=no_words))
|
29 |
-
|
30 |
-
return response
|
31 |
|
32 |
# Streamlit UI
|
33 |
st.set_page_config(page_title="GenBlog Demo",
|
@@ -35,19 +9,35 @@ st.set_page_config(page_title="GenBlog Demo",
|
|
35 |
layout="centered",
|
36 |
initial_sidebar_state='collapsed')
|
37 |
st.header("GenBlog Demo 📚")
|
38 |
-
st.
|
39 |
-
st.write("
|
|
|
|
|
|
|
|
|
|
|
|
|
40 |
input_text=st.text_input("Enter the Blog Topic")
|
41 |
col1, col2 = st.columns([5, 5])
|
42 |
with col1:
|
43 |
-
no_words=st.text_input("Enter the number of words", value=
|
44 |
with col2:
|
45 |
-
blog_style=st.selectbox("Select the Blog Style", ["Personal", "
|
|
|
|
|
46 |
submit=st.button("Generate Blog")
|
47 |
-
|
|
|
|
|
|
|
48 |
|
49 |
if submit:
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
|
|
|
|
|
|
|
|
|
|
|
1 |
import streamlit as st
|
2 |
+
from ctransformers import AutoModelForCausalLM
|
3 |
+
import torch
|
4 |
+
llm = AutoModelForCausalLM.from_pretrained("TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF", gpu_layers=24)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
5 |
|
6 |
# Streamlit UI
|
7 |
st.set_page_config(page_title="GenBlog Demo",
|
|
|
9 |
layout="centered",
|
10 |
initial_sidebar_state='collapsed')
|
11 |
st.header("GenBlog Demo 📚")
|
12 |
+
st.subheader('This is a demo of the GenBlog assistant', divider='rainbow')
|
13 |
+
st.write("Enter a blog topic, the number of words and the blog style to generate a blog post.")
|
14 |
+
|
15 |
+
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
16 |
+
st.write(f"Based on the TinyLlama 🦙 model by TheBloke. Device: {device} 🚀")
|
17 |
+
|
18 |
+
st.divider()
|
19 |
+
|
20 |
input_text=st.text_input("Enter the Blog Topic")
|
21 |
col1, col2 = st.columns([5, 5])
|
22 |
with col1:
|
23 |
+
no_words=st.text_input("Enter the number of words", value=200)
|
24 |
with col2:
|
25 |
+
blog_style=st.selectbox("Select the Blog Style", ["Personal", "Casual", "Professional"])
|
26 |
+
|
27 |
+
|
28 |
submit=st.button("Generate Blog")
|
29 |
+
st.divider()
|
30 |
+
response_field = st.empty()
|
31 |
+
|
32 |
+
|
33 |
|
34 |
if submit:
|
35 |
+
prompt = f"""
|
36 |
+
As a professional writer skilled in {blog_style.lower()} style, your task is to create an engaging and informative blog post about '{input_text}'. Begin with a captivating title that reflects the essence of the topic. Follow with an introduction that sets the stage for the discussion.
|
37 |
+
|
38 |
+
Please ensure the main body delves into the details of the topic, providing insight, analysis, and relevant examples that illuminate the subject for the readers. Conclude with a summary that reinforces the main points and offers a call to action or a thought-provoking question to engage the readers further.
|
39 |
+
|
40 |
+
The post should be well-structured, clearly written, and approximately {no_words} words in total, reflecting a professional tone suitable for an audience interested in {blog_style.lower()} topics.
|
41 |
+
"""
|
42 |
+
|
43 |
+
st.write_stream(llm(prompt, stream=True, max_new_tokens=512, temperature=0.3))
|