Brett Fiedler committed
Commit bfb63a8 · Parent(s): 51fab2a
initial test commit
Files changed:
- .gitignore +1 -0
- ai.py +70 -0
- app.py +2 -0
- logging.py +77 -0
- test-repo.py +66 -0
.gitignore ADDED
@@ -0,0 +1 @@
+.env
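The ignored .env file is where the OpenAI key read by ai.py lives. A minimal sketch of its contents, with a placeholder value rather than anything from the commit:

OPENAI_API_KEY=<your-openai-api-key>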
ai.py ADDED
@@ -0,0 +1,70 @@
+import warnings
+import os  # built-in Python library with operating-system functions
+from dotenv import load_dotenv, find_dotenv
+import json
+
+import openai  # functions provided by OpenAI to interact with their models
+import panel as pn  # added: pn is used throughout this module but was never imported
+from logging import sys_prompt  # added: the local logging.py defines sys_prompt (note: the file name shadows the stdlib logging module)
+from langchain.chat_models import ChatOpenAI
+from langchain.prompts.chat import (
+    ChatPromptTemplate,
+    SystemMessagePromptTemplate,
+    AIMessagePromptTemplate,
+    HumanMessagePromptTemplate,
+)
+from langchain.schema import AIMessage, HumanMessage, SystemMessage
+
+warnings.filterwarnings('ignore')
+_ = load_dotenv(find_dotenv())  # pull OPENAI_API_KEY out of the .env file ignored above
+openai.api_key = os.getenv('OPENAI_API_KEY')
+
+def get_completion(prompt, model="gpt-3.5-turbo"):
+    messages = [{"role": "user", "content": prompt}]
+    response = openai.ChatCompletion.create(
+        model=model,
+        messages=messages,
+        temperature=0,  # the degree of randomness of the model's output
+    )
+    return response.choices[0].message["content"]
+
+def get_completion_from_messages(messages, model="gpt-3.5-turbo", temperature=0):
+    response = openai.ChatCompletion.create(
+        model=model,
+        messages=messages,
+        temperature=temperature,  # the degree of randomness of the model's output
+    )
+    # print(str(response.choices[0].message))
+    return response.choices[0].message["content"]
+
+def collect_messages(_):
+    prompt = inp.value_input
+    inp.value = ''
+    context.append({'role': 'user', 'content': f"{prompt}"})
+    response = get_completion_from_messages(context)
+    context.append({'role': 'assistant', 'content': f"{response}"})
+    panels.append(
+        pn.Row('User:', pn.pane.Markdown(prompt, width=600)))
+    panels.append(
+        pn.Row('Assistant:', pn.pane.Markdown(response, width=600, style={'background-color': '#F6F6F6'})))
+
+    return pn.Column(*panels)
+
+
+persona = "LLM_1"
+
+pn.extension('floatpanel')
+panels = []  # collect display rows
+
+context = [{'role': 'system', 'content': sys_prompt.get_prompt(persona)}]  # accumulate messages
+
+inp = pn.widgets.TextInput(value="Hi", placeholder='Enter text here…')
+button_conversation = pn.widgets.Button(name="Chat!")
+
+interactive_conversation = pn.bind(collect_messages, button_conversation)
+
+dashboard = pn.Column(
+    inp,
+    pn.Row(button_conversation),
+    pn.panel(interactive_conversation, loading_indicator=True, height=300),
+)
+
+dashboard
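The bare `dashboard` expression on the last line only renders inside a notebook; to serve the file as a standalone app one would call `dashboard.servable()` and run `panel serve ai.py`. A minimal usage sketch of the two completion helpers, assuming OPENAI_API_KEY is set in .env as above (note that importing ai also executes its module-level Panel setup):

from ai import get_completion, get_completion_from_messages

# single-turn prompt
print(get_completion("Say hello in one word."))

# multi-turn message list, matching the format collect_messages builds
msgs = [
    {"role": "system", "content": "You are a terse assistant."},
    {"role": "user", "content": "Name three primary colors."},
]
print(get_completion_from_messages(msgs, temperature=0))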
app.py CHANGED
@@ -7,6 +7,8 @@ import panel as pn
 from PIL import Image
 from transformers import CLIPModel, CLIPProcessor
 
+import ai
+
 pn.extension(design="bootstrap", sizing_mode="stretch_width")
 
 ICON_URLS = {
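Because ai.py does its OpenAI and Panel setup at module level, `import ai` runs all of it (including reading the prompts log) the moment app.py loads. If that cost were unwanted at startup, one sketch is a lazy import; the helper name here is hypothetical and not part of the commit:

def _load_ai():
    import ai  # executed only on first call, not when app.py loads
    return ai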
logging.py ADDED
@@ -0,0 +1,77 @@
+# @title JSON log system prompt management
+import json  # added: json.load/json.dump are used below but json was never imported
+# Note: the file name logging.py shadows Python's standard-library logging module.
+
+class LLMPrompts:
+    def __init__(self, log_file):
+        self.prompts = []
+        self.log_file = log_file
+        self.load_prompts_from_log()
+
+    def add_prompt(self, identity, prompt, temperature):
+        self.prompts.append({'identity': identity, 'prompt': prompt, 'temperature': temperature})
+        self.save_prompts_to_log()
+
+    def remove_prompt(self, identity):
+        self.prompts = [p for p in self.prompts if p['identity'] != identity]
+        self.save_prompts_to_log()
+
+    def get_prompt(self, identity):
+        for prompt in self.prompts:
+            if prompt['identity'] == identity:
+                return prompt['prompt']
+        return None
+
+    def get_temperature(self, identity):
+        for prompt in self.prompts:
+            if prompt['identity'] == identity:
+                return prompt['temperature']
+        return None
+
+    def load_prompts_from_log(self):
+        try:
+            with open(self.log_file, 'r') as file:
+                self.prompts = json.load(file)
+        except FileNotFoundError:
+            print("No file found. Starting from scratch.")  # ignore a missing log file
+
+    def save_prompts_to_log(self):
+        with open(self.log_file, 'w') as file:
+            json.dump(self.prompts, file)
+
+# Accessing prompts and temperatures
+# print(llm_prompts.get_prompt("LLM_2"))       # Output: Write a short story about a detective solving a mysterious murder case.
+# print(llm_prompts.get_temperature("LLM_2"))  # Output: 0.7
+
+# Adding a new prompt with temperature
+# llm_prompts.add_prompt("LLM_5", "Explain the concept of artificial intelligence.", 0.5)
+
+# Set the file path for the log
+log_file_path = '/drive/My Drive/Colab Notebooks/llm_prompts_log.json'
+sys_prompt = LLMPrompts(log_file_path)
+
+# Print all prompts and temperatures
+for prompt in sys_prompt.prompts:
+    print(prompt['identity'], prompt['prompt'], prompt['temperature'])
+
+# Want to add personas? Do that here!
+# sys_prompt.add_prompt('LLM_2', "")
+
+# Log the last conversation.
+# Note: `context` is defined in ai.py, not here, so running these lines in this
+# module would raise NameError at import time; they are left commented out.
+convo_log_file_path = '/drive/My Drive/Colab Notebooks/last_conversation.json'
+
+# Convert the list to a JSON object and save it to a file:
+# json_object = json.dumps(context)
+# with open('output.json', 'w') as file:
+#     file.write(json_object)
+
+# Save list items as separate lines in a JSON file:
+# with open(convo_log_file_path, 'w') as file:
+#     for item in context:
+#         file.write(json.dumps(item) + '\n')
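A minimal usage sketch of LLMPrompts, with a hypothetical local path in place of the Colab Drive path above:

prompts = LLMPrompts("llm_prompts_log.json")
prompts.add_prompt("LLM_1", "You are a friendly tutor.", 0.3)
print(prompts.get_prompt("LLM_1"))       # You are a friendly tutor.
print(prompts.get_temperature("LLM_1"))  # 0.3
prompts.remove_prompt("LLM_1")           # each mutation is persisted to the log immediately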
test-repo.py ADDED
@@ -0,0 +1,66 @@
+import os
+import csv
+import panel as pn
+from datetime import datetime
+import pandas as pd
+# import huggingface_hub
+from huggingface_hub import Repository
+
+DATASET_REPO_URL = "https://huggingface.co/datasets/julien-c/persistent-space-dataset"
+DATA_FILENAME = "data.csv"
+DATA_FILE = os.path.join("data", DATA_FILENAME)
+
+repo_dir = "data"
+if not os.path.exists(repo_dir):
+    os.makedirs(repo_dir)
+
+repo = pn.widgets.TextInput(name="Repository URL", value=DATASET_REPO_URL)
+name_input = pn.widgets.TextInput(name="Your name")
+message_input = pn.widgets.TextAreaInput(name="Your message")
+
+def generate_html() -> str:
+    if not os.path.exists(DATA_FILE):
+        return "<div class='chatbot'>no messages yet</div>"
+    else:
+        df = pd.read_csv(DATA_FILE)
+        df = df.iloc[::-1]  # reverse the rows so the newest message comes first
+        html = "<div class='chatbot'>"
+        for _, row in df.iterrows():
+            html += "<div>"
+            html += f"<span>{row['name']}</span>"
+            html += f"<span class='message'>{row['message']}</span>"
+            html += "</div>"
+        html += "</div>"
+        return html
+
+def store_message(event):
+    name = name_input.value
+    message = message_input.value
+    if name and message:
+        with open(DATA_FILE, "a", newline="") as csvfile:
+            writer = csv.writer(csvfile)
+            writer.writerow([name, message, str(datetime.now())])
+        # fixed: refresh the messages pane directly; the original called
+        # pn.state.session_context.request_relayout, which does not appear to be a Panel API
+        messages.object = generate_html()
+
+# fixed: Panel widgets register callbacks with .param.watch, not .on_change
+repo.param.watch(store_message, "value")
+name_input.param.watch(store_message, "value")
+message_input.param.watch(store_message, "value")
+
+repo_text = pn.pane.Markdown(f"The dataset repo is [{DATASET_REPO_URL}]({DATASET_REPO_URL}) (open in new tab)", sizing_mode="stretch_width")
+messages = pn.pane.HTML(sizing_mode="stretch_width")
+messages.object = generate_html()
+
+template = pn.template.FastListTemplate(
+    site="Persistent Space Dataset",
+    title="Reading/Writing to a HuggingFace Dataset Repo from Spaces",
+    main=[
+        pn.Column(repo_text, name_input, message_input, messages),
+    ],
+    header_background="#333",
+    header_color="white",
+    main_max_width="800px",
+    main_padding=20,
+    main_min_height="600px"
+)
+template.servable()
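One thing the commit never does is write a CSV header: generate_html reads data.csv with pandas and looks up 'name' and 'message' columns, but store_message only appends bare rows, so the lookup would fail on the first render after a write. A sketch of seeding the header before the app serves, with column names assumed from the code above:

import csv, os

if not os.path.exists("data/data.csv"):
    with open("data/data.csv", "w", newline="") as f:
        csv.writer(f).writerow(["name", "message", "time"])

The app itself would then be launched with `panel serve test-repo.py`.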