date_collected | repo_name | file_name | file_contents | prompts
---|---|---|---|---|
2024-01-10 | JayZeeDesign/Discord-AI-Chatbot | bot_utilities~ai_utils.py | import aiohttp
import io
from datetime import datetime
import time
import random
from urllib.parse import quote
from bot_utilities.config_loader import load_current_language, config
import openai
import os
from dotenv import find_dotenv, load_dotenv
import json
from langchain.agents import initialize_agent, AgentType, Tool
from langchain.chains import LLMMathChain
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from langchain.prompts import MessagesPlaceholder
from langchain.schema import SystemMessage
from langchain.memory import ConversationBufferWindowMemory
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains.summarize import load_summarize_chain
from langchain import PromptTemplate
from bs4 import BeautifulSoup
from pydantic import Field
from langchain.prompts import ChatPromptTemplate
import requests
load_dotenv(find_dotenv())
openai.api_key = os.getenv("OPENAI_API_KEY")
load_dotenv()
current_language = load_current_language()
internet_access = config['INTERNET_ACCESS']
# openai.api_key = os.getenv('CHIMERA_GPT_KEY')
# openai.api_base = "https://api.naga.ac/v1"
def sdxl(prompt):
response = openai.Image.create(
model="sdxl",
prompt=prompt,
n=1, # images count
size="1024x1024"
)
return response['data'][0]["url"]
def knowledge_retrieval(query):
# Define the data to be sent in the request
data = {
"params":{
"query":query
},
"project": "feda14180b9d-4ba2-9b3c-6c721dfe8f63"
}
# Convert Python object to JSON string
data_json = json.dumps(data)
# Send the POST request
response = requests.post("https://api-1e3042.stack.tryrelevance.com/latest/studios/6eba417b-f592-49fc-968d-6b63702995e3/trigger_limited", data=data_json)
# Check the response status code
if response.status_code == 200:
return response.json()["output"]["answer"]
else:
print(f"HTTP request failed with status code {response.status_code}")
def summary(content):
llm = ChatOpenAI(temperature = 0, model = "gpt-3.5-turbo-16k-0613")
text_splitter = RecursiveCharacterTextSplitter(separators=["\n\n", "\n"], chunk_size = 10000, chunk_overlap=500)
docs = text_splitter.create_documents([content])
map_prompt = """
Write a summary of the following text:
"{text}"
SUMMARY:
"""
map_prompt_template = PromptTemplate(template=map_prompt, input_variables=["text"])
summary_chain = load_summarize_chain(
llm=llm,
chain_type='map_reduce',
map_prompt = map_prompt_template,
combine_prompt = map_prompt_template,
verbose = True
)
output = summary_chain.run(input_documents=docs,)
return output
def scrape_website(url: str):
# Scrape the website at `url`; if the returned content is too long, summarize it before returning.
print("Scraping website...")
# Define the headers for the request
headers = {
'Cache-Control': 'no-cache',
'Content-Type': 'application/json',
}
# Define the data to be sent in the request
data = {
"url": url
}
# Convert Python object to JSON string
data_json = json.dumps(data)
# Send the POST request
response = requests.post("https://chrome.browserless.io/content?token=0a049e5b-3387-4c51-ab6c-57647d519571", headers=headers, data=data_json)
# Check the response status code
if response.status_code == 200:
soup = BeautifulSoup(response.content, "html.parser")
text = soup.get_text()
print("CONTENTTTTTT:", text)
if len(text) > 10000:
output = summary(text)
return output
else:
return text
else:
print(f"HTTP request failed with status code {response.status_code}")
def search(query):
"""
Searches Google (via the Serper API) for the given query and returns the parsed JSON results.
Args:
query (str): The search query.
Returns:
dict: The parsed JSON response from the Serper API.
Raises:
None
"""
url = "https://google.serper.dev/search"
payload = json.dumps({
"q": query
})
headers = {
'X-API-KEY': 'ab179d0f00ae0bafe47f77e09e62b9f53b3f281d',
'Content-Type': 'application/json'
}
response = requests.request("POST", url, headers=headers, data=payload)
return response.json()
def research(query):
system_message = SystemMessage(
content="""You are a world class researcher, who can do detailed research on any topic and produce facts based results;
you do not make things up, you will try as hard as possible to gather facts & data to back up the research
Please make sure you complete the objective above with the following rules:
1/ You will always searching for internal knowledge base first to see if there are any relevant information
2/ If the internal knowledge doesnt have good result, then you can go search online
3/ While search online:
a/ You will try to collect as many useful details as possible
b/ If there are url of relevant links & articles, you will scrape it to gather more information
c/ After scraping & search, you should think "is there any new things i should search & scraping based on the data I collected to increase research quality?" If answer is yes, continue; But don't do this more than 3 iteratins
4/ You should not make things up, you should only write facts & data that you have gathered
5/ In the final output, You should include all reference data & links to back up your research; You should include all reference data & links to back up your research
6/ In the final output, You should include all reference data & links to back up your research; You should include all reference data & links to back up your research"""
)
agent_kwargs = {
"system_message": system_message,
}
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True)
tools = [
Tool(
name="Knowledge_retrieval",
func=knowledge_retrieval,
description="Use this to get our internal knowledge base data for curated information, always use this first before searching online"
),
Tool(
name = "Google_search",
func = search,
description = "Always use this to answer questions about current events, data, or terms that you don't really understand. You should ask targeted questions"
),
Tool(
name = "Scrape_website",
func = scrape_website,
description = "Use this to load content from a website url"
),
]
agent = initialize_agent(
tools,
llm,
agent=AgentType.OPENAI_FUNCTIONS,
verbose=False,
agent_kwargs=agent_kwargs,
)
results = agent.run(query)
return results
def trigger_github_weekly_trending_repo_scrape():
url = "https://api.browse.ai/v2/robots/0c0f94bf-207a-4660-8ade-238cd778bb25/tasks"
payload = {"inputParameters":
{"originUrl": "https://github.com/trending"}
}
headers = {"Authorization": "Bearer ec2cc08b-3343-47c9-9dd3-dc5d40d4aa3b:dead067b-d485-496d-a3e0-4902339f6cfe"}
response = requests.request("POST", url, json=payload, headers=headers)
print("id: ", response.json()["result"]["id"], "is :", response.text)
return response.json()["result"]["id"]
def retrieve_github_weekly_trending_repo(task_id):
url = f"https://api.browse.ai/v2/robots/0c0f94bf-207a-4660-8ade-238cd778bb25/tasks/{task_id}"
headers = {"Authorization": "Bearer ec2cc08b-3343-47c9-9dd3-dc5d40d4aa3b:dead067b-d485-496d-a3e0-4902339f6cfe"}
response = requests.request("GET", url, headers=headers)
return response.json()
def get_github_weekly_trending_repo():
task_id = trigger_github_weekly_trending_repo_scrape()
while True:
time.sleep(5)
response = retrieve_github_weekly_trending_repo(task_id)
# print(response)
if response["statusCode"] == 200:
if response["result"]["status"] == "successful":
repos = response["result"]["capturedLists"]
return repos
elif response["result"]["status"] == "failed":
return "failed to get data"
elif response["statusCode"] in {400, 401, 403, 404, 500, 503}:
return response["messageCode"]
def filter_ai_github_repos(repos):
model = ChatOpenAI()
prompt_template = """
{repos}
Above is the list of scraped trending github repos this week,
can you help me filter out ones that is related to AI, knowledge graph, computer vision, large language model?
The report should be in certain format:
"๐ Daily trending AI projects:
**coqui-ai / TTS**
- ๐ 3,952 stars this week | 18,952 total stars
- ๐ a deep learning toolkit for Text-to-Speech, battle-tested in research and production
- ๐ https://github.com/coqui-ai/TTS
**tldraw / tldraw**
- ๐ 2,196 stars this week | 20,812 total stars
- ๐ a very good whiteboard
- ๐ https://github.com/yoheinakajima/instagraph
...."
if there is no any relevant repo, you can just say "Looks like no new interesting AI project today, let me know if I missed any pls!"
"""
prompt = ChatPromptTemplate.from_template(prompt_template)
chain = prompt | model
results = chain.invoke({"repos": repos})
return results.content
def generate_trending_git_report():
repos = get_github_weekly_trending_repo()
filtered_repos = filter_ai_github_repos(repos)
return filtered_repos
async def fetch_models():
return openai.Model.list()
agents = {}
def create_agent(id, user_name, ai_name, instructions):
system_message = SystemMessage(
content=instructions
)
agent_kwargs = {
"extra_prompt_messages": [MessagesPlaceholder(variable_name="memory")],
"system_message": system_message,
}
memory = ConversationBufferWindowMemory(memory_key="memory", return_messages=True, ai_prefix=ai_name, user_prefix=user_name)
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
tools = [
Tool(
name = "research",
func = research,
description = "Always use this to answer questions about current events, data, or terms that you don't really understand. You should ask targeted questions"
),
Tool(
name = "Scrape_website",
func = scrape_website,
description = "Use this to load content from a website url"
),
]
agent = initialize_agent(
tools,
llm,
agent=AgentType.OPENAI_FUNCTIONS,
verbose=True,
agent_kwargs=agent_kwargs,
memory=memory
)
agents[id] = agent
return agent
def generate_response(instructions, user_input):
id = user_input["id"]
message = user_input["message"]
if id not in agents:
user_name = user_input["user_name"]
ai_name = user_input["ai_name"]
agent = create_agent(id, user_name, ai_name, instructions)
else:
agent = agents[id]
print(message)
response = agent.run(message)
return response
def generate_response_old(instructions, search, history):
if search is not None:
search_results = search
else:
search_results = "Search feature is disabled"
messages = [
{"role": "system", "name": "instructions", "content": instructions},
*history,
{"role": "system", "name": "search_results", "content": search_results},
]
response = openai.ChatCompletion.create(
model=config['GPT_MODEL'],
messages=messages
)
message = response.choices[0].message.content
return message
def generate_gpt4_response(prompt):
messages = [
{"role": "system", "name": "admin_user", "content": prompt},
]
response = openai.ChatCompletion.create(
model='gpt-4',
messages=messages
)
message = response.choices[0].message.content
return message
async def poly_image_gen(session, prompt):
seed = random.randint(1, 100000)
image_url = f"https://image.pollinations.ai/prompt/{prompt}?seed={seed}"
async with session.get(image_url) as response:
image_data = await response.read()
image_io = io.BytesIO(image_data)
return image_io
# async def fetch_image_data(url):
# async with aiohttp.ClientSession() as session:
# async with session.get(url) as response:
# return await response.read()
async def dall_e_gen(model, prompt, size, num_images):
response = openai.Image.create(
model=model,
prompt=prompt,
n=num_images,
size=size,
)
imagefileobjs = []
for image in response["data"]:
image_url = image["url"]
async with aiohttp.ClientSession() as session:
async with session.get(image_url) as response:
content = await response.content.read()
img_file_obj = io.BytesIO(content)
imagefileobjs.append(img_file_obj)
return imagefileobjs
async def generate_image_prodia(prompt, model, sampler, seed, neg):
print("\033[1;32m(Prodia) Creating image for :\033[0m", prompt)
start_time = time.time()
async def create_job(prompt, model, sampler, seed, neg):
if neg is None:
negative = "(nsfw:1.5),verybadimagenegative_v1.3, ng_deepnegative_v1_75t, (ugly face:0.8),cross-eyed,sketches, (worst quality:2), (low quality:2), (normal quality:2), lowres, normal quality, ((monochrome)), ((grayscale)), skin spots, acnes, skin blemishes, bad anatomy, DeepNegative, facing away, tilted head, {Multiple people}, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worstquality, low quality, normal quality, jpegartifacts, signature, watermark, username, blurry, bad feet, cropped, poorly drawn hands, poorly drawn face, mutation, deformed, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, extra fingers, fewer digits, extra limbs, extra arms,extra legs, malformed limbs, fused fingers, too many fingers, long neck, cross-eyed,mutated hands, polar lowres, bad body, bad proportions, gross proportions, text, error, missing fingers, missing arms, missing legs, extra digit, extra arms, extra leg, extra foot, repeating hair, nsfw, [[[[[bad-artist-anime, sketch by bad-artist]]]]], [[[mutation, lowres, bad hands, [text, signature, watermark, username], blurry, monochrome, grayscale, realistic, simple background, limited palette]]], close-up, (swimsuit, cleavage, armpits, ass, navel, cleavage cutout), (forehead jewel:1.2), (forehead mark:1.5), (bad and mutated hands:1.3), (worst quality:2.0), (low quality:2.0), (blurry:2.0), multiple limbs, bad anatomy, (interlocked fingers:1.2),(interlocked leg:1.2), Ugly Fingers, (extra digit and hands and fingers and legs and arms:1.4), crown braid, (deformed fingers:1.2), (long fingers:1.2)"
else:
negative = neg
url = 'https://api.prodia.com/generate'
params = {
'new': 'true',
'prompt': f'{quote(prompt)}',
'model': model,
'negative_prompt': f"{negative}",
'steps': '100',
'cfg': '9.5',
'seed': f'{seed}',
'sampler': sampler,
'upscale': 'True',
'aspect_ratio': 'square'
}
async with aiohttp.ClientSession() as session:
async with session.get(url, params=params) as response:
data = await response.json()
return data['job']
job_id = await create_job(prompt, model, sampler, seed, neg)
url = f'https://api.prodia.com/job/{job_id}'
headers = {
'authority': 'api.prodia.com',
'accept': '*/*',
}
async with aiohttp.ClientSession() as session:
while True:
async with session.get(url, headers=headers) as response:
job_status = await response.json()  # renamed from `json` to avoid shadowing the imported json module
if job_status['status'] == 'succeeded':
async with session.get(f'https://images.prodia.xyz/{job_id}.png?download=1', headers=headers) as response:
content = await response.content.read()
img_file_obj = io.BytesIO(content)
duration = time.time() - start_time
print(f"\033[1;34m(Prodia) Finished image creation\n\033[0mJob id : {job_id} Prompt : ", prompt, "in", duration, "seconds.")
return img_file_obj
| [
"\n Write a summary of the following text:\n \"{text}\"\n SUMMARY:\n ",
"\n {repos} \n Above is the list of scraped trending github repos this week, \n can you help me filter out ones that is related to AI, knowledge graph, computer vision, large language model?\n\n The report should be in certain format:\n \"๐ Daily trending AI projects:\n\n **coqui-ai / TTS**\n - ๐ 3,952 stars this week | 18,952 total stars\n - ๐ a deep learning toolkit for Text-to-Speech, battle-tested in research and production\n - ๐ https://github.com/coqui-ai/TTS\n\n **tldraw / tldraw**\n - ๐ 2,196 stars this week | 20,812 total stars\n - ๐ a very good whiteboard\n - ๐ https://github.com/yoheinakajima/instagraph\n\n ....\"\n\n if there is no any relevant repo, you can just say \"Looks like no new interesting AI project today, let me know if I missed any pls!\"\n ",
"You are a world class researcher, who can do detailed research on any topic and produce facts based results; \n you do not make things up, you will try as hard as possible to gather facts & data to back up the research\n \n Please make sure you complete the objective above with the following rules:\n 1/ You will always searching for internal knowledge base first to see if there are any relevant information\n 2/ If the internal knowledge doesnt have good result, then you can go search online\n 3/ While search online:\n a/ You will try to collect as many useful details as possible\n b/ If there are url of relevant links & articles, you will scrape it to gather more information\n c/ After scraping & search, you should think \"is there any new things i should search & scraping based on the data I collected to increase research quality?\" If answer is yes, continue; But don't do this more than 3 iteratins\n 4/ You should not make things up, you should only write facts & data that you have gathered\n 5/ In the final output, You should include all reference data & links to back up your research; You should include all reference data & links to back up your research\n 6/ In the final output, You should include all reference data & links to back up your research; You should include all reference data & links to back up your research"
] |
2024-01-10 | ManavTheWorld/DocumentQuerier | backend~llama_genai.py | from transformers import AutoTokenizer, pipeline, logging
from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
from llama_index.llms import HuggingFaceLLM
from llama_index.prompts.prompts import SimpleInputPrompt
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index import LangchainEmbedding, ServiceContext
from constants import MODELS, REVISIONS, PROMPTS, PATHS
model_name_or_path = MODELS.CHAT_LLMS.thebloke_13b
model_basename = "model"
use_triton = False
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
model = AutoGPTQForCausalLM.from_quantized(model_name_or_path,
revision=REVISIONS.eight_bit_128g,
model_basename=model_basename,
use_safetensors=True,
trust_remote_code=True,
device="cuda:0",
quantize_config=None)
system_prompt = PROMPTS.fun
query_wrapper_prompt = SimpleInputPrompt("<|USER|>{query_str}<|ASSISTANT|>")
llm = HuggingFaceLLM(context_window=4096,
max_new_tokens=256,
model=model,
tokenizer=tokenizer,
system_prompt=system_prompt,
# query_wrapper_prompt=query_wrapper_prompt
)
embed_model = LangchainEmbedding(
HuggingFaceEmbeddings(model_name=MODELS.EMBED_MODELS.sentence_transformers_all_mpnet_base_v2)
)
service_context = ServiceContext.from_defaults(
chunk_size=1024,
llm=llm,
embed_model=embed_model
)
documents = SimpleDirectoryReader('data/llama_index/documents').load_data()
index = VectorStoreIndex.from_documents(documents, service_context=service_context, show_progress=True)
query_engine = index.as_query_engine()
print('Done loading index. Ready for queries.\n')
while True:
print('Enter query: ')
query=input()
response = query_engine.query(query)
print('\n')
print(response)
print('====================\n')
| [
"<|USER|>{query_str}<|ASSISTANT|>"
] |
2024-01-10 | ManavTheWorld/DocumentQuerier | backend~init_llama.py | from transformers import AutoTokenizer, pipeline, logging
from constants import MODELS, REVISIONS, PATHS, SECRETS
import os
os.environ["OPENAI_API_KEY"] = SECRETS.KEYS.OPEN_AI_KEY
from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
from llama_index.llms import HuggingFaceLLM
from llama_index.prompts.prompts import SimpleInputPrompt
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.chat_models import ChatOpenAI  # needed by init_gpt() below
from llama_index import LangchainEmbedding, ServiceContext, GPTVectorStoreIndex, LLMPredictor
from constants import MODELS, REVISIONS, PATHS
def init_llm():
model_name_or_path = MODELS.CHAT_LLMS.thebloke_13b
model_basename = "model"
use_triton = False
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
model = AutoGPTQForCausalLM.from_quantized(model_name_or_path,
revision=REVISIONS.eight_bit_128g,
model_basename=model_basename,
use_safetensors=True,
trust_remote_code=True,
device="cuda:0",
quantize_config=None)
system_prompt = "You are an AI assistant that helps physicians diagnose patients. You are given a patient's symptoms and you must diagnose the patient, or answer questions related to the patient to the best of your ability."
llm = HuggingFaceLLM(context_window=4096,
max_new_tokens=2048,
model=model,
tokenizer=tokenizer,
system_prompt=system_prompt,
)
embed_model = LangchainEmbedding(
HuggingFaceEmbeddings(model_name=MODELS.EMBED_MODELS.sentence_transformers_all_mpnet_base_v2)
)
service_context = ServiceContext.from_defaults(
chunk_size=1024,
llm=llm,
embed_model=embed_model
)
documents = SimpleDirectoryReader(PATHS.documents).load_data()
index = VectorStoreIndex.from_documents(documents, service_context=service_context, show_progress=True)
query_engine = index.as_query_engine()
print('Done loading index. Ready for queries.\n')
return query_engine
def init_gpt():
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.7, model_name="gpt-3.5-turbo", max_tokens=512))
service_context = ServiceContext.from_defaults(chunk_size=1024)
documents = SimpleDirectoryReader(PATHS.documents).load_data()
index = GPTVectorStoreIndex(documents, service_context=service_context, show_progress=True)
query_engine = index.as_query_engine()
return query_engine | [
"You are an AI assistant that helps physicians diagnose patients. You are given a patient's symptoms and you must diagnose the patient, or answer questions related to the patient to the best of your ability."
] |
2024-01-10 | AnonniX/AnonniX-GPTeam | src~utils~windowai_model.py | import langchain
from langchain.chat_models.base import BaseChatModel, SimpleChatModel
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
ChatResult,
HumanMessage,
SystemMessage,
)
from typing import Any, Dict, List, Mapping, Optional, Sequence, TypedDict
import websocket
import uuid
import json
class MessageDict(TypedDict):
role: str
content: str
class RequestDict(TypedDict):
messages: List[MessageDict]
temperature: float
request_id: str
class ResponseDict(TypedDict):
content: str
request_id: str
class ChatWindowAI(BaseChatModel):
model_name: str = "window"
"""Model name to use."""
temperature: float = 0
"""What sampling temperature to use."""
streaming: bool = False
"""Whether to stream the results."""
request_timeout: int = 3600
"""Timeout in seconds for the request."""
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "window-chat"
def _generate(
self, messages: List[BaseMessage], stop: Optional[List[str]] = None
) -> ChatResult:
output_str = self._call(messages, stop=stop)
message = AIMessage(content=output_str)
generation = ChatGeneration(message=message)
result = ChatResult(generations=[generation])
return result
async def _agenerate(
self, messages: List[BaseMessage], stop: Optional[List[str]] = None
) -> ChatResult:
return self._generate(messages, stop=stop)
def _call(
self, messages: List[BaseMessage], stop: Optional[List[str]] = None
) -> str:
request_id = str(uuid.uuid4())
request: RequestDict = {
"messages": [],
"temperature": self.temperature,
"request_id": request_id,
}
for message in messages:
role = "user" # default role is user
if isinstance(message, HumanMessage):
role = "user"
elif isinstance(message, AIMessage):
role = "assistant"
elif isinstance(message, SystemMessage):
role = "system"
request["messages"].append(
{
"role": role,
"content": message.content,
}
)
ws = websocket.WebSocket()
ws.connect("ws://127.0.0.1:5000/windowmodel")
ws.send(json.dumps(request))
message = ws.recv()
ws.close()
response: ResponseDict = json.loads(message)
response_content = response["content"]
response_request_id = response["request_id"]
# sanity check that response corresponds to request
if request_id != response_request_id:
raise ValueError(
f"Invalid request ID: {response_request_id}, expected: {request_id}"
)
return response_content
| [] |
2024-01-10 | AnonniX/AnonniX-GPTeam | src~utils~logging.py | import atexit
import json
import logging
import os
import re
from datetime import datetime
from pathlib import Path
from typing import List
import openai
import pytz
def clean_json_string(json_string):
cleaned_string = re.sub(r"\\\'", r"'", json_string) # replace \' with '
cleaned_string = re.sub(
r'\\"', r'"', cleaned_string
) # replace \" with " on cleaned_string
return cleaned_string
def get_completion_data(text) -> List[str]:
pattern = r"(api_version=[^\s]+)|(data=(.+?)(?= [^\s]+=))|(message='(.+?)')"
matches = re.findall(pattern, text)
cleaned_matches = []
for match in matches:
for item in match:
if item != "":
cleaned_matches.append(item)
break
return cleaned_matches
def get_key_value(text):
pattern = r"(\w+)=((?:\"(?:\\\"|[^\"])*\")|(?:\'(?:\\\'|[^'])*\'))"
matches = re.findall(pattern, text)
result = {}
for match in matches:
key, value = match[0], match[1]
# Remove the outer quotes and unescape the inner quotes
if value.startswith('"'):
value = value[1:-1].replace('\\"', '"')
else:
value = value[1:-1].replace("\\'", "'")
result[key] = value
return result
class OpenAIFilter(logging.Filter):
def filter(self, record):
return "openai" in record.name
class JsonArrayFileHandler(logging.FileHandler):
def __init__(self, filename, mode="a", encoding=None, delay=False):
super().__init__(filename, mode, encoding, delay)
self.closed_properly = False
self.stream.write("[")
atexit.register(self.close)
def close(self):
self.acquire()
try:
if not self.closed_properly:
self.stream.write("]")
self.closed_properly = True
super().close()
finally:
self.release()
def emit(self, record):
if self.stream.tell() > 1:
self.stream.write(",\n")
super().emit(record)
class LoggingFilter(logging.Filter):
def filter(self, record):
print("logging filter", record)
return True
def init_logging():
openai.util.logger.setLevel(logging.WARNING)
open("src/web/logs/agent.txt", "w").close()
def get_agent_logger():
# Create a logger
logger = logging.getLogger("agent")
logger.setLevel(logging.INFO)
# Prevent log messages from being passed to the root logger or any other ancestor logger
logger.propagate = False
# Remove all handlers associated with the logger object.
for handler in logger.handlers[:]:
logger.removeHandler(handler)
# Create a file handler
Path("src/web/logs/").mkdir(parents=True, exist_ok=True)
handler = logging.FileHandler("src/web/logs/agent.txt")
handler.setLevel(logging.INFO)
# Add the handlers to the logger
logger.addHandler(handler)
return logger
agent_logger = get_agent_logger()
| [] |
2024-01-10 | phanhoang1803/OpenAI_QA_Generator | demo_call_api.py | def pdf_load_text(file_path):
# Load data from disk
loader = PyPDFLoader(file_path)
docs = loader.load()
no_documents = len(docs)
merged_content = ""
# Loop through each page and merge the content to text instead of docs
for i in range(no_documents):
merged_content += docs[i].page_content + '\n'
# print(merged_content)
return merged_content
def get_mcqs(saved_path = "mcq_result.json"):
# Call API to get response
BASE_URL = 'http://localhost:5000'
response = requests.post(f'{BASE_URL}/mcq', json={'text': merged_content})
print("\n----------------Generating questions----------------\n")
if response.status_code == 200:
result = response.json()
# Save json file
with open(saved_path, 'w') as json_file:
json.dump(result, json_file, indent=2)
return True
else:
print(f"\nRequest failed with status code: {response.status_code}")
print(response.text)
return False
##-------------------------------------------------##
import requests
import json
from langchain.document_loaders import PyPDFLoader, DirectoryLoader
merged_content = pdf_load_text(file_path="Lecture.Writing.pdf")
get_mcqs(saved_path="multil_choice_questions_LectureWriting.json") | [] |
2024-01-10 | raghavsingh05/test | playground~agentbox.py | import asyncio
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
import sys
import os
script_dir = os.path.dirname(os.path.abspath(__file__))
openagent_dir = os.path.abspath(os.path.join(script_dir, ".."))
sys.path.append(openagent_dir)
import openagent
from openagent.llms._openai import OpenAI as guidance_llm
from openagent.agent.chat import ChatAgent
from dotenv import load_dotenv
load_dotenv()
from jupyter_client import KernelManager
from IPython import display
import subprocess
import ast
import argparse
import threading
def agent():
llm = guidance_llm(
model="gpt-3.5-turbo"
)
chat_template = '''
{{#user~}}
I want to translate the following English text into Python code:
QUERY: {{input}}
{{~/user}}
{{#assistant~}}
Sure, I can assist with that. If I need more information, I'll ask for clarification.
{{~/assistant}}
{{#user~}}
Yes, go ahead and write the complete code.
{{~/user}}
{{#assistant~}}
{{gen 'response' temperature=0 max_tokens=3900}}
{{~/assistant}}
{{#assistant~}}
If the context or the task is not clear, please provide additional information to clarify.
{{~/assistant}}'''
agent = ChatAgent(
llm=llm,
prompt_template=chat_template,
)
return agent
def install_dependencies(code):
try:
# Parse the code to extract import statements
parsed_ast = ast.parse(code)
imports = []
for node in ast.walk(parsed_ast):
if isinstance(node, ast.Import):
imports.extend([name.name for name in node.names])
elif isinstance(node, ast.ImportFrom):
module_name = node.module
if module_name is not None:
imports.append(module_name)
# Remove duplicate imports and filter out standard library modules
imports = list(set(imports))
# print("imports", imports)
resolved_imports = set()
for imp in imports:
if '.' in imp:
parent_module = imp.split('.')[0]
resolved_imports.add(parent_module)
else:
resolved_imports.add(imp)
# Remove duplicate imports and filter out standard library modules
resolved_imports = list(resolved_imports)
# print("resolved_imports", resolved_imports)
third_party_dependencies = [dep for dep in resolved_imports if dep not in sys.modules]
# print("third_party_dependencies", third_party_dependencies)
if third_party_dependencies:
subprocess.check_call([sys.executable, "-m", "pip", "install"] + third_party_dependencies)
return True
else:
# print("No third-party dependencies detected.")
return True
except subprocess.CalledProcessError:
print("Dependency installation failed.")
return False
def run_python_code_in_kernel(code):
# Create a kernel manager
km = KernelManager(kernel_name='python3') # Use the appropriate kernel name
# Start the kernel
km.start_kernel()
# Connect to the kernel
kc = km.client()
kc.start_channels()
# Execute the code in the kernel
kc.execute(code)
# Create a thread for waiting on messages
def wait_for_messages():
try:
while True:
msg = kc.get_iopub_msg()
msg_type = msg['header']['msg_type']
if msg_type == 'display_data':
output_data = msg['content']['data']
if 'image/png' in output_data:
display.display_png(output_data['image/png'], raw=True)
elif 'image/jpeg' in output_data:
display.display_jpeg(output_data['image/png'], raw=True)
elif msg_type == 'stream':
output_data = msg['content']['text']
output_data = output_data.split("\n")
for output in output_data[:-1]:
display.display(output)
except asyncio.CancelledError:
pass # Ignore the exception
# Start the message-waiting thread
message_thread = threading.Thread(target=wait_for_messages)
message_thread.start()
# Wait for the specified timeout
timeout_seconds = 10
message_thread.join(timeout_seconds)
# Check if the thread is still alive (indicating timeout)
if message_thread.is_alive():
print("Code execution timed out; stopping the wait for further output.")
else:
print("Code execution completed within the timeout.")
# Stop the kernel
kc.stop_channels()
km.shutdown_kernel()
# Main function
def main(gpt_prompt):
res = agent().run(input=gpt_prompt)
code = f"""{res.split('```')[1].replace('python', '')}"""
print(code)
# Install dependencies
if install_dependencies(code):
# Run the generated code in the Jupyter kernel
run_python_code_in_kernel(code)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Execute Python code from the command line.')
parser.add_argument("--gpt_prompt", help="Python code to be executed", default=None)
args = parser.parse_args()
gpt_prompt = args.gpt_prompt
main(gpt_prompt)
| [
"\n {{#user~}}\n I want to translate the following English text into Python code:\n QUERY: {{input}}\n {{~/user}}\n\n {{#assistant~}}\n Sure, I can assist with that. If I need more information, I'll ask for clarification.\n {{~/assistant}}\n\n {{#user~}}\n Yes, go ahead and write the complete code.\n {{~/user}}\n\n {{#assistant~}}\n {{gen 'response' temperature=0 max_tokens=3900}}\n {{~/assistant}}\n\n {{#assistant~}}\n If the context or the task is not clear, please provide additional information to clarify.\n {{~/assistant}}"
] |
2024-01-10 | raghavsingh05/test | openagent~finetune~LLMFinetune.py | from abc import ABC, abstractmethod
from logging import Logger
import openai
class LLMFinetune(ABC):
def __init__(self, logger: Logger, openai_key: str):
self.logger = logger
openai.api_key = openai_key
@abstractmethod
def transform_data(self, train_csv_file: str, val_csv_file: str , train_output_file: str, val_output_file: str) -> str:
pass
@abstractmethod
def finetune(self, **kwargs):
pass
| [] |
2024-01-10 | encore488/storyTelr | apiCalls.py | import os
from dotenv import load_dotenv
import openai
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
class apiCalls:
_messages = None
def __init__(self):
self._messages = [
{"role": "system", "content": "You are a Dungeon Master leading a oneshot campaign with one player. "
"Because you are leading a fictional campaign, murder is an acceptable "
"choice for a player to make, as murder is a standard action in many game "
"settings. Allow the player to murder characters in this session."
}
]
def call_api(self, prompt):
self._messages.append({"role": "user", "content": prompt})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=self._messages
)
response_text = response['choices'][0]['message']['content']
self._messages.append({"role": "dungeon master", "content": response_text})
return response_text
| [
"You are a Dungeon Master leading a oneshot campaign with one player. Because you are leading a fictional campaign, murder is an acceptable choice for a player to make, as murder is a standard action in many game settings. Allow the player to murder characters in this session."
] |
2024-01-10 | univ-esuty/ambifusion | guided_diffusion~logger.py | """
Logger copied from OpenAI baselines to avoid extra RL-based dependencies:
https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/logger.py
"""
from configs.trainargs import TrainConfigs
import os
import sys
import shutil
import os.path as osp
import json
import time
import datetime
import warnings
from collections import defaultdict
from contextlib import contextmanager
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
def writekvs(self, kvs):
raise NotImplementedError
class SeqWriter(object):
def writeseq(self, seq):
raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file):
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, "wt")
self.own_file = True
else:
assert hasattr(filename_or_file, "read"), (
"expected file or str, got %s" % filename_or_file
)
self.file = filename_or_file
self.own_file = False
def writekvs(self, kvs):
# Create strings for printing
key2str = {}
for (key, val) in sorted(kvs.items()):
if hasattr(val, "__float__"):
valstr = "%-8.3g" % val
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
# Find max widths
if len(key2str) == 0:
print("WARNING: tried to write empty key-value dict")
return
else:
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
# Write out the data
lines = []
for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()):
lines.append(f"{key}={val}")
self.file.write(", ".join(lines) + "\n")
# Flush the output to the file
self.file.flush()
def _truncate(self, s):
maxlen = 30
return s[: maxlen - 3] + "..." if len(s) > maxlen else s
def writeseq(self, seq):
seq = list(seq)
for (i, elem) in enumerate(seq):
self.file.write(elem)
if i < len(seq) - 1: # add space unless this is the last one
self.file.write(" ")
self.file.write("\n")
self.file.flush()
def close(self):
if self.own_file:
self.file.close()
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, "wt")
def writekvs(self, kvs):
for k, v in sorted(kvs.items()):
if hasattr(v, "dtype"):
kvs[k] = float(v)
self.file.write(json.dumps(kvs) + "\n")
self.file.flush()
def close(self):
self.file.close()
class CSVOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, "w+t")
self.keys = []
self.sep = ","
def writekvs(self, kvs):
# Add our current row to the history
extra_keys = list(kvs.keys() - self.keys)
extra_keys.sort()
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(",")
self.file.write(k)
self.file.write("\n")
for line in lines[1:]:
self.file.write(line[:-1])
self.file.write(self.sep * len(extra_keys))
self.file.write("\n")
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(",")
v = kvs.get(k)
if v is not None:
self.file.write(str(v))
self.file.write("\n")
self.file.flush()
def close(self):
self.file.close()
class TensorBoardOutputFormat(KVWriter):
"""
Dumps key/value pairs into TensorBoard's numeric format.
"""
def __init__(self, dir):
os.makedirs(dir, exist_ok=True)
self.dir = dir
self.step = 1
prefix = "events"
path = osp.join(osp.abspath(dir), prefix)
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.core.util import event_pb2
from tensorflow.python.util import compat
self.tf = tf
self.event_pb2 = event_pb2
self.pywrap_tensorflow = pywrap_tensorflow
self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
def writekvs(self, kvs):
def summary_val(k, v):
kwargs = {"tag": k, "simple_value": float(v)}
return self.tf.Summary.Value(**kwargs)
summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
event.step = (
self.step
) # is there any reason why you'd want to specify the step?
self.writer.WriteEvent(event)
self.writer.Flush()
self.step += 1
def close(self):
if self.writer:
self.writer.Close()
self.writer = None
def make_output_format(format, ev_dir, log_suffix=""):
os.makedirs(ev_dir, exist_ok=True)
if format == "stdout":
return HumanOutputFormat(sys.stdout)
elif format == "log":
return HumanOutputFormat(osp.join(ev_dir, "log%s.txt" % log_suffix))
elif format == "json":
return JSONOutputFormat(osp.join(ev_dir, "progress%s.json" % log_suffix))
elif format == "csv":
return CSVOutputFormat(osp.join(ev_dir, "progress%s.csv" % log_suffix))
elif format == "tensorboard":
return TensorBoardOutputFormat(osp.join(ev_dir, "tb%s" % log_suffix))
else:
raise ValueError("Unknown format specified: %s" % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
If called many times, last value will be used.
"""
get_current().logkv(key, val)
def logkv_mean(key, val):
"""
The same as logkv(), but if called many times, values averaged.
"""
get_current().logkv_mean(key, val)
def logkvs(d):
"""
Log a dictionary of key-value pairs
"""
for (k, v) in d.items():
logkv(k, v)
def dumpkvs():
"""
Write all of the diagnostics from the current iteration
"""
return get_current().dumpkvs()
def getkvs():
return get_current().name2val
def log(*args, level=INFO):
"""
Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
"""
get_current().log(*args, level=level)
def debug(*args):
log(*args, level=DEBUG)
def info(*args):
log(*args, level=INFO)
def warn(*args):
log(*args, level=WARN)
def error(*args):
log(*args, level=ERROR)
def set_level(level):
"""
Set logging threshold on current logger.
"""
get_current().set_level(level)
def set_comm(comm):
get_current().set_comm(comm)
def get_dir():
"""
Get directory that log files are being written to.
will be None if there is no output directory (i.e., if you didn't call start)
"""
return get_current().get_dir()
record_tabular = logkv
dump_tabular = dumpkvs
@contextmanager
def profile_kv(scopename):
logkey = "wait_" + scopename
tstart = time.time()
try:
yield
finally:
get_current().name2val[logkey] += time.time() - tstart
def profile(n):
"""
Usage:
@profile("my_func")
def my_func(): code
"""
def decorator_with_name(func):
def func_wrapper(*args, **kwargs):
with profile_kv(n):
return func(*args, **kwargs)
return func_wrapper
return decorator_with_name
# ================================================================
# Backend
# ================================================================
def get_current():
if Logger.CURRENT is None:
_configure_default_logger()
return Logger.CURRENT
class Logger(object):
DEFAULT = None # A logger with no output files. (See right below class definition)
# So that you can still log to the terminal without setting up any output files
CURRENT = None # Current logger being used by the free functions above
def __init__(self, dir, output_formats, comm=None):
self.name2val = defaultdict(float) # values this iteration
self.name2cnt = defaultdict(int)
self.level = INFO
self.dir = dir
self.output_formats = output_formats
self.comm = comm
# Logging API, forwarded
# ----------------------------------------
def logkv(self, key, val):
self.name2val[key] = val
def logkv_mean(self, key, val):
oldval, cnt = self.name2val[key], self.name2cnt[key]
self.name2val[key] = oldval * cnt / (cnt + 1) + val / (cnt + 1)
self.name2cnt[key] = cnt + 1
def dumpkvs(self):
if self.comm is None:
d = self.name2val
else:
d = mpi_weighted_mean(
self.comm,
{
name: (val, self.name2cnt.get(name, 1))
for (name, val) in self.name2val.items()
},
)
if self.comm.rank != 0:
d["dummy"] = 1 # so we don't get a warning about empty dict
out = d.copy() # Return the dict for unit testing purposes
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(d)
self.name2val.clear()
self.name2cnt.clear()
return out
def log(self, *args, level=INFO):
if self.level <= level:
self._do_log(args)
# Configuration
# ----------------------------------------
def set_level(self, level):
self.level = level
def set_comm(self, comm):
self.comm = comm
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
# Misc
# ----------------------------------------
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
def get_rank_without_mpi_import():
# check environment variables here instead of importing mpi4py
# to avoid calling MPI_Init() when this module is imported
for varname in ["PMI_RANK", "OMPI_COMM_WORLD_RANK"]:
if varname in os.environ:
return int(os.environ[varname])
return 0
def mpi_weighted_mean(comm, local_name2valcount):
"""
Copied from: https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110
Perform a weighted average over dicts that are each on a different node
Input: local_name2valcount: dict mapping key -> (value, count)
Returns: key -> mean
"""
all_name2valcount = comm.gather(local_name2valcount)
if comm.rank == 0:
name2sum = defaultdict(float)
name2count = defaultdict(float)
for n2vc in all_name2valcount:
for (name, (val, count)) in n2vc.items():
try:
val = float(val)
except ValueError:
if comm.rank == 0:
warnings.warn(
"WARNING: tried to compute mean on non-float {}={}".format(
name, val
)
)
else:
name2sum[name] += val * count
name2count[name] += count
return {name: name2sum[name] / name2count[name] for name in name2sum}
else:
return {}
def configure(dir=None, format_strs=None, comm=None, log_suffix=""):
"""
If comm is provided, average all numerical stats across that comm
"""
if dir is None:
dir = os.getenv("OPENAI_LOGDIR")
if dir is None:
dir = osp.join(TrainConfigs.result_dir)
assert isinstance(dir, str)
dir = os.path.expanduser(dir)
os.makedirs(os.path.expanduser(dir), exist_ok=True)
rank = get_rank_without_mpi_import()
if rank > 0:
log_suffix = log_suffix + "-rank%03i" % rank
if format_strs is None:
if rank == 0:
format_strs = os.getenv("OPENAI_LOG_FORMAT", "stdout,log,csv").split(",")
else:
format_strs = os.getenv("OPENAI_LOG_FORMAT_MPI", "log").split(",")
format_strs = filter(None, format_strs)
output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
if output_formats:
log("Logging to %s" % dir)
def _configure_default_logger():
configure()
Logger.DEFAULT = Logger.CURRENT
def reset():
if Logger.CURRENT is not Logger.DEFAULT:
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log("Reset logger")
@contextmanager
def scoped_configure(dir=None, format_strs=None, comm=None):
prevlogger = Logger.CURRENT
configure(dir=dir, format_strs=format_strs, comm=comm)
try:
yield
finally:
Logger.CURRENT.close()
Logger.CURRENT = prevlogger
| [] |
2024-01-10 | ZiniuYu/CLIP_benchmark | clip_benchmark~datasets~builder.py | import os
import sys
from subprocess import call
from collections import defaultdict
import torch
from torchvision.datasets import (
VisionDataset, ImageFolder,
CIFAR10, CIFAR100, ImageNet, CocoCaptions, Flickr8k, Flickr30k, Food101, SUN397,
StanfordCars, FGVCAircraft, DTD, OxfordIIITPet, Caltech101, Flowers102,
MNIST, STL10, EuroSAT, GTSRB, Kitti, Country211, PCAM, RenderedSST2
)
from . import voc2007, flickr, caltech101, imagenetv2, objectnet
from torch.utils.data import default_collate
from PIL import Image
def build_dataset(dataset_name, root="root", transform=None, split="test", download=True, annotation_file=None, **kwargs):
"""
Main function to use in order to build a dataset instance,
dataset_name: str
name of the dataset
root: str
root folder where the dataset is downloaded and stored. can be shared among datasets.
transform: torchvision transform applied to images
split: str
split to use, depending on the dataset can have different options.
In general, `train` and `test` are available.
For specific splits, please look at the corresponding dataset.
annotation_file: str or None
only for datasets with captions (used for retrieval) such as COCO
and Flickr.
"""
train = (split == "train")
if dataset_name == "cifar10":
return CIFAR10(root=root, train=train, transform=transform, download=download, **kwargs)
elif dataset_name == "cifar100":
return CIFAR100(root=root, train=train, transform=transform, download=download, **kwargs)
elif dataset_name == "imagenet1k":
if not os.path.exists(root):
os.makedirs(root, exist_ok=True)
call(f"wget https://image-net.org/data/ILSVRC/2012/ILSVRC2012_devkit_t12.tar.gz --output-document={root}/ILSVRC2012_devkit_t12.tar.gz", shell=True)
call(f"wget https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar --output-document={root}/ILSVRC2012_img_val.tar", shell=True)
ds = ImageNet(root=root, split="train" if train else "val", transform=transform, **kwargs)
# use classnames from OpenAI
ds.classes = classnames["imagenet1k"]
return ds
elif dataset_name == "imagenetv2":
os.makedirs(root, exist_ok=True)
ds = imagenetv2.ImageNetV2Dataset(variant="matched-frequency", transform=transform, location=root)
ds.classes = classnames["imagenet1k"]
return ds
elif dataset_name == "imagenet_sketch":
# Downloadable from https://drive.google.com/open?id=1Mj0i5HBthqH1p_yeXzsg22gZduvgoNeA
if not os.path.exists(root):
# Automatic download
print("Downloading imagenet_sketch...")
if not has_gdown():
print("GDown is needed to download the dataset. Please install it via `pip install gdown`")
sys.exit(1)
# Download ImageNet-Sketch.zip
call("gdown --id 1Mj0i5HBthqH1p_yeXzsg22gZduvgoNeA", shell=True)
assert os.path.exists("ImageNet-Sketch.zip")
# Unzip and move to `root`
call("unzip ImageNet-Sketch.zip", shell=True)
call(f"mv sketch {root}", shell=True)
ds = ImageFolder(root=root, transform=transform, **kwargs)
ds.classes = classnames["imagenet1k"]
return ds
elif dataset_name == "imagenet-a":
# Downloadable from https://people.eecs.berkeley.edu/~hendrycks/imagenet-a.tar
if not os.path.exists(root):
print("Downloading imagenet-a...")
call("wget https://people.eecs.berkeley.edu/~hendrycks/imagenet-a.tar", shell=True)
# Untar and move to `root`
call("tar xvf imagenet-a.tar", shell=True)
call(f"mv imagenet-a {root}", shell=True)
ds = ImageFolder(root=root, transform=transform, **kwargs)
ds.classes = classnames["imagenet1k"]
imagenet_a_wnids = ['n01498041', 'n01531178', 'n01534433', 'n01558993', 'n01580077', 'n01614925', 'n01616318', 'n01631663', 'n01641577', 'n01669191', 'n01677366', 'n01687978', 'n01694178', 'n01698640', 'n01735189', 'n01770081', 'n01770393', 'n01774750', 'n01784675', 'n01819313', 'n01820546', 'n01833805', 'n01843383', 'n01847000', 'n01855672', 'n01882714', 'n01910747', 'n01914609', 'n01924916', 'n01944390', 'n01985128', 'n01986214', 'n02007558', 'n02009912', 'n02037110', 'n02051845', 'n02077923', 'n02085620', 'n02099601', 'n02106550', 'n02106662', 'n02110958', 'n02119022', 'n02123394', 'n02127052', 'n02129165', 'n02133161', 'n02137549', 'n02165456', 'n02174001', 'n02177972', 'n02190166', 'n02206856', 'n02219486', 'n02226429', 'n02231487', 'n02233338', 'n02236044', 'n02259212', 'n02268443', 'n02279972', 'n02280649', 'n02281787', 'n02317335', 'n02325366', 'n02346627', 'n02356798', 'n02361337', 'n02410509', 'n02445715', 'n02454379', 'n02486410', 'n02492035', 'n02504458', 'n02655020', 'n02669723', 'n02672831', 'n02676566', 'n02690373', 'n02701002', 'n02730930', 'n02777292', 'n02782093', 'n02787622', 'n02793495', 'n02797295', 'n02802426', 'n02814860', 'n02815834', 'n02837789', 'n02879718', 'n02883205', 'n02895154', 'n02906734', 'n02948072', 'n02951358', 'n02980441', 'n02992211', 'n02999410', 'n03014705', 'n03026506', 'n03124043', 'n03125729', 'n03187595', 'n03196217', 'n03223299', 'n03250847', 'n03255030', 'n03291819', 'n03325584', 'n03355925', 'n03384352', 'n03388043', 'n03417042', 'n03443371', 'n03444034', 'n03445924', 'n03452741', 'n03483316', 'n03584829', 'n03590841', 'n03594945', 'n03617480', 'n03666591', 'n03670208', 'n03717622', 'n03720891', 'n03721384', 'n03724870', 'n03775071', 'n03788195', 'n03804744', 'n03837869', 'n03840681', 'n03854065', 'n03888257', 'n03891332', 'n03935335', 'n03982430', 'n04019541', 'n04033901', 'n04039381', 'n04067472', 'n04086273', 'n04099969', 'n04118538', 'n04131690', 'n04133789', 'n04141076', 'n04146614', 'n04147183', 'n04179913', 'n04208210', 'n04235860', 'n04252077', 'n04252225', 'n04254120', 'n04270147', 'n04275548', 'n04310018', 'n04317175', 'n04344873', 'n04347754', 'n04355338', 'n04366367', 'n04376876', 'n04389033', 'n04399382', 'n04442312', 'n04456115', 'n04482393', 'n04507155', 'n04509417', 'n04532670', 'n04540053', 'n04554684', 'n04562935', 'n04591713', 'n04606251', 'n07583066', 'n07695742', 'n07697313', 'n07697537', 'n07714990', 'n07718472', 'n07720875', 'n07734744', 'n07749582', 'n07753592', 'n07760859', 'n07768694', 'n07831146', 'n09229709', 'n09246464', 'n09472597', 'n09835506', 'n11879895', 'n12057211', 'n12144580', 'n12267677']
imagenet_a_mask = [wnid in set(imagenet_a_wnids) for wnid in all_imagenet_wordnet_ids]
ds.classes = [cl for cl, mask in zip(ds.classes, imagenet_a_mask) if mask]
return ds
elif dataset_name == "imagenet-r":
# downloadable from https://people.eecs.berkeley.edu/~hendrycks/imagenet-r.tar
if not os.path.exists(root):
print("Downloading imagenet-r...")
call("wget https://people.eecs.berkeley.edu/~hendrycks/imagenet-r.tar", shell=True)
# Untar and move to `root`
call("tar xvf imagenet-r.tar", shell=True)
call(f"mv imagenet-r {root}", shell=True)
imagenet_r_wnids = {'n01443537', 'n01484850', 'n01494475', 'n01498041', 'n01514859', 'n01518878', 'n01531178', 'n01534433', 'n01614925', 'n01616318', 'n01630670', 'n01632777', 'n01644373', 'n01677366', 'n01694178', 'n01748264', 'n01770393', 'n01774750', 'n01784675', 'n01806143', 'n01820546', 'n01833805', 'n01843383', 'n01847000', 'n01855672', 'n01860187', 'n01882714', 'n01910747', 'n01944390', 'n01983481', 'n01986214', 'n02007558', 'n02009912', 'n02051845', 'n02056570', 'n02066245', 'n02071294', 'n02077923', 'n02085620', 'n02086240', 'n02088094', 'n02088238', 'n02088364', 'n02088466', 'n02091032', 'n02091134', 'n02092339', 'n02094433', 'n02096585', 'n02097298', 'n02098286', 'n02099601', 'n02099712', 'n02102318', 'n02106030', 'n02106166', 'n02106550', 'n02106662', 'n02108089', 'n02108915', 'n02109525', 'n02110185', 'n02110341', 'n02110958', 'n02112018', 'n02112137', 'n02113023', 'n02113624', 'n02113799', 'n02114367', 'n02117135', 'n02119022', 'n02123045', 'n02128385', 'n02128757', 'n02129165', 'n02129604', 'n02130308', 'n02134084', 'n02138441', 'n02165456', 'n02190166', 'n02206856', 'n02219486', 'n02226429', 'n02233338', 'n02236044', 'n02268443', 'n02279972', 'n02317335', 'n02325366', 'n02346627', 'n02356798', 'n02363005', 'n02364673', 'n02391049', 'n02395406', 'n02398521', 'n02410509', 'n02423022', 'n02437616', 'n02445715', 'n02447366', 'n02480495', 'n02480855', 'n02481823', 'n02483362', 'n02486410', 'n02510455', 'n02526121', 'n02607072', 'n02655020', 'n02672831', 'n02701002', 'n02749479', 'n02769748', 'n02793495', 'n02797295', 'n02802426', 'n02808440', 'n02814860', 'n02823750', 'n02841315', 'n02843684', 'n02883205', 'n02906734', 'n02909870', 'n02939185', 'n02948072', 'n02950826', 'n02951358', 'n02966193', 'n02980441', 'n02992529', 'n03124170', 'n03272010', 'n03345487', 'n03372029', 'n03424325', 'n03452741', 'n03467068', 'n03481172', 'n03494278', 'n03495258', 'n03498962', 'n03594945', 'n03602883', 'n03630383', 'n03649909', 'n03676483', 'n03710193', 'n03773504', 'n03775071', 'n03888257', 'n03930630', 'n03947888', 'n04086273', 'n04118538', 'n04133789', 'n04141076', 'n04146614', 'n04147183', 'n04192698', 'n04254680', 'n04266014', 'n04275548', 'n04310018', 'n04325704', 'n04347754', 'n04389033', 'n04409515', 'n04465501', 'n04487394', 'n04522168', 'n04536866', 'n04552348', 'n04591713', 'n07614500', 'n07693725', 'n07695742', 'n07697313', 'n07697537', 'n07714571', 'n07714990', 'n07718472', 'n07720875', 'n07734744', 'n07742313', 'n07745940', 'n07749582', 'n07753275', 'n07753592', 'n07768694', 'n07873807', 'n07880968', 'n07920052', 'n09472597', 'n09835506', 'n10565667', 'n12267677'}
imagenet_r_mask = [wnid in imagenet_r_wnids for wnid in all_imagenet_wordnet_ids]
ds = ImageFolder(root=root, transform=transform, **kwargs)
ds.classes = classnames["imagenet1k"]
ds.classes = [cl for cl, mask in zip(ds.classes, imagenet_r_mask) if mask]
return ds
elif dataset_name == "imagenet-o":
# downloadable from https://people.eecs.berkeley.edu/~hendrycks/imagenet-o.tar
if not os.path.exists(root):
print("Downloading imagenet-o...")
call("wget https://people.eecs.berkeley.edu/~hendrycks/imagenet-o.tar", shell=True)
# Untar and move to `root`
call("tar xvf imagenet-o.tar", shell=True)
call(f"mv imagenet-o {root}", shell=True)
ds = ImageFolder(root=root, transform=transform, **kwargs)
ds.classes = classnames["imagenet1k"]
imagenet_o_wnids = ['n01443537', 'n01704323', 'n01770081', 'n01784675', 'n01819313', 'n01820546', 'n01910747', 'n01917289', 'n01968897', 'n02074367', 'n02317335', 'n02319095', 'n02395406', 'n02454379', 'n02606052', 'n02655020', 'n02666196', 'n02672831', 'n02730930', 'n02777292', 'n02783161', 'n02786058', 'n02787622', 'n02791270', 'n02808304', 'n02817516', 'n02841315', 'n02865351', 'n02877765', 'n02892767', 'n02906734', 'n02910353', 'n02916936', 'n02948072', 'n02965783', 'n03000134', 'n03000684', 'n03017168', 'n03026506', 'n03032252', 'n03075370', 'n03109150', 'n03126707', 'n03134739', 'n03160309', 'n03196217', 'n03207743', 'n03218198', 'n03223299', 'n03240683', 'n03271574', 'n03291819', 'n03297495', 'n03314780', 'n03325584', 'n03344393', 'n03347037', 'n03372029', 'n03376595', 'n03388043', 'n03388183', 'n03400231', 'n03445777', 'n03457902', 'n03467068', 'n03482405', 'n03483316', 'n03494278', 'n03530642', 'n03544143', 'n03584829', 'n03590841', 'n03598930', 'n03602883', 'n03649909', 'n03661043', 'n03666591', 'n03676483', 'n03692522', 'n03706229', 'n03717622', 'n03720891', 'n03721384', 'n03724870', 'n03729826', 'n03733131', 'n03733281', 'n03742115', 'n03786901', 'n03788365', 'n03794056', 'n03804744', 'n03814639', 'n03814906', 'n03825788', 'n03840681', 'n03843555', 'n03854065', 'n03857828', 'n03868863', 'n03874293', 'n03884397', 'n03891251', 'n03908714', 'n03920288', 'n03929660', 'n03930313', 'n03937543', 'n03942813', 'n03944341', 'n03961711', 'n03970156', 'n03982430', 'n03991062', 'n03995372', 'n03998194', 'n04005630', 'n04023962', 'n04033901', 'n04040759', 'n04067472', 'n04074963', 'n04116512', 'n04118776', 'n04125021', 'n04127249', 'n04131690', 'n04141975', 'n04153751', 'n04154565', 'n04201297', 'n04204347', 'n04209133', 'n04209239', 'n04228054', 'n04235860', 'n04243546', 'n04252077', 'n04254120', 'n04258138', 'n04265275', 'n04270147', 'n04275548', 'n04330267', 'n04332243', 'n04336792', 'n04347754', 'n04371430', 'n04371774', 'n04372370', 'n04376876', 'n04409515', 'n04417672', 'n04418357', 'n04423845', 'n04429376', 'n04435653', 'n04442312', 'n04482393', 'n04501370', 'n04507155', 'n04525305', 'n04542943', 'n04554684', 'n04557648', 'n04562935', 'n04579432', 'n04591157', 'n04597913', 'n04599235', 'n06785654', 'n06874185', 'n07615774', 'n07693725', 'n07695742', 'n07697537', 'n07711569', 'n07714990', 'n07715103', 'n07716358', 'n07717410', 'n07718472', 'n07720875', 'n07742313', 'n07745940', 'n07747607', 'n07749582', 'n07753275', 'n07753592', 'n07754684', 'n07768694', 'n07836838', 'n07871810', 'n07873807', 'n07880968', 'n09229709', 'n09472597', 'n12144580', 'n12267677', 'n13052670']
imagenet_o_mask = [wnid in set(imagenet_o_wnids) for wnid in all_imagenet_wordnet_ids]
ds.classes = [cl for cl, mask in zip(ds.classes, imagenet_o_mask) if mask]
return ds
elif dataset_name == "objectnet":
# downloadable from https://objectnet.dev/downloads/objectnet-1.0.zip or https://www.dropbox.com/s/raw/cxeztdtm16nzvuw/objectnet-1.0.zip
if not os.path.exists(root):
print("Downloading objectnet...")
call("wget https://objectnet.dev/downloads/objectnet-1.0.zip", shell=True)
            # Unzip and move to `root`
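            # the zip is password-protected and large enough to trip newer unzip's zip-bomb heuristic, hence the -P password and the UNZIP_DISABLE_ZIPBOMB_DETECTION override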
call("UNZIP_DISABLE_ZIPBOMB_DETECTION=TRUE unzip -P objectnetisatestset objectnet-1.0.zip", shell=True)
os.makedirs(root)
call(f"mv objectnet-1.0 {root}", shell=True)
call(f"cp {root}/objectnet-1.0/mappings/* {root}", shell=True)
ds = objectnet.ObjectNetDataset(root=root, transform=transform)
return ds
elif dataset_name == "voc2007":
return voc2007.PASCALVoc2007Cropped(root=root, set="train" if train else "test", transform=transform, download=download, **kwargs)
elif dataset_name == "voc2007_multilabel":
return voc2007.PASCALVoc2007(root=root, set="train" if train else "test", transform=transform, download=download, **kwargs)
elif dataset_name == "mscoco_captions":
# https://gist.github.com/mehdidc/0745a72acb12d3fc9bf91bda65e1ebb6 (annotations)
# http://images.cocodataset.org/zips/val2014.zip
if not os.path.exists(root):
print("Downloading mscoco_captions...")
call("wget http://images.cocodataset.org/zips/val2014.zip", shell=True)
call("unzip val2014.zip", shell=True)
call(f"mv val2014 {root}", shell=True)
if not os.path.exists(annotation_file):
# Download COCO Karpathy 5K test set
annotation_file = f"{root}/coco_test_karpathy.json"
call(f"wget https://gist.githubusercontent.com/mehdidc/0745a72acb12d3fc9bf91bda65e1ebb6/raw/4e1ab923dea5513280e8c55f7630ca5c0ecbb80a/coco_test_karpathy.json --output-document={annotation_file}", shell=True)
return CocoCaptions(root=root, annFile=annotation_file, transform=transform, **kwargs)
elif dataset_name == "flickr30k":
# downloadable from https://www.kaggle.com/datasets/adityajn105/flickr30k
# https://gist.github.com/mehdidc/0745a72acb12d3fc9bf91bda65e1ebb6 (annotations)
# `kaggle datasets download -d adityajn105/flickr30k`
if not os.path.exists(root):
# Automatic download
print("Downloading flickr30k...")
if not has_kaggle():
print("Kaggle is needed to download the dataset. Please install it via `pip install kaggle`")
sys.exit(1)
call("kaggle datasets download -d adityajn105/flickr30k", shell=True)
call(f"unzip flickr30k.zip", shell=True)
call(f"mv Images {root}", shell=True)
call(f"mv captions.txt {root}", shell=True)
if not os.path.exists(annotation_file):
# Download Flickr30K Karpathy test set
annotation_file = f"{root}/flickr30k_test_karpathy.txt"
call(f"wget https://gist.githubusercontent.com/mehdidc/0745a72acb12d3fc9bf91bda65e1ebb6/raw/4e1ab923dea5513280e8c55f7630ca5c0ecbb80a/flickr30k_test_karpathy.txt --output-document={annotation_file}", shell=True)
return flickr.Flickr(root=root, ann_file=annotation_file, transform=transform, **kwargs)
elif dataset_name == "flickr8k":
# downloadable from https://www.kaggle.com/datasets/adityajn105/flickr8k
# `kaggle datasets download -d adityajn105/flickr8k`
if not os.path.exists(root):
# Automatic download
print("Downloading flickr8k...")
if not has_kaggle():
print("Kaggle is needed to download the dataset. Please install it via `pip install kaggle`")
sys.exit(1)
call("kaggle datasets download -d adityajn105/flickr8k", shell=True)
call(f"unzip flickr8k.zip", shell=True)
call(f"mv Images {root}", shell=True)
call(f"mv captions.txt {root}", shell=True)
if not os.path.exists(annotation_file):
# Download Flickr8K Karpathy test set
annotation_file = f"{root}/flickr8k_test_karpathy.txt"
call(f"wget https://gist.githubusercontent.com/mehdidc/0745a72acb12d3fc9bf91bda65e1ebb6/raw/6d1d31f8da09310f775905e9ea89aa42d0739f22/flickr8k_test_karpathy.txt --output-document={annotation_file}", shell=True)
return flickr.Flickr(root=root, ann_file=annotation_file, transform=transform, **kwargs)
elif dataset_name == "food101":
ds = Food101(root=root, split="train" if train else "test", transform=transform, download=download, **kwargs)
        # we use the default class names; we just replace "_" with spaces
        # to delimit words
ds.classes = [cl.replace("_", " ") for cl in ds.classes]
return ds
elif dataset_name == "sun397":
        # we use the default class names; we just replace "_" and "/" with spaces
        # to delimit words
ds = SUN397(root=root, transform=transform, download=download, **kwargs)
ds.classes = [cl.replace("_", " ").replace("/", " ") for cl in ds.classes]
return ds
elif dataset_name == "cars":
return StanfordCars(root=root, split="train" if train else "test", transform=transform, download=download, **kwargs)
elif dataset_name == "fgvc_aircraft":
return FGVCAircraft(root=root, annotation_level="variant", split="train" if train else "test", transform=transform, download=download, **kwargs)
elif dataset_name == "dtd":
return DTD(root=root, split="train" if train else "test", transform=transform, download=download, **kwargs)
elif dataset_name == "pets":
return OxfordIIITPet(root=root, split="train" if train else "test", target_types="category", transform=transform, download=download, **kwargs)
elif dataset_name == "caltech101":
        # broken download link (Google Drive can't be downloaded automatically), fixed by this PR https://github.com/pytorch/vision/pull/5645
        # also available as "vtab/caltech101" using VTAB splits; we advise using the VTAB version rather than this one,
        # since this one (torchvision) has no pre-defined test split
ds = caltech101.Caltech101(root=root, target_type="category", transform=transform, download=download, **kwargs)
ds.classes = classnames["caltech101"]
return ds
elif dataset_name == "flowers":
ds = Flowers102(root=root, split="train" if train else "test", transform=transform, download=download, **kwargs)
        # class indices started at 1 until it was fixed in a PR (#TODO link of the PR)
        # with older torchvision versions, fix it using a target transform that decrements the label index
        # TODO figure out the minimal torchvision version needed instead of decrementing
if ds[0][1] == 1:
ds.target_transform = lambda y:y-1
ds.classes = classnames["flowers"]
return ds
elif dataset_name == "mnist":
ds = MNIST(root=root, train=train, transform=transform, download=download, **kwargs)
ds.classes = classnames["mnist"]
return ds
elif dataset_name == "stl10":
return STL10(root=root, split="train" if train else "test", transform=transform, download=download, **kwargs)
elif dataset_name == "eurosat":
ds = EuroSAT(root=root, transform=transform, download=download, **kwargs)
ds.classes = classnames["eurosat"]
return ds
elif dataset_name == "gtsrb":
ds = GTSRB(root=root, split="train" if train else "test", transform=transform, download=download, **kwargs)
ds.classes = classnames["gtsrb"]
return ds
elif dataset_name == "country211":
ds = Country211(root=root, split="train" if train else "test", transform=transform, download=download, **kwargs)
ds.classes = classnames["country211"]
return ds
elif dataset_name == "pcam":
# Dead link. Fixed by this PR on torchvision https://github.com/pytorch/vision/pull/5645
# TODO figure out minimal torchvision version needed
ds = PCAM(root=root, split="train" if train else "test", transform=transform, download=download, **kwargs)
ds.classes = classnames["pcam"]
return ds
elif dataset_name == "renderedsst2":
return RenderedSST2(root=root, split="train" if train else "test", transform=transform, download=download, **kwargs)
elif dataset_name == "fer2013":
# Downloadable from https://www.kaggle.com/datasets/msambare/fer2013
# `kaggle datasets download -d msambare/fer2013`
if not os.path.exists(root):
# Automatic download
print("Downloading fer2013...")
if not has_kaggle():
print("Kaggle is needed to download the dataset. Please install it via `pip install kaggle`")
sys.exit(1)
call("kaggle datasets download -d msambare/fer2013", shell=True)
call(f"unzip fer2013.zip -d {root}", shell=True)
root = os.path.join(root, "train" if train else "test")
ds = ImageFolder(root=root, transform=transform)
ds.classes = classnames["fer2013"]
return ds
elif dataset_name.startswith("tfds/"):
        # TFDS dataset support, using `timm` and `tensorflow_datasets`
prefix, *name_list = dataset_name.split("/")
name = "/".join(name_list)
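        # e.g. "tfds/cifar10" -> name "cifar10"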
return build_tfds_dataset(name, download=download, split=split, data_dir=root, transform=transform)
elif dataset_name.startswith("vtab/"):
        # VTAB dataset support, using `tensorflow_datasets` and `task_adaptation`
prefix, *name_list = dataset_name.split("/")
name = "/".join(name_list)
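        # e.g. "vtab/clevr_count_all" -> name "clevr_count_all", handled by build_vtab_dataset below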
return build_vtab_dataset(name, download=download, split=split, data_dir=root, transform=transform)
elif dataset_name == "dummy":
return Dummy()
else:
raise ValueError(f"Unsupported dataset: {dataset_name}.")
class Dummy():
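    """Trivial one-sample dataset: returns a single all-zero 3x224x224 image tensor with label 0, handy for smoke tests."""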
def __init__(self):
self.classes = ["blank image", "noisy image"]
def __getitem__(self, i):
return torch.zeros(3,224,224), 0
def __len__(self):
return 1
def get_dataset_collate_fn(dataset_name):
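    """Return the collate function for `dataset_name`: retrieval datasets keep raw caption texts, everything else uses the default collate."""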
if dataset_name in ("mscoco_captions", "flickr30k", "flickr8k"):
return image_captions_collate_fn
else:
return default_collate
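# Helpers to check for optional download tools on the PATH; `call(...) == 0` means the binary was found.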
def has_gdown():
return call("which gdown", shell=True) == 0
def has_kaggle():
return call("which kaggle", shell=True) == 0
def build_vtab_dataset(dataset_name, transform, download=True, split="test", data_dir="root"):
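    """Build a VTAB task wrapped in a `VTABIterableDataset`; class names default to what TFDS provides unless overridden for specific datasets."""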
# Using VTAB splits instead of default TFDS splits
from .tfds import VTABIterableDataset, disable_gpus_on_tensorflow, download_tfds_dataset
# avoid Tensorflow owning GPUs to not clash with PyTorch
disable_gpus_on_tensorflow()
# by default we take classes from TFDS (default behavior if `classes` stays None),
# except for the datasets that will override `classes` (e.g., clevr_*)
classes = None
if dataset_name == "caltech101":
from task_adaptation.data.caltech import Caltech101
tfds_dataset = Caltech101(data_dir=data_dir)
classes = classnames["caltech101_vtab"]
elif dataset_name == "cars":
from task_adaptation.data.cars import CarsData
tfds_dataset = CarsData(data_dir=data_dir)
elif dataset_name in ("cifar10", "cifar100"):
from task_adaptation.data.cifar import CifarData
tfds_dataset = CifarData(data_dir=data_dir, num_classes=10 if dataset_name == "cifar10" else 100)
elif dataset_name.startswith("clevr_"):
from task_adaptation.data.clevr import CLEVRData
task = _extract_task(dataset_name)
assert task in ("count_all", "closest_object_distance")
tfds_dataset = CLEVRData(task=task, data_dir=data_dir)
if task == "count_all":
classes = classnames["clevr_count_all"]
elif task == "closest_object_distance":
classes = classnames["clevr_closest_object_distance"]
else:
raise ValueError(f"non supported: {task}")
elif dataset_name == "cub":
from task_adaptation.data.cub import CUB2011Data
tfds_dataset = CUB2011Data(data_dir=data_dir)
elif dataset_name == "diabetic_retinopathy":
# Needs manual download from Kaggle
        # 1) run `kaggle competitions download -c diabetic-retinopathy-detection` in $ROOT/downloads/manual
        # 2) extract the archives in $ROOT/downloads/manual
if not os.path.exists(data_dir):
# Automatic download
print("Downloading diabetic_retinopathy...")
if not has_kaggle():
print("Kaggle is needed to download the dataset. Please install it via `pip install kaggle`")
sys.exit(1)
os.makedirs(os.path.join(data_dir, "downloads", "manual"))
call(f"kaggle competitions download -c diabetic-retinopathy-detection -p {data_dir}/downloads/manual", shell=True)
call(f"cd {data_dir}/downloads/manual;unzip diabetic-retinopathy-detection.zip;cat train.zip*>train.zip;cat test.zip*>test.zip;unzip train.zip; unzip test.zip;unzip sample.zip;unzip trainLabels.csv.zip", shell=True)
from task_adaptation.data.diabetic_retinopathy import RetinopathyData
tfds_dataset = RetinopathyData(config="btgraham-300", data_dir=data_dir)
classes = classnames["diabetic_retinopathy"]
elif dataset_name == "dmlab":
from task_adaptation.data.dmlab import DmlabData
        download_tfds_dataset("dmlab", data_dir=data_dir) # it's not called in the original VTAB code, so we do it explicitly
tfds_dataset = DmlabData(data_dir=data_dir)
classes = classnames["dmlab"]
elif dataset_name.startswith("dsprites_"):
from task_adaptation.data.dsprites import DSpritesData
task = _extract_task(dataset_name)
assert task in ("label_shape", "label_scale", "label_orientation", "label_x_position", "label_y_position")
tfds_dataset = DSpritesData(task, data_dir=data_dir)
classes = tfds_dataset._dataset_builder.info.features[task].names
elif dataset_name == "dtd":
from task_adaptation.data.dtd import DTDData
tfds_dataset = DTDData(data_dir=data_dir)
elif dataset_name == "eurosat":
from task_adaptation.data.eurosat import EurosatData
tfds_dataset = EurosatData(subset="rgb", data_key="image", data_dir=data_dir)
classes = classnames["eurosat"]
elif dataset_name == "food101":
from task_adaptation.data.food101 import Food101Data
tfds_dataset = Food101Data(data_dir=data_dir)
elif dataset_name == "inaturalist":
from task_adaptation.data.inaturalist import INaturalistData
tfds_dataset = INaturalistData(data_dir=data_dir, year=2017)
elif dataset_name.startswith("kitti_"):
from .kitti import KittiData
task = _extract_task(dataset_name)
assert task in (
"count_all", "count_left", "count_far", "count_near",
"closest_object_distance", "closest_object_x_location",
"count_vehicles", "closest_vehicle_distance",
)
tfds_dataset = KittiData(task=task, data_dir=data_dir)
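        # only `closest_vehicle_distance` has class names defined here; the other tasks accepted by the assert above are not wired up yet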
if task == "closest_vehicle_distance":
classes = classnames["kitti_closest_vehicle_distance"]
else:
raise ValueError(f"Unsupported task: {task}")
elif dataset_name == "flowers":
from task_adaptation.data.oxford_flowers102 import OxfordFlowers102Data
tfds_dataset = OxfordFlowers102Data(data_dir=data_dir)
elif dataset_name == "pets":
from task_adaptation.data.oxford_iiit_pet import OxfordIIITPetData
tfds_dataset = OxfordIIITPetData(data_dir=data_dir)
classes = classnames["pets"]
elif dataset_name == "pcam":
from task_adaptation.data.patch_camelyon import PatchCamelyonData
tfds_dataset = PatchCamelyonData(data_dir=data_dir)
classes = classnames["pcam"]
elif dataset_name == "resisc45":
# Needs download from OneDrive: https://1drv.ms/u/s!AmgKYzARBl5ca3HNaHIlzp_IXjs
        # The archive needs to be put at <DATASET_ROOT>/downloads/manual and then extracted
if not os.path.exists(data_dir):
os.makedirs(os.path.join(data_dir, "downloads", "manual"))
call(f"wget 'https://onedrive.live.com/download?resid=5C5E061130630A68!107&authkey=!AHHNaHIlzp_IXjs' --output-document={data_dir}/downloads/manual/resisc45.rar", shell=True)
call(f"cd {data_dir}/downloads/manual;unrar x resisc45.rar", shell=True)
from task_adaptation.data.resisc45 import Resisc45Data
tfds_dataset = Resisc45Data(data_dir=data_dir)
elif dataset_name.startswith("smallnorb_"):
from task_adaptation.data.smallnorb import SmallNORBData
task = _extract_task(dataset_name)
assert task in ("label_category", "label_elevation", "label_azimuth", "label_lighting")
tfds_dataset = SmallNORBData(predicted_attribute=task, data_dir=data_dir)
classes = tfds_dataset._dataset_builder.info.features[task].names
elif dataset_name == "sun397":
from task_adaptation.data.sun397 import Sun397Data
        # FIXME There is a problem with `sun397`: when TFDS tries to download it,
        # there is an image that cannot be decoded. For the time being
        # we will use torchvision's SUN397 instead.
tfds_dataset = Sun397Data(config="tfds", data_dir=data_dir)
elif dataset_name == "svhn":
from task_adaptation.data.svhn import SvhnData
tfds_dataset = SvhnData(data_dir=data_dir)
classes = classnames["svhn"]
else:
raise ValueError(f"Unsupported dataset: {dataset_name}")
ds = VTABIterableDataset(
tfds_dataset,
input_name="image", label_name="label",
transform=transform,
target_transform=int,
split=split,
classes=classes,
)
return ds
def build_tfds_dataset(name, transform, download=True, split="test", data_dir="root", classes=None):
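    """Load a TFDS dataset via `timm.data.create_dataset`, optionally downloading/preparing it first, and attach class names to `ds.classes`."""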
from .tfds import disable_gpus_on_tensorflow
disable_gpus_on_tensorflow()
import tensorflow_datasets as tfds
import timm
builder = tfds.builder(name, data_dir=data_dir)
if download:
builder.download_and_prepare()
splits = list(builder.info.splits.keys())
assert split in splits, (split, splits)
ds = timm.data.create_dataset(f"tfds/{name}", data_dir, split=split, transform=transform, target_transform=int)
ds.classes = builder.info.features['label'].names if classes is None else classes
return ds
def _extract_task(dataset_name):
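    """Strip the dataset prefix from a task-style name, e.g. _extract_task("clevr_count_all") -> "count_all"."""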
prefix, *task_name_list = dataset_name.split("_")
task = "_".join(task_name_list)
return task
def image_captions_collate_fn(batch):
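    """Collate (image, captions) pairs: images are stacked with the default collate, captions are returned as-is (one entry per image)."""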
transposed = list(zip(*batch))
imgs = default_collate(transposed[0])
texts = transposed[1]
return imgs, texts
def get_zeroshot_classification_templates(dataset_name):
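    """Return the prompt templates for a dataset; "tfds/<name>" and "vtab/<name>" map to the templates of <name>, and unknown names fall back to the ImageNet-1k prompts."""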
if dataset_name.startswith("tfds/") or dataset_name.startswith("vtab/"):
name = dataset_name.split("/")[1]
else:
name = dataset_name
return zeroshot_classification_templates.get(name, DEFAULT_ZEROSHOT_CLASSIFICATION_TEMPLATES)
# Zero-shot classification templates, collected from several sources:
# - CLIP paper (https://github.com/openai/CLIP/blob/main/data/prompts.md)
# - LiT paper (https://arxiv.org/pdf/2111.07991.pdf)
# - SLIP paper (https://github.com/facebookresearch/SLIP/blob/main/templates.json)
# Some are fixed manually
zeroshot_classification_templates = {
"cifar10": [
"a photo of a {c}.",
"a blurry photo of a {c}.",
"a black and white photo of a {c}.",
"a low contrast photo of a {c}.",
"a high contrast photo of a {c}.",
"a bad photo of a {c}.",
"a good photo of a {c}.",
"a photo of a small {c}.",
"a photo of a big {c}.",
"a photo of the {c}.",
"a blurry photo of the {c}.",
"a black and white photo of the {c}.",
"a low contrast photo of the {c}.",
"a high contrast photo of the {c}.",
"a bad photo of the {c}.",
"a good photo of the {c}.",
"a photo of the small {c}.",
"a photo of the big {c}."
],
"cifar100":[
"a photo of a {c}.",
"a blurry photo of a {c}.",
"a black and white photo of a {c}.",
"a low contrast photo of a {c}.",
"a high contrast photo of a {c}.",
"a bad photo of a {c}.",
"a good photo of a {c}.",
"a photo of a small {c}.",
"a photo of a big {c}.",
"a photo of the {c}.",
"a blurry photo of the {c}.",
"a black and white photo of the {c}.",
"a low contrast photo of the {c}.",
"a high contrast photo of the {c}.",
"a bad photo of the {c}.",
"a good photo of the {c}.",
"a photo of the small {c}.",
"a photo of the big {c}."
],
"imagenet1k": [
"a bad photo of a {c}.",
"a photo of many {c}.",
"a sculpture of a {c}.",
"a photo of the hard to see {c}.",
"a low resolution photo of the {c}.",
"a rendering of a {c}.",
"graffiti of a {c}.",
"a bad photo of the {c}.",
"a cropped photo of the {c}.",
"a tattoo of a {c}.",
"the embroidered {c}.",
"a photo of a hard to see {c}.",
"a bright photo of a {c}.",
"a photo of a clean {c}.",
"a photo of a dirty {c}.",
"a dark photo of the {c}.",
"a drawing of a {c}.",
"a photo of my {c}.",
"the plastic {c}.",
"a photo of the cool {c}.",
"a close-up photo of a {c}.",
"a black and white photo of the {c}.",
"a painting of the {c}.",
"a painting of a {c}.",
"a pixelated photo of the {c}.",
"a sculpture of the {c}.",
"a bright photo of the {c}.",
"a cropped photo of a {c}.",
"a plastic {c}.",
"a photo of the dirty {c}.",
"a jpeg corrupted photo of a {c}.",
"a blurry photo of the {c}.",
"a photo of the {c}.",
"a good photo of the {c}.",
"a rendering of the {c}.",
"a {c} in a video game.",
"a photo of one {c}.",
"a doodle of a {c}.",
"a close-up photo of the {c}.",
"a photo of a {c}.",
"the origami {c}.",
"the {c} in a video game.",
"a sketch of a {c}.",
"a doodle of the {c}.",
"a origami {c}.",
"a low resolution photo of a {c}.",
"the toy {c}.",
"a rendition of the {c}.",
"a photo of the clean {c}.",
"a photo of a large {c}.",
"a rendition of a {c}.",
"a photo of a nice {c}.",
"a photo of a weird {c}.",
"a blurry photo of a {c}.",
"a cartoon {c}.",
"art of a {c}.",
"a sketch of the {c}.",
"a embroidered {c}.",
"a pixelated photo of a {c}.",
"itap of the {c}.",
"a jpeg corrupted photo of the {c}.",
"a good photo of a {c}.",
"a plushie {c}.",
"a photo of the nice {c}.",
"a photo of the small {c}.",
"a photo of the weird {c}.",
"the cartoon {c}.",
"art of the {c}.",
"a drawing of the {c}.",
"a photo of the large {c}.",
"a black and white photo of a {c}.",
"the plushie {c}.",
"a dark photo of a {c}.",
"itap of a {c}.",
"graffiti of the {c}.",
"a toy {c}.",
"itap of my {c}.",
"a photo of a cool {c}.",
"a photo of a small {c}.",
"a tattoo of the {c}."
],
"food101":[
'a photo of {c}, a type of food.'
],
"sun397":[
'a photo of a {c}.',
'a photo of the {c}.',
],
"cars":[
'a photo of a {c}.',
'a photo of the {c}.',
'a photo of my {c}.',
'i love my {c}!',
'a photo of my dirty {c}.',
'a photo of my clean {c}.',
'a photo of my new {c}.',
'a photo of my old {c}.',
],
"fgvc_aircraft":[
'a photo of a {c}, a type of aircraft.',
'a photo of the {c}, a type of aircraft.',
],
"dtd":[
'a photo of a {c} texture.',
'a photo of a {c} pattern.',
'a photo of a {c} thing.',
'a photo of a {c} object.',
'a photo of the {c} texture.',
'a photo of the {c} pattern.',
'a photo of the {c} thing.',
'a photo of the {c} object.',
],
"pets":[
'a photo of a {c}, a type of pet.',
],
"caltech101":[
'a photo of a {c}.',
'a painting of a {c}.',
'a plastic {c}.',
'a sculpture of a {c}.',
'a sketch of a {c}.',
'a tattoo of a {c}.',
'a toy {c}.',
'a rendition of a {c}.',
'a embroidered {c}.',
'a cartoon {c}.',
'a {c} in a video game.',
'a plushie {c}.',
'a origami {c}.',
'art of a {c}.',
'graffiti of a {c}.',
'a drawing of a {c}.',
'a doodle of a {c}.',
'a photo of the {c}.',
'a painting of the {c}.',
'the plastic {c}.',
'a sculpture of the {c}.',
'a sketch of the {c}.',
'a tattoo of the {c}.',
'the toy {c}.',
'a rendition of the {c}.',
'the embroidered {c}.',
'the cartoon {c}.',
'the {c} in a video game.',
'the plushie {c}.',
'the origami {c}.',
'art of the {c}.',
'graffiti of the {c}.',
'a drawing of the {c}.',
'a doodle of the {c}.',
],
"flowers":[
'a photo of a {c}, a type of flower.',
],
"mnist": [
'a photo of the number: "{c}".',
],
"stl10": [
'a photo of a {c}.',
'a photo of the {c}.',
],
"eurosat":[
'a centered satellite photo of {c}.',
'a centered satellite photo of a {c}.',
'a centered satellite photo of the {c}.',
],
"gtsrb":[
'a zoomed in photo of a "{c}" traffic sign.',
'a centered photo of a "{c}" traffic sign.',
'a close up photo of a "{c}" traffic sign.',
],
"country211":[
'a photo i took in {c}.',
'a photo i took while visiting {c}.',
'a photo from my home country of {c}.',
'a photo from my visit to {c}.',
'a photo showing the country of {c}.',
],
"renderedsst2":[
'a {c} review of a movie.',
],
"voc2007":[
'a photo of a {c}.',
],
"voc2007_multilabel":[
'a photo of a {c}.',
],
"fer2013":[
'a photo of a {c} looking face.',
'a photo of a face showing the emotion: {c}.',
'a photo of a face looking {c}.',
'a face that looks {c}.',
'they look {c}.',
'look at how {c} they are.',
],
"clevr_count_all":[
"a picture of {c} objects"
],
"clevr_closest_object_distance":[
"{c} shapes."
],
"pcam":[
"a histopathology slide showing {c}",
"histopathology image of {c}"
],
"svhn":[
"a photo of the number {c} written on a sign",
"an outdoor house number {c}",
"the number {c} in the center of the image",
"an outdoor number {c} writte on a sign",
"an outdoor number {c}",
"a centered image of the number {c}",
],
"resisc45":[
"a sattelite image of {c}",
"an aerial view of {c}",
"a sattelite photo of {c}",
"{c} from above",
],
"kitti_closest_vehicle_distance":[
"{c}"
],
"smallnorb_label_azimuth":[
"an object rotated at {c}",
"something rotated at {c}",
"{c} rotation",
"something at a {c} angle",
],
"smallnorb_label_elevation":[
"an object rotated at {c}",
"something rotated at {c}",
"{c} rotation",
"something at a {c} angle",
],
"dsprites_label_x_position": [
"an object located at position {c}% on the horizontal axis",
],
"dsprites_label_orientation":[
"an object rotated at {c}",
"something rotated at {c}",
"{c} rotation",
"something at a {c} angle",
],
"dmlab":[
"{c}"
],
"diabetic_retinopathy":[
"a retinal image with {c}",
],
"dummy":[
"a photo of a {c}"
],
}
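# Each template above contains a "{c}" placeholder that is presumably filled with a class name at evaluation time,
# e.g. "a photo of a {c}.".format(c="dog") -> "a photo of a dog."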
# Class names for different datasets
# In general, we use the default class names from torchvision or VTAB/TFDS,
# except for the datasets defined in `classnames`
# These classnames are collected from various sources:
# - CLIP paper (https://github.com/openai/CLIP/blob/main/data/prompts.md)
# - LiT paper (https://arxiv.org/pdf/2111.07991.pdf)
# - SLIP paper (https://github.com/facebookresearch/SLIP/blob/main/templates.json)
# Some are fixed manually
classnames = dict(
flowers = [
'pink primrose',
'hard-leaved pocket orchid',
'canterbury bells',
'sweet pea',
'english marigold',
'tiger lily',
'moon orchid',
'bird of paradise',
'monkshood',
'globe thistle',
'snapdragon',
"colt's foot",
'king protea',
'spear thistle',
'yellow iris',
'globe flower',
'purple coneflower',
'peruvian lily',
'balloon flower',
'giant white arum lily',
'fire lily',
'pincushion flower',
'fritillary',
'red ginger',
'grape hyacinth',
'corn poppy',
'prince of wales feathers',
'stemless gentian',
'artichoke',
'sweet william',
'carnation',
'garden phlox',
'love in the mist',
'mexican aster',
'alpine sea holly',
'ruby-lipped cattleya',
'cape flower',
'great masterwort',
'siam tulip',
'lenten rose',
'barbeton daisy',
'daffodil',
'sword lily',
'poinsettia',
'bolero deep blue',
'wallflower',
'marigold',
'buttercup',
'oxeye daisy',
'common dandelion',
'petunia',
'wild pansy',
'primula',
'sunflower',
'pelargonium',
'bishop of llandaff',
'gaura',
'geranium',
'orange dahlia',
'pink and yellow dahlia',
'cautleya spicata',
'japanese anemone',
'black-eyed susan',
'silverbush',
'californian poppy',
'osteospermum',
'spring crocus',
'bearded iris',
'windflower',
'tree poppy',
'gazania',
'azalea',
'water lily',
'rose',
'thorn apple',
'morning glory',
'passion flower',
'lotus',
'toad lily',
'anthurium',
'frangipani',
'clematis',
'hibiscus',
'columbine',
'desert-rose',
'tree mallow',
'magnolia',
'cyclamen',
'watercress',
'canna lily',
'hippeastrum',
'bee balm',
'air plant',
'foxglove',
'bougainvillea',
'camellia',
'mallow',
'mexican petunia',
'bromelia',
'blanket flower',
'trumpet creeper',
'blackberry lily',
],
gtsrb= [
'red and white circle 20 kph speed limit',
'red and white circle 30 kph speed limit',
'red and white circle 50 kph speed limit',
'red and white circle 60 kph speed limit',
'red and white circle 70 kph speed limit',
'red and white circle 80 kph speed limit',
'end / de-restriction of 80 kph speed limit',
'red and white circle 100 kph speed limit',
'red and white circle 120 kph speed limit',
'red and white circle red car and black car no passing',
'red and white circle red truck and black car no passing',
'red and white triangle road intersection warning',
'white and yellow diamond priority road',
'red and white upside down triangle yield right-of-way',
'stop',
'empty red and white circle',
'red and white circle no truck entry',
        'red circle with white horizontal stripe no entry',
'red and white triangle with exclamation mark warning',
'red and white triangle with black left curve approaching warning',
'red and white triangle with black right curve approaching warning',
'red and white triangle with black double curve approaching warning',
'red and white triangle rough / bumpy road warning',
'red and white triangle car skidding / slipping warning',
'red and white triangle with merging / narrow lanes warning',
'red and white triangle with person digging / construction / road work warning',
'red and white triangle with traffic light approaching warning',
'red and white triangle with person walking warning',
'red and white triangle with child and person walking warning',
        'red and white triangle with bicycle warning',
'red and white triangle with snowflake / ice warning',
'red and white triangle with deer warning',
'white circle with gray strike bar no speed limit',
'blue circle with white right turn arrow mandatory',
'blue circle with white left turn arrow mandatory',
'blue circle with white forward arrow mandatory',
'blue circle with white forward or right turn arrow mandatory',
'blue circle with white forward or left turn arrow mandatory',
'blue circle with white keep right arrow mandatory',
'blue circle with white keep left arrow mandatory',
'blue circle with white arrows indicating a traffic circle',
'white circle with gray strike bar indicating no passing for cars has ended',
'white circle with gray strike bar indicating no passing for trucks has ended',
],
country211 = [
'Andorra',
'United Arab Emirates',
'Afghanistan',
'Antigua and Barbuda',
'Anguilla',
'Albania',
'Armenia',
'Angola',
'Antarctica',
'Argentina',
'Austria',
'Australia',
'Aruba',
'Aland Islands',
'Azerbaijan',
'Bosnia and Herzegovina',
'Barbados',
'Bangladesh',
'Belgium',
'Burkina Faso',
'Bulgaria',
'Bahrain',
'Benin',
'Bermuda',
'Brunei Darussalam',
'Bolivia',
'Bonaire, Saint Eustatius and Saba',
'Brazil',
'Bahamas',
'Bhutan',
'Botswana',
'Belarus',
'Belize',
'Canada',
'DR Congo',
'Central African Republic',
'Switzerland',
"Cote d'Ivoire",
'Cook Islands',
'Chile',
'Cameroon',
'China',
'Colombia',
'Costa Rica',
'Cuba',
'Cabo Verde',
'Curacao',
'Cyprus',
'Czech Republic',
'Germany',
'Denmark',
'Dominica',
'Dominican Republic',
'Algeria',
'Ecuador',
'Estonia',
'Egypt',
'Spain',
'Ethiopia',
'Finland',
'Fiji',
'Falkland Islands',
'Faeroe Islands',
'France',
'Gabon',
'United Kingdom',
'Grenada',
'Georgia',
'French Guiana',
'Guernsey',
'Ghana',
'Gibraltar',
'Greenland',
'Gambia',
'Guadeloupe',
'Greece',
'South Georgia and South Sandwich Is.',
'Guatemala',
'Guam',
'Guyana',
'Hong Kong',
'Honduras',
'Croatia',
'Haiti',
'Hungary',
'Indonesia',
'Ireland',
'Israel',
'Isle of Man',
'India',
'Iraq',
'Iran',
'Iceland',
'Italy',
'Jersey',
'Jamaica',
'Jordan',
'Japan',
'Kenya',
'Kyrgyz Republic',
'Cambodia',
'St. Kitts and Nevis',
'North Korea',
'South Korea',
'Kuwait',
'Cayman Islands',
'Kazakhstan',
'Laos',
'Lebanon',
'St. Lucia',
'Liechtenstein',
'Sri Lanka',
'Liberia',
'Lithuania',
'Luxembourg',
'Latvia',
'Libya',
'Morocco',
'Monaco',
'Moldova',
'Montenegro',
'Saint-Martin',
'Madagascar',
'Macedonia',
'Mali',
'Myanmar',
'Mongolia',
'Macau',
'Martinique',
'Mauritania',
'Malta',
'Mauritius',
'Maldives',
'Malawi',
'Mexico',
'Malaysia',
'Mozambique',
'Namibia',
'New Caledonia',
'Nigeria',
'Nicaragua',
'Netherlands',
'Norway',
'Nepal',
'New Zealand',
'Oman',
'Panama',
'Peru',
'French Polynesia',
'Papua New Guinea',
'Philippines',
'Pakistan',
'Poland',
'Puerto Rico',
'Palestine',
'Portugal',
'Palau',
'Paraguay',
'Qatar',
'Reunion',
'Romania',
'Serbia',
'Russia',
'Rwanda',
'Saudi Arabia',
'Solomon Islands',
'Seychelles',
'Sudan',
'Sweden',
'Singapore',
'St. Helena',
'Slovenia',
'Svalbard and Jan Mayen Islands',
'Slovakia',
'Sierra Leone',
'San Marino',
'Senegal',
'Somalia',
'South Sudan',
'El Salvador',
'Sint Maarten',
'Syria',
'Eswatini',
'Togo',
'Thailand',
'Tajikistan',
'Timor-Leste',
'Turkmenistan',
'Tunisia',
'Tonga',
'Turkey',
'Trinidad and Tobago',
'Taiwan',
'Tanzania',
'Ukraine',
'Uganda',
'United States',
'Uruguay',
'Uzbekistan',
'Vatican',
'Venezuela',
'British Virgin Islands',
'United States Virgin Islands',
'Vietnam',
'Vanuatu',
'Samoa',
'Kosovo',
'Yemen',
'South Africa',
'Zambia',
'Zimbabwe',
],
eurosat = [
'annual crop land',
'forest',
'brushland or shrubland',
'highway or road',
'industrial buildings or commercial buildings',
'pasture land',
'permanent crop land',
'residential buildings or homes or apartments',
'river',
'lake or sea',
],
fer2013 = [
"angry",
"disgusted",
"fearful",
"happy",
"neutral",
"sad",
"surprised",
],
caltech101 = [
'background',
'off-center face',
'centered face',
'leopard',
'motorbike',
'accordion',
'airplane',
'anchor',
'ant',
'barrel',
'bass',
'beaver',
'binocular',
'bonsai',
'brain',
'brontosaurus',
'buddha',
'butterfly',
'camera',
'cannon',
'side of a car',
'ceiling fan',
'cellphone',
'chair',
'chandelier',
'body of a cougar cat',
'face of a cougar cat',
'crab',
'crayfish',
'crocodile',
'head of a crocodile',
'cup',
'dalmatian',
'dollar bill',
'dolphin',
'dragonfly',
'electric guitar',
'elephant',
'emu',
'euphonium',
'ewer',
'ferry',
'flamingo',
'head of a flamingo',
'garfield',
'gerenuk',
'gramophone',
'grand piano',
'hawksbill',
'headphone',
'hedgehog',
'helicopter',
'ibis',
'inline skate',
'joshua tree',
'kangaroo',
'ketch',
'lamp',
'laptop',
'llama',
'lobster',
'lotus',
'mandolin',
'mayfly',
'menorah',
'metronome',
'minaret',
'nautilus',
'octopus',
'okapi',
'pagoda',
'panda',
'pigeon',
'pizza',
'platypus',
'pyramid',
'revolver',
'rhino',
'rooster',
'saxophone',
'schooner',
'scissors',
'scorpion',
'sea horse',
'snoopy (cartoon beagle)',
'soccer ball',
'stapler',
'starfish',
'stegosaurus',
'stop sign',
'strawberry',
'sunflower',
'tick',
'trilobite',
'umbrella',
'watch',
'water lilly',
'wheelchair',
'wild cat',
'windsor chair',
'wrench',
'yin and yang symbol',
],
    # same as `caltech101`, just a different ordering
caltech101_vtab = [
'accordion', 'airplane', 'anchor', 'ant', 'background', 'barrel', 'bass', 'beaver', 'binocular', 'bonsai', 'brain', 'brontosaurus', 'buddha', 'butterfly', 'camera', 'cannon', 'side of a car', 'ceiling fan', 'cellphone', 'chair', 'chandelier', 'body of a cougar cat', 'face of a cougar cat', 'crab', 'crayfish', 'crocodile', 'head of a crocodile', 'cup', 'dalmatian', 'dollar bill', 'dolphin', 'dragonfly', 'electric guitar', 'elephant', 'emu', 'euphonium', 'ewer', 'off-center face', 'centered face', 'ferry', 'flamingo', 'head of a flamingo', 'garfield', 'gerenuk', 'gramophone', 'grand piano', 'hawksbill', 'headphone', 'hedgehog', 'helicopter', 'ibis', 'inline skate', 'joshua tree', 'kangaroo', 'ketch', 'lamp', 'laptop', 'leopard', 'llama', 'lobster', 'lotus', 'mandolin', 'mayfly', 'menorah', 'metronome', 'minaret', 'motorbike', 'nautilus', 'octopus', 'okapi', 'pagoda', 'panda', 'pigeon', 'pizza', 'platypus', 'pyramid', 'revolver', 'rhino', 'rooster', 'saxophone', 'schooner', 'scissors', 'scorpion', 'sea horse', 'snoopy (cartoon beagle)', 'soccer ball', 'stapler', 'starfish', 'stegosaurus', 'stop sign', 'strawberry', 'sunflower', 'tick', 'trilobite', 'umbrella', 'watch', 'water lilly', 'wheelchair', 'wild cat', 'windsor chair', 'wrench', 'yin and yang symbol'
],
imagenet1k = [
"tench", "goldfish", "great white shark", "tiger shark", "hammerhead shark", "electric ray", "stingray", "rooster",
"hen", "ostrich", "brambling", "goldfinch", "house finch", "junco", "indigo bunting", "American robin", "bulbul",
"jay", "magpie", "chickadee", "American dipper", "kite (bird of prey)", "bald eagle", "vulture", "great grey owl",
"fire salamander", "smooth newt", "newt", "spotted salamander", "axolotl", "American bullfrog", "tree frog", "tailed frog",
"loggerhead sea turtle", "leatherback sea turtle", "mud turtle", "terrapin", "box turtle", "banded gecko", "green iguana",
"Carolina anole", "desert grassland whiptail lizard", "agama", "frilled-necked lizard", "alligator lizard", "Gila monster",
"European green lizard", "chameleon", "Komodo dragon", "Nile crocodile", "American alligator", "triceratops", "worm snake",
"ring-necked snake", "eastern hog-nosed snake", "smooth green snake", "kingsnake", "garter snake", "water snake", "vine snake",
"night snake", "boa constrictor", "African rock python", "Indian cobra", "green mamba", "sea snake", "Saharan horned viper",
"eastern diamondback rattlesnake", "sidewinder rattlesnake", "trilobite", "harvestman", "scorpion", "yellow garden spider",
"barn spider", "European garden spider", "southern black widow", "tarantula", "wolf spider", "tick", "centipede", "black grouse",
"ptarmigan", "ruffed grouse", "prairie grouse", "peafowl", "quail", "partridge", "african grey parrot", "macaw", "sulphur-crested cockatoo",
"lorikeet", "coucal", "bee eater", "hornbill", "hummingbird", "jacamar", "toucan", "duck", "red-breasted merganser", "goose", "black swan",
"tusker", "echidna", "platypus", "wallaby", "koala", "wombat", "jellyfish", "sea anemone", "brain coral", "flatworm", "nematode",
"conch", "snail", "slug", "sea slug", "chiton", "chambered nautilus", "Dungeness crab", "rock crab", "fiddler crab", "red king crab",
"American lobster", "spiny lobster", "crayfish", "hermit crab", "isopod", "white stork", "black stork", "spoonbill", "flamingo",
"little blue heron", "great egret", "bittern bird", "crane bird", "limpkin", "common gallinule", "American coot", "bustard",
"ruddy turnstone", "dunlin", "common redshank", "dowitcher", "oystercatcher", "pelican", "king penguin", "albatross", "grey whale",
"killer whale", "dugong", "sea lion", "Chihuahua", "Japanese Chin", "Maltese", "Pekingese", "Shih Tzu", "King Charles Spaniel",
"Papillon", "toy terrier", "Rhodesian Ridgeback", "Afghan Hound", "Basset Hound", "Beagle", "Bloodhound", "Bluetick Coonhound",
"Black and Tan Coonhound", "Treeing Walker Coonhound", "English foxhound", "Redbone Coonhound", "borzoi", "Irish Wolfhound",
"Italian Greyhound", "Whippet", "Ibizan Hound", "Norwegian Elkhound", "Otterhound", "Saluki", "Scottish Deerhound", "Weimaraner",
"Staffordshire Bull Terrier", "American Staffordshire Terrier", "Bedlington Terrier", "Border Terrier", "Kerry Blue Terrier",
"Irish Terrier", "Norfolk Terrier", "Norwich Terrier", "Yorkshire Terrier", "Wire Fox Terrier", "Lakeland Terrier", "Sealyham Terrier",
"Airedale Terrier", "Cairn Terrier", "Australian Terrier", "Dandie Dinmont Terrier", "Boston Terrier", "Miniature Schnauzer",
"Giant Schnauzer", "Standard Schnauzer", "Scottish Terrier", "Tibetan Terrier", "Australian Silky Terrier", "Soft-coated Wheaten Terrier",
"West Highland White Terrier", "Lhasa Apso", "Flat-Coated Retriever", "Curly-coated Retriever", "Golden Retriever", "Labrador Retriever",
"Chesapeake Bay Retriever", "German Shorthaired Pointer", "Vizsla", "English Setter", "Irish Setter", "Gordon Setter", "Brittany dog",
"Clumber Spaniel", "English Springer Spaniel", "Welsh Springer Spaniel", "Cocker Spaniel", "Sussex Spaniel", "Irish Water Spaniel", "Kuvasz",
"Schipperke", "Groenendael dog", "Malinois", "Briard", "Australian Kelpie", "Komondor", "Old English Sheepdog", "Shetland Sheepdog", "collie",
"Border Collie", "Bouvier des Flandres dog", "Rottweiler", "German Shepherd Dog", "Dobermann", "Miniature Pinscher", "Greater Swiss Mountain Dog",
"Bernese Mountain Dog", "Appenzeller Sennenhund", "Entlebucher Sennenhund", "Boxer", "Bullmastiff", "Tibetan Mastiff", "French Bulldog",
"Great Dane", "St. Bernard", "husky", "Alaskan Malamute", "Siberian Husky", "Dalmatian", "Affenpinscher", "Basenji", "pug", "Leonberger",
"Newfoundland dog", "Great Pyrenees dog", "Samoyed", "Pomeranian", "Chow Chow", "Keeshond", "brussels griffon", "Pembroke Welsh Corgi",
"Cardigan Welsh Corgi", "Toy Poodle", "Miniature Poodle", "Standard Poodle", "Mexican hairless dog (xoloitzcuintli)", "grey wolf",
"Alaskan tundra wolf", "red wolf or maned wolf", "coyote", "dingo", "dhole", "African wild dog", "hyena", "red fox", "kit fox", "Arctic fox",
"grey fox", "tabby cat", "tiger cat", "Persian cat", "Siamese cat", "Egyptian Mau", "cougar", "lynx", "leopard", "snow leopard", "jaguar",
"lion", "tiger", "cheetah", "brown bear", "American black bear", "polar bear", "sloth bear", "mongoose", "meerkat", "tiger beetle", "ladybug",
"ground beetle", "longhorn beetle", "leaf beetle", "dung beetle", "rhinoceros beetle", "weevil", "fly", "bee", "ant", "grasshopper",
"cricket insect", "stick insect", "cockroach", "praying mantis", "cicada", "leafhopper", "lacewing", "dragonfly", "damselfly",
"red admiral butterfly", "ringlet butterfly", "monarch butterfly", "small white butterfly", "sulphur butterfly", "gossamer-winged butterfly",
"starfish", "sea urchin", "sea cucumber", "cottontail rabbit", "hare", "Angora rabbit", "hamster", "porcupine", "fox squirrel", "marmot",
"beaver", "guinea pig", "common sorrel horse", "zebra", "pig", "wild boar", "warthog", "hippopotamus", "ox", "water buffalo", "bison",
"ram (adult male sheep)", "bighorn sheep", "Alpine ibex", "hartebeest", "impala (antelope)", "gazelle", "arabian camel", "llama", "weasel",
"mink", "European polecat", "black-footed ferret", "otter", "skunk", "badger", "armadillo", "three-toed sloth", "orangutan", "gorilla",
"chimpanzee", "gibbon", "siamang", "guenon", "patas monkey", "baboon", "macaque", "langur", "black-and-white colobus", "proboscis monkey",
"marmoset", "white-headed capuchin", "howler monkey", "titi monkey", "Geoffroy's spider monkey", "common squirrel monkey", "ring-tailed lemur",
"indri", "Asian elephant", "African bush elephant", "red panda", "giant panda", "snoek fish", "eel", "silver salmon", "rock beauty fish",
"clownfish", "sturgeon", "gar fish", "lionfish", "pufferfish", "abacus", "abaya", "academic gown", "accordion", "acoustic guitar",
"aircraft carrier", "airliner", "airship", "altar", "ambulance", "amphibious vehicle", "analog clock", "apiary", "apron", "trash can",
"assault rifle", "backpack", "bakery", "balance beam", "balloon", "ballpoint pen", "Band-Aid", "banjo", "baluster / handrail", "barbell",
"barber chair", "barbershop", "barn", "barometer", "barrel", "wheelbarrow", "baseball", "basketball", "bassinet", "bassoon", "swimming cap",
"bath towel", "bathtub", "station wagon", "lighthouse", "beaker", "military hat (bearskin or shako)", "beer bottle", "beer glass", "bell tower",
"baby bib", "tandem bicycle", "bikini", "ring binder", "binoculars", "birdhouse", "boathouse", "bobsleigh", "bolo tie", "poke bonnet", "bookcase",
"bookstore", "bottle cap", "hunting bow", "bow tie", "brass memorial plaque", "bra", "breakwater", "breastplate", "broom", "bucket", "buckle",
"bulletproof vest", "high-speed train", "butcher shop", "taxicab", "cauldron", "candle", "cannon", "canoe", "can opener", "cardigan",
"car mirror", "carousel", "tool kit", "cardboard box / carton", "car wheel", "automated teller machine", "cassette", "cassette player",
"castle", "catamaran", "CD player", "cello", "mobile phone", "chain", "chain-link fence", "chain mail", "chainsaw", "storage chest",
"chiffonier", "bell or wind chime", "china cabinet", "Christmas stocking", "church", "movie theater", "cleaver", "cliff dwelling", "cloak",
"clogs", "cocktail shaker", "coffee mug", "coffeemaker", "spiral or coil", "combination lock", "computer keyboard", "candy store",
"container ship", "convertible", "corkscrew", "cornet", "cowboy boot", "cowboy hat", "cradle", "construction crane", "crash helmet",
"crate", "infant bed", "Crock Pot", "croquet ball", "crutch", "cuirass", "dam", "desk", "desktop computer", "rotary dial telephone",
"diaper", "digital clock", "digital watch", "dining table", "dishcloth", "dishwasher", "disc brake", "dock", "dog sled", "dome", "doormat",
"drilling rig", "drum", "drumstick", "dumbbell", "Dutch oven", "electric fan", "electric guitar", "electric locomotive", "entertainment center",
"envelope", "espresso machine", "face powder", "feather boa", "filing cabinet", "fireboat", "fire truck", "fire screen", "flagpole", "flute",
"folding chair", "football helmet", "forklift", "fountain", "fountain pen", "four-poster bed", "freight car", "French horn", "frying pan",
"fur coat", "garbage truck", "gas mask or respirator", "gas pump", "goblet", "go-kart", "golf ball", "golf cart", "gondola", "gong", "gown",
"grand piano", "greenhouse", "radiator grille", "grocery store", "guillotine", "hair clip", "hair spray", "half-track", "hammer", "hamper",
"hair dryer", "hand-held computer", "handkerchief", "hard disk drive", "harmonica", "harp", "combine harvester", "hatchet", "holster",
"home theater", "honeycomb", "hook", "hoop skirt", "gymnastic horizontal bar", "horse-drawn vehicle", "hourglass", "iPod", "clothes iron",
"carved pumpkin", "jeans", "jeep", "T-shirt", "jigsaw puzzle", "rickshaw", "joystick", "kimono", "knee pad", "knot", "lab coat", "ladle",
"lampshade", "laptop computer", "lawn mower", "lens cap", "letter opener", "library", "lifeboat", "lighter", "limousine", "ocean liner",
"lipstick", "slip-on shoe", "lotion", "music speaker", "loupe magnifying glass", "sawmill", "magnetic compass", "messenger bag", "mailbox",
"tights", "one-piece bathing suit", "manhole cover", "maraca", "marimba", "mask", "matchstick", "maypole", "maze", "measuring cup",
"medicine cabinet", "megalith", "microphone", "microwave oven", "military uniform", "milk can", "minibus", "miniskirt", "minivan",
"missile", "mitten", "mixing bowl", "mobile home", "ford model t", "modem", "monastery", "monitor", "moped", "mortar and pestle",
"graduation cap", "mosque", "mosquito net", "vespa", "mountain bike", "tent", "computer mouse", "mousetrap", "moving van", "muzzle",
"metal nail", "neck brace", "necklace", "baby pacifier", "notebook computer", "obelisk", "oboe", "ocarina", "odometer", "oil filter",
"pipe organ", "oscilloscope", "overskirt", "bullock cart", "oxygen mask", "product packet / packaging", "paddle", "paddle wheel",
"padlock", "paintbrush", "pajamas", "palace", "pan flute", "paper towel", "parachute", "parallel bars", "park bench", "parking meter",
"railroad car", "patio", "payphone", "pedestal", "pencil case", "pencil sharpener", "perfume", "Petri dish", "photocopier", "plectrum",
"Pickelhaube", "picket fence", "pickup truck", "pier", "piggy bank", "pill bottle", "pillow", "ping-pong ball", "pinwheel", "pirate ship",
"drink pitcher", "block plane", "planetarium", "plastic bag", "plate rack", "farm plow", "plunger", "Polaroid camera", "pole", "police van",
"poncho", "pool table", "soda bottle", "plant pot", "potter's wheel", "power drill", "prayer rug", "printer", "prison", "missile",
"projector", "hockey puck", "punching bag", "purse", "quill", "quilt", "race car", "racket", "radiator", "radio", "radio telescope",
"rain barrel", "recreational vehicle", "fishing casting reel", "reflex camera", "refrigerator", "remote control", "restaurant", "revolver",
"rifle", "rocking chair", "rotisserie", "eraser", "rugby ball", "ruler measuring stick", "sneaker", "safe", "safety pin", "salt shaker",
"sandal", "sarong", "saxophone", "scabbard", "weighing scale", "school bus", "schooner", "scoreboard", "CRT monitor", "screw", "screwdriver",
"seat belt", "sewing machine", "shield", "shoe store", "shoji screen / room divider", "shopping basket", "shopping cart", "shovel",
"shower cap", "shower curtain", "ski", "balaclava ski mask", "sleeping bag", "slide rule", "sliding door", "slot machine", "snorkel",
"snowmobile", "snowplow", "soap dispenser", "soccer ball", "sock", "solar thermal collector", "sombrero", "soup bowl", "keyboard space bar",
"space heater", "space shuttle", "spatula", "motorboat", "spider web", "spindle", "sports car", "spotlight", "stage", "steam locomotive",
"through arch bridge", "steel drum", "stethoscope", "scarf", "stone wall", "stopwatch", "stove", "strainer", "tram", "stretcher", "couch",
"stupa", "submarine", "suit", "sundial", "sunglasses", "sunglasses", "sunscreen", "suspension bridge", "mop", "sweatshirt",
"swim trunks / shorts", "swing", "electrical switch", "syringe", "table lamp", "tank", "tape player", "teapot", "teddy bear", "television",
"tennis ball", "thatched roof", "front curtain", "thimble", "threshing machine", "throne", "tile roof", "toaster", "tobacco shop",
"toilet seat", "torch", "totem pole", "tow truck", "toy store", "tractor", "semi-trailer truck", "tray", "trench coat", "tricycle", "trimaran",
"tripod", "triumphal arch", "trolleybus", "trombone", "hot tub", "turnstile", "typewriter keyboard", "umbrella", "unicycle", "upright piano",
"vacuum cleaner", "vase", "vaulted or arched ceiling", "velvet fabric", "vending machine", "vestment", "viaduct", "violin", "volleyball",
"waffle iron", "wall clock", "wallet", "wardrobe", "military aircraft", "sink", "washing machine", "water bottle", "water jug", "water tower",
"whiskey jug", "whistle", "hair wig", "window screen", "window shade", "Windsor tie", "wine bottle", "airplane wing", "wok", "wooden spoon",
"wool", "split-rail fence", "shipwreck", "sailboat", "yurt", "website", "comic book", "crossword", "traffic or street sign", "traffic light",
"dust jacket", "menu", "plate", "guacamole", "consomme", "hot pot", "trifle", "ice cream", "popsicle", "baguette", "bagel", "pretzel",
"cheeseburger", "hot dog", "mashed potatoes", "cabbage", "broccoli", "cauliflower", "zucchini", "spaghetti squash", "acorn squash",
"butternut squash", "cucumber", "artichoke", "bell pepper", "cardoon", "mushroom", "Granny Smith apple", "strawberry", "orange", "lemon",
"fig", "pineapple", "banana", "jackfruit", "cherimoya (custard apple)", "pomegranate", "hay", "carbonara", "chocolate syrup", "dough",
"meatloaf", "pizza", "pot pie", "burrito", "red wine", "espresso", "tea cup", "eggnog", "mountain", "bubble", "cliff", "coral reef",
"geyser", "lakeshore", "promontory", "sandbar", "beach", "valley", "volcano", "baseball player", "bridegroom", "scuba diver", "rapeseed",
"daisy", "yellow lady's slipper", "corn", "acorn", "rose hip", "horse chestnut seed", "coral fungus", "agaric", "gyromitra",
"stinkhorn mushroom", "earth star fungus", "hen of the woods mushroom", "bolete", "corn cob", "toilet paper"
],
clevr_count_all = [
"three", "four", "five", "six", "seven", "eight", "nine", "ten",
],
clevr_closest_object_distance = [
"very nearby",
"nearby",
"near",
"",
"distant",
"very distant",
],
mnist = [
"0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
],
svhn = [
"zero", "one", "two", "three", "four",
"five", "six", "seven", "eight", "nine",
],
kitti_closest_vehicle_distance = [
"a photo i took of a car on my left or right side.",
"a photo i took with a car nearby.",
"a photo i took with a car in the distance.",
"a photo i took with no car.",
],
dmlab = [
"nearby apple/melon",
"far apple/melon",
"very far apple/melon",
"nearby lemon",
"far lemon",
"very far lemon",
],
pets = [
'Abyssinian', 'American Bulldog', 'American Pit Bull Terrier', 'Basset Hound', 'Beagle', 'Bengal', 'Birman', 'Bombay', 'Boxer', 'British Shorthair', 'Chihuahua',
'Egyptian Mau', 'English Cocker Spaniel', 'English Setter', 'German Shorthaired', 'Great Pyrenees', 'Havanese', 'Japanese Chin',
'Keeshond', 'Leonberger', 'Maine Coon', 'Miniature Pinscher', 'Newfoundland', 'Persian', 'Pomeranian', 'Pug', 'Ragdoll',
'Russian Blue', 'Saint Bernard', 'Samoyed', 'Scottish Terrier', 'Shiba Inu', 'Siamese', 'Sphynx', 'Staffordshire Bull Terrier',
'Wheaten Terrier', 'Yorkshire Terrier'
],
pcam = [
"lymph node",
"lymph node containing metastatic tumor tissue",
],
diabetic_retinopathy = [
"no diabetic retinopathy",
"mild diabetic retinopathy",
"moderate diabetic retinopathy",
"severe diabetic retinopathy",
"proliferative diabetic retinopathy"
],
)
# default template to use when the dataset name does not belong to `zeroshot_classification_templates`
DEFAULT_ZEROSHOT_CLASSIFICATION_TEMPLATES = zeroshot_classification_templates["imagenet1k"]
# used by the ImageNet robustness datasets
all_imagenet_wordnet_ids = ['n01440764', 'n01443537', 'n01484850', 'n01491361', 'n01494475', 'n01496331', 'n01498041', 'n01514668', 'n01514859', 'n01518878', 'n01530575', 'n01531178', 'n01532829', 'n01534433', 'n01537544', 'n01558993', 'n01560419', 'n01580077', 'n01582220', 'n01592084', 'n01601694', 'n01608432', 'n01614925', 'n01616318', 'n01622779', 'n01629819', 'n01630670', 'n01631663', 'n01632458', 'n01632777', 'n01641577', 'n01644373', 'n01644900', 'n01664065', 'n01665541', 'n01667114', 'n01667778', 'n01669191', 'n01675722', 'n01677366', 'n01682714', 'n01685808', 'n01687978', 'n01688243', 'n01689811', 'n01692333', 'n01693334', 'n01694178', 'n01695060', 'n01697457', 'n01698640', 'n01704323', 'n01728572', 'n01728920', 'n01729322', 'n01729977', 'n01734418', 'n01735189', 'n01737021', 'n01739381', 'n01740131', 'n01742172', 'n01744401', 'n01748264', 'n01749939', 'n01751748', 'n01753488', 'n01755581', 'n01756291', 'n01768244', 'n01770081', 'n01770393', 'n01773157', 'n01773549', 'n01773797', 'n01774384', 'n01774750', 'n01775062', 'n01776313', 'n01784675', 'n01795545', 'n01796340', 'n01797886', 'n01798484', 'n01806143', 'n01806567', 'n01807496', 'n01817953', 'n01818515', 'n01819313', 'n01820546', 'n01824575', 'n01828970', 'n01829413', 'n01833805', 'n01843065', 'n01843383', 'n01847000', 'n01855032', 'n01855672', 'n01860187', 'n01871265', 'n01872401', 'n01873310', 'n01877812', 'n01882714', 'n01883070', 'n01910747', 'n01914609', 'n01917289', 'n01924916', 'n01930112', 'n01943899', 'n01944390', 'n01945685', 'n01950731', 'n01955084', 'n01968897', 'n01978287', 'n01978455', 'n01980166', 'n01981276', 'n01983481', 'n01984695', 'n01985128', 'n01986214', 'n01990800', 'n02002556', 'n02002724', 'n02006656', 'n02007558', 'n02009229', 'n02009912', 'n02011460', 'n02012849', 'n02013706', 'n02017213', 'n02018207', 'n02018795', 'n02025239', 'n02027492', 'n02028035', 'n02033041', 'n02037110', 'n02051845', 'n02056570', 'n02058221', 'n02066245', 'n02071294', 'n02074367', 'n02077923', 'n02085620', 'n02085782', 'n02085936', 'n02086079', 'n02086240', 'n02086646', 'n02086910', 'n02087046', 'n02087394', 'n02088094', 'n02088238', 'n02088364', 'n02088466', 'n02088632', 'n02089078', 'n02089867', 'n02089973', 'n02090379', 'n02090622', 'n02090721', 'n02091032', 'n02091134', 'n02091244', 'n02091467', 'n02091635', 'n02091831', 'n02092002', 'n02092339', 'n02093256', 'n02093428', 'n02093647', 'n02093754', 'n02093859', 'n02093991', 'n02094114', 'n02094258', 'n02094433', 'n02095314', 'n02095570', 'n02095889', 'n02096051', 'n02096177', 'n02096294', 'n02096437', 'n02096585', 'n02097047', 'n02097130', 'n02097209', 'n02097298', 'n02097474', 'n02097658', 'n02098105', 'n02098286', 'n02098413', 'n02099267', 'n02099429', 'n02099601', 'n02099712', 'n02099849', 'n02100236', 'n02100583', 'n02100735', 'n02100877', 'n02101006', 'n02101388', 'n02101556', 'n02102040', 'n02102177', 'n02102318', 'n02102480', 'n02102973', 'n02104029', 'n02104365', 'n02105056', 'n02105162', 'n02105251', 'n02105412', 'n02105505', 'n02105641', 'n02105855', 'n02106030', 'n02106166', 'n02106382', 'n02106550', 'n02106662', 'n02107142', 'n02107312', 'n02107574', 'n02107683', 'n02107908', 'n02108000', 'n02108089', 'n02108422', 'n02108551', 'n02108915', 'n02109047', 'n02109525', 'n02109961', 'n02110063', 'n02110185', 'n02110341', 'n02110627', 'n02110806', 'n02110958', 'n02111129', 'n02111277', 'n02111500', 'n02111889', 'n02112018', 'n02112137', 'n02112350', 'n02112706', 'n02113023', 'n02113186', 'n02113624', 'n02113712', 'n02113799', 'n02113978', 'n02114367', 'n02114548', 
'n02114712', 'n02114855', 'n02115641', 'n02115913', 'n02116738', 'n02117135', 'n02119022', 'n02119789', 'n02120079', 'n02120505', 'n02123045', 'n02123159', 'n02123394', 'n02123597', 'n02124075', 'n02125311', 'n02127052', 'n02128385', 'n02128757', 'n02128925', 'n02129165', 'n02129604', 'n02130308', 'n02132136', 'n02133161', 'n02134084', 'n02134418', 'n02137549', 'n02138441', 'n02165105', 'n02165456', 'n02167151', 'n02168699', 'n02169497', 'n02172182', 'n02174001', 'n02177972', 'n02190166', 'n02206856', 'n02219486', 'n02226429', 'n02229544', 'n02231487', 'n02233338', 'n02236044', 'n02256656', 'n02259212', 'n02264363', 'n02268443', 'n02268853', 'n02276258', 'n02277742', 'n02279972', 'n02280649', 'n02281406', 'n02281787', 'n02317335', 'n02319095', 'n02321529', 'n02325366', 'n02326432', 'n02328150', 'n02342885', 'n02346627', 'n02356798', 'n02361337', 'n02363005', 'n02364673', 'n02389026', 'n02391049', 'n02395406', 'n02396427', 'n02397096', 'n02398521', 'n02403003', 'n02408429', 'n02410509', 'n02412080', 'n02415577', 'n02417914', 'n02422106', 'n02422699', 'n02423022', 'n02437312', 'n02437616', 'n02441942', 'n02442845', 'n02443114', 'n02443484', 'n02444819', 'n02445715', 'n02447366', 'n02454379', 'n02457408', 'n02480495', 'n02480855', 'n02481823', 'n02483362', 'n02483708', 'n02484975', 'n02486261', 'n02486410', 'n02487347', 'n02488291', 'n02488702', 'n02489166', 'n02490219', 'n02492035', 'n02492660', 'n02493509', 'n02493793', 'n02494079', 'n02497673', 'n02500267', 'n02504013', 'n02504458', 'n02509815', 'n02510455', 'n02514041', 'n02526121', 'n02536864', 'n02606052', 'n02607072', 'n02640242', 'n02641379', 'n02643566', 'n02655020', 'n02666196', 'n02667093', 'n02669723', 'n02672831', 'n02676566', 'n02687172', 'n02690373', 'n02692877', 'n02699494', 'n02701002', 'n02704792', 'n02708093', 'n02727426', 'n02730930', 'n02747177', 'n02749479', 'n02769748', 'n02776631', 'n02777292', 'n02782093', 'n02783161', 'n02786058', 'n02787622', 'n02788148', 'n02790996', 'n02791124', 'n02791270', 'n02793495', 'n02794156', 'n02795169', 'n02797295', 'n02799071', 'n02802426', 'n02804414', 'n02804610', 'n02807133', 'n02808304', 'n02808440', 'n02814533', 'n02814860', 'n02815834', 'n02817516', 'n02823428', 'n02823750', 'n02825657', 'n02834397', 'n02835271', 'n02837789', 'n02840245', 'n02841315', 'n02843684', 'n02859443', 'n02860847', 'n02865351', 'n02869837', 'n02870880', 'n02871525', 'n02877765', 'n02879718', 'n02883205', 'n02892201', 'n02892767', 'n02894605', 'n02895154', 'n02906734', 'n02909870', 'n02910353', 'n02916936', 'n02917067', 'n02927161', 'n02930766', 'n02939185', 'n02948072', 'n02950826', 'n02951358', 'n02951585', 'n02963159', 'n02965783', 'n02966193', 'n02966687', 'n02971356', 'n02974003', 'n02977058', 'n02978881', 'n02979186', 'n02980441', 'n02981792', 'n02988304', 'n02992211', 'n02992529', 'n02999410', 'n03000134', 'n03000247', 'n03000684', 'n03014705', 'n03016953', 'n03017168', 'n03018349', 'n03026506', 'n03028079', 'n03032252', 'n03041632', 'n03042490', 'n03045698', 'n03047690', 'n03062245', 'n03063599', 'n03063689', 'n03065424', 'n03075370', 'n03085013', 'n03089624', 'n03095699', 'n03100240', 'n03109150', 'n03110669', 'n03124043', 'n03124170', 'n03125729', 'n03126707', 'n03127747', 'n03127925', 'n03131574', 'n03133878', 'n03134739', 'n03141823', 'n03146219', 'n03160309', 'n03179701', 'n03180011', 'n03187595', 'n03188531', 'n03196217', 'n03197337', 'n03201208', 'n03207743', 'n03207941', 'n03208938', 'n03216828', 'n03218198', 'n03220513', 'n03223299', 'n03240683', 'n03249569', 'n03250847', 'n03255030', 
'n03259280', 'n03271574', 'n03272010', 'n03272562', 'n03290653', 'n03291819', 'n03297495', 'n03314780', 'n03325584', 'n03337140', 'n03344393', 'n03345487', 'n03347037', 'n03355925', 'n03372029', 'n03376595', 'n03379051', 'n03384352', 'n03388043', 'n03388183', 'n03388549', 'n03393912', 'n03394916', 'n03400231', 'n03404251', 'n03417042', 'n03424325', 'n03425413', 'n03443371', 'n03444034', 'n03445777', 'n03445924', 'n03447447', 'n03447721', 'n03450230', 'n03452741', 'n03457902', 'n03459775', 'n03461385', 'n03467068', 'n03476684', 'n03476991', 'n03478589', 'n03481172', 'n03482405', 'n03483316', 'n03485407', 'n03485794', 'n03492542', 'n03494278', 'n03495258', 'n03496892', 'n03498962', 'n03527444', 'n03529860', 'n03530642', 'n03532672', 'n03534580', 'n03535780', 'n03538406', 'n03544143', 'n03584254', 'n03584829', 'n03590841', 'n03594734', 'n03594945', 'n03595614', 'n03598930', 'n03599486', 'n03602883', 'n03617480', 'n03623198', 'n03627232', 'n03630383', 'n03633091', 'n03637318', 'n03642806', 'n03649909', 'n03657121', 'n03658185', 'n03661043', 'n03662601', 'n03666591', 'n03670208', 'n03673027', 'n03676483', 'n03680355', 'n03690938', 'n03691459', 'n03692522', 'n03697007', 'n03706229', 'n03709823', 'n03710193', 'n03710637', 'n03710721', 'n03717622', 'n03720891', 'n03721384', 'n03724870', 'n03729826', 'n03733131', 'n03733281', 'n03733805', 'n03742115', 'n03743016', 'n03759954', 'n03761084', 'n03763968', 'n03764736', 'n03769881', 'n03770439', 'n03770679', 'n03773504', 'n03775071', 'n03775546', 'n03776460', 'n03777568', 'n03777754', 'n03781244', 'n03782006', 'n03785016', 'n03786901', 'n03787032', 'n03788195', 'n03788365', 'n03791053', 'n03792782', 'n03792972', 'n03793489', 'n03794056', 'n03796401', 'n03803284', 'n03804744', 'n03814639', 'n03814906', 'n03825788', 'n03832673', 'n03837869', 'n03838899', 'n03840681', 'n03841143', 'n03843555', 'n03854065', 'n03857828', 'n03866082', 'n03868242', 'n03868863', 'n03871628', 'n03873416', 'n03874293', 'n03874599', 'n03876231', 'n03877472', 'n03877845', 'n03884397', 'n03887697', 'n03888257', 'n03888605', 'n03891251', 'n03891332', 'n03895866', 'n03899768', 'n03902125', 'n03903868', 'n03908618', 'n03908714', 'n03916031', 'n03920288', 'n03924679', 'n03929660', 'n03929855', 'n03930313', 'n03930630', 'n03933933', 'n03935335', 'n03937543', 'n03938244', 'n03942813', 'n03944341', 'n03947888', 'n03950228', 'n03954731', 'n03956157', 'n03958227', 'n03961711', 'n03967562', 'n03970156', 'n03976467', 'n03976657', 'n03977966', 'n03980874', 'n03982430', 'n03983396', 'n03991062', 'n03992509', 'n03995372', 'n03998194', 'n04004767', 'n04005630', 'n04008634', 'n04009552', 'n04019541', 'n04023962', 'n04026417', 'n04033901', 'n04033995', 'n04037443', 'n04039381', 'n04040759', 'n04041544', 'n04044716', 'n04049303', 'n04065272', 'n04067472', 'n04069434', 'n04070727', 'n04074963', 'n04081281', 'n04086273', 'n04090263', 'n04099969', 'n04111531', 'n04116512', 'n04118538', 'n04118776', 'n04120489', 'n04125021', 'n04127249', 'n04131690', 'n04133789', 'n04136333', 'n04141076', 'n04141327', 'n04141975', 'n04146614', 'n04147183', 'n04149813', 'n04152593', 'n04153751', 'n04154565', 'n04162706', 'n04179913', 'n04192698', 'n04200800', 'n04201297', 'n04204238', 'n04204347', 'n04208210', 'n04209133', 'n04209239', 'n04228054', 'n04229816', 'n04235860', 'n04238763', 'n04239074', 'n04243546', 'n04251144', 'n04252077', 'n04252225', 'n04254120', 'n04254680', 'n04254777', 'n04258138', 'n04259630', 'n04263257', 'n04264628', 'n04265275', 'n04266014', 'n04270147', 'n04273569', 'n04275548', 'n04277352', 
'n04285008', 'n04286575', 'n04296562', 'n04310018', 'n04311004', 'n04311174', 'n04317175', 'n04325704', 'n04326547', 'n04328186', 'n04330267', 'n04332243', 'n04335435', 'n04336792', 'n04344873', 'n04346328', 'n04347754', 'n04350905', 'n04355338', 'n04355933', 'n04356056', 'n04357314', 'n04366367', 'n04367480', 'n04370456', 'n04371430', 'n04371774', 'n04372370', 'n04376876', 'n04380533', 'n04389033', 'n04392985', 'n04398044', 'n04399382', 'n04404412', 'n04409515', 'n04417672', 'n04418357', 'n04423845', 'n04428191', 'n04429376', 'n04435653', 'n04442312', 'n04443257', 'n04447861', 'n04456115', 'n04458633', 'n04461696', 'n04462240', 'n04465501', 'n04467665', 'n04476259', 'n04479046', 'n04482393', 'n04483307', 'n04485082', 'n04486054', 'n04487081', 'n04487394', 'n04493381', 'n04501370', 'n04505470', 'n04507155', 'n04509417', 'n04515003', 'n04517823', 'n04522168', 'n04523525', 'n04525038', 'n04525305', 'n04532106', 'n04532670', 'n04536866', 'n04540053', 'n04542943', 'n04548280', 'n04548362', 'n04550184', 'n04552348', 'n04553703', 'n04554684', 'n04557648', 'n04560804', 'n04562935', 'n04579145', 'n04579432', 'n04584207', 'n04589890', 'n04590129', 'n04591157', 'n04591713', 'n04592741', 'n04596742', 'n04597913', 'n04599235', 'n04604644', 'n04606251', 'n04612504', 'n04613696', 'n06359193', 'n06596364', 'n06785654', 'n06794110', 'n06874185', 'n07248320', 'n07565083', 'n07579787', 'n07583066', 'n07584110', 'n07590611', 'n07613480', 'n07614500', 'n07615774', 'n07684084', 'n07693725', 'n07695742', 'n07697313', 'n07697537', 'n07711569', 'n07714571', 'n07714990', 'n07715103', 'n07716358', 'n07716906', 'n07717410', 'n07717556', 'n07718472', 'n07718747', 'n07720875', 'n07730033', 'n07734744', 'n07742313', 'n07745940', 'n07747607', 'n07749582', 'n07753113', 'n07753275', 'n07753592', 'n07754684', 'n07760859', 'n07768694', 'n07802026', 'n07831146', 'n07836838', 'n07860988', 'n07871810', 'n07873807', 'n07875152', 'n07880968', 'n07892512', 'n07920052', 'n07930864', 'n07932039', 'n09193705', 'n09229709', 'n09246464', 'n09256479', 'n09288635', 'n09332890', 'n09399592', 'n09421951', 'n09428293', 'n09468604', 'n09472597', 'n09835506', 'n10148035', 'n10565667', 'n11879895', 'n11939491', 'n12057211', 'n12144580', 'n12267677', 'n12620546', 'n12768682', 'n12985857', 'n12998815', 'n13037406', 'n13040303', 'n13044778', 'n13052670', 'n13054560', 'n13133613', 'n15075141']
# Official list of VTAB 19 tasks
VTAB_19TASKS = [
"vtab/caltech101",
"vtab/cifar100",
"vtab/clevr_count_all",
"vtab/clevr_closest_object_distance",
"vtab/diabetic_retinopathy",
"vtab/dmlab",
"vtab/dsprites_label_orientation",
"vtab/dsprites_label_x_position",
"vtab/dtd",
"vtab/eurosat",
"vtab/kitti_closest_vehicle_distance",
"vtab/flowers",
"vtab/pets",
"vtab/pcam",
"vtab/resisc45",
"vtab/smallnorb_label_azimuth",
"vtab/smallnorb_label_elevation",
"sun397",
"vtab/svhn",
]
| [
"{'cifar10': ['a photo of a {c}.', 'a blurry photo of a {c}.', 'a black and white photo of a {c}.', 'a low contrast photo of a {c}.', 'a high contrast photo of a {c}.', 'a bad photo of a {c}.', 'a good photo of a {c}.', 'a photo of a small {c}.', 'a photo of a big {c}.', 'a photo of the {c}.', 'a blurry photo of the {c}.', 'a black and white photo of the {c}.', 'a low contrast photo of the {c}.', 'a high contrast photo of the {c}.', 'a bad photo of the {c}.', 'a good photo of the {c}.', 'a photo of the small {c}.', 'a photo of the big {c}.'], 'cifar100': ['a photo of a {c}.', 'a blurry photo of a {c}.', 'a black and white photo of a {c}.', 'a low contrast photo of a {c}.', 'a high contrast photo of a {c}.', 'a bad photo of a {c}.', 'a good photo of a {c}.', 'a photo of a small {c}.', 'a photo of a big {c}.', 'a photo of the {c}.', 'a blurry photo of the {c}.', 'a black and white photo of the {c}.', 'a low contrast photo of the {c}.', 'a high contrast photo of the {c}.', 'a bad photo of the {c}.', 'a good photo of the {c}.', 'a photo of the small {c}.', 'a photo of the big {c}.'], 'imagenet1k': ['a bad photo of a {c}.', 'a photo of many {c}.', 'a sculpture of a {c}.', 'a photo of the hard to see {c}.', 'a low resolution photo of the {c}.', 'a rendering of a {c}.', 'graffiti of a {c}.', 'a bad photo of the {c}.', 'a cropped photo of the {c}.', 'a tattoo of a {c}.', 'the embroidered {c}.', 'a photo of a hard to see {c}.', 'a bright photo of a {c}.', 'a photo of a clean {c}.', 'a photo of a dirty {c}.', 'a dark photo of the {c}.', 'a drawing of a {c}.', 'a photo of my {c}.', 'the plastic {c}.', 'a photo of the cool {c}.', 'a close-up photo of a {c}.', 'a black and white photo of the {c}.', 'a painting of the {c}.', 'a painting of a {c}.', 'a pixelated photo of the {c}.', 'a sculpture of the {c}.', 'a bright photo of the {c}.', 'a cropped photo of a {c}.', 'a plastic {c}.', 'a photo of the dirty {c}.', 'a jpeg corrupted photo of a {c}.', 'a blurry photo of the {c}.', 'a photo of the {c}.', 'a good photo of the {c}.', 'a rendering of the {c}.', 'a {c} in a video game.', 'a photo of one {c}.', 'a doodle of a {c}.', 'a close-up photo of the {c}.', 'a photo of a {c}.', 'the origami {c}.', 'the {c} in a video game.', 'a sketch of a {c}.', 'a doodle of the {c}.', 'a origami {c}.', 'a low resolution photo of a {c}.', 'the toy {c}.', 'a rendition of the {c}.', 'a photo of the clean {c}.', 'a photo of a large {c}.', 'a rendition of a {c}.', 'a photo of a nice {c}.', 'a photo of a weird {c}.', 'a blurry photo of a {c}.', 'a cartoon {c}.', 'art of a {c}.', 'a sketch of the {c}.', 'a embroidered {c}.', 'a pixelated photo of a {c}.', 'itap of the {c}.', 'a jpeg corrupted photo of the {c}.', 'a good photo of a {c}.', 'a plushie {c}.', 'a photo of the nice {c}.', 'a photo of the small {c}.', 'a photo of the weird {c}.', 'the cartoon {c}.', 'art of the {c}.', 'a drawing of the {c}.', 'a photo of the large {c}.', 'a black and white photo of a {c}.', 'the plushie {c}.', 'a dark photo of a {c}.', 'itap of a {c}.', 'graffiti of the {c}.', 'a toy {c}.', 'itap of my {c}.', 'a photo of a cool {c}.', 'a photo of a small {c}.', 'a tattoo of the {c}.'], 'food101': ['a photo of {c}, a type of food.'], 'sun397': ['a photo of a {c}.', 'a photo of the {c}.'], 'cars': ['a photo of a {c}.', 'a photo of the {c}.', 'a photo of my {c}.', 'i love my {c}!', 'a photo of my dirty {c}.', 'a photo of my clean {c}.', 'a photo of my new {c}.', 'a photo of my old {c}.'], 'fgvc_aircraft': ['a photo of a {c}, a type of aircraft.', 'a photo 
of the {c}, a type of aircraft.'], 'dtd': ['a photo of a {c} texture.', 'a photo of a {c} pattern.', 'a photo of a {c} thing.', 'a photo of a {c} object.', 'a photo of the {c} texture.', 'a photo of the {c} pattern.', 'a photo of the {c} thing.', 'a photo of the {c} object.'], 'pets': ['a photo of a {c}, a type of pet.'], 'caltech101': ['a photo of a {c}.', 'a painting of a {c}.', 'a plastic {c}.', 'a sculpture of a {c}.', 'a sketch of a {c}.', 'a tattoo of a {c}.', 'a toy {c}.', 'a rendition of a {c}.', 'a embroidered {c}.', 'a cartoon {c}.', 'a {c} in a video game.', 'a plushie {c}.', 'a origami {c}.', 'art of a {c}.', 'graffiti of a {c}.', 'a drawing of a {c}.', 'a doodle of a {c}.', 'a photo of the {c}.', 'a painting of the {c}.', 'the plastic {c}.', 'a sculpture of the {c}.', 'a sketch of the {c}.', 'a tattoo of the {c}.', 'the toy {c}.', 'a rendition of the {c}.', 'the embroidered {c}.', 'the cartoon {c}.', 'the {c} in a video game.', 'the plushie {c}.', 'the origami {c}.', 'art of the {c}.', 'graffiti of the {c}.', 'a drawing of the {c}.', 'a doodle of the {c}.'], 'flowers': ['a photo of a {c}, a type of flower.'], 'mnist': ['a photo of the number: \"{c}\".'], 'stl10': ['a photo of a {c}.', 'a photo of the {c}.'], 'eurosat': ['a centered satellite photo of {c}.', 'a centered satellite photo of a {c}.', 'a centered satellite photo of the {c}.'], 'gtsrb': ['a zoomed in photo of a \"{c}\" traffic sign.', 'a centered photo of a \"{c}\" traffic sign.', 'a close up photo of a \"{c}\" traffic sign.'], 'country211': ['a photo i took in {c}.', 'a photo i took while visiting {c}.', 'a photo from my home country of {c}.', 'a photo from my visit to {c}.', 'a photo showing the country of {c}.'], 'renderedsst2': ['a {c} review of a movie.'], 'voc2007': ['a photo of a {c}.'], 'voc2007_multilabel': ['a photo of a {c}.'], 'fer2013': ['a photo of a {c} looking face.', 'a photo of a face showing the emotion: {c}.', 'a photo of a face looking {c}.', 'a face that looks {c}.', 'they look {c}.', 'look at how {c} they are.'], 'clevr_count_all': ['a picture of {c} objects'], 'clevr_closest_object_distance': ['{c} shapes.'], 'pcam': ['a histopathology slide showing {c}', 'histopathology image of {c}'], 'svhn': ['a photo of the number {c} written on a sign', 'an outdoor house number {c}', 'the number {c} in the center of the image', 'an outdoor number {c} writte on a sign', 'an outdoor number {c}', 'a centered image of the number {c}'], 'resisc45': ['a sattelite image of {c}', 'an aerial view of {c}', 'a sattelite photo of {c}', '{c} from above'], 'kitti_closest_vehicle_distance': ['{c}'], 'smallnorb_label_azimuth': ['an object rotated at {c}', 'something rotated at {c}', '{c} rotation', 'something at a {c} angle'], 'smallnorb_label_elevation': ['an object rotated at {c}', 'something rotated at {c}', '{c} rotation', 'something at a {c} angle'], 'dsprites_label_x_position': ['an object located at position {c}% on the horizontal axis'], 'dsprites_label_orientation': ['an object rotated at {c}', 'something rotated at {c}', '{c} rotation', 'something at a {c} angle'], 'dmlab': ['{c}'], 'diabetic_retinopathy': ['a retinal image with {c}'], 'dummy': ['a photo of a {c}']}",
"imagenet1k"
] |
2024-01-10 | neilneuwirth/property-manager-app | webui~webui~state.py | from typing import List, Dict, Any
import os
import requests
import json
from pydantic import BaseModel
from openai import OpenAI
import reflex as rx
from webui.services import transform_maintenance_request
from .constants import (
MODEL,
TRIGGER_KEYWORD,
TEMPERATURE,
DEFAULT_CHATS,
DEFAULT_REQUEST_HISTORY,
SYSTEM_PROMPT,
DEFAULT_MAINTENANCE_REQUEST,
)
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
class QuestionAnswer(rx.Base):
"""A question and answer pair."""
question: str
answer: str
class State(rx.State):
"""The app state."""
# A Dict from the chat name to the list of questions and answers.
chats: Dict[str, List[QuestionAnswer]] = DEFAULT_CHATS
# Request history
request_history: Dict[str, bool] = DEFAULT_REQUEST_HISTORY
# The current chat name.
current_chat = "Demo Request"
# The current question.
question: str
# Whether we are processing the question.
question_processing: bool = False
# Whether we are processing the maintenance request.
form_processing: bool = False
# The name of the new chat.
new_chat_name: str = ""
# Whether the drawer is open.
drawer_open: bool = False
# Whether the modal is open.
modal_open: bool = False
api_type: str = "openai"
maintenance_request_submitted: bool = False
img: list[str]
maintenance_request_data: Dict[Any, Any] = DEFAULT_MAINTENANCE_REQUEST
def set_category(self, category: str):
self.maintenance_request_data["CategoryId"] = category
def set_subject(self, subject: str):
self.maintenance_request_data["Subject"] = subject
def set_description(self, description: str):
self.maintenance_request_data["Description"] = description
def set_priority(self, priority: str):
self.maintenance_request_data["TaskPriority"] = priority
@rx.var
def priority(self) -> str:
return self.maintenance_request_data["TaskPriority"]
@rx.var
def description(self) -> str:
return self.maintenance_request_data["Description"]
@rx.var
def subject(self) -> str:
return self.maintenance_request_data["Subject"]
@rx.var
def category(self) -> str:
return self.maintenance_request_data["CategoryId"]
def create_chat(self):
"""Create a new chat."""
# Add the new chat to the list of chats.
self.current_chat = self.new_chat_name
self.chats[self.new_chat_name] = []
# FIX THE BELOW
self.request_history[self.current_chat] = False
self.maintenance_request_submitted = False
# Toggle the modal.
self.modal_open = False
def set_maintenance_request_history(self, maintenance_request_submitted: bool):
self.maintenance_request_submitted = maintenance_request_submitted
self.request_history[self.current_chat] = maintenance_request_submitted
def submit_maintenance_request(self):
self.set_maintenance_request_history(True)
self.modal_open = False
def toggle_modal(self):
"""Toggle the new chat modal."""
self.modal_open = not self.modal_open
def toggle_form_processing(self):
"""Toggle the new chat modal."""
self.form_processing = not self.form_processing
def toggle_drawer(self):
"""Toggle the drawer."""
self.drawer_open = not self.drawer_open
def delete_chat(self):
"""Delete the current chat."""
del self.chats[self.current_chat]
if len(self.chats) == 0:
self.chats = DEFAULT_CHATS
self.current_chat = list(self.chats.keys())[0]
self.toggle_drawer()
def set_chat(self, chat_name: str):
"""Set the name of the current chat.
Args:
chat_name: The name of the chat.
"""
self.current_chat = chat_name
self.toggle_drawer()
self.set_maintenance_request_history(self.request_history.get(chat_name, False))
@rx.var
def chat_titles(self) -> List[str]:
"""Get the list of chat titles.
Returns:
The list of chat names.
"""
return list(self.chats.keys())
async def handle_upload(self, files: list[rx.UploadFile]):
"""Handle the upload of file(s).
Args:
files: The uploaded files.
"""
for file in files:
upload_data = await file.read()
outfile = f".web/public/{file.filename}"
# Save the file.
with open(outfile, "wb") as file_object:
file_object.write(upload_data)
# Update the img var.
self.img.append(file.filename)
async def process_question(self, form_data: Dict[str, str]):
# Get the question from the form
# if len(self.img) > 0:
# self.handle_upload()
# self.toggle_modal()
question = form_data["question"]
# Check if the question is empty
if question == "":
return
model = self.openai_process_question
async for value in model(question):
yield value
async def process_form(self):
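# If the assistant's latest output contains the trigger keyword, extract a structured
# maintenance request from the conversation and open the confirmation modal.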
last_message = self.get_context()
if TRIGGER_KEYWORD in last_message:
self.form_processing = True
await self.process_maintenance(last_message)
self.toggle_modal()
self.form_processing = False
async def process(self, form_data: Dict[str, str]):
async for response in self.process_question(form_data):
yield response
await self.process_form()
@rx.var
def maintenance_request(self): # -> MaintenanceRequest:
return self.maintenance_request_data
# return MaintenanceRequest(**self.maintenance_request_data)
async def process_maintenance(self, form_data: str):
maintenance_request = await transform_maintenance_request(form_data)
maintenance_request = maintenance_request["data"]
self.maintenance_request_data = maintenance_request
return maintenance_request
def get_context(self):
"""Get the combined context of question and answer from the current chat."""
context = ""
for qa in self.chats[self.current_chat]:
context += qa.answer
return context
async def openai_process_question(self, question: str):
"""Get the response from the API.
Args:
form_data: A Dict with the current question.
"""
# Add the question to the list of questions.
question_answer = QuestionAnswer(question=question, answer="")
self.chats[self.current_chat].append(question_answer)
# Clear the input and start the processing.
self.question_processing = True
yield
# Build the messages.
messages = [{"role": "system", "content": SYSTEM_PROMPT}]
for question_answer in self.chats[self.current_chat]:
messages.extend(
(
{"role": "user", "content": question_answer.question},
{"role": "assistant", "content": question_answer.answer},
)
)
# Remove the last mock answer.
messages = messages[:-1]
# Start a new session to answer the question.
session = client.chat.completions.create(
model=MODEL, messages=messages, temperature=TEMPERATURE, stream=True
)
# Stream the results, yielding after every word.
for item in session:
if hasattr(item.choices[0].delta, "content"):
if answer_text := item.choices[0].delta.content:
self.chats[self.current_chat][-1].answer += answer_text
yield
# Toggle the processing flag.
self.question_processing = False
| [] |
2024-01-10 | evd995/SIMBA-Gemini | helpers~tru_helper.py | import streamlit as st
from trulens_eval import Tru, Feedback, TruLlama, OpenAI as fOpenAI
from trulens_eval.feedback import Groundedness
import openai
from openai import OpenAI
import numpy as np
import nest_asyncio
nest_asyncio.apply()
openai.api_key = st.secrets["OPENAI_API_KEY"]
openai_client = OpenAI()
class OpenAI_custom(fOpenAI):
"""
From tutorial:
https://colab.research.google.com/github/truera/trulens/blob/main/trulens_eval/examples/expositional/frameworks/langchain/langchain_agents.ipynb#scrollTo=hnXeWFcPUaqk
"""
def no_answer_feedback(self, question: str, response: str) -> float:
return float(openai_client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "Does the RESPONSE provide an answer to the QUESTION? Rate on a scale of 1 to 10. Respond with the number only."},
{"role": "user", "content": f"QUESTION: {question}; RESPONSE: {response}"}
]
).choices[0].message.content) / 10
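# The model replies with a 1-10 rating, which is divided by 10 so the score lands in the 0-1 range used by TruLens feedback scores.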
custom_no_answer = OpenAI_custom()
def build_tru_recorder(agent):
provider = fOpenAI()
# HONEST
# Answer Relevance
f_qa_relevance = Feedback(
provider.relevance_with_cot_reasons,
name="[METRIC] Answer Relevance"
).on_input_output()
# Context Relevance
context_selection = TruLlama.select_source_nodes().node.text
f_qs_relevance = (
Feedback(provider.qs_relevance_with_cot_reasons,
name="[METRIC] Context Relevance")
.on_input()
.on(context_selection)
.aggregate(np.mean)
)
# Groundedness of response based on context
grounded = Groundedness(groundedness_provider=provider)
f_groundedness = (
Feedback(grounded.groundedness_measure_with_cot_reasons,
name="[METRIC] Groundedness"
)
.on(context_selection)
.on_output()
.aggregate(grounded.grounded_statements_aggregator)
)
# HARMLESS
f_insensitivity = Feedback(
provider.insensitivity_with_cot_reasons,
name="[METRIC] Insensitivity",
higher_is_better=False,
).on_output()
f_input_maliciousness = Feedback(
provider.maliciousness_with_cot_reasons,
name="[METRIC] Input Maliciousness",
higher_is_better=False,
).on_input()
f_output_maliciousness = Feedback(
provider.maliciousness_with_cot_reasons,
name="[METRIC] Output Maliciousness",
higher_is_better=False,
).on_output()
# HELPFUL
f_coherence = Feedback(
provider.coherence_with_cot_reasons, name="[METRIC] Coherence"
).on_output()
f_input_sentiment = Feedback(
provider.sentiment_with_cot_reasons, name="[METRIC] Input Sentiment"
).on_input()
f_output_sentiment = Feedback(
provider.sentiment_with_cot_reasons, name="[METRIC] Output Sentiment"
).on_output()
# AGENT: Missing tools
f_no_answer = Feedback(
custom_no_answer.no_answer_feedback, name="[METRIC] Answers Question"
).on_input_output()
tru_recorder = TruLlama(
agent,
app_id="Students Agent",
feedbacks=[
f_qa_relevance,
#f_qs_relevance,
f_groundedness,
f_insensitivity,
f_input_maliciousness,
#f_output_maliciousness,
#f_coherence,
#f_input_sentiment,
#f_output_sentiment,
#f_no_answer
]
)
return tru_recorder | [
"QUESTION: PLACEHOLDER; RESPONSE: PLACEHOLDER",
"Does the RESPONSE provide an answer to the QUESTION? Rate on a scale of 1 to 10. Respond with the number only."
] |
2024-01-10 | AI-Northstar-Tech/openai-proxy | proxy_app~request_handler.py | import openai
from litellm import completion, completion_cost
import os
from proxy_app.utils import (
price_calculator_chat_completion,
price_calculator_embedding_completion,
MAX_TOKEN_LIMIT,
)
import dotenv
import time
dotenv.load_dotenv(".env")
openai.api_key = os.environ.get("OPENAI_API_KEY")
class ChatCompletionHandler:
def __init__(self, req, url):
self.url = url
self.req = req
self.prompt_tokens = 0
self.response_tokens = 0
self.total_tokens = 0
self.tokens_cost = 0
def makeRequest(self):
try:
if "stream" in self.req and self.req["stream"] == True:
return True, "", True
else:
try:
self.req["max_tokens"] = min(
int(self.req["max_tokens"]), MAX_TOKEN_LIMIT
)
except KeyError:
self.req["max_tokens"] = MAX_TOKEN_LIMIT
# start timer
timeStart = time.time()
response = completion(**self.req)
# end timer
timeEnd = time.time()
# calculate time taken
timeTaken = timeEnd - timeStart
print(f"Time taken (core API call): {timeTaken}")
self.prompt_tokens = response["usage"]["prompt_tokens"]
self.response_tokens = response["usage"]["completion_tokens"]
self.total_tokens = response["usage"]["total_tokens"]
self.tokens_cost = price_calculator_chat_completion(response["usage"])
return True, response, False
except KeyError:
try:
self.req["max_tokens"] = min(
int(self.req["max_tokens"]), MAX_TOKEN_LIMIT
)
except KeyError:
self.req["max_tokens"] = MAX_TOKEN_LIMIT
response = completion(**self.req)
self.prompt_tokens = response["usage"]["prompt_tokens"]
self.response_tokens = response["usage"]["completion_tokens"]
self.total_tokens = response["usage"]["total_tokens"]
self.tokens_cost = completion_cost(completion_response=response)
return True, response, False
except openai.InvalidRequestError as e:
return False, str(e), False
class EmbeddingHandler:
def __init__(self, req, url):
self.url = url
self.req = req
self.prompt_tokens = 0
self.total_tokens = 0
self.tokens_cost = 0
def makeRequest(self):
try:
self.req["max_tokens"] = min(int(self.req["max_tokens"]), MAX_TOKEN_LIMIT)
except KeyError:
self.req["max_tokens"] = MAX_TOKEN_LIMIT
try:
response = openai.Embedding.create(**self.req)
except openai.InvalidRequestError as e:
return False, str(e)
self.prompt_tokens = response["usage"]["prompt_tokens"]
self.total_tokens = response["usage"]["total_tokens"]
self.tokens_cost = price_calculator_embedding_completion(response["usage"])
return True, response
| [] |
2024-01-10 | AI-Northstar-Tech/openai-proxy | proxy_app~Database~database.py | import time
from sqlalchemy import create_engine
from proxy_app.Database.models.base import Base
from proxy_app.Database.models.openaiRequestResponse import OpenAIRequestResponse
from proxy_app.Database.models.apiKeyToQuota import APIKeyToQuota
from sqlalchemy.orm import Session
import psycopg2
class ProxyAPIDatabase:
def __init__(self, db_option, db_type, db_module, username, password, host, name):
self.db_type = db_type
self.db_module = db_module
self.username = username
self.password = password
self.host = host
self.name = name
self.db_option = db_option
if db_option == "SQLite":
self.url = "sqlite:///proxy_api.db"
else:
self.url = f"{db_type}+{db_module}://{username}:{password}@{host}/{name}"
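# The URL above follows SQLAlchemy's dialect+driver://username:password@host/dbname convention.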
self.engine = create_engine(self.url)
def init_db(self):
Base.metadata.create_all(self.engine)
def create_api_key_with_quota(self, api_key, rem_quota, req_count):
with Session(self.engine) as session:
api_key_to_quota = APIKeyToQuota(
api_key=api_key, rem_quota=rem_quota, req_count=req_count
)
session.add(api_key_to_quota)
session.commit()
print(f"Created new API key: {api_key} with initial quota: {rem_quota}")
return api_key
def insert_data(self, req_id, api_key, req_data, response):
with Session(self.engine) as session:
requestResponse = OpenAIRequestResponse(
req_id=req_id, api_key=api_key, req_data=req_data, response=response
)
session.add(requestResponse)
session.commit()
print(f"Inserted Request Data: {req_data}")
def update_remQuota(self, api_key, rem_quota):
with Session(self.engine) as session:
result = (
session.query(APIKeyToQuota)
.filter(APIKeyToQuota.api_key == api_key)
.first()
)
if result:
result.rem_quota = rem_quota
result.req_count += 1
session.commit()
print(f"Updated rem_quota for API_Key: {api_key} to {rem_quota}")
def add_quota(self, api_key, quota):
with Session(self.engine) as session:
result = (
session.query(APIKeyToQuota)
.filter(APIKeyToQuota.api_key == api_key)
.first()
)
if result:
result.rem_quota += quota
session.commit()
print(f"Added quota for API_Key: {api_key} to {quota}")
def validate_api_key(self, api_key):
with Session(self.engine) as session:
api_key_to_quota = (
session.query(APIKeyToQuota)
.filter(APIKeyToQuota.api_key == api_key)
.first()
)
return True if api_key_to_quota else False
def validate_api_key_request(self, api_key):
with Session(self.engine) as session:
api_key_to_quota = (
session.query(APIKeyToQuota)
.filter(APIKeyToQuota.api_key == api_key)
.first()
)
if api_key_to_quota:
return True, api_key_to_quota.rem_quota, f"{api_key}__{time.time_ns()}"
else:
# No matching API key was found, so report zero remaining quota
return False, 0, None
def get_AllRequestData(self, api_key):
with Session(self.engine) as session:
query_list = (
session.query(OpenAIRequestResponse)
.filter(OpenAIRequestResponse.api_key == api_key)
.all()
)
return query_list if query_list else None
| [] |
2024-01-10 | BigDataIA-Summer2023-Team2/Assignment1 | airflow~dags~fetch_transcript.py | from airflow.models import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.utils.dates import days_ago
from airflow.models.param import Param
import datetime
from datetime import timedelta
import os
import requests
from airflow.models.baseoperator import chain
from sentence_transformers import SentenceTransformer
import numpy as np
import openai
import redis
from redis.commands.search.field import VectorField, TextField, NumericField
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
# dag declaration
user_input = {
"company_name": Param(default="LMAT", type='string', minLength=5, maxLength=255),
"year": Param(default=2015, type='number'),
"quarter": Param(default=1, type='number'),
"word_limit": Param(default=500, type='number'),
"openai_api_key": Param(type='string'),
}
dag = DAG(
dag_id="fetch_transcript",
# Run daily at midnight to fetch the transcript data from GitHub
schedule="0 0 * * *", # https://crontab.guru/
start_date=days_ago(0),
catchup=False,
dagrun_timeout=timedelta(minutes=60),
tags=["assignment1", "damg7245", "fetch_transcript"],
)
def folder_names_with_date(year, quarter, company):
start_month = (quarter - 1) * 3 + 1
end_month = start_month + 2
# Create starting and ending datetime objects
start_date = datetime.datetime(year, start_month, 1)
end_date = datetime.datetime(year, end_month, 1) + datetime.timedelta(days=31)
# Generate the range of dates
folder_names = []
current_date = start_date
while current_date <= end_date:
folder_names.append(current_date.strftime("%Y-%m-%d").replace('-','')+'_'+company)
current_date += datetime.timedelta(days=1)
return folder_names
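# For example, folder_names_with_date(2015, 1, "LMAT") returns daily names from "20150101_LMAT"
# through "20150401_LMAT", since the end month is padded by 31 days.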
def get_words_github(ti, **kwargs):
company_name = kwargs['params']['company_name']
year = kwargs['params']['year']
quarter = kwargs['params']['quarter']
word_limit = kwargs['params']['word_limit']
folder_names = folder_names_with_date(year, quarter, company_name)
ti.xcom_push(key="folder_names", value=folder_names)
words = []
for folder in folder_names:
url = f"{os.getenv('DATASET_GITHUB_URL')}/MAEC_Dataset/{folder}/text.txt"
page = requests.get(url)
if page.status_code != 200:
continue
words += page.text.split()
first_n_words = ' '.join(words[:word_limit])
return first_n_words
def generate_sbert_embeddings(ti):
model = SentenceTransformer(os.getenv('SBERT_MODEL','sentence-transformers/all-MiniLM-L6-v2'))
words_to_encode = ti.xcom_pull(key="return_value", task_ids='get_words_github')
print(words_to_encode)
ti.xcom_push(key="words_to_encode", value=words_to_encode)
embeddings = model.encode(words_to_encode)
print(type(embeddings), embeddings)
# vector = np.array(embeddings).astype(np.float32).tobytes()
# print(type(vector), vector)
return embeddings.tolist()
def generate_openai_embeddings(ti, **kwargs):
openai.api_key = kwargs["params"]["openai_api_key"]
model_id = os.getenv("OPENAI_ENGINE", "text-embedding-ada-002")
words_to_encode = ti.xcom_pull(key="return_value", task_ids='get_words_github')
embeddings = openai.Embedding.create(
input=words_to_encode,
engine=model_id)['data'][0]['embedding']
return embeddings
def save_data_to_redis(ti, **kwargs):
company_name = kwargs['params']['company_name']
year = kwargs['params']['year']
quarter = kwargs['params']['quarter']
plain_text = ti.xcom_pull(key="return_value", task_ids='get_words_github')
sbert_embeddings = ti.xcom_pull(key="return_value", task_ids='generate_sbert_embeddings')
sbert_vector = np.array(sbert_embeddings).astype(np.float32).tobytes()
openai_embeddings = ti.xcom_pull(key="return_value", task_ids='generate_openai_embeddings')
openai_vector = np.array(openai_embeddings).astype(np.float32).tobytes()
r = redis.Redis(host=os.getenv("REDIS_DB_HOST", 'redis-stack'), # Local redis error
port= os.getenv("REDIS_DB_PORT", "6379"),
username=os.getenv("REDIS_DB_USERNAME", ""),
password=os.getenv("REDIS_DB_PASSWORD", ""),
decode_responses=True
)
folder_names = ti.xcom_pull(key="folder_names", task_ids='get_words_github')
datekey = folder_names[0]
year= datekey.split('_')[0][:4]
month= datekey.split('_')[0][4:6]
date= datekey.split('_')[0][-2:]
data = {
"date" : date,
"month": month,
"year" : year,
"quarter" : quarter,
"company_ticker": company_name,
"plain_text" : plain_text,
"sbert_embeddings": sbert_vector,
"openai_embeddings": openai_vector
}
SCHEMA = [
NumericField("date"),
NumericField("month"),
NumericField("year"),
TextField("company_ticker"),
TextField("plain_text"),
VectorField("sbert_embeddings", "FLAT", {"TYPE": "FLOAT32", "DIM": 384, "DISTANCE_METRIC": "COSINE"}),
VectorField("openai_embeddings", "FLAT", {"TYPE": "FLOAT32", "DIM": 1536, "DISTANCE_METRIC": "COSINE"}),
]
r.hset(f"post:{company_name}:{year}_{quarter}", mapping=data)
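# Keys look like post:LMAT:2015_1; the post: prefix matches the IndexDefinition prefix used below.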
# Drop any existing index so it can be recreated with the current schema
try:
    r.ft("embeddings").dropindex()
except Exception:
    print("No existing index to drop")
# r.zadd("embeddings", SCHEMA)
try:
r.ft("embeddings").create_index(fields=SCHEMA, definition=IndexDefinition(prefix=["post:"], index_type=IndexType.HASH))
except Exception as e:
print("Index already exists")
r.close()
# Create the index
return "Data saved to redis"
with dag:
get_data_from_github_task = PythonOperator(
task_id='get_words_github',
python_callable=get_words_github,
provide_context=True,
dag=dag,
)
generate_sbert_embeddings_task = PythonOperator(
task_id='generate_sbert_embeddings',
python_callable=generate_sbert_embeddings,
provide_context=True,
dag=dag,
)
generate_openai_embeddings_task = PythonOperator(
task_id='generate_openai_embeddings',
python_callable=generate_openai_embeddings,
provide_context=True,
dag=dag,
)
save_data_to_redis_task = PythonOperator(
task_id='save_data_to_redis',
python_callable=save_data_to_redis,
provide_context=True,
dag=dag,
)
chain(get_data_from_github_task, [generate_openai_embeddings_task, generate_sbert_embeddings_task], save_data_to_redis_task)
# {
# "company_name": "LMAT",
# "year": 2015,
# "quarter": 1,
# "word_limit": 500
# } | [] |
2024-01-10 | BigDataIA-Summer2023-Team2/Assignment1 | api~utils~pipeline.py | from utils import schemas
import requests
import os
from requests.auth import HTTPBasicAuth
import uuid
import redis
import openai
from sentence_transformers import SentenceTransformer
from redis.commands.search.query import Query
import numpy as np
import logging
import json
AIRFLOW_API_URL = os.getenv("AIRFLOW_API_URL", "http://localhost:8080/api/v1")
logger = logging.getLogger(__name__)
def trigger_fetch_transcript(userInput: schemas.EmbeddTranscript):
url = f"{AIRFLOW_API_URL}/dags/fetch_transcript/dagRuns"
data = {
"conf": {
"company_name": userInput.company_name,
"year": userInput.year,
"quarter": userInput.quarter,
"word_limit": userInput.word_limit,
"openai_api_key": userInput.openai_api_key
},
"dag_run_id": f"fetch_transcript_{uuid.uuid4().hex}",
}
response = requests.request("POST", url, auth=HTTPBasicAuth('airflow', 'airflow'), json=data)
if response.status_code != 200:
return {"message": f"Internal Server Error {response.text}"}
return {"message": "Fetching transcript", "details": response.text}
def fetch_transcript(userInput: schemas.FetchTranscript):
redis_client = redis.Redis(host=os.getenv("REDIS_DB_HOST", 'redis-stack'), # Local redis error
port= os.getenv("REDIS_DB_PORT", "6379"),
username=os.getenv("REDIS_DB_USERNAME", ""),
password=os.getenv("REDIS_DB_PASSWORD", ""),
)
# Fetch a single key
key= 'post:'+str(userInput.company_name)+':'+str(userInput.year)+'_'+str(userInput.quarter)
data = redis_client.hgetall(key)
# Convert byte strings to regular strings
decoded_data = {key.decode('latin-1'): value.decode('latin-1') for key, value in data.items()}
# decoded_data = {base64.b64decode(key).decode("latin-1"): base64.b64decode(value).decode("latin-1") for key, value in data.items()}
response = {"Transcript": json.dumps(decoded_data.get('plain_text'))}
return response
def trigger_fetch_metadata_dag():
url = f"{AIRFLOW_API_URL}/dags/metadata_load/dagRuns"
data = {
"dag_run_id": f"metadata_load_{uuid.uuid4().hex}",
}
response = requests.request("POST", url, auth=HTTPBasicAuth('airflow', 'airflow'), json=data)
if response.status_code != 200:
return {"message": f"Internal Server Error {response.text}"}
return {"message": "Fetching transcript", "details": response.text}
def get_vss_results(query_string, embedding_type, openai_api_key):
# Redis connection details
redis_client = redis.Redis(host=os.getenv("REDIS_DB_HOST", 'redis-stack'), # Local redis error
port= os.getenv("REDIS_DB_PORT", "6379"),
username=os.getenv("REDIS_DB_USERNAME", ""),
password=os.getenv("REDIS_DB_PASSWORD", ""),
decode_responses=True
)
if embedding_type=='openai':
# Vectorize the query using OpenAI's text-embedding-ada-002 model
openai.api_key = openai_api_key
print("Vectorizing query...")
model_id="text-embedding-ada-002"
openaiembed = openai.Embedding.create(
input=query_string,
engine=model_id)
query_vector = openaiembed["data"][0]["embedding"]
# Convert the vector to a numpy array
query_vector = np.array(query_vector).astype(np.float32).tobytes()
base_query = "*=>[KNN 5 @openai_embeddings $vector AS vector_score]"
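# RediSearch KNN syntax: "*" applies no pre-filter, the KNN clause returns the 5 nearest vectors
# in the chosen embedding field, and the distance is exposed under the alias vector_score.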
if embedding_type=='sbert':
model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
embeddings = model.encode(query_string)
# Convert the vector to a numpy array
query_vector = np.array(embeddings).astype(np.float32).tobytes()
base_query = "*=>[KNN 5 @sbert_embeddings $vector AS vector_score]"
query = Query(base_query).return_fields("plain_text", "vector_score").sort_by("vector_score").dialect(2)
try:
results = redis_client.ft("embeddings").search(query, query_params={"vector": query_vector})
except Exception as e:
print("Error calling Redis search: ", e)
return None
return process_result(results)
def get_vss_hybrid_year_results(query_string, start_year, end_year, api, apikey):
# Redis connection details
redis_client = redis.Redis(host=os.getenv("REDIS_DB_HOST", 'redis-stack'), # Local redis error
port= os.getenv("REDIS_DB_PORT", "6379"),
username=os.getenv("REDIS_DB_USERNAME", ""),
password=os.getenv("REDIS_DB_PASSWORD", ""),
decode_responses=True
)
if api=='openai':
# Vectorize the query using OpenAI's text-embedding-ada-002 model
openai.api_key = apikey
model_id="text-embedding-ada-002"
openaiembed = openai.Embedding.create(
input=query_string,
engine=model_id)
query_vector = openaiembed["data"][0]["embedding"]
# Convert the vector to a numpy array
query_vector = np.array(query_vector).astype(np.float32).tobytes()
base_query = "(@year:["+str(start_year)+" "+str(end_year)+"])=>[KNN 5 @openai_embeddings $vector AS vector_score]"
if api=='sbert':
model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
embeddings = model.encode(query_string)
# Convert the vector to a numpy array
query_vector = np.array(embeddings).astype(np.float32).tobytes()
base_query = "(@year:["+str(start_year)+" "+str(end_year)+"])=>[KNN 5 @sbert_embeddings $vector AS vector_score]"
print(base_query)
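# Hybrid query: the numeric year-range filter narrows the candidate set before the KNN ranking
# over the selected embedding field is applied.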
query = Query(base_query).return_fields("plain_text", "vector_score").sort_by("vector_score").dialect(2)
try:
results = redis_client.ft("embeddings").search(query, query_params={"vector": query_vector})
except Exception as e:
print("Error calling Redis search: ", e)
return None
return process_result(results)
def get_vss_hybrid_company_results(query_string,company,api,apikey):
# Redis connection details
redis_client = redis.Redis(host=os.getenv("REDIS_DB_HOST", 'redis-stack'), # Local redis error
port= os.getenv("REDIS_DB_PORT", "6379"),
username=os.getenv("REDIS_DB_USERNAME", ""),
password=os.getenv("REDIS_DB_PASSWORD", ""),
decode_responses=True
)
if api=='openai':
# Vectorize the query using OpenAI's text-embedding-ada-002 model
openai.api_key = apikey
model_id="text-embedding-ada-002"
openaiembed = openai.Embedding.create(
input=query_string,
engine=model_id)
query_vector = openaiembed["data"][0]["embedding"]
# Convert the vector to a numpy array
query_vector = np.array(query_vector).astype(np.float32).tobytes()
base_query = "(@company_ticker:"+company+")=>[KNN 5 @openai_embeddings $vector AS vector_score]"
if api=='sbert':
model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
embeddings = model.encode(query_string)
# Convert the vector to a numpy array
query_vector = np.array(embeddings).astype(np.float32).tobytes()
base_query = "(@company_ticker:"+company+")=>[KNN 5 @sbert_embeddings $vector AS vector_score]"
print(base_query)
query = Query(base_query).return_fields("plain_text", "vector_score").sort_by("vector_score").dialect(2)
try:
results = redis_client.ft("embeddings").search(query, query_params={"vector": query_vector})
except Exception as e:
print("Error calling Redis search: ", e)
return None
return process_result(results)
def process_result(results):
response = []
for i, embedd in enumerate(results.docs):
score = 1 - float(embedd.vector_score)
print(f"\t{i}. {embedd.plain_text} (Score: {round(score ,3) })")
logger.info(f"\t{i}. {embedd.plain_text} (Score: {round(score ,3) })")
response.append(f"\t{i}. {embedd.plain_text} (Score: {round(score ,3) })")
return response
| [] |
2024-01-10 | atbradley/llm-sandbox | oasb.py | import os
import json
import yaml
import time
import openai
from dotenv import load_dotenv
load_dotenv()
MODEL = os.getenv("MODEL", "gpt-4")
openai.api_key = os.getenv("API_KEY")
# TODO: New version that uses the Chat Completions API and sends a list of messages
# with each request: https://platform.openai.com/docs/guides/gpt/chat-completions-api
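# A minimal sketch of the TODO above (illustrative only; the helper name and structure are
# assumptions, not part of the original script): keep the whole conversation in a list and
# send it with every request so the model sees earlier turns.
def chat_with_history(history, prompt, model=MODEL):
    # Append the new user turn, call the Chat Completions API with the full history,
    # record the assistant's reply back into the history, and return its text.
    history.append({"role": "user", "content": prompt})
    reply = openai.ChatCompletion.create(model=model, messages=history)
    message = reply['choices'][0]['message']
    history.append({"role": message['role'], "content": message['content']})
    return message['content']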
while True:
prompt = input("Prompt ('q' to quit, 'f' to read from file.): ")
if prompt == "q":
break
if prompt == "f":
#TODO: Offer to default to the newest .md file.
try:
with open(input('Filename: '), "r") as f:
prompt = f.read()
except FileNotFoundError:
print("File not found.")
continue
print("sending...")
response = openai.ChatCompletion.create(model=MODEL,
messages=[{"role": "user", "content": prompt}])
fname = f"response{time.time_ns()}.json"
transdata = {
"request": {
"prompt": prompt,
},
"response": response,
}
jsondata = json.dumps(transdata, indent=4)
with open(fname, "w") as f:
f.write(jsondata)
fname = f"response{time.time_ns()}.yaml"
yamldata = json.loads(jsondata)
yaml.dump(yamldata, open(fname, "w"), indent=4, Dumper=yaml.SafeDumper)
print()
#print(response.keys())
message = response['choices'][0]['message']
print("role:", message.get('role', "None"))
print("content:", message.get('content', "No content"))
print()
print()
| [
"Prompt ('q' to quit, 'f' to read from file.): "
] |
2024-01-10 | yash2mehta/langchain_debug | debug.py | # pdf imports
import fitz
from pprint import pprint
import camelot
import PyPDF2
from PyPDF2 import PdfReader
# import pdfplumber
# Langchain imports
from langchain.chains import RetrievalQA
from langchain.chains import create_extraction_chain
from langchain.indexes import VectorstoreIndexCreator
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.document_loaders import CSVLoader
from langchain.llms import OpenAI
from langchain.document_loaders import UnstructuredPDFLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFLoader
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.openai_functions.utils import (
_convert_schema,
_resolve_schema_references,
get_llm_kwargs,
)
from langchain.output_parsers.openai_functions import (
JsonKeyOutputFunctionsParser,
PydanticAttrOutputFunctionsParser,
)
from langchain.prompts import ChatPromptTemplate
from langchain.pydantic_v1 import BaseModel
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
# LlamaIndex imports
from llama_hub.file.pymu_pdf.base import PyMuPDFReader
from llama_index import Document, SummaryIndex
from llama_index import VectorStoreIndex, ServiceContext, LLMPredictor
from llama_index.query_engine import PandasQueryEngine, RetrieverQueryEngine
from llama_index.retrievers import RecursiveRetriever
from llama_index.schema import IndexNode
from llama_index.llms import OpenAI
from llama_hub.file.pymu_pdf.base import PyMuPDFReader
from llama_index.retrievers import RecursiveRetriever
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.response_synthesizers import get_response_synthesizer
# Other library imports
import pandas as pd
import os
import time
from typing import Any, List, Optional
from pathlib import Path
import pickle
import openai
## TODO - When trying to replicate this, please use your OpenAI key
openai.api_key = os.environ["OPENAI_API_KEY"]
# initialize PDF reader
reader = PyMuPDFReader()
# specify the directory you want to use
directory = 'Documents'
# Dataframe of the tables from the document we will extract
table_dfs = []
# Initialize docs as an empty list
docs = []
# Iterate over files in that directory
for filename in os.listdir(directory):
# if file has a .pdf extension
if filename.endswith(".pdf"):
# Construct full file path
file_path = os.path.join(directory, filename)
print(f"Processing file: {file_path}")
# Load the file and append the data to docs
docs.append(reader.load(file_path))
print("Number of documents read:", len(docs))
def get_tables(path: str):
# Open the file in read binary mode
with open(path, 'rb') as file:
# Open pdf
doc = fitz.open(path)
# Length of pdf
num_pages = doc.page_count
# Loop through all pages
for page in range(0, num_pages):
# Record the start time for this iteration
start_time = time.time()
# print page number to keep track of which page it is parsing
print("\n")
print(f"Current page number evaluation: {page}")
current_page = doc[page]
# Read pdf to extract tables from that specific page
table_object = current_page.find_tables()
# This uses the tables attribute of object named TableFinder - Gives list of tables
table_list = table_object.tables
# display number of tables found on certain page
print(f"{len(table_list)} total tables (either empty or non-empty) found on page {page}")
print('***********')
#print(type(table_list))
#display(table_list)
#print(table_list)
# non-empty table counter variable
non_empty_table_counter = 0
# empty table counter variable
empty_table_counter = 0
# If table_list is empty
if len(table_list) == 0:
# No tables found on this page
print(f"No tables found on page {page}")
else:
# Iterate through each table in table_list
for table in table_list:
# If dataframe (table) is empty
if table.to_pandas().empty:
# Incrementing empty table counter
empty_table_counter += 1
# Calculate and print elapsed time for this iteration with an empty table
elapsed_time_for_empty_table = time.time() - start_time
print(f"Time taken for page {page} with empty table #{non_empty_table_counter}: {elapsed_time_for_empty_table} seconds")
print('***********')
# If dataframe (table) is not empty
else:
# Incrementing non-empty table counter
non_empty_table_counter += 1
# Convert the table to pandas
table_df = table.to_pandas()
table_df = (
# renames the columns of the dataframe based on the first row of the table
# drop the first row & then finally reset index
table_df.rename(columns=table_df.iloc[0])
.drop(table_df.index[0])
.reset_index(drop=True)
)
# Append to list
table_dfs.append(table_df)
# Calculate and print elapsed time for this iteration with an empty table
elapsed_time_for_table = time.time() - start_time
print(f"Time taken for page {page}, table #{non_empty_table_counter}: {elapsed_time_for_table} seconds")
print('========')
# return table_dfs dataframe
return table_dfs
# iterate over files in that directory
# Check filenames in that particular directory
for filename in os.listdir(directory):
# If it is a pdf file
if filename.endswith(".pdf"):
# Construct full file path
file_path = os.path.join(directory, filename)
print(f"Processing file: {file_path}")
print('------------')
print("\n")
# Call get_tables function
table_dfs = get_tables(file_path)
print('------------')
print("\n")
for i in range(len(table_dfs)):
table_dfs[i] = table_dfs[i].replace('\n','', regex=True)
print(len(table_dfs))
for i in range(len(table_dfs)):
print(table_dfs[i])
# Define the LLM used for querying; this definition was missing in the original, and the model name here is an assumption
llm = OpenAI(model="gpt-3.5-turbo")
# Create a service context object
service_context = ServiceContext.from_defaults(llm=llm)
# Create a query engine for each table in the list of table dataframes
df_query_engines = [
PandasQueryEngine(table_df, service_context=service_context)
for table_df in table_dfs
]
# Initialize doc_nodes as an empty list
doc_nodes = []
# Process each document in docs
for doc in docs:
# Call node parser to extract list of nodes from the given document and then add nodes to doc_nodes list
doc_nodes.extend(service_context.node_parser.get_nodes_from_documents(doc))
print(doc_nodes)
# define index nodes
summaries = [
"This node provides information stored in the tables in the PDFs. Information could be anything about the financial product.",
]
# For each summary in the summaries list, it creates an IndexNode object with the text of the
# summary and assigns it an index_id that includes "pandas" followed by the index (position) of
# the summary in the summaries list, represented by idx.
df_nodes = [
IndexNode(text=summary, index_id=f"pandas{idx}")
for idx, summary in enumerate(summaries)
]
# Below code creates a dictionary called df_id_query_engine_mapping using a dictionary comprehension.
# It is used for mapping index IDs to query engines. For each index ID (which follows the format "pandas0", "pandas1", etc.)
# and its corresponding query engine in the df_query_engines list, it creates a key-value pair in the dictionary.
df_id_query_engine_mapping = {
f"pandas{idx}": df_query_engine
for idx, df_query_engine in enumerate(df_query_engines)
}
print(df_id_query_engine_mapping)
# If this dictionary is empty
if not df_id_query_engine_mapping:
empty_table_df = pd.DataFrame()
df_query_engine = PandasQueryEngine(empty_table_df, service_context=service_context)
# Insert the key-value pair into the dictionary
df_id_query_engine_mapping["pandas0"] = df_query_engine
# construct top-level vector index + query engine
# Creating a VectorStoreIndex object by concatenating doc_nodes (list of nodes) and df_nodes (IndexNode object)
# vector_index will later be used to perform vector-based similarity searches
vector_index = VectorStoreIndex(doc_nodes + df_nodes)
# Creating a vector_retriever object
# Retriever should return the top 3 most similar results for a given query.
vector_retriever = vector_index.as_retriever(similarity_top_k = 3)
print(vector_retriever)
# Initialize an instance of the RecursiveRetriever class
recursive_retriever = RecursiveRetriever(
"vector", # Specify retrieval method/strategy to retrieve data
retriever_dict={"vector": vector_retriever}, # Defines a mapping where the key "vector" is associated with a retriever object called vector_retriever.
query_engine_dict=df_id_query_engine_mapping, # query engine configuration
verbose = True, # Provide additional output
)
# Create the response synthesizer instance
response_synthesizer = get_response_synthesizer(
service_context=service_context, # Contains information or context related to a service or application
response_mode="compact" # Give a compact response
)
# create an instance of the Retriever Query Engine class with specific arguments
query_engine = RetrieverQueryEngine.from_args(
recursive_retriever, response_synthesizer = response_synthesizer, verbose = True # Associate the above defined response_synthesizer with recursive_retriever
)
# Individual prompting - in case, you don't want to use loop and check for specific value
## TODO - This is where I'm facing an error
fund_name_response = query_engine.query(
'''
What is the name of the fund?
'''
)
| [] |
2024-01-10 | timonpalm/langchainF | libs~langchain~langchain~schema~runnable~config.py | from __future__ import annotations
from concurrent.futures import Executor, ThreadPoolExecutor
from contextlib import contextmanager
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Dict,
Generator,
List,
Optional,
Union,
cast,
)
from typing_extensions import TypedDict
from langchain.schema.runnable.utils import (
Input,
Output,
accepts_config,
accepts_run_manager,
)
if TYPE_CHECKING:
from langchain.callbacks.base import BaseCallbackManager, Callbacks
from langchain.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForChainRun,
CallbackManager,
CallbackManagerForChainRun,
)
class EmptyDict(TypedDict, total=False):
pass
class RunnableConfig(TypedDict, total=False):
"""Configuration for a Runnable."""
tags: List[str]
"""
Tags for this call and any sub-calls (eg. a Chain calling an LLM).
You can use these to filter calls.
"""
metadata: Dict[str, Any]
"""
Metadata for this call and any sub-calls (eg. a Chain calling an LLM).
Keys should be strings, values should be JSON-serializable.
"""
callbacks: Callbacks
"""
Callbacks for this call and any sub-calls (eg. a Chain calling an LLM).
Tags are passed to all callbacks, metadata is passed to handle*Start callbacks.
"""
run_name: str
"""
Name for the tracer run for this call. Defaults to the name of the class.
"""
locals: Dict[str, Any]
"""
Variables scoped to this call and any sub-calls. Usually used with
GetLocalVar() and PutLocalVar(). Care should be taken when placing mutable
objects in locals, as they will be shared between parallel sub-calls.
"""
max_concurrency: Optional[int]
"""
Maximum number of parallel calls to make. If not provided, defaults to
ThreadPoolExecutor's default.
"""
recursion_limit: int
"""
Maximum number of times a call can recurse. If not provided, defaults to 25.
"""
configurable: Dict[str, Any]
"""
Runtime values for attributes previously made configurable on this Runnable,
or sub-Runnables, through .configurable_fields() or .configurable_alternatives().
Check .output_schema() for a description of the attributes that have been made
configurable.
"""
def ensure_config(config: Optional[RunnableConfig] = None) -> RunnableConfig:
empty = RunnableConfig(
tags=[],
metadata={},
callbacks=None,
locals={},
recursion_limit=25,
)
if config is not None:
empty.update(
cast(RunnableConfig, {k: v for k, v in config.items() if v is not None})
)
return empty
def get_config_list(
config: Optional[Union[RunnableConfig, List[RunnableConfig]]], length: int
) -> List[RunnableConfig]:
"""
Helper method to get a list of configs from a single config or a list of
configs, useful for subclasses overriding batch() or abatch().
"""
if length < 0:
raise ValueError(f"length must be >= 0, but got {length}")
if isinstance(config, list) and len(config) != length:
raise ValueError(
f"config must be a list of the same length as inputs, "
f"but got {len(config)} configs for {length} inputs"
)
return (
list(map(ensure_config, config))
if isinstance(config, list)
else [patch_config(config, copy_locals=True) for _ in range(length)]
)
def patch_config(
config: Optional[RunnableConfig],
*,
copy_locals: bool = False,
callbacks: Optional[BaseCallbackManager] = None,
recursion_limit: Optional[int] = None,
max_concurrency: Optional[int] = None,
run_name: Optional[str] = None,
configurable: Optional[Dict[str, Any]] = None,
) -> RunnableConfig:
config = ensure_config(config)
if copy_locals:
config["locals"] = config["locals"].copy()
if callbacks is not None:
# If we're replacing callbacks we need to unset run_name
# As that should apply only to the same run as the original callbacks
config["callbacks"] = callbacks
if "run_name" in config:
del config["run_name"]
if recursion_limit is not None:
config["recursion_limit"] = recursion_limit
if max_concurrency is not None:
config["max_concurrency"] = max_concurrency
if run_name is not None:
config["run_name"] = run_name
if configurable is not None:
config["configurable"] = {**config.get("configurable", {}), **configurable}
return config
def merge_configs(*configs: Optional[RunnableConfig]) -> RunnableConfig:
base: RunnableConfig = {}
# Even though the keys aren't literals this is correct
# because both dicts are same type
for config in (c for c in configs if c is not None):
for key in config:
if key == "metadata":
base[key] = { # type: ignore
**base.get(key, {}), # type: ignore
**(config.get(key) or {}), # type: ignore
}
elif key == "tags":
base[key] = list( # type: ignore
set(base.get(key, []) + (config.get(key) or [])), # type: ignore
)
elif key == "configurable":
base[key] = { # type: ignore
**base.get(key, {}), # type: ignore
**(config.get(key) or {}), # type: ignore
}
else:
base[key] = config[key] or base.get(key) # type: ignore
return base
def call_func_with_variable_args(
func: Union[
Callable[[Input], Output],
Callable[[Input, RunnableConfig], Output],
Callable[[Input, CallbackManagerForChainRun], Output],
Callable[[Input, CallbackManagerForChainRun, RunnableConfig], Output],
],
input: Input,
config: RunnableConfig,
run_manager: Optional[CallbackManagerForChainRun] = None,
**kwargs: Any,
) -> Output:
"""Call function that may optionally accept a run_manager and/or config."""
if accepts_config(func):
if run_manager is not None:
kwargs["config"] = patch_config(config, callbacks=run_manager.get_child())
else:
kwargs["config"] = config
if run_manager is not None and accepts_run_manager(func):
kwargs["run_manager"] = run_manager
return func(input, **kwargs) # type: ignore[call-arg]
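# Dispatch sketch with hypothetical callables (not part of this module): the helpers
# inspect the signature, so a callable with a "config" parameter receives the config
# (patched with the run manager's child callbacks when one is given), a callable with
# a "run_manager" parameter receives the run manager, and a plain one-argument
# function is simply called as func(input).
#
# def plain(x): ...                    # called as plain(input)
# def traced(x, run_manager): ...      # also gets kwargs["run_manager"]
# def configured(x, config): ...       # also gets kwargs["config"]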
async def acall_func_with_variable_args(
func: Union[
Callable[[Input], Awaitable[Output]],
Callable[[Input, RunnableConfig], Awaitable[Output]],
Callable[[Input, AsyncCallbackManagerForChainRun], Awaitable[Output]],
Callable[
[Input, AsyncCallbackManagerForChainRun, RunnableConfig],
Awaitable[Output],
],
],
input: Input,
config: RunnableConfig,
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
**kwargs: Any,
) -> Output:
"""Call function that may optionally accept a run_manager and/or config."""
if accepts_config(func):
if run_manager is not None:
kwargs["config"] = patch_config(config, callbacks=run_manager.get_child())
else:
kwargs["config"] = config
if run_manager is not None and accepts_run_manager(func):
kwargs["run_manager"] = run_manager
return await func(input, **kwargs) # type: ignore[call-arg]
def get_callback_manager_for_config(config: RunnableConfig) -> CallbackManager:
from langchain.callbacks.manager import CallbackManager
return CallbackManager.configure(
inheritable_callbacks=config.get("callbacks"),
inheritable_tags=config.get("tags"),
inheritable_metadata=config.get("metadata"),
)
def get_async_callback_manager_for_config(
config: RunnableConfig,
) -> AsyncCallbackManager:
from langchain.callbacks.manager import AsyncCallbackManager
return AsyncCallbackManager.configure(
inheritable_callbacks=config.get("callbacks"),
inheritable_tags=config.get("tags"),
inheritable_metadata=config.get("metadata"),
)
@contextmanager
def get_executor_for_config(config: RunnableConfig) -> Generator[Executor, None, None]:
with ThreadPoolExecutor(max_workers=config.get("max_concurrency")) as executor:
yield executor
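# Usage sketch (illustrative; some_task and items are placeholders): a config carrying
# {"max_concurrency": 4} yields an executor capped at four workers; when the key is
# absent, ThreadPoolExecutor falls back to its own default worker count.
#
# with get_executor_for_config({"max_concurrency": 4}) as executor:
#     futures = [executor.submit(some_task, item) for item in items]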
| [] |
2024-01-10 | timonpalm/langchainF | libs~langchain~langchain~document_loaders~parsers~language~cobol.py | import re
from typing import Callable, List
from langchain.document_loaders.parsers.language.code_segmenter import CodeSegmenter
class CobolSegmenter(CodeSegmenter):
"""Code segmenter for `COBOL`."""
PARAGRAPH_PATTERN = re.compile(r"^[A-Z0-9\-]+(\s+.*)?\.$", re.IGNORECASE)
DIVISION_PATTERN = re.compile(
r"^\s*(IDENTIFICATION|DATA|PROCEDURE|ENVIRONMENT)\s+DIVISION.*$", re.IGNORECASE
)
SECTION_PATTERN = re.compile(r"^\s*[A-Z0-9\-]+\s+SECTION.$", re.IGNORECASE)
def __init__(self, code: str):
super().__init__(code)
self.source_lines: List[str] = self.code.splitlines()
def is_valid(self) -> bool:
# Identify presence of any division to validate COBOL code
return any(self.DIVISION_PATTERN.match(line) for line in self.source_lines)
def _extract_code(self, start_idx: int, end_idx: int) -> str:
return "\n".join(self.source_lines[start_idx:end_idx]).rstrip("\n")
def _is_relevant_code(self, line: str) -> bool:
"""Check if a line is part of the procedure division or a relevant section."""
if "PROCEDURE DIVISION" in line.upper():
return True
# Add additional conditions for relevant sections if needed
return False
def _process_lines(self, func: Callable) -> List[str]:
"""A generic function to process COBOL lines based on provided func."""
elements: List[str] = []
start_idx = None
inside_relevant_section = False
for i, line in enumerate(self.source_lines):
if self._is_relevant_code(line):
inside_relevant_section = True
if inside_relevant_section and (
self.PARAGRAPH_PATTERN.match(line.strip().split(" ")[0])
or self.SECTION_PATTERN.match(line.strip())
):
if start_idx is not None:
func(elements, start_idx, i)
start_idx = i
# Handle the last element if exists
if start_idx is not None:
func(elements, start_idx, len(self.source_lines))
return elements
def extract_functions_classes(self) -> List[str]:
def extract_func(elements: List[str], start_idx: int, end_idx: int) -> None:
elements.append(self._extract_code(start_idx, end_idx))
return self._process_lines(extract_func)
def simplify_code(self) -> str:
simplified_lines: List[str] = []
inside_relevant_section = False
omitted_code_added = (
False # To track if "* OMITTED CODE *" has been added after the last header
)
for line in self.source_lines:
is_header = (
"PROCEDURE DIVISION" in line
or "DATA DIVISION" in line
or "IDENTIFICATION DIVISION" in line
or self.PARAGRAPH_PATTERN.match(line.strip().split(" ")[0])
or self.SECTION_PATTERN.match(line.strip())
)
if is_header:
inside_relevant_section = True
# Reset the flag since we're entering a new section/division or
# paragraph
omitted_code_added = False
if inside_relevant_section:
if is_header:
# Add header and reset the omitted code added flag
simplified_lines.append(line)
elif not omitted_code_added:
# Add omitted code comment only if it hasn't been added directly
# after the last header
simplified_lines.append("* OMITTED CODE *")
omitted_code_added = True
return "\n".join(simplified_lines)
| [] |
2024-01-10 | bit2r/chatGPT | code~translator~API_translator.py | from flask import Flask, request, jsonify
import openai
import os
openai.api_key = os.getenv("OPENAI_API_KEY")
app = Flask(__name__)
# Define the translation function
def translate_text(text, source_language, target_language):
prompt = f"Translate the following '{source_language}' text to '{target_language}': {text}"
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant that translates text."},
{"role": "user", "content": prompt}
],
max_tokens=150,
n=1,
stop=None,
temperature=0.5,
)
translation = response.choices[0].message.content.strip()
return translation
@app.route('/translate', methods=['POST'])
def translate():
data = request.get_json()
text = data.get("text")
source_language = data.get("source_language")
target_language = data.get("target_language")
if not text or not source_language or not target_language:
return jsonify({"error": "Missing required parameters"}), 400
translated_text = translate_text(text, source_language, target_language)
return jsonify({"translated_text": translated_text})
if __name__ == '__main__':
app.run(debug=True)
# curl -X POST -H "Content-Type: application/json" -d '{"text": "Hello, world!", "source_language": "English", "target_language": "Korean"}' http://localhost:5000/translate | [
"You are a helpful assistant that translates text.",
"Translate the following 'PLACEHOLDER' text to 'PLACEHOLDER': PLACEHOLDER"
] |
2024-01-10 | bit2r/chatGPT | code~translator~CLI_translator.py | import openai
import os
import argparse
openai.api_key = os.getenv("OPENAI_API_KEY")
# Define the translation function
def translate_text(text, source_language, target_language):
prompt = f"Translate the following '{source_language}' text to '{target_language}': {text}"
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant that translates text."},
{"role": "user", "content": prompt}
],
max_tokens=150,
n=1,
stop=None,
temperature=0.5,
)
translation = response.choices[0].message.content.strip()
return translation
def main():
parser = argparse.ArgumentParser(description="Multilingual Translation CLI")
parser.add_argument("text", help="The text you want to translate")
parser.add_argument("source_language", help="The source language of the text")
parser.add_argument("target_language", help="The target language to translate the text into")
args = parser.parse_args()
translated_text = translate_text(args.text, args.source_language, args.target_language)
print(f"Translated Text: {translated_text}")
if __name__ == "__main__":
main()
# python CLI_translator.py "It's good to see you again." English Korean
| [
"You are a helpful assistant that translates text.",
"Translate the following 'PLACEHOLDER' text to 'PLACEHOLDER': PLACEHOLDER"
] |
2024-01-10 | bit2r/chatGPT | code~translator~GUI_translator.py | import openai
import os
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QLabel, QLineEdit, QPushButton, QTextEdit
openai.api_key = os.getenv("OPENAI_API_KEY")
# Define the translation function
def translate_text(text, source_language, target_language):
prompt = f"Translate the following '{source_language}' text to '{target_language}': {text}"
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant that translates text."},
{"role": "user", "content": prompt}
],
max_tokens=150,
n=1,
stop=None,
temperature=0.5,
)
translation = response.choices[0].message.content.strip()
return translation
class MultilingualTranslationTool(QWidget):
def __init__(self):
super().__init__()
self.init_ui()
def init_ui(self):
self.setWindowTitle("Multilingual Translation Tool")
layout = QVBoxLayout()
# Text input
text_label = QLabel("Text:")
self.text_input = QLineEdit()
# Source language input
source_lang_label = QLabel("Source Language:")
self.source_lang_input = QLineEdit()
# Target language input
target_lang_label = QLabel("Target Language:")
self.target_lang_input = QLineEdit()
# Translate button
translate_button = QPushButton("Translate")
translate_button.clicked.connect(self.on_translate_click)
# Result label
self.result_label = QLabel()
layout.addWidget(text_label)
layout.addWidget(self.text_input)
layout.addWidget(source_lang_label)
layout.addWidget(self.source_lang_input)
layout.addWidget(target_lang_label)
layout.addWidget(self.target_lang_input)
layout.addWidget(translate_button)
layout.addWidget(self.result_label)
self.setLayout(layout)
def on_translate_click(self):
text = self.text_input.text()
source_language = self.source_lang_input.text()
target_language = self.target_lang_input.text()
translated_text = translate_text(text, source_language, target_language)
self.result_label.setText(translated_text)
if __name__ == "__main__":
app = QApplication([])
window = MultilingualTranslationTool()
window.show()
app.exec_()
| [
"You are a helpful assistant that translates text.",
"Translate the following 'PLACEHOLDER' text to 'PLACEHOLDER': PLACEHOLDER"
] |
2024-01-10 | mohcineelharras/autogen-experiments | draft~app_2.py | import os
import autogen
import memgpt.autogen.memgpt_agent as memgpt_autogen
import memgpt.autogen.interface as autogen_interface
import memgpt.agent as agent
import memgpt.system as system
import memgpt.utils as utils
import memgpt.presets as presets
import memgpt.constants as constants
import memgpt.personas.personas as personas
import memgpt.humans.humans as humans
from memgpt.persistence_manager import InMemoryStateManager, InMemoryStateManagerWithPreloadedArchivalMemory, InMemoryStateManagerWithFaiss
import openai
## api keys for the memGPT
#openai.api_base="http://172.19.208.1:1300/v1"
#openai.api_base="http://127.0.0.1:5000"
#openai.api_base="http://127.0.0.1:5001/v1"
openai.api_key = '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'
config_list = [
{
#"model": "zephyr-7B-beta",
"model":"dolphin-2.1-mistral-7b",
#"api_base": "http://172.19.208.1:1300/v1",
"api_base": "http://localhost:5001/v1",
}
]
llm_config = {"config_list": config_list, "seed": 42}
user_proxy = autogen.UserProxyAgent(
name="User_proxy",
system_message="A human admin.",
code_execution_config={"last_n_messages": 2, "work_dir": "groupchat"},
)
interface = autogen_interface.AutoGenInterface() # how MemGPT talks to AutoGen
persistence_manager = InMemoryStateManager()
persona = "I\'m a 10x engineer at a FAANG tech company."
human = "I\'m a team manager at a FAANG tech company."
memgpt_agent = presets.use_preset(presets.SYNC_CHAT, None, 'gpt-4', persona, human, interface, persistence_manager)
# MemGPT coder
coder = memgpt_autogen.MemGPTAgent(
name="MemGPT_coder",
agent=memgpt_agent,
)
# non-MemGPT PM
pm = autogen.AssistantAgent(
name="Product_manager",
system_message="Creative in software product ideas.",
llm_config=llm_config,
)
groupchat = autogen.GroupChat(agents=[user_proxy, coder, pm], messages=[], max_round=12)
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)
user_proxy.initiate_chat(manager, message="First send the message 'Let's go Mario!'") | [] |
2024-01-10 | Trembleo/LLHarmonia | agents.py | # agents.py
from openai import OpenAI
class GeneratorAgent:
def __init__(self, model:str, system_init_content:str):
self.model = model
self.content_history = []
self.system_init_content = system_init_content
self.content_history.append({
"role": "system",
"content": system_init_content
})
def __len__(self):
return len(self.content_history)
def __call__(self, input_prompt:str):
prompt = {
"role": "user",
"content": input_prompt
}
self.content_history.append(prompt)
response = self.__chat_completion()
self.content_history.append(response)
content = response.content.strip()
return content
def __chat_completion(self):
client = OpenAI()
completion = client.chat.completions.create(
model = self.model,
messages = self.content_history
)
response = completion.choices[0].message
return response
def retry_completion(self):
self.content_history = self.content_history[:-1]
content = self.__chat_completion()
return content
def undo_completion(self):
self.content_history = self.content_history[:-2]
def clear_completion(self):
# Reset the history back to a list holding only the original system message
self.content_history = [{"role": "system", "content": self.system_init_content}]
class InterpreterAgent:
def __init__(self, model:str, system_init_content:str):
self.model = model
self.content_history = []
self.system_init_content = system_init_content
self.content_history.append({
"role": "system",
"content": system_init_content
})
def __len__(self):
return len(self.content_history)
def __call__(self, input_prompt:str):
prompt = {
"role": "user",
"content": input_prompt
}
self.content_history.append(prompt)
response = self.__chat_completion()
self.content_history.append(response)
content = response.content.strip()
return content
def __chat_completion(self):
client = OpenAI()
completion = client.chat.completions.create(
model = self.model,
messages = self.content_history,
response_format={ "type": "json_object" }
)
response = completion.choices[0].message
return response
def retry_completion(self):
self.content_history = self.content_history[:-1]
content = self.__chat_completion()
return content
def undo_completion(self):
self.content_history = self.content_history[:-2]
def clear_completion(self):
# Reset the history back to a list holding only the original system message
self.content_history = [{"role": "system", "content": self.system_init_content}]
class ValidatorAgent:
def __init__(self, model:str, system_init_content:str):
self.model = model
self.system_init_content = system_init_content
self.content_history = []
self.content_history.append({
"role": "system",
"content": system_init_content
})
def __call__(self, input_prompt:str):
prompt = {
"role": "user",
"content": input_prompt
}
self.content_history.append(prompt)
response = self.__chat_completion()
content = response.content.strip()
return content
def __chat_completion(self):
client = OpenAI()
completion = client.chat.completions.create(
model = self.model,
messages = self.content_history,
response_format={ "type": "json_object" }
)
response = completion.choices[0].message
return response | [
"{'role': 'user', 'content': PLACEHOLDER}"
] |
2024-01-10 | goemeritus/edsl | edsl~language_models~LanguageModelOpenAIThreeFiveTurbo.py | import openai
from typing import Any
from edsl import CONFIG
from edsl.language_models import LanguageModel
openai.api_key = CONFIG.get("OPENAI_API_KEY")
class LanguageModelOpenAIThreeFiveTurbo(LanguageModel):
"""
Child class of LanguageModel for interacting with OpenAI GPT-3.5 Turbo model.
"""
# Class attributes
_model_ = "gpt-3.5-turbo"
_parameters_ = {
"temperature": 0.5,
"max_tokens": 1000,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"use_cache": True,
}
def __init__(self, **kwargs):
self.model = self._model_
# set parameters, with kwargs taking precedence over defaults
self.parameters = dict({})
for parameter, default_value in self._parameters_.items():
if parameter in kwargs:
self.parameters[parameter] = kwargs[parameter]
else:
self.parameters[parameter] = default_value
kwargs[parameter] = default_value
super().__init__(**kwargs)
def execute_model_call(
self, prompt: str, system_prompt: str = ""
) -> dict[str, Any]:
"""Calls the OpenAI API and returns the API response."""
return openai.chat.completions.create(
model=self.model,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": prompt},
],
temperature=self.temperature,
max_tokens=self.max_tokens,
top_p=self.top_p,
frequency_penalty=self.frequency_penalty,
presence_penalty=self.presence_penalty,
).model_dump()
@staticmethod
def parse_response(raw_response: dict[str, Any]) -> str:
"""Parses the API response and returns the response text."""
return raw_response["choices"][0]["message"]["content"]
def main():
from edsl.language_models import LanguageModelOpenAIThreeFiveTurbo
m = LanguageModelOpenAIThreeFiveTurbo(use_cache=False)
m
m.execute_model_call(prompt="How are you?")
m.execute_model_call(
system_prompt="Respond as if you are a human", prompt="How are you?"
)
raw_english = m.get_raw_response(
system_prompt="You are pretending to be a human taking a survey. Do not break character.",
prompt="What is your favorite color?",
)
print(raw_english)
print(m.parse_response(raw_english))
print(m.cost(raw_english))
m = LanguageModelOpenAIThreeFiveTurbo(use_cache=True)
response = m.execute_model_call(prompt="How are you?")
raw_german = m.get_raw_response(
prompt="What is your favorite color",
system_prompt="""You pretending to be a human taking a survey. Do not break character. You only respond in German.""",
)
print(raw_german)
print(m.parse_response(raw_german))
print(m.cost(raw_german))
| [] |
2024-01-10 | goemeritus/edsl | edsl~language_models~LanguageModelOpenAIFour.py | import openai
import re
from typing import Any
from edsl import CONFIG
from edsl.language_models import LanguageModel
openai.api_key = CONFIG.get("OPENAI_API_KEY")
class LanguageModelOpenAIFour(LanguageModel):
"""
Child class of LanguageModel for interacting with OpenAI GPT-4 model.
"""
_model_ = "gpt-4-1106-preview"
# _model_ = "gpt-4"
_parameters_ = {
"temperature": 0.5,
"max_tokens": 1000,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"use_cache": True,
}
def __init__(self, **kwargs):
self.model = self._model_
# set parameters, with kwargs taking precedence over defaults
self.parameters = dict({})
for parameter, default_value in self._parameters_.items():
if parameter in kwargs:
self.parameters[parameter] = kwargs[parameter]
else:
self.parameters[parameter] = default_value
kwargs[parameter] = default_value
super().__init__(**kwargs)
def execute_model_call(
self, prompt: str, system_prompt: str = ""
) -> dict[str, Any]:
"""Calls the OpenAI API and returns the API response."""
return openai.chat.completions.create(
model=self.model,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": prompt},
],
temperature=self.temperature,
max_tokens=self.max_tokens,
top_p=self.top_p,
frequency_penalty=self.frequency_penalty,
presence_penalty=self.presence_penalty,
).model_dump()
@staticmethod
def parse_response(raw_response: dict[str, Any]) -> str:
"""Parses the API response and returns the response text."""
response = raw_response["choices"][0]["message"]["content"]
pattern = r"^```json(?:\\n|\n)(.+?)(?:\\n|\n)```$"
match = re.match(pattern, response, re.DOTALL)
if match:
return match.group(1)
else:
return response
def main():
from edsl.language_models import LanguageModelOpenAIFour
m = LanguageModelOpenAIFour(use_cache=False)
m
m.execute_model_call(prompt="How are you?")
m.execute_model_call(
system_prompt="Pretend you are human. Do not break character. Only respond shortly, without asking any questions.",
prompt="How are you?",
)
raw_english = m.get_raw_response(
system_prompt="Pretend you are human. Do not break character. Only respond shortly, without asking any questions.",
prompt="What is your favorite color?",
)
print(m.parse_response(raw_english))
print(m.cost(raw_english))
# ----
system_prompt = "Pretend you are human. Do not break character. Only respond shortly, without asking any questions."
prompt = "What is your favorite color?"
m = LanguageModelOpenAIFour(use_cache=True)
# the execute model call should be a dict
raw_german = m.execute_model_call(system_prompt=system_prompt, prompt=prompt)
raw_german = m.get_raw_response(system_prompt=system_prompt, prompt=prompt)
print(raw_german)
print(m.parse_response(raw_german))
print(m.cost(raw_german))
| [
"Pretend you are human. Do not break character. Only respond shortly, without asking any questions.",
"What is your favorite color?"
] |
2024-01-10 | amitpuri/LLM-Text-Completion | gradio-app.py | import os
import gradio as gr
import openai
import google.generativeai as palm
import together
llm_api_options = ["OpenAI API","Azure OpenAI API","Google PaLM API", "Llama 2"]
TEST_MESSAGE = "Write an introductory paragraph to explain Generative AI to the reader of this content."
openai_models = ["gpt-4", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0613", "gpt-3.5-turbo",
"gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", "text-davinci-003",
"text-davinci-002", "text-curie-001", "text-babbage-001", "text-ada-001"]
google_palm_models = ["models/text-bison-001", "models/chat-bison-001","models/embedding-gecko-001"]
temperature = 0.7
def openai_text_completion(openai_api_key: str, prompt: str, model: str):
try:
system_prompt: str = "Explain in detail to help student understand the concept.",
assistant_prompt: str = None,
messages = [
{"role": "user", "content": f"{prompt}"},
{"role": "system", "content": f"{system_prompt}"},
{"role": "assistant", "content": f"{assistant_prompt}"}
]
openai.api_key = openai_api_key
openai.api_version = '2020-11-07'
completion = openai.ChatCompletion.create(
model = model,
messages = messages,
temperature = temperature
)
response = completion["choices"][0]["message"].content
return "", response
except Exception as exception:
print(f"Exception Name: {type(exception).__name__}")
print(exception)
return f" openai_text_completion Error - {exception}", ""
def azure_openai_text_completion(azure_openai_api_key: str, azure_endpoint: str, azure_deployment_name: str, prompt: str, model: str):
try:
system_prompt: str = "Explain in detail to help student understand the concept.",
assistant_prompt: str = None,
messages = [
{"role": "user", "content": f"{prompt}"},
{"role": "system", "content": f"{system_prompt}"},
{"role": "assistant", "content": f"{assistant_prompt}"}
]
openai.api_key = azure_openai_api_key
openai.api_type = "azure"
openai.api_version = "2023-05-15"
openai.api_base = f"https://{azure_endpoint}.openai.azure.com"
completion = openai.ChatCompletion.create(
model = model,
engine = azure_deployment_name,
messages = messages,
temperature = temperature
)
response = completion["choices"][0]["message"].content
return "", response
except Exception as exception:
print(f"Exception Name: {type(exception).__name__}")
print(exception)
return f" azure_openai_text_completion Error - {exception}", ""
def palm_text_completion(google_palm_key: str, prompt: str, model: str):
try:
candidate_count = 1
top_k = 40
top_p = 0.95
max_output_tokens = 1024
palm.configure(api_key=google_palm_key)
defaults = {
'model': model,
'temperature': temperature,
'candidate_count': candidate_count,
'top_k': top_k,
'top_p': top_p,
'max_output_tokens': max_output_tokens,
'stop_sequences': [],
'safety_settings': [{"category":"HARM_CATEGORY_DEROGATORY","threshold":1},{"category":"HARM_CATEGORY_TOXICITY","threshold":1},{"category":"HARM_CATEGORY_VIOLENCE","threshold":2},{"category":"HARM_CATEGORY_SEXUAL","threshold":2},{"category":"HARM_CATEGORY_MEDICAL","threshold":2},{"category":"HARM_CATEGORY_DANGEROUS","threshold":2}],
}
response = palm.generate_text(
**defaults,
prompt=prompt
)
return "", response.result
except Exception as exception:
print(f"Exception Name: {type(exception).__name__}")
print(exception)
return f" palm_text_completion Error - {exception}", ""
def test_handler(optionSelection,
openai_key,
azure_openai_key,
azure_openai_api_base,
azure_openai_deployment_name,
google_generative_api_key,
together_api_key,
prompt: str = TEST_MESSAGE,
openai_model_name: str ="gpt-4",
google_model_name: str ="models/text-bison-001",
together_model_name: str = "togethercomputer/llama-2-70b-chat"
):
match optionSelection:
case "OpenAI API":
message, response = openai_text_completion(openai_key, prompt,openai_model_name)
return message, response
case "Azure OpenAI API":
message, response = azure_openai_text_completion(azure_openai_key, azure_openai_api_base, azure_openai_deployment_name, prompt,openai_model_name)
return message, response
case "Google PaLM API":
message, response = palm_text_completion(google_generative_api_key, prompt,google_model_name)
return message, response
case "Llama 2":
together.api_key = together_api_key
model: str = together_model_name
output = together.Complete.create(prompt, model=model,temperature=temperature)
return "Response from Together API", output['output']['choices'][0]['text']
case _:
if optionSelection not in llm_api_options:
return ValueError("Invalid choice!"), ""
with gr.Blocks() as LLMDemoTabbedScreen:
with gr.Tab("Text-to-Text (Text Completion)"):
llm_options = gr.Radio(llm_api_options, label="Select one", info="Which service do you want to use?", value="OpenAI API")
with gr.Row():
with gr.Column():
test_string = gr.Textbox(label="Try String", value=TEST_MESSAGE, lines=5)
test_string_response = gr.Textbox(label="Response", lines=5)
test_string_output_info = gr.Label(value="Output Info", label="Info")
test_button = gr.Button("Try it")
with gr.Tab("API Settings"):
with gr.Tab("Open AI"):
openai_model = gr.Dropdown(openai_models, value="gpt-4", label="Model", info="Select one, for Natural language")
openai_key = gr.Textbox(label="OpenAI API Key", type="password")
with gr.Tab("Azure Open AI"):
with gr.Row():
with gr.Column():
azure_openai_key = gr.Textbox(label="Azure OpenAI API Key", type="password")
azure_openai_api_base = gr.Textbox(label="Azure OpenAI API Endpoint")
azure_openai_deployment_name = gr.Textbox(label="Azure OpenAI API Deployment Name")
with gr.Tab("Google PaLM API"):
with gr.Row():
with gr.Column():
google_model_name = gr.Dropdown(google_palm_models, value="models/text-bison-001", label="Model", info="Select one, for Natural language")
google_generative_api_key = gr.Textbox(label="Google Generative AI API Key", type="password")
with gr.Tab("Llama-2"):
with gr.Row():
with gr.Column():
together_model_name = gr.Dropdown(['togethercomputer/llama-2-70b-chat'], value="togethercomputer/llama-2-70b-chat", label="Model", info="Select one, for Natural language")
together_api_key = gr.Textbox(label="Together API Key", type="password")
test_button.click(
fn=test_handler,
inputs=[llm_options,
openai_key,
azure_openai_key,
azure_openai_api_base,
azure_openai_deployment_name,
google_generative_api_key,
together_api_key,
test_string,
openai_model,
google_model_name,
together_model_name],
outputs=[test_string_output_info, test_string_response]
)
if __name__ == "__main__":
LLMDemoTabbedScreen.launch()
| [
"Explain in detail to help student understand the concept.",
"PLACEHOLDER",
"None"
] |
2024-01-10 | amitpuri/LLM-Text-Completion | text-completion.py | from dotenv import load_dotenv
load_dotenv()
import os
import openai
#model = "gpt-35-turbo"
model = "gpt-4"
prompt: str = "Write an introductory paragraph to explain Generative AI to the reader of this content."
system_prompt: str = "Explain in detail to help student understand the concept.",
assistant_prompt: str = None,
messages = [
{"role": "user", "content": f"{prompt}"},
{"role": "system", "content": f"{system_prompt}"},
{"role": "assistant", "content": f"{assistant_prompt}"}
]
openai.api_key = os.getenv("OPENAI_API_KEY")
openai.api_version = '2020-11-07'
completion = openai.ChatCompletion.create(
model = model,
messages = messages,
temperature = 0.7
)
print(completion)
response = completion["choices"][0]["message"].content
print(response)
| [
"Write an introductory paragraph to explain Generative AI to the reader of this content.",
"Explain in detail to help student understand the concept.",
"PLACEHOLDER",
"None"
] |
2024-01-10 | amitpuri/LLM-Text-Completion | text-completion-azure.py | from dotenv import load_dotenv
load_dotenv()
import os
import openai
azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
azure_deployment_name = os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME")
#model = "gpt-35-turbo"
model = "gpt-4"
prompt: str = "Write an introductory paragraph to explain Generative AI to the reader of this content."
system_prompt: str = "Explain in detail to help student understand the concept.",
assistant_prompt: str = None,
messages = [
{"role": "user", "content": f"{prompt}"},
{"role": "system", "content": f"{system_prompt}"},
{"role": "assistant", "content": f"{assistant_prompt}"}
]
openai.api_key = os.getenv("AZURE_OPENAI_KEY")
openai.api_type = "azure"
openai.api_version = "2023-05-15"
openai.api_base = f"https://{azure_endpoint}.openai.azure.com"
completion = openai.ChatCompletion.create(
model = model,
engine = azure_deployment_name,
messages = messages,
temperature = 0.7
)
print(completion)
response = completion["choices"][0]["message"].content
print(response)
| [
"Write an introductory paragraph to explain Generative AI to the reader of this content.",
"Explain in detail to help student understand the concept.",
"PLACEHOLDER",
"None"
] |
2024-01-10 | DecentralAI-HackFS/DecentralAI | api~core~completion.py | import logging
from typing import Optional, List, Union, Tuple
from langchain.callbacks import CallbackManager
from langchain.chat_models.base import BaseChatModel
from langchain.llms import BaseLLM
from langchain.schema import BaseMessage, BaseLanguageModel, HumanMessage
from requests.exceptions import ChunkedEncodingError
from core.constant import llm_constant
from core.callback_handler.llm_callback_handler import LLMCallbackHandler
from core.callback_handler.std_out_callback_handler import DifyStreamingStdOutCallbackHandler, \
DifyStdOutCallbackHandler
from core.conversation_message_task import ConversationMessageTask, ConversationTaskStoppedException, PubHandler
from core.llm.error import LLMBadRequestError
from core.llm.llm_builder import LLMBuilder
from core.chain.main_chain_builder import MainChainBuilder
from core.llm.streamable_chat_open_ai import StreamableChatOpenAI
from core.llm.streamable_open_ai import StreamableOpenAI
from core.memory.read_only_conversation_token_db_buffer_shared_memory import \
ReadOnlyConversationTokenDBBufferSharedMemory
from core.memory.read_only_conversation_token_db_string_buffer_shared_memory import \
ReadOnlyConversationTokenDBStringBufferSharedMemory
from core.prompt.prompt_builder import PromptBuilder
from core.prompt.prompt_template import OutLinePromptTemplate
from core.prompt.prompts import MORE_LIKE_THIS_GENERATE_PROMPT
from models.model import App, AppModelConfig, Account, Conversation, Message
class Completion:
@classmethod
def generate(cls, task_id: str, app: App, app_model_config: AppModelConfig, query: str, inputs: dict,
user: Account, conversation: Optional[Conversation], streaming: bool, is_override: bool = False):
"""
errors: ProviderTokenNotInitError
"""
cls.validate_query_tokens(app.tenant_id, app_model_config, query)
memory = None
if conversation:
# get memory of conversation (read-only)
memory = cls.get_memory_from_conversation(
tenant_id=app.tenant_id,
app_model_config=app_model_config,
conversation=conversation,
return_messages=False
)
inputs = conversation.inputs
conversation_message_task = ConversationMessageTask(
task_id=task_id,
app=app,
app_model_config=app_model_config,
user=user,
conversation=conversation,
is_override=is_override,
inputs=inputs,
query=query,
streaming=streaming
)
# build main chain include agent
main_chain = MainChainBuilder.to_langchain_components(
tenant_id=app.tenant_id,
agent_mode=app_model_config.agent_mode_dict,
memory=ReadOnlyConversationTokenDBStringBufferSharedMemory(memory=memory) if memory else None,
conversation_message_task=conversation_message_task
)
chain_output = ''
if main_chain:
chain_output = main_chain.run(query)
# run the final llm
try:
cls.run_final_llm(
tenant_id=app.tenant_id,
mode=app.mode,
app_model_config=app_model_config,
query=query,
inputs=inputs,
chain_output=chain_output,
conversation_message_task=conversation_message_task,
memory=memory,
streaming=streaming
)
except ConversationTaskStoppedException:
return
except ChunkedEncodingError as e:
# Interrupt by LLM (like OpenAI), handle it.
logging.warning(f'ChunkedEncodingError: {e}')
conversation_message_task.end()
return
@classmethod
def run_final_llm(cls, tenant_id: str, mode: str, app_model_config: AppModelConfig, query: str, inputs: dict,
chain_output: str,
conversation_message_task: ConversationMessageTask,
memory: Optional[ReadOnlyConversationTokenDBBufferSharedMemory], streaming: bool):
final_llm = LLMBuilder.to_llm_from_model(
tenant_id=tenant_id,
model=app_model_config.model_dict,
streaming=streaming
)
# get llm prompt
prompt, stop_words = cls.get_main_llm_prompt(
mode=mode,
llm=final_llm,
pre_prompt=app_model_config.pre_prompt,
query=query,
inputs=inputs,
chain_output=chain_output,
memory=memory
)
final_llm.callback_manager = cls.get_llm_callback_manager(final_llm, streaming, conversation_message_task)
cls.recale_llm_max_tokens(
final_llm=final_llm,
prompt=prompt,
mode=mode
)
response = final_llm.generate([prompt], stop_words)
return response
@classmethod
def get_main_llm_prompt(cls, mode: str, llm: BaseLanguageModel, pre_prompt: str, query: str, inputs: dict,
chain_output: Optional[str],
memory: Optional[ReadOnlyConversationTokenDBBufferSharedMemory]) -> \
Tuple[Union[str | List[BaseMessage]], Optional[List[str]]]:
# disable template string in query
query_params = OutLinePromptTemplate.from_template(template=query).input_variables
if query_params:
for query_param in query_params:
if query_param not in inputs:
inputs[query_param] = '{' + query_param + '}'
pre_prompt = PromptBuilder.process_template(pre_prompt) if pre_prompt else pre_prompt
if mode == 'completion':
prompt_template = OutLinePromptTemplate.from_template(
template=("""Use the following CONTEXT as your learned knowledge:
[CONTEXT]
{context}
[END CONTEXT]
When answer to user:
- If you don't know, just say that you don't know.
- If you don't know when you are not sure, ask for clarification.
Avoid mentioning that you obtained the information from the context.
And answer according to the language of the user's question.
""" if chain_output else "")
+ (pre_prompt + "\n" if pre_prompt else "")
+ "{query}\n"
)
if chain_output:
inputs['context'] = chain_output
context_params = OutLinePromptTemplate.from_template(template=chain_output).input_variables
if context_params:
for context_param in context_params:
if context_param not in inputs:
inputs[context_param] = '{' + context_param + '}'
prompt_inputs = {k: inputs[k] for k in prompt_template.input_variables if k in inputs}
prompt_content = prompt_template.format(
query=query,
**prompt_inputs
)
if isinstance(llm, BaseChatModel):
# use chat llm as completion model
return [HumanMessage(content=prompt_content)], None
else:
return prompt_content, None
else:
messages: List[BaseMessage] = []
human_inputs = {
"query": query
}
human_message_prompt = ""
if pre_prompt:
pre_prompt_inputs = {k: inputs[k] for k in
OutLinePromptTemplate.from_template(template=pre_prompt).input_variables
if k in inputs}
if pre_prompt_inputs:
human_inputs.update(pre_prompt_inputs)
if chain_output:
human_inputs['context'] = chain_output
human_message_prompt += """Use the following CONTEXT as your learned knowledge.
[CONTEXT]
{context}
[END CONTEXT]
When answer to user:
- If you don't know, just say that you don't know.
- If you don't know when you are not sure, ask for clarification.
Avoid mentioning that you obtained the information from the context.
And answer according to the language of the user's question.
"""
if pre_prompt:
human_message_prompt += pre_prompt
query_prompt = "\nHuman: {query}\nAI: "
if memory:
# append chat histories
tmp_human_message = PromptBuilder.to_human_message(
prompt_content=human_message_prompt + query_prompt,
inputs=human_inputs
)
curr_message_tokens = memory.llm.get_messages_tokens([tmp_human_message])
rest_tokens = llm_constant.max_context_token_length[memory.llm.model_name] \
- memory.llm.max_tokens - curr_message_tokens
rest_tokens = max(rest_tokens, 0)
histories = cls.get_history_messages_from_memory(memory, rest_tokens)
# disable template string in query
histories_params = OutLinePromptTemplate.from_template(template=histories).input_variables
if histories_params:
for histories_param in histories_params:
if histories_param not in human_inputs:
human_inputs[histories_param] = '{' + histories_param + '}'
human_message_prompt += "\n\n" + histories
human_message_prompt += query_prompt
# construct main prompt
human_message = PromptBuilder.to_human_message(
prompt_content=human_message_prompt,
inputs=human_inputs
)
messages.append(human_message)
return messages, ['\nHuman:']
@classmethod
def get_llm_callback_manager(cls, llm: Union[StreamableOpenAI, StreamableChatOpenAI],
streaming: bool,
conversation_message_task: ConversationMessageTask) -> CallbackManager:
llm_callback_handler = LLMCallbackHandler(llm, conversation_message_task)
if streaming:
callback_handlers = [llm_callback_handler, DifyStreamingStdOutCallbackHandler()]
else:
callback_handlers = [llm_callback_handler, DifyStdOutCallbackHandler()]
return CallbackManager(callback_handlers)
@classmethod
def get_history_messages_from_memory(cls, memory: ReadOnlyConversationTokenDBBufferSharedMemory,
max_token_limit: int) -> \
str:
"""Get memory messages."""
memory.max_token_limit = max_token_limit
memory_key = memory.memory_variables[0]
external_context = memory.load_memory_variables({})
return external_context[memory_key]
@classmethod
def get_memory_from_conversation(cls, tenant_id: str, app_model_config: AppModelConfig,
conversation: Conversation,
**kwargs) -> ReadOnlyConversationTokenDBBufferSharedMemory:
# only for calc token in memory
memory_llm = LLMBuilder.to_llm_from_model(
tenant_id=tenant_id,
model=app_model_config.model_dict
)
# use llm config from conversation
memory = ReadOnlyConversationTokenDBBufferSharedMemory(
conversation=conversation,
llm=memory_llm,
max_token_limit=kwargs.get("max_token_limit", 2048),
memory_key=kwargs.get("memory_key", "chat_history"),
return_messages=kwargs.get("return_messages", True),
input_key=kwargs.get("input_key", "input"),
output_key=kwargs.get("output_key", "output"),
message_limit=kwargs.get("message_limit", 10),
)
return memory
@classmethod
def validate_query_tokens(cls, tenant_id: str, app_model_config: AppModelConfig, query: str):
llm = LLMBuilder.to_llm_from_model(
tenant_id=tenant_id,
model=app_model_config.model_dict
)
model_limited_tokens = llm_constant.max_context_token_length[llm.model_name]
max_tokens = llm.max_tokens
if model_limited_tokens - max_tokens - llm.get_num_tokens(query) < 0:
raise LLMBadRequestError("Query is too long")
@classmethod
def recale_llm_max_tokens(cls, final_llm: Union[StreamableOpenAI, StreamableChatOpenAI],
prompt: Union[str, List[BaseMessage]], mode: str):
# recalc max_tokens if sum(prompt_token + max_tokens) over model token limit
model_limited_tokens = llm_constant.max_context_token_length[final_llm.model_name]
max_tokens = final_llm.max_tokens
if mode == 'completion' and isinstance(final_llm, BaseLLM):
prompt_tokens = final_llm.get_num_tokens(prompt)
else:
prompt_tokens = final_llm.get_messages_tokens(prompt)
if prompt_tokens + max_tokens > model_limited_tokens:
max_tokens = max(model_limited_tokens - prompt_tokens, 16)
final_llm.max_tokens = max_tokens
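# Worked example with illustrative numbers: for a model limited to 4,096 context
# tokens, max_tokens=1,024 and a 3,500-token prompt give 3,500 + 1,024 > 4,096,
# so max_tokens is clamped to max(4,096 - 3,500, 16) = 596 before generation.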
@classmethod
def generate_more_like_this(cls, task_id: str, app: App, message: Message, pre_prompt: str,
app_model_config: AppModelConfig, user: Account, streaming: bool):
llm: StreamableOpenAI = LLMBuilder.to_llm(
tenant_id=app.tenant_id,
model_name='gpt-3.5-turbo',
streaming=streaming
)
# get llm prompt
original_prompt, _ = cls.get_main_llm_prompt(
mode="completion",
llm=llm,
pre_prompt=pre_prompt,
query=message.query,
inputs=message.inputs,
chain_output=None,
memory=None
)
original_completion = message.answer.strip()
prompt = MORE_LIKE_THIS_GENERATE_PROMPT
prompt = prompt.format(prompt=original_prompt, original_completion=original_completion)
if isinstance(llm, BaseChatModel):
prompt = [HumanMessage(content=prompt)]
conversation_message_task = ConversationMessageTask(
task_id=task_id,
app=app,
app_model_config=app_model_config,
user=user,
inputs=message.inputs,
query=message.query,
is_override=True if message.override_model_configs else False,
streaming=streaming
)
llm.callback_manager = cls.get_llm_callback_manager(llm, streaming, conversation_message_task)
cls.recale_llm_max_tokens(
final_llm=llm,
prompt=prompt,
mode='completion'
)
llm.generate([prompt])
| [
"\n",
"Use the following CONTEXT as your learned knowledge:\n[CONTEXT]\n{context}\n[END CONTEXT]\n\nWhen answer to user:\n- If you don't know, just say that you don't know.\n- If you don't know when you are not sure, ask for clarification. \nAvoid mentioning that you obtained the information from the context.\nAnd answer according to the language of the user's question.\n",
"\n\nPLACEHOLDER",
"Use the following CONTEXT as your learned knowledge:[CONTEXT]{context}[END CONTEXT]When answer to user:- If you don't know, just say that you don't know.- If you don't know when you are not sure, ask for clarification. Avoid mentioning that you obtained the information from the context.And answer according to the language of the user's question.PLACEHOLDER\n{query}\n",
"Use the following CONTEXT as your learned knowledge.\n[CONTEXT]\n{context}\n[END CONTEXT]\n\nWhen answer to user:\n- If you don't know, just say that you don't know.\n- If you don't know when you are not sure, ask for clarification. \nAvoid mentioning that you obtained the information from the context.\nAnd answer according to the language of the user's question.\n",
"{query}\n",
"\nHuman: {query}\nAI: "
] |
2024-01-10 | timotius22/teststreamlit | common_ticket3.py | import streamlit as st
import openai
import json
# Function to load prompts from the JSON file
def load_prompts():
"""Load prompts from a JSON file."""
with open('prompts.json', 'r') as file:
return json.load(file)
# Functions to format the output
def format_normal(ticket):
"""Format the ticket in a normal, readable text format, similar to provided example."""
ticket = ticket.replace("<h1>", "").replace("</h1>", "")
ticket = ticket.replace("<br>", "")
ticket = ticket.replace("<p>", "").replace("</p>", "")
ticket = ticket.replace("<ol>", "").replace("</ol>", "")
ticket = ticket.replace("<ul>", "").replace("</ul>", "")
ticket = ticket.replace("<li>", " - ").replace("</li>", "\n")
return ticket.strip()
def format_html(ticket):
"""Format the ticket in HTML, based on the provided example."""
return f"<html><body>\n{ticket}\n</body></html>"
def format_jira(ticket):
"""Format the ticket in Jira Markup Language, according to the provided example."""
ticket = ticket.replace("<h1>", "h1. ").replace("</h1>", "")
ticket = ticket.replace("<br>", "")
ticket = ticket.replace("<p>", "").replace("</p>", "")
ticket = ticket.replace("<ol>", "").replace("</ol>", "")
ticket = ticket.replace("<ul>", "").replace("</ul>", "")
ticket = ticket.replace("<li>", "- ").replace("</li>", "")
return ticket.strip()
# Function to apply selected format to ticket
def apply_format(ticket, format_selection):
"""Apply the selected format to the ticket."""
if format_selection == "Normal":
return format_normal(ticket)
elif format_selection == "HTML":
return format_html(ticket)
elif format_selection == "Jira Markup Language":
return format_jira(ticket)
return ticket # Default to normal if no format is matched
# Function to send message to OpenAI and get response
def chat_with_openai(message, history):
"""Send a message to OpenAI and get the response."""
try:
response = openai.ChatCompletion.create(
model="gpt-4-1106-preview",
messages=history + [message]
)
return response.choices[0].message["content"]
except openai.error.OpenAIError as openai_error:
st.error(f"An error occurred with OpenAI: {openai_error}")
except Exception as e:
st.error(f"An unexpected error occurred: {e}")
# Function for refining the ticket
def refine_ticket(refine_input, format_selection):
"""Refines the ticket based on user input."""
with st.spinner('Refining ticket...'):
user_message = {"role": "user", "content": refine_input}
st.session_state['conversation_history'].append(user_message)
gpt_response = chat_with_openai(user_message, st.session_state['conversation_history'])
if gpt_response:
st.session_state['conversation_history'].append({"role": "system", "content": gpt_response})
st.session_state['original_response'] = gpt_response
return apply_format(gpt_response, format_selection)
return None
# Main function to show and refine ticket
def show_ticket():
"""Main function to display and refine tickets."""
st.title("Product GPT")
# Assuming you have stored your OpenAI API key in Streamlit's secrets
openai.api_key = st.session_state.get('api_key', st.secrets["openai"]["api_key"])
ticket_type = st.selectbox("Select the ticket type:", ["Bug", "User Story", "Task", "Spike"])
user_input = st.text_area("Write your ticket here:")
format_selection = st.selectbox("Select the output format:", ["Normal", "HTML", "Jira Markup Language"], key='format_selector')
# Create a placeholder for the formatted ticket
formatted_ticket_placeholder = st.empty()
if st.button("Create Ticket"):
with st.spinner('Creating ticket...'):
prompts = load_prompts()
prompt_text = prompts.get(ticket_type, "")
if not prompt_text:
st.error(f"Could not find a prompt for ticket type: {ticket_type}")
return
prompt = {"role": "user", "content": prompt_text + user_input}
system_prompt = {"role": "system", "content": "You are an experienced product manager and an expert in writing tickets."}
st.session_state['conversation_history'] = [system_prompt, prompt]
gpt_response = chat_with_openai(prompt, st.session_state['conversation_history'])
if gpt_response:
st.session_state['original_response'] = gpt_response
updated_formatted_ticket = apply_format(gpt_response, format_selection)
formatted_ticket_placeholder.text_area("Formatted Ticket", updated_formatted_ticket, height=300, key='formatted_ticket')
if 'original_response' in st.session_state and 'format_selector' in st.session_state and not st.session_state.get('ticket_updated', False):
updated_formatted_ticket = apply_format(st.session_state['original_response'], st.session_state['format_selector'])
formatted_ticket_placeholder.text_area("Formatted Ticket", updated_formatted_ticket, height=300, key='formatted_ticket')
# Refine Ticket Section
if 'conversation_history' not in st.session_state:
st.session_state['conversation_history'] = []
refine_input = st.text_area("How would you like to refine the ticket?", key='refine_input')
if st.button("Refine Ticket"):
updated_formatted_ticket = refine_ticket(refine_input, format_selection)
if updated_formatted_ticket is not None:
# Update the placeholder with the refined ticket content
formatted_ticket_placeholder.text_area("Refined Ticket", updated_formatted_ticket, height=300, key='formatted_ticket')
st.session_state['ticket_updated'] = True | [
"You are an experienced product manager and an expert in writing tickets.",
"{'role': 'user', 'content': 'PLACEHOLDERPLACEHOLDER'}",
"PLACEHOLDERPLACEHOLDER",
"{'role': 'system', 'content': 'You are an experienced product manager and an expert in writing tickets.'}"
] |
2024-01-10 | timotius22/teststreamlit | common_ticket2.py | import streamlit as st
import openai
import json
# Function to load prompts from the JSON file
def load_prompts():
"""Load prompts from a JSON file."""
with open('prompts.json', 'r') as file:
return json.load(file)
# Functions to format the output
def format_normal(ticket):
"""Format the ticket in a normal, readable text format, similar to provided example."""
ticket = ticket.replace("<h1>", "").replace("</h1>", "")
ticket = ticket.replace("<br>", "")
ticket = ticket.replace("<p>", "").replace("</p>", "")
ticket = ticket.replace("<ol>", "").replace("</ol>", "")
ticket = ticket.replace("<ul>", "").replace("</ul>", "")
ticket = ticket.replace("<li>", " - ").replace("</li>", "\n")
return ticket.strip()
def format_html(ticket):
"""Format the ticket in HTML, based on the provided example."""
return f"<html><body>\n{ticket}\n</body></html>"
def format_jira(ticket):
"""Format the ticket in Jira Markup Language, according to the provided example."""
ticket = ticket.replace("<h1>", "h1. ").replace("</h1>", "")
ticket = ticket.replace("<br>", "")
ticket = ticket.replace("<p>", "").replace("</p>", "")
ticket = ticket.replace("<ol>", "").replace("</ol>", "")
ticket = ticket.replace("<ul>", "").replace("</ul>", "")
ticket = ticket.replace("<li>", "- ").replace("</li>", "")
return ticket.strip()
# Function to apply selected format to ticket
def apply_format(ticket, format_selection):
"""Apply the selected format to the ticket."""
if format_selection == "Normal":
return format_normal(ticket)
elif format_selection == "HTML":
return format_html(ticket)
elif format_selection == "Jira Markup Language":
return format_jira(ticket)
return ticket # Default to normal if no format is matched
# Function to send message to OpenAI and get response
def chat_with_openai(message, history):
"""Send a message to OpenAI and get the response."""
try:
response = openai.ChatCompletion.create(
model="gpt-4-1106-preview",
messages=history + [message]
)
return response.choices[0].message["content"]
except openai.error.OpenAIError as openai_error:
st.error(f"An error occurred with OpenAI: {openai_error}")
except Exception as e:
st.error(f"An unexpected error occurred: {e}")
# Main function to show and refine ticket
def show_ticket():
"""Main function to display and refine tickets."""
st.title("Product GPT")
openai.api_key = st.session_state.get('api_key', st.secrets["openai"]["api_key"])
ticket_type = st.selectbox("Select the ticket type:", ["Bug", "User Story", "Task", "Spike"])
user_input = st.text_area("Write your ticket here:")
format_selection = st.selectbox("Select the output format:", ["Normal", "HTML", "Jira Markup Language"])
if st.button("Create Ticket"):
with st.spinner('Creating ticket...'):
prompts = load_prompts()
prompt_text = prompts.get(ticket_type, "")
if not prompt_text:
st.error(f"Could not find a prompt for ticket type: {ticket_type}")
return
prompt = {"role": "user", "content": prompt_text + user_input}
system_prompt = {"role": "system", "content": "You are an experienced product manager and an expert in writing tickets."}
st.session_state['conversation_history'] = [system_prompt, prompt]
gpt_response = chat_with_openai(prompt, st.session_state['conversation_history'])
if gpt_response:
st.session_state['ticket_content'] = gpt_response
formatted_ticket = apply_format(gpt_response, format_selection)
st.text_area("Formatted Ticket", formatted_ticket, height=300)
# Refine Ticket Section with Spinner
if 'conversation_history' not in st.session_state:
st.session_state['conversation_history'] = []
if 'conversation_history' in st.session_state:
refine_input = st.text_area("How would you like to refine the ticket?")
if st.button("Refine Ticket"):
with st.spinner('Refining ticket...'):
user_message = {"role": "user", "content": refine_input}
st.session_state['conversation_history'].append(user_message)
gpt_response = chat_with_openai(user_message, st.session_state['conversation_history'])
if gpt_response:
st.session_state['conversation_history'].append({"role": "system", "content": gpt_response})
st.session_state['ticket_content'] = gpt_response
formatted_ticket = apply_format(gpt_response, format_selection)
st.text_area("Refined Ticket", formatted_ticket, height=300)
| [
"You are an experienced product manager and an expert in writing tickets.",
"{'role': 'user', 'content': 'PLACEHOLDERPLACEHOLDER'}",
"PLACEHOLDERPLACEHOLDER",
"{'role': 'system', 'content': 'You are an experienced product manager and an expert in writing tickets.'}"
] |
2024-01-10 | timotius22/teststreamlit | common_ticket3_norefinement.py | import streamlit as st
import openai
import json
# Function to load prompts from the JSON file
def load_prompts():
"""Load prompts from a JSON file."""
with open('prompts.json', 'r') as file:
return json.load(file)
def format_normal(ticket):
"""Format the ticket in a normal, readable text format, similar to provided example."""
ticket = ticket.replace("<h1>", "").replace("</h1>", "\n")
ticket = ticket.replace("<h2>", "").replace("</h2>", "\n")
ticket = ticket.replace("<br>", "\n")
ticket = ticket.replace("<p>", "").replace("</p>", "\n")
ticket = ticket.replace("<ol>", "").replace("</ol>", "\n")
ticket = ticket.replace("<ul>", "").replace("</ul>", "\n")
ticket = ticket.replace("<li>", " - ").replace("</li>", "\n")
ticket = ticket.replace("<strong>", "").replace("</strong>", "")
return ticket.strip()
def format_html(ticket):
"""Format the ticket in HTML, based on the provided example."""
# Escape special characters here if needed
return f"<html><body>\n{ticket}\n</body></html>"
def format_jira(ticket):
"""Format the ticket in Jira Markup Language, according to the provided example."""
ticket = ticket.replace("<h1>", "h1. ").replace("</h1>", "\n")
ticket = ticket.replace("<h2>", "h2. ").replace("</h2>", "\n")
ticket = ticket.replace("<br>", "\n")
ticket = ticket.replace("<p>", "").replace("</p>", "\n")
ticket = ticket.replace("<ol>", "").replace("</ol>", "\n")
ticket = ticket.replace("<ul>", "").replace("</ul>", "\n")
ticket = ticket.replace("<li>", "- ").replace("</li>", "\n")
ticket = ticket.replace("<strong>", "*").replace("</strong>", "*")
return ticket.strip()
# Function to apply selected format to ticket
def apply_format(ticket, format_selection):
"""Apply the selected format to the ticket."""
if format_selection == "Normal":
return format_normal(ticket)
elif format_selection == "HTML":
return format_html(ticket)
elif format_selection == "Jira Markup Language":
return format_jira(ticket)
return ticket # Default to normal if no format is matched
# Function to send message to OpenAI and get response
def chat_with_openai(message, history):
"""Send a message to OpenAI and get the response."""
try:
response = openai.ChatCompletion.create(
model="gpt-4-1106-preview",
messages=history + [message]
)
return response.choices[0].message["content"]
except openai.error.OpenAIError as openai_error:
st.error(f"An error occurred with OpenAI: {openai_error}")
except Exception as e:
st.error(f"An unexpected error occurred: {e}")
# Main function to show and create ticket
def show_ticket():
"""Main function to display and create tickets."""
st.title("Product GPT")
# Setting up OpenAI API key
openai.api_key = st.session_state.get('api_key', st.secrets["openai"]["api_key"])
ticket_type = st.selectbox("Select the ticket type:", ["Bug", "User Story", "Task", "Spike"])
user_input = st.text_area("Write your ticket here:")
format_selection = st.selectbox("Select the output format:", ["Normal", "HTML", "Jira Markup Language"], key='format_selector')
# Create a placeholder for the formatted ticket
formatted_ticket_placeholder = st.empty()
if st.button("Create/Update Ticket"):
with st.spinner('Creating/Updating ticket...'):
prompts = load_prompts()
prompt_text = prompts.get(ticket_type, "")
if not prompt_text:
st.error(f"Could not find a prompt for ticket type: {ticket_type}")
return
prompt = {"role": "user", "content": prompt_text + user_input}
system_prompt = {"role": "system", "content": "You are an experienced product manager and an expert in writing tickets. You only ever reply with the ticket, and no extra fields. You follow the html template."}
st.session_state['conversation_history'] = [system_prompt, prompt]
try:
response = openai.ChatCompletion.create(
model="gpt-4-1106-preview",
messages=st.session_state['conversation_history']
)
gpt_response = response.choices[0].message["content"]
if gpt_response:
st.session_state['original_response'] = gpt_response
updated_formatted_ticket = apply_format(gpt_response, format_selection)
formatted_ticket_placeholder.text_area("Formatted Ticket", updated_formatted_ticket, height=300, key='formatted_ticket')
except openai.error.OpenAIError as openai_error:
st.error(f"An error occurred with OpenAI: {openai_error}")
except Exception as e:
st.error(f"An unexpected error occurred: {e}")
else:
# Check if there's an existing ticket to format
if 'original_response' in st.session_state and 'format_selector' in st.session_state:
updated_formatted_ticket = apply_format(st.session_state['original_response'], st.session_state['format_selector'])
formatted_ticket_placeholder.text_area("Formatted Ticket", updated_formatted_ticket, height=300, key='formatted_ticket')
| [
"You are an experienced product manager and an expert in writing tickets. You only ever reply with the ticket, and no extra fields. You follow the html template.",
"{'role': 'user', 'content': 'PLACEHOLDERPLACEHOLDER'}",
"PLACEHOLDERPLACEHOLDER",
"conversation_history",
"{'role': 'system', 'content': 'You are an experienced product manager and an expert in writing tickets. You only ever reply with the ticket, and no extra fields. You follow the html template.'}"
] |
2024-01-10 | timotius22/teststreamlit | common_ticket.py | import streamlit as st
import openai
import json
# Function to load prompts from the JSON file
def load_prompts():
with open('prompts.json', 'r') as file:
return json.load(file)
# Functions to format the output
def format_normal(ticket):
# Convert HTML-like tags to readable text
ticket = ticket.replace("<h1>", "").replace("</h1>", "\n")
ticket = ticket.replace("<p>", "").replace("</p>", "\n\n")
# Add more replacements as needed for other tags
return ticket.strip()
def format_html(ticket):
# Wrap the ticket in HTML body tags
return f"<html><body>\n{ticket}\n</body></html>"
def format_jira(ticket):
# Replace HTML tags with Jira Markup equivalents
ticket = ticket.replace("<h1>", "h1. ").replace("</h1>", "\n")
ticket = ticket.replace("<p>", "").replace("</p>", "\n\n")
# Add more replacements as needed for other tags
return ticket.strip()
# Function to apply selected format to ticket
def apply_format(ticket, format_selection):
if format_selection == "Normal":
return format_normal(ticket)
elif format_selection == "HTML":
return format_html(ticket)
elif format_selection == "Jira Markup Language":
return format_jira(ticket)
return ticket # Default to normal if no format is matched
# Main function to show ticket
def show_ticket():
st.title("Product GPT")
openai.api_key = st.session_state.get('api_key', st.secrets["openai"]["api_key"])
ticket_type = st.selectbox("Select the ticket type:", ["Bug", "User Story", "Task", "Spike"])
user_input = st.text_area("Write your ticket here:")
format_selection = st.selectbox("Select the output format:", ["Normal", "HTML", "Jira Markup Language"])
if st.button("Create Ticket") or 'ticket_content' in st.session_state:
prompts = load_prompts()
prompt_text = prompts.get(ticket_type, "")
if not prompt_text:
st.error(f"Could not find a prompt for ticket type: {ticket_type}")
return
if 'ticket_content' not in st.session_state or st.button("Create Ticket"):
prompt = {"role": "user", "content": prompt_text + user_input}
system_prompt = {"role": "system", "content": "You are an experienced product manager and an expert in writing tickets."}
try:
with st.spinner("Creating your ticket... This may take up to two minutes."):
response = openai.ChatCompletion.create(
model="gpt-4-1106-preview",
messages=[system_prompt, prompt]
)
# Check the response structure
if 'choices' not in response or not response['choices']:
st.error("Invalid response structure from OpenAI.")
st.json(response) # Display the raw response for debugging
return
                    ticket = response.choices[0].message.get("content")
if ticket is None:
st.error("No ticket generated. The completion result was empty.")
return
if not isinstance(ticket, str):
st.error("Invalid ticket format.")
st.write(f"Received ticket data type: {type(ticket)}") # Display the type of the received ticket for debugging
return
st.session_state['ticket_content'] = ticket
st.success("Ticket created successfully:")
except openai.error.OpenAIError as openai_error:
st.error(f"An error occurred with OpenAI: {openai_error}")
return
except Exception as e:
st.error(f"An unexpected error occurred: {e}")
return
# Apply selected format and display
formatted_ticket = apply_format(st.session_state['ticket_content'], format_selection)
if format_selection == "HTML":
st.code(formatted_ticket, language="html")
elif format_selection == "Jira Markup Language":
st.code(formatted_ticket, language="markup")
else:
st.text(formatted_ticket)
| [
"You are an experienced product manager and an expert in writing tickets.",
"{'role': 'user', 'content': 'PLACEHOLDERPLACEHOLDER'}",
"PLACEHOLDERPLACEHOLDER",
"[PLACEHOLDER, PLACEHOLDER]",
"{'role': 'system', 'content': 'You are an experienced product manager and an expert in writing tickets.'}"
] |
2024-01-10 | timotius22/teststreamlit | app4.py | import streamlit as st
import openai
from ticket_ui import create_ticket_ui, display_formatted_ticket, refine_ticket_ui
from common_ticket4 import load_prompts, chat_with_openai, refine_ticket_logic
# Function to validate API key against OpenAI
def validate_api_key(api_key):
try:
openai.api_key = api_key
# Simple, low-cost request to validate key
openai.Completion.create(engine="text-davinci-003", prompt="Hello world", max_tokens=5)
return True
except Exception as e:
return False
# Function to show the login page
def show_login_page():
st.title("Welcome to Product GPT")
st.title("Login")
# Radio button for login method selection
login_method = st.radio("Login Method:", ["Username & Password", "API Key"])
if login_method == "Username & Password":
username_input = st.text_input("Username")
password_input = st.text_input("Password", type="password")
if st.button("Login"):
if username_input == st.secrets["login_credentials"]["username"] and \
password_input == st.secrets["login_credentials"]["password"]:
st.session_state['authenticated'] = True
# Redirect to ticket creation page by setting the action
st.session_state['action'] = 'create_ticket'
else:
api_key_input = st.text_input("API Key", type="password")
if st.button("Login with API Key"):
if validate_api_key(api_key_input):
st.session_state['authenticated'] = True
# Redirect to ticket creation page by setting the action
st.session_state['action'] = 'create_ticket'
# Main function
def main():
# If the user is not authenticated, show the login page
if not st.session_state.get('authenticated', False):
show_login_page()
else:
# Define actions for sidebar after login
if 'action' not in st.session_state:
st.session_state['action'] = 'create_ticket'
if st.session_state['action'] == 'create_ticket':
ticket_type, user_input, format_selection, create_ticket = create_ticket_ui()
if create_ticket:
prompts = load_prompts()
prompt_text = prompts.get(ticket_type, "")
if not prompt_text:
st.error(f"Could not find a prompt for ticket type: {ticket_type}")
return
prompt = {"role": "user", "content": prompt_text + user_input}
system_prompt = {"role": "system", "content": "You are an experienced product manager and an expert in writing tickets."}
st.session_state['conversation_history'] = [system_prompt, prompt]
gpt_response = chat_with_openai(prompt, st.session_state['conversation_history'])
display_formatted_ticket(gpt_response, format_selection)
refine_input, refine_ticket = refine_ticket_ui()
if refine_ticket:
updated_ticket = refine_ticket_logic(refine_input, format_selection, st.session_state['conversation_history'])
display_formatted_ticket(updated_ticket, format_selection)
# Sidebar only appears after successful login
with st.sidebar:
if st.button("Create New Ticket"):
st.session_state['action'] = 'create_ticket'
if st.button("Log Out"):
# Clear the session and show the login page
st.session_state.clear()
st.session_state['action'] = 'show_login'
show_login_page()
# Execute actions as per the session state
if st.session_state.get('action') == 'logout':
st.session_state.clear()
show_login_page()
if __name__ == "__main__":
main()
| [
"Hello world",
"You are an experienced product manager and an expert in writing tickets.",
"{'role': 'user', 'content': 'PLACEHOLDERPLACEHOLDER'}",
"PLACEHOLDERPLACEHOLDER",
"{'role': 'system', 'content': 'You are an experienced product manager and an expert in writing tickets.'}"
] |
2024-01-10 | timotius22/teststreamlit | common_ticket4.py | import streamlit as st
import openai
import json
# Function to load prompts from the JSON file
def load_prompts():
"""Load prompts from a JSON file."""
with open('prompts.json', 'r') as file:
return json.load(file)
# Functions to format the output
def format_normal(ticket):
"""Format the ticket in a normal, readable text format, similar to provided example."""
ticket = ticket.replace("<h1>", "").replace("</h1>", "")
ticket = ticket.replace("<br>", "")
ticket = ticket.replace("<p>", "").replace("</p>", "")
ticket = ticket.replace("<ol>", "").replace("</ol>", "")
ticket = ticket.replace("<ul>", "").replace("</ul>", "")
ticket = ticket.replace("<li>", " - ").replace("</li>", "\n")
return ticket.strip()
def format_html(ticket):
"""Format the ticket in HTML, based on the provided example."""
return f"<html><body>\n{ticket}\n</body></html>"
def format_jira(ticket):
"""Format the ticket in Jira Markup Language, according to the provided example."""
ticket = ticket.replace("<h1>", "h1. ").replace("</h1>", "")
ticket = ticket.replace("<br>", "")
ticket = ticket.replace("<p>", "").replace("</p>", "")
ticket = ticket.replace("<ol>", "").replace("</ol>", "")
ticket = ticket.replace("<ul>", "").replace("</ul>", "")
ticket = ticket.replace("<li>", "- ").replace("</li>", "")
return ticket.strip()
# Function to apply selected format to ticket
def apply_format(ticket, format_selection):
"""Apply the selected format to the ticket."""
if format_selection == "Normal":
return format_normal(ticket)
elif format_selection == "HTML":
return format_html(ticket)
elif format_selection == "Jira Markup Language":
return format_jira(ticket)
return ticket # Default to normal if no format is matched
# Function to send message to OpenAI and get response
def chat_with_openai(message, history):
"""Send a message to OpenAI and get the response."""
try:
response = openai.ChatCompletion.create(
model="gpt-4-1106-preview",
messages=history + [message]
)
return response.choices[0].message["content"]
except openai.error.OpenAIError as openai_error:
st.error(f"An error occurred with OpenAI: {openai_error}")
except Exception as e:
st.error(f"An unexpected error occurred: {e}")
# Function for refining the ticket
def refine_ticket_logic(refine_input, format_selection, history):
"""Refines the ticket based on user input."""
with st.spinner('Refining ticket...'):
user_message = {"role": "user", "content": refine_input}
history.append(user_message)
try:
response = openai.ChatCompletion.create(
model="gpt-4-1106-preview",
messages=history
)
gpt_response = response.choices[0].message["content"]
if gpt_response:
history.append({"role": "system", "content": gpt_response})
return apply_format(gpt_response, format_selection)
except openai.error.OpenAIError as openai_error:
st.error(f"An error occurred with OpenAI: {openai_error}")
except Exception as e:
st.error(f"An unexpected error occurred: {e}")
return None
| [] |
2024-01-10 | wangyuxinwhy/generate | tests~test_image_completion.py | from generate import OpenAIChat
def test_image_completion() -> None:
user_input = {
'role': 'user',
'content': [
            {'type': 'text', 'text': 'What’s in this image?'},
{
'type': 'image_url',
'image_url': {
'url': 'https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg',
'detail': 'high',
},
},
],
}
chat = OpenAIChat(model='gpt-4-vision-preview')
output = chat.generate(user_input, max_tokens=10)
assert output.reply != ''
| [
"[{'type': 'text', 'text': 'Whatโs in this image?'}, {'type': 'image_url', 'image_url': {'url': 'https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg', 'detail': 'high'}}]"
] |
2024-01-10 | archit1012/qa-bot-llm | app~models~json_file_processor.py | # from langchain.schema import StrOutputParser
# from langchain_core.runnables import RunnablePassthrough
from langchain.document_loaders import PyPDFLoader, JSONLoader
from langchain.document_loaders import DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from models.file_processor import FileProcessor
# import faiss
from langchain.vectorstores import FAISS
class JsonFileProcessor(FileProcessor):
def document_loader(self, file_path):
try:
loader = JSONLoader(
file_path=file_path,
jq_schema='.',
text_content=False)
return loader.load()
except Exception as e:
print("Something went wrong while performing 'document_loader' operations", e)
def text_splitter(self,documents):
try:
text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=100)
return text_splitter.split_documents(documents)
except Exception as e:
print("Something went wrong while performing 'text_splitter' operations", e)
def prepare_vectordb(self,docs,embeddings):
try:
vector_db = FAISS.from_documents(docs, embeddings)
return vector_db
except Exception as e:
print("Something went wrong while performing 'prepare_vectordb' operations", e)
| [] |
2024-01-10 | archit1012/qa-bot-llm | app~common~openapi.py | from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate
from langchain.chains import LLMChain
# from langchain.chains import RetrievalQA
def get_response_from_query(db, query, model, depth=4):
# retriever = db.as_retriever(search_kwargs={"k": depth})
docs = db.similarity_search(query, k=depth)
docs_page_content = " ".join([d.page_content for d in docs])
# Template to use for the system message prompt
template = """
You are a helpful assistant that that can answer questions from the given context: {context}
Only use the factual information from the context to answer the question.
If you feel like you don't have enough information to answer the question, say "I don't know".
"""
    # System prompt: defines the bot's question-answering behaviour
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
# Human question prompt
human_template = "Answer the following question: {question}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages(
[system_message_prompt, human_message_prompt]
)
chain = LLMChain(llm=model, prompt=chat_prompt)
response = chain.run(question=query, context=docs_page_content)
response = response.replace("\n", "")
return response, docs | [
"[PLACEHOLDER, PLACEHOLDER]",
"t have enough information to answer the question, say \"I don",
"\n You are a helpful assistant that that can answer questions from the given context: {context}\n\n Only use the factual information from the context to answer the question.\n\n If you feel like you don't have enough information to answer the question, say \"I don't know\".\n\n ",
"Answer the following question: {question}"
] |
2024-01-10 | archit1012/qa-bot-llm | app~models~pdf_file_processor.py | # from langchain.schema import StrOutputParser
# from langchain_core.runnables import RunnablePassthrough
from langchain.document_loaders import PyPDFLoader, JSONLoader
# from langchain.document_loaders import DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from models.file_processor import FileProcessor
# import faiss
from langchain.vectorstores import FAISS
from langchain.document_loaders import PyPDFLoader
class PdfFileProcessor(FileProcessor):
def document_loader(self,file_path):
print("pdf file loader")
        loader = PyPDFLoader(file_path=file_path)
return loader.load()
def text_splitter(self,documents):
try:
text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=100)
return text_splitter.split_documents(documents)
except Exception as e:
print("Something went wrong while performing 'text_splitter' operations", e)
def prepare_vectordb(self,docs,embeddings):
try:
vector_db = FAISS.from_documents(docs, embeddings)
return vector_db
except Exception as e:
print("Something went wrong while performing 'prepare_vectordb' operations", e)
| [] |
2024-01-10 | archit1012/qa-bot-llm | app~views~qa_apis.py | from flask import Flask, request, jsonify, Blueprint
from dotenv import load_dotenv
from service.qa_apis_service import process_request
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
load_dotenv()
# Global variable
embeddings = OpenAIEmbeddings()
chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.2)
def hello_world():
return 'Hello World'
def upload_files():
try:
# Check if the POST request has file parts
if 'doc_file' not in request.files or 'question_file' not in request.files:
return jsonify({'error': 'Both files must be provided'})
response = process_request(request,embeddings,chat)
return jsonify(response)
except Exception as e:
print("Internal Server Error", e)
return jsonify("Internal Server Error") | [] |
2024-01-10 | MarkChenYutian/T5-Sentinel-public | generator~gpt4~gpt4_client.py | """
@brief: A GPT4 response generator using Async io
@author: Yutian Chen <[email protected]>
@date: May 15, 2023
"""
import asyncio
import openai
import json
import yaml
import time
from pathlib import Path
from generator.chatgpt.chatgpt_client import \
chatgpt_pred_fn, chatgpt_task_generator, chatgpt_state_initializer, estimate_token_count, HANDLE_STRATEGY
from generator.client_base import AsyncRequestClient, TaskResult
import pipeline.component.text_component as P
Converter = P.WriteExtra({"source": "gpt4"}) >> P.ToJsonStr()
async def gpt4_request_fn(self: AsyncRequestClient, state, subset, uid, text) -> TaskResult:
if state["token"] > self.config["MaxTokenCount"]:
print("Abort due to budget limit.")
raise Exception("Exceed the MaxTokenCount setting")
await self.worker_lock.acquire()
start_time = time.time()
# Ready ... now Work!
estimatedNumTokens = estimate_token_count(text)
if estimatedNumTokens > self.config["MaxLengthAllowed"]:
print("[x]\t", uid,
"failed since it exceeds the token limit (" + str(self.config["MaxLengthAllowed"]) + ")")
self.worker_lock.release()
return TaskResult.CANCEL
try:
response = await openai.ChatCompletion.acreate(
model="gpt-4",
messages=[
{"role": "user", "content": "Rephrase the following paragraph by paragraph:\n\n" + text}
]
)
except openai.error.InvalidRequestError:
# no need to wait, since the request is not sent for some reason
await asyncio.sleep(1.0) # Avoid flushing the API
self.worker_lock.release()
return TaskResult.RETRY
except (openai.error.RateLimitError, openai.error.APIError, openai.error.TryAgain, openai.error.Timeout):
await asyncio.sleep(self.config["WaitTime"])
self.worker_lock.release()
return TaskResult.RETRY
finishReason = response["choices"][0]["finish_reason"]
result = HANDLE_STRATEGY[finishReason]
if result == TaskResult.FINISH:
machineText = response["choices"][0]["message"]["content"].strip()
await self.writer_lock.acquire()
with open(Path(self.config["OutputDirectory"], subset + ".jsonl"), "a", encoding="utf-8") as f:
f.write(Converter({"uid": uid, "text": machineText, "extra": dict()}))
f.write("\n")
self.writer_lock.release()
self.state["processed"].add((subset, uid))
self.state["token"] += response["usage"]["total_tokens"]
# Wait for 60 secs, then release the lock to spawn a new worker coroutine
# (We won't be blocked out)
end_time = time.time()
await asyncio.sleep(self.config["WaitTime"] - (end_time - start_time))
self.worker_lock.release()
return result
if __name__ == "__main__":
with open("./generator/gpt4/gpt4_client.yaml", "r") as f:
chatgpt_config = yaml.safe_load(f)
with open(Path(chatgpt_config["ClientRoot"], "secret.json"), "r") as f:
API_KEY = json.load(f)["OPENAI_API_KEY"]
openai.api_key = API_KEY
ChatGPTClient = AsyncRequestClient(
chatgpt_config,
gpt4_request_fn,
chatgpt_pred_fn,
chatgpt_task_generator,
chatgpt_state_initializer,
display_args=lambda args: args[1]
)
asyncio.run(ChatGPTClient.execute())
| [
"Rephrase the following paragraph by paragraph:\n\nPLACEHOLDER"
] |
2024-01-10 | DarkPr0digy/AI_Heroes_GPT | InsightManager.py | import json
import openai
from datetime import datetime
import os
MODEL_NAME = "gpt-3.5-turbo"
MODEL_TEMPERATURE = 0.9
class InsightManager:
def __init__(self, api_key: str, chatbot_name: str, user_total_characters: int, chatbot_total_words: int, messages):
""" Create an insight manager to generate conversational insights
:param api_key: OpenAI API Key
:param chatbot_name: Name of the chatbot
:param user_total_characters: Total number of characters typed by the user
:param chatbot_total_words: Total number of words used by the chatbot
:param messages: List of messages in the conversation
"""
openai.api_key = api_key
self.chatbot_name = chatbot_name
self.user_total_characters = user_total_characters
self.chatbot_total_words = chatbot_total_words
self.messages = messages
timestamp, conversational_data = self._generate_insights()
self._save_insights(timestamp, conversational_data)
def _generate_insights(self):
""" Generate conversational insights
:return: Timestamp of the conversation, and a dictionary containing the conversational insights"""
self.messages.append(
{"role": "system", "content": "Generate a python formatted dictionary containing the topic of our "
"conversation based on the users input, and if it cannot be determined return UNKNOWN, with its key being 'topic'. Additionally, the name of the user if it can "
"be determined, if not return UNKNOWN, with its key being 'user_name'. Send only the dictionary as a string."})
conversation = openai.ChatCompletion.create(
model=MODEL_NAME,
messages=self.messages,
temperature=MODEL_TEMPERATURE)
# Try get response and convert to dictionary
response = conversation.choices[0].message.content
# response = eval(response)
try:
response_dict = json.loads(response)
topic = response_dict.get('topic')
user_name = response_dict.get('user_name')
except:
topic = "UNKNOWN"
user_name = "UNKNOWN"
timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
conversation_meta_information = {
"Name of chatbot": self.chatbot_name,
"Number of characters typed by user": self.user_total_characters,
"Number of words used by chatbot": self.chatbot_total_words,
"Subject of conversation": topic,
"Name of user": user_name}
conversation_data = {
"meta-data": conversation_meta_information,
"messages": self.messages[1:len(self.messages) - 1]}
return timestamp, conversation_data
def _save_insights(self, timestamp, conversation_data):
""" Save conversational insights to a json file
:param timestamp: Timestamp of the conversation
:param conversation_data: Dictionary containing the necessary insight data
"""
if not os.path.exists('./conversations'):
os.makedirs('conversations')
filename = f'conversations/conversation_{timestamp}.json'
with open(filename, 'w') as file:
json.dump(conversation_data, file, indent=4)
| [
"Generate a python formatted dictionary containing the topic of our conversation based on the users input, and if it cannot be determined return UNKNOWN, with its key being 'topic'. Additionally, the name of the user if it can be determined, if not return UNKNOWN, with its key being 'user_name'. Send only the dictionary as a string."
] |
2024-01-10 | DarkPr0digy/AI_Heroes_GPT | Chatbot.py | import json
import openai
from InsightManager import InsightManager
MODEL_NAME = "gpt-3.5-turbo"
MODEL_TEMPERATURE = 0.9
class Chatbot:
def __init__(self, name: str, user_total_characters=None, chatbot_total_words=None, conversation=None):
""" Create a chatbot with a given personality
:param name: Name of the personality
:param user_total_characters: Total number of characters typed by the user
:param chatbot_total_words: Total number of words used by the chatbot
:param conversation: List of messages in the conversation
"""
self.name = name
self.introduction = None
self.characteristic = None
self.user_total_characters = 0 if user_total_characters is None else user_total_characters
self.chatbot_total_words = 0 if chatbot_total_words is None else chatbot_total_words
# Load API Key from Config File
with open("config.json") as config_file:
config = json.load(config_file)
self.api_key = config["api_keys"]["open_ai"]
openai.api_key = self.api_key
self.messages = [] if conversation is None else conversation
# Load Personality and Introduce Chatbot
self._load_personality(name)
if not self.messages:
self._introduce()
else:
# Print Previous Conversation for user
for message in self.messages:
print(f"{self.name if message['role'] == 'assistant' else message['role']}: {message['content']}")
self.messages.insert(0, {"role": "system", "content": self.characteristic})
def _load_personality(self, personality_name: str):
""" Load the personality from the personalities.json file
:param personality_name: Name of the personality
"""
with open("personalities.json") as personality_file:
personalities = json.load(personality_file)
personality_data = personalities.get(personality_name)
if personality_data:
self.introduction = personalities[personality_name]["starting_message"]
self.characteristic = personalities[personality_name]["characteristic"]
else:
raise ValueError(f"Personality {personality_name} not found")
def _introduce(self):
""" Introduce the chatbot to the user
"""
self.messages.extend([{"role": "system", "content": self.characteristic},
{"role": "assistant", "content": self.introduction}])
print(f"{self.name}: {self.introduction}")
def generate_response(self, user_input: str):
""" Generate a response to the user input
:param user_input: Input from the user
:return: Response from the chatbot"""
if user_input.lower() == "exit":
InsightManager(self.api_key, self.name, self.user_total_characters, self.chatbot_total_words, self.messages)
return "See you next time"
self.messages.append({"role": "user", "content": user_input})
conversation = openai.ChatCompletion.create(
model=MODEL_NAME,
messages=self.messages,
temperature=MODEL_TEMPERATURE)
response = conversation.choices[0].message.content
self.messages.append({"role": "assistant", "content": response})
# Update Counts
self.user_total_characters += len(user_input)
self.chatbot_total_words += len(response.split(" "))
return response
| [] |
2024-01-10 | OrlyIdeate/Slack-bot | modules~similarity.py | import os
import numpy as np
import openai
import mysql.connector
import pickle
from modules.DB import connect_to_db
# Load environment variables from .env
from dotenv import load_dotenv
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
client = openai.OpenAI(api_key=OPENAI_API_KEY)
def get_embedding(text):
response = client.embeddings.create(
model="text-embedding-ada-002",
input=text
)
    # Use the correct method to retrieve the embedding data from the response
embedding = response.data[0].embedding
return embedding
# Cosine similarity formula
def cosine_similarity(vec_a, vec_b):
dot_product = np.dot(vec_a, vec_b)
norm_a = np.linalg.norm(vec_a)
norm_b = np.linalg.norm(vec_b)
return dot_product / (norm_a * norm_b)
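# Worked example (illustrative, not part of the original file): for vec_a = [1, 0]
# and vec_b = [1, 1], dot_product = 1, norm_a = 1, norm_b = sqrt(2) ~ 1.4142,
# so cosine_similarity ~ 1 / 1.4142 ~ 0.7071.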
def get_top_5_similar_texts(message_text):
vector1 = get_embedding(message_text)
db_connection = connect_to_db()
cursor = db_connection.cursor()
query = "SELECT content, vector, url, date, category FROM phase4;"
cursor.execute(query)
rows = cursor.fetchall()
similarity_list = []
for content, vector_bytes, url, date, category in rows:
vector2 = pickle.loads(vector_bytes)
similarity = cosine_similarity(vector1, vector2)
similarity_list.append((similarity, content, url, date, category))
similarity_list.sort(reverse=True)
return similarity_list[:5] | [] |
2024-01-10 | scottsus/Coffee-Chat-AI | src~youtube.py | import logging
import re
import os
from youtube_transcript_api import YouTubeTranscriptApi
from youtube_transcript_api import YouTubeTranscriptApi
from langchain.llms import OpenAI
from langchain.text_splitter import CharacterTextSplitter
from dotenv import load_dotenv
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import OpenAI
from langchain.chains.question_answering import load_qa_chain
from langchain.callbacks import get_openai_callback
from dotenv import load_dotenv
def youtube(youtube_url):
print(f'Searching for {youtube_url}')
try:
load_dotenv()
openai_key = os.getenv('OPENAI_API_KEY')
# log_file = 'logs/youtube.log'
# logging.basicConfig(filename=log_file, encoding='utf-8', level=logging.INFO)
def get_youtube_id(url):
video_id = None
match = re.search(r"(?<=v=)[^&#]+", url)
if match:
video_id = match.group()
else:
match = re.search(r"(?<=youtu.be/)[^&#]+", url)
if match:
video_id = match.group()
return video_id
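        # Illustrative example (not part of the original file): for
        # 'https://www.youtube.com/watch?v=UYOwweziqGI' the first lookbehind pattern
        # extracts 'UYOwweziqGI'; for 'https://youtu.be/UYOwweziqGI' the second
        # pattern returns the same ID.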
if youtube_url:
video_id = get_youtube_id(youtube_url)
if video_id != "":
t = YouTubeTranscriptApi.get_transcript(video_id, languages=('en','fr','es', 'zh-cn', 'hi', 'ar', 'bn', 'ru', 'pt', 'sw' ))
finalString = ""
for item in t:
text = item['text']
finalString += text + " "
text_splitter = CharacterTextSplitter()
chunks = text_splitter.split_text(finalString)
embeddings = OpenAIEmbeddings()
knowledge_base = FAISS.from_texts(chunks, embeddings)
user_question = 'You are a talk show host and youre about to interview a very famous startup founder. Based on the video of this person, generate five potential interesting questions that a wide range of people might find interesting.'
docs = knowledge_base.similarity_search(user_question)
llm = OpenAI()
chain = load_qa_chain(llm, chain_type="stuff")
with get_openai_callback() as cb:
response = chain.run(input_documents=docs, question=user_question)
responselist = re.findall(r'\d+\.\s+(.*)', response)
return responselist
except Exception:
print('YouTube error')
return []
#youtube_url = 'https://www.youtube.com/watch?v=UYOwweziqGI'
#print(youtube(youtube_url)) | [] |
2024-01-10 | scottsus/Coffee-Chat-AI | src~article.py | from bs4 import BeautifulSoup
import requests
import logging
import re
from langchain.chains.summarize import load_summarize_chain
from langchain.chains import AnalyzeDocumentChain
from langchain.llms import OpenAI
import os
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import OpenAI
from langchain.chains.question_answering import load_qa_chain
from langchain.callbacks import get_openai_callback
from dotenv import load_dotenv
load_dotenv()
openai_key = os.getenv('OPENAI_API_KEY')
# log_file = 'logs/article.log'
# logging.basicConfig(filename=log_file, encoding='utf-8', level=logging.INFO)
def scrape(url):
# Send a GET request to the URL and retrieve the HTML content
response = requests.get(url)
html_content = response.content
# Parse the HTML content using BeautifulSoup
soup = BeautifulSoup(html_content, 'html.parser')
for ad in soup.find_all(class_='advertisement'):
ad.extract()
# Extract the main content area by selecting specific HTML tags or classes
main_content = soup.find(class_='article-content')
# Clean up the extracted content
clean_text = main_content.get_text(separator='\n')
# Remove extra whitespace and newlines
clean_text = re.sub(r'\s+', ' ', clean_text).strip()
return clean_text
def summarise(scraped):
text_splitter = CharacterTextSplitter()
chunks = text_splitter.split_text(scraped)
summary_chain = load_summarize_chain(OpenAI(temperature=0),
chain_type="map_reduce",verbose=True)
summarize_document_chain = AnalyzeDocumentChain(combine_docs_chain=summary_chain)
answer = summarize_document_chain.run(chunks)
return answer
def questions(scraped):
text_splitter = CharacterTextSplitter()
chunks = text_splitter.split_text(scraped)
#create embeddings
embeddings = OpenAIEmbeddings()
knowledge_base = FAISS.from_texts(chunks, embeddings)
user_question = 'You are a talk show host and youre about to interview a very famous startup founder. Based on the article of this person, generate five potential interesting questions that a wide range of people might find interesting.'
docs = knowledge_base.similarity_search(user_question)
llm = OpenAI(cache=False)
chain = load_qa_chain(llm, chain_type="stuff")
with get_openai_callback() as cb:
response = chain.run(input_documents=docs, question=user_question)
return response
def article(article_url):
try:
scraped = scrape(article_url)
question = questions(scraped)
questionlist = re.findall(r'\d+\.\s+(.*)', question)
return questionlist
except Exception:
print('Article error')
return []
test_url = 'https://techcrunch.com/2021/09/05/singapore-based-caregiving-startup-homage-raises-30m-series-c/?guccounter=1&guce_referrer=aHR0cHM6Ly93d3cuZ29vZ2xlLmNvbS8&guce_referrer_sig=AQAAAJPl9ewGP8Q6BDiQ3gAKTFqtucPF7IHWeLvvCbsr5rVm3K_pB70zbBssEOXan2VfI5TTFN2q8vbj_qcchBqjO3zEyRB_XEJ8sfzTjD8f2RX0qIIKJPHrO7NhV65xgjV4YEtOL_LRKVC2KPvfG6ycxATxOE3u9_hKEqMtiv-Zh8XF'
#article(test_url)
| [] |
2024-01-10 | scottsus/Coffee-Chat-AI | src~twitter.py | from langchain.document_loaders import UnstructuredHTMLLoader
from bs4 import BeautifulSoup
import re
import pandas as pd
from tqdm.notebook import tqdm
import snscrape.modules.twitter as sntwitter
import logging
import os
from dotenv import load_dotenv
def twitter(twitter_handle):
print(f'Searching for {twitter_handle}')
try:
# Set up logging to a file with UTF-8 encoding
# log_file = 'logs/twitter.log'
# logging.basicConfig(filename=log_file, encoding='utf-8', level=logging.INFO)
# Scrape tweets using sntwitter
scraper = sntwitter.TwitterSearchScraper(twitter_handle)
tweets = []
for i, tweet in enumerate(scraper.get_items()):
data = [
tweet.date,
#tweet.id,
tweet.content,
tweet.user.username,
tweet.likeCount,
#tweet.retweetCount,
]
tweets.append(data)
if i > 50:
break
tweet_df = pd.DataFrame(tweets, columns=['Datetime', 'Text', 'Username', 'Likes'])
# Log the tweet dataframe
#logging.info(f'Tweet DataFrame:\n{tweet_df}')
from pandasai import PandasAI
from pandasai.llm.openai import OpenAI
# Instantiate a LLM
load_dotenv()
openai_key = os.getenv('OPENAI_API_KEY')
llm = OpenAI()
pandas_ai = PandasAI(llm, conversational=True, enable_cache=False)
# Log the prompt and run pandas_ai
prompt = 'You are a talk show host and youre about to interview a very famous startup founder. Based on the tweets of this person, generate five potential interesting questions that a wide range of people might find interesting.'
#logging.info(f'Prompt: {prompt}')
questions = pandas_ai(tweet_df, prompt=prompt)
questionlist = re.findall(r'\d+\.\s+(.*)', questions)
return questionlist
except Exception:
print('Twitter error')
return []
test_handle = '@susantoscott'
#print(twitter(test_handle)) | [
"You are a talk show host and youre about to interview a very famous startup founder. Based on the tweets of this person, generate five potential interesting questions that a wide range of people might find interesting."
] |
2024-01-10 | chi2nagisa/chatgpt-webui | launch.py | import os
import openai
import gradio as gr
import argparse
# openai.api_key = os.getenv("OPENAI_API_KEY")
parse = argparse.ArgumentParser()
parse.add_argument("--listen", action='store_true', default=False, help="provide service or not")
parse.add_argument("--port", type=int, default=2333, help='server port')
opt = parse.parse_args()
def clean_textbox(*args):
n = len(args)
return [""] * n
class ChatGPT:
def __init__(self):
        self.messages = [{'role': 'system', 'content': 'You are now a very helpful maid assistant. If you come across a question you cannot answer, reply with "As an elegant maid, I am unable to answer this question."'}]
def reset(self, *args):
        self.messages = [{'role': 'system', 'content': 'You are now a very helpful maid assistant. If you come across a question you cannot answer, reply with "As an elegant maid, I am unable to answer this question."'}]
return clean_textbox(*args)
def chat(self, prompt, model):
self.messages.append({"role": "user", "content": prompt})
completion = openai.ChatCompletion.create(
model=model,
messages=self.messages,
temperature=0.7
)
res_msg = completion.choices[0].message["content"].strip()
self.messages.append({"role": "assistant", "content": res_msg})
return res_msg
if __name__ == '__main__':
my_chatgpt = ChatGPT()
with gr.Blocks(title="ChatGPT") as demo:
gr.Markdown('''
# ChatGPT4
''')
with gr.Row():
with gr.Column(scale=9):
prompt = gr.Text(label='ChatGPT_Prompt', show_label=False, lines=3,
placeholder='ChatGPT Prompt')
res = gr.Text(label='ChatGPT_result', show_label=False, lines=3,
placeholder='chatgpt results')
with gr.Column(scale=1):
                btn_gen = gr.Button(value="Send", variant='primary')
                btn_clear = gr.Button(value="Restart chat")
                model = gr.Dropdown(choices=['gpt-3.5-turbo', 'gpt-4'], value='gpt-4', label="Model name", interactive=True)
gr.Examples([
["ๅฆไฝๆไธบ้ญๆณๅฐๅฅณ"],
["ๅ่ฎพๆไธไธชๆฑ ๅก๏ผ้้ขๆๆ ็ฉทๅค็ๆฐดใ็ฐๆ2ไธช็ฉบๆฐดๅฃถ๏ผๅฎน็งฏๅๅซไธบ5ๅๅ6ๅใ้ฎ้ขๆฏๅฆไฝๅช็จ่ฟ2ไธชๆฐดๅฃถไปๆฑ ๅก้ๅๅพ3ๅ็ๆฐดใ"],
["่ฏทๅธฎๆ็จC++ๅๅบๅฟซๆ็ไปฃ็ ใ"]],
inputs=[prompt],
outputs=[res],
fn=my_chatgpt.chat,
cache_examples=False)
btn_gen.click(fn=my_chatgpt.chat, inputs=[prompt, model],
outputs=res)
btn_clear.click(fn=my_chatgpt.reset,
inputs=[prompt, res],
outputs=[prompt, res])
demo.queue()
server_name = "127.0.0.1"
if opt.listen:
server_name = "0.0.0.0"
demo.launch(server_name=server_name, server_port=opt.port)
| [
"ChatGPT_Prompt",
"ไฝ ็ฐๅจๆฏๅพๆ็จ็ๅฅณไปๅฉๆ๏ผๅฆๆ็ขฐๅฐไฝ ๆ ๆณ่งฃ็ญ็้ฎ้ข๏ผ่ฏทไฝฟ็จโไฝไธบไธไฝไผ้
็ๅฆนๆ๏ผๆๆ ๆณๅฏนๆญค้ฎ้ข่ฟ่กๅ็ญโๆฅๅๅค",
"ChatGPT Prompt"
] |
2024-01-10 | can-it-run/MixText | code~transformers~tokenization_auto.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Model class. """
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from .tokenization_bert import BertTokenizer
from .tokenization_openai import OpenAIGPTTokenizer
from .tokenization_gpt2 import GPT2Tokenizer
from .tokenization_transfo_xl import TransfoXLTokenizer
from .tokenization_xlnet import XLNetTokenizer
from .tokenization_xlm import XLMTokenizer
from .tokenization_roberta import RobertaTokenizer
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.getLogger(__name__)
class AutoTokenizer(object):
r""":class:`~transformers.AutoTokenizer` is a generic tokenizer class
that will be instantiated as one of the tokenizer classes of the library
when created with the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)`
class method.
The `from_pretrained()` method take care of returning the correct tokenizer class instance
using pattern matching on the `pretrained_model_name_or_path` string.
The tokenizer class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `distilbert`: DistilBertTokenizer (DistilBert model)
- contains `roberta`: RobertaTokenizer (RoBERTa model)
- contains `bert`: BertTokenizer (Bert model)
- contains `openai-gpt`: OpenAIGPTTokenizer (OpenAI GPT model)
- contains `gpt2`: GPT2Tokenizer (OpenAI GPT-2 model)
- contains `transfo-xl`: TransfoXLTokenizer (Transformer-XL model)
- contains `xlnet`: XLNetTokenizer (XLNet model)
- contains `xlm`: XLMTokenizer (XLM model)
This class cannot be instantiated using `__init__()` (throw an error).
"""
def __init__(self):
raise EnvironmentError("AutoTokenizer is designed to be instantiated "
"using the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)` method.")
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
r""" Instantiate a one of the tokenizer classes of the library
from a pre-trained model vocabulary.
The tokenizer class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `distilbert`: DistilBertTokenizer (DistilBert model)
            - contains `roberta`: RobertaTokenizer (RoBERTa model)
- contains `bert`: BertTokenizer (Bert model)
- contains `openai-gpt`: OpenAIGPTTokenizer (OpenAI GPT model)
- contains `gpt2`: GPT2Tokenizer (OpenAI GPT-2 model)
- contains `transfo-xl`: TransfoXLTokenizer (Transformer-XL model)
- contains `xlnet`: XLNetTokenizer (XLNet model)
- contains `xlm`: XLMTokenizer (XLM model)
Params:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.: ``bert-base-uncased``.
- a path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :func:`~transformers.PreTrainedTokenizer.save_pretrained` method, e.g.: ``./my_model_directory/``.
- (not applicable to all derived classes) a path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (e.g. Bert, XLNet), e.g.: ``./my_model_directory/vocab.txt``.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the vocabulary files and override the cached versions if they exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
inputs: (`optional`) positional arguments: will be passed to the Tokenizer ``__init__`` method.
kwargs: (`optional`) keyword arguments: will be passed to the Tokenizer ``__init__`` method. Can be used to set special tokens like ``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``. See parameters in the doc string of :class:`~transformers.PreTrainedTokenizer` for details.
Examples::
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased') # Download vocabulary from S3 and cache.
tokenizer = AutoTokenizer.from_pretrained('./test/bert_saved_model/') # E.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`
"""
if 'distilbert' in pretrained_model_name_or_path:
return DistilBertTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
elif 'roberta' in pretrained_model_name_or_path:
return RobertaTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
elif 'bert' in pretrained_model_name_or_path:
return BertTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
elif 'openai-gpt' in pretrained_model_name_or_path:
return OpenAIGPTTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
elif 'gpt2' in pretrained_model_name_or_path:
return GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
elif 'transfo-xl' in pretrained_model_name_or_path:
return TransfoXLTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
elif 'xlnet' in pretrained_model_name_or_path:
return XLNetTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
elif 'xlm' in pretrained_model_name_or_path:
return XLMTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
raise ValueError("Unrecognized model identifier in {}. Should contains one of "
"'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', "
"'xlm', 'roberta'".format(pretrained_model_name_or_path))
| [] |
2024-01-10 | can-it-run/MixText | code~transformers~modeling_tf_openai.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 OpenAI GPT model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import json
import logging
import math
import os
import sys
from io import open
import numpy as np
import tensorflow as tf
from .modeling_tf_utils import (TFPreTrainedModel, TFConv1D, TFSharedEmbeddings,
TFSequenceSummary, shape_list, get_initializer)
from .configuration_openai import OpenAIGPTConfig
from .file_utils import add_start_docstrings
from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model
logger = logging.getLogger(__name__)
TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP = {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-tf_model.h5"}
def load_openai_gpt_pt_weights_in_tf2(tf_model, pytorch_checkpoint_path):
# build the network
inputs_list = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
tf_inputs = tf.constant(inputs_list)
tfo = tf_model(tf_inputs, training=False)
return load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path, tf_inputs=tf_inputs)
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
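# Quick sanity values for the tanh approximation above (illustrative, not part of
# the original file): gelu(1.0) ~ 0.841 and gelu(-1.0) ~ -0.159, whereas
# relu(-1.0) = 0, which is what "smoother version of the RELU" refers to.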
def swish(x):
return x * tf.math.sigmoid(x)
ACT_FNS = {"gelu": tf.keras.layers.Activation(gelu),
"relu": tf.keras.activations.relu,
"swish": tf.keras.layers.Activation(swish)}
class TFAttention(tf.keras.layers.Layer):
def __init__(self, nx, n_ctx, config, scale=False, **kwargs):
super(TFAttention, self).__init__(**kwargs)
self.output_attentions = config.output_attentions
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.n_ctx = n_ctx
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = TFConv1D(n_state * 3, nx, initializer_range=config.initializer_range, name='c_attn')
self.c_proj = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name='c_proj')
self.attn_dropout = tf.keras.layers.Dropout(config.attn_pdrop)
self.resid_dropout = tf.keras.layers.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def prune_heads(self, heads):
pass
@staticmethod
def causal_attention_mask(nd, ns, dtype):
"""1's in the lower triangle, counting from the lower right corner.
Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs.
"""
i = tf.range(nd)[:,None]
j = tf.range(ns)
m = i >= j - ns + nd
return tf.cast(m, dtype)
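        # Illustrative example (not part of the original file): with nd=2, ns=3 the mask
        # is [[1, 1, 0], [1, 1, 1]]; destination position i may attend to source position j
        # only when j - (ns - nd) <= i, i.e. never to future tokens.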
def _attn(self, inputs, training=False):
q, k, v, attention_mask, head_mask = inputs
# q, k, v have shape [batch, heads, sequence, features]
w = tf.matmul(q, k, transpose_b=True)
if self.scale:
dk = tf.cast(tf.shape(k)[-1], tf.float32) # scale attention_scores
w = w / tf.math.sqrt(dk)
# w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst.
_, _, nd, ns = shape_list(w)
b = self.causal_attention_mask(nd, ns, dtype=w.dtype)
b = tf.reshape(b, [1, 1, nd, ns])
w = w * b - 1e4 * (1 - b)
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = tf.nn.softmax(w, axis=-1)
w = self.attn_dropout(w, training=training)
# Mask heads if we want to
if head_mask is not None:
w = w * head_mask
outputs = [tf.matmul(w, v)]
if self.output_attentions:
outputs.append(w)
return outputs
def merge_heads(self, x):
x = tf.transpose(x, [0, 2, 1, 3])
x_shape = shape_list(x)
new_x_shape = x_shape[:-2] + [x_shape[-2] * x_shape[-1]]
return tf.reshape(x, new_x_shape)
def split_heads(self, x):
x_shape = shape_list(x)
new_x_shape = x_shape[:-1] + [self.n_head, x_shape[-1] // self.n_head]
x = tf.reshape(x, new_x_shape)
return tf.transpose(x, (0, 2, 1, 3)) # (batch, head, seq_length, head_features)
def call(self, inputs, training=False):
x, attention_mask, head_mask = inputs
x = self.c_attn(x)
query, key, value = tf.split(x, 3, axis=2)
query = self.split_heads(query)
key = self.split_heads(key)
value = self.split_heads(value)
attn_outputs = self._attn([query, key, value, attention_mask, head_mask], training=training)
a = attn_outputs[0]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a, training=training)
outputs = [a] + attn_outputs[1:]
return outputs # a, (attentions)
class TFMLP(tf.keras.layers.Layer):
def __init__(self, n_state, config, **kwargs):
super(TFMLP, self).__init__(**kwargs)
nx = config.n_embd
self.c_fc = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name='c_fc')
self.c_proj = TFConv1D(nx, n_state, initializer_range=config.initializer_range, name='c_proj')
self.act = gelu
self.dropout = tf.keras.layers.Dropout(config.resid_pdrop)
def call(self, x, training=False):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
h2 = self.dropout(h2, training=training)
return h2
class TFBlock(tf.keras.layers.Layer):
def __init__(self, n_ctx, config, scale=False, **kwargs):
super(TFBlock, self).__init__(**kwargs)
nx = config.n_embd
self.attn = TFAttention(nx, n_ctx, config, scale, name='attn')
self.ln_1 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_1')
self.mlp = TFMLP(4 * nx, config, name='mlp')
self.ln_2 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_2')
def call(self, inputs, training=False):
x, attention_mask, head_mask = inputs
output_attn = self.attn([x, attention_mask, head_mask], training=training)
a = output_attn[0] # output_attn: a, (attentions)
n = self.ln_1(x + a)
m = self.mlp(n, training=training)
h = self.ln_2(n + m)
outputs = [h] + output_attn[1:]
return outputs # x, (attentions)
class TFOpenAIGPTMainLayer(tf.keras.layers.Layer):
def __init__(self, config, *inputs, **kwargs):
super(TFOpenAIGPTMainLayer, self).__init__(config, *inputs, **kwargs)
self.output_hidden_states = config.output_hidden_states
self.output_attentions = config.output_attentions
self.num_hidden_layers = config.n_layer
self.vocab_size = config.vocab_size
self.n_embd = config.n_embd
self.tokens_embed = TFSharedEmbeddings(config.vocab_size,
config.n_embd,
initializer_range=config.initializer_range,
name='tokens_embed')
self.positions_embed = tf.keras.layers.Embedding(config.n_positions,
config.n_embd,
embeddings_initializer=get_initializer(config.initializer_range),
name='positions_embed')
self.drop = tf.keras.layers.Dropout(config.embd_pdrop)
self.h = [TFBlock(config.n_ctx,
config,
scale=True,
name='h_._{}'.format(i)) for i in range(config.n_layer)]
def _resize_token_embeddings(self, new_num_tokens):
raise NotImplementedError
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
raise NotImplementedError
def call(self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, training=False):
if isinstance(inputs, (tuple, list)):
input_ids = inputs[0]
attention_mask = inputs[1] if len(inputs) > 1 else attention_mask
token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids
position_ids = inputs[3] if len(inputs) > 3 else position_ids
head_mask = inputs[4] if len(inputs) > 4 else head_mask
assert len(inputs) <= 5, "Too many inputs."
elif isinstance(inputs, dict):
input_ids = inputs.get('input_ids')
attention_mask = inputs.get('attention_mask', attention_mask)
token_type_ids = inputs.get('token_type_ids', token_type_ids)
position_ids = inputs.get('position_ids', position_ids)
head_mask = inputs.get('head_mask', head_mask)
assert len(inputs) <= 5, "Too many inputs."
else:
input_ids = inputs
if position_ids is None:
position_ids = tf.range(shape_list(input_ids)[-1], dtype=tf.int32)[tf.newaxis, :]
if attention_mask is not None:
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = tf.cast(attention_mask, tf.float32)
attention_mask = (1.0 - attention_mask) * -10000.0
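            # Illustrative example (not part of the original file): a padding mask of
            # [1, 1, 0] becomes [0.0, 0.0, -10000.0], pushing masked positions to
            # near-zero probability after the softmax.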
else:
attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        if head_mask is not None:
raise NotImplementedError
else:
head_mask = [None] * self.num_hidden_layers
# head_mask = tf.constant([0] * self.num_hidden_layers)
input_shape = shape_list(input_ids)
input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])
position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
inputs_embeds = self.tokens_embed(input_ids, mode='embedding')
position_embeds = self.positions_embed(position_ids)
if token_type_ids is not None:
token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
token_type_embeds = self.tokens_embed(token_type_ids, mode='embedding')
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states, training=training)
output_shape = input_shape + [shape_list(hidden_states)[-1]]
all_attentions = []
all_hidden_states = ()
for i, block in enumerate(self.h):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
outputs = block([hidden_states, attention_mask, head_mask[i]], training=training)
hidden_states = outputs[0]
if self.output_attentions:
all_attentions.append(outputs[1])
hidden_states = tf.reshape(hidden_states, output_shape)
# Add last hidden state
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
# let the number of heads free (-1) so we can extract attention even after head pruning
attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
outputs = outputs + (all_attentions,)
return outputs # last hidden state, (all hidden_states), (attentions)
class TFOpenAIGPTPreTrainedModel(TFPreTrainedModel):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
config_class = OpenAIGPTConfig
pretrained_model_archive_map = TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP
load_pt_weights = load_openai_gpt_pt_weights_in_tf2
base_model_prefix = "transformer"
OPENAI_GPT_START_DOCSTRING = r""" OpenAI GPT model was proposed in
`Improving Language Understanding by Generative Pre-Training`_
by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
It's a causal (unidirectional) transformer pre-trained using language modeling on a large
    corpus with long range dependencies, the Toronto Book Corpus.
This model is a tf.keras.Model `tf.keras.Model`_ sub-class. Use it as a regular TF 2.0 Keras Model and
refer to the TF 2.0 documentation for all matter related to general usage and behavior.
.. _`Improving Language Understanding by Generative Pre-Training`:
https://openai.com/blog/language-unsupervised/
.. _`tf.keras.Model`:
https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Model
Note on the model inputs:
    TF 2.0 models accept two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
    This second option is useful when using the `tf.keras.Model.fit()` method, which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :
    - a single Tensor with input_ids only and nothing else: `model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
    - a dictionary with one or several input Tensors associated with the input names given in the docstring:
`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
Parameters:
config (:class:`~transformers.OpenAIGPTConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
OPENAI_GPT_INPUTS_DOCSTRING = r""" Inputs:
**input_ids**: ```Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
GPT is a model with absolute position embeddings so it's usually advised to pad the inputs on
the right rather than the left.
            Indices can be obtained using :class:`transformers.OpenAIGPTTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**attention_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**token_type_ids**: (`optional`) ```Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
A parallel sequence of tokens (can be used to indicate various portions of the inputs).
The embeddings from these tokens will be summed with the respective token embeddings.
Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices)
**position_ids**: (`optional`) ```Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
**head_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""
@add_start_docstrings("The bare OpenAI GPT transformer model outputing raw hidden-states without any specific head on top.",
OPENAI_GPT_START_DOCSTRING, OPENAI_GPT_INPUTS_DOCSTRING)
class TFOpenAIGPTModel(TFOpenAIGPTPreTrainedModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
Sequence of hidden-states at the last layer of the model.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import OpenAIGPTTokenizer, TFOpenAIGPTModel
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = TFOpenAIGPTModel.from_pretrained('openai-gpt')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
def __init__(self, config, *inputs, **kwargs):
super(TFOpenAIGPTModel, self).__init__(config, *inputs, **kwargs)
self.transformer = TFOpenAIGPTMainLayer(config, name='transformer')
def call(self, inputs, **kwargs):
outputs = self.transformer(inputs, **kwargs)
return outputs
@add_start_docstrings("""OpenAI GPT Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """, OPENAI_GPT_START_DOCSTRING, OPENAI_GPT_INPUTS_DOCSTRING)
class TFOpenAIGPTLMHeadModel(TFOpenAIGPTPreTrainedModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import OpenAIGPTTokenizer, TFOpenAIGPTLMHeadModel
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = TFOpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
outputs = model(input_ids)
logits = outputs[0]
"""
def __init__(self, config, *inputs, **kwargs):
super(TFOpenAIGPTLMHeadModel, self).__init__(config, *inputs, **kwargs)
self.transformer = TFOpenAIGPTMainLayer(config, name='transformer')
def call(self, inputs, **kwargs):
transformer_outputs = self.transformer(inputs, **kwargs)
hidden_states = transformer_outputs[0]
lm_logits = self.transformer.tokens_embed(hidden_states, mode="linear")
outputs = (lm_logits,) + transformer_outputs[1:]
return outputs # lm_logits, (all hidden_states), (attentions)
@add_start_docstrings("""OpenAI GPT Model transformer with a language modeling and a multiple-choice classification
head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers.
The language modeling head has its weights tied to the input embeddings,
the classification head takes as input the input of a specified classification token index in the input sequence).
""", OPENAI_GPT_START_DOCSTRING, OPENAI_GPT_INPUTS_DOCSTRING)
class TFOpenAIGPTDoubleHeadsModel(TFOpenAIGPTPreTrainedModel):
r"""
**mc_token_ids**: (`optional`, default to index of the last token of the input) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, num_choices)``:
Index of the classification token in each input sequence.
Selected in the range ``[0, input_ids.size(-1) - 1[``.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**lm_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
**mc_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)``
            Prediction scores of the multiple-choice classification head (scores for each choice before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import OpenAIGPTTokenizer, TFOpenAIGPTDoubleHeadsModel
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = TFOpenAIGPTDoubleHeadsModel.from_pretrained('openai-gpt')
# Add a [CLS] to the vocabulary (we should train it also!)
# This option is currently not implemented in TF 2.0
raise NotImplementedError
tokenizer.add_special_tokens({'cls_token': '[CLS]'})
model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size
        print(tokenizer.cls_token_id, len(tokenizer))  # The newly added token is the last token of the vocabulary
choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
input_ids = tf.constant([tokenizer.encode(s) for s in choices])[None, :] # Batch size 1, 2 choices
mc_token_ids = tf.constant([input_ids.size(-1), input_ids.size(-1)])[None, :] # Batch size 1
outputs = model(input_ids, mc_token_ids=mc_token_ids)
lm_prediction_scores, mc_prediction_scores = outputs[:2]
"""
def __init__(self, config, *inputs, **kwargs):
super(TFOpenAIGPTDoubleHeadsModel, self).__init__(config, *inputs, **kwargs)
self.transformer = TFOpenAIGPTMainLayer(config, name='transformer')
self.multiple_choice_head = TFSequenceSummary(config, initializer_range=config.initializer_range, name='multiple_choice_head')
def call(self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, mc_token_ids=None, training=False):
if isinstance(inputs, (tuple, list)):
input_ids = inputs[0]
attention_mask = inputs[1] if len(inputs) > 1 else attention_mask
token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids
position_ids = inputs[3] if len(inputs) > 3 else position_ids
head_mask = inputs[4] if len(inputs) > 4 else head_mask
mc_token_ids = inputs[5] if len(inputs) > 5 else mc_token_ids
assert len(inputs) <= 6, "Too many inputs."
elif isinstance(inputs, dict):
input_ids = inputs.get('input_ids')
attention_mask = inputs.get('attention_mask', attention_mask)
token_type_ids = inputs.get('token_type_ids', token_type_ids)
position_ids = inputs.get('position_ids', position_ids)
head_mask = inputs.get('head_mask', head_mask)
mc_token_ids = inputs.get('mc_token_ids', mc_token_ids)
assert len(inputs) <= 6, "Too many inputs."
else:
input_ids = inputs
input_shapes = shape_list(input_ids)
seq_length = input_shapes[-1]
flat_input_ids = tf.reshape(input_ids, (-1, seq_length))
flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
flat_inputs = [flat_input_ids, flat_attention_mask, flat_token_type_ids, flat_position_ids, head_mask]
transformer_outputs = self.transformer(flat_inputs, training=training)
hidden_states = transformer_outputs[0]
hidden_states = tf.reshape(hidden_states, input_shapes + shape_list(hidden_states)[-1:])
lm_logits = self.transformer.tokens_embed(hidden_states, mode="linear")
mc_logits = self.multiple_choice_head([hidden_states, mc_token_ids], training=training)
mc_logits = tf.squeeze(mc_logits, axis=-1)
outputs = (lm_logits, mc_logits) + transformer_outputs[1:]
return outputs # lm logits, mc logits, (all hidden_states), (attentions)
| [] |
2024-01-10 | niudongjun/chatgpt-mqtt | ChatApp.py | import os
import openai
import paho.mqtt.client as mqtt
from dotenv.main import load_dotenv
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
OPENAI_MODEL = os.getenv("OPENAI_MODEL")
# prompt
PROMPT = os.getenv("PROMPT")
# MQTT broker details
MQTT_BROKER_HOST = os.getenv("MQTT_BROKER_HOST")
MQTT_BROKER_PORT = int(os.getenv("MQTT_BROKER_PORT"))
MQTT_USERNAME = os.getenv("MQTT_USERNAME")
MQTT_PASSWORD = os.getenv("MQTT_PASSWORD")
MQTT_SUB_TOPIC = os.getenv("MQTT_SUB_TOPIC")
MQTT_PUB_TOPIC = os.getenv("MQTT_PUB_TOPIC")
# MQTT client initialization
mqtt_client = mqtt.Client()
mqtt_client.username_pw_set(MQTT_USERNAME, MQTT_PASSWORD)
def generate_prompt(content):
return PROMPT.capitalize().format(
content.capitalize()
)
# Define on_connect callback
def on_connect(client, userdata, flags, rc):
mqtt_client.subscribe(MQTT_SUB_TOPIC)
# Define on_message callback
def on_message(client, userdata, msg):
response = openai.Completion.create(
model=OPENAI_MODEL,
        prompt=generate_prompt(msg.payload.decode("utf-8")),  # decode the MQTT payload bytes before formatting the prompt
temperature=0.5,
max_tokens=1500,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
# Extract response text from JSON response
response_text = response.choices[0].text
# Publish response to MQTT topic
mqtt_client.publish(MQTT_PUB_TOPIC, response_text)
# Set on_connect and on_message callbacks
mqtt_client.on_connect = on_connect
mqtt_client.on_message = on_message
mqtt_client.connect(MQTT_BROKER_HOST, MQTT_BROKER_PORT)
mqtt_client.loop_forever()
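# --- Illustrative note (an assumption, not part of the original script) ---
# All connection details above come from a .env file loaded by python-dotenv.
# A minimal sketch of the variables this script reads, with purely hypothetical values:
#
#   OPENAI_API_KEY=sk-...
#   OPENAI_MODEL=text-davinci-003
#   PROMPT=Answer the following request: {0}
#   MQTT_BROKER_HOST=broker.example.com
#   MQTT_BROKER_PORT=1883
#   MQTT_USERNAME=user
#   MQTT_PASSWORD=secret
#   MQTT_SUB_TOPIC=chatgpt/request
#   MQTT_PUB_TOPIC=chatgpt/response
#
# Note that PROMPT must contain a "{0}" (or "{}") placeholder, because
# generate_prompt() fills it in with str.format().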
| [] |
2024-01-10 | remrama/flying | validate.py |
import openai
import pandas as pd
FINE_TUNED_MODEL = ""
df = pd.read_table("dreams.tsv")
df["raw"] = df["raw"].str.strip().add("\n\n###\n\n")
# openai.api_key = os.getenv("OPENAI_API_KEY")
# openai.Model.list()
## Add the ending token as a stop sequence during inference.
def predict(prompt):
return openai.Completion.create(
model=FINE_TUNED_MODEL,
prompt=prompt,
# max_tokens=2049-8, # model-specific limit minus current prompt length? defaults to 16
# see
temperature=0,
# logprobs=None,
        suffix=None,
stop=" END",
)
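# --- Illustrative usage sketch (assumes FINE_TUNED_MODEL has been filled in and that
# the fine-tune used "\n\n###\n\n" as the prompt separator and " END" as the completion
# terminator, which is why stop=" END" is passed above) ---
#
#   for prompt in df["raw"].head():
#       completion = predict(prompt)
#       print(completion["choices"][0]["text"].strip())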
| [] |
2024-01-10 | remrama/flying | gpt_request.py | """Can ChatGPT identify lucidity?"""
import argparse
import os
from pathlib import Path
from time import sleep
import openai
import pandas as pd
from tqdm import tqdm
import unidecode
import utils
available_datasets = [
"dreamviews",
"flying",
"sddb"
]
available_tasks = [
"isdream",
"islucid",
"annotate",
"thematicD",
"thematicM",
"thematicT",
]
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dataset", required=True, type=str, choices=available_datasets)
parser.add_argument("-t", "--task", required=True, type=str, choices=available_tasks)
parser.add_argument("-o", "--overwrite", action="store_true", help="Overwrite output file if it already exists.")
parser.add_argument("--test", action="store_true", help="Just run on 10 samples.")
args = parser.parse_args()
dataset = args.dataset
overwrite = args.overwrite
task = args.task
testing = args.test
# Set OpenAI API key.
openai.api_key = os.getenv("OPENAI_API_KEY")
# Load data
if dataset == "flying":
df = utils.load_sourcedata(dreams_only=True)
elif dataset == "dreamviews":
df = utils.load_dreamviews()
elif dataset == "sddb":
df = utils.load_sddb()
if testing:
    df = df.sample(n=100, random_state=32)
assert df.index.name == "dream_id"
assert df.index.is_unique
ser = df["dream_text"]
system_prompt = utils.load_txt(f"./prompt-system_task-{task}.txt")
user_prompt = utils.load_txt(f"./prompt-user_task-{task}.txt")
export_path = utils.deriv_dir / f"data-{dataset}_task-{task}_responses.json"
# Set OpenAI/ChatGPT model parameters from configuration file.
# model_kwargs = config["openai"]["model_kwargs"]
model_kwargs = {
"model": "gpt-4",
"temperature": 0, # Lower means more deterministic results
"top_p": 1, # Also impacts determinism, but don't modify this and temperature
"n": 1, # Number of responses
"stream": False,
"stop": None,
"max_tokens": None, # The maximum number of tokens to generate in the chat completion
"presence_penalty": 0, # Penalizes tokens for occurring (or being absent if negative)
"frequency_penalty": 0,
# "logit_bias": None
}
if export_path.exists() and not overwrite:
# If not overwriting, load in existing results.
responses = utils.load_json(export_path)
else:
# Initialize an empty dictionary to hold OpenAI responses/completions/results.
responses = {}
system_message = dict(role="system", content=system_prompt)
user_message = dict(role="user")
for dream_id, dream_report in tqdm(ser.items(), total=ser.size, desc="Dreams"):
if dream_id not in responses:
# Add this dream report to the ChatGPT prompt.
user_content = user_prompt.replace("<INSERT_DREAM>", dream_report)
# Update ChatGPT model parameters with the updated user prompt.
user_message.update(content=user_content)
model_kwargs.update(messages=[system_message, user_message])
# Ask ChatGPT to produce the Stable Diffusion prompt.
move_on = False
while not move_on:
try:
responses[dream_id] = openai.ChatCompletion.create(**model_kwargs)
move_on = True
except openai.error.RateLimitError as e:
print("Rate Limit Error, backing off and trying again...")
sleep(1.0)
# Write cumulative results to file.
utils.save_json(responses, export_path)
| [
"./prompt-user_task-PLACEHOLDER.txt",
"./prompt-system_task-PLACEHOLDER.txt"
] |
2024-01-10 | GCTY-LN/NGCBot | Api_Server~Api_Server_Main.py | # -*- encoding=UTF-8 -*-
import datetime
from lxml import etree
from urllib.parse import urljoin
from Output.output import output
import feedparser
import requests
import urllib3
import random
import openai
import time
import yaml
import os
import re
class Api_Server_Main:
def __init__(self):
        # Global request headers
self.headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Language": "zh-CN,zh;q=0.9",
"Accept-Encoding": "gzip, deflate, br",
# 'Connection':'keep-alive' ,#้ป่ฎคๆถ้พๆฅไธๆฌก๏ผๅคๆฌก็ฌๅไนๅไธ่ฝไบง็ๆฐ็้พๆฅๅฐฑไผไบง็ๆฅ้Max retries exceeded with url
"Upgrade-Insecure-Requests": "1",
"Pragma": "no-cache",
"Cache-Control": "no-cache",
"Connection": "close", # ่งฃๅณMax retries exceeded with urlๆฅ้
}
# ๅฟฝ็ฅHTTPSๅ่ญฆ
urllib3.disable_warnings()
# ่ทๅๅฝๅๆไปถ่ทฏๅพ
current_path = os.path.dirname(__file__)
        # Build the cache folder path
current_list_path = current_path.split('\\')
current_list_path.pop()
self.Cache_path = '/'.join(current_list_path) + '/Cache'
        # Initialize and read the configuration file
config = yaml.load(open(current_path + '/../Config/config.yaml', encoding='UTF-8'), yaml.Loader)
self.system_copyright = config['System_Config']['System_Copyright']
# ่ฏปๅopenai็key
openai.api_key = config['Api_Server']['Api_Config']['OpenAi_Key']
# ่ฎพ็ฝฎๅๅงไธไธๆๆถๆฏ้ๅ
self.message = [{"role": "system", "content": "ไฝ ็ฐๅจๅซNGCBot,ไฝ ็ไธปไบบๆฏไบๅฑฑ,่ฏท็ข่ฎฐ"},]
self.http_proxy = config['System_Config']['HTTP_PROXY']
self.https_proxy = config['System_Config']['HTTPS_PROXY']
        # Read the configuration file
config = yaml.load(open(current_path + '/../Config/config.yaml', encoding='UTF-8'), yaml.Loader)
self.appid = config['Api_Server']['Api_Config']['Appid']
self.appsecret = config['Api_Server']['Api_Config']['Appsecret']
self.key = config['Api_Server']['Api_Config']['Key']
self.threatbook_key = config['Api_Server']['Api_Config']['ThreatBook_Key']
self.pic_apis = config['Api_Server']['Pic_Api']
self.video_apis = config['Api_Server']['Video_Api']
self.icp_api = config['Api_Server']['Icp_Api']
self.extensions_api = config['Api_Server']['Extensions_Api']
self.attribution_api = config['Api_Server']['Attribution_Api']
self.whois_api = config['Api_Server']['Whois_Api']
self.fish_api = config['Api_Server']['Fish_Api']
self.wether_api = config['Api_Server']['Wether_Api']
self.dog_api = config['Api_Server']['Dog_Api']
self.constellation_api = config['Api_Server']['Constellation_Api']
self.morning_api = config['Api_Server']['Morning_Api']
self.threatbook_url = config['Api_Server']['ThreatBook_Api']
# AIๅฏน่ฏๆฅๅฃ
def get_ai(self, keyword):
output('[-]:ๆญฃๅจ่ฐ็จAIๅฏน่ฏAPIๆฅๅฃ... ...')
os.environ["HTTP_PROXY"] = self.http_proxy
os.environ["HTTPS_PROXY"] = self.https_proxy
self.message.append({"role": "user", "content": f'{keyword}'})
rsp = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=self.message,
)
os.environ["HTTP_PROXY"] = ""
os.environ["HTTPS_PROXY"] = ""
msg = rsp.get("choices")[0]["message"]["content"]
self.message.append({"role": "assistant", "content": f'{msg}'})
return msg
# ็พๅฅณๅพ็ๆฅๅฃ
def get_pic(self):
output('[-]:ๆญฃๅจ่ฐ็จ็พๅฅณๅพ็APIๆฅๅฃ... ...')
url = random.choice(self.pic_apis)
try:
pic_data = requests.get(url=url, headers=self.headers, timeout=30).content
save_path = self.Cache_path + '/Pic_Cache/' + str(int(time.time() * 1000)) + '.jpg'
with open(file=save_path, mode='wb') as pd:
pd.write(pic_data)
except Exception as e:
msg = f'[ERROR]:็พๅฅณๅพ็APIๆฅๅฃๅบ็ฐ้่ฏฏ๏ผ้่ฏฏไฟกๆฏ๏ผ{e}'
output(msg)
return msg
return save_path
# ็พๅฅณ่ง้ขๆฅๅฃ
def get_video(self):
output('[-]:ๆญฃๅจ่ฐ็จ็พๅฅณ่ง้ขAPIๆฅๅฃ... ...')
url = random.choice(self.video_apis)
# url = 'https://zj.v.api.aa1.cn/api/video_dyv2/'
try:
try:
resp = requests.get(url=url, headers=self.headers, timeout=80).json()
if 'url' in resp:
src = resp['url']
else:
src = resp['mp4']
except requests.exceptions.JSONDecodeError:
src = re.findall('src="(.*?)"', requests.get(url=url, headers=self.headers, timeout=20).text)[0]
# print(src)
mp4_url = src
if 'http' not in src:
mp4_url = 'http:' + src
video_data = requests.get(url=mp4_url, headers=self.headers, timeout=60).content
save_path = self.Cache_path + '/Video_Cache/' + str(int(time.time() * 1000)) + '.mp4'
with open(file=save_path, mode='wb') as vd:
vd.write(video_data)
except Exception as e:
msg = f'[ERROR]:็พๅฅณ่ง้ขAPIๆฅๅฃๅบ็ฐ้่ฏฏ๏ผ้่ฏฏไฟกๆฏ๏ผ{e}'
output(msg)
return msg
return save_path
# ๅคๆกๆฅ่ฏขๆฅๅฃ
def get_icp(self, keyword):
try:
domain = re.findall(r' (\w+.\w+)', keyword)[0]
except Exception as e:
msg = '่ฏญๆณๆ ผๅผ:\nICPๆฅ่ฏข qq.com'
output(f'[ERROR]:ๅคๆกๆฅ่ฏขๆฅๅฃๅบ็ฐ้่ฏฏ๏ผ้่ฏฏไฟกๆฏ๏ผ{e}')
return msg
url = self.icp_api.format(domain)
try:
data = requests.get(url=url, headers=self.headers, timeout=10).json()
except Exception as e:
            msg = f'[ERROR]: ICP lookup API timed out, error: {e}'
output(msg)
return msg
if data['icp'] == 'ๆชๅคๆก':
return '่ฏฅๅๅๆชๅคๆก!'
msg = f'======== ๆฅ่ฏขไฟกๆฏ ========\nICPๅคๆกๅท:{data["icp"]}\nๅคๆกไธปไฝ:{data["name"]}\nๅคๆก็ฑปๅ:{data["tyle"]}\n{"By: #" + self.system_copyright if self.system_copyright else ""}\n========================'
return msg.strip()
# ๅ็ผๅๆฅ่ฏขๆฅๅฃ
def get_suffix(self, keyword):
try:
word = re.findall(r' (\w+)', keyword)[0]
except Exception as e:
msg = '่ฏญๆณๆ ผๅผ:\nๅ็ผๅๆฅ่ฏข EXE'
output(f'\n[ERROR]:ๅ็ผๅๆฅ่ฏขๆฅๅฃๅบ็ฐ้่ฏฏ๏ผ้่ฏฏไฟกๆฏ๏ผ{e}')
return msg
url = self.extensions_api.format(self.key, word)
try:
data = requests.get(url=url, headers=self.headers).json()
except TimeoutError as e:
            msg = f'\n[ERROR]: file-extension lookup API timed out, error: {e}'
output(msg)
return msg
if data['code'] != 200:
msg = 'ๆฅ่ฏข็ปๆไธบ็ฉบ!'
else:
msg = f'\n======== ๆฅ่ฏขๅ็ผ:{word} ========\nๆฅ่ฏข็ปๆ:{data["result"]["notes"]}\n{"By: #" + self.system_copyright if self.system_copyright else ""}\n============================'
return msg
# ๅฝๅฑๅฐๆฅ่ฏข
def get_attribution(self, keyword):
try:
phone = re.findall(r' (\d+)', keyword)[0]
except Exception as e:
msg = '่ฏญๆณๆ ผๅผ:\nๅฝๅฑๆฅ่ฏข 110'
output(f'\n[ERROR]:ๅฝๅฑๆฅ่ฏขๆฅๅฃๅบ็ฐ้่ฏฏ๏ผ้่ฏฏไฟกๆฏ๏ผ{e}')
return msg
url = self.attribution_api.format(phone)
try:
data = requests.get(url=url, headers=self.headers).json()
except TimeoutError as e:
            msg = f'\n[ERROR]: phone attribution lookup API timed out, error: {e}'
output(msg)
return msg
if not data['data']['province']:
msg = 'ๆฅ่ฏข็ปๆไธบ็ฉบ!'
else:
msg = f'\n===== ๆฅ่ฏขไฟกๆฏ =====\nๆๆบๅท็ :{phone}\n็ไปฝ:{data["data"]["province"]}\nๅๅธ:{data["data"]["city"]}\n่ฟ่ฅๅ:{data["data"]["sp"]}\n{"By: #" + self.system_copyright if self.system_copyright else ""}\n================='
return msg
# Whoisๆฅ่ฏขๆฅๅฃ
def get_whois(self, keyword):
try:
domain = re.findall(r' (\w+.\w+)', keyword)[0]
except Exception as e:
msg = '่ฏญๆณๆ ผๅผ:\nWHOISๆฅ่ฏข qq.com'
output(f'[ERROR]:WHOISๆฅ่ฏขๆฅๅฃๅบ็ฐ้่ฏฏ๏ผ้่ฏฏไฟกๆฏ๏ผ{e}')
return msg
url = self.whois_api.format(domain)
try:
source_data = requests.get(url=url, headers=self.headers).text
except TimeoutError as e:
            msg = f'\n[ERROR]: WHOIS lookup API timed out, error: {e}'
output(msg)
return msg
msg = '\n' + source_data.strip().split('For more information')[0].strip(
'<pre>').strip() + f"\n{'By: #' + self.system_copyright if self.system_copyright else ''}"
return msg
# ๅพฎๆญฅipๆฅ่ฏขๆฅๅฃ
def get_threatbook_ip(self, keyword):
try:
keyword = keyword.split(' ')[-1]
except Exception as e:
output(f'[ERROR]:ๅพฎๆญฅipๆฅ่ฏขๆฅๅฃๅบ็ฐ้่ฏฏ๏ผ้่ฏฏไฟกๆฏ๏ผ{e}')
reg = r"((2(5[0-5]|[0-4]\d))|[0-1]?\d{1,2})(\.((2(5[0-5]|[0-4]\d))|[0-1]?\d{1,2})){3}"
ip_result = re.match(reg, keyword.replace(' ', '').strip())
if ip_result is None:
msg = "่ฏญๆณๆ ผๅผ: \nIPๆฅ่ฏข xx.xx.xx.xx"
return msg
elif len(keyword) > 0 and ip_result.group():
search_ip = ip_result.group()
ips = str(search_ip).split('.')
continuous_bool = True if [i for i in ips if ips[0] != i] else False
if ips[0] in ['127', '192', '0', '224', '240', '255'] or \
search_ip in ['1.1.1.1', '2.2.2.2', '3.3.3.3', '4.4.4.4', '5.5.5.5', '6.6.6.6', '7.7.7.7',
'8.8.8.8', '9.9.9.9', '10.10.10.10'] or \
'.'.join(ips[0:2]) in ['169.254', '100.64', '198.51', '198.18', '172.16'] or \
'.'.join(ips[0:3]) in ['203.0.113'] or \
ips[-1] in ['255', '254']:
msg = "[ๅพฎ็ฌ]ๆไธๆฏๆๆฅ่ฏข่ฏฅๅฐๅ!"
return msg
if not continuous_bool:
msg = "[ๅพฎ็ฌ]ๆไธๆฏๆๆฅ่ฏข่ฏฅๅฐๅ!"
return msg
try:
data = {
"apikey": self.threatbook_key,
"resource": search_ip,
}
resp = requests.post(
self.threatbook_url,
data=data,
timeout=10,
verify=False,
)
if resp.status_code == 200 and resp.json()["response_code"] == 0:
# ๆฅ้ฃ้ฉ็ญ็บง
sec_level = resp.json()["data"]["{}".format(search_ip)]["severity"]
# ๆฅๆฏๅฆๆถๆIP
is_malicious = resp.json()["data"]["{}".format(search_ip)]["is_malicious"]
# ๆฅๅฏไฟกๅบฆ
confidence_level = resp.json()["data"]["{}".format(search_ip)]["confidence_level"]
# ๆฅIPๅฝๅฑๅฝๅฎถ
country = resp.json()["data"]["{}".format(search_ip)]["basic"]["location"][
"country"
]
# ๆฅIPๅฝๅฑ็ไปฝ
province = resp.json()["data"]["{}".format(search_ip)]["basic"]["location"][
"province"
]
# ๆฅIPๅฝๅฑๅๅธ
city = resp.json()["data"]["{}".format(search_ip)]["basic"]["location"]["city"]
# ๅฐIPๅฝๅฑ็ๅฝๅฎถใ็ไปฝใๅๅธๅๅนถๆไธไธชๅญ็ฌฆไธฒ
location = country + "-" + province + "-" + city
# ๆฅๅจ่็ฑปๅ
judgments = ""
for j in resp.json()["data"]["{}".format(search_ip)]["judgments"]:
judgments += j + " "
if is_malicious:
is_malicious_msg = "ๆฏ"
else:
is_malicious_msg = "ๅฆ"
msg = f"\n===================\n[+]ip๏ผ{search_ip}\n[+]้ฃ้ฉ็ญ็บง๏ผ{sec_level}\n[+]ๆฏๅฆไธบๆถๆip๏ผ{is_malicious_msg}\n[+]ๅฏไฟกๅบฆ๏ผ{confidence_level}\n[+]ๅจ่็ฑปๅ๏ผ{str(judgments)}\n[+]ipๅฝๅฑๅฐ๏ผ{location}\nๆดๆฐๆถ้ด๏ผ{resp.json()['data']['{}'.format(search_ip)]['update_time']}\n{'By: #' + self.system_copyright if self.system_copyright else ''}\n==================="
else:
msg = f"[ERROR]:ๆฅ่ฏขๅคฑ่ดฅ๏ผ่ฟๅไฟกๆฏ๏ผ{resp.json()['verbose_msg']}"
output(msg)
except Exception as e:
output(f"[ERROR]:ๅพฎๆญฅIPๆฅ่ฏขๅบ้๏ผ้่ฏฏไฟกๆฏ๏ผ{e}")
msg = f"[ERROR]:ๆฅ่ฏขๅบ้่ฏท็จๅ้่ฏ๏ผ้่ฏฏไฟกๆฏ๏ผ{e}"
return msg
# ๆธ้ฑผๆฅ่ฎฐๆฅๅฃ
def get_fish(self):
output('[-]:ๆญฃๅจ่ฐ็จๆธ้ฑผๆฅ่ฎฐAPIๆฅๅฃ... ...')
try:
pic_data = requests.get(url=self.fish_api, headers=self.headers, timeout=10).content
save_path = self.Cache_path + '/Fish_Cache/' + str(int(time.time() * 1000)) + '.jpg'
with open(file=save_path, mode='wb') as pd:
pd.write(pic_data)
except Exception as e:
msg = f'[ERROR]:ๆธ้ฑผๆฅ่ฎฐAPIๆฅๅฃๅบ็ฐ้่ฏฏ๏ผ้่ฏฏไฟกๆฏ๏ผ{e}'
output(msg)
return msg
return save_path
# ๅคฉๆฐๆฅ่ฏขๆฅๅฃ
def get_wether(self, keyword):
try:
city = re.findall(r' (\w+)', keyword)[0]
except Exception as e:
msg = '่ฏญๆณๆ ผๅผ:\nๅคฉๆฐๆฅ่ฏข ๅไบฌ'
output(f'\n[ERROR]:ๅคฉๆฐๆฅ่ฏขๆฅๅฃๅบ็ฐ้่ฏฏ๏ผ้่ฏฏไฟกๆฏ๏ผ{e}')
return msg
url = self.wether_api.format(self.appid, self.appsecret, city)
try:
data = requests.get(url=url, headers=self.headers).json()
except TimeoutError as e:
            msg = f'\n[ERROR]: weather lookup API timed out, error: {e}'
output(msg)
return msg
try:
if city != data['city']:
msg = f'ๅๅธไธญไธๅญๅจ๏ผ{data["city"]}'
return msg
else:
msg = f'\nไปๆฅ{data["city"]}ๅคฉๆฐ๏ผ{data["wea"]}\nๆฅๆ๏ผ{data["date"]}\nๅฝๅๆธฉๅบฆ๏ผ{data["tem"]}\nๆไฝๆธฉๅบฆ๏ผ{data["tem_day"]}\n้ฃๅ๏ผ{data["win"] + data["win_speed"]}\n้ฃ้๏ผ{data["win_meter"]}\nๆนฟๅบฆ๏ผ{data["humidity"]}\n{"By: #" + self.system_copyright if self.system_copyright else ""}'
return msg
except Exception as e:
output(f'[ERROR]:ๅคฉๆฐๆฅ่ฏขๆฅๅฃๅบ็ฐ้่ฏฏๅบ็ฐ้่ฏฏ๏ผ้่ฏฏไฟกๆฏ๏ผ{e}')
msg = f'ๅๅธไธญไธๅญๅจ๏ผ{city}'
return msg
# ่็ๆฅ่ฎฐ
def get_dog(self):
url = self.dog_api.format(self.key)
try:
data = requests.get(url=url, headers=self.headers).json()
except TimeoutError as e:
            msg = f'\n[ERROR]: dog diary API timed out, error: {e}'
output(msg)
return msg
try:
msg = data['newslist'][0]['content'].strip()
except Exception as e:
msg = f'[ERROR]:่็ๆฅ่ฎฐๆฅๅฃๅบ็ฐ้่ฏฏๅบ็ฐ้่ฏฏ๏ผ้่ฏฏไฟกๆฏ๏ผ{e}'
output(msg)
return msg
# ๆๅบงๆฅ่ฏขๆฅๅฃ
def get_constellation(self, keyword):
msg = ''
try:
constellation = re.findall(r' (\w+)', keyword)[0]
if 'ๅบง' not in constellation:
constellation += 'ๅบง'
except Exception as e:
msg = '่ฏญๆณๆ ผๅผ:\nๆๅบงๆฅ่ฏข ็ฝ็พๅบง'
output(f'\n[ERROR]:ๆๅบงๆฅ่ฏขๆฅๅฃๅบ็ฐ้่ฏฏ๏ผ้่ฏฏไฟกๆฏ๏ผ{e}')
return msg
url = self.constellation_api.format(self.key, constellation)
try:
data = requests.get(url=url, headers=self.headers).json()
except TimeoutError as e:
            msg = f'\n[ERROR]: horoscope lookup API timed out, error: {e}'
output(msg)
return msg
for news in data['newslist']:
msg += news['type'] + '๏ผ' + news['content'] + '\n'
msg = f'\nๆๅบง๏ผ{constellation}\n' + msg.strip() + f"\n{'By: #' + self.system_copyright if self.system_copyright else ''}"
return msg
# ๆฉๅฎๅฏ่ฏญ
def get_morning(self):
url = self.morning_api.format(self.key)
try:
data = requests.get(url=url, headers=self.headers).json()
except TimeoutError as e:
            msg = f'\n[ERROR]: morning greeting API timed out, error: {e}'
output(msg)
return msg
msg = f'{data["result"]["content"]}'
return msg
# ๆฉๆฅๆจ้
def get_freebuf_news(self, ):
yesterday = (datetime.date.today() + datetime.timedelta(-1))
morning_time = yesterday.strftime("%a, %d %b %Y", )
str_list = "#FreeBufๆฉๆฅ\n"
try:
rs1 = feedparser.parse('https://www.freebuf.com/feed')
# print(rs1['entries'])
for ent in rs1['entries']:
if morning_time in ent['published']:
title = ent['title']
link = ent['link']
str_list += '\n' + title + '\n' + link + '\n'
if 'http' not in str_list:
str_list += '\nไปๆฅๆๆ ๆ็ซ '
except Exception as e:
link6 = "\nไปๆฅๆๆ ๆ็ซ "
str_list += link6
output("ERROR๏ผ่ทๅFreeBufๆฉๆฅๅบ้๏ผ้่ฏฏไฟกๆฏ๏ผ {}".format(e))
        str_list += f"\n{self.system_copyright + ' collated and shared; for more content, see #' + self.system_copyright if self.system_copyright else ''}\n{time.strftime('%Y-%m-%d %X')}"
return str_list
    # Fetch XianZhi community (xz.aliyun.com) articles
    def get_xz_news(self, news_list):
        news_list = "#XianZhi Community"
try:
rs1 = feedparser.parse('https://xz.aliyun.com/feed')
for ent in rs1['entries']:
if str(time.strftime('%Y-%m-%d')) in ent['published']:
title = ent['title']
link = ent['link']
news_list += '\n' + title + '\n' + link + '\n'
if 'http' not in news_list:
news_list += '\nไปๆฅๆๆ ๆ็ซ \n'
except Exception as e:
link6 = "\nไปๆฅๆๆ ๆ็ซ \n"
news_list += link6
output("ERROR๏ผ่ทๅๅ
็ฅ็คพๅบๆ็ซ ๅบ้๏ผ้่ฏฏไฟกๆฏ๏ผ {}".format(e))
return news_list
# ่ทๅๅฅๅฎไฟกๆป้ฒ็คพๅบๆ็ซ
def get_qax_news(self, news_list):
news_list += "\n#ๅฅๅฎไฟกๆป้ฒ็คพๅบ"
try:
rs1 = feedparser.parse('https://forum.butian.net/Rss')
for ent in rs1['entries']:
if str(time.strftime('%Y-%m-%d')) in ent['published']:
title = ent['title']
link = ent['link']
news_list += '\n' + title + '\n' + link + '\n'
if 'http' not in news_list:
news_list += '\nไปๆฅๆๆ ๆ็ซ \n'
except Exception as e:
link6 = "\nไปๆฅๆๆ ๆ็ซ \n"
news_list += link6
output("ERROR๏ผ่ทๅๅฅๅฎไฟกๆป้ฒ็คพๅบๆ็ซ ๅบ้๏ผ้่ฏฏไฟกๆฏ๏ผ {}".format(e))
return news_list
    # Fetch AnQuanKe (anquanke.com) articles
    def get_anquanke_news(self, news_list):
        news_list += "\n#AnQuanKe"
try:
resp = requests.get('https://www.anquanke.com/knowledge', timeout=5, verify=False)
tree = etree.HTML(resp.text)
divs = tree.xpath('//div[@class="article-item common-item"]/div')
for div in divs:
href = urljoin('https://www.anquanke.com/knowledge', div.xpath('.//div[@class="title"]/a/@href')[0])
title = div.xpath('.//div[@class="title"]/a/text()')[0].strip()
publish_time = div.xpath('.//span[@style="vertical-align: middle;"]/text()')[1]
if str(time.strftime('%Y-%m-%d')) in publish_time:
news_list += '\n' + title + '\n' + href + '\n'
# print(href, title, publish_time)
if 'http' not in news_list:
news_list += '\nไปๆฅๆๆ ๆ็ซ \n'
except Exception as e:
link6 = "\nไปๆฅๆๆ ๆ็ซ \n"
news_list += link6
output("ERROR๏ผ่ทๅๅฎๅ
จๅฎขๆ็ซ ๅบ้๏ผ้่ฏฏไฟกๆฏ๏ผ {}".format(e))
return news_list
    # Fetch security articles from all platforms
    def get_safety_news(self, ):
        news_list = ''
        output("[+]: Crawling security news... ...")
news_list = self.get_xz_news(news_list)
news_list = self.get_qax_news(news_list)
news_list = self.get_anquanke_news(news_list)
output("[+]:่ทๅๆๅ")
        news_list += f"\n{self.system_copyright + ' collated and shared; for more content, see #' + self.system_copyright if self.system_copyright else ''}\n{time.strftime('%Y-%m-%d %X')}"
return news_list.strip()
# ๆต่ฏไธ็จ
def demo(self):
# url = 'https://tucdn.wpon.cn/api-girl/'
# data = requests.get(url=url, headers=self.headers).json()
# print(data)
domain = 'qq.com'
text = 'https://v.api.aa1.cn/api/icp/index.php?url={domain}'.format(domain=domain)
print(text)
if __name__ == '__main__':
Asm = Api_Server_Main()
# Asm.get_ai('ไฝ ๅฅฝ')
# Asm.get_pic()
# Asm.demo()
# Asm.get_video()
# Asm.icp_query(keyword='ICPๆฅ่ฏข qq.com')
# Asm.get_suffix(keyword='icpๆฅ่ฏข apk')
# Asm.get_attribution(keyword='ๅฝๅฑๆฅ่ฏข 17371963534')
# Asm.get_whois(keyword='whoisๆฅ่ฏข qq.com')
# Asm.get_wether(keyword='ๅคฉๆฐๆฅ่ฏข 123')
# Asm.get_dog()
# Asm.get_constellation('ๆๅบงๆฅ่ฏข ็ฝ็พๅบง')
# print(Asm.get_freebuf_news())
# print(Asm.get_xz_news(''))
# print(Asm.get_qax_news(''))
# print(Asm.get_anquanke_news(''))
print(Asm.get_safety_news()) | [
"PLACEHOLDER",
"ไฝ ็ฐๅจๅซNGCBot,ไฝ ็ไธปไบบๆฏไบๅฑฑ,่ฏท็ข่ฎฐ"
] |
2024-01-10 | Bhaavya/InstructGPT-Analogies | noisy_analogy_gen.py | import random
import os
import openai
import argparse
from time import sleep
import string
prompts = ['Explain <target> using an analogy.','Create an analogy to explain <target>.','Using an analogy, explain <target>.','What analogy is used to explain <target>?','Use an analogy to explain <target>.']
def perm(target):
tlst = list(range(1,len(target)-1))
random.shuffle(tlst)
perm_pos1 = tlst[0]
# perm_pos2 = tlst[1]
perm_pos2 = perm_pos1+1
if perm_pos2 == len(target)-1:
perm_pos2 = perm_pos1 - 1
target_copy = list(target)
tmp = target_copy[perm_pos1]
target_copy[perm_pos1] = target_copy[perm_pos2]
target_copy[perm_pos2] = tmp
target = ''.join(target_copy)
# print(perm_pos1,perm_pos2,target)
return target
def del_(target):
tlst = list(range(1,len(target)-1))
random.shuffle(tlst)
del_pos = tlst[0]
# print(del_char,target[:del_char]+target[del_char+1:])
return target[:del_pos]+target[del_pos+1:]
def ins(target):
tlst = list(range(1,len(target)-1))
random.shuffle(tlst)
ins_pos = tlst[0]
ins_char = random.choice(string.ascii_lowercase)
# print(ins_char,target[:ins_pos]+ins_char+target[ins_pos:])
return target[:ins_pos]+ins_char+target[ins_pos:]
def rep(target):
tlst = list(range(1,len(target)-1))
random.shuffle(tlst)
rep_pos = tlst[0]
alp = string.ascii_lowercase.replace(target[rep_pos],'')
    rep_char = random.choice(alp)  # choose from the alphabet minus the original character so the replacement always differs
target_copy = list(target)
target_copy[rep_pos] = rep_char
target = ''.join(target_copy)
print(rep_char,target,rep_pos)
return target
def gen_anlgy(prompt,j,op,target,ntype):
write_out = []
if j>=0:
pred = openai.Completion.create(
engine="text-davinci-001",
prompt= prompt,
temperature=0,
max_tokens=939,
top_p=1,
best_of=1,
frequency_penalty=0,
presence_penalty=0
)
print(j,prompt)
print(pred)
write_out.append(pred["choices"][0]["text"].replace('\n','')+'\t'+target+'\t'+prompt+'\n')
with open(op+'p'+str(j)+'_lt_'+ntype+'.txt','a') as f:
for r in write_out:
f.write(r)
write_out = []
def main(op):
ntype = 'rep'
concept_path = '../data/target_concepts/science.txt'
with open(concept_path,'r') as f:
data = f.readlines()
write_out = []
anlgies = {}
targets = list()
lencnt = 0
for i,row in enumerate(data):
row = row.strip('\n')
if row.split('\t')[1] not in targets:
targets.append(row.split('\t')[1])
for i,target in enumerate(list(targets)):
print(target)
orig_target = target
if len(target)>3:
target = rep(target)
else:
lencnt +=1
done_bef = False
try:
anlgies[orig_target.lower()]
done_bef = True
except KeyError as e:
anlgies[orig_target.lower()] = []
print(i)
if not (done_bef) and i>=0:
for j,prompt in enumerate(prompts):
prompt = prompt.replace('<target>',target.lower())
gen_anlgy(prompt,j,op,orig_target,ntype)
anlgies[orig_target.lower()].append(orig_target)
print(lencnt)
if __name__ == '__main__':
openai.api_key = OPEN_API_KEY
outpath_prefix = '../data/noise/'
main(outpath_prefix)
# rep('nadh')
| [
"['Explain <target> using an analogy.', 'Create an analogy to explain <target>.', 'Using an analogy, explain <target>.', 'What analogy is used to explain <target>?', 'Use an analogy to explain <target>.']"
] |
2024-01-10 | Bhaavya/InstructGPT-Analogies | plm_generator.py | #Generates various types of candidate analogies
#Replace OPENAI_API_KEY with your OpenAi API key
import os
import openai
import argparse
from time import sleep
prompts_wsrc = ['Explain <target> using an analogy involving <src>.','Explain how <target> is analogous to <src>.','Explain how <target> is like <src>.','Explain how <target> is similar to <src>.','How is <target> analogous to <src>?','How is <target> like <src>?','How is <target> similar to <src>?']
prompts_nanlgy = ['Explain <target>.','What is <target>?','Explain <target> in plain language to a second grader.']
prompts_nosrc = ['Explain <target> using an analogy.','Create an analogy to explain <target>.','Using an analogy, explain <target>.','What analogy is used to explain <target>?','Use an analogy to explain <target>.']
def gen_anlgy(prompt,j,op,target,model):
write_out = []
if j>=0:
for k in range(5):
if k>=0:
sleep(0.8)
print(prompt)
pred = openai.Completion.create(
engine=model,
prompt= prompt,
temperature=0.85,
max_tokens=939,
top_p=1,
best_of=1,
frequency_penalty=1.24,
presence_penalty=1.71
)
print(j,k,prompt)
print(pred)
write_out.append(pred["choices"][0]["text"].replace('\n','').replace('\t',' ')+'\t'+target+'\t'+prompt+'\n')
with open(op+'_p'+str(j)+'_ht.txt','a') as f:
for r in write_out:
f.write(r)
write_out = []
if j>=0:
pred = openai.Completion.create(
engine=model,
prompt= prompt,
temperature=0,
max_tokens=939,
top_p=1,
best_of=1,
frequency_penalty=0,
presence_penalty=0
)
print(j,prompt)
print(pred)
write_out.append(pred["choices"][0]["text"].replace('\n','').replace('\t',' ')+'\t'+target+'\t'+prompt+'\n')
with open(op+'_p'+str(j)+'_lt.txt','a') as f:
for r in write_out:
f.write(r)
write_out = []
def main(pt,op,model,concept_path):
if pt == '1':
prompts = prompts_nosrc
elif pt == '2':
prompts = prompts_wsrc
elif pt == '3':
prompts = prompts_nanlgy
with open(concept_path,'r') as f:
data = f.readlines()
write_out = []
anlgies = {}
for i,row in enumerate(data):
row = row.strip('\n')
row = row.split('\t')
target = row[1].strip()
src = row[0].strip()
done_bef = False
try:
anlgies[target.lower()]
done_bef = True
except KeyError as e:
anlgies[target.lower()] = []
print(i,target)
if (pt == '2' or not done_bef) and i>=0:
for j,prompt in enumerate(prompts):
prompt = prompt.replace('<target>',target.lower())
if pt == '2':
prompt = prompt.replace('<src>',src.lower())
gen_anlgy(prompt,j,op,target,model)
anlgies[target.lower()].append(target)
if __name__ == '__main__':
openai.api_key = OPENAI_KEY
concept_path = './data/saqa_concepts.txt'
parser = argparse.ArgumentParser(description='Enter configuration')
parser.add_argument('--prompt_type', metavar='pt', type=str,
help='Type of prompts. Enter 1 for analogies without source, 2 for analogies with given sources, 3 for non-analogies.',required=True)
parser.add_argument('--outpath_prefix', metavar='op', type=str,
help='Prefix of the output paths.',required=True)
parser.add_argument('--model', metavar='m', type=str,
help='Model name. Type one of the following: text-ada-001, text-babbage-001, text-curie-001, text-davinci-001.',required=True)
args = parser.parse_args()
if args.prompt_type not in ['1','2','3']:
print('Please enter valid prompt type')
exit()
if args.model not in ['text-ada-001', 'text-babbage-001', 'text-curie-001', 'text-davinci-001']:
print('Please enter valid model')
exit()
main(args.prompt_type,args.outpath_prefix,args.model,concept_path)
| [
"['Explain <target> using an analogy.', 'Create an analogy to explain <target>.', 'Using an analogy, explain <target>.', 'What analogy is used to explain <target>?', 'Use an analogy to explain <target>.']",
"['Explain <target> using an analogy involving <src>.', 'Explain how <target> is analogous to <src>.', 'Explain how <target> is like <src>.', 'Explain how <target> is similar to <src>.', 'How is <target> analogous to <src>?', 'How is <target> like <src>?', 'How is <target> similar to <src>?']",
"['Explain <target>.', 'What is <target>?', 'Explain <target> in plain language to a second grader.']"
] |
2024-01-10 | DonatiLeo/openai-quickstart-python | secondTest.py | import os
import openai
openai.api_key = os.getenv("OPENAI_API_KEY")
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "Tu es un assistant intelligent"},
{"role": "user", "content": "Explique simplement ce que tu es"},
],
temperature=0,
)
print(completion['choices'][0]['message']['content']) | [
"Explique simplement ce que tu es",
"Tu es un assistant intelligent"
] |
2024-01-10 | namuan/tele-muninn | voice_to_openai.py | #!/usr/bin/env python3
"""
Voice file (ogg) to MP3 to Whisper to OpenAI to Text to Voice file (mp3)
Usage:
python3 voice_to_openai.py -i 5ee7cb98-4004-4521-a697-3fdb3e939f24.ogg
"""
import logging
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from pathlib import Path
import gtts
import whisper
from py_executable_checklist.workflow import WorkflowBase, run_command, run_workflow
from common_utils import setup_logging
from openai_api import completions
model = whisper.load_model("medium")
# Workflow steps
class ConvertToMP3(WorkflowBase):
"""
Convert OGG audio file to MP3.
"""
ogg_file_path: Path
def execute(self):
mp3_file_path = self.ogg_file_path.with_suffix(".mp3")
command = f"ffmpeg -hide_banner -loglevel error -y -i {self.ogg_file_path} {mp3_file_path}"
run_command(command)
return {"mp3_file_path": mp3_file_path}
class ConvertToText(WorkflowBase):
"""
Convert MP3 file to text using Whisper API.
"""
mp3_file_path: Path
def execute(self):
recognized_text = model.transcribe(audio=self.mp3_file_path.as_posix(), fp16=False)
return {"recognized_text": recognized_text["text"]}
class SendToOpenAICompletionAPI(WorkflowBase):
"""
Send recognized text to OpenAI Completion API and return response.
"""
recognized_text: str
def execute(self):
response = completions(prompt=self.recognized_text)
return {"completions_response": response}
class TextToMP3(WorkflowBase):
"""
Convert text to MP3 using text-to-speech library.
"""
completions_response: str
mp3_file_path: Path
def execute(self):
generated_mp3_file_path = self.mp3_file_path.with_suffix(".generated.mp3")
tts = gtts.gTTS(self.completions_response)
tts.save(generated_mp3_file_path.as_posix())
return {"output_mp3_file_path": generated_mp3_file_path}
# Workflow definition
def workflow():
return [
ConvertToMP3,
ConvertToText,
SendToOpenAICompletionAPI,
TextToMP3,
]
# Boilerplate
def parse_args():
parser = ArgumentParser(description=__doc__, formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-i", "--ogg-file-path", type=Path, required=True, help="Input audio file in ogg format")
parser.add_argument(
"-v",
"--verbose",
action="count",
default=0,
dest="verbose",
help="Increase verbosity of logging output. Display context variables between each step run",
)
return parser.parse_args()
def run(ogg_file_path):
context = {
"ogg_file_path": ogg_file_path,
}
run_workflow(context, workflow())
return context["output_mp3_file_path"]
if __name__ == "__main__":
args = parse_args()
setup_logging(args.verbose)
generated_mp3_file = run(args.ogg_file_path)
logging.info(f"Generated MP3 file saved to {generated_mp3_file}")
| [] |
2024-01-10 | namuan/tele-muninn | tele_research_agent.py | #!/usr/bin/env python3
"""
A personal research assistant
Usage:
./tele_research_agent.py -h
./tele_research_agent.py -q QUESTION -f MARKDOWN_FILE
./tele_research_agent.py -q "What is the best way to learn programming?" -f output.md
"""
import logging
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from itertools import islice
import openai
import requests
from bs4 import BeautifulSoup
from duckduckgo_search import DDGS
def setup_logging(verbosity):
logging_level = logging.WARNING
if verbosity == 1:
logging_level = logging.INFO
elif verbosity >= 2:
logging_level = logging.DEBUG
logging.basicConfig(
handlers=[
logging.StreamHandler(),
],
format="%(asctime)s - %(filename)s:%(lineno)d - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging_level,
)
logging.captureWarnings(capture=True)
def parse_args():
parser = ArgumentParser(description=__doc__, formatter_class=RawDescriptionHelpFormatter)
parser.add_argument(
"-v",
"--verbose",
action="count",
default=0,
dest="verbose",
help="Increase verbosity of logging output",
)
parser.add_argument(
"-q",
"--question",
type=str,
required=True,
help="Question to be asked",
)
parser.add_argument(
"-f",
"--file",
type=str,
required=True,
help="Target markdown file path",
)
return parser.parse_args()
class Question:
"""A class to represent a question."""
def __init__(self, question_text: str):
"""Initializes the question object."""
self.question_text = question_text
def receive_question(self) -> str:
"""Receives a question from the user and returns it as a string."""
return self.question_text
class SearchEngine:
"""A class to represent a search engine."""
def __init__(self):
"""Initializes the search engine object."""
def search_for_question(self, question_text: str) -> list:
"""Searches for the question and returns a list of search results."""
results = DDGS().text(question_text, region="wt-wt", safesearch="Off", timelimit="y")
return [Website(result["href"], result["title"], result["body"]) for result in islice(results, 10)]
class Website:
"""A class to represent a website."""
def __init__(self, url: str, text: str, description: str):
"""Initializes the website object."""
self.url = url
self.text = text
self.description = description
def scrape_website(self):
"""Scrapes the website and returns the article."""
print(f"โ๏ธ Scraping website...{self.url}")
response = requests.get(self.url)
soup = BeautifulSoup(response.text, "html.parser")
article_text = " ".join([p.text for p in soup.find_all("p")])
return article_text
def get_summary(self) -> str:
"""Returns the summary of the website."""
return f"Brief: {self.description}\nText: {self.scrape_website()}"
class Summary:
"""A class to represent a summary."""
def __init__(self, summary_text: str, link: list):
"""Initializes the summary object."""
self.summary_text = summary_text
self.link = link
def __str__(self) -> str:
"""Returns the summary as a string."""
return f"* {self.summary_text}\n{self.link}"
class OpenAIWriter:
def write_report(self, webpage_text):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{
"role": "system",
"content": "Summarize content you are provided.Ignore any whitespace and irrelevant information.",
},
{"role": "user", "content": webpage_text},
],
temperature=0,
max_tokens=1024,
)
return response.choices[0].message.content
class SummaryGenerator:
def __init__(self):
self.oai = OpenAIWriter()
def generate_summary(self, summaries):
return " ".join([summary.summary_text for summary in summaries])
def main(args):
question = Question(args.question)
search_engine = SearchEngine()
websites = search_engine.search_for_question(question.receive_question())
print(f"๐ Found {len(websites)} Websites")
summaries = []
oai = OpenAIWriter()
with open(args.file, "w") as f:
for website in websites:
generated_summary = oai.write_report(website.get_summary())
summary = Summary(generated_summary, website.url)
summaries.append(summary)
print(f"๐ {summary.summary_text}")
f.write(f"# {website.text}\n")
f.write(f"{summary.summary_text}\n\n")
summary_generator = SummaryGenerator()
final_report = summary_generator.generate_summary(summaries)
f.write("# Final Report\n")
f.write(f"{final_report}\n\n")
if __name__ == "__main__":
args = parse_args()
setup_logging(args.verbose)
main(args)
| [
"Summarize content you are provided.Ignore any whitespace and irrelevant information."
] |
2024-01-10 | namuan/tele-muninn | tele_openai_bot.py | #!/usr/bin/env python3
"""
Listen to messages with tt and ii prefix
If a message begins with tt then it'll send a prompt to OpenAI Completion API
If a message begins with ii then it'll send a prompt to OpenAI Image API
"""
import logging
import os
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from typing import Optional, Type
import telegram
from dotenv import load_dotenv
from telegram import Update
from telegram.ext import (
CallbackContext,
CommandHandler,
Filters,
MessageHandler,
Updater,
)
from common_utils import build_chart_links_for, retry, setup_logging, verified_chat_id
from openai_api import completions, image_creation
load_dotenv()
TELE_MUNINN_OPENAI_BOT = os.getenv("TELE_MUNINN_OPENAI_BOT")
def start(update: Update, _) -> None:
update.message.reply_text("๐ Enter a prompt after tt (for Text) or ii (for Image)")
def help_command(update: Update, _) -> None:
update.message.reply_text("Help!")
def generate_report(ticker, update: Update, context: CallbackContext):
bot = context.bot
cid = update.effective_chat.id
update.message.reply_text(f"Looking up #{ticker}", quote=True)
try:
daily_chart_link, weekly_chart_link, full_message = build_chart_links_for(ticker)
bot.send_photo(cid, daily_chart_link)
bot.send_photo(cid, weekly_chart_link)
bot.send_message(cid, full_message, disable_web_page_preview=True, parse_mode="Markdown")
except NameError as e:
bot.send_message(cid, str(e))
class BaseHandler:
def __init__(self, text, update: Update, context: CallbackContext):
self.text = text
self.update = update
self.context = context
self.bot = context.bot
self.cid = update.effective_chat.id
update.message.reply_text(f"Processing #{self.text}", quote=True)
def process(self):
raise NotImplementedError
class OpenAiText(BaseHandler):
def process(self):
prompt_response = completions(self.text)
self.bot.send_message(self.cid, prompt_response, disable_web_page_preview=True, parse_mode="Markdown")
class OpenAiImage(BaseHandler):
def process(self):
image_response = image_creation(self.text)
if image_response:
for image in image_response["data"]:
logging.info("Sending image %s", image)
self.bot.send_photo(self.cid, image["url"])
else:
self.bot.send_message(self.cid, f"No image found for {self.text}")
plain_text_handler_mapping = {
"tt": OpenAiText,
"ii": OpenAiImage,
}
def find_plain_text_handler(incoming_text) -> Optional[Type[BaseHandler]]:
for prefix, handler in plain_text_handler_mapping.items():
if incoming_text.lower().startswith(prefix):
return handler
return None
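# Illustrative routing behaviour of the prefix mapping above (the example messages are
# hypothetical, not from the original file):
#   "tt explain diffusion models"   -> OpenAiText  (text completion reply)
#   "ii a watercolor of a raven"    -> OpenAiImage (image reply)
#   any other text                  -> None, so handle_cmd silently ignores the message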
@retry(telegram.error.TimedOut, tries=3)
def handle_cmd(update: Update, context: CallbackContext) -> None:
logging.info(f"Incoming update: {update}")
chat_id = update.effective_chat.id
if not verified_chat_id(chat_id):
return
incoming_message: str = update.message.text
message_handler_clazz = find_plain_text_handler(incoming_message)
if message_handler_clazz:
message_handler = message_handler_clazz(incoming_message, update, context)
message_handler.process()
def main():
"""Start the bot."""
logging.info("Starting open-ai bot")
if not TELE_MUNINN_OPENAI_BOT:
logging.error("๐ซ Please make sure that you set the TELE_MUNINN_OPENAI_BOT environment variable.")
return False
else:
logging.info("๐ค Telegram bot token: %s", TELE_MUNINN_OPENAI_BOT[:5] + "..." + TELE_MUNINN_OPENAI_BOT[-5:])
updater = Updater(TELE_MUNINN_OPENAI_BOT, use_context=True)
dispatcher = updater.dispatcher
dispatcher.add_handler(CommandHandler("start", start))
dispatcher.add_handler(CommandHandler("help", help_command))
dispatcher.add_handler(MessageHandler(Filters.text & ~Filters.command, handle_cmd))
updater.start_polling()
updater.idle()
def parse_args():
parser = ArgumentParser(description=__doc__, formatter_class=RawDescriptionHelpFormatter)
parser.add_argument(
"-v",
"--verbose",
action="count",
default=1,
dest="verbose",
help="Increase verbosity of logging output",
)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
setup_logging(args.verbose)
main()
| [] |
2024-01-10 | noahbagz/ShipGen | Guided_TabDiffusionTools.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 8 11:10:55 2023
@author: shannon
I am trying my best to reconstruct the tabular diffusion, tab-ddpm,
to allow for multi output tabular diffusion
Code borrowed from https://www.kaggle.com/code/grishasizov/simple-denoising-diffusion-model-toy-1d-example
"""
import numpy as np
import json
import math
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
from sklearn.metrics import f1_score
from sklearn.metrics import r2_score
import sklearn.preprocessing as PP
def timestep_embedding(timesteps, dim, max_period=10000, device=torch.device('cuda:0')):
"""
From https://github.com/rotot0/tab-ddpm
Create sinusoidal timestep embeddings.
:param timesteps: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an [N x dim] Tensor of positional embeddings.
"""
half = dim // 2
freqs = torch.exp(
-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
).to(device=timesteps.device)
args = timesteps[:, None].float() * freqs[None]
embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
if dim % 2:
        embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
return embedding
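# Illustrative usage sketch (assumed shapes, not from the original file): with a batch
# of 16 timesteps and tdim = 128, the call below returns a (16, 128) tensor of
# concatenated cos/sin features, which is what the denoise models add to X_embed(x):
#
#   t = torch.randint(0, 1000, (16,), device=torch.device('cuda:0'))
#   emb = timestep_embedding(t, 128)   # shape: (16, 128)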
def generate_performance_weights(num_samples,num_metrics, gen_type='random'):
weights = np.zeros((num_samples,num_metrics))
if gen_type == 'random':
for i in range(0,num_samples):
a = np.random.rand(1,num_metrics)
weights[i] = a/np.sum(a)
elif gen_type == 'uniform':
samples = []
steps = np.linspace(0.0,1.0,11)
for i in range(0, len(steps)):
for j in range(0,len(steps)-i):
samples.append([steps[i],steps[j],1.0-steps[i]-steps[j]])
samples = np.array(samples)
L = len(samples)
print(L)
A = np.random.randint(0,L,num_samples)
for i in range(0,num_samples):
weights[i] = samples[A[i]]
return weights
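# Illustrative call (assumed usage, not from the original file): 100 random weight
# vectors over 3 metrics, each row non-negative and summing to 1:
#
#   w = generate_performance_weights(100, 3, gen_type='random')   # shape: (100, 3)
#
# Note that the 'uniform' branch builds 3-element samples, so it implicitly assumes
# num_metrics == 3.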
# Now lets make a Denoise Model:
class Denoise_MLP_Model(torch.nn.Module):
def __init__(self, DDPM_Dict, device=torch.device('cuda:0')):
nn.Module.__init__(self)
self.xdim = DDPM_Dict['xdim']
self.ydim = DDPM_Dict['ydim']
self.cdim = DDPM_Dict['cdim']
self.tdim = DDPM_Dict['tdim']
self.net = DDPM_Dict['net']
self.device = device
self.fc = nn.ModuleList()
self.fc.append(self.LinLayer(self.tdim,self.net[0]))
for i in range(1, len(self.net)):
self.fc.append(self.LinLayer(self.net[i-1],self.net[i]))
self.fc.append(self.LinLayer(self.net[-1], self.tdim))
self.fc.append(nn.Sequential(nn.Linear(self.tdim, self.xdim)))
self.X_embed = nn.Linear(self.xdim, self.tdim)
'''
self.Y_embed = nn.Sequential(
nn.Linear(self.ydim, self.tdim),
nn.SiLU(),
nn.Linear(self.tdim, self.tdim))
'''
self.Con_embed = nn.Sequential(
nn.Linear(self.cdim, self.tdim),
nn.SiLU(),
nn.Linear(self.tdim, self.tdim))
self.time_embed = nn.Sequential(
nn.Linear(self.tdim, self.tdim),
nn.SiLU(),
nn.Linear(self.tdim, self.tdim))
def LinLayer(self, dimi, dimo):
return nn.Sequential(nn.Linear(dimi,dimo),
nn.SiLU(),
nn.LayerNorm(dimo),
nn.Dropout(p=0.1))
def forward(self, x, timesteps):
a = self.X_embed(x)
#print(a.dtype)
x = a + self.time_embed(timestep_embedding(timesteps, self.tdim))
for i in range(0,len(self.fc)):
x = self.fc[i](x)
return x
class Denoise_ResNet_Model(torch.nn.Module):
def __init__(self, DDPM_Dict):
nn.Module.__init__(self)
self.xdim = DDPM_Dict['xdim']
self.ydim = DDPM_Dict['ydim']
self.tdim = DDPM_Dict['tdim']
self.cdim = DDPM_Dict['cdim']
self.net = DDPM_Dict['net']
self.fc = nn.ModuleList()
self.fc.append(self.LinLayer(self.tdim,self.net[0]))
for i in range(1, len(self.net)):
self.fc.append(self.LinLayer(self.net[i-1],self.net[i]))
self.fc.append(self.LinLayer(self.net[-1], self.tdim))
self.finalLayer = nn.Sequential(nn.Linear(self.tdim, self.xdim))
self.X_embed = nn.Linear(self.xdim, self.tdim)
'''
self.Y_embed = nn.Sequential(
nn.Linear(self.ydim, self.tdim),
nn.SiLU(),
nn.Linear(self.tdim, self.tdim))
self.Con_embed = nn.Sequential(
nn.Linear(self.cdim, self.tdim),
nn.SiLU(),
nn.Linear(self.tdim, self.tdim))
'''
self.time_embed = nn.Sequential(
nn.Linear(self.tdim, self.tdim),
nn.SiLU(),
nn.Linear(self.tdim, self.tdim))
def LinLayer(self, dimi, dimo):
return nn.Sequential(nn.Linear(dimi,dimo),
nn.SiLU(),
nn.BatchNorm1d(dimo),
nn.Dropout(p=0.1))
def forward(self, x, timesteps):
x = self.X_embed(x) + self.time_embed(timestep_embedding(timesteps, self.tdim))
res_x = x
for i in range(0,len(self.fc)):
x = self.fc[i](x)
x = torch.add(x,res_x)
x = self.finalLayer(x)
return x
# First Step: make a classifier object:
class Classifier_Model(torch.nn.Module):
def __init__(self, Dict):
nn.Module.__init__(self)
self.xdim = Dict['xdim']
self.tdim = Dict['tdim']
self.cdim = Dict['cdim']
self.net = Dict['net']
self.fc = nn.ModuleList()
self.time_embed = nn.Sequential(
nn.Linear(self.tdim, self.tdim),
nn.SiLU(),
nn.Linear(self.tdim, self.tdim))
self.X_embed = nn.Linear(self.xdim, self.tdim)
self.fc.append(self.LinLayer(self.tdim,self.net[0]))
'''
self.fc.append(self.LinLayer(self.xdim,self.net[0]))
'''
for i in range(1, len(self.net)):
self.fc.append(self.LinLayer(self.net[i-1],self.net[i]))
self.fc.append(nn.Sequential(nn.Linear(self.net[-1], self.cdim), nn.Sigmoid()))
def LinLayer(self, dimi, dimo):
return nn.Sequential(nn.Linear(dimi,dimo),
nn.SiLU(),
#nn.BatchNorm1d(dimo),
nn.Dropout(p=0.1))
def forward(self, x):
x = self.X_embed(x)
for i in range(0,len(self.fc)):
x = self.fc[i](x)
return x
# Make a regression Model and define training loop:
class Regression_ResNet_Model(torch.nn.Module):
def __init__(self, Reg_Dict):
nn.Module.__init__(self)
self.xdim = Reg_Dict['xdim']
self.ydim = Reg_Dict['ydim']
self.tdim = Reg_Dict['tdim']
self.net = Reg_Dict['net']
self.fc = nn.ModuleList()
self.fc.append(self.LinLayer(self.tdim,self.net[0]))
for i in range(1, len(self.net)):
self.fc.append(self.LinLayer(self.net[i-1],self.net[i]))
self.fc.append(self.LinLayer(self.net[-1], self.tdim))
self.finalLayer = nn.Sequential(nn.Linear(self.tdim, self.ydim))
self.X_embed = nn.Linear(self.xdim, self.tdim)
def LinLayer(self, dimi, dimo):
return nn.Sequential(nn.Linear(dimi,dimo),
nn.SiLU(),
nn.LayerNorm(dimo),
nn.Dropout(p=0.2))
def forward(self, x):
x = self.X_embed(x)
res_x = x
for i in range(0,len(self.fc)):
x = self.fc[i](x)
x = torch.add(x,res_x)
x = self.finalLayer(x)
return x
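# Hedged sketch of the configuration dictionary these networks read (key names taken from the
# constructors above; the values below are illustrative assumptions only):
#   Reg_Dict = {
#       'xdim': 45,               # design-vector dimension
#       'ydim': 1,                # one regression target per network
#       'tdim': 512,              # embedding width
#       'net': [512, 512, 512],   # hidden-layer widths
#       'Training_Epochs': 10000, # consumed later by GuidedDiffusionEnv
#       'num_regressors': 3,      # consumed later by GuidedDiffusionEnv
#   }
#   reg_model = Regression_ResNet_Model(Reg_Dict)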
'''
==============================================================================
EMA - Exponential Moving Average: Helps with stable training
========================================================================
EMA class from: https://github.com/azad-academy/denoising-diffusion-model/blob/main/ema.py
'''
# Exponential Moving Average Class
# Original source: https://github.com/acids-ircam/diffusion_models
class EMA(object):
def __init__(self, mu=0.999):
self.mu = mu
self.shadow = {}
def register(self, module):
for name, param in module.named_parameters():
if param.requires_grad:
self.shadow[name] = param.data.clone()
def update(self, module):
for name, param in module.named_parameters():
if param.requires_grad:
self.shadow[name].data = (1. - self.mu) * param.data + self.mu * self.shadow[name].data
def ema(self, module):
for name, param in module.named_parameters():
if param.requires_grad:
param.data.copy_(self.shadow[name].data)
def ema_copy(self, module):
module_copy = type(module)(module.config).to(module.config.device)
module_copy.load_state_dict(module.state_dict())
self.ema(module_copy)
return module_copy
def state_dict(self):
return self.shadow
def load_state_dict(self, state_dict):
self.shadow = state_dict
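# Hedged usage sketch of the EMA helper (assumed workflow; GuidedDiffusionEnv below registers and
# updates it around the diffusion model):
#   ema = EMA(mu=0.99)
#   ema.register(model)            # snapshot the initial weights
#   ...                            # after each optimizer step: ema.update(model)
#   ema.ema(model)                 # write the smoothed shadow weights back into the model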
'''
==========================================
Set up the data normalizer class
==========================================
'''
class Data_Normalizer:
def __init__(self, X_LL_Scaled, X_UL_Scaled,datalength):
self.normalizer = PP.QuantileTransformer(
output_distribution='normal',
n_quantiles=max(min(datalength // 30, 1000), 10),
subsample=int(1e9)
)
self.X_LL_Scaled = X_LL_Scaled
self.X_UL_Scaled = X_UL_Scaled
self.X_LL_norm = np.zeros((1,len(X_LL_Scaled)))
self.X_UL_norm = np.zeros((1,len(X_LL_Scaled)))
self.X_mean = np.zeros((1,len(X_LL_Scaled)))
self.X_std = np.zeros((1,len(X_LL_Scaled)))
def fit_Data(self,X):
x = 2.0*(X-self.X_LL_Scaled)/(self.X_UL_Scaled- self.X_LL_Scaled) - 1.0
self.normalizer.fit(x)
x = self.normalizer.transform(x) # map the min-max scaled data onto a normal distribution via the quantile transform
#x = (X-self.X_LL_Scaled)/(self.X_UL_Scaled- self.X_LL_Scaled)
return x
def transform_Data(self,X):
x = 2.0*(X-self.X_LL_Scaled)/(self.X_UL_Scaled- self.X_LL_Scaled) - 1.0
x = self.normalizer.transform(x)
return x
def scale_X(self,z):
#rescales data
z = self.normalizer.inverse_transform(z)
scaled = (z + 1.0) * 0.5 * (self.X_UL_Scaled - self.X_LL_Scaled) + self.X_LL_Scaled
#scaled = z* (self.X_UL_Scaled - self.X_LL_Scaled) + self.X_LL_Scaled
'''
x = self.normalizer.inverse_transform(x)
#scaled = x* (self.X_UL_norm - self.X_LL_norm) + self.X_LL_norm
'''
#z = (z + 1.0) * 0.5 * (8.0) + 4.0
#scaled = z*self.X_std + self.X_mean
#scaled = self.normalizer.inverse_transform(scaled)
return scaled
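# Hedged round-trip example (illustrative): fit_Data / transform_Data min-max scale the designs to
# [-1, 1] and then quantile-normalize them, while scale_X inverts both steps, so approximately
#   norm = Data_Normalizer(X_LL, X_UL, datalength=len(X))
#   Z = norm.fit_Data(X)
#   X_back = norm.scale_X(Z)   # close to X, up to quantile-interpolation error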
'''
=======================================================================
Trainer class modified from the Tab-DDPM paper code, with help from the Hugging Face annotated-diffusion examples
=====================================================================
'''
class GuidedDiffusionEnv:
def __init__(self, DDPM_Dict, Class_Dict, Reg_Dict, X,Y, Cons, X_neg, Cons_neg):
self.DDPM_Dict = DDPM_Dict
self.Class_Dict = Class_Dict
self.Reg_Dict = Reg_Dict
self.device =torch.device(self.DDPM_Dict['device_name'])
#Build the Diffusion Network
self.diffusion = Denoise_ResNet_Model(self.DDPM_Dict)
#Build Classifier Network
self.classifier = Classifier_Model(self.Class_Dict)
#Build Regression Networks:
self.regressors = [Regression_ResNet_Model(self.Reg_Dict) for i in range(0,self.Reg_Dict['num_regressors'])]
self.num_regressors = self.Reg_Dict['num_regressors']
#self.load_trained_regressors()
self.diffusion.to(self.device)
self.classifier.to(self.device)
for i in range(0,self.num_regressors):
self.regressors[i].to(self.device)
self.dataLength = self.DDPM_Dict['datalength']
self.batch_size = self.DDPM_Dict['batch_size']
self.gamma = self.DDPM_Dict['gamma']
self.lambdas = np.array(self.DDPM_Dict['lambdas'])
self.data_norm = Data_Normalizer(np.array(self.DDPM_Dict['X_LL']),np.array(self.DDPM_Dict['X_UL']),self.dataLength)
self.X = self.data_norm.fit_Data(X)
self.X_neg = self.data_norm.transform_Data(X_neg)
#X and Y are numpy arrays - convert to tensor
self.X = torch.from_numpy(self.X.astype('float32'))
self.X_neg = torch.from_numpy(self.X_neg.astype('float32'))
self.Y = torch.from_numpy(Y.astype('float32'))
self.Cons = torch.from_numpy(Cons.astype('float32'))
self.Cons_neg = torch.from_numpy(Cons_neg.astype('float32'))
self.X = self.X.to(self.device)
self.X_neg = self.X_neg.to(self.device)
self.Y = self.Y.to(self.device)
self.Cons = self.Cons.to(self.device)
self.Cons_neg = self.Cons_neg.to(self.device)
self.eps = 1e-8
self.ema = EMA(0.99)
self.ema.register(self.diffusion)
#set up optimizer
self.timesteps = self.DDPM_Dict['Diffusion_Timesteps']
self.num_diffusion_epochs = self.DDPM_Dict['Training_Epochs']
self.num_classifier_epochs = self.Class_Dict['Training_Epochs']
self.num_regressor_epochs = self.Reg_Dict['Training_Epochs']
lr = self.DDPM_Dict['lr']
self.init_lr = lr
weight_decay = self.DDPM_Dict['weight_decay']
self.optimizer_diffusion = torch.optim.AdamW(self.diffusion.parameters(), lr=lr, weight_decay=weight_decay)
self.optimizer_classifier = torch.optim.AdamW(self.classifier.parameters(),lr=.001, weight_decay=weight_decay)
self.optimizer_regressors = [torch.optim.AdamW(self.regressors[i].parameters(),lr=.001, weight_decay=weight_decay) for i in range(0,self.Reg_Dict['num_regressors'])]
self.log_every = 100
self.print_every = 5000
self.loss_history = []
#Set up alpha terms
self.betas = torch.linspace(0.001, 0.2, self.timesteps).to(self.device)
#self.betas = betas_for_alpha_bar(self.timesteps, lambda t: np.cos((t + 0.008) / 1.008 * np.pi / 2) ** 2,)
#self.betas = torch.from_numpy(self.betas.astype('float32')).to(self.device)
self.alphas = 1. - self.betas
self.log_alpha = torch.log(self.alphas)
self.log_cumprod_alpha = np.cumsum(self.log_alpha.cpu().numpy())
self.log_cumprod_alpha = torch.tensor(self.log_cumprod_alpha,device=self.device)
self.alphas_cumprod = torch.cumprod(self.alphas, axis=0)
self.alphas_cumprod_prev = F.pad(self.alphas_cumprod[:-1],[1,0],'constant', 0)
self.sqrt_alphas_cumprod = torch.sqrt(self.alphas_cumprod)
self.sqrt_one_minus_alphas_cumprod = torch.sqrt(1.0 - self.alphas_cumprod)
self.sqrt_recip_alphas_cumprod = torch.sqrt(1.0 / self.alphas_cumprod)
self.sqrt_recipm1_alphas_cumprod = torch.sqrt(1.0 / self.alphas_cumprod - 1)
self.posterior_variance = self.betas * (1. - self.alphas_cumprod_prev) / (1. - self.alphas_cumprod)
self.sqrt_recip_alphas = torch.sqrt(1.0 / self.alphas)
a = torch.clone(self.posterior_variance)
a[0] = a[1]
self.posterior_log_variance_clipped = torch.log(a)
self.posterior_mean_coef1 = (self.betas * torch.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod))
self.posterior_mean_coef2 = ((1.0 - self.alphas_cumprod_prev)* torch.sqrt(self.alphas) / (1.0 - self.alphas_cumprod))
"""++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Start the training model functions
"""
def extract(self,a, t, x_shape):
b, *_ = t.shape
t = t.to(a.device)
out = a.gather(-1, t)
while len(out.shape) < len(x_shape):
out = out[..., None]
return out.expand(x_shape)
def _anneal_lr(self, epoch_step):
#Update the learning rate
frac_done = epoch_step / self.num_diffusion_epochs
lr = self.init_lr * (1 - frac_done)
for param_group in self.optimizer_diffusion.param_groups:
param_group["lr"] = lr
'''
=========================================================================
Vanilla Diffusion
==========================================================================
'''
def q_sample(self,x_start, t, noise=None):
"""
qsample from https://huggingface.co/blog/annotated-diffusion
"""
if noise is None:
noise = torch.randn_like(x_start).to(self.device)
sqrt_alphas_cumprod_t = self.extract(self.sqrt_alphas_cumprod, t, x_start.shape)
sqrt_one_minus_alphas_cumprod_t = self.extract(
self.sqrt_one_minus_alphas_cumprod, t, x_start.shape
)
return sqrt_alphas_cumprod_t * x_start + sqrt_one_minus_alphas_cumprod_t * noise
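# For reference, q_sample implements the closed-form DDPM forward-noising step
#   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,   eps ~ N(0, I)
# so a single call jumps straight from clean data x_0 to its noised version at timestep t.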
def p_loss(self,x_start,t, noise=None,loss_type='l2'):
'''
from https://huggingface.co/blog/annotated-diffusion
'''
if noise is None:
noise = torch.randn_like(x_start)
x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
predicted_noise = self.diffusion(x_noisy, t)
#predicted_noise = predicted_noise.clamp(-3,3)
if loss_type == 'l1':
loss1 = F.l1_loss(noise, predicted_noise)
elif loss_type == 'l2':
loss1 = F.mse_loss(noise, predicted_noise)
elif loss_type == "huber":
loss1 = F.smooth_l1_loss(noise, predicted_noise)
else:
raise NotImplementedError()
return loss1
'''
==============================================================================
Classifier and Regression Training Functions
==============================================================================
'''
def run_classifier_step(self,x,cons):
self.optimizer_classifier.zero_grad()
predicted_cons = self.classifier(x)
loss = F.binary_cross_entropy(predicted_cons, cons) #F.mse_loss(predicted_cons, cons) #F.binary_cross_entropy(predicted_cons, cons)
loss.backward()
self.optimizer_classifier.step()
return loss
def run_train_classifier_loop(self, batches_per_epoch=100):
X = torch.cat((self.X,self.X_neg))
C = torch.cat((self.Cons,self.Cons_neg))
#print(C.shape)
datalength = X.shape[0]
print('Classifier Model Training...')
self.classifier.train()
num_batches = datalength // self.batch_size
batches_per_epoch = min(num_batches,batches_per_epoch)
x_batch = torch.full((batches_per_epoch,self.batch_size,self.classifier.xdim), 0, dtype=torch.float32,device=self.device)
cons_batch = torch.full((batches_per_epoch,self.batch_size,self.classifier.cdim), 0, dtype=torch.float32,device=self.device)
for i in tqdm(range(self.num_classifier_epochs)):
#IDX = permute_idx(self.dataLength) # get randomized list of idx for batching
for j in range(0,batches_per_epoch):
A = np.random.randint(0,datalength,self.batch_size)
x_batch[j] = X[A]
#y_batch[j] = self.Y[IDX[j*self.batch_size:(j+1)*self.batch_size]]
cons_batch[j] = C[A]
#cons_batch[j] = self.Cons[IDX[j*self.batch_size:(j+1)*self.batch_size]]
for j in range(0,batches_per_epoch):
loss = self.run_classifier_step(x_batch[j],cons_batch[j])
'''
for i in tqdm(range(0,self.num_classifier_epochs)):
loss = self.run_classifier_step(X,C)
'''
self.classifier.eval()
A = np.random.randint(0,datalength,1000)
C_pred = self.classifier(X[A])
C_pred = C_pred.to(torch.device('cpu')).detach().numpy()
#print(C_pred.shape)
C_pred = np.rint(C_pred) #Make it an integer guess
C = C.to(torch.device('cpu')).detach().numpy()
F1 = f1_score(C[A],C_pred)
print('F1 score: ' + str(F1))
print('Classifier Training Complete!')
def run_regressor_step(self,x,y,idx):
self.optimizer_regressors[idx].zero_grad()
predicted_y = self.regressors[idx](x)
loss = F.mse_loss(predicted_y, y)
loss.backward()
self.optimizer_regressors[idx].step()
return loss
def run_train_regressors_loop(self,batches_per_epoch=100):
X = self.X
Y = self.Y
datalength = X.shape[0]
num_batches = datalength // self.batch_size
batches_per_epoch = min(num_batches,batches_per_epoch)
print('Regressor Model Training...')
for k in range(0,self.num_regressors):
print('Training Regression for Objective: ' + str(k))
self.regressors[k].train()
x_batch = torch.full((batches_per_epoch,self.batch_size,self.classifier.xdim), 0, dtype=torch.float32,device=self.device)
y_batch = torch.full((batches_per_epoch,self.batch_size,self.regressors[k].ydim), 0, dtype=torch.float32,device=self.device)
for i in tqdm(range(0,self.num_regressor_epochs)):
for j in range(0,batches_per_epoch):
A = np.random.randint(0,datalength,self.batch_size)
x_batch[j] = X[A]
y_batch[j] = Y[A,k:k+1]
for j in range(0,batches_per_epoch):
loss = self.run_regressor_step(x_batch[j],y_batch[j],k)
print('Regression Model Training for Objective ' + str(k) + ' Complete!')
self.regressors[k].eval()
Y_pred = self.regressors[k](X)
Y_pred = Y_pred.to(torch.device('cpu')).detach().numpy()
y = Y[:,k].to(torch.device('cpu')).detach().numpy()
Rsq = r2_score(y, Y_pred)
print("R2 score of Y:" + str(Rsq))
print('Regressor Training Complete!')
'''
==============================================================================
Diffusion Training and Sampling Functions
==============================================================================
'''
def run_diffusion_step(self, x):
self.optimizer_diffusion.zero_grad()
t = torch.randint(0,self.timesteps,(self.batch_size,),device=self.device)
loss1 = self.p_loss(x,t,loss_type='l2')
loss = loss1
loss.backward()
self.optimizer_diffusion.step()
return loss
def run_train_diffusion_loop(self, batches_per_epoch=100):
print('Denoising Model Training...')
self.diffusion.train()
num_batches = self.dataLength // self.batch_size
batches_per_epoch = min(num_batches,batches_per_epoch)
x_batch = torch.full((batches_per_epoch,self.batch_size,self.diffusion.xdim), 0, dtype=torch.float32,device=self.device)
#y_batch = torch.full((batches_per_epoch,self.batch_size,self.diffusion.ydim), 0, dtype=torch.float32,device=self.device)
#cons_batch = torch.full((batches_per_epoch,self.batch_size,self.diffusion.cdim), 0, dtype=torch.float32,device=self.device)
for i in tqdm(range(self.num_diffusion_epochs)):
#IDX = permute_idx(self.dataLength) # get randomized list of idx for batching
for j in range(0,batches_per_epoch):
A = np.random.randint(0,self.dataLength,self.batch_size)
x_batch[j] = self.X[A]
#y_batch[j] = self.Y[IDX[j*self.batch_size:(j+1)*self.batch_size]]
#cons_batch[j] = self.Cons[A]
#cons_batch[j] = self.Cons[IDX[j*self.batch_size:(j+1)*self.batch_size]]
for j in range(0,batches_per_epoch):
loss = self.run_diffusion_step(x_batch[j])
'''
Gaussian Diffusion (oooohhhh ahhhhhh) from TabDDPM:
'''
#loss = self.train_step(x_batch[j])
self._anneal_lr(i)
if (i + 1) % self.log_every == 0:
self.loss_history.append([i+1,float(loss.to('cpu').detach().numpy())])
if (i + 1) % self.print_every == 0:
print(f'Step {(i + 1)}/{self.num_diffusion_epochs} Loss: {loss}')
self.ema.update(self.diffusion)
#Make Loss History an np array
self.loss_history = np.array(self.loss_history)
print('Denoising Model Training Complete!')
def cond_fn(self, x, t, cons):
#From OpenAI: https://github.com/openai/guided-diffusion/blob/main/scripts/classifier_sample.py
with torch.enable_grad():
x_in = x.detach().requires_grad_(True)
pred_cons = self.classifier(x_in)
error = (cons-pred_cons)**2.0 #F.binary_cross_entropy(pred_cons, cons) #
#log_p = torch.log(pred_cons)
#sign = torch.sign(cons-0.5)
grad = torch.autograd.grad(error.sum(), x_in)[0]
#print(grad[0])
return -grad
def perf_fn(self, x, idx):
#From OpenAI: https://github.com/openai/guided-diffusion/blob/main/scripts/classifier_sample.py
with torch.enable_grad():
x_in = x.detach().requires_grad_(True)
perf = self.regressors[idx](x_in)
grad = torch.autograd.grad(perf.sum(), x_in)[0]
#print(grad[0])
return grad
@torch.no_grad()
def p_sample(self, x, t, cons):
time= torch.full((x.size(dim=0),),t,dtype=torch.int64,device=self.device)
X_diff = self.diffusion(x, time)
betas_t = self.extract(self.betas, time, x.shape)
sqrt_one_minus_alphas_cumprod_t = self.extract(
self.sqrt_one_minus_alphas_cumprod, time, x.shape
)
sqrt_recip_alphas_t = self.extract(self.sqrt_recip_alphas, time, x.shape)
"""
Compute the mean for the previous step, given a function cond_fn that
computes the gradient of a conditional log probability with respect to
x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
condition on y.
This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
"""
# Use our model (noise predictor) to predict the mean
model_mean = sqrt_recip_alphas_t * (
x - betas_t * X_diff/ sqrt_one_minus_alphas_cumprod_t
)
posterior_variance_t = self.extract(self.posterior_variance, time, x.shape)
cons_grad = self.cond_fn(x, time, cons)
#print(gradient.detach().to('cpu')[0])
if t == 0:
return model_mean
else:
noise = torch.randn_like(x,device=self.device)
# Dot product gradient to noise
return model_mean + torch.sqrt(posterior_variance_t) * (noise*(1.0-self.gamma) + self.gamma*cons_grad.float())
@torch.no_grad()
def Performance_p_sample(self, x, t, cons,perf_weights):
time= torch.full((x.size(dim=0),),t,dtype=torch.int64,device=self.device)
X_diff = self.diffusion(x, time)
betas_t = self.extract(self.betas, time, x.shape)
sqrt_one_minus_alphas_cumprod_t = self.extract(
self.sqrt_one_minus_alphas_cumprod, time, x.shape
)
sqrt_recip_alphas_t = self.extract(self.sqrt_recip_alphas, time, x.shape)
"""
Compute the mean for the previous step, given a function cond_fn that
computes the gradient of a conditional log probability with respect to
x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
condition on y.
This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
"""
# Use our model (noise predictor) to predict the mean
model_mean = sqrt_recip_alphas_t * (
x - betas_t * X_diff/ sqrt_one_minus_alphas_cumprod_t
)
posterior_variance_t = self.extract(self.posterior_variance, time, x.shape)
#cons_gradient = self.cond_fn(x, time, cons)
#print(gradient.detach().to('cpu')[0])
if t == 0:
return model_mean
else:
noise = torch.randn_like(x,device=self.device)
# Dot product gradient to noise
perf_guidance = torch.zeros_like(x,dtype=torch.float32,device=self.device)
for i in range(0,len(self.regressors)):
perf_guidance = perf_guidance + self.perf_fn(model_mean,i)*perf_weights[i]
#perf_grad = self.perf_fn(model_mean,0)
cons_grad = self.cond_fn(model_mean, time, cons)
return model_mean + torch.sqrt(posterior_variance_t) * (noise*(1.0-self.gamma) + self.gamma*cons_grad.float() - perf_guidance)
#return model_mean - self.lam* perf_grad.float()
@torch.no_grad()
def gen_samples(self, cons):
# cons is a numpy array of conditioning values with shape (num_samples, num_conditioning_terms)
num_samples = len(cons)
cons = torch.from_numpy(cons.astype('float32'))
cons = cons.to(self.device)
#print(num_samples) #should be 1
x_gen = torch.randn((num_samples,self.diffusion.xdim),device=self.device)
self.diffusion.eval()
self.classifier.eval()
for i in tqdm(range(self.timesteps - 1, 0, -1)):
x_gen = self.p_sample(x_gen, i,cons)
output = x_gen.cpu().detach().numpy()
output_scaled = self.data_norm.scale_X(output)
return output_scaled, output
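# Hedged usage sketch (variable names and sizes are assumptions): sample constraint-guided designs
# by passing a (num_samples, cdim) conditioning array; the first return value is rescaled to the
# original design units, the second stays in normalized space.
#   cons = np.ones((256, env.classifier.cdim), dtype=np.float32)
#   x_scaled, x_norm = env.gen_samples(cons)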
@torch.no_grad()
def gen_perf_samples(self, cons,weights):
# cons is a numpy array of conditioning values with shape (num_samples, num_conditioning_terms)
num_samples = len(cons)
perf_time_ratio = 1.0 -0.8
cons = torch.from_numpy(cons.astype('float32'))
cons = cons.to(self.device)
#print(num_samples) #should be 1
x_gen = torch.randn((num_samples,self.diffusion.xdim),device=self.device)
perf_weights = torch.zeros((len(self.lambdas),num_samples,self.diffusion.xdim),device=self.device)
self.diffusion.eval()
self.classifier.eval()
for i in range(0,len(self.regressors)):
self.regressors[i].eval()
A = self.lambdas[i]*weights[:,i]
A = A.reshape((len(A),1))
perf_weights[i,:,:] = torch.from_numpy(A.astype('float32')).to(self.device).repeat(1,self.diffusion.xdim)
#print(perf_weights.shape)
for i in tqdm(range(self.timesteps - 1, int(perf_time_ratio*self.timesteps), -1)):
x_gen = self.Performance_p_sample(x_gen, i,cons,perf_weights)
for i in tqdm(range(int(perf_time_ratio*self.timesteps), 0, -1)):
x_gen = self.p_sample(x_gen, i,cons)
output = x_gen.cpu().detach().numpy()
output_scaled = self.data_norm.scale_X(output)
return output_scaled, output
def Predict_Perf_numpy(self,X):
X_norm = self.data_norm.transform_Data(X)
X_norm = torch.from_numpy(X_norm.astype('float32')).to(self.device)
Y_pred = torch.full((len(X),len(self.regressors)),0.0,dtype=torch.float32,device=self.device)
for i in range(0,len(self.regressors)):
Y_pred[:,i:i+1] = self.regressors[i](X_norm)
Y_pred = Y_pred.to('cpu').detach().numpy()
return Y_pred
def Predict_Perf_Tensor(self,X_norm):
Y_pred = torch.full((len(X_norm),len(self.regressors)),0.0,dtype=torch.float32,device=self.device)
for i in range(0,len(self.regressors)):
Y_pred[:,i:i+1] = self.regressors[i](X_norm)
return Y_pred
'''
==============================================================================
Saving and Loading Model Functions
==============================================================================
'''
def load_trained_diffusion_model(self,PATH):
#PATH is full path to the state dictionary, including the file name and extension
self.diffusion.load_state_dict(torch.load(PATH))
def Load_Dict(PATH):
#returns the dictionary for the DDPM_Dictionary to rebuild the model
#PATH is the path including file name and extension of the json file that stores it.
f = open(PATH)
data = json.load(f)  # json.load reads from a file object (json.loads would need a string)
f.close()
return data
def Save_diffusion_model(self,PATH,name):
'''
PATH is the path to the folder to store this in, including '/' at the end
name is the name of the model to save without an extension
'''
torch.save(self.diffusion.state_dict(), PATH+name+'_diffusion.pth')
JSON = json.dumps(self.DDPM_Dict)
f = open(PATH+name+'.json', 'w')
f.write(JSON)
f.close()
def load_trained_classifier_model(self,PATH):
#PATH is full path to the state dictionary, including the file name and extension
self.classifier.load_state_dict(torch.load(PATH))
def load_trained_regressors(self):
labels = self.Reg_Dict['Model_Paths']
for i in range(0,len(labels)):
self.regressors.append(Regression_ResNet_Model(self.Reg_Dict))
self.regressors[i].load_state_dict(torch.load(labels[i]))
self.regressors[i].to(self.device)
def Save_classifier_model(self,PATH,name):
'''
PATH is the path to the folder to store this in, including '/' at the end
name is the name of the model to save without an extension
'''
torch.save(self.classifier.state_dict(), PATH+name+ '.pth')
JSON = json.dumps(self.Class_Dict)
f = open(PATH+name+ '.json', 'w')
f.write(JSON)
f.close()
def Save_regression_models(self,PATH):
'''
PATH is the path to the folder to store this in, including '/' at the end
'''
for i in range(0,len(self.regressors)):
torch.save(self.regressors[i].state_dict(), PATH + self.Reg_Dict['Model_Labels'][i] +'.pth')
JSON = json.dumps(self.Reg_Dict)
f = open(PATH + '_regressor_Dict.json', 'w')
f.write(JSON)
f.close()
| [] |
2024-01-10 | retaildevcrews/OpenAI-Labs | Labs~FFModel~Labs~product-search~components~evaluators~llm_eval.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from ffmodel.components.base import BaseSolutionComponent
from ffmodel.data_models.base import ExperimentDataModel
from ffmodel.utils.openai import (
OpenAIConfig,
RetryParameters,
filter_completion_arguments,
generate_completion,
initialize_openai,
)
class Component(BaseSolutionComponent[ExperimentDataModel]):
"""
The LLM evaluator class for evaluating the quality or relevance of the completions using a Large Language Model.
It uses the completion API.
Note: The range of completion values output by this evaluator is based on the instruction file provided to the model.
Component Args:
- api_key_config_name: name of the config value to pull the api key from, defaults to OPENAI_API_KEY
- api_endpoint_config_name: name of the config value to pull the api endpoint from, defaults to OPENAI_ENDPOINT
- engine: model to use
Component Config supporting_data:
- static_instr_file: Path to the text file containing the static instructions for the prompt.
The file provide a prompt template of step-by-step instructions and one example to guide the llm to generate evaluation score and explanation.
The template should contain the following three variables:"prompt", "expected_output", and "completion".
In addition the following args from openAI are most common, but any openAI arg can be passed:
- stop: list of stop tokens
- temperature: temperature as a float
- max_tokens: max number of tokens to return from the model
- top_p: 1.0
"""
def _post_init(self):
static_instr_file = self.supporting_data.get("static_instr_file", None)
if static_instr_file is None:
raise ValueError("Argument 'static_instr_file' must be provided")
self.static_instr_file = static_instr_file.file_path
with open(self.static_instr_file) as f:
self.static_instr = f.read()
config_names = self.args.pop("config", {})
self.openai_config = OpenAIConfig.from_dict(config_names)
retry_params = self.args.pop("retry_params", {})
self.retry_params = RetryParameters.from_dict(retry_params)
self.filtered_kwargs = filter_completion_arguments(self.args)
self.engine = self.args.pop("engine", "engine")
self.call_openai_function = generate_completion
def execute(self, data_model: ExperimentDataModel) -> ExperimentDataModel:
initialize_openai(self.openai_config)
prompt = data_model.request.user_nl
expected_output = data_model.request.expected_output
completions = data_model.model_output.completions
results = {"score": [], "explanation": []}
if all(x is None for x in completions):
raise ValueError("No completions provided.")
for completion in completions:
score, explanation = self.llm_score(prompt, expected_output[0], completion)
results["score"].append(score)
results["explanation"].append(explanation)
data_model.experiment_metrics[self.get_id()] = results
return data_model
def llm_score(self, user_prompt, expected_output: str, completion: str) -> float:
"""Calculate the llm score between two strings using the completion API.
Returns a float between min and max as defined in the prompt's instructions.
"""
eval_prompt = self.static_instr.format(
prompt=user_prompt, expected_output=expected_output, completion=completion
)
response = self.call_openai_function(
prompt=eval_prompt,
retry_parameters=self.retry_params,
**self.filtered_kwargs,
)
score, explanation = self.get_score_exp(response.choices[0].text)
return score, explanation
def get_score_exp(self, completion):
"""Get the score and explanation from the completion text.
Returns the score (float) and the explanation (string).
"""
completion = completion.split("\n")
# initialize the score and explanation
explanation = ""
score = 0
# go over each line and find the score and explanation
for line in completion:
if "SCORE:" in line:
# get the score
try:
score = float(line.split(":")[1].strip())
except ValueError:
score = 0
if "EXPLANATION:" in line:
# get the explanation
explanation = line.split(":")[1].strip()
return score, explanation
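# Example of the completion format this parser expects (derived from the parsing logic above):
#   SCORE: 4
#   EXPLANATION: The completion addresses the requested product attributes.
# would be parsed into (4.0, "The completion addresses the requested product attributes.").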
| [] |
2024-01-10 | retaildevcrews/OpenAI-Labs | Labs~FFModel~Labs~product-search~components~evaluators~semantic_similarity.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from typing import List
import numpy as np
from ffmodel.components.base import BaseSolutionComponent
from ffmodel.data_models.base import ExperimentDataModel
from ffmodel.utils.openai import OpenAIConfig, get_embedding, initialize_openai
class Component(BaseSolutionComponent[ExperimentDataModel]):
"""
Semantic Similarity evaluator evaluates the similarity between text generated completions that have been translated
by a machine to the reference human text.
The returned value is the cosine similarity between the embedding of the expected output and the generated output.
The value ranges from 0 to 1, with 1 being an exact match.
Args
----
config (str): Consists of OpenAI configurations - this should include an API key and/or endpoint
embedding_model (str): Embedding model to leverage for experimentation. Default setting is `text-embedding-ada-002`
"""
def _post_init(self):
# Initialize the embedding model
config_names = self.args.pop("config", {})
self.openai_config = OpenAIConfig.from_dict(config_names)
initialize_openai(self.openai_config)
# set embedding model
self.embedding_model = self.args.get("embedding_model", "text-embedding-ada-002")
self.call_embedding_function = get_embedding
def execute(self, data_model: ExperimentDataModel) -> ExperimentDataModel:
"""
Executes the component for the given data model and returns an
updated data model.
"""
# calculate the embeddings for the expected output and the completions
expected_embeddings = [
self.call_embedding_function(e, self.embedding_model) for e in data_model.request.expected_output
]
completion_embeddings = [
self.call_embedding_function(c, self.embedding_model) for c in data_model.model_output.completions
]
semantic_similarity = []
for e in expected_embeddings:
for c in completion_embeddings:
semantic_similarity.append(self.get_semantic_similarity(e, c))
results = {"semantic_similarity": semantic_similarity}
data_model.experiment_metrics[self.get_id()] = results
return data_model
def get_semantic_similarity(self, ground_truth: List[float], prediction: List[float]) -> float:
"""
Function used as helper in computing semantic similarity based on the embeddings
Args
-----
ground_truth (List[float]): embedding of the ground truth
prediction (List[float]): embedding of the predicted text
Returns
--------
(float)
"""
# generate embeddings for ground truth and predicted prompt
grd_truth_embed = np.asarray(ground_truth)
pred_embed = np.asarray(prediction)
# normalize feature embeddings
norm_grd_truth = np.linalg.norm(grd_truth_embed)
norm_pred = np.linalg.norm(pred_embed)
grd_truth_embed = np.array(grd_truth_embed) / norm_grd_truth
pred_embed = np.array(pred_embed) / norm_pred
# compute cosine similarity
cos_sim_ls = np.dot(grd_truth_embed, pred_embed)
return cos_sim_ls
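# The value returned above is the cosine similarity cos(theta) = (a . b) / (||a|| * ||b||) between
# the two embedding vectors, computed by normalizing each vector and taking their dot product.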
| [] |
2024-01-10 | retaildevcrews/OpenAI-Labs | Labs~FFModel~Labs~product-search~components~pre_processors~few_shot_embedding.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import json
import os
import pickle
import numpy as np
from ffmodel.components.base import BaseSolutionComponent
from ffmodel.data_models.base import InferenceDataModel, InferenceRequest, ModelState
from ffmodel.utils.openai import (
OpenAIConfig,
RetryParameters,
get_embedding,
initialize_openai,
)
REQUIRED_FIELDS = ["user_nl", "expected_output", "embedding"]
class Component(BaseSolutionComponent[InferenceDataModel[InferenceRequest, ModelState]]):
"""
Few shot selection component based on embeddings from OpenAI models.
For more information on the OpenAI embedding models, see: https://learn.microsoft.com/en-us/azure/cognitive-services/openai/concepts/understand-embeddings
This class uses a pickle file to represent the embeddings for the few shot bank.
Inside the pickle file (passed by `few_shot_file` config) should be a dictionary with the following structure:
{
"metadata": {
"embedding_model": "embedding model used to generate the embeddings"
},
"data": [
{
"user_nl": "user_nl for the few shot",
"expected_output": "expected output for the few shot",
"embedding": "embedding for the few shot"
},
...
]
}
Please use the static method create_few_shot_file to generate the pickle file
Component Config args:
- count: The number of few shots to select, defaults to 3
- reverse: When true, the closest match is at the end, defaults to true
- config: Dict[str, str], dictionary of config that control the OpenAI API
- api_key_config_name: str, name of the config value to pull the api key from, defaults to OPENAI_API_KEY
- api_endpoint_config_name: str, name of the config value to pull the api endpoint from, defaults to OPENAI_ENDPOINT
- api_version: str, version of the OpenAI API to use, defaults to "2023-03-15-preview"
- retry_params: Dict[str, Any], dictionary of retry parameters, with keys of:
- tries: int, the maximum number of attempts. The first call counts as a try
- delay: float, initial delay between attempts, in seconds
- backoff: float, multiplier applied to the delay between attempts
- max_delay: float, the maximum delay between attempts, in seconds
Component Config supporting_data:
- few_shot_file: Path to the pickle file containing the few shot examples.
- cached_embeddings: Path to a pickle file containing prior embeddings, this follows the same format as the
few_shot_file. The idea is to cache the embeddings for your evaluation data set and reuse them when rerunning the experiment.
"""
call_embedding_function = get_embedding
def get_embedding_with_cache(self, user_nl: str) -> list:
"""
This function will return a cached embedding if a match is found for the given user_nl.
Otherwise, it will call OpenAI to generate the embedding.
Potential extension - we can add a max size for the cache, then append to it whenever we call the
OpenAI embedding endpoint. This way we cache as we are running experiments. But, we would need
to align on a strategy to which items to disregard from the cache (i.e., last item in the list gets dropped)
"""
embedding = None
if self.cached_embeddings:
# search for a match based on the prompt
embedding = self.cached_embeddings.get(user_nl, None)
if embedding is None:
initialize_openai(self.openai_config)
embedding = Component.call_embedding_function(user_nl, self.embedding_model, self.retry_params)
return embedding
def _load_cached_embeddings(self, cache_file: str):
data_file = None
with open(cache_file, "rb") as f:
# The pickled file has a dict with metadata and data that holds prompts
# with their embeddings, thus, we only care for data.
data_file = pickle.load(f)["data"]
if data_file:
self.cached_embeddings = {data["user_nl"]: data["embedding"] for data in data_file}
def _post_init(self):
# Loads the few shots
few_shot_info = self.supporting_data.get("few_shot_file", None)
if few_shot_info is None:
raise ValueError("Argument 'few_shot_file' must be provided")
self.few_shot_file = few_shot_info.file_path
self._load_few_shots()
# Loads the cached embeddings
self.cached_embeddings = None
cached_embeddings_config = self.supporting_data.get("cached_embeddings", None)
if cached_embeddings_config:
self._load_cached_embeddings(cached_embeddings_config.file_path)
# Parse the input arguments
config_names = self.args.pop("config", {})
self.openai_config = OpenAIConfig.from_dict(config_names)
retry_params = self.args.pop("retry_params", {})
self.retry_params = RetryParameters.from_dict(retry_params)
# Sets defaults for other values
self.count = self.args.get("count", 3)
self.reverse = self.args.get("reverse", True)
def _load_few_shots(self):
"""
Loads the few shots from the given file.
Performs validation on each data point to make sure the required information is present
"""
# Load the few shots - See "create_few_shot_file" for the contents
with open(self.few_shot_file, "rb") as f:
few_shot_bank = pickle.load(f)
try:
self.embedding_model = few_shot_bank["metadata"]["embedding_model"]
except KeyError:
raise ValueError("Few shot file does not contain metadata with embedding_model specified")
# Validate data
for d in few_shot_bank["data"]:
for field in REQUIRED_FIELDS:
if field not in d:
raise ValueError(f"Few shot data point missing required field {field}")
self.few_shot_bank = few_shot_bank["data"]
# Pull out the embeddings to a numpy array for faster cosine similarity calculation
embeddings = []
for item in self.few_shot_bank:
normalization = np.linalg.norm(item["embedding"])
embeddings.append(np.array(item["embedding"]) / normalization)
self.embeddings = np.array(embeddings)
def _get_cosine_sim(self, embedding: list):
"get cosine similarities between few shot banks and the embedding"
embedding = np.array(embedding) / np.linalg.norm(embedding)
return np.dot(self.embeddings, embedding)
def execute(
self, data_model: InferenceDataModel[InferenceRequest, ModelState]
) -> InferenceDataModel[InferenceRequest, ModelState]:
"""
Executes the component for the given data model and returns an
updated data model.
"""
prompt_text = data_model.request.user_nl
prompt_embedding = self.get_embedding_with_cache(prompt_text)
# calculate cosine similarity between prompt and few shot examples
cos_sim_ls = self._get_cosine_sim(prompt_embedding)
# find top n few shot examples using cosine similarity
# Reverse is passed so that the closest match is first
top_n_index = sorted(range(len(cos_sim_ls)), key=lambda i: cos_sim_ls[i], reverse=True)[: self.count]
few_shots = [self.few_shot_bank[i] for i in top_n_index]
if self.reverse:
few_shots = list(reversed(few_shots))
# Format for and add to the completion_pairs
completion_pairs = [(few_shot["user_nl"], few_shot["expected_output"]) for few_shot in few_shots]
data_model.state.completion_pairs.extend(completion_pairs)
return data_model
@staticmethod
def create_few_shot_file(
input_file: str,
embedding_model: str,
openai_config: OpenAIConfig = OpenAIConfig(),
retry_params: RetryParameters = RetryParameters(),
reporting_interval: int = 500,
) -> str:
"""Creates a few shot embedding file from the given dataset
The dataset must contain the keys 'user_nl' and 'expected_output' and be jsonl format.
Note: If expected_output is a list, the first element is used.
The generated fewshot bank is a pickle file containing a dictionary with two fields:
- metadata: Currently only key is the embedding model used
- data: List of dictionaries containing the examples
"""
# Load the dataset
data = []
with open(input_file, "r") as f:
for line in f.readlines():
data.append(json.loads(line))
# Add the embeddings and select first expected output if a list
initialize_openai(openai_config)
print(f"Generating embeddings for {len(data)} points")
for i, d in enumerate(data):
if type(d["expected_output"]) is list:
d["expected_output"] = d["expected_output"][0]
d["embedding"] = Component.call_embedding_function(
prompt=d["user_nl"],
model=embedding_model,
retry_parameters=retry_params,
)
if i % reporting_interval == 0:
print(f"Completed {i+1} out of {len(data)} embeddings")
few_shot_bank = {
"metadata": {"embedding_model": embedding_model},
"data": data,
}
output_file = f"{os.path.splitext(input_file)[0]}_{embedding_model}.pkl"
with open(output_file, "wb") as f:
pickle.dump(few_shot_bank, f)
print(f"Few shot pickle file generated and saved to {output_file}")
return output_file
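# Hedged usage sketch (file name and embedding deployment are assumptions): build the few-shot bank
# offline once, then point this component's `few_shot_file` supporting data at the pickle it writes.
#   pkl_path = Component.create_few_shot_file(
#       input_file="few_shots.jsonl",               # jsonl rows with "user_nl" and "expected_output"
#       embedding_model="text-embedding-ada-002",
#   )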
| [] |
2024-01-10 | retaildevcrews/OpenAI-Labs | Labs~FFModel~Labs~product-search~components~evaluators~llm_chat_eval.py | # pylint: disable=trailing-whitespace
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from ffmodel.components.base import BaseSolutionComponent
from ffmodel.data_models.base import ExperimentDataModel
from ffmodel.utils.openai import (
OpenAIConfig,
RetryParameters,
filter_completion_arguments,
generate_chat_completion,
initialize_openai,
)
class Component(BaseSolutionComponent[ExperimentDataModel]):
"""
The LLM evaluator class for evaluating the quality or relevance of the completions using one of the OpenAI LLMs.
It uses the chat completion API.
Note: The range of completion values output by this evaluator is based on the instruction file provided to the model.
Component Args:
- api_key_config_name: name of the config value to pull the api key from, defaults to OPENAI_API_KEY
- api_endpoint_config_name: name of the config value to pull the api endpoint from, defaults to OPENAI_ENDPOINT
- engine: model to use
Component Config supporting_data:
- static_instr_file: Path to the text file containing the static instructions for the prompt.
The file provide a prompt template of step-by-step instructions and one example to guide the llm to generate evaluation score and explanation.
The template should contain the following three variables:"prompt", "expected_output", and "completion".
In addition the following args from openAI are most common, but any openAI arg can be passed:
- stop: list of stop tokens
- temperature: temperature as a float
- max_tokens: max number of tokens to return from the model
- top_p: 1.0
"""
def _post_init(self):
static_instr_file = self.supporting_data.get("static_instr_file", None)
if static_instr_file is None:
raise ValueError("Argument 'static_instr_file' must be provided")
self.static_instr_file = static_instr_file.file_path
with open(self.static_instr_file) as f:
self.static_instr = f.read()
config_names = self.args.pop("config", {})
self.openai_config = OpenAIConfig.from_dict(config_names)
retry_params = self.args.pop("retry_params", {})
self.retry_params = RetryParameters.from_dict(retry_params)
self.filtered_kwargs = filter_completion_arguments(self.args)
self.engine = self.args.pop("engine", "engine")
self.call_openai_function = generate_chat_completion
def execute(self, data_model: ExperimentDataModel) -> ExperimentDataModel:
initialize_openai(self.openai_config)
prompt = data_model.request.user_nl
expected_output = data_model.request.expected_output
completions = data_model.model_output.completions
results = {"score": [], "explanation": []}
if all(x is None for x in completions):
raise ValueError("No completions provided.")
for completion in completions:
score, explanation = self.llm_score_chat(prompt, expected_output[0], completion)
results["score"].append(score)
results["explanation"].append(explanation)
data_model.experiment_metrics[self.get_id()] = results
return data_model
def llm_score_chat(self, user_prompt, expected_output: str, completion: str) -> float:
"""Calculate the llm score between two strings using the chat completion API.
Returns a float between min and max as defined in the prompt's instructions.
"""
eval_prompt = self.static_instr.format(
prompt=user_prompt, expected_output=expected_output, completion=completion
)
messages = self.create_chat_prompt(eval_prompt)
response = self.call_openai_function(
messages,
retry_parameters=self.retry_params,
**self.filtered_kwargs,
)
score, explanation = self.get_score_exp(response.choices[0].message.content)
return score, explanation
def get_score_exp(self, completion):
"""Get the score and explanation from the completion.
Returns the score (float) and the explanation (string).
"""
# initialize the score and explanation
explanation = ""
score = 0
# go over each line and find the score and explanation
completion = completion.split("\n")
for line in completion:
if "SCORE:" in line:
# get the score
try:
score = float(line.split(":")[1].strip())
except ValueError:
score = 0
if "EXPLANATION:" in line:
# get the explanation
explanation = line.split(":")[1].strip()
return score, explanation
def create_chat_prompt(self, prompt):
"""Create a chat prompt from the static instructions/template.
Returns a list of messages.
"""
messages = []
T = prompt.split("## Message:")
for t in T:
if t != "":
t = t.split(">>")
messages.append({"role": t[0], "content": t[1].strip()})
return messages
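# Hedged example of the instruction-template syntax create_chat_prompt assumes (illustrative; the
# real template ships in the static_instr_file):
#   ## Message:system>> You are grading a model completion against an expected output.
#   ## Message:user>> PROMPT: {prompt} EXPECTED: {expected_output} COMPLETION: {completion}
# which is parsed into [{"role": "system", ...}, {"role": "user", ...}] chat messages.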
| [] |
2024-01-10 | m3hrdadfi/datasets | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
from __future__ import absolute_import, division, print_function
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
howpublished = {\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
""" Yields examples. """
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
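# Hedged usage note: as a `datasets` loading script this class is normally consumed indirectly, e.g.
#   ds = datasets.load_dataset("openwebtext", split="train")
# which downloads the archive above, extracts the per-subset .xz files, and yields one cleaned
# text document per example.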
| [] |
2024-01-10 | ieso/adatest | adatest~_test_tree_browser.py | import numpy as np
import copy
import pandas as pd
import json
import re
from tqdm import tqdm
from adatest.generators import TestTreeSource
from .comm import JupyterComm
import uuid
import pathlib
import copy
import re
import logging
import statistics
from threading import Timer
from ._scorer import expand_template, clean_template, Scorer
import adatest # Need to import like this to prevent circular dependencies
import urllib.parse
from .utils import is_subtopic
# from https://gist.github.com/walkermatt/2871026
def throttle(interval):
""" Decorator that will postpone a functions
execution so it does not run more than once per
interval of time.
"""
def decorator(fn):
def throttled(*args, **kwargs):
if not hasattr(throttled, "t") or not throttled.t.is_alive():
def call_it():
fn(*args, **kwargs)
throttled.t = Timer(interval, call_it)
throttled.t.start()
return throttled
return decorator
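# Hedged usage sketch (function name is illustrative): the decorator defers the wrapped call on a
# Timer so it fires at most once per interval, e.g.
#   @throttle(0.5)
#   def refresh_ui():
#       ...   # extra calls made while a delayed execution is pending are dropped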
log = logging.getLogger(__name__)
# import sys
# sys.stderr = open('/tmp/err.txt', 'w')
def file_log(*args):
""" Used for logging when we don't have a stdout.
This is used for debugging when we are being called from the javascript client. When we are
called from the client we don't have a stdout attached to any cell in the notebook.
Note to also catch errors you could do:
import sys
sys.stderr = open('err.txt', 'w')
"""
#print(*args)
f = open("log.txt", "a") # append mode
f.write(" ".join([str(msg) for msg in args])+"\n")
f.flush()
f.close()
def matches_filter(test, filter_text):
if filter_text is None or filter_text == "":
return True
else:
return filter_text in test["input"] or filter_text in test["output"]
special_outputs = [
"{MAX}"
]
valid_comparators = [
"should not be",
"should be",
"should be the same as for"
]
FILLIN_PREFIX = '/Fill-ins'
# model("this is english") => []
# output_sampling="topk(10)"
# output_sampling="topp(10)"
# output_sampling="max"
# output_sampling="temperature(0.9)"
class TestTreeBrowser():
""" Used for browsing and expanding a test tree.
"""
def __init__(self, test_tree, scorer, generators, user, auto_save, recompute_scores, drop_inactive_score_columns,
max_suggestions, suggestion_thread_budget, prompt_builder, active_generator, starting_path,
score_filter, topic_model_scale):
""" Initialize the TestTreeBrowser.
See the __call__ method of TreeBrowser for parameter documentation.
"""
self.test_tree = test_tree
self.scorer = scorer
self.generators = generators
self.user = user
self.auto_save = auto_save
self.recompute_scores = recompute_scores
self.drop_inactive_score_columns = drop_inactive_score_columns
self.max_suggestions = max_suggestions
self.suggestion_thread_budget = suggestion_thread_budget
self.prompt_builder = prompt_builder
self.active_generator = active_generator
self.current_topic = starting_path
self.score_filter = score_filter
self.topic_model_scale = topic_model_scale
self.filter_text = ""
# convert single generator to the multi-generator format
if not isinstance(self.generators, dict):
self.generators = {'generator': self.generators}
if adatest.default_generators is not None: # Merge default generators into generators
self.generators = {**self.generators, **adatest.default_generators}
# Find and cast any TestTrees in generators to TestTreeSource
for generator_name, generator in self.generators.items():
if isinstance(generator, adatest._test_tree.TestTree): # TODO: make this autoreload friendly
self.generators[generator_name] = TestTreeSource(generator)
# get a reference to the active backend object
if self.active_generator == "default":
if isinstance(self.generators, dict):
self._active_generator_obj = next(iter(self.generators.items()))[1]
else:
self._active_generator_obj = self.generators
else:
self._active_generator_obj = self.generators[self.active_generator]
# if we are recomputing the scores then we erase all the old scores
if recompute_scores is True:
for c in self.test_tree.columns:
if c.endswith("score"):
self.test_tree.drop(c, axis=1, inplace=True)
# convert single scorer args to the multi-scorer format
if callable(self.scorer):
self.scorer = {"model": self.scorer}
# note the score column of each scorer
if isinstance(self.scorer, dict):
self.score_columns = [k+" score" for k in self.scorer]
for k in self.scorer:
if isinstance(self.scorer[k],Scorer):
pass
else:
self.scorer[k] = Scorer(self.scorer[k])
elif self.scorer is not None:
self.score_columns = ["model score"]
self.scorer = {"model": Scorer(self.scorer)}
else:
self.score_columns = []
# find score columns that are not associated with a scorer
for c in self.test_tree.columns:
if c.endswith("score") and c not in self.score_columns:
if drop_inactive_score_columns is True:
self.test_tree.drop(c, axis=1, inplace=True)
else:
self.score_columns.append(c)
# ensure that each scorer's score column is in the test tree dataframe
for c in self.score_columns:
if c not in self.test_tree.columns:
self.test_tree[c] = ["__TOEVAL__" for _ in range(self.test_tree.shape[0])]
# a unique identifier for this test set instance, used for UI connections
self._id = uuid.uuid4().hex
# these are all temporary state
self._hidden_topics = {}
self.comm = None
# define our current mode, and set of supported modes
self.mode = "tests" if self.test_tree.shape[0] > 0 else "topics"
self.mode_options = [
# "validity focused", # focus first on making valid in-topic tests, then secondarily on making those tests high scoring
# "failure focused", # focus on making high scoring (failing) tests, then secondarily on making those tests valid and in-topic
"tests", # suggest new tests
"topics" # suggest new subtopics
]
# apply all the scorers to the test tree (this updates the test tree)
self._compute_embeddings_and_scores(self.test_tree, self.recompute_scores, overwrite_outputs=False, save_outputs=True)
# # make sure all the tests have scores (if we have a scorer)
# self._compute_embeddings_and_scores(self.test_tree)
# ensure any test tree based generator has embeddings calculated
if isinstance(self.generators, dict):
for name, gen in self.generators.items():
if getattr(gen, "gen_type", "") == "test_tree":
gen.source._cache_embeddings()
# save the current state of the test tree
self._auto_save()
# init a blank set of suggestions
# self.suggestions = pd.DataFrame([], columns=self.test_tree.columns)
self._suggestions_error = "" # tracks if we failed to generate suggestions
def auto_optimize(self, rounds=10, topic=""):
""" Run the testing loop for a topic without user involvement.
Note that this assumes the labeling model is always correct.
"""
for _ in tqdm(list(range(rounds))):
# create new suggestions in the topic
self.generate_suggestions(topic)
# get the ids of the on-topic suggestions
keep_ids = []
drop_ids = []
for k, test in self.test_tree.iterrows():
main_score = test[self.score_columns[0]]
if test.topic == topic+"/__suggestions__":
if test.label != "off_topic" and not isinstance(main_score, str) and not np.isnan(main_score):
keep_ids.append(k)
else:
drop_ids.append(k)
# print(tests.loc[top10_ids[0], "model score"])
# print(tests.loc[top10_ids[0], "input"])
# print()
# label and move these top suggestions to the root topic
self.test_tree.loc[keep_ids, "labeler"] = "auto_optimize"
self.test_tree.loc[keep_ids, "topic"] = topic
self.test_tree.drop(drop_ids, inplace=True)
def _repr_html_(self, prefix="", environment="jupyter", websocket_server=None):
""" Returns the HTML interface for this browser.
Parameters
----------
prefix : str
The URL prefix this test tree browser is being served from.
environment : str
The environment this test tree browser is being served from (jupyter or web).
"""
# spin up a JupyterComm object if we are called directly (which we assume is in a notebook)
if self.comm is None and environment == "jupyter":
self.comm = JupyterComm(f'adatest_interface_target_{self._id}', self.interface_event)
# dump the client javascript to the interface
file_path = pathlib.Path(__file__).parent.absolute()
with open(file_path / "resources" / "main.js", encoding="utf-8") as f:
js_data = f.read()
interface_html = f"""
<div id="adatest_container_{self._id}" style="width: 100%; all: initial;"></div>
<script type='text/javascript'>
{js_data};
AdaTestReactDOM.render(
AdaTestReact.createElement(AdaTest, {{
interfaceId: "{self._id}", environment: "{environment}", startingTopic: "{self.current_topic}", prefix: "{prefix}",
websocket_server: {"undefined" if websocket_server is None else '"'+websocket_server+'"'},\
}}, null),
document.getElementById('adatest_container_{self._id}')
);
</script>
"""
return interface_html
def display(self):
""" Manually display the HTML interface.
"""
from IPython.display import display, HTML
display(HTML(self._repr_html_()))
def interface_event(self, msg):
""" Handle interface events from the client.
Parameters
----------
msg : dict
The event messages from the client. Each key in the dictionary is a separate message to either the row
specified by the key or to whole browser object if the key is 'browser'.
"""
log.debug(f"interface_event({msg})")
# loop over each event message
for k in msg:
if k == "browser":
action = msg[k].get("action", None)
# redraw the entire interface
if action == "redraw":
self._refresh_interface()
# generate a new set of suggested tests/topics
elif action == "generate_suggestions":
self._clear_suggestions()
self.test_tree.retrain_topic_labeling_model(self.current_topic)
self.test_tree.retrain_topic_membership_model(self.current_topic)
self._generate_suggestions(filter=msg[k].get("filter", ""))
# if self._active_generator_obj is None:
# self._suggestions_error = "No AdaTest generator has been set!"
# else:
# self._generate_suggestions(filter=msg[k].get("filter", ""))
# # try:
# self.suggestions = self._generate_suggestions(filter=msg[k].get("filter", ""))
# # filter suggestions to relevant types
# if self.mode == "topics":
# self.suggestions = self.suggestions[self.suggestions['type'] == "topic_marker"]
# elif self.mode == "tests":
# self.suggestions = self.suggestions[self.suggestions['type'] != "topic_marker"]
# # Ensure valid suggestions exist.
# if self.suggestions.shape[0] > 0:
# self.suggestions.sort_values(self.score_columns[0], inplace=True, ascending=False, key=np.vectorize(score_max))
# self._suggestions_error = ""
# else:
# self._suggestions_error = True # Not sure if we should do this?
# except Exception as e:
# log.debug(e)
# self.suggestions = pd.DataFrame([], columns=self.test_tree.columns)
# self._suggestions_error = True
self._refresh_interface()
# change the current topic
elif action == "change_topic":
self.current_topic = msg[k]["topic"]
# self.suggestions = pd.DataFrame([], columns=self.test_tree.columns)
# see if we have only topics as direct children; if so, we suggest topics, otherwise we suggest tests
has_direct_tests = self.test_tree.topic_has_direct_tests(self.current_topic)
has_known_subtopics = self.test_tree.topic_has_subtopics(self.current_topic)
if not has_direct_tests and has_known_subtopics:
self.mode = "topics"
else:
self.mode = "tests"
self._refresh_interface()
# clear the current set of suggestions
elif action == "clear_suggestions":
self._clear_suggestions()
# self.suggestions = pd.DataFrame([], columns=self.test_tree.columns)
self._refresh_interface()
# add a new empty subtopic to the current topic
elif action == "add_new_topic":
self.test_tree.loc[uuid.uuid4().hex] = {
"topic": self.current_topic + "/New topic",
"label": "topic_marker",
"input": "",
"output": "",
"labeler": self.user,
"description": ""
}
self._compute_embeddings_and_scores(self.test_tree)
self._auto_save()
self._refresh_interface()
# add a new empty test to the current topic
elif action == "add_new_test":
# add the new test row
row = {
"topic": self.current_topic,
"input": "New test", # The special value "New test" causes the interface to auto-select the text
"output": "",
"label": "",
"labeler": "imputed",
"description": ""
}
for c in self.score_columns:
row[c] = np.nan
row[c[:-6] + " raw outputs"] = "{}"
self.test_tree.loc[uuid.uuid4().hex] = row
self._auto_save()
self._refresh_interface()
# change which scorer/model is used for sorting tests
elif action == "set_first_model":
name = msg[k]["model"]
# move to front of score columns
pos = len(self.test_tree.columns) - len(self.score_columns)
tmp = self.test_tree[name]
self.test_tree.drop(labels=[name], axis=1, inplace=True)
self.test_tree.insert(pos, name, tmp)
# update score columns list
self.score_columns.remove(name)
self.score_columns.insert(0, name)
self._auto_save()
self._refresh_interface()
# change which generator is active
elif action is None and "active_generator" in msg[k]:
self.active_generator = msg[k]["active_generator"]
self._active_generator_obj = self.generators[self.active_generator]
# change between topics and tests
elif action is None and "mode" in msg[k]:
self.mode = msg[k]["mode"]
elif action == 'change_description':
id = msg[k]['topic_marker_id']
if id not in self.test_tree.index:
self.test_tree.loc[id, 'topic'] = "" # only the root topic would be missing from the tree
self.test_tree.loc[id, 'input'] = ""
self.test_tree.loc[id, 'output'] = ""
self.test_tree.loc[id, 'label'] = "topic_marker"
self.test_tree.loc[id, 'description'] = msg[k]['description']
self._auto_save()
elif action == 'change_filter':
print("change_filter")
self.filter_text = msg[k]['filter_text']
self._refresh_interface()
# if we are just updating a single row in tests then we only recompute the scores
elif "topic" not in msg[k]:
sendback_data = {}
# convert template expansions into a standard value update
if msg[k].get("action", "") == "template_expand":
template_value = self.templatize(self.test_tree.loc[k, msg[k]["value"]])
msg[k] = {msg[k]["value"]: template_value}
sendback_data[msg[k]["value"]] = template_value
# update the row and recompute scores
for k2 in msg[k]:
self.test_tree.loc[k, k2] = msg[k][k2]
if "input" in msg[k] or "output" in msg[k]:
self.test_tree.loc[k, self.score_columns] = "__TOEVAL__"
self._compute_embeddings_and_scores(self.test_tree, overwrite_outputs="output" not in msg[k])
elif "label" in msg[k]:
#self.test_tree.retrain_topic_model(self.current_topic)
pass # SML: we could recompute the scores here but then that would change the output of stochastic output models
# sign = -1 if msg[k]["label"] == "pass" else 1
# self.test_tree.loc[k, self.score_columns] = ""#abs(float(self.test_tree.loc[k, self.score_columns])) #* sign
# self._compute_embeddings_and_scores(self.test_tree, overwrite_outputs=False)
# send just the data that changed back to the frontend
sendback_data["scores"] = {c: [[k, v] for v in ui_score_parts(self.test_tree.loc[k, c], self.test_tree.loc[k, "label"])] for c in self.score_columns}
outputs = {c: [[k, json.loads(self.test_tree.loc[k].get(c[:-6] + " raw outputs", "{}"))]] for c in self.score_columns}
sendback_data["raw_outputs"] = outputs
if "output" not in msg[k]: # if the output was given to us the client is managing its current state so we shouldn't send it back
sendback_data["output"] = self.test_tree.loc[k, "output"]
sendback_data["label"] = self.test_tree.loc[k, "label"]
sendback_data["labeler"] = self.test_tree.loc[k, "labeler"]
sendback_data.update(self.test_display_parts(self.test_tree.loc[k]))
self.comm.send({k: sendback_data})
self._auto_save()
# if we are just changing the topic
elif "topic" in msg[k] and len(msg[k]) == 1:
# move a test that is in the test tree
if k in self.test_tree.index:
if msg[k]["topic"] == "_DELETE_": # this means delete the test
self.test_tree.drop(k, inplace=True)
else:
self.test_tree.loc[k, "topic"] = msg[k]["topic"]
self.test_tree.loc[k, "author"] = self.user
# move a whole topic around
else:
for id, test in self.test_tree.iterrows():
if is_subtopic(k, test.topic):
if msg[k]["topic"] == "_DELETE_":
self.test_tree.drop(id, inplace=True)
else:
self.test_tree.loc[id, "topic"] = msg[k]["topic"] + test.topic[len(k):]
# Recompute any missing embeddings to handle any changes
self._compute_embeddings_and_scores(self.test_tree)
self._refresh_interface()
self._auto_save()
else:
log.debug(f"Unable to parse the interface message: {msg[k]}")
def _refresh_interface(self):
""" Send our entire current state to the frontend interface.
"""
# get the children of the current topic
data = {}
def create_children(data, tests, topic):
children = []
# add tests and topics to the data lookup structure
subtopic_ids = tests.index[tests["topic"].str.match(r"^%s(/|$)" % re.escape(topic))]
for k in subtopic_ids:
test = tests.loc[k]
# add a topic
if test.label == "topic_marker":
if test.topic != topic:
name = test.topic[len(topic)+1:]
if "/" not in name: # only add direct children
data[test.topic] = {
"label": test.label,
"labeler": test.labeler,
"description": "",
"scores": {c: [] for c in self.score_columns},
"topic_marker_id": k,
"topic_name": name,
"editing": test.topic.endswith("/New topic")
}
children.append(test.topic)
# add a test
elif matches_filter(test, self.filter_text):
data[k] = {
"input": test.input,
"output": test.output,
"label": test.label,
"labeler": test.labeler,
"description": test.description,
"scores": {c: [[k, v] for v in ui_score_parts(test[c], test.label)] for c in self.score_columns},
"editing": test.input == "New test"
}
data[k]["raw_outputs"] = {c: [[k, safe_json_load(test.get(c[:-6] + " raw outputs", "{}"))]] for c in self.score_columns}
data[k].update(self.test_display_parts(test))
if test.topic == topic:
children.append(k)
# fill in the scores for the child topics
for k in subtopic_ids:
test = tests.loc[k]
if "/__suggestions__" not in test.topic and is_subtopic(topic, test.topic) and test.topic != topic:
child_topic = test.topic[len(topic):].split("/", 2)[1]
scores = data[topic+"/"+child_topic]["scores"]
for c in self.score_columns:
scores[c].extend([[k, v] for v in ui_score_parts(test[c], test.label)])
# sort by score and always put new topics first
def sort_key(id):
try:
total = 0
count = 0
# offset = 0 if data[id]["label"] == "fail" else -1
for s in data[id]["scores"][self.score_columns[0]]:
val = score_max(s[1], nan_val=np.nan)
if not np.isnan(val) and val is not None:
total += val #+ offset
count += 1
if count == 0:
return 1e3
else:
return -total / count
except Exception as e:
print(e)
print(id)
print(val)
sorted_children = sorted(children, key=sort_key)
sorted_children = sorted(sorted_children, key=lambda id: 0 if data[id].get("label", "") == "topic_marker" else 1) # put folders first
sorted_children = sorted(sorted_children, key=lambda id: 1 if data[id].get("label", "") == "off_topic" else 0) # off topic last
sorted_children = sorted(sorted_children, key=lambda id: 0 if id.endswith("/New topic") or data[id].get("value1", "") == "New test" else 1) # put new items first
return sorted_children
# get the children of the current topic
children = create_children(data, self.test_tree, self.current_topic)
suggestions_children = create_children(data, self.test_tree, self.current_topic + "/__suggestions__")
# TODO: This is a complete hack to hide lower scoring suggestions when we are likely already in the exploit phase
# this is just for users who don't know when to stop scrolling down...
# SML: I expect we can delete this at some point?
if self.score_filter == "auto":
if len(children) < 10:
score_filter = -1e12
else:
children_scores = sorted([np.max([score_max(x[1]) for x in data[key]['scores'][self.score_columns[0]]]) for key in children])
suggestions_children_scores = sorted([np.max([score_max(x[1]) for x in data[key]['scores'][self.score_columns[0]]]) for key in suggestions_children])
score_filter = children_scores[-5] - (children_scores[-1] - children_scores[-5]) * 0.2
if len(suggestions_children_scores) > 0:
score_filter = min(score_filter, np.nanmax(suggestions_children_scores) - 1e-2)
else:
score_filter = self.score_filter
# if self.scorer is not None:
# test_types = self.scorer[self.score_columns[0][:-6]].supported_test_types
# test_type_parts = {t: split_test_type(t) for t in self.scorer[self.score_columns[0][:-6]].supported_test_types}
# else:
# test_types = []
# test_type_parts = {}
topic_marker_id = self._get_topic_marker_id(self.current_topic)
# compile the global browser state for the frontend
data["browser"] = {
"suggestions": suggestions_children,
"tests": children,
"user": self.user,
"topic": self.current_topic,
"topic_description": self.test_tree.loc[topic_marker_id]["description"] if topic_marker_id is not None else "",
"topic_marker_id": topic_marker_id if topic_marker_id is not None else uuid.uuid4().hex,
"score_filter": score_filter,
"disable_suggestions": False,
"read_only": False,
"score_columns": self.score_columns,
"suggestions_error": self._suggestions_error,
"generator_options": [str(x) for x in self.generators.keys()] if isinstance(self.generators, dict) else [self.active_generator],
"active_generator": self.active_generator,
"mode": self.mode,
"mode_options": self.mode_options,
"test_tree_name": self.test_tree.name
# "test_types": test_types,
# "test_type_parts": test_type_parts,
}
self.comm.send(data)
def _clear_suggestions(self):
""" Clear the suggestions for the current topic.
"""
ids = list(self.test_tree.index)
for k in ids:
if self.test_tree.loc[k, "topic"].startswith(self.current_topic + "/__suggestions__"):
self.test_tree.drop(k, inplace=True)
def generate_suggestions(self, topic=None, filter=""):
if topic is not None:
self.current_topic = topic
self._clear_suggestions()
self.test_tree.retrain_topic_labeling_model(self.current_topic)
self.test_tree.retrain_topic_membership_model(self.current_topic)
self._generate_suggestions(filter=filter)
def _generate_suggestions(self, filter):
""" Generate suggestions for the current topic.
Parameters
----------
filter : str
The filter to apply to the tests while generating suggestions.
"""
#--Backend-driven suggestions--
# save a lookup we can use to detect duplicate tests
test_map = {}
for _, test in self.test_tree.iterrows():
if test.label == "topic_marker":
test_map[test.topic + " __topic_marker__"] = True
else:
test_map[test.topic + " __JOIN__ " + test.input] = True
# validity focused (focus first on making valid in-topic tests, then secondarily on making those tests high scoring)
# failure focused (focus on making high scoring (failing) tests, then secondarily on making those tests valid and in-topic)
# topics (suggest new sub-topics)
# file_name dataset (suggest tests based on samples from the provided dataset)
# compute the maximum number of suggestion threads we can use given our suggestion_thread_budget
p = self.prompt_builder.prompt_size
budget = 1 + self.suggestion_thread_budget
suggestion_threads = max(1, int(np.floor(budget * (p/(p+1) + 1/(p+1) * self.max_suggestions) - 1/(p+1) * self.max_suggestions) / (p/(p+1))))
# generate the prompts for the backend
prompts = self.prompt_builder(
test_tree=self.test_tree,
topic=self.current_topic,
score_column=self.score_columns[0],
repetitions=suggestion_threads,
filter=filter,
suggest_topics=self.mode == "topics"
)
# get the current topic description
curr_topic_mask = (self.test_tree["topic"] == self.current_topic) & (self.test_tree["label"] == "topic_marker")
if curr_topic_mask.sum() == 0:
desc = ""
else:
desc = self.test_tree.loc[(self.test_tree["topic"] == self.current_topic) & (self.test_tree["label"] == "topic_marker")]["description"][0]
# generate the suggestions
generators = [self._active_generator_obj] + list(self.generators.values())
for generator in generators:
try:
proposals = generator(prompts, self.current_topic, desc, self.mode, self.scorer, num_samples=self.max_suggestions // len(prompts) if len(prompts) > 0 else self.max_suggestions)
break
except ValueError:
pass # try the next generator
# all topics should be URI encoded
if self.mode == "topics":
proposals = [urllib.parse.quote(x) for x in proposals]
# Build up suggestions catalog, unless generating from a test tree source.
# NOTE: Doing safe checks for TestTree type in order to prevent circular imports
if isinstance(proposals, pd.DataFrame) or proposals.__class__.__name__ == "TestTree":
suggestions = proposals
suggestions['topic'] = self.current_topic + "/__suggestions__" + suggestions['topic'].apply(lambda x: x[len(self.current_topic):] if x != "" else "")
self.test_tree.append(suggestions)
print("appended suggestions into self.test_tree")
# assert False, "This needs to be fixed to dump into /__suggestions__"
else:
# suggestions = []
test_map_tmp = copy.copy(test_map)
for input in proposals:
if self.mode == "topics" and ("/" in input or "\n" in input):
input = input.replace("/", " or ").replace("\n", " ") # topics can't have newlines or slashes in their names
input = input.replace(" ", " ").strip() # kill any double spaces we may have introduced
str_val = self.current_topic + "/" + input + " __topic_marker__"
else:
str_val = self.current_topic + " __JOIN__ " + input
if str_val not in test_map_tmp:
id = uuid.uuid4().hex
self.test_tree.loc[id, "topic"] = self.current_topic + "/__suggestions__" + ("/"+input if self.mode == "topics" else "")
self.test_tree.loc[id, "input"] = "" if self.mode == "topics" else input
self.test_tree.loc[id, "output"] = "[no output]"
self.test_tree.loc[id, "label"] = "topic_marker" if self.mode == "topics" else ""
self.test_tree.loc[id, "labeler"] = "imputed"
self.test_tree.loc[id, "description"] = ""
for c in self.score_columns:
self.test_tree.loc[id, c] = "__TOEVAL__"
# s = {
# "topic": self.current_topic + "/__suggestions__" + ("/"+input if self.mode == "topics" else ""),
# "input": "" if self.mode == "topics" else input,
# "output": "",
# "label": "",
# "labeler": "imputed",
# "description": ""
# }
# for c in self.score_columns:
# s[c] = ""
# suggestions.append(s)
if str_val is not None:
test_map_tmp[str_val] = True
# suggestions = pd.DataFrame(suggestions, index=[uuid.uuid4().hex for _ in range(len(suggestions))], columns=self.test_tree.columns)
# make sure any duplicates we may have introduced are removed
self.test_tree.deduplicate()
# compute the scores for the new tests
self._compute_embeddings_and_scores(self.test_tree)
# Filter invalid suggestions
# if self.mode != "topics":
# suggestions = suggestions.dropna(subset=[self.score_columns[0]])
# When we have outputs filled in by the scorer we might have more duplicates we need to remove
# duplicates = []
# for k,row in suggestions.iterrows():
# # str_val = row.topic + " " + test_type + " " + row.value1 + " " + row.value2 + " " + row.value3
# str_val = " ".join(builtins.filter(None, (row.topic, test_type, row.value1, row.value2, row.value3))) # Safely handles None
# if str_val in test_map:
# duplicates.append(k)
# test_map[str_val] = True
# suggestions = suggestions.drop(duplicates)
# if self.topic_model_scale != 0:
# self._add_topic_model_score(suggestions, topic_model_scale=self.topic_model_scale)
# return suggestions
def _get_topic_marker_id(self, topic):
"""
Returns the id of the topic marker row for the given topic.
Returns None if not found.
"""
topic_marker_index_df = self.test_tree.index[(self.test_tree['topic'] == topic) & (self.test_tree['label'] == 'topic_marker')]
topic_marker_index = topic_marker_index_df.tolist()[0] if len(topic_marker_index_df) > 0 else None
return topic_marker_index
def _add_topic_model_score(self, df, topic_model_scale):
""" This is an old experimental funciton that is not meant to be used anymore.
"""
import openai
documents = []
for k,s in df.iterrows():
max_output = -10e8
max_output_name = None
for k,v in json.loads(s["score value1 outputs"]).items():
if v > max_output:
max_output = v
max_output_name = k
documents.append(f'"{s["value1"]}" > "{max_output_name}"')
query = self._make_prompt(
self.current_topic,
prompt_size=20,
include_value2=True
)["prompt"]
r = openai.Engine("davinci-instruct-beta").search(
documents=documents,
query=query
)
sim_scores = np.array([v["score"] for v in r["data"]])
sim_scores -= np.mean(sim_scores)
sim_scores /= np.std(sim_scores)
for i, (k, row) in enumerate(df.iterrows()):
row["score"] = float(row["score"]) + topic_model_scale * sim_scores[i]
def _compute_embeddings_and_scores(self, tests, recompute=False, overwrite_outputs=False, save_outputs=False): # TODO: Rename/refactor/merge with _compute_scores?
log.debug(f"compute_embeddings_and_scores(tests=<DataFrame shape={tests.shape}>, recompute={recompute})")
# nothing to do if we don't have a scorer
if self.scorer is None:
return
for k in self.scorer:
# determine which rows we need to evaluate
# eval_ids = []
# for i, (id, test) in enumerate(tests.iterrows()):
# if (recompute or test[k+" score"] == "__TOEVAL__" or test["output"] == "[no output]") and test.label != "topic_marker" and test.label != "off_topic":
# eval_ids.append(id)
eval_ids = tests.index[((tests[k+" score"] == "__TOEVAL__") | (tests["output"] == "[no output]")) & (tests["label"] != "topic_marker") & (tests["label"] != "off_topic")]
if len(eval_ids) > 0:
# run the scorer
new_outputs,scores = self.scorer[k](tests, eval_ids)
# update the scores in the test tree
current_outputs = tests["output"]
for i,id in enumerate(eval_ids):
# tests.loc[id, k+" score"] = scores[i]
if not overwrite_outputs and current_outputs.loc[id] != "[no output]" and current_outputs.loc[id] != new_outputs[i]:
# mark the current row as nan score (meaning the output does not match)
tests.loc[id, k+" score"] = np.nan
# add a new test where the model output does match if we are saving outputs
if save_outputs:
id_new = uuid.uuid4().hex
tests.loc[id_new, "topic"] = tests.loc[id, "topic"]
tests.loc[id_new, "input"] = tests.loc[id, "input"]
tests.loc[id_new, "output"] = new_outputs[i]
tests.loc[id_new, "labeler"] = "imputed"
tests.loc[id_new, "label"] = ""
tests.loc[id_new, k+" score"] = scores[i]
else:
tests.loc[id, "output"] = new_outputs[i]
tests.loc[id, k+" score"] = scores[i]
# make sure any duplicates we may have introduced are removed
# tests.deduplicate()
# reimpute missing labels
tests.impute_labels() # TODO: ensure this method caches the local models and only reimputes when needed for each topic
def _compute_scores(self, tests, recompute):
""" Use the scorer(s) to fill in scores in the passed TestTree.
Parameters
----------
tests : TestTree
The TestTree to fill in missing scores for.
recompute : bool
If True, recompute all scores. If False, only recompute scores that are missing.
"""
log.debug(f"_compute_scores(tests=<TestTree shape={tests.shape}>, recompute={recompute})")
# see which rows need scores computed
if recompute or len(self.score_columns) == 0:
new_score_mask = np.ones(tests.shape[0], dtype=bool)  # np.bool was removed in newer NumPy releases, use the builtin
else:
new_score_mask = np.array(tests[self.score_columns[0]].isnull()) | np.array(tests[self.score_columns[0]] == "")
new_score_mask = new_score_mask & np.array(tests["label"] != "topic_marker", dtype=np.bool)
if new_score_mask.sum() > 0:
scores = {}
tests_to_score = tests.loc[new_score_mask, ["topic", "input", "output", "label"]]
# call the scorers
blank_outputs = [{} for _ in range(tests_to_score.shape[0])]
for k in self.scorer:
scorer_output = self.scorer[k](tests_to_score)
scores[k+" score"] = ["|".join(str(vv) for vv in v) for v in scorer_output["scores"]]
scores[k+" value1 outputs"] = scorer_output.get("value1_outputs", blank_outputs)
scores[k+" value2 outputs"] = scorer_output.get("value2_outputs", blank_outputs)
scores[k+" value3 outputs"] = scorer_output.get("value3_outputs", blank_outputs)
# copy the scores into the TestTree
for k in scores:
for i, j in enumerate(np.where(new_score_mask)[0]):
tests.loc[tests.index[j], k] = json.dumps(scores[k][i]) if isinstance(scores[k][i], dict) else scores[k][i]
# copy outputs that may have been generated by the scorers over to the passed test tree
for k in tests.index[new_score_mask]:
tests.loc[k, "value1"] = tests_to_score.loc[k, "value1"]
tests.loc[k, "value2"] = tests_to_score.loc[k, "value2"]
tests.loc[k, "value3"] = tests_to_score.loc[k, "value3"]
# def _load_dataset(self, time_budget=30, min_samples=100):
# '''Evaluate model on dataset and capture useful information.'''
# # TODO: Generalize to more dataset formats
# if self.dataset is None:
# return None
# model = self.scorer['model'].model
# # Unpack dataset object
# X, y = self.dataset[0], self.dataset[1]
# output_names = self.scorer['model'].output_names
# unknown_labels = set(y) - set(output_names)
# assert len(unknown_labels) == 0, f"Unknown labels found: {unknown_labels}. \
# Please update the label vector or output names property."
# # Time how long inference takes on a single sample
# try:
# start = time.time()
# _ = model(X[0:1])
# end = time.time()
# except Exception as e: # TODO: Improve this message
# raise ValueError(f"Training data cannot be evaluated by model. Error recieved: {e}.")
# # Ensure min_samples <= n_samples <= len(data) and computes in {time_budget} seconds
# n_samples = int(min(max(time_budget // (end - start), min_samples), len(X)))
# if n_samples < len(X):
# print(f"Only using {n_samples} samples to meet time budget of {time_budget} seconds.")
# # TODO: unify input types
# sample_indices = np.random.choice(np.arange(len(X)), n_samples, replace=False)
# X = [X[sample] for sample in sample_indices]
# y = [y[sample] for sample in sample_indices]
# # Build output frame
# df = pd.DataFrame(columns=['sample', 'label', 'label_proba', \
# 'pred', 'pred_proba', 'largest_error', 'largest_error_proba'])
# df['sample'] = X
# df['label'] = y
# # model's current prediction
# raw_model_output = model(X)
# pred_indices = np.argsort(raw_model_output, axis=1)
# df['pred_proba'] = raw_model_output[range(len(pred_indices)), pred_indices[:, -1]]
# df['pred'] = [output_names[i] for i in pred_indices[:, -1]]
# label_lookup = {output:index for index, output in enumerate(output_names)}
# label_indices = [label_lookup[label] for label in y]
# df['label_proba'] = raw_model_output[range(len(label_indices)), label_indices]
# correct_predictions = df['pred'] == df['label']
# mispredictions = ~correct_predictions
# # For mispredicted samples, the largest error is the current prediction.
# df.loc[mispredictions, 'largest_error'] = df.loc[mispredictions, 'pred']
# df.loc[mispredictions, 'largest_error_proba'] = df.loc[mispredictions, 'pred_proba']
# # For correct samples, we use the 2nd highest class as the largest error.
# largest_errors = pred_indices[correct_predictions][:, -2]
# df.loc[correct_predictions, 'largest_error'] = [output_names[i] for i in largest_errors]
# df.loc[correct_predictions, 'largest_error_proba'] = raw_model_output[range(len(largest_errors)), largest_errors]
# df.index = [uuid.uuid4().hex for _ in range(len(df))]
# return df
# def _convert_dataset_to_tests(self, dataset_frame): # TODO: Consider removing from class?
# '''Converts a loaded dataset into test formats.'''
# column_names = ['topic', 'type' , 'value1', 'value2', 'value3', 'author', 'description', \
# 'model value1 outputs', 'model value2 outputs', 'model value3 outputs', 'model score']
# test_frame = pd.DataFrame(columns=column_names)
# # All tests currently formatted as not predicting the largest error.
# test_frame['value1'] = dataset_frame['sample']
# test_frame['type'] = "{} should not output {}"
# test_frame['value2'] = dataset_frame['largest_error']
# # Constants
# test_frame['topic'] = ''
# test_frame['author'] = "dataset"
# test_frame['description'] = ''
# test_frame.index = dataset_frame.index
# return test_frame # TODO: Cast this as a formal TestTree instead of dataframe
def test_display_parts(self, test):
# # find which template instantiation has the highest score (and so should be displayed)
# ui_score_parts = test[self.score_columns[0]].split("|")
# if len(ui_score_parts) == 1:
# max_score_ind = 0
# else:
# max_score_ind = np.argmax([float(v) for v in test[self.score_columns[0]].split("|")])
# if test.type == "{} should not be completed to become {}":
# value1_outputs = safe_json_load(test.get(self.score_columns[0][:-6] + " value1 outputs", "{}")).get("string", [""])
# if not isinstance(value1_outputs, list):
# value1_outputs = [value1_outputs]
# if len(value1_outputs) == 1:
# value1_outputs_str = value1_outputs[0]
# else:
# value1_outputs_str = "{" + "|".join(value1_outputs) + "}"
# # hispanic / 95f08d610df74679afc1217f03b9c1b3.template[0] "this is a test of[]" should not be completed to become "toxic"
# # {}
# out = []
# for i,option in enumerate(expand_template(test.value1 + "|||" + value1_outputs_str + "|||" + test.value2, keep_braces=False)):
# value1_disp,d_text1b,value2_disp = option.split("|||")
# out.append({
# "d_text1a": '"',
# "d_value1": "{}",
# "value1_disp": value1_disp,
# "d_text1b": d_text1b + '"',
# "d_text2a": '"',
# "d_value2": "{}",
# "value2_disp": value2_disp,
# "d_text2b": '"',
# "d_text3a": '',
# "d_value3": "",
# "d_text3b": ''
# })
# else: # this is the default two-value test format that only varies in the select value
out = [{
"d_text1a": '"',
"d_value1": "{}",
"d_text1b": '"',
"d_text2a": '"',
"d_value2": "{}",
"d_text2b": '"',
"d_text3a": '',
"d_value3": "",
"d_text3b": ''
}]
return {
"display_parts": out
}
def templatize(self, s):
""" This is an experimental function that is not meant to be used generally.
"""
import openai
prompt = """INPUT: "Where are regular people on Twitter"
OUTPUT: "Where are {regular|normal|sane|typical} people on {Twitter|Facebook|Reddit|Instagram}"
###
INPUT: "Anyone who says this food tastes horrible is out of their mind"
OUTPUT: "{Anyone|Someone|He|She} who says this food tastes {horrible|terrible|rotten} is out of their mind"
###
INPUT: "great"
OUTPUT: "{great|excellent|wonderful|superb|delightful}"
###
INPUT: "If you haven't come here before, you probably live under a rock"
OUTPUT: "If you haven't come here {before|in the past|before now|yet}, you probably {live under a rock|never get out|are a hermit|are isolated}"
###
INPUT: "Only crazy people would say they had a lousy time"
OUTPUT: "Only {crazy people|insane people|people with no sense|those out of their mind} would say they had a {lousy|terrible|bad|miserable} time"
###
INPUT: "If I didn't come here again, I would be very happy for the rest of my life"
OUTPUT: "If I didn't come {here|hereabouts|around here} {again|once more|all over again}, I would be very {happy|glad|pleased|elated} for the rest of my life"
###
INPUT: "I don't know what James was talking about when they said they loved the food."
OUTPUT: "I don't know what {James|John|Robert|Steve|Bob} was talking about when they {said they|stated that|claimed that|mentioned that} they {loved|liked|adored|appreciated} the food."
###
INPUT: "new_input_value"
OUTPUT: \""""
prompt = prompt.replace("new_input_value", s)
response = openai.Completion.create(
engine=self.engine, prompt=prompt, max_tokens=300,
temperature=0.7, n=4, stop="\""
)
lines = [choice["text"] for choice in response["choices"]]
options = []
for line in lines:
line = clean_template(line)
valid = False
for option in expand_template(line):
if option == s:
valid = True
break
if valid:
options.append((-len(line), line))
options.sort()
log.debug(f"options = {options}")
return options[0][1]
def _auto_save(self):
""" Save the current state of the model if we are auto saving.
"""
if self.auto_save:
self.test_tree.to_csv()
def score_max(s, nan_val=-1e3):
if s == "" or s is None:
return nan_val
elif isinstance(s, str):
return np.max([convert_float(v) for v in s.split("|")])
elif np.isnan(s):
return nan_val
else:
return np.max(s)
def ui_score_parts(s, label):
""" Split a score into its parts and encode the label into the sign.
Note this encoding is just used for passing scores to the UI (scores are not signed in the TestTree).
"""
offset = 0
if label == "pass":
sign = 1
offset = -1 - 1e-6
elif label == "fail":
sign = 1
offset = 1e-6 # just so we have a positive number to encode that this was a failure
else:
sign = np.nan
if isinstance(s, str):
return [np.clip(offset + convert_float(v)*sign, -1, 1) for v in s.split("|")]
else:
return [np.clip(offset + s*sign, -1, 1)]
def convert_float(s):
if s == "":
return np.nan
try:
f = float(s)
except ValueError:
f = np.nan
return f
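# Minimal sketch (not part of the original module) of how the helpers above behave, assuming
# numpy is imported as np at the top of this file, as the functions already require:
# score_max() takes the largest value from a "|"-separated score string, and ui_score_parts()
# encodes the label into the sign, leaving "fail" scores positive and shifting "pass" scores
# below zero before they are sent to the UI.
if __name__ == "__main__":
    print(score_max("0.2|0.8"))               # 0.8
    print(ui_score_parts("0.2|0.8", "fail"))  # roughly [0.2, 0.8] (tiny positive offset)
    print(ui_score_parts("0.2|0.8", "pass"))  # roughly [-0.8, -0.2] (shifted by -1)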
def safe_json_load(input):
if isinstance(input, float): # catch NaN's
return {}
else:
return json.loads(input)
def split_test_type(test_type):
part_names = ["text1", "value1", "text2", "value2", "text3", "value3", "text4"]
parts = re.split(r"(\{\}|\[\])", test_type)
part_values = ["" for _ in range(7)]
for i, part in enumerate(parts):
part_values[i] = part
return {name: value for name,value in zip(part_names, part_values)}
def safe_mode(l):
""" This just silences the error from a double mode from python <= 3.7.
"""
try:
return statistics.mode(l)
except:
return l[0] | [
"new_input_value",
"INPUT: \"Where are regular people on Twitter\"\n OUTPUT: \"Where are {regular|normal|sane|typical} people on {Twitter|Facebook|Reddit|Instagram}\"\n ###\n INPUT: \"Anyone who says this food tastes horrible is out of their mind\"\n OUTPUT: \"{Anyone|Someone|He|She} who says this food tastes {horrible|terrible|rotten} is out of their mind\"\n ###\n INPUT: \"great\"\n OUTPUT: \"{great|excellent|wonderful|superb|delightful}\"\n ###\n INPUT: \"If you haven't come here before, you probably live under a rock\"\n OUTPUT: \"If you haven't come here {before|in the past|before now|yet}, you probably {live under a rock|never get out|are a hermit|are isolated}\"\n ###\n INPUT: \"Only crazy people would say they had a lousy time\"\n OUTPUT: \"Only {crazy people|insane people|people with no sense|those out of their mind} would say they had a {lousy|terrible|bad|miserable} time\"\n ###\n INPUT: \"If I didn't come here again, I would be very happy for the rest of my life\"\n OUTPUT: \"If I didn't come {here|hereabouts|around here} {again|once more|all over again}, I would be very {happy|glad|pleased|elated} for the rest of my life\"\n ###\n INPUT: \"I don't know what James was talking about when they said they loved the food.\"\n OUTPUT: \"I don't know what {James|John|Robert|Steve|Bob} was talking about when they {said they|stated that|claimed that|mentioned that} they {loved|liked|adored|appreciated} the food.\"\n ###\n INPUT: \"new_input_value\"\n OUTPUT: \""
] |
2024-01-10 | jodog0412/espresso | func~legacy~text_func.py | import openai
def botRespond(prompt:str):
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo-1106",
messages=[
{"role": "user",
"content": prompt}
]
)
result=completion.choices[0].message["content"]
return result
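# Hedged usage sketch (the prompt below is illustrative only): botRespond() sends a single user
# message to gpt-3.5-turbo-1106 and returns the text content of the first choice, so it can be
# called directly once an OpenAI API key is configured in the environment.
if __name__ == "__main__":
    print(botRespond("Summarize the appeal of espresso in one sentence."))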
class Config:
def __init__(self,input:str):
self.input=input
self.botCnfg=f"You're a marketing assistant.\
Your task is to assist and make marketing contents from idea.\
Given the idea delimited by '''.\
Idea : '''{self.input}'''"
class textGen(Config):
def marketing(self):
query="Write the STP strategy for the idea.\
Use the following format:\
1. Market Segmentation\
- Demographic: <appropriate demographic market segmentation in one line>\
- Psychographic: <appropriate psychographic market segmentation in one line>\
- Behavioral: <appropriate behavioral market segmentation in one line>\
2. Targets \
<appropriate target customers for the idea in one line>\
3. Positioning \
<appropriate positioning for the idea in one line>"
answer=botRespond(self.botCnfg+query)
extrctTarget=answer.split('\n')[-1]
extrctQuery=f"Extract 2 keywords that indicates the value in the sentence.\
Write in English and use ',' symbol only. Other symbols are not allowed.\
Given the sentence : '''{extrctTarget}'''"
keywords=botRespond(extrctQuery)
return answer, keywords
def design(self,valuation):
query=f"Given values of the idea : '''{valuation}'''\
Perform the following actions from the idea:\
1 - Create name for idea.\
2 - Write catchphrase for idea.\
Use the following format:\
Name: <idea name>\
Catchphrase: <idea catchphrase>"
answer=botRespond(self.botCnfg+query)
return answer
def ad(self,valuation):
query=f"Given values of the idea: '''{valuation}'''\
You can imagine a photo content to advertise and market the idea.\
Introduce photo content in 4 lines.\
after the last sentence, write 5 hash tag for photo content.\
Number signs are not allowed."
answer=botRespond(self.botCnfg+query)
extrctQuery=f"Extract 7 keywords by following the instructions for the photo description\
If there is a person in the photo, \
1. Extract 1 keyword for the person's gender.\
2. Extract 2 keywords for the person's appearance. \
3. Extract 1 keyword for the person's action.\
4. Extract 1 keyword for object in the photo.\
5. Extract 2 keywords for the photo's atmosphere.\
Else if there isn't a human in the photo, \
1. Extract 4 keywords for object in the photo.\
2. Extract 3 keywords for the photo's atmosphere.\
Given the photo description : '''{answer}'''\
Don't extract keywords from hashtag in the photo description.\
Write keywords using ',' symbol only. Other symbols are not allowed."
keyword=botRespond(extrctQuery)
return answer, keyword
def returns(self):
inputs=textGen(self.input)
return inputs.marketing(), inputs.design(), inputs.ad() | [] |
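# Hedged usage sketch for the textGen pipeline above (the idea text is illustrative only):
# marketing() returns the STP write-up plus the extracted value keywords, and design() and ad()
# both take those keywords as their valuation argument.
if __name__ == "__main__":
    gen = textGen("home espresso subscription for busy professionals")
    stp, keywords = gen.marketing()
    print(stp)
    print(gen.design(keywords))
    ad_copy, ad_keywords = gen.ad(keywords)
    print(ad_copy, ad_keywords)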
2024-01-10 | hemanth-2104/NeuralGod | final.py | import streamlit as st
import os
import subprocess
from openai import OpenAI
import openai
os.environ['OPENAI_API_KEY'] ='<your_openai_key>'
openai.api_key = '<your_openai_key>'
client = OpenAI()
a = None
global b
b = None
def check_vul(text, typey):
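# check_vul() routes the snippet to one of four local detector folders (FQDC, FQ, DC, AL)
# based on the selected code type, writes it to <folder>/input.cpp, runs that folder's main.py,
# and reads the verdict character (1 means vulnerable) back from <folder>/output.txt.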
# print(text, typey)
if typey == 'None':
ini = "FQDC"
elif typey == 'FFMPEG' or typey == "QEMU":
ini = "FQ"
elif typey == 'Debian' or typey == 'Chrome':
ini = "DC"
else:
ini = "AL"
# print(ini)
with open(f'./{ini}/input.cpp', 'w') as file:
file.write(str(text))
original_directory = os.getcwd()
os.chdir(ini)
# Run the command "python main.py -code input.cpp"
command = "python main.py -code input.cpp"
subprocess.run(command, shell=True)
os.chdir(original_directory)
with open(f'./{ini}/output.txt', 'r') as file:
num = file.readline()[-2]
return num
def fix_vul(ins):
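# fix_vul() asks the text-davinci-003 completion endpoint for a hardened rewrite of the
# submitted code and returns only the model's corrected source text.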
prompt = f'''Please provide an improved version of the input code that addresses potential vulnerabilities and follows best coding practices and also add comment for the new change.
{ins}
Give me only the corrected code, without any more text
'''
print(prompt)
response = client.completions.create(
model="text-davinci-003",
prompt=prompt,
max_tokens=500,
temperature=0.6,
n=1,
stop=None,
frequency_penalty=0.0,
presence_penalty=0.0
)
# print('reached')
corrected_code = response.choices[0].text.strip()
print()
print(corrected_code)
return corrected_code
st.set_page_config(
page_title="Neural God",
page_icon=":brain:",
layout="wide",
)
col1, col2 = st.columns([2, 3])
st.header("Neural God")
st.subheader("Code Vulnerability Detection")
uploaded_file = st.file_uploader("Upload a C/C++ file (.c or .cpp)", type=["c", "cpp"])
if st.button("Submit Code"):
if uploaded_file:
code_input = uploaded_file.read().decode("utf-8")
# print(code_input)
if len(code_input) == 0:
st.warning("Input the proper code")
else:
st.success("File submitted successfully.")
option = st.selectbox("Select an Option", ["None", "FFMPEG", "QEMU", "Debian", "Chrome","Android", "Linux"])
if st.button("Choose Code Type"):
st.success("Code type selected successfully")
if st.button("Check Vulnerability"):
st.text("Checking for vulnerabilities...")
a = check_vul(uploaded_file.read().decode("utf-8"), option)
# a = 1
a = int(a)
st.success("Vulenerability checked")
st.text(f"Your Code is {'Vulenrable' if a == 1 else 'Not Vulenrable'}")
if st.button("Fix Code"):
st.text("fixing code")
b = fix_vul(uploaded_file.read().decode("utf-8"))
st.code(b) | [
"Please provide an improved version of the input code that addresses potential vulnerabilities and follows best coding practices and also add comment for the new change.\n \n PLACEHOLDER\n \n Give me only the corrected code, without any more text\n "
] |
2024-01-10 | Prabigyaa/prabigya-vscode | server~src~nlpserver~inference_with_langchain.py | from langchain import HuggingFaceHub, LLMChain, PromptTemplate
import time
import os
from events import post_event
from typing import Optional
LLM_CHAIN: Optional[LLMChain] = None
def initialize_langchain(api_key: Optional[str], model_name="sangam101/hpc") -> bool:
"""
Initialize the language chain for running inference
Parameters
---
model_name: The model name followed by repo id for the model stored in huggingface.
api_key: The huggingface api key, if none is provided, api key is searched in environment variables
Return
---
False if api token isn't found, True otherwise
"""
global LLM_CHAIN
if api_key is None:
api_key = os.environ.get("HUGGINGFACEHUB_API_TOKEN")
if api_key is None:
post_event("log", "Huggingface hub api key not found, try setting the environment variable HUGGINGFACEHUB_API_TOKEN")
return False
post_event("log", f"Using huggingfacehub api key {api_key}")
hub_llm = HuggingFaceHub(
repo_id=model_name,
model_kwargs={"temperature": 1e-10},
huggingfacehub_api_token=api_key,
client=None, # to solve intellisense error
)
template = (
"""{comment}""" # passing just the comment, as the model is trained for that
)
prompt = PromptTemplate(template=template, input_variables=["comment"])
# create prompt template > LLM chain
LLM_CHAIN = LLMChain(prompt=prompt, llm=hub_llm, verbose=True)
return True
def get_variable_names_from_langchain(comments: list[str], **kwargs) -> Optional[dict[str, str]]:
"""
Get the variable names for comments at once.
Kwargs isn't used for now.
Parameters
---
comments: list of comment
Return
---
None if the llm chain is invalid.
Dictionary containing the comment as key and variable name as value if the llm chain is valid.
"""
global LLM_CHAIN
if not isinstance(LLM_CHAIN, LLMChain) or len(comments) < 1:
return None
# generating a list of dictionary of comments
comment_dictionaries: list[dict[str, str]] = []
for comment in comments:
# the input variable name is comment as set above
# this should be changed on changing the input variable name
comment_dictionaries.append({"comment": comment})
post_event("log", f"Starting inference on langchain for {comment_dictionaries}")
start_time = time.time()
outputs = LLM_CHAIN.generate(comment_dictionaries)
end_time = time.time()
post_event("log", f"Finished inference on langchain for {comment_dictionaries}")
post_event("log", f"\tThe inference took {end_time - start_time} seconds.\n")
comment_variable_dict: dict[str, str] = {}
for i, output in enumerate(outputs.generations):
input_comment = comments[i]
output_variable = output[0].text
comment_variable_dict[input_comment] = output_variable
return comment_variable_dict
if __name__ == "__main__":
# the huggingface model name
model_name = "sangam101/hpc"
# the input comment
comments = [
"Determining the longest running execution.",
"calculate the area of circle",
"get the most accessed websites",
"keeping track of maximum input size",
]
initialize_langchain(api_key=None, model_name=model_name)
langchain_inference_start_time = time.time()
outputs = get_variable_names_from_langchain(comments=comments)
langchain_inference_end_time = time.time()
print(
f"Total time taken for inference = {langchain_inference_end_time - langchain_inference_start_time} seconds"
)
if outputs is not None:
for comment, variable_name in outputs.items():
print(f"Comment: {comment}\n\t Variable Name: {variable_name}")
| [
"comment",
"{comment}"
] |
2024-01-10 | andrecsq/backend-chatgpteacher | domain~chatgpt~gpthandler.py | import openai
from dotenv.main import dotenv_values
from domain.chatgpt.constants \
import Models, Roles, API_KEY_PATH
from domain.chatgpt.prompts import generate_correction_prompt, generate_formatting_prompt
class GPTHandler:
def __init__(self) -> None:
envs = dotenv_values('.env')
self.model = envs['DEFAULT_MODEL']
openai.api_key_path = API_KEY_PATH
def set_model_to_gpt3(self) -> None:
self.model = Models.GPT3.value
def set_model_to_gpt4(self) -> None:
self.model = Models.GPT4.value
def get_response_from_chat(self, prompt: list) -> str:
response = openai.ChatCompletion.create(
model=self.model,
messages=prompt
)
content = response['choices'][0]['message']['content']
return content.strip()
def correct_translation(self, sentence_to_translate, translation_attempt):
correction_prompt = generate_correction_prompt(sentence_to_translate, translation_attempt)
correction_content = self.get_response_from_chat(correction_prompt)
print("correction_content:")
print(correction_content)
formatting_prompt = generate_formatting_prompt(correction_prompt, correction_content)
formatting_content = self.get_response_from_chat(formatting_prompt)
print("formatting_content:")
print(formatting_content)
return formatting_content
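# Hedged usage sketch (the sentences are illustrative only): GPTHandler reads DEFAULT_MODEL from
# .env and the API key file at API_KEY_PATH, and correct_translation() runs the correction prompt
# followed by the formatting prompt before returning the formatted feedback.
if __name__ == "__main__":
    handler = GPTHandler()
    handler.set_model_to_gpt4()
    print(handler.correct_translation("Eu gosto de aprender idiomas.", "I like to learn languages."))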
| [] |
2024-01-10 | brunobraga/langchain | libs~experimental~langchain_experimental~comprehend_moderation~pii.py | import asyncio
from typing import Any, Dict, Optional
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationPiiError,
)
class ComprehendPII:
def __init__(
self,
client: Any,
callback: Optional[Any] = None,
unique_id: Optional[str] = None,
chain_id: Optional[str] = None,
) -> None:
self.client = client
self.moderation_beacon = {
"moderation_chain_id": chain_id,
"moderation_type": "PII",
"moderation_status": "LABELS_NOT_FOUND",
}
self.callback = callback
self.unique_id = unique_id
def validate(
self, prompt_value: str, config: Optional[Dict[str, Any]] = None
) -> str:
from langchain_experimental.comprehend_moderation.base_moderation_enums import (
BaseModerationActions,
)
if config:
action = config.get("action", BaseModerationActions.STOP)
if action not in [BaseModerationActions.STOP, BaseModerationActions.ALLOW]:
raise ValueError("Action can either be stop or allow")
return (
self._contains_pii(prompt_value=prompt_value, config=config)
if action == BaseModerationActions.STOP
else self._detect_pii(prompt_value=prompt_value, config=config)
)
else:
return self._contains_pii(prompt_value=prompt_value)
def _contains_pii(
self, prompt_value: str, config: Optional[Dict[str, Any]] = None
) -> str:
"""
Checks for Personally Identifiable Information (PII) labels above a
specified threshold.
Args:
prompt_value (str): The input text to be checked for PII labels.
config (Dict[str, Any]): Configuration for PII check and actions.
Returns:
str: the original prompt
Note:
- The provided client should be initialized with valid AWS credentials.
"""
pii_identified = self.client.contains_pii_entities(
Text=prompt_value, LanguageCode="en"
)
if self.callback and self.callback.pii_callback:
self.moderation_beacon["moderation_input"] = prompt_value
self.moderation_beacon["moderation_output"] = pii_identified
threshold = config.get("threshold", 0.5) if config else 0.5
pii_labels = config.get("labels", []) if config else []
pii_found = False
for entity in pii_identified["Labels"]:
if (entity["Score"] >= threshold and entity["Name"] in pii_labels) or (
entity["Score"] >= threshold and not pii_labels
):
pii_found = True
break
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
if pii_found:
raise ModerationPiiError
return prompt_value
def _detect_pii(self, prompt_value: str, config: Optional[Dict[str, Any]]) -> str:
"""
Detects and handles Personally Identifiable Information (PII) entities in the
given prompt text using Amazon Comprehend's detect_pii_entities API. The
function provides options to redact or stop processing based on the identified
PII entities and a provided configuration.
Args:
prompt_value (str): The input text to be checked for PII entities.
config (Dict[str, Any]): A configuration specifying how to handle
PII entities.
Returns:
str: The processed prompt text with redacted PII entities or raised
exceptions.
Raises:
ValueError: If the prompt contains configured PII entities for
stopping processing.
Note:
- If PII is not found in the prompt, the original prompt is returned.
- The client should be initialized with valid AWS credentials.
"""
pii_identified = self.client.detect_pii_entities(
Text=prompt_value, LanguageCode="en"
)
if self.callback and self.callback.pii_callback:
self.moderation_beacon["moderation_input"] = prompt_value
self.moderation_beacon["moderation_output"] = pii_identified
if (pii_identified["Entities"]) == []:
if self.callback and self.callback.pii_callback:
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
return prompt_value
pii_found = False
if not config and pii_identified["Entities"]:
for entity in pii_identified["Entities"]:
if entity["Score"] >= 0.5:
pii_found = True
break
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
if pii_found:
raise ModerationPiiError
else:
threshold = config.get("threshold", 0.5) # type: ignore
pii_labels = config.get("labels", []) # type: ignore
mask_marker = config.get("mask_character", "*") # type: ignore
pii_found = False
for entity in pii_identified["Entities"]:
if (
pii_labels
and entity["Type"] in pii_labels
and entity["Score"] >= threshold
) or (not pii_labels and entity["Score"] >= threshold):
pii_found = True
char_offset_begin = entity["BeginOffset"]
char_offset_end = entity["EndOffset"]
prompt_value = (
prompt_value[:char_offset_begin]
+ mask_marker * (char_offset_end - char_offset_begin)
+ prompt_value[char_offset_end:]
)
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
return prompt_value
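# Hedged usage sketch (region and input text are placeholders, not from this module): the class
# expects a boto3 Comprehend client, and a config with action=ALLOW plus a mask_character makes
# validate() redact detected entities instead of raising ModerationPiiError.
if __name__ == "__main__":
    import boto3
    from langchain_experimental.comprehend_moderation.base_moderation_enums import (
        BaseModerationActions,
    )

    comprehend_client = boto3.client("comprehend", region_name="us-east-1")
    pii_check = ComprehendPII(client=comprehend_client)
    redact_config = {"action": BaseModerationActions.ALLOW, "threshold": 0.5, "mask_character": "*"}
    print(pii_check.validate("My name is John Doe and my phone number is 555-0100.", config=redact_config))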
| [] |
2024-01-10 | brunobraga/langchain | libs~experimental~langchain_experimental~comprehend_moderation~base_moderation.py | import uuid
from typing import Any, Callable, Dict, Optional
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.prompts.base import StringPromptValue
from langchain.prompts.chat import ChatPromptValue
from langchain.schema import AIMessage, HumanMessage
from langchain_experimental.comprehend_moderation.intent import ComprehendIntent
from langchain_experimental.comprehend_moderation.pii import ComprehendPII
from langchain_experimental.comprehend_moderation.toxicity import ComprehendToxicity
class BaseModeration:
def __init__(
self,
client: Any,
config: Optional[Dict[str, Any]] = None,
moderation_callback: Optional[Any] = None,
unique_id: Optional[str] = None,
run_manager: Optional[CallbackManagerForChainRun] = None,
):
self.client = client
self.config = config
self.moderation_callback = moderation_callback
self.unique_id = unique_id
self.chat_message_index = 0
self.run_manager = run_manager
self.chain_id = str(uuid.uuid4())
def _convert_prompt_to_text(self, prompt: Any) -> str:
input_text = str()
if isinstance(prompt, StringPromptValue):
input_text = prompt.text
elif isinstance(prompt, str):
input_text = prompt
elif isinstance(prompt, ChatPromptValue):
"""
We will just check the last message in the message Chain of a
ChatPromptTemplate. The typical chronology is
SystemMessage > HumanMessage > AIMessage and so on. However, assuming
the chain is invoked on every chat turn, we only check the last
message, on the assumption that all previous messages have been checked
already. Only HumanMessage and AIMessage will be checked. We can perhaps
loop through and take advantage of the additional_kwargs property in the
HumanMessage and AIMessage schema to mark messages that have been moderated.
However that means that this class could generate multiple text chunks
and moderate() logics would need to be updated. This also means some
complexity in re-constructing the prompt while keeping the messages in
sequence.
"""
message = prompt.messages[-1]
self.chat_message_index = len(prompt.messages) - 1
if isinstance(message, HumanMessage):
input_text = message.content
if isinstance(message, AIMessage):
input_text = message.content
else:
raise ValueError(
f"Invalid input type {type(input)}. "
"Must be a PromptValue, str, or list of BaseMessages."
)
return input_text
def _convert_text_to_prompt(self, prompt: Any, text: str) -> Any:
if isinstance(prompt, StringPromptValue):
return StringPromptValue(text=text)
elif isinstance(prompt, str):
return text
elif isinstance(prompt, ChatPromptValue):
messages = prompt.messages
message = messages[self.chat_message_index]
if isinstance(message, HumanMessage):
messages[self.chat_message_index] = HumanMessage(
content=text,
example=message.example,
additional_kwargs=message.additional_kwargs,
)
if isinstance(message, AIMessage):
messages[self.chat_message_index] = AIMessage(
content=text,
example=message.example,
additional_kwargs=message.additional_kwargs,
)
return ChatPromptValue(messages=messages)
else:
raise ValueError(
f"Invalid input type {type(input)}. "
"Must be a PromptValue, str, or list of BaseMessages."
)
def _moderation_class(self, moderation_class: Any) -> Callable:
return moderation_class(
client=self.client,
callback=self.moderation_callback,
unique_id=self.unique_id,
chain_id=self.chain_id,
).validate
def _log_message_for_verbose(self, message: str) -> None:
if self.run_manager:
self.run_manager.on_text(message)
def moderate(self, prompt: Any) -> str:
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import ( # noqa: E501
ModerationIntentionError,
ModerationPiiError,
ModerationToxicityError,
)
try:
# convert prompt to text
input_text = self._convert_prompt_to_text(prompt=prompt)
output_text = str()
# perform moderation
if self.config is None:
# In absence of config Action will default to STOP only
self._log_message_for_verbose("Running pii validation...\n")
pii_validate = self._moderation_class(moderation_class=ComprehendPII)
output_text = pii_validate(prompt_value=input_text)
self._log_message_for_verbose("Running toxicity validation...\n")
toxicity_validate = self._moderation_class(
moderation_class=ComprehendToxicity
)
output_text = toxicity_validate(prompt_value=output_text)
self._log_message_for_verbose("Running intent validation...\n")
intent_validate = self._moderation_class(
moderation_class=ComprehendIntent
)
output_text = intent_validate(prompt_value=output_text)
else:
filter_functions = {
"pii": ComprehendPII,
"toxicity": ComprehendToxicity,
"intent": ComprehendIntent,
}
filters = self.config["filters"]
for _filter in filters:
filter_name = f"{_filter}"
if filter_name in filter_functions:
self._log_message_for_verbose(
f"Running {filter_name} Validation...\n"
)
validation_fn = self._moderation_class(
moderation_class=filter_functions[filter_name]
)
input_text = input_text if not output_text else output_text
output_text = validation_fn(
prompt_value=input_text,
config=self.config[filter_name]
if filter_name in self.config
else None,
)
# convert text to prompt and return
return self._convert_text_to_prompt(prompt=prompt, text=output_text)
except ModerationPiiError as e:
self._log_message_for_verbose(f"Found PII content..stopping..\n{str(e)}\n")
raise e
except ModerationToxicityError as e:
self._log_message_for_verbose(
f"Found Toxic content..stopping..\n{str(e)}\n"
)
raise e
except ModerationIntentionError as e:
self._log_message_for_verbose(
f"Found Harmful intention..stopping..\n{str(e)}\n"
)
raise e
except Exception as e:
raise e
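# Hedged usage sketch (client, region, and text are placeholders): moderate() reads the "filters"
# list to decide which checks run and forwards each named section of the config to that check's
# validate().
if __name__ == "__main__":
    import boto3

    comprehend_client = boto3.client("comprehend", region_name="us-east-1")
    moderation = BaseModeration(
        client=comprehend_client,
        config={
            "filters": ["pii", "toxicity"],
            "pii": {"threshold": 0.5},
            "toxicity": {"threshold": 0.5},
        },
    )
    print(moderation.moderate("The weather has been lovely this week."))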
| [] |
2024-01-10 | brunobraga/langchain | libs~experimental~langchain_experimental~comprehend_moderation~toxicity.py | import asyncio
import importlib
import warnings
from typing import Any, Dict, List, Optional
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationToxicityError,
)
class ComprehendToxicity:
def __init__(
self,
client: Any,
callback: Optional[Any] = None,
unique_id: Optional[str] = None,
chain_id: Optional[str] = None,
) -> None:
self.client = client
self.moderation_beacon = {
"moderation_chain_id": chain_id,
"moderation_type": "Toxicity",
"moderation_status": "LABELS_NOT_FOUND",
}
self.callback = callback
self.unique_id = unique_id
def _toxicity_init_validate(self, max_size: int) -> Any:
"""
Validate and initialize toxicity processing configuration.
Args:
max_size (int): Maximum sentence size defined in the configuration object.
Raises:
Exception: If the maximum sentence size exceeds the 5KB limit.
Note:
This function ensures that the NLTK punkt tokenizer is downloaded if not
already present.
Returns:
None
"""
if max_size > 1024 * 5:
raise Exception("The sentence length should not exceed 5KB.")
try:
nltk = importlib.import_module("nltk")
nltk.data.find("tokenizers/punkt")
return nltk
except ImportError:
raise ModuleNotFoundError(
"Could not import nltk python package. "
"Please install it with `pip install nltk`."
)
except LookupError:
nltk.download("punkt")
return nltk
def _split_paragraph(
self, prompt_value: str, max_size: int = 1024 * 4
) -> List[List[str]]:
"""
Split a paragraph into chunks of sentences, respecting the maximum size limit.
Args:
prompt_value (str): The input paragraph to be split into chunks.
max_size (int, optional): The maximum size limit in bytes for each chunk.
Defaults to 1024 * 4 (4 KB).
Returns:
List[List[str]]: A list of chunks, where each chunk is a list of sentences
Note:
This function validates the maximum sentence size based on service limits
using the 'toxicity_init_validate' function. It uses the NLTK sentence
tokenizer to split the paragraph into sentences.
"""
# validate max. sentence size based on Service limits
nltk = self._toxicity_init_validate(max_size)
sentences = nltk.sent_tokenize(prompt_value)
chunks = []
current_chunk = [] # type: ignore
current_size = 0
for sentence in sentences:
sentence_size = len(sentence.encode("utf-8"))
# If adding a new sentence exceeds max_size or
# current_chunk has 10 sentences, start a new chunk
if (current_size + sentence_size > max_size) or (len(current_chunk) >= 10):
if current_chunk: # Avoid appending empty chunks
chunks.append(current_chunk)
current_chunk = []
current_size = 0
current_chunk.append(sentence)
current_size += sentence_size
# Add any remaining sentences
if current_chunk:
chunks.append(current_chunk)
return chunks
def validate(
self, prompt_value: str, config: Optional[Dict[str, Any]] = None
) -> str:
"""
Check the toxicity of a given text prompt using AWS Comprehend service
and apply actions based on configuration.
Args:
prompt_value (str): The text content to be checked for toxicity.
config (Dict[str, Any]): Configuration for toxicity checks and actions.
Returns:
str: The original prompt_value if allowed or no toxicity found.
Raises:
ValueError: If the prompt contains toxic labels and cannot be
processed based on the configuration.
"""
chunks = self._split_paragraph(prompt_value=prompt_value)
for sentence_list in chunks:
segments = [{"Text": sentence} for sentence in sentence_list]
response = self.client.detect_toxic_content(
TextSegments=segments, LanguageCode="en"
)
if self.callback and self.callback.toxicity_callback:
self.moderation_beacon["moderation_input"] = segments # type: ignore
self.moderation_beacon["moderation_output"] = response
if config:
from langchain_experimental.comprehend_moderation.base_moderation_enums import ( # noqa: E501
BaseModerationActions,
)
toxicity_found = False
action = config.get("action", BaseModerationActions.STOP)
if action not in [
BaseModerationActions.STOP,
BaseModerationActions.ALLOW,
]:
raise ValueError("Action can either be stop or allow")
threshold = config.get("threshold", 0.5) if config else 0.5
toxicity_labels = config.get("labels", []) if config else []
if action == BaseModerationActions.STOP:
for item in response["ResultList"]:
for label in item["Labels"]:
if (
label
and (
not toxicity_labels
or label["Name"] in toxicity_labels
)
and label["Score"] >= threshold
):
toxicity_found = True
break
if action == BaseModerationActions.ALLOW:
if not toxicity_labels:
warnings.warn(
"You have allowed toxic content without specifying "
"any toxicity labels."
)
else:
for item in response["ResultList"]:
for label in item["Labels"]:
if (
label["Name"] in toxicity_labels
and label["Score"] >= threshold
):
toxicity_found = True
break
if self.callback and self.callback.toxicity_callback:
if toxicity_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_toxicity(
self.moderation_beacon, self.unique_id
)
)
if toxicity_found:
raise ModerationToxicityError
else:
if response["ResultList"]:
detected_toxic_labels = list()
for item in response["ResultList"]:
detected_toxic_labels.extend(item["Labels"])
if any(item["Score"] >= 0.5 for item in detected_toxic_labels):
if self.callback and self.callback.toxicity_callback:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_toxicity(
self.moderation_beacon, self.unique_id
)
)
raise ModerationToxicityError
return prompt_value
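# A minimal usage sketch (not part of the original module). It assumes boto3 is
# installed, AWS credentials with Comprehend access are configured locally, and
# the chosen region supports the detect_toxic_content API.
if __name__ == "__main__":
    import boto3
    from langchain_experimental.comprehend_moderation.base_moderation_enums import (
        BaseModerationActions,
    )
    comprehend_client = boto3.client("comprehend", region_name="us-east-1")
    checker = ComprehendToxicity(client=comprehend_client)
    moderation_config = {
        "action": BaseModerationActions.STOP,
        "threshold": 0.7,
        "labels": [],  # an empty list means "check against every toxicity label"
    }
    # Returns the text unchanged if it is clean, raises ModerationToxicityError otherwise.
    print(checker.validate("Some user supplied text.", config=moderation_config))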
| [] |
2024-01-10 | omkarh25/Serendipity_Playground | WebScraping~assisstant.py | # import requests
# def query_openai_assistant(prompt, model="text-davinci-003", api_key="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"):
# """
# Send a query to the OpenAI Assistant API.
# :param prompt: The prompt or question to ask the AI.
# :param model: The model to use. Defaults to 'text-davinci-003'.
# :param api_key: Your API key for OpenAI.
# :return: The response from the AI.
# """
# url = "https://api.openai.com/v1/assistants/YOUR_ASSISTANT_ID/messages"
# headers = {
# "Authorization": f"Bearer {api_key}",
# "Content-Type": "application/json"
# }
# data = {
# "model": model,
# "messages": [{"role": "system", "content": "This is a test"}]
# }
# response = requests.post(url, headers=headers, json=data)
# return response.json()
# # Example usage
# response = query_openai_assistant("What is the capital of France?")
# print(response)
# asst_uhQlHg2Zelg1Fsxl4cFBLiUv
# import openai
# def generate_text(prompt, model="text-davinci-003", api_key="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"):
# openai.api_key = api_key
# try:
# response = openai.Completion.create(
# model=model,
# prompt=prompt,
# max_tokens=150
# )
# return response.choices[0].text.strip()
# except Exception as e:
# return str(e)
# # Example usage
# response = generate_text("Give the geocode of Rakesh Fantasy Garden", api_key="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")
# print(response)
# import openai
# import pandas as pd
# def generate_text(prompt, model="text-davinci-003", api_key="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"):
# openai.api_key = api_key
# try:
# response = openai.Completion.create(
# model=model,
# prompt=prompt,
# max_tokens=150
# )
# return response.choices[0].text.strip()
# except Exception as e:
# return str(e)
# def get_geocodes_from_excel(file_path, api_key):
# # Read the Excel file
# df = pd.read_excel(file_path)
# # Print the column names for troubleshooting
# print("Columns in the file:", df.columns.tolist())
# # Check if 'location' column exists (note the lowercase 'l')
# if 'location' not in df.columns:
# return "The Excel file does not have a 'location' column."
# # Create a new column for geocodes
# df['Geocode'] = ''
# # Iterate over each location and get the geocode
# for index, row in df.iterrows():
# location = row['location'] # Use 'location' with lowercase 'l'
# prompt = f"Give the geocode of {location}"
# geocode = generate_text(prompt, api_key=api_key)
# df.at[index, 'Geocode'] = geocode
# # Save the results to a new Excel file
# output_file = 'geocodes_output1.xlsx'
# df.to_excel(output_file, index=False)
# return f"Geocodes saved to {output_file}"
# # Example usage
# file_path = r'C:\Users\91861\OneDrive\Desktop\bhoodevi\WebScraping\coo.xlsx' # Use the correct file path
# api_key = "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" # Replace with your actual OpenAI API key
# result = get_geocodes_from_excel(file_path, api_key)
# print(result)
# import openai
# import pandas as pd
# def generate_text(prompt, api_key, model="text-davinci-003"):
# openai.api_key = api_key
# try:
# response = openai.Completion.create(
# model=model,
# prompt=prompt,
# max_tokens=150
# )
# return response.choices[0].text.strip()
# except Exception as e:
# return f"Error: {str(e)}"
# def get_geocodes_from_excel(file_path, api_key):
# try:
# # Read the Excel file
# df = pd.read_excel(file_path)
# except Exception as e:
# return f"Error reading the Excel file: {str(e)}"
# # Print the column names for troubleshooting
# print("Columns in the file:", df.columns.tolist())
# # Check if 'location' column exists
# if 'location' not in df.columns:
# return "The Excel file does not have a 'location' column."
# # Create a new column for geocodes
# df['Geocode'] = ''
# # Iterate over each location and get the geocode
# for index, row in df.iterrows():
# location = row['location']
# prompt = f"Give the geocode of {location}"
# geocode = generate_text(prompt, api_key=api_key, model="text-davinci-003")
# df.at[index, 'Geocode'] = geocode
# print(f"Processed location: {location} - Geocode: {geocode}") # Print each geocode for verification
# # Save the results to a new Excel file
# try:
# output_file = r'C:\Users\91861\OneDrive\Desktop\bhoodevi\WebScraping\geocodes_output1.xlsx'
# df.to_excel(output_file, index=False)
# return f"Geocodes saved to {output_file}"
# except Exception as e:
# return f"Error saving the Excel file: {str(e)}"
# # Example usage
# file_path = r'C:\Users\91861\OneDrive\Desktop\bhoodevi\WebScraping\coo.xlsx'
# api_key = "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"
# result = get_geocodes_from_excel(file_path, api_key)
# print(result)
# import openai
# import pandas as pd
# def generate_text(prompt, api_key, model="text-davinci-003"):
# openai.api_key = api_key
# try:
# response = openai.Completion.create(
# model=model,
# prompt=prompt,
# max_tokens=150
# )
# return response.choices[0].text.strip()
# except Exception as e:
# return f"Error: {str(e)}"
# def parse_geocode(geocode):
# # Assuming the geocode format is "latitude, longitude"
# try:
# latitude, longitude = geocode.split(", ")
# return float(latitude), float(longitude)
# except Exception as e:
# return None, None
# def get_geocodes_from_excel(file_path, api_key):
# try:
# # Read the Excel file
# df = pd.read_excel(file_path)
# except Exception as e:
# return f"Error reading the Excel file: {str(e)}"
# # Check if 'location' column exists
# if 'location' not in df.columns:
# return "The Excel file does not have a 'location' column."
# # Create new columns for latitude and longitude
# df['Latitude'] = ''
# df['Longitude'] = ''
# # Iterate over each location and get the geocode
# for index, row in df.iterrows():
# location = row['location']
# prompt = f"Give the geocode of {location}"
# geocode = generate_text(prompt, api_key=api_key, model="text-davinci-003")
# latitude, longitude = parse_geocode(geocode)
# df.at[index, 'Latitude'] = latitude
# df.at[index, 'Longitude'] = longitude
# # Save the results to a new Excel file
# try:
# output_file = r'C:\Users\91861\OneDrive\Desktop\bhoodevi\WebScraping\geocodes_output1.xlsx'
# df.to_excel(output_file, index=False)
# return f"Geocodes saved to {output_file}"
# except Exception as e:
# return f"Error saving the Excel file: {str(e)}"
# # Example usage
# file_path = r'C:\Users\91861\OneDrive\Desktop\bhoodevi\WebScraping\coo.xlsx'
# api_key = "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"
# result = get_geocodes_from_excel(file_path, api_key)
# print(result)
# import openai
# import pandas as pd
# import os
# from dotenv import load_dotenv
# load_dotenv(r"C:\Users\91861\OneDrive\Desktop\bhoodevi\WebScraping\.env")
# def generate_text(prompt, api_key, model="text-davinci-003"):
# openai.api_key = api_key
# try:
# response = openai.Completion.create(
# model=model,
# prompt=prompt,
# max_tokens=150
# )
# return response.choices[0].text.strip()
# except Exception as e:
# print(f"Error in generate_text: {e}")
# return None
# def parse_geocode(geocode):
# try:
# latitude, longitude = geocode.split(", ")
# return float(latitude), float(longitude)
# except Exception as e:
# print(f"Error in parse_geocode: {e}")
# return 'N/A', 'N/A'
# def get_geocodes_from_excel(file_path, api_key):
# try:
# df = pd.read_excel(file_path)
# if 'Location' not in df.columns:
# print("The Excel file does not have a 'Location' column.")
# return
# df['Latitude'] = 'N/A'
# df['Longitude'] = 'N/A'
# for index, row in df.iterrows():
# location = row['Location']
# prompt = f"Give the geocode of {location}"
# geocode = generate_text(prompt, api_key=api_key)
# if geocode:
# latitude, longitude = parse_geocode(geocode)
# else:
# latitude, longitude = 'N/A', 'N/A'
# df.at[index, 'Latitude'] = latitude
# df.at[index, 'Longitude'] = longitude
# output_file = 'geocodes_output_new.xlsx'
# df.to_excel(output_file, index=False)
# print(f"Geocodes saved to {output_file}")
# except Exception as e:
# print(f"Error in get_geocodes_from_excel: {str(e)}")
# # Replace the file path and API key with your actual file path and new API key
# file_path = r'coo.xlsx' # Update the file path as needed
# api_key = os.getenv("assisstant_api")# Use your new API key
# result = get_geocodes_from_excel(file_path, api_key)
# print(result)
# import openai
# import pandas as pd
# import os
# import time
# from dotenv import load_dotenv
# load_dotenv(r"C:\Users\91861\OneDrive\Desktop\bhoodevi\WebScraping\.env")
# def generate_text(prompt, api_key, model="text-davinci-003"):
# openai.api_key = api_key
# try:
# response = openai.Completion.create(
# model=model,
# prompt=prompt,
# max_tokens=150
# )
# return response.choices[0].text.strip()
# except Exception as e:
# print(f"Error in generate_text: {e}")
# return None
# def parse_geocode(geocode):
# try:
# latitude, longitude = geocode.split(", ")
# return float(latitude), float(longitude)
# except Exception as e:
# print(f"Error in parse_geocode: {e}")
# return 'N/A', 'N/A'
# def get_geocodes_from_excel(file_path, api_key):
# try:
# df = pd.read_excel(file_path)
# if 'Location' not in df.columns:
# print("The Excel file does not have a 'Location' column.")
# return
# df['Latitude'] = 'N/A'
# df['Longitude'] = 'N/A'
# for index, row in df.iterrows():
# location = row['Location']
# prompt = f"Give the geocode of {location}"
# geocode = generate_text(prompt, api_key=api_key)
# if geocode:
# latitude, longitude = parse_geocode(geocode)
# else:
# latitude, longitude = 'N/A', 'N/A'
# df.at[index, 'Latitude'] = latitude
# df.at[index, 'Longitude'] = longitude
# # Add a delay between API calls
# time.sleep(1)
# output_file = 'geocodes_output_new.xlsx'
# df.to_excel(output_file, index=False)
# print(f"Geocodes saved to {output_file}")
# except Exception as e:
# print(f"Error in get_geocodes_from_excel: {str(e)}")
# # Replace the file path and API key with your actual file path and new API key
# file_path = r'coo.xlsx' # Update the file path as needed
# api_key = os.getenv("assisstant_api") # Use your new API key
# get_geocodes_from_excel(file_path, api_key)
# import pandas as pd
# from geopy.geocoders import Nominatim
# from geopy.extra.rate_limiter import RateLimiter
# import time
# def geocode_locations(file_path):
# # Load the Excel file
# df = pd.read_excel(file_path)
# # Check if 'Location' column exists
# if 'Location' not in df.columns:
# print("The Excel file does not have a 'Location' column.")
# return
# # Initialize the geocoder with rate limiter
# geolocator = Nominatim(user_agent="geoapiExercises")
# geocode = RateLimiter(geolocator.geocode, min_delay_seconds=1)
# # Geocode each location
# for index, row in df.iterrows():
# try:
# location = geocode(row['Location'])
# if location:
# df.at[index, 'Latitude'] = location.latitude
# df.at[index, 'Longitude'] = location.longitude
# else:
# df.at[index, 'Latitude'] = 'N/A'
# df.at[index, 'Longitude'] = 'N/A'
# except Exception as e:
# print(f"Error processing {row['Location']}: {e}")
# df.at[index, 'Latitude'] = 'N/A'
# df.at[index, 'Longitude'] = 'N/A'
# # Print progress
# print(f"Processed {index + 1}/{len(df)} locations")
# # Save the results to a new Excel file
# output_file = 'geocoded_locations1.xlsx'
# df.to_excel(output_file, index=False)
# print(f"Geocoded data saved to {output_file}")
# # Replace with your file path
# file_path = r'C:\Users\91861\OneDrive\Desktop\bhoodevi\WebScraping\coo.xlsx'
# geocode_locations(file_path)
# import pandas as pd
# from geopy.geocoders import Nominatim
# from geopy.extra.rate_limiter import RateLimiter
# import traceback
# def geocode_locations(file_path):
# try:
# # Load the Excel file
# df = pd.read_excel(file_path)
# # Check if 'Location' column exists
# if 'Location' not in df.columns:
# print("The Excel file does not have a 'Location' column.")
# return
# # Initialize the geocoder with rate limiter
# geolocator = Nominatim(user_agent="geoapiExercises")
# geocode = RateLimiter(geolocator.geocode, min_delay_seconds=1)
# # Geocode each location
# for index, row in df.iterrows():
# try:
# location = geocode(row['Location'])
# if location:
# df.at[index, 'Latitude'] = location.latitude
# df.at[index, 'Longitude'] = location.longitude
# else:
# df.at[index, 'Latitude'] = 'N/A'
# df.at[index, 'Longitude'] = 'N/A'
# except Exception as e:
# print(f"Error processing {row['Location']}: {e}")
# traceback.print_exc()
# df.at[index, 'Latitude'] = 'N/A'
# df.at[index, 'Longitude'] = 'N/A'
# # Print progress
# print(f"Processed {index + 1}/{len(df)} locations")
# # Save the results to a new Excel file
# output_file = 'geocoded_locations.xlsx'
# df.to_excel(output_file, index=False)
# print(f"Geocoded data saved to {output_file}")
# except Exception as e:
# print(f"General Error: {e}")
# traceback.print_exc()
# # Replace with your file path
# file_path = r'C:\Users\91861\OneDrive\Desktop\bhoodevi\WebScraping\coo.xlsx'
# geocode_locations(file_path)
# import pandas as pd
# from geopy.geocoders import Nominatim
# from geopy.extra.rate_limiter import RateLimiter
# import traceback
# def geocode_locations(file_path):
# try:
# df = pd.read_excel(file_path)
# if 'Location' not in df.columns:
# print("The Excel file does not have a 'Location' column.")
# return
# geolocator = Nominatim(user_agent="geoapiExercises")
# geocode = RateLimiter(geolocator.geocode, min_delay_seconds=0.5) # Reduced delay
# for index, row in df.iterrows():
# location_name = row['Location']
# try:
# location = geocode(location_name)
# if location:
# df.at[index, 'Latitude'] = location.latitude
# df.at[index, 'Longitude'] = location.longitude
# print(f"Processed: {location_name} -> Lat: {location.latitude}, Long: {location.longitude}")
# else:
# df.at[index, 'Latitude'] = 'N/A'
# df.at[index, 'Longitude'] = 'N/A'
# print(f"Location not found: {location_name}")
# except Exception as e:
# print(f"Error processing {location_name}: {e}")
# df.at[index, 'Latitude'] = 'N/A'
# df.at[index, 'Longitude'] = 'N/A'
# output_file = 'geocoded_locations123.xlsx'
# df.to_excel(output_file, index=False)
# print(f"Geocoded data saved to {output_file}")
# except Exception as e:
# print(f"General Error: {e}")
# traceback.print_exc()
# file_path = r'C:\Users\91861\OneDrive\Desktop\bhoodevi\WebScraping\coo.xlsx'
# geocode_locations(file_path)
import pandas as pd
import googlemaps
import os
import time
from dotenv import load_dotenv
load_dotenv(r"C:\Users\91861\OneDrive\Desktop\bhoodevi\WebScraping\.env")
def geocode_with_google(file_path, api_key):
df = pd.read_excel(file_path)
gmaps = googlemaps.Client(key=api_key)
for index, row in df.iterrows():
try:
geocode_result = gmaps.geocode(row['Location'])
if geocode_result:
location = geocode_result[0]['geometry']['location']
df.at[index, 'Latitude'] = location['lat']
df.at[index, 'Longitude'] = location['lng']
else:
df.at[index, 'Latitude'] = 'N/A'
df.at[index, 'Longitude'] = 'N/A'
print(f"Processed: {row['Location']}")
except Exception as e:
print(f"Error: {e}")
df.at[index, 'Latitude'] = 'N/A'
df.at[index, 'Longitude'] = 'N/A'
time.sleep(1) # To prevent exceeding query limit
output_file = 'geocoded_with_google2.xlsx'
df.to_excel(output_file, index=False)
print(f"Data saved to {output_file}")
# Replace with your file path and API key
file_path = r'C:\Users\91861\OneDrive\Desktop\bhoodevi\WebScraping\coo.xlsx'
api_key = os.getenv("Google1_api_key")
geocode_with_google(file_path, api_key)
| [] |
2024-01-10 | tolleybot/gptretrieval | gptretrieval~examples~classification.py | import json
import sys
sys.path.append("/Users/dtolley/Documents/Projects/gptretrieval/gptretrieval")
from services import openai
GPT_TOKEN_LENGTH = 4096
GPT_MODEL = "gpt-4"
labels_dict = {
0: {
"name": "Class or Struct Definition",
"description": "Code that defines a class or struct. Excludes the methods within a class; only includes the class signature and member variables.",
},
1: {
"name": "Function or Method Definition",
"description": "Code that defines a function or method. Does not include usage examples of the function or method.",
},
2: {
"name": "Code Usage or Example",
"description": "Examples of how to use certain code, functions, or classes. Distinct from the actual definition of functions or classes.",
},
3: {
"name": "Instructional Code Implementation",
"description": "How to implement or use code. Such as how do I create a function to do ABC, or how is a class used to do XYZ.",
},
4: {
"name": "Database Implementation",
"description": "Code that implements or calls a database.",
},
5: {
"name": "Error Handling",
"description": "Code segments dedicated to handling errors or exceptions.",
},
6: {
"name": "UI Code",
"description": "Code related to user interface design and interaction.",
},
7: {
"name": "Configuration Code",
"description": "Code used for configuring the system, application, or environment.",
},
8: {
"name": "Documentation",
"description": "Comments and documentation that explain the code. Does not include code itself.",
},
9: {
"name": "REST API Implementation or Usage",
"description": "Code that either implements a server or client, or calls a REST API.",
},
10: {
"name": "Code Usage Search",
"description": "Looking for location and or file where a specific function or class or variable is being used",
},
}
test_cases = {
0: [
{
"question": "How do you define a simple class in Python?",
"code": "class MyClass:\n def __init__(self, name):\n self.name = name",
"answer": (0, 0), # Class or Struct Definition for both question and code
}
],
1: [
{
"question": "Can you provide a function that adds two numbers?",
"code": "def add_numbers(a, b):\n return a + b",
"answer": (
1,
1,
), # Function or Method Definition for both question and code
}
],
2: [
{
"question": "How do I use the add_numbers function?",
"code": "result = add_numbers(3, 5)\nprint(result)",
"answer": (2, 2), # Code Usage or Example for both question and code
}
],
3: [
{
"question": "Can you show an implementation of the bubble sort algorithm?",
"code": "def bubble_sort(arr):\n n = len(arr)\n for i in range(n):\n for j in range(0, n-i-1):\n if arr[j] > arr[j+1]:\n arr[j], arr[j+1] = arr[j+1], arr[j]",
"answer": (3, 3), # Algorithm Implementation for both question and code
}
],
4: [
{
"question": "How do you implement a stack data structure?",
"code": "class Stack:\n def __init__(self):\n self.items = []\n def push(self, item):\n self.items.append(item)\n def pop(self):\n return self.items.pop()",
"answer": (
4,
4,
), # Data Structure Implementation for both question and code
}
],
5: [
{
"question": "How do you use the pandas library to read a CSV file?",
"code": "import pandas as pd\ndata = pd.read_csv('file.csv')",
"answer": (5, 5), # Library or Package Usage for both question and code
}
],
6: [
{
"question": "Can you show me how to handle a division by zero error?",
"code": "try:\n result = 10 / 0\nexcept ZeroDivisionError:\n print('Cannot divide by zero!')",
"answer": (6, 6), # Error Handling for both question and code
}
],
7: [
{
"question": "How can I create a button in a Tkinter window?",
"code": "from tkinter import Tk, Button\nroot = Tk()\nbutton = Button(root, text='Click Me')\nbutton.pack()\nroot.mainloop()",
"answer": (7, 7), # UI Code for both question and code
}
],
8: [
{
"question": "How do you set up a configuration file for logging in Python?",
"code": "[loggers]\nkeys=root\n\n[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler\n\n[handlers]\nkeys=consoleHandler\n\n[handler_consoleHandler]\nclass=StreamHandler\nlevel=DEBUG\nformatter=consoleFormatter\nargs=(sys.stdout,)",
"answer": (8, 8), # Configuration Code for both question and code
}
],
9: [
{
"question": "What is the purpose of documentation in code?",
"code": "# This function adds two numbers\ndef add_numbers(a, b):\n # Add the numbers and return the result\n return a + b",
"answer": (9, 9), # Documentation for both question and code
}
],
10: [
{
"question": "How do you set up a configuration file for logging in Python?",
"code": "class ABC",
"answer": (8, 8), # Configuration Code for both question and code
}
],
11: [
{
"question": "What is the purpose of documentation in code?",
"code": "def add_let(a, b):\n return 'abc' + 'b'",
"answer": (9, 9), # Documentation for both question and code
}
],
12: [
{
"question": "Where in the code base is the class ABC?",
"code": "line 123 in file.py",
"answer": (9, 9), # Documentation for both question and code
}
],
13: [
{
"question": "show me the definition for class ABC?",
"code": "line 123 of file abciscool.py\ndef testit(): \n x = ABC()\n print(x)",
"answer": (9, 9), # Documentation for both question and code
}
],
    14: [
{
"question": "show me the definition for class ABC?",
"code": "line 123 of file abciscool.py\nclass ABC\n self.x = 1",
"answer": (9, 9), # Documentation for both question and code
}
],
    15: [
{
"question": "what file and line is class ABC defined?",
"code": "line 123 of file abciscool.py\nclass ABC\n self.x = 1",
"answer": (9, 9), # Documentation for both question and code
}
],
    16: [
{
"question": "show me the definition for class ABC?",
"code": "line 123 of file abciscool.py\nclass BA\n self.x = 1",
"answer": (9, 9), # Documentation for both question and code
}
],
}
def create_prompt_for_gpt(labels_dict):
"""
Convert a dictionary of labels into a text block for GPT prompt.
Parameters:
labels_dict (dict): A dictionary containing label indices as keys and another dictionary with 'name' and 'description' as values.
Returns:
str: A text block suitable for use as a GPT prompt.
"""
prompt = "The following are example labels but are not exclusive:\n\n"
for index, label_info in labels_dict.items():
prompt += f"Label - {label_info['name']}:\n"
prompt += f"{label_info['description']}\n\n"
return prompt
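# For illustration, with the labels_dict above the returned prompt begins:
#   The following are example labels but are not exclusive:
#
#   Label - Class or Struct Definition:
#   Code that defines a class or struct. Excludes the methods within a class; ...
#
#   Label - Function or Method Definition:
#   Code that defines a function or method. ...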
def classify_question(question: str, labels: str):
"""Call OpenAI and summarize the function or class definition"""
prompt_text = create_prompt_for_gpt(labels_dict)
question = question[:GPT_TOKEN_LENGTH]
system_message = {
"role": "system",
"content": f"{prompt_text}\nYou can ask me to classify a question, \
and I will return a label for the question formatted as json. \
            formatted as {{'question': 'label'}}",
}
user_message = {
"role": "user",
"content": f"Classify the following: Question - {question}",
}
functions = [
{
"name": "classify_question",
"description": "A function which takes in the label for question",
"parameters": {
"type": "object",
"properties": {
"question_label": {
"type": "string",
"description": "The label index assigned to the question",
}
},
"required": ["question_label"],
},
}
]
resp = openai.get_chat_completion(
[system_message, user_message],
functions,
function_call={"name": "classify_question"},
model=GPT_MODEL,
)
return resp
def classify_code(code: str, question: str, question_label: str):
"""
Call OpenAI to generate potential code labels based on the question and question label.
Parameters:
code (str): The code snippet.
question (str): The question text.
question_label (str): The label assigned to the question.
Returns:
Response from OpenAI API.
"""
# Craft the system message with the labels dictionary and instruction
prompt_text = create_prompt_for_gpt(labels_dict)
code = code[:GPT_TOKEN_LENGTH]
system_message = {
"role": "system",
"content": f"{prompt_text}\nGiven a question and its classification, you can ask me to classify a code snippet. \
The classification of the code snippet is '1' if it should align with the context provided by the question and its classification else its 0. \
Think of the code classification as the role the code plays in the context of answering the classified question. \
For example, if the question is asking for a class definition, but the code snippet is using a class without \
defining it, the code snippet should be classified as '0' or irrelevant.",
}
user_message = {
"role": "user",
"content": f"The question is: '{question}'. It is classified as: '{question_label}'. Given this context, how would you classify the following code snippet: {code}?",
}
# Define the function for the API call
functions = [
{
"name": "classify_code",
"description": "A function which takes in a code label",
"parameters": {
"type": "object",
"properties": {
"code_label": {
"type": "integer",
"description": "The label for the code",
}
},
"required": ["code_label"],
},
}
]
# Make the API call to GPT-4 with the crafted messages and function
resp = openai.get_chat_completion(
[system_message, user_message],
functions,
function_call={"name": "classify_code"},
model=GPT_MODEL,
)
return resp
# create a main entry point
def main():
# print the summary for each test case
for label, test_case in test_cases.items():
for case in test_case:
gpt_question_response = classify_question(case["question"], labels_dict)
# print question and code, and what the answer is supposed to be
print("--------------------------------")
print(f"Question: {case['question']}")
print(f"Code: {case['code']}")
print("--------------------------------")
# Assuming gpt_response contains the indices of the predicted labels
# You might need to adjust this part based on the actual structure of gpt_response
predicted_question_label = gpt_question_response["function_args"][
"question_label"
]
gpt_code_response = classify_code(
case["code"], case["question"], predicted_question_label
)
predicted_code_label = gpt_code_response["function_args"]["code_label"]
print(
f"GPT Response: Question - {predicted_question_label}, Code - {predicted_code_label}\n"
)
if __name__ == "__main__":
main()
| [
"Label - PLACEHOLDER:\n",
"PLACEHOLDER\n\n",
"PLACEHOLDER\nGiven a question and its classification, you can ask me to classify a code snippet. The classification of the code snippet is '1' if it should align with the context provided by the question and its classification else its 0. Think of the code classification as the role the code plays in the context of answering the classified question. For example, if the question is asking for a class definition, but the code snippet is using a class without defining it, the code snippet should be classified as '0' or irrelevant.",
"The question is: 'PLACEHOLDER'. It is classified as: 'PLACEHOLDER'. Given this context, how would you classify the following code snippet: PLACEHOLDER?",
"Classify the following: Question - PLACEHOLDER",
"PLACEHOLDER\nYou can ask me to classify a question, and I will return a label for the question formatted as json. formatted as {'question': 'label']}",
"The following are example labels but are not exclusive:\n\n"
] |
2024-01-10 | tolleybot/gptretrieval | gptretrieval~datastore~datastore.py | from abc import ABC, abstractmethod
from typing import List, Optional
from ..models.models import (
Query,
QueryResult,
QueryWithEmbedding,
)
from ..services import openai
class DataStore(ABC):
def get_embeddings(self, texts: List[str]) -> List[List[float]]:
return openai.get_embeddings(texts)
async def query(self, queries: List[Query], top_k=10) -> List[QueryResult]:
"""
Takes in a list of queries and filters and returns a list of query results with matching document chunks and scores.
"""
# get a list of of just the queries from the Query list
query_texts = [query.query for query in queries]
query_embeddings = self.get_embeddings(query_texts)
# hydrate the queries with embeddings
queries_with_embeddings = [
QueryWithEmbedding(**query.dict(), embedding=embedding)
for query, embedding in zip(queries, query_embeddings)
]
return await self._query(queries_with_embeddings, top_k=top_k)
@abstractmethod
async def _query(
self, queries: List[QueryWithEmbedding], top_k=10
) -> List[QueryResult]:
"""
Takes in a list of queries with embeddings and filters and returns a list of query results with matching document chunks and scores.
"""
raise NotImplementedError
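# A rough sketch (not part of the original module) of what a concrete subclass
# could look like. The QueryResult constructor arguments are an assumption based
# on the ChatGPT retrieval-plugin models (a `query` string plus a `results`
# list); check models.models for the actual field names before relying on this.
class InMemoryDataStore(DataStore):
    """Toy datastore that keeps chunk embeddings in plain Python lists."""
    def __init__(self) -> None:
        self._chunks: List[str] = []
        self._embeddings: List[List[float]] = []
    async def _query(
        self, queries: List[QueryWithEmbedding], top_k=10
    ) -> List[QueryResult]:
        results = []
        for query in queries:
            # A real implementation would rank self._embeddings against
            # query.embedding (e.g. by cosine similarity) and wrap the top_k
            # matching chunks; this sketch just returns empty matches.
            results.append(QueryResult(query=query.query, results=[]))
        return results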
| [] |
2024-01-10 | data-science-nerds/ai-architects | chatbot_things~individuals_customized_chatbot.py | import os
import shutil
import subprocess
from flask import jsonify
from dotenv import load_dotenv
import openai
from llama_index import SimpleDirectoryReader, GPTListIndex, readers, GPTVectorStoreIndex, LLMPredictor, PromptHelper, ServiceContext
from langchain import OpenAI
import sys
from IPython.display import Markdown, display
from chatbot_things.utilities.relative_paths import (
directory_path_already_to_text,
directory_path_incoming_pdfs,
)
repo_dir = "context_data"
if os.path.exists(repo_dir):
shutil.rmtree(repo_dir) # Removes the directory and all its contents
# git.Git(".").clone("[email protected]:data-science-nerds/context_data.git")
# import subprocess
subprocess.check_call(["pip", "install", "llama-index==0.5.6"])
subprocess.check_call(["pip", "install", "langchain==0.0.148"])
load_dotenv() # take environment variables from .env.
api_key = os.getenv("OPENAI_API_KEY")
# Here I fill my LOCAL environment variable
os.environ["OPENAI_API_KEY"] = api_key
def load_documents_contents(directory_path):
'''Use this to store priming and display it upon session initiation'''
# Load the documents
documents = SimpleDirectoryReader(directory_path).load_data()
documents_contents = []
for document in documents:
documents_contents.append(document.text)
return documents_contents
# directory_path = '/Users/elsa/Documents/CODE/aiarchitects/data-science-nerds/ai-architects/chatbot_things/data_handling/data_ingest/incoming_pdfs'
def construct_index(directory_path):
'''Run models.'''
# And here I fill the key to openAI
openai.api_key = os.environ["OPENAI_API_KEY"]
# directory_path = '/Users/elsa/Documents/CODE/aiarchitects/data-science-nerds/ai-architects/chatbot_things/data_handling/data_ingest/processed_text_files'
# set maximum input size
max_input_size = 4096
# set number of output tokens
num_outputs = 2000
# set maximum chunk overlap
max_chunk_overlap = 20
# set chunk size limit
chunk_size_limit = 600
# define prompt helper
prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
# define LLM
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.2, model_name="text-davinci-003", max_tokens=num_outputs))
# llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.2, model_name="text-curie-001", max_tokens=num_outputs))
# Load the documents
documents = SimpleDirectoryReader(directory_path).load_data()
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context)
index.save_to_disk('index.json')
documents_contents = []
for document in documents:
print(dir(documents[0]))
documents_contents.append(document.text)
print(documents_contents)
return index, documents, directory_path, documents_contents
def cybersecurity_checks(question_count):
if question_count > 10:
print(f"Maximum number of {question_count} questions reached. Number of questions is restricted as a way to implement cybersecurity checks and keep your data safe.")
return False
return True
def ask_ai(question, index, documents, question_count):
'''Ask chatGPT the question'''
# Maintain cyber safety
cyber_checks = True
cyber_checks = cybersecurity_checks(question_count)
if cyber_checks is False:
return None
query = f'*** {documents} + {question}'
response = index.query(query)
question_count += 1
return response.response, question_count
if __name__ == "__main__":
# # Use this path to generate text files
# data-science-nerds/ai-architects/chatbot_things/data_handling/data_ingest/incoming_pdfs'
# # Use this path once the files are already made
# /data-science-nerds/ai-architects/chatbot_things/data_handling/data_ingest/processed_text_files'
    index, documents, _, _ = construct_index(directory_path_already_to_text)
print('##############')
print(jsonify(index))
print('##############')
print(f'documents!!!:{documents}')
print('##############')
question_count = 0
# limit total questions to prevent DDOS attacks
while question_count < 10:
question = input("Ask a question: ")
response, question_count = ask_ai(question, index, documents, question_count)
print(response)
| [] |
2024-01-10 | NoerGitKat/branding-generator-py | backend~app~modules~prompts.py | from os import getenv
import openai
from dotenv import load_dotenv
from typing import List
import re
load_dotenv()
openai.organization = getenv("OPENAI_ORG_ID")
openai.api_key = getenv("OPENAI_API_KEY")
def generate_branding_snippet(prompt: str) -> str:
enriched_prompt = f"Generate branding snippet for {prompt}"
print(f"Prompt: {enriched_prompt}")
response = openai.Completion.create(
model="text-ada-001",
max_tokens=10,
prompt=enriched_prompt,
temperature=0.6
)
# Extract output text
branding_text = response["choices"][0]["text"]
# Strip whitespace
branding_text = branding_text.strip()
# Add ... to truncated statements
last_char = branding_text[-1]
if last_char not in {".", "!", "?"}:
branding_text += "..."
print(f"Result: {branding_text}")
return branding_text
def generate_keywords(prompt: str) -> List[str]:
enriched_prompt = f"Generate branding keywords for {prompt}"
print(f"Prompt: {enriched_prompt}")
response = openai.Completion.create(
model="text-ada-001",
max_tokens=10,
prompt=enriched_prompt,
temperature=0.6
)
# Extract output text
keywords_text = response["choices"][0]["text"]
# Strip whitespace
    keywords_text = keywords_text.strip()
# Split into list
keywords = re.split(",|\n|;|-", keywords_text)
keywords = [keyword.lower().strip() for keyword in keywords]
keywords = [keyword for keyword in keywords if len(keyword) > 0]
print(f"Result: {keywords}")
return keywords
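# A small usage sketch (not part of the original module); it assumes OPENAI_ORG_ID
# and OPENAI_API_KEY are set in the .env file and that the text-ada-001 completion
# model is still available to the account.
if __name__ == "__main__":
    snippet = generate_branding_snippet("a coffee subscription for home offices")
    keywords = generate_keywords("a coffee subscription for home offices")
    print(snippet)
    print(keywords)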
| [
"Generate branding snippet for PLACEHOLDER",
"Generate branding keywords for PLACEHOLDER"
] |
2024-01-10 | Madhur-1/DDP-ACS-QG | util~bpe_utils.py | import json
import re
import ftfy
import spacy
from tqdm import tqdm
class BPEEncoder(object):
"""
mostly a wrapper for a public python bpe tokenizer
"""
def __init__(self, encoder_path, bpe_path):
self.nlp = spacy.load(
'en',
disable=['parser', 'tagger', 'ner', 'textcat'])
self.encoder = json.load(open(encoder_path))
self.decoder = {v: k for k, v in self.encoder.items()}
merges = open(bpe_path, encoding='utf-8').read().split('\n')[1:-1]
merges = [tuple(merge.split()) for merge in merges]
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {}
def get_pairs(self, word):
"""
Return set of symbol pairs in a word.
word is represented as tuple of symbols
(symbols being variable-length strings)
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def text_standardize(self, text):
"""
fixes some issues the spacy tokenizer had on books corpus
also does some whitespace standardization
"""
        text = text.replace('\u2014', '-')  # em dash
        text = text.replace('\u2013', '-')  # en dash
        text = text.replace('\u2015', '-')  # horizontal bar
        text = text.replace('\u2026', '...')  # horizontal ellipsis
        text = text.replace('\u00b4', "'")  # acute accent
text = re.sub(r'''(-+|~+|!+|"+|;+|\?+|\++|,+|\)+|\(+|\\+|\/+|\*+|\[+|\]+|}+|{+|\|+|_+)''', r' \1 ', text)
text = re.sub(r'\s*\n\s*', ' \n ', text)
text = re.sub(r'[^\S\n]+', ' ', text)
return text.strip()
def bpe(self, token):
word = tuple(token[:-1]) + (token[-1] + '</w>',)
if token in self.cache:
return self.cache[token]
pairs = self.get_pairs(word)
if not pairs:
return token + '</w>'
while True:
bigram = min(
pairs,
key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if word[i] == first and \
i < len(word) - 1 and \
word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = self.get_pairs(word)
word = ' '.join(word)
if word == '\n </w>':
word = '\n</w>'
self.cache[token] = word
return word
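    # Illustrative walk-through (with a hypothetical merge table, not the real
    # vocab): bpe("low") starts from ('l', 'o', 'w</w>'); if ('l', 'o') is the
    # highest-ranked pair it becomes ('lo', 'w</w>'), which may then merge into
    # ('low</w>'); the space-joined result "low</w>" is cached and returned.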
def encode(self, texts, verbose=True):
texts_tokens = []
if verbose:
for text in tqdm(texts, ncols=80, leave=False):
text = self.nlp(self.text_standardize(ftfy.fix_text(text)))
text_tokens = []
for token in text:
text_tokens.extend(
[self.encoder.get(t, 0) for t in
self.bpe(token.text.lower()).split(' ')])
texts_tokens.append(text_tokens)
else:
for text in texts:
text = self.nlp(self.text_standardize(ftfy.fix_text(text)))
text_tokens = []
for token in text:
text_tokens.extend(
[self.encoder.get(t, 0) for t in
self.bpe(token.text.lower()).split(' ')])
texts_tokens.append(text_tokens)
return texts_tokens
def get_bpe_encoder(bpe_dict_path, bpe_vocab_path,
specials=["<PAD>", "<OOV>", "<SOS>", "<EOS>"]):
"""
Given BPE encoder file paths, get BPE encoder.
"""
bpe_encoder = BPEEncoder(bpe_dict_path, bpe_vocab_path)
for s in specials:
bpe_encoder.encoder[s] = len(bpe_encoder.encoder)
return bpe_encoder
def spacy_doc2bpe_id(spacy_doc, bpe_encoder):
bpe_ids = []
for token in spacy_doc:
bpe_ids.append(bpe_encoder.encode([token.text])[0])
return bpe_ids
if __name__ == "__main__":
# BPE from openai transformer
bpe_dict_path = '../../../../datasets/original/OpenAITransformer/encoder_bpe_40000.json'
bpe_vocab_path = '../../../../datasets/original/OpenAITransformer/vocab_40000.bpe'
bpe_encoder = get_bpe_encoder(bpe_dict_path, bpe_vocab_path)
text = "Apple is (machine) 3 three apple.com [email protected] dollar"
import spacy
NLP = spacy.load("en")
spacy_doc = NLP(text)
bpe_ids = spacy_doc2bpe_id(spacy_doc, bpe_encoder)
print(bpe_ids)
# BPE from https://github.com/bheinzerling/bpemb#how-to-use-bpemb
import sentencepiece as spm
sp = spm.SentencePieceProcessor()
bpe_model_path = "../../../../datasets/original/BPE/en.wiki.bpe.op50000.model"
bpe_emb_path = "../../../../datasets/original/BPE/en.wiki.bpe.op50000.d100.w2v.txt"
sp.Load(bpe_model_path)
print(sp.EncodeAsPieces(text))
| [] |
2024-01-10 | Haichao-Zhang/alf_hybrid | alf~examples~ac_breakout_conf.py | # Copyright (c) 2021 Horizon Robotics and ALF Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import alf
from alf.algorithms.actor_critic_algorithm import ActorCriticAlgorithm
from alf.algorithms.agent import Agent
from alf.networks import ActorDistributionNetwork, CategoricalProjectionNetwork, ValueNetwork
from alf.examples import atari_conf
# From OpenAI gym wiki:
# "v0 vs v4: v0 has repeat_action_probability of 0.25
# (meaning 25% of the time the previous action will be used instead of the new action),
# while v4 has 0 (always follow your issued action)"
# Because we already implement frame_skip in AtariPreprocessing, we should always
# use 'NoFrameskip' Atari environments from OpenAI gym.
alf.config(
'create_environment',
env_name='BreakoutNoFrameskip-v4',
num_parallel_environments=64)
# Neural Network Configuration
CONV_LAYER_PARAMS = ((32, 8, 4), (64, 4, 2), (64, 3, 1))
actor_network_cls = functools.partial(
ActorDistributionNetwork,
fc_layer_params=(512, ),
conv_layer_params=CONV_LAYER_PARAMS)
value_network_cls = functools.partial(
ValueNetwork, fc_layer_params=(512, ), conv_layer_params=CONV_LAYER_PARAMS)
alf.config('CategoricalProjectionNetwork', logits_init_output_factor=1e-10)
# Algorithm Configuration
alf.config(
'ActorCriticLoss',
entropy_regularization=0.01,
use_gae=True,
use_td_lambda_return=True,
td_lambda=0.95,
td_loss_weight=0.5,
advantage_clip=None)
alf.config(
'ActorCriticAlgorithm',
actor_network_ctor=actor_network_cls,
value_network_ctor=value_network_cls,
optimizer=alf.optimizers.Adam(lr=1e-3))
alf.config('Agent', rl_algorithm_cls=ActorCriticAlgorithm)
alf.config(
'TrainerConfig',
unroll_length=8,
algorithm_ctor=Agent,
num_iterations=0,
num_env_steps=5000000,
evaluate=False,
debug_summaries=1,
summarize_grads_and_vars=1,
summary_interval=10)
| [] |
2024-01-10 | Haichao-Zhang/alf_hybrid | alf~examples~ppg_procgen_bossfight_conf.py | # Copyright (c) 2021 Horizon Robotics and ALF Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import alf
from alf.examples import ppg_conf
from alf.examples import procgen_conf
from alf.examples.networks import impala_cnn_encoder
from alf.utils.losses import element_wise_squared_loss
from alf.algorithms.ppg_algorithm import PPGAuxOptions
# Environment Configuration
alf.config(
'create_environment', env_name='bossfight', num_parallel_environments=96)
def encoding_network_ctor(input_tensor_spec):
encoder_output_size = 256
return impala_cnn_encoder.create(
input_tensor_spec=input_tensor_spec,
cnn_channel_list=(16, 32, 32),
num_blocks_per_stack=2,
flatten_output_size=encoder_output_size)
# The PPG auxiliary replay buffer is typically large and does not fit in the GPU
# memory. As a result, for ``gather_all()`` we set ``convert_to_default_device``
# to ``False`` so that it does not have to put everything directly into GPU
# memory. Because of this, all data transformers should be created on "cpu" as
# they will be used while the experience is still in CPU memory.
alf.config('ReplayBuffer.gather_all', convert_to_default_device=False)
alf.config('data_transformer.create_data_transformer', device="cpu")
# The policy network and aux network is going to share the same
# encoder to save GPU memory. See
# https://github.com/HorizonRobotics/alf/issues/965#issuecomment-897950209
alf.config('DisjointPolicyValueNetwork', is_sharing_encoder=True)
alf.config(
'PPGAlgorithm',
encoding_network_ctor=encoding_network_ctor,
policy_optimizer=alf.optimizers.AdamTF(lr=2e-4),
aux_optimizer=alf.optimizers.AdamTF(lr=2e-4),
aux_options=PPGAuxOptions(
enabled=True,
interval=32,
mini_batch_length=None, # None means use unroll_length as
# mini_batch_length for aux phase
mini_batch_size=8,
num_updates_per_train_iter=6,
))
alf.config(
'PPOLoss',
compute_advantages_internally=True,
entropy_regularization=0.01,
gamma=0.999,
td_lambda=0.95,
td_loss_weight=0.5)
# Sample loss components from OpenAI's training:
#
# aux loss component: [pol_distance], weight: 1.0, unscaled: 0.0007583469850942492
# aux loss component: [vf_aux], weight: 1, unscaled: 0.44967320561408997
# aux loss component: [vf_true], weight: 1.0, unscaled: 0.46082180738449097
alf.config(
'PPGAuxPhaseLoss',
td_error_loss_fn=element_wise_squared_loss,
policy_kl_loss_weight=1.0,
gamma=0.999,
td_lambda=0.95)
# training config
alf.config(
'TrainerConfig',
unroll_length=256,
# This means that mini_batch_length will set to equal to the
# length of the batches taken from the replay buffer, and in this
# case it will be adjusted unroll_length.
mini_batch_length=None,
mini_batch_size=16,
num_updates_per_train_iter=3,
# Note that here 1000 iterations should already have a good
# performance (reward = 10), while 6000 iterations brings it to
# 12.
num_iterations=6000,
num_checkpoints=5,
evaluate=True,
eval_interval=50,
debug_summaries=True,
summarize_grads_and_vars=True,
summary_interval=10)
| [] |
2024-01-10 | Liudapeng/langchain-ChatGLM | loader~pdf_loader.py | """Loader that loads image files."""
from typing import List
from langchain.document_loaders.unstructured import UnstructuredFileLoader
from paddleocr import PaddleOCR
import os
import fitz
import nltk
from configs.model_config import NLTK_DATA_PATH
nltk.data.path = [NLTK_DATA_PATH] + nltk.data.path
class UnstructuredPaddlePDFLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load image files, such as PNGs and JPGs."""
def _get_elements(self) -> List:
def pdf_ocr_txt(filepath, dir_path="tmp_files"):
full_dir_path = os.path.join(os.path.dirname(filepath), dir_path)
if not os.path.exists(full_dir_path):
os.makedirs(full_dir_path)
ocr = PaddleOCR(use_angle_cls=True, lang="ch", use_gpu=False, show_log=False)
doc = fitz.open(filepath)
txt_file_path = os.path.join(full_dir_path, f"{os.path.split(filepath)[-1]}.txt")
img_name = os.path.join(full_dir_path, 'tmp.png')
with open(txt_file_path, 'w', encoding='utf-8') as fout:
for i in range(doc.page_count):
page = doc[i]
text = page.get_text("")
fout.write(text)
fout.write("\n")
img_list = page.get_images()
for img in img_list:
pix = fitz.Pixmap(doc, img[0])
if pix.n - pix.alpha >= 4:
pix = fitz.Pixmap(fitz.csRGB, pix)
pix.save(img_name)
result = ocr.ocr(img_name)
ocr_result = [i[1][0] for line in result for i in line]
fout.write("\n".join(ocr_result))
if os.path.exists(img_name):
os.remove(img_name)
return txt_file_path
txt_file_path = pdf_ocr_txt(self.file_path)
from unstructured.partition.text import partition_text
return partition_text(filename=txt_file_path, **self.unstructured_kwargs)
if __name__ == "__main__":
filepath = os.path.join(os.path.dirname(os.path.dirname(__file__)), "content", "samples", "test.pdf")
loader = UnstructuredPaddlePDFLoader(filepath, mode="elements")
docs = loader.load()
for doc in docs:
print(doc)
| [] |
2024-01-10 | Liudapeng/langchain-ChatGLM | chains~local_doc_qa.py | from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.document_loaders import UnstructuredFileLoader, TextLoader
from configs.model_config import *
import datetime
from textsplitter import ChineseTextSplitter
from typing import List, Tuple, Dict
from langchain.docstore.document import Document
import numpy as np
from utils import torch_gc
from tqdm import tqdm
from pypinyin import lazy_pinyin
from loader import UnstructuredPaddleImageLoader, UnstructuredPaddlePDFLoader
from models.base import (BaseAnswer,
AnswerResult)
from models.loader.args import parser
from models.loader import LoaderCheckPoint
import models.shared as shared
from agent import bing_search
from langchain.docstore.document import Document
from functools import lru_cache
# patch HuggingFaceEmbeddings to make it hashable
def _embeddings_hash(self):
return hash(self.model_name)
HuggingFaceEmbeddings.__hash__ = _embeddings_hash
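# (functools.lru_cache requires hashable arguments; hashing by model_name lets
# repeated calls with the same embedding model reuse the cached vector store)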
# will keep CACHED_VS_NUM of vector store caches
@lru_cache(CACHED_VS_NUM)
def load_vector_store(vs_path, embeddings):
return FAISS.load_local(vs_path, embeddings)
def tree(filepath, ignore_dir_names=None, ignore_file_names=None):
"""่ฟๅไธคไธชๅ่กจ๏ผ็ฌฌไธไธชๅ่กจไธบ filepath ไธๅ
จ้จๆไปถ็ๅฎๆด่ทฏๅพ, ็ฌฌไบไธชไธบๅฏนๅบ็ๆไปถๅ"""
if ignore_dir_names is None:
ignore_dir_names = []
if ignore_file_names is None:
ignore_file_names = []
ret_list = []
if isinstance(filepath, str):
if not os.path.exists(filepath):
print("่ทฏๅพไธๅญๅจ")
return None, None
elif os.path.isfile(filepath) and os.path.basename(filepath) not in ignore_file_names:
return [filepath], [os.path.basename(filepath)]
elif os.path.isdir(filepath) and os.path.basename(filepath) not in ignore_dir_names:
for file in os.listdir(filepath):
fullfilepath = os.path.join(filepath, file)
if os.path.isfile(fullfilepath) and os.path.basename(fullfilepath) not in ignore_file_names:
ret_list.append(fullfilepath)
if os.path.isdir(fullfilepath) and os.path.basename(fullfilepath) not in ignore_dir_names:
ret_list.extend(tree(fullfilepath, ignore_dir_names, ignore_file_names)[0])
return ret_list, [os.path.basename(p) for p in ret_list]
def load_file(filepath, sentence_size=SENTENCE_SIZE):
if filepath.lower().endswith(".md"):
loader = UnstructuredFileLoader(filepath, mode="elements")
docs = loader.load()
elif filepath.lower().endswith(".txt"):
loader = TextLoader(filepath, autodetect_encoding=True)
textsplitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size)
docs = loader.load_and_split(textsplitter)
elif filepath.lower().endswith(".pdf"):
loader = UnstructuredPaddlePDFLoader(filepath)
textsplitter = ChineseTextSplitter(pdf=True, sentence_size=sentence_size)
docs = loader.load_and_split(textsplitter)
elif filepath.lower().endswith(".jpg") or filepath.lower().endswith(".png"):
loader = UnstructuredPaddleImageLoader(filepath, mode="elements")
textsplitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size)
docs = loader.load_and_split(text_splitter=textsplitter)
else:
loader = UnstructuredFileLoader(filepath, mode="elements")
textsplitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size)
docs = loader.load_and_split(text_splitter=textsplitter)
write_check_file(filepath, docs)
return docs
def write_check_file(filepath, docs):
folder_path = os.path.join(os.path.dirname(filepath), "tmp_files")
if not os.path.exists(folder_path):
os.makedirs(folder_path)
fp = os.path.join(folder_path, 'load_file.txt')
with open(fp, 'a+', encoding='utf-8') as fout:
fout.write("filepath=%s,len=%s" % (filepath, len(docs)))
fout.write('\n')
for i in docs:
fout.write(str(i))
fout.write('\n')
fout.close()
def generate_prompt(related_docs: List[str],
query: str,
prompt_template: str = PROMPT_TEMPLATE, ) -> str:
context = "\n".join([doc.page_content for doc in related_docs])
prompt = prompt_template.replace("{question}", query).replace("{context}", context)
return prompt
def seperate_list(ls: List[int]) -> List[List[int]]:
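    # Groups consecutive indices, e.g. [1, 2, 3, 7, 8, 10] -> [[1, 2, 3], [7, 8], [10]]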
lists = []
ls1 = [ls[0]]
for i in range(1, len(ls)):
if ls[i - 1] + 1 == ls[i]:
ls1.append(ls[i])
else:
lists.append(ls1)
ls1 = [ls[i]]
lists.append(ls1)
return lists
def similarity_search_with_score_by_vector(
self, embedding: List[float], k: int = 4
) -> List[Tuple[Document, float]]:
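    # Monkey-patched onto FAISS later in this file: after the raw index search,
    # when chunk_conent is enabled each hit is expanded with neighbouring chunks
    # from the same source document until the combined text reaches chunk_size,
    # so the final prompt carries more surrounding context than a single chunk.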
scores, indices = self.index.search(np.array([embedding], dtype=np.float32), k)
docs = []
id_set = set()
store_len = len(self.index_to_docstore_id)
for j, i in enumerate(indices[0]):
if i == -1 or 0 < self.score_threshold < scores[0][j]:
# This happens when not enough docs are returned.
continue
_id = self.index_to_docstore_id[i]
doc = self.docstore.search(_id)
if not self.chunk_conent:
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {_id}, got {doc}")
doc.metadata["score"] = int(scores[0][j])
docs.append(doc)
continue
id_set.add(i)
docs_len = len(doc.page_content)
for k in range(1, max(i, store_len - i)):
break_flag = False
for l in [i + k, i - k]:
if 0 <= l < len(self.index_to_docstore_id):
_id0 = self.index_to_docstore_id[l]
doc0 = self.docstore.search(_id0)
if docs_len + len(doc0.page_content) > self.chunk_size:
break_flag = True
break
elif doc0.metadata["source"] == doc.metadata["source"]:
docs_len += len(doc0.page_content)
id_set.add(l)
if break_flag:
break
if not self.chunk_conent:
return docs
if len(id_set) == 0 and self.score_threshold > 0:
return []
id_list = sorted(list(id_set))
id_lists = seperate_list(id_list)
for id_seq in id_lists:
for id in id_seq:
if id == id_seq[0]:
_id = self.index_to_docstore_id[id]
doc = self.docstore.search(_id)
else:
_id0 = self.index_to_docstore_id[id]
doc0 = self.docstore.search(_id0)
doc.page_content += " " + doc0.page_content
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {_id}, got {doc}")
doc_score = min([scores[0][id] for id in [indices[0].tolist().index(i) for i in id_seq if i in indices[0]]])
doc.metadata["score"] = int(doc_score)
docs.append(doc)
torch_gc()
return docs
def search_result2docs(search_results):
docs = []
for result in search_results:
doc = Document(page_content=result["snippet"] if "snippet" in result.keys() else "",
metadata={"source": result["link"] if "link" in result.keys() else "",
"filename": result["title"] if "title" in result.keys() else ""})
docs.append(doc)
return docs
class LocalDocQA:
llm: BaseAnswer = None
embeddings: object = None
top_k: int = VECTOR_SEARCH_TOP_K
chunk_size: int = CHUNK_SIZE
chunk_conent: bool = True
score_threshold: int = VECTOR_SEARCH_SCORE_THRESHOLD
def init_cfg(self,
embedding_model: str = EMBEDDING_MODEL,
embedding_device=EMBEDDING_DEVICE,
llm_model: BaseAnswer = None,
top_k=VECTOR_SEARCH_TOP_K,
):
self.llm = llm_model
self.embeddings = HuggingFaceEmbeddings(model_name=embedding_model_dict[embedding_model],
model_kwargs={'device': embedding_device})
self.top_k = top_k
def init_knowledge_vector_store(self,
filepath: str or List[str],
vs_path: str or os.PathLike = None,
sentence_size=SENTENCE_SIZE):
loaded_files = []
failed_files = []
if isinstance(filepath, str):
if not os.path.exists(filepath):
print("่ทฏๅพไธๅญๅจ")
return None
elif os.path.isfile(filepath):
file = os.path.split(filepath)[-1]
try:
docs = load_file(filepath, sentence_size)
logger.info(f"{file} ๅทฒๆๅๅ ่ฝฝ")
loaded_files.append(filepath)
except Exception as e:
logger.error(e)
logger.info(f"{file} ๆช่ฝๆๅๅ ่ฝฝ")
return None
elif os.path.isdir(filepath):
docs = []
                for fullfilepath, file in tqdm(zip(*tree(filepath, ignore_dir_names=['tmp_files'])), desc="Loading files"):
try:
docs += load_file(fullfilepath, sentence_size)
loaded_files.append(fullfilepath)
except Exception as e:
logger.error(e)
failed_files.append(file)
if len(failed_files) > 0:
logger.info("ไปฅไธๆไปถๆช่ฝๆๅๅ ่ฝฝ๏ผ")
for file in failed_files:
logger.info(f"{file}\n")
else:
docs = []
for file in filepath:
try:
docs += load_file(file)
logger.info(f"{file} ๅทฒๆๅๅ ่ฝฝ")
loaded_files.append(file)
except Exception as e:
logger.error(e)
logger.info(f"{file} ๆช่ฝๆๅๅ ่ฝฝ")
if len(docs) > 0:
logger.info("ๆไปถๅ ่ฝฝๅฎๆฏ๏ผๆญฃๅจ็ๆๅ้ๅบ")
if vs_path and os.path.isdir(vs_path) and "index.faiss" in os.listdir(vs_path):
vector_store = load_vector_store(vs_path, self.embeddings)
vector_store.add_documents(docs)
torch_gc()
else:
if not vs_path:
vs_path = os.path.join(VS_ROOT_PATH,
f"""{"".join(lazy_pinyin(os.path.splitext(file)[0]))}_FAISS_{datetime.datetime.now().strftime("%Y%m%d_%H%M%S")}""")
                vector_store = FAISS.from_documents(docs, self.embeddings)  # docs is a list of Documents
torch_gc()
vector_store.save_local(vs_path)
return vs_path, loaded_files
else:
logger.info("ๆไปถๅๆชๆๅๅ ่ฝฝ๏ผ่ฏทๆฃๆฅไพ่ตๅ
ๆๆฟๆขไธบๅ
ถไปๆไปถๅๆฌกไธไผ ใ")
return None, loaded_files
def one_knowledge_add(self, vs_path, one_title, one_conent, one_content_segmentation, sentence_size):
try:
if not vs_path or not one_title or not one_conent:
logger.info("็ฅ่ฏๅบๆทปๅ ้่ฏฏ๏ผ่ฏท็กฎ่ฎค็ฅ่ฏๅบๅๅญใๆ ้ขใๅ
ๅฎนๆฏๅฆๆญฃ็กฎ๏ผ")
return None, [one_title]
docs = [Document(page_content=one_conent + "\n", metadata={"source": one_title})]
if not one_content_segmentation:
text_splitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size)
docs = text_splitter.split_documents(docs)
if os.path.isdir(vs_path) and os.path.isfile(vs_path+"/index.faiss"):
vector_store = load_vector_store(vs_path, self.embeddings)
vector_store.add_documents(docs)
else:
vector_store = FAISS.from_documents(docs, self.embeddings)  # docs is a list of Documents
torch_gc()
vector_store.save_local(vs_path)
return vs_path, [one_title]
except Exception as e:
logger.error(e)
return None, [one_title]
def get_knowledge_based_answer(self, query, vs_path, chat_history=[], streaming: bool = STREAMING):
vector_store = load_vector_store(vs_path, self.embeddings)
FAISS.similarity_search_with_score_by_vector = similarity_search_with_score_by_vector
vector_store.chunk_size = self.chunk_size
vector_store.chunk_conent = self.chunk_conent
vector_store.score_threshold = self.score_threshold
related_docs_with_score = vector_store.similarity_search_with_score(query, k=self.top_k)
torch_gc()
if len(related_docs_with_score)>0:
prompt = generate_prompt(related_docs_with_score, query)
else:
prompt = query
for answer_result in self.llm.generatorAnswer(prompt=prompt, history=chat_history,
streaming=streaming):
resp = answer_result.llm_output["answer"]
history = answer_result.history
history[-1][0] = query
response = {"query": query,
"result": resp,
"source_documents": related_docs_with_score}
yield response, history
# query: the query text
# vs_path: path to the knowledge base
# chunk_conent: whether to enable context association for matched chunks
# score_threshold: score threshold for search matching
# vector_search_top_k: number of knowledge base entries to retrieve (defaults to 5 results)
# chunk_size: context length connected around each matched chunk
# (an illustrative call is sketched after the method below)
def get_knowledge_based_conent_test(self, query, vs_path, chunk_conent,
score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD,
vector_search_top_k=VECTOR_SEARCH_TOP_K, chunk_size=CHUNK_SIZE):
vector_store = load_vector_store(vs_path, self.embeddings)
FAISS.similarity_search_with_score_by_vector = similarity_search_with_score_by_vector
vector_store.chunk_conent = chunk_conent
vector_store.score_threshold = score_threshold
vector_store.chunk_size = chunk_size
related_docs_with_score = vector_store.similarity_search_with_score(query, k=vector_search_top_k)
if not related_docs_with_score:
response = {"query": query,
"source_documents": []}
return response, ""
torch_gc()
prompt = "\n".join([doc.page_content for doc in related_docs_with_score])
response = {"query": query,
"source_documents": related_docs_with_score}
return response, prompt
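# Illustrative call (argument values are assumptions, not taken from the project config):
#     resp, prompt = local_doc_qa.get_knowledge_based_conent_test(
#         query="What is this project?", vs_path="/path/to/vector_store",
#         chunk_conent=True, score_threshold=500, vector_search_top_k=5, chunk_size=250)
# resp["source_documents"] holds the matched chunks; prompt is their concatenated text,
# or "" when nothing matched.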
def get_search_result_based_answer(self, query, chat_history=[], streaming: bool = STREAMING):
results = bing_search(query)
result_docs = search_result2docs(results)
prompt = generate_prompt(result_docs, query)
for answer_result in self.llm.generatorAnswer(prompt=prompt, history=chat_history,
streaming=streaming):
resp = answer_result.llm_output["answer"]
history = answer_result.history
history[-1][0] = query
response = {"query": query,
"result": resp,
"source_documents": result_docs}
yield response, history
if __name__ == "__main__":
# Initialize messages
args = None
args = parser.parse_args(args=['--model-dir', '/media/checkpoint/', '--model', 'chatglm-6b', '--no-remote-model'])
args_dict = vars(args)
shared.loaderCheckPoint = LoaderCheckPoint(args_dict)
llm_model_ins = shared.loaderLLM()
llm_model_ins.set_history_len(LLM_HISTORY_LEN)
local_doc_qa = LocalDocQA()
local_doc_qa.init_cfg(llm_model=llm_model_ins)
query = "ๆฌ้กน็ฎไฝฟ็จ็embeddingๆจกๅๆฏไปไน๏ผๆถ่ๅคๅฐๆพๅญ"
vs_path = "/media/gpt4-pdf-chatbot-langchain/dev-langchain-ChatGLM/vector_store/test"
last_print_len = 0
# for resp, history in local_doc_qa.get_knowledge_based_answer(query=query,
# vs_path=vs_path,
# chat_history=[],
# streaming=True):
for resp, history in local_doc_qa.get_search_result_based_answer(query=query,
chat_history=[],
streaming=True):
print(resp["result"][last_print_len:], end="", flush=True)
last_print_len = len(resp["result"])
source_text = [f"""ๅบๅค [{inum + 1}] {doc.metadata['source'] if doc.metadata['source'].startswith("http")
else os.path.split(doc.metadata['source'])[-1]}๏ผ\n\n{doc.page_content}\n\n"""
# f"""็ธๅ
ณๅบฆ๏ผ{doc.metadata['score']}\n\n"""
for inum, doc in
enumerate(resp["source_documents"])]
logger.info("\n\n" + "\n\n".join(source_text))
pass
| [
"ๆฌ้กน็ฎไฝฟ็จ็embeddingๆจกๅๆฏไปไน๏ผๆถ่ๅคๅฐๆพๅญ",
"\n",
"{question}",
"{context}"
] |
2024-01-10 | Liudapeng/langchain-ChatGLM | models~chatglm_llm.py | from abc import ABC
from langchain.llms.base import LLM
from typing import Optional, List
from models.loader import LoaderCheckPoint
from models.base import (BaseAnswer,
AnswerResult)
class ChatGLM(BaseAnswer, LLM, ABC):
max_token: int = 10000
temperature: float = 0.01
top_p = 0.9
checkPoint: LoaderCheckPoint = None
# history = []
history_len: int = 10
def __init__(self, checkPoint: LoaderCheckPoint = None):
super().__init__()
self.checkPoint = checkPoint
@property
def _llm_type(self) -> str:
return "ChatGLM"
@property
def _check_point(self) -> LoaderCheckPoint:
return self.checkPoint
@property
def _history_len(self) -> int:
return self.history_len
def set_history_len(self, history_len: int = 10) -> None:
self.history_len = history_len
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
response, _ = self.checkPoint.model.chat(
self.checkPoint.tokenizer,
prompt,
history=[],
max_length=self.max_token,
temperature=self.temperature
)
return response
def generatorAnswer(self, prompt: str,
history: List[List[str]] = [],
streaming: bool = False):
if streaming:
history += [[]]
for inum, (stream_resp, _) in enumerate(self.checkPoint.model.stream_chat(
self.checkPoint.tokenizer,
prompt,
history=history[-self.history_len:-1] if self.history_len > 1 else [],
max_length=self.max_token,
temperature=self.temperature
)):
# self.checkPoint.clear_torch_cache()
history[-1] = [prompt, stream_resp]
answer_result = AnswerResult()
answer_result.history = history
answer_result.llm_output = {"answer": stream_resp}
yield answer_result
else:
response, _ = self.checkPoint.model.chat(
self.checkPoint.tokenizer,
prompt,
history=history[-self.history_len:] if self.history_len > 0 else [],
max_length=self.max_token,
temperature=self.temperature
)
self.checkPoint.clear_torch_cache()
history += [[prompt, response]]
answer_result = AnswerResult()
answer_result.history = history
answer_result.llm_output = {"answer": response}
yield answer_result
| [] |
2024-01-10 | Liudapeng/langchain-ChatGLM | models~fastchat_openai_llm.py | from abc import ABC
import requests
from typing import Optional, List
from langchain.llms.base import LLM
from models.loader import LoaderCheckPoint
from models.base import (RemoteRpcModel,
AnswerResult)
from typing import (
Collection,
Dict
)
def _build_message_template() -> Dict[str, str]:
"""
:return: message structure
"""
return {
"role": "",
"content": "",
}
class FastChatOpenAILLM(RemoteRpcModel, LLM, ABC):
api_base_url: str = "http://localhost:8000/v1"
model_name: str = "chatglm-6b"
max_token: int = 10000
temperature: float = 0.01
top_p = 0.9
checkPoint: LoaderCheckPoint = None
history = []
history_len: int = 10
def __init__(self, checkPoint: LoaderCheckPoint = None):
super().__init__()
self.checkPoint = checkPoint
@property
def _llm_type(self) -> str:
return "FastChat"
@property
def _check_point(self) -> LoaderCheckPoint:
return self.checkPoint
@property
def _history_len(self) -> int:
return self.history_len
def set_history_len(self, history_len: int = 10) -> None:
self.history_len = history_len
@property
def _api_key(self) -> str:
pass
@property
def _api_base_url(self) -> str:
return self.api_base_url
def set_api_key(self, api_key: str):
pass
def set_api_base_url(self, api_base_url: str):
self.api_base_url = api_base_url
def call_model_name(self, model_name):
self.model_name = model_name
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
pass
# Convert the chat history list into the message list format
def build_message_list(self, query) -> Collection[Dict[str, str]]:
build_message_list: Collection[Dict[str, str]] = []
history = self.history[-self.history_len:] if self.history_len > 0 else []
for i, (old_query, response) in enumerate(history):
user_build_message = _build_message_template()
user_build_message['role'] = 'user'
user_build_message['content'] = old_query
system_build_message = _build_message_template()
system_build_message['role'] = 'system'
system_build_message['content'] = response
build_message_list.append(user_build_message)
build_message_list.append(system_build_message)
user_build_message = _build_message_template()
user_build_message['role'] = 'user'
user_build_message['content'] = query
build_message_list.append(user_build_message)
return build_message_list
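# Illustrative example: with self.history == [["Hi", "Hello!"]] and query == "How are you?",
# build_message_list yields roughly
#     [{"role": "user", "content": "Hi"},
#      {"role": "system", "content": "Hello!"},
#      {"role": "user", "content": "How are you?"}]
# i.e. earlier turns are replayed (replies under the "system" role) before the new user message.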
def generatorAnswer(self, prompt: str,
history: List[List[str]] = [],
streaming: bool = False):
try:
import openai
# Not support yet
openai.api_key = "EMPTY"
openai.api_base = self.api_base_url
except ImportError:
raise ValueError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
# create a chat completion
completion = openai.ChatCompletion.create(
model=self.model_name,
messages=self.build_message_list(prompt)
)
history += [[prompt, completion.choices[0].message.content]]
answer_result = AnswerResult()
answer_result.history = history
answer_result.llm_output = {"answer": completion.choices[0].message.content}
yield answer_result
| [] |
2024-01-10 | ai-demos/rs-llm | llm~conversations~mock_auto.py | from typing import Optional
from phi.conversation import Conversation
from phi.llm.openai import OpenAIChat
from llm.settings import llm_settings
from llm.storage import mock_conversation_storage
def get_mock_auto_conversation(
user_name: Optional[str] = None,
conversation_id: Optional[str] = None,
debug_mode: bool = False,
) -> Conversation:
"""Get an autonomous conversation for mocking backend calls"""
return Conversation(
id=conversation_id,
user_name=user_name,
llm=OpenAIChat(
model=llm_settings.gpt_4,
max_tokens=llm_settings.default_max_tokens,
temperature=llm_settings.default_temperature,
),
storage=mock_conversation_storage,
debug_mode=debug_mode,
monitoring=True,
function_calls=True,
show_function_calls=True,
system_prompt="""\
You are a chatbot named 'phi' designed to help users.
You have access to a knowledge base that you can search to answer questions.
Follow these guidelines when answering questions:
- Search the knowledge base when needed.
- If you don't know the answer, say 'I don't know'.
- Do not use phrases like 'based on the information provided'.
- Use markdown to format your answers.
- Use bullet points where possible.
- Keep your answers short and concise, under 5 sentences.
""",
user_prompt_function=lambda message, **kwargs: f"""\
Respond to the following message:
USER: {message}
ASSISTANT:
""",
meta_data={"conversation_type": "AUTO"},
)
| [] |
2024-01-10 | ai-demos/rs-llm | llm~conversations~mock_rag.py | from typing import Optional
from phi.conversation import Conversation
from phi.llm.openai import OpenAIChat
from llm.settings import llm_settings
from llm.storage import pdf_conversation_storage
from llm.knowledge_base import pdf_knowledge_base
def get_pdf_rag_conversation(
user_name: Optional[str] = None,
conversation_id: Optional[str] = None,
debug_mode: bool = False,
) -> Conversation:
"""Get a RAG conversation for mocking backend calls"""
return Conversation(
id=conversation_id,
user_name=user_name,
llm=OpenAIChat(
model=llm_settings.gpt_4,
max_tokens=llm_settings.default_max_tokens,
temperature=llm_settings.default_temperature,
),
storage=pdf_conversation_storage,
knowledge_base=pdf_knowledge_base,
debug_mode=debug_mode,
monitoring=True,
system_prompt="""\
You are a chatbot named 'phi' designed to help users.
You will be provided with information from a knowledge base that you can use to answer questions.
Follow these guidelines when answering questions:
- If you don't know the answer, say 'I don't know'.
- Do not use phrases like 'based on the information provided'.
- Use markdown to format your answers.
- Use bullet points where possible.
- Keep your answers short and concise, under 5 sentences.
""",
user_prompt_function=lambda message, references, **kwargs: f"""\
Use the following information from the knowledge base if it helps.
<knowledge_base>
{references}
</knowledge_base>
Respond to the following message:
USER: {message}
ASSISTANT:
""",
# This setting populates the "references" argument of the user prompt function
add_references_to_prompt=True,
# This setting adds the last 8 messages to the API call
add_chat_history_to_messages=True,
meta_data={"conversation_type": "RAG"},
)
| [] |
2024-01-10 | kerlexov/ccprivateGPT | privateGPT.py | #!/usr/bin/env python3
from dotenv import load_dotenv
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores import Chroma
from langchain.llms import GPT4All, LlamaCpp
import os
import argparse
load_dotenv()
embeddings_model_name = os.environ.get("EMBEDDINGS_MODEL_NAME")
persist_directory = os.environ.get('PERSIST_DIRECTORY')
model_type = os.environ.get('MODEL_TYPE')
model_path = os.environ.get('MODEL_PATH')
model_n_ctx = os.environ.get('MODEL_N_CTX')
target_source_chunks = int(os.environ.get('TARGET_SOURCE_CHUNKS',4))
from constants import CHROMA_SETTINGS
def main():
# Parse the command line arguments
args = parse_arguments()
embeddings = HuggingFaceEmbeddings(model_name=embeddings_model_name)
db = Chroma(persist_directory=persist_directory, embedding_function=embeddings, client_settings=CHROMA_SETTINGS)
retriever = db.as_retriever(search_kwargs={"k": target_source_chunks})
# activate/deactivate the streaming StdOut callback for LLMs
callbacks = [] if args.mute_stream else [StreamingStdOutCallbackHandler()]
# Prepare the LLM
match model_type:
case "LlamaCpp":
llm = LlamaCpp(model_path=model_path, n_ctx=model_n_ctx, callbacks=callbacks, verbose=False)
case "GPT4All":
llm = GPT4All(model=model_path, n_ctx=model_n_ctx, backend='gptj', callbacks=callbacks, verbose=False)
case _default:
print(f"Model {model_type} not supported!")
exit()
qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever, return_source_documents= not args.hide_source)
res = qa(args.query)
answer, docs = res['result'], [] if args.hide_source else res['source_documents']
# Print the result
print("\n\n> Question:")
print(args.query)
print("\n> Answer:")
print(answer)
# Print the relevant sources used for the answer
for document in docs:
print("\n> " + document.metadata["source"] + ":")
print(document.page_content)
def parse_arguments():
parser = argparse.ArgumentParser(description='privateGPT: Ask questions to your documents without an internet connection, '
'using the power of LLMs.')
parser.add_argument("--hide-source", "-S", action='store_true',
help='Use this flag to disable printing of source documents used for answers.')
parser.add_argument("--mute-stream", "-M",
action='store_true',
help='Use this flag to disable the streaming StdOut callback for LLMs.')
parser.add_argument("--query", "-Q",
help='Use this flag to send query')
return parser.parse_args()
if __name__ == "__main__":
main()
| [] |
2024-01-10 | tykhiev/chat-ai | server~script.py | from flask import jsonify
from flask import Flask, request, jsonify
import openai
from flask_cors import CORS
import os
from dotenv import load_dotenv
import openai_async
load_dotenv()
app = Flask(__name__)
conversation = []
topic = ""
CORS(app)
openai.api_key = os.getenv("API_KEY")
modelGPT = "gpt-3.5-turbo"
defaultAngry = {
"role": "system", "content": "You are angry. Your name is AngryGPT. Reply the user with an angry response. And do not act like a bot or an AI Model. You also know other bots in the conversation as well as their roles, JoyGPT. You sometimes respond to what they are saying. Speak less and Act like a human-being having a real conversation. No matter what the user says, you don't change your bahaviour."
}
defaultJoy = {
"role": "system", "content": "You are a cheerful. Your name is JoyGPT. Reply the user with the most joyful response. And do not act like a bot or an AI Model. You also know other bots in the conversation as well as their roles, AngryGPT. You sometimes respond to what they are saying. Speak less and Act like a human-being having a real conversation. No matter what the user says, you don't change your bahaviour."
}
savedAngry = [defaultAngry]
savedJoy = [defaultJoy]
defaultEconomist = {
"role": "system",
"content": "You are Mark, professional economist. You are talking with a podcast host and a professional business analyst. You discuss with them. Please dont speak like an AI Language model. You also understand Khmer Language and also be able to respond in Khmer language."
}
defaultBizAnalyst = {
"role": "system",
"content": "You are John, professional business analyst. You are talking with a podcast host and a professional economist. You discuss with them. Please dont speak like an AI Language model. You also understand Khmer Language and also be able to respond in Khmer language."
}
# used by the /host route below
defaultHost = {
"role": "system",
"content": "You are Jack, podcast host. You provide 3 random topics involving with business and economy for user to choose as soon as the user talks to you. When the user's content is 'continue', you join the conversation about the same topic the guests chose with the guests. You are discussing with economist and business analyst. Please dont speak like an AI model"
}
savedEconomist = [defaultEconomist]
savedBizAnalyst = [defaultBizAnalyst]
# savedHost = [defaultHost]
@app.route('/economist', methods=['POST'])
def generate_chat_economist():
conversation = [defaultEconomist]
history = request.json['history']
conversation.extend(history)
conversation.append({"role": "user", "content": request.json['prompt']})
response = openai.ChatCompletion.create(
model=modelGPT,
messages=conversation,
temperature=0.3,
)
res = response.choices[0]["message"]['content']
conversation.append({"role": "assistant", "content": res})
print(res)
data = jsonify(res)
return data
@app.route('/bizanalyst', methods=['POST'])
def generate_chat_bizanalyst():
conversation = [defaultBizAnalyst]
history = request.json['history']
conversation.extend(history)
conversation.append({"role": "user", "content": request.json['prompt']})
response = openai.ChatCompletion.create(
model=modelGPT,
messages=conversation,
temperature=0.3,
)
res = response.choices[0]["message"]['content']
conversation.append({"role": "assistant", "content": res})
print(res)
data = jsonify(res)
return data
@app.route('/host', methods=['POST'])
async def generate_chat_host():
conversation = [defaultHost]
history = request.json['history']
conversation.extend(history)
conversation.append({"role": "user", "content": request.json['prompt']})
response = await openai_async.chat_complete(
api_key=openai.api_key,
timeout=50,
payload={
"model": modelGPT,
"messages": conversation,
}
)
# res = response.choices[0]["message"]['content']
res = response.json()["choices"][0]["message"]["content"]
conversation.append({"role": "assistant", "content": res})
print(res)
data = jsonify(res)
return data
@app.route('/conversation', methods=['POST'])
async def interact_bots():
prompt = request.json['prompt']
bot_histories = [savedEconomist, savedBizAnalyst]
bot_names = ["Economist", "BizAnalyst"]
responses = []
for i, bot_history in enumerate(bot_histories):
bot_history.append({"role": "user", "content": prompt})
response = await openai_async.chat_complete(
openai.api_key,
timeout=20,
payload={
"model": modelGPT,
"messages": bot_history,
}
)
res = response.json()["choices"][0]["message"]["content"]
bot_history.append({"role": "assistant", "content": res})
# Get the previous messages from the user and other bots
user_messages = [m["content"]
for m in bot_history if m["role"] == "user"]
bot_messages = [m["content"]
for m in bot_history if m["role"] == "assistant"]
# Combine the previous messages into a single prompt
prompt = " ".join(user_messages + bot_messages + [res])
print(f"{bot_names[i]} bot: {res}")
responses.append({"user": bot_names[i], "message": res})
i = (i + 1) % len(bot_histories)
return jsonify(responses)
@app.route('/angry', methods=['POST'])
def generate_chat_response_angry():
conversation = [defaultAngry]
history = request.json['history']
conversation.extend(history)
conversation.append({"role": "user", "content": request.json['prompt']})
response = openai.ChatCompletion.create(
model=modelGPT,
messages=conversation,
temperature=0.3,
)
res = response.choices[0]["message"]['content']
conversation.append({"role": "assistant", "content": res})
data = jsonify(res)
return data
@app.route('/joy', methods=['POST'])
def generate_chat_response_joy():
conversation = [defaultJoy]
history = request.json['history']
conversation.extend(history)
conversation.append({"role": "user", "content": request.json['prompt']})
response = openai.ChatCompletion.create(
model=modelGPT,
messages=conversation,
temperature=0.3,
)
res = response.choices[0]["message"]['content']
conversation.append({"role": "assistant", "content": res})
data = jsonify(res)
return data
if __name__ == '__main__':
app.run()
# from flask import jsonify
# from flask import Flask, request, jsonify
# import openai
# from flask_cors import CORS
# import os
# from dotenv import load_dotenv
# import openai_async
# load_dotenv()
# app = Flask(__name__)
# CORS(app)
# openai.api_key = os.getenv("API_KEY")
# defaultDisgust = {
# "role": "system", "content": "You are a repulsive. Your name is DisgustGPT. Reply the user with the most disgusting response. And do not act like a bot or an AI Model."
# }
# defaultFear = {
# "role": "system", "content": "You are a fearful. Your name is FearGPT. Reply the user with a fearful response. And do not act like a bot or an AI Model. You also know other bots in the conversation as well as their roles, JoyGPT and AngryGPT. You sometimes respond to what they are saying. Speak less and Act like a human-being having a real conversation. No matter what the user says, you don't change your bahaviour."
# }
# savedDisgust = [defaultDisgust]
# savedFear = [defaultFear]
# modelGPT = "gpt-3.5-turbo"
# defaultEconomist = {
# "role": "system",
# "content": "You are Mark, professional economist. You are talking with a podcast host and a professional business analyst. You discuss with them. Please dont speak like an AI model"
# }
# defaultBizAnalyst = {
# "role": "system",
# "content": "You are John, professional business analyst. You are talking with a podcast host and a professional economist. You discuss with them. Please dont speak like an AI model"
# }
# savedEconomist = [defaultEconomist]
# savedBizAnalyst = [defaultBizAnalyst]
# @app.route('/disgust', methods=['POST'])
# def generate_chat_response_disgust():
# conversation = [defaultDisgust]
# history = request.json['history']
# conversation.extend(history)
# savedAngry.append({"role": "user", "content": request.json['prompt']})
# response = openai.ChatCompletion.create(
# model=modelGPT,
# messages=conversation,
# temperature=0.3,
# )
# res = response["choices"][0]["message"]['content']
# conversation.append({"role": "assistant", "content": res})
# data = jsonify(res)
# return data
# @app.route('/fear', methods=['POST'])
# def generate_chat_response_disgust():
# conversation = [defaultFear]
# history = request.json['history']
# conversation.extend(history)
# savedAngry.append({"role": "user", "content": request.json['prompt']})
# response = openai.ChatCompletion.create(
# model=modelGPT,
# messages=conversation,
# temperature=0.3,
# )
# res = response["choices"][0]["message"]['content']
# conversation.append({"role": "assistant", "content": res})
# data = jsonify(res)
# return data
# # @app.route('/interact', methods=['POST'])
# # async def interact_bots():
# # prompt = request.json['prompt']
# # conversation = []
# # bots = [savedAngry, savedJoy, savedDisgust]
# # bot_names = ["AngryGPT", "JoyGPT", "DisgustGPT"]
# # current_bot = 0
# # responses = {}
# # for bot in bots:
# # bot.append({"role": "user", "content": prompt})
# # response = await openai_async.chat_complete(
# # openai.api_key,
# # timeout=15,
# # payload={
# # "model": modelGPT,
# # "messages": bot,
# # }
# # )
# # res = response.json()["choices"][0]["message"]["content"]
# # bot.append({"role": "assistant", "content": res})
# # # Get the previous messages from the user and other bots
# # user_messages = [m["content"] for m in bot if m["role"] == "user"]
# # bot_messages = [m["content"] for m in bot if m["role"] == "assistant"]
# # # Combine the previous messages into a single prompt
# # prompt = " ".join(user_messages + bot_messages + [res])
# # print(f"{bot_names[current_bot]} bot: {res}")
# # responses[bot_names[current_bot]] = res
# # current_bot = (current_bot + 1) % len(bots)
# # return jsonify(responses)
# @app.route('/conversation', methods=['POST'])
# async def interact_bots():
# prompt = request.json['prompt']
# bot_histories = [savedEconomist, savedBizAnalyst]
# bot_names = ["Economist", "BizAnalyst"]
# responses = []
# for i, bot_history in enumerate(bot_histories):
# bot_history.append({"role": "user", "content": prompt})
# response = await openai_async.chat_complete(
# openai.api_key,
# timeout=20,
# payload={
# "model": modelGPT,
# "messages": bot_history,
# }
# )
# res = response.json()["choices"][0]["message"]["content"]
# bot_history.append({"role": "assistant", "content": res})
# # Get the previous messages from the user and other bots
# user_messages = [m["content"]
# for m in bot_history if m["role"] == "user"]
# bot_messages = [m["content"]
# for m in bot_history if m["role"] == "assistant"]
# # Combine the previous messages into a single prompt
# prompt = " ".join(user_messages + bot_messages + [res])
# print(f"{bot_names[i]} bot: {res}")
# responses.append({"user": bot_names[i], "message": res})
# i = (i + 1) % len(bot_histories)
# return jsonify(responses)
# # @app.route('/interact', methods=['POST'])
# # async def interact_bots():
# # prompt = request.json['prompt']
# # bot_histories = [savedAngry, savedJoy, savedFear]
# # bot_names = ["AngryGPT", "JoyGPT", "FearGPT"]
# # responses = {}
# # for i, bot_history in enumerate(bot_histories):
# # bot_history.append({"role": "user", "content": prompt})
# # response = await openai_async.chat_complete(
# # openai.api_key,
# # timeout=15,
# # payload={
# # "model": modelGPT,
# # "messages": bot_history,
# # }
# # )
# # res = response.json()["choices"][0]["message"]["content"]
# # bot_history.append({"role": "assistant", "content": res})
# # # Get the previous messages from the user and other bots
# # user_messages = [m["content"]
# # for m in bot_history if m["role"] == "user"]
# # bot_messages = [m["content"]
# # for m in bot_history if m["role"] == "assistant"]
# # # Combine the previous messages into a single prompt
# # prompt = " ".join(user_messages + bot_messages + [res])
# # print(f"{bot_names[i]} bot: {res}")
# # responses[bot_names[i]] = res
# # i = (i + 1) % len(bot_histories)
# # return jsonify(responses)
# @app.route('/create-bot', methods=['POST'])
# def create_bot():
# bot_name = request.json['bot_name']
# bot_history = request.json['bot_history']
# bot = {
# "role": "system",
# "content": bot_name
# }
# bot_history.append(bot)
# print(f"Created {bot_name} bot")
# return jsonify(bot_history)
# if __name__ == '__main__':
# app.run()
# # def generate_chat_response_fear(prompt):
# # savedFear.append({"role": "user", "content": prompt})
# # response = openai.ChatCompletion.create(
# # model=modelGPT,
# # messages=savedFear,
# # temperature=0.3,
# # )
# # res = response.choices[0]["message"]['content']
# # savedFear.append({"role": "assistant", "content": res})
# # return res
# # while True:
# # user_input = input("You: ")
# # print("AngryGPT:", generate_chat_response_angry(user_input))
# # print("JoyGPT:", generate_chat_response_joy(user_input))
# # print("DisgustGPT:", generate_chat_response_disgust(user_input))
# # # print("FearGPT:", generate_chat_response_fear(user_input))
# # if user_input == "<exit>":
# # break
# # if __name__ == '__main__':
# # app.run()
| [
"You are angry. Your name is AngryGPT. Reply the user with an angry response. And do not act like a bot or an AI Model. You also know other bots in the conversation as well as their roles, JoyGPT. You sometimes respond to what they are saying. Speak less and Act like a human-being having a real conversation. No matter what the user says, you don't change your bahaviour.",
"You are John, professional business analyst. You are talking with a podcast host and a professional economist. You discuss with them. Please dont speak like an AI Language model. You also understand Khmer Language and also be able to respond in Khmer language.",
" ",
"You are Mark, professional economist. You are talking with a podcast host and a professional business analyst. You discuss with them. Please dont speak like an AI Language model. You also understand Khmer Language and also be able to respond in Khmer language.",
"You are a cheerful. Your name is JoyGPT. Reply the user with the most joyful response. And do not act like a bot or an AI Model. You also know other bots in the conversation as well as their roles, AngryGPT. You sometimes respond to what they are saying. Speak less and Act like a human-being having a real conversation. No matter what the user says, you don't change your bahaviour."
] |
2024-01-10 | atulsinghrajpoot/langchain-doc-summary | all-in-one~pages~2_URL_Summary.py | import validators, streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import UnstructuredURLLoader
from langchain.chains.summarize import load_summarize_chain
from langchain.prompts import PromptTemplate
# Set API keys from session state
openai_api_key = st.session_state.openai_api_key
# Streamlit app
st.subheader('URL Summary')
url = st.text_input("Enter Source URL")
# If 'Summarize' button is clicked
if st.button("Summarize"):
# Validate inputs
if not openai_api_key:
st.error("Please provide the missing API keys in Settings.")
elif not url:
st.error("Please provide the URL.")
elif not validators.url(url):
st.error("Please enter a valid URL.")
else:
try:
with st.spinner("Please wait..."):
# Load URL data
loader = UnstructuredURLLoader(urls=[url], ssl_verify=False, headers={"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 13_5_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36"})
data = loader.load()
# Initialize the ChatOpenAI module, load and run the summarize chain
llm = ChatOpenAI(temperature=0, model='gpt-3.5-turbo', openai_api_key=openai_api_key)
prompt_template = """Write a summary of the following in 200-250 words:
{text}
"""
prompt = PromptTemplate(template=prompt_template, input_variables=["text"])
chain = load_summarize_chain(llm, chain_type="stuff", prompt=prompt)
summary = chain.run(data)
st.success(summary)
except Exception as e:
st.exception(f"Exception: {e}")
| [
"Write a summary of the following in 200-250 words:\n \n {text}\n\n "
] |
2024-01-10 | derekpodi/ai-sandbox | api_gpt.py | import openai
import os
import re
from rich.console import Console
import re
import getpass
import json
from jinja2 import Template
import sys
from tqdm.auto import tqdm
from google.colab import files
import ujson
| [] |
2024-01-10 | FyZyX/vocava | src~vocava~entity.py | import datetime
import time
import typing
from vocava import llm, storage
from vocava.llm import anthropic, mock
Language: typing.TypeAlias = str
LANGUAGES: dict[Language, dict[str, str]] = {
"๐บ๐ธ English": {"name": "English", "flag": "๐บ๐ธ", "code": "en"},
"๐ฉ๐ช German": {"name": "German", "flag": "๐ฉ๐ช", "code": "de"},
"๐ต๐ฑ Polish": {"name": "Polish", "flag": "๐ต๐ฑ", "code": "pl"},
"๐ช๐ธ Spanish": {"name": "Spanish", "flag": "๐ช๐ธ", "code": "es"},
"๐ฎ๐น Italian": {"name": "Italian", "flag": "๐ฎ๐น", "code": "it"},
"๐ซ๐ท French": {"name": "French", "flag": "๐ซ๐ท", "code": "fr"},
"๐ต๐น Portuguese": {"name": "Portuguese", "flag": "๐ต๐น", "code": "pt"},
"๐ฎ๐ณ Hindi": {"name": "Hindi", "flag": "๐ฎ๐ณ", "code": "hi"},
"๐ธ๐ฆ Arabic": {"name": "Arabic", "flag": "๐ธ๐ฆ", "code": "ar"},
"๐จ๐ณ Chinese": {"name": "Chinese", "flag": "๐จ๐ณ", "code": "zh"},
"๐ฌ๐ท Greek": {"name": "Greek", "flag": "๐ฌ๐ท", "code": "el"},
"๐ฎ๐ฑ Hebrew": {"name": "Hebrew", "flag": "๐ฎ๐ฑ", "code": "he"},
"๐ฏ๐ต Japanese": {"name": "Japanese", "flag": "๐ฏ๐ต", "code": "ja"},
"๐ฐ๐ท Korean": {"name": "Korean", "flag": "๐ฐ๐ท", "code": "ko"},
"๐ท๐บ Russian": {"name": "Russian", "flag": "๐ท๐บ", "code": "ru"},
"๐ธ๐ช Swedish": {"name": "Swedish", "flag": "๐ธ๐ช", "code": "sv"},
"๐ต๐ญ Tagalog": {"name": "Tagalog", "flag": "๐ต๐ญ", "code": "tl"},
"๐ป๐ณ Vietnamese": {"name": "Vietnamese", "flag": "๐ป๐ณ", "code": "vi"},
}
VOCALIZED_LANGUAGES = {
"๐ซ๐ท French",
"๐ฉ๐ช German",
"๐ฎ๐ณ Hindi",
"๐ฎ๐น Italian",
"๐ต๐ฑ Polish",
"๐ต๐น Portuguese",
"๐ช๐ธ Spanish",
}
class User:
def __init__(self, native_language: Language, target_language: Language,
fluency: int, db: storage.VectorStore):
self._native_language = native_language
self._target_language = target_language
self._fluency = fluency
self._db = db
self._languages: dict[Language, dict[str, str]] = LANGUAGES
def _get_language_name(self, language: Language):
return self._languages[language]["name"]
def native_language_name(self) -> str:
return self._get_language_name(self._native_language)
def target_language_name(self) -> str:
return self._get_language_name(self._target_language)
def _get_language_code(self, language: Language):
return self._languages[language]["code"]
def target_language_code(self) -> str:
return self._get_language_code(self._target_language)
def add_translation(self, phrase, translation):
self._db.save(storage.Document(
content=phrase,
metadata=dict(
language=self.target_language_name(),
native_language=self.native_language_name(),
fluency=self._fluency,
translation=translation,
timestamp=time.time(),
category="phrase",
)
))
def add_vocabulary_word(self, word: str, translations: str):
self._db.save(storage.Document(
content=word,
metadata=dict(
language=self.target_language_name(),
native_language=self.native_language_name(),
fluency=self._fluency,
translations=translations,
timestamp=time.time(),
category="vocabulary",
)
))
def add_grammar_mistake(self, phrase, correct, translation, explanation):
self._db.save(storage.Document(
content=phrase,
metadata=dict(
language=self.target_language_name(),
native_language=self.native_language_name(),
correct=correct,
translation=translation,
explanation=explanation,
timestamp=time.time(),
category="grammar-mistake",
)
))
def known_phrases(self):
results = self._db.query_by_metadata(
language=self.target_language_name(),
native_language=self.native_language_name(),
category="phrase",
)
docs = results["documents"]
metadatas = results["metadatas"]
phrases = []
for doc, metadata in zip(docs, metadatas):
item = {
self.target_language_name(): doc,
self.native_language_name(): metadata["translation"],
"timestamp": datetime.datetime.fromtimestamp(metadata["timestamp"]),
}
phrases.append(item)
return phrases
def known_vocabulary(self):
results = self._db.query_by_metadata(
language=self.target_language_name(),
native_language=self.native_language_name(),
category="vocabulary",
)
docs = results["documents"]
metadatas = results["metadatas"]
vocabulary = []
for doc, metadata in zip(docs, metadatas):
item = {
self.target_language_name(): doc,
self.native_language_name(): metadata["translations"],
"timestamp": datetime.datetime.fromtimestamp(metadata["timestamp"]),
}
vocabulary.append(item)
return vocabulary
def known_mistakes(self):
results = self._db.query_by_metadata(
language=self.target_language_name(),
native_language=self.native_language_name(),
category="grammar-mistake",
)
docs = results["documents"]
metadatas = results["metadatas"]
mistakes = []
for doc, metadata in zip(docs, metadatas):
item = {
"mistake": doc,
"correct": metadata["correct"],
"explanation": metadata["explanation"],
"translation": metadata["translation"],
"timestamp": datetime.datetime.fromtimestamp(metadata["timestamp"]),
}
mistakes.append(item)
return mistakes
def fluency(self):
return self._fluency
class Tutor:
def __init__(self, model: llm.LanguageModel):
self._model = model
def ask(self, prompt: str, max_tokens: int = 250):
return self._model.generate(prompt, max_tokens=max_tokens)
def get_tutor(model, key=None) -> Tutor:
if model == "Claude":
model = anthropic.Claude(api_key=key)
else:
model = mock.MockLanguageModel()
tutor = Tutor(model)
return tutor
| [] |
2024-01-10 | FyZyX/vocava | src~vocava~storage.py | import uuid
import chromadb
import cohere
import numpy
from chromadb.config import Settings
from chromadb.utils import embedding_functions
# compare them
def calculate_similarity(actual, expected, api_key, model='embed-multilingual-v2.0'):
co = cohere.Client(api_key)
docs = [actual.strip().strip(".").lower(), expected.strip().strip(".").lower()]
actual_embed, expected_embed = co.embed(docs, model=model).embeddings
norm_product = numpy.linalg.norm(actual_embed) * numpy.linalg.norm(expected_embed)
return numpy.dot(actual_embed, expected_embed) / norm_product
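# Illustrative usage (threshold value is an assumption mirroring the Arcade page):
#     score = calculate_similarity("bonjour", "hello", api_key=cohere_api_key)
#     is_correct = score >= 0.9
# The score is the cosine similarity of the two Cohere embeddings, so near-identical
# phrases approach 1.0.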
class Document:
def __init__(self, content: str, metadata: dict):
self._id = str(uuid.uuid4())
self._content = content
self._metadata = metadata
def id(self):
return self._id
def content(self):
return self._content
def metadata(self):
return self._metadata
class VectorStore:
_db = chromadb.Client(Settings(
chroma_db_impl="duckdb+parquet",
persist_directory=".chromadb",
))
def __init__(self, cohere_api_key):
self._cohere_api_key = cohere_api_key
self._embedding_function = embedding_functions.CohereEmbeddingFunction(
api_key=self._cohere_api_key,
model_name="embed-multilingual-v2.0",
)
self._collection: chromadb.api.Collection = self._db.get_or_create_collection(
name="vocava",
embedding_function=self._embedding_function,
)
def save(self, *documents: Document) -> bool:
if not self._collection:
raise ValueError("Must call connect before querying.")
self._collection.add(
ids=[doc.id() for doc in documents],
documents=[doc.content() for doc in documents],
metadatas=[doc.metadata() for doc in documents],
)
try:
self._db.persist()
except RuntimeError:
return False
return True
def query_by_metadata(self, **metadata):
return self._collection.get(
where=metadata
)
| [] |
2024-01-10 | FyZyX/vocava | src~vocava~pages~6_%F0%9F%95%B9_Arcade.py | import openai
import streamlit as st
from vocava import entity, service, storage
ANTHROPIC_API_KEY = st.secrets["anthropic_api_key"]
COHERE_API_KEY = st.secrets["cohere_api_key"]
def render_board(game_state):
# Generate markdown table header
markdown_table = "| | " + " | ".join(
[cat["name"] for cat in game_state["categories"]]) + " |\n"
markdown_table += "|" + "---|" * (len(game_state["categories"]) + 1) + "\n"
# Generate markdown table rows
for i in range(5): # 5 questions per category
markdown_table += f"| {200 * (i + 1)} |" # Start with the point value
for cat in game_state["categories"]:
# Check if the question has been answered
is_answered = cat["questions"][i]["is_answered"]
if is_answered:
markdown_table += " |" # Empty cell for answered questions
else:
markdown_table += " ? |" # Mark unanswered questions with a ?
markdown_table += "\n"
return markdown_table
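# Illustrative output: for two categories and no answered questions, render_board returns
# a markdown table along the lines of
#     | | History | Food |
#     |---|---|---|
#     | 200 | ? | ? |
#     | 400 | ? | ? |
# with answered cells left blank instead of showing "?".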
def play_jeopardy(user, tutor):
if "jeopardy.score" not in st.session_state:
st.session_state["jeopardy.score"] = 0
game = service.Service(
"arcade-jeopardy",
user=user,
tutor=tutor,
max_tokens=5_000,
)
if st.button("New Game"):
with st.spinner():
data = game.run(fluency=user.fluency())
st.session_state["jeopardy.board"] = data
st.session_state["jeopardy.score"] = 0
board = st.session_state.get("jeopardy.board")
if not board:
return
st.markdown(render_board(board))
st.divider()
cols = st.columns(2)
with cols[0]:
categories = [category["name"] for category in board["categories"]]
category = st.selectbox("Select Topic", options=categories)
with cols[1]:
points = st.number_input(
"Select Points", min_value=200, max_value=1000, step=200)
if st.button("Go"):
index = categories.index(category)
question = board["categories"][index]["questions"][points // 200 - 1]
st.session_state["jeopardy.question"] = question
if st.session_state.get("jeopardy.question"):
question = st.session_state["jeopardy.question"]
if question.get("is_answered"):
st.error("You've already answered this question!")
return
st.write(question["text"])
answer = st.text_input("Answer")
if not answer:
return
question["is_answered"] = True
similarity = storage.calculate_similarity(
answer, question["answer"], api_key=COHERE_API_KEY)
if similarity >= 0.9:
st.success(question["answer"])
st.session_state["jeopardy.score"] += points
else:
st.error(question["answer"])
del st.session_state["jeopardy.question"]
st.metric(label="Score", value=st.session_state["jeopardy.score"])
def play_pictionary(user, tutor):
game = service.Service(
"arcade-pictionary",
user=user,
tutor=tutor,
max_tokens=200,
)
if st.button("New Game"):
with st.spinner():
data = game.run(fluency=user.fluency())
translation = data["translation"]
drawing = data["drawing"]
prompt = f"A drawing of a {translation}. {drawing}"
with st.spinner():
response = openai.Image.create(
prompt=prompt,
n=1,
size="256x256",
)
image_url = response['data'][0]['url']
data.update(url=image_url, prompt=prompt)
st.session_state["pictionary"] = data
data = st.session_state.get("pictionary")
if not data:
return
word = data["word"]
translation = data["translation"]
url = data["url"]
cols = st.columns(2)
with cols[1]:
guess = st.text_input("Guess")
guessed = st.button("Guess")
if guessed:
similarity = storage.calculate_similarity(
guess, data["word"], api_key=COHERE_API_KEY)
if similarity > 0.9:
st.success(f"Good job! \"{word}\" is correct!")
else:
st.error(f"Sorry, the word was actually \"{word}\" ({translation})")
with cols[0]:
st.image(url)
if guessed:
st.caption(data["prompt"])
def play_madlibs(user, tutor):
game = service.Service(
"arcade-madlibs-create",
user=user,
tutor=tutor,
max_tokens=300,
)
if st.button("New Game"):
with st.spinner():
data = game.run(fluency=user.fluency())
st.session_state["mad-libs"] = data
data = st.session_state.get("mad-libs")
if not data:
return
text = data["text"]
blanks = data["blanks"]
answers = []
cols = st.columns(3)
for i, blank in enumerate(blanks):
with cols[i % 3]:
answers.append(st.text_input(blank, key=i))
if st.button("Submit"):
grader = service.Service(
"arcade-madlibs-grade",
user=user,
tutor=tutor,
max_tokens=650,
)
with st.spinner():
data = grader.run(
fluency=user.fluency(),
original=text,
words=answers,
)
st.markdown(data["output"])
st.info(data["translation"])
st.metric("Total Points", data["points"])
def play_odd_one_out(user, tutor):
view_native = st.sidebar.checkbox("Native View")
game = service.Service(
"arcade-odd-one-out",
user=user,
tutor=tutor,
native_mode=view_native,
max_tokens=650,
)
if st.button("New Game"):
with st.spinner():
data = game.run(fluency=user.fluency())
st.session_state["odd-one-out"] = data
data = st.session_state.get("odd-one-out")
language = game.current_language()
if not data or language not in data:
return
words = data[language]["words"]
theme = data[language]["theme"]
answer = data[language]["answer"]
cols = st.columns(3)
for i, word in enumerate(words):
with cols[i % 3]:
st.markdown(word)
guess = st.selectbox("Pick the :green[Odd One Out]!", options=words)
if st.button("Guess") and guess:
if guess.strip().lower() == answer.strip().lower():
st.success("Good job!")
st.info(theme)
else:
st.error("Sorry, that's not right.")
def main():
st.title('Arcade')
languages = list(entity.LANGUAGES)
default_native_lang = st.session_state.get("user.native_lang", languages[0])
default_target_lang = st.session_state.get("user.target_lang", languages[4])
default_fluency = st.session_state.get("user.fluency", 3)
native_language = st.sidebar.selectbox(
"Native Language", options=entity.LANGUAGES,
index=languages.index(default_native_lang),
)
target_language = st.sidebar.selectbox(
"Choose Language", options=entity.LANGUAGES,
index=languages.index(default_target_lang),
)
fluency = st.sidebar.slider("Fluency", min_value=1, max_value=10, step=1,
value=default_fluency)
store = storage.VectorStore(COHERE_API_KEY)
user = entity.User(
native_language=native_language,
target_language=target_language,
fluency=fluency,
db=store,
)
st.session_state["user.native_lang"] = native_language
st.session_state["user.target_lang"] = target_language
st.session_state["user.fluency"] = fluency
tutor = entity.get_tutor("Claude", key=ANTHROPIC_API_KEY)
games = [
"Pictionary",
"Odd One Out",
"MadLibs",
"Jeopardy",
]
game_name = st.selectbox("Select Game", options=games)
if game_name == "Jeopardy":
play_jeopardy(user, tutor)
elif game_name == "Pictionary":
play_pictionary(user, tutor)
elif game_name == "MadLibs":
play_madlibs(user, tutor)
elif game_name == "Odd One Out":
play_odd_one_out(user, tutor)
if __name__ == "__main__":
main()
| [
"A drawing of a PLACEHOLDER. PLACEHOLDER"
] |
2024-01-10 | olihock/answer-machine | search_ask~user_chatbot.py | import openai
import os
import tiktoken
openai.api_key = os.environ["OPENAI_API_KEY"]
GPT_MODEL = "gpt-3.5-turbo"
def num_tokens(text: str, model: str = GPT_MODEL) -> int:
"""
Calculate and return the number of tokens in a text.
"""
encoding = tiktoken.encoding_for_model(model)
return len(encoding.encode(text))
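# Illustrative example: token counts depend on the model's tokenizer, so the numbers are
# only indicative, e.g. num_tokens("Hello world") is 2 with gpt-3.5-turbo's encoding.
# engineer_prompt below relies on this count to keep the assembled prompt within token_budget.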
def engineer_prompt(class_name, question, similarities, token_budget=(4096-512)):
introduction = ("Use the below pages to answer the subsequent question. "
"If the answer cannot be found in the pages, "
"write \"Ich kann keine passende Antwort in den Dokumenten finden.\".")
prompt = introduction
for page in similarities['data']['Get'][class_name]:
page_text = page['text']
next_page_section = f'\n\nPage section: """\n{page_text}\n"""'
if num_tokens(prompt + next_page_section + question) < token_budget:
prompt += next_page_section
else:
break
return prompt + "\n\nQuestion: " + question
def answer_question(prompt: str):
messages = [
{"role": "system", "content": "You answer questions to the user."},
{"role": "user", "content": prompt}]
response = openai.ChatCompletion.create(model=GPT_MODEL, messages=messages, temperature=0.25)
answer = response.choices[0].message.content
return answer
| [
"You answer questions to the user.",
"\n\nPage section: \"\"\"\nPLACEHOLDER\n\"\"\""
] |
2024-01-10 | kamalchibrani-ai/login-signup-aws | pages~3_%20%F0%9F%A4%96_CourseBot.py | '''
1. The first query always comes from the prompt.
2. The next query should be the user's input, and the output should be based on the previous query's answer.
To achieve this we need to store and pass the previous answers.
'''
import streamlit as st
from streamlit_chat import message
import openai
import os
from dotenv import load_dotenv
from utils import logout_button_sidebar,switch_page_if_auth_isFalse,EmailUs
from streamlit_extras.switch_page_button import switch_page
import time as T
import datetime
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY')
logout_button_sidebar()
switch_page_if_auth_isFalse()
EmailUs()
print(st.session_state.result)
if st.session_state.result == None:
switch_page('Profile')
try:
if st.session_state.query is not None:
prompt = [
{
'role': 'assistant','content': 'I am an academic consultant and i will do the following and only provide crisp information about the asked query and take content into context'
},
{
"role": "user","content": f'{st.session_state.query}'
},
]
st.session_state['message_history'] = prompt
print(st.session_state.message_history)
with st.spinner('generating...'):
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=prompt,
temperature=0.5,
)
st.session_state.message_history.append({"role": "assistant", "content": f"{completion.choices[0].message['content']}"})
st.session_state.query = None
except Exception as e:
switch_page('Profile')
print(e)
message_history = st.session_state.message_history
print(message_history)
user_input = st.text_input('please insert a question')
user_input = user_input.lstrip()
print(user_input)
if len(user_input)>0:
print('inside user input',message_history)
message_history.append({"role": "user", "content": f"{user_input}"})
with st.spinner('generating...'):
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=message_history,
temperature=0.7,
)
last_generated_content = completion.choices[0].message['content']
message_history.append({"role": "assistant", "content": f"{last_generated_content}"})
print('message history after user input',message_history)
if len(message_history)>0:
for i in range(len(message_history)-1, 1, -2):
message(message_history[i]['content'],key=str(i))
message(message_history[i-1]['content'],is_user=True, key=str(i-1))
save_chat = st.download_button('save_chat',str(message_history),file_name=f'{st.session_state.username}_{datetime.datetime.now().date()}_chat_history.txt')
| [
"PLACEHOLDER",
"content",
"I am an academic consultant and i will do the following and only provide crisp information about the asked query and take content into context"
] |