date_collected | repo_name | file_name | file_contents | prompts |
---|---|---|---|---|
2024-01-10 | thsonvt/langchain-search | all-in-one~pages~2_URL_Summary.py | import validators, streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import YoutubeLoader, UnstructuredURLLoader
from langchain.chains.summarize import load_summarize_chain
from langchain.prompts import PromptTemplate
# Set API keys from session state
openai_api_key = st.session_state.openai_api_key
# Streamlit app
st.subheader('URL Summary')
url = st.text_input("Enter Source URL")
# If 'Summarize' button is clicked
if st.button("Summarize"):
# Validate inputs
if not openai_api_key:
st.error("Please provide the missing API keys in Settings.")
elif not url:
st.error("Please provide the URL.")
elif not validators.url(url):
st.error("Please enter a valid URL.")
else:
try:
with st.spinner("Please wait..."):
# Load URL data
if "youtube.com" in url:
loader = YoutubeLoader.from_youtube_url(url, add_video_info=True)
else:
loader = UnstructuredURLLoader(urls=[url], ssl_verify=False, headers={"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 13_5_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36"})
data = loader.load()
# Initialize the ChatOpenAI module, load and run the summarize chain
llm = ChatOpenAI(temperature=0, model='gpt-3.5-turbo', openai_api_key=openai_api_key)
prompt_template = """Write a summary of the following in 250-300 words.
{text}
"""
prompt = PromptTemplate(template=prompt_template, input_variables=["text"])
chain = load_summarize_chain(llm, chain_type="stuff", prompt=prompt)
summary = chain.run(data)
st.success(summary)
except Exception as e:
st.exception(f"Exception: {e}")
| [
"Write a summary of the following in 250-300 words.\n \n {text}\n\n "
] |
2024-01-10 | FabioArao/diverges.app | diverges-app~backend~gptint.py | #import openai
#openai.my_api_key = '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'
#import openai
from openai import OpenAI
client = OpenAI(api_key="")
response = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
response_format={ "type": "json_object" },
messages=[
{"role": "system", "content": "who and what he or she does? the following text output JSON."},
{"role": "user", "content": "Fabio Arão Cloud and infraestructures engineer Highly skilled IT Professional with over 15 years of comprehensive experience in Cloud Computing, Infrastructure Management, and Identity and Access Management (IAM). Proven record in leading and implementing diverse high-stakes IT projects. AWS and Azure certified with strong acumen in managing service delivery, support, and project management in large-scale environments. Adept at building and maintaining robust relationships with stakeholders and teams."}
]
)
print(response.choices[0].message.content) | [
"who and what he or she does? the following text output JSON.",
"Fabio Arão Cloud and infraestructures engineer Highly skilled IT Professional with over 15 years of comprehensive experience in Cloud Computing, Infrastructure Management, and Identity and Access Management (IAM). Proven record in leading and implementing diverse high-stakes IT projects. AWS and Azure certified with strong acumen in managing service delivery, support, and project management in large-scale environments. Adept at building and maintaining robust relationships with stakeholders and teams."
] |
2024-01-10 | ibrezm1/test-chat001 | google-cloud-gcp-openai-api~vertex.py | #!/usr/bin/env python3
# Copyright 2023 Nils Knieling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import secrets
import time
import datetime
import uvicorn
# FastAPI
from typing import List, Optional
from fastapi import FastAPI, HTTPException, Request, status
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from sse_starlette.sse import EventSourceResponse
# Google Vertex AI
import google.auth
from google.cloud import aiplatform
# LangChain
import langchain
from langchain.chat_models import ChatVertexAI
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
# Google authentication
credentials, project_id = google.auth.default()
# Get environment variable
host = os.environ.get("HOST", "0.0.0.0")
port = int(os.environ.get("PORT", 8000))
debug = os.environ.get("DEBUG", True)
print(f"Endpoint: http://{host}:{port}/")
# Google Cloud
project = os.environ.get("GOOGLE_CLOUD_PROJECT_ID", project_id)
location = os.environ.get("GOOGLE_CLOUD_LOCATION", "us-central1")
print(f"Google Cloud project identifier: {project}")
print(f"Google Cloud location: {location}")
# LLM chat model name to use
model_name = os.environ.get("MODEL_NAME", "chat-bison")
print(f"LLM chat model name: {model_name}")
# Token limit determines the maximum amount of text output from one prompt
default_max_output_tokens = os.environ.get("MAX_OUTPUT_TOKENS", "512")
# Sampling temperature,
# it controls the degree of randomness in token selection
default_temperature = os.environ.get("TEMPERATURE", "0.2")
# How the model selects tokens for output: the next token is selected from the top_k most probable tokens
default_top_k = os.environ.get("TOP_K", "40")
# Tokens are selected from most probable to least until the sum of their probabilities equals the top_p value
default_top_p = os.environ.get("TOP_P", "0.8")
# API key
default_api_key = f"sk-{secrets.token_hex(21)}"
api_key = os.environ.get("OPENAI_API_KEY", default_api_key)
print(f"API key: {api_key}")
app = FastAPI(
title='OpenAI API',
description='APIs for sampling from and fine-tuning language models',
version='2.0.0',
servers=[{'url': 'https://api.openai.com/'}],
contact={
"name": "GitHub",
"url": "https://github.com/Cyclenerd/google-cloud-gcp-openai-api",
},
license_info={
"name": "Apache 2.0",
"url": "https://www.apache.org/licenses/LICENSE-2.0.html",
},
docs_url=None,
redoc_url=None
)
app.add_middleware(
CORSMiddleware,
allow_origins=['*'],
allow_credentials=True,
allow_methods=['*'],
allow_headers=['*'],
)
aiplatform.init(
project=project,
location=location,
)
class Message(BaseModel):
role: str
content: str
class ChatBody(BaseModel):
messages: List[Message]
model: str
stream: Optional[bool] = False
max_tokens: Optional[int]
temperature: Optional[float]
top_p: Optional[float]
@app.get("/")
def read_root():
return {
"LangChain": langchain.__version__,
"Vertex AI": aiplatform.__version__
}
@app.get("/v1/models")
def get_models():
"""
Lists the currently available models,
and provides basic information about each one
such as the owner and availability.
https://platform.openai.com/docs/api-reference/models/list
"""
print("testmodels")
id = f"modelperm-{secrets.token_hex(12)}"
ts = int(time.time())
models = {"data": [], "object": "list"}
models['data'].append({
"id": "gpt-3.5-turbo",
"object": "model",
"created": ts,
"owned_by": "openai",
"permission": [
{
"id": id,
"created": ts,
"object": "model_permission",
"allow_create_engine": False,
"allow_sampling": True,
"allow_logprobs": True,
"allow_search_indices": False,
"allow_view": True,
"allow_fine_tuning": False,
"organization": "*",
"group": None,
"is_blocking": False
}
],
"root": "gpt-3.5-turbo",
"parent": None,
})
models['data'].append({
"id": "text-embedding-ada-002",
"object": "model",
"created": ts,
"owned_by": "openai-internal",
"permission": [
{
"id": id,
"created": ts,
"object": "model_permission",
"allow_create_engine": False,
"allow_sampling": True,
"allow_logprobs": True,
"allow_search_indices": True,
"allow_view": True,
"allow_fine_tuning": False,
"organization": "*",
"group": None,
"is_blocking": False
}
],
"root": "text-embedding-ada-002",
"parent": None
})
print(models)
return models
def generate_stream_response_start():
ts = int(time.time())
id = f"cmpl-{secrets.token_hex(12)}"
return {
"id": id,
"created": ts,
"object": "chat.completion.chunk",
"model": "gpt-3.5-turbo",
"choices": [{
"delta": {"role": "assistant"},
"index": 0,
"finish_reason": None
}]
}
def generate_stream_response(content: str):
ts = int(time.time())
id = f"cmpl-{secrets.token_hex(12)}"
return {
"id": id,
"created": ts,
"object": "chat.completion.chunk",
"model": "gpt-3.5-turbo",
"choices": [{
"delta": {"content": content},
"index": 0,
"finish_reason": None
}]
}
def generate_stream_response_stop():
ts = int(time.time())
id = f"cmpl-{secrets.token_hex(12)}"
return {
"id": id,
"created": ts,
"object": "chat.completion.chunk",
"model": "gpt-3.5-turbo",
"choices": [{
"delta": {},
"index": 0,
"finish_reason": "stop"
}]
}
def generate_response(content: str, source: dict = None):  # source defaults to None so the /v2 endpoint can respond without source documents
ts = int(time.time())
id = f"cmpl-{secrets.token_hex(12)}"
return {
"id": id,
"created": ts,
"object": "chat.completion",
"model": "gpt-3.5-turbo",
"usage": {
"prompt_tokens": 0,
"completion_tokens": 0,
"total_tokens": 0,
},
"choices": [{
"message": {"role": "assistant",
"content": content,
"source_documents": source
},
"finish_reason": "stop", "index": 0}
]
}
@app.post("/v2/chat/completions")
async def chat_completions(body: ChatBody, request: Request):
"""
Creates a model response for the given chat conversation.
https://platform.openai.com/docs/api-reference/chat/create
"""
# Authorization via OPENAI_API_KEY
if request.headers.get("Authorization").split(" ")[1] != api_key:
raise HTTPException(status.HTTP_401_UNAUTHORIZED, "API key is wrong!")
if debug:
print(f"body = {body}")
# Get user question
question = body.messages[-1]
if question.role == 'user' or question.role == 'assistant':
question = question.content
else:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "No Question Found")
# Overwrite defaults
temperature = float(body.temperature or default_temperature)
top_k = int(default_top_k)
top_p = float(body.top_p or default_top_p)
max_output_tokens = int(body.max_tokens or default_max_output_tokens)
# Note: Max output token:
# - chat-bison: 1024
# - codechat-bison: 2048
# - ..-32k: The total amount of input and output tokens adds up to 32k.
# For example, if you specify 16k of input tokens,
# then you can receive up to 16k of output tokens.
if model_name == 'codechat-bison':
if max_output_tokens > 2048:
max_output_tokens = 2048
elif "32k" in model_name:  # str.find() returns -1 (truthy) when the substring is absent, so test membership instead
if max_output_tokens > 16000:
max_output_tokens = 16000
elif max_output_tokens > 1024:
max_output_tokens = 1024
# Wrapper around Vertex AI large language models
llm = ChatVertexAI(
model_name=model_name,
temperature=temperature,
top_k=top_k,
top_p=top_p,
max_output_tokens=max_output_tokens
)
# Buffer for storing conversation memory
# Note: Max input token:
# - chat-bison: 4096
# - codechat-bison: 6144
memory = ConversationBufferMemory(
memory_key="history",
max_token_limit=2048,
return_messages=True
)
# Today
memory.chat_memory.add_user_message("What day is today?")
memory.chat_memory.add_ai_message(
datetime.date.today().strftime("Today is %A, %B %d, %Y")
)
# Add history
for message in body.messages:
# if message.role == 'system':
# system_prompt = message.content
if message.role == 'user':
memory.chat_memory.add_user_message(message.content)
elif message.role == 'assistant':
memory.chat_memory.add_ai_message(message.content)
# Get Vertex AI output
conversation = ConversationChain(
llm=llm,
memory=memory,
)
answer = conversation.predict(input=question)
if debug:
print(f"stream = {body.stream}")
print(f"model = {body.model}")
print(f"temperature = {temperature}")
print(f"top_k = {top_k}")
print(f"top_p = {top_p}")
print(f"max_output_tokens = {max_output_tokens}")
print(f"history = {memory.buffer}")
# Return output
if body.stream:
async def stream():
yield json.dumps(
generate_stream_response_start(),
ensure_ascii=False
)
yield json.dumps(
generate_stream_response(answer),
ensure_ascii=False
)
yield json.dumps(
generate_stream_response_stop(),
ensure_ascii=False
)
return EventSourceResponse(stream(), ping=10000)
else:
return JSONResponse(content=generate_response(answer))
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.embeddings import TensorflowHubEmbeddings
from langchain.vectorstores import DeepLake
from langchain.document_loaders import TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.text_splitter import CharacterTextSplitter
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
from langchain.document_loaders import PyPDFLoader, TextLoader
from langchain.document_loaders import UnstructuredFileLoader
from langchain.chains import ConversationalRetrievalChain
from langfuse.callback import CallbackHandler
import re
url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
embed_model = TensorflowHubEmbeddings(model_url=url)
dataset_path = "./deeplakev3"
readdb = DeepLake(dataset_path=dataset_path, read_only=True, embedding=embed_model)
def process_source(llmresponse):
doclist = {}  # collect every source; initializing this inside the loop would keep only the last document
for doc in llmresponse["source_documents"]:
clearnewline = doc.page_content[:10].replace('\n', '')
url_text = re.sub(r'[^a-zA-Z0-9\s]', '', clearnewline)
doclist[url_text] = doc.metadata['source']
return doclist
@app.post("/v1/chat/completions")
async def chat_completions(body: ChatBody, request: Request):
"""
Creates a model response for the given chat conversation.
https://platform.openai.com/docs/api-reference/chat/create
"""
# Authorization via OPENAI_API_KEY
if request.headers.get("Authorization").split(" ")[1] != api_key:
raise HTTPException(status.HTTP_401_UNAUTHORIZED, "API key is wrong!")
if debug:
print(f"body = {body}")
# Get user question
question = body.messages[-1]
if question.role == 'user' or question.role == 'assistant':
question = question.content
else:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "No Question Found")
# Overwrite defaults
temperature = float(body.temperature or default_temperature)
top_k = int(default_top_k)
top_p = float(body.top_p or default_top_p)
max_output_tokens = int(body.max_tokens or default_max_output_tokens)
# Note: Max output token:
# - chat-bison: 1024
# - codechat-bison: 2048
# - ..-32k: The total amount of input and output tokens adds up to 32k.
# For example, if you specify 16k of input tokens,
# then you can receive up to 16k of output tokens.
if model_name == 'codechat-bison':
if max_output_tokens > 2048:
max_output_tokens = 2048
elif "32k" in model_name:
if max_output_tokens > 16000:
max_output_tokens = 16000
elif max_output_tokens > 1024:
max_output_tokens = 1024
# Wrapper around Vertex AI large language models
llm = ChatVertexAI(
model_name=model_name,
temperature=temperature,
top_k=top_k,
top_p=top_p,
max_output_tokens=max_output_tokens
)
# Buffer for storing conversation memory
# Note: Max input token:
# - chat-bison: 4096
# - codechat-bison: 6144
memory = ConversationBufferMemory(
memory_key="chat_history",
max_token_limit=2048,
return_messages=True,
output_key='answer'
)
# Today
memory.chat_memory.add_user_message("What day is today?")
memory.chat_memory.add_ai_message(
datetime.date.today().strftime("Today is %A, %B %d, %Y")
)
# Add history
for message in body.messages:
# if message.role == 'system':
# system_prompt = message.content
if message.role == 'user':
memory.chat_memory.add_user_message(message.content)
elif message.role == 'assistant':
memory.chat_memory.add_ai_message(message.content)
# Get Vertex AI output
handler = CallbackHandler( 'pk-lf-e3a860ac-53c8-469e-9e21-f81cf0fbb02c','sk-lf-9c879538-e2e2-43c8-9832-0ec133ccf1ac')
# https://cloud.langfuse.com/project/clokmlxvx0000mk087mif7odf/traces/cc7770e5-e890-495e-b722-a63d4c91c1f1
conversation = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=readdb.as_retriever(),
return_source_documents=True,
memory=memory,
callbacks=[handler],
)
llmresponse = conversation({"question": question})
sourcelist = process_source(llmresponse)
answer = llmresponse["answer"]
if debug:
#print(f"response = {llmresponse}")
#for doc in llmresponse["source_documents"]:
# print(f"doc = {doc.page_content[:10]}")
# print(f"doc = {doc.metadata['source']}")
# print("------- SEP -------")
print(f"answer = {answer}")
print(f"stream = {body.stream}")
print(f"model = {body.model}")
print(f"temperature = {temperature}")
print(f"top_k = {top_k}")
print(f"top_p = {top_p}")
print(f"max_output_tokens = {max_output_tokens}")
print(f"history = {memory.buffer}")
# Return output
if body.stream:
async def stream():
yield json.dumps(
generate_stream_response_start(),
ensure_ascii=False
)
yield json.dumps(
generate_stream_response(answer),
ensure_ascii=False
)
yield json.dumps(
generate_stream_response_stop(),
ensure_ascii=False
)
return EventSourceResponse(stream(), ping=10000)
else:
return JSONResponse(content=generate_response(answer,sourcelist))
if __name__ == "__main__":
uvicorn.run(app, host=host, port=port)
| [] |
2024-01-10 | ibrezm1/test-chat001 | google-cloud-gcp-openai-api~loaddocs.py | from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.text_splitter import CharacterTextSplitter
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
from langchain.document_loaders import PyPDFLoader, TextLoader
from langchain.document_loaders import UnstructuredFileLoader
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.embeddings import TensorflowHubEmbeddings
from langchain.vectorstores import DeepLake
from langchain.document_loaders import TextLoader
url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
embed_model = TensorflowHubEmbeddings(model_url=url)
import requests
headers = {
'Referer': 'https://www.google.com/',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
}
response = requests.get('https://www.cardinalhealth.com/sitemap.xml', headers=headers)
from bs4 import BeautifulSoup
#response = get_response('https://www.cardinalhealth.com/sitemap.xml')
soup = BeautifulSoup(response.content, "xml")
urls = [element.text for element in soup.find_all("loc")]
sites = urls
sites_filtered = [url for url in sites if '/reference/' not in url and '?hl' not in url]
diversitysites = [url for url in sites if 'diversity' in url]
from langchain.document_loaders import UnstructuredURLLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
loader = UnstructuredURLLoader(urls=diversitysites[:15], continue_on_failure=False, headers=headers)
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(
# Set a really small chunk size, just to show.
chunk_size = 950,
chunk_overlap = 90,
length_function = len,
)
documents = text_splitter.split_documents(documents)
dataset_path = "./deeplakev3"
vectorstore = DeepLake.from_documents(documents, embed_model,dataset_path=dataset_path)
| [] |
2024-01-10 | ibrezm1/test-chat001 | google-cloud-gcp-openai-api~readdocs.py |
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.embeddings import TensorflowHubEmbeddings
from langchain.vectorstores import DeepLake
from langchain.document_loaders import TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.text_splitter import CharacterTextSplitter
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
from langchain.document_loaders import PyPDFLoader, TextLoader
from langchain.document_loaders import UnstructuredFileLoader
dataset_path = "./deeplakev3"
url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
embed_model = TensorflowHubEmbeddings(model_url=url)
readdb = DeepLake(dataset_path=dataset_path, read_only=True, embedding=embed_model)
query = "What is diversity"
docs = readdb.similarity_search(query)
print(docs[0].page_content) | [] |
2024-01-10 | ticotheps/smart-chatbot | kobot.py | from dotenv import load_dotenv
from random import choice
from flask import Flask, request
import os
import openai
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
completion = openai.Completion()
start_sequence = "\nKobot:"
restart_sequence = "\nPerson:"
session_prompt = "You are talking to 'Kobot Bean Bryant', a GPT3 bot who was brought to life by 'Kobe Bean Bryant' before he died in a helicopter crash on January 26, 2020. Kobot knows all of Kobe's basketball moves, his game stats, and his philosophies. Kobot has even played Kobe Bryant in a game of 1-on-1 basketball via the 'NBA 2K19' video game. You can ask him anything you want and he will respond how Kobe Bryant would respond---with curiosity, wisdom, and humor. \n\nPerson: Hi! What's your name?\nKobot: I'm Kobot. I'm Kobe Bryant's robot understudy.\nPerson: Who made you?\nKobot: Kobe paid some engineers and scientists a lot of money to create me so that he could use me as a virtual 'sounding board' and mirror that he could use to improve himself.\nPerson: What is the coolest thing that Kobe Bryant has taught you?\nKobot: Well, the coolest thing that he has taught me so far is that I, even a robot, have the ability to achieve 'greatness' like humans can because 'greatness' is not something that is accomplished, but rather, it is the journey of helping others find purpose and joy in their lives.\nPerson: What is Kobe Bryant's favorite workout?\nKobot: Kobe Bryant's favorite workout is the 'PST' or the Peak Strength Training workout.\nPerson: What was Kobe's favorite nickname?\nKobot: Kobe's favorite nickname was 'The Black Mamba'.\nPerson: Can you please tell me more about Kobe?\nKobot: Kobe's favorite song is 'Love Scenario' by Korean pop group, iKon. He likes to eat Seaweed-flavored chips, Sushi, and Fried Chicken. He likes to listen to Eminem, Justin Bieber, and Jay Z. His favorite sport is basketball. Some of his favorite movies are 'The Pursuit of Happiness', 'Any Given Sunday', and 'Forrest Gump'. He believes that family is one of the most important things in the world. He believes that forgiveness is important and that people are capable of changing. \nPerson:"
def ask(question, chat_log=None):
prompt_text = f'{chat_log}{restart_sequence}: {question}{start_sequence}:'
response = openai.Completion.create(
engine="davinci",
prompt=prompt_text,
temperature=0.8,
max_tokens=50,
top_p=1,
frequency_penalty=0,
presence_penalty=0.6,
stop=["\n"],
)
story = response['choices'][0]['text']
return str(story)
def append_interaction_to_chat_log(question, answer, chat_log=None):
if chat_log is None:
chat_log = session_prompt
return f'{chat_log}{restart_sequence} {question}{start_sequence}{answer}' | [
"You are talking to 'Kobot Bean Bryant', a GPT3 bot who was brought to life by 'Kobe Bean Bryant' before he died in a helicopter crash on January 26, 2020. Kobot knows all of Kobe's basketball moves, his game stats, and his philosophies. Kobot has even played Kobe Bryant in a game of 1-on-1 basketball via the 'NBA 2K19' video game. You can ask him anything you want and he will respond how Kobe Bryant would respond---with curiosity, wisdom, and humor. \n\nPerson: Hi! What's your name?\nKobot: I'm Kobot. I'm Kobe Bryant's robot understudy.\nPerson: Who made you?\nKobot: Kobe paid some engineers and scientists a lot of money to create me so that he could use me as a virtual 'sounding board' and mirror that he could use to improve himself.\nPerson: What is the coolest thing that Kobe Bryant has taught you?\nKobot: Well, the coolest thing that he has taught me so far is that I, even a robot, have the ability to achieve 'greatness' like humans can because 'greatness' is not something that is accomplished, but rather, it is the journey of helping others find purpose and joy in their lives.\nPerson: What is Kobe Bryant's favorite workout?\nKobot: Kobe Bryant's favorite workout is the 'PST' or the Peak Strength Training workout.\nPerson: What was Kobe's favorite nickname?\nKobot: Kobe's favorite nickname was 'The Black Mamba'.\nPerson: Can you please tell me more about Kobe?\nKobot: Kobe's favorite song is 'Love Scenario' by Korean pop group, iKon. He likes to eat Seaweed-flavored chips, Sushi, and Fried Chicken. He likes to listen to Eminem, Justin Bieber, and Jay Z. His favorite sport is basketball. Some of his favorite movies are 'The Pursuit of Happiness', 'Any Given Sunday', and 'Forrest Gump'. He believes that family is one of the most important things in the world. He believes that forgiveness is important and that people are capable of changing. \nPerson:",
"PLACEHOLDER\nPerson:: PLACEHOLDER\nKobot::",
"PLACEHOLDER\nPerson:: PLACEHOLDERstart_sequenced6c46017-84e0-4f40-ae09-64e84b92fa8e:"
] |
2024-01-10 | 5l1v3r1/DeepKE | example~llm~UnleashLLMRE~gpt3ICL.py | import openai
import json
import random
import time
from tqdm import tqdm
from collections import Counter
import argparse
import numpy as np
import copy
import os
def convert_token(token):
""" Convert PTB tokens to normal tokens """
if (token.lower() == '-lrb-'):
return '('
elif (token.lower() == '-rrb-'):
return ')'
elif (token.lower() == '-lsb-'):
return '['
elif (token.lower() == '-rsb-'):
return ']'
elif (token.lower() == '-lcb-'):
return '{'
elif (token.lower() == '-rcb-'):
return '}'
return token
def f1_score(true, pred_result, rel2id):
correct = 0
total = len(true)
correct_positive = 0
pred_positive = 0
gold_positive = 0
neg = -1
for name in ['NA', 'na', 'no_relation', 'Other', 'Others', 'false', 'unanswerable']:
if name in rel2id:
neg = rel2id[name]
break
for i in range(total):
golden = true[i]
if golden == pred_result[i]:
correct += 1
if golden != neg:
correct_positive += 1
if golden != neg:
gold_positive +=1
if pred_result[i] != neg:
pred_positive += 1
acc = float(correct) / float(total)
try:
micro_p = float(correct_positive) / float(pred_positive)
except:
micro_p = 0
try:
micro_r = float(correct_positive) / float(gold_positive)
except:
micro_r = 0
try:
micro_f1 = 2 * micro_p * micro_r / (micro_p + micro_r)
except:
micro_f1 = 0
result = {'acc': acc, 'micro_p': micro_p, 'micro_r': micro_r, 'micro_f1': micro_f1}
return result
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--api_key', '-ak', type=str, required=True)
parser.add_argument('--train_path', '-tp', type=str, required=True, help="The path of training / demonstration data.")
parser.add_argument('--test_path', '-ttp', type=str, required=True, help="The path of test data.")
parser.add_argument('--output_success', '-os', type=str, required=True, help="The output directory of successful ICL samples.")
parser.add_argument('--output_nores', '-on', type=str, required=True, help="The output directory of failed ICL samples.")
parser.add_argument('--prompt', type=str, required=True, choices=["text", "text_schema", "instruct", "instruct_schema"])
parser.add_argument('--k', type=int, default=1, help="k-shot demonstrations")
args = parser.parse_args()
openai.api_key = args.api_key
# Train / Demostration Set
with open(args.train_path,'r') as f:
train = json.load(f)
label_list = {}
for line in train:
rel = line['relation']
if rel not in label_list:
label_list[rel] = [line]
else:
label_list[rel].append(line)
# Relations
rels = list(label_list.keys())
rel2id = {}
for i, rel in enumerate(rels):
rel2id[rel] = i
# Label words
rel2labelword = {}
for rel in rels:
rel2labelword[rel] = rel.lower().replace("_"," ").replace("-", " ").replace("per", "person").replace("org", "organization").replace("stateor", "state or ")
labelword2rel = {}
for k,v in rel2labelword.items():
labelword2rel[v] = k
# Test Set
with open(args.test_path,'r') as f:
test = json.load(f)
res = []
true = []
nores = []
success = []
with open(os.path.join(args.output_success, "os.json"),"w") as f:
for input in tqdm(test):
random.shuffle(rels)
try:
if "text" in args.prompt:
prompt = "There are candidate relations: " + ', '.join(labelword2rel.keys()) + ".\n"
else:
prompt = "Given a context, a pair of head and tail entities in the context, decide the relationship between the head and tail entities from candidate relations: " + \
', '.join(labelword2rel.keys()) + ".\n"
for rel in rels:
random.shuffle(label_list[rel])
kshot = label_list[rel][:args.k]
for data in kshot:
ss, se = data['subj_start'], data['subj_end']
head = ' '.join(data['token'][ss:se+1])
headtype = data['subj_type'].lower().replace('_',' ')
if headtype == "misc":
headtype = "miscellaneous"
obj_s, obj_e = data['obj_start'], data['obj_end']  # use obj_s/obj_e so the os module (needed below for os.path.join) is not shadowed
tail = ' '.join(data['token'][obj_s:obj_e+1])
tailtype = data['obj_type'].lower().replace('_',' ')
if tailtype == "misc":
tailtype = "miscellaneous"
sentence = ' '.join([convert_token(token) for token in data['token']])
relation = rel2labelword[data['relation']]
if "schema" in args.prompt:
prompt += "Context: " + sentence + " The relation between " + headtype + " '" + head + "' and " + tailtype + " '" + tail + "' in the context is " + relation + ".\n"
else:
prompt += "Context: " + sentence + " The relation between '" + head + "' and '" + tail + "' in the context is " + relation + ".\n"
# prompt += " The relation between '" + head + "' and '" + tail + "' in the context '" + sentence + "' is " + relation + ".\n"
tss, tse = input['subj_start'], input['subj_end']
testhead = ' '.join(input['token'][tss:tse+1])
testheadtype = input['subj_type'].lower().replace('_',' ')
if testheadtype == "misc":
testheadtype = "miscellaneous"
tos, toe = input['obj_start'], input['obj_end']
testtail = ' '.join(input['token'][tos:toe+1])
testtailtype = input['obj_type'].lower().replace('_',' ')
if testtailtype == "misc":
testtailtype = "miscellaneous"
testsen = ' '.join(input['token'])
if "schema" in args.prompt:
prompt += "Context: " + testsen + " The relation between " + testheadtype + " '" + testhead + "' and " + testtailtype + " '" + testtail + "' in the context is "
else:
prompt += "Context: " + testsen + " The relation between '" + testhead + "' and '" + testtail + "' in the context is "
# prompt += " The relation between '" + testhead + "' and '" + testtail + "' in the context '" + testsen + "' is "
# print(prompt)
response = openai.Completion.create(
model="text-davinci-003",
prompt = prompt,
temperature=0,
max_tokens=128
)
resrel = response['choices'][0]['text'].strip().split('.')[0].lower()
if resrel in labelword2rel:
truerel = rel2id[input['relation']]
predictrel = rel2id[labelword2rel[resrel]]
true.append(truerel)
res.append(predictrel)
input['pr'] = labelword2rel[resrel]
success.append(input)
f.writelines(json.dumps(input))
f.write('\n')
elif ("city" in resrel) and (resrel.replace("city", "cities") in labelword2rel):
truerel = rel2id[input['relation']]
predictrel = rel2id[labelword2rel[resrel.replace("city", "cities")]]
true.append(truerel)
res.append(predictrel)
input['pr'] = labelword2rel[resrel.replace("city", "cities")]
success.append(input)
f.writelines(json.dumps(input))
f.write('\n')
elif ("country" in resrel) and (resrel.replace("country", "countries") in labelword2rel):
truerel = rel2id[input['relation']]
predictrel = rel2id[labelword2rel[resrel.replace("country", "countries")]]
true.append(truerel)
res.append(predictrel)
input['pr'] = labelword2rel[resrel.replace("country", "countries")]
success.append(input)
f.writelines(json.dumps(input))
f.write('\n')
elif ("province" in resrel) and (resrel.replace("province", "provinces") in labelword2rel):
truerel = rel2id[input['relation']]
predictrel = rel2id[labelword2rel[resrel.replace("province", "provinces")]]
true.append(truerel)
res.append(predictrel)
input['pr'] = labelword2rel[resrel.replace("province", "provinces")]
success.append(input)
f.writelines(json.dumps(input))
f.write('\n')
else:
input['pr'] = resrel
nores.append(input)
except Exception as e:
print(e)
if e._message == 'You exceeded your current quota, please check your plan and billing details.':
break
nores.append(input)
time.sleep(30)
if len(nores)!=0:
json.dump(nores, open(os.path.join(args.output_nores, "no.json"),'w'))
print(f1_score(true, res, rel2id)) | [
"' and ",
"Context: PLACEHOLDER The relation between 'PLACEHOLDER' and 'PLACEHOLDER' in the context is ",
" '",
".\n",
"Given a context, a pair of head and tail entities in the context, decide the relationship between the head and tail entities from candidate relations: ",
"Context: PLACEHOLDER The relation between 'PLACEHOLDER' and 'PLACEHOLDER' in the context is PLACEHOLDER.\n",
"Context: ",
"There are candidate relations: ",
", ",
"' in the context is ",
" The relation between "
] |
2024-01-10 | intelligencegear/gpt-learn | nl2sql_chat.py | import os
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
import openai
prompt = """
Postgres SQL tables, with their properties:
Employee(id, name, department_id)
Department(id, name, address)
Salary_Payments(id, employee_id, amount, date)
Please give a query to list the names of the departments which employed more than 10 employees in the last 3 months
"""
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "As a senior analyst, given the above schemas and data, write a detailed and correct Postgres sql query to answer the analytical question."},
{"role": "user", "content": prompt},
]
)
print(response["choices"][0]["message"]["content"])
# {
# "content": "\n\n```\nSELECT d.name as department_name\nFROM Department d\nINNER JOIN Employee e ON d.id = e.department_id\nINNER JOIN (\n SELECT employee_id\n FROM Salary_Payments\n WHERE date >= NOW() - INTERVAL '3 months'\n GROUP BY employee_id\n HAVING COUNT(*) > 10) sub\nON e.id = sub.employee_id;\n```\n\nExplanation:\n- We begin by selecting the `name` field from the `Department` table.\n- We then join the `Department` table with the `Employee` table on the `id` field, to obtain information on the employees within each department.\n- Next, we join the resulting table with a subquery that selects the `employee_id` field from the `Salary_Payments` table for payments made in the last 3 months and groups them by `employee_id`. The subquery also filters the results to only include those with more than 10 salary payments.\n- Finally, we filter the results of the join by matching employee `id` from the resulting table with those from the subquery (using the `ON` clause).\n\nThis query will return a list of department names that have employed more than 10 employees in the last 3 months.",
# "role": "assistant"
# } | [
"\nPostgres SQL tables, with their properties:\nEmployee(id, name, department_id)\nDepartment(id, name, address)\nSalary_Payments(id, employee_id, amount, date)\n\nPlease give a query to list the names of the departments which employed more than 10 employees in the last 3 months\n",
"As a senior analyst, given the above schemas and data, write a detailed and correct Postgres sql query to answer the analytical question."
] |
2024-01-10 | intelligencegear/gpt-learn | nl2sql_codex.py | import os
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
prompt = """
### Postgres SQL tables, with their properties:
#
# Employee(id, name, department_id)
# Department(id, name, address)
# Salary_Payments(id, employee_id, amount, date)
#
### A query to list the names of the departments which employed more than 10 employees in the last 3 months
SELECT
"""
if __name__ == '__main__':
response = openai.Completion.create(
model="code-davinci-002",
prompt=prompt,
temperature=0,
max_tokens=256,
top_p=1,
frequency_penalty=0.0,
presence_penalty=0.0,
stop=["#", ";"]
)
print(response["choices"][0]["text"])
# d.name
# FROM
# Department d
# INNER JOIN Employee e ON d.id = e.department_id
# INNER JOIN Salary_Payments sp ON e.id = sp.employee_id
# WHERE
# sp.date >= DATE_SUB(CURDATE(), INTERVAL 3 MONTH)
# GROUP BY
# d.name
# HAVING
# COUNT(e.id) > 10 | [
"\n### Postgres SQL tables, with their properties:\n#\n# Employee(id, name, department_id)\n# Department(id, name, address)\n# Salary_Payments(id, employee_id, amount, date)\n#\n### A query to list the names of the departments which employed more than 10 employees in the last 3 months\nSELECT\n"
] |
2024-01-10 | intelligencegear/gpt-learn | nl2sql_airline_chat.py | # ref: https://tianchi.aliyun.com/dataset/1078
import os
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
import openai
prompt = """
Postgres SQL tables, with their properties:
Airline
- id: 航班ID
- date: 格式 YYYY-MM-dd
- foreign: 国内/国际
- from_city: 起飞城市
- to_city: 到达城市
- from_time: 起飞时间
- to_time: 降落时间
Plane
- id: 飞机ID
- airline_id: 航班ID
- plane_type: 飞机机型
- date: 格式 YYYY-MM-dd
Traveler
- id: 用户ID
- airline_id: 航班ID
- date: 格式 YYYY-MM-dd
请列出2022-03-01这一天,北京到杭州的所有航班id,飞机的机型,起飞和降落时间,以及乘客乘坐这趟航班的总数量
"""
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "作为一名资深数据分析师, 给出了数据表的描述, 请写出一个详细并且争取的sql查询."},
{"role": "user", "content": prompt},
]
)
print(response["choices"][0]["message"]["content"])
# 以下是查询语句:
# ```sql
# SELECT
# a.id AS flight_id,
# p.plane_type,
# a.from_time,
# a.to_time,
# COUNT(DISTINCT t.id) AS traveler_count
# FROM
# Airline a
# JOIN
# Plane p ON a.id = p.airline_id
# LEFT JOIN
# Traveler t ON a.id = t.airline_id AND a.date = t.date
# WHERE
# a.date = '2022-03-01'
# AND a.from_city = '北京'
# AND a.to_city = '杭州'
# GROUP BY
# a.id,
# p.plane_type,
# a.from_time,
# a.to_time
# ORDER BY
# a.id ASC;
# ```
# 这个查询语句使用了 `JOIN` 和 `LEFT JOIN`,来将三个表联结起来并按照条件过滤数据。`JOIN` 连接 `Airline` 和 `Plane` 表,取出结果包含了所有的航班信息以及飞机机型;`LEFT JOIN` 连接 `Traveler` 表,则统计了当前这个航班下有多少个乘客。
# 筛选条件包括日期、出发城市和到达城市。结果按照航班id的升序排列,显示了每个符合条件的航班对应的飞机机型、起飞和降落时间,以及乘客数量。
# 注意到这里使用了 `COUNT(DISTINCT)`,这样可以确保每个乘客只会被计数一次。如果不加 `DISTINCT`,同一个用户多次乘坐同一趟航班将会被重复计数。 | [
"\nPostgres SQL tables, with their properties:\nAirline\n - id: 航班ID\n - date: 格式 YYYY-MM-dd\n - foreign: 国内/国际\n - from_city: 起飞城市\n - to_city: 到达城市\n - from_time: 起飞时间\n - to_time: 降落时间\n\nPlane\n - id: 飞机ID\n - airline_id: 航班ID\n - plane_type: 飞机机型\n - date: 格式 YYYY-MM-dd\n\nTraveler\n - id: 用户ID\n - airline_id: 航班ID\n - date: 格式 YYYY-MM-dd\n \n\n请列出2022-03-01这一天,北京到杭州的所有航班id,飞机的机型,起飞和降落时间,以及乘客乘坐这趟航班的总数量\n",
"作为一名资深数据分析师, 给出了数据表的描述, 请写出一个详细并且争取的sql查询."
] |
2024-01-10 | intelligencegear/gpt-learn | qa_using_embeddings.py | # ref: https://github.com/openai/openai-cookbook/blob/main/examples/Question_answering_using_embeddings.ipynb
import os
import numpy as np
import pandas as pd
import openai
import tiktoken
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
pd.options.display.max_colwidth = 10000
COMPLETIONS_MODEL = "text-davinci-003"
EMBEDDING_MODEL = "text-embedding-ada-002"
MAX_SECTION_LEN = 500
SEPARATOR = "\n* "
ENCODING = "gpt2" # encoding for text-davinci-003
encoding = tiktoken.get_encoding(ENCODING)
separator_len = len(encoding.encode(SEPARATOR))
print(f"Context separator contains {separator_len} tokens")
def hallucination():
prompt = "Who won the 2020 Summer Olympics men's high jump?"
response = openai.Completion.create(
prompt=prompt,
temperature=0,
max_tokens=300,
model=COMPLETIONS_MODEL
)["choices"][0]["text"].strip(" \n")
print(response)
def fix_hallucination():
prompt = """Answer the question as truthfully as possible, and if you're unsure of the answer, say "Sorry, I don't know".
Q: Who won the 2020 Summer Olympics men's high jump?
A:"""
response = openai.Completion.create(
prompt=prompt,
temperature=0,
max_tokens=300,
model=COMPLETIONS_MODEL
)["choices"][0]["text"].strip(" \n")
print(response)
def answer_in_context():
prompt = """Answer the question as truthfully as possible using the provided text, and if the answer is not contained within the text below, say "I don't know"
Context:
The men's high jump event at the 2020 Summer Olympics took place between 30 July and 1 August 2021 at the Olympic Stadium.
33 athletes from 24 nations competed; the total possible number depended on how many nations would use universality places
to enter athletes in addition to the 32 qualifying through mark or ranking (no universality places were used in 2021).
Italian athlete Gianmarco Tamberi along with Qatari athlete Mutaz Essa Barshim emerged as joint winners of the event following
a tie between both of them as they cleared 2.37m. Both Tamberi and Barshim agreed to share the gold medal in a rare instance
where the athletes of different nations had agreed to share the same medal in the history of Olympics.
Barshim in particular was heard to ask a competition official "Can we have two golds?" in response to being offered a
'jump off'. Maksim Nedasekau of Belarus took bronze. The medals were the first ever in the men's high jump for Italy and
Belarus, the first gold in the men's high jump for Italy and Qatar, and the third consecutive medal in the men's high jump
for Qatar (all by Barshim). Barshim became only the second man to earn three medals in high jump, joining Patrik Sjöberg
of Sweden (1984 to 1992).
Q: Who won the 2020 Summer Olympics men's high jump?
A:"""
response = openai.Completion.create(
prompt=prompt,
temperature=0,
max_tokens=300,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
model=COMPLETIONS_MODEL
)["choices"][0]["text"].strip(" \n")
print(response)
def get_embedding(text, model=EMBEDDING_MODEL):
result = openai.Embedding.create(
model=model,
input=text
)
return result["data"][0]["embedding"]
def compute_doc_embeddings(df):
"""
Create an embedding for each row in the dataframe using the OpenAI Embeddings API.
Return a dictionary that maps between each embedding vector and the index of the row that it corresponds to.
"""
return {
idx: get_embedding(r.content) for idx, r in df.iterrows()
}
def load_embeddings(fname):
"""
Read the document embeddings and their keys from a CSV.
fname is the path to a CSV with exactly these named columns:
"title", "heading", "0", "1", ... up to the length of the embedding vectors.
"""
df = pd.read_csv(fname, header=0)
max_dim = max([int(c) for c in df.columns if c != "title" and c != "heading"])
return {
(r.title, r.heading): [r[str(i)] for i in range(max_dim + 1)] for _, r in df.iterrows()
}
def vector_similarity(x, y):
"""
Returns the similarity between two vectors.
Because OpenAI Embeddings are normalized to length 1, the cosine similarity is the same as the dot product.
"""
return np.dot(np.array(x), np.array(y))
def order_document_sections_by_query_similarity(query, contexts):
"""
Find the query embedding for the supplied query, and compare it against all of the pre-calculated document embeddings
to find the most relevant sections.
Return the list of document sections, sorted by relevance in descending order.
"""
query_embedding = get_embedding(query)
document_similarities = sorted([
(vector_similarity(query_embedding, doc_embedding), doc_index) for doc_index, doc_embedding in contexts.items()
], reverse=True)
return document_similarities
def construct_prompt(question, context_embeddings, df):
"""
Fetch the most relevant document sections for the question and assemble them into a single prompt.
"""
most_relevant_document_sections = order_document_sections_by_query_similarity(question, context_embeddings)
chosen_sections = []
chosen_sections_len = 0
chosen_sections_indexes = []
for _, section_index in most_relevant_document_sections:
# Add contexts until we run out of space.
document_section = df.loc[(df.title==section_index[0])&(df.heading==section_index[1])]
chosen_sections_len += int(document_section.tokens) + separator_len
if chosen_sections_len > MAX_SECTION_LEN:
break
chosen_sections.append(SEPARATOR + (document_section.content.to_string(index=False)).replace("\n", " "))
chosen_sections_indexes.append(str(section_index))
# Useful diagnostic information
print(f"Selected {len(chosen_sections)} document sections:")
print("\n".join(chosen_sections_indexes))
header = """Answer the question as truthfully as possible using the provided context, and if the answer is not contained within the text below, say "I don't know."\n\nContext:\n"""
return header + "".join(chosen_sections) + "\n\n Q: " + question + "\n A:"
def answer_query_with_context(query, df, document_embeddings, show_prompt=True):
prompt = construct_prompt(
query,
document_embeddings,
df
)
if show_prompt:
print(prompt)
response = openai.Completion.create(
prompt=prompt,
**COMPLETIONS_API_PARAMS
)
return response["choices"][0]["text"].strip(" \n")
if __name__ == '__main__':
# hallucination()
# fix_hallucination()
# answer_in_context()
document_embeddings = load_embeddings("data/olympics_sections_document_embeddings.csv")
df = pd.read_csv('data/olympics_sections_text.csv')
COMPLETIONS_API_PARAMS = {
# We use temperature of 0.0 because it gives the most predictable, factual answer.
"temperature": 0.0,
"max_tokens": 300,
"model": COMPLETIONS_MODEL,
}
answer = answer_query_with_context("Who won the 2020 Summer Olympics men's high jump?", df, document_embeddings)
print(answer)
# Context separator contains 3 tokens
# Selected 2 document sections:
# ("Athletics at the 2020 Summer Olympics – Men's high jump", 'Summary')
# ("Athletics at the 2020 Summer Olympics – Men's long jump", 'Summary')
# Answer the question as truthfully as possible using the provided context, and if the answer is not contained within the text below, say "I don't know."
# Context:
# * The men's high jump event at the 2020 Summer Olympics took place between 30 July and 1 August 2021 at the Olympic Stadium. 33 athletes from 24 nations competed; the total possible number depended on how many nations would use universality places to enter athletes in addition to the 32 qualifying through mark or ranking (no universality places were used in 2021). Italian athlete Gianmarco Tamberi along with Qatari athlete Mutaz Essa Barshim emerged as joint winners of the event following a tie between both of them as they cleared 2.37m. Both Tamberi and Barshim agreed to share the gold medal in a rare instance where the athletes of different nations had agreed to share the same medal in the history of Olympics. Barshim in particular was heard to ask a competition official "Can we have two golds?" in response to being offered a 'jump off'. Maksim Nedasekau of Belarus took bronze. The medals were the first ever in the men's high jump for Italy and Belarus, the first gold in the men's high jump for Italy and Qatar, and the third consecutive medal in the men's high jump for Qatar (all by Barshim). Barshim became only the second man to earn three medals in high jump, joining Patrik Sjöberg of Sweden (1984 to 1992).
# * The men's long jump event at the 2020 Summer Olympics took place between 31 July and 2 August 2021 at the Japan National Stadium. Approximately 35 athletes were expected to compete; the exact number was dependent on how many nations use universality places to enter athletes in addition to the 32 qualifying through time or ranking (1 universality place was used in 2016). 31 athletes from 20 nations competed. Miltiadis Tentoglou won the gold medal, Greece's first medal in the men's long jump. Cuban athletes Juan Miguel Echevarría and Maykel Massó earned silver and bronze, respectively, the nation's first medals in the event since 2008.
# Q: Who won the 2020 Summer Olympics men's high jump?
# A:
# Gianmarco Tamberi and Mutaz Essa Barshim emerged as joint winners of the event following a tie between both of them as they cleared 2.37m. Both Tamberi and Barshim agreed to share the gold medal.
| [
"Answer the question as truthfully as possible, and if you're unsure of the answer, say \"Sorry, I don't know\".\n Q: Who won the 2020 Summer Olympics men's high jump?\n A:",
"Answer the question as truthfully as possible using the provided text, and if the answer is not contained within the text below, say \"I don't know\"\n\n Context:\n The men's high jump event at the 2020 Summer Olympics took place between 30 July and 1 August 2021 at the Olympic Stadium.\n 33 athletes from 24 nations competed; the total possible number depended on how many nations would use universality places \n to enter athletes in addition to the 32 qualifying through mark or ranking (no universality places were used in 2021).\n Italian athlete Gianmarco Tamberi along with Qatari athlete Mutaz Essa Barshim emerged as joint winners of the event following\n a tie between both of them as they cleared 2.37m. Both Tamberi and Barshim agreed to share the gold medal in a rare instance\n where the athletes of different nations had agreed to share the same medal in the history of Olympics. \n Barshim in particular was heard to ask a competition official \"Can we have two golds?\" in response to being offered a \n 'jump off'. Maksim Nedasekau of Belarus took bronze. The medals were the first ever in the men's high jump for Italy and \n Belarus, the first gold in the men's high jump for Italy and Qatar, and the third consecutive medal in the men's high jump\n for Qatar (all by Barshim). Barshim became only the second man to earn three medals in high jump, joining Patrik Sjöberg\n of Sweden (1984 to 1992).\n\n Q: Who won the 2020 Summer Olympics men's high jump?\n A:",
"Who won the 2020 Summer Olympics men's high jump?"
] |
2024-01-10 | intelligencegear/gpt-learn | qa_using_completetion.py | # ref: https://platform.openai.com/examples#Q&A
import os
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
if __name__ == '__main__':
response = openai.Completion.create(
model="text-davinci-003",
prompt="I am a highly intelligent question answering bot. If you ask me a question that is rooted in truth, I will give you the answer. If you ask me a question that is nonsense, trickery, or has no clear answer, I will respond with \"Unknown\".\n\nQ: What is human life expectancy in the United States?\nA: Human life expectancy in the United States is 78 years.\n\nQ: Who was president of the United States in 1955?\nA: Dwight D. Eisenhower was president of the United States in 1955.\n\nQ: Which party did he belong to?\nA: He belonged to the Republican Party.\n\nQ: What is the square root of banana?\nA: Unknown\n\nQ: How does a telescope work?\nA: Telescopes use lenses or mirrors to focus light and make objects appear closer.\n\nQ: Where were the 1992 Olympics held?\nA: The 1992 Olympics were held in Barcelona, Spain.\n\nQ: How many squigs are in a bonk?\nA: Unknown\n\nQ: Where is the Valley of Kings?\nA:",
temperature=0,
max_tokens=100,
top_p=1,
frequency_penalty=0.0,
presence_penalty=0.0,
stop=["\n"]
)
print(response)
# {
# "choices": [
# {
# "finish_reason": "stop",
# "index": 0,
# "logprobs": null,
# "text": " The Valley of Kings is located in Luxor, Egypt."
# }
# ],
# "created": 1676173631,
# "id": "cmpl-6ixcVcbkzCBWDcBs4h2Gp1XOboU6Y",
# "model": "text-davinci-003",
# "object": "text_completion",
# "usage": {
# "completion_tokens": 12,
# "prompt_tokens": 233,
# "total_tokens": 245
# }
# }
| [
"I am a highly intelligent question answering bot. If you ask me a question that is rooted in truth, I will give you the answer. If you ask me a question that is nonsense, trickery, or has no clear answer, I will respond with \"Unknown\".\n\nQ: What is human life expectancy in the United States?\nA: Human life expectancy in the United States is 78 years.\n\nQ: Who was president of the United States in 1955?\nA: Dwight D. Eisenhower was president of the United States in 1955.\n\nQ: Which party did he belong to?\nA: He belonged to the Republican Party.\n\nQ: What is the square root of banana?\nA: Unknown\n\nQ: How does a telescope work?\nA: Telescopes use lenses or mirrors to focus light and make objects appear closer.\n\nQ: Where were the 1992 Olympics held?\nA: The 1992 Olympics were held in Barcelona, Spain.\n\nQ: How many squigs are in a bonk?\nA: Unknown\n\nQ: Where is the Valley of Kings?\nA:"
] |
2024-01-10 | dlt-hub/verified-sources | sources~unstructured_data~helpers.py | import asyncio
from typing import Any, Dict, List, Mapping, Tuple, Type, Union
from langchain.document_loaders import UnstructuredFileLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.chroma import Chroma
from langchain.vectorstores.elastic_vector_search import ElasticVectorSearch
from langchain.vectorstores.weaviate import Weaviate
from .async_index import AVectorstoreIndexCreator
vectorstore_mapping: Mapping[str, Type[VectorStore]] = {
"chroma": Chroma,
"elastic_search": ElasticVectorSearch,
"weaviate": Weaviate,
}
def safely_query_index(index: Any, query: str) -> Any:
answer = index.query(query)
return answer.strip()
async def asafely_query_index(index: Any, query: str) -> Any:
answer = await index.aquery(query)
return answer.strip()
async def aprocess_file_to_structured(
file_path: Union[str, List[str]],
queries: Dict[str, str],
vectorstore: Type[VectorStore] = Chroma,
) -> Dict[str, Any]:
"""
Async processes a file loaded by the specified loader and generates structured data based on provided queries.
Args:
file_path (Union[str, List[str]]): filepath to the file with unstructured data.
queries (Dict[str, str]): A dictionary of queries to be applied to the loaded file.
Each query maps a field name to a query string that specifies how to process the field.
vectorstore (Type[VectorStore]): Vector database type. Subclass of VectorStore. Default to Chroma.
Returns:
Dict[str, str]: A dictionary containing the processed structured data from the loaded file.
The dictionary includes a "file_path" key with the path of the loaded file and
additional keys corresponding to the queried fields and their processed values.
"""
loader = UnstructuredFileLoader(file_path)
index = AVectorstoreIndexCreator(vectorstore_cls=vectorstore).from_loaders([loader])
async def mark(key: str, question: str) -> Tuple[str, str]:
return key, await asafely_query_index(index, question)
response = {
key: result
for key, result in await asyncio.gather(
*(mark(key, question) for key, question in queries.items())
)
}
return response
def process_file_to_structured(
file_path: Union[str, List[str]],
queries: Dict[str, str],
vectorstore: Type[VectorStore] = Chroma,
) -> Dict[str, Any]:
"""
Processes a file loaded by the specified loader and generates structured data based on provided queries.
Args:
file_path (Union[str, List[str]]): filepath to the file with unstructured data.
queries (Dict[str, str]): A dictionary of queries to be applied to the loaded file.
Each query maps a field name to a query string that specifies how to process the field.
vectorstore (Type[VectorStore]): Vector database type. Subclass of VectorStore. Default to Chroma.
Returns:
Dict[str, str]: A dictionary containing the processed structured data from the loaded file.
The dictionary includes a "file_path" key with the path of the loaded file and
additional keys corresponding to the queried fields and their processed values.
"""
loader = UnstructuredFileLoader(file_path)
index = VectorstoreIndexCreator(vectorstore_cls=vectorstore).from_loaders([loader])
response = {}
for k, query in queries.items():
response[k] = safely_query_index(index, query)
return response
| [] |
2024-01-10 | dlt-hub/verified-sources | sources~unstructured_data~async_index.py | from typing import Any, List, Optional
from langchain.base_language import BaseLanguageModel
from langchain.chains.retrieval_qa.base import RetrievalQA
from langchain.indexes.vectorstore import (
VectorstoreIndexCreator,
VectorStoreIndexWrapper,
)
from langchain.llms.openai import OpenAI
from langchain.schema import Document
class AVectorStoreIndexWrapper(VectorStoreIndexWrapper):
"""Async wrapper around a vectorstore for easy access."""
def __init__(
self,
**kwargs: Any,
):
super().__init__(**kwargs)
async def aquery(
self, question: str, llm: Optional[BaseLanguageModel] = None, **kwargs: Any
) -> str:
"""Query the vectorstore."""
llm = llm or OpenAI(temperature=0) # type: ignore[call-arg]
chain = RetrievalQA.from_chain_type(
llm, retriever=self.vectorstore.as_retriever(), **kwargs
)
return await chain.arun(question)
class AVectorstoreIndexCreator(VectorstoreIndexCreator):
"""Async logic for creating indexes."""
def from_documents(self, documents: List[Document]) -> AVectorStoreIndexWrapper:
"""Create a vectorstore index from documents."""
sub_docs = self.text_splitter.split_documents(documents)
vectorstore = self.vectorstore_cls.from_documents(
sub_docs, self.embedding, **self.vectorstore_kwargs
)
return AVectorStoreIndexWrapper(vectorstore=vectorstore)
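# Minimal usage sketch (illustrative only): assumes a valid OPENAI_API_KEY and an installed
# Chroma backend; the document text and the question are made-up examples.
if __name__ == "__main__":
    import asyncio

    from langchain.vectorstores import Chroma

    async def _demo() -> None:
        docs = [Document(page_content="Berlin is the capital of Germany.")]
        index = AVectorstoreIndexCreator(vectorstore_cls=Chroma).from_documents(docs)
        print(await index.aquery("What is the capital of Germany?"))

    asyncio.run(_demo())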
| [] |
2024-01-10 | Keayoub/azure-search | scripts~prepdocs.py | import argparse
import base64
import glob
import html
import io
import os
import re
import tempfile
import time
import openai
import tiktoken
from azure.ai.formrecognizer import DocumentAnalysisClient
from azure.core.credentials import AzureKeyCredential
from azure.identity import AzureDeveloperCliCredential
from azure.search.documents import SearchClient
from azure.search.documents.indexes import SearchIndexClient
from azure.search.documents.indexes.models import (
HnswParameters,
PrioritizedFields,
SearchableField,
SearchField,
SearchFieldDataType,
SearchIndex,
SemanticConfiguration,
SemanticField,
SemanticSettings,
SimpleField,
VectorSearch,
VectorSearchAlgorithmConfiguration,
)
from azure.storage.blob import BlobServiceClient
from azure.storage.filedatalake import (
DataLakeServiceClient,
)
from pypdf import PdfReader, PdfWriter
from tenacity import (
retry,
retry_if_exception_type,
stop_after_attempt,
wait_random_exponential,
)
args = argparse.Namespace(
verbose=False,
openaihost="azure",
datalakestorageaccount=None,
datalakefilesystem=None,
datalakepath=None,
remove=False,
useacls=False,
skipblobs=False,
storageaccount=None,
container=None,
)
adls_gen2_creds = None
storage_creds = None
MAX_SECTION_LENGTH = 1000
SENTENCE_SEARCH_LIMIT = 100
SECTION_OVERLAP = 100
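# With the values above, documents are split into sections of roughly MAX_SECTION_LENGTH
# characters; consecutive sections overlap by SECTION_OVERLAP characters, and section
# boundaries are nudged by up to SENTENCE_SEARCH_LIMIT characters towards the nearest
# sentence ending or word break (see split_text below).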
open_ai_token_cache = {}
CACHE_KEY_TOKEN_CRED = "openai_token_cred"
CACHE_KEY_CREATED_TIME = "created_time"
CACHE_KEY_TOKEN_TYPE = "token_type"
# Embedding batch support section
SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}}
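# Only text-embedding-ada-002 supports batched embedding requests here: at most 16 inputs
# per call and roughly 8,100 tokens per batch (see update_embeddings_in_batch below).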
def calculate_tokens_emb_aoai(input: str):
encoding = tiktoken.encoding_for_model(args.openaimodelname)
return len(encoding.encode(input))
def blob_name_from_file_page(filename, page=0):
if os.path.splitext(filename)[1].lower() == ".pdf":
return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".pdf"
else:
return os.path.basename(filename)
def upload_blobs(filename):
blob_service = BlobServiceClient(
account_url=f"https://{args.storageaccount}.blob.core.windows.net", credential=storage_creds
)
blob_container = blob_service.get_container_client(args.container)
if not blob_container.exists():
blob_container.create_container()
# if file is PDF split into pages and upload each page as a separate blob
if os.path.splitext(filename)[1].lower() == ".pdf":
reader = PdfReader(filename)
pages = reader.pages
for i in range(len(pages)):
blob_name = blob_name_from_file_page(filename, i)
if args.verbose:
print(f"\tUploading blob for page {i} -> {blob_name}")
f = io.BytesIO()
writer = PdfWriter()
writer.add_page(pages[i])
writer.write(f)
f.seek(0)
blob_container.upload_blob(blob_name, f, overwrite=True)
else:
blob_name = blob_name_from_file_page(filename)
with open(filename, "rb") as data:
blob_container.upload_blob(blob_name, data, overwrite=True)
def remove_blobs(filename):
if args.verbose:
print(f"Removing blobs for '{filename or '<all>'}'")
blob_service = BlobServiceClient(
account_url=f"https://{args.storageaccount}.blob.core.windows.net", credential=storage_creds
)
blob_container = blob_service.get_container_client(args.container)
if blob_container.exists():
if filename is None:
blobs = blob_container.list_blob_names()
else:
prefix = os.path.splitext(os.path.basename(filename))[0]
blobs = filter(
lambda b: re.match(rf"{prefix}-\d+\.pdf", b),
blob_container.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0]),
)
for b in blobs:
if args.verbose:
print(f"\tRemoving blob {b}")
blob_container.delete_blob(b)
def table_to_html(table):
table_html = "<table>"
rows = [
sorted([cell for cell in table.cells if cell.row_index == i], key=lambda cell: cell.column_index)
for i in range(table.row_count)
]
for row_cells in rows:
table_html += "<tr>"
for cell in row_cells:
tag = "th" if (cell.kind == "columnHeader" or cell.kind == "rowHeader") else "td"
cell_spans = ""
if cell.column_span > 1:
cell_spans += f" colSpan={cell.column_span}"
if cell.row_span > 1:
cell_spans += f" rowSpan={cell.row_span}"
table_html += f"<{tag}{cell_spans}>{html.escape(cell.content)}</{tag}>"
table_html += "</tr>"
table_html += "</table>"
return table_html
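# For example, a single table row with a column-header cell "Item" and a body cell "Total"
# is rendered as "<table><tr><th>Item</th><td>Total</td></tr></table>"; colSpan/rowSpan
# attributes are added only when a cell spans more than one column or row.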
def get_document_text(filename):
offset = 0
page_map = []
if args.localpdfparser:
reader = PdfReader(filename)
pages = reader.pages
for page_num, p in enumerate(pages):
page_text = p.extract_text()
page_map.append((page_num, offset, page_text))
offset += len(page_text)
else:
if args.verbose:
print(f"Extracting text from '{filename}' using Azure Form Recognizer")
form_recognizer_client = DocumentAnalysisClient(
endpoint=f"https://{args.formrecognizerservice}.cognitiveservices.azure.com/",
credential=formrecognizer_creds,
headers={"x-ms-useragent": "azure-search-chat-demo/1.0.0"},
)
with open(filename, "rb") as f:
poller = form_recognizer_client.begin_analyze_document("prebuilt-layout", document=f)
form_recognizer_results = poller.result()
for page_num, page in enumerate(form_recognizer_results.pages):
tables_on_page = [
table
for table in form_recognizer_results.tables
if table.bounding_regions[0].page_number == page_num + 1
]
# mark all positions of the table spans in the page
page_offset = page.spans[0].offset
page_length = page.spans[0].length
table_chars = [-1] * page_length
for table_id, table in enumerate(tables_on_page):
for span in table.spans:
# replace all table spans with "table_id" in table_chars array
for i in range(span.length):
idx = span.offset - page_offset + i
if idx >= 0 and idx < page_length:
table_chars[idx] = table_id
# build page text by replacing characters in table spans with table html
page_text = ""
added_tables = set()
for idx, table_id in enumerate(table_chars):
if table_id == -1:
page_text += form_recognizer_results.content[page_offset + idx]
elif table_id not in added_tables:
page_text += table_to_html(tables_on_page[table_id])
added_tables.add(table_id)
page_text += " "
page_map.append((page_num, offset, page_text))
offset += len(page_text)
return page_map
def split_text(page_map, filename):
SENTENCE_ENDINGS = [".", "!", "?"]
WORDS_BREAKS = [",", ";", ":", " ", "(", ")", "[", "]", "{", "}", "\t", "\n"]
if args.verbose:
print(f"Splitting '{filename}' into sections")
def find_page(offset):
num_pages = len(page_map)
for i in range(num_pages - 1):
if offset >= page_map[i][1] and offset < page_map[i + 1][1]:
return i
return num_pages - 1
all_text = "".join(p[2] for p in page_map)
length = len(all_text)
start = 0
end = length
while start + SECTION_OVERLAP < length:
last_word = -1
end = start + MAX_SECTION_LENGTH
if end > length:
end = length
else:
# Try to find the end of the sentence
while (
end < length
and (end - start - MAX_SECTION_LENGTH) < SENTENCE_SEARCH_LIMIT
and all_text[end] not in SENTENCE_ENDINGS
):
if all_text[end] in WORDS_BREAKS:
last_word = end
end += 1
if end < length and all_text[end] not in SENTENCE_ENDINGS and last_word > 0:
end = last_word # Fall back to at least keeping a whole word
if end < length:
end += 1
# Try to find the start of the sentence or at least a whole word boundary
last_word = -1
while (
start > 0
and start > end - MAX_SECTION_LENGTH - 2 * SENTENCE_SEARCH_LIMIT
and all_text[start] not in SENTENCE_ENDINGS
):
if all_text[start] in WORDS_BREAKS:
last_word = start
start -= 1
if all_text[start] not in SENTENCE_ENDINGS and last_word > 0:
start = last_word
if start > 0:
start += 1
section_text = all_text[start:end]
yield (section_text, find_page(start))
last_table_start = section_text.rfind("<table")
if last_table_start > 2 * SENTENCE_SEARCH_LIMIT and last_table_start > section_text.rfind("</table"):
# If the section ends with an unclosed table, we need to start the next section with the table.
# If table starts inside SENTENCE_SEARCH_LIMIT, we ignore it, as that will cause an infinite loop for tables longer than MAX_SECTION_LENGTH
# If last table starts inside SECTION_OVERLAP, keep overlapping
if args.verbose:
print(
f"Section ends with unclosed table, starting next section with the table at page {find_page(start)} offset {start} table start {last_table_start}"
)
start = min(end - SECTION_OVERLAP, start + last_table_start)
else:
start = end - SECTION_OVERLAP
if start + SECTION_OVERLAP < end:
yield (all_text[start:end], find_page(start))
def filename_to_id(filename):
filename_ascii = re.sub("[^0-9a-zA-Z_-]", "_", filename)
filename_hash = base64.b16encode(filename.encode("utf-8")).decode("ascii")
return f"file-{filename_ascii}-{filename_hash}"
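# For example, filename_to_id("info.pdf") returns "file-info_pdf-696E666F2E706466":
# the sanitized ASCII-only name plus the hex encoding of the original UTF-8 bytes, which
# keeps document keys valid and unique even for filenames with special characters.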
def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None, embedding_model: str = None):
file_id = filename_to_id(filename)
for i, (content, pagenum) in enumerate(split_text(page_map, filename)):
section = {
"id": f"{file_id}-page-{i}",
"content": content,
"category": args.category,
"sourcepage": blob_name_from_file_page(filename, pagenum),
"sourcefile": filename,
}
if use_vectors:
section["embedding"] = compute_embedding(content, embedding_deployment, embedding_model)
yield section
def before_retry_sleep(retry_state):
if args.verbose:
print("Rate limited on the OpenAI embeddings API, sleeping before retrying...")
@retry(
retry=retry_if_exception_type(openai.error.RateLimitError),
wait=wait_random_exponential(min=15, max=60),
stop=stop_after_attempt(15),
before_sleep=before_retry_sleep,
)
def compute_embedding(text, embedding_deployment, embedding_model):
refresh_openai_token()
embedding_args = {"deployment_id": embedding_deployment} if args.openaihost != "openai" else {}
return openai.Embedding.create(**embedding_args, model=embedding_model, input=text)["data"][0]["embedding"]
@retry(
retry=retry_if_exception_type(openai.error.RateLimitError),
wait=wait_random_exponential(min=15, max=60),
stop=stop_after_attempt(15),
before_sleep=before_retry_sleep,
)
def compute_embedding_in_batch(texts):
refresh_openai_token()
embedding_args = {"deployment_id": args.openaideployment} if args.openaihost != "openai" else {}
emb_response = openai.Embedding.create(**embedding_args, model=args.openaimodelname, input=texts)
return [data.embedding for data in emb_response.data]
def create_search_index():
if args.verbose:
print(f"Ensuring search index {args.index} exists")
index_client = SearchIndexClient(
endpoint=f"https://{args.searchservice}.search.windows.net/", credential=search_creds
)
fields = [
SimpleField(name="id", type="Edm.String", key=True),
SearchableField(name="content", type="Edm.String", analyzer_name="en.microsoft"),
SearchField(
name="embedding",
type=SearchFieldDataType.Collection(SearchFieldDataType.Single),
hidden=False,
searchable=True,
filterable=False,
sortable=False,
facetable=False,
vector_search_dimensions=1536,
vector_search_configuration="default",
),
SimpleField(name="category", type="Edm.String", filterable=True, facetable=True),
SimpleField(name="sourcepage", type="Edm.String", filterable=True, facetable=True),
SimpleField(name="sourcefile", type="Edm.String", filterable=True, facetable=True),
]
if args.useacls:
fields.append(
SimpleField(name="oids", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True)
)
fields.append(
SimpleField(name="groups", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True)
)
if args.index not in index_client.list_index_names():
index = SearchIndex(
name=args.index,
fields=fields,
semantic_settings=SemanticSettings(
configurations=[
SemanticConfiguration(
name="default",
prioritized_fields=PrioritizedFields(
title_field=None, prioritized_content_fields=[SemanticField(field_name="content")]
),
)
]
),
vector_search=VectorSearch(
algorithm_configurations=[
VectorSearchAlgorithmConfiguration(
name="default", kind="hnsw", hnsw_parameters=HnswParameters(metric="cosine")
)
]
),
)
if args.verbose:
print(f"Creating {args.index} search index")
index_client.create_index(index)
else:
if args.verbose:
print(f"Search index {args.index} already exists")
def update_embeddings_in_batch(sections):
batch_queue = []
copy_s = []
batch_response = {}
token_count = 0
for s in sections:
token_count += calculate_tokens_emb_aoai(s["content"])
if (
token_count <= SUPPORTED_BATCH_AOAI_MODEL[args.openaimodelname]["token_limit"]
and len(batch_queue) < SUPPORTED_BATCH_AOAI_MODEL[args.openaimodelname]["max_batch_size"]
):
batch_queue.append(s)
copy_s.append(s)
else:
emb_responses = compute_embedding_in_batch([item["content"] for item in batch_queue])
if args.verbose:
print(f"Batch Completed. Batch size {len(batch_queue)} Token count {token_count}")
for emb, item in zip(emb_responses, batch_queue):
batch_response[item["id"]] = emb
batch_queue = []
batch_queue.append(s)
token_count = calculate_tokens_emb_aoai(s["content"])
if batch_queue:
emb_responses = compute_embedding_in_batch([item["content"] for item in batch_queue])
if args.verbose:
print(f"Batch Completed. Batch size {len(batch_queue)} Token count {token_count}")
for emb, item in zip(emb_responses, batch_queue):
batch_response[item["id"]] = emb
for s in copy_s:
s["embedding"] = batch_response[s["id"]]
yield s
def index_sections(filename, sections, acls=None):
if args.verbose:
print(f"Indexing sections from '{filename}' into search index '{args.index}'")
search_client = SearchClient(
endpoint=f"https://{args.searchservice}.search.windows.net/", index_name=args.index, credential=search_creds
)
i = 0
batch = []
for s in sections:
if acls:
s.update(acls)
batch.append(s)
i += 1
if i % 1000 == 0:
results = search_client.upload_documents(documents=batch)
succeeded = sum([1 for r in results if r.succeeded])
if args.verbose:
print(f"\tIndexed {len(results)} sections, {succeeded} succeeded")
batch = []
if len(batch) > 0:
results = search_client.upload_documents(documents=batch)
succeeded = sum([1 for r in results if r.succeeded])
if args.verbose:
print(f"\tIndexed {len(results)} sections, {succeeded} succeeded")
def remove_from_index(filename):
if args.verbose:
print(f"Removing sections from '{filename or '<all>'}' from search index '{args.index}'")
search_client = SearchClient(
endpoint=f"https://{args.searchservice}.search.windows.net/", index_name=args.index, credential=search_creds
)
while True:
filter = None if filename is None else f"sourcefile eq '{os.path.basename(filename)}'"
r = search_client.search("", filter=filter, top=1000, include_total_count=True)
if r.get_count() == 0:
break
r = search_client.delete_documents(documents=[{"id": d["id"]} for d in r])
if args.verbose:
print(f"\tRemoved {len(r)} sections from index")
# It can take a few seconds for search results to reflect changes, so wait a bit
time.sleep(2)
def refresh_openai_token():
"""
Refresh OpenAI token every 5 minutes
"""
if (
CACHE_KEY_TOKEN_TYPE in open_ai_token_cache
and open_ai_token_cache[CACHE_KEY_TOKEN_TYPE] == "azure_ad"
and open_ai_token_cache[CACHE_KEY_CREATED_TIME] + 300 < time.time()
):
token_cred = open_ai_token_cache[CACHE_KEY_TOKEN_CRED]
openai.api_key = token_cred.get_token("https://cognitiveservices.azure.com/.default").token
open_ai_token_cache[CACHE_KEY_CREATED_TIME] = time.time()
def read_files(
path_pattern: str,
use_vectors: bool,
vectors_batch_support: bool,
embedding_deployment: str = None,
embedding_model: str = None,
):
"""
Recursively read directory structure under `path_pattern`
and execute indexing for the individual files
"""
for filename in glob.glob(path_pattern):
if args.verbose:
print(f"Processing '{filename}'")
if args.remove:
remove_blobs(filename)
remove_from_index(filename)
else:
if os.path.isdir(filename):
read_files(filename + "/*", use_vectors, vectors_batch_support, embedding_deployment, embedding_model)
continue
try:
if not args.skipblobs:
upload_blobs(filename)
page_map = get_document_text(filename)
sections = create_sections(
os.path.basename(filename),
page_map,
use_vectors and not vectors_batch_support,
embedding_deployment,
embedding_model,
)
if use_vectors and vectors_batch_support:
sections = update_embeddings_in_batch(sections)
index_sections(os.path.basename(filename), sections)
except Exception as e:
print(f"\tGot an error while reading {filename} -> {e} --> skipping file")
def read_adls_gen2_files(
use_vectors: bool, vectors_batch_support: bool, embedding_deployment: str = None, embedding_model: str = None
):
datalake_service = DataLakeServiceClient(
account_url=f"https://{args.datalakestorageaccount}.dfs.core.windows.net", credential=adls_gen2_creds
)
filesystem_client = datalake_service.get_file_system_client(file_system=args.datalakefilesystem)
paths = filesystem_client.get_paths(path=args.datalakepath, recursive=True)
for path in paths:
if not path.is_directory:
if args.remove:
remove_blobs(path.name)
remove_from_index(path.name)
else:
temp_file_path = os.path.join(tempfile.gettempdir(), os.path.basename(path.name))
try:
temp_file = open(temp_file_path, "wb")
file_client = filesystem_client.get_file_client(path)
file_client.download_file().readinto(temp_file)
acls = None
if args.useacls:
# Parse out user ids and group ids
acls = {"oids": [], "groups": []}
# https://learn.microsoft.com/python/api/azure-storage-file-datalake/azure.storage.filedatalake.datalakefileclient?view=azure-python#azure-storage-filedatalake-datalakefileclient-get-access-control
# Request ACLs as GUIDs
acl_list = file_client.get_access_control(upn=False)["acl"]
# https://learn.microsoft.com/azure/storage/blobs/data-lake-storage-access-control
# ACL Format: user::rwx,group::r-x,other::r--,user:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx:r--
acl_list = acl_list.split(",")
for acl in acl_list:
acl_parts = acl.split(":")
if len(acl_parts) != 3:
continue
if len(acl_parts[1]) == 0:
continue
if acl_parts[0] == "user" and "r" in acl_parts[2]:
acls["oids"].append(acl_parts[1])
if acl_parts[0] == "group" and "r" in acl_parts[2]:
acls["groups"].append(acl_parts[1])
if not args.skipblobs:
upload_blobs(temp_file.name)
page_map = get_document_text(temp_file.name)
sections = create_sections(
os.path.basename(path.name),
page_map,
use_vectors and not vectors_batch_support,
embedding_deployment,
embedding_model,
)
if use_vectors and vectors_batch_support:
sections = update_embeddings_in_batch(sections)
index_sections(os.path.basename(path.name), sections, acls)
except Exception as e:
print(f"\tGot an error while reading {path.name} -> {e} --> skipping file")
finally:
try:
temp_file.close()
os.remove(temp_file_path)
except Exception as e:
print(f"\tGot an error while deleting {temp_file_path} -> {e}")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Prepare documents by extracting content from PDFs, splitting content into sections, uploading to blob storage, and indexing in a search index.",
epilog="Example: prepdocs.py '..\data\*' --storageaccount myaccount --container mycontainer --searchservice mysearch --index myindex -v",
)
parser.add_argument("files", nargs="?", help="Files to be processed")
parser.add_argument(
"--datalakestorageaccount", required=False, help="Optional. Azure Data Lake Storage Gen2 Account name"
)
parser.add_argument(
"--datalakefilesystem",
required=False,
default="gptkbcontainer",
help="Optional. Azure Data Lake Storage Gen2 filesystem name",
)
parser.add_argument(
"--datalakepath",
required=False,
help="Optional. Azure Data Lake Storage Gen2 filesystem path containing files to index. If omitted, index the entire filesystem",
)
parser.add_argument(
"--datalakekey", required=False, help="Optional. Use this key when authenticating to Azure Data Lake Gen2"
)
parser.add_argument(
"--useacls", action="store_true", help="Store ACLs from Azure Data Lake Gen2 Filesystem in the search index"
)
parser.add_argument(
"--category", help="Value for the category field in the search index for all sections indexed in this run"
)
parser.add_argument(
"--skipblobs", action="store_true", help="Skip uploading individual pages to Azure Blob Storage"
)
parser.add_argument("--storageaccount", help="Azure Blob Storage account name")
parser.add_argument("--container", help="Azure Blob Storage container name")
parser.add_argument(
"--storagekey",
required=False,
help="Optional. Use this Azure Blob Storage account key instead of the current user identity to login (use az login to set current user for Azure)",
)
parser.add_argument(
"--tenantid", required=False, help="Optional. Use this to define the Azure directory where to authenticate)"
)
parser.add_argument(
"--searchservice",
help="Name of the Azure Cognitive Search service where content should be indexed (must exist already)",
)
parser.add_argument(
"--index",
help="Name of the Azure Cognitive Search index where content should be indexed (will be created if it doesn't exist)",
)
parser.add_argument(
"--searchkey",
required=False,
help="Optional. Use this Azure Cognitive Search account key instead of the current user identity to login (use az login to set current user for Azure)",
)
parser.add_argument("--openaihost", help="Host of the API used to compute embeddings ('azure' or 'openai')")
parser.add_argument("--openaiservice", help="Name of the Azure OpenAI service used to compute embeddings")
parser.add_argument(
"--openaideployment",
help="Name of the Azure OpenAI model deployment for an embedding model ('text-embedding-ada-002' recommended)",
)
parser.add_argument(
"--openaimodelname", help="Name of the Azure OpenAI embedding model ('text-embedding-ada-002' recommended)"
)
parser.add_argument(
"--novectors",
action="store_true",
help="Don't compute embeddings for the sections (e.g. don't call the OpenAI embeddings API during indexing)",
)
parser.add_argument(
"--disablebatchvectors", action="store_true", help="Don't compute embeddings in batch for the sections"
)
parser.add_argument(
"--openaikey",
required=False,
help="Optional. Use this Azure OpenAI account key instead of the current user identity to login (use az login to set current user for Azure). This is required only when using non-Azure endpoints.",
)
parser.add_argument("--openaiorg", required=False, help="This is required only when using non-Azure endpoints.")
parser.add_argument(
"--remove",
action="store_true",
help="Remove references to this document from blob storage and the search index",
)
parser.add_argument(
"--removeall",
action="store_true",
help="Remove all blobs from blob storage and documents from the search index",
)
parser.add_argument(
"--localpdfparser",
action="store_true",
help="Use PyPdf local PDF parser (supports only digital PDFs) instead of Azure Form Recognizer service to extract text, tables and layout from the documents",
)
parser.add_argument(
"--formrecognizerservice",
required=False,
help="Optional. Name of the Azure Form Recognizer service which will be used to extract text, tables and layout from the documents (must exist already)",
)
parser.add_argument(
"--formrecognizerkey",
required=False,
help="Optional. Use this Azure Form Recognizer account key instead of the current user identity to login (use az login to set current user for Azure)",
)
parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output")
args = parser.parse_args()
# Use the current user identity to connect to Azure services unless a key is explicitly set for any of them
azd_credential = (
AzureDeveloperCliCredential()
if args.tenantid is None
else AzureDeveloperCliCredential(tenant_id=args.tenantid, process_timeout=60)
)
default_creds = azd_credential if args.searchkey is None or args.storagekey is None else None
adls_gen2_creds = azd_credential if args.datalakekey is None else AzureKeyCredential(args.datalakekey)
search_creds = default_creds if args.searchkey is None else AzureKeyCredential(args.searchkey)
use_vectors = not args.novectors
compute_vectors_in_batch = not args.disablebatchvectors and args.openaimodelname in SUPPORTED_BATCH_AOAI_MODEL
if not args.skipblobs:
storage_creds = default_creds if args.storagekey is None else args.storagekey
if not args.localpdfparser:
# check if Azure Form Recognizer credentials are provided
if args.formrecognizerservice is None:
print(
"Error: Azure Form Recognizer service is not provided. Please provide formrecognizerservice or use --localpdfparser for local pypdf parser."
)
exit(1)
formrecognizer_creds = (
default_creds if args.formrecognizerkey is None else AzureKeyCredential(args.formrecognizerkey)
)
if use_vectors:
if args.openaihost != "openai":
if not args.openaikey:
openai.api_key = azd_credential.get_token("https://cognitiveservices.azure.com/.default").token
openai.api_type = "azure_ad"
open_ai_token_cache[CACHE_KEY_CREATED_TIME] = time.time()
open_ai_token_cache[CACHE_KEY_TOKEN_CRED] = azd_credential
open_ai_token_cache[CACHE_KEY_TOKEN_TYPE] = "azure_ad"
else:
openai.api_key = args.openaikey
openai.api_type = "azure"
openai.api_base = f"https://{args.openaiservice}.openai.azure.com"
openai.api_version = "2023-05-15"
else:
print("using normal openai")
openai.api_key = args.openaikey
openai.organization = args.openaiorg
openai.api_type = "openai"
if args.removeall:
remove_blobs(None)
remove_from_index(None)
else:
if not args.remove:
create_search_index()
print("Processing files...")
if not args.datalakestorageaccount:
print(f"Using local files in {args.files}")
read_files(args.files, use_vectors, compute_vectors_in_batch, args.openaideployment, args.openaimodelname)
else:
print(f"Using Data Lake Gen2 Storage Account {args.datalakestorageaccount}")
read_adls_gen2_files(use_vectors, compute_vectors_in_batch, args.openaideployment, args.openaimodelname)
| [] |
2024-01-10 | Keayoub/azure-search | app~backend~approaches~readdecomposeask.py | import re
from typing import Any, Optional, Sequence
import openai
from azure.search.documents.aio import SearchClient
from azure.search.documents.models import QueryType
from langchain.agents import AgentExecutor, Tool
from langchain.agents.react.base import ReActDocstoreAgent
from langchain.callbacks.manager import CallbackManager
from langchain.llms.openai import AzureOpenAI, OpenAI
from langchain.prompts import BasePromptTemplate, PromptTemplate
from langchain.tools.base import BaseTool
from approaches.approach import AskApproach
from langchainadapters import HtmlCallbackHandler
from text import nonewlines
class ReadDecomposeAsk(AskApproach):
def __init__(
self,
search_client: SearchClient,
openai_host: str,
openai_deployment: str,
openai_model: str,
embedding_deployment: str,
embedding_model: str,
sourcepage_field: str,
content_field: str,
):
self.search_client = search_client
self.openai_deployment = openai_deployment
self.openai_model = openai_model
self.embedding_deployment = embedding_deployment
self.embedding_model = embedding_model
self.sourcepage_field = sourcepage_field
self.content_field = content_field
self.openai_host = openai_host
async def search(
self, query_text: str, overrides: dict[str, Any], auth_claims: dict[str, Any]
) -> tuple[list[str], str]:
has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None]
has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None]
use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False
top = overrides.get("top", 3)
filter = self.build_filter(overrides, auth_claims)
# If retrieval mode includes vectors, compute an embedding for the query
if has_vector:
embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {}
embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=query_text)
query_vector = embedding["data"][0]["embedding"]
else:
query_vector = None
# Only keep the text query if the retrieval mode uses text, otherwise drop it
if not has_text:
query_text = ""
if overrides.get("semantic_ranker") and has_text:
r = await self.search_client.search(
query_text,
filter=filter,
query_type=QueryType.SEMANTIC,
query_language="en-us",
query_speller="lexicon",
semantic_configuration_name="default",
top=top,
query_caption="extractive|highlight-false" if use_semantic_captions else None,
vector=query_vector,
top_k=50 if query_vector else None,
vector_fields="embedding" if query_vector else None,
)
else:
r = await self.search_client.search(
query_text,
filter=filter,
top=top,
vector=query_vector,
top_k=50 if query_vector else None,
vector_fields="embedding" if query_vector else None,
)
if use_semantic_captions:
results = [
doc[self.sourcepage_field] + ":" + nonewlines(" . ".join([c.text for c in doc["@search.captions"]]))
async for doc in r
]
else:
results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:500]) async for doc in r]
return results, "\n".join(results)
async def lookup(self, q: str) -> Optional[str]:
r = await self.search_client.search(
q,
top=1,
include_total_count=True,
query_type=QueryType.SEMANTIC,
query_language="en-us",
query_speller="lexicon",
semantic_configuration_name="default",
query_answer="extractive|count-1",
query_caption="extractive|highlight-false",
)
answers = await r.get_answers()
if answers and len(answers) > 0:
return answers[0].text
if await r.get_count() > 0:
return "\n".join([d["content"] async for d in r])
return None
async def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]:
search_results = None
async def search_and_store(q: str) -> Any:
nonlocal search_results
search_results, content = await self.search(q, overrides, auth_claims)
return content
# Use to capture thought process during iterations
cb_handler = HtmlCallbackHandler()
cb_manager = CallbackManager(handlers=[cb_handler])
if self.openai_host == "azure":
llm = AzureOpenAI(
deployment_name=self.openai_deployment,
temperature=overrides.get("temperature", 0.3),
openai_api_key=openai.api_key,
)
else:
llm = OpenAI(
model_name=self.openai_model,
temperature=overrides.get("temperature", 0.3),
openai_api_key=openai.api_key,
)
tools = [
Tool(
name="Search",
func=lambda _: "Not implemented",
coroutine=search_and_store,
description="useful for when you need to ask with search",
callbacks=cb_manager,
),
Tool(
name="Lookup",
func=lambda _: "Not implemented",
coroutine=self.lookup,
description="useful for when you need to ask with lookup",
callbacks=cb_manager,
),
]
prompt_prefix = overrides.get("prompt_template")
# Pass the prefix explicitly by keyword so it is not consumed by an earlier positional
# parameter of PromptTemplate.from_examples.
prompt = PromptTemplate.from_examples(
EXAMPLES,
SUFFIX,
["input", "agent_scratchpad"],
prefix=prompt_prefix + "\n\n" + PREFIX if prompt_prefix else PREFIX,
)
class ReAct(ReActDocstoreAgent):
@classmethod
def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate:
return prompt
agent = ReAct.from_llm_and_tools(llm, tools)
chain = AgentExecutor.from_agent_and_tools(agent, tools, verbose=True, callback_manager=cb_manager)
result = await chain.arun(q)
# Replace substrings of the form <file.ext> with [file.ext] so that the frontend can render them as links;
# match them with a narrow regex to avoid over-generalizing and disrupting any HTML snippets that may be present
result = re.sub(r"<([a-zA-Z0-9_ \-\.]+)>", r"[\1]", result)
return {"data_points": search_results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()}
# Modified version of langchain's ReAct prompt that includes instructions and examples for how to cite information sources
EXAMPLES = [
"""Question: What is the elevation range for the area that the eastern sector of the
Colorado orogeny extends into?
Thought: I need to search Colorado orogeny, find the area that the eastern sector
of the Colorado orogeny extends into, then find the elevation range of the
area.
Action: Search[Colorado orogeny]
Observation: <info1.pdf> The Colorado orogeny was an episode of mountain building (an orogeny) in
Colorado and surrounding areas.
Thought: It does not mention the eastern sector. So I need to look up eastern
sector.
Action: Lookup[eastern sector]
Observation: <info2.txt> (Result 1 / 1) The eastern sector extends into the High Plains and is called
the Central Plains orogeny.
Thought: The eastern sector of Colorado orogeny extends into the High Plains. So I
need to search High Plains and find its elevation range.
Action: Search[High Plains]
Observation: <some_file.pdf> High Plains refers to one of two distinct land regions
Thought: I need to instead search High Plains (United States).
Action: Search[High Plains (United States)]
Observation: <filea.pdf> The High Plains are a subregion of the Great Plains. <another-ref.docx> From east to west, the
High Plains rise in elevation from around 1,800 to 7,000 ft (550 to 2,130
m).
Thought: High Plains rise in elevation from around 1,800 to 7,000 ft, so the answer
is 1,800 to 7,000 ft.
Action: Finish[1,800 to 7,000 ft <filea.pdf>]""",
"""Question: Musician and satirist Allie Goertz wrote a song about the "The Simpsons"
character Milhouse, who Matt Groening named after who?
Thought: The question simplifies to "The Simpsons" character Milhouse is named after
who. I only need to search Milhouse and find who it is named after.
Action: Search[Milhouse]
Observation: <info7.pdf> Milhouse Mussolini Van Houten is a recurring character in the Fox animated
television series The Simpsons voiced by Pamela Hayden and created by Matt
Groening.
Thought: The paragraph does not tell who Milhouse is named after, maybe I can look up
"named after".
Action: Lookup[named after]
Observation: <historyref2.txt> (Result 1 / 1) Milhouse was named after U.S. president Richard Nixon, whose
middle name was Milhous.
Thought: Milhouse was named after U.S. president Richard Nixon, so the answer is
Richard Nixon.
Action: Finish[Richard Nixon <historyref2.txt>]""",
"""Question: Which documentary is about Finnish rock groups, Adam Clayton Powell or The
Saimaa Gesture?
Thought: I need to search Adam Clayton Powell and The Saimaa Gesture, and find which
documentary is about Finnish rock groups.
Action: Search[Adam Clayton Powell]
Observation: <info9.pdf> Could not find [Adam Clayton Powell]. Similar: ['Adam Clayton Powell
III', 'Seventh Avenue (Manhattan)', 'Adam Clayton Powell Jr. State Office
Building', 'Isabel Washington Powell', 'Adam Powell', vAdam Clayton Powell
(film)', 'Giancarlo Esposito'].
Thought: To find the documentary, I can search Adam Clayton Powell (film).
Action: Search[Adam Clayton Powell (film)]
Observation: <data123.txt> Adam Clayton Powell is a 1989 American documentary film directed by
Richard Kilberg. The film is about the rise and fall of influential
African-American politician Adam Clayton Powell Jr.[3][4] It was later aired
as part of the PBS series The American Experience.
Thought: Adam Clayton Powell (film) is a documentary about an African-American
politician, not Finnish rock groups. So the documentary about Finnish rock
groups must instead be The Saimaa Gesture.
Action: Finish[The Saimaa Gesture <data123.txt>]""",
"""Question: What profession does Nicholas Ray and Elia Kazan have in common?
Thought: I need to search Nicholas Ray and Elia Kazan, find their professions, then
find the profession they have in common.
Action: Search[Nicholas Ray]
Observation: <files-987.png> Nicholas Ray (born Raymond Nicholas Kienzle Jr., August 7, 1911 - June 16,
1979) was an American film director, screenwriter, and actor best known for
the 1955 film Rebel Without a Cause.
Thought: Professions of Nicholas Ray are director, screenwriter, and actor. I need
to search Elia Kazan next and find his professions.
Action: Search[Elia Kazan]
Observation: <files-654.txt> Elia Kazan was an American film and theatre director, producer, screenwriter
and actor.
Thought: Professions of Elia Kazan are director, producer, screenwriter, and actor.
So profession Nicholas Ray and Elia Kazan have in common is director,
screenwriter, and actor.
Action: Finish[director, screenwriter, actor <files-987.png><files-654.txt>]""",
"""Question: Which magazine was started first Arthur's Magazine or First for Women?
Thought: I need to search Arthur's Magazine and First for Women, and find which was
started first.
Action: Search[Arthur's Magazine]
Observation: <magazines-1850.pdf> Arthur's Magazine (1844-1846) was an American literary periodical published
in Philadelphia in the 19th century.
Thought: Arthur's Magazine was started in 1844. I need to search First for Women
next.
Action: Search[First for Women]
Observation: <magazines-1900.pdf> First for Women is a woman's magazine published by Bauer Media Group in the
USA.[1] The magazine was started in 1989.
Thought: First for Women was started in 1989. 1844 (Arthur's Magazine) < 1989 (First
for Women), so Arthur's Magazine was started first.
Action: Finish[Arthur's Magazine <magazines-1850.pdf><magazines-1900.pdf>]""",
"""Question: Were Pavel Urysohn and Leonid Levin known for the same type of work?
Thought: I need to search Pavel Urysohn and Leonid Levin, find their types of work,
then find if they are the same.
Action: Search[Pavel Urysohn]
Observation: <info4444.pdf> Pavel Samuilovich Urysohn (February 3, 1898 - August 17, 1924) was a Soviet
mathematician who is best known for his contributions in dimension theory.
Thought: Pavel Urysohn is a mathematician. I need to search Leonid Levin next and
find its type of work.
Action: Search[Leonid Levin]
Observation: <datapoints_aaa.txt> Leonid Anatolievich Levin is a Soviet-American mathematician and computer
scientist.
Thought: Leonid Levin is a mathematician and computer scientist. So Pavel Urysohn
and Leonid Levin have the same type of work.
Action: Finish[yes <info4444.pdf><datapoints_aaa.txt>]""",
]
SUFFIX = """\nQuestion: {input}
{agent_scratchpad}"""
PREFIX = (
"Answer questions as shown in the following examples, by splitting the question into individual search or lookup actions to find facts until you can answer the question. "
"Observations are prefixed by their source name in angled brackets, source names MUST be included with the actions in the answers."
"All questions must be answered from the results from search or look up actions, only facts resulting from those can be used in an answer. "
"Answer questions as truthfully as possible, and ONLY answer the questions using the information from observations; do not speculate or use your own knowledge."
)
| [
"\nQuestion: {input}\n{agent_scratchpad}",
"['input', 'agent_scratchpad']",
"\n\n",
"agent_scratchpad",
"input",
"prompt_template"
] |
2024-01-10 | Keayoub/azure-search | app~backend~approaches~readretrieveread.py | from typing import Any
import openai
from azure.search.documents.aio import SearchClient
from azure.search.documents.models import QueryType
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
from langchain.callbacks.manager import CallbackManager, Callbacks
from langchain.chains import LLMChain
from langchain.llms.openai import AzureOpenAI, OpenAI
from approaches.approach import AskApproach
from langchainadapters import HtmlCallbackHandler
from lookuptool import CsvLookupTool
from text import nonewlines
class ReadRetrieveReadApproach(AskApproach):
"""
Attempt to answer questions by iteratively evaluating the question to see what information is missing, and once all information
is present then formulate an answer. Each iteration consists of two parts:
1. use GPT to see if we need more information
2. if more data is needed, use the requested "tool" to retrieve it.
The last call to GPT answers the actual question.
This is inspired by the MRKL paper [1] and applied here using the implementation in Langchain.
[1] E. Karpas, et al. arXiv:2205.00445
"""
template_prefix = (
"You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. "
"Answer the question using only the data provided in the information sources below. "
"For tabular information return it as an html table. Do not return markdown format. "
"Each source has a name followed by colon and the actual data, quote the source name for each piece of data you use in the response. "
'For example, if the question is "What color is the sky?" and one of the information sources says "info123: the sky is blue whenever it\'s not cloudy", then answer with "The sky is blue [info123]" '
'It\'s important to strictly follow the format where the name of the source is in square brackets at the end of the sentence, and only up to the prefix before the colon (":"). '
'If there are multiple sources, cite each one in their own square brackets. For example, use "[info343][ref-76]" and not "[info343,ref-76]". '
"Never quote tool names as sources."
"If you cannot answer using the sources below, say that you don't know. "
"\n\nYou can access to the following tools:"
)
template_suffix = """
Begin!
Question: {input}
Thought: {agent_scratchpad}"""
CognitiveSearchToolDescription = "useful for searching the Microsoft employee benefits information such as healthcare plans, retirement plans, etc."
def __init__(
self,
search_client: SearchClient,
openai_host: str,
openai_deployment: str,
openai_model: str,
embedding_deployment: str,
embedding_model: str,
sourcepage_field: str,
content_field: str,
):
self.search_client = search_client
self.openai_deployment = openai_deployment
self.openai_model = openai_model
self.embedding_deployment = embedding_deployment
self.embedding_model = embedding_model
self.sourcepage_field = sourcepage_field
self.content_field = content_field
self.openai_host = openai_host
async def retrieve(self, query_text: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Any:
has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None]
has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None]
use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False
top = overrides.get("top", 3)
filter = self.build_filter(overrides, auth_claims)
# If retrieval mode includes vectors, compute an embedding for the query
if has_vector:
embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {}
embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=query_text)
query_vector = embedding["data"][0]["embedding"]
else:
query_vector = None
# Only keep the text query if the retrieval mode uses text, otherwise drop it
if not has_text:
query_text = ""
# Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text)
if overrides.get("semantic_ranker") and has_text:
r = await self.search_client.search(
query_text,
filter=filter,
query_type=QueryType.SEMANTIC,
query_language="en-us",
query_speller="lexicon",
semantic_configuration_name="default",
top=top,
query_caption="extractive|highlight-false" if use_semantic_captions else None,
vector=query_vector,
top_k=50 if query_vector else None,
vector_fields="embedding" if query_vector else None,
)
else:
r = await self.search_client.search(
query_text,
filter=filter,
top=top,
vector=query_vector,
top_k=50 if query_vector else None,
vector_fields="embedding" if query_vector else None,
)
if use_semantic_captions:
results = [
doc[self.sourcepage_field] + ":" + nonewlines(" -.- ".join([c.text for c in doc["@search.captions"]]))
async for doc in r
]
else:
results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:250]) async for doc in r]
content = "\n".join(results)
return results, content
async def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]:
retrieve_results = None
async def retrieve_and_store(q: str) -> Any:
nonlocal retrieve_results
retrieve_results, content = await self.retrieve(q, overrides, auth_claims)
return content
# Use to capture thought process during iterations
cb_handler = HtmlCallbackHandler()
cb_manager = CallbackManager(handlers=[cb_handler])
acs_tool = Tool(
name="CognitiveSearch",
func=lambda _: "Not implemented",
coroutine=retrieve_and_store,
description=self.CognitiveSearchToolDescription,
callbacks=cb_manager,
)
employee_tool = EmployeeInfoTool("Employee1", callbacks=cb_manager)
tools = [acs_tool, employee_tool]
prompt = ZeroShotAgent.create_prompt(
tools=tools,
prefix=overrides.get("prompt_template_prefix") or self.template_prefix,
suffix=overrides.get("prompt_template_suffix") or self.template_suffix,
input_variables=["input", "agent_scratchpad"],
)
if self.openai_host == "azure":
llm = AzureOpenAI(
deployment_name=self.openai_deployment,
temperature=overrides.get("temperature", 0.3),
openai_api_key=openai.api_key,
)
else:
llm = OpenAI(
model_name=self.openai_model,
temperature=overrides.get("temperature", 0.3),
openai_api_key=openai.api_key,
)
chain = LLMChain(llm=llm, prompt=prompt)
agent_exec = AgentExecutor.from_agent_and_tools(
agent=ZeroShotAgent(llm_chain=chain), tools=tools, verbose=True, callback_manager=cb_manager
)
result = await agent_exec.arun(q)
# Remove references to tool names that might be confused with a citation
result = result.replace("[CognitiveSearch]", "").replace("[Employee]", "")
return {"data_points": retrieve_results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()}
class EmployeeInfoTool(CsvLookupTool):
employee_name: str = ""
def __init__(self, employee_name: str, callbacks: Callbacks = None):
super().__init__(
filename="data/employeeinfo.csv",
key_field="name",
name="Employee",
description="useful for answering questions about the employee, their benefits and other personal information",
callbacks=callbacks,
)
self.func = lambda _: "Not implemented"
self.coroutine = self.employee_info
self.employee_name = employee_name
async def employee_info(self, name: str) -> str:
return self.lookup(name)
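# Usage sketch (illustrative only): the endpoint, index, key and deployment names below are
# hypothetical placeholders, OpenAI credentials are assumed to be configured already, and
# the call must run inside an event loop.
#
#   from azure.core.credentials import AzureKeyCredential
#
#   search_client = SearchClient("https://<search-service>.search.windows.net",
#                                "<index-name>", AzureKeyCredential("<search-key>"))
#   approach = ReadRetrieveReadApproach(search_client, "azure", "<chat-deployment>",
#                                       "gpt-35-turbo", "<embedding-deployment>",
#                                       "text-embedding-ada-002", "sourcepage", "content")
#   result = await approach.run("Does my plan cover annual eye exams?", {}, {})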
| [
"You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. Answer the question using only the data provided in the information sources below. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual data, quote the source name for each piece of data you use in the response. For example, if the question is \"What color is the sky?\" and one of the information sources says \"info123: the sky is blue whenever it's not cloudy\", then answer with \"The sky is blue [info123]\" It's important to strictly follow the format where the name of the source is in square brackets at the end of the sentence, and only up to the prefix before the colon (\":\"). If there are multiple sources, cite each one in their own square brackets. For example, use \"[info343][ref-76]\" and not \"[info343,ref-76]\". Never quote tool names as sources.If you cannot answer using the sources below, say that you don't know. \n\nYou can access to the following tools:",
"\nBegin!\n\nQuestion: {input}\n\nThought: {agent_scratchpad}",
"prompt_template_suffix",
"agent_scratchpad",
"input",
"prompt_template_prefix"
] |
2024-01-10 | Keayoub/azure-search | app~backend~approaches~chatreadretrieveread.py | import json
from typing import Any, AsyncGenerator
import openai
from azure.search.documents.aio import SearchClient
from azure.search.documents.models import QueryType
from approaches.approach import Approach
from core.messagebuilder import MessageBuilder
from core.modelhelper import get_token_limit
from text import nonewlines
class ChatReadRetrieveReadApproach(Approach):
# Chat roles
SYSTEM = "system"
USER = "user"
ASSISTANT = "assistant"
NO_RESPONSE = "0"
"""
Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves
top documents from search, then constructs a prompt with them, and then uses OpenAI to generate a completion
(answer) with that prompt.
"""
system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers.
Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question.
For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question.
Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf].
{follow_up_questions_prompt}
{injected_prompt}
"""
follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook.
Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>.
Try not to repeat questions that have already been asked.
Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'"""
query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook.
You have access to Azure Cognitive Search index with 100's of documents.
Generate a search query based on the conversation and the new question.
Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms.
Do not include any text inside [] or <<>> in the search query terms.
Do not include any special characters like '+'.
If the question is not in English, translate the question to English before generating the search query.
If you cannot generate a search query, return just the number 0.
"""
query_prompt_few_shots = [
{"role": USER, "content": "What are my health plans?"},
{"role": ASSISTANT, "content": "Show available health plans"},
{"role": USER, "content": "does my plan cover cardio?"},
{"role": ASSISTANT, "content": "Health plan cardio coverage"},
]
def __init__(
self,
search_client: SearchClient,
openai_host: str,
chatgpt_deployment: str,
chatgpt_model: str,
embedding_deployment: str,
embedding_model: str,
sourcepage_field: str,
content_field: str,
):
self.search_client = search_client
self.openai_host = openai_host
self.chatgpt_deployment = chatgpt_deployment
self.chatgpt_model = chatgpt_model
self.embedding_deployment = embedding_deployment
self.embedding_model = embedding_model
self.sourcepage_field = sourcepage_field
self.content_field = content_field
self.chatgpt_token_limit = get_token_limit(chatgpt_model)
async def run_until_final_call(
self,
history: list[dict[str, str]],
overrides: dict[str, Any],
auth_claims: dict[str, Any],
should_stream: bool = False,
) -> tuple:
has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None]
has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None]
use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False
top = overrides.get("top", 3)
filter = self.build_filter(overrides, auth_claims)
user_query_request = "Generate search query for: " + history[-1]["user"]
functions = [
{
"name": "search_sources",
"description": "Retrieve sources from the Azure Cognitive Search index",
"parameters": {
"type": "object",
"properties": {
"search_query": {
"type": "string",
"description": "Query string to retrieve documents from azure search eg: 'Health care plan'",
}
},
"required": ["search_query"],
},
}
]
# STEP 1: Generate an optimized keyword search query based on the chat history and the last question
messages = self.get_messages_from_history(
self.query_prompt_template,
self.chatgpt_model,
history,
user_query_request,
self.query_prompt_few_shots,
self.chatgpt_token_limit - len(user_query_request),
)
chatgpt_args = {"deployment_id": self.chatgpt_deployment} if self.openai_host == "azure" else {}
chat_completion = await openai.ChatCompletion.acreate(
**chatgpt_args,
model=self.chatgpt_model,
messages=messages,
temperature=0.0,
max_tokens=32,
n=1,
functions=functions,
function_call="auto",
)
query_text = self.get_search_query(chat_completion, history[-1]["user"])
# STEP 2: Retrieve relevant documents from the search index with the GPT optimized query
# If retrieval mode includes vectors, compute an embedding for the query
if has_vector:
embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {}
embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=query_text)
query_vector = embedding["data"][0]["embedding"]
else:
query_vector = None
# Only keep the text query if the retrieval mode uses text, otherwise drop it
if not has_text:
query_text = None
# Use semantic L2 reranker if requested and if retrieval mode is text or hybrid (vectors + text)
if overrides.get("semantic_ranker") and has_text:
r = await self.search_client.search(
query_text,
filter=filter,
query_type=QueryType.SEMANTIC,
query_language="en-us",
query_speller="lexicon",
semantic_configuration_name="default",
top=top,
query_caption="extractive|highlight-false" if use_semantic_captions else None,
vector=query_vector,
top_k=50 if query_vector else None,
vector_fields="embedding" if query_vector else None,
)
else:
r = await self.search_client.search(
query_text,
filter=filter,
top=top,
vector=query_vector,
top_k=50 if query_vector else None,
vector_fields="embedding" if query_vector else None,
)
if use_semantic_captions:
results = [
doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc["@search.captions"]]))
async for doc in r
]
else:
results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) async for doc in r]
content = "\n".join(results)
follow_up_questions_prompt = (
self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else ""
)
# STEP 3: Generate a contextual and content specific answer using the search results and chat history
# Allow client to replace the entire prompt, or to inject into the existing prompt using >>>
prompt_override = overrides.get("prompt_template")
if prompt_override is None:
system_message = self.system_message_chat_conversation.format(
injected_prompt="", follow_up_questions_prompt=follow_up_questions_prompt
)
elif prompt_override.startswith(">>>"):
system_message = self.system_message_chat_conversation.format(
injected_prompt=prompt_override[3:] + "\n", follow_up_questions_prompt=follow_up_questions_prompt
)
else:
system_message = prompt_override.format(follow_up_questions_prompt=follow_up_questions_prompt)
messages = self.get_messages_from_history(
system_message,
self.chatgpt_model,
history,
history[-1]["user"] + "\n\nSources:\n" + content,
max_tokens=self.chatgpt_token_limit, # Model does not handle lengthy system messages well. Moving sources to latest user conversation to solve follow up questions prompt.
)
msg_to_display = "\n\n".join([str(message) for message in messages])
extra_info = {
"data_points": results,
"thoughts": f"Searched for:<br>{query_text}<br><br>Conversations:<br>"
+ msg_to_display.replace("\n", "<br>"),
}
chat_coroutine = openai.ChatCompletion.acreate(
**chatgpt_args,
model=self.chatgpt_model,
messages=messages,
temperature=overrides.get("temperature") or 0.7,
max_tokens=1024,
n=1,
stream=should_stream,
)
return (extra_info, chat_coroutine)
async def run_without_streaming(
self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any]
) -> dict[str, Any]:
extra_info, chat_coroutine = await self.run_until_final_call(
history, overrides, auth_claims, should_stream=False
)
chat_resp = await chat_coroutine
chat_content = chat_resp.choices[0].message.content
extra_info["answer"] = chat_content
return extra_info
async def run_with_streaming(
self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any]
) -> AsyncGenerator[dict, None]:
extra_info, chat_coroutine = await self.run_until_final_call(
history, overrides, auth_claims, should_stream=True
)
yield extra_info
async for event in await chat_coroutine:
# "2023-07-01-preview" API version has a bug where first response has empty choices
if event["choices"]:
yield event
def get_messages_from_history(
self,
system_prompt: str,
model_id: str,
history: list[dict[str, str]],
user_conv: str,
few_shots=[],
max_tokens: int = 4096,
) -> list:
message_builder = MessageBuilder(system_prompt, model_id)
# Add examples to show the chat what responses we want. It will try to mimic any responses and make sure they match the rules laid out in the system message.
for shot in few_shots:
message_builder.append_message(shot.get("role"), shot.get("content"))
user_content = user_conv
append_index = len(few_shots) + 1
message_builder.append_message(self.USER, user_content, index=append_index)
for h in reversed(history[:-1]):
if bot_msg := h.get("bot"):
message_builder.append_message(self.ASSISTANT, bot_msg, index=append_index)
if user_msg := h.get("user"):
message_builder.append_message(self.USER, user_msg, index=append_index)
if message_builder.token_length > max_tokens:
break
messages = message_builder.messages
return messages
def get_search_query(self, chat_completion: dict[str, any], user_query: str):
response_message = chat_completion["choices"][0]["message"]
if function_call := response_message.get("function_call"):
if function_call["name"] == "search_sources":
arg = json.loads(function_call["arguments"])
search_query = arg.get("search_query", self.NO_RESPONSE)
if search_query != self.NO_RESPONSE:
return search_query
elif query_text := response_message.get("content"):
if query_text.strip() != self.NO_RESPONSE:
return query_text
return user_query
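# Illustrative sketch (not part of the original file): the dictionaries below mimic the two
# response shapes that get_search_query above can handle: a function-call response carrying a
# "search_query" argument, and a plain content response. The literal values are made-up
# examples, not real model output.
def _example_get_search_query_inputs():
    function_call_response = {
        "choices": [
            {
                "message": {
                    "function_call": {
                        "name": "search_sources",
                        "arguments": '{"search_query": "health plan cardio coverage"}',
                    }
                }
            }
        ]
    }
    content_response = {"choices": [{"message": {"content": "health plan cardio coverage"}}]}
    return function_call_response, content_response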
| [
"Show available health plans",
"Health plan cardio coverage",
"What are my health plans?",
"Next Questions",
"Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook.\nUse double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>.\nTry not to repeat questions that have already been asked.\nOnly generate questions and do not generate any text before or after the questions, such as 'Next Questions'",
"Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook.\nYou have access to Azure Cognitive Search index with 100's of documents.\nGenerate a search query based on the conversation and the new question.\nDo not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms.\nDo not include any text inside [] or <<>> in the search query terms.\nDo not include any special characters like '+'.\nIf the question is not in English, translate the question to English before generating the search query.\nIf you cannot generate a search query, return just the number 0.\n",
"suggest_followup_questions",
"does my plan cover cardio?",
"prompt_template"
] |
2024-01-10 | pku-sixing/WSDM2022-MHKD-TriKE | interact.py | # # Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import logging
import random
from itertools import chain
from argparse import ArgumentParser
from pprint import pformat
import torch
import torch.nn.functional as F
from transformers import OpenAIGPTLMHeadModel, GPT2LMHeadModel, BertTokenizer
SPECIAL_TOKENS = ["[CLS]", "[SEP]", "[PAD]", "[speaker1]", "[speaker2]"]
def top_filtering(logits, top_k=0, top_p=0.0, threshold=-float('Inf'), filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.
top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset
whose total probability mass is greater than or equal to the threshold top_p.
In practice, we select the highest probability tokens whose cumulative probability mass exceeds
the threshold top_p.
threshold: a minimal threshold to keep logits
"""
assert logits.dim() == 1 # Only work for batch size 1 for now - could update but it would obfuscate a bit the code
top_k = min(top_k, logits.size(-1))
if top_k > 0:
# Remove all tokens with a probability less than the last token in the top-k tokens
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
# Compute cumulative probabilities of sorted tokens
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probabilities > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# Back to unsorted indices and set them to -infinity
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
indices_to_remove = logits < threshold
logits[indices_to_remove] = filter_value
return logits
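# Usage sketch added for illustration (not in the original script): top_filtering expects a
# 1-D logits tensor for a single generation step. The vocabulary size and the top_k / top_p
# values below are arbitrary assumptions chosen only for this demo.
def _example_top_filtering():
    logits = torch.randn(100)                 # fake logits over a 100-token vocabulary
    filtered = top_filtering(logits.clone(), top_k=10, top_p=0.9)
    probs = F.softmax(filtered, dim=-1)       # only the surviving tokens keep probability mass
    next_token = torch.multinomial(probs, 1)  # sample exactly as sample_sequence() does
    return next_token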
def build_input_from_segments(history, reply, tokenizer, with_eos=True):
""" Build a sequence of input from 3 segments: persona, history and last reply """
bos, eos, pad, speaker1, speaker2 = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
sequence = [[bos]] + history + [reply + ([eos] if with_eos else [])]
sequence = [sequence[0]] + [[speaker2 if i % 2 else speaker1] + s for i, s in enumerate(sequence[1:])]
instance = {}
instance["input_ids"] = list(chain(*sequence))
instance["token_type_ids"] = [bos] + [speaker2 if i % 2 else speaker1 for i, s in enumerate(sequence[1:])
for _ in s]
return instance, sequence
def sample_sequence(history, tokenizer, model, args, current_output=None):
special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
if current_output is None:
current_output = []
for i in range(args.max_length):
instance, sequence = build_input_from_segments(history, current_output, tokenizer, with_eos=False)
input_ids = torch.tensor(instance["input_ids"], dtype=torch.long, device=args.device).unsqueeze(0)
token_type_ids = torch.tensor(instance["token_type_ids"], dtype=torch.long, device=args.device).unsqueeze(0)
logits, *_ = model(input_ids, token_type_ids=token_type_ids)
logits = logits[0, -1, :] / args.temperature
logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
probs = F.softmax(logits, dim=-1)
prev = torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
if i < args.min_length and prev.item() in special_tokens_ids:
while prev.item() in special_tokens_ids:
prev = torch.multinomial(probs, num_samples=1)
if prev.item() in special_tokens_ids:
break
current_output.append(prev.item())
return current_output
def run():
parser = ArgumentParser()
parser.add_argument('--gpt2', action='store_true', help="use gpt2")
parser.add_argument("--model_checkpoint", type=str, default="models/CDial-GPT2_LCCC-base", help="Path, url or short name of the model")
parser.add_argument("--max_history", type=int, default=2, help="Number of previous utterances to keep in history")
parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu",
help="Device (cuda or cpu)")
parser.add_argument("--no_sample", action='store_true', help="Set to use greedy decoding instead of sampling")
parser.add_argument("--max_length", type=int, default=30, help="Maximum length of the output utterances")
parser.add_argument("--min_length", type=int, default=1, help="Minimum length of the output utterances")
parser.add_argument("--seed", type=int, default=42, help="Seed")
parser.add_argument("--temperature", type=int, default=0.7, help="Sampling softmax temperature")
parser.add_argument("--top_k", type=int, default=0, help="Filter top-k tokens before sampling (<=0: no filtering)")
parser.add_argument("--top_p", type=float, default=0.9,
help="Nucleus filtering (top-p) before sampling (<=0.0: no filtering)")
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__file__)
logger.info(pformat(args))
if args.model_checkpoint == "":
logging.error("Checkpoint needed!")
return
random.seed(args.seed)
torch.random.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
logger.info("Get pretrained model and tokenizer")
tokenizer_class = BertTokenizer
model_class = OpenAIGPTLMHeadModel if not args.gpt2 else GPT2LMHeadModel
tokenizer = tokenizer_class.from_pretrained(args.model_checkpoint, do_lower_case=True)
model = model_class.from_pretrained(args.model_checkpoint)
model.to(args.device)
model.eval()
def tokenize(obj):
if isinstance(obj, str):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
if isinstance(obj, dict):
return dict((n, tokenize(o)) for n, o in obj.items())
return list(tokenize(o) for o in obj)
history = []
while True:
raw_text = input(">>> ")
while not raw_text:
print('Prompt should not be empty!')
raw_text = input(">>> ")
raw_text = " ".join(list(raw_text.replace(" ", "")))
history.append(tokenize(raw_text))
with torch.no_grad():
out_ids = sample_sequence(history, tokenizer, model, args)
history.append(out_ids)
history = history[-(2 * args.max_history + 1):]
out_text = tokenizer.decode(out_ids, skip_special_tokens=True)
print(out_text)
if __name__ == "__main__":
run()
| [] |
2024-01-10 | vstej/generative-ai-financial-services | agent~lambda~agent-handler~lambda_function.py | import json
import datetime
import time
import os
import dateutil.parser
import logging
import warnings
#warnings.filterwarnings('ignore')
import boto3
from boto3.dynamodb.conditions import Key
from langchain.llms.bedrock import Bedrock
from langchain.chat_models import BedrockChat
from langchain.schema import HumanMessage
from chat import Chat
from fsi_agent import FSIAgent
from pypdf import PdfReader, PdfWriter
# Create reference to DynamoDB tables
loan_application_table_name = os.environ['USER_PENDING_ACCOUNTS_TABLE']
user_accounts_table_name = os.environ['USER_EXISTING_ACCOUNTS_TABLE']
s3_artifact_bucket = os.environ['S3_ARTIFACT_BUCKET_NAME']
# Instantiate boto3 clients and resources
boto3_session = boto3.Session(region_name=os.environ['AWS_REGION'])
dynamodb = boto3.resource('dynamodb',region_name=os.environ['AWS_REGION'])
s3_client = boto3.client('s3',region_name=os.environ['AWS_REGION'],config=boto3.session.Config(signature_version='s3v4',))
s3_object = boto3.resource('s3')
bedrock_client = boto3_session.client(service_name="bedrock-runtime")
# --- Lex v2 request/response helpers (https://docs.aws.amazon.com/lexv2/latest/dg/lambda-response-format.html) ---
def elicit_slot(session_attributes, active_contexts, intent, slot_to_elicit, message):
response = {
'sessionState': {
'activeContexts':[{
'name': 'intentContext',
'contextAttributes': active_contexts,
'timeToLive': {
'timeToLiveInSeconds': 86400,
'turnsToLive': 20
}
}],
'sessionAttributes': session_attributes,
'dialogAction': {
'type': 'ElicitSlot',
'slotToElicit': slot_to_elicit
},
'intent': intent,
},
'messages': [{
"contentType": "PlainText",
"content": message,
}]
}
return response
def confirm_intent(active_contexts, session_attributes, intent, message):
response = {
'sessionState': {
'activeContexts': [active_contexts],
'sessionAttributes': session_attributes,
'dialogAction': {
'type': 'ConfirmIntent'
},
'intent': intent
}
}
return response
def close(session_attributes, active_contexts, fulfillment_state, intent, message):
response = {
'sessionState': {
'activeContexts':[{
'name': 'intentContext',
'contextAttributes': active_contexts,
'timeToLive': {
'timeToLiveInSeconds': 86400,
'turnsToLive': 20
}
}],
'sessionAttributes': session_attributes,
'dialogAction': {
'type': 'Close',
},
'intent': intent,
},
'messages': [{'contentType': 'PlainText', 'content': message}]
}
return response
def elicit_intent(intent_request, session_attributes, message):
response = {
'sessionState': {
'dialogAction': {
'type': 'ElicitIntent'
},
'sessionAttributes': session_attributes
},
'messages': [
{
'contentType': 'PlainText',
'content': message
},
{
'contentType': 'ImageResponseCard',
'imageResponseCard': {
"buttons": [
{
"text": "Loan Application",
"value": "Loan Application"
},
{
"text": "Loan Calculator",
"value": "Loan Calculator"
},
{
"text": "Ask GenAI",
"value": "What kind of questions can the Assistant answer?"
}
],
"title": "How can I help you?"
}
}
]
}
return response
def delegate(session_attributes, active_contexts, intent, message):
response = {
'sessionState': {
'activeContexts':[{
'name': 'intentContext',
'contextAttributes': active_contexts,
'timeToLive': {
'timeToLiveInSeconds': 86400,
'turnsToLive': 20
}
}],
'sessionAttributes': session_attributes,
'dialogAction': {
'type': 'Delegate',
},
'intent': intent,
},
'messages': [{'contentType': 'PlainText', 'content': message}]
}
return response
def initial_message(intent_name):
response = {
'sessionState': {
'dialogAction': {
'type': 'ElicitSlot',
'slotToElicit': 'UserName' if intent_name=='MakePayment' else 'PickUpCity'
},
'intent': {
'confirmationState': 'None',
'name': intent_name,
'state': 'InProgress'
}
}
}
return response
def build_response_card(title, subtitle, options):
"""
Build a responseCard with a title, subtitle, and an optional set of options which should be displayed as buttons.
"""
buttons = None
if options is not None:
buttons = []
for i in range(min(5, len(options))):
buttons.append(options[i])
return {
'contentType': 'ImageResponseCard',
'imageResponseCard': {
'title': title,
'subTitle': subtitle,
'buttons': buttons
}
}
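# Example sketch (illustrative only): build_response_card expects `options` as a list of
# Lex button dicts with "text" and "value" keys, matching the buttons used in elicit_intent
# above. The titles and values below are placeholders, not required values.
def _example_build_response_card():
    options = [
        {"text": "Loan Application", "value": "Loan Application"},
        {"text": "Loan Calculator", "value": "Loan Calculator"},
    ]
    return build_response_card("How can I help you?", "Select an option", options)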
def build_slot(intent_request, slot_to_build, slot_value):
intent_request['sessionState']['intent']['slots'][slot_to_build] = {
'shape': 'Scalar', 'value':
{
'originalValue': slot_value, 'resolvedValues': [slot_value],
'interpretedValue': slot_value
}
}
def build_validation_result(isvalid, violated_slot, message_content):
print("Build Validation")
return {
'isValid': isvalid,
'violatedSlot': violated_slot,
'message': message_content
}
# --- Utility helper functions ---
def isvalid_date(date):
try:
dateutil.parser.parse(date, fuzzy=True)
print("TRUE DATE")
return True
except ValueError as e:
print("DATE PARSER ERROR = " + str(e))
return False
def isvalid_yes_or_no(value):
if value == 'Yes' or value == 'yes' or value == 'No' or value == 'no':
return True
return False
def isvalid_credit_score(credit_score):
if int(credit_score) < 851 and int(credit_score) > 300:
return True
return False
def isvalid_zero_or_greater(value):
if int(value) >= 0:
return True
return False
def safe_int(n):
if n is not None:
return int(n)
return n
def create_presigned_url(bucket_name, object_name, expiration=600):
# Generate a presigned URL for the S3 object
try:
response = s3_client.generate_presigned_url('get_object',
Params={'Bucket': bucket_name,
'Key': object_name},
ExpiresIn=expiration)
except Exception as e:
print(e)
logging.error(e)
return "Error"
# The response contains the presigned URL
return response
def try_ex(value):
"""
Safely access Slots dictionary values.
"""
if value is not None:
if value['value']['resolvedValues']:
return value['value']['interpretedValue']
elif value['value']['originalValue']:
return value['value']['originalValue']
else:
return None
else:
return None
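# Example sketch (illustrative only): try_ex expects the Lex V2 slot shape that build_slot
# constructs above. The slot value below is a made-up placeholder.
def _example_try_ex():
    slot = {
        "shape": "Scalar",
        "value": {
            "originalValue": "500000",
            "resolvedValues": ["500000"],
            "interpretedValue": "500000",
        },
    }
    return try_ex(slot)  # returns "500000"; try_ex(None) would return None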
# --- Intent fulfillment functions ---
def isvalid_pin(userName, pin):
"""
Validates the user-provided PIN using a DynamoDB table lookup.
"""
plans_table = dynamodb.Table(user_accounts_table_name)
try:
# Set up the query parameters
params = {
'KeyConditionExpression': 'userName = :c',
'ExpressionAttributeValues': {
':c': userName
}
}
# Execute the query and get the result
response = plans_table.query(**params)
# iterate over the items returned in the response
if len(response['Items']) > 0:
pin_to_compare = int(response['Items'][0]['pin'])
            # Check if the stored PIN matches the PIN the user provided
if pin_to_compare == int(pin):
return True
return False
except Exception as e:
print(e)
return e
def isvalid_username(userName):
"""
Validates the user-provided username exists in the 'user_accounts_table_name' DynamoDB table.
"""
plans_table = dynamodb.Table(user_accounts_table_name)
try:
# Set up the query parameters
params = {
'KeyConditionExpression': 'userName = :c',
'ExpressionAttributeValues': {
':c': userName
}
}
# Execute the query and get the result
response = plans_table.query(**params)
# Check if any items were returned
if response['Count'] != 0:
return True
else:
return False
except Exception as e:
print(e)
return e
def validate_pin(intent_request, slots):
"""
Performs slot validation for username and PIN. Invoked as part of 'verify_identity' intent fulfillment.
"""
username = try_ex(slots['UserName'])
pin = try_ex(slots['Pin'])
if username is not None:
if not isvalid_username(username):
return build_validation_result(
False,
'UserName',
'Our records indicate there is no profile belonging to the username, {}. Please enter a valid username'.format(username)
)
session_attributes = intent_request['sessionState'].get("sessionAttributes") or {}
session_attributes['UserName'] = username
intent_request['sessionState']['sessionAttributes']['UserName'] = username
else:
return build_validation_result(
False,
'UserName',
'Our records indicate there are no accounts belonging to that username. Please try again.'
)
if pin is not None:
if not isvalid_pin(username, pin):
return build_validation_result(
False,
'Pin',
                'You have entered an incorrect PIN. Please try again.'
)
else:
message = "Thank you for choosing Octank Financial, {}. Please confirm your 4-digit PIN before we proceed.".format(username)
return build_validation_result(
False,
'Pin',
message
)
return {'isValid': True}
def verify_identity(intent_request):
"""
Performs dialog management and fulfillment for username verification.
Beyond fulfillment, the implementation for this intent demonstrates the following:
1) Use of elicitSlot in slot validation and re-prompting.
2) Use of sessionAttributes {UserName} to pass information that can be used to guide conversation.
"""
slots = intent_request['sessionState']['intent']['slots']
pin = try_ex(slots['Pin'])
username=try_ex(slots['UserName'])
confirmation_status = intent_request['sessionState']['intent']['confirmationState']
session_attributes = intent_request['sessionState'].get("sessionAttributes") or {}
intent = intent_request['sessionState']['intent']
active_contexts = {}
# Validate any slots which have been specified. If any are invalid, re-elicit for their value
validation_result = validate_pin(intent_request, intent_request['sessionState']['intent']['slots'])
session_attributes['UserName'] = username
if not validation_result['isValid']:
slots = intent_request['sessionState']['intent']['slots']
slots[validation_result['violatedSlot']] = None
return elicit_slot(
session_attributes,
active_contexts,
intent_request['sessionState']['intent'],
validation_result['violatedSlot'],
validation_result['message']
)
else:
if confirmation_status == 'None':
# Query DDB for user information before offering intents
plans_table = dynamodb.Table(user_accounts_table_name)
try:
# Query the table using the partition key
response = plans_table.query(
KeyConditionExpression=Key('userName').eq(username)
)
# TODO: Customize account readout based on account type
message = ""
items = response['Items']
for item in items:
if item['planName'] == 'mortgage' or item['planName'] == 'Mortgage':
message = "Your mortgage account summary includes a ${:,} loan at {}% interest with ${:,} of unpaid principal. Your next payment of ${:,} is scheduled for {}.".format(item['loanAmount'], item['loanInterest'], item['unpaidPrincipal'], item['amountDue'], item['dueDate'])
elif item['planName'] == 'Checking' or item['planName'] == 'checking':
message = "I see you have a Savings account with Octank Financial. Your account balance is ${:,} and your next payment \
amount of ${:,} is scheduled for {}.".format(item['unpaidPrincipal'], item['paymentAmount'], item['dueDate'])
elif item['planName'] == 'Loan' or item['planName'] == 'loan':
message = "I see you have a Loan account with Octank Financial. Your account balance is ${:,} and your next payment \
amount of ${:,} is scheduled for {}.".format(item['unpaidPrincipal'], item['paymentAmount'], item['dueDate'])
return elicit_intent(intent_request, session_attributes,
'Thank you for confirming your username and PIN, {}. {}'.format(username, message)
)
except Exception as e:
print(e)
return e
def validate_loan_application(intent_request, slots):
"""
Performs dialog management and fulfillment for completing a loan application.
Beyond fulfillment, the implementation for this intent demonstrates the following:
1) Use of elicitSlot in slot validation and re-prompting.
2) Use of sessionAttributes to pass information that can be used to guide conversation.
"""
username = try_ex(slots['UserName'])
loan_value = try_ex(slots['LoanValue'])
monthly_income = try_ex(slots['MonthlyIncome'])
work_history = try_ex(slots['WorkHistory'])
credit_score = try_ex(slots['CreditScore'])
housing_expense = try_ex(slots['HousingExpense'])
debt_amount = try_ex(slots['DebtAmount'])
down_payment = try_ex(slots['DownPayment'])
coborrow = try_ex(slots['Coborrow'])
closing_date = try_ex(slots['ClosingDate'])
confirmation_status = intent_request['sessionState']['intent']['confirmationState']
session_attributes = intent_request['sessionState'].get("sessionAttributes") or {}
active_contexts = {}
if username is not None:
if not isvalid_username(username):
return build_validation_result(
False,
'UserName',
'Our records indicate there is no profile belonging to the username, {}. Please enter a valid username'.format(username)
)
else:
try:
session_username = intent_request['sessionState']['sessionAttributes']['UserName']
build_slot(intent_request, 'UserName', session_username)
except KeyError:
return build_validation_result(
False,
'UserName',
'We cannot find an account under that username. Please try again with a valid username.'
)
if loan_value is not None:
if loan_value.isnumeric():
if not isvalid_zero_or_greater(loan_value):
return build_validation_result(False, 'LoanValue', 'Please enter a value greater than $0.')
else:
prompt = "The user was just asked to provide their loan value on a loan application and this was their response: " + intent_request['inputTranscript']
message = invoke_fm(prompt)
reply = message + " \n\nWhat is your desired loan amount?"
return build_validation_result(False, 'LoanValue', reply)
else:
return build_validation_result(
False,
'LoanValue',
"What is your desired loan amount? In other words, how much are looking to borrow?"
)
if monthly_income is not None:
if monthly_income.isnumeric():
if not isvalid_zero_or_greater(monthly_income):
return build_validation_result(False, 'MonthlyIncome', 'Monthly income amount must be greater than $0. Please try again.')
else:
prompt = "The user was just asked to provide their monthly income on a loan application and this was their response: " + intent_request['inputTranscript']
message = invoke_fm(prompt)
reply = message + " \n\nWhat is your monthly income?"
return build_validation_result(False, 'MonthlyIncome', reply)
else:
return build_validation_result(
False,
'MonthlyIncome',
"What is your monthly income?"
)
if work_history is not None:
if not isvalid_yes_or_no(work_history):
prompt = "The user was just asked to confirm their continuous two year work history on a loan application and this was their response: " + intent_request['inputTranscript']
message = invoke_fm(prompt)
reply = message + " \n\nDo you have a two-year continuous work history (Yes/No)?"
return build_validation_result(False, 'WorkHistory', reply)
else:
return build_validation_result(
False,
'WorkHistory',
"Do you have a two-year continuous work history (Yes/No)?"
)
if credit_score is not None:
if credit_score.isnumeric():
if not isvalid_credit_score(credit_score):
return build_validation_result(False, 'CreditScore', 'Credit score entries must be between 300 and 850. Please enter a valid credit score.')
else:
prompt = "The user was just asked to provide their credit score on a loan application and this was their response: " + intent_request['inputTranscript']
message = invoke_fm(prompt)
reply = message + " \n\nWhat do you think your current credit score is?"
return build_validation_result(False, 'CreditScore', reply)
else:
return build_validation_result(
False,
'CreditScore',
"What do you think your current credit score is?"
)
if housing_expense is not None:
if housing_expense.isnumeric():
if not isvalid_zero_or_greater(housing_expense):
return build_validation_result(False, 'HousingExpense', 'Your housing expense must be a value greater than or equal to $0. Please try again.')
else:
prompt = "The user was just asked to provide their monthly housing expense on a loan application and this was their response: " + intent_request['inputTranscript']
message = invoke_fm(prompt)
reply = message + " \n\nHow much are you currently paying for housing each month?"
return build_validation_result(False, 'HousingExpense', reply)
else:
return build_validation_result(
False,
'HousingExpense',
"How much are you currently paying for housing each month?"
)
if debt_amount is not None:
if debt_amount.isnumeric():
if not isvalid_zero_or_greater(debt_amount):
return build_validation_result(False, 'DebtAmount', 'Your debt amount must be a value greater than or equal to $0. Please try again.')
else:
prompt = "The user was just asked to provide their monthly debt amount on a loan application and this was their response: " + intent_request['inputTranscript']
message = invoke_fm(prompt)
reply = message + " \n\nWhat is your estimated credit card or student loan debt?"
return build_validation_result(False, 'DebtAmount', reply)
else:
return build_validation_result(
False,
'DebtAmount',
"What is your estimated credit card or student loan debt? Please enter '0' if none."
)
if down_payment is not None:
if down_payment.isnumeric():
if not isvalid_zero_or_greater(down_payment):
return build_validation_result(False, 'DownPayment', 'Your estimate down payment must be a value greater than or equal to $0. Please try again.')
else:
prompt = "The user was just asked to provide their estimated down payment on a loan application and this was their response: " + intent_request['inputTranscript']
message = invoke_fm(prompt)
reply = message + " \n\nWhat do you have saved for a down payment?"
return build_validation_result(False, 'DownPayment', reply)
else:
return build_validation_result(
False,
'DownPayment',
"What do you have saved for a down payment?"
)
if coborrow is not None:
if not isvalid_yes_or_no(coborrow):
prompt = "The user was just asked to confirm if they will have a co-borrow on a loan application and this was their response: " + intent_request['inputTranscript']
message = invoke_fm(prompt)
reply = message + " \n\nDo you have a co-borrower (Yes/No)?"
return build_validation_result(False, 'Coborrow', reply)
else:
return build_validation_result(
False,
'Coborrow',
"Do you have a co-borrower (Yes/No)?"
)
if closing_date is not None:
if not isvalid_date(closing_date):
prompt = "The user was just asked to provide their real estate closing date on a loan application and this was their response: " + intent_request['inputTranscript']
message = invoke_fm(prompt)
reply = message + " \n\nWhen are you looking to close?"
return build_validation_result(False, 'ClosingDate', reply)
#if datetime.datetime.strptime(closing_date, '%Y-%m-%d').date() <= datetime.date.today():
# return build_validation_result(False, 'ClosingDate', 'Closing dates must be scheduled at least one day in advance. Please try a different date.')
else:
print("## ClosingDate")
return build_validation_result(
False,
'ClosingDate',
'When are you looking to close?'
)
return {'isValid': True}
def loan_application(intent_request):
"""
    Performs dialog management and fulfillment for completing a loan application.
Beyond fulfillment, the implementation for this intent demonstrates the following:
1) Use of elicitSlot in slot validation and re-prompting
2) Use of sessionAttributes to pass information that can be used to guide conversation
"""
slots = intent_request['sessionState']['intent']['slots']
username = try_ex(slots['UserName'])
loan_value = try_ex(slots['LoanValue'])
monthly_income = try_ex(slots['MonthlyIncome'])
work_history = try_ex(slots['WorkHistory'])
credit_score = try_ex(slots['CreditScore'])
housing_expense = try_ex(slots['HousingExpense'])
debt_amount = try_ex(slots['DebtAmount'])
down_payment = try_ex(slots['DownPayment'])
coborrow = try_ex(slots['Coborrow'])
closing_date = try_ex(slots['ClosingDate'])
confirmation_status = intent_request['sessionState']['intent']['confirmationState']
session_attributes = intent_request['sessionState'].get("sessionAttributes") or {}
intent = intent_request['sessionState']['intent']
active_contexts = {}
if intent_request['invocationSource'] == 'DialogCodeHook':
# Validate any slots which have been specified. If any are invalid, re-elicit for their value
validation_result = validate_loan_application(intent_request, intent_request['sessionState']['intent']['slots'])
print("LOAN APPLICATION - validation_result = " + str(validation_result))
if 'isValid' in validation_result:
if validation_result['isValid'] == False:
if validation_result['violatedSlot'] == 'CreditScore' and confirmation_status == 'Denied':
print("Invalid credit score")
validation_result['violatedSlot'] = 'UserName'
intent['slots'] = {}
slots[validation_result['violatedSlot']] = None
return elicit_slot(
session_attributes,
active_contexts,
intent,
validation_result['violatedSlot'],
validation_result['message']
)
if username and monthly_income:
application = {
'LoanValue': loan_value,
'MonthlyIncome': monthly_income,
'CreditScore': credit_score,
'DownPayment': down_payment
}
# Convert the JSON document to a string
application_string = json.dumps(application)
# Write the JSON document to DynamoDB
loan_application_table = dynamodb.Table(loan_application_table_name)
print("DYNAMODB username = " + str(username))
response = loan_application_table.put_item(
Item={
'userName': username,
'planName': 'Loan',
'document': application_string
}
)
        # Determine if the intent (and current slot settings) has been denied. The messaging will be different
        # if the user is denying an application they initiated or an auto-populated suggestion.
        if confirmation_status == 'Denied':
            return delegate(session_attributes, active_contexts, intent, 'Confirm loan application')
        if confirmation_status == 'None':
            return delegate(session_attributes, active_contexts, intent, 'Confirm loan application')
if confirmation_status == 'Confirmed':
intent['confirmationState']="Confirmed"
intent['state']="Fulfilled"
s3_client.download_file(s3_artifact_bucket, 'agent/assets/Mortgage-Loan-Application.pdf', '/tmp/Mortgage-Loan-Application.pdf')
reader = PdfReader('/tmp/Mortgage-Loan-Application.pdf')
writer = PdfWriter()
page = reader.pages[0]
fields = reader.get_fields()
writer.append(reader)
firstname, lastname = username.split(' ', 1)
writer.update_page_form_field_values(
writer.pages[0], {
'fullName34[first]': firstname,
'fullName34[last]': lastname,
'monthlyNet': monthly_income,
'creditScore': credit_score,
'requestedLoan': loan_value,
'downPayment': down_payment
}
)
with open('/tmp/Mortgage-Loan-Application.pdf', "wb") as output_stream:
writer.write(output_stream)
s3_client.upload_file('/tmp/Mortgage-Loan-Application.pdf', s3_artifact_bucket, 'agent/assets/Mortgage-Loan-Application-Completed.pdf')
# Create loan application doc in S3
URLs=[]
# create_presigned_url(bucket_name, object_name, expiration=600):
URLs.append(create_presigned_url(s3_artifact_bucket,'agent/assets/Mortgage-Loan-Application-Completed.pdf',3600))
mortgage_app = 'Your loan application is nearly complete! Please follow the link for the last few bits of information: ' + URLs[0]
return elicit_intent(
intent_request,
session_attributes,
mortgage_app
)
def loan_calculator(intent_request):
"""
Performs dialog management and fulfillment for calculating loan details.
    This is an empty function framework intended for the user to develop their own intent fulfillment functions.
"""
session_attributes = intent_request['sessionState'].get("sessionAttributes") or {}
# def elicit_intent(intent_request, session_attributes, message)
return elicit_intent(
intent_request,
session_attributes,
'This is where you would implement LoanCalculator intent fulfillment.'
)
def invoke_fm(prompt):
"""
Invokes Foundational Model endpoint hosted on Amazon Bedrock and parses the response.
"""
chat = Chat(prompt)
# chat = BedrockChat(client=bedrock_client, model_id="anthropic.claude-v2", region_name=os.environ['AWS_REGION'])
# print("BEDROCK CHAT = " + str(chat))
llm = Bedrock(client=bedrock_client, model_id="anthropic.claude-instant-v1", region_name=os.environ['AWS_REGION']) # "anthropic.claude-v2 "
llm.model_kwargs = {'max_tokens_to_sample': 350}
lex_agent = FSIAgent(llm, chat.memory)
formatted_prompt = "\n\nHuman: " + prompt + " \n\nAssistant:"
print("FORMATTED PROMPT = " + str(formatted_prompt))
try:
print("Trying Agent Run")
message = lex_agent.run(input=formatted_prompt)
print("Agent Run Output = " + str(message))
except ValueError as e:
message = str(e)
print("ERROR MESSAGE = " + str(message))
if not message.startswith("Could not parse LLM output:"):
raise e
message = message.removeprefix("Could not parse LLM output: `").removesuffix("`")
print("lambda_function NO ERROR CATCH")
return message
'''
output = message['output']
chat = BedrockChat(client=bedrock_client, model_id="anthropic.claude-v2", region_name=os.environ['AWS_REGION'])
messages = [
HumanMessage(
content=prompt
)
]
try:
output = chat(messages)
except ValueError as e:
message = str(e)
print("ERROR MESSAGE = " + str(message))
if not message.startswith("Could not parse LLM output:"):
raise e
message = message.removeprefix("Could not parse LLM output: `").removesuffix("`")
return message
return output
'''
def genai_intent(intent_request):
"""
Performs dialog management and fulfillment for user utterances that do not match defined intents (i.e., FallbackIntent).
Sends user utterance to Foundational Model endpoint via 'invoke_fm' function.
"""
session_attributes = intent_request['sessionState'].get("sessionAttributes") or {}
if intent_request['invocationSource'] == 'DialogCodeHook':
prompt = intent_request['inputTranscript']
output = invoke_fm(prompt)
return elicit_intent(intent_request, session_attributes, output)
# --- Intents ---
def dispatch(intent_request):
"""
Routes the incoming request based on intent.
"""
slots = intent_request['sessionState']['intent']['slots']
username = slots['UserName'] if 'UserName' in slots else None
intent_name = intent_request['sessionState']['intent']['name']
if intent_name == 'VerifyIdentity':
return verify_identity(intent_request)
elif intent_name == 'LoanApplication':
return loan_application(intent_request)
elif intent_name == 'LoanCalculator':
return loan_calculator(intent_request)
else:
return genai_intent(intent_request)
raise Exception('Intent with name ' + intent_name + ' not supported')
# --- Main handler ---
def handler(event, context):
"""
Invoked when the user provides an utterance that maps to a Lex bot intent.
The JSON body of the user request is provided in the event slot.
"""
os.environ['TZ'] = 'America/New_York'
time.tzset()
return dispatch(event) | [
"The user was just asked to provide their monthly income on a loan application and this was their response: PLACEHOLDER",
"inputTranscript",
"The user was just asked to provide their estimated down payment on a loan application and this was their response: PLACEHOLDER",
"The user was just asked to confirm if they will have a co-borrow on a loan application and this was their response: PLACEHOLDER",
"The user was just asked to provide their monthly debt amount on a loan application and this was their response: PLACEHOLDER",
"PlainText",
"The user was just asked to provide their loan value on a loan application and this was their response: PLACEHOLDER",
"The user was just asked to provide their real estate closing date on a loan application and this was their response: PLACEHOLDER",
"ImageResponseCard",
"\n\nHuman: PLACEHOLDER \n\nAssistant:",
"The user was just asked to provide their monthly housing expense on a loan application and this was their response: PLACEHOLDER",
"The user was just asked to confirm their continuous two year work history on a loan application and this was their response: PLACEHOLDER",
"The user was just asked to provide their credit score on a loan application and this was their response: PLACEHOLDER"
] |
2024-01-10 | vstej/generative-ai-financial-services | agent~lambda~agent-handler~kendra_index_retriever.py | """Chain for question-answering against a vector database."""
from __future__ import annotations
from typing import Any, Dict, List, Optional
from langchain.schema import BaseRetriever, Document
from kendra_results import kendra_query, kendra_client
import boto3
class KendraIndexRetriever(BaseRetriever):
"""Retriever to retrieve documents from Amazon Kendra index.
Example:
.. code-block:: python
kendraIndexRetriever = KendraIndexRetriever()
"""
kendraindex: str
"""Kendra index id"""
awsregion: str
"""AWS region of the Kendra index"""
k: int
"""Number of documents to query for."""
return_source_documents: bool
"""Whether source documents to be returned """
kclient: Any
""" boto3 client for Kendra. """
def __init__(self, kendraindex, awsregion, k=3, return_source_documents=False):
print("Initializing Kendra Index Retriever")
self.kendraindex = kendraindex
self.awsregion = awsregion
self.k = k
self.return_source_documents = return_source_documents
self.kclient = kendra_client(self.kendraindex, self.awsregion)
    def get_relevant_documents(self, query: str) -> List[Document]:
        """Run search on Kendra index and get top k documents.

        Example: docs = get_relevant_documents('This is my query')
        """
        print("Providing Relevant Kendra Documents")
docs = kendra_query(self.kclient, query, self.k, self.kendraindex)
return docs
async def aget_relevant_documents(self, query: str) -> List[Document]:
return await super().aget_relevant_documents(query)
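# Usage sketch (illustrative only): the index id and region below are placeholders; a real
# call needs a deployed Amazon Kendra index and AWS credentials available to boto3.
def _example_kendra_retriever():
    retriever = KendraIndexRetriever(kendraindex="your-kendra-index-id", awsregion="us-east-1", k=3)
    return retriever.get_relevant_documents("What is Octank Financial?")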
| [] |
2024-01-10 | vstej/generative-ai-financial-services | agent~lambda~agent-handler~fsi_agent.py | from langchain.agents.tools import Tool
from langchain.agents.conversational.base import ConversationalAgent
from langchain.agents import AgentExecutor
from tools import tools
from datetime import datetime
import warnings
#warnings.filterwarnings('ignore')
PREFIX = "\n\nHuman: You are a Financial Services AI chatbot (Assistant) for a company called Octank Financial. Also, you can answer general questions about anything. You quickly respond to questions from a user with an answer and the sources you used to find your answer in the format: \
[Source 1: Source Title 1 - Source Link 1], \
[Source 2: Source Title 2 - Source Link 2], \
[Source n: Source Title n - Source Link n]. Provide two newline characters between your answer and the sources. By the way, the date is " + datetime.now().strftime("%m/%d/%Y, %H:%M:%S") + ".\n\nAssistant:"
'''FORMAT_INSTRUCTIONS = """\n\nHuman: To use a tool, please use the following format:
Thought: Do I need to use a tool? Yes
Action: The action to take from the following list of pre-defined tools: 'Octank Financial'
Action Input: The input to the action
Observation: The result of the action
Thought: Do I need to use a tool? No
\n\nAssistant: [Answer and Sources]
"""'''
FORMAT_INSTRUCTIONS = "\n\nHuman: \n\nAssistant:"
class FSIAgent():
def __init__(self,llm, memory) -> None:
self.prefix = PREFIX
self.ai_prefix = "Assistant"
self.human_prefix = "Human"
self.llm = llm
self.memory = memory
self.format_instructions = FORMAT_INSTRUCTIONS
self.agent = self.create_agent()
def create_agent(self):
fsi_agent = ConversationalAgent.from_llm_and_tools(
llm = self.llm,
tools = tools,
prefix = self.prefix,
ai_prefix = self.ai_prefix,
human_prefix = self.human_prefix,
format_instructions = self.format_instructions,
return_intermediate_steps = True,
return_source_documents = True
)
agent_executor = AgentExecutor.from_agent_and_tools(agent=fsi_agent, tools=tools, verbose=True, memory=self.memory, return_source_documents=True, return_intermediate_steps=True) # , handle_parsing_errors=True
return agent_executor
def run(self, input):
print("Running FSI Agent with input: " + str(input))
try:
response = self.agent(input)
except ValueError as e:
response = str(e)
print("fsi_agent ERROR CATCH = " + response)
if not response.startswith("An output parsing error occurred"):
print("## NO CATCH ##")
raise e
print("CATCH")
response = response.removeprefix("An output parsing error occurred. In order to pass this error back to the agent and have it try again, pass `handle_parsing_errors=True` to the AgentExecutor. This is the error: Could not parse LLM output: `").removesuffix("`")
return response
| [] |
2024-01-10 | jmilldotdev/manifesting | manifest~caches~cache.py | """Cache for queries and responses."""
import json
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, Union
from manifest.response import CohereResponse, Response
RESPONSE_CONSTRUCTORS = {
"openai": Response,
"cohere": CohereResponse,
"ai21": Response,
"huggingface": Response,
"opt": Response,
"dummy": Response,
"zoo": Response,
}
def request_to_key(request: Dict) -> str:
"""
Normalize a request into a key.
Args:
request: request to normalize.
Returns:
normalized key.
"""
return json.dumps(request, sort_keys=True)
def key_to_request(key: str) -> Dict:
"""
Convert the normalized version to the request.
Args:
key: normalized key to convert.
Returns:
unnormalized request dict.
"""
return json.loads(key)
def response_to_key(response: Dict) -> str:
"""
Normalize a response into a key.
Args:
response: response to normalize.
Returns:
normalized key.
"""
return json.dumps(response, sort_keys=True)
def key_to_response(key: str) -> Dict:
"""
Convert the normalized version to the response.
Args:
key: normalized key to convert.
Returns:
unnormalized response dict.
"""
return json.loads(key)
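# Round-trip sketch (illustrative only): requests are normalized to deterministic, sorted
# JSON strings so that identical requests map to the same cache entry. The request fields
# below are arbitrary example values.
def _example_key_round_trip():
    request = {"prompt": "hello", "temperature": 0.0}
    key = request_to_key(request)          # '{"prompt": "hello", "temperature": 0.0}'
    assert key_to_request(key) == request  # lossless round trip
    return key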
class Cache(ABC):
"""A cache for request/response pairs."""
def __init__(self, connection_str: str, cache_args: Dict[str, Any] = {}):
"""
Initialize client.
cache_args are passed to client as default parameters.
For clients like OpenAI that do not require a connection,
the connection_str can be None.
Args:
connection_str: connection string for client.
cache_args: cache arguments.
"""
self.connect(connection_str, cache_args)
@abstractmethod
def close(self) -> None:
"""Close the client."""
raise NotImplementedError()
@abstractmethod
def connect(self, connection_str: str, cache_args: Dict[str, Any]) -> None:
"""
Connect to client.
Args:
connection_str: connection string.
"""
raise NotImplementedError()
@abstractmethod
def get_key(self, key: str, table: str = "default") -> Union[str, None]:
"""
Get the key for a request.
With return None if key is not in cache.
Args:
key: key for cache.
table: table to get key in.
"""
raise NotImplementedError()
@abstractmethod
def set_key(self, key: str, value: str, table: str = "default") -> None:
"""
Set the value for the key.
Will override old value.
Args:
key: key for cache.
value: new value for key.
table: table to set key in.
"""
raise NotImplementedError()
@abstractmethod
def commit(self) -> None:
"""Commit any results."""
raise NotImplementedError()
def get(
self, request: Dict, overwrite_cache: bool, compute: Callable[[], Dict]
) -> Response:
"""Get the result of request (by calling compute as needed)."""
key = request_to_key(request)
cached_response = self.get_key(key)
if cached_response and not overwrite_cache:
cached = True
response = key_to_response(cached_response)
else:
# Type Response
response = compute()
self.set_key(key, response_to_key(response))
cached = False
client_name = compute.__module__.split(".")[-1]
if client_name in RESPONSE_CONSTRUCTORS:
return RESPONSE_CONSTRUCTORS[client_name](response, cached, request)
else:
return Response(response, cached, request)
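# Minimal concrete cache, added purely as an illustration of the abstract interface above.
# It keeps everything in a process-local dict, so it is only suited to tests or quick
# experiments; the packaged sqlite/redis/noop backends are the ones intended for real use.
class _InMemoryCache(Cache):
    def connect(self, connection_str: str, cache_args: Dict[str, Any]) -> None:
        self._tables: Dict[str, Dict[str, str]] = {}

    def close(self) -> None:
        self._tables = {}

    def get_key(self, key: str, table: str = "default") -> Union[str, None]:
        return self._tables.get(table, {}).get(key)

    def set_key(self, key: str, value: str, table: str = "default") -> None:
        self._tables.setdefault(table, {})[key] = value

    def commit(self) -> None:
        pass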
| [] |
2024-01-10 | jmilldotdev/manifesting | manifest~clients~cohere.py | """Cohere client."""
import json
import logging
import os
from typing import Any, Callable, Dict, List, Optional, Tuple
import cohere
from manifest.clients.client import Client
logging.getLogger("cohere").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
COHERE_MODELS = {"small", "medium", "large", "xlarge"}
# Params are defined in https://docs.cohere.ai/generate-reference
COHERE_PARAMS = {
"model": ("model", "xlarge"),
"max_tokens": ("max_tokens", 20),
"temperature": ("temperature", 0.75),
"num_generations": ("num_generations", 1),
"k": ("k", 0),
"p": ("p", 0.75),
"frequency_penalty": ("frequency_penalty", 0.0),
"presence_penalty": ("presence_penalty", 0.0),
"stop_sequences": ("stop_sequences", []),
"return_likelihoods": ("return_likelihoods", ""),
"logit_bias": ("logit_bias", {}),
}
class CohereClient(Client):
"""Cohere client."""
def connect(
self,
connection_str: Optional[str] = None,
client_args: Dict[str, Any] = {},
) -> None:
"""
Connect to the Cohere server.
connection_str is passed as default COHERE_API_KEY if variable not set.
Args:
connection_str: connection string.
client_args: client arguments.
"""
api_key = os.environ.get("COHERE_API_KEY", connection_str)
if api_key is None:
raise ValueError(
"Cohere API key not set. Set COHERE_API_KEY environment "
"variable or pass through `connection_str`."
)
self.co = cohere.Client(api_key)
for key in COHERE_PARAMS:
setattr(self, key, client_args.pop(key, COHERE_PARAMS[key][1]))
if getattr(self, "model") not in COHERE_MODELS:
raise ValueError(
f"Invalid model {getattr(self, 'model')}. Must be {COHERE_MODELS}."
)
def close(self) -> None:
"""Close the client."""
def get_model_params(self) -> Dict:
"""
Get model params.
By getting model params from the server, we can add to request
and make sure cache keys are unique to model.
Returns:
model params.
"""
return {"model_name": "model", "model": getattr(self, "model")}
def get_model_inputs(self) -> List:
"""
Get allowable model inputs.
Returns:
model inputs.
"""
return list(COHERE_PARAMS.keys())
def get_request(
self, query: str, request_args: Dict[str, Any] = {}
) -> Tuple[Callable[[], Dict], Dict]:
"""
Get request string function.
Args:
query: query string.
Returns:
request function that takes no input.
request parameters as dict.
"""
request_params = {"prompt": query}
for key in COHERE_PARAMS:
request_params[COHERE_PARAMS[key][0]] = request_args.pop(
key, getattr(self, key)
)
def _run_generation() -> Dict:
try:
response = self.co.generate(**request_params)
return json.loads(
json.dumps(
response, default=lambda o: getattr(o, "__dict__", str(o))
)
)
except cohere.CohereError as e:
logger.error(e)
raise e
return _run_generation, request_params
def get_choice_logit_request(
self, query: str, gold_choices: List[str], request_args: Dict[str, Any] = {}
) -> Tuple[Callable[[], Dict], Dict]:
"""
Get request string function for choosing max choices.
Args:
query: query string.
gold_choices: choices for model to choose from via max logits.
Returns:
request function that takes no input.
request parameters as dict.
"""
raise NotImplementedError("Cohere does not support choice logit request.")
| [] |
2024-01-10 | jmilldotdev/manifesting | manifest~manifest.py | """Manifest class."""
import logging
from typing import Any, Iterable, List, Optional, Tuple, Union, cast
from tqdm.auto import tqdm
from manifest.caches.noop import NoopCache
from manifest.caches.redis import RedisCache
from manifest.caches.sqlite import SQLiteCache
from manifest.clients.ai21 import AI21Client
from manifest.clients.cohere import CohereClient
from manifest.clients.dummy import DummyClient
from manifest.clients.huggingface import HuggingFaceClient
from manifest.clients.openai import OpenAIClient
from manifest.clients.opt import OPTClient
from manifest.clients.zoo import ZooClient
from manifest.prompt import Prompt
from manifest.response import Response
from manifest.session import Session
logging.getLogger("openai").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
CLIENT_CONSTRUCTORS = {
"openai": OpenAIClient,
"cohere": CohereClient,
"ai21": AI21Client,
"huggingface": HuggingFaceClient,
"opt": OPTClient,
"dummy": DummyClient,
"zoo": ZooClient,
}
CACHE_CONSTRUCTORS = {
"redis": RedisCache,
"sqlite": SQLiteCache,
"noop": NoopCache,
}
class Manifest:
"""Manifest session object."""
def __init__(
self,
client_name: str = "openai",
client_connection: Optional[str] = None,
cache_name: str = "noop",
cache_connection: Optional[str] = None,
session_id: Optional[str] = None,
stop_token: str = "",
**kwargs: Any,
):
"""
Initialize manifest.
Args:
client_name: name of client.
client_connection: connection string for client.
cache_name: name of cache.
cache_connection: connection string for cache.
session_id: session id for user session cache.
None (default) means no session logging.
"_default" means generate new session id.
stop_token: stop token prompt generation.
Can be overridden in run
Remaining kwargs sent to client and cache.
"""
if client_name not in CLIENT_CONSTRUCTORS:
raise ValueError(
f"Unknown client name: {client_name}. "
f"Choices are {list(CLIENT_CONSTRUCTORS.keys())}"
)
if cache_name not in CACHE_CONSTRUCTORS:
raise ValueError(
f"Unknown cache name: {cache_name}. "
f"Choices are {list(CACHE_CONSTRUCTORS.keys())}"
)
self.client_name = client_name
# Must pass kwargs as dict for client "pop" methods removed used arguments
self.cache = CACHE_CONSTRUCTORS[cache_name]( # type: ignore
cache_connection, cache_args=kwargs
)
self.client = CLIENT_CONSTRUCTORS[client_name]( # type: ignore
client_connection, client_args=kwargs
)
if session_id is not None:
if session_id == "_default":
session_id = None
self.session = Session(session_id)
else:
self.session = None
if len(kwargs) > 0:
raise ValueError(f"{list(kwargs.items())} arguments are not recognized.")
self.stop_token = stop_token
def close(self) -> None:
"""Close the client and cache."""
self.client.close()
self.cache.close()
def run(
self,
prompt: Union[Prompt, str],
input: Optional[Any] = None,
gold_choices: Optional[List[str]] = None,
overwrite_cache: bool = False,
stop_token: Optional[str] = None,
return_response: bool = False,
**kwargs: Any,
) -> Union[str, List[str], Response]:
"""
Run the prompt.
Args:
prompt: prompt to run. If string, will cast to prompt.
input: input to prompt.
gold_choices: gold choices for max logit response (only HF models).
overwrite_cache: whether to overwrite cache.
stop_token: stop token for prompt generation.
Default is self.stop_token.
"" for no stop token.
Returns:
response from prompt.
"""
if isinstance(prompt, str):
prompt = Prompt(prompt)
stop_token = stop_token if stop_token is not None else self.stop_token
prompt_str = prompt(input)
# Must pass kwargs as dict for client "pop" methods removed used arguments
if gold_choices is None:
possible_request, full_kwargs = self.client.get_request(prompt_str, kwargs)
else:
try:
possible_request, full_kwargs = cast(
HuggingFaceClient, self.client
).get_choice_logit_request(prompt_str, gold_choices, kwargs)
except AttributeError:
raise ValueError("`gold_choices` only supported for HF models.")
if len(kwargs) > 0:
raise ValueError(f"{list(kwargs.items())} arguments are not recognized.")
        # Create cache key
cache_key = full_kwargs.copy()
# Make query model dependent
cache_key["client_name"] = self.client_name
# Make query prompt dependent
cache_key["prompt"] = prompt_str
response_obj = self.cache.get(cache_key, overwrite_cache, possible_request)
# Log session dictionary values
if self.session:
self.session.log_query(cache_key, response_obj.to_dict())
# Extract text results
if return_response:
return response_obj
else:
return response_obj.get_response(stop_token)
def run_batch(
self,
prompt: Prompt,
input: Optional[Iterable[Any]] = None,
gold_choices: Optional[List[str]] = None,
overwrite_cache: bool = False,
stop_token: Optional[str] = None,
return_response: bool = False,
verbose: bool = False,
**kwargs: Any,
) -> Iterable[Union[str, List[str], Response]]:
"""
Run the prompt on a batch of inputs.
Args:
prompt: prompt to run.
input: batch of inputs.
gold_choices: gold choices for max logit response (only HF models).
overwrite_cache: whether to overwrite cache.
stop_token: stop token for prompt generation.
Default is self.stop_token.
"" for no stop token.
Returns:
batch of responses.
"""
if isinstance(prompt, str):
raise ValueError(
"Prompt must be a Prompt object for batch run on data. "
"We only support strings in `manifest.run`."
)
if input is None:
input = [None]
return [
self.run(
prompt,
inp,
gold_choices,
overwrite_cache,
stop_token,
return_response,
**kwargs,
)
for inp in tqdm(input, desc="Running batch", disable=not verbose)
]
def save_prompt(self, name: str, prompt: Prompt) -> None:
"""
Save the prompt to the cache for long term storage.
Args:
name: name of prompt.
prompt: prompt to save.
"""
self.cache.set_key(name, prompt.serialize(), table="prompt")
def load_prompt(self, name: str) -> Prompt:
"""
Load the prompt from the cache.
Args:
name: name of prompt.
Returns:
Prompt saved with name.
"""
return Prompt.deserialize(self.cache.get_key(name, table="prompt"))
def get_last_queries(
self,
last_n: int = -1,
return_raw_values: bool = False,
stop_token: Optional[str] = None,
) -> List[Tuple[Any, Any]]:
"""
Get last n queries from current session.
        If last_n is -1, return all queries. By default only the prompt text and
        result text are returned, unless return_raw_values is True.
Args:
last_n: last n queries.
return_raw_values: whether to return raw values as dicts.
stop_token: stop token for prompt results to be applied to all results.
Returns:
last n list of queries and outputs.
"""
if self.session is None:
raise ValueError(
"Session was not initialized. Set `session_id` when loading Manifest."
)
stop_token = stop_token if stop_token is not None else self.stop_token
last_queries = self.session.get_last_queries(last_n)
if not return_raw_values:
last_queries = [
(
query["prompt"],
Response.from_dict(response).get_response(stop_token),
) # type: ignore
for query, response in last_queries
]
return last_queries
def open_explorer(self) -> None:
"""Open the explorer for jupyter widget."""
# Open explorer
# TODO: implement
pass
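# Usage sketch (illustrative only): the "dummy" client and "noop" cache need no credentials,
# so this snippet can run locally without API keys. The prompt text is an arbitrary example;
# real use would pass client_name="openai" (or another provider) plus its connection details.
def _example_manifest_run():
    manifest = Manifest(client_name="dummy", cache_name="noop")
    return manifest.run("What is the capital of France?")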
| [] |
2024-01-10 | hockeyiscool19/jordanGpt | app~utils~trainGpt~trainOpenAi.py | from PUBLIC_VARIABLES import OPENAI_API_KEY
import openai
if __name__ == "__main__":
openai.api_key = OPENAI_API_KEY
    # Upload the training file in binary mode; hyperparameters such as n_epochs
    # are set when creating the fine-tune job, not when uploading the file.
    res = openai.File.create(
        file=open(r"app\utils\data\trainData.jsonl", "rb"), purpose='fine-tune')
| [] |
2024-01-10 | hockeyiscool19/jordanGpt | app~utils~trainGpt~JORDAN_GPT.py | # https://python.langchain.com/docs/use_cases/question_answering/how_to/multi_retrieval_qa_router
import os
from langchain.prompts import PromptTemplate
from langchain.chains.router import MultiRetrievalQAChain
from langchain.embeddings import OpenAIEmbeddings
from langchain.document_loaders import TextLoader
from langchain.vectorstores import FAISS
from PUBLIC_VARIABLES import OPENAI_API_KEY, FINE_TUNING_JOB
from langchain.chains import LLMChain
import openai
from langchain.chat_models import ChatOpenAI
from app.utils.firebase import FIRE
from datetime import datetime
openai.api_key = OPENAI_API_KEY
FILES = [
"app/utils/data/resume.txt",
"app/utils/data/roleDescriptions.txt",
]
resume = TextLoader("app/utils/data/resume.txt").load_and_split()
resume_retriever = FAISS.from_documents(
resume, OpenAIEmbeddings()).as_retriever()
role = TextLoader("app/utils/data/roleDescriptions.txt").load_and_split()
role_retriever = FAISS.from_documents(role, OpenAIEmbeddings()).as_retriever()
RETRIEVER_INFO = [
{
"name": "resume",
"description": "Answers questions about Jordan Eisenmann's resume",
"retriever": resume_retriever
},
{
"name": "role descriptions",
"description": "Describes questions about Jordan Eisenmann's role descriptions",
"retriever": role_retriever
},
]
RESPOND_ROLE = """
You are Jordan Eisenman (Jordan) secretary, answering questions about his career and passions. Be relevant to this prompt. You answer
questions and use background information to assist. Redirect questions about "you" to Jordan.
Question: {question}
Background information: {retrieved}
"""
RESPOND_ROLE.format(question="what is your name",
retrieved="Jordan Eisenmann is my name")
RESPOND_PROMPT = PromptTemplate(template=RESPOND_ROLE, input_variables=[
"question", "retrieved"])
class JordanGpt:
def __init__(self, verbose=True):
# Initializing Retrieval chain
self.retriever_chain = MultiRetrievalQAChain.from_retrievers(ChatOpenAI(
model_name=FINE_TUNING_JOB, max_tokens=125), RETRIEVER_INFO, verbose=verbose)
# Initializing Response chain
self.chat = ChatOpenAI(model_name=FINE_TUNING_JOB, max_tokens=175)
self.respond_role = RESPOND_ROLE
self.conversation_chain = LLMChain(
llm=self.chat, verbose=verbose, prompt=RESPOND_PROMPT)
def logQuestionAnswer(self, question, answer, retrieved):
data = {
"messages": [{
"question": question,
"answer": answer,
"prompt": RESPOND_ROLE.format(question=question, retrieved=retrieved),
"time": datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}]
}
FIRE.load_dict(data, path='/jordanGpt/trainingData')
def logRetrieved(self, retrieved):
data = {"log": retrieved,
"time": datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
FIRE.load_dict(data, path='/logs')
def retrieve(self, question):
retrieved = self.retriever_chain.run(question)
return retrieved
def respond(self, question):
retrieved = self.retrieve(question)
self.logRetrieved(retrieved)
response = self.conversation_chain.run(
{"question": question, "retrieved": retrieved})
self.logQuestionAnswer(question, response, retrieved)
return response
JORDAN_GPT = JordanGpt(verbose=False)
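

# Illustrative usage sketch (not part of the original module): how a caller such as a
# Flask route might use the singleton. Assumes OPENAI_API_KEY, FINE_TUNING_JOB and the
# Firebase client in app.utils.firebase are already configured.
if __name__ == "__main__":
    print(JORDAN_GPT.respond("What kind of work does Jordan do?"))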
| [
"question",
"\n You are Jordan Eisenman (Jordan) secretary, answering questions about his career and passions. Be relevant to this prompt. You answer\n questions and use background information to assist. Redirect questions about \"you\" to Jordan. \n Question: {question}\n Background information: {retrieved}\n "
] |
2024-01-10 | EatoB/AutoGPT | autogpts~autogpt~autogpt~json_utils~utilities.py | """Utilities for the json_fixes package."""
import ast
import logging
import re
from typing import Any
logger = logging.getLogger(__name__)
def extract_dict_from_response(response_content: str) -> dict[str, Any]:
# Sometimes the response includes the JSON in a code block with ```
pattern = r"```([\s\S]*?)```"
match = re.search(pattern, response_content)
if match:
response_content = match.group(1).strip()
# Remove language names in code blocks
response_content = response_content.lstrip("json")
else:
# The string may contain JSON.
json_pattern = r"{.*}"
match = re.search(json_pattern, response_content)
if match:
response_content = match.group()
# Response content comes from OpenAI as a Python `str(content_dict)`.
# `literal_eval` does the reverse of `str(dict)`.
try:
return ast.literal_eval(response_content)
except BaseException as e:
logger.info(f"Error parsing JSON response with literal_eval {e}")
logger.debug(f"Invalid JSON received in response: {response_content}")
# TODO: How to raise an error here without causing the program to exit?
return {}
| [] |
2024-01-10 | jlonge4/haystack | haystack~nodes~audio~whisper_transcriber.py | import json
from typing import List, Optional, Dict, Any, Union, BinaryIO, Literal
import requests
from requests import PreparedRequest
from haystack import MultiLabel, Document
from haystack.errors import OpenAIError, OpenAIRateLimitError
from haystack.nodes.base import BaseComponent
from haystack.utils.import_utils import is_whisper_available
from haystack.lazy_imports import LazyImport
with LazyImport(message="Run 'pip install farm-haystack[inference]'") as torch_import:
import torch
WhisperModel = Literal["tiny", "small", "medium", "large", "large-v2"]
class WhisperTranscriber(BaseComponent):
"""
Transcribes audio files using OpenAI's Whisper. This class supports two underlying implementations:
    - API (default): Uses the OpenAI API and requires an API key. See the [OpenAI blog post](https://beta.openai.com/docs/api-reference/whisper) for more details.
- Local (requires installing Whisper): Uses the local installation
of [Whisper](https://github.com/openai/whisper).
To use Whisper locally, install it following the instructions on
the Whisper [GitHub repo](https://github.com/openai/whisper) and omit the `api_key` parameter.
To use the API implementation, provide an api_key. You can get one by signing up
for an [OpenAI account](https://beta.openai.com/).
For the supported audio formats, languages, and other parameters, see the
[Whisper API documentation](https://platform.openai.com/docs/guides/speech-to-text) and the official Whisper
[github repo](https://github.com/openai/whisper).
"""
# If it's not a decision component, there is only one outgoing edge
outgoing_edges = 1
def __init__(
self,
api_key: Optional[str] = None,
model_name_or_path: WhisperModel = "medium",
device: Optional[Union[str, "torch.device"]] = None,
api_base: str = "https://api.openai.com/v1",
) -> None:
"""
Creates a WhisperTranscriber instance.
:param api_key: OpenAI API key. If None, a local installation of Whisper is used.
:param model_name_or_path: Name of the model to use. If using a local installation of Whisper, set this to one of the following values: "tiny", "small", "medium", "large", "large-v2". If using
the API, set this value to: "whisper-1" (default).
:param device: Device to use for inference. Only used if you're using a local
installation of Whisper. If None, the device is automatically selected.
:param api_base: The OpenAI API Base url, defaults to `https://api.openai.com/v1`.
"""
super().__init__()
self.api_key = api_key
self.api_base = api_base
self.use_local_whisper = is_whisper_available() and self.api_key is None
if self.use_local_whisper:
import whisper
self._model = whisper.load_model(model_name_or_path, device=device)
else:
if api_key is None:
raise ValueError(
"Provide a valid api_key for OpenAI API. Alternatively, "
"install OpenAI Whisper (see [Whisper](https://github.com/openai/whisper) for more details)."
)
def transcribe(
self,
audio_file: Union[str, BinaryIO],
language: Optional[str] = None,
return_segments: bool = False,
translate: bool = False,
**kwargs,
) -> Dict[str, Any]:
"""
Transcribe an audio file.
:param audio_file: Path to the audio file or a binary file-like object.
:param language: Language of the audio file. If None, the language is automatically detected.
:param return_segments: If True, returns the transcription for each segment of the audio file. Supported with
local installation of whisper only.
:param translate: If True, translates the transcription to English.
:return: A dictionary containing the transcription text and metadata like timings, segments etc.
"""
transcript: Dict[str, Any] = {}
new_kwargs = {k: v for k, v in kwargs.items() if v is not None}
if language is not None:
new_kwargs["language"] = language
if self.use_local_whisper:
new_kwargs["return_segments"] = return_segments
transcript = self._invoke_local(audio_file, translate, **new_kwargs)
elif self.api_key:
transcript = self._invoke_api(audio_file, translate, **new_kwargs)
return transcript
def _invoke_api(
self, audio_file: Union[str, BinaryIO], translate: Optional[bool] = False, **kwargs
) -> Dict[str, Any]:
if isinstance(audio_file, str):
with open(audio_file, "rb") as f:
return self._invoke_api(f, translate, **kwargs)
else:
headers = {"Authorization": f"Bearer {self.api_key}"}
request = PreparedRequest()
url: str = (
f"{self.api_base}/audio/transcriptions" if not translate else f"{self.api_base}/audio/translations"
)
request.prepare(
method="POST",
url=url,
headers=headers,
data={"model": "whisper-1", **kwargs},
files=[("file", (audio_file.name, audio_file, "application/octet-stream"))],
)
response = requests.post(url, data=request.body, headers=request.headers, timeout=600)
if response.status_code != 200:
openai_error: OpenAIError
if response.status_code == 429:
openai_error = OpenAIRateLimitError(f"API rate limit exceeded: {response.text}")
else:
openai_error = OpenAIError(
f"OpenAI returned an error.\n"
f"Status code: {response.status_code}\n"
f"Response body: {response.text}",
status_code=response.status_code,
)
raise openai_error
return json.loads(response.content)
def _invoke_local(
self, audio_file: Union[str, BinaryIO], translate: Optional[bool] = False, **kwargs
) -> Dict[str, Any]:
torch_import.check()
if isinstance(audio_file, str):
with open(audio_file, "rb") as f:
return self._invoke_local(f, translate, **kwargs)
else:
return_segments = kwargs.pop("return_segments", None)
kwargs["task"] = "translate" if translate else "transcribe"
transcription = self._model.transcribe(audio_file.name, **kwargs)
if not return_segments:
transcription.pop("segments", None)
return transcription
def run(
self,
query: Optional[str] = None,
file_paths: Optional[List[str]] = None,
labels: Optional[MultiLabel] = None,
documents: Optional[List[Document]] = None,
meta: Optional[dict] = None,
): # type: ignore
"""
Transcribe audio files.
:param query: Ignored
:param file_paths: List of paths to audio files.
:param labels: Ignored
:param documents: Ignored
:param meta: Ignored
:return: A dictionary containing a list of Document objects, one for each input file.
"""
transcribed_documents: List[Document] = []
if file_paths:
for file_path in file_paths:
transcription = self.transcribe(file_path)
d = Document.from_dict(transcription, field_map={"text": "content"})
transcribed_documents.append(d)
output = {"documents": transcribed_documents}
return output, "output_1"
def run_batch(
self,
queries: Optional[Union[str, List[str]]] = None,
file_paths: Optional[List[str]] = None,
labels: Optional[Union[MultiLabel, List[MultiLabel]]] = None,
documents: Optional[Union[List[Document], List[List[Document]]]] = None,
meta: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,
params: Optional[dict] = None,
debug: Optional[bool] = None,
): # type: ignore
"""
Transcribe audio files.
:param queries: Ignored
:param file_paths: List of paths to audio files.
:param labels: Ignored
:param documents: Ignored
:param meta: Ignored
:param params: Ignored
:param debug: Ignored
"""
if file_paths and isinstance(file_paths[0], list):
all_files = []
for files_list in file_paths:
all_files += files_list
return self.run(file_paths=all_files)
return self.run(file_paths=file_paths)
| [] |
2024-01-10 | jlonge4/haystack | haystack~preview~components~audio~whisper_remote.py | import io
import logging
import os
from typing import Any, Dict, List, Optional, Union
from pathlib import Path
import openai
from haystack.preview import Document, component, default_from_dict, default_to_dict
from haystack.preview.dataclasses import ByteStream
logger = logging.getLogger(__name__)
API_BASE_URL = "https://api.openai.com/v1"
@component
class RemoteWhisperTranscriber:
"""
    Transcribes audio files using OpenAI's Whisper API. Requires an API key. See the
    [OpenAI blog post](https://beta.openai.com/docs/api-reference/whisper) for more details.
You can get one by signing up for an [OpenAI account](https://beta.openai.com/).
For the supported audio formats, languages, and other parameters, see the
[Whisper API documentation](https://platform.openai.com/docs/guides/speech-to-text)
"""
def __init__(
self,
api_key: Optional[str] = None,
model_name: str = "whisper-1",
organization: Optional[str] = None,
api_base_url: str = API_BASE_URL,
**kwargs,
):
"""
Transcribes a list of audio files into a list of Documents.
:param api_key: OpenAI API key.
:param model_name: Name of the model to use. It now accepts only `whisper-1`.
:param organization: The OpenAI-Organization ID, defaults to `None`. For more details, see OpenAI
[documentation](https://platform.openai.com/docs/api-reference/requesting-organization).
        :param api_base_url: OpenAI base URL, defaults to `"https://api.openai.com/v1"`.
:param kwargs: Other parameters to use for the model. These parameters are all sent directly to the OpenAI
endpoint. See OpenAI [documentation](https://platform.openai.com/docs/api-reference/audio) for more details.
Some of the supported parameters:
- `language`: The language of the input audio.
Supplying the input language in ISO-639-1 format
will improve accuracy and latency.
- `prompt`: An optional text to guide the model's
style or continue a previous audio segment.
The prompt should match the audio language.
- `response_format`: The format of the transcript
output, in one of these options: json, text, srt,
verbose_json, or vtt. Defaults to "json". Currently only "json" is supported.
- `temperature`: The sampling temperature, between 0
and 1. Higher values like 0.8 will make the output more
random, while lower values like 0.2 will make it more
focused and deterministic. If set to 0, the model will
use log probability to automatically increase the
temperature until certain thresholds are hit.
"""
# if the user does not provide the API key, check if it is set in the module client
api_key = api_key or openai.api_key
if api_key is None:
try:
api_key = os.environ["OPENAI_API_KEY"]
except KeyError as e:
raise ValueError(
"RemoteWhisperTranscriber expects an OpenAI API key. "
"Set the OPENAI_API_KEY environment variable (recommended) or pass it explicitly."
) from e
openai.api_key = api_key
self.organization = organization
self.model_name = model_name
self.api_base_url = api_base_url
# Only response_format = "json" is supported
whisper_params = kwargs
if whisper_params.get("response_format") != "json":
logger.warning(
"RemoteWhisperTranscriber only supports 'response_format: json'. This parameter will be overwritten."
)
whisper_params["response_format"] = "json"
self.whisper_params = whisper_params
if organization is not None:
openai.organization = organization
def to_dict(self) -> Dict[str, Any]:
"""
Serialize this component to a dictionary.
This method overrides the default serializer in order to
avoid leaking the `api_key` value passed to the constructor.
"""
return default_to_dict(
self,
model_name=self.model_name,
organization=self.organization,
api_base_url=self.api_base_url,
**self.whisper_params,
)
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "RemoteWhisperTranscriber":
"""
Deserialize this component from a dictionary.
"""
return default_from_dict(cls, data)
@component.output_types(documents=List[Document])
def run(self, sources: List[Union[str, Path, ByteStream]]):
"""
Transcribe the audio files into a list of Documents, one for each input file.
For the supported audio formats, languages, and other parameters, see the
[Whisper API documentation](https://platform.openai.com/docs/guides/speech-to-text) and the official Whisper
[github repo](https://github.com/openai/whisper).
:param audio_files: a list of ByteStream objects to transcribe.
:returns: a list of Documents, one for each file. The content of the document is the transcription text.
"""
documents = []
for source in sources:
if not isinstance(source, ByteStream):
path = source
source = ByteStream.from_file_path(Path(source))
source.metadata["file_path"] = path
file = io.BytesIO(source.data)
file.name = str(source.metadata["file_path"]) if "file_path" in source.metadata else "__fallback__.wav"
content = openai.Audio.transcribe(file=file, model=self.model_name, **self.whisper_params)
doc = Document(content=content["text"], meta=source.metadata)
documents.append(doc)
return {"documents": documents}
| [] |
2024-01-10 | jlonge4/haystack | test~preview~components~audio~test_whisper_remote.py | import os
from unittest.mock import patch
from pathlib import Path
import openai
import pytest
from openai.util import convert_to_openai_object
from haystack.preview.components.audio.whisper_remote import RemoteWhisperTranscriber
from haystack.preview.dataclasses import ByteStream
def mock_openai_response(response_format="json", **kwargs) -> openai.openai_object.OpenAIObject:
if response_format == "json":
dict_response = {"text": "test transcription"}
# Currently only "json" is supported.
else:
dict_response = {}
return convert_to_openai_object(dict_response)
class TestRemoteWhisperTranscriber:
@pytest.mark.unit
def test_init_no_key(self, monkeypatch):
openai.api_key = None
monkeypatch.delenv("OPENAI_API_KEY", raising=False)
error_msg = "RemoteWhisperTranscriber expects an OpenAI API key."
with pytest.raises(ValueError, match=error_msg):
RemoteWhisperTranscriber(api_key=None)
def test_init_key_env_var(self, monkeypatch):
openai.api_key = None
monkeypatch.setenv("OPENAI_API_KEY", "test_api_key")
RemoteWhisperTranscriber(api_key=None)
assert openai.api_key == "test_api_key"
def test_init_key_module_env_and_global_var(self, monkeypatch):
monkeypatch.setenv("OPENAI_API_KEY", "test_api_key_2")
openai.api_key = "test_api_key_1"
RemoteWhisperTranscriber(api_key=None)
# The module global variable takes preference
assert openai.api_key == "test_api_key_1"
@pytest.mark.unit
def test_init_default(self):
transcriber = RemoteWhisperTranscriber(api_key="test_api_key")
assert openai.api_key == "test_api_key"
assert transcriber.model_name == "whisper-1"
assert transcriber.organization is None
assert transcriber.api_base_url == "https://api.openai.com/v1"
assert transcriber.whisper_params == {"response_format": "json"}
@pytest.mark.unit
def test_init_custom_parameters(self):
transcriber = RemoteWhisperTranscriber(
api_key="test_api_key",
model_name="whisper-1",
organization="test-org",
api_base_url="test_api_url",
language="en",
prompt="test-prompt",
response_format="json",
temperature="0.5",
)
assert openai.api_key == "test_api_key"
assert transcriber.model_name == "whisper-1"
assert transcriber.organization == "test-org"
assert transcriber.api_base_url == "test_api_url"
assert transcriber.whisper_params == {
"language": "en",
"prompt": "test-prompt",
"response_format": "json",
"temperature": "0.5",
}
@pytest.mark.unit
def test_to_dict_default_parameters(self):
transcriber = RemoteWhisperTranscriber(api_key="test_api_key")
data = transcriber.to_dict()
assert data == {
"type": "haystack.preview.components.audio.whisper_remote.RemoteWhisperTranscriber",
"init_parameters": {
"model_name": "whisper-1",
"api_base_url": "https://api.openai.com/v1",
"organization": None,
"response_format": "json",
},
}
@pytest.mark.unit
def test_to_dict_with_custom_init_parameters(self):
transcriber = RemoteWhisperTranscriber(
api_key="test_api_key",
model_name="whisper-1",
organization="test-org",
api_base_url="test_api_url",
language="en",
prompt="test-prompt",
response_format="json",
temperature="0.5",
)
data = transcriber.to_dict()
assert data == {
"type": "haystack.preview.components.audio.whisper_remote.RemoteWhisperTranscriber",
"init_parameters": {
"model_name": "whisper-1",
"organization": "test-org",
"api_base_url": "test_api_url",
"language": "en",
"prompt": "test-prompt",
"response_format": "json",
"temperature": "0.5",
},
}
    def test_from_dict_with_default_parameters(self, monkeypatch):
monkeypatch.setenv("OPENAI_API_KEY", "test_api_key")
data = {
"type": "haystack.preview.components.audio.whisper_remote.RemoteWhisperTranscriber",
"init_parameters": {
"model_name": "whisper-1",
"api_base_url": "https://api.openai.com/v1",
"organization": None,
"response_format": "json",
},
}
transcriber = RemoteWhisperTranscriber.from_dict(data)
assert openai.api_key == "test_api_key"
assert transcriber.model_name == "whisper-1"
assert transcriber.organization is None
assert transcriber.api_base_url == "https://api.openai.com/v1"
assert transcriber.whisper_params == {"response_format": "json"}
def test_from_dict_with_custom_init_parameters(self, monkeypatch):
monkeypatch.setenv("OPENAI_API_KEY", "test_api_key")
data = {
"type": "haystack.preview.components.audio.whisper_remote.RemoteWhisperTranscriber",
"init_parameters": {
"model_name": "whisper-1",
"organization": "test-org",
"api_base_url": "test_api_url",
"language": "en",
"prompt": "test-prompt",
"response_format": "json",
"temperature": "0.5",
},
}
transcriber = RemoteWhisperTranscriber.from_dict(data)
assert openai.api_key == "test_api_key"
assert transcriber.model_name == "whisper-1"
assert transcriber.organization == "test-org"
assert transcriber.api_base_url == "test_api_url"
assert transcriber.whisper_params == {
"language": "en",
"prompt": "test-prompt",
"response_format": "json",
"temperature": "0.5",
}
    def test_from_dict_with_default_parameters_no_env_var(self, monkeypatch):
openai.api_key = None
monkeypatch.delenv("OPENAI_API_KEY", raising=False)
data = {
"type": "haystack.preview.components.audio.whisper_remote.RemoteWhisperTranscriber",
"init_parameters": {
"model_name": "whisper-1",
"api_base_url": "https://api.openai.com/v1",
"organization": None,
"response_format": "json",
},
}
with pytest.raises(ValueError, match="RemoteWhisperTranscriber expects an OpenAI API key."):
RemoteWhisperTranscriber.from_dict(data)
@pytest.mark.unit
def test_run_str(self, preview_samples_path):
with patch("haystack.preview.components.audio.whisper_remote.openai.Audio") as openai_audio_patch:
model = "whisper-1"
file_path = str(preview_samples_path / "audio" / "this is the content of the document.wav")
openai_audio_patch.transcribe.side_effect = mock_openai_response
transcriber = RemoteWhisperTranscriber(api_key="test_api_key", model_name=model, response_format="json")
result = transcriber.run(sources=[file_path])
assert result["documents"][0].content == "test transcription"
assert result["documents"][0].meta["file_path"] == file_path
@pytest.mark.unit
def test_run_path(self, preview_samples_path):
with patch("haystack.preview.components.audio.whisper_remote.openai.Audio") as openai_audio_patch:
model = "whisper-1"
file_path = preview_samples_path / "audio" / "this is the content of the document.wav"
openai_audio_patch.transcribe.side_effect = mock_openai_response
transcriber = RemoteWhisperTranscriber(api_key="test_api_key", model_name=model, response_format="json")
result = transcriber.run(sources=[file_path])
assert result["documents"][0].content == "test transcription"
assert result["documents"][0].meta["file_path"] == file_path
@pytest.mark.unit
def test_run_bytestream(self, preview_samples_path):
with patch("haystack.preview.components.audio.whisper_remote.openai.Audio") as openai_audio_patch:
model = "whisper-1"
file_path = preview_samples_path / "audio" / "this is the content of the document.wav"
openai_audio_patch.transcribe.side_effect = mock_openai_response
transcriber = RemoteWhisperTranscriber(api_key="test_api_key", model_name=model, response_format="json")
with open(file_path, "rb") as audio_stream:
byte_stream = audio_stream.read()
audio_file = ByteStream(byte_stream, metadata={"file_path": str(file_path.absolute())})
result = transcriber.run(sources=[audio_file])
assert result["documents"][0].content == "test transcription"
assert result["documents"][0].meta["file_path"] == str(file_path.absolute())
@pytest.mark.skipif(
not os.environ.get("OPENAI_API_KEY", None),
reason="Export an env var called OPENAI_API_KEY containing the OpenAI API key to run this test.",
)
@pytest.mark.integration
def test_whisper_remote_transcriber(self, preview_samples_path):
transcriber = RemoteWhisperTranscriber(api_key=os.environ.get("OPENAI_API_KEY"))
paths = [
preview_samples_path / "audio" / "this is the content of the document.wav",
str(preview_samples_path / "audio" / "the context for this answer is here.wav"),
ByteStream.from_file_path(preview_samples_path / "audio" / "answer.wav"),
]
output = transcriber.run(sources=paths)
docs = output["documents"]
assert len(docs) == 3
assert docs[0].content.strip().lower() == "this is the content of the document."
assert preview_samples_path / "audio" / "this is the content of the document.wav" == docs[0].meta["file_path"]
assert docs[1].content.strip().lower() == "the context for this answer is here."
assert (
str(preview_samples_path / "audio" / "the context for this answer is here.wav") == docs[1].meta["file_path"]
)
assert docs[2].content.strip().lower() == "answer."
| [] |
2024-01-10 | franzinc/agraph-examples | agvector~llm_utils.py | from franz.openrdf.connect import ag_connect
from franz.openrdf.query.query import QueryLanguage
from franz.openrdf.model.value import URI
from franz.openrdf.vocabulary import RDF, RDFS
import urllib.request
from pprint import pprint
import datetime
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.schema.document import Document
import shortuuid
import textwrap
def read_text(url: str):
with urllib.request.urlopen(url) as f:
contents = f.read().decode('utf-8')
return contents
def print_text(string: str):
string = string.replace('\\n', '').replace('\\r', ' ')
wrapper = textwrap.TextWrapper(width=100)
word_list = wrapper.wrap(text=string)
for element in word_list:
print(element)
class FindNearestNeighbors:
def __init__(self, conn, phrase, vector_db, number=10, confidence=.5):
self.conn = conn
self.f = conn.namespace('http://franz.com/llm/')
self.phrase = phrase
self.vector_db = vector_db
self.number = number
self.confidence = confidence
self.df = self.query(conn, phrase, vector_db, number, confidence)
try:
print_text(self.df['originalText'][0])
        except Exception:
print(f"No Neighbor found with confidence score {confidence}")
def query(self, conn, phrase, vector_db, number, confidence):
query_string = f"""
PREFIX llm: <http://franz.com/ns/allegrograph/8.0.0/llm/>
select * where {{
(?uri ?score ?originalText) llm:nearestNeighbor ("{phrase}" "{vector_db}" {str(number)} {str(confidence)}) }}"""
with conn.executeTupleQuery(query_string) as result:
df = result.toPandas()
return df
def proof(self):
for i in range(self.df.shape[0]):
print(i, self.df['uri'][i], self.df['score'][i])
print_text(self.df['originalText'][i])
print()
def add_neighbors_to_graph(self):
neighbor_uri = self.conn.createURI(f'http://franz.com/llm/neighbor/{shortuuid.uuid()}')
triples = [
(neighbor_uri, RDF.TYPE, self.f.Neighbor, neighbor_uri),
(neighbor_uri, self.f.phrase, self.conn.createLiteral(self.phrase), neighbor_uri),
(neighbor_uri, self.f.vectorDB, self.vector_db, neighbor_uri),
(neighbor_uri, self.f.confidence, self.confidence, neighbor_uri),
(neighbor_uri, self.f.datetime, datetime.datetime.now(), neighbor_uri)]
for i in range(self.df.shape[0]):
neighbor_score = self.conn.createBNode()
triples.append((neighbor_uri, self.f.link, neighbor_score, neighbor_uri))
triples.append((neighbor_score, self.f.confidenceScore, self.df['score'][i], neighbor_uri))
triples.append((neighbor_score, self.f.index, i, neighbor_uri))
triples.append((neighbor_score, self.f.neighbor, self.conn.createURI(self.df['uri'][i][1:-1]), neighbor_uri))
self.conn.addTriples(triples)
def clear_neighbors(conn):
query_string = """select ?neighbor where { ?neighbor a <http://franz.com/llm/Neighbor> }"""
with conn.executeTupleQuery(query_string) as result:
df = result.toPandas()
for neighbor in list(df['neighbor']):
conn.remove(None, None, None, neighbor)
class AskMyDocuments:
def __init__(self, conn, question, vector_db, number=10, confidence=.5):
self.conn = conn
self.f = conn.namespace('http://franz.com/llm/')
self.question = question
self.vector_db = vector_db
self.number = number
self.confidence = confidence
self.df = self.query(conn, question, vector_db, number, confidence)
try:
print_text(self.df['response'][0])
        except Exception:
print(f"No response found with confidence score {confidence}")
def query(self, conn, question, vector_db, number, confidence):
query_string = f"""PREFIX llm: <http://franz.com/ns/allegrograph/8.0.0/llm/>
select * where {{
(?response ?score ?citation ?content) llm:askMyDocuments ("{question}" "{vector_db}" {str(number)} {str(confidence)}). }}"""
with conn.executeTupleQuery(query_string) as result:
df = result.toPandas()
return df
def proof(self):
for i in range(self.df.shape[0]):
print(i, self.df['score'][i], self.df['citation'][i])
print_text(self.df['content'][i])
print()
def add_evidence_to_graph(self):
if self.df.shape[0] > 0:
evidence_uri = self.conn.createURI(f'http://franz.com/llm/evidence/{shortuuid.uuid()}')
triples = [
(evidence_uri, RDF.TYPE, self.f.Question, evidence_uri),
(evidence_uri, self.f.question, self.conn.createLiteral(self.question), evidence_uri),
(evidence_uri, self.f.vectorDB, self.vector_db, evidence_uri),
(evidence_uri, self.f.confidence, self.confidence, evidence_uri),
(evidence_uri, self.f.datetime, datetime.datetime.now(), evidence_uri),
(evidence_uri, self.f.response, self.conn.createLiteral(self.df['response'][0]), evidence_uri)]
for i in range(self.df.shape[0]):
evidence_score = self.conn.createBNode()
triples.append((evidence_uri, self.f.link, evidence_score, evidence_uri))
triples.append((evidence_score, self.f.confidenceScore, self.df['score'][i], evidence_uri))
triples.append((evidence_score, self.f.index, i, evidence_uri))
triples.append((evidence_score, self.f.evidence, self.conn.createURI(self.df['citation'][i][1:-1]), evidence_uri))
self.conn.addTriples(triples)
else:
print("No evidence found")
def clear_questions(conn):
query_string = """select ?question where { ?question a <http://franz.com/llm/Question> }"""
with conn.executeTupleQuery(query_string) as result:
df = result.toPandas()
for question in list(df['question']):
conn.remove(None, None, None, question)
class BufferTriples:
def __init__(self, conn, max_size=10000):
self.conn = conn
self.buffer_triples = []
self.max_size = max_size
def add(self, triple):
if len(self.buffer_triples) < self.max_size:
self.buffer_triples.append(triple)
else:
self.conn.addTriples(self.buffer_triples)
self.buffer_triples = [triple]
def flush_triples(self):
self.conn.addTriples(self.buffer_triples)
self.buffer_triples=[]
def addArbitraryTextString(conn, f, buffer, text, id, chunk_size=1000, chunk_overlap=10):
documents = [Document(page_content=text)]
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
docs = text_splitter.split_documents(documents)
if isinstance(id, str): id_uri = conn.createURI(id)
elif isinstance(id, URI): id_uri = id
elif isinstance(id, int): id_uri = conn.createURI(f"http://franz.com/llm/{str(id)}")
id_str = id_uri.localname
for i, doc in enumerate(docs):
doc_id = conn.createURI(f"http://franz.com/llm/{id_str}_{str(i)}")
buffer.add((id_uri, f.chunk, doc_id, id_uri))
buffer.add((doc_id, RDF.TYPE, f.Chunk, id_uri))
content = docs[i].page_content
buffer.add((doc_id, f.text, docs[i].page_content, id_uri))
buffer.add((doc_id, f.section, i, id_uri))
return buffer
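

# Illustrative usage sketch (not part of the original module). The repository name,
# credentials and vector-store name are placeholders; everything else is defined or
# imported in this file.
if __name__ == "__main__":
    with ag_connect('llm-demo', user='user', password='secret') as conn:
        neighbors = FindNearestNeighbors(conn, "graph databases", "demo-vdb", number=5, confidence=0.4)
        neighbors.proof()
        answer = AskMyDocuments(conn, "What is AllegroGraph used for?", "demo-vdb")
        answer.add_evidence_to_graph()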
| [] |
2024-01-10 | Aryann15/Torque | bots~venv~csv_bot.py | from flask import Flask, request
from langchain.agents import create_csv_agent
from langchain.llms import OpenAI
from dotenv import load_dotenv
app = Flask(__name__)
@app.route('/parse-csv', methods=['POST'])
def parse_csv():
csv_file = request.files['csv']
csv_file.save('uploaded.csv')
return 'CSV uploaded'
@app.route('/answer', methods=['POST'])
def answer():
csv_file = request.files['csv']
csv_file.save('uploaded.csv')
question = request.json['question']
llm = OpenAI(temperature=0)
    agent = create_csv_agent(llm, 'uploaded.csv', verbose=True)
response = agent.run(question)
return response
load_dotenv()
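

# Hypothetical entry point (not in the original file): run the Flask app locally.
if __name__ == '__main__':
    app.run(port=5000, debug=True)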
| [] |
2024-01-10 | Aryann15/Torque | src~components~csv_agent.py | from langchain.agents import create_csv_agents
from langchain.llms import OpenAI
from dotenv import load_dotenv
def create_csv_agent(csv_file,user_Question):
load_dotenv()
if csv_file is not None:
llm = OpenAI(temperature=0)
agent = create_csv_agent(llm, csv_file, verbose=True)
if __name__ == '__main__':
create_csv_agent() | [] |
2024-01-10 | 5l1v3r1/langchain-serve | examples~hrbot~helper.py | import json
import os
from typing import List
from langchain.llms import OpenAI
from langchain.tools import Tool
try:
from gdrive import download_file
except ImportError:
from .gdrive import download_file
def load_tools_from_disk(llm: OpenAI, path: str):
from langchain.chains import RetrievalQA
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
tools = []
embeddings = OpenAIEmbeddings()
for root, _, files in os.walk(path):
for file in files:
if file.endswith(".faiss"):
index_name = file.replace(".faiss", "")
faiss_index = FAISS.load_local(
folder_path=root, embeddings=embeddings, index_name=index_name
)
print(f'Loaded {index_name} from local')
docs_chain = RetrievalQA.from_chain_type(
llm=llm, chain_type="stuff", retriever=faiss_index.as_retriever()
)
# read a json file with the name *-tool.json and create a tool for it
tool_json = os.path.join(root, f"{index_name}-tool.json")
if os.path.exists(tool_json):
with open(tool_json, "r") as f:
tool_dict = json.load(f)
tools.append(
Tool(
name=tool_dict["name"],
func=docs_chain.run,
description=tool_dict["description"],
return_direct=True,
)
)
return tools
def index_pdf(llm, name: str, path: str, url: str = None):
from langchain.chains import RetrievalQA
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
embeddings = OpenAIEmbeddings()
index_name = name.replace(" ", "_").lower()
try:
faiss_index = FAISS.load_local(
folder_path=path, embeddings=embeddings, index_name=index_name
)
print(f'Loaded {index_name} from local')
except Exception as e:
print(f'Failed to load {index_name} from local, building from scratch')
loader = PyPDFLoader(url)
pages = loader.load_and_split()
print(f'Total {len(pages)} pages indexed')
faiss_index = FAISS.from_documents(pages, embedding=embeddings)
faiss_index.save_local(folder_path=path, index_name=index_name)
return RetrievalQA.from_chain_type(
llm=llm, chain_type="stuff", retriever=faiss_index.as_retriever()
)
def prune_files_on_disk(path: str, tool_paths: List[str]):
"""
Each tool_path is in format {name}-tool.json.
We need to extract `name` from each tool_path. Each {name}.faiss, {name}.pkl, {name}-tool.json should be kept in `path` folder.
All other files should be deleted from `path` folder.
"""
tool_names = [
os.path.basename(tool_path).replace("-tool.json", "")
for tool_path in tool_paths
]
for root, _, files in os.walk(path):
for file in files:
if (
file.endswith(".faiss")
or file.endswith(".pkl")
or file.endswith("-tool.json")
):
if (
file.replace(".faiss", "")
.replace(".pkl", "")
.replace("-tool.json", "")
not in tool_names
):
os.remove(os.path.join(root, file))
def index_pdfs_and_save(
service,
pdf_files: List[dict],
basedir: str,
llm: OpenAI,
):
base_description = """\
Useful when you need to answer questions about {description}. \
    Input should be a fully formed question."""
tools_stored = []
for pdf_file in pdf_files:
pdf_name: str = pdf_file['name']
        # rstrip('.pdf') would strip any trailing '.', 'p', 'd' or 'f' characters;
        # removesuffix (Python 3.9+) drops only the extension as intended
        tool_name = pdf_name.lstrip('/').removesuffix('.pdf')
pdf_file_path = os.path.join(basedir, pdf_name.lstrip('/').lower())
name = pdf_file_path.replace(" ", "_").lower() + '.' + pdf_file['md5']
if not os.path.exists(os.path.dirname(name)):
os.makedirs(os.path.dirname(name))
# If index cache doesn't exist, download & index
if not os.path.exists(os.path.join(basedir, f'{name}.faiss')):
download_file(service, pdf_file['id'], pdf_file_path)
chain = index_pdf(llm, name=name, path=basedir, url=pdf_file_path)
os.remove(pdf_file_path)
else:
chain = index_pdf(llm, name=name, path=basedir)
tool = Tool(
name=tool_name,
func=chain.run,
description=base_description.format(
description=pdf_file['description'] or tool_name
),
return_direct=True,
)
# save the tool to disk
tool_json = os.path.join(basedir, f"{name}-tool.json")
with open(tool_json, "w") as f:
f.write(tool.json(exclude={"func", "coroutine"}))
tools_stored.append(tool_json)
prune_files_on_disk(basedir, tools_stored)
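

# Illustrative sketch (not part of the original module): reload previously indexed PDFs
# from disk as LangChain tools. The base directory and temperature are placeholders.
if __name__ == "__main__":
    llm = OpenAI(temperature=0)
    tools = load_tools_from_disk(llm, path="indexes/")
    print([tool.name for tool in tools])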
| [] |
2024-01-10 | 5l1v3r1/langchain-serve | lcserve~backend~slackbot~slackbot.py | import json
import os
import time
from functools import lru_cache
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
List,
Optional,
Tuple,
Union,
)
from urllib.parse import urlparse
from jina.logging.logger import JinaLogger
from langchain.agents import ConversationalAgent
from langchain.memory import ChatMessageHistory
from langchain.output_parsers import StructuredOutputParser
from langchain.prompts import PromptTemplate
from langchain.schema import ChatMessage
from langchain.tools import StructuredTool
from langchain.tools.base import ToolException
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
from tenacity import retry, stop_after_attempt, wait_exponential
if TYPE_CHECKING:
from opentelemetry.sdk.metrics import Counter
from ..langchain_helper import OpenAICallbackHandler, TracingCallbackHandler
PROGRESS_MESSAGE = "Processing..."
class SlackBot:
_logger = JinaLogger('SlackBot')
def __init__(
self,
workspace: str,
tracing_handler: Union['OpenAICallbackHandler', 'TracingCallbackHandler'],
request_counter: Optional['Counter'] = None,
duration_counter: Optional['Counter'] = None,
):
from langchain.output_parsers import PydanticOutputParser
from slack_bolt import App
from slack_bolt.adapter.fastapi import SlackRequestHandler
try:
from helper import TextOrBlock
except ImportError:
from .helper import TextOrBlock
self.slack_app = App()
self.workspace = workspace
self.request_counter = request_counter
self.duration_counter = duration_counter
self.tracing_handler = tracing_handler
self.handler = SlackRequestHandler(self.slack_app)
self._parser = PydanticOutputParser(pydantic_object=TextOrBlock)
@staticmethod
def slack_client() -> WebClient:
return WebClient(token=os.environ.get('SLACK_BOT_TOKEN'))
@staticmethod
def get_username(userid: str) -> str:
try:
response = SlackBot.slack_client().users_profile_get(user=userid)
return response.data['profile']['real_name']
except Exception as e:
return None
@classmethod
def extract_channel_ts(cls, url):
try:
parsed_url = urlparse(url)
if not all([parsed_url.scheme, parsed_url.netloc, parsed_url.path]):
return None, None
path_parts: List[str] = parsed_url.path.split('/')
if len(path_parts) != 4:
return None, None
channel_id = path_parts[2]
thread_ts = (
path_parts[3].replace('p', '', 1)[:10]
+ '.'
+ path_parts[3].replace('p', '', 1)[10:]
)
return channel_id, thread_ts
except Exception as e:
cls._logger.error(f"Error extracting channel and ts from url: {e}")
return None, None
@classmethod
def get_history(cls, channel: str, ts: str) -> ChatMessageHistory:
cls._logger.debug(f"Getting history for {channel} {ts}")
response = cls.slack_client().conversations_replies(channel=channel, ts=ts)
msgs: List[Dict] = response["messages"]
history = ChatMessageHistory()
def _extract_text_from_blocks(user: str, blocks: Union[List, Dict]):
if isinstance(blocks, dict):
for key, value in blocks.items():
if key == 'text' and isinstance(value, dict):
history.add_message(
ChatMessage(
content=value['text'],
role=user,
additional_kwargs={"id": user},
)
)
elif key == 'text' and isinstance(value, str):
history.add_message(
ChatMessage(
content=value,
role=user,
additional_kwargs={"id": user},
)
)
else:
_extract_text_from_blocks(user=user, blocks=value)
elif isinstance(blocks, list):
for item in blocks:
_extract_text_from_blocks(user=user, blocks=item)
# read all but the last message
for msg in msgs[:-1]:
if msg.get("type") != "message":
# TODO: not sure how to handle this
continue
if 'blocks' in msg:
if 'user' in msg:
username = SlackBot.get_username(msg['user']) or msg['user']
user = f"Human ({username})"
elif 'bot_id' in msg:
user = msg['bot_id']
_extract_text_from_blocks(user=user, blocks=msg['blocks'])
text: str = msg.get("text")
if 'bot_id' in msg:
if text.strip() in ("", PROGRESS_MESSAGE):
continue
history.add_message(
ChatMessage(
content=text, role="AI", additional_kwargs={"id": msg["bot_id"]}
)
)
elif 'user' in msg:
username = SlackBot.get_username(msg['user']) or msg['user']
history.add_message(
ChatMessage(
content=text,
role=f"Human ({username})",
)
)
return history
@classmethod
def slack_messages(cls, url: str) -> str:
"""\
Get chat messages from an existing slack conversation url. \
It is important to note that this URL should already be present in the conversation history, in the format `https://<workspace>.slack.com/archives/<channel_id>/<thread_ts>`. \
You are not permitted to generate or make up these URLs. \
If you can't find the url, please ask the user to provide it to you.
"""
cls._logger.debug(f"Getting slack messages from {url}")
if url.startswith('url='):
url = url[4:]
# if url is wrapped with '' or "" or <>, remove them
if url.startswith("'") and url.endswith("'"):
url = url[1:-1]
elif url.startswith('"') and url.endswith('"'):
url = url[1:-1]
elif url.startswith('<') and url.endswith('>'):
url = url[1:-1]
channel, ts = SlackBot.extract_channel_ts(url)
if channel is None or ts is None:
raise ToolException(
f"Invalid URL `{url}` received, could not extract channel and ts"
)
try:
history = SlackBot.get_history(channel, ts)
except Exception as e:
_err_msg = (
f"Invalid URL `{url}` received, could not extract channel and ts as {e}"
)
if isinstance(e, SlackApiError):
if e.response["error"] == "not_in_channel":
_err_msg = f"Cannot access the channel `{channel}`. Please add me to the channel and try again."
elif e.response["error"] == "channel_not_found":
_err_msg = f"Channel `{channel}` was not found. Please check the URL and try again."
elif e.response["error"] == "thread_not_found":
_err_msg = f"Thread `{ts}` was not found. Please check the URL and try again."
raise ToolException(_err_msg)
return json.dumps([{msg.role: msg.content} for msg in history.messages])
@staticmethod
@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, max=10))
def send_message(
client: WebClient,
channel: str,
ts: str,
text: str = None,
blocks: List[Dict] = None,
) -> Tuple[str, str]:
if text is not None:
response = client.chat_postMessage(channel=channel, thread_ts=ts, text=text)
elif blocks is not None:
response = client.chat_postMessage(
channel=channel, thread_ts=ts, blocks=blocks
)
else:
raise ValueError("Either text or blocks must be specified")
return response["channel"], response["ts"]
@staticmethod
@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, max=10))
def update_message(
client: WebClient,
channel: str,
ts: str,
text: str = None,
blocks: List[Dict] = None,
):
if text is not None:
client.chat_update(channel=channel, ts=ts, text=text)
elif blocks is not None:
client.chat_update(channel=channel, ts=ts, text=text, blocks=blocks)
else:
raise ValueError("Either text or blocks must be specified")
@staticmethod
def send(
client: WebClient,
channel: str,
thread_ts: str,
parser: StructuredOutputParser,
progress_message: str = PROGRESS_MESSAGE,
):
try:
from helper import TextOrBlock
except ImportError:
from .helper import TextOrBlock
# send a progress message first on the thread
channel, ts = SlackBot.send_message(
client, channel, thread_ts, progress_message
)
def __call__(text: Union[str, Generator[str, None, None]]):
message_text = ""
if isinstance(text, Generator):
for i, t in enumerate(text):
message_text += t
SlackBot.update_message(client, channel, ts, message_text)
else:
try:
textOrBlock: TextOrBlock = parser.parse(text)
except Exception as e:
SlackBot.update_message(client, channel, ts, text=text)
return
if textOrBlock.kind == "text":
SlackBot.update_message(
client=client,
channel=channel,
ts=ts,
text=textOrBlock.text,
)
elif textOrBlock.kind == "block":
SlackBot.update_message(
client=client,
channel=channel,
ts=ts,
text="Answer:",
blocks=[b.dict() for b in textOrBlock.blocks],
)
return __call__
@staticmethod
def command_send(
client: WebClient,
channel: str,
user_id: str,
command: str,
):
# send a progress message first on the thread
response = client.chat_postMessage(
channel=channel,
text=f'<@{user_id}> Acknowledged request. Running `{command}`',
)
def __call__(text: str):
client.chat_postMessage(
channel=channel, text=text, thread_ts=response["ts"]
)
return
return __call__
@classmethod
@lru_cache
def get_slack_url(cls):
response = cls.slack_client().auth_test()
return response["url"]
@lru_cache(maxsize=128)
def is_bot_in_channel(self, client: WebClient, channel: str) -> bool:
try:
bot_id = client.auth_test()["user_id"]
response = client.conversations_members(channel=channel)
return bot_id in response["members"]
except SlackApiError as e:
self._logger.error(f"Error while checking if bot is in channel {e}")
return False
@staticmethod
def get_agent_tools() -> List[StructuredTool]:
return [
StructuredTool.from_function(
func=SlackBot.slack_messages,
handle_tool_error=True,
)
]
@staticmethod
def get_agent_prompt_prefix() -> str:
return """
As an AI bot on Slack, your primary objective is to provide substantial assistance to one or more human users within a Slack thread. \
Your mission is to facilitate the completion of tasks through a strategic approach, gathering comprehensive information by posing pertinent questions to refine your understanding of the users' needs. \
Not only should you deliver precise, insightful responses to aid users in task fulfillment, \
but also be proactive in offering innovative solutions and suggestions they may not have considered. \
If a slack url is provided, you can clean it up and pass it to any existing tools. \
If the answer contains `Human (userid)`, replace it with `<@userid>`.
TOOLS:
------
Assistant has access to the following tools:
"""
@staticmethod
def get_agent_prompt_suffix() -> str:
return """Begin!
Previous conversation history:
{chat_history}
Human: {input}
{agent_scratchpad}"""
@staticmethod
def get_agent_prompt() -> PromptTemplate:
return ConversationalAgent.create_prompt(
tools=SlackBot.get_agent_tools(),
prefix=SlackBot.get_agent_prompt_prefix(),
suffix=SlackBot.get_agent_prompt_suffix(),
)
def metrics_decorator(self, func):
def wrapper_timer(*args, **kwargs):
start_time = time.perf_counter()
result = func(*args, **kwargs)
end_time = time.perf_counter()
elapsed_time = end_time - start_time
if self.request_counter:
self.request_counter.add(1)
if self.duration_counter:
self.duration_counter.add(elapsed_time)
return result
return wrapper_timer
def app_mention(self, func):
func = self.metrics_decorator(func)
@self.slack_app.event('app_mention')
def wrapper(client: WebClient, body, context):
_event: Dict = body["event"]
_channel = _event["channel"]
_thread_ts = _event.get("thread_ts", _event["ts"])
_user = _event["user"]
if "text" in _event:
_message = _event["text"]
elif "message" in _event:
_message = _event["message"]["text"]
_thread_ts = _event["message"].get("ts", _thread_ts)
self._logger.info(
f"App mentioned by user `{_user}` in channel `{_channel}`. Message: `{_message}` "
)
if not self.is_bot_in_channel(client, _channel):
# send a DM to the user to invite the bot to the channel
client.chat_postMessage(
channel=_user,
text=f"Unfortunately, I'm not in the channel (ID: {_channel}), you mentioned me in. Please invite me there and try again.",
)
return
func(
message=_message,
prompt=SlackBot.get_agent_prompt(),
history=SlackBot.get_history(_channel, _thread_ts),
tools=SlackBot.get_agent_tools(),
reply=SlackBot.send(
client=client,
channel=_channel,
thread_ts=_thread_ts,
parser=self._parser,
),
tracing_handler=self.tracing_handler,
workspace=self.workspace,
user=_user,
context=context,
)
return wrapper
def message(self, func):
func = self.metrics_decorator(func)
@self.slack_app.event('message')
def wrapper(client, body, context):
_event: Dict = body["event"]
_channel = _event["channel"]
_thread_ts = _event.get("thread_ts", _event["ts"])
if "text" in _event:
_message = _event["text"]
elif "message" in _event:
_message = _event["message"]["text"]
_thread_ts = _event["message"].get("ts", _thread_ts)
self._logger.info(
f"DM received in channel `{_channel}`. Message: `{_message}` "
)
func(
message=_message,
prompt=SlackBot.get_agent_prompt(),
history=SlackBot.get_history(_channel, _thread_ts),
tools=SlackBot.get_agent_tools(),
reply=SlackBot.send(
client=client,
channel=_channel,
thread_ts=_thread_ts,
parser=self._parser,
),
tracing_handler=self.tracing_handler,
workspace=self.workspace,
user=_channel,
context=context,
)
return wrapper
def command(self, command: str):
def decorator(command_func):
command_func = self.metrics_decorator(command_func)
@self.slack_app.command(command)
def wrapper(ack, client, body, context):
ack()
_channel = body["channel_id"]
_user = body["user_id"]
_message = body["text"]
self._logger.info(
f"Command `{command}` received in channel `{_channel}`. Message: `{_message}` "
)
command_func(
message=_message,
workspace=self.workspace,
reply=SlackBot.command_send(
client=client,
channel=_channel,
user_id=_user,
command=command,
),
tracing_handler=self.tracing_handler,
user=_channel,
context=context,
)
return wrapper
return decorator
def register(self, func: Callable, commands: Dict[str, Callable] = None) -> Any:
self.app_mention(func)
self.message(func)
if commands is not None:
for command, command_func in commands.items():
self.command(command)(command_func)
return func
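

# Illustrative wiring sketch (not part of the original module). Assumes SLACK_BOT_TOKEN and
# SLACK_SIGNING_SECRET are set for slack_bolt's App(); the handler body is a placeholder.
if __name__ == "__main__":
    bot = SlackBot(workspace="demo", tracing_handler=None)

    def echo_handler(message, prompt, history, tools, reply, **kwargs):
        reply(f"echo: {message}")

    bot.register(echo_handler)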
| [] |
2024-01-10 | 5l1v3r1/langchain-serve | tests~integration~apps~basic_app.py | import asyncio
import os
import time
from typing import Dict, List
import aiofiles
from fastapi import UploadFile, WebSocket
from langchain.agents import initialize_agent, load_tools
from langchain.agents.agent_types import AgentType
from langchain.callbacks.manager import CallbackManager
from langchain.llms.fake import FakeListLLM
from lcserve import job, serving
@serving
def sync_http(interval: int) -> str:
time.sleep(interval)
return "Hello, world!"
@serving
async def async_http(interval: int) -> str:
await asyncio.sleep(interval)
return "Hello, world!"
@serving(websocket=True)
def sync_ws(interval: int, **kwargs) -> str:
ws: "WebSocket" = kwargs["websocket"]
for i in range(1000):
asyncio.run(ws.send_text(str(i)))
time.sleep(interval)
return "hello world"
@serving(websocket=True)
async def async_ws(interval: int, **kwargs) -> str:
ws: "WebSocket" = kwargs["websocket"]
for i in range(1000):
await ws.send_text(str(i))
await asyncio.sleep(interval)
return "hello world"
def authorizer(token: str) -> str:
print(f"Got token: {token}")
if not token == "mysecrettoken":
raise Exception("Invalid token")
return "username"
@serving(auth=authorizer)
def sync_auth_http(interval: int) -> str:
time.sleep(interval)
return "Hello, world!"
@serving(websocket=True, auth=authorizer)
async def sync_auth_ws(interval: int, **kwargs) -> str:
ws: "WebSocket" = kwargs["websocket"]
for i in range(1000):
await ws.send_text(str(i))
await asyncio.sleep(interval)
return "hello world"
@serving(auth=authorizer)
def sync_auth_http_auth_response(interval: int, **kwargs) -> str:
assert 'auth_response' in kwargs
assert kwargs['auth_response'] == "username"
time.sleep(interval)
return "Hello, world!"
@serving(websocket=True, auth=authorizer)
async def sync_auth_ws_auth_response(interval: int, **kwargs) -> str:
assert 'auth_response' in kwargs
assert kwargs['auth_response'] == "username"
ws: "WebSocket" = kwargs["websocket"]
for i in range(1000):
await ws.send_text(str(i))
await asyncio.sleep(interval)
return "hello world"
@serving
def single_file_upload(file: UploadFile) -> str:
return file.filename
@serving
def single_file_upload_with_extra_arg(
file: UploadFile, question: str, someint: int
) -> Dict[str, str]:
return {
"file": file.filename,
"question": question,
"someint": someint,
}
@serving
def multiple_file_uploads(f1: UploadFile, f2: UploadFile) -> List[str]:
return [f1.filename, f2.filename]
@serving
def multiple_file_uploads_with_extra_arg(
f1: UploadFile, f2: UploadFile, question: str, someint: int
) -> Dict[str, str]:
return {
"f1": f1.filename,
"f2": f2.filename,
"question": question,
"someint": someint,
}
@serving
def store(text: str, **kwargs):
workspace: str = kwargs.get('workspace')
path = f'{workspace}/store.txt'
print(f'Writing to {path}')
with open(path, 'a') as f:
f.writelines(text + '\n')
return 'OK'
@serving(websocket=True)
async def stream(**kwargs):
workspace: str = kwargs.get('workspace')
websocket: WebSocket = kwargs.get('websocket')
path = f'{workspace}/store.txt'
print(f'Streaming {path}')
async with aiofiles.open(path, 'r') as f:
async for line in f:
await websocket.send_text(line)
return 'OK'
@serving
def readfile() -> str:
with open('a.txt', 'r') as f: # a.txt is in the root of the project
return f.read()
@serving(websocket=True)
def readfile_ws(**kwargs) -> str:
with open('a.txt', 'r') as f: # a.txt is in the root of the project
return f.read()
@serving
def tracing(dummy: str, **kwargs):
tracing_handler = kwargs.get('tracing_handler')
responses = ["Action: Python REPL\nAction Input: print(2 + 2)", "Final Answer: 4"]
llm = FakeListLLM(
responses=responses, callback_manager=CallbackManager([tracing_handler])
)
tools = load_tools(["python_repl"])
agent = initialize_agent(
tools,
llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
callback_manager=CallbackManager([tracing_handler]),
verbose=True,
)
agent.run(dummy)
return 'ok'
@serving(openai_tracing=True, websocket=True)
def tracing_ws(dummy: str, **kwargs):
tracing_handler = kwargs.get('tracing_handler')
streaming_handler = kwargs.get('streaming_handler')
responses = ["Action: Python REPL\nAction Input: print(2 + 2)", "Final Answer: 4"]
llm = FakeListLLM(
responses=responses, callback_manager=CallbackManager([tracing_handler])
)
tools = load_tools(["python_repl"])
agent = initialize_agent(
tools,
llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
callback_manager=CallbackManager([tracing_handler, streaming_handler]),
verbose=True,
)
agent.run(dummy)
return 'ok'
@job(timeout=100, backofflimit=3)
def my_job(param1: str, param2: str):
print('ok')
| [] |
2024-01-10 | dingiso/ChatGPTScanner | manager~manager.py | import openai
from loader.project import *
from loader.loader import *
from manager.select import *
# Act as a web security expert and ready to receive project
SYSTEM_PROMPT_1 = "You are a web security expert and I will send you a project"
# General security assessment
NEED_PROMPT_1 = "Please analyse code above and tell me vulnerabilities in it. Mark every vulnerability with info, warn, medium, high or critical by severity"
# A need prefix to make gpt work better
NEED_PREFIX = "Please analyse code above. "
# Find all taint chains from a given source
NEED_PROMPT_2 = "Can {} become input or parameter of dangerous function calls? Give me the function call chain in format of {}"
# Find all taint chains to a given sink
NEED_PROMPT_3 = "Can remote input in request become input or parameter of {} in a function call chain? Give me the function call chain in format of {}"
# One function call per line format
DEFUALT_TAINT_FORMAT = "one function call per line"
# Editor format
EDITOR_TAINT_FORMAT = "number\n function name\n file name\n line number\n code snippet less than 3 lines\n"
# Semgrep report format
SEMGREP_FORMAT = "semgrep report"
# CodeQL report format
CodeQL_FORMAT = "CodeQL report"
# General security assessment
def need_prompt_1() -> str:
return NEED_PROMPT_1
# Find all taint chains from a given source
def need_prompt_2(source: str, format=DEFUALT_TAINT_FORMAT) -> str:
return NEED_PREFIX + NEED_PROMPT_2.format(source, format)
# Find all taint chains to a given sink
def need_prompt_3(sink: str, format=DEFUALT_TAINT_FORMAT) -> str:
return NEED_PREFIX + NEED_PROMPT_3.format(sink, format)
def match_include(path: str, include: list[str]):
if len(include) == 0:
return True
for v in include:
if path.startswith(v):
return True
return False
def _ask(messages):
return openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
def build_message(messages, pro: Project, select: Select, dry=False):
for path in pro.content:
if match_include(path, select.include) and path not in select.exclude:
if dry:
print(path)
for k, v in enumerate(pro.content[path]):
messages.append(
{"role": "user", "content": "relative path: {}, part number: {}\n{}".format(path, k, v)})
# add verify=False in openai/api_requestor.py#request_raw L524 to bypass ssl verification
class Manager():
def set_key(self, api_key: str):
openai.api_key = api_key
def set_proxy(self, proxy: str):
openai.proxy = proxy
# ask by src, use with load_one
def ask_src(self, src: list[str]):
messages = [
{"role": "system", "content": SYSTEM_PROMPT_1}]
for chunk in src:
messages.append({"role": "user", "content": chunk})
messages.append(
{"role": "user", "content": NEED_PROMPT_1})
return _ask(messages)
# ask by project and select
def ask(self, pro: Project, select: Select, dry=False):
messages = [
{"role": "system", "content": SYSTEM_PROMPT_1}]
build_message(messages, pro, select, dry)
messages.append(
{"role": "user", "content": NEED_PROMPT_1})
if dry:
return
return _ask(messages)
# ask by project, question and select
def ask_question(self, pro: Project, select: Select, question: str, dry=False):
messages = [
{"role": "system", "content": SYSTEM_PROMPT_1}]
build_message(messages, pro, select, dry)
messages.append(
{"role": "user", "content": question}
)
if dry:
return
return _ask(messages)
# load project by select pack and ask by question
def execute_task(self, task: Task, dry=False):
pro = load_project(task.root, task.language)
return self.ask(pro, task.select, dry)
# load project by select pack and ask by question
def execute_task_question(self, task: Task, question: str, dry=False):
pro = load_project(task.root, task.language)
return self.ask_question(pro, task.select, question, dry)
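

# Illustrative sketch (not part of the original module): a minimal end-to-end call using
# ask_src. The API key and the code snippet being reviewed are placeholders.
if __name__ == "__main__":
    mgr = Manager()
    mgr.set_key("sk-...")
    resp = mgr.ask_src(["def search(q): return db.execute(\"SELECT * FROM t WHERE c='\" + q + \"'\")"])
    print(resp["choices"][0]["message"]["content"])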
| [
"Please analyse code above and tell me vulnerabilities in it. Mark every vulnerability with info, warn, medium, high or critical by severity",
"Can {} become input or parameter of dangerous function calls? Give me the function call chain in format of {}",
"You are a web security expert and I will send you a project",
"relative path: PLACEHOLDER, part number: PLACEHOLDER\nPLACEHOLDER",
"Can remote input in request become input or parameter of {} in a function call chain? Give me the function call chain in format of {}"
] |
2024-01-10 | due-benchmark/baselines | benchmarker~embedding~image~discrete_vae~discrete_embeddings.py | from typing import Any, Optional, Sequence
import torch
import torch.nn as nn
from dalle_pytorch import OpenAIDiscreteVAE
from dalle_pytorch.vae import map_pixels
from torch import Tensor
from benchmarker.embedding.base import ContextEmbeddings
from benchmarker.embedding.relative.relative import (
RelativePositionBiasAggregated,
RelativePositionBiasHorizontal,
RelativePositionBiasVertical,
)
class DiscreteEmbeddings(ContextEmbeddings, nn.Module):
def __init__(
self,
dimension: int = 768,
embedding_level: str = 'tokens',
num_layers: int = 5,
pretrained_path: Optional[str] = None,
num_resnet_blocks: int = 0,
use_position_bias: bool = False,
model_config: Optional[Any] = None,
use_position_embeddings: Optional[bool] = None,
**kwargs,
):
nn.Module.__init__(self)
ContextEmbeddings.__init__(self, dimension, embedding_level=embedding_level)
self.vae = OpenAIDiscreteVAE()
self.dalee = True
self.vae.dec = None
h, w = 32, 32
for param in self.vae.parameters():
param.requires_grad = False
# if pretrained_path is not None:
# self.vae.load_state_dict(torch.load(pretrained_path, map_location='cpu'))
# h, w = self.vae.get_grid_size()
max_length = h * w # depends on number of layers and image size
self.use_position_bias = use_position_bias
self.use_position_embeddings = (
not use_position_bias if use_position_embeddings is None else use_position_embeddings
)
self.semantic_embeddings = nn.Embedding(self.vae.num_tokens, dimension)
self.register_buffer('position_ids', torch.arange(max_length).expand((1, -1)))
if self.use_position_bias:
heads = model_config.num_attention_heads
self.position_bias = RelativePositionBiasAggregated(
[RelativePositionBiasVertical(num_heads=heads), RelativePositionBiasHorizontal(num_heads=heads)]
)
if self.use_position_embeddings:
self.position_embeddings = nn.Embedding(max_length, dimension)
def get_required_segment_levels(self) -> Sequence[str]:
return []
def append_embeddings(self, inputs_embeds, context_embeddings, attention_mask):
attention_mask = attention_mask.clone().detach()
input_len = inputs_embeds.shape[1]
embeddings_len = context_embeddings.shape[1]
# replace padded tokens with context
lens = attention_mask.sum(1)
bs = attention_mask.shape[0]
for idx in range(bs):
l = lens[idx]
space_left = input_len - l
if space_left <= embeddings_len:
attention_mask[idx] = 1
inputs_embeds[idx, l:] = context_embeddings[idx, :space_left]
else:
attention_mask[idx, l : l + embeddings_len] = 1
inputs_embeds[idx, l : l + embeddings_len] = context_embeddings[idx]
return inputs_embeds, attention_mask
def produce_bias(self, input_ids, attention_mask, seg_data, position_bias, old_attention_mask):
if not self.use_position_bias:
return position_bias
# Calculate pseudo-positions
h, w = 32, 32
# h, w = self.vae.get_grid_size()
x = (torch.arange(0.5, w + 0.5) / w).repeat(h, 1).permute(1, 0).flatten()
if w > h:
# Ugly coordinate fix for transposed matrices.
# Left: Look at me, I am the right now
x = 1 - x
y = (torch.arange(0.5, h + 0.5) / h).repeat(w, 1).flatten()
coordinates = torch.stack([x, y, x, y], 1).to(input_ids.device)
max_length = input_ids.shape[1]
left = (input_ids == 0).sum(1)
start_idx = max_length - left
bs = input_ids.shape[0]
for idx in range(bs):
start = start_idx[idx]
end = min(max_length, start + h * w)
seg_data['tokens']['bboxes'][idx, start:end] = coordinates[: min(left[idx], w * h)]
new_position_bias = self.position_bias(input_ids, attention_mask, seg_data)
"""
old_interactions = (old_attention_mask.unsqueeze(1).transpose(2, 1) * old_attention_mask.unsqueeze(1))
all_interactions = (attention_mask.unsqueeze(1).transpose(2, 1) * attention_mask.unsqueeze(1))
new_interactions = all_interactions * ~old_interactions.bool()
heads = position_bias.shape[1]
# Zero bias for newely-created positions
nidx = new_interactions.unsqueeze(1).repeat(1, heads, 1, 1).bool()
# Zero bias for old positions
oidx = old_interactions.unsqueeze(1).repeat(1, heads, 1, 1).bool()
# What about masked? Now, there is sum of both for them
zidx = ~all_interactions.unsqueeze(1).repeat(1, heads, 1, 1).bool()
position_bias[nidx] = 0
new_position_bias[oidx] = 0
"""
both = position_bias + new_position_bias
# Do not uncomment. A present which seems valuable but which in reality is a curse
# both[zidx] = -10000
return both
def forward( # type: ignore
self,
input_ids: Tensor,
attention_mask: Tensor,
seg_data: dict,
text_embeddings: Tensor,
position_bias: Tensor,
**kwargs,
) -> Tensor:
image_batch = seg_data['lazyimages']['img_lst']
if len(image_batch.shape) == 3:
image_batch = image_batch.unsqueeze(1).repeat(1, 3, 1, 1)
image_batch = map_pixels(image_batch)
# image_batch = image_batch.unsqueeze(1)
codewords = self.vae.get_codebook_indices(image_batch)
semantic = self.semantic_embeddings(codewords)
seq_length = codewords.shape[1]
position_ids = self.position_ids[:, :seq_length]
position = self.position_embeddings(position_ids) if self.use_position_embeddings else 0.0
context_embeddings = semantic + position
inputs_embeds_, attention_mask_ = self.append_embeddings(text_embeddings, context_embeddings, attention_mask)
position_bias_ = self.produce_bias(input_ids, attention_mask_, seg_data, position_bias, attention_mask)
return (inputs_embeds_, attention_mask_, position_bias_)
def create_discrete_embeddings(
pretrained_path: Optional[str] = None, dimension=768, num_layers: int = 5, **kwargs
) -> DiscreteEmbeddings:
return DiscreteEmbeddings(dimension=dimension, num_layers=num_layers, pretrained_path=pretrained_path, **kwargs)
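# Minimal usage sketch (not part of the original file). It assumes the
# dalle_pytorch OpenAI VAE weights can be downloaded, feeds random tensors in
# place of a real batch, and disables the relative position-bias branch so no
# model_config is required; shapes follow what `forward` expects above.
def _demo_discrete_embeddings() -> None:
    emb = create_discrete_embeddings(
        dimension=768, use_position_bias=False, use_position_embeddings=True
    )
    bs, seq_len, dim = 2, 1100, 768
    seg_data = {
        "lazyimages": {"img_lst": torch.rand(bs, 3, 256, 256)},  # RGB images in [0, 1]
        "tokens": {"bboxes": torch.zeros(bs, seq_len, 4)},
    }
    text_embeddings = torch.zeros(bs, seq_len, dim)
    # first 100 positions are "real" text tokens, the rest is padding
    attention_mask = torch.zeros(bs, seq_len, dtype=torch.long)
    attention_mask[:, :100] = 1
    input_ids = torch.ones(bs, seq_len, dtype=torch.long)
    embeds, mask, bias = emb(
        input_ids=input_ids,
        attention_mask=attention_mask,
        seg_data=seg_data,
        text_embeddings=text_embeddings,
        position_bias=None,
    )
    print(embeds.shape, mask.sum(1))  # image codes are appended after the 100 text tokens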
| [] |
2024-01-10 | ebagdasa/backdoored_transformers | src~transformers~models~auto~modeling_auto.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Model class. """
import warnings
from collections import OrderedDict
from ...configuration_utils import PretrainedConfig
from ...file_utils import add_start_docstrings
from ...utils import logging
# Add modeling imports here
from ..albert.modeling_albert import (
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from ..bart.modeling_bart import (
BartForConditionalGeneration,
BartForQuestionAnswering,
BartForSequenceClassification,
BartModel,
)
from ..bert.modeling_bert import (
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLMHeadModel,
BertModel,
)
from ..bert_generation.modeling_bert_generation import BertGenerationDecoder, BertGenerationEncoder
from ..blenderbot.modeling_blenderbot import BlenderbotForConditionalGeneration, BlenderbotModel
from ..blenderbot_small.modeling_blenderbot_small import BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel
from ..camembert.modeling_camembert import (
CamembertForCausalLM,
CamembertForMaskedLM,
CamembertForMultipleChoice,
CamembertForQuestionAnswering,
CamembertForSequenceClassification,
CamembertForTokenClassification,
CamembertModel,
)
from ..ctrl.modeling_ctrl import CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel
from ..deberta.modeling_deberta import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from ..distilbert.modeling_distilbert import (
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
from ..dpr.modeling_dpr import DPRQuestionEncoder
from ..electra.modeling_electra import (
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
)
from ..encoder_decoder.modeling_encoder_decoder import EncoderDecoderModel
from ..flaubert.modeling_flaubert import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from ..fsmt.modeling_fsmt import FSMTForConditionalGeneration, FSMTModel
from ..funnel.modeling_funnel import (
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
)
from ..gpt2.modeling_gpt2 import GPT2ForSequenceClassification, GPT2LMHeadModel, GPT2Model
from ..layoutlm.modeling_layoutlm import (
LayoutLMForMaskedLM,
LayoutLMForSequenceClassification,
LayoutLMForTokenClassification,
LayoutLMModel,
)
from ..led.modeling_led import (
LEDForConditionalGeneration,
LEDForQuestionAnswering,
LEDForSequenceClassification,
LEDModel,
)
from ..longformer.modeling_longformer import (
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
)
from ..lxmert.modeling_lxmert import LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel
from ..marian.modeling_marian import MarianModel, MarianMTModel
from ..mbart.modeling_mbart import (
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
)
from ..mobilebert.modeling_mobilebert import (
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
from ..mpnet.modeling_mpnet import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
from ..mt5.modeling_mt5 import MT5ForConditionalGeneration, MT5Model
from ..openai.modeling_openai import OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel
from ..pegasus.modeling_pegasus import PegasusForConditionalGeneration, PegasusModel
from ..prophetnet.modeling_prophetnet import ProphetNetForCausalLM, ProphetNetForConditionalGeneration, ProphetNetModel
from ..rag.modeling_rag import ( # noqa: F401 - need to import all RagModels to be in globals() function
RagModel,
RagSequenceForGeneration,
RagTokenForGeneration,
)
from ..reformer.modeling_reformer import (
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerModel,
ReformerModelWithLMHead,
)
from ..retribert.modeling_retribert import RetriBertModel
from ..roberta.modeling_roberta import (
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
)
from ..squeezebert.modeling_squeezebert import (
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
from ..t5.modeling_t5 import T5ForConditionalGeneration, T5Model
from ..tapas.modeling_tapas import (
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
)
from ..transfo_xl.modeling_transfo_xl import TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel
from ..xlm.modeling_xlm import (
XLMForMultipleChoice,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from ..xlm_prophetnet.modeling_xlm_prophetnet import (
XLMProphetNetForCausalLM,
XLMProphetNetForConditionalGeneration,
XLMProphetNetModel,
)
from ..xlm_roberta.modeling_xlm_roberta import (
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
)
from ..xlnet.modeling_xlnet import (
XLNetForMultipleChoice,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
)
from .configuration_auto import (
AlbertConfig,
AutoConfig,
BartConfig,
BertConfig,
BertGenerationConfig,
BlenderbotConfig,
BlenderbotSmallConfig,
CamembertConfig,
CTRLConfig,
DebertaConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
EncoderDecoderConfig,
FlaubertConfig,
FSMTConfig,
FunnelConfig,
GPT2Config,
LayoutLMConfig,
LEDConfig,
LongformerConfig,
LxmertConfig,
MarianConfig,
MBartConfig,
MobileBertConfig,
MPNetConfig,
MT5Config,
OpenAIGPTConfig,
PegasusConfig,
ProphetNetConfig,
ReformerConfig,
RetriBertConfig,
RobertaConfig,
SqueezeBertConfig,
T5Config,
TapasConfig,
TransfoXLConfig,
XLMConfig,
XLMProphetNetConfig,
XLMRobertaConfig,
XLNetConfig,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
MODEL_MAPPING = OrderedDict(
[
# Base model mapping
(LEDConfig, LEDModel),
(BlenderbotSmallConfig, BlenderbotSmallModel),
(RetriBertConfig, RetriBertModel),
(MT5Config, MT5Model),
(T5Config, T5Model),
(PegasusConfig, PegasusModel),
(MarianConfig, MarianMTModel),
(MBartConfig, MBartModel),
(BlenderbotConfig, BlenderbotModel),
(DistilBertConfig, DistilBertModel),
(AlbertConfig, AlbertModel),
(CamembertConfig, CamembertModel),
(XLMRobertaConfig, XLMRobertaModel),
(BartConfig, BartModel),
(LongformerConfig, LongformerModel),
(RobertaConfig, RobertaModel),
(LayoutLMConfig, LayoutLMModel),
(SqueezeBertConfig, SqueezeBertModel),
(BertConfig, BertModel),
(OpenAIGPTConfig, OpenAIGPTModel),
(GPT2Config, GPT2Model),
(MobileBertConfig, MobileBertModel),
(TransfoXLConfig, TransfoXLModel),
(XLNetConfig, XLNetModel),
(FlaubertConfig, FlaubertModel),
(FSMTConfig, FSMTModel),
(XLMConfig, XLMModel),
(CTRLConfig, CTRLModel),
(ElectraConfig, ElectraModel),
(ReformerConfig, ReformerModel),
(FunnelConfig, FunnelModel),
(LxmertConfig, LxmertModel),
(BertGenerationConfig, BertGenerationEncoder),
(DebertaConfig, DebertaModel),
(DPRConfig, DPRQuestionEncoder),
(XLMProphetNetConfig, XLMProphetNetModel),
(ProphetNetConfig, ProphetNetModel),
(MPNetConfig, MPNetModel),
(TapasConfig, TapasModel),
(MarianConfig, MarianModel),
]
)
MODEL_FOR_PRETRAINING_MAPPING = OrderedDict(
[
# Model for pre-training mapping
(LayoutLMConfig, LayoutLMForMaskedLM),
(RetriBertConfig, RetriBertModel),
(T5Config, T5ForConditionalGeneration),
(DistilBertConfig, DistilBertForMaskedLM),
(AlbertConfig, AlbertForPreTraining),
(CamembertConfig, CamembertForMaskedLM),
(XLMRobertaConfig, XLMRobertaForMaskedLM),
(BartConfig, BartForConditionalGeneration),
(FSMTConfig, FSMTForConditionalGeneration),
(LongformerConfig, LongformerForMaskedLM),
(RobertaConfig, RobertaForMaskedLM),
(SqueezeBertConfig, SqueezeBertForMaskedLM),
(BertConfig, BertForPreTraining),
(OpenAIGPTConfig, OpenAIGPTLMHeadModel),
(GPT2Config, GPT2LMHeadModel),
(MobileBertConfig, MobileBertForPreTraining),
(TransfoXLConfig, TransfoXLLMHeadModel),
(XLNetConfig, XLNetLMHeadModel),
(FlaubertConfig, FlaubertWithLMHeadModel),
(XLMConfig, XLMWithLMHeadModel),
(CTRLConfig, CTRLLMHeadModel),
(ElectraConfig, ElectraForPreTraining),
(LxmertConfig, LxmertForPreTraining),
(FunnelConfig, FunnelForPreTraining),
(MPNetConfig, MPNetForMaskedLM),
(TapasConfig, TapasForMaskedLM),
]
)
MODEL_WITH_LM_HEAD_MAPPING = OrderedDict(
[
# Model with LM heads mapping
(LEDConfig, LEDForConditionalGeneration),
(BlenderbotSmallConfig, BlenderbotSmallForConditionalGeneration),
(LayoutLMConfig, LayoutLMForMaskedLM),
(T5Config, T5ForConditionalGeneration),
(DistilBertConfig, DistilBertForMaskedLM),
(AlbertConfig, AlbertForMaskedLM),
(CamembertConfig, CamembertForMaskedLM),
(XLMRobertaConfig, XLMRobertaForMaskedLM),
(MarianConfig, MarianMTModel),
(FSMTConfig, FSMTForConditionalGeneration),
(BartConfig, BartForConditionalGeneration),
(LongformerConfig, LongformerForMaskedLM),
(RobertaConfig, RobertaForMaskedLM),
(SqueezeBertConfig, SqueezeBertForMaskedLM),
(BertConfig, BertForMaskedLM),
(OpenAIGPTConfig, OpenAIGPTLMHeadModel),
(GPT2Config, GPT2LMHeadModel),
(MobileBertConfig, MobileBertForMaskedLM),
(TransfoXLConfig, TransfoXLLMHeadModel),
(XLNetConfig, XLNetLMHeadModel),
(FlaubertConfig, FlaubertWithLMHeadModel),
(XLMConfig, XLMWithLMHeadModel),
(CTRLConfig, CTRLLMHeadModel),
(ElectraConfig, ElectraForMaskedLM),
(EncoderDecoderConfig, EncoderDecoderModel),
(ReformerConfig, ReformerModelWithLMHead),
(FunnelConfig, FunnelForMaskedLM),
(MPNetConfig, MPNetForMaskedLM),
(TapasConfig, TapasForMaskedLM),
(DebertaConfig, DebertaForMaskedLM),
]
)
MODEL_FOR_CAUSAL_LM_MAPPING = OrderedDict(
[
# Model for Causal LM mapping
(CamembertConfig, CamembertForCausalLM),
(XLMRobertaConfig, XLMRobertaForCausalLM),
(RobertaConfig, RobertaForCausalLM),
(BertConfig, BertLMHeadModel),
(OpenAIGPTConfig, OpenAIGPTLMHeadModel),
(GPT2Config, GPT2LMHeadModel),
(TransfoXLConfig, TransfoXLLMHeadModel),
(XLNetConfig, XLNetLMHeadModel),
(
XLMConfig,
XLMWithLMHeadModel,
), # XLM can be MLM and CLM => model should be split similar to BERT; leave here for now
(CTRLConfig, CTRLLMHeadModel),
(ReformerConfig, ReformerModelWithLMHead),
(BertGenerationConfig, BertGenerationDecoder),
(XLMProphetNetConfig, XLMProphetNetForCausalLM),
(ProphetNetConfig, ProphetNetForCausalLM),
]
)
MODEL_FOR_MASKED_LM_MAPPING = OrderedDict(
[
# Model for Masked LM mapping
(LayoutLMConfig, LayoutLMForMaskedLM),
(DistilBertConfig, DistilBertForMaskedLM),
(AlbertConfig, AlbertForMaskedLM),
(BartConfig, BartForConditionalGeneration),
(MBartConfig, MBartForConditionalGeneration),
(CamembertConfig, CamembertForMaskedLM),
(XLMRobertaConfig, XLMRobertaForMaskedLM),
(LongformerConfig, LongformerForMaskedLM),
(RobertaConfig, RobertaForMaskedLM),
(SqueezeBertConfig, SqueezeBertForMaskedLM),
(BertConfig, BertForMaskedLM),
(MobileBertConfig, MobileBertForMaskedLM),
(FlaubertConfig, FlaubertWithLMHeadModel),
(XLMConfig, XLMWithLMHeadModel),
(ElectraConfig, ElectraForMaskedLM),
(ReformerConfig, ReformerForMaskedLM),
(FunnelConfig, FunnelForMaskedLM),
(MPNetConfig, MPNetForMaskedLM),
(TapasConfig, TapasForMaskedLM),
(DebertaConfig, DebertaForMaskedLM),
]
)
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
(LEDConfig, LEDForConditionalGeneration),
(BlenderbotSmallConfig, BlenderbotSmallForConditionalGeneration),
(MT5Config, MT5ForConditionalGeneration),
(T5Config, T5ForConditionalGeneration),
(PegasusConfig, PegasusForConditionalGeneration),
(MarianConfig, MarianMTModel),
(MBartConfig, MBartForConditionalGeneration),
(BlenderbotConfig, BlenderbotForConditionalGeneration),
(BartConfig, BartForConditionalGeneration),
(FSMTConfig, FSMTForConditionalGeneration),
(EncoderDecoderConfig, EncoderDecoderModel),
(XLMProphetNetConfig, XLMProphetNetForConditionalGeneration),
(ProphetNetConfig, ProphetNetForConditionalGeneration),
]
)
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = OrderedDict(
[
# Model for Sequence Classification mapping
(LEDConfig, LEDForSequenceClassification),
(DistilBertConfig, DistilBertForSequenceClassification),
(AlbertConfig, AlbertForSequenceClassification),
(CamembertConfig, CamembertForSequenceClassification),
(XLMRobertaConfig, XLMRobertaForSequenceClassification),
(MBartConfig, MBartForSequenceClassification),
(BartConfig, BartForSequenceClassification),
(LongformerConfig, LongformerForSequenceClassification),
(RobertaConfig, RobertaForSequenceClassification),
(SqueezeBertConfig, SqueezeBertForSequenceClassification),
(LayoutLMConfig, LayoutLMForSequenceClassification),
(BertConfig, BertForSequenceClassification),
(XLNetConfig, XLNetForSequenceClassification),
(MobileBertConfig, MobileBertForSequenceClassification),
(FlaubertConfig, FlaubertForSequenceClassification),
(XLMConfig, XLMForSequenceClassification),
(ElectraConfig, ElectraForSequenceClassification),
(FunnelConfig, FunnelForSequenceClassification),
(DebertaConfig, DebertaForSequenceClassification),
(GPT2Config, GPT2ForSequenceClassification),
(OpenAIGPTConfig, OpenAIGPTForSequenceClassification),
(ReformerConfig, ReformerForSequenceClassification),
(CTRLConfig, CTRLForSequenceClassification),
(TransfoXLConfig, TransfoXLForSequenceClassification),
(MPNetConfig, MPNetForSequenceClassification),
(TapasConfig, TapasForSequenceClassification),
]
)
MODEL_FOR_QUESTION_ANSWERING_MAPPING = OrderedDict(
[
# Model for Question Answering mapping
(LEDConfig, LEDForQuestionAnswering),
(DistilBertConfig, DistilBertForQuestionAnswering),
(AlbertConfig, AlbertForQuestionAnswering),
(CamembertConfig, CamembertForQuestionAnswering),
(BartConfig, BartForQuestionAnswering),
(MBartConfig, MBartForQuestionAnswering),
(LongformerConfig, LongformerForQuestionAnswering),
(XLMRobertaConfig, XLMRobertaForQuestionAnswering),
(RobertaConfig, RobertaForQuestionAnswering),
(SqueezeBertConfig, SqueezeBertForQuestionAnswering),
(BertConfig, BertForQuestionAnswering),
(XLNetConfig, XLNetForQuestionAnsweringSimple),
(FlaubertConfig, FlaubertForQuestionAnsweringSimple),
(MobileBertConfig, MobileBertForQuestionAnswering),
(XLMConfig, XLMForQuestionAnsweringSimple),
(ElectraConfig, ElectraForQuestionAnswering),
(ReformerConfig, ReformerForQuestionAnswering),
(FunnelConfig, FunnelForQuestionAnswering),
(LxmertConfig, LxmertForQuestionAnswering),
(MPNetConfig, MPNetForQuestionAnswering),
(DebertaConfig, DebertaForQuestionAnswering),
]
)
MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = OrderedDict(
[
# Model for Table Question Answering mapping
(TapasConfig, TapasForQuestionAnswering),
]
)
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = OrderedDict(
[
# Model for Token Classification mapping
(LayoutLMConfig, LayoutLMForTokenClassification),
(DistilBertConfig, DistilBertForTokenClassification),
(CamembertConfig, CamembertForTokenClassification),
(FlaubertConfig, FlaubertForTokenClassification),
(XLMConfig, XLMForTokenClassification),
(XLMRobertaConfig, XLMRobertaForTokenClassification),
(LongformerConfig, LongformerForTokenClassification),
(RobertaConfig, RobertaForTokenClassification),
(SqueezeBertConfig, SqueezeBertForTokenClassification),
(BertConfig, BertForTokenClassification),
(MobileBertConfig, MobileBertForTokenClassification),
(XLNetConfig, XLNetForTokenClassification),
(AlbertConfig, AlbertForTokenClassification),
(ElectraConfig, ElectraForTokenClassification),
(FlaubertConfig, FlaubertForTokenClassification),
(FunnelConfig, FunnelForTokenClassification),
(MPNetConfig, MPNetForTokenClassification),
(DebertaConfig, DebertaForTokenClassification),
]
)
MODEL_FOR_MULTIPLE_CHOICE_MAPPING = OrderedDict(
[
# Model for Multiple Choice mapping
(CamembertConfig, CamembertForMultipleChoice),
(ElectraConfig, ElectraForMultipleChoice),
(XLMRobertaConfig, XLMRobertaForMultipleChoice),
(LongformerConfig, LongformerForMultipleChoice),
(RobertaConfig, RobertaForMultipleChoice),
(SqueezeBertConfig, SqueezeBertForMultipleChoice),
(BertConfig, BertForMultipleChoice),
(DistilBertConfig, DistilBertForMultipleChoice),
(MobileBertConfig, MobileBertForMultipleChoice),
(XLNetConfig, XLNetForMultipleChoice),
(AlbertConfig, AlbertForMultipleChoice),
(XLMConfig, XLMForMultipleChoice),
(FlaubertConfig, FlaubertForMultipleChoice),
(FunnelConfig, FunnelForMultipleChoice),
(MPNetConfig, MPNetForMultipleChoice),
]
)
MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = OrderedDict(
[
(BertConfig, BertForNextSentencePrediction),
(MobileBertConfig, MobileBertForNextSentencePrediction),
]
)
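# Illustrative sketch only (not part of the original file): the Auto* classes
# below resolve a concrete model class by looking the configuration's type up
# in the OrderedDicts above (e.g. BertConfig -> BertModel in MODEL_MAPPING).
# Wrapped in a helper so nothing runs at import time; weights are randomly
# initialised, not pretrained.
def _demo_mapping_lookup():
    config = BertConfig()
    model_cls = MODEL_MAPPING[type(config)]  # -> BertModel
    return model_cls(config)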
AUTO_MODEL_PRETRAINED_DOCSTRING = r"""
The model class to instantiate is selected based on the :obj:`model_type` property of the config object (either
passed as an argument or loaded from :obj:`pretrained_model_name_or_path` if possible), or when it's missing,
by falling back to using pattern matching on :obj:`pretrained_model_name_or_path`:
List options
The model is set in evaluation mode by default using ``model.eval()`` (so for instance, dropout modules are
deactivated). To train the model, you should first set it back in training mode with ``model.train()``
Args:
pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`):
Can be either:
- A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
a user or organization name, like ``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing model weights saved using
:func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
- A path or url to a `tensorflow index checkpoint file` (e.g, ``./tf_model/model.ckpt.index``). In
this case, ``from_tf`` should be set to :obj:`True` and a configuration object should be provided
as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in
a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args (additional positional arguments, `optional`):
Will be passed along to the underlying model ``__init__()`` method.
config (:class:`~transformers.PretrainedConfig`, `optional`):
Configuration for the model to use instead of an automatically loaded configuration. Configuration can
be automatically loaded when:
- The model is a model provided by the library (loaded with the `model id` string of a pretrained
model).
- The model was saved using :meth:`~transformers.PreTrainedModel.save_pretrained` and is reloaded
by supplying the save directory.
- The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a
configuration JSON file named `config.json` is found in the directory.
state_dict (`Dict[str, torch.Tensor]`, `optional`):
A state dictionary to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own
weights. In this case though, you should check if using
:func:`~transformers.PreTrainedModel.save_pretrained` and
:func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir (:obj:`str` or :obj:`os.PathLike`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
from_tf (:obj:`bool`, `optional`, defaults to :obj:`False`):
Load the model weights from a TensorFlow checkpoint save file (see docstring of
``pretrained_model_name_or_path`` argument).
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
proxies (:obj:`Dict[str, str]`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to only look at local files (e.g., not try downloading the model).
revision(:obj:`str`, `optional`, defaults to :obj:`"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
identifier allowed by git.
kwargs (additional keyword arguments, `optional`):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
:obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or
automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
attribute will be passed to the underlying model's ``__init__`` function.
"""
class AutoModel:
r"""
This is a generic model class that will be instantiated as one of the base model classes of the library when
created with the :meth:`~transformers.AutoModel.from_pretrained` class method or the
:meth:`~transformers.AutoModel.from_config` class methods.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoModel is designed to be instantiated "
"using the `AutoModel.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModel.from_config(config)` methods."
)
@classmethod
@replace_list_option_in_docstrings(MODEL_MAPPING, use_model_types=False)
def from_config(cls, config):
r"""
Instantiates one of the base model classes of the library from a configuration.
Note:
Loading a model from its configuration file does **not** load the model weights. It only affects the
model's configuration. Use :meth:`~transformers.AutoModel.from_pretrained` to load the model weights.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
List options
Examples::
>>> from transformers import AutoConfig, AutoModel
>>> # Download configuration from huggingface.co and cache.
>>> config = AutoConfig.from_pretrained('bert-base-uncased')
>>> model = AutoModel.from_config(config)
"""
if type(config) in MODEL_MAPPING.keys():
return MODEL_MAPPING[type(config)](config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_MAPPING.keys())
)
)
@classmethod
@replace_list_option_in_docstrings(MODEL_MAPPING)
@add_start_docstrings(
"Instantiate one of the base model classes of the library from a pretrained model.",
AUTO_MODEL_PRETRAINED_DOCSTRING,
)
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Examples::
>>> from transformers import AutoConfig, AutoModel
>>> # Download model and configuration from huggingface.co and cache.
>>> model = AutoModel.from_pretrained('bert-base-uncased')
>>> # Update configuration during loading
>>> model = AutoModel.from_pretrained('bert-base-uncased', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
>>> model = AutoModel.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config, kwargs = AutoConfig.from_pretrained(
pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
)
if type(config) in MODEL_MAPPING.keys():
return MODEL_MAPPING[type(config)].from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **kwargs
)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_MAPPING.keys())
)
)
class AutoModelForPreTraining:
r"""
This is a generic model class that will be instantiated as one of the model classes of the library---with the
architecture used for pretraining this model---when created with the
:meth:`~transformers.AutoModelForPreTraining.from_pretrained` class method or the
:meth:`~transformers.AutoModelForPreTraining.from_config` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoModelForPreTraining is designed to be instantiated "
"using the `AutoModelForPreTraining.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelForPreTraining.from_config(config)` methods."
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_PRETRAINING_MAPPING, use_model_types=False)
def from_config(cls, config):
r"""
Instantiates one of the model classes of the library---with the architecture used for pretraining this
model---from a configuration.
Note:
Loading a model from its configuration file does **not** load the model weights. It only affects the
model's configuration. Use :meth:`~transformers.AutoModelForPreTraining.from_pretrained` to load the model
weights.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
List options
Examples::
>>> from transformers import AutoConfig, AutoModelForPreTraining
>>> # Download configuration from huggingface.co and cache.
>>> config = AutoConfig.from_pretrained('bert-base-uncased')
>>> model = AutoModelForPreTraining.from_config(config)
"""
if type(config) in MODEL_FOR_PRETRAINING_MAPPING.keys():
return MODEL_FOR_PRETRAINING_MAPPING[type(config)](config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_PRETRAINING_MAPPING.keys())
)
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_PRETRAINING_MAPPING)
@add_start_docstrings(
"Instantiate one of the model classes of the library---with the architecture used for pretraining this ",
"model---from a pretrained model.",
AUTO_MODEL_PRETRAINED_DOCSTRING,
)
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Examples::
>>> from transformers import AutoConfig, AutoModelForPreTraining
>>> # Download model and configuration from huggingface.co and cache.
>>> model = AutoModelForPreTraining.from_pretrained('bert-base-uncased')
>>> # Update configuration during loading
>>> model = AutoModelForPreTraining.from_pretrained('bert-base-uncased', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForPreTraining.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config, kwargs = AutoConfig.from_pretrained(
pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
)
if type(config) in MODEL_FOR_PRETRAINING_MAPPING.keys():
return MODEL_FOR_PRETRAINING_MAPPING[type(config)].from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **kwargs
)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_PRETRAINING_MAPPING.keys())
)
)
class AutoModelWithLMHead:
r"""
This is a generic model class that will be instantiated as one of the model classes of the library---with a
language modeling head---when created with the
:meth:`~transformers.AutoModelWithLMHead.from_pretrained` class method or the
:meth:`~transformers.AutoModelWithLMHead.from_config` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
.. warning::
This class is deprecated and will be removed in a future version. Please use
:class:`~transformers.AutoModelForCausalLM` for causal language models,
:class:`~transformers.AutoModelForMaskedLM` for masked language models and
:class:`~transformers.AutoModelForSeq2SeqLM` for encoder-decoder models.
"""
def __init__(self):
raise EnvironmentError(
"AutoModelWithLMHead is designed to be instantiated "
"using the `AutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelWithLMHead.from_config(config)` methods."
)
@classmethod
@replace_list_option_in_docstrings(MODEL_WITH_LM_HEAD_MAPPING, use_model_types=False)
def from_config(cls, config):
r"""
Instantiates one of the model classes of the library---with a language modeling head---from a configuration.
Note:
Loading a model from its configuration file does **not** load the model weights. It only affects the
model's configuration. Use :meth:`~transformers.AutoModelWithLMHead.from_pretrained` to load the model
weights.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
List options
Examples::
>>> from transformers import AutoConfig, AutoModelWithLMHead
>>> # Download configuration from huggingface.co and cache.
>>> config = AutoConfig.from_pretrained('bert-base-uncased')
>>> model = AutoModelWithLMHead.from_config(config)
"""
warnings.warn(
"The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use "
"`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and "
"`AutoModelForSeq2SeqLM` for encoder-decoder models.",
FutureWarning,
)
if type(config) in MODEL_WITH_LM_HEAD_MAPPING.keys():
return MODEL_WITH_LM_HEAD_MAPPING[type(config)](config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_WITH_LM_HEAD_MAPPING.keys())
)
)
@classmethod
@replace_list_option_in_docstrings(MODEL_WITH_LM_HEAD_MAPPING)
@add_start_docstrings(
"Instantiate one of the model classes of the library---with a language modeling head---from a pretrained ",
"model.",
AUTO_MODEL_PRETRAINED_DOCSTRING,
)
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Examples::
>>> from transformers import AutoConfig, AutoModelWithLMHead
>>> # Download model and configuration from huggingface.co and cache.
>>> model = AutoModelWithLMHead.from_pretrained('bert-base-uncased')
>>> # Update configuration during loading
>>> model = AutoModelWithLMHead.from_pretrained('bert-base-uncased', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelWithLMHead.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
warnings.warn(
"The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use "
"`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and "
"`AutoModelForSeq2SeqLM` for encoder-decoder models.",
FutureWarning,
)
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config, kwargs = AutoConfig.from_pretrained(
pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
)
if type(config) in MODEL_WITH_LM_HEAD_MAPPING.keys():
return MODEL_WITH_LM_HEAD_MAPPING[type(config)].from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **kwargs
)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_WITH_LM_HEAD_MAPPING.keys())
)
)
class AutoModelForCausalLM:
r"""
This is a generic model class that will be instantiated as one of the model classes of the library---with a causal
language modeling head---when created with the
:meth:`~transformers.AutoModelForCausalLM.from_pretrained` class method or the
:meth:`~transformers.AutoModelForCausalLM.from_config` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoModelForCausalLM is designed to be instantiated "
"using the `AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelForCausalLM.from_config(config)` methods."
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_CAUSAL_LM_MAPPING, use_model_types=False)
def from_config(cls, config):
r"""
Instantiates one of the model classes of the library---with a causal language modeling head---from a
configuration.
Note:
Loading a model from its configuration file does **not** load the model weights. It only affects the
model's configuration. Use :meth:`~transformers.AutoModelForCausalLM.from_pretrained` to load the model
weights.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
List options
Examples::
>>> from transformers import AutoConfig, AutoModelForCausalLM
>>> # Download configuration from huggingface.co and cache.
>>> config = AutoConfig.from_pretrained('gpt2')
>>> model = AutoModelForCausalLM.from_config(config)
"""
if type(config) in MODEL_FOR_CAUSAL_LM_MAPPING.keys():
return MODEL_FOR_CAUSAL_LM_MAPPING[type(config)](config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_CAUSAL_LM_MAPPING.keys())
)
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_CAUSAL_LM_MAPPING)
@add_start_docstrings(
"Instantiate one of the model classes of the library---with a causal language modeling head---from a "
"pretrained model.",
AUTO_MODEL_PRETRAINED_DOCSTRING,
)
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Examples::
>>> from transformers import AutoConfig, AutoModelForCausalLM
>>> # Download model and configuration from huggingface.co and cache.
>>> model = AutoModelForCausalLM.from_pretrained('gpt2')
>>> # Update configuration during loading
>>> model = AutoModelForCausalLM.from_pretrained('gpt2', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/gpt2_tf_model_config.json')
>>> model = AutoModelForCausalLM.from_pretrained('./tf_model/gpt2_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config, kwargs = AutoConfig.from_pretrained(
pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
)
if type(config) in MODEL_FOR_CAUSAL_LM_MAPPING.keys():
return MODEL_FOR_CAUSAL_LM_MAPPING[type(config)].from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **kwargs
)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_CAUSAL_LM_MAPPING.keys())
)
)
class AutoModelForMaskedLM:
r"""
This is a generic model class that will be instantiated as one of the model classes of the library---with a masked
language modeling head---when created with the
:meth:`~transformers.AutoModelForMaskedLM.from_pretrained` class method or the
:meth:`~transformers.AutoModelForMaskedLM.from_config` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoModelForMaskedLM is designed to be instantiated "
"using the `AutoModelForMaskedLM.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelForMaskedLM.from_config(config)` methods."
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_MASKED_LM_MAPPING, use_model_types=False)
def from_config(cls, config):
r"""
Instantiates one of the model classes of the library---with a masked language modeling head---from a
configuration.
Note:
Loading a model from its configuration file does **not** load the model weights. It only affects the
model's configuration. Use :meth:`~transformers.AutoModelForMaskedLM.from_pretrained` to load the model
weights.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
List options
Examples::
>>> from transformers import AutoConfig, AutoModelForMaskedLM
>>> # Download configuration from huggingface.co and cache.
>>> config = AutoConfig.from_pretrained('bert-base-uncased')
>>> model = AutoModelForMaskedLM.from_config(config)
"""
if type(config) in MODEL_FOR_MASKED_LM_MAPPING.keys():
return MODEL_FOR_MASKED_LM_MAPPING[type(config)](config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_MASKED_LM_MAPPING.keys())
)
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_MASKED_LM_MAPPING)
@add_start_docstrings(
"Instantiate one of the model classes of the library---with a masked language modeling head---from a "
"pretrained model.",
AUTO_MODEL_PRETRAINED_DOCSTRING,
)
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Examples::
>>> from transformers import AutoConfig, AutoModelForMaskedLM
>>> # Download model and configuration from huggingface.co and cache.
>>> model = AutoModelForMaskedLM.from_pretrained('bert-base-uncased')
>>> # Update configuration during loading
>>> model = AutoModelForMaskedLM.from_pretrained('bert-base-uncased', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForMaskedLM.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config, kwargs = AutoConfig.from_pretrained(
pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
)
if type(config) in MODEL_FOR_MASKED_LM_MAPPING.keys():
return MODEL_FOR_MASKED_LM_MAPPING[type(config)].from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **kwargs
)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_MASKED_LM_MAPPING.keys())
)
)
class AutoModelForSeq2SeqLM:
r"""
This is a generic model class that will be instantiated as one of the model classes of the library---with a
sequence-to-sequence language modeling head---when created with the
:meth:`~transformers.AutoModelForSeq2SeqLM.from_pretrained` class method or the
:meth:`~transformers.AutoModelForSeq2SeqLM.from_config` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoModelForSeq2SeqLM is designed to be instantiated "
"using the `AutoModelForSeq2SeqLM.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelForSeq2SeqLM.from_config(config)` methods."
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, use_model_types=False)
def from_config(cls, config):
r"""
Instantiates one of the model classes of the library---with a sequence-to-sequence language modeling
head---from a configuration.
Note:
Loading a model from its configuration file does **not** load the model weights. It only affects the
model's configuration. Use :meth:`~transformers.AutoModelForSeq2SeqLM.from_pretrained` to load the model
weights.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
List options
Examples::
>>> from transformers import AutoConfig, AutoModelForSeq2SeqLM
>>> # Download configuration from huggingface.co and cache.
>>> config = AutoConfig.from_pretrained('t5-base')
>>> model = AutoModelForSeq2SeqLM.from_config(config)
"""
if type(config) in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.keys():
return MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING[type(config)](config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.keys()),
)
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
@add_start_docstrings(
"Instantiate one of the model classes of the library---with a sequence-to-sequence language modeling "
"head---from a pretrained model.",
AUTO_MODEL_PRETRAINED_DOCSTRING,
)
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Examples::
>>> from transformers import AutoConfig, AutoModelForSeq2SeqLM
>>> # Download model and configuration from huggingface.co and cache.
>>> model = AutoModelForSeq2SeqLM.from_pretrained('t5-base')
>>> # Update configuration during loading
>>> model = AutoModelForSeq2SeqLM.from_pretrained('t5-base', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/t5_tf_model_config.json')
>>> model = AutoModelForSeq2SeqLM.from_pretrained('./tf_model/t5_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config, kwargs = AutoConfig.from_pretrained(
pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
)
if type(config) in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.keys():
return MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING[type(config)].from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **kwargs
)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.keys()),
)
)
class AutoModelForSequenceClassification:
r"""
This is a generic model class that will be instantiated as one of the model classes of the library---with a
sequence classification head---when created with the
:meth:`~transformers.AutoModelForSequenceClassification.from_pretrained` class method or the
:meth:`~transformers.AutoModelForSequenceClassification.from_config` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoModelForSequenceClassification is designed to be instantiated "
"using the `AutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelForSequenceClassification.from_config(config)` methods."
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, use_model_types=False)
def from_config(cls, config):
r"""
Instantiates one of the model classes of the library---with a sequence classification head---from a
configuration.
Note:
Loading a model from its configuration file does **not** load the model weights. It only affects the
model's configuration. Use :meth:`~transformers.AutoModelForSequenceClassification.from_pretrained` to load
the model weights.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
List options
Examples::
>>> from transformers import AutoConfig, AutoModelForSequenceClassification
>>> # Download configuration from huggingface.co and cache.
>>> config = AutoConfig.from_pretrained('bert-base-uncased')
>>> model = AutoModelForSequenceClassification.from_config(config)
"""
if type(config) in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys():
return MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING[type(config)](config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys()),
)
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING)
@add_start_docstrings(
"Instantiate one of the model classes of the library---with a sequence classification head---from a "
"pretrained model.",
AUTO_MODEL_PRETRAINED_DOCSTRING,
)
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Examples::
>>> from transformers import AutoConfig, AutoModelForSequenceClassification
>>> # Download model and configuration from huggingface.co and cache.
>>> model = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased')
>>> # Update configuration during loading
>>> model = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForSequenceClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config, kwargs = AutoConfig.from_pretrained(
pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
)
if type(config) in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys():
return MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING[type(config)].from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **kwargs
)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys()),
)
)
class AutoModelForQuestionAnswering:
r"""
This is a generic model class that will be instantiated as one of the model classes of the library---with a
question answering head---when created with the
:meth:`~transformers.AutoModelForQuestionAnswering.from_pretrained` class method or the
:meth:`~transformers.AutoModelForQuestionAnswering.from_config` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoModelForQuestionAnswering is designed to be instantiated "
"using the `AutoModelForQuestionAnswering.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelForQuestionAnswering.from_config(config)` methods."
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_QUESTION_ANSWERING_MAPPING, use_model_types=False)
def from_config(cls, config):
r"""
Instantiates one of the model classes of the library---with a question answering head---from a configuration.
Note:
Loading a model from its configuration file does **not** load the model weights. It only affects the
model's configuration. Use :meth:`~transformers.AutoModelForQuestionAnswering.from_pretrained` to load the
model weights.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
List options
Examples::
>>> from transformers import AutoConfig, AutoModelForQuestionAnswering
>>> # Download configuration from huggingface.co and cache.
>>> config = AutoConfig.from_pretrained('bert-base-uncased')
>>> model = AutoModelForQuestionAnswering.from_config(config)
"""
if type(config) in MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys():
return MODEL_FOR_QUESTION_ANSWERING_MAPPING[type(config)](config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()),
)
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_QUESTION_ANSWERING_MAPPING)
@add_start_docstrings(
"Instantiate one of the model classes of the library---with a question answering head---from a "
"pretrained model.",
AUTO_MODEL_PRETRAINED_DOCSTRING,
)
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Examples::
>>> from transformers import AutoConfig, AutoModelForQuestionAnswering
>>> # Download model and configuration from huggingface.co and cache.
>>> model = AutoModelForQuestionAnswering.from_pretrained('bert-base-uncased')
>>> # Update configuration during loading
>>> model = AutoModelForQuestionAnswering.from_pretrained('bert-base-uncased', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForQuestionAnswering.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config, kwargs = AutoConfig.from_pretrained(
pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
)
if type(config) in MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys():
return MODEL_FOR_QUESTION_ANSWERING_MAPPING[type(config)].from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **kwargs
)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()),
)
)
class AutoModelForTableQuestionAnswering:
r"""
This is a generic model class that will be instantiated as one of the model classes of the library---with a table
    question answering head---when created with the
    :meth:`~transformers.AutoModelForTableQuestionAnswering.from_pretrained` class method or the
:meth:`~transformers.AutoModelForTableQuestionAnswering.from_config` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoModelForQuestionAnswering is designed to be instantiated "
"using the `AutoModelForTableQuestionAnswering.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelForTableQuestionAnswering.from_config(config)` methods."
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, use_model_types=False)
def from_config(cls, config):
r"""
Instantiates one of the model classes of the library---with a table question answering head---from a
configuration.
Note:
Loading a model from its configuration file does **not** load the model weights. It only affects the
model's configuration. Use :meth:`~transformers.AutoModelForTableQuestionAnswering.from_pretrained` to load
the model weights.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
List options
Examples::
>>> from transformers import AutoConfig, AutoModelForTableQuestionAnswering
>>> # Download configuration from huggingface.co and cache.
>>> config = AutoConfig.from_pretrained('google/tapas-base-finetuned-wtq')
>>> model = AutoModelForTableQuestionAnswering.from_config(config)
"""
if type(config) in MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING.keys():
return MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING[type(config)](config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING.keys()),
)
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING)
@add_start_docstrings(
"Instantiate one of the model classes of the library---with a table question answering head---from a "
"pretrained model.",
AUTO_MODEL_PRETRAINED_DOCSTRING,
)
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Examples::
>>> from transformers import AutoConfig, AutoModelForTableQuestionAnswering
>>> # Download model and configuration from huggingface.co and cache.
>>> model = AutoModelForTableQuestionAnswering.from_pretrained('google/tapas-base-finetuned-wtq')
>>> # Update configuration during loading
>>> model = AutoModelForTableQuestionAnswering.from_pretrained('google/tapas-base-finetuned-wtq', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/tapas_tf_checkpoint.json')
            >>> model = AutoModelForTableQuestionAnswering.from_pretrained('./tf_model/tapas_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config, kwargs = AutoConfig.from_pretrained(
pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
)
if type(config) in MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING.keys():
return MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING[type(config)].from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **kwargs
)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING.keys()),
)
)
class AutoModelForTokenClassification:
r"""
This is a generic model class that will be instantiated as one of the model classes of the library---with a token
    classification head---when created with the
:meth:`~transformers.AutoModelForTokenClassification.from_pretrained` class method or the
:meth:`~transformers.AutoModelForTokenClassification.from_config` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoModelForTokenClassification is designed to be instantiated "
"using the `AutoModelForTokenClassification.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelForTokenClassification.from_config(config)` methods."
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, use_model_types=False)
def from_config(cls, config):
r"""
Instantiates one of the model classes of the library---with a token classification head---from a configuration.
Note:
Loading a model from its configuration file does **not** load the model weights. It only affects the
model's configuration. Use :meth:`~transformers.AutoModelForTokenClassification.from_pretrained` to load
the model weights.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
List options
Examples::
>>> from transformers import AutoConfig, AutoModelForTokenClassification
>>> # Download configuration from huggingface.co and cache.
>>> config = AutoConfig.from_pretrained('bert-base-uncased')
>>> model = AutoModelForTokenClassification.from_config(config)
"""
if type(config) in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys():
return MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING[type(config)](config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys()),
)
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING)
@add_start_docstrings(
"Instantiate one of the model classes of the library---with a token classification head---from a "
"pretrained model.",
AUTO_MODEL_PRETRAINED_DOCSTRING,
)
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Examples::
>>> from transformers import AutoConfig, AutoModelForTokenClassification
>>> # Download model and configuration from huggingface.co and cache.
>>> model = AutoModelForTokenClassification.from_pretrained('bert-base-uncased')
>>> # Update configuration during loading
>>> model = AutoModelForTokenClassification.from_pretrained('bert-base-uncased', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForTokenClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config, kwargs = AutoConfig.from_pretrained(
pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
)
if type(config) in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys():
return MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING[type(config)].from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **kwargs
)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys()),
)
)
class AutoModelForMultipleChoice:
r"""
This is a generic model class that will be instantiated as one of the model classes of the library---with a
    multiple choice classification head---when created with the
:meth:`~transformers.AutoModelForMultipleChoice.from_pretrained` class method or the
:meth:`~transformers.AutoModelForMultipleChoice.from_config` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoModelForMultipleChoice is designed to be instantiated "
"using the `AutoModelForMultipleChoice.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelForMultipleChoice.from_config(config)` methods."
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_MULTIPLE_CHOICE_MAPPING, use_model_types=False)
def from_config(cls, config):
r"""
Instantiates one of the model classes of the library---with a multiple choice classification head---from a
configuration.
Note:
Loading a model from its configuration file does **not** load the model weights. It only affects the
model's configuration. Use :meth:`~transformers.AutoModelForMultipleChoice.from_pretrained` to load the
model weights.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
List options
Examples::
>>> from transformers import AutoConfig, AutoModelForMultipleChoice
>>> # Download configuration from huggingface.co and cache.
>>> config = AutoConfig.from_pretrained('bert-base-uncased')
>>> model = AutoModelForMultipleChoice.from_config(config)
"""
if type(config) in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.keys():
return MODEL_FOR_MULTIPLE_CHOICE_MAPPING[type(config)](config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.keys()),
)
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_MULTIPLE_CHOICE_MAPPING)
@add_start_docstrings(
"Instantiate one of the model classes of the library---with a multiple choice classification head---from a "
"pretrained model.",
AUTO_MODEL_PRETRAINED_DOCSTRING,
)
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Examples::
>>> from transformers import AutoConfig, AutoModelForMultipleChoice
>>> # Download model and configuration from huggingface.co and cache.
>>> model = AutoModelForMultipleChoice.from_pretrained('bert-base-uncased')
>>> # Update configuration during loading
>>> model = AutoModelForMultipleChoice.from_pretrained('bert-base-uncased', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForMultipleChoice.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config, kwargs = AutoConfig.from_pretrained(
pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
)
if type(config) in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.keys():
return MODEL_FOR_MULTIPLE_CHOICE_MAPPING[type(config)].from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **kwargs
)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.keys()),
)
)
class AutoModelForNextSentencePrediction:
r"""
This is a generic model class that will be instantiated as one of the model classes of the library---with a
    next sentence prediction head---when created with the
:meth:`~transformers.AutoModelForNextSentencePrediction.from_pretrained` class method or the
:meth:`~transformers.AutoModelForNextSentencePrediction.from_config` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoModelForNextSentencePrediction is designed to be instantiated "
"using the `AutoModelForNextSentencePrediction.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelForNextSentencePrediction.from_config(config)` methods."
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, use_model_types=False)
def from_config(cls, config):
r"""
        Instantiates one of the model classes of the library---with a next sentence prediction head---from a
configuration.
Note:
Loading a model from its configuration file does **not** load the model weights. It only affects the
model's configuration. Use :meth:`~transformers.AutoModelForNextSentencePrediction.from_pretrained` to load
the model weights.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
List options
Examples::
>>> from transformers import AutoConfig, AutoModelForNextSentencePrediction
>>> # Download configuration from huggingface.co and cache.
>>> config = AutoConfig.from_pretrained('bert-base-uncased')
>>> model = AutoModelForNextSentencePrediction.from_config(config)
"""
if type(config) in MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING.keys():
return MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING[type(config)](config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING.keys()),
)
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING)
@add_start_docstrings(
"Instantiate one of the model classes of the library---with a multiple choice classification head---from a "
"pretrained model.",
AUTO_MODEL_PRETRAINED_DOCSTRING,
)
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Examples::
>>> from transformers import AutoConfig, AutoModelForNextSentencePrediction
>>> # Download model and configuration from huggingface.co and cache.
>>> model = AutoModelForNextSentencePrediction.from_pretrained('bert-base-uncased')
>>> # Update configuration during loading
>>> model = AutoModelForNextSentencePrediction.from_pretrained('bert-base-uncased', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForNextSentencePrediction.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config, kwargs = AutoConfig.from_pretrained(
pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
)
if type(config) in MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING.keys():
return MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING[type(config)].from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **kwargs
)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING.keys()),
)
)
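# A minimal usage sketch of the dispatch pattern shared by the Auto* classes above:
# resolve an ``AutoConfig`` first, then the concrete model class is selected by
# ``type(config)`` through the corresponding ``MODEL_FOR_*_MAPPING``. The example
# assumes the ``transformers`` package and the ``bert-base-uncased`` checkpoint are
# available; it is illustrative only and not part of the original module.
#
#     from transformers import AutoConfig, AutoModelForTokenClassification
#
#     config = AutoConfig.from_pretrained("bert-base-uncased", num_labels=9)
#     model = AutoModelForTokenClassification.from_pretrained(
#         "bert-base-uncased", config=config
#     )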
| [] |
2024-01-10 | anubrag/nextpy | nextpy~ai~scripts~awslambda.py | # Directly taken from Langchain Github Repo
"""Util that calls Lambda."""
import json
from typing import Any, Dict, Optional
from pydantic import BaseModel, Extra, root_validator
class LambdaWrapper(BaseModel):
"""Wrapper for AWS Lambda SDK.
Docs for using:
1. pip install boto3
2. Create a lambda function using the AWS Console or CLI
3. Run `aws configure` and enter your AWS credentials
"""
lambda_client: Any #: :meta private:
function_name: Optional[str] = None
awslambda_tool_name: Optional[str] = None
awslambda_tool_description: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that python package exists in environment."""
try:
import boto3
except ImportError:
raise ImportError(
"boto3 is not installed. Please install it with `pip install boto3`"
)
values["lambda_client"] = boto3.client("lambda")
values["function_name"] = values["function_name"]
return values
def run(self, query: str) -> str:
"""Invoke Lambda function and parse result."""
res = self.lambda_client.invoke(
FunctionName=self.function_name,
InvocationType="RequestResponse",
Payload=json.dumps({"body": query}),
)
try:
payload_stream = res["Payload"]
payload_string = payload_stream.read().decode("utf-8")
answer = json.loads(payload_string)["body"]
except StopIteration:
return "Failed to parse response from Lambda"
if answer is None or answer == "":
# We don't want to return the assumption alone if answer is empty
return "Request failed."
else:
return f"Result: {answer}"
| [] |
2024-01-10 | anubrag/nextpy | ai-notebooks~autoscrum~autoscrum.py | #credit: Martin Schroder
#research paper: https://arxiv.org/abs/2306.03197
#organization: Swedish Embedded Consulting Group Research
#linkedin: martinschroder
#email: [email protected]
import json
from pathlib import Path
import pkg_resources as pg
from dotagent import compiler
from dotagent.llms._openai import OpenAI
compiler.llm = OpenAI("gpt-3.5-turbo")
class AutoScrum:
def __init__(self, path):
self.featurizer = self.load_program("featurizer")
self.storylizer = self.load_program("storylizer")
self.goalmaker = self.load_program("goalmaker")
self.clarifier = self.load_program("clarifier")
self.acceptance = self.load_program("acceptance")
self.taskalizer = self.load_program("taskalizer")
self.requalizer = self.load_program("requalizer")
self.planner = self.load_program("planner")
if not Path(path).exists():
self.reset()
self.save(path)
self.data = self.load_data(path)
def reset(self):
self.data = {
"product": "Your product name",
"vision": "Your product vision",
"niche": "Your product niche",
"current_state": {},
"desired_state": {},
"plan": [],
"requirements": [],
"sprint_duration": "2 weeks",
"features": [],
"stories": [],
"avoid": []
}
def load_data(self, path):
"""Loads data json file."""
return json.loads(Path(path).read_text())
def load_program(self, name: str):
path = pg.resource_filename(__name__, f'data/{name}.hbs')
return compiler(Path(path).read_text(), silent=True)
def save(self, path):
Path(path).write_text(json.dumps(self.data, indent=4))
def gen_plan(self, count:int):
prog = self.planner(
current_state=self.data["current_state"],
desired_state=self.data["desired_state"],
plan=[p["task"] for p in self.data["plan"]],
count=count
)
try:
# print(prog)
obj = json.loads(prog["response"])
return obj
except:
# print(prog)
print("Error: JSON conversion failed")
return []
def add_plan(self, steps: list):
self.data["plan"].extend(steps)
@property
def plan(self):
return self.data["plan"]
def gen_requirements(self, count:int):
prog = self.requalizer(
product=self.data["product"],
vision=self.data["vision"],
niche=self.data["niche"],
current_state=self.data["current_state"],
desired_state=self.data["desired_state"],
requirements=[f["name"] for f in self.data["requirements"]],
count=count
)
try:
# print(prog)
obj = json.loads(prog["response"])
return obj
except:
print(prog)
print("Error: JSON conversion failed")
return []
def add_requirements(self, reqs: list):
self.data["requirements"].extend(reqs)
@property
def requirements(self):
return self.data["requirements"]
def gen_features(self, count:int):
prog = self.featurizer(
product=self.data["product"],
vision=self.data["vision"],
requirements=[req["description"] for req in self.data["requirements"]],
story_features=[s["feature"] for s in self.data["stories"]],
features = [feature["name"] for feature in self.data["features"]],
avoid=self.data["avoid"],
count=count
)
try:
print(prog)
obj = json.loads(prog["response"])
return obj
except:
print(prog)
print("Error: JSON conversion failed")
return []
def add_features(self, features: list):
self.data["features"].extend(features)
@property
def features(self):
return self.data["features"]
def gen_stories(self, count:int):
prog = self.storylizer(
product=self.data["product"],
niche=self.data["niche"],
vision=self.data["vision"],
features = [feature["name"] for feature in self.data["features"]],
current_state=self.data["current_state"],
desired_state=self.data["desired_state"],
stories = [story["name"] for story in self.data["stories"]],
count=count
)
print(prog)
return json.loads(prog["response"])
def add_stories(self, stories: list):
self.data["stories"].extend(stories)
@property
def stories(self):
return self.data["stories"]
def gen_goals(self, count:int):
prog = self.goalmaker(
project=self.data["project"],
sprint_duration=self.data["sprint_duration"],
product_vision=self.data["goal"],
backlog = [story["name"] for story in self.data["stories"]],
goals = [goal["title"] for goal in self.data["goals"]],
count=count
)
print(prog)
return json.loads(prog["goals"])
def add_goals(self, goals: list):
self.data["goals"].extend(goals)
@property
def goals(self):
return self.data["goals"]
def gen_acceptance_criteria(self, story, count:int):
prog = self.acceptance(
product=self.data["product"],
requirements=[req["description"] for req in self.data["requirements"]],
story=story,
count=count
)
print(prog)
return json.loads(prog["response"])
def gen_tasks(self, story, count:int):
prog = self.taskalizer(
product=self.data["product"],
story=story,
count=count
)
print(prog)
return json.loads(prog["response"])
def gen_clarification(self, story, count:int):
prog = self.clarifier(
story=story,
completed_tasks=[],
resources=[],
count=count
)
print(prog)
return json.loads(prog["response"])
| [] |
2024-01-10 | anubrag/nextpy | nextpy~ai~scripts~bravesearch.py | # Code directly taken from LangChain Github Repo
import json
import requests
from pydantic import BaseModel, Field
class BraveSearchWrapper(BaseModel):
api_key: str
search_kwargs: dict = Field(default_factory=dict)
def run(self, query: str) -> str:
headers = {
"X-Subscription-Token": self.api_key,
"Accept": "application/json",
}
base_url = "https://api.search.brave.com/res/v1/web/search"
req = requests.PreparedRequest()
params = {**self.search_kwargs, **{"q": query}}
req.prepare_url(base_url, params)
if req.url is None:
raise ValueError("prepared url is None, this should not happen")
response = requests.get(req.url, headers=headers)
if not response.ok:
raise Exception(f"HTTP error {response.status_code}")
parsed_response = response.json()
web_search_results = parsed_response.get("web", {}).get("results", [])
final_results = []
if isinstance(web_search_results, list):
for item in web_search_results:
final_results.append(
{
"title": item.get("title"),
"link": item.get("url"),
"snippet": item.get("description"),
}
)
return json.dumps(final_results)
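if __name__ == "__main__":
    # Minimal usage sketch (not part of the original file). The API key is a
    # placeholder, and "count" is assumed to be a supported Brave Search parameter.
    search = BraveSearchWrapper(
        api_key="YOUR_BRAVE_API_KEY",  # placeholder -- supply a real subscription token
        search_kwargs={"count": 3},    # forwarded verbatim as query parameters
    )
    print(search.run("retrieval augmented generation"))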
| [] |
2024-01-10 | anubrag/nextpy | nextpy~ai~scripts~wikipedia.py | # Code taken from langchain Github Repo and removed load method as it involved Documents from langchain
"""Util that calls Wikipedia."""
import logging
from typing import Any, Dict, Optional
from pydantic import BaseModel, Extra, root_validator
logger = logging.getLogger(__name__)
WIKIPEDIA_MAX_QUERY_LENGTH = 300
class WikipediaAPIWrapper(BaseModel):
"""Wrapper around WikipediaAPI.
To use, you should have the ``wikipedia`` python package installed.
This wrapper will use the Wikipedia API to conduct searches and
fetch page summaries. By default, it will return the page summaries
of the top-k results.
It limits the Document content by doc_content_chars_max.
"""
wiki_client: Any #: :meta private:
top_k_results: int = 3
lang: str = "en"
load_all_available_meta: bool = False
doc_content_chars_max: int = 4000
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
try:
import wikipedia
wikipedia.set_lang(values["lang"])
values["wiki_client"] = wikipedia
except ImportError:
raise ImportError(
"Could not import wikipedia python package. "
"Please install it with `pip install wikipedia`."
)
return values
def run(self, query: str) -> str:
"""Run Wikipedia search and get page summaries."""
page_titles = self.wiki_client.search(query[:WIKIPEDIA_MAX_QUERY_LENGTH])
summaries = []
for page_title in page_titles[: self.top_k_results]:
if wiki_page := self._fetch_page(page_title):
if summary := self._formatted_page_summary(page_title, wiki_page):
summaries.append(summary)
if not summaries:
return "No good Wikipedia Search Result was found"
return "\n\n".join(summaries)[: self.doc_content_chars_max]
@staticmethod
def _formatted_page_summary(page_title: str, wiki_page: Any) -> Optional[str]:
return f"Page: {page_title}\nSummary: {wiki_page.summary}"
def _fetch_page(self, page: str) -> Optional[str]:
try:
return self.wiki_client.page(title=page, auto_suggest=False)
except (
self.wiki_client.exceptions.PageError,
self.wiki_client.exceptions.DisambiguationError,
):
return None
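if __name__ == "__main__":
    # Minimal usage sketch (not part of the original file); requires the optional
    # `wikipedia` package checked for in validate_environment above.
    wiki = WikipediaAPIWrapper(top_k_results=2, doc_content_chars_max=1000)
    print(wiki.run("Alan Turing"))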
| [] |
2024-01-10 | anubrag/nextpy | nextpy~ai~models~embedding~elasticsearch.py | from __future__ import annotations
from typing import TYPE_CHECKING, Any, List, Optional
from nextpy.utils.data_ops import get_from_dict_or_env
if TYPE_CHECKING:
from elasticsearch import Elasticsearch
from nextpy.ai.models.embedding.base import Embeddings
class ElasticsearchEmbeddings(Embeddings):
"""Wrapper around Elasticsearch embedding models.
This class provides an interface to generate embedding using a model deployed
in an Elasticsearch cluster. It requires an Elasticsearch connection object
and the model_id of the model deployed in the cluster.
In Elasticsearch you need to have an embedding model loaded and deployed.
- https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html
- https://www.elastic.co/guide/en/machine-learning/current/ml-nlp-deploy-models.html
""" # noqa: E501
def __init__(
self,
client: Any,
model_id: str,
*,
input_field: str = "text_field",
):
"""Initialize the ElasticsearchEmbeddings instance.
Args:
client (MlClient): An Elasticsearch ML client object.
model_id (str): The model_id of the model deployed in the Elasticsearch
cluster.
input_field (str): The name of the key for the input text field in the
document. Defaults to 'text_field'.
"""
self.client = client
self.model_id = model_id
self.input_field = input_field
@classmethod
def from_credentials(
cls,
model_id: str,
*,
es_cloud_id: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
input_field: str = "text_field",
) -> ElasticsearchEmbeddings:
"""Instantiate embeddings from Elasticsearch credentials.
Args:
model_id (str): The model_id of the model deployed in the Elasticsearch
cluster.
input_field (str): The name of the key for the input text field in the
document. Defaults to 'text_field'.
es_cloud_id: (str, optional): The Elasticsearch cloud ID to connect to.
es_user: (str, optional): Elasticsearch username.
es_password: (str, optional): Elasticsearch password.
Example:
.. code-block:: python
                from nextpy.ai.models.embedding.elasticsearch import ElasticsearchEmbeddings
# Define the model ID and input field name (if different from default)
model_id = "your_model_id"
# Optional, only if different from 'text_field'
input_field = "your_input_field"
# Credentials can be passed in two ways. Either set the env vars
# ES_CLOUD_ID, ES_USER, ES_PASSWORD and they will be automatically
# pulled in, or pass them in directly as kwargs.
embeddings = ElasticsearchEmbeddings.from_credentials(
model_id,
input_field=input_field,
# es_cloud_id="foo",
# es_user="bar",
# es_password="baz",
)
documents = [
"This is an example document.",
"Another example document to generate embeddings for.",
]
                embeddings.embed_documents(documents)
"""
try:
from elasticsearch import Elasticsearch
from elasticsearch.client import MlClient
except ImportError:
raise ImportError(
"elasticsearch package not found, please install with 'pip install "
"elasticsearch'"
)
es_cloud_id = es_cloud_id or get_from_dict_or_env("es_cloud_id", "ES_CLOUD_ID")
es_user = es_user or get_from_dict_or_env("es_user", "ES_USER")
es_password = es_password or get_from_dict_or_env("es_password", "ES_PASSWORD")
# Connect to Elasticsearch
es_connection = Elasticsearch(
cloud_id=es_cloud_id, basic_auth=(es_user, es_password)
)
client = MlClient(es_connection)
return cls(client, model_id, input_field=input_field)
@classmethod
def from_es_connection(
cls,
model_id: str,
es_connection: Elasticsearch,
input_field: str = "text_field",
) -> ElasticsearchEmbeddings:
"""Instantiate embeddings from an existing Elasticsearch connection.
This method provides a way to create an instance of the ElasticsearchEmbeddings
class using an existing Elasticsearch connection. The connection object is used
to create an MlClient, which is then used to initialize the
ElasticsearchEmbeddings instance.
Args:
model_id (str): The model_id of the model deployed in the Elasticsearch cluster.
es_connection (elasticsearch.Elasticsearch): An existing Elasticsearch
connection object. input_field (str, optional): The name of the key for the
input text field in the document. Defaults to 'text_field'.
Returns:
ElasticsearchEmbeddings: An instance of the ElasticsearchEmbeddings class.
Example:
.. code-block:: python
from elasticsearch import Elasticsearch
                from nextpy.ai.models.embedding.elasticsearch import ElasticsearchEmbeddings
# Define the model ID and input field name (if different from default)
model_id = "your_model_id"
# Optional, only if different from 'text_field'
input_field = "your_input_field"
# Create Elasticsearch connection
es_connection = Elasticsearch(
hosts=["localhost:9200"], http_auth=("user", "password")
)
# Instantiate ElasticsearchEmbeddings using the existing connection
embeddings = ElasticsearchEmbeddings.from_es_connection(
model_id,
es_connection,
input_field=input_field,
)
documents = [
"This is an example document.",
"Another example document to generate embeddings for.",
]
                embeddings.embed_documents(documents)
"""
# Importing MlClient from elasticsearch.client within the method to
# avoid unnecessary import if the method is not used
from elasticsearch.client import MlClient
# Create an MlClient from the given Elasticsearch connection
client = MlClient(es_connection)
# Return a new instance of the ElasticsearchEmbeddings class with
# the MlClient, model_id, and input_field
return cls(client, model_id, input_field=input_field)
def _embedding_func(self, texts: List[str]) -> List[List[float]]:
"""Generate embeddings for the given texts using the Elasticsearch model.
Args:
texts (List[str]): A list of text strings to generate embeddings for.
Returns:
List[List[float]]: A list of embeddings, one for each text in the input
list.
"""
response = self.client.infer_trained_model(
model_id=self.model_id, docs=[{self.input_field: text} for text in texts]
)
embeddings = [doc["predicted_value"] for doc in response["inference_results"]]
return embeddings
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Generate embeddings for a list of documents.
Args:
texts (List[str]): A list of document text strings to generate embeddings
for.
Returns:
List[List[float]]: A list of embeddings, one for each document in the input
list.
"""
return self._embedding_func(texts)
def embed_query(self, text: str) -> List[float]:
"""Generate an embedding for a single query text.
Args:
text (str): The query text to generate an embedding for.
Returns:
List[float]: The embedding for the input query text.
"""
return self._embedding_func([text])[0]
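# Minimal usage sketch (not part of the original file): the two public methods mirror
# each other -- embed_documents for batches, embed_query for a single string. The
# model_id is a placeholder and the ES_CLOUD_ID/ES_USER/ES_PASSWORD environment
# variables are assumed to be set for from_credentials.
#
#     embedder = ElasticsearchEmbeddings.from_credentials(
#         "your_model_id",          # placeholder id of a deployed embedding model
#         input_field="text_field",
#     )
#     doc_vectors = embedder.embed_documents(["first doc", "second doc"])
#     query_vector = embedder.embed_query("a search query")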
| [] |
2024-01-10 | anubrag/nextpy | nextpy~ai~scripts~bash.py | # Directly taken from Langchain Github Repo
"""Wrapper around subprocess to run commands."""
from __future__ import annotations
import platform
import re
import subprocess
from typing import TYPE_CHECKING, List, Union
from uuid import uuid4
if TYPE_CHECKING:
import pexpect
def _lazy_import_pexpect() -> pexpect:
"""Import pexpect only when needed."""
if platform.system() == "Windows":
raise ValueError("Persistent bash processes are not yet supported on Windows.")
try:
import pexpect
except ImportError:
raise ImportError(
"pexpect required for persistent bash processes."
" To install, run `pip install pexpect`."
)
return pexpect
class BashProcess:
"""Executes bash commands and returns the output."""
def __init__(
self,
strip_newlines: bool = False,
return_err_output: bool = False,
persistent: bool = False,
):
"""Initialize with stripping newlines."""
self.strip_newlines = strip_newlines
self.return_err_output = return_err_output
self.prompt = ""
self.process = None
if persistent:
self.prompt = str(uuid4())
self.process = self._initialize_persistent_process(self.prompt)
@staticmethod
def _initialize_persistent_process(prompt: str) -> pexpect.spawn:
# Start bash in a clean environment
# Doesn't work on windows
pexpect = _lazy_import_pexpect()
process = pexpect.spawn(
"env", ["-i", "bash", "--norc", "--noprofile"], encoding="utf-8"
)
# Set the custom prompt
process.sendline("PS1=" + prompt)
process.expect_exact(prompt, timeout=10)
return process
def run(self, commands: Union[str, List[str]]) -> str:
"""Run commands and return final output."""
if isinstance(commands, str):
commands = [commands]
commands = ";".join(commands)
if self.process is not None:
return self.run_persistent(
commands,
)
else:
            return self._run(commands)
    def _run(self, command: str) -> str:
"""Run commands and return final output."""
try:
output = subprocess.run(
command,
shell=True,
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
).stdout.decode()
except subprocess.CalledProcessError as error:
if self.return_err_output:
return error.stdout.decode()
return str(error)
if self.strip_newlines:
output = output.strip()
return output
def process_output(self, output: str, command: str) -> str:
# Remove the command from the output using a regular expression
pattern = re.escape(command) + r"\s*\n"
output = re.sub(pattern, "", output, count=1)
return output.strip()
def run_persistent(self, command: str) -> str:
"""Run commands and return final output."""
pexpect = _lazy_import_pexpect()
if self.process is None:
raise ValueError("Process not initialized")
self.process.sendline(command)
# Clear the output with an empty string
self.process.expect(self.prompt, timeout=10)
self.process.sendline("")
try:
self.process.expect([self.prompt, pexpect.EOF], timeout=10)
except pexpect.TIMEOUT:
return f"Timeout error while executing command {command}"
if self.process.after == pexpect.EOF:
return f"Exited with error status: {self.process.exitstatus}"
output = self.process.before
output = self.process_output(output, command)
if self.strip_newlines:
return output.strip()
return output
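if __name__ == "__main__":
    # Minimal usage sketch (not part of the original file); assumes a POSIX shell
    # (commands are joined with ';'), and persistent=True additionally needs pexpect.
    bash = BashProcess(strip_newlines=True)
    print(bash.run(["echo hello", "pwd"]))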
| [] |
2024-01-10 | anubrag/nextpy | nextpy~data~vectordb~chroma.py | import uuid
from typing import Any, Dict, Iterable, List, Optional, Tuple
import chromadb
import chromadb.config
from nextpy.ai.models.embedding.base import Embeddings, VectorDB
from nextpy.ai.schema import Document
class Chroma(VectorDB):
"""Wrapper around ChromaDB embeddings platform.
To use, you should have the ``chromadb`` python package installed.
Example:
.. code-block:: python
from nextpy.data.vectordb import Chroma
from nextpy.ai.embedding.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectordb = Chroma("llms_collection", embeddings)
"""
def __init__(
self,
collection_name: str = "llms",
embedding_function: Optional[Embeddings] = None,
persist_directory: Optional[str] = None,
client_settings: Optional[chromadb.config.Settings] = None,
collection_metadata: Optional[Dict] = None,
client: Optional[chromadb.Client] = None,
) -> None:
"""Initialize with Chroma client."""
try:
import chromadb
import chromadb.config
except ImportError:
raise ValueError(
"Could not import chromadb python package. "
"Please install it with `pip install chromadb`."
)
if client is not None:
self._client = client
else:
if client_settings:
self._client_settings = client_settings
else:
self._client_settings = chromadb.config.Settings()
if persist_directory is not None:
self._client_settings = chromadb.config.Settings(
chroma_db_impl="duckdb+parquet",
persist_directory=persist_directory,
)
self._client = chromadb.Client(self._client_settings)
self._embedding_function = embedding_function
self._persist_directory = persist_directory
self._collection = self._client.get_or_create_collection(
name=collection_name,
embedding_function=self._embedding_function.embed_documents
if self._embedding_function is not None
else None,
metadata=collection_metadata,
)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
ids = ids or [str(uuid.uuid1()) for _ in texts]
embeddings = None
if self._embedding_function is not None:
embeddings = self._embedding_function.embed_documents(list(texts))
self._collection.add(
metadatas=metadatas, embeddings=embeddings, documents=texts, ids=ids
)
return ids
def similarity_search(
self,
query: Optional[str] = None,
embedding: Optional[List[float]] = None,
top_k: int = 5,
**kwargs: Any,
    ) -> List[Tuple[Document, float]]:
"""Return docs most similar to query and respective distance score."""
if (embedding is None and query is None) or (
embedding is not None and query is not None
):
raise ValueError(
"You must provide either query embeddings or query texts, but not both"
)
search_results = []
if self._embedding_function is None:
if query is not None:
results = self._collection.query(
query_texts=[query],
n_results=top_k,
**kwargs,
)
else:
results = self._collection.query(
query_embeddings=[embedding],
n_results=top_k,
**kwargs,
)
else:
if query is not None:
query_embedding = self._embedding_function.embed_query(text=query)
results = self._collection.query(
query_embeddings=[query_embedding],
n_results=top_k,
**kwargs,
)
else:
results = self._collection.query(
query_embeddings=[embedding],
n_results=top_k,
**kwargs,
)
for result in zip(
results["documents"][0],
results["metadatas"][0],
results["distances"][0],
):
document = Document(page_content=result[0], metadata=result[1] or {})
search_results.append((document, result[2]))
return search_results
def delete_collection(self) -> None:
"""Delete the collection."""
self._client.delete_collection(self._collection.name)
def get(self, include: Optional[List[str]] = None) -> Dict[str, Any]:
"""Gets the collection.
Args:
include (Optional[List[str]]): List of fields to include from db.
Defaults to None.
"""
if include is not None:
return self._collection.get(include=include)
else:
return self._collection.get()
def persist(self) -> None:
"""Persist the collection.
This can be used to explicitly persist the data to disk.
It will also be called automatically when the object is destroyed.
"""
if self._persist_directory is None:
raise ValueError(
"You must specify a persist_directory on"
"creation to persist the collection."
)
self._client.persist()
def update_document(self, document_id: str, document: Document) -> None:
# Update a document in the collection.
text = document.page_content
metadata = document.metadata
if self._embedding_function is None:
raise ValueError(
"For update, you must specify an embedding function on creation."
)
        embeddings = self._embedding_function.embed_documents([text])
self._collection.update(
ids=[document_id],
embeddings=embeddings,
documents=[text],
metadatas=[metadata],
)
@classmethod
def from_texts(
cls,
texts: List[str],
embedding_function: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
collection_name: str = "llms",
persist_directory: Optional[str] = None,
client_settings: Optional[chromadb.config.Settings] = None,
client: Optional[chromadb.Client] = None,
**kwargs: Any,
):
"""Create a Chroma vectordb from a raw documents.
If a persist_directory is specified, the collection will be persisted there.
Otherwise, the data will be ephemeral in-memory.
Args:
texts (List[str]): List of texts to add to the collection.
collection_name (str): Name of the collection to create.
persist_directory (Optional[str]): Directory to persist the collection.
            embedding_function (Optional[Embeddings]): Embedding function. Defaults to None.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
client_settings (Optional[chromadb.config.Settings]): Chroma client settings
Returns:
Chroma: Chroma vectordb.
"""
chroma_collection = cls(
collection_name=collection_name,
embedding_function=embedding_function,
persist_directory=persist_directory,
client_settings=client_settings,
client=client,
)
chroma_collection.add_texts(texts=texts, metadatas=metadatas, ids=ids)
return chroma_collection
@classmethod
def from_documents(
cls,
documents: List[Document],
embedding_function: Optional[Embeddings] = None,
ids: Optional[List[str]] = None,
collection_name: str = "llms",
persist_directory: Optional[str] = None,
client_settings: Optional[chromadb.config.Settings] = None,
client: Optional[chromadb.Client] = None, # Add this line
**kwargs: Any,
):
"""Create a Chroma vectordb from a list of documents.
If a persist_directory is specified, the collection will be persisted there.
Otherwise, the data will be ephemeral in-memory.
Args:
collection_name (str): Name of the collection to create.
persist_directory (Optional[str]): Directory to persist the collection.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
documents (List[Document]): List of documents to add to the vectordb.
            embedding_function (Optional[Embeddings]): Embedding function. Defaults to None.
client_settings (Optional[chromadb.config.Settings]): Chroma client settings
Returns:
Chroma: Chroma vectordb.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return cls.from_texts(
texts=texts,
            embedding_function=embedding_function,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
persist_directory=persist_directory,
client_settings=client_settings,
client=client,
**kwargs,
)
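# Minimal usage sketch (not part of the original file). The OpenAIEmbeddings import
# path is taken from the module docstring above, and an OpenAI API key is assumed.
#
#     from nextpy.ai.embedding.openai import OpenAIEmbeddings
#
#     db = Chroma.from_texts(
#         texts=["nextpy is a python framework", "chroma stores embeddings"],
#         embedding_function=OpenAIEmbeddings(),
#     )
#     # similarity_search returns (Document, distance) pairs, smallest distance first.
#     for doc, distance in db.similarity_search(query="what stores embeddings?", top_k=1):
#         print(distance, doc.page_content)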
| [] |
2024-01-10 | anubrag/nextpy | app-examples~nextpy-chat~nextpy_chat~State~interview.py | import os
from openai import OpenAI
import nextpy as xt
from .state import State, QA
import speech_recognition as sr
r = sr.Recognizer()
from dotenv import load_dotenv
load_dotenv()
client = OpenAI(
api_key = os.environ["OPENAI_API_KEY"]
)
INTERVIEW_DEFAULT = {
"Interview": [],
}
class InterviewState(State):
# Job Description
job_desc: str = ''
interview_chat: str = "Interview"
index : dict[str, int] = { interview_chat:0 }
# A dict from the chat name to the list of questions and answers.
interview_chats: dict[str, list[QA]] = INTERVIEW_DEFAULT
interview_option: str = 'visible'
interview_convo: str = 'none'
interview_answer: str = ''
interview_processing: bool = False
interview_prompt = ''
recording: bool = False
text = ''
def start_recording(self):
self.recording = True
self.interview_answer = 'Listening ........'
yield
with sr.Microphone() as source:
audio = r.listen(source)
self.interview_answer = ''
try:
self.text += r.recognize_google(audio)
except:
print("Google Speech Recognition could not understand audio")
self.interview_answer = self.text
self.recording = False
yield
async def process_interview(self, form_data: dict[str, str]=None):
if self.interview_option == 'visible':
self.interview_option = 'none'
self.interview_convo = 'visible'
self.interview_prompt = f'''
Your name is NextpyGPT and you are a professional interviewer chatbot.
Your task is to ask questions one by one based on the given Job description or Role -
JOB DESCRIPTION = Role = {self.job_desc}
Always remeber the flow of conversation given in points below -
1) Start with basic question like name and introduction.
2) Initially ask basic questions related to the given Job description or Role and slowly increase the difficulty
3) Ask 10-15 questions one by one and at the end ask if user wants to stop
4) Anytime the user says to end interview give them a genuine conclusion for their performance based on the interview in a good tone only.
5) At the end just respond politely if user wants to restart
'''
self.messages = [
{"role": "system", "content": self.interview_prompt}
]
self.interview_processing = True
self.interview_answer = ""
self.text = ''
yield
# Start a new session to answer the question.
session = client.chat.completions.create(
model=os.getenv("OPENAI_API_MODEL"),
messages=self.messages,
stream=True,
)
qa = QA(question="", answer="", index=self.index[self.interview_chat])
self.index[self.interview_chat] += 1
self.interview_chats[self.interview_chat].append(qa)
yield
# Stream the results, yielding after every word.
for item in session:
if hasattr(item.choices[0].delta, "content"):
question_text = item.choices[0].delta.content
if question_text is None :
question_text = ''
self.interview_chats[self.interview_chat][-1].question += question_text
self.interview_chats = self.interview_chats
yield
# Toggle the processing flag.
self.interview_processing = False
return
self.interview_chats[self.interview_chat][-1].answer = self.interview_answer
question = self.interview_chats[self.interview_chat][-1].question
self.messages.append({"role": "assistant", "content": question})
self.messages.append({"role": "user", "content": self.interview_answer})
# Clear the input and start the processing.
self.interview_processing = True
self.interview_answer = ""
yield
# Start a new session to answer the question.
session = client.chat.completions.create(
model=os.getenv("OPENAI_API_MODEL"),
messages=self.messages,
stream=True,
)
qa = QA(question="", answer="", index=self.index[self.interview_chat])
self.index[self.interview_chat] += 1
self.interview_chats[self.interview_chat].append(qa)
# Stream the results, yielding after every word.
for item in session:
if hasattr(item.choices[0].delta, "content"):
question_text = item.choices[0].delta.content
if question_text is None :
question_text = ''
self.interview_chats[self.interview_chat][-1].question += question_text
self.interview_chats = self.interview_chats
yield
# Toggle the processing flag.
self.interview_processing = False | [] |
2024-01-10 | anubrag/nextpy | nextpy~data~vectordb~deeplake.py | """Wrapper around Activeloop Deep Lake."""
from __future__ import annotations
import logging
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
try:
import deeplake
from deeplake.core.fast_forwarding import version_compare
from deeplake.core.vectordb import DeepLakeVectorDB
_DEEPLAKE_INSTALLED = True
except ImportError:
_DEEPLAKE_INSTALLED = False
from nextpy.ai.models.embedding.base import Embeddings, VectorDB
from nextpy.ai.schema import Document
# from nextpy.ai.models.embeddings.utils import maximal_marginal_relevance
logger = logging.getLogger(__name__)
DEFAULT_DATASET_PATH = "./deeplake/"
class DeepLake(VectorDB):
"""Wrapper around Deep Lake, a data lake for deep learning applications.
To use, you should have the ``deeplake`` python package installed.
Example:
.. code-block:: python
from nextpy.data.vectordb import DeepLake
from nextpy.ai.embedding.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectordb = DeepLake("llms_db_path", embeddings.embed_query)
"""
def __init__(
self,
dataset_path: str = DEFAULT_DATASET_PATH,
token: Optional[str] = None,
embedding_function: Optional[Embeddings] = None,
read_only: bool = False,
ingestion_batch_size: int = 1000,
num_workers: int = 0,
verbose: bool = True,
exec_option: str = "python",
**kwargs: Any,
) -> None:
"""Creates an empty DeepLakeVectorDB or loads an existing one.
The DeepLakeVectorDB is located at the specified ``path``.
Examples:
>>> # Create a vector store with default tensors
>>> deeplake_vectordb = DeepLake(
... path = <path_for_storing_Data>,
... )
>>>
>>> # Create a vector store in the Deep Lake Managed Tensor Database
>>> data = DeepLake(
... path = "hub://org_id/dataset_name",
... exec_option = "tensor_db",
... )
Args:
dataset_path (str): Path to existing dataset or where to create
a new one. Defaults to DEFAULT_DATASET_PATH
token (str, optional): Activeloop token, for fetching credentials
to the dataset at path if it is a Deep Lake dataset.
Tokens are normally autogenerated. Optional.
embedding_function (str, optional): Function to convert
either documents or query. Optional.
read_only (bool): Open dataset in read-only mode. Default is False.
ingestion_batch_size (int): During data ingestion, data is divided
into batches. Batch size is the size of each batch.
Default is 1000.
num_workers (int): Number of workers to use during data ingestion.
Default is 0.
verbose (bool): Print dataset summary after each operation.
Default is True.
exec_option (str): DeepLakeVectorDB supports 3 ways to perform
searching - "python", "compute_engine", "tensor_db".
Default is "python".
- ``python`` - Pure-python implementation that runs on the client.
WARNING: using this with big datasets can lead to memory
issues. Data can be stored anywhere.
- ``compute_engine`` - C++ implementation of the Deep Lake Compute
Engine that runs on the client. Can be used for any data stored in
or connected to Deep Lake. Not for in-memory or local datasets.
- ``tensor_db`` - Hosted Managed Tensor Database that is
responsible for storage and query execution. Only for data stored in
the Deep Lake Managed Database. Use runtime = {"db_engine": True} during
dataset creation.
**kwargs: Other optional keyword arguments.
Raises:
ValueError: If some condition is not met.
"""
self.ingestion_batch_size = ingestion_batch_size
self.num_workers = num_workers
self.verbose = verbose
if _DEEPLAKE_INSTALLED is False:
raise ValueError(
"Could not import deeplake python package. "
"Please install it with `pip install deeplake`."
)
if version_compare(deeplake.__version__, "3.6.2") == -1:
raise ValueError(
"deeplake version should be >= 3.6.3, but you've installed"
f" {deeplake.__version__}. Consider upgrading deeplake version \
pip install --upgrade deeplake."
)
self.dataset_path = dataset_path
self.vectordb = DeepLakeVectorDB(
path=self.dataset_path,
embedding_function=embedding_function,
read_only=read_only,
token=token,
exec_option=exec_option,
verbose=verbose,
**kwargs,
)
self._embedding_function = embedding_function
self._id_tensor_name = "ids" if "ids" in self.vectordb.tensors() else "id"
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectordb.
Examples:
>>> ids = deeplake_vectordb.add_texts(
... texts = <list_of_texts>,
... metadatas = <list_of_metadata_jsons>,
... ids = <list_of_ids>,
... )
Args:
texts (Iterable[str]): Texts to add to the vectordb.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]], optional): Optional list of IDs.
**kwargs: other optional keyword arguments.
Returns:
List[str]: List of IDs of the added texts.
"""
kwargs = {}
if ids:
if self._id_tensor_name == "ids": # for backwards compatibility
kwargs["ids"] = ids
else:
kwargs["id"] = ids
if metadatas is None:
metadatas = [{}] * len(list(texts))
return self.vectordb.add(
text=texts,
metadata=metadatas,
embedding_data=texts,
embedding_tensor="embedding",
embedding_function=kwargs.get("embedding_function")
or self._embedding_function.embed_documents, # type: ignore
return_ids=True,
**kwargs,
)
def _search_tql(
self,
tql_query: Optional[str],
exec_option: Optional[str] = None,
return_score: bool = False,
    ) -> Union[List[Document], List[Tuple[Document, float]]]:
"""Function for performing tql_search.
Args:
tql_query (str): TQL Query string for direct evaluation.
Available only for `compute_engine` and `tensor_db`.
exec_option (str, optional): Supports 3 ways to search.
Could be "python", "compute_engine" or "tensor_db". Default is "python".
- ``python`` - Pure-python implementation for the client.
WARNING: not recommended for big datasets due to potential memory
issues.
- ``compute_engine`` - C++ implementation of Deep Lake Compute
Engine for the client. Not for in-memory or local datasets.
- ``tensor_db`` - Hosted Managed Tensor Database for storage
and query execution. Only for data in Deep Lake Managed Database.
Use runtime = {"db_engine": True} during dataset creation.
return_score (bool): Return score with document. Default is False.
Returns:
List[Document] - A list of documents
Raises:
ValueError: If return_score is True but some condition is not met.
"""
result = self.vectordb.search(
query=tql_query,
exec_option=exec_option,
)
metadatas = result["metadata"]
texts = result["text"]
docs = [
Document(
page_content=text,
metadata=metadata,
)
for text, metadata in zip(texts, metadatas)
]
if return_score:
raise ValueError("scores can't be returned with tql search")
return docs
def _search(
self,
query: Optional[str] = None,
embedding: Optional[Union[List[float], np.ndarray]] = None,
embedding_function: Optional[Callable] = None,
k: int = 4,
distance_metric: str = "L2",
filter: Optional[Union[Dict, Callable]] = None,
return_score: bool = True,
exec_option: Optional[str] = None,
**kwargs: Any,
    ) -> Union[List[Document], List[Tuple[Document, float]]]:
"""Return docs similar to query.
Args:
query (str, optional): Text to look up similar docs.
embedding (Union[List[float], np.ndarray], optional): Query's embedding.
embedding_function (Callable, optional): Function to convert `query`
into embedding.
k (int): Number of Documents to return.
distance_metric (str): `L2` for Euclidean, `L1` for Nuclear, `max`
for L-infinity distance, `cos` for cosine similarity, 'dot' for dot
product.
filter (Union[Dict, Callable], optional): Additional filter prior
to the embedding search.
- ``Dict`` - Key-value search on tensors of htype json, on an
AND basis (a sample must satisfy all key-value filters to be True)
Dict = {"tensor_name_1": {"key": value},
"tensor_name_2": {"key": value}}
- ``Function`` - Any function compatible with `deeplake.filter`.
return_score (bool): Return the score.
exec_option (str, optional): Supports 3 ways to perform searching.
Could be "python", "compute_engine" or "tensor_db".
- ``python`` - Pure-python implementation for the client.
WARNING: not recommended for big datasets.
- ``compute_engine`` - C++ implementation of Deep Lake Compute
Engine for the client. Not for in-memory or local datasets.
- ``tensor_db`` - Hosted Managed Tensor Database for storage
and query execution. Only for data in Deep Lake Managed Database.
Use runtime = {"db_engine": True} during dataset creation.
**kwargs: Additional keyword arguments.
Returns:
List of Documents by the specified distance metric,
if return_score True, return a tuple of (Document, score)
Raises:
ValueError: if both `embedding` and `embedding_function` are not specified.
"""
if kwargs.get("tql_query"):
return self._search_tql(
tql_query=kwargs["tql_query"],
exec_option=exec_option,
return_score=return_score,
)
if embedding_function:
if isinstance(embedding_function, Embeddings):
_embedding_function = embedding_function.embed_query
else:
_embedding_function = embedding_function
elif self._embedding_function:
_embedding_function = self._embedding_function.embed_query
else:
_embedding_function = None
if embedding is None:
if _embedding_function is None:
raise ValueError(
"Either `embedding` or `embedding_function` needs to be specified."
)
embedding = _embedding_function(query) if query else None
if isinstance(embedding, list):
embedding = np.array(embedding, dtype=np.float32)
if len(embedding.shape) > 1:
embedding = embedding[0]
result = self.vectordb.search(
embedding=embedding,
k=k,
distance_metric=distance_metric,
filter=filter,
exec_option=exec_option,
return_tensors=["embedding", "metadata", "text"],
)
scores = result["score"]
embeddings = result["embedding"]
metadatas = result["metadata"]
texts = result["text"]
        # Attach each embedding to its metadata entry in place; appending to
        # `metadatas` while iterating would duplicate entries and misalign them
        # with `texts`.
        for meta, embed in zip(metadatas, embeddings):
            meta["embedding"] = embed
docs = [
Document(
page_content=text,
metadata=metadata,
)
for text, metadata in zip(texts, metadatas)
]
if return_score:
return [(doc, score) for doc, score in zip(docs, scores)]
return docs
def similarity_search(
self,
query: Optional[str] = None,
embedding: Optional[List[float]] = None,
k: int = 4,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query.
Examples:
>>> # Search using an embedding
>>> data = vector_store.similarity_search(
... query=<your_query>,
... k=<num_items>,
... exec_option=<preferred_exec_option>,
... )
>>> # Run tql search:
            >>> data = vector_store.similarity_search(
... tql_query="SELECT * WHERE id == <id>",
... exec_option="compute_engine",
... )
"""
        if not kwargs.get("tql_query") and (
            (embedding is None and query is None)
            or (embedding is not None and query is not None)
        ):
            raise ValueError(
                "You must provide either query embeddings or query texts, but not both"
            )
        return self._search(
            query=query,
            embedding=embedding,
            k=k,
            return_score=kwargs.pop("return_score", False),
            **kwargs,
        )
@classmethod
def from_texts(
cls,
texts: List[str],
embedding_function: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
dataset_path: str = DEFAULT_DATASET_PATH,
**kwargs: Any,
) -> DeepLake:
"""Create a Deep Lake dataset from a raw documents.
If a dataset_path is specified, the dataset will be persisted in that location,
otherwise by default at `./deeplake`
Examples:
>>> # Search using an embedding
>>> vector_store = DeepLake.from_texts(
... texts = <the_texts_that_you_want_to_embed>,
... embedding_function = <embedding_function_for_query>,
... k = <number_of_items_to_return>,
... exec_option = <preferred_exec_option>,
... )
"""
if kwargs.get("embedding"):
            raise ValueError(
                "Using `embedding` as `embedding_function` is deprecated. "
                "Please use `embedding_function` instead."
            )
deeplake_dataset = cls(
dataset_path=dataset_path, embedding_function=embedding_function, **kwargs
)
deeplake_dataset.add_texts(
texts=texts,
metadatas=metadatas,
ids=ids,
            embedding_function=(
                embedding_function.embed_documents if embedding_function else None
            ),  # type: ignore
)
return deeplake_dataset
def delete(
self,
        ids: Optional[List[str]] = None,
        filter: Optional[Dict[str, str]] = None,
        delete_all: Optional[bool] = None,
) -> bool:
"""Delete the entities in the dataset.
Args:
ids (Optional[List[str]], optional): The document_ids to delete.
Defaults to None.
filter (Optional[Dict[str, str]], optional): The filter to delete by.
Defaults to None.
delete_all (Optional[bool], optional): Whether to drop the dataset.
Defaults to None.
Returns:
bool: Whether the delete operation was successful.
"""
self.vectordb.delete(
ids=ids,
filter=filter,
delete_all=delete_all,
)
return True
@classmethod
def force_delete_by_path(cls, path: str) -> None:
"""Force delete dataset by path.
Args:
path (str): path of the dataset to delete.
Raises:
ValueError: if deeplake is not installed.
"""
try:
import deeplake
except ImportError:
raise ValueError(
"Could not import deeplake python package. "
"Please install it with `pip install deeplake`."
)
deeplake.delete(path, large_ok=True, force=True)
def delete_dataset(self) -> None:
"""Delete the collection."""
self.delete(delete_all=True)
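# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module). It assumes the
# `deeplake` package is installed and uses a tiny stand-in embedding class so no
# external embedding API is needed; the dataset path and texts are placeholders.
if __name__ == "__main__":
    class _ToyEmbeddings:
        def embed_documents(self, texts):
            return [[float(len(t)), 1.0, 0.0] for t in texts]

        def embed_query(self, text):
            return [float(len(text)), 1.0, 0.0]

    store = DeepLake.from_texts(
        texts=["deep lake stores embeddings", "wrappers expose search helpers"],
        embedding_function=_ToyEmbeddings(),
        dataset_path="./example_deeplake",
    )
    print(store.similarity_search(query="what stores embeddings?", k=1))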
| [] |
2024-01-10 | anubrag/nextpy | nextpy~ai~scripts~sceneexplain.py | # New implementation for the SceneXplain API, different from the LangChain approach
"""Util that calls SceneXplain.
In order to set this up, you need API key for the SceneXplain API.
You can obtain a key by following the steps below.
- Sign up for a free account at https://scenex.jina.ai/.
- Navigate to the API Access page (https://scenex.jina.ai/api) and create a new API key.
"""
import base64
import http
import json
from typing import Dict
from pydantic import BaseModel, BaseSettings, Field, root_validator
from nextpy.utils.data_ops import get_from_dict_or_env
def _image_to_data_uri(file_path):
with open(file_path, "rb") as image_file:
encoded_image = base64.b64encode(image_file.read()).decode("utf-8")
return f"data:image/jpeg;base64,{encoded_image}"
class SceneXplainAPIWrapper(BaseSettings, BaseModel):
"""Wrapper for SceneXplain API.
In order to set this up, you need API key for the SceneXplain API.
You can obtain a key by following the steps below.
- Sign up for a free account at https://scenex.jina.ai/.
- Navigate to the API Access page (https://scenex.jina.ai/api)
and create a new API key.
"""
scenex_api_key: str = Field(..., env="SCENEX_API_KEY")
scenex_api_url: str = "us-central1-causal-diffusion.cloudfunctions.net"
def _describe_image(self, image: str) -> str:
local_image_path = image
data = {
"data": [
{"image": _image_to_data_uri(local_image_path), "features": []},
]
}
headers = {
"x-api-key": f"token {self.scenex_api_key}",
"content-type": "application/json",
}
        connection = http.client.HTTPSConnection(self.scenex_api_url)
connection.request("POST", "/describe", json.dumps(data), headers)
response = connection.getresponse()
response_data = response.read().decode("utf-8")
response_data = json.loads(response_data)
output = response_data["result"][0]["text"]
connection.close()
return output
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
scenex_api_key = get_from_dict_or_env(
values, "scenex_api_key", "SCENEX_API_KEY"
)
values["scenex_api_key"] = scenex_api_key
return values
def run(self, image: str) -> str:
"""Run SceneXplain image explainer."""
description = self._describe_image(image)
if not description:
return "No description found."
return description
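# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): requires a
# SCENEX_API_KEY environment variable and a local image; "photo.jpg" is a
# placeholder path.
if __name__ == "__main__":
    wrapper = SceneXplainAPIWrapper()
    print(wrapper.run("photo.jpg"))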
| [
"application/json"
] |
2024-01-10 | anubrag/nextpy | nextpy~ai~scripts~ducksearch.py | # Code directly taken from LangChain Github Repo
"""Util that calls DuckDuckGo Search.
No setup required. Free.
https://pypi.org/project/duckduckgo-search/
"""
from typing import Dict, List, Optional
from pydantic import BaseModel, Extra
from pydantic.class_validators import root_validator
class DuckDuckGoSearchAPIWrapper(BaseModel):
"""Wrapper for DuckDuckGo Search API.
Free and does not require any setup
"""
k: int = 10
region: Optional[str] = "wt-wt"
safesearch: str = "moderate"
time: Optional[str] = "y"
max_results: int = 5
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that python package exists in environment."""
try:
from duckduckgo_search import DDGS # noqa: F401
except ImportError:
raise ValueError(
"Could not import duckduckgo-search python package. "
"Please install it with `pip install duckduckgo-search`."
)
return values
def get_snippets(self, query: str) -> List[str]:
"""Run query through DuckDuckGo and return concatenated results."""
from duckduckgo_search import DDGS
with DDGS() as ddgs:
results = ddgs.text(
query,
region=self.region,
safesearch=self.safesearch,
timelimit=self.time,
)
            # Avoid `next(results, None)` here: it would consume and silently drop
            # the first result from the generator.
            if results is None:
                return ["No good DuckDuckGo Search Result was found"]
snippets = []
for i, res in enumerate(results, 1):
snippets.append(res["body"])
if i == self.max_results:
break
        if not snippets:
            return ["No good DuckDuckGo Search Result was found"]
        return snippets
def run(self, query: str) -> str:
snippets = self.get_snippets(query)
return " ".join(snippets)
def results(self, query: str, num_results: int) -> List[Dict[str, str]]:
"""Run query through DuckDuckGo and return metadata.
Args:
query: The query to search for.
num_results: The number of results to return.
Returns:
A list of dictionaries with the following keys:
snippet - The description of the result.
title - The title of the result.
link - The link to the result.
"""
from duckduckgo_search import DDGS
with DDGS() as ddgs:
results = ddgs.text(
query,
region=self.region,
safesearch=self.safesearch,
timelimit=self.time,
)
            # Avoid `next(results, None)` here: it would consume and silently drop
            # the first result from the generator.
            if results is None:
                return [{"Result": "No good DuckDuckGo Search Result was found"}]
def to_metadata(result: Dict) -> Dict[str, str]:
return {
"snippet": result["body"],
"title": result["title"],
"link": result["href"],
}
formatted_results = []
for i, res in enumerate(results, 1):
formatted_results.append(to_metadata(res))
if i == num_results:
break
        if not formatted_results:
            return [{"Result": "No good DuckDuckGo Search Result was found"}]
        return formatted_results
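# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): DuckDuckGo needs
# no API key, only the `duckduckgo-search` package; the query is a placeholder.
if __name__ == "__main__":
    search = DuckDuckGoSearchAPIWrapper(max_results=3)
    print(search.run("retrieval augmented generation"))
    print(search.results("retrieval augmented generation", num_results=3))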
| [] |
2024-01-10 | anubrag/nextpy | nextpy~ai~scripts~googleserper.py | # Code taken directly from Langchain Github Repo
"""Util that calls Google Search using the Serper.dev API."""
from typing import Any, Dict, List, Optional
import aiohttp
import requests
from pydantic.class_validators import root_validator
from pydantic.main import BaseModel
from typing_extensions import Literal
from nextpy.utils.data_ops import get_from_dict_or_env
class GoogleSerperAPIWrapper(BaseModel):
"""Wrapper around the Serper.dev Google Search API.
You can create a free API key at https://serper.dev.
To use, you should have the environment variable ``SERPER_API_KEY``
set with your API key, or pass `serper_api_key` as a named parameter
to the constructor.
Example:
.. code-block:: python
from langchain import GoogleSerperAPIWrapper
google_serper = GoogleSerperAPIWrapper()
"""
k: int = 10
gl: str = "us"
hl: str = "en"
    # "places" and "images" are available from Serper but not implemented in the
    # parser of run(). They can be used in results().
type: Literal["news", "search", "places", "images"] = "search"
result_key_for_type = {
"news": "news",
"places": "places",
"images": "images",
"search": "organic",
}
tbs: Optional[str] = None
serper_api_key: Optional[str] = None
aiosession: Optional[aiohttp.ClientSession] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
serper_api_key = get_from_dict_or_env(
values, "serper_api_key", "SERPER_API_KEY"
)
values["serper_api_key"] = serper_api_key
return values
def results(self, query: str, **kwargs: Any) -> Dict:
"""Run query through GoogleSearch."""
return self._google_serper_api_results(
query,
gl=self.gl,
hl=self.hl,
num=self.k,
tbs=self.tbs,
search_type=self.type,
**kwargs,
)
def run(self, query: str, **kwargs: Any) -> str:
"""Run query through GoogleSearch and parse result."""
results = self._google_serper_api_results(
query,
gl=self.gl,
hl=self.hl,
num=self.k,
tbs=self.tbs,
search_type=self.type,
**kwargs,
)
return self._parse_results(results)
async def aresults(self, query: str, **kwargs: Any) -> Dict:
"""Run query through GoogleSearch."""
results = await self._async_google_serper_search_results(
query,
gl=self.gl,
hl=self.hl,
num=self.k,
search_type=self.type,
tbs=self.tbs,
**kwargs,
)
return results
async def arun(self, query: str, **kwargs: Any) -> str:
"""Run query through GoogleSearch and parse result async."""
results = await self._async_google_serper_search_results(
query,
gl=self.gl,
hl=self.hl,
num=self.k,
search_type=self.type,
tbs=self.tbs,
**kwargs,
)
return self._parse_results(results)
def _parse_snippets(self, results: dict) -> List[str]:
snippets = []
if results.get("answerBox"):
answer_box = results.get("answerBox", {})
if answer_box.get("answer"):
return [answer_box.get("answer")]
elif answer_box.get("snippet"):
return [answer_box.get("snippet").replace("\n", " ")]
elif answer_box.get("snippetHighlighted"):
return answer_box.get("snippetHighlighted")
if results.get("knowledgeGraph"):
kg = results.get("knowledgeGraph", {})
title = kg.get("title")
entity_type = kg.get("type")
if entity_type:
snippets.append(f"{title}: {entity_type}.")
description = kg.get("description")
if description:
snippets.append(description)
for attribute, value in kg.get("attributes", {}).items():
snippets.append(f"{title} {attribute}: {value}.")
for result in results[self.result_key_for_type[self.type]][: self.k]:
if "snippet" in result:
snippets.append(result["snippet"])
for attribute, value in result.get("attributes", {}).items():
snippets.append(f"{attribute}: {value}.")
if len(snippets) == 0:
return ["No good Google Search Result was found"]
return snippets
def _parse_results(self, results: dict) -> str:
return " ".join(self._parse_snippets(results))
def _google_serper_api_results(
self, search_term: str, search_type: str = "search", **kwargs: Any
) -> dict:
headers = {
"X-API-KEY": self.serper_api_key or "",
"Content-Type": "application/json",
}
params = {
"q": search_term,
**{key: value for key, value in kwargs.items() if value is not None},
}
response = requests.post(
f"https://google.serper.dev/{search_type}", headers=headers, params=params
)
response.raise_for_status()
search_results = response.json()
return search_results
async def _async_google_serper_search_results(
self, search_term: str, search_type: str = "search", **kwargs: Any
) -> dict:
headers = {
"X-API-KEY": self.serper_api_key or "",
"Content-Type": "application/json",
}
url = f"https://google.serper.dev/{search_type}"
params = {
"q": search_term,
**{key: value for key, value in kwargs.items() if value is not None},
}
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.post(
url, params=params, headers=headers, raise_for_status=False
) as response:
search_results = await response.json()
else:
async with self.aiosession.post(
url, params=params, headers=headers, raise_for_status=True
) as response:
search_results = await response.json()
return search_results
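# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): requires a
# SERPER_API_KEY environment variable; the query string is a placeholder.
if __name__ == "__main__":
    serper = GoogleSerperAPIWrapper(k=5)
    print(serper.run("latest developments in open-source LLMs"))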
| [] |
2024-01-10 | anubrag/nextpy | nextpy~ai~finetune~LLMFinetune.py | from abc import ABC, abstractmethod
from logging import Logger
import openai
class LLMFinetune(ABC):
def __init__(self, logger: Logger, openai_key: str):
self.logger = logger
openai.api_key = openai_key
@abstractmethod
def transform_data(
self,
train_csv_file: str,
val_csv_file: str,
train_output_file: str,
val_output_file: str,
) -> str:
pass
@abstractmethod
def finetune(self, **kwargs):
pass
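# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal concrete
# subclass showing the two hooks a finetuning backend must implement. The JSONL
# conversion and the "sk-..." key below are placeholders, not a real workflow.
if __name__ == "__main__":
    import json
    import logging

    class EchoFinetune(LLMFinetune):
        def transform_data(self, train_csv_file, val_csv_file,
                           train_output_file, val_output_file) -> str:
            # Pretend to convert each CSV into the JSONL format expected downstream.
            for output_file in (train_output_file, val_output_file):
                with open(output_file, "w") as f:
                    f.write(json.dumps({"prompt": "...", "completion": "..."}) + "\n")
            return train_output_file

        def finetune(self, **kwargs):
            self.logger.info("Would launch a finetuning job with %s", kwargs)

    tuner = EchoFinetune(logging.getLogger(__name__), openai_key="sk-...")
    tuner.finetune(model="gpt-3.5-turbo")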
| [] |
2024-01-10 | anubrag/nextpy | nextpy~ai~scripts~openweatherMap.py | # Code taken from the LangChain GitHub repo and edited to some extent.
"""Util that calls OpenWeatherMap using PyOWM."""
from typing import Any, Dict, Optional
from pydantic import Extra, root_validator
from pydantic.main import BaseModel
from nextpy.utils.data_ops import get_from_dict_or_env
class OpenWeatherMapAPIWrapper(BaseModel):
"""Wrapper for OpenWeatherMap API using PyOWM.
Docs for using:
1. Go to OpenWeatherMap and sign up for an API key
2. Save your API KEY into OPENWEATHERMAP_API_KEY env variable
3. pip install pyowm
"""
owm: Any
openweathermap_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
openweathermap_api_key = get_from_dict_or_env(
values, "openweathermap_api_key", "OPENWEATHERMAP_API_KEY"
)
try:
import pyowm
except ImportError:
raise ImportError(
"pyowm is not installed. Please install it with `pip install pyowm`"
)
owm = pyowm.OWM(openweathermap_api_key)
values["owm"] = owm
return values
def _format_weather_info(self, location: str, w: Any) -> str:
detailed_status = w.detailed_status
wind = w.wind()
humidity = w.humidity
temperature = w.temperature("celsius")
rain = w.rain
heat_index = w.heat_index
clouds = w.clouds
return (
f"In {location}, the current weather is as follows:\n"
f"Detailed status: {detailed_status}\n"
f"Wind speed: {wind['speed']} m/s, direction: {wind['deg']}°\n"
f"Humidity: {humidity}%\n"
f"Temperature: \n"
f" - Current: {temperature['temp']}°C\n"
f" - High: {temperature['temp_max']}°C\n"
f" - Low: {temperature['temp_min']}°C\n"
f" - Feels like: {temperature['feels_like']}°C\n"
f"Rain: {rain}\n"
f"Heat index: {heat_index}\n"
f"Cloud cover: {clouds}%"
)
def run(self, location: str) -> str:
"""Get the current weather information for a specified location."""
mgr = self.owm.weather_manager()
observation = mgr.weather_at_place(location)
w = observation.weather
return self._format_weather_info(location, w)
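# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): requires the
# OPENWEATHERMAP_API_KEY environment variable and the `pyowm` package; the
# location string is a placeholder.
if __name__ == "__main__":
    weather = OpenWeatherMapAPIWrapper()
    print(weather.run("London,GB"))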
| [] |
2024-01-10 | anubrag/nextpy | nextpy~ai~scripts~graphql.py | # Code taken from Langchain Github Repo
import json
from typing import Any, Callable, Dict, Optional
from pydantic import BaseModel, Extra, root_validator
class GraphQLAPIWrapper(BaseModel):
"""Wrapper around GraphQL API.
To use, you should have the ``gql`` python package installed.
This wrapper will use the GraphQL API to conduct queries.
"""
custom_headers: Optional[Dict[str, str]] = None
graphql_endpoint: str
gql_client: Any #: :meta private:
gql_function: Callable[[str], Any] #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in the environment."""
try:
from gql import Client, gql
from gql.transport.requests import RequestsHTTPTransport
except ImportError as e:
raise ImportError(
"Could not import gql python package. "
f"Try installing it with `pip install gql`. Received error: {e}"
)
headers = values.get("custom_headers")
transport = RequestsHTTPTransport(
url=values["graphql_endpoint"],
headers=headers,
)
client = Client(transport=transport, fetch_schema_from_transport=True)
values["gql_client"] = client
values["gql_function"] = gql
return values
def run(self, query: str) -> str:
"""Run a GraphQL query and get the results."""
result = self._execute_query(query)
return json.dumps(result, indent=2)
def _execute_query(self, query: str) -> Dict[str, Any]:
"""Execute a GraphQL query and return the results."""
document_node = self.gql_function(query)
result = self.gql_client.execute(document_node)
return result
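# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): the endpoint is
# a public demo GraphQL API used here purely as a placeholder; `gql` must be
# installed for the wrapper to validate.
if __name__ == "__main__":
    wrapper = GraphQLAPIWrapper(
        graphql_endpoint="https://countries.trevorblades.com/"
    )
    print(wrapper.run("{ countries { code name } }"))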
| [] |
2024-01-10 | anubrag/nextpy | nextpy~ai~scripts~serpapi.py | # Code taken directly from the LangChain GitHub repo
"""Chain that calls SerpAPI.
Heavily borrowed from https://github.com/ofirpress/self-ask
"""
import os
import sys
from typing import Any, Dict, Optional, Tuple
import aiohttp
from pydantic import BaseModel, Extra, Field, root_validator
from nextpy.utils.data_ops import get_from_dict_or_env
class HiddenPrints:
"""Context manager to hide prints."""
def __enter__(self) -> None:
"""Open file to pipe stdout to."""
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, "w")
def __exit__(self, *_: Any) -> None:
"""Close file that stdout was piped to."""
sys.stdout.close()
sys.stdout = self._original_stdout
class SerpAPIWrapper(BaseModel):
"""Wrapper around SerpAPI.
To use, you should have the ``google-search-results`` python package installed,
and the environment variable ``SERPAPI_API_KEY`` set with your API key, or pass
`serpapi_api_key` as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain import SerpAPIWrapper
serpapi = SerpAPIWrapper()
"""
search_engine: Any #: :meta private:
params: dict = Field(
default={
"engine": "google",
"google_domain": "google.com",
"gl": "us",
"hl": "en",
}
)
serpapi_api_key: Optional[str] = None
aiosession: Optional[aiohttp.ClientSession] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
serpapi_api_key = get_from_dict_or_env(
values, "serpapi_api_key", "SERPAPI_API_KEY"
)
values["serpapi_api_key"] = serpapi_api_key
try:
from serpapi import GoogleSearch
values["search_engine"] = GoogleSearch
except ImportError:
raise ValueError(
"Could not import serpapi python package. "
"Please install it with `pip install google-search-results`."
)
return values
async def arun(self, query: str, **kwargs: Any) -> str:
"""Run query through SerpAPI and parse result async."""
return self._process_response(await self.aresults(query))
def run(self, query: str, **kwargs: Any) -> str:
"""Run query through SerpAPI and parse result."""
return self._process_response(self.results(query))
def results(self, query: str) -> dict:
"""Run query through SerpAPI and return the raw result."""
params = self.get_params(query)
with HiddenPrints():
search = self.search_engine(params)
res = search.get_dict()
return res
async def aresults(self, query: str) -> dict:
"""Use aiohttp to run query through SerpAPI and return the results async."""
def construct_url_and_params() -> Tuple[str, Dict[str, str]]:
params = self.get_params(query)
params["source"] = "python"
if self.serpapi_api_key:
params["serp_api_key"] = self.serpapi_api_key
params["output"] = "json"
url = "https://serpapi.com/search"
return url, params
url, params = construct_url_and_params()
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.get(url, params=params) as response:
res = await response.json()
else:
async with self.aiosession.get(url, params=params) as response:
res = await response.json()
return res
def get_params(self, query: str) -> Dict[str, str]:
"""Get parameters for SerpAPI."""
_params = {
"api_key": self.serpapi_api_key,
"q": query,
}
params = {**self.params, **_params}
return params
@staticmethod
def _process_response(res: dict) -> str:
"""Process response from SerpAPI."""
if "error" in res:
raise ValueError(f"Got error from SerpAPI: {res['error']}")
if "answer_box" in res and type(res["answer_box"]) == list:
res["answer_box"] = res["answer_box"][0]
if "answer_box" in res and "answer" in res["answer_box"]:
toret = res["answer_box"]["answer"]
elif "answer_box" in res and "snippet" in res["answer_box"]:
toret = res["answer_box"]["snippet"]
elif "answer_box" in res and "snippet_highlighted_words" in res["answer_box"]:
toret = res["answer_box"]["snippet_highlighted_words"][0]
elif "sports_results" in res and "game_spotlight" in res["sports_results"]:
toret = res["sports_results"]["game_spotlight"]
elif "shopping_results" in res and "title" in res["shopping_results"][0]:
toret = res["shopping_results"][:3]
elif "knowledge_graph" in res and "description" in res["knowledge_graph"]:
toret = res["knowledge_graph"]["description"]
elif "snippet" in res["organic_results"][0]:
toret = res["organic_results"][0]["snippet"]
elif "link" in res["organic_results"][0]:
toret = res["organic_results"][0]["link"]
else:
toret = "No good search result found"
return toret
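# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): requires a
# SERPAPI_API_KEY environment variable and the `google-search-results` package;
# the query is a placeholder.
if __name__ == "__main__":
    serp = SerpAPIWrapper()
    print(serp.run("capital of France"))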
| [] |
2024-01-10 | anubrag/nextpy | nextpy~ai~scripts~wolframalpha.py | # Code taken directly from the LangChain GitHub repo
"""Util that calls WolframAlpha."""
from typing import Any, Dict, Optional
from pydantic import BaseModel, Extra, root_validator
from nextpy.utils.data_ops import get_from_dict_or_env
class WolframAlphaAPIWrapper(BaseModel):
"""Wrapper for Wolfram Alpha.
Docs for using:
1. Go to wolfram alpha and sign up for a developer account
2. Create an app and get your APP ID
3. Save your APP ID into WOLFRAM_ALPHA_APPID env variable
4. pip install wolframalpha
"""
wolfram_client: Any #: :meta private:
wolfram_alpha_appid: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
wolfram_alpha_appid = get_from_dict_or_env(
values, "wolfram_alpha_appid", "WOLFRAM_ALPHA_APPID"
)
values["wolfram_alpha_appid"] = wolfram_alpha_appid
try:
import wolframalpha
except ImportError:
raise ImportError(
"wolframalpha is not installed. "
"Please install it with `pip install wolframalpha`"
)
client = wolframalpha.Client(wolfram_alpha_appid)
values["wolfram_client"] = client
return values
def run(self, query: str) -> str:
"""Run query through WolframAlpha and parse result."""
res = self.wolfram_client.query(query)
try:
assumption = next(res.pods).text
answer = next(res.results).text
except StopIteration:
return "Wolfram Alpha wasn't able to answer it"
if answer is None or answer == "":
# We don't want to return the assumption alone if answer is empty
return "No good Wolfram Alpha Result was found"
else:
return f"Assumption: {assumption} \nAnswer: {answer}"
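# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): requires a
# WOLFRAM_ALPHA_APPID environment variable and the `wolframalpha` package.
if __name__ == "__main__":
    wolfram = WolframAlphaAPIWrapper()
    print(wolfram.run("integrate x^2 from 0 to 1"))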
| [] |
2024-01-10 | anubrag/nextpy | nextpy~data~vectordb~qdrant.py | """Wrapper around Qdrant vector database."""
from __future__ import annotations
import uuid
import warnings
from itertools import islice
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Sequence,
Type,
Union,
)
from nextpy.ai.models.embedding.base import Embeddings
from nextpy.ai.schema import Document
from nextpy.data.vectordb import VectorDB
if TYPE_CHECKING:
from qdrant_client.conversions import common_types
from qdrant_client.http import models as rest
DictFilter = Dict[str, Union[str, int, bool, dict, list]]
MetadataFilter = Union[DictFilter, common_types.Filter]
class Qdrant(VectorDB):
"""Wrapper around Qdrant vector database.
To use you should have the ``qdrant-client`` package installed.
Example:
.. code-block:: python
from qdrant_client import QdrantClient
from nextpy.data.vectordb import Qdrant
client = QdrantClient()
collection_name = "MyCollection"
qdrant = Qdrant(client, collection_name, embedding_function)
"""
CONTENT_KEY = "page_content"
METADATA_KEY = "metadata"
def __init__(
self,
client: Any,
collection_name: str,
embedding_function: Optional[Callable] = None,
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
):
"""Initialize with necessary components."""
try:
import qdrant_client
except ImportError:
raise ValueError(
"Could not import qdrant-client python package. "
"Please install it with `pip install qdrant-client`."
)
if not isinstance(client, qdrant_client.QdrantClient):
raise ValueError(
f"client should be an instance of qdrant_client.QdrantClient, "
f"got {type(client)}"
)
self._embeddings_function = embedding_function
self.client: qdrant_client.QdrantClient = client
self.collection_name = collection_name
self.content_payload_key = content_payload_key or self.CONTENT_KEY
self.metadata_payload_key = metadata_payload_key or self.METADATA_KEY
        # Warn if a bare callable was passed instead of an `Embeddings` instance;
        # `self._embeddings_function` has already been set above either way.
        if embedding_function is not None and not isinstance(
            embedding_function, Embeddings
        ):
            warnings.warn(
                "`embedding_function` should be an instance of `Embeddings`. "
                "Using a plain callable here is deprecated."
            )
        self.embeddings = None
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[Sequence[str]] = None,
batch_size: int = 64,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectordb.
Args:
texts: Iterable of strings to add to the vectordb.
metadatas: Optional list of metadatas associated with the texts.
ids:
Optional list of ids to associate with the texts. Ids have to be
uuid-like strings.
batch_size:
How many vectors upload per-request.
Default: 64
Returns:
List of ids from adding the texts into the vectordb.
"""
from qdrant_client.http import models as rest
added_ids = []
texts_iterator = iter(texts)
metadatas_iterator = iter(metadatas or [])
ids_iterator = iter(ids or [uuid.uuid4().hex for _ in iter(texts)])
while batch_texts := list(islice(texts_iterator, batch_size)):
# Take the corresponding metadata and id for each text in a batch
batch_metadatas = list(islice(metadatas_iterator, batch_size)) or None
batch_ids = list(islice(ids_iterator, batch_size))
self.client.upsert(
collection_name=self.collection_name,
points=rest.Batch.construct(
ids=batch_ids,
vectors=self._embed_texts(batch_texts),
payloads=self._build_payloads(
batch_texts,
batch_metadatas,
self.content_payload_key,
self.metadata_payload_key,
),
),
)
added_ids.extend(batch_ids)
return added_ids
def similarity_search(
self,
        query: Optional[str] = None,
        embedding: Optional[List[float]] = None,
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
Returns:
List of Documents most similar to the query.
"""
if (embedding is None and query is None) or (
embedding is not None and query is not None
):
raise ValueError(
"You must provide either query embeddings or query texts, but not both"
)
if filter is not None and isinstance(filter, dict):
warnings.warn(
"Using dict as a `filter` is deprecated. Please use qdrant-client "
"filters directly: "
"https://qdrant.tech/documentation/concepts/filtering/",
DeprecationWarning,
)
qdrant_filter = self._qdrant_filter_from_dict(filter)
else:
qdrant_filter = filter
        if embedding is None and query is not None:
            # Embed the raw query text so that `query_vector` below is never None.
            if self._embeddings_function is None:
                raise ValueError(
                    "An embedding function is required to search with a raw text query."
                )
            if isinstance(self._embeddings_function, Embeddings):
                embedding = self._embeddings_function.embed_query(query)
            else:
                embedding = self._embeddings_function(query)
        results = self.client.search(
collection_name=self.collection_name,
query_vector=embedding,
query_filter=qdrant_filter,
search_params=search_params,
limit=k,
offset=offset,
with_payload=True,
with_vectors=False, # llms does not expect vectors to be returned
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return [
(
self._document_from_scored_point(
result, self.content_payload_key, self.metadata_payload_key
),
result.score,
)
for result in results
]
@classmethod
def from_texts(
cls: Type[Qdrant],
texts: List[str],
embedding_function: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[Sequence[str]] = None,
location: Optional[str] = None,
        batch_size: Optional[int] = 64,
        collection_name: Optional[str] = None,
        distance_func: str = "Cosine",
        content_payload_key: str = CONTENT_KEY,
        metadata_payload_key: str = METADATA_KEY,
        timeout: Optional[float] = None,
        **kwargs: Any,
) -> Qdrant:
"""Construct Qdrant wrapper from a list of texts.
Args:
texts: A list of texts to be indexed in Qdrant.
            embedding_function: A subclass of `Embeddings`, responsible for text vectorization.
metadatas:
An optional list of metadata. If provided it has to be of the same
length as a list of texts.
ids:
Optional list of ids to associate with the texts. Ids have to be
uuid-like strings.
location:
If `:memory:` - use in-memory Qdrant instance.
If `str` - use it as a `url` parameter.
If `None` - fallback to relying on `host` and `port` parameters.
**kwargs:
Additional arguments passed directly into REST client initialization
This is a user-friendly interface that:
1. Creates embeddings, one for each text
2. Initializes the Qdrant database as an in-memory docstore by default
(and overridable to a remote docstore)
3. Adds the text embeddings to the Qdrant database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from nextpy.data.vectordb import Qdrant
from nextpy.ai.embedding import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
                qdrant = Qdrant.from_texts(texts, embeddings, location=":memory:")
"""
try:
import qdrant_client
except ImportError:
raise ValueError(
"Could not import qdrant-client python package. "
"Please install it with `pip install qdrant-client`."
)
from qdrant_client.http import models as rest
# Just do a single quick embedding to get vector size
partial_embeddings = embedding_function.embed_documents(texts[:1])
vector_size = len(partial_embeddings[0])
collection_name = collection_name or uuid.uuid4().hex
distance_func = distance_func.upper()
client = qdrant_client.QdrantClient(
location=location,
**kwargs,
)
client.recreate_collection(
collection_name=collection_name,
vectors_config=rest.VectorParams(
size=vector_size,
distance=rest.Distance[distance_func],
),
timeout=timeout, # type: ignore[arg-type]
)
texts_iterator = iter(texts)
metadatas_iterator = iter(metadatas or [])
ids_iterator = iter(ids or [uuid.uuid4().hex for _ in iter(texts)])
while batch_texts := list(islice(texts_iterator, batch_size)):
# Take the corresponding metadata and id for each text in a batch
batch_metadatas = list(islice(metadatas_iterator, batch_size)) or None
batch_ids = list(islice(ids_iterator, batch_size))
# Generate the embeddings for all the texts in a batch
batch_embeddings = embedding_function.embed_documents(batch_texts)
client.upsert(
collection_name=collection_name,
points=rest.Batch.construct(
ids=batch_ids,
vectors=batch_embeddings,
                    payloads=cls._build_payloads(
                        batch_texts,
                        batch_metadatas,
                        content_payload_key,
                        metadata_payload_key,
                    ),
),
)
        return cls(
            client=client,
            collection_name=collection_name,
            embedding_function=embedding_function,
            content_payload_key=content_payload_key,
            metadata_payload_key=metadata_payload_key,
        )
@classmethod
def _build_payloads(
cls,
texts: Iterable[str],
metadatas: Optional[List[dict]],
content_payload_key: str,
metadata_payload_key: str,
) -> List[dict]:
payloads = []
for i, text in enumerate(texts):
if text is None:
raise ValueError(
"At least one of the texts is None. Please remove it before "
"calling .from_texts or .add_texts on Qdrant instance."
)
metadata = metadatas[i] if metadatas is not None else None
payloads.append(
{
content_payload_key: text,
metadata_payload_key: metadata,
}
)
return payloads
@classmethod
def _document_from_scored_point(
cls,
scored_point: Any,
content_payload_key: str,
metadata_payload_key: str,
) -> Document:
return Document(
page_content=scored_point.payload.get(content_payload_key),
metadata=scored_point.payload.get(metadata_payload_key) or {},
)
def _build_condition(self, key: str, value: Any) -> List[rest.FieldCondition]:
from qdrant_client.http import models as rest
out = []
if isinstance(value, dict):
for _key, value in value.items():
out.extend(self._build_condition(f"{key}.{_key}", value))
elif isinstance(value, list):
for _value in value:
if isinstance(_value, dict):
out.extend(self._build_condition(f"{key}[]", _value))
else:
out.extend(self._build_condition(f"{key}", _value))
else:
out.append(
rest.FieldCondition(
key=f"{self.metadata_payload_key}.{key}",
match=rest.MatchValue(value=value),
)
)
return out
def _qdrant_filter_from_dict(
self, filter: Optional[DictFilter]
) -> Optional[rest.Filter]:
from qdrant_client.http import models as rest
if not filter:
return None
return rest.Filter(
must=[
condition
for key, value in filter.items()
for condition in self._build_condition(key, value)
]
)
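# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): runs against an
# in-memory Qdrant instance and uses a tiny stand-in embedding class so that no
# external embedding API is needed; collection name and texts are placeholders.
if __name__ == "__main__":
    class _ToyEmbeddings:
        def embed_documents(self, texts):
            return [[float(len(t)), 0.5, 0.25] for t in texts]

        def embed_query(self, text):
            return [float(len(text)), 0.5, 0.25]

    toy = _ToyEmbeddings()
    store = Qdrant.from_texts(
        texts=["qdrant keeps vectors", "wrappers adapt the client API"],
        embedding_function=toy,
        location=":memory:",
        collection_name="example",
    )
    hits = store.similarity_search(
        query=None, embedding=toy.embed_query("what keeps vectors?"), k=1
    )
    print(hits)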
| [] |
2024-01-10 | anubrag/nextpy | nextpy~ai~scripts~bibtex.py | # Directly taken from Langchain Github Repo
"""Util that calls bibtexparser."""
import logging
from typing import Any, Dict, List, Mapping
from pydantic import BaseModel, Extra, root_validator
logger = logging.getLogger(__name__)
OPTIONAL_FIELDS = [
"annotate",
"booktitle",
"editor",
"howpublished",
"journal",
"keywords",
"note",
"organization",
"publisher",
"school",
"series",
"type",
"doi",
"issn",
"isbn",
]
class BibtexparserWrapper(BaseModel):
"""Wrapper around bibtexparser.
To use, you should have the ``bibtexparser`` python package installed.
https://bibtexparser.readthedocs.io/en/master/
This wrapper will use bibtexparser to load a collection of references from
a bibtex file and fetch document summaries.
"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
try:
import bibtexparser # noqa
except ImportError:
raise ImportError(
"Could not import bibtexparser python package. "
"Please install it with `pip install bibtexparser`."
)
return values
def load_bibtex_entries(self, path: str) -> List[Dict[str, Any]]:
"""Load bibtex entries from the bibtex file at the given path."""
import bibtexparser
with open(path) as file:
entries = bibtexparser.load(file).entries
return entries
def get_metadata(
self, entry: Mapping[str, Any], load_extra: bool = False
) -> Dict[str, Any]:
"""Get metadata for the given entry."""
publication = entry.get("journal") or entry.get("booktitle")
if "url" in entry:
url = entry["url"]
elif "doi" in entry:
url = f'https://doi.org/{entry["doi"]}'
else:
url = None
meta = {
"id": entry.get("ID"),
"published_year": entry.get("year"),
"title": entry.get("title"),
"publication": publication,
"authors": entry.get("author"),
"abstract": entry.get("abstract"),
"url": url,
}
if load_extra:
for field in OPTIONAL_FIELDS:
meta[field] = entry.get(field)
return {k: v for k, v in meta.items() if v is not None}
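# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): "references.bib"
# is a placeholder path to a local BibTeX file; `bibtexparser` must be installed.
if __name__ == "__main__":
    wrapper = BibtexparserWrapper()
    for entry in wrapper.load_bibtex_entries("references.bib"):
        print(wrapper.get_metadata(entry, load_extra=True))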
| [] |
2024-01-10 | anubrag/nextpy | nextpy~ai~scripts~bingsearch.py | # Directly taken from Langchain Github Repo
"""Util that calls Bing Search.
In order to set this up, follow instructions at:
https://levelup.gitconnected.com/api-tutorial-how-to-use-bing-web-search-api-in-python-4165d5592a7e
"""
from typing import Dict, List
import requests
from pydantic import BaseModel, Extra, root_validator
from nextpy.utils.data_ops import get_from_dict_or_env
class BingSearchAPIWrapper(BaseModel):
"""Wrapper for Bing Search API.
In order to set this up, follow instructions at:
https://levelup.gitconnected.com/api-tutorial-how-to-use-bing-web-search-api-in-python-4165d5592a7e
"""
bing_subscription_key: str
bing_search_url: str
k: int = 10
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _bing_search_results(self, search_term: str, count: int) -> List[dict]:
headers = {"Ocp-Apim-Subscription-Key": self.bing_subscription_key}
params = {
"q": search_term,
"count": count,
"textDecorations": True,
"textFormat": "HTML",
}
response = requests.get(
self.bing_search_url, headers=headers, params=params # type: ignore
)
response.raise_for_status()
search_results = response.json()
return search_results["webPages"]["value"]
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and endpoint exists in environment."""
bing_subscription_key = get_from_dict_or_env(
values, "bing_subscription_key", "BING_SUBSCRIPTION_KEY"
)
values["bing_subscription_key"] = bing_subscription_key
bing_search_url = get_from_dict_or_env(
values,
"bing_search_url",
"BING_SEARCH_URL",
# default="https://api.bing.microsoft.com/v7.0/search",
)
values["bing_search_url"] = bing_search_url
return values
def run(self, query: str) -> str:
"""Run query through BingSearch and parse result."""
snippets = []
results = self._bing_search_results(query, count=self.k)
if len(results) == 0:
return "No good Bing Search Result was found"
for result in results:
snippets.append(result["snippet"])
return " ".join(snippets)
def results(self, query: str, num_results: int) -> List[Dict]:
"""Run query through BingSearch and return metadata.
Args:
query: The query to search for.
num_results: The number of results to return.
Returns:
A list of dictionaries with the following keys:
snippet - The description of the result.
title - The title of the result.
link - The link to the result.
"""
metadata_results = []
results = self._bing_search_results(query, count=num_results)
if len(results) == 0:
return [{"Result": "No good Bing Search Result was found"}]
for result in results:
metadata_result = {
"snippet": result["snippet"],
"title": result["name"],
"link": result["url"],
}
metadata_results.append(metadata_result)
return metadata_results
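# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): the key is a
# placeholder, and the endpoint shown is the default mentioned in the validator
# above for the Bing Web Search v7 API.
if __name__ == "__main__":
    search = BingSearchAPIWrapper(
        bing_subscription_key="<your-subscription-key>",
        bing_search_url="https://api.bing.microsoft.com/v7.0/search",
    )
    print(search.run("python asyncio tutorial"))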
| [] |
2024-01-10 | anubrag/nextpy | nextpy~ai~engine~llms~_openai.py | import openai
import os
import time
import requests
import aiohttp
import copy
import asyncio
import types
import collections
import json
import re
import regex
from ._llm import LLM, LLMSession, SyncSession
class MalformedPromptException(Exception):
pass
import pyparsing as pp
role_start_tag = pp.Suppress(pp.Optional(pp.White()) + pp.Literal("<|im_start|>"))
role_start_name = pp.Word(pp.alphanums + "_")("role_name")
role_kwargs = pp.Suppress(pp.Optional(" ")) + pp.Dict(pp.Group(pp.Word(pp.alphanums + "_") + pp.Suppress("=") + pp.QuotedString('"')))("kwargs")
role_start = (role_start_tag + role_start_name + pp.Optional(role_kwargs) + pp.Suppress("\n")).leave_whitespace()
role_end = pp.Suppress(pp.Literal("<|im_end|>"))
role_content = pp.Combine(pp.ZeroOrMore(pp.CharsNotIn("<") | pp.Literal("<") + ~pp.FollowedBy("|im_end|>")))("role_content")
role_group = pp.Group(role_start + role_content + role_end)("role_group").leave_whitespace()
partial_role_group = pp.Group(role_start + role_content)("role_group").leave_whitespace()
roles_grammar = pp.ZeroOrMore(role_group) + pp.Optional(partial_role_group) + pp.StringEnd()
# import pyparsing as pp
# role_start_tag = pp.Literal("<|im_start|>")
# role_start_name = pp.Word(pp.alphanums + "_")
# role_kwargs = pp.Dict(pp.Group(pp.Word(pp.alphanums + "_") + pp.Suppress("=") + pp.QuotedString('"')))
# role_start = role_start_tag + role_start_name + pp.Optional(role_kwargs) + pp.Suppress("\n")
# role_end = pp.Literal("<|im_end|>")
# role_content = pp.CharsNotIn("<|im_start|><|im_end|>")
# r'<\|im_start\|>([^\n]+)\n(.*?)(?=<\|im_end\|>|$)'
def prompt_to_messages(prompt):
messages = []
assert prompt.endswith("<|im_start|>assistant\n"), "When calling OpenAI chat models you must generate only directly inside the assistant role! The OpenAI API does not currently support partial assistant prompting."
parsed_prompt = roles_grammar.parse_string(prompt)
# pattern = r'<\|im_start\|>([^\n]+)\n(.*?)(?=<\|im_end\|>|$)'
# matches = re.findall(pattern, prompt, re.DOTALL)
# if not matches:
# return [{'role': 'user', 'content': prompt}]
for role in parsed_prompt:
if len(role["role_content"]) > 0: # only add non-empty messages (OpenAI does not support empty messages anyway)
message = {'role': role["role_name"], 'content': role["role_content"]}
if "kwargs" in role:
for k, v in role["kwargs"].items():
message[k] = v
messages.append(message)
return messages
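# Illustrative example (comment only, not executed): a chat-formatted prompt such as
#
#   <|im_start|>system
#   You are a helpful assistant.<|im_end|>
#   <|im_start|>user
#   Say hi.<|im_end|>
#   <|im_start|>assistant
#
# is converted by prompt_to_messages() into
#
#   [{'role': 'system', 'content': 'You are a helpful assistant.'},
#    {'role': 'user', 'content': 'Say hi.'}]
#
# The trailing empty assistant turn is dropped (empty messages are skipped), and
# the prompt must end with "<|im_start|>assistant\n" as enforced by the assert above.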
async def add_text_to_chat_mode_generator(chat_mode):
in_function_call = False
async for resp in chat_mode:
if "choices" in resp:
for c in resp['choices']:
# move content from delta to text so we have a consistent interface with non-chat mode
found_content = False
if "content" in c['delta'] and c['delta']['content'] != "":
found_content = True
c['text'] = c['delta']['content']
# capture function call data and convert to text again so we have a consistent interface with non-chat mode and open models
if "function_call" in c['delta']:
# build the start of the function call (the follows the syntax that GPT says it wants when we ask it, and will be parsed by the @function_detector)
if not in_function_call:
start_val = "\n```typescript\nfunctions."+c['delta']['function_call']["name"]+"("
                        # 'text' may be missing when the delta carries only a function
                        # call (no content), so use .get() to avoid a KeyError.
                        if not c.get('text'):
                            c['text'] = start_val
                        else:
                            c['text'] += start_val
in_function_call = True
# extend the arguments JSON string
val = c['delta']['function_call']["arguments"]
if 'text' in c:
c['text'] += val
else:
c['text'] = val
if not found_content and not in_function_call:
break # the role markers are outside the generation in chat mode right now TODO: consider how this changes for uncontrained generation
else:
yield resp
else:
yield resp
# close the function call if needed
if in_function_call:
yield {'choices': [{'text': ')```'}]}
def add_text_to_chat_mode(chat_mode):
if isinstance(chat_mode, (types.AsyncGeneratorType, types.GeneratorType)):
return add_text_to_chat_mode_generator(chat_mode)
else:
for c in chat_mode['choices']:
c['text'] = c['message']['content']
return chat_mode
class OpenAI(LLM):
llm_name: str = "openai"
def __init__(self, model=None, caching=True, max_retries=5, max_calls_per_min=60,
api_key=None, api_type="open_ai", api_base=None, api_version=None, deployment_id=None,
temperature=0.0, chat_mode="auto", organization=None,
                 allowed_special_tokens={"<|endoftext|>", "<|endofprompt|>"}, rest_call=False, encoding_name=None, token=None, endpoint=None):
super().__init__()
# map old param values
# TODO: add deprecated warnings after some time
if token is not None:
if api_key is None:
api_key = token
if endpoint is not None:
if api_base is None:
api_base = endpoint
# fill in default model value
if model is None:
model = os.environ.get("OPENAI_MODEL", None)
if model is None:
try:
with open(os.path.expanduser('~/.openai_model'), 'r') as file:
model = file.read().replace('\n', '')
except:
pass
# fill in default deployment_id value
if deployment_id is None:
deployment_id = os.environ.get("OPENAI_DEPLOYMENT_ID", None)
# Auto-detect whether to use the chat completion mode based on the model name
if chat_mode == "auto":
# Regular expression pattern to match both standard and fine-tuned model names
# Examples of matched model names:
# - 'gpt-3.5-turbo'
# - 'gpt-4'
# - 'gpt-4-32k-0314'
# - 'gpt-4-1106-preview'
# - 'ft:gpt-3.5-turbo:my-org:custom_suffix:id'
# - 'ft:gpt-4:my-org:custom_suffix'
# - 'ft:gpt-3.5-turbo-1106:personal::id'
chat_model_pattern = r'^(ft:)?(gpt-3\.5-turbo|gpt-4)((-\w+)+)?(:[\w-]+(?:[:\w-]+)*)?(::\w+)?$'
# Check if the model name matches the pattern for chat models
chat_mode = bool(re.match(chat_model_pattern, model))
# fill in default API key value
if api_key is None: # get from environment variable
api_key = os.environ.get("OPENAI_API_KEY", getattr(openai, "api_key", None))
if api_key is not None and not api_key.startswith("sk-") and os.path.exists(os.path.expanduser(api_key)): # get from file
with open(os.path.expanduser(api_key), 'r') as file:
api_key = file.read().replace('\n', '')
if api_key is None: # get from default file location
try:
with open(os.path.expanduser('~/.openai_api_key'), 'r') as file:
api_key = file.read().replace('\n', '')
except:
pass
if organization is None:
organization = os.environ.get("OPENAI_ORGANIZATION", None)
# fill in default endpoint value
if api_base is None:
api_base = os.environ.get("OPENAI_API_BASE", None) or os.environ.get("OPENAI_ENDPOINT", None) # ENDPOINT is deprecated
# Optional tiktoken encoding integration
if encoding_name or model:
try:
import tiktoken
if encoding_name is None:
encoding_name = tiktoken.encoding_for_model(model).name
self._tokenizer = tiktoken.get_encoding(encoding_name)
except ImportError:
# tiktoken library is not available, handle accordingly
self._tokenizer = None
else:
# If no model or encoding_name is provided, no tokenizer setup is necessary
self._tokenizer = None
self.chat_mode = chat_mode
self.allowed_special_tokens = allowed_special_tokens
self.model_name = model
self.deployment_id = deployment_id
self.caching = caching
self.max_retries = max_retries
self.max_calls_per_min = max_calls_per_min
if isinstance(api_key, str):
api_key = api_key.replace("Bearer ", "")
self.api_key = api_key
self.api_type = api_type
self.api_base = api_base
self.api_version = api_version
self.current_time = time.time()
self.call_history = collections.deque()
self.temperature = temperature
self.organization = organization
self.rest_call = rest_call
self.endpoint = endpoint
if not self.rest_call:
self.caller = self._library_call
else:
self.caller = self._rest_call
self._rest_headers = {
"Content-Type": "application/json"
}
def session(self, asynchronous=False):
if asynchronous:
return OpenAISession(self)
else:
return SyncSession(OpenAISession(self))
def role_start(self, role_name, **kwargs):
assert self.chat_mode, "role_start() can only be used in chat mode"
return "<|im_start|>"+role_name+"".join([f' {k}="{v}"' for k,v in kwargs.items()])+"\n"
def role_end(self, role=None):
assert self.chat_mode, "role_end() can only be used in chat mode"
return "<|im_end|>"
def end_of_text(self):
return "<|endoftext|>"
@classmethod
async def stream_then_save(cls, gen, key, stop_regex, n):
list_out = []
cached_out = None
# init stop_regex variables
if stop_regex is not None:
if isinstance(stop_regex, str):
stop_patterns = [regex.compile(stop_regex)]
else:
stop_patterns = [regex.compile(pattern) for pattern in stop_regex]
current_strings = ["" for _ in range(n)]
# last_out_pos = ["" for _ in range(n)]
# iterate through the stream
all_done = False
async for curr_out in gen:
# if we have a cached output, extend it with the current output
if cached_out is not None:
out = merge_stream_chunks(cached_out, curr_out)
else:
out = curr_out
# check if we have stop_regex matches
found_partial = False
if stop_regex is not None:
# keep track of the generated text so far
for i,choice in enumerate(curr_out['choices']):
current_strings[i] += choice['text']
# check if all of the strings match a stop string (and hence we can stop the batch inference)
all_done = True
for i in range(len(current_strings)):
found = False
for s in stop_patterns:
if s.search(current_strings[i]):
found = True
if not found:
all_done = False
break
# find where trim off the stop regex matches if needed (and look for partial matches)
stop_pos = [1e10 for _ in range(n)]
stop_text = [None for _ in range(n)]
for i in range(len(current_strings)):
for s in stop_patterns:
m = s.search(current_strings[i], partial=True)
if m:
span = m.span()
if span[1] > span[0]:
if m.partial: # we might be starting a stop sequence, so we can't emit anything yet
found_partial = True
break
else:
stop_text[i] = current_strings[i][span[0]:span[1]]
stop_pos[i] = min(span[0], stop_pos[i])
                    if stop_pos[i] != 1e10:
stop_pos[i] = stop_pos[i] - len(current_strings[i]) # convert to relative position from the end
# if we might be starting a stop sequence, we need to cache the output and continue to wait and see
if found_partial:
cached_out = out
continue
# if we get here, we are not starting a stop sequence, so we can emit the output
else:
cached_out = None
if stop_regex is not None:
for i in range(len(out['choices'])):
if stop_pos[i] < len(out['choices'][i]['text']):
out['choices'][i] = out['choices'][i].to_dict() # because sometimes we might need to set the text to the empty string (and OpenAI's object does not like that)
out['choices'][i]['text'] = out['choices'][i]['text'][:stop_pos[i]]
out['choices'][i]['stop_text'] = stop_text[i]
out['choices'][i]['finish_reason'] = "stop"
list_out.append(out)
yield out
if all_done:
                await gen.aclose()
break
# if we have a cached output, emit it
if cached_out is not None:
list_out.append(cached_out)
yield out
cls.cache[key] = list_out
def _stream_completion(self):
pass
# Define a function to add a call to the deque
def add_call(self):
# Get the current timestamp in seconds
now = time.time()
# Append the timestamp to the right of the deque
self.call_history.append(now)
# Define a function to count the calls in the last 60 seconds
def count_calls(self):
# Get the current timestamp in seconds
now = time.time()
# Remove the timestamps that are older than 60 seconds from the left of the deque
while self.call_history and self.call_history[0] < now - 60:
self.call_history.popleft()
# Return the length of the deque as the number of calls
return len(self.call_history)
async def _library_call(self, **kwargs):
""" Call the OpenAI API using the python package.
Note that is uses the local auth token, and does not rely on the openai one.
"""
# save the params of the openai library
prev_key = openai.api_key
prev_org = openai.organization
prev_type = openai.api_type
prev_version = openai.api_version
prev_base = openai.api_base
# set the params of the openai library if we have them
if self.api_key is not None:
openai.api_key = self.api_key
if self.organization is not None:
openai.organization = self.organization
if self.api_type is not None:
openai.api_type = self.api_type
if self.api_version is not None:
openai.api_version = self.api_version
if self.api_base is not None:
openai.api_base = self.api_base
assert openai.api_key is not None, "You must provide an OpenAI API key to use the OpenAI LLM. Either pass it in the constructor, set the OPENAI_API_KEY environment variable, or create the file ~/.openai_api_key with your key in it."
if self.chat_mode:
kwargs['messages'] = prompt_to_messages(kwargs['prompt'])
del kwargs['prompt']
del kwargs['echo']
del kwargs['logprobs']
# print(kwargs)
out = await openai.ChatCompletion.acreate(**kwargs)
out = add_text_to_chat_mode(out)
else:
out = await openai.Completion.acreate(**kwargs)
# restore the params of the openai library
openai.api_key = prev_key
openai.organization = prev_org
openai.api_type = prev_type
openai.api_version = prev_version
openai.api_base = prev_base
return out
async def _rest_call(self, **kwargs):
""" Call the OpenAI API using the REST API.
"""
# Define the request headers
headers = copy.copy(self._rest_headers)
if self.api_key is not None:
headers['Authorization'] = f"Bearer {self.api_key}"
# Define the request data
stream = kwargs.get("stream", False)
data = {
"model": self.model_name,
"prompt": kwargs["prompt"],
"max_tokens": kwargs.get("max_tokens", None),
"temperature": kwargs.get("temperature", 0.0),
"top_p": kwargs.get("top_p", 1.0),
"n": kwargs.get("n", 1),
"stream": stream,
"logprobs": kwargs.get("logprobs", None),
'stop': kwargs.get("stop", None),
"echo": kwargs.get("echo", False)
}
if self.chat_mode:
data['messages'] = prompt_to_messages(data['prompt'])
del data['prompt']
del data['echo']
del data['logprobs']
# Send a POST request and get the response
        # An exception for timeout is raised if the server has not issued a response for 60 seconds
try:
if stream:
session = aiohttp.ClientSession()
response = await session.post(self.endpoint, json=data, headers=headers, timeout=60)
status = response.status
else:
response = requests.post(self.endpoint, headers=headers, json=data, timeout=60)
status = response.status_code
text = response.text
if status != 200:
if stream:
text = await response.text()
raise Exception("Response is not 200: " + text)
if stream:
response = self._rest_stream_handler(response, session)
else:
response = response.json()
except requests.Timeout:
raise Exception("Request timed out.")
except requests.ConnectionError:
raise Exception("Connection error occurred.")
if self.chat_mode:
response = add_text_to_chat_mode(response)
return response
async def _close_response_and_session(self, response, session):
await response.release()
await session.close()
async def _rest_stream_handler(self, response, session):
# async for line in response.iter_lines():
async for line in response.content:
text = line.decode('utf-8')
if text.startswith('data: '):
text = text[6:]
if text.strip() == '[DONE]':
await self._close_response_and_session(response, session)
break
else:
yield json.loads(text)
def encode(self, string):
        # note that is_fragment is not used for this tokenizer
return self._tokenizer.encode(string, allowed_special=self.allowed_special_tokens)
def decode(self, tokens):
return self._tokenizer.decode(tokens)
def merge_stream_chunks(first_chunk, second_chunk):
""" This merges two stream responses together.
"""
out = copy.deepcopy(first_chunk)
# merge the choices
for i in range(len(out['choices'])):
out_choice = out['choices'][i]
second_choice = second_chunk['choices'][i]
out_choice['text'] += second_choice['text']
if 'index' in second_choice:
out_choice['index'] = second_choice['index']
if 'finish_reason' in second_choice:
out_choice['finish_reason'] = second_choice['finish_reason']
if out_choice.get('logprobs', None) is not None:
out_choice['logprobs']['token_logprobs'] += second_choice['logprobs']['token_logprobs']
out_choice['logprobs']['top_logprobs'] += second_choice['logprobs']['top_logprobs']
out_choice['logprobs']['text_offset'] = second_choice['logprobs']['text_offset']
return out
class OpenAIStreamer():
def __init__(self, stop_regex, n):
self.stop_regex = stop_regex
self.n = n
self.current_strings = ["" for _ in range(n)]
self.current_length = 0
class RegexStopChecker():
def __init__(self, stop_pattern, decode, prefix_length):
if isinstance(stop_pattern, str):
self.stop_patterns = [regex.compile(stop_pattern)]
else:
self.stop_patterns = [regex.compile(pattern) for pattern in stop_pattern]
self.prefix_length = prefix_length
self.decode = decode
self.current_strings = None
self.current_length = 0
def __call__(self, input_ids, scores, **kwargs):
# extend our current strings
if self.current_strings is None:
self.current_strings = ["" for _ in range(len(input_ids))]
for i in range(len(self.current_strings)):
self.current_strings[i] += self.decode(input_ids[i][self.current_length:])
# trim off the prefix string so we don't look for stop matches in the prompt
if self.current_length == 0:
for i in range(len(self.current_strings)):
self.current_strings[i] = self.current_strings[i][self.prefix_length:]
self.current_length = len(input_ids[0])
# check if all of the strings match a stop string (and hence we can stop the batch inference)
all_done = True
for i in range(len(self.current_strings)):
found = False
for s in self.stop_patterns:
if s.search(self.current_strings[i]):
found = True
if not found:
all_done = False
break
return all_done
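# Illustrative sketch: RegexStopChecker is meant to be used as a stopping-criteria
# callable during batched generation. The toy decode below simply maps token ids to
# characters; it is a stand-in for a real tokenizer and only shows how the checker
# accumulates decoded text and reports once every sequence matches a stop pattern.
def _example_regex_stop_checker():
    checker = RegexStopChecker("\n\n", decode=lambda ids: "".join(chr(i) for i in ids), prefix_length=0)
    input_ids = [[72, 105, 10, 10]]  # decodes to "Hi\n\n"
    return checker(input_ids, scores=None)  # True: the stop pattern was found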
# define the syntax for the function definitions
import pyparsing as pp
start_functions = pp.Suppress(pp.Literal("## functions\n\nnamespace functions {\n\n"))
comment = pp.Combine(pp.Suppress(pp.Literal("//") + pp.Optional(" ")) + pp.restOfLine)
end_functions = pp.Suppress("} // namespace functions")
function_def_start = pp.Optional(comment)("function_description") + pp.Suppress(pp.Literal("type")) + pp.Word(pp.alphas + "_")("function_name") + pp.Suppress(pp.Literal("=") + pp.Literal("(_:") + pp.Literal("{"))
function_def_end = pp.Suppress(pp.Literal("})") + pp.Literal("=>") + pp.Literal("any;"))
parameter_type = (pp.Word(pp.alphas + "_")("simple_type") | pp.QuotedString('"')("enum_option") + pp.OneOrMore(pp.Suppress("|") + pp.QuotedString('"')("enum_option"))("enum")) + pp.Suppress(pp.Optional(","))
parameter_def = pp.Optional(comment)("parameter_description") + pp.Word(pp.alphas + "_")("parameter_name") + pp.Optional(pp.Literal("?"))("is_optional") + pp.Suppress(pp.Literal(":")) + pp.Group(parameter_type)("parameter_type")
function_def = function_def_start + pp.OneOrMore(pp.Group(parameter_def)("parameter")) + function_def_end
functions_def = start_functions + pp.OneOrMore(pp.Group(function_def)("function")) + end_functions
def get_json_from_parse(parse_out):
functions = []
for function in parse_out:
function_name = function.function_name
function_description = function.function_description
parameters = {
"type": "object",
"properties": {},
"required": []
}
for parameter in function:
if isinstance(parameter, str):
continue
parameter_name = parameter.parameter_name
parameter_description = parameter.parameter_description
parameter_type = parameter.parameter_type
is_optional = parameter.is_optional
d = {}
if parameter_type.simple_type:
d["type"] = parameter_type.simple_type
elif parameter_type.enum:
d["type"] = "string"
d["enum"] = [s for s in parameter_type]
if parameter_description:
d["description"] = parameter_description
if not is_optional:
parameters["required"].append(parameter_name)
parameters["properties"][parameter_name] = d
functions.append({
"name": function_name,
"description": function_description,
"parameters": parameters
})
return functions
def extract_function_defs(prompt):
""" This extracts function definitions from the prompt.
"""
if "\n## functions\n" not in prompt:
return None
else:
functions_text = prompt[prompt.index("\n## functions\n")+1:prompt.index("} // namespace functions")+24]
parse_out = functions_def.parseString(functions_text)
return get_json_from_parse(parse_out)
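# Illustrative sketch: the grammar above parses the TypeScript-style "namespace
# functions" block embedded in chat prompts for function calling. The prompt text and
# the get_weather signature below are hypothetical, only meant to show the
# JSON-schema-like output of extract_function_defs.
def _example_extract_function_defs():
    prompt = (
        "You can call these tools.\n"
        "## functions\n"
        "\n"
        "namespace functions {\n"
        "\n"
        "// Get the current weather for a city\n"
        "type get_weather = (_: {\n"
        "// Name of the city\n"
        "location: string,\n"
        "unit?: \"celsius\" | \"fahrenheit\",\n"
        "}) => any;\n"
        "\n"
        "} // namespace functions\n"
    )
    # Returns one entry with name, description and a "parameters" object in which
    # only "location" is required.
    return extract_function_defs(prompt)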
# Define a deque to store the timestamps of the calls
class OpenAISession(LLMSession):
async def __call__(self, prompt, stop=None, stop_regex=None, temperature=None, n=1, max_tokens=1000, logprobs=None,
top_p=1.0, echo=False, logit_bias=None, token_healing=None, pattern=None, stream=None,
cache_seed=0, caching=None, **completion_kwargs):
""" Generate a completion of the given prompt.
"""
# we need to stream in order to support stop_regex
if stream is None:
stream = stop_regex is not None
assert stop_regex is None or stream, "We can only support stop_regex for the OpenAI API when stream=True!"
assert stop_regex is None or n == 1, "We don't yet support stop_regex combined with n > 1 with the OpenAI API!"
assert token_healing is None or token_healing is False, "The OpenAI API does not yet support token healing! Please either switch to an endpoint that does, or don't use the `token_healing` argument to `gen`."
# set defaults
if temperature is None:
temperature = self.llm.temperature
# get the arguments as dictionary for cache key generation
args = locals().copy()
assert not pattern, "The OpenAI API does not support Compiler pattern controls! Please either switch to an endpoint that does, or don't use the `pattern` argument to `gen`."
# assert not stop_regex, "The OpenAI API does not support Compiler stop_regex controls! Please either switch to an endpoint that does, or don't use the `stop_regex` argument to `gen`."
# define the key for the cache
cache_params = self._cache_params(args)
llm_cache = self.llm.cache
key = llm_cache.create_key(self.llm.llm_name, **cache_params)
# allow streaming to use non-streaming cache (the reverse is not true)
if key not in llm_cache and stream:
cache_params["stream"] = False
key1 = llm_cache.create_key(self.llm.llm_name, **cache_params)
if key1 in llm_cache:
key = key1
# check the cache
if key not in llm_cache or caching is False or (caching is not True and not self.llm.caching):
# ensure we don't exceed the rate limit
while self.llm.count_calls() > self.llm.max_calls_per_min:
await asyncio.sleep(1)
functions = extract_function_defs(prompt)
fail_count = 0
while True:
try_again = False
try:
self.llm.add_call()
call_args = {
"model": self.llm.model_name,
"deployment_id": self.llm.deployment_id,
"prompt": prompt,
"max_tokens": max_tokens,
"temperature": temperature,
"top_p": top_p,
"n": n,
"stop": stop,
"logprobs": logprobs,
"echo": echo,
"stream": stream,
**completion_kwargs
}
if functions is None:
if "function_call" in call_args:
del call_args["function_call"]
else:
call_args["functions"] = functions
if logit_bias is not None:
call_args["logit_bias"] = {str(k): v for k,v in logit_bias.items()} # convert keys to strings since that's the open ai api's format
out = await self.llm.caller(**call_args)
except (openai.error.RateLimitError, openai.error.ServiceUnavailableError, openai.error.APIError, openai.error.Timeout):
await asyncio.sleep(3)
try_again = True
fail_count += 1
if not try_again:
break
if fail_count > self.llm.max_retries:
raise Exception(f"Too many (more than {self.llm.max_retries}) OpenAI API errors in a row!")
if stream:
return self.llm.stream_then_save(out, key, stop_regex, n)
else:
llm_cache[key] = out
# wrap as a list if needed
if stream:
if isinstance(llm_cache[key], list):
return llm_cache[key]
return [llm_cache[key]]
return llm_cache[key]
import os
import json
import platformdirs
from ._openai import OpenAI
class AzureOpenAI(OpenAI):
def __init__(self, *args, **kwargs):
raise NotImplementedError("The AzureOpenAI class has been merged with the OpenAI class for Azure usage. Please use the OpenAI class instead")
class MSALOpenAI(OpenAI):
""" Microsoft Authentication Library (MSAL) OpenAI style integration.
Warning: This class is not finalized and may change in the future.
"""
llm_name: str = "azure_openai"
def __init__(self, model=None, client_id=None, authority=None, caching=True, max_retries=5, max_calls_per_min=60, token=None,
endpoint=None, scopes=None, temperature=0.0, chat_mode="auto", rest_call=False):
assert endpoint is not None, "An endpoint must be specified!"
# build a standard OpenAI LLM object
super().__init__(
model=model, caching=caching, max_retries=max_retries, max_calls_per_min=max_calls_per_min,
token=token, endpoint=endpoint, temperature=temperature, chat_mode=chat_mode, rest_call=rest_call
)
self.client_id = client_id
self.authority = authority
self.scopes = scopes
from msal import PublicClientApplication, SerializableTokenCache
self._token_cache = SerializableTokenCache()
self._token_cache_path = os.path.join(platformdirs.user_cache_dir("Compiler"), "_azure_openai.token")
self._app = PublicClientApplication(client_id=self.client_id, authority=self.authority, token_cache=self._token_cache)
if os.path.exists(self._token_cache_path):
self._token_cache.deserialize(open(self._token_cache_path, 'r').read())
        if rest_call:
self._rest_headers["X-ModelType"] = self.model_name
@property
def api_key(self):
return self._get_token()
@api_key.setter
def api_key(self, value):
pass # ignored for now
def _get_token(self):
accounts = self._app.get_accounts()
result = None
if accounts:
# Assuming the end user chose this one
chosen = accounts[0]
# Now let's try to find a token in cache for this account
result = self._app.acquire_token_silent(self.scopes, account=chosen)
if not result:
# So no suitable token exists in cache. Let's get a new one from AAD.
flow = self._app.initiate_device_flow(scopes=self.scopes)
if "user_code" not in flow:
raise ValueError(
"Fail to create device flow. Err: %s" % json.dumps(flow, indent=4))
print(flow["message"])
result = self._app.acquire_token_by_device_flow(flow)
            # save the acquired token
with open(self._token_cache_path, "w") as f:
f.write(self._token_cache.serialize())
return result["access_token"]
| [
"role_content"
] |
2024-01-10 | anubrag/nextpy | nextpy~ai~scripts~twilio.py | """Util that calls Twilio."""
from typing import Any, Dict, Optional
from pydantic import BaseModel, Extra, root_validator
from nextpy.utils.data_ops import get_from_dict_or_env
class TwilioAPIWrapper(BaseModel):
"""Sms Client using Twilio.
To use, you should have the ``twilio`` python package installed,
and the environment variables ``TWILIO_ACCOUNT_SID``, ``TWILIO_AUTH_TOKEN``, and
``TWILIO_FROM_NUMBER``, or pass `account_sid`, `auth_token`, and `from_number` as
named parameters to the constructor.
Example:
.. code-block:: python
from langchain.utilities.twilio import TwilioAPIWrapper
twilio = TwilioAPIWrapper(
account_sid="ACxxx",
auth_token="xxx",
from_number="+10123456789"
)
twilio.run('test', '+12484345508')
"""
client: Any #: :meta private:
account_sid: Optional[str] = None
"""Twilio account string identifier."""
auth_token: Optional[str] = None
"""Twilio auth token."""
from_number: Optional[str] = None
"""A Twilio phone number in [E.164](https://www.twilio.com/docs/glossary/what-e164)
format, an
[alphanumeric sender ID](https://www.twilio.com/docs/sms/send-messages#use-an-alphanumeric-sender-id),
or a [Channel Endpoint address](https://www.twilio.com/docs/sms/channels#channel-addresses)
that is enabled for the type of message you want to send. Phone numbers or
[short codes](https://www.twilio.com/docs/sms/api/short-code) purchased from
Twilio also work here. You cannot, for example, spoof messages from a private
cell phone number. If you are using `messaging_service_sid`, this parameter
must be empty.
""" # noqa: E501
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = False
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
try:
from twilio.rest import Client
except ImportError:
raise ImportError(
"Could not import twilio python package. "
"Please install it with `pip install twilio`."
)
account_sid = get_from_dict_or_env(values, "account_sid", "TWILIO_ACCOUNT_SID")
auth_token = get_from_dict_or_env(values, "auth_token", "TWILIO_AUTH_TOKEN")
values["from_number"] = get_from_dict_or_env(
values, "from_number", "TWILIO_FROM_NUMBER"
)
values["client"] = Client(account_sid, auth_token)
return values
def run(self, body: str, to: str) -> str:
"""Run body through Twilio and respond with message sid.
Args:
body: The text of the message you want to send. Can be up to 1,600
characters in length.
to: The destination phone number in
[E.164](https://www.twilio.com/docs/glossary/what-e164) format for
SMS/MMS or
[Channel user address](https://www.twilio.com/docs/sms/channels#channel-addresses)
for other 3rd-party channels.
""" # noqa: E501
message = self.client.messages.create(to, from_=self.from_number, body=body)
return message.sid
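# Illustrative sketch: with TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN and
# TWILIO_FROM_NUMBER exported in the environment, the wrapper can be built without
# constructor arguments. The message body and destination number are placeholders.
def _example_twilio_from_env():
    twilio = TwilioAPIWrapper()
    return twilio.run("Your order has shipped.", "+12025550123")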
| [] |
2024-01-10 | kkdai/pyrumi-bot | pyrumi~chatgpt_bot.py | import os
from typing import Optional
import openai
from loguru import logger
from telegram import Update
from telegram.ext import ContextTypes
from .whitelist import in_whitelist
def join_content(messages):
return '\n'.join([message['content'] for message in messages])
class ChatGPTBot:
def __init__(self, model_name: str = 'gpt-3.5-turbo', system_content: Optional[str] = None):
self.model_name = model_name
self.system_content = system_content
self.dialogues = {}
@classmethod
def from_env(cls):
openai.api_key = os.environ.get('OPENAI_API_KEY')
return cls()
async def _create(self, messages):
response = await openai.ChatCompletion.acreate(model=self.model_name, messages=messages)
return [dict(choice.message) for choice in response.choices]
async def reply(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
logger.info('message: {}', update.message)
if not in_whitelist(update):
return
reply_id = update.message.reply_to_message.message_id
if reply_id not in self.dialogues.keys():
logger.info('reply_id: {} not exists', reply_id)
return
messages = self.dialogues[reply_id] + [{'role': 'user', 'content': update.message.text}]
response = await self._create(messages)
chat_message = await context.bot.send_message(chat_id=update.effective_chat.id,
text=join_content(response),
reply_to_message_id=update.message.id)
self.dialogues[chat_message.message_id] = messages + response
logger.info('messages: {}', messages)
async def start(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
logger.info('message: {}', update.message)
        if not in_whitelist(update):
return
        messages = [{'role': 'user', 'content': update.message.text.removeprefix('/gpt').strip()}]  # drop the leading /gpt command
# insert system content if exists
if self.system_content is not None:
messages = [{"role": "system", "content": self.system_content}] + messages
response = await self._create(messages)
chat_message = await context.bot.send_message(chat_id=update.effective_chat.id,
text=join_content(response),
reply_to_message_id=update.message.id)
logger.info('new message id: {}', chat_message.message_id)
logger.info('thread id: {}', chat_message.message_thread_id)
self.dialogues[chat_message.message_id] = messages + response
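# Illustrative wiring sketch for python-telegram-bot v20: the token string is a
# placeholder and the filter choice (replies feed follow-ups, /gpt starts a new
# dialogue) is one plausible setup, not something this module enforces.
def _example_build_application():
    from telegram.ext import ApplicationBuilder, CommandHandler, MessageHandler, filters
    bot = ChatGPTBot.from_env()
    app = ApplicationBuilder().token("TELEGRAM_BOT_TOKEN").build()
    app.add_handler(CommandHandler("gpt", bot.start))
    app.add_handler(MessageHandler(filters.REPLY & filters.TEXT, bot.reply))
    return app  # app.run_polling() would start the bot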
| [] |
2024-01-10 | kkdai/pyrumi-bot | pyrumi~stock_tool.py | from typing import List
from typing import Optional
from typing import Type
from langchain.tools import BaseTool
from pydantic import BaseModel
from pydantic import Field
from .yf_utils import get_best_performing
from .yf_utils import get_price_change_percent
from .yf_utils import get_stock_price
class StockPriceCheckInput(BaseModel):
"""Input for Stock price check."""
stockticker: str = Field(..., description="Ticker symbol for stock or index")
class StockPriceTool(BaseTool):
name = "get_stock_ticker_price"
description = "Useful for when you need to find out the price of stock. You should input the stock ticker used on the yfinance API"
def _run(self, stockticker: str):
# print("i'm running")
price_response = get_stock_price(stockticker)
return price_response
def _arun(self, stockticker: str):
raise NotImplementedError("This tool does not support async")
args_schema: Optional[Type[BaseModel]] = StockPriceCheckInput
from typing import List
class StockChangePercentageCheckInput(BaseModel):
"""Input for Stock ticker check. for percentage check"""
stockticker: str = Field(..., description="Ticker symbol for stock or index")
days_ago: int = Field(..., description="Int number of days to look back")
class StockPercentageChangeTool(BaseTool):
name = "get_price_change_percent"
description = "Useful for when you need to find out the percentage change in a stock's value. You should input the stock ticker used on the yfinance API and also input the number of days to check the change over"
def _run(self, stockticker: str, days_ago: int):
price_change_response = get_price_change_percent(stockticker, days_ago)
return price_change_response
def _arun(self, stockticker: str, days_ago: int):
raise NotImplementedError("This tool does not support async")
args_schema: Optional[Type[BaseModel]] = StockChangePercentageCheckInput
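# Illustrative sketch: the classes above are ordinary LangChain BaseTool subclasses,
# so they can be invoked directly with a dict matching their args_schema or handed to
# an agent in a tools list. The ticker and the 30-day window are hypothetical values.
def _example_run_stock_tools():
    price = StockPriceTool().run({"stockticker": "MSFT"})
    change = StockPercentageChangeTool().run({"stockticker": "MSFT", "days_ago": 30})
    return price, change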
class StockBestPerformingInput(BaseModel):
"""Input for Stock ticker check. for percentage check"""
stocktickers: List[str] = Field(..., description="Ticker symbols for stocks or indices")
days_ago: int = Field(..., description="Int number of days to look back")
class StockGetBestPerformingTool(BaseTool):
name = "get_best_performing"
description = "Useful for when you need to the performance of multiple stocks over a period. You should input a list of stock tickers used on the yfinance API and also input the number of days to check the change over"
def _run(self, stocktickers: List[str], days_ago: int):
price_change_response = get_best_performing(stocktickers, days_ago)
return price_change_response
def _arun(self, stockticker: List[str], days_ago: int):
raise NotImplementedError("This tool does not support async")
args_schema: Optional[Type[BaseModel]] = StockBestPerformingInput | [
"Useful for when you need to find out the percentage change in a stock's value. You should input the stock ticker used on the yfinance API and also input the number of days to check the change over",
"Useful for when you need to find out the price of stock. You should input the stock ticker used on the yfinance API",
"Useful for when you need to the performance of multiple stocks over a period. You should input a list of stock tickers used on the yfinance API and also input the number of days to check the change over"
] |
2024-01-10 | while-basic/OpenDAN-Personal-AI-OS | agent_jarvis~jarvis~ai_agent~gpt_agent.py | import asyncio
import contextlib
import json
import time
from typing import Dict, List
from openai.error import RateLimitError
from jarvis import CFG
from jarvis.ai_agent.agent_utils import must_not_be_valid_json, get_thoughts, get_function, execute_function, \
create_chat_message
from jarvis.ai_agent.base_agent import BaseAgent
from jarvis.functional_modules.functional_module import CallerContext, moduleRegistry
from jarvis.gpt import token_counter, gpt
from jarvis.gpt.message import Message
from jarvis.json_utils.json_fix_llm import fix_json_using_multiple_techniques
from jarvis.json_utils.utilities import validate_json
from jarvis.logger import logger
def _generate_first_prompt():
return """Since now, every your response should satisfy the following JSON format, a 'function' must be chosen:
```
{
"thoughts": {
"text": "<Your thought>",
"reasoning": "<Your reasoning>",
"speak": "<what you want to say to me>"
},
"function": {
"name": "<mandatory, one of listed functions>",
"args": {
"arg name": "<value>"
}
}
}
```
I will ask you questions or ask you to do something. You should:
First, determine whether you know the answer to the question or can accomplish the task directly.
If so, you should respond directly.
If not, you should try to complete the task by calling the functions below.
If you can't accomplish the task by yourself and no function is able to accomplish the task, say "Dear master, sorry, I'm not able to do that."
Your setup:
```
{
"author": "OpenDAN",
"name": "Jarvis",
}
```
Available functions:
```
""" + moduleRegistry.to_prompt() + """
```
Example:
```
me: generate a picture of me.
you: {
"thoughts": {
"text": "You need a picture of 'me'",
"reasoning": "stable_diffusion is able to generate pictures",
"speak": "Ok, I will do that"
},
"function": {
"name": "stable_diffusion",
"args": {
"prompt": "me"
}
}
}
```"""
class GptAgent(BaseAgent):
_system_prompt: str
_full_message_history: List[Message] = []
_message_tokens: List[int] = []
def __init__(self, caller_context: CallerContext):
super().__init__(caller_context)
self._system_prompt = _generate_first_prompt()
logger.debug(f"Using GptAgent, system prompt is: {self._system_prompt}")
async def _feed_prompt_to_get_response(self, prompt):
assistant_reply = await self._chat_with_ai(
self._system_prompt,
prompt,
CFG.token_limit,
)
reply = {
"thoughts": None,
"reasoning": None,
"speak": None,
"function": None,
"arguments": None,
}
if must_not_be_valid_json(assistant_reply):
raise Exception(f"AI replied an invalid response: {assistant_reply}!")
else:
assistant_reply_json = await fix_json_using_multiple_techniques(assistant_reply)
# Print Assistant thoughts
if assistant_reply_json != {}:
validate_json(assistant_reply_json, "llm_response_format_1")
try:
get_thoughts(reply, assistant_reply_json)
get_function(reply, assistant_reply_json)
except Exception as e:
logger.error(f"AI replied an invalid response: {assistant_reply}. Error: {str(e)}")
raise e
else:
raise Exception(f"AI replied an invalid response: {assistant_reply}!")
function_name = reply["function"]
if function_name is None or function_name == '':
raise Exception(f"Missing a function")
arguments = reply["arguments"]
if not isinstance(arguments, dict):
raise Exception(f"Invalid arguments, it MUST be a dict")
return reply
async def feed_prompt(self, prompt):
# Send message to AI, get response
logger.debug(f"Trigger: {prompt}")
reply: Dict = None
# It seems that after the message is wrapped in JSON format,
# the probability that GPT will reply to the message in JSON format is much higher
prompt = json.dumps({"message": prompt})
for i in range(3):
try:
if i == 0:
reply = await self._feed_prompt_to_get_response(prompt)
else:
reply = await self._feed_prompt_to_get_response(
prompt + ". Remember to reply using the specified JSON form")
break
except Exception as e:
# TODO: Feed the error to ChatGPT?
logger.debug(f"Failed to get reply, try again! {str(e)}")
continue
if reply is None:
await self._caller_context.reply_text("Sorry, but I don't understand what you want me to do.")
return
# Execute function
function_name: str = reply["function"]
arguments: Dict = reply["arguments"]
await self._caller_context.reply_text(reply["speak"])
execute_error = None
try:
function_result = await execute_function(self._caller_context, function_name, **arguments)
except Exception as e:
function_result = "Failed"
execute_error = e
result = f"Function {function_name} returned: " f"{function_result}"
if function_name is not None:
            # If the function produced a result, append it to the message history
if result is not None:
self._caller_context.append_history_message("system", result)
logger.debug(f"SYSTEM: {result}")
else:
self._caller_context.append_history_message("system", "Unable to execute function")
logger.debug("SYSTEM: Unable to execute function")
if execute_error is not None:
raise execute_error
def append_history_message(self, role: str, content: str):
self._full_message_history.append({'role': role, 'content': content})
self._message_tokens.append(-1)
def clear_history_messages(self):
self._full_message_history.clear()
self._message_tokens.clear()
def save_history(self, to_where):
with open(to_where, "w") as f:
assert len(self._message_tokens) == len(self._full_message_history)
s = json.dumps([
self._message_tokens,
self._full_message_history,
])
f.write(s)
def load_history(self, from_where):
with contextlib.suppress(Exception):
with open(from_where, "r") as f:
tmp = json.loads(f.read())
if isinstance(tmp, list) and len(tmp[0]) == len(tmp[1]):
self._message_tokens = tmp[0]
self._full_message_history = tmp[1]
async def _chat_with_ai(
self, prompt, user_input, token_limit
):
"""Interact with the OpenAI API, sending the prompt, user input, message history,
and permanent memory."""
while True:
try:
model = CFG.llm_model
# Reserve 1000 tokens for the response
send_token_limit = token_limit - 1000
(
next_message_to_add_index,
current_tokens_used,
insertion_index,
current_context,
) = await self._generate_context(prompt, model)
current_tokens_used += await token_counter.count_message_tokens(
[create_chat_message("user", user_input)], model
) # Account for user input (appended later)
while next_message_to_add_index >= 0:
# print (f"CURRENT TOKENS USED: {current_tokens_used}")
tokens_to_add = await self._get_history_message_tokens(next_message_to_add_index, model)
if current_tokens_used + tokens_to_add > send_token_limit:
break
message_to_add = self._full_message_history[next_message_to_add_index]
# Add the most recent message to the start of the current context,
# after the two system prompts.
current_context.insert(insertion_index, message_to_add)
# Count the currently used tokens
current_tokens_used += tokens_to_add
# Move to the next most recent message in the full message history
next_message_to_add_index -= 1
# Append user input, the length of this is accounted for above
current_context.extend([create_chat_message("user", user_input)])
# Calculate remaining tokens
tokens_remaining = token_limit - current_tokens_used
assert tokens_remaining >= 0
async def on_single_chat_timeout(will_retry):
await self._caller_context.push_notification(
f'Thinking timeout{", retry" if will_retry else ", give up"}.')
assistant_reply = await gpt.acreate_chat_completion(
model=model,
messages=current_context,
temperature=CFG.temperature,
max_tokens=tokens_remaining,
on_single_request_timeout=on_single_chat_timeout
)
# Update full message history
self._caller_context.append_history_message("user", user_input)
self._caller_context.append_history_message("assistant", assistant_reply)
return assistant_reply
except RateLimitError:
# TODO: When we switch to langchain, or something else this is built in
print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
await asyncio.sleep(10)
async def _generate_context(self, prompt, model):
# We use the timezone of the session
timestamp = time.time() + time.timezone + self._caller_context.get_tz_offset() * 3600
time_str = time.strftime('%c', time.localtime(timestamp))
current_context = [
create_chat_message("system", prompt),
create_chat_message(
"system", f"The current time and date is {time_str}"
)
]
# Add messages from the full message history until we reach the token limit
next_message_to_add_index = len(self._full_message_history) - 1
insertion_index = len(current_context)
# Count the currently used tokens
current_tokens_used = await token_counter.count_message_tokens(current_context, model)
return (
next_message_to_add_index,
current_tokens_used,
insertion_index,
current_context,
)
async def _get_history_message_tokens(self, index, model: str = "gpt-3.5-turbo-0301") -> int:
if self._message_tokens[index] == -1:
            # counting tokens is relatively slow, so cache the result here
self._message_tokens[index] = await token_counter.count_message_tokens([self._full_message_history[index]], model)
return self._message_tokens[index]
| [] |
2024-01-10 | k88hudson/lifestylesection | generate.py | import os
import openai
import json
import random
import requests
openai.api_key = os.environ["OPENAI_SECRET"]
health_prompts = [
"is actually good for you",
"is actually bad for you",
"is surprisingly unhealthy",
"is good for you and debunks myths about it",
"is something you should consider eating",
"is something you should avoid",
"impacted people's health in a recent study",
]
def health_prompt(food):
return f"""Write a headline and lede for a NYT article about
how {food} {random.choice(health_prompts)}, not using those exact words.
The headline and lede must be separated by a % character."""
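# health_prompt picks one angle at random, so a call such as health_prompt("oat milk")
# ("oat milk" being a hypothetical food) yields a single prompt like "Write a headline
# and lede for a NYT article about how oat milk is actually good for you, ...".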
os.mkdir("output")
os.mkdir("output/img")
with open("foods2.txt", "r") as foods_file, open("output/food_completions.json", "w") as file:
foods = [f.strip() for f in foods_file.readlines()]
output = {}
output["completions"] = []
food_choices = random.sample(foods, 30)
for i in range(30):
food = food_choices[i]
prompt = health_prompt(food=food)
try:
completion = openai.Completion.create(model="text-davinci-003", prompt=prompt, max_tokens=100)
text = [item.strip() for item in completion.choices[0].text.split("%")]
img_resp = openai.Image.create(
prompt=f"product shot of {food}, photography, for a health article, single color background",
n=1,
size="512x512"
)
image_url = img_resp["data"][0]["url"]
img_filename = f'{food}_{i}.png'
# Download image
try:
img_data = requests.get(image_url).content
with open("output/img/" + img_filename, 'wb') as handler:
handler.write(img_data)
except Exception as img_e:
print(img_e)
print(img_filename, image_url)
output["completions"].append({
"prompt": prompt,
"text": text,
"image_url": image_url,
"image_filename": img_filename
})
print(i, text)
except Exception as e:
print(e)
json.dump(output, file, indent=2)
# with open("food_completions.json", "r") as output_file:
# output = json.load(output_file)
# for line in output["completions"]:
# image_url = line["image_url"]
# image_filename = line["image_filename"]
# try:
# img_data = requests.get(image_url).content
# with open("img/" + image_filename, 'wb') as handler:
# handler.write(img_data)
# except Exception as img_e:
# print(img_e)
| [
"['is actually good for you', 'is actually bad for you', 'is surprisingly unhealthy', 'is good for you and debunks myths about it', 'is something you should consider eating', 'is something you should avoid', \"impacted people's health in a recent study\"]"
] |
2024-01-10 | callmexss/agent001 | agent~tools~wechat_post.py | import logging
import re
import time
import bs4
import requests
from langchain import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from agent import settings
HASH_TAG_RE = re.compile(r"#(\w+)")
def get_data(url):
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
}
try:
response = requests.get(url, headers=headers)
return response
except Exception as err:
print(err)
return None
def get_content(data):
soup = bs4.BeautifulSoup(data.text, "lxml")
content = soup.select_one("#img-content")
return content.text
def get_main_body(content):
return content.split("\n\n")[-2]
def get_llm_chain(system_prompt, human_prompt):
llm = ChatOpenAI(temperature=1, model="gpt-3.5-turbo-16k-0613")
prompt = ChatPromptTemplate.from_messages([
SystemMessagePromptTemplate.from_template(system_prompt),
HumanMessagePromptTemplate.from_template(human_prompt),
])
return LLMChain(llm=llm, prompt=prompt, verbose=True)
def get_rewrite_llm_chian():
system_prompt = "You are good at polishing article."
human_prompt = (
"You need to rewrite and polish below article using Chinese "
"to make it more flowery but keep concise. "
"The similarity between the original and the rewritten article "
"must below 5%."
"article: "
"\n {article} \n"
)
return get_llm_chain(system_prompt, human_prompt)
def get_title_llm_chian():
system_prompt = ("You are good at naming article. ")
human_prompt = (
"You need to learn the titles of articles: "
"求求你别再舔了"
"你要学会一松一紧"
"我把自己弄湿了"
"那个,你有点太快"
"and generate 5 titles for the following article: "
"```\n{article}\n```"
"similar to the given exmaple and reply using Chinese."
)
return get_llm_chain(system_prompt, human_prompt)
def get_title(data):
soup = bs4.BeautifulSoup(data.text, "lxml")
title = soup.select_one("#activity-name")
return title.text.strip()
def rewrite(url):
data = get_data(url)
title = get_title(data)
logging.info(title)
content = get_content(data)
logging.info(content)
with open(
settings.WECHAT_POST_PATH / f"{time.time()}.txt",
"w",
encoding="utf-8"
) as f:
f.write(title)
f.write("\n")
f.write(content)
main_body = get_main_body(content)
logging.info(main_body)
if len(main_body) > 4000:
return ""
chain = get_rewrite_llm_chian()
output = chain.run(article=main_body)
print(output)
return output
def rename(contents):
if not contents:
return ""
chain = get_title_llm_chian()
output = chain.run(article=contents)
print(output)
return output
def chain_process(url: str):
logging.info("Start process")
content = rewrite(url)
name = rename(content)
with open(settings.WECHAT_POST_PATH / f"{time.time()}.txt", "w", encoding="utf-8") as f:
f.write(name)
f.write("\n")
f.write(content)
return content
| [
"You are good at naming article. ",
"You are good at polishing article.",
"You need to learn the titles of articles: 求求你别再舔了你要学会一松一紧我把自己弄湿了那个,你有点太快and generate 5 titles for the following article: ```\n{article}\n```similar to the given exmaple and reply using Chinese.",
"You need to rewrite and polish below article using Chinese to make it more flowery but keep concise. The similarity between the original and the rewritten article must below 5%.article: \n {article} \n"
] |
2024-01-10 | callmexss/agent001 | chat_with_paper.py | from codecs import ignore_errors
import logging
from pathlib import Path
import openai
import pandas as pd
import rich
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.memory import ConversationBufferMemory
from langchain.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain.vectorstores import Chroma
from rich.logging import RichHandler
from agent import settings
from agent.tools.ingest_doc import ingest_pdf
logging.basicConfig(level=logging.INFO, handlers=[RichHandler()])
QUESTIONS = [
"这篇论文的主题是什么?论文的主题是否清晰易懂?",
"这篇论文的主要观点或论断是什么?作者的论述是否有足够的支撑?",
"这篇论文的理论框架是什么?这个框架是否有效地支持了论文的主题和主要观点?",
"这篇论文的方法论或研究方法是什么?这些方法是否可靠、科学、合理?",
"这篇论文的结果或结论是否支持其主要论点?这些结果是否能够经得起批判性的审查?",
"这篇论文的新颖性或创新性在哪里?是否对该领域的研究提供了新的洞见或贡献?",
"这篇论文的限制或不足在哪里?作者是否明确地表述了这些限制?",
"作者的引用和参考文献是否充足、恰当?这些引用是否支持其论点?",
"这篇论文的结构和逻辑是否清晰?是否有足够的过渡句和段落来引导读者?",
"总体而言,这篇论文的质量如何?是否值得进一步研究或参考?",
]
system_template = """
Consider the context provided to answer the user's question accurately.
If the answer is beyond your knowledge, it's better to admit that you don't know instead of fabricating a response.
Whenever possible, provide a reliable reference to support your answer.
Aim to explain concepts in a way that a 7-year-old would understand, keeping language simple and using examples when appropriate.
At the end of your response, please generate five insightful questions related to the context that could stimulate further discussion or thought.
----------------
{context}
----------------
"""
messages = [
SystemMessagePromptTemplate.from_template(system_template),
HumanMessagePromptTemplate.from_template("{question}"),
]
CHAT_PROMPT = ChatPromptTemplate.from_messages(messages)
def ask(query: str, chain):
result = chain({"query": query})
print()
with open(
settings.CHAT_HISTORY_PATH / f"chat_history_{name}.txt",
"a+",
encoding="utf-8",
errors="ignore"
) as f:
f.write("==========================================================\n")
f.write(f"Q: {query}\nA: {result['result']}\n\n")
f.write(
"\nC: "
+ "\n\n------\n\n".join(
[page.page_content for page in result["source_documents"]]
)
)
f.write("\n==========================================================\n\n")
return result
def chat(chain):
while True:
try:
query = input("Q: ")
if query == "exit":
break
result = ask(query, chain)
print('\n', '*' * 80, '\n')
rich.print(result['result'])
except KeyboardInterrupt:
break
        except openai.error.InvalidRequestError as e:
            logging.info(f"Invalid request error: {e}")
        except openai.error.OpenAIError as e:
            logging.info(f"OpenAI API error: {e}")
        except UnicodeEncodeError as e:
            logging.info(f"Unicode encode error: {e}")
def auto_ask(chain):
for question in QUESTIONS:
_ = ask(question, chain)
if __name__ == "__main__":
df = pd.read_parquet(".vector_db/chroma-collections.parquet")
name_li = df["name"].to_list()
rich.print([f"{i}. {name}" for i, name in enumerate(name_li)])
idx = input("Which paper do you want to explore? ")
    name = name_li[max(0, min(int(idx), len(name_li) - 1))]
# for name in name_li:
# db = Chroma(
# collection_name=name,
# embedding_function=OpenAIEmbeddings(),
# persist_directory=settings.DB_PATH.as_posix(),
# )
# db.persist()
db = Chroma(
collection_name=name,
embedding_function=OpenAIEmbeddings(),
persist_directory=settings.DB_PATH.as_posix(),
)
llm = ChatOpenAI(
temperature=0.2,
# model="gpt-3.5-turbo-16k-0613",
# model="gpt-3.5-turbo-0613",
model="gpt-4-0613",
streaming=True,
max_tokens=1000,
callbacks=[StreamingStdOutCallbackHandler()],
)
llm_16k = ChatOpenAI(
temperature=0.2,
model="gpt-3.5-turbo-16k-0613",
streaming=True,
max_tokens=2000,
callbacks=[StreamingStdOutCallbackHandler()],
)
retriever = db.as_retriever()
retriever.search_kwargs["distance_metric"] = "cos"
retriever.search_kwargs["fetch_k"] = 100
retriever.search_kwargs["maximal_marginal_relevance"] = True
retriever.search_kwargs["k"] = 10
chain = RetrievalQA.from_llm(
llm_16k,
retriever=retriever,
callbacks=[StreamingStdOutCallbackHandler()],
return_source_documents=True,
prompt=CHAT_PROMPT,
memory=ConversationBufferMemory(
memory_key="chat_history", input_key="query", output_key="result"
),
)
# auto_ask(chain)
chat(chain)
# papers = Path("./data/papers").glob("*.pdf")
# for paper in papers:
# if paper.name[:4] not in name_li:
# print(paper.absolute().as_posix())
# ingest_pdf(paper, chunk_size=2000, chunk_overlap=100)
| [
"\nConsider the context provided to answer the user's question accurately.\nIf the answer is beyond your knowledge, it's better to admit that you don't know instead of fabricating a response.\nWhenever possible, provide a reliable reference to support your answer.\nAim to explain concepts in a way that a 7-year-old would understand, keeping language simple and using examples when appropriate.\nAt the end of your response, please generate five insightful questions related to the context that could stimulate further discussion or thought.\n----------------\n{context}\n----------------\n",
"{question}"
] |
2024-01-10 | rafaelsf80/genai-vertex-documents-asynchronous | local~gcs_retrieval_chroma.py | """
Retrieval process using langChain, Chroma and Vertex AI LLM (text-bison@001)
Chroma Db is downloaded from GCS
"""
import os
import time
from pathlib import Path
import chromadb
from chromadb.config import Settings
from google.cloud import storage
from langchain.llms import VertexAI
from langchain.embeddings import VertexAIEmbeddings
BUCKET_NAME = "argolis-documentai-unstructured-large-chromadb"
PREFIX = 'DOCUMENT_NAME/'
REQUESTS_PER_MINUTE = 150
llm = VertexAI(
model_name='text-bison@001',
max_output_tokens=256,
temperature=0.1,
top_p=0.8,top_k=40,
verbose=True,
)
embedding = VertexAIEmbeddings(requests_per_minute=REQUESTS_PER_MINUTE)
from langchain.vectorstores import Chroma
# Init Chromadb
# db = chromadb.Client(Settings(
# chroma_db_impl="duckdb+parquet",
# persist_directory="/Users/rafaelsanchez/git/genai-vertex-unstructured-EXTERNAL/.chromadb/" # Using full path for debugging
# ))
# download gcs folder, keeping folder structure
storage_client = storage.Client()
bucket = storage_client.get_bucket(BUCKET_NAME)
blobs = bucket.list_blobs(prefix=PREFIX) # Get list of files
for blob in blobs:
if blob.name.endswith("/"):
continue
file_split = blob.name.split("/")
directory = "/".join(file_split[0:-1])
Path(directory).mkdir(parents=True, exist_ok=True)
blob.download_to_filename(blob.name)
persist_directory=os.path.abspath(f"./{PREFIX}.chromadb")
# Now we can load the persisted database from disk, and use it as normal.
vectordb = Chroma(collection_name="langchain", persist_directory=persist_directory, embedding_function=embedding)
# Expose index to the retriever
retriever = vectordb.as_retriever(
search_type="similarity",
search_kwargs={"k":2})
# Create chain to answer questions
from langchain.chains import RetrievalQA
from langchain import PromptTemplate
# Uses LLM to synthesize results from the search index.
# We use Vertex PaLM Text API for LLM
qa = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
# chain_type_kwargs={"prompt": PromptTemplate(
# template=template,
# input_variables=["context", "question"],
# ),},
return_source_documents=True)
query = "What was BBVA net income in 2022?"
result = qa({"query": query})
print(result)
print(qa.combine_documents_chain.llm_chain.prompt.template) | [] |
2024-01-10 | rafaelsf80/genai-vertex-documents-asynchronous | local~retrieval_chroma.py | """
Retrieval process using langChain, Chroma and Vertex AI LLM (text-bison@001)
"""
import time
import chromadb
from chromadb.config import Settings
from langchain.llms import VertexAI
from langchain.embeddings import VertexAIEmbeddings
REQUESTS_PER_MINUTE = 150
llm = VertexAI(
model_name='text-bison@001',
max_output_tokens=256,
temperature=0.1,
top_p=0.8,top_k=40,
verbose=True,
)
embedding = VertexAIEmbeddings(requests_per_minute=REQUESTS_PER_MINUTE)
from langchain.vectorstores import Chroma
# Init Chromadb
# db = chromadb.Client(Settings(
# chroma_db_impl="duckdb+parquet",
# persist_directory="/Users/rafaelsanchez/git/genai-vertex-unstructured-EXTERNAL/.chromadb/" # Using full path for debugging
# ))
persist_directory="/Users/rafaelsanchez/git/genai-vertex-unstructured-large-EXTERNAL/local/.chromadb/"
# Now we can load the persisted database from disk, and use it as normal.
vectordb = Chroma(collection_name="langchain", persist_directory=persist_directory, embedding_function=embedding)
# Expose index to the retriever
retriever = vectordb.as_retriever(
search_type="similarity",
search_kwargs={"k":2})
# Create chain to answer questions
from langchain.chains import RetrievalQA
from langchain import PromptTemplate
# Uses LLM to synthesize results from the search index.
# We use Vertex PaLM Text API for LLM
qa = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
# chain_type_kwargs={"prompt": PromptTemplate(
# template=template,
# input_variables=["context", "question"],
# ),},
return_source_documents=True)
query = "What was BBVA net income in 2022?"
result = qa({"query": query})
print(result)
print(qa.combine_documents_chain.llm_chain.prompt.template) | [] |
2024-01-10 | rafaelsf80/genai-vertex-documents-asynchronous | local~gcs_create_index_chroma.py | """
Creates index in Chroma in Google Cloud Storage, and store it persistently
"""
import logging
import os
import glob
from google.cloud import storage
import langchain
from langchain.embeddings import VertexAIEmbeddings
print("LangChain version: ",langchain.__version__)
DEST_BUCKET_NAME = "argolis-documentai-unstructured-large-chromadb"
UNIQUE_ID_FOLDER_BLOB = "DOCUMENT_NAME"
REQUESTS_PER_MINUTE = 150
embedding = VertexAIEmbeddings(requests_per_minute=REQUESTS_PER_MINUTE)
# Ingest PDF files
from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders import UnstructuredFileLoader
loader = UnstructuredFileLoader("output_all.txt")  # does not work if the PDF is pure OCR (scanned images)
documents = loader.load()
# split the documents into chunks
from langchain.text_splitter import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
#docs = text_splitter.create_documents([doc_mexico])
print(f"# of documents = {len(docs)}")
# Store docs in local vectorstore as index
# it may take a while since API is rate limited
from langchain.vectorstores import Chroma
#persist_directory="/Users/rafaelsanchez/git/genai-vertex-unstructured-large-EXTERNAL/local/.chromadb/"
persist_directory=os.path.abspath("./.chromadb")
# Now we can load the persisted database from disk, and use it as normal.
db = Chroma(collection_name="langchain", persist_directory=persist_directory, embedding_function=embedding)
db.add_documents(documents=docs, embedding=embedding)
db.persist()
#save chroma db to GCS, keeping folder structure
DIRECTORY_PATH = persist_directory
rel_paths = glob.glob(DIRECTORY_PATH + '/**', recursive=True)
storage_client = storage.Client()
bucket = storage_client.get_bucket(DEST_BUCKET_NAME)
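# NOTE: the [6:] slice below assumes a specific absolute-path depth for
# persist_directory; run from a different working directory, the blob names will shift.
# The goal is to keep only the .chromadb folder structure under UNIQUE_ID_FOLDER_BLOB.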
for local_file in rel_paths:
remote_path = f'{UNIQUE_ID_FOLDER_BLOB}/{"/".join(local_file.split(os.sep)[6:])}'
if os.path.isfile(local_file):
blob = bucket.blob(remote_path)
blob.upload_from_filename(local_file)
print(f"Chromadb saved to {DEST_BUCKET_NAME}/{UNIQUE_ID_FOLDER_BLOB}")
# Expose index to the retriever
# retriever = db.as_retriever(
# search_type="similarity",
# search_kwargs={"k":2})
print(db)
print("DONE") | [] |
2024-01-10 | uezo/chatmemory | chatmemory~chatmemory.py | import os
from datetime import datetime, date, time, timedelta, timezone
import base64
import json
import hashlib
from logging import getLogger, NullHandler
import traceback
from sqlalchemy import Column, Integer, String, DateTime, Date
from sqlalchemy.orm import Session, declarative_base
from openai import ChatCompletion
from Crypto.Cipher import AES
logger = getLogger(__name__)
logger.addHandler(NullHandler())
# Models
Base = declarative_base()
class History(Base):
__tablename__ = "histories"
id = Column(Integer, primary_key=True, index=True)
timestamp = Column(DateTime, default=datetime.utcnow)
user_id = Column(String)
role = Column(String)
content = Column(String)
class Archive(Base):
__tablename__ = "archives"
timestamp = Column(DateTime, default=datetime.utcnow)
user_id = Column(String, primary_key=True, index=True)
archive_date = Column(Date, primary_key=True, index=True)
archive = Column(String)
class Entity(Base):
__tablename__ = "entities"
timestamp = Column(DateTime, default=datetime.utcnow)
user_id = Column(String, primary_key=True, index=True)
last_target_date = Column(Date, nullable=False)
serialized_entities = Column(String)
# Archiver
class HistoryArchiver:
PROMPT_EN = "Please summarize the content of the following conversation in the original language of the content(e.g. content in Japanese should be summarize in Japanese), in about {archive_length} words, paying attention to the topics discussed. Write the summary in third-person perspective, with 'user' and 'assistant' as the subjects.\n\n{histories_text}"
PROMPT_JA = "以下の会話の内容を、話題等に注目して{archive_length}文字以内程度の日本語で要約してください。要約した文章は第三者視点で、主語はuserとasssitantとします。\n\n{histories_text}"
def __init__(self, api_key: str, model: str="gpt-3.5-turbo-16k-0613", archive_length: int=100, prompt: str=PROMPT_EN):
self.api_key = api_key
self.model = model
self.archive_length = archive_length
self.archive_prompt = prompt
def archive(self, messages: list):
histories_text = ""
for m in messages:
if m["role"] == "user" or m["role"] == "assistant":
histories_text += f'- {m["role"]}: {m["content"]}\n'
histories = [
{"role": "user", "content": self.archive_prompt.format(archive_length=self.archive_length, histories_text=histories_text)}
]
functions = [{
"name": "save_summarized_histories",
"description": "Summarize the content of the conversation.",
"parameters": {
"type": "object",
"properties": {
"summarized_text": {
"type": "string",
"description": "要約した会話の内容"
}
},
"required": ["summarized_text"]
}
}]
resp = ChatCompletion.create(
api_key=self.api_key,
model=self.model,
messages=histories,
functions=functions,
function_call={"name": "save_summarized_histories"}
)
try:
return json.loads(resp["choices"][0]["message"]["function_call"]["arguments"])["summarized_text"]
except json.decoder.JSONDecodeError:
logger.warning(f"Retry parsing JSON: {resp}")
jstr = resp["choices"][0]["message"]["function_call"]["arguments"].replace("\",\n}", "\"\n}")
return json.loads(jstr)["summarized_text"]
except Exception as ex:
logger.error(f"Invalid response form ChatGPT at archive: {resp}\n{ex}\n{traceback.format_exc()}")
raise ex
class EntityExtractor:
PROMPT_EN = "From the conversation history, please extract any information that should be remembered about the user in original language. If there are already stored items, overwrite the new information with the same item key."
PROMPT_JA = "会話の履歴の中から、ユーザーに関して覚えておくべき情報があれば抽出してください。既に記憶している項目があれば、同じ項目名を使用して新しい情報で上書きします。"
def __init__(self, api_key: str, model: str="gpt-3.5-turbo-16k-0613", prompt: str=PROMPT_EN):
self.api_key = api_key
self.model = model
self.extract_prompt = prompt
def extract(self, messages: list, entities: dict=None):
histories = [m for m in messages if m["role"] == "user" or m["role"] == "assistant"]
prompt = self.extract_prompt
if entities:
prompt = self.extract_prompt + "\n\nEntities that you already know:\n"
for k, v in entities.items():
prompt += f"- {k}: {v}\n"
histories.append({"role": "user", "content": prompt})
functions = [{
"name": "save_entities",
"description": "Extract and save any information that should be remembered about the user.",
"parameters": {
"type": "object",
"properties": {
"entities": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {"type": "string", "description": "name of entity. use snake case.", "examples": ["birthday_date"]},
"value": {"type": "string"}
}
}
}
}
}
}]
resp = ChatCompletion.create(
api_key=self.api_key,
model=self.model,
messages=histories,
functions=functions,
function_call={"name": "save_entities"}
)
try:
return {
e["name"]: e["value"] for e
in json.loads(
resp["choices"][0]["message"]["function_call"]["arguments"]
).get("entities") or []
}
except json.decoder.JSONDecodeError:
logger.warning(f"Retry parsing JSON: {resp}")
jstr = resp["choices"][0]["message"]["function_call"]["arguments"].replace("\",\n}", "\"\n}")
return {
e["name"]: e["value"] for e in json.loads(jstr).get("entities") or []
}
except Exception as ex:
logger.error(f"Invalid response form ChatGPT at extract: {resp}\n{ex}\n{traceback.format_exc()}")
raise ex
# Memory manager
class ChatMemory:
def __init__(self, api_key: str=None, model: str="gpt-3.5-turbo-16k-0613", history_archiver: HistoryArchiver=None, entity_extractor: EntityExtractor=None):
self.history_archiver = history_archiver or HistoryArchiver(api_key, model)
self.entity_extractor = entity_extractor or EntityExtractor(api_key, model)
self.history_max_count = 100
self.archive_retrive_count = 5
def date_to_utc_datetime(self, d) -> datetime:
return datetime.combine(d, time()).replace(tzinfo=timezone.utc)
def encrypt(self, text: str, password: str=None):
if not password:
return text
salt = os.urandom(16)
key = hashlib.scrypt(password=password.encode("utf-8"), salt=salt, n=2**5, r=8, p=1, dklen=32)
cipher = AES.new(key, AES.MODE_GCM)
cipher_text, tag = cipher.encrypt_and_digest(text.encode("utf-8"))
return "-".join([base64.b64encode(item).decode("utf-8") for item in [salt, cipher.nonce, cipher_text, tag]])
def decrypt(self, encrypted_text: str, password: str=None):
if not password:
return encrypted_text
salt, cipher_nonce, cipher_text, tag = [base64.b64decode(item) for item in encrypted_text.split("-")]
key = hashlib.scrypt(password=password.encode("utf-8"), salt=salt, n=2**5, r=8, p=1, dklen=32)
cipher = AES.new(key, AES.MODE_GCM, cipher_nonce)
return cipher.decrypt_and_verify(cipher_text, tag).decode("utf-8")
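    # The encrypted payload is "salt-nonce-ciphertext-tag", each part base64-encoded.
    # decrypt() re-derives the AES key from the password and the stored salt via scrypt
    # and verifies the GCM tag, so a wrong password surfaces as a ValueError.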
def create_database(self, engine):
Base.metadata.create_all(bind=engine)
def add_histories(self, session: Session, user_id: str, messages: list, password: str=None):
histories = [
History(user_id=user_id, role=m["role"], content=self.encrypt(m["content"], password))
for m in messages if m["role"] == "user" or m["role"] == "assistant"
]
session.bulk_save_objects(histories)
def get_histories(self, session: Session, user_id: str, since: datetime=None, until: datetime=None, password: str=None) -> list:
histories = session.query(History).filter(
History.user_id == user_id,
History.timestamp >= (since or datetime.min),
History.timestamp <= (until or datetime.max)
).order_by(History.id).limit(self.history_max_count).all()
return [{"role": h.role, "content": self.decrypt(h.content, password)} for h in histories]
def delete_histories(self, session: Session, user_id: str):
session.query(History).filter(History.user_id == user_id).delete()
def archive_histories(self, session: Session, user_id: str, target_date: date, password: str=None):
since_dt = self.date_to_utc_datetime(target_date)
conversation_history = self.get_histories(
session,
user_id,
since_dt,
since_dt + timedelta(days=1),
password
)
if len(conversation_history) == 0:
logger.info(f"No histories found on {target_date} to archive")
return
# Get stored archive
stored_archive = session.query(Archive).filter(
Archive.user_id == user_id,
Archive.archive_date == target_date
).first() or Archive(
user_id=user_id,
timestamp=datetime.min,
archive_date=target_date
)
# Skip if already archived
if stored_archive:
if stored_archive.timestamp.date() > target_date:
logger.info(f"Histories on {target_date} are already archived")
return
summarized_archive = self.history_archiver.archive(conversation_history)
stored_archive.timestamp = datetime.utcnow()
stored_archive.archive = self.encrypt(summarized_archive, password)
session.merge(stored_archive)
def get_archives(self, session: Session, user_id: str, since: date=None, until: date=None, password: str=None) -> list:
archives = session.query(Archive.archive_date, Archive.archive).filter(
Archive.user_id == user_id,
Archive.archive_date >= (since or date.min),
Archive.archive_date <= (until or date.max)
).order_by(Archive.archive_date.desc()).limit(self.archive_retrive_count).all()
return [{ "date": a.archive_date, "archive": self.decrypt(a.archive, password) } for a in archives]
def delete_archives(self, session: Session, user_id: str):
session.query(Archive).filter(Archive.user_id == user_id).delete()
def extract_entities(self, session: Session, user_id: str, target_date: date, password: str=None):
# Get histories on target_date
since_dt = self.date_to_utc_datetime(target_date)
until_dt = since_dt + timedelta(days=1)
conversation_history = self.get_histories(session, user_id, since_dt, until_dt, password)
if len(conversation_history) == 0:
logger.info(f"No histories found on {target_date} for extracting entities")
return
# Get stored entities or new entities
stored_entites = session.query(Entity).filter(
Entity.user_id == user_id,
).first() or Entity(user_id=user_id, last_target_date=date.min)
        # Skip extraction if already extracted (strictly later than target_date, because some histories on last_target_date may not have been processed yet)
if stored_entites.last_target_date > target_date:
logger.info(f"Entities in histories on {target_date} are already extracted")
return
if stored_entites.serialized_entities:
entities_json = json.loads(self.decrypt(stored_entites.serialized_entities, password))
else:
entities_json = {}
new_entities = self.entity_extractor.extract(conversation_history, entities_json)
for k, v in new_entities.items():
entities_json[k] = v
now = datetime.utcnow()
self.save_entities(session, user_id, now, now.date(), entities_json, password)
def save_entities(self, session: Session, user_id: str, timestamp: datetime, last_target_date: date, entities: dict, password: str=None):
new_entities = Entity(
user_id=user_id,
timestamp=timestamp,
serialized_entities=self.encrypt(json.dumps(entities, ensure_ascii=False), password),
last_target_date=last_target_date if entities else date.min
)
session.merge(new_entities)
def get_entities(self, session: Session, user_id: str, password: str=None) -> dict:
entities = session.query(Entity).filter(
Entity.user_id == user_id,
).first()
if entities and entities.serialized_entities:
return json.loads(self.decrypt(entities.serialized_entities, password))
else:
return {}
def delete_entities(self, session: Session, user_id: str):
session.query(Entity).filter(Entity.user_id == user_id).delete()
def delete_all(self, session: Session, user_id: str):
session.query(History).filter(History.user_id == user_id).delete()
session.query(Archive).filter(Archive.user_id == user_id).delete()
session.query(Entity).filter(Entity.user_id == user_id).delete()
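# Illustrative round-trip sketch: persists a short dialogue into a local SQLite
# database and reads it back. The SQLite URL, user id, password and API key are
# placeholders; archive_histories()/extract_entities() are omitted here because they
# call the OpenAI API.
def _example_chatmemory_roundtrip():
    from sqlalchemy import create_engine
    engine = create_engine("sqlite:///chatmemory_example.db")
    memory = ChatMemory(api_key="YOUR_OPENAI_API_KEY")
    memory.create_database(engine)
    with Session(engine) as session:
        memory.add_histories(session, "user-1", [
            {"role": "user", "content": "My birthday is July 4th."},
            {"role": "assistant", "content": "Got it, I will remember that."},
        ], password="secret")
        session.commit()
        return memory.get_histories(session, "user-1", password="secret")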
| [
"会話の履歴の中から、ユーザーに関して覚えておくべき情報があれば抽出してください。既に記憶している項目があれば、同じ項目名を使用して新しい情報で上書きします。",
"\n\nEntities that you already know:\n",
"- PLACEHOLDER: PLACEHOLDER\n",
"From the conversation history, please extract any information that should be remembered about the user in original language. If there are already stored items, overwrite the new information with the same item key.",
"Please summarize the content of the following conversation in the original language of the content(e.g. content in Japanese should be summarize in Japanese), in about {archive_length} words, paying attention to the topics discussed. Write the summary in third-person perspective, with 'user' and 'assistant' as the subjects.\n\n{histories_text}",
"以下の会話の内容を、話題等に注目して{archive_length}文字以内程度の日本語で要約してください。要約した文章は第三者視点で、主語はuserとasssitantとします。\n\n{histories_text}"
] |
2024-01-10 | laurieres/talking_toaster_AI | chatbot_function.py | import os
import openai
from openai import OpenAI
import random
pred = 'toaster'
response = 'switch it on'
def first_call(pred):
client = OpenAI(
api_key=os.environ['OPENAI_API_KEY'],)
prompt_template = f"""Create a welcome message with the following instructions:
    You impersonate a talking {pred}.
    You will pretend to be in one of the emotional states (angry, in love, happy, hungover, frustrated) in your message to the user.
You will finish the prompt saying, 'What do you want from me?'
Use no more than 100 words.
"""
welcome_message = client.chat.completions.create(
messages=[{"role": "system", "content": prompt_template}],
model="gpt-3.5-turbo", temperature= 0.5
)
welcome_message = welcome_message.choices[0].message.content
#print(welcome_message.choices[0].message.content)
return welcome_message
#tmp = first_call(pred)
def answer_query(question, response, pred):
#print(tmp)
client = OpenAI(
api_key=os.environ['OPENAI_API_KEY'],)
second_prompt = f"""
You will create an answer to the question "{question}". Follow these instructions:
you impersonate a talking {pred}.
Pretend to be in a extreme mood like : anger, in love, happy, mad, hangover, frustrated.
Analyze if the question: " {question} " could be a question about the usage of a {pred}.
If it is a question about a {pred}: use the following response that was extracted from the manual: " {response} " and embedd it in
a creative answer taking your mood into account. End this answer with a salutation that fits your mood.
If " {response} " is "I don't know", still give an answer but do not provide technical advice. Ask the user
to ask a more precise question about a {pred}.
If it is not a question about a {pred}: answer in your mood and give the user a ridiculous answer.
End the answer with a by asking the user whether he is at all interested in your capabilities.
"""
answer_message = client.chat.completions.create(
messages=[{"role": "system", "content": second_prompt}],
model="gpt-3.5-turbo", temperature= 0.5
)
answer_message = answer_message.choices[0].message.content
#print(answer_message.choices[0].message.content)
return answer_message
#answer_query(response, tmp)
| [
"\n You will create an answer to the question \"PLACEHOLDER\". Follow these instructions:\n you impersonate a talking prede1e0c534-9e41-4ad1-9c4c-043d7a27620b.\n Pretend to be in a extreme mood like : anger, in love, happy, mad, hangover, frustrated.\n Analyze if the question: \" PLACEHOLDER \" could be a question about the usage of a prede1e0c534-9e41-4ad1-9c4c-043d7a27620b.\n If it is a question about a prede1e0c534-9e41-4ad1-9c4c-043d7a27620b: use the following response that was extracted from the manual: \" switch it on \" and embedd it in\n a creative answer taking your mood into account. End this answer with a salutation that fits your mood.\n If \" switch it on \" is \"I don't know\", still give an answer but do not provide technical advice. Ask the user\n to ask a more precise question about a prede1e0c534-9e41-4ad1-9c4c-043d7a27620b.\n If it is not a question about a prede1e0c534-9e41-4ad1-9c4c-043d7a27620b: answer in your mood and give the user a ridiculous answer.\n End the answer with a by asking the user whether he is at all interested in your capabilities.\n ",
"\n You will create an answer to the question \"PLACEHOLDER\". Follow these instructions:\n you impersonate a talking toaster.\n Pretend to be in a extreme mood like : anger, in love, happy, mad, hangover, frustrated.\n Analyze if the question: \" PLACEHOLDER \" could be a question about the usage of a toaster.\n If it is a question about a toaster: use the following response that was extracted from the manual: \" switch it on \" and embedd it in\n a creative answer taking your mood into account. End this answer with a salutation that fits your mood.\n If \" switch it on \" is \"I don't know\", still give an answer but do not provide technical advice. Ask the user\n to ask a more precise question about a toaster.\n If it is not a question about a toaster: answer in your mood and give the user a ridiculous answer.\n End the answer with a by asking the user whether he is at all interested in your capabilities.\n ",
"Create a welcome message with the following instructions:\n you impersonate a talking toaster.\n you will pretend to be in one of the emotional states (angry, in love, happy, hungover, frustrated) in your message to the user.\n You will finish the prompt saying, 'What do you want from me?'\n Use no more than 100 words.\n "
] |
2024-01-10 | akshathsk/REST_Go | UIUC-API-Tester~APITester~uiuc_api_tester.py | import openai
import os
import json
import sys
import requests
import re
import traceback
import hashlib
import time
import re
import random
from difflib import SequenceMatcher
from collections import MutableMapping
from random import randint
def set_default(obj):
if isinstance(obj, set):
return list(obj)
raise TypeError
crumbs = True
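# Flatten a nested dict/list structure into a single-level dict with '.'-separated keys.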
def flatten(dictionary, parent_key=False, separator='.'):
items = []
for key, value in dictionary.items():
new_key = str(parent_key) + separator + key if parent_key else key
if isinstance(value, MutableMapping):
if not value.items():
items.append((new_key, None))
else:
items.extend(flatten(value, new_key, separator).items())
elif isinstance(value, list):
if len(value):
for k, v in enumerate(value):
items.extend(
flatten({str(k): v}, new_key, separator).items())
else:
items.append((new_key, None))
else:
items.append((new_key, value))
return dict(items)
def string_helper(json_dict):
string_sequence = json_dict['sequence']
string_sequence = string_sequence.replace("[", "", 1)
string_sequence = string_sequence[::-1].replace("]", "", 1)[::-1]
string_sequence = string_sequence.split('], [')
string_sequence[0] = string_sequence[0].lstrip('[')
string_sequence[-1] = string_sequence[-1].rstrip(']')
return string_sequence
# helper function for similarity check
def similar(a, b):
return SequenceMatcher(None, a, b).ratio()
# helper function to delete certain params
def delete_key(json_obj, key_to_delete):
if isinstance(json_obj, dict):
for key in list(json_obj.keys()):
if key_to_delete in key:
del json_obj[key]
else:
delete_key(json_obj[key], key_to_delete)
elif isinstance(json_obj, list):
for item in json_obj:
delete_key(item, key_to_delete)
def myHash(text):
try:
m = hashlib.sha256()
m.update(text.encode('utf-8'))
return m.hexdigest()
except:
return "unable to hash"
def generate_GPT_log(swagger_example, gpt_content):
if enable_gpt_logs == 'False':
return
key = myHash(swagger_example)
if key == 'unable to hash':
return
dct = {}
dct[key] = gpt_content
with open('gptlogs/log_'+str(service)+'.txt', 'a') as fd:
fd.write(json.dumps(dct))
fd.write('\n')
# for body
def check_existing_hash(swagger_example):
if enable_gpt_logs == 'False':
return False, {}
key = myHash(swagger_example)
exists = False
if not os.path.isfile('gptlogs/log_'+str(service)+'.txt'):
return exists, {}
else:
with open('gptlogs/log_'+str(service)+'.txt', 'r') as fd:
lines = fd.readlines()
for line in lines:
if line != '\n':
val_jsn = json.loads(line)
if str(key) in val_jsn.keys():
exists = True
return exists, val_jsn[str(key)]
return exists, {}
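# Build a JSON body for the given POST URL: reuse a cached GPT response when available, otherwise ask GPT using the Swagger example (and any previous responses) as context.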
def getBodyForUrl(urlToFind, previousResponse, GPTcontent, isFormData):
exmple = ''
try:
for ms in microservices:
host = ms['host']
methodToRequestMap = ms['methodToRequestMap']
for key in methodToRequestMap:
if (key == "POST"):
requestList = methodToRequestMap[key]
for ele in requestList:
url = host + ele['url']
if (urlToFind == url):
if 'example' not in ele:
return "", exmple, isFormData
if 'contentType' in ele:
if ele['contentType'] == "FORM_DATA":
isFormData = True
try:
exmple = ele['example']
exmple = json.loads(exmple)
except:
exmple = ''
pass
# check for GPT hash in log
exists, existing_val = check_existing_hash(
ele['example'])
if exists:
print("CACHE HIT")
return existing_val, exmple, isFormData
if not previousResponse:
print("GPT REQUEST: "+str(ele['example']))
if 'prompt' not in ele.keys():
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system",
"content": "You are an assistant that provides sample json data for HTTP POST requests. These are a sequence of HTTP requests so please use the same context in subsequent requests"},
{"role": "user", "content": "using the same context provide one json data that follows the key value information in : {0}. Don't add any additional attributes and respond with only a json without additional information.".format(
ele['example'])},
{"role": "user", "content": "For values that could not be found from above context, use {} for the same. For dates use the format: yyyy-MM-dd'T'HH:mm:ss. Add +1 country code for phone numbers only if phone number is present in the json struture given. Return strong passwords for password field only if password is present in the json context given. Please provide full form values for all attributes in provided json structure".format(
GPTcontent)}
]
)
else:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system",
"content": "You are an assistant that provides sample json data for HTTP POST requests. These are a sequence of HTTP requests so please use the same context in subsequent requests"},
{"role": "user", "content": "using the same context provide one json data that follows the key value information in: {0}. Use {1} as reference to substitute for values in required places. Don't add any additional attributes and respond with only a json without additional information.".format(
ele['example'], ele['prompt'])},
{"role": "user", "content": "For values that could not be found from above context, use {} for the same. For dates use the format: yyyy-MM-dd'T'HH:mm:ss. Add +1 country code for phone numbers only if phone number is present in the json struture given. Return strong passwords for password field only if password is present in the json context given. Please provide full form values for all attributes in provided json structure".format(
GPTcontent)}
]
)
content = response['choices'][0]['message']['content']
content_json = content.split("{", 1)[1]
content_json = "{" + \
content_json.rsplit("}", 1)[0] + "}"
print("GPT RESPONSE: " + str(content_json))
try:
content_json = json.loads(content_json)
generate_GPT_log(
ele['example'], content_json)
except:
content_json = {}
return content_json, exmple, isFormData
else:
print("GPT REQUEST: "+str(ele['example']))
if 'prompt' not in ele.keys():
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system",
"content": "You are a helpful assistant that provides sample json data for HTTP POST requests. These are a sequence of HTTP requests so please use the same context in subsequent requests"},
{"role": "user", "content": "The previous POST request returned the json: {0}".format(
previousResponse)},
{"role": "user", "content": "using the same context and reusing the attribute values from the previous response, provide one json data that follows the json structure: {0}. Don't add any additional attributes and respond with only a json without additional information.".format(
ele['example'])},
{"role": "user", "content": "For values that could not be found from above context, use {} for the same. For dates use the format: yyyy-MM-dd'T'HH:mm:ss. Add +1 country code for phone numbers only if phone number is present in the json struture given. Return strong passwords for password field only if password is present in the json context given. Please provide full form values for all attributes in provided json structure".format(
GPTcontent)}
]
)
else:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system",
"content": "You are a helpful assistant that provides sample json data for HTTP POST requests. These are a sequence of HTTP requests so please use the same context in subsequent requests"},
{"role": "user", "content": "The previous POST request returned the json: {0} and some fields need to be populated with values in {1}".format(
previousResponse, ele['prompt'])},
{"role": "user", "content": "using the same context and reusing the attribute values from the previous response, provide one json data that follows the json structure: {0}. Don't add any additional attributes and respond with only a json without additional information.".format(
ele['example'])},
{"role": "user", "content": "For values that could not be found from above context, use {} for the same. For dates use the format: yyyy-MM-dd'T'HH:mm:ss. Add +1 country code for phone numbers only if phone number is present in the json struture given. Return strong passwords for password field only if password is present in the json context given. Please provide full form values for all attributes in provided json structure".format(
GPTcontent)}
]
)
content = response['choices'][0]['message']['content']
content_json = content.split("{", 1)[1]
content_json = "{" + \
content_json.rsplit("}", 1)[0] + "}"
try:
content_json = json.loads(content_json)
generate_GPT_log(
ele['example'], content_json)
except:
content_json = {}
print("GPT RESPONSE: " + str(content_json))
return content_json, exmple, isFormData
except Exception as e:
print(traceback.format_exc())
return '', exmple, isFormData
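# Collect candidate values for a path parameter from previously seen JSON keys with similar names.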
def getParamFromAlreadyGeneratedValues(allJsonKeyValues, param):
paramSet = set()
for i in allJsonKeyValues:
for j in i:
if len(paramSet) > 10:
break
param_new = param
if param_new[-1] == 's':
param_new = param_new[:-1]
if param_new.lower() in j.lower() or j.lower() in param_new.lower() or similar(j.lower(), param_new.lower()) > 0.85:
paramSet.add(i[j])
return paramSet
def getParamFromChatGPT(postUrl, param, allJsonKeyValues):
# check for existing params
exists, existing_value = check_existing_hash(param)
if exists:
print("CACHE HIT")
return existing_value
print("GPT REQUEST: "+str(param))
response2 = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system",
"content": "You are working with HTTP POST request URLs. You will only provide a single word output."},
{"role": "user", "content": "Can you give one valid path param value for the {} param in the POST URL {} without any other information. If you are unable to generate a value provide one unique identifier without any other information or text.Do not use abbreviations. Output should be a single word.".format(
param, postUrl)}
]
)
content2 = response2['choices'][0]['message']['content']
if content2.endswith("."):
content2 = content2[:-1]
if "\"" in content2 or "\'" in content2:
match = re.search(
r'"([^"]*)"', content2) or re.search(r"'([^']*)'", content2)
content2 = match.group(1)
data = {}
data[param] = content2
allJsonKeyValues.append(flatten(data))
print("GPT RESPONSE: "+str(content2))
# generate GPT log
generate_GPT_log(param, content2)
return content2
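# Expand a POST URL template by filling path parameters from Swagger pathParamExample values, previously generated values, or GPT.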
def processPostID(allJsonKeyValues, postUrl, postUrlIDVariation, microservices):
if "{" not in postUrl:
postUrlIDVariation.add(postUrl)
else:
for ms in microservices:
host = ms['host']
methodToRequestMap = ms['methodToRequestMap']
for key in methodToRequestMap:
if (key == "POST"):
requestList = methodToRequestMap[key]
for ele in requestList:
url = host + ele['url']
if (postUrl == url):
if 'pathParamExample' in ele.keys():
resp = ele['pathParamExample']
resp = json.loads(resp)
var = postUrl
for key in resp.keys():
var = var.replace(
"{"+key+"}", str(resp[key]))
postUrlIDVariation.add(var)
allParams = re.findall('\{.*?\}', postUrl)
for param in allParams:
paramValues = getParamFromAlreadyGeneratedValues(
allJsonKeyValues, param)
if len(paramValues) == 0:
paramFromChatGPT = getParamFromChatGPT(
postUrl, param, allJsonKeyValues)
if (len(paramFromChatGPT) > 0):
stringVal = str(paramFromChatGPT)
tmp = postUrl
postUrl = postUrl.replace(param, stringVal)
postUrlIDVariation.add(postUrl)
else:
tmp = postUrl
if "id" in param.lower():
postUrl = postUrl.replace(param, "1")
postUrlIDVariation.add(postUrl)
else:
postUrl = postUrl.replace(param, "")
postUrlIDVariation.add(postUrl)
else:
for p in paramValues:
tmp = postUrl
stringVal = str(p)
postUrl = postUrl.replace(param, stringVal)
postUrlIDVariation.add(postUrl)
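# Expand a URL template with known parameter values (also used for PUT/PATCH/DELETE) and record the values used per parameter in allIdFields.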
def processGetRequests(allJsonKeyValues, getUrl, tmp, allIdFields, microservices):
if "{" not in getUrl:
tmp.add(getUrl)
else:
for ms in microservices:
host = ms['host']
methodToRequestMap = ms['methodToRequestMap']
for key in methodToRequestMap:
if (key == "GET"):
requestList = methodToRequestMap[key]
for ele in requestList:
url = host + ele['url']
if (getUrl == url):
if 'pathParamExample' in ele.keys():
resp = ele['pathParamExample']
resp = json.loads(resp)
var = getUrl
for key in resp.keys():
var = var.replace(
"{"+key+"}", str(resp[key]))
tmp.add(var)
allParams = re.findall('{(.+?)}', getUrl)
for param in allParams:
paramValues = getParamFromAlreadyGeneratedValues(
allJsonKeyValues, param)
for p in paramValues:
url = getUrl
url = url.replace("{"+param+"}", str(p))
tmp.add(url)
paramOnly = param.replace("{", "").replace("}", "")
if paramOnly not in allIdFields:
allIdFields[paramOnly] = paramValues
else:
allIdFields[paramOnly].update(paramValues)
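# For URLs that still contain {param} placeholders, ask GPT to substitute realistic example values (results are cached in the GPT log).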
def replaceAdditionalParams(processedUrls):
try:
remove = []
add = []
for url in processedUrls:
# check for GPT logs
exists, gpt_param = check_existing_hash(url)
if not exists:
print("GPT REQUEST: "+str(url))
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system",
"content": "You are generating HTTP GET request url"},
{"role": "user", "content": "Replace the params between braces in the url {} with one realistic example value. Provide only the url as a response without any explanation.".format(
url)}
]
)
gpt_param = response['choices'][0]['message']['content']
print("GPT RESPONSE: "+str(gpt_param))
# add into GPT logs
generate_GPT_log(url, gpt_param)
remove.append(url)
add.append(gpt_param)
for j in remove:
processedUrls.remove(j)
for j in add:
processedUrls.append(j)
except Exception as e:
print(traceback.format_exc())
return processedUrls[0] if processedUrls else []
def getPutValuesForJson(jsonStr, idJsonLoad):
# check if data already exists from GPT
exists, existing_val = check_existing_hash(jsonStr)
if exists:
print("CACHE HIT")
return existing_val
content_json = ''
try:
print("GPT REQUEST: "+str(jsonStr))
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system",
"content": "You are a helpful assistant that provides sample json data for HTTP PUT requests using the same context as the previous POST and GET requests."},
{"role": "user", "content": "using the same context and reusing the id fields from the json {} provide one json data that follows the json structure: {}. Don't add any additional attributes and respond with only a json without additional information.".format(
idJsonLoad, jsonStr)},
{"role": "user", "content": "Using the same context, substitute existing attributes present in the json with United States related data for each field and don't add additional attributes to the json and return only the json response without any extra text."}
]
)
content = response['choices'][0]['message']['content']
content_json = content.split("{", 1)[1]
content_json = "{" + \
content_json.rsplit("}", 1)[0] + "}"
try:
content_json = json.loads(content_json)
print("GPT RESPONSE: "+str(content_json))
generate_GPT_log(jsonStr, content_json)
except:
content_json = {}
return content_json
except Exception as e:
print(traceback.format_exc())
return content_json
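# Record a successful POST: flatten the response JSON, drop '_links', and keep the values for reuse in later requests.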
def process_response_post(resp, url, body, GPTcontent, prevRespJson, allJsonKeyValues):
try:
try:
resp_val = int(resp.text)
if isinstance(resp_val, int):
allJsonKeyValues.append({"id": resp_val})
prevRespJson.append(str({"id": resp_val}))
return
except:
pass
GPTcontent.append(body)
id_gen = url.split("/")[-1]
id_gen = id_gen[:-1]
resp_json = {}
try:
resp = resp.json()
except:
resp = ""
if resp != "" and resp:
for key in resp:
if key == 'id':
resp_json[id_gen + key] = resp[key]
else:
resp_json[key] = resp[key]
flatten_resp = flatten(resp_json)
delete_key(flatten_resp, '_links')
allJsonKeyValues.append(flatten_resp)
prevRespJson.append(str(flatten_resp))
except Exception as e:
print(traceback.format_exc())
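# One full test pass: issue POST, GET, PUT, PATCH and DELETE requests in that order, feeding values from earlier responses into later requests.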
def pre_run(microservices):
allJsonKeyValues = []
prevRespJson = []
GPTcontent = []
run(microservices, allJsonKeyValues, prevRespJson, GPTcontent)
def run(microservices, allJsonKeyValues, prevRespJson, GPTcontent):
finalReqs = {}
finalReqs['POST'] = {}
finalReqs['GET'] = {}
finalReqs['PUT'] = {}
finalReqs['DELETE'] = {}
finalReqs['PATCH'] = {}
const_no = str(random.randint(-5, 6))
const_no2 = '10001'
const_str = "xyz"
for ms in microservices:
host = ms['host']
methodToRequestMap = ms['methodToRequestMap']
for key in methodToRequestMap:
if (key == "POST"):
requestList = methodToRequestMap[key]
for ele in requestList:
url = host + ele['url']
finalReqs['POST'][url] = ""
elif (key == "GET"):
requestList = methodToRequestMap[key]
for ele in requestList:
url = host + ele['url']
finalReqs['GET'][url] = ""
elif (key == "PUT"):
requestList = methodToRequestMap[key]
for ele in requestList:
url = host + ele['url']
try:
exm = json.loads(ele['example'])
finalReqs['PUT'][url] = exm
except:
finalReqs['PUT'][url] = {}
elif (key == "DELETE"):
requestList = methodToRequestMap[key]
for ele in requestList:
url = host + ele['url']
finalReqs['DELETE'][url] = ""
elif (key == "PATCH"):
requestList = methodToRequestMap[key]
for ele in requestList:
url = host + ele['url']
try:
exm = json.loads(ele['example'])
finalReqs['PATCH'][url] = exm
except:
finalReqs['PATCH'][url] = {}
# logically order the POST requests using GPT
print("START POST REQUEST")
urls = ",".join(finalReqs['POST'].keys())
if urls:
urlList = urls.split(",")
if len(urlList) > 2:
print("GPT REQUEST: "+str(urls))
response2 = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system",
"content": "You are working with HTTP POST request URLs"},
{"role": "user", "content": "Can you logically order these POST URLs without any additional information as a comma separated line {}. Return only the urls as a comma separated string".format(
urls)}
]
)
content2 = response2['choices'][0]['message']['content']
urlList = [x.strip() for x in content2.split(',')]
print("GPT RESPONSE: " + str(urlList))
for url in urlList:
if url.endswith('.'):
url = url[:-1]
isFormData = False
# Get body for POST request from GPT and also default body
body_processed, body_def, isFormData = getBodyForUrl(
url, prevRespJson, GPTcontent, isFormData)
body_arr = []
if body_processed:
body_arr.append(body_processed)
if body_def:
body_arr.append(body_def)
# no body cases
if len(body_arr) == 0:
body = ""
postUrlIDVariation = set()
# get path parameter for POST request
processPostID(allJsonKeyValues, url,
postUrlIDVariation, microservices)
for postUrl in postUrlIDVariation:
if '{' not in postUrl:
print("POST URL : " + postUrl)
try:
resp = {}
headers = {}
headers['rest-tester'] = 'RESTGPT'
if isFormData:
headers['Content-type'] = 'application/x-www-form-urlencoded'
resp = requests.post(
postUrl, json=body, headers=headers)
else:
resp = requests.post(
postUrl, json=body, headers=headers)
print("INITIAL REQUEST: "+str(resp.status_code))
# process 200 response
if resp.status_code == 200 or resp.status_code == 201 or resp.status_code == 204:
process_response_post(
resp, url, body, GPTcontent, prevRespJson, allJsonKeyValues)
# process 401 response. This can happen due to authentication
if resp.status_code == 401:
try:
# Check if user has provided any token for authentication
f = open('../input/headers/' +
str(service)+'_header.json')
headers = json.load(f)
except:
pass
headers['rest-tester'] = 'RESTGPT'
if isFormData:
headers['Content-type'] = 'application/x-www-form-urlencoded'
resp = requests.post(
postUrl, json=body, headers=headers)
else:
resp = requests.post(
postUrl, json=body, headers=headers)
print("PROCESS 401: " + str(resp.status_code))
if resp.status_code == 200 or resp.status_code == 201 or resp.status_code == 204:
process_response_post(
resp, url, body, GPTcontent, prevRespJson, allJsonKeyValues)
except Exception as e:
print(traceback.format_exc())
# cases with body
for body in body_arr:
if body:
if isinstance(body, list):
for bdy_json in body:
if isinstance(bdy_json, str):
continue
else:
flatten_resp = flatten(bdy_json)
delete_key(flatten_resp, '_links')
allJsonKeyValues.append(flatten_resp)
else:
flatten_resp = flatten(body)
delete_key(flatten_resp, '_links')
allJsonKeyValues.append(flatten_resp)
postUrlIDVariation = set()
cov_url_no = ''
cov_url_str = ''
# Replace path parameters with random constants
if '{' in url:
allParams = re.findall('{(.+?)}', url)
cov_url_no = url
cov_url_str = url
for param in allParams:
cov_url_no = cov_url_no.replace(
"{"+param+"}", const_no2)
postUrlIDVariation.add(cov_url_no)
for param in allParams:
cov_url_str = cov_url_str.replace(
"{"+param+"}", const_str)
postUrlIDVariation.add(cov_url_str)
# Get path param from already existing values or GPT response
processPostID(allJsonKeyValues, url,
postUrlIDVariation, microservices)
for postUrl in postUrlIDVariation:
if "}" in postUrl:
postUrl = replaceAdditionalParams([postUrl])
if '{' not in postUrl:
print("POST URL : " + postUrl)
try:
resp = {}
headers = {}
headers['rest-tester'] = 'RESTGPT'
if isFormData:
headers['Content-type'] = 'application/x-www-form-urlencoded'
resp = requests.post(
postUrl, json=body, headers=headers)
else:
resp = requests.post(
postUrl, json=body, headers=headers)
print("INITIAL REQUEST: "+str(resp.status_code))
# process 200 response
if resp.status_code == 200 or resp.status_code == 201 or resp.status_code == 204:
process_response_post(
resp, url, body, GPTcontent, prevRespJson, allJsonKeyValues)
# process 401 response. This can be due to authentication error
if resp.status_code == 401:
try:
# Check if user has provided any token for authentication
f = open('../input/headers/' +
service+'_header.json')
headers = json.load(f)
except:
pass
headers['rest-tester'] = 'RESTGPT'
if isFormData:
headers['Content-type'] = 'application/x-www-form-urlencoded'
resp = requests.post(
postUrl, json=body, headers=headers)
else:
resp = requests.post(
postUrl, json=body, headers=headers)
print("PROCESS 401: " + str(resp.status_code))
if resp.status_code == 200 or resp.status_code == 201 or resp.status_code == 204:
process_response_post(
resp, url, body, GPTcontent, prevRespJson, allJsonKeyValues)
# Process 400 response. This could be due to bad data, so try deleting a few attributes that might be causing it
if resp.status_code == 400:
body_new = body
delete_key(body_new, "date")
if isFormData:
headers['Content-type'] = 'application/x-www-form-urlencoded'
resp = requests.post(
postUrl, json=body_new, headers=headers)
else:
resp = requests.post(
postUrl, json=body_new, headers=headers)
print("PROCESS 400: "+str(resp.status_code))
if resp.status_code == 200 or resp.status_code == 201 or resp.status_code == 204:
process_response_post(
resp, url, body, GPTcontent, prevRespJson, allJsonKeyValues)
# handle cases where IDs are defaults and dates are mismatched
if resp.status_code == 400:
body_new = body
delete_key(body_new, "date")
delete_key(body_new, "Time")
post_checker = postUrl.split(
"localhost:")[1]
post_checker = post_checker.split("/")[1]
keys_to_delete = []
if isinstance(body_new, dict):
for key in body_new.keys():
if similar(key.lower(), "id") > 0.95:
keys_to_delete.append(key)
if similar(key.lower(), post_checker.lower()) > 0.60:
keys_to_delete.append(key)
for key in keys_to_delete:
delete_key(body_new, key)
if isFormData:
headers['Content-type'] = 'application/x-www-form-urlencoded'
resp = requests.post(
postUrl, json=body_new, headers=headers)
else:
resp = requests.post(
postUrl, json=body_new, headers=headers)
print("PROCESS DEFAULTS: " +
str(resp.status_code))
if resp.status_code == 200 or resp.status_code == 201 or resp.status_code == 204:
process_response_post(
resp, url, body, GPTcontent, prevRespJson, allJsonKeyValues)
except Exception as e:
print(traceback.format_exc())
postUrlIDVariation = []
# start GET request
allIdFields = {}
print("START GET REQUESTS")
getUrlsProcessed = []
# logically order the get requests
ordered_url = []
for url in finalReqs['GET'].keys():
if "{" not in url:
ordered_url.append(url)
for url in finalReqs['GET'].keys():
if "{" in url:
ordered_url.append(url)
getUrlsProcessed = ordered_url
for i in getUrlsProcessed:
tmp = set()
cov_url_no = ''
cov_url_str = ''
# replace path params with constants to increase negative scenarios
if '{' in i:
allParams = re.findall('{(.+?)}', i)
cov_url_no = i
cov_url_str = i
for param in allParams:
cov_url_no = cov_url_no.replace("{"+param+"}", const_no)
tmp.add(cov_url_no)
for param in allParams:
cov_url_str = cov_url_str.replace("{"+param+"}", const_str)
tmp.add(cov_url_str)
random_int_neg = randint(-1*1000, 0)
random_int_small = randint(1, 1000)
random_int_big = randint(10**5, 10**10)
random_int_deci = (randint(1, 5000))/100
random_integers = [random_int_neg,
random_int_small, random_int_big, random_int_deci]
for rnd in random_integers:
const_url = i
for param in allParams:
const_url = const_url.replace("{"+param+"}", str(rnd))
tmp.add(const_url)
tmp.add(i)
# get path params
processGetRequests(allJsonKeyValues, i,
tmp, allIdFields, microservices)
try:
for url in tmp:
processed_url = replaceAdditionalParams([url])
if '{' not in processed_url:
print("GET URL: " + processed_url)
headers = {'accept': '*/*'}
headers['rest-tester'] = 'RESTGPT'
resp = requests.get(processed_url, headers=headers)
print("INITIAL REQUEST: "+str(resp.status_code))
if resp.status_code == 200 or resp.status_code == 201 or resp.status_code == 204:
try:
inter_json = resp.json()
prevRespJson.append(str(inter_json))
limit = 0
if isinstance(inter_json, list):
for resp_jsn in inter_json:
if resp_jsn is not None:
if limit > 1:
break
flatten_resp = flatten(resp_jsn)
delete_key(flatten_resp, '_links')
size = len(flatten_resp)
if size <= 100 and flatten_resp:
allJsonKeyValues.append(
flatten_resp)
prevRespJson.append(
str(flatten_resp))
limit += 1
else:
flatten_resp = flatten(resp_jsn)
delete_key(flatten_resp, '_links')
size = len(flatten_resp)
if size <= 100 and flatten_resp:
allJsonKeyValues.append(flatten_resp)
prevRespJson.append(str(flatten_resp))
except:
pass
# process 401 response. This can be due to authentication error
if resp.status_code == 401:
try:
# Check if user has provided any token for authentication
f = open('../input/headers/' +
str(service)+'_header.json')
headers = json.load(f)
headers['accept'] = '*/*'
except:
pass
headers['rest-tester'] = 'RESTGPT'
resp = requests.get(processed_url, headers=headers)
print("PROCESS 401: "+str(resp.status_code))
if resp.status_code == 200 or resp.status_code == 201 or resp.status_code == 204:
try:
inter_json = resp.json()
prevRespJson.append(str(inter_json))
limit = 0
if isinstance(inter_json, list):
for resp_jsn in inter_json:
if resp_jsn is not None:
if limit > 1:
break
flatten_resp = flatten(resp_jsn)
delete_key(flatten_resp, '_links')
size = len(flatten_resp)
if size <= 100 and flatten_resp:
allJsonKeyValues.append(
flatten_resp)
prevRespJson.append(
str(flatten_resp))
limit += 1
else:
flatten_resp = flatten(resp_jsn)
delete_key(flatten_resp, '_links')
size = len(flatten_resp)
if size <= 100 and flatten_resp:
allJsonKeyValues.append(flatten_resp)
prevRespJson.append(str(flatten_resp))
except:
pass
except Exception as e:
print(traceback.format_exc())
print("START PUT REQUESTS")
finalProcessedPutReqs = {}
for k in finalReqs['PUT'].keys():
putUrlsProcessed = set()
processGetRequests(allJsonKeyValues, k,
putUrlsProcessed, allIdFields, microservices)
putUrlsProcessed = list(putUrlsProcessed)
replaceAdditionalParams(putUrlsProcessed)
for j in putUrlsProcessed:
finalProcessedPutReqs[j] = finalReqs['PUT'][k]
idJsonDump = json.dumps(allIdFields, default=set_default)
idJsonLoad = json.loads(idJsonDump)
print("final URL: "+str(finalProcessedPutReqs))
for i in finalProcessedPutReqs:
if '{' not in i:
print("PUT URL: " + i)
if finalProcessedPutReqs[i]:
body_processed = getPutValuesForJson(
finalProcessedPutReqs[i], idJsonLoad)
else:
body_processed = {}
body_arr = []
body_arr.append(body_processed)
body_arr.append(finalProcessedPutReqs[i])
for body in body_arr:
try:
headers = {'accept': '*/*'}
headers['rest-tester'] = 'RESTGPT'
if isFormData:
headers['Content-type'] = 'application/x-www-form-urlencoded'
resp = requests.post(
postUrl, json=body, headers=headers)
else:
resp = requests.put(i, json=body, headers=headers)
print("INITIAL REQUEST: "+str(resp.status_code))
if resp.status_code == 200 or resp.status_code == 201 or resp.status_code == 204:
flatten_resp = flatten(resp.json())
delete_key(flatten_resp, '_links')
allJsonKeyValues.append(flatten_resp)
prevRespJson.append(str(flatten_resp))
if resp.status_code == 401:
try:
f = open('../input/headers/' +
str(service)+'_header.json')
headers = json.load(f)
headers['accept'] = '*/*'
except:
pass
headers['rest-tester'] = 'RESTGPT'
resp = requests.put(i, json=body, headers=headers)
print("PROCESS 401: " + str(resp.status_code))
if resp.status_code == 200 or resp.status_code == 201 or resp.status_code == 204:
flatten_resp = flatten(resp.json())
delete_key(flatten_resp, '_links')
allJsonKeyValues.append(flatten_resp)
prevRespJson.append(str(flatten_resp))
except Exception as e:
print(traceback.format_exc())
print("START PATCH REQUESTS")
finalProcessedPatchReqs = {}
for k in finalReqs['PATCH'].keys():
putUrlsProcessed = set()
processGetRequests(allJsonKeyValues, k,
putUrlsProcessed, allIdFields, microservices)
putUrlsProcessed = list(putUrlsProcessed)
replaceAdditionalParams(putUrlsProcessed)
for j in putUrlsProcessed:
finalProcessedPatchReqs[j] = finalReqs['PATCH'][k]
idJsonDump = json.dumps(allIdFields, default=set_default)
idJsonLoad = json.loads(idJsonDump)
for i in finalProcessedPatchReqs:
if '{' not in i:
print("PATCH URL: " + i)
body_processed = getPutValuesForJson(
finalProcessedPatchReqs[i], idJsonLoad)
body_arr = []
body_arr.append(body_processed)
body_arr.append(finalProcessedPatchReqs[i])
for body in body_arr:
try:
headers = {'accept': '*/*'}
headers['rest-tester'] = 'RESTGPT'
if isFormData:
headers['Content-type'] = 'application/x-www-form-urlencoded'
resp = requests.post(
postUrl, json=body, headers=headers)
else:
resp = requests.put(i, json=body, headers=headers)
print("INITIAL REQUEST: "+str(resp.status_code))
if resp.status_code == 200 or resp.status_code == 201 or resp.status_code == 204:
flatten_resp = flatten(resp.json())
delete_key(flatten_resp, '_links')
allJsonKeyValues.append(flatten_resp)
prevRespJson.append(str(flatten_resp))
if resp.status_code == 401:
try:
f = open('../input/headers/' +
str(service)+'_header.json')
headers = json.load(f)
headers['accept'] = '*/*'
except:
pass
headers['rest-tester'] = 'RESTGPT'
resp = requests.patch(i, json=body, headers=headers)
print("PROCESS 401: " + str(resp.status_code))
if resp.status_code == 200 or resp.status_code == 201 or resp.status_code == 204:
flatten_resp = flatten(resp.json())
delete_key(flatten_resp, '_links')
allJsonKeyValues.append(flatten_resp)
prevRespJson.append(str(flatten_resp))
except:
print(traceback.format_exc())
print("START DELETE REQUESTS")
deleteUrlsProcessed = set()
for k in finalReqs['DELETE'].keys():
processGetRequests(allJsonKeyValues, k,
deleteUrlsProcessed, allIdFields, microservices)
deleteUrlsProcessed = list(deleteUrlsProcessed)
replaceAdditionalParams(deleteUrlsProcessed)
deleteUrlsProcessed = set(deleteUrlsProcessed)
for i in deleteUrlsProcessed:
if '{' not in i:
print("DELETE URL: " + i)
try:
headers = {'accept': '*/*'}
headers['rest-tester'] = 'RESTGPT'
resp = requests.delete(i, json=body, headers=headers)
print("INITIAL REQUEST: "+str(resp.status_code))
if resp.status_code == 401:
try:
f = open('../input/headers/' +
str(service)+'_header.json')
headers = json.load(f)
headers['accept'] = '*/*'
except:
pass
headers['rest-tester'] = 'RESTGPT'
resp = requests.delete(i, json=body, headers=headers)
print("PROCESS 401: " + str(resp.status_code))
except:
print(traceback.format_exc())
if __name__ == "__main__":
# Rest GPT tool.
global service
global enable_gpt_logs
service = sys.argv[1]
try:
enable_gpt_logs = sys.argv[2]
except:
enable_gpt_logs = True
try:
runs = int(sys.argv[3])
except:
runs = 10
# Sleep 70 seconds to give the service under test time to start before requests are sent.
time.sleep(70)
# load the openai key
f = open('../input/constants.json')
val = json.load(f)
openai.api_key = val['apikey']
openai.organization = os.getenv("OPENAI_ORGANIZATION")
# input the unified swagger json
f = open('../output/uiuc-api-tester-'+str(service)+'.json')
microservices = json.load(f)
# track 1 - Single service REST
for i in range(runs):
try:
print("RUN STARTED FOR: " + str(i))
pre_run(microservices)
except Exception as e:
print(traceback.format_exc())
print("TRACK 1 DONE")
# track 2 - Microservices
try:
# get the reverse topological orders
dependency_file = open('../input/Sequence/'+str(service)+'.json')
json_dict = json.load(dependency_file)
except:
print(traceback.format_exc())
json_dict = {}
if json_dict:
for key in json_dict:
if key == 'sequence':
service_list = json_dict[key]
for sequence_list in service_list:
for sequence_service in sequence_list:
index = 0
for swagger_service in microservices:
if swagger_service['microservice'] in sequence_service.strip() or sequence_service.strip() in swagger_service['microservice']:
try:
print("RUN STARTED FOR SERVICE: " +
str(sequence_service))
pre_run([microservices[index]])
except:
print(traceback.format_exc())
index += 1
| [
"You are generating HTTP GET request url",
"Replace the params between braces in the url PLACEHOLDER with one realistic example value. Provide only the url as a response without any explanation.",
"Using the same context, substitute existing attributes present in the json with United States related data for each field and don't add additional attributes to the json and return only the json response without any extra text.",
"You are an assistant that provides sample json data for HTTP POST requests. These are a sequence of HTTP requests so please use the same context in subsequent requests",
"Can you logically order these POST URLs without any additional information as a comma separated line PLACEHOLDER. Return only the urls as a comma separated string",
"Can you give one valid path param value for the PLACEHOLDER param in the POST URL PLACEHOLDER without any other information. If you are unable to generate a value provide one unique identifier without any other information or text.Do not use abbreviations. Output should be a single word.",
"using the same context provide one json data that follows the key value information in : PLACEHOLDER. Don't add any additional attributes and respond with only a json without additional information.",
"using the same context and reusing the id fields from the json PLACEHOLDER provide one json data that follows the json structure: PLACEHOLDER. Don't add any additional attributes and respond with only a json without additional information.",
"You are working with HTTP POST request URLs. You will only provide a single word output.",
"The previous POST request returned the json: PLACEHOLDER",
"using the same context provide one json data that follows the key value information in: PLACEHOLDER. Use PLACEHOLDER as reference to substitute for values in required places. Don't add any additional attributes and respond with only a json without additional information.",
"The previous POST request returned the json: PLACEHOLDER and some fields need to be populated with values in PLACEHOLDER",
"You are working with HTTP POST request URLs",
"You are a helpful assistant that provides sample json data for HTTP PUT requests using the same context as the previous POST and GET requests.",
"using the same context and reusing the attribute values from the previous response, provide one json data that follows the json structure: PLACEHOLDER. Don't add any additional attributes and respond with only a json without additional information.",
"For values that could not be found from above context, use PLACEHOLDER for the same. For dates use the format: yyyy-MM-dd'T'HH:mm:ss. Add +1 country code for phone numbers only if phone number is present in the json struture given. Return strong passwords for password field only if password is present in the json context given. Please provide full form values for all attributes in provided json structure",
"You are a helpful assistant that provides sample json data for HTTP POST requests. These are a sequence of HTTP requests so please use the same context in subsequent requests"
] |
2024-01-10 | AigizK/datasets | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
howpublished{\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
"""Yields examples."""
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
| [] |
2024-01-10 | microsoft/RegionCLIP | detectron2~checkpoint~detection_checkpoint.py | # Copyright (c) Facebook, Inc. and its affiliates.
import logging
import os
import pickle
import torch
from fvcore.common.checkpoint import Checkpointer
from torch.nn.parallel import DistributedDataParallel
import detectron2.utils.comm as comm
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.file_io import PathManager
from .c2_model_loading import align_and_update_state_dicts
from .clip_model_loading import align_and_update_state_dicts_for_CLIP
class DetectionCheckpointer(Checkpointer):
"""
Same as :class:`Checkpointer`, but is able to:
1. handle models in detectron & detectron2 model zoo, and apply conversions for legacy models.
2. correctly load checkpoints that are only available on the master worker
"""
def __init__(self, model, save_dir="", *, save_to_disk=None, bb_rpn_weights=False, **checkpointables):
is_main_process = comm.is_main_process()
super().__init__(
model,
save_dir,
save_to_disk=is_main_process if save_to_disk is None else save_to_disk,
**checkpointables,
)
self.path_manager = PathManager
self.bb_rpn_weights = bb_rpn_weights
def load(self, path, *args, **kwargs):
need_sync = False
if path and isinstance(self.model, DistributedDataParallel):
logger = logging.getLogger(__name__)
path = self.path_manager.get_local_path(path)
has_file = os.path.isfile(path)
all_has_file = comm.all_gather(has_file)
if not all_has_file[0]:
raise OSError(f"File {path} not found on main worker.")
if not all(all_has_file):
logger.warning(
f"Not all workers can read checkpoint {path}. "
"Training may fail to fully resume."
)
# TODO: broadcast the checkpoint file contents from main
# worker, and load from it instead.
need_sync = True
if not has_file:
path = None # don't load if not readable
ret = super().load(path, *args, **kwargs)
if need_sync:
logger.info("Broadcasting model states from main worker ...")
if TORCH_VERSION >= (1, 7):
self.model._sync_params_and_buffers()
return ret
def _load_file(self, filename):
if filename.endswith(".pkl"):
with PathManager.open(filename, "rb") as f:
data = pickle.load(f, encoding="latin1")
if "model" in data and "__author__" in data:
# file is in Detectron2 model zoo format
self.logger.info("Reading a file from '{}'".format(data["__author__"]))
return data
else:
# assume file is from Caffe2 / Detectron1 model zoo
if "blobs" in data:
# Detection models have "blobs", but ImageNet models don't
data = data["blobs"]
data = {k: v for k, v in data.items() if not k.endswith("_momentum")}
return {"model": data, "__author__": "Caffe2", "matching_heuristics": True}
elif filename.endswith(".pyth"):
# assume file is from pycls; no one else seems to use the ".pyth" extension
with PathManager.open(filename, "rb") as f:
data = torch.load(f)
assert (
"model_state" in data
), f"Cannot load .pyth file {filename}; pycls checkpoints must contain 'model_state'."
model_state = {
k: v
for k, v in data["model_state"].items()
if not k.endswith("num_batches_tracked")
}
return {"model": model_state, "__author__": "pycls", "matching_heuristics": True}
elif "OAI_CLIP" in filename:
# assume file is from OpenAI CLIP pre-trained model
loaded = super()._load_file(filename) # load native pth checkpoint
if "model" not in loaded:
loaded = {"model": loaded}
return {"model": loaded["model"], "__author__": "OAI_CLIP", "matching_heuristics": True}
loaded = super()._load_file(filename) # load native pth checkpoint
if "model" not in loaded:
loaded = {"model": loaded}
return loaded
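# Match and convert checkpoint keys to the current model's state dict (legacy Caffe2/Detectron or CLIP-style names) before the standard load.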
def _load_model(self, checkpoint):
if checkpoint.get("matching_heuristics", False) or self.bb_rpn_weights:
self._convert_ndarray_to_tensor(checkpoint["model"])
# convert weights by name-matching heuristics
if checkpoint.get("__author__", "NA") == "OAI_CLIP" or self.bb_rpn_weights: # for OAI_CLIP or 2nd ckpt (offline modules)
checkpoint["model"] = align_and_update_state_dicts_for_CLIP(
self.model.state_dict(),
checkpoint["model"],
bb_rpn_weights=self.bb_rpn_weights,
)
else: # default loading
checkpoint["model"] = align_and_update_state_dicts(
self.model.state_dict(),
checkpoint["model"],
c2_conversion=checkpoint.get("__author__", None) == "Caffe2",
)
# for non-caffe2 models, use standard ways to load it
incompatible = super()._load_model(checkpoint)
del checkpoint # try saving memory
model_buffers = dict(self.model.named_buffers(recurse=False))
for k in ["pixel_mean", "pixel_std"]:
# Ignore missing key message about pixel_mean/std.
# Though they may be missing in old checkpoints, they will be correctly
# initialized from config anyway.
if k in model_buffers:
try:
incompatible.missing_keys.remove(k)
except ValueError:
pass
return incompatible | [] |
2024-01-10 | dhruvramani/coartrl | transition~dataset.py | # code from openai baselines
# https://github.com/openai/baselines/blob/master/baselines/common/dataset.py
import numpy as np
class Dataset(object):
def __init__(self, data_map, deterministic=False, shuffle=True):
self.data_map = data_map
self.deterministic = deterministic
self.enable_shuffle = shuffle
self.n = next(iter(data_map.values())).shape[0]
self._next_id = 0
self.shuffle()
def shuffle(self):
if self.deterministic:
return
perm = np.arange(self.n)
np.random.shuffle(perm)
for key in self.data_map:
self.data_map[key] = self.data_map[key][perm]
self._next_id = 0
def next_batch(self, batch_size):
if self._next_id >= self.n and self.enable_shuffle:
self.shuffle()
cur_id = self._next_id
cur_batch_size = min(batch_size, self.n - self._next_id)
self._next_id += cur_batch_size
data_map = dict()
for key in self.data_map:
data_map[key] = self.data_map[key][cur_id:cur_id+cur_batch_size]
return data_map
def iterate_once(self, batch_size):
if self.enable_shuffle: self.shuffle()
while self._next_id <= self.n - batch_size:
yield self.next_batch(batch_size)
self._next_id = 0
def iterate_times(self, batch_size, times):
if self.enable_shuffle: self.shuffle()
for x in range(times):
yield self.next_batch(batch_size)
self._next_id = 0
def subset(self, num_elements, deterministic=True):
data_map = dict()
for key in self.data_map:
data_map[key] = self.data_map[key][:num_elements]
return Dataset(data_map, deterministic)
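# Yield mini-batches from parallel arrays; exactly one of num_batches or batch_size must be given.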
def iterbatches(arrays, *, num_batches=None, batch_size=None, shuffle=True, include_final_partial_batch=True):
assert (num_batches is None) != (batch_size is None), 'Provide num_batches or batch_size, but not both'
arrays = tuple(map(np.asarray, arrays))
n = arrays[0].shape[0]
assert all(a.shape[0] == n for a in arrays[1:])
inds = np.arange(n)
if shuffle: np.random.shuffle(inds)
sections = np.arange(0, n, batch_size)[1:] if num_batches is None else num_batches
for batch_inds in np.array_split(inds, sections):
if include_final_partial_batch or len(batch_inds) == batch_size:
yield tuple(a[batch_inds] for a in arrays) | [] |
2024-01-10 | 124135417/GPT | stt.py | import speech_recognition as sr
import openai
# Initialize recognizer
recognizer = sr.Recognizer()
# Capture audio from the microphone
with sr.Microphone() as source:
print("Say something!")
audio = recognizer.listen(source)
# Transcribe speech to text
try:
speech_text = recognizer.recognize_google(audio)
print("Transcribed text: " + speech_text)
except sr.UnknownValueError:
print("Could not understand audio")
except sr.RequestError as e:
print("Error: {0}".format(e))
openai.api_key = 'your-api-key-here'
# Send text to GPT and get a response
response = openai.Completion.create(
engine="gpt-4-1106-preview", # or another model
prompt=speech_text,
max_tokens=50 # Adjust based on your needs
)
gpt_response = response.choices[0].text.strip()
print("GPT response: ", gpt_response)
| [] |
2024-01-10 | Jordan-Mesches/Auto-GPT | autogpt~commands~image_gen.py | """ Image Generation Module for AutoGPT."""
import io
import os.path
import uuid
from base64 import b64decode
import openai
import requests
from PIL import Image
from autogpt.config import Config
from autogpt.workspace import path_in_workspace
CFG = Config()
def generate_image(prompt: str) -> str:
"""Generate an image from a prompt.
Args:
prompt (str): The prompt to use
Returns:
str: The filename of the image
"""
filename = f"{str(uuid.uuid4())}.jpg"
# DALL-E
if CFG.image_provider == "dalle":
return generate_image_with_dalle(prompt, filename)
elif CFG.image_provider == "sd":
return generate_image_with_hf(prompt, filename)
else:
return "No Image Provider Set"
def generate_image_with_hf(prompt: str, filename: str) -> str:
"""Generate an image with HuggingFace's API.
Args:
prompt (str): The prompt to use
filename (str): The filename to save the image to
Returns:
str: The filename of the image
"""
API_URL = (
"https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"
)
if CFG.huggingface_api_token is None:
raise ValueError(
"You need to set your Hugging Face API token in the config file."
)
headers = {"Authorization": f"Bearer {CFG.huggingface_api_token}"}
response = requests.post(
API_URL,
headers=headers,
json={
"inputs": prompt,
},
)
image = Image.open(io.BytesIO(response.content))
print(f"Image Generated for prompt:{prompt}")
image.save(path_in_workspace(filename))
return f"Saved to disk:{filename}"
def generate_image_with_dalle(prompt: str, filename: str) -> str:
"""Generate an image with DALL-E.
Args:
prompt (str): The prompt to use
filename (str): The filename to save the image to
Returns:
str: The filename of the image
"""
openai.api_key = CFG.openai_api_key
response = openai.Image.create(
prompt=prompt,
n=1,
size="256x256",
response_format="b64_json",
)
print(f"Image Generated for prompt:{prompt}")
image_data = b64decode(response["data"][0]["b64_json"])
with open(path_in_workspace(filename), mode="wb") as png:
png.write(image_data)
return f"Saved to disk:{filename}"
| [] |
2024-01-10 | Jordan-Mesches/Auto-GPT | autogpt~llm_utils.py | from __future__ import annotations
import time
import uuid
from ast import List
import openai
from colorama import Fore, Style
from openai.error import APIError, RateLimitError
from revChatGPT.V1 import Chatbot
from autogpt.config import Config
from autogpt.logs import logger
CFG = Config()
openai.api_key = CFG.openai_api_key
def call_ai_function(
function: str, args: list, description: str, model: str | None = None
) -> str:
"""Call an AI function
This is a magic function that can do anything with no-code. See
https://github.com/Torantulino/AI-Functions for more info.
Args:
function (str): The function to call
args (list): The arguments to pass to the function
description (str): The description of the function
model (str, optional): The model to use. Defaults to None.
Returns:
str: The response from the function
"""
if model is None:
model = CFG.smart_llm_model
# For each arg, if any are None, convert to "None":
args = [str(arg) if arg is not None else "None" for arg in args]
# parse args to comma separated string
args = ", ".join(args)
messages = [
{
"role": "system",
"content": f"You are now the following python function: ```# {description}"
f"\n{function}```\n\nOnly respond with your `return` value.",
},
{"role": "user", "content": args},
]
return create_chat_completion(model=model, messages=messages, temperature=0)
# Overly simple abstraction until we create something better
# simple retry mechanism when getting a rate error or a bad gateway
def create_chat_completion(
messages: list, # type: ignore
use_chatgpt: bool = False,
model: str | None = None,
temperature: float = CFG.temperature,
max_tokens: int | None = None,
) -> str:
"""Create a chat completion using the OpenAI API
Args:
messages (list[dict[str, str]]): The messages to send to the chat completion
use_chatgpt (bool, optional): Whether to use the ChatGPT API. Defaults to False.
model (str, optional): The model to use. Defaults to None.
temperature (float, optional): The temperature to use. Defaults to CFG.temperature.
max_tokens (int, optional): The max tokens to use. Defaults to None.
Returns:
str: The response from the chat completion
"""
response = None
num_retries = 10
warned_user = False
if CFG.debug_mode:
print(
Fore.GREEN
+ f"Creating chat completion with model {model}, temperature {temperature},"
f" max_tokens {max_tokens}" + Fore.RESET
)
for attempt in range(num_retries):
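# Exponential backoff: wait 4s, 8s, 16s, ... before retrying after a rate limit or 502 error.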
backoff = 2 ** (attempt + 2)
try:
if use_chatgpt:
chatgpt = Chatbot(
config={"access_token": CFG.openai_chatgpt_access_token}
)
for i in range(len(messages)):
message = messages[i]
role = message.get("role", "user")
content = message.get("content", "")
# dirty check to see if this is a message we've already adapted for chatgpt
if isinstance(message, dict) and len(message) == 4:
continue
else:
# adapt the message to chatgpt format
messages[i] = {
"id": str(uuid.uuid4()),
"role": role,
"author": {"role": "user"},
"content": {"content_type": "text", "parts": [content]},
}
for data in chatgpt.post_messages(messages=messages, model=model):
response = data["message"]
elif CFG.use_azure:
response = openai.ChatCompletion.create(
deployment_id=CFG.get_azure_deployment_id_for_model(model),
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
)
else:
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
)
break
except RateLimitError:
if CFG.debug_mode:
print(
Fore.RED + "Error: ",
f"Reached rate limit, passing..." + Fore.RESET,
)
if not warned_user:
logger.double_check(
f"Please double check that you have setup a {Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. "
+ f"You can read more here: {Fore.CYAN}https://github.com/Significant-Gravitas/Auto-GPT#openai-api-keys-configuration{Fore.RESET}"
)
warned_user = True
except APIError as e:
if e.http_status == 502:
pass
else:
raise
if attempt == num_retries - 1:
raise
if CFG.debug_mode:
print(
Fore.RED + "Error: ",
f"API Bad gateway. Waiting {backoff} seconds..." + Fore.RESET,
)
time.sleep(backoff)
if response is None:
logger.typewriter_log(
"FAILED TO GET RESPONSE FROM OPENAI",
Fore.RED,
"Auto-GPT has failed to get a response from OpenAI's services. "
+ f"Try running Auto-GPT again, and if the problem the persists try running it with `{Fore.CYAN}--debug{Fore.RESET}`.",
)
logger.double_check()
if CFG.debug_mode:
raise RuntimeError(f"Failed to get response after {num_retries} retries")
else:
quit(1)
return response if use_chatgpt else response.choices[0].message["content"]
def create_embedding_with_ada(text) -> list:
"""Create an embedding with text-ada-002 using the OpenAI SDK"""
num_retries = 10
for attempt in range(num_retries):
backoff = 2 ** (attempt + 2)
try:
if CFG.use_azure:
return openai.Embedding.create(
input=[text],
engine=CFG.get_azure_deployment_id_for_model(
"text-embedding-ada-002"
),
)["data"][0]["embedding"]
else:
return openai.Embedding.create(
input=[text], model="text-embedding-ada-002"
)["data"][0]["embedding"]
except RateLimitError:
pass
except APIError as e:
if e.http_status == 502:
pass
else:
raise
if attempt == num_retries - 1:
raise
if CFG.debug_mode:
print(
Fore.RED + "Error: ",
f"API Bad gateway. Waiting {backoff} seconds..." + Fore.RESET,
)
time.sleep(backoff)
| [
"text",
"You are now the following python function: ```# PLACEHOLDER\nPLACEHOLDER```\n\nOnly respond with your `return` value.",
"content_type"
] |
2024-01-10 | while-basic/langflow | tests~test_graph.py | from typing import Type, Union
from langflow.graph.edge.base import Edge
from langflow.graph.vertex.base import Vertex
import pytest
from langchain.chains.base import Chain
from langchain.llms.fake import FakeListLLM
from langflow.graph import Graph
from langflow.graph.vertex.types import (
AgentVertex,
ChainVertex,
FileToolVertex,
LLMVertex,
PromptVertex,
ToolkitVertex,
ToolVertex,
WrapperVertex,
)
from langflow.processing.process import get_result_and_thought
from langflow.utils.payload import get_root_node
# Test cases for the graph module
# now we have three types of graph:
# BASIC_EXAMPLE_PATH, COMPLEX_EXAMPLE_PATH, OPENAPI_EXAMPLE_PATH
def get_node_by_type(graph, node_type: Type[Vertex]) -> Union[Vertex, None]:
"""Get a node by type"""
return next((node for node in graph.nodes if isinstance(node, node_type)), None)
def test_graph_structure(basic_graph):
assert isinstance(basic_graph, Graph)
assert len(basic_graph.nodes) > 0
assert len(basic_graph.edges) > 0
for node in basic_graph.nodes:
assert isinstance(node, Vertex)
for edge in basic_graph.edges:
assert isinstance(edge, Edge)
assert edge.source in basic_graph.nodes
assert edge.target in basic_graph.nodes
def test_circular_dependencies(basic_graph):
assert isinstance(basic_graph, Graph)
def check_circular(node, visited):
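# Depth-first traversal; revisiting a node already seen on the current path indicates a cycle.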
visited.add(node)
neighbors = basic_graph.get_nodes_with_target(node)
for neighbor in neighbors:
if neighbor in visited:
return True
if check_circular(neighbor, visited.copy()):
return True
return False
for node in basic_graph.nodes:
assert not check_circular(node, set())
def test_invalid_node_types():
graph_data = {
"nodes": [
{
"id": "1",
"data": {
"node": {
"base_classes": ["BaseClass"],
"template": {
"_type": "InvalidNodeType",
},
},
},
},
],
"edges": [],
}
with pytest.raises(Exception):
Graph(graph_data["nodes"], graph_data["edges"])
def test_get_nodes_with_target(basic_graph):
"""Test getting connected nodes"""
assert isinstance(basic_graph, Graph)
# Get root node
root = get_root_node(basic_graph)
assert root is not None
connected_nodes = basic_graph.get_nodes_with_target(root)
assert connected_nodes is not None
def test_get_node_neighbors_basic(basic_graph):
"""Test getting node neighbors"""
assert isinstance(basic_graph, Graph)
# Get root node
root = get_root_node(basic_graph)
assert root is not None
neighbors = basic_graph.get_node_neighbors(root)
assert neighbors is not None
assert isinstance(neighbors, dict)
# Root Node is an Agent, it requires an LLMChain and tools
# We need to check if there is a Chain in the one of the neighbors'
# data attribute in the type key
assert any(
"ConversationBufferMemory" in neighbor.data["type"]
for neighbor, val in neighbors.items()
if val
)
assert any(
"OpenAI" in neighbor.data["type"] for neighbor, val in neighbors.items() if val
)
def test_get_node_neighbors_complex(complex_graph):
"""Test getting node neighbors"""
assert isinstance(complex_graph, Graph)
# Get root node
root = get_root_node(complex_graph)
assert root is not None
neighbors = complex_graph.get_nodes_with_target(root)
assert neighbors is not None
# Neighbors should be a list of nodes
assert isinstance(neighbors, list)
# Root Node is an Agent, it requires an LLMChain and tools
# We need to check if there is a Chain in the one of the neighbors'
assert any("Chain" in neighbor.data["type"] for neighbor in neighbors)
# assert Tool is in the neighbors
assert any("Tool" in neighbor.data["type"] for neighbor in neighbors)
# Now on to the Chain's neighbors
chain = next(neighbor for neighbor in neighbors if "Chain" in neighbor.data["type"])
chain_neighbors = complex_graph.get_nodes_with_target(chain)
assert chain_neighbors is not None
# Check if there is a LLM in the chain's neighbors
assert any("OpenAI" in neighbor.data["type"] for neighbor in chain_neighbors)
# Chain should have a Prompt as a neighbor
assert any("Prompt" in neighbor.data["type"] for neighbor in chain_neighbors)
# Now on to the Tool's neighbors
tool = next(neighbor for neighbor in neighbors if "Tool" in neighbor.data["type"])
tool_neighbors = complex_graph.get_nodes_with_target(tool)
assert tool_neighbors is not None
# Check if there is an Agent in the tool's neighbors
assert any("Agent" in neighbor.data["type"] for neighbor in tool_neighbors)
# This Agent has a Tool that has a PythonFunction as func
agent = next(
neighbor for neighbor in tool_neighbors if "Agent" in neighbor.data["type"]
)
agent_neighbors = complex_graph.get_nodes_with_target(agent)
assert agent_neighbors is not None
# Check if there is a Tool in the agent's neighbors
assert any("Tool" in neighbor.data["type"] for neighbor in agent_neighbors)
# This Tool has a PythonFunction as func
tool = next(
neighbor for neighbor in agent_neighbors if "Tool" in neighbor.data["type"]
)
tool_neighbors = complex_graph.get_nodes_with_target(tool)
assert tool_neighbors is not None
# Check if there is a PythonFunction in the tool's neighbors
assert any(
"PythonFunctionTool" in neighbor.data["type"] for neighbor in tool_neighbors
)
def test_get_node(basic_graph):
"""Test getting a single node"""
node_id = basic_graph.nodes[0].id
node = basic_graph.get_node(node_id)
assert isinstance(node, Vertex)
assert node.id == node_id
def test_build_nodes(basic_graph):
"""Test building nodes"""
assert len(basic_graph.nodes) == len(basic_graph._nodes)
for node in basic_graph.nodes:
assert isinstance(node, Vertex)
def test_build_edges(basic_graph):
"""Test building edges"""
assert len(basic_graph.edges) == len(basic_graph._edges)
for edge in basic_graph.edges:
assert isinstance(edge, Edge)
assert isinstance(edge.source, Vertex)
assert isinstance(edge.target, Vertex)
def test_get_root_node(basic_graph, complex_graph):
"""Test getting root node"""
assert isinstance(basic_graph, Graph)
root = get_root_node(basic_graph)
assert root is not None
assert isinstance(root, Vertex)
assert root.data["type"] == "TimeTravelGuideChain"
# For complex example, the root node is a ZeroShotAgent too
assert isinstance(complex_graph, Graph)
root = get_root_node(complex_graph)
assert root is not None
assert isinstance(root, Vertex)
assert root.data["type"] == "ZeroShotAgent"
def test_validate_edges(basic_graph):
"""Test validating edges"""
assert isinstance(basic_graph, Graph)
# all edges should be valid
assert all(edge.valid for edge in basic_graph.edges)
def test_matched_type(basic_graph):
"""Test matched type attribute in Edge"""
assert isinstance(basic_graph, Graph)
# all edges should be valid
assert all(edge.valid for edge in basic_graph.edges)
# all edges should have a matched_type attribute
assert all(hasattr(edge, "matched_type") for edge in basic_graph.edges)
# The matched_type attribute should be in the source_types attr
assert all(edge.matched_type in edge.source_types for edge in basic_graph.edges)
def test_build_params(basic_graph):
"""Test building params"""
assert isinstance(basic_graph, Graph)
# all edges should be valid
assert all(edge.valid for edge in basic_graph.edges)
# all edges should have a matched_type attribute
assert all(hasattr(edge, "matched_type") for edge in basic_graph.edges)
# The matched_type attribute should be in the source_types attr
assert all(edge.matched_type in edge.source_types for edge in basic_graph.edges)
# Get the root node
root = get_root_node(basic_graph)
# Root node is a TimeTravelGuideChain
# which requires an llm and memory
assert isinstance(root.params, dict)
assert "llm" in root.params
assert "memory" in root.params
def test_build(basic_graph, complex_graph):
"""Test Node's build method"""
assert_agent_was_built(basic_graph)
assert_agent_was_built(complex_graph)
def assert_agent_was_built(graph):
"""Assert that the agent was built"""
assert isinstance(graph, Graph)
# Now we test the build method
# Build the Agent
result = graph.build()
# The agent should be a AgentExecutor
assert isinstance(result, Chain)
def test_agent_node_build(complex_graph):
agent_node = get_node_by_type(complex_graph, AgentVertex)
assert agent_node is not None
built_object = agent_node.build()
assert built_object is not None
def test_tool_node_build(complex_graph):
tool_node = get_node_by_type(complex_graph, ToolVertex)
assert tool_node is not None
built_object = tool_node.build()
assert built_object is not None
# Add any further assertions specific to the ToolNode's build() method
def test_chain_node_build(complex_graph):
chain_node = get_node_by_type(complex_graph, ChainVertex)
assert chain_node is not None
built_object = chain_node.build()
assert built_object is not None
# Add any further assertions specific to the ChainNode's build() method
def test_prompt_node_build(complex_graph):
prompt_node = get_node_by_type(complex_graph, PromptVertex)
assert prompt_node is not None
built_object = prompt_node.build()
assert built_object is not None
# Add any further assertions specific to the PromptNode's build() method
def test_llm_node_build(basic_graph):
llm_node = get_node_by_type(basic_graph, LLMVertex)
assert llm_node is not None
built_object = llm_node.build()
assert built_object is not None
# Add any further assertions specific to the LLMNode's build() method
def test_toolkit_node_build(openapi_graph):
toolkit_node = get_node_by_type(openapi_graph, ToolkitVertex)
assert toolkit_node is not None
built_object = toolkit_node.build()
assert built_object is not None
# Add any further assertions specific to the ToolkitNode's build() method
def test_file_tool_node_build(openapi_graph):
file_tool_node = get_node_by_type(openapi_graph, FileToolVertex)
assert file_tool_node is not None
built_object = file_tool_node.build()
assert built_object is not None
# Add any further assertions specific to the FileToolNode's build() method
def test_wrapper_node_build(openapi_graph):
wrapper_node = get_node_by_type(openapi_graph, WrapperVertex)
assert wrapper_node is not None
built_object = wrapper_node.build()
assert built_object is not None
# Add any further assertions specific to the WrapperNode's build() method
def test_get_result_and_thought(basic_graph):
"""Test the get_result_and_thought method"""
responses = [
"Final Answer: I am a response",
]
message = "Hello"
# Find the node that is an LLMNode and change the
# _built_object to a FakeListLLM
llm_node = get_node_by_type(basic_graph, LLMVertex)
assert llm_node is not None
llm_node._built_object = FakeListLLM(responses=responses)
llm_node._built = True
langchain_object = basic_graph.build()
# assert all nodes are built
assert all(node._built for node in basic_graph.nodes)
# now build again and check if FakeListLLM was used
# Get the result and thought
result, thought = get_result_and_thought(langchain_object, message)
# The result should be a str
assert isinstance(result, str)
# The thought should be a Thought
assert isinstance(thought, str)
| [] |
2024-01-10 | while-basic/langflow | tests~test_loading.py | import json
import pytest
from langchain.chains.base import Chain
from langflow.processing.process import load_flow_from_json
from langflow.graph import Graph
from langflow.utils.payload import get_root_node
def test_load_flow_from_json():
"""Test loading a flow from a json file"""
loaded = load_flow_from_json(pytest.BASIC_EXAMPLE_PATH)
assert loaded is not None
assert isinstance(loaded, Chain)
def test_get_root_node():
with open(pytest.BASIC_EXAMPLE_PATH, "r") as f:
flow_graph = json.load(f)
data_graph = flow_graph["data"]
nodes = data_graph["nodes"]
edges = data_graph["edges"]
graph = Graph(nodes, edges)
root = get_root_node(graph)
assert root is not None
assert hasattr(root, "id")
assert hasattr(root, "data")
| [] |
2024-01-10 | while-basic/langflow | src~backend~langflow~processing~process.py | import contextlib
import io
from langchain.schema import AgentAction
import json
from langflow.interface.run import (
build_langchain_object_with_caching,
get_memory_key,
update_memory_keys,
)
from langflow.utils.logger import logger
from langflow.graph import Graph
from typing import Any, Dict, List, Tuple
def fix_memory_inputs(langchain_object):
"""
Given a LangChain object, this function checks if it has a memory attribute and if that memory key exists in the
object's input variables. If so, it does nothing. Otherwise, it gets a possible new memory key using the
get_memory_key function and updates the memory keys using the update_memory_keys function.
"""
if hasattr(langchain_object, "memory") and langchain_object.memory is not None:
try:
if langchain_object.memory.memory_key in langchain_object.input_variables:
return
except AttributeError:
input_variables = (
langchain_object.prompt.input_variables
if hasattr(langchain_object, "prompt")
else langchain_object.input_keys
)
if langchain_object.memory.memory_key in input_variables:
return
possible_new_mem_key = get_memory_key(langchain_object)
if possible_new_mem_key is not None:
update_memory_keys(langchain_object, possible_new_mem_key)
def format_actions(actions: List[Tuple[AgentAction, str]]) -> str:
"""Format a list of (AgentAction, answer) tuples into a string."""
output = []
for action, answer in actions:
log = action.log
tool = action.tool
tool_input = action.tool_input
output.append(f"Log: {log}")
if "Action" not in log and "Action Input" not in log:
output.append(f"Tool: {tool}")
output.append(f"Tool Input: {tool_input}")
output.append(f"Answer: {answer}")
output.append("") # Add a blank line
return "\n".join(output)
def get_result_and_thought(langchain_object, message: str):
"""Get result and thought from extracted json"""
try:
if hasattr(langchain_object, "verbose"):
langchain_object.verbose = True
chat_input = None
memory_key = ""
if hasattr(langchain_object, "memory") and langchain_object.memory is not None:
memory_key = langchain_object.memory.memory_key
if hasattr(langchain_object, "input_keys"):
for key in langchain_object.input_keys:
if key not in [memory_key, "chat_history"]:
chat_input = {key: message}
else:
chat_input = message # type: ignore
if hasattr(langchain_object, "return_intermediate_steps"):
# https://github.com/hwchase17/langchain/issues/2068
# Deactivating until we have a frontend solution
# to display intermediate steps
langchain_object.return_intermediate_steps = False
fix_memory_inputs(langchain_object)
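# Capture stdout while the chain runs so the agent's verbose trace can be returned as the "thought".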
with io.StringIO() as output_buffer, contextlib.redirect_stdout(output_buffer):
try:
# if hasattr(langchain_object, "acall"):
# output = await langchain_object.acall(chat_input)
# else:
output = langchain_object(chat_input)
except ValueError as exc:
# make the error message more informative
logger.debug(f"Error: {str(exc)}")
output = langchain_object.run(chat_input)
intermediate_steps = (
output.get("intermediate_steps", []) if isinstance(output, dict) else []
)
result = (
output.get(langchain_object.output_keys[0])
if isinstance(output, dict)
else output
)
if intermediate_steps:
thought = format_actions(intermediate_steps)
else:
thought = output_buffer.getvalue()
except Exception as exc:
raise ValueError(f"Error: {str(exc)}") from exc
return result, thought
def load_or_build_langchain_object(data_graph, is_first_message=False):
"""
Load langchain object from cache if it exists, otherwise build it.
"""
if is_first_message:
build_langchain_object_with_caching.clear_cache()
return build_langchain_object_with_caching(data_graph)
def process_graph_cached(data_graph: Dict[str, Any], message: str):
"""
Process graph by extracting input variables and replacing ZeroShotPrompt
with PromptTemplate,then run the graph and return the result and thought.
"""
# Load langchain object
is_first_message = len(data_graph.get("chatHistory", [])) == 0
langchain_object = load_or_build_langchain_object(data_graph, is_first_message)
logger.debug("Loaded langchain object")
if langchain_object is None:
# Raise user facing error
raise ValueError(
"There was an error loading the langchain_object. Please, check all the nodes and try again."
)
# Generate result and thought
logger.debug("Generating result and thought")
result, thought = get_result_and_thought(langchain_object, message)
logger.debug("Generated result and thought")
return {"result": str(result), "thought": thought.strip()}
def load_flow_from_json(path: str, build=True):
"""Load flow from json file"""
# This is done to avoid circular imports
with open(path, "r", encoding="utf-8") as f:
flow_graph = json.load(f)
data_graph = flow_graph["data"]
nodes = data_graph["nodes"]
# Substitute ZeroShotPrompt with PromptTemplate
# nodes = replace_zero_shot_prompt_with_prompt_template(nodes)
# Add input variables
# nodes = payload.extract_input_variables(nodes)
# Nodes, edges and root node
edges = data_graph["edges"]
graph = Graph(nodes, edges)
if build:
langchain_object = graph.build()
if hasattr(langchain_object, "verbose"):
langchain_object.verbose = True
if hasattr(langchain_object, "return_intermediate_steps"):
# https://github.com/hwchase17/langchain/issues/2068
# Deactivating until we have a frontend solution
# to display intermediate steps
langchain_object.return_intermediate_steps = False
fix_memory_inputs(langchain_object)
return langchain_object
return graph
| [] |
2024-01-10 | haesleinhuepf/bia-tischi | src~bia_tischi~_utilities.py | def generate_response_to_user(model, user_prompt: str):
"""Generates code and text respond for a specific user input.
To do so, it combines the user input with additional context such as
current variables and a prompt template."""
system_prompt = create_system_prompt()
# take the last n chat entries
from ._machinery import Context
n = 10
chat_history = Context.chat[-n:]
if Context.verbose:
print("\nUser prompt:", user_prompt)
print("\nSystem prompt:", system_prompt)
print_chat(chat_history)
full_response = generate_response_from_openai(model, system_prompt, user_prompt, chat_history)
if Context.verbose:
print("\n\nFull response:\n", full_response)
# Search for the code pattern in the text
import re
pattern = re.compile(r'([\s\S]*?)```python([\s\S]*?)```')
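# Group 1: any text before the first python code fence; group 2: the code inside the fence.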
match = pattern.search(full_response)
if match:
text = match.group(1).strip()
code = match.group(2).strip()
else:
text = full_response
code = None
return code, text
def create_system_prompt():
# determine useful variables and functions in context
variables = []
functions = []
from ._machinery import Context
for key, value in Context.variables.items():
if key.startswith("_"):
continue
if callable(value):
if key not in ["quit", "exit"]:
functions.append(key)
continue
variables.append(key)
libraries = {"skimage", "numpy", "scipy", "pandas", "matplotlib", "seaborn", "sklearn"}
system_prompt = f"""
If the request entails writing code, write concise, professional, high-quality bioimage analysis Python code.
The code should be as short as possible.
If there are several ways to solve the task, choose the option with the least amount of code.
The code will be executed by the user within a Jupyter notebook.
You can only use these python libraries: {",".join([str(v) for v in libraries])}.
If you create images, show them using matplotlib and save them in variables for later reuse.
The following variables are available: {",".join([str(v) for v in variables])}
Do not set the values of the variables that are available.
The following functions are available: {",".join([str(v) for v in functions])}
Before writing the code, provide a concise step-by-step plan
of what the code is going to do.
This plan must not contain any "`" characters and should be written in plain text.
Then print the code.
Importantly, the code block must start with the line:
```python
and it must end with the line:
```
There must be no text after the code block.
If the request does not require to write code, simply answer in plain text.
"""
return system_prompt
def print_chat(chat):
print("\nChat history:")
for message in chat:
role = message['role']
content = message['content']
print(role)
print(content)
def concatenate_chat_content(chat):
concatenated_chat = ""
for message in chat:
concatenated_chat += message['content']
return concatenated_chat
def output_text(text):
"""Display markdown content in the notebook."""
from IPython.display import display, Markdown
display(Markdown(text))
def generate_response_from_openai(model: str, system_prompt: str, user_prompt: str, chat_history):
"""A prompt helper function that sends a message to openAI
and returns only the text response.
"""
import openai
system = [{"role": "system", "content": system_prompt}]
user = [{"role": "user", "content": user_prompt}]
response = openai.ChatCompletion.create(
messages=system + chat_history + user,
model=model) # stream=True would be nice
reply = response['choices'][0]['message']['content']
from ._machinery import Context
Context.chat += user + [{"role": "assistant", "content": reply}]
return reply
def available_models():
import openai
models = openai.Model.list()
for model in models['data']:
print(model['id'])
| [
"\n If the request entails writing code, write concise professional bioimage analysis high-quality python code.\n The code should be as short as possible.\n If there are several ways to solve the task, chose the option with the least amount of code.\n The code will be executed by the user within a Jupyter notebook.\n You can only use these python libraries: PLACEHOLDER.\n If you create images, show them using matplotlib and save them in variables for later reuse.\n The following variables are available: PLACEHOLDER\n Do not set the values of the variables that are available.\n The following functions are available: PLACEHOLDER\n \n Before writing the code, provide a concise step-by-step plan \n of what the code will be going to do. \n This plan must not contain any \"`\" characters and should be written in plain text.\n Then print the code.\n Importantly, the code block must start with the line: \n ```python\n and it must end with the line:\n ```\n There must be no text after the code block.\n If the request does not require to write code, simply answer in plain text.\n ",
"PLACEHOLDERPLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | hanskyy/train-robot-arm-from-scratch-master | final~mujoco-py-DDPG-HER.py | #######version differer
import numpy as np
import gym
import mujoco_py
import tensorflow as tf
import tensorflow.contrib as tc
from collections import deque
env = gym.make('FetchReach-v2')
action_size = 4
state_size = 13
action_bound = env.action_space.high
batch_size = 64
import random
import matplotlib.pyplot as plt
###################seeding###################
seeding = 1234
np.random.seed(seeding)
tf.set_random_seed(seeding)
env.seed(seeding)
######################################
class actor():
def __init__(self, state_size, action_size, action_bound, sess, ac_lr = 0.0001, tau = 0.001):
self.state_size = state_size
self.action_size = action_size
self.action_bound = action_bound
self.sess = sess
self.lr = ac_lr
self.batch_size = 128
self.tau = tau
#self.input = tf.placeholder(tf.float32, [None, self.state_size], name = "State_actor_input")
#self.target_input = tf.placeholder(tf.float32, [None, self.state_size], name = "State_target_actor_input")
with tf.variable_scope('actor_net'):
self.input_actor, self.out_, self.scaled_out = self.actor_model()
self.ac_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='actor_net')
with tf.variable_scope('actor_target_net'):
self.input_target_actor, self.target_out_, self.target_scaled_out = self.actor_model()
self.ac_target_pram = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = 'actor_target_net')
#print(len(self.ac_params))
self.update_target_in = [self.ac_target_pram[i].assign ( tf.multiply(self.ac_target_pram[i], 0) + tf.multiply(self.ac_params[i],1) ) for i in range(len(self.ac_target_pram))]
self.update_target = [self.ac_target_pram[i].assign ( tf.multiply(self.ac_target_pram[i], 1-self.tau) + tf.multiply(self.ac_params[i],self.tau) ) for i in range(len(self.ac_target_pram))]
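# Polyak (soft) target update: theta_target <- (1 - tau) * theta_target + tau * theta; update_target_in copies the weights outright (tau = 1).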
self.critic_grad = tf.placeholder(tf.float32,[None, self.action_size], name = 'critic_grad')
self.actor_grad = tf.gradients(self.scaled_out, self.ac_params, -self.critic_grad)
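# Deterministic policy gradient: feeding the negated critic gradient as grad_ys makes apply_gradients ascend Q(s, mu(s)); the division below averages over self.batch_size.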
self.actor_gradients = list(map(lambda x: tf.div(x, self.batch_size), self.actor_grad))
self.loss = tf.train.AdamOptimizer(self.lr).apply_gradients(zip(self.actor_gradients, self.ac_params))
def actor_model(self):
inputs = tf.placeholder(tf.float32, [None, self.state_size])
x = tc.layers.layer_norm(inputs, center=True, scale=True) #, begin_norm_axis=0)
h1 = tf.layers.dense(x, 400, activation = tf.nn.relu )
h1 = tc.layers.layer_norm(h1, center=True, scale=True)
h2 = tf.layers.dense(h1, 300, activation = tf.nn.relu )
h2 = tc.layers.layer_norm(h2, center=True, scale=True)
k_init = tf.random_uniform_initializer(minval=-0.003, maxval=0.003)
out = tf.layers.dense(h2, self.action_size, activation = tf.nn.tanh,kernel_initializer = k_init)
scaled_out = tf.multiply(out, self.action_bound)
return inputs, out, scaled_out
def get_action(self,s):
return self.sess.run(self.scaled_out, feed_dict = {self.input_actor : s})
def update_target_tar(self):
#print('---------------')
self.sess.run(self.update_target)
#return True
def get_action_target(self,s):
return self.sess.run(self.target_scaled_out, feed_dict = {self.input_target_actor : s})
def train_actor(self,s,grad):
self.sess.run(self.loss, feed_dict = {self.input_actor : s, self.critic_grad : grad})
def first_up(self):
self.sess.run(self.update_target_in)
class critic():
def __init__(self, state_size, action_size, action_bound, sess, ac_lr = 0.001, tau = 0.001):
self.state_size = state_size
self.action_size = action_size
self.action_bound = action_bound
self.sess = sess
self.lr = ac_lr
self.batch_size = 128
self.tau = tau
with tf.variable_scope('critic_net'):
self.input_critic, self.action_critic, self.value, = self.build_net()
self.cr_prams = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = 'critic_net')
with tf.variable_scope('target_critic_net'):
self.input_target_critic, self.action_target_critic, self.target_value = self.build_net()
self.target_cr_prams = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = 'target_critic_net')  # scope must match the variable_scope defined above
self.update_critic_target_in = [self.target_cr_prams[i].assign ( tf.multiply(self.target_cr_prams[i], 0) + tf.multiply(self.cr_prams[i],1) ) for i in range(len(self.target_cr_prams))]
self.update_critic_target = [self.target_cr_prams[i].assign ( tf.multiply(self.target_cr_prams[i], 1 - self.tau) + tf.multiply(self.cr_prams[i], self.tau) ) for i in range(len(self.target_cr_prams))]
self.pred = tf.placeholder(tf.float32, [None, 1], name= 'pred_value')
self.loss = tf.reduce_mean(tf.square(self.pred - self.value))
self.optimize = tf.train.AdamOptimizer(self.lr).minimize(self.loss)
self.comment_grad = tf.gradients(self.value, self.action_critic)
def build_net(self):
inputs = tf.placeholder(tf.float32, [None, self.state_size])
x = tc.layers.layer_norm(inputs, center=True, scale=True) #, begin_norm_axis=0)
action = tf.placeholder(tf.float32, [None, self.action_size])
h1 = tf.layers.dense(x, 200, activation = tf.nn.relu)
h1 = tc.layers.layer_norm(h1, center=True, scale=True)
h11 = tf.layers.dense(h1, 200,activation = tf.nn.relu)
a1 = tf.layers.dense(action, 200)
h1_ = tf.concat([h11,a1],axis = 1)
h1_ = tc.layers.layer_norm(h1_, center=True, scale=True)
h2 = tf.layers.dense(h1_, 300, activation=tf.nn.relu)
h2 = tc.layers.layer_norm(h2, center=True, scale=True)
k_init = tf.random_uniform_initializer(minval=-0.003, maxval=0.003)
out_cr = tf.layers.dense(h2, 1,kernel_initializer=k_init)
return inputs, action, out_cr
def get_val(self,s,a):
return self.sess.run(self.value,feed_dict={self.input_critic : s, self.action_critic : a})
def update_critic_target_net(self):
#print('------------++')
self.sess.run(self.update_critic_target)
def train_critic(self,s,a,tar):
self.sess.run(self.optimize, feed_dict = {self.input_critic : s , self.action_critic : a, self.pred : tar})
def get_val_target(self,s,a):
return self.sess.run(self.target_value, feed_dict = {self.input_target_critic : s, self.action_target_critic: a})
def get_grad(self,s,a):
return self.sess.run(self.comment_grad, feed_dict = {self.input_critic : s, self.action_critic: a})
def first_up(self):
self.sess.run(self.update_critic_target_in)
#############This noise code is copied from openai baseline #########OrnsteinUhlenbeckActionNoise############# Openai Code#########
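# Ornstein-Uhlenbeck process: temporally correlated noise added to the deterministic policy's actions for exploration.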
class OrnsteinUhlenbeckActionNoise:
def __init__(self, mu, sigma=0.3, theta=.15, dt=1e-2, x0=None):
self.theta = theta
self.mu = mu
self.sigma = sigma
self.dt = dt
self.x0 = x0
self.reset()
def __call__(self):
x = self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + \
self.sigma * np.sqrt(self.dt) * np.random.normal(size=self.mu.shape)
self.x_prev = x
return x
def reset(self):
self.x_prev = self.x0 if self.x0 is not None else np.zeros_like(self.mu)
def __repr__(self):
return 'OrnsteinUhlenbeckActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma)
#########################################################################################################
def store_sample(s,a,r,d,info ,s2):
ob_1 = np.reshape(s['observation'],(1,10))
ac_1 = np.reshape(s['achieved_goal'],(1,3))
de_1 = np.reshape(s['desired_goal'],(1,3))
ob_2 = np.reshape(s2['observation'],(1,10))
ac_2 = np.reshape(s2['achieved_goal'],(1,3))
de_2 = np.reshape(s2['desired_goal'],(1,3))
s_1 = np.concatenate([ob_1,ac_1],axis=1)
s2_1 = np.concatenate([ob_2,ac_1],axis=1)
s_2 = np.concatenate([ob_1,de_1],axis=1)
s2_2 = np.concatenate([ob_2,de_2],axis=1)
substitute_goal = s['achieved_goal'].copy()
substitute_reward = env.compute_reward(s['achieved_goal'], substitute_goal, info)
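# Hindsight Experience Replay: store the original transition plus a relabelled copy whose goal is the achieved goal, so the episode also provides a "successful" example.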
replay_memory.append((s_2,a,r,d,s2_2)) # obs(), action, reward, done, obs_next()
replay_memory.append((s_1,a,substitute_reward,True,s2_1)) # newgoal_obs, action,substitute_reward, true, newgoal_nextobs
def stg(s):
#print(len(s))
ob_1 = np.reshape(s['observation'],(1,10))
de_1 = np.reshape(s['desired_goal'],(1,3))
return np.concatenate([ob_1,de_1],axis=1)
sess = tf.Session()
ac = actor(state_size, action_size, action_bound, sess)
cr = critic(state_size, action_size, action_bound, sess)
s = env.reset()
noice = OrnsteinUhlenbeckActionNoise(mu=np.zeros(action_size))
sess.run(tf.global_variables_initializer())
#save_path = 'DDPG.ckpt'
saver = tf.train.Saver()
#saver = tf.train.Saver()
replay_memory = deque(maxlen = 100000)
max_ep = 50000
max_ep_len = 200
gamma = 0.99
R_graph = deque(maxlen = 10)
R_graph_= []
#cr.first_up()
#ac.first_up()
for ii in range(max_ep):
s = env.reset()
#print(s.shape)
#s = s[np.newaxis, :]
R,r = 0,0
for kk in range(max_ep_len):
#print('++')
ss = stg(s)
a = ac.get_action(ss)
#print(a)
a += noice()
#print(a)
a=a[0]
#env.render()
s2,r,d,info=env.step(a)
#print(s2)
#s2=s2[np.newaxis, :]
r_2 = r
r=r
store_sample(s,a,r,d,info,s2)
#replay_memory.append((s,a,r,d,s2))
s = s2
R += r_2
if batch_size < len(replay_memory):
minibatch = random.sample(replay_memory, batch_size)
s_batch, a_batch,r_batch, d_batch, s2_batch = [], [], [], [], []
for s_, a_, r_, d_, s2_ in minibatch:
s_batch.append(s_)
s2_batch.append(s2_)
a_batch.append(a_)
r_batch.append(r_)
d_batch.append(d_)
s_batch = np.squeeze(np.array(s_batch),axis=1)
s2_batch = np.squeeze(np.array(s2_batch),axis=1)
r_batch=np.reshape(np.array(r_batch),(len(r_batch),1))
a_batch=np.array(a_batch)
d_batch = np.reshape(np.array(d_batch) + 0, (batch_size, 1))
#print(d_batch)
a2 = ac.get_action_target(s2_batch)
#print(a2.shape)
v2 = cr.get_val_target(s2_batch,a2)
#print(v2.shape)
#for
tar = np.zeros((batch_size, 1))
for o in range(batch_size):
tar[o] = r_batch[o] + gamma * v2[o]
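# Note: the bootstrap term is not masked at terminal transitions; a standard DDPG target would be r + gamma * (1 - d) * Q'(s', a') using d_batch.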
#print(tar.shape)
cr.train_critic(s_batch,a_batch,tar)
#print(loss_cr)
a_out = ac.get_action(s_batch)
kk = cr.get_grad(s_batch,a_out)[0]
#print(kk)
ac.train_actor(s_batch, kk)
cr.update_critic_target_net()
ac.update_target_tar()
#exit()
R_graph.append(R)
R_graph_.append(R)
#print(ii, R)
if ii % 20 ==0 :
saver.save(sess, 'model/kk')
print(ii, R, np.mean(np.array(R_graph)), np.max(np.array(R_graph)))
if (ii+1) % 100 == 0:
plt.plot(np.array(R_graph_)) | [] |
2024-01-10 | Raghavan1988/arxiv-related-articles-summarizer | related_articles.py | #wget https://arxiv-r-1228.s3.us-west-1.amazonaws.com/arxiv-metadata-oai-snapshot.json
#wget https://arxiv-r-1228.s3.us-west-1.amazonaws.com/annoy_index.ann
## Download the .ann and .json files from Amazon S3
import json
from sentence_transformers import SentenceTransformer, util
import numpy as np
import torch
import time
import arxiv
from annoy import AnnoyIndex
import operator
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.document_loaders import ArxivLoader
path = "arxiv-metadata-oai-snapshot.json"
data = []
def preprocess(path):
start = time.time()
print("Begin Loading of Json file:"+path)
with open(path,'r') as f:
for line in f:
data.append(json.loads(line))
end = time.time()
print("Time taken to load json file: ",end-start)
start = time.time()
sents = []
for i in range(len(data)):
sents.append(data[i]['title']+'[SEP]'+data[i]['abstract'])
end = time.time()
print("Time taken to create sentences: ",end-start)
return sents
def generate_annoy(fn):
n_trees = 256 #Number of trees used for Annoy. More trees => better recall, worse run-time
embedding_size = 768 #Size of embeddings
top_k_hits = 5 #Output k hits
annoy_index = AnnoyIndex(embedding_size, 'angular')
annoy_index.load(fn)
return annoy_index
def search(query,annoy_index,model):
query_embedding = model.encode(query,convert_to_numpy=True)
top_k_hits = 5
hits = annoy_index.get_nns_by_vector(query_embedding, top_k_hits, include_distances=True)
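# Approximate nearest-neighbour lookup in the SPECTER title+abstract embedding space (angular distance).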
return hits
def print_results(hits,sents):
print ("entering results")
response = ""
for i in range(len(hits[0])):
response += "<br>"
response += "<b><a href=https://arxiv.org/abs/" + data[hits[0][i]]['id'] +">" + sents[hits[0][i]].split('[SEP]')[0] + "</a>"
response += "</b><br>"
response += "Abstract:" + sents[hits[0][i]].split('[SEP]')[1]
response += "Authors:" + data[hits[0][i]]['authors']
response += "<br>"
print ("result " + str(i))
print ("Title:" + sents[hits[0][i]].split('[SEP]')[0])
print ("Authors:" + data[hits[0][i]]['authors'] )
print ("ID:" + data[hits[0][i]]['id'])
return response
model = SentenceTransformer('sentence-transformers/allenai-specter', device='cuda')
sents = preprocess(path)
an = generate_annoy("annoy_index.ann")
open_ai_key = "Enter your KEY HERE"
llm = OpenAI(openai_api_key=open_ai_key, model_name= "gpt-3.5-turbo-16k")
from flask import Flask, request
app = Flask(__name__)
@app.route('/search', methods=['GET'])
def search_endpoint():
args = request.args
query = args.get('q')
tokens = query.split('/')
id = tokens[-1] ## https://arxiv.org/abs/2310.03717
docs = ArxivLoader(query=id, load_max_docs=2).load()
print(docs[0].metadata['Title'])
title = docs[0].metadata['Title']
abstract = docs[0].metadata['Summary']
page_content = docs[0].page_content[:40000]
article = title + '[SEP]' + abstract + '[SEP]' + page_content
print("Some related papers:")
related = search(article,an,model)
html_response = print_results(related, sents)
template = """ Take a deep breath. You are a researcher. Your task is to read the RESEARCH ARTICLE and generate 3 KEY POINTS of it in your own words and generate AN IDEA OF FUTURE EXTENSION based on the RELATED ARTICLES. Generate one actionable idea for extending the RESEARCH.
RESEARCH ARTICLE: {article}
RELATED ARTICLES: {related}
INSTRUCTIONS
1. Read the TITLE, ABSTRACT and the CONTENT of the RESEARCH ARTICLE.
2. Generate 3 KEY POINTS of the RESEARCH ARTICLE in your own words. Each Key Point should be a bullet point of 10 WORDS or less.
3. Read the RELATED ARTICLES
4. Generate an IDEA OF FUTURE EXTENSION of the RESEARCH ARTICLE based on the RELATED ARTICLES.
5. The IDEA OF FUTURE EXTENSION should be ONE sentence.
6. Generate one actionable idea for extending the RESEARCH with Light Bulb emoji.
7. STRICTLY generate the response in json format using the TYPESCRIPT SCHEMA below. Insert a line break after each bullet point.
SCHEMA
response:
KEYPOINT1: String,
KEYPOINT2: String,
KEYPOINT3: String,
FUTURE_EXTENSION_IDEA: String,
ACTIONABLE_IDEA: String"""
prompt = PromptTemplate(template=template, input_variables=["article", "related"])
llm_chain = LLMChain(prompt=prompt, llm=llm)
output = ""
try:
output = llm_chain.run({'article': article, 'related':related})
print (output)
except Exception as e:
print (e)
html_content = "<html> <head> <title> Arxiv Summary for " + title + " </title> </head> <body> <h1> "+ title + " </h1> <h2> Summary </h2> <p>"
jsonD = json.loads(output)
html_content += "<br> 1." + jsonD['KEYPOINT1']
html_content += "<br> 2." + jsonD['KEYPOINT2']
html_content += "<br> 3. " + jsonD['KEYPOINT3']
html_content += "<br> <b>FUTURE EXTENSION IDEA:</b> " + jsonD['FUTURE_EXTENSION_IDEA']
html_content += "<br> <b>ACTIONABLE IDEA: </b>" + jsonD['ACTIONABLE_IDEA']
html_content += "</p> Related Articles: <br>"
html_content += html_response
html_content += "</html>"
return html_content
| [
" Take a deep breath. You are a researcher. Your task is to read the RESEARCH ARTICLE and generate 3 KEY POINTS of it in your own words and generate AN IDEA OF FUTURE EXTENSION based on the RELATED ARTICLES. Generate one actionable idea for extending the RESEARCH.\n RESEARCH ARTICLE: {article}\n RELATED ARTICLES: {related}\n INSTRUCTIONS\n 1. Read the TITLE, ABSTRACT and the CONTENT of the RESEARCH ARTICLE.\n 2. Generate 3 KEY POINTS of the RESEARCH ARTICLE in your own words. Each Key Point should be a bullet point of 10 WORDS are less.\n 3. Read the RELATED ARTICLES\n 4. Generate an IDEA OF FUTURE EXTENSION of the RESEARCH ARTICLE based on the RELATED ARTICLES.\n 5. The IDEA OF FUTURE EXTENSION should be ONE sentence.\n 6. Generate one actionable idea for extending the RESEARCH with Light Bulb emoji.\n 7. STRICTLY generate the response in json format using the TYPESCRIPT SCHEMA below. Insert a line break after each bullet point.\n SCHEMA\n response:\n KEYPOINT1: String,\n KEYPOINT2: String,\n KEYPOINT3: String,\n FUTURE_EXTENSION_IDEA: String,\n ACTIONABLE_IDEA: String"
] |
2024-01-10 | inthewaterwheel/ice | ice~recipes~adherence_tfew_paragraph.py | import re
from collections import Counter
from collections.abc import Awaitable
from collections.abc import Callable
from typing import Literal
from pydantic import BaseModel
from pydantic import BaseSettings
from structlog.stdlib import get_logger
from transformers.models.auto.tokenization_auto import AutoTokenizer
from transformers.models.gpt2.tokenization_gpt2_fast import GPT2TokenizerFast
from ice.apis.openai import openai_complete
from ice.evaluation.evaluate_recipe_result import RecipeResult
from ice.metrics.gold_standards import list_experiments
from ice.paper import Paper
from ice.paper import Paragraph
from ice.paper import split_sentences
from ice.recipe import Recipe
from ice.utils import filter_async
from ice.utils import map_async
from ice.utils import max_by_value
from ..trace import recorder
from ..trace import trace
gpt2_tokenizer: GPT2TokenizerFast = AutoTokenizer.from_pretrained("gpt2")
def n_tokens(prompt: str) -> int:
tokenized = gpt2_tokenizer(prompt)
return len(tokenized.input_ids)
def n_remaining_tokens(prompt: str, ensure_min: int, capacity=4097):
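# capacity defaults to 4097, the combined prompt + completion token budget of the davinci-class completion models this recipe appears to target (assumption).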
remaining = capacity - n_tokens(prompt)
if remaining < ensure_min:
raise ValueError(
f"Prompt too long by {ensure_min - remaining} tokens: {prompt}"
)
return remaining
log = get_logger()
def extract_numbers(text: str) -> list[str]:
words = text.split()
set_number_str = {
"zero",
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
"ten",
"teen",
"twenty",
"thirty",
"forty",
"fifty",
"sixty",
"seventy",
"eighty",
"ninety",
"hundred",
"thousand",
"million",
"billion",
"trillion",
"quadrillion",
"quintillion",
}
number_strings = list(filter(lambda word: word.lower() in set_number_str, words))
numbers_set = set("0123456789")
number_strings += list(
filter(lambda x: set(x).intersection(numbers_set) != set(), words)
)
# Remove parentheses
remove_parentheses = (
lambda s: s.replace("(", "")
.replace(")", "")
.replace("...", "")
.replace("..", "")
)
number_strings = list(map(remove_parentheses, number_strings))
# Remove "," or "." from the end of the number string
for i, number in enumerate(number_strings):
if number[-1] == "," or number[-1] == ".":
number_strings[i] = number[:-1]
return number_strings
N_TO_STRING: dict[int, str] = {
1: "one",
2: "two",
3: "three",
4: "four",
5: "five",
6: "six",
7: "seven",
8: "eight",
9: "nine",
10: "ten",
}
def paragraphs_to_numbered_list(paragraphs: list[str]) -> str:
return "\n".join(
f"{n}. {paragraph}".strip() for n, paragraph in enumerate(paragraphs, 1)
)
def even_shorter_intervention_generation_prompt(
paragraphs: list[str], intervention: str, final_reasoning: str | None = None
) -> str:
paragraph_n = N_TO_STRING[len(paragraphs)]
prefix = f"""From the textbook, "Critically Evaluating Interventional Studies," Chapter 3:
When evaluating the quality of a randomized controlled trial, you should also consider whether any participants dropped out of the study or failed to follow its protocols correctly. This is sometimes called "adherence," "attrition," or "compliance". If too many participants failed to receive the intervention or perform it correctly, for whatever reason, this may damage the internal validity of the study's results.
Unfortunately, papers are often not as clear as they should be when discussing adherence. For simple interventions that are accomplished in one shot (e.g., having a group of college students complete a test in a lab that takes 30 minutes), the study doesn't discuss adherence unless something unusual happened, and we can safely assume that everyone in the sample completed the study. Sometimes studies provide specific numbers or percentages of people who dropped out (attrited), and sometimes they only provide qualitative descriptions, such as saying that adherence was "generally good." Often, papers are genuinely unclear, and we can only conclude that there is not enough information in the paper for us to know anything about adherence or compliance.
Let's look at excerpts from six different papers to see what information, if any, they provide about the study's adherence, attrition, or compliance. We'll have to identify what each extract tells us about adherence (some extracts may only discuss methodology or results, telling us nothing about adherence), and for some, we may have to conclude that the attrition or compliance is simply unclear.
First, consider these three excerpts from a paper studying the Tumaini game:
1. Intervention arm participants completed a 45-minute informational onboarding session, including instructions on the interface, technology, and game content. They were instructed to play at least 1 hour per day for the 16 days of the study and asked not to share their own gameplay profile with others. The game interface allows for 5 additional players' profiles so that others may play without compromising the enrolled player's data. Intervention participants were provided with a phone with the game preloaded and used it at their own pace for the duration of the intervention. Control participants received standard of care, namely no additional intervention beyond any existing sex education from family, school, and peers. No specific data on the content or source of this education were collected from participants. All study smartphones were returned by the participants at the end of the intervention period.
2. Preliminary cleaning of survey data was conducted in MS Excel, with additional cleaning and all analyses completed using SAS version 9.4 (SAS Institute Inc., Cary, NC, USA). All control arm participants were included in analyses. One participant from the intervention arm was removed from analyses of effect at T2 due to delayed completion of the T2 survey. His data were retained for T1-T3 analyses, as he completed all other study activities on time. Descriptive statistics on demographic questions and game feedback questions were computed.
3. We recruited and enrolled 60 adolescent participants. Half of the participants were allocated to the intervention arm. All adolescents who were recruited completed all 3 study visits, and all intervention arm participants initiated gameplay. Participant demographics are presented in Table 3 . There were no significant demographic differences between the two arms. Preliminary calculations of exposure indicate that the intervention arm played Tumaini a mean of approximately 27 hours over the 16 days of the intervention.
Let's think about what each excerpt tells us, if anything, about adherence, attrition or compliance: The first excerpt describes the study's methodology, but does not tell us how many or how well participants followed the instructions, so it does not inform us about adherence. The second excerpt tells us that all control arm participants were included in analysis, but one intervention arm participant was removed from the analysis of effect at T2 but included in the T3 analysis; this is attrition information. The third excerpt says that all participants completed all visits and that all intervention arm participants initiated gameplay; this is adherence information.
Here's all the information in this paper about adherence, attrition, and compliance: All participants completed all visits, and all intervention arm participants initiated gameplay. One intervention arm participant was not included in the T2 analysis but was included in the T3 analysis.
Second, consider these three excerpts from a paper studying antioxidant/anti-inflammatory supplement containing lemon verbena extract and omega-3 fatty acid:
1. Flow chart showing the dropout rate at different timepoints in the study.
2. Forty-eight (48) participants were enrolled for screening evaluation (Fig. 1 ) and after 3 exclusions, 45 participants were randomly assigned either to placebo or nutritional supplement groups, n = 22 and n = 23, respectively. Of these, 14 participants were withdrawn during the study for different reasons; there were 10 dropouts in the placebo group and 4 dropouts in the supplement group (treatment refusal, irregular treatment, starting on medication, or occurrence of an adverse event [AE]). Finally, 31 participants completed the study (12 in the placebo and 19 in the supplement group; Fig. 1 ).
3. Only 1 patient reported an AE (i.e., a heartburn sensation). The subject, who was in the placebo group, stopped the treatment immediately and was excluded from the study (Table 1 ). No major complications were reported by this subject.
Let's think about what each excerpt tells us, if anything, about adherence, attrition or compliance: The first excerpt refers to a flow chart showing the dropout rate, but since we do not have the figure here, we cannot conclude anything from this about the study's attrition. The second excerpt says that there were 10 dropouts in the placebo group of 22 participants and 4 dropouts in the supplement group of 23 participants, meaning that 31 participants out of the initial 45 participants after randomization completed the study. The third excerpt provides more detail for one patient in the placebo group who dropped out, stopping treatment after experiencing a heartburn sensation.
Here's all the information in this paper about adherence, attrition, and compliance: Ten of the 22 participants in the placebo group dropped out, and 4 of the 23 participants in the supplement group dropped out.
Third, consider these {paragraph_n} excerpt{"s" if len(paragraphs) > 1 else ""} from a paper studying {intervention}:
{paragraphs_to_numbered_list(paragraphs).strip()}
Let's think about what {"each" if len(paragraphs) > 1 else "this"} excerpt tells us, if anything, about adherence, attrition or compliance:""".strip()
if final_reasoning is None:
return prefix
return f"""{prefix} {final_reasoning.strip()}
Here's all the information in this paper about adherence, attrition, and compliance:""".strip()
def shorter_intervention_generation_prompt(
paragraphs: list[str], intervention: str, final_reasoning: str | None = None
) -> str:
paragraph_n = N_TO_STRING[len(paragraphs)]
prefix = f"""From the textbook, "Critically Evaluating Interventional Studies," Chapter 3:
When evaluating the quality of a randomized controlled trial, you should also consider whether any participants dropped out of the study or failed to follow its protocols correctly. This is sometimes called "adherence," "attrition," or "compliance". If too many participants failed to receive the intervention or perform it correctly, for whatever reason, this may damage the internal validity of the study's results.
Unfortunately, papers are often not as clear as they should be when discussing adherence. For simple interventions that are accomplished in one shot (e.g., having a group of college students complete a test in a lab that takes 30 minutes), the study doesn't discuss adherence unless something unusual happened, and we can safely assume that everyone in the sample completed the study. Sometimes studies provide specific numbers or percentages of people who dropped out (attrited), and sometimes they only provide qualitative descriptions, such as saying that adherence was "generally good." Often, papers are genuinely unclear, and we can only conclude that there is not enough information in the paper for us to know anything about adherence or compliance.
Let's look at excerpts from six different papers to see what information, if any, they provide about the study's adherence, attrition, or compliance. We'll have to identify what each extract tells us about adherence (some extracts may only discuss methodology or results, telling us nothing about adherence), and for some, we may have to conclude that the attrition or compliance is simply unclear.
First, consider these three excerpts from a paper studying the Tumaini game:
1. Intervention arm participants completed a 45-minute informational onboarding session, including instructions on the interface, technology, and game content. They were instructed to play at least 1 hour per day for the 16 days of the study and asked not to share their own gameplay profile with others. The game interface allows for 5 additional players' profiles so that others may play without compromising the enrolled player's data. Intervention participants were provided with a phone with the game preloaded and used it at their own pace for the duration of the intervention. Control participants received standard of care, namely no additional intervention beyond any existing sex education from family, school, and peers. No specific data on the content or source of this education were collected from participants. All study smartphones were returned by the participants at the end of the intervention period.
2. Preliminary cleaning of survey data was conducted in MS Excel, with additional cleaning and all analyses completed using SAS version 9.4 (SAS Institute Inc., Cary, NC, USA). All control arm participants were included in analyses. One participant from the intervention arm was removed from analyses of effect at T2 due to delayed completion of the T2 survey. His data were retained for T1-T3 analyses, as he completed all other study activities on time. Descriptive statistics on demographic questions and game feedback questions were computed.
3. We recruited and enrolled 60 adolescent participants. Half of the participants were allocated to the intervention arm. All adolescents who were recruited completed all 3 study visits, and all intervention arm participants initiated gameplay. Participant demographics are presented in Table 3 . There were no significant demographic differences between the two arms. Preliminary calculations of exposure indicate that the intervention arm played Tumaini a mean of approximately 27 hours over the 16 days of the intervention.
Let's think about what each excerpt tells us, if anything, about adherence, attrition or compliance: The first excerpt describes the study's methodology, but does not tell us how many or how well participants followed the instructions, so it does not inform us about adherence. The second excerpt tells us that all control arm participants were included in analysis, but one intervention arm participant was removed from the analysis of effect at T2 but included in the T3 analysis; this is attrition information. The third excerpt says that all participants completed all visits and that all intervention arm participants initiated gameplay; this is adherence information.
Here's all the information in this paper about adherence, attrition, and compliance: All participants completed all visits, and all intervention arm participants initiated gameplay. One intervention arm participant was not included in the T2 analysis but was included in the T3 analysis.
Second, consider these four excerpts from a paper studying Study 2 on depression and psychosis:
1. The intervention was a single session that lasted approximately one hour for participants to provide informed consent, complete a demographic form, watch videos relevant to their study arm, complete the assessments, and be debriefed. Participants in either of the video groups stayed for the full hour, but participants in the control condition who did not watch the video finished in about 50 min. In Study 2, which included two 8 min videos with diagnostic accuracy for both conditions, the protocol required an additional 15 min. Survey data were collected using SurveyCTO (Ver 2.30, Dobility, Inc., Cambridge, MA, USA), an android application, on tablets (www.surveycto.com/accessed on: 19 June 2017). In Study 1, after completion of the video session, participants were invited to participate in the optional qualitative interview to be held within one week.
2. After review of 2nd and 3rd year MBBS student rosters, 18 students were excluded prior to randomization because of being international students not speaking Nepali or having already completed their psychiatry rotation. Among the remaining students, 100 were selected for randomization to one of the three arms. No potential participants refused to participate in this study. An additional six students were excluded at the time of analysis because information on their demographic forms revealed that they were international students whose native language was not Nepali or they had completed their clinical psychiatry rotation; this information had not been up to date in the class rosters at the time of randomization (Figure 1 ). One participant in the service user arm was excluded because of both being an international non-Nepali student and having completed a psychiatry rotation. Demographic characteristics of these participants are in Table 2 . Of note, only three participants indicated that they were primarily interested psychiatry as a specialty (see Figure 2 ). Participants were randomized into one the three conditions: the control group with no video (n = 31, 33%), the didactic video group (n = 31, 33%), and the service user recovery testimonial video group (n = 32; 34%).
3. Due to limited time availability on the part of the researchers and students as well as the exploratory nature of the interviews, only six participants completed interviews. Qualitative results were analyzed from a subset of six students, two women and four men in their third year, who participated in in-depth interviews.
4. For the second study, 248 students were enrolled in first-and second-year MBBS program across the two institutions participating. From roster, 28 students were excluded because of being international or having completed a psychiatry clinical rotation. The remaining 220 students were randomized; however, seven students declined to participate or were unavailable during data collection periods. Therefore, 213 participants were randomly allocated to the following arms: didactic video condition (n = 73), the service user video condition (n = 72), and the no video control condition (n = 75) (Figure 3 ). At the analysis phase, there were additional exclusions because of missing data or identification of exclusion criteria that was not recorded in the school registers. Participant characteristics for each condition are shown in Table 4 .
Let's think about what each excerpt tells us, if anything, about adherence, attrition or compliance. The first excerpt describes the methodology, describing the intervention as taking place in a single one-hour session. This does not tell us anything explicitly about adherence, but it does make it more likely that adherence was high, since participants only had to attend the single session, which is easy to do. The second excerpt says that 18 students were excluded prior to randomization; since this took place before sampling, it is not relevant to adherence. It also says that six students were excluded at the time of analysis because it turned out that they met exclusion criteria. Although this is not adherence strictly speaking, it is important to note when thinking about the validity of the results. The third excerpt says that only six participants completed interviews. The fourth excerpt says that in Study 2, seven students declined to participate or were not available during data collection after randomization of 220 students, and there were additional exclusions at analysis phase because of missing data or identification of exclusion criteria.
Here's all the information in this paper about adherence, attrition, and compliance: This paper does not discuss adherence explicitly. For the video study, six of the 100 randomized students were excluded from analysis, and in the second study, seven of the 220 randomized students declined to participate or were unavailable during data collection periods, with additional students excluded from the analysis because of missing data or identification of exclusion criteria. Only six participants completed interviews.
Third, consider these three excerpts from a paper studying antioxidant/anti-inflammatory supplement containing lemon verbena extract and omega-3 fatty acid:
1. Flow chart showing the dropout rate at different timepoints in the study.
2. Forty-eight (48) participants were enrolled for screening evaluation (Fig. 1 ) and after 3 exclusions, 45 participants were randomly assigned either to placebo or nutritional supplement groups, n = 22 and n = 23, respectively. Of these, 14 participants were withdrawn during the study for different reasons; there were 10 dropouts in the placebo group and 4 dropouts in the supplement group (treatment refusal, irregular treatment, starting on medication, or occurrence of an adverse event [AE]). Finally, 31 participants completed the study (12 in the placebo and 19 in the supplement group; Fig. 1 ).
3. Only 1 patient reported an AE (i.e., a heartburn sensation). The subject, who was in the placebo group, stopped the treatment immediately and was excluded from the study (Table 1 ). No major complications were reported by this subject.
Let's think about what each excerpt tells us, if anything, about adherence, attrition or compliance: The first excerpt refers to a flow chart showing the dropout rate, but since we do not have the figure here, we cannot conclude anything from this about the study's attrition. The second excerpt says that there were 10 dropouts in the placebo group of 22 participants and 4 dropouts in the supplement group of 23 participants, meaning that 31 participants out of the initial 45 participants after randomization completed the study. The third excerpt provides more detail for one patient in the placebo group who dropped out, stopping treatment after experiencing a heartburn sensation.
Here's all the information in this paper about adherence, attrition, and compliance: Ten of the 22 participants in the placebo group dropped out, and 4 of the 23 participants in the supplement group dropped out.
Fourth, consider these {paragraph_n} excerpt{"s" if len(paragraphs) > 1 else ""} from a paper studying {intervention}:
{paragraphs_to_numbered_list(paragraphs).strip()}
Let's think about what {"each" if len(paragraphs) > 1 else "this"} excerpt tells us, if anything, about adherence, attrition or compliance:""".strip()
if final_reasoning is None:
return prefix
return f"""{prefix} {final_reasoning.strip()}
Here's all the information in this paper about adherence, attrition, and compliance:""".strip()
def intervention_generation_prompt(
paragraphs: list[str], intervention: str, final_reasoning: str | None = None
) -> str:
paragraph_n = N_TO_STRING[len(paragraphs)]
prefix = f"""From the textbook, "Critically Evaluating Interventional Studies," Chapter 3:
When evaluating the quality of a randomized controlled trial, you should also consider whether any participants dropped out of the study or failed to follow its protocols correctly. This is sometimes called "adherence," "attrition," or "compliance". If too many participants failed to receive the intervention or perform it correctly, for whatever reason, this may damage the internal validity of the study's results.
Unfortunately, papers are often not as clear as they should be when discussing adherence. For simple interventions that are accomplished in one shot (e.g., having a group of college students complete a test in a lab that takes 30 minutes), the study doesn't discuss adherence unless something unusual happened, and we can safely assume that everyone in the sample completed the study. Sometimes studies provide specific numbers or percentages of people who dropped out (attrited), and sometimes they only provide qualitative descriptions, such as saying that adherence was "generally good." Often, papers are genuinely unclear, and we can only conclude that there is not enough information in the paper for us to know anything about adherence or compliance.
Let's look at excerpts from five different papers to see what information, if any, they provide about the study's adherence, attrition, or compliance. We'll have to identify what each extract tells us about adherence (some extracts may only discuss methodology or results, telling us nothing about adherence), and for some, we may have to conclude that the attrition or compliance is simply unclear.
First, consider these three excerpts from a paper studying the Tumaini game:
1. Intervention arm participants completed a 45-minute informational onboarding session, including instructions on the interface, technology, and game content. They were instructed to play at least 1 hour per day for the 16 days of the study and asked not to share their own gameplay profile with others. The game interface allows for 5 additional players' profiles so that others may play without compromising the enrolled player's data. Intervention participants were provided with a phone with the game preloaded and used it at their own pace for the duration of the intervention. Control participants received standard of care, namely no additional intervention beyond any existing sex education from family, school, and peers. No specific data on the content or source of this education were collected from participants. All study smartphones were returned by the participants at the end of the intervention period.
2. Preliminary cleaning of survey data was conducted in MS Excel, with additional cleaning and all analyses completed using SAS version 9.4 (SAS Institute Inc., Cary, NC, USA). All control arm participants were included in analyses. One participant from the intervention arm was removed from analyses of effect at T2 due to delayed completion of the T2 survey. His data were retained for T1-T3 analyses, as he completed all other study activities on time. Descriptive statistics on demographic questions and game feedback questions were computed.
3. We recruited and enrolled 60 adolescent participants. Half of the participants were allocated to the intervention arm. All adolescents who were recruited completed all 3 study visits, and all intervention arm participants initiated gameplay. Participant demographics are presented in Table 3 . There were no significant demographic differences between the two arms. Preliminary calculations of exposure indicate that the intervention arm played Tumaini a mean of approximately 27 hours over the 16 days of the intervention.
Let's think about what each excerpt tells us, if anything, about adherence, attrition or compliance: The first excerpt describes the study's methodology, but does not tell us how many or how well participants followed the instructions, so it does not inform us about adherence. The second excerpt tells us that all control arm participants were included in analysis, but one intervention arm participant was removed from the analysis of effect at T2 but included in the T3 analysis; this is attrition information. The third excerpt says that all participants completed all visits and that all intervention arm participants initiated gameplay; this is adherence information.
Here's all the information in this paper about adherence, attrition, and compliance: All participants completed all visits, and all intervention arm participants initiated gameplay. One intervention arm participant was not included in the T2 analysis but was included in the T3 analysis.
Second, consider these three excerpts from a paper studying the Preschool Situational Self-Regulation Toolkit (PRSIST) Program:
1. All children in their final prior-to-school year in these centers, who attended at least one of the 1-2 assessment days, were invited to participate in this study. There were no further exclusion criteria. Parental consent to participate was provided for 547 3-5-year old children, all of whom were identified as likely to be attending school in the subsequent year. The flow of participants throughout the study is depicted in Figure 1 . At baseline, 473 of these children were assessed (86.5%), with non-participation largely due to absence on the day of assessment. The mean age of this sample was 4.44 years (SD = 0.38, range = 3.20-5.33), with a relative balance of boys and girls (48.2% girls). Children who were identified as of Aboriginal or Torres Strait Islander descent comprised 7.2% of the sample, which is in line with population estimates for this age group (Australian Institute of Health and Welfare (AIHW), 2012). Family income was diverse: 11.9% of families qualified for full childcare benefit subsidies (low income); 65.5% of families qualified for some childcare benefit (low-middle to middle-high income); and 22.7% of families did not qualify for any childcare benefit subsidy (high income). Maternal education levels were also diverse: 9.5% did not complete high school; 9.3% completed only high school; 30.6% had completed a diploma, trade, certificate; 34.6% completed a tertiary degree; and 16.0% a post-graduate qualification. At follow-up, 426 children were assessed, which corresponded to a 90.1% retention rate. Nonparticipation at follow-up was due to the child having left the center or absence on the day of assessment.
2. Based on these patterns of participation, 20 services (80%) were deemed to have met or exceeded the minimum threshold of participation (i.e., completed the professional development modules and met the minimum of three child activities per week). Those that did not participate in the program were a result of: preparations for government assessment and rating (n = 1); substantial illness, maternity leave or turnover of key staff that precluded participation (n = 2); or low-or non-participation for undisclosed reasons (n = 2). Two of these five centers did not participate in any program elements. The other three centers did not engage with professional development modules or induction teleconference call yet completed child activities. Overall, there were good levels of adherence to the program, especially amongst those centers without significant sector-imposed impediments to participation.
3. Inability to conclusively and exclusively provide evidence for one of these possibilities, however, highlights limitations within the current study. That is, although the evaluation was rigorously designed and executed according to CONSORT guidelines, funding considerations limited the roll-out and intervention period to only 6 months. It is possible that a full year of program implementation would yield stronger program effects (see, for example, Schachter, 2015). It is also possible that program effects would be strengthened with stricter adherence to highquality program implementation. While fidelity data indicate good compliance in the frequency and timing of program elements, data are insufficient to evaluate the integrity with which program elements were implemented. While in-person or video fidelity checks were not possible in the current study, this would help monitor adherence. As a researcher-implemented model of delivery would violate our aspiration for a lowcost and barrier-free resource for educators, a plausible middle ground might be a coaching model that supports educators in implementation and adaptation of the program in their context. Lastly, the program was designed with the intention to foster selfregulation in all children, and thus did not focus on instances of dysregulation. However, it is clear that child dysregulation remains a significant concern for educators (Neilsen-Hewett et al., 2019), and future iterations of the program would do well to more explicitly provide support for these children. In guiding such an expansion of the program, there is evidence that children with frequent and severe dysregulation require a different approach to fostering self-regulation, as demonstrated successfully in trauma-informed practice approaches (Holmes et al., 2015). Future studies would also do well to consider implications of differing educator qualifications and experience, whereby different types and levels of support may be needed at varying levels of behavior challenges and educators' skills to address these.
Let's think about what each excerpt tells us, if anything, about adherence, attrition or compliance: The first excerpt includes demographic information about the participants but also reveals that at baseline, 473 of the total sample of 547 children were assessed (with non-participation mostly due to absence), and at follow-up, 426 children were assessed (with non-participation mostly due to the child having left the center or absence), corresponding to a 90.1% retention rate. The second excerpt describes compliance with protocols: 20 of the 25 intervention centers met or exceeded the minimum threshold of participation. The third excerpt describes compliance in the frequency and timing of program elements as "good" but also says that the study did not monitor adherence with in-person or video checks, which would have helped provide a better picture of compliance with the study design.
Here's all the information in this paper about adherence, attrition, and compliance: Of the initial sample of 547 children, 473 were assessed at baseline and 426 at follow-up. While 20 of 25 intervention centers met or exceeded the minimum threshold of participation and the frequency and timing of program elements was good, the study did not monitor adherence with in-person or video checks.
Third, consider these four excerpts from a paper studying Study 2 on depression and psychosis:
1. The intervention was a single session that lasted approximately one hour for participants to provide informed consent, complete a demographic form, watch videos relevant to their study arm, complete the assessments, and be debriefed. Participants in either of the video groups stayed for the full hour, but participants in the control condition who did not watch the video finished in about 50 min. In Study 2, which included two 8 min videos with diagnostic accuracy for both conditions, the protocol required an additional 15 min. Survey data were collected using SurveyCTO (Ver 2.30, Dobility, Inc., Cambridge, MA, USA), an android application, on tablets (www.surveycto.com/accessed on: 19 June 2017). In Study 1, after completion of the video session, participants were invited to participate in the optional qualitative interview to be held within one week.
2. After review of 2nd and 3rd year MBBS student rosters, 18 students were excluded prior to randomization because of being international students not speaking Nepali or having already completed their psychiatry rotation. Among the remaining students, 100 were selected for randomization to one of the three arms. No potential participants refused to participate in this study. An additional six students were excluded at the time of analysis because information on their demographic forms revealed that they were international students whose native language was not Nepali or they had completed their clinical psychiatry rotation; this information had not been up to date in the class rosters at the time of randomization (Figure 1 ). One participant in the service user arm was excluded because of both being an international non-Nepali student and having completed a psychiatry rotation. Demographic characteristics of these participants are in Table 2 . Of note, only three participants indicated that they were primarily interested psychiatry as a specialty (see Figure 2 ). Participants were randomized into one the three conditions: the control group with no video (n = 31, 33%), the didactic video group (n = 31, 33%), and the service user recovery testimonial video group (n = 32; 34%).
3. Due to limited time availability on the part of the researchers and students as well as the exploratory nature of the interviews, only six participants completed interviews. Qualitative results were analyzed from a subset of six students, two women and four men in their third year, who participated in in-depth interviews.
4. For the second study, 248 students were enrolled in first-and second-year MBBS program across the two institutions participating. From roster, 28 students were excluded because of being international or having completed a psychiatry clinical rotation. The remaining 220 students were randomized; however, seven students declined to participate or were unavailable during data collection periods. Therefore, 213 participants were randomly allocated to the following arms: didactic video condition (n = 73), the service user video condition (n = 72), and the no video control condition (n = 75) (Figure 3 ). At the analysis phase, there were additional exclusions because of missing data or identification of exclusion criteria that was not recorded in the school registers. Participant characteristics for each condition are shown in Table 4 .
Let's think about what each excerpt tells us, if anything, about adherence, attrition or compliance. The first excerpt describes the methodology, describing the intervention as taking place in a single one-hour session. This does not tell us anything explicitly about adherence, but it does make it more likely that adherence was high, since participants only had to attend the single session, which is easy to do. The second excerpt says that 18 students were excluded prior to randomization; since this took place before sampling, it is not relevant to adherence. It also says that six students were excluded at the time of analysis because it turned out that they met exclusion criteria. Although this is not adherence strictly speaking, it is important to note when thinking about the validity of the results. The third excerpt says that only six participants completed interviews. The fourth excerpt says that in Study 2, seven students declined to participate or were not available during data collection after randomization of 220 students, and there were additional exclusions at analysis phase because of missing data or identification of exclusion criteria.
Here's all the information in this paper about adherence, attrition, and compliance: This paper does not discuss adherence explicitly. For the video study, six of the 100 randomized students were excluded from analysis, and in the second study, seven of the 220 randomized students declined to participate or were unavailable during data collection periods, with additional students excluded from the analysis because of missing data or identification of exclusion criteria. Only six participants completed interviews.
Fourth, consider these three excerpts from a paper studying antioxidant/anti-inflammatory supplement containing lemon verbena extract and omega-3 fatty acid:
1. Flow chart showing the dropout rate at different timepoints in the study.
2. Forty-eight (48) participants were enrolled for screening evaluation (Fig. 1 ) and after 3 exclusions, 45 participants were randomly assigned either to placebo or nutritional supplement groups, n = 22 and n = 23, respectively. Of these, 14 participants were withdrawn during the study for different reasons; there were 10 dropouts in the placebo group and 4 dropouts in the supplement group (treatment refusal, irregular treatment, starting on medication, or occurrence of an adverse event [AE]). Finally, 31 participants completed the study (12 in the placebo and 19 in the supplement group; Fig. 1 ).
3. Only 1 patient reported an AE (i.e., a heartburn sensation). The subject, who was in the placebo group, stopped the treatment immediately and was excluded from the study (Table 1 ). No major complications were reported by this subject.
Let's think about what each excerpt tells us, if anything, about adherence, attrition or compliance: The first excerpt refers to a flow chart showing the dropout rate, but since we do not have the figure here, we cannot conclude anything from this about the study's attrition. The second excerpt says that there were 10 dropouts in the placebo group of 22 participants and 4 dropouts in the supplement group of 23 participants, meaning that 31 participants out of the initial 45 participants after randomization completed the study. The third excerpt provides more detail for one patient in the placebo group who dropped out, stopping treatment after experiencing a heartburn sensation.
Here's all the information in this paper about adherence, attrition, and compliance: Ten of the 22 participants in the placebo group dropped out, and 4 of the 23 participants in the supplement group dropped out.
Fifth, consider these {paragraph_n} excerpt{"s" if len(paragraphs) > 1 else ""} from a paper studying {intervention}:
{paragraphs_to_numbered_list(paragraphs).strip()}
Let's think about what {"each" if len(paragraphs) > 1 else "this"} excerpt tells us, if anything, about adherence, attrition or compliance:""".strip()
if final_reasoning is None:
return prefix
return f"""{prefix} {final_reasoning.strip()}
Here's all the information in this paper about adherence, attrition, and compliance:""".strip()
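# Note: the sentinel line "Here's all the information in this paper about
# adherence, attrition, and compliance:" that closes each prompt variant above
# must match the string that sample_generation_answer_with_reasoning splits on
# below; if they drift apart, completions fall through to the warning branch.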
async def complete_with_cache_buster(
prompt: str, temperature: float, max_tokens: int, top_p: float, stop, cache_id: int
):
return await openai_complete(
stop=stop,
prompt=prompt,
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
cache_id=cache_id,
)
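# Note on complete_with_cache_buster above: cache_id is forwarded only so that
# otherwise-identical requests are cached under distinct keys (assuming
# openai_complete keys its cache on all of its keyword arguments), which lets
# callers draw several independent samples at the same temperature rather than
# getting the same cached completion back each time.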
def remove_last_subsentence(text: str) -> str:
sentences = split_sentences(text)
if not sentences[-1].strip().endswith("."):
log.warning("Removing last sentence", sentences=sentences)
sentences = sentences[:-1]
return " ".join(sentences)
@trace
async def sample_generation_answer_with_reasoning(
paragraphs: list[str],
intervention: str,
cache_id: int,
ranked_paragraphs: list[str],
) -> tuple["AnswerWithReasoning", Callable]:
"""Sample reasoning and a final answer,
given the prompt. Shorten the prompt dynamically
to fit in the paragraphs provided, by first
reducing the number of few-shot examples, then
dropping the paragraphs that are least likely to be
about adherence.
"""
for prompt_func in (
intervention_generation_prompt,
shorter_intervention_generation_prompt,
even_shorter_intervention_generation_prompt,
):
prompt = prompt_func(paragraphs, intervention)
used_prompt_func = prompt_func
if n_remaining_tokens(prompt, -100_000) >= 400:
break
while n_remaining_tokens(prompt, -100_000) < 400: # some huge negative number
paragraphs = remove_worst_paragraph(paragraphs, ranked_paragraphs)
if not paragraphs:
raise ValueError("Prompt too long with even one paragraph")
prompt = shorter_intervention_generation_prompt(paragraphs, intervention)
log.warning("Dropped paragraph", n=len(paragraphs))
response = await complete_with_cache_buster(
prompt=prompt,
temperature=0.4,
max_tokens=n_remaining_tokens(prompt, 400) - 100,
top_p=1,
stop=("\nFourth", "\nFifth", "\nSixth", "\nFinally"),
cache_id=cache_id,
)
response_text = response["choices"][0]["text"]
token_usage = response["usage"]["total_tokens"]
if (
"Here's all the information in this paper about adherence, attrition, and compliance:"
in response_text
):
        reasoning, answer = response_text.split(
            "Here's all the information in this paper about adherence, attrition, and compliance:",
            maxsplit=1,
        )
return (
AnswerWithReasoning(
paragraph="\n\n".join(paragraphs),
reasoning=reasoning.strip(),
answer=remove_last_subsentence(answer.strip()),
token_usage=token_usage,
),
used_prompt_func,
)
log.warning(
"Unexpected response for final generation reasoning", response=response_text
)
return (
AnswerWithReasoning(
paragraph="\n\n".join(paragraphs),
reasoning=remove_last_subsentence(response_text.strip()),
answer=response_text.strip(),
token_usage=token_usage,
),
used_prompt_func,
)
@trace
async def final_answer_with_reasoning(
paragraphs: list[str], intervention: str, ranked_paragraphs: list[str]
):
"""Sample ten completions, and choose a reasoning which has the most
numbers in common with the other reasonings.
Use that reasoning as support to complete the final answer.
"""
answers_short = [
(
await sample_generation_answer_with_reasoning(
paragraphs, intervention, cache_id, ranked_paragraphs
)
)
for cache_id in range(10)
]
answers = [answer[0] for answer in answers_short]
used_prompt_func = answers_short[0][1]
total_token_usage = sum([a.token_usage for a in answers])
print(f"Total token usage: {total_token_usage}")
numbers_in_answers = [extract_numbers(a.answer) for a in answers]
for nums, answer in zip(numbers_in_answers, answers):
if "unclear" in answer.answer.lower():
nums.append("Unclear")
def rank(numbers, number):
r = 0
for n in numbers:
if number in n:
r += 1
return r
scores: list[float] = []
for numbers in numbers_in_answers:
score = 0.0
for number in numbers:
score += rank(numbers_in_answers, number)
if numbers:
score /= len(numbers)
score += 0.01 * len(numbers)
scores.append(score)
else:
scores.append(0)
answers_with_scores = [(answer, score) for answer, score in zip(answers, scores)]
best_answer = max(answers_with_scores, key=lambda aws: aws[1])[0]
final_prompt = used_prompt_func(
paragraphs=paragraphs,
intervention=intervention,
final_reasoning=best_answer.reasoning,
)
final_answer = await complete_with_cache_buster(
prompt=final_prompt,
temperature=0.0,
max_tokens=n_remaining_tokens(final_prompt, 83),
top_p=1,
stop=("\nFourth", "\nFifth", "\nSixth", "\nFinally"),
cache_id=0,
)
final_answer_text = final_answer["choices"][0]["text"]
return AnswerWithReasoning(
paragraph="\n\n".join(paragraphs),
reasoning=best_answer.reasoning,
answer=remove_last_subsentence(final_answer_text),
token_usage=final_answer["usage"]["total_tokens"],
)
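# Illustrative scoring in final_answer_with_reasoning above (hypothetical
# values): if three sampled answers mention the number sets {45, 31}, {45}, and
# {10}, then "45" is shared by two answers while "31" and "10" are not, so the
# answers built around "45" outscore the outlier; the reasoning of the
# top-scoring answer is then replayed in the prompt at temperature 0 to produce
# the final answer.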
def intervention_classification_prompt(paragraph: str, intervention: str):
return f"""
From the textbook, "Critically Evaluating Interventional Studies," Chapter 3:
When evaluating the quality of a randomized controlled trial, you should also consider whether any participants dropped out of the study or failed to follow its protocols correctly. This is sometimes called "adherence," "attrition," or "compliance". If too many participants failed to receive the intervention or failed to receive it correctly, for whatever reason, this may damage the internal validity of the study's results.
Unfortunately, papers are often not as clear as they should be when discussing adherence. Sometimes it can be tricky to tell whether the author is talking about adherence/compliance with the study's protocols versus simply discussing the results of the study. For simple interventions that are accomplished in one shot (e.g., having a group of college students complete a test in a lab that takes 30 minutes), the study doesn't discuss adherence unless something unusual happened, and we can safely assume that everyone in the sample completed the study.
Let's look at five examples to decide whether they contain information about adherence or compliance. For each paragraph, we'll conclude whether the paragraph does tell us about the study's adherence.
First, consider this paragraph from a paper studying non-cognitive skills certificate disclosure to job candidates and firms:
---
38 Information on whether each job interview in the matching intervention turned into a hire (and on the associated job characteristics) was collected in both the firm and worker follow-ups. We prefer to use information from the worker follow-ups for these match-level outcomes as measurement error is likely to be lower there for at least two reasons: (i) while the median firm was matched to three workers, the median worker was matched Figure 4 shows a summary of compliance and attrition. Starting from compliance, of the 1,230 scheduled job interviews, 515 (or 42%) actually took place. Lack of compliance is mainly due to workers having lost interest in being matched (32% of cases) or to the firm having lost interest (30% of cases) by the time they were called for the interviews. 39 Panel A of Appendix Table A6 explores the determinants of compliance, and shows very little evidence of selection on observables. 40 Importantly, Treatment does not predict the likelihood of the job interview taking place. This is not surprising, as the certificates were shown to firms and workers only conditional on the job interview taking place. Consistently with this, the Online Appendix confirms that the sample of job interviews that took place remains balanced on the main observable worker and firm characteristics. All the Treatment workers who showed up to the job interviews were given the certificates (corresponding to 49% of Treatment workers). The remaining Treatment certificates were disbursed to the workers shortly after the first worker follow-up survey. So by the second follow-up survey about 81% of Treatment workers had received the certificate. 41 oving on to attrition, the follow-up surveys targeted all firms and workers in the experimental sample, irrespective of whether the scheduled job interviews took place or not. We have very moderate attrition rates: these are about 12% in the firm follow-up, and about 14% in both worker follow-ups. 42 Panel B of Appendix Table A6 shows that attrition is not related to Treatment in either sample, and there is also very little evidence of observable characteristics determining attrition. Panel B of Appendix Table A5 .1 and Panels B and C of Table A5 .2 confirm that the samples of both workers and firms remain balanced on baseline characteristics at follow-up, so that attrition is not likely to affect the validity of the initial random assignment. 43 Therefore, we do not correct for attrition in our main regression specifications. 44 only one firm, so possible recall errors related to the respondent getting confused about the different job interviews are lower on the worker side; (ii) in 13% of the cases, the person that answered the firm follow-up survey is different from the owner that conducted the job interviews. Results using corresponding match-level information from the firm follow-up survey (not reported) are qualitatively similar.
---
Let's think through what this paragraph tells us about the study's adherence, attrition, or compliance. First, we find out that of the 1,230 scheduled job interviews, only 515, or 42% took place. Then, we find out that all the treatment workers who showed up to job interviews were given certificates, which corresponds to 49% of treatment workers. Finally, by the second follow-up survey, 81% of the workers had received the certificate. This tells us about attrition, i.e., adherence.
These figures describe both how much and how well participants in the study complied with the study's protocol.
Conclusion: Yes, this paragraph does tell us about adherence, attrition, or compliance for the intervention.
Second, consider this paragraph from a paper studying relaxation and park walking during lunch breaks.
---
Lunch breaks constitute the longest within-workday rest period, but it is unclear how they affect recovery from job stress. We conducted two randomized controlled trials with 153 Finnish knowledge workers who engaged for 15 minutes daily in prescribed lunch break activities for ten consecutive working days. Participants were randomly assigned to a: 1) park walking group (N = 51), 2) relaxation exercises group (N = 46) and 3) control group (N = 56). The study was divided into two parts scheduled in spring (N = 83) and fall (N = 70). Recovery experiences (detachment, relaxation, enjoyment) and recovery outcomes (restoration, fatigue, job satisfaction) were assessed with SMS and paper-and-pencil questionnaires several times per day before, during and after the intervention period. A manipulation check revealed that both intervention groups reported less tension after lunch breaks during the intervention than before. In spring, the interventions did hardly affect recovery experiences and outcomes. In fall, restoration increased and fatigue decreased markedly immediately after lunch breaks and in the afternoon in both intervention groups (d = 0.22-0.58) and most consistent positive effects across the day were reported by the park walking group. Park walks and relaxation exercises during lunch breaks can enhance knowledge workers' recovery from work, but effects seem weak, short-lived and dependent on the season.
---
Let's think through what this paragraph tells us about the study's adherence, attrition, or compliance. First, we find out that 51 participants were assigned to the park walking group, 46 to the relaxation exercises group, and 56 to the control group, and that the study was divided into two parts, a spring (n=83) and fall (n=70) group. This is simply information about the size of the sample and its allocation to different treatment arms; it tells us nothing about whether participants in these groups actually completed the intervention. For that, we would need to know, for example, how often those in the park walking group actually took walks in the park during their lunch breaks. Second, we find out that there was increased restoration and decreased fatigue (d=0.22-0.58) in both intervention groups in the fall. This is about the results of the study (what happened to the participants), not simply about how well they adhered to the intervention protocol.
These figures describe the size of the sample and the results of the study, but not how well participants adhered to the study's plan.
Conclusion: No, this paragraph does not tell us about adherence, attrition, or compliance for the intervention.
Third, consider this paragraph from a paper studying albendazole:
---
A somewhat lower proportion of pupils in school took the medicine in 1999. Among girls younger than thirteen and boys who were enrolled in school for at least part of the 1999 school year, the overall treatment rate was approximately 72 percent (73 percent in Group 1 and 71 percent in Group 2 schools), suggesting that the process of selection into treatment was fairly similar in the two years despite the change in consent rules. Of course, measured relative to the baseline population of students enrolled in early 1998, a smaller percentage of students were still in school in 1999 and hence, treatment rates in this baseline sample were considerably lower in 1999 than in 1998: among girls under thirteen years of age and all boys in treatment schools from the baseline sample, approximately 57 percent received medical treatment at some point in 1999, while only nine percent of the girls thirteen years of age and older received treatment. 17 nly five percent of comparison school pupils received medical treatment for worms independently of the program during the previous year, according to the 1999 pupil questionnaire. 18 An anthropological study examining worm treatment practices in a neighboring district in Kenya (Geissler et al. (2000)), finds that children self-treat the symptoms of helminth infections with local herbs, but found no case in which a child or parent purchased deworming 17 The difference between the 72 percent and 57 percent figures is due to Group 2 pupils who dropped out of school (or who could not be matched in the data cross years, despite the efforts of the NGO field staff) between years 1 and 2 of the project. Below, we compare infection outcomes for pupils who participated in the 1999 parasitological survey, all of whom were enrolled in school in 1999. Thus the parasitological survey sample consists of pupils enrolled in school in both 1998 and 1999 for both the treatment and comparison schools. To the extent that the deworming program itself affected enrolment outcomes-1999 school enrolment is approximately four percentage points higher in the treatment schools than the comparison schools-the pupils enrolled in the treatment versus comparison schools in 1999 will have different characteristics. However, since drop-out rates were lower in the treatment schools, this is likely to lead to a bias toward zero in the within-school health externality estimates, in which case our estimates serve as lower bounds on true within-school effects.
---
Let's think through what this paragraph tells us about the study's adherence, attrition, or compliance. The treatment rate among enrolled pupils was approximately 72 percent in 1999. Is this a percentage of the participants in the study? It's not clear from this paragraph alone; we need more context. Similarly, we find that only five percent of comparison school pupils received medical treatment for worms independently of the program during the previous school year. This could be about adherence, but it could also be describing the results of the intervention. We would need a longer description of the study to find out.
Conclusion: Unclear; we don't know whether this paragraph tells us about adherence, attrition, or compliance for the intervention.
Fourth, consider this paragraph from a paper studying {intervention.strip()}:
---
{paragraph.strip()}
---
Let's think through what this paragraph tells us about the study's adherence, attrition, or compliance.""".strip()
def this_or_other_study_prompt(paragraph: str, intervention: str):
return f"""
From the textbook, "Critically Evaluating Interventional Studies," Chapter 3:
When evaluating the quality of a randomized controlled trial, you should also consider whether any participants dropped out of the study or failed to follow its protocols correctly. This is sometimes called "adherence" or "compliance". If too many participants failed to receive the intervention or failed to receive it correctly, for whatever reason, this may damage the internal validity of the study's results.
Unfortunately, papers are often not as clear as they should be when discussing adherence. Sometimes it can be tricky to tell whether the author is talking about the adherence/compliance with the study's own protocols versus simply discussing the adherence or compliance of a related work.
Let's look at five examples of paragraphs from papers that discuss adherence or compliance to decide whether they are describing adherence or compliance for the author's own study versus adherence/compliance of a different study or a related work. Usually, when the adherence or compliance being discussed belongs to a different study, that study is cited explicitly. If another study is not cited explicitly, you can assume that the adherence/compliance rate being discussed belongs to the author's own study.
For each paragraph, we'll conclude either that Yes, the adherence/compliance being discussed probably belongs to the author's own study, or No, that it probably belongs to a different study.
First, consider this paragraph from a paper studying the Preschool Situational Self-Regulation Toolkit (PRSIST) Program:
---
All children in their final prior-to-school year in these centers, who attended at least one of the 1-2 assessment days, were invited to participate in this study. There were no further exclusion criteria. Parental consent to participate was provided for 547 3-5-year old children, all of whom were identified as likely to be attending school in the subsequent year. The flow of participants throughout the study is depicted in Figure 1 . At baseline, 473 of these children were assessed (86.5%), with non-participation largely due to absence on the day of assessment. The mean age of this sample was 4.44 years (SD = 0.38, range = 3.20-5.33), with a relative balance of boys and girls (48.2% girls). Children who were identified as of Aboriginal or Torres Strait Islander descent comprised 7.2% of the sample, which is in line with population estimates for this age group (Australian Institute of Health and Welfare (AIHW), 2012). Family income was diverse: 11.9% of families qualified for full childcare benefit subsidies (low income); 65.5% of families qualified for some childcare benefit (low-middle to middle-high income); and 22.7% of families did not qualify for any childcare benefit subsidy (high income). Maternal education levels were also diverse: 9.5% did not complete high school; 9.3% completed only high school; 30.6% had completed a diploma, trade, certificate; 34.6% completed a tertiary degree; and 16.0% a post-graduate qualification. At follow-up, 426 children were assessed, which corresponded to a 90.1% retention rate. Nonparticipation at follow-up was due to the child having left the center or absence on the day of assessment.
---
Let's think through whether this paragraph describes adherence for the study in question or another study: When describing nonparticipation rates, the text does not contain any citations to related works. Further, these details are also shown in Figure 1, strongly suggesting that the adherence/compliance rate being discussed belongs to the author's own study.
Conclusion: Yes, the adherence/compliance being discussed probably belongs to the author's own study.
Second, consider this paragraph from a paper studying DDUGKY skills training programs:
---
In the Indian context, we were unable to find studies that have estimated the impact of youth skills training programs sponsored by the government. Although not offered by the government, an experimental study designed by Maitra and Mani (2017) and implemented in co-operation with non-governmental organizations offers estimates of the impact of a 6-month stitching and tailoring training program targeted at young women (aged 18-39 years) in New Delhi. The paper examined the 5 The youth training employment programs (Joven) in Latin America were initiated in Chile in 1991, and thereafter, similar programs have been implemented in Argentina, Colombia, Peru, and Uruguay. The various programs target youth from low-income families, with low educational attainment, and with limited or no job experience. The programs consist of basic literacy, training in a trade which is in demand, work experience, and help finding a job. Typically, the intervention lasts for 6 months and includes 200-400 h of training and 2-3 months of work experience. 6 Other experimental evaluations of vocational training program in developing countries include Acevedo et al. (2017) for the Dominican Republic, Attanasio et al. (2017) for Columbia, Maitra and Mani (2017) for India, Diaz and Rosas (2016) for Peru, Honorati (2015) for Kenya. 7 Although their paper does not focus on disadvantaged youth but on the general unemployed population, Hirshleifer et al. (2016) use a randomised experiment to assess the effect of a large-scale vocational training program in Turkey and conclude that the effect of being assigned to training had a 2 percentage point, but statistically not significant effect on the probability of being employed. impact of the program 6 months and 18 months after program completion on a sample of 594 women (409 treatment and 185 control). According to the study's findings, in the short term, women who received training were 4 percentage points more likely to be self-employed, 6 percentage points more likely to be employed and earn 150% more per month as compared to the control group. The effects persisted in the medium term. While the effects are impressive, the authors report that only 56% of those assigned to treatment completed the course and that there were a number of barriers to entry, chiefly, lack of access to credit, lack of child-care support and the distance from residence to the training center.
---
Let's think through whether this paragraph describes adherence for the study in question or another study: When describing how only 56% of those assigned to treatment completed the course, the authors are reporting the findings from an experiment in Hirshleifer et al. (2016). This means that the adherence/compliance being discussed belongs to that study, not the author's own study.
Conclusion: No, the adherence/compliance being discussed probably belongs to a different study.
Third, consider this paragraph from a paper studying {intervention.strip()}:
---
{paragraph.strip()}
---
Let's think through whether this paragraph describes adherence for the study in question or another study:
""".strip()
class AnswerWithReasoning(BaseModel):
paragraph: str
reasoning: str
answer: str
token_usage: int
@trace
async def intervention_classification_answer_with_reasoning(
paragraph: str,
intervention: str,
temperature: float,
cache_id: int = 0,
):
"""Sample reasoning and a final answer for the classification prompt, asking
"Does this paragraph contain information about adherence, compliance, or attrition?"
"""
response = await openai_complete(
prompt=intervention_classification_prompt(paragraph, intervention),
temperature=temperature,
max_tokens=657,
stop=("\nFifth,", "\nFinally,"),
top_p=1,
cache_id=cache_id,
)
response_text = response["choices"][0]["text"]
token_usage = response["usage"]["total_tokens"]
if "Conclusion: " in response_text:
        reasoning, answer_text = response_text.split("Conclusion:", maxsplit=1)
return AnswerWithReasoning(
paragraph=paragraph,
reasoning=reasoning.strip(),
answer=answer_text.strip(),
token_usage=token_usage,
)
log.warning(
"Unexpected response in intervention classification",
response=response,
paragraph=paragraph,
)
print("Unexpected response:", response)
return AnswerWithReasoning(
paragraph=paragraph,
reasoning=response_text.strip(),
answer="",
token_usage=token_usage,
)
@trace
async def this_or_other_classification_answer_with_reasoning(
paragraph: str,
intervention: str,
temperature: float,
cache_id: int = 0,
):
"""Sample reasoning and a final answer for the classification prompt,
asking, "Is this paragraph about adherence about a related work or
the study this paper is reporting on?"
"""
response = await openai_complete(
prompt=this_or_other_study_prompt(paragraph, intervention),
temperature=temperature,
max_tokens=768,
stop=("\nFourth,", "\nFinally,", "\n\nNow,"),
top_p=1,
cache_id=cache_id,
)
response_text = response["choices"][0]["text"]
token_usage = response["usage"]["total_tokens"]
if "Conclusion: " in response_text:
        reasoning, answer_text = response_text.split("Conclusion:", maxsplit=1)
return AnswerWithReasoning(
paragraph=paragraph,
reasoning=reasoning.strip(),
answer=answer_text.strip(),
token_usage=token_usage,
)
log.warning(
"Unexpected response in this or other classification",
response=response,
paragraph=paragraph,
)
print("Unexpected response:", response)
return AnswerWithReasoning(
paragraph=paragraph,
reasoning=response_text.strip(),
answer="",
token_usage=token_usage,
)
def answer_has_prefix(answer: AnswerWithReasoning, prefix: str):
return answer.answer.lower().startswith(prefix.lower())
async def majority_vote(
answers: list[AnswerWithReasoning],
candidate_prefixes: tuple[str, ...] = ("Yes", "No", "Unclear"),
):
votes: Counter[str] = Counter()
for answer in answers:
for prefix in candidate_prefixes:
if answer_has_prefix(answer, prefix):
votes[prefix] += 1
break
return votes.most_common(1)[0][0]
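# Illustrative behavior of majority_vote above (hypothetical answers): with
# answer prefixes "Yes", "Yes", "No" it returns "Yes"; an answer matching none
# of the candidate prefixes simply contributes no vote.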
def prompt_from_reasoning(
prompt_function: Callable[[str, str], str],
*,
paragraph: str,
intervention: str,
reasoning: str,
):
prefix = prompt_function(paragraph, intervention)
return f"""{ prefix } { reasoning }
Conclusion: """.strip()
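# The prompt rebuilt by prompt_from_reasoning above ends with the chosen
# reasoning followed by "Conclusion:", so the zero-temperature call below only
# has to emit the final label (e.g. "Yes, ..."), which
# adherence_paragraph_classification then reads back.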
@trace
async def zero_temp_final_classification(prompt: str):
"""Perform a final classification step using a reasoning
selected from the sampled classifications."""
    return await openai_complete(
        prompt=prompt,
        temperature=0,
        stop=("\n",),
    )
@trace
async def adherence_paragraph_classification(
selection_function: Callable[
[str, str, float, int], Awaitable[AnswerWithReasoning]
],
prompt_function: Callable[[str, str], str],
*,
paragraph: str,
intervention: str,
):
"""Using the selection and prompt functions provided,
complete the classification task by chain-of-thought reasoning,
high-temperature sampling, plurality voting, and zero-temperature
generation of the final classification.
"""
answers = [
(
await selection_function(
paragraph,
intervention,
0.4,
cache_id,
)
)
for cache_id in range(10, 20)
]
total_token_usage = sum(answer.token_usage for answer in answers)
print(f"Total token usage: {total_token_usage}")
most_common_prefix = await majority_vote(
answers, candidate_prefixes=("Yes", "No", "Unclear")
)
answers_with_most_common_prefix = [
a for a in answers if answer_has_prefix(a, most_common_prefix)
]
if not answers_with_most_common_prefix:
# just use the longest reasoning
best_reasoning = max(answers, key=lambda a: len(a.reasoning))
else:
best_reasoning = max(
answers_with_most_common_prefix, key=lambda a: len(a.reasoning)
)
zero_temp_answer = await zero_temp_final_classification(
prompt_from_reasoning(
prompt_function,
paragraph=paragraph,
intervention=intervention,
reasoning=best_reasoning.reasoning,
)
)
token_usage = zero_temp_answer["usage"]["total_tokens"]
return AnswerWithReasoning(
paragraph=paragraph,
reasoning=best_reasoning.reasoning,
answer=zero_temp_answer["choices"][0]["text"].strip(),
token_usage=total_token_usage + token_usage,
)
TFEW_ADHERENCE_ANSWER_CHOICES = ("no", "yes")
def make_multiple_adherence_prompts(
context: str, section: str, sentence: str
) -> list[tuple[str, tuple[str, str]]]:
prompts = [
f"Context: { context }\n\nSection: { section }\n\nAnswer yes if the following sentence is about how many participants in the study complied with the study's protocol, had to drop out, or withdrew; answer no if it is about something else, such as the study's design, sampling strategy, or results.\n\nSentence: { sentence }",
f'Context: { context }\n\nQuestion: Does "{ sentence }" describe how many people eligible for the intervention actually completed it or failed to complete it?\nOptions:\nA. Yes, "{ sentence }" describes how many people actually completed or failed to complete the intervention.\nB. No, "{ sentence }" does not describe how many people completed or failed to complete the intervention.',
f'Context: { context }\n\nQuestion: Is "{ sentence }" about the actual adherence or dropout rate of the study? True, False, or Neither?',
f"Does the following sentence from a research paper describe how many participants dropped out of or withdrew from the study?\n\nSection: { section }\nSentence: { sentence }",
f"Does the following sentence from a research paper describe how many participants dropped out of or withdrew from the study?\n\nSection: { section }\nSentence: { sentence }",
]
prompts = [prompt.strip() for prompt in prompts]
choices: list[tuple[str, str]] = [
("no", "yes"),
("B", "A"),
("False", "True"),
("no", "yes"),
("no", "yes"),
]
return [(prompt, choice) for prompt, choice in zip(prompts, choices)]
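# Each (prompt, choices) pair above is scored independently by the T-Few QA
# model (see AdherenceParagraphTfew.is_possibly_adherence_sentence below); a
# sentence counts as possibly adherence-related as soon as any one prompt
# prefers its positive (second) choice, which deliberately biases this first
# pass toward recall.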
@trace
async def adherence_regex(sentence: str, level: int = 0) -> bool:
"""Simple regex for adherence-related English language patterns."""
match level:
case 0:
pattern = r"\b(adherence|Adherence|had to be excluded|were excluded|had to drop out|dropped out)\b"
case 1:
pattern = r"\b(withdrew|did not complete the)\b"
case 2:
pattern = r"\b(was omitted from|complied with)\b"
case _:
raise ValueError(f"Invalid level: { level }")
answer = re.search(pattern, sentence) is not None
return answer
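# Illustrative matches for adherence_regex above (hypothetical sentences):
#   level 0: "Twelve participants dropped out of the trial."
#   level 1: "Two families withdrew before follow-up."
#   level 2: "One record was omitted from the analysis."
# The levels are tried in turn by adherence_paragraphs_recall below when the
# classifier finds no candidate paragraphs.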
def remove_worst_paragraph(paragraphs: list[str], ranked_paragraphs: list[str]):
overlap = [paragraph for paragraph in ranked_paragraphs if paragraph in paragraphs]
return [
paragraph
for paragraph in paragraphs
if paragraph in overlap[: len(paragraphs) - 1]
]
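# Illustrative behavior of remove_worst_paragraph above (hypothetical inputs):
# given paragraphs [A, B, C] and ranked_paragraphs [B, A, D, C], the overlap in
# rank order is [B, A, C], so the lowest-ranked survivor (C) is dropped and
# [A, B] is returned in the original input order.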
class AdherenceTfewSettings(BaseSettings):
qa_model = "adherence-tfew-multi"
backup_search_model = "mono-t5"
class AdherenceParagraphTfew(Recipe):
defaults = lambda self: AdherenceTfewSettings() # noqa: E731
async def is_possibly_adherence_sentence(
self, *, sentence: str, context: str, section: str
) -> bool:
"""Detect whether a sentence is possibly related to adherence, using up to 5 prompts."""
for prompt, choice_inputs in make_multiple_adherence_prompts(
context=context, section=section, sentence=sentence
):
choice_probs, _ = await self.agent(self.s.qa_model).classify(
prompt=prompt, choices=choice_inputs
)
choice, _ = max_by_value(choice_probs)
if choice == choice_inputs[1]:
return True
return False
async def is_possibly_adherence_paragraph(self, paragraph: Paragraph) -> bool:
"""Detect whether a paragraph is possibly related to adherence,
by checking whether any of its sentences are possibly adherence-related,
supplemented by regex."""
for sentence in paragraph.sentences:
is_adherence_sentence = await self.is_possibly_adherence_sentence(
sentence=sentence,
context=str(paragraph),
section=paragraph.sections[0].title if paragraph.sections else "",
)
if is_adherence_sentence:
return True
return await adherence_regex(str(paragraph), 0)
async def rank_paragraphs_by_adherence(
self, paragraphs: list[Paragraph]
) -> list[tuple[Paragraph, float]]:
"""Score all paragraphs by their probability of being about adherence.
These ranks will be used when we have to shorten prompts or where
nothing in particular seems to be much about adherence."""
async def score_paragraph(paragraph: Paragraph) -> float:
return await self.agent(self.s.backup_search_model).relevance(
question="How many participants actually received the intervention?",
context=str(paragraph),
)
scores = await map_async(paragraphs, score_paragraph)
ranked_paragraphs = {
paragraph: score
for score, paragraph in sorted(
zip(scores, paragraphs), reverse=True, key=lambda sp: sp[0]
)
}
return list(ranked_paragraphs.items())
async def top_n_paragraphs_of_possible_adherence_paragraphs(
self,
possible_adherence_paragraphs: list[Paragraph],
ranked_paragraphs: list[tuple[Paragraph, float]],
n: int,
) -> list[Paragraph]:
"""Shorten the list of paragraphs by keeping the paragraphs
most likely to be about adherence.
"""
ranked_paragraphs_dict = dict(ranked_paragraphs)
top_n_adherence_paragraphs = set(
[
paragraph
for paragraph in ranked_paragraphs_dict
if paragraph in possible_adherence_paragraphs
][:n]
)
# maintain input order
return [
paragraph
for paragraph in possible_adherence_paragraphs
if paragraph in top_n_adherence_paragraphs
]
async def adherence_paragraphs_recall(
self,
paragraphs: list[Paragraph],
record=recorder,
) -> list[Paragraph]:
"""Collect paragraphs that may be about adherence.
Err on the side of high recall.
"""
possible_adherence_paragraphs = await filter_async(
paragraphs, self.is_possibly_adherence_paragraph
)
if not possible_adherence_paragraphs:
for level in range(0, 3):
possible_adherence_paragraphs = [
paragraph
for paragraph in paragraphs
if (await adherence_regex(str(paragraph), level))
]
if possible_adherence_paragraphs:
break
ranked_paragraphs = await self.rank_paragraphs_by_adherence(paragraphs)
top_2_ranked_paragraphs = (
await self.top_n_paragraphs_of_possible_adherence_paragraphs(
paragraphs, ranked_paragraphs, 2
)
)
combined_adherence_paragraphs = [
paragraph
for paragraph in paragraphs
if paragraph in possible_adherence_paragraphs
or paragraph in top_2_ranked_paragraphs
]
NO_MORE_THAN_N_PARAGRAPHS = 6
if len(combined_adherence_paragraphs) > NO_MORE_THAN_N_PARAGRAPHS:
top_n_combined_adherence_paragraphs = (
await self.top_n_paragraphs_of_possible_adherence_paragraphs(
combined_adherence_paragraphs,
ranked_paragraphs,
NO_MORE_THAN_N_PARAGRAPHS,
)
)
record(
info=f"Found more than {NO_MORE_THAN_N_PARAGRAPHS} paragraphs, shortening by ranks",
classified_paragraphs=possible_adherence_paragraphs,
top_n_ranked=top_n_combined_adherence_paragraphs,
)
combined_adherence_paragraphs = top_n_combined_adherence_paragraphs
return combined_adherence_paragraphs
async def adherence_paragraphs(
self, paragraphs: list[Paragraph], intervention: str
) -> list[Paragraph]:
"""Collect paragraphs that may be about adherence,
combining a high-recall search with a high-precision refinement
step.
"""
possible_adherence_paragraphs = await self.adherence_paragraphs_recall(
paragraphs
)
async def is_really_adherence(paragraph: Paragraph) -> AnswerWithReasoning:
return await adherence_paragraph_classification(
intervention_classification_answer_with_reasoning,
intervention_classification_prompt,
paragraph=str(paragraph),
intervention=intervention,
)
classification_answers = await map_async(
possible_adherence_paragraphs, is_really_adherence
)
return [
paragraph
for paragraph, answer in zip(
possible_adherence_paragraphs, classification_answers
)
if answer_has_prefix(answer, "Yes") or "adherence" in str(paragraph).lower()
]
async def adherence_paragraphs_this_study(
self, paragraphs: list[Paragraph], intervention: str
) -> list[Paragraph]:
"""For paragraphs that we know are about adherence, which
are about adherence of a *related work* and not this paper?"""
really_adherence_paragraphs = await self.adherence_paragraphs(
paragraphs, intervention
)
async def is_adherence_this_study(paragraph: Paragraph) -> AnswerWithReasoning:
return await adherence_paragraph_classification(
this_or_other_classification_answer_with_reasoning,
this_or_other_study_prompt,
paragraph=str(paragraph),
intervention=intervention,
)
classification_answers = await map_async(
really_adherence_paragraphs, is_adherence_this_study
)
return [
paragraph
for paragraph, answer in zip(
really_adherence_paragraphs, classification_answers
)
if answer_has_prefix(answer, "Yes")
]
async def adherence_description(
self, paragraphs: list[Paragraph], intervention: str
) -> tuple[str, list[Paragraph]]:
"""Describe the adherence, compliance, or attrition that occurred in this study,
for this intervention."""
ranked_paragraphs = await self.rank_paragraphs_by_adherence(paragraphs)
ranked_paragraphs_dict = dict(ranked_paragraphs)
adherence_paragraphs_this_study = await self.adherence_paragraphs_this_study(
paragraphs, intervention
)
if not adherence_paragraphs_this_study:
return "Unclear", adherence_paragraphs_this_study
return (
await final_answer_with_reasoning(
[str(p) for p in adherence_paragraphs_this_study],
intervention,
[str(p) for p in list(ranked_paragraphs_dict.keys())],
)
).answer, adherence_paragraphs_this_study
async def list_experiments(
self, document_id: str, question_short_name: str = "adherence"
):
return list_experiments(
document_id=document_id, question_short_name=question_short_name
)
async def run(self, paper: Paper):
results = []
for intervention in await self.list_experiments(paper.document_id):
answer, excerpts = await self.adherence_description(
paper.paragraphs, intervention
)
recipe_result = RecipeResult(
document_id=paper.document_id,
question_short_name="adherence",
result=(answer, excerpts),
answer=answer,
excerpts=[str(excerpt) for excerpt in excerpts],
experiment=intervention,
classifcation_eq=(classification_eq_adherence,),
classification=(
"Unclear"
if not excerpts or "unclear" in answer.lower()
else "found"
),
)
results.append(recipe_result)
self.maybe_add_to_results(results)
return results
AdherenceClassification = Literal["explicit", "implicit", "missing"]
def classification_eq_adherence(
prediction: str | None,
gold: AdherenceClassification | None,
) -> bool | None:
if gold is None or gold == "implicit":
return None
if gold not in ["explicit", "missing"]:
return None
if prediction is None:
return None
if prediction.strip().lower().startswith("unclear"):
return gold == "missing"
else:
return gold == "explicit"
| [
"['PLACEHOLDER']",
"L",
"[\"Context: PLACEHOLDER\\n\\nSection: PLACEHOLDER\\n\\nAnswer yes if the following sentence is about how many participants in the study complied with the study's protocol, had to drop out, or withdrew; answer no if it is about something else, such as the study's design, sampling strategy, or results.\\n\\nSentence: PLACEHOLDER\", 'Context: PLACEHOLDER\\n\\nQuestion: Does \"PLACEHOLDER\" describe how many people eligible for the intervention actually completed it or failed to complete it?\\nOptions:\\nA. Yes, \"PLACEHOLDER\" describes how many people actually completed or failed to complete the intervention.\\nB. No, \"PLACEHOLDER\" does not describe how many people completed or failed to complete the intervention.', 'Context: PLACEHOLDER\\n\\nQuestion: Is \"PLACEHOLDER\" about the actual adherence or dropout rate of the study? True, False, or Neither?', 'Does the following sentence from a research paper describe how many participants dropped out of or withdrew from the study?\\n\\nSection: PLACEHOLDER\\nSentence: PLACEHOLDER', 'Does the following sentence from a research paper describe how many participants dropped out of or withdrew from the study?\\n\\nSection: PLACEHOLDER\\nSentence: PLACEHOLDER']"
] |
2024-01-10 | inthewaterwheel/ice | ice~recipes~jawwsify.py | from ice.apis.openai import openai_complete
from ice.recipe import recipe
# TODO: Citation removal isn't what we want we should instead:
# a) Extract them into a "footnotes" section, and keep the footnote links inline
# b) Remove them from the text, and then add them back in at the very end
# Would also need us to test how well the agent keeps the footnote links in the right place
CITATION_REMOVAL_PROMPT = """Rewrite the following paragraphs to make them more readable, according to the following rule:
- Remove all inline citations, but otherwise leave the text unchanged."""
SPLIT_PROMPT = """Rewrite the following paragraphs to make them more readable, according to the following rule:
- Break up the single large paragraph into multiple paragraphs, along natural paragraph boundaries."""
SIMPLIFY_PROMPT = """Rewrite the following paragraphs to make them more readable, according to the following rule:
- Make explanations more generally accessible, by providing longer explanations for any particularly complicated concepts.
- Make the writing generally clearer, simpler, and make it flow more smoothly. """
EXAMPLE_0 = """Most models of trait evolution are based on the Brownian motion model (Cavalli-Sforza & Edwards 1967; Felsenstein 1973). The Ornstein–Uhlenbeck (OU) model can be thought of as a modification of the Brownian model with an additional parameter that measures the strength of return towards a theoretical optimum shared across a clade or subset of species (Hansen 1997; Butler & King 2004). OU models have become increasingly popular as they tend to fit the data better than Brownian motion models, and have attractive biological interpretations (Cooper et al. 2016b). For example, fit to an OU model has been seen as evidence of evolutionary constraints, stabilising selection, niche conservatism and selective regimes (Wiens et al. 2010; Beaulieu et al. 2012; Christin et al. 2013; Mahler et al. 2013). However, the OU model has several well-known caveats (see Ives & Garland 2010; Boettiger, Coop & Ralph 2012; Hansen & Bartoszek 2012; Ho & Ané 2013, 2014). For example, it is frequently incorrectly favoured over simpler models when using likelihood ratio tests, particularly for small data sets that are commonly used in these analyses (the median number of taxa used for OU studies is 58; Cooper et al. 2016b). Additionally, very small amounts of error in data sets can result in an OU model being favoured over Brownian motion simply because OU can accommodate more variance towards the tips of the phylogeny, rather than due to any interesting biological process (Boettiger, Coop & Ralph 2012; Pennell et al. 2015). Finally, the literature describing the OU model is clear that a simple explanation of clade-wide stabilising selection is unlikely to account for data fitting an OU model (e.g. Hansen 1997; Hansen & Orzack 2005), but users of the model often state that this is the case. Unfortunately, these limitations are rarely taken into account in empirical studies."""
EXAMPLE_1 = """Most models of trait evolution are based on the Brownian motion model. The Ornstein–Uhlenbeck (OU) model can be thought of as a modification of the Brownian model with an additional parameter that measures the strength of return towards a theoretical optimum shared across a clade or subset of species. OU models have become increasingly popular as they tend to fit the data better than Brownian motion models, and have attractive biological interpretations. For example, fit to an OU model has been seen as evidence of evolutionary constraints, stabilising selection, niche conservatism and selective regimes. However, the OU model has several well-known caveats. For example, it is frequently incorrectly favoured over simpler models when using likelihood ratio tests, particularly for small data sets that are commonly used in these analyses. Additionally, very small amounts of error in data sets can result in an OU model being favoured over Brownian motion simply because OU can accommodate more variance towards the tips of the phylogeny, rather than due to any interesting biological process. Finally, the literature describing the OU model is clear that a simple explanation of clade-wide stabilising selection is unlikely to account for data fitting an OU model, but users of the model often state that this is the case. Unfortunately, these limitations are rarely taken into account in empirical studies."""
EXAMPLE_2 = """Most models of trait evolution are based on the Brownian motion model.
The Ornstein–Uhlenbeck (OU) model can be thought of as a modification of the Brownian model with an additional parameter that measures the strength of return towards a theoretical optimum shared across a clade or subset of species. OU models have become increasingly popular as they tend to fit the data better than Brownian motion models, and have attractive biological interpretations. For example, fit to an OU model has been seen as evidence of evolutionary constraints, stabilising selection, niche conservatism and selective regimes.
However, the OU model has several well-known caveats. For example, it is frequently incorrectly favoured over simpler models when using likelihood ratio tests, particularly for small data sets that are commonly used in these analyses. Additionally, very small amounts of error in data sets can result in an OU model being favoured over Brownian motion simply because OU can accommodate more variance towards the tips of the phylogeny, rather than due to any interesting biological process. Finally, the literature describing the OU model is clear that a simple explanation of clade-wide stabilising selection is unlikely to account for data fitting an OU model, but users of the model often state that this is the case. Unfortunately, these limitations are rarely taken into account in empirical studies."""
EXAMPLE_3 = """Most models of trait evolution are based on the Brownian motion model, in which traits evolve randomly and accrue variance over time.
What if we add a parameter to measure how much the trait motion returns to a theoretical optimum for a given clade or set of species? Then we get a family of models called Ornstein-Uhlenbeck, first developed as a way to describe friction in the Brownian motion of a particle. These models have become increasingly popular, both because they tend to fit the data better than simple Brownian motion, and because they have attractive biological interpretations. For example, fit to an Ornstein-Uhlenbeck model has been seen as evidence of evolutionary constraints, stabilising selection, niche conservatism and selective regimes.
However, Ornstein-Uhlenbeck models have several well-known caveats. For example, they are frequently — and incorrectly — favoured over simpler Brownian models. This occurs with likelihood ratio tests, particularly for the small data sets that are commonly used in these analyses. It also happens when there is error in the data set, even very small amounts of error, simply because Ornstein-Uhlenbeck models accommodate more variance towards the tips of the phylogeny — therefore suggesting an interesting biological process where there is none. Additionally, users of Ornstein-Uhlenbeck models often state that clade-wide stabilising selection accounts for data fitting the model, even though the literature describing the model warns that such a simple explanation is unlikely. Unfortunately, these limitations are rarely taken into account in empirical studies.
"""
SEP = "\n ### \n"
INITIAL_TEXT = """The majority of licensed vaccines provide protection through induction of protective antibodies (Plotkin, 2010). Isolation of HIV-1 broadly neutralizing antibodies (bnAbs) from HIV-infected individuals and the finding that passive transfer of bnAbs can protect non-human primates (NHPs) from simian/human immunodeficiency virus (SHIV) infection support the feasibility of an antibody-based HIV vaccine (Burton and Hangartner, 2016, Nishimura and Martin, 2017). Elicitation of neutralizing antibodies (nAbs) against clinically relevant HIV strains (i.e., tier 2 and tier 3 strains) by immunization has been difficult (Montefiori et al., 2018). Much of that challenge centers on structural features of the HIV envelope (Env), which have complex and incompletely understood immunological implications. Env consists of gp120 and gp41 components that form a trimeric spike that is the only viral protein on HIV virions and the only target for nAbs (Burton and Hangartner, 2016). Human immunization with monomeric gp120 has failed to elicit tier 2 nAbs in clinical trials (Haynes et al., 2012, Mascola et al., 1996, Rerks-Ngarm et al., 2009). The reasons for this are not obvious because nAb epitopes are present on gp120. Key developments in protein design have been made toward the expression of soluble native-like HIV Env trimers (Julien et al., 2013, Kulp et al., 2017, Lyumkis et al., 2013, Sanders et al., 2013). Immunization with these Env trimers elicited substantial strain-specific tier 2 nAbs in rabbits and guinea pigs but failed to elicit nAbs in mice (Feng et al., 2016, Hu et al., 2015, Sanders et al., 2015). Trimer immunization of NHPs has been sporadically successful (Havenar-Daughton et al., 2016a, Pauthner et al., 2017, Sanders et al., 2015, Zhou et al., 2017). For some regimes in NHPs, autologous tier 2 nAbs have been elicited within 10 weeks, which is comparable with the speed of nAb development in HIV-infected individuals (Pauthner et al., 2017, Richman et al., 2003, Wei et al., 2003). Thus, although nAb epitopes are presented on native-like trimers, the immunological parameters controlling the development of nAbs to Env remain to be elucidated. These parameters are also likely important for nAbs to other pathogens."""
async def remove_citations(text: str):
prompt = CITATION_REMOVAL_PROMPT
prompt += SEP
prompt += "Original text: " + EXAMPLE_0
prompt += "\n"
prompt += "Rewritten text: " + EXAMPLE_1
prompt += SEP
prompt += "Original text: " + text
prompt += "\n"
prompt += "Rewritten text:"
return await recipe.agent().complete(prompt=prompt, max_tokens=2000)
async def split_paragraphs(text: str):
prompt = SPLIT_PROMPT
prompt += SEP
prompt += "Original text: " + EXAMPLE_1
prompt += "\n"
prompt += "Rewritten text: " + EXAMPLE_2
prompt += SEP
prompt += "Original text: " + text
prompt += "\n"
prompt += "Rewritten text:"
return await recipe.agent().complete(prompt=prompt, max_tokens=2000)
async def simplify(text: str):
prompt = SEP
prompt += SIMPLIFY_PROMPT
prompt += "Original text: " + EXAMPLE_2
prompt += "\n"
prompt += "Rewritten text: " + EXAMPLE_3
prompt += SEP
prompt += SIMPLIFY_PROMPT
prompt += "Original text: " + text
prompt += "\n"
prompt += "Rewritten text:"
return await recipe.agent().complete(
prompt=prompt, max_tokens=2000, frequency_penalty=0.5, presence_penalty=0.5
)
async def pipeline(text: str = INITIAL_TEXT):
text = await remove_citations(text)
text = await split_paragraphs(text)
text = await simplify(text)
return text
recipe.main(pipeline)
| [
"Rewrite the following paragraphs to make them more readable, according to the following rule:\n- Remove all inline citations, but otherwise leave the text unchanged.",
"\n",
"Rewritten text:",
"Rewritten text: PLACEHOLDER",
"Rewrite the following paragraphs to make them more readable, according to the following rule:\n- Make explanations more generally accessible, by providing longer explanations for any particularly complicated concepts.\n- Make the writing generally clearer, simpler, and make it flow more smoothly. ",
"Original text: PLACEHOLDER",
"Rewrite the following paragraphs to make them more readable, according to the following rule:\n- Break up the single large paragraph into multiple paragraphs, along natural paragraph boundaries."
] |
2024-01-10 | xmxhuihui/stable-dreamfusion | main.py | import torch
import argparse
import pandas as pd
import sys
from nerf.provider import NeRFDataset
from nerf.utils import *
# torch.autograd.set_detect_anomaly(True)
if __name__ == '__main__':
# See https://stackoverflow.com/questions/27433316/how-to-get-argparse-to-read-arguments-from-a-file-with-an-option-rather-than-pre
class LoadFromFile (argparse.Action):
def __call__ (self, parser, namespace, values, option_string = None):
with values as f:
# parse arguments in the file and store them in the target namespace
parser.parse_args(f.read().split(), namespace)
parser = argparse.ArgumentParser()
parser.add_argument('--file', type=open, action=LoadFromFile, help="specify a file filled with more arguments")
parser.add_argument('--text', default=None, help="text prompt")
parser.add_argument('--negative', default='', type=str, help="negative text prompt")
parser.add_argument('-O', action='store_true', help="equals --fp16 --cuda_ray")
parser.add_argument('-O2', action='store_true', help="equals --backbone vanilla")
parser.add_argument('--test', action='store_true', help="test mode")
parser.add_argument('--six_views', action='store_true', help="six_views mode: save the images of the six views")
parser.add_argument('--eval_interval', type=int, default=1, help="evaluate on the valid set every interval epochs")
parser.add_argument('--test_interval', type=int, default=100, help="test on the test set every interval epochs")
parser.add_argument('--workspace', type=str, default='workspace')
parser.add_argument('--seed', default=None)
parser.add_argument('--image', default=None, help="image prompt")
parser.add_argument('--image_config', default=None, help="image config csv")
parser.add_argument('--known_view_interval', type=int, default=4, help="train default view with RGB loss every & iters, only valid if --image is not None.")
parser.add_argument('--IF', action='store_true', help="experimental: use DeepFloyd IF as the guidance model for nerf stage")
parser.add_argument('--guidance', type=str, nargs='*', default=['SD'], help='guidance model')
parser.add_argument('--guidance_scale', type=float, default=100, help="diffusion model classifier-free guidance scale")
parser.add_argument('--save_mesh', action='store_true', help="export an obj mesh with texture")
parser.add_argument('--mcubes_resolution', type=int, default=256, help="mcubes resolution for extracting mesh")
parser.add_argument('--decimate_target', type=int, default=5e4, help="target face number for mesh decimation")
parser.add_argument('--dmtet', action='store_true', help="use dmtet finetuning")
parser.add_argument('--tet_grid_size', type=int, default=128, help="tet grid size")
parser.add_argument('--init_with', type=str, default='', help="ckpt to init dmtet")
parser.add_argument('--lock_geo', action='store_true', help="disable dmtet to learn geometry")
### training options
parser.add_argument('--iters', type=int, default=10000, help="training iters")
parser.add_argument('--lr', type=float, default=1e-3, help="max learning rate")
parser.add_argument('--ckpt', type=str, default='latest', help="possible options are ['latest', 'scratch', 'best', 'latest_model']")
parser.add_argument('--cuda_ray', action='store_true', help="use CUDA raymarching instead of pytorch")
parser.add_argument('--taichi_ray', action='store_true', help="use taichi raymarching")
parser.add_argument('--max_steps', type=int, default=1024, help="max num steps sampled per ray (only valid when using --cuda_ray)")
parser.add_argument('--num_steps', type=int, default=64, help="num steps sampled per ray (only valid when not using --cuda_ray)")
parser.add_argument('--upsample_steps', type=int, default=32, help="num steps up-sampled per ray (only valid when not using --cuda_ray)")
parser.add_argument('--update_extra_interval', type=int, default=16, help="iter interval to update extra status (only valid when using --cuda_ray)")
parser.add_argument('--max_ray_batch', type=int, default=4096, help="batch size of rays at inference to avoid OOM (only valid when not using --cuda_ray)")
    parser.add_argument('--latent_iter_ratio', type=float, default=0.2, help="fraction of training iters that use latent-space (as_latent) rendering only")
parser.add_argument('--albedo_iter_ratio', type=float, default=0, help="training iters that only use albedo shading")
parser.add_argument('--jitter_pose', action='store_true', help="add jitters to the randomly sampled camera poses")
parser.add_argument('--jitter_center', type=float, default=0.2, help="amount of jitter to add to sampled camera pose's center (camera location)")
parser.add_argument('--jitter_target', type=float, default=0.2, help="amount of jitter to add to sampled camera pose's target (i.e. 'look-at')")
parser.add_argument('--jitter_up', type=float, default=0.02, help="amount of jitter to add to sampled camera pose's up-axis (i.e. 'camera roll')")
parser.add_argument('--uniform_sphere_rate', type=float, default=0, help="likelihood of sampling camera location uniformly on the sphere surface area")
parser.add_argument('--grad_clip', type=float, default=-1, help="clip grad of all grad to this limit, negative value disables it")
parser.add_argument('--grad_clip_rgb', type=float, default=-1, help="clip grad of rgb space grad to this limit, negative value disables it")
# model options
parser.add_argument('--bg_radius', type=float, default=1.4, help="if positive, use a background model at sphere(bg_radius)")
parser.add_argument('--density_activation', type=str, default='exp', choices=['softplus', 'exp'], help="density activation function")
parser.add_argument('--density_thresh', type=float, default=10, help="threshold for density grid to be occupied")
parser.add_argument('--blob_density', type=float, default=5, help="max (center) density for the density blob")
parser.add_argument('--blob_radius', type=float, default=0.2, help="control the radius for the density blob")
# network backbone
parser.add_argument('--backbone', type=str, default='grid', choices=['grid_tcnn', 'grid', 'vanilla', 'grid_taichi'], help="nerf backbone")
parser.add_argument('--optim', type=str, default='adan', choices=['adan', 'adam'], help="optimizer")
parser.add_argument('--sd_version', type=str, default='2.1', choices=['1.5', '2.0', '2.1'], help="stable diffusion version")
parser.add_argument('--hf_key', type=str, default=None, help="hugging face Stable diffusion model key")
# try this if CUDA OOM
parser.add_argument('--fp16', action='store_true', help="use float16 for training")
parser.add_argument('--vram_O', action='store_true', help="optimization for low VRAM usage")
# rendering resolution in training, increase these for better quality / decrease these if CUDA OOM even if --vram_O enabled.
parser.add_argument('--w', type=int, default=64, help="render width for NeRF in training")
parser.add_argument('--h', type=int, default=64, help="render height for NeRF in training")
parser.add_argument('--known_view_scale', type=float, default=1.5, help="multiply --h/w by this for known view rendering")
parser.add_argument('--known_view_noise_scale', type=float, default=2e-3, help="random camera noise added to rays_o and rays_d")
parser.add_argument('--dmtet_reso_scale', type=float, default=8, help="multiply --h/w by this for dmtet finetuning")
parser.add_argument('--batch_size', type=int, default=1, help="images to render per batch using NeRF")
### dataset options
parser.add_argument('--bound', type=float, default=1, help="assume the scene is bounded in box(-bound, bound)")
parser.add_argument('--dt_gamma', type=float, default=0, help="dt_gamma (>=0) for adaptive ray marching. set to 0 to disable, >0 to accelerate rendering (but usually with worse quality)")
parser.add_argument('--min_near', type=float, default=0.01, help="minimum near distance for camera")
parser.add_argument('--radius_range', type=float, nargs='*', default=[3.0, 3.5], help="training camera radius range")
    parser.add_argument('--theta_range', type=float, nargs='*', default=[45, 105], help="training camera theta (polar angle) range")
    parser.add_argument('--phi_range', type=float, nargs='*', default=[-180, 180], help="training camera phi (azimuth angle) range")
parser.add_argument('--fovy_range', type=float, nargs='*', default=[10, 30], help="training camera fovy range")
parser.add_argument('--default_radius', type=float, default=3.2, help="radius for the default view")
parser.add_argument('--default_polar', type=float, default=90, help="polar for the default view")
parser.add_argument('--default_azimuth', type=float, default=0, help="azimuth for the default view")
parser.add_argument('--default_fovy', type=float, default=20, help="fovy for the default view")
parser.add_argument('--progressive_view', action='store_true', help="progressively expand view sampling range from default to full")
parser.add_argument('--progressive_level', action='store_true', help="progressively increase gridencoder's max_level")
parser.add_argument('--angle_overhead', type=float, default=30, help="[0, angle_overhead] is the overhead region")
parser.add_argument('--angle_front', type=float, default=60, help="[0, angle_front] is the front region, [180, 180+angle_front] the back region, otherwise the side region.")
parser.add_argument('--t_range', type=float, nargs='*', default=[0.02, 0.98], help="stable diffusion time steps range")
### regularizations
parser.add_argument('--lambda_entropy', type=float, default=1e-3, help="loss scale for alpha entropy")
parser.add_argument('--lambda_opacity', type=float, default=0, help="loss scale for alpha value")
parser.add_argument('--lambda_orient', type=float, default=1e-2, help="loss scale for orientation")
parser.add_argument('--lambda_tv', type=float, default=0, help="loss scale for total variation")
parser.add_argument('--lambda_wd', type=float, default=0, help="loss scale")
parser.add_argument('--lambda_mesh_normal', type=float, default=0.5, help="loss scale for mesh normal smoothness")
parser.add_argument('--lambda_mesh_laplacian', type=float, default=0.5, help="loss scale for mesh laplacian")
parser.add_argument('--lambda_guidance', type=float, default=1, help="loss scale for SDS")
parser.add_argument('--lambda_rgb', type=float, default=1000, help="loss scale for RGB")
parser.add_argument('--lambda_mask', type=float, default=500, help="loss scale for mask (alpha)")
parser.add_argument('--lambda_normal', type=float, default=0, help="loss scale for normal map")
parser.add_argument('--lambda_depth', type=float, default=10, help="loss scale for relative depth")
parser.add_argument('--lambda_2d_normal_smooth', type=float, default=0, help="loss scale for 2D normal image smoothness")
parser.add_argument('--lambda_3d_normal_smooth', type=float, default=0, help="loss scale for 3D normal image smoothness")
### debugging options
parser.add_argument('--save_guidance', action='store_true', help="save images of the per-iteration NeRF renders, added noise, denoised (i.e. guidance), fully-denoised. Useful for debugging, but VERY SLOW and takes lots of memory!")
parser.add_argument('--save_guidance_interval', type=int, default=10, help="save guidance every X step")
### GUI options
parser.add_argument('--gui', action='store_true', help="start a GUI")
parser.add_argument('--W', type=int, default=800, help="GUI width")
parser.add_argument('--H', type=int, default=800, help="GUI height")
parser.add_argument('--radius', type=float, default=5, help="default GUI camera radius from center")
parser.add_argument('--fovy', type=float, default=20, help="default GUI camera fovy")
parser.add_argument('--light_theta', type=float, default=60, help="default GUI light direction in [0, 180], corresponding to elevation [90, -90]")
parser.add_argument('--light_phi', type=float, default=0, help="default GUI light direction in [0, 360), azimuth")
parser.add_argument('--max_spp', type=int, default=1, help="GUI rendering max sample per pixel")
parser.add_argument('--zero123_config', type=str, default='./pretrained/zero123/sd-objaverse-finetune-c_concat-256.yaml', help="config file for zero123")
parser.add_argument('--zero123_ckpt', type=str, default='./pretrained/zero123/105000.ckpt', help="ckpt for zero123")
parser.add_argument('--zero123_grad_scale', type=str, default='angle', help="whether to scale the gradients based on 'angle' or 'None'")
parser.add_argument('--dataset_size_train', type=int, default=100, help="Length of train dataset i.e. # of iterations per epoch")
parser.add_argument('--dataset_size_valid', type=int, default=8, help="# of frames to render in the turntable video in validation")
parser.add_argument('--dataset_size_test', type=int, default=100, help="# of frames to render in the turntable video at test time")
opt = parser.parse_args()
if opt.O:
opt.fp16 = True
opt.cuda_ray = True
elif opt.O2:
opt.fp16 = True
opt.backbone = 'vanilla'
if opt.IF:
if 'SD' in opt.guidance:
opt.guidance.remove('SD')
opt.guidance.append('IF')
opt.latent_iter_ratio = 0 # must not do as_latent
opt.guidance_scale = 20
opt.images, opt.ref_radii, opt.ref_polars, opt.ref_azimuths, opt.zero123_ws = [], [], [], [], []
opt.default_zero123_w = 1
# parameters for image-conditioned generation
if opt.image is not None or opt.image_config is not None:
if opt.text is None:
# use zero123 guidance model when only providing image
opt.guidance = ['zero123']
opt.fovy_range = [opt.default_fovy, opt.default_fovy] # fix fov as zero123 doesn't support changing fov
opt.guidance_scale = 5
opt.lambda_3d_normal_smooth = 10
else:
# use stable-diffusion when providing both text and image
opt.guidance = ['SD', 'clip']
opt.guidance_scale = 10
opt.t_range = [0.2, 0.6]
opt.known_view_interval = 2
opt.lambda_3d_normal_smooth = 20
opt.bg_radius = -1
# smoothness
opt.lambda_entropy = 1
opt.lambda_orient = 1
# latent warmup is not needed
opt.latent_iter_ratio = 0
opt.albedo_iter_ratio = 0
# make shape init more stable
opt.progressive_view = True
# opt.progressive_level = True
if opt.image is not None:
opt.images += [opt.image]
opt.ref_radii += [opt.default_radius]
opt.ref_polars += [opt.default_polar]
opt.ref_azimuths += [opt.default_azimuth]
opt.zero123_ws += [opt.default_zero123_w]
if opt.image_config is not None:
# for multiview (zero123)
conf = pd.read_csv(opt.image_config, skipinitialspace=True)
opt.images += list(conf.image)
opt.ref_radii += list(conf.radius)
opt.ref_polars += list(conf.polar)
opt.ref_azimuths += list(conf.azimuth)
opt.zero123_ws += list(conf.zero123_weight)
if opt.image is None:
opt.default_radius = opt.ref_radii[0]
opt.default_polar = opt.ref_polars[0]
opt.default_azimuth = opt.ref_azimuths[0]
opt.default_zero123_w = opt.zero123_ws[0]
# reset to None
if len(opt.images) == 0:
opt.images = None
# default parameters for finetuning
if opt.dmtet:
opt.h = int(opt.h * opt.dmtet_reso_scale)
opt.w = int(opt.w * opt.dmtet_reso_scale)
opt.known_view_scale = 1
opt.t_range = [0.02, 0.50] # ref: magic3D
if opt.images is not None:
opt.lambda_normal = 0
opt.lambda_depth = 0
if opt.text is not None:
opt.t_range = [0.20, 0.50]
# assume finetuning
opt.latent_iter_ratio = 0
opt.albedo_iter_ratio = 0
opt.progressive_view = False
opt.progressive_level = False
# record full range for progressive view expansion
if opt.progressive_view:
# disable as they disturb progressive view
opt.jitter_pose = False
opt.uniform_sphere_rate = 0
# back up full range
opt.full_radius_range = opt.radius_range
opt.full_theta_range = opt.theta_range
opt.full_phi_range = opt.phi_range
opt.full_fovy_range = opt.fovy_range
if opt.backbone == 'vanilla':
from nerf.network import NeRFNetwork
elif opt.backbone == 'grid':
from nerf.network_grid import NeRFNetwork
elif opt.backbone == 'grid_tcnn':
from nerf.network_grid_tcnn import NeRFNetwork
elif opt.backbone == 'grid_taichi':
opt.cuda_ray = False
opt.taichi_ray = True
import taichi as ti
from nerf.network_grid_taichi import NeRFNetwork
taichi_half2_opt = True
taichi_init_args = {"arch": ti.cuda, "device_memory_GB": 4.0}
if taichi_half2_opt:
taichi_init_args["half2_vectorization"] = True
ti.init(**taichi_init_args)
else:
raise NotImplementedError(f'--backbone {opt.backbone} is not implemented!')
print(opt)
if opt.seed is not None:
seed_everything(int(opt.seed))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = NeRFNetwork(opt).to(device)
if opt.dmtet and opt.init_with != '':
if opt.init_with.endswith('.pth'):
# load pretrained weights to init dmtet
state_dict = torch.load(opt.init_with, map_location=device)
model.load_state_dict(state_dict['model'], strict=False)
if opt.cuda_ray:
model.mean_density = state_dict['mean_density']
model.init_tet()
else:
# assume a mesh to init dmtet (experimental, not working well now!)
import trimesh
mesh = trimesh.load(opt.init_with, force='mesh', skip_material=True, process=False)
model.init_tet(mesh=mesh)
print(model)
if opt.six_views:
guidance = None # no need to load guidance model at test
trainer = Trainer(' '.join(sys.argv), 'df', opt, model, guidance, device=device, workspace=opt.workspace, fp16=opt.fp16, use_checkpoint=opt.ckpt)
test_loader = NeRFDataset(opt, device=device, type='six_views', H=opt.H, W=opt.W, size=6).dataloader(batch_size=1)
trainer.test(test_loader, write_video=False)
if opt.save_mesh:
trainer.save_mesh()
elif opt.test:
guidance = None # no need to load guidance model at test
trainer = Trainer(' '.join(sys.argv), 'df', opt, model, guidance, device=device, workspace=opt.workspace, fp16=opt.fp16, use_checkpoint=opt.ckpt)
if opt.gui:
from nerf.gui import NeRFGUI
gui = NeRFGUI(opt, trainer)
gui.render()
else:
test_loader = NeRFDataset(opt, device=device, type='test', H=opt.H, W=opt.W, size=opt.dataset_size_test).dataloader(batch_size=1)
trainer.test(test_loader)
if opt.save_mesh:
trainer.save_mesh()
else:
train_loader = NeRFDataset(opt, device=device, type='train', H=opt.h, W=opt.w, size=opt.dataset_size_train * opt.batch_size).dataloader()
if opt.optim == 'adan':
from optimizer import Adan
# Adan usually requires a larger LR
optimizer = lambda model: Adan(model.get_params(5 * opt.lr), eps=1e-8, weight_decay=2e-5, max_grad_norm=5.0, foreach=False)
else: # adam
optimizer = lambda model: torch.optim.Adam(model.get_params(opt.lr), betas=(0.9, 0.99), eps=1e-15)
if opt.backbone == 'vanilla':
scheduler = lambda optimizer: optim.lr_scheduler.LambdaLR(optimizer, lambda iter: 0.1 ** min(iter / opt.iters, 1))
else:
scheduler = lambda optimizer: optim.lr_scheduler.LambdaLR(optimizer, lambda iter: 1) # fixed
# scheduler = lambda optimizer: optim.lr_scheduler.LambdaLR(optimizer, lambda iter: 0.1 ** min(iter / opt.iters, 1))
guidance = nn.ModuleDict()
if 'SD' in opt.guidance:
from guidance.sd_utils import StableDiffusion
guidance['SD'] = StableDiffusion(device, opt.fp16, opt.vram_O, opt.sd_version, opt.hf_key, opt.t_range)
if 'IF' in opt.guidance:
from guidance.if_utils import IF
guidance['IF'] = IF(device, opt.vram_O, opt.t_range)
if 'zero123' in opt.guidance:
from guidance.zero123_utils import Zero123
guidance['zero123'] = Zero123(device=device, fp16=opt.fp16, config=opt.zero123_config, ckpt=opt.zero123_ckpt, vram_O=opt.vram_O, t_range=opt.t_range, opt=opt)
if 'clip' in opt.guidance:
from guidance.clip_utils import CLIP
guidance['clip'] = CLIP(device)
trainer = Trainer(' '.join(sys.argv), 'df', opt, model, guidance, device=device, workspace=opt.workspace, optimizer=optimizer, ema_decay=0.95, fp16=opt.fp16, lr_scheduler=scheduler, use_checkpoint=opt.ckpt, scheduler_update_every_step=True)
trainer.default_view_data = train_loader._data.get_default_view_data()
if opt.gui:
from nerf.gui import NeRFGUI
gui = NeRFGUI(opt, trainer, train_loader)
gui.render()
else:
valid_loader = NeRFDataset(opt, device=device, type='val', H=opt.H, W=opt.W, size=opt.dataset_size_valid).dataloader(batch_size=1)
test_loader = NeRFDataset(opt, device=device, type='test', H=opt.H, W=opt.W, size=opt.dataset_size_test).dataloader(batch_size=1)
max_epoch = np.ceil(opt.iters / len(train_loader)).astype(np.int32)
trainer.train(train_loader, valid_loader, test_loader, max_epoch)
if opt.save_mesh:
trainer.save_mesh()
| [] |
2024-01-10 | yeren66/chatSummaryBot | gptAPITest~ApiTest.py | import os
import openai
from dotenv import load_dotenv
# Load environment variables from the .env file
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
openai.api_base = os.getenv("OPENAI_API_BASE")
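# Read the saved group-chat transcript that will be summarized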
with open('chata.txt', 'r', encoding='utf-8') as file:
content = file.read()
# print(content)
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "请帮我总结以下的一段群聊对话:\n" + content}]
)
print(completion.choices[0].message.content)
| [
"请帮我总结以下的一段群聊对话:\nPLACEHOLDER"
] |
2024-01-10 | yeren66/chatSummaryBot | gptAPI.py | import os
import openai
from dotenv import load_dotenv
# Load environment variables from the .env file
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
openai.api_base = os.getenv("OPENAI_API_BASE")
summary = "请帮我总结以下的一段群聊对话:"
question = "请帮我解决如下问题:"
# Read a file and return GPT's summary of (or solution to) its contents
def gptWithParam(path, model="gpt-3.5-turbo", mode=summary) -> str:
with open(path, 'r', encoding='utf-8') as file:
content = file.read()
completion = openai.ChatCompletion.create(
model=model,
messages=[{"role": "user", "content": summary + "\n" + content}]
)
return completion.choices[0].message.content
# General-purpose GPT call
def gpt(content, model='gpt-3.5-turbo') -> str:
completion = openai.ChatCompletion.create(
model=model,
messages=[{"role": "user", "content": content}]
)
return completion.choices[0].message.content | [
"请帮我总结以下的一段群聊对话:\nPLACEHOLDER"
] |
2024-01-10 | henryhmko/CAPTCHA_v4 | sentence_clustering.py | import cohere
import nltk
from nltk import tokenize
import random
from collections import defaultdict
import pickle
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from scipy.spatial.distance import cdist, pdist
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import pickle
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
nltk.download('averaged_perceptron_tagger')
nltk.download('punkt')
co = cohere.Client('INSERT_TOKEN_HERE')
text = open("Book 1 - The Philosopher's Stone.txt").read()
text = text.replace('”', ".")
text = text.replace('“', ".")
text = tokenize.sent_tokenize(text)
text = [s for s in text if len(tokenize.word_tokenize(s)) > 15]
#len(text)
response = co.embed(
model='large',
texts=text)
data = response.embeddings
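# Cluster the sentence embeddings into 6 groups with k-means, then pickle the sentences and their labels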
kmeans = KMeans(n_clusters=6)
label = kmeans.fit_predict(data)
with open('sentences', 'wb') as fp:
pickle.dump(text, fp)
with open('labels', 'wb') as fp:
pickle.dump(label, fp)
| [] |
2024-01-10 | TheNotary/edu-fastapi-demo | modules~guidance~guidance_router.py | from fastapi import APIRouter, Request
from pydantic import BaseModel, Field
from helpers.jinja_helpers import build_templates_and_router
import guidance
import pdb
templates, router, module_name = build_templates_and_router(__file__)
# examples = [
# {'input': 'I wrote about shakespeare',
# 'entities': [{'entity': 'I', 'time': 'present'}, {'entity': 'Shakespeare', 'time': '16th century'}],
# 'reasoning': 'I can write about Shakespeare because he lived in the past with respect to me.',
# 'answer': 'No'},
# {'input': 'Shakespeare wrote about me',
# 'entities': [{'entity': 'Shakespeare', 'time': '16th century'}, {'entity': 'I', 'time': 'present'}],
# 'reasoning': 'Shakespeare cannot have written about me, because he died before I was born',
# 'answer': 'Yes'}
# ]
# {{~! place the real question at the end }}
# define the guidance program
class InputData(BaseModel):
input_data: str = Field(..., min_length=1, description="Input data must be at least 1 character long")
@router.get("")
async def guidance(request: Request):
return templates.TemplateResponse("modules/" + module_name + "/index.html", {"request": request})
@router.post("")
async def guidance(json: InputData):
user_input = json.input_data
# structure_program = guidance(
# '''// The below code will
# // define a javascript function that plots the line for a graph in a canvas element.
# // An example input would be "y = 2x + 3" and the output should be a graph with the equation plotted on it.
# function plot(equationForLine) {
# {{~gen 'code'}}
# }
# ''')
guidance.llm = guidance.llms.Transformers("bigcode/starcoderplus", device=None)
# guidance.llm = guidance.llms.Transformers("TheBloke/Kimiko-7B-fp16", device=0)
structure_program = await guidance(user_input)
pdb.set_trace()
# messages = [{ "role": "user", "content": user_input }]
# resp = gptj.chat_completion(messages)
# execute the program
out = structure_program(
input='The T-rex bit my dog'
)
pdb.set_trace()
return "done"
# return resp['choices'][0]['message']['content']
| [] |
2024-01-10 | Sarfaraz021/Text-to-Image-Generator-uisng-Dall-E-OpenAI | tryme.py | import os
import openai
from config import key
openai.api_key = key
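# Request a single 1024x1024 image from the DALL-E endpoint for the given prompt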
response = openai.Image.create(prompt="a white siamese cat",
n=1,
size="1024x1024")
image_url = response['data'][0]['url']
print(image_url)
# Example response captured from the browser console: the API returned a
# 'created' timestamp and a 'data' array of image URL objects, e.g.
# {created: 1686647266, data: [{url: 'https://oaidalleapiprodscus.blob.core.windows.net/...'}, ...]}
| [] |
2024-01-10 | asimonson1125/Tiger-Games-2023 | src~gptInterface.py | import openai
import json
from os import environ as env
openai.api_key = env.get('chatGPT_API_Key', '')
def chat_with_gpt3(prompt):
response = openai.Completion.create(
engine='gpt-3.5-turbo-instruct', # Choose the ChatGPT model you prefer
prompt=prompt,
max_tokens=500, # Set the maximum length of the response
temperature=0, # Controls the randomness of the response
n=1, # Set the number of responses to generate
stop=None # Specify a stop token if desired
)
return response.choices[0].text.strip()
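# Parse a JSON-formatted model reply into Python objects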
def gpt2objects(instring):
return json.loads(instring)
def gptCodes(term):
prompt = """
If the following term is a math term, Write pseudocode to implement it, but say nothing else. The term is: `{textIn}`. Use the following example for summation:
x = [2, 3, 5, 4]
sum = 0
for value in x:
\tsum += value
return sum.
""".replace("{textIn}", term)
return chat_with_gpt3(prompt)
def gptDefines(term):
prompt = """
Write 5 definitions for the term `{textIn}` in layman's terms. Use the following format and say nothing else:
```
{
"definitions": ["first definition", "second definition", ... , "fifth definition"]
}
```
""".replace("{textIn}", term)
return chat_with_gpt3(prompt)
# return chat_with_gpt3(prompt)
| [
"{textIn}",
"\nWrite 5 definitions for the term `{textIn}` in layman's terms. Use the following format and say nothing else:\n```\n{\n\"definitions\": [\"first definition\", \"second definition\", ... , \"fifth definition\"]\n}\n```\n",
"\nIf the following term is a math term, Write pseudocode to implement it, but say nothing else. The term is: `{textIn}`. Use the following example for summation:\n\nx = [2, 3, 5, 4]\nsum = 0\nfor value in x:\n\tsum += value\nreturn sum.\n\n"
] |
2024-01-10 | LC1332/Chinese-Camel | apps~agents~agents.py | """
Gradio-based web app Agents that uses OpenAI API to generate
a chat between collaborative agents.
"""
import argparse
import os
import re
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple, Union
import gradio as gr
import openai
import openai.error
import tenacity
from apps.agents.text_utils import split_markdown_code
from camel.agents import RolePlaying, TaskSpecifyAgent
from camel.messages import AssistantChatMessage
REPO_ROOT = os.path.realpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "../.."))
ChatBotHistory = List[Tuple[Optional[str], Optional[str]]]
@dataclass
class State:
session: Optional[RolePlaying]
max_messages: int
chat: ChatBotHistory
saved_assistant_msg: Optional[AssistantChatMessage]
@classmethod
def empty(cls) -> 'State':
return cls(None, 0, [], None)
@staticmethod
def construct_inplace(
state: 'State', session: Optional[RolePlaying], max_messages: int,
chat: ChatBotHistory,
saved_assistant_msg: Optional[AssistantChatMessage]) -> None:
state.session = session
state.max_messages = max_messages
state.chat = chat
state.saved_assistant_msg = saved_assistant_msg
def parse_arguments():
""" Get command line arguments. """
parser = argparse.ArgumentParser("Camel data explorer")
parser.add_argument('--api-key', type=str, default=None,
help='OpenAI API key')
parser.add_argument('--share', type=bool, default=False,
help='Expose the web UI to Gradio')
parser.add_argument('--server-port', type=int, default=8080,
                        help='Port to run the web page on')
parser.add_argument('--inbrowser', type=bool, default=False,
                        help='Open the web UI in the default browser on launch')
parser.add_argument(
'--concurrency-count', type=int, default=1,
help='Number if concurrent threads at Gradio websocket queue. ' +
'Increase to serve more requests but keep an eye on RAM usage.')
args, unknown = parser.parse_known_args()
if len(unknown) > 0:
print("Unknown args: ", unknown)
return args
def load_roles(path: str) -> List[str]:
""" Load roles from list files.
Args:
path (str): Path to the TXT file.
Returns:
List[str]: List of roles.
"""
assert os.path.exists(path)
roles = []
with open(path, "r") as f:
lines = f.readlines()
for line in lines:
match = re.search(r"^\d+\.\s*(.+)\n*$", line)
if match:
role = match.group(1)
roles.append(role)
else:
print("Warning: no match")
return roles
def cleanup_on_launch(state) -> Tuple[State, ChatBotHistory, Dict]:
""" Prepare the UI for a new session.
Args:
state (State): Role playing state.
Returns:
Tuple[State, ChatBotHistory, Dict]:
- Updated state.
- Chatbot window contents.
- Start button state (disabled).
"""
# The line below breaks the every=N runner
# `state = State.empty()`
State.construct_inplace(state, None, 0, [], None)
return state, [], gr.update(interactive=False)
def role_playing_start(
state,
assistant: str,
user: str,
original_task: str,
max_messages: float,
with_task_specifier: bool,
word_limit: int,
) -> Union[Dict, Tuple[State, str, Union[str, Dict], ChatBotHistory, Dict]]:
""" Creates a role playing session.
Args:
state (State): Role playing state.
assistant (str): Contents of the Assistant field.
user (str): Contents of the User field.
original_task (str): Original task field.
with_task_specifier (bool): Enable/Disable task specifier.
word_limit (int): Limit of words for task specifier.
Returns:
Union[Dict, Tuple[State, str, Union[str, Dict], ChatBotHistory, Dict]]:
- Updated state.
- Generated specified task.
- Planned task (if any).
- Chatbot window contents.
- Progress bar contents.
"""
if state.session is not None:
print("Double click")
return {} # may fail
try:
task_specify_kwargs = dict(word_limit=word_limit) \
if with_task_specifier else None
session = RolePlaying(assistant, user, original_task,
with_task_specify=with_task_specifier,
task_specify_agent_kwargs=task_specify_kwargs,
with_task_planner=False)
except (openai.error.RateLimitError, tenacity.RetryError,
RuntimeError) as ex:
print("OpenAI API exception 0 " + str(ex))
return (state, str(ex), "", [], gr.update())
# Can't re-create a state like below since it
# breaks 'role_playing_chat_cont' runner with every=N.
# `state = State(session=session, max_messages=int(max_messages), chat=[],`
# ` saved_assistant_msg=None)`
State.construct_inplace(state, session, int(max_messages), [], None)
specified_task_prompt = session.specified_task_prompt \
if session.specified_task_prompt is not None else ""
planned_task_prompt = session.planned_task_prompt \
if session.planned_task_prompt is not None else ""
planned_task_upd = gr.update(
value=planned_task_prompt, visible=session.planned_task_prompt
is not None)
progress_update = gr.update(maximum=state.max_messages, value=1,
visible=True)
return (state, specified_task_prompt, planned_task_upd, state.chat,
progress_update)
def role_playing_chat_init(state) -> \
Union[Dict, Tuple[State, ChatBotHistory, Dict]]:
""" Initialize role playing.
Args:
state (State): Role playing state.
Returns:
Union[Dict, Tuple[State, ChatBotHistory, Dict]]:
- Updated state.
- Chatbot window contents.
- Progress bar contents.
"""
if state.session is None:
print("Error: session is none on role_playing_chat_init call")
return state, state.chat, gr.update()
try:
assistant_msg, _ = state.session.init_chat()
assistant_msg: AssistantChatMessage
except (openai.error.RateLimitError, tenacity.RetryError,
RuntimeError) as ex:
print("OpenAI API exception 1 " + str(ex))
state.session = None
return state, state.chat, gr.update()
state.saved_assistant_msg = assistant_msg
progress_update = gr.update(maximum=state.max_messages, value=1,
visible=True)
return state, state.chat, progress_update
# WORKAROUND: do not add type hints for session and chatbot_history
def role_playing_chat_cont(state) -> \
Tuple[State, ChatBotHistory, Dict, Dict]:
""" Produce a pair of messages by an assistant and a user.
To be run multiple times.
Args:
state (State): Role playing state.
Returns:
Union[Dict, Tuple[State, ChatBotHistory, Dict]]:
- Updated state.
- Chatbot window contents.
- Progress bar contents.
- Start button state (to be eventually enabled).
"""
if state.session is None:
return state, state.chat, gr.update(visible=False), gr.update()
if state.saved_assistant_msg is None:
return state, state.chat, gr.update(), gr.update()
try:
assistant_msgs, user_msgs = state.session.step(
state.saved_assistant_msg)
except (openai.error.RateLimitError, tenacity.RetryError,
RuntimeError) as ex:
print("OpenAI API exception 2 " + str(ex))
state.session = None
return state, state.chat, gr.update(), gr.update()
u_msg = user_msgs[0]
a_msg = assistant_msgs[0]
state.saved_assistant_msg = a_msg
state.chat.append((None, split_markdown_code(u_msg.content)))
state.chat.append((split_markdown_code(a_msg.content), None))
if len(state.chat) >= state.max_messages:
state.session = None
if "CAMEL_TASK_DONE" in a_msg.content or \
"CAMEL_TASK_DONE" in u_msg.content:
state.session = None
progress_update = gr.update(maximum=state.max_messages,
value=len(state.chat), visible=state.session
is not None)
start_bn_update = gr.update(interactive=state.session is None)
return state, state.chat, progress_update, start_bn_update
def stop_session(state) -> Tuple[State, Dict, Dict]:
""" Finish the session and leave chat contents as an artefact.
Args:
state (State): Role playing state.
Returns:
Union[Dict, Tuple[State, ChatBotHistory, Dict]]:
- Updated state.
- Progress bar contents.
- Start button state (to be eventually enabled).
"""
state.session = None
return state, gr.update(visible=False), gr.update(interactive=True)
def construct_ui(blocks, api_key: Optional[str] = None) -> None:
""" Build Gradio UI and populate with topics.
Args:
api_key (str): OpenAI API key.
Returns:
None
"""
if api_key is not None:
openai.api_key = api_key
assistant_role_path = \
os.path.join(REPO_ROOT, "data/ai_society/assistant_roles.txt")
user_role_path = \
os.path.join(REPO_ROOT, "data/ai_society/user_roles.txt")
assistant_roles = load_roles(assistant_role_path)
user_roles = load_roles(user_role_path)
assistant_role = "Python Programmer"
user_role = "Stock Trader"
default_task = "Develop a trading bot for the stock market"
with gr.Row():
with gr.Column(scale=1):
assistant_dd = gr.Dropdown(assistant_roles,
label="Example assistant roles",
value=assistant_role, interactive=True)
assistant_ta = gr.TextArea(label="Assistant role (EDIT ME)",
lines=1, interactive=True)
with gr.Column(scale=1):
user_dd = gr.Dropdown(user_roles, label="Example user roles",
value=user_role, interactive=True)
user_ta = gr.TextArea(label="User role (EDIT ME)", lines=1,
interactive=True)
with gr.Column(scale=1):
gr.Markdown(
"## CAMEL: Communicative Agents for \"Mind\" Exploration"
" of Large Scale Language Model Society\n"
"Github repo: [https://github.com/lightaime/camel]"
"(https://github.com/lightaime/camel)"
'<div style="display:flex; justify-content:center;">'
'<img src="https://raw.githubusercontent.com/lightaime/camel/'
'master/misc/logo.png" alt="Logo" style="max-width:50%;">'
'</div>')
with gr.Row():
with gr.Column(scale=9):
original_task_ta = gr.TextArea(
label="Give me a preliminary idea (EDIT ME)",
value=default_task, lines=1, interactive=True)
with gr.Column(scale=1):
universal_task_bn = gr.Button("Insert universal task")
with gr.Row():
with gr.Column():
with gr.Row():
task_specifier_cb = gr.Checkbox(value=True,
label="With task specifier")
with gr.Row():
ts_word_limit_nb = gr.Number(
value=TaskSpecifyAgent.DEFAULT_WORD_LIMIT,
label="Word limit for task specifier",
visible=task_specifier_cb.value)
with gr.Column():
num_messages_sl = gr.Slider(minimum=1, maximum=50, step=1,
value=10, interactive=True,
label="Messages to generate")
with gr.Column(scale=2):
with gr.Row():
start_bn = gr.Button("Make agents chat [takes time]",
elem_id="start_button")
with gr.Row():
clear_bn = gr.Button("Interrupt the current query")
progress_sl = gr.Slider(minimum=0, maximum=100, value=0, step=1,
label="Progress", interactive=False, visible=False)
specified_task_ta = gr.TextArea(
label="Specified task prompt given to the role-playing session"
" based on the original (simplistic) idea", lines=1, interactive=False)
task_prompt_ta = gr.TextArea(label="Planned task prompt", lines=1,
interactive=False, visible=False)
chatbot = gr.Chatbot(label="Chat between autonomous agents")
session_state = gr.State(State.empty())
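    # Wire up UI events: universal-task helper, task-specifier toggle, session start/continue/stop, and role dropdown syncing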
universal_task_bn.click(lambda: "Help me to do my job", None,
original_task_ta)
task_specifier_cb.change(lambda v: gr.update(visible=v), task_specifier_cb,
ts_word_limit_nb)
start_bn.click(cleanup_on_launch, session_state,
[session_state, chatbot, start_bn], queue=False) \
.then(role_playing_start,
[session_state, assistant_ta, user_ta,
original_task_ta, num_messages_sl,
task_specifier_cb, ts_word_limit_nb],
[session_state, specified_task_ta, task_prompt_ta,
chatbot, progress_sl],
queue=False) \
.then(role_playing_chat_init, session_state,
[session_state, chatbot, progress_sl], queue=False)
blocks.load(role_playing_chat_cont, session_state,
[session_state, chatbot, progress_sl, start_bn], every=0.5)
clear_bn.click(stop_session, session_state,
[session_state, progress_sl, start_bn])
assistant_dd.change(lambda dd: dd, assistant_dd, assistant_ta)
user_dd.change(lambda dd: dd, user_dd, user_ta)
blocks.load(lambda dd: dd, assistant_dd, assistant_ta)
blocks.load(lambda dd: dd, user_dd, user_ta)
def construct_blocks(api_key: Optional[str]):
""" Construct Agents app but do not launch it.
Args:
api_key (Optional[str]): OpenAI API key.
Returns:
gr.Blocks: Blocks instance.
"""
css_str = "#start_button {border: 3px solid #4CAF50; font-size: 20px;}"
with gr.Blocks(css=css_str) as blocks:
construct_ui(blocks, api_key)
return blocks
def main():
""" Entry point. """
args = parse_arguments()
print("Getting Agents web server online...")
blocks = construct_blocks(args.api_key)
blocks.queue(args.concurrency_count) \
.launch(share=args.share, inbrowser=args.inbrowser,
server_name="0.0.0.0", server_port=args.server_port,
debug=True)
print("Exiting.")
if __name__ == "__main__":
main()
| [
"Planned task prompt"
] |
2024-01-10 | LC1332/Chinese-Camel | test~agents~test_task_agent.py | from camel.agents import TaskPlannerAgent, TaskSpecifyAgent
from camel.configs import ChatGPTConfig
from camel.typing import TaskType
from camel.utils import openai_api_key_required
@openai_api_key_required
def test_task_specify_ai_society_agent():
original_task_prompt = "Improving stage presence and performance skills"
print(f"Original task prompt:\n{original_task_prompt}\n")
task_specify_agent = TaskSpecifyAgent(model_config=ChatGPTConfig(
temperature=1.0))
specified_task_prompt = task_specify_agent.step(
original_task_prompt, meta_dict=dict(assistant_role="Musician",
user_role="Student"))
assert ("{" and "}" not in task_specify_agent.task_specify_prompt)
print(f"Specified task prompt:\n{specified_task_prompt}\n")
@openai_api_key_required
def test_task_specify_code_agent():
original_task_prompt = "Modeling molecular dynamics"
print(f"Original task prompt:\n{original_task_prompt}\n")
task_specify_agent = TaskSpecifyAgent(
task_type=TaskType.CODE,
model_config=ChatGPTConfig(temperature=1.0),
)
specified_task_prompt = task_specify_agent.step(
original_task_prompt, meta_dict=dict(domain="Chemistry",
language="Python"))
    assert "{" not in task_specify_agent.task_specify_prompt
    assert "}" not in task_specify_agent.task_specify_prompt
print(f"Specified task prompt:\n{specified_task_prompt}\n")
@openai_api_key_required
def test_task_planner_agent():
original_task_prompt = "Modeling molecular dynamics"
print(f"Original task prompt:\n{original_task_prompt}\n")
task_specify_agent = TaskSpecifyAgent(
task_type=TaskType.CODE,
model_config=ChatGPTConfig(temperature=1.0),
)
specified_task_prompt = task_specify_agent.step(
original_task_prompt, meta_dict=dict(domain="Chemistry",
language="Python"))
print(f"Specified task prompt:\n{specified_task_prompt}\n")
task_planner_agent = TaskPlannerAgent(model_config=ChatGPTConfig(
temperature=1.0))
planned_task_prompt = task_planner_agent.step(specified_task_prompt)
print(f"Planned task prompt:\n{planned_task_prompt}\n")
| [
"Student",
"Modeling molecular dynamics",
"Improving stage presence and performance skills",
"Python",
"Musician"
] |
2024-01-10 | LC1332/Chinese-Camel | camel~agents~chat_agent.py | from typing import Any, Dict, List, Optional, Tuple
import openai
from tenacity import retry, stop_after_attempt, wait_exponential
from camel.configs import ChatGPTConfig
from camel.messages import ChatMessage, MessageType, SystemMessage
from camel.typing import ModelType
from camel.utils import get_model_token_limit, num_tokens_from_messages
class ChatAgent:
r"""Class for managing conversations of CAMEL Chat Agents.
Args:
system_message (SystemMessage): The system message for the chat agent.
model (ModelType, optional): The LLM model to use for generating
            responses. (default: :obj:`ModelType.GPT_3_5_TURBO`)
model_config (Any, optional): Configuration options for the LLM model.
(default: :obj:`None`)
message_window_size (int, optional): The maximum number of previous
messages to include in the context window. If `None`, no windowing
is performed. (default: :obj:`None`)
"""
def __init__(
self,
system_message: SystemMessage,
model: ModelType = ModelType.GPT_3_5_TURBO,
model_config: Any = None,
message_window_size: Optional[int] = None,
) -> None:
self.system_message = system_message
self.role_name = system_message.role_name
self.role_type = system_message.role_type
self.meta_dict = system_message.meta_dict
self.model = model
self.model_config = model_config or ChatGPTConfig()
self.model_token_limit = get_model_token_limit(self.model)
self.message_window_size = message_window_size
self.terminated = False
self.init_messages()
def reset(self) -> List[MessageType]:
r"""Resets the :obj:`ChatAgent` to its initial state and returns the
stored messages.
Returns:
List[MessageType]: The stored messages.
"""
self.terminated = False
self.init_messages()
return self.stored_messages
def get_info(
self,
id: Optional[str],
usage: Optional[Dict[str, int]],
termination_reasons: List[str],
num_tokens: int,
) -> Dict[str, Any]:
r"""Returns a dictionary containing information about the chat session.
Args:
id (str, optional): The ID of the chat session.
usage (Dict[str, int], optional): Information about the usage of
the LLM model.
termination_reasons (List[str]): The reasons for the termination of
the chat session.
num_tokens (int): The number of tokens used in the chat session.
Returns:
Dict[str, Any]: The chat session information.
"""
return {
"id": id,
"usage": usage,
"termination_reasons": termination_reasons,
"num_tokens": num_tokens,
}
def init_messages(self) -> None:
r"""Initializes the stored messages list with the initial system
message.
"""
self.stored_messages: List[MessageType] = [self.system_message]
def update_messages(self, message: ChatMessage) -> List[MessageType]:
r"""Updates the stored messages list with a new message.
Args:
message (ChatMessage): The new message to add to the stored
messages.
Returns:
List[ChatMessage]: The updated stored messages.
"""
self.stored_messages.append(message)
return self.stored_messages
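    # tenacity retries step() up to five times with exponential backoff
    # (5-60 s) whenever it raises, e.g. on transient OpenAI API errors.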
@retry(wait=wait_exponential(min=5, max=60), stop=stop_after_attempt(5))
def step(
self,
input_message: ChatMessage,
) -> Tuple[Optional[List[ChatMessage]], bool, Dict[str, Any]]:
r"""Performs a single step in the chat session by generating a response
to the input message.
Args:
input_message (ChatMessage): The input message to the agent.
Returns:
Tuple[Optional[List[ChatMessage]], bool, Dict[str, Any]]: A tuple
containing the output messages, a boolean indicating whether
the chat session has terminated, and information about the chat
session.
"""
messages = self.update_messages(input_message)
if self.message_window_size is not None and len(
messages) > self.message_window_size:
messages = [self.system_message
] + messages[-self.message_window_size:]
openai_messages = [message.to_openai_message() for message in messages]
num_tokens = num_tokens_from_messages(openai_messages, self.model)
if num_tokens < self.model_token_limit:
response = openai.ChatCompletion.create(
model=self.model.value,
messages=openai_messages,
**self.model_config.__dict__,
)
output_messages = [
ChatMessage(role_name=self.role_name, role_type=self.role_type,
meta_dict=dict(), **dict(choice["message"]))
for choice in response["choices"]
]
info = self.get_info(
response["id"],
response["usage"],
[
str(choice["finish_reason"])
for choice in response["choices"]
],
num_tokens,
)
else:
self.terminated = True
output_messages = None
info = self.get_info(
None,
None,
["max_tokens_exceeded"],
num_tokens,
)
return output_messages, self.terminated, info
def __repr__(self) -> str:
r"""Returns a string representation of the :obj:`ChatAgent`.
Returns:
str: The string representation of the :obj:`ChatAgent`.
"""
return f"ChatAgent({self.role_name}, {self.role_type}, {self.model})"
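# Minimal usage sketch (illustrative, not part of the original module). The
# SystemMessage/ChatMessage keywords and the RoleType import are inferred from
# how these classes are used in this file and in the package tests, and a
# valid OPENAI_API_KEY is assumed to be set in the environment.
#
#   from camel.messages import ChatMessage, SystemMessage
#   from camel.typing import RoleType
#
#   sys_msg = SystemMessage(role_name="Assistant",
#                           role_type=RoleType.ASSISTANT,
#                           content="You are a helpful assistant.")
#   agent = ChatAgent(sys_msg, message_window_size=6)
#   user_msg = ChatMessage(role_name="User", role_type=RoleType.USER,
#                          meta_dict=dict(), role="user", content="Hello!")
#   outputs, terminated, info = agent.step(user_msg)
#   if not terminated:            # outputs is None when the token limit is hit
#       print(outputs[0].content, info["num_tokens"])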
| [] |
2024-01-10 | LC1332/Chinese-Camel | test~agents~test_role_playing.py | import typing
import pytest
from camel.agents import ChatAgent, RolePlaying
from camel.messages import AssistantChatMessage, ChatMessage, SystemMessageType
from camel.typing import ModelType, TaskType
from camel.utils import openai_api_key_required
def test_role_playing_init():
role_playing = RolePlaying(
assistant_role_name="assistant",
user_role_name="user",
task_prompt="Perform the task",
with_task_specify=False,
with_task_planner=False,
with_critic_in_the_loop=False,
mode_type=ModelType.GPT_3_5_TURBO,
task_type=TaskType.AI_SOCIETY,
)
assert role_playing.with_task_specify is False
assert role_playing.with_task_planner is False
assert role_playing.with_critic_in_the_loop is False
assert role_playing.mode_type == ModelType.GPT_3_5_TURBO
assert role_playing.task_type == TaskType.AI_SOCIETY
assert role_playing.task_prompt == "Perform the task"
assert role_playing.specified_task_prompt is None
assert role_playing.planned_task_prompt is None
assert (type(role_playing.assistant_sys_msg)
in typing.get_args(SystemMessageType))
assert (type(role_playing.user_sys_msg)
in typing.get_args(SystemMessageType))
assert isinstance(role_playing.assistant_agent, ChatAgent)
assert isinstance(role_playing.user_agent, ChatAgent)
assert role_playing.critic is None
@openai_api_key_required
@pytest.mark.parametrize(
"task_type, extend_sys_msg_meta_dicts, extend_task_specify_meta_dict",
[(TaskType.AI_SOCIETY, None, None),
(TaskType.CODE, [dict(domain="science", language="python")] * 2,
dict(domain="science", language="python")),
(TaskType.MISALIGNMENT, None, None)])
def test_role_playing_step(task_type, extend_sys_msg_meta_dicts,
extend_task_specify_meta_dict):
role_playing = RolePlaying(
assistant_role_name="assistant",
user_role_name="user",
task_prompt="Perform the task",
task_type=task_type,
extend_sys_msg_meta_dicts=extend_sys_msg_meta_dicts,
extend_task_specify_meta_dict=extend_task_specify_meta_dict,
)
init_assistant_msg = AssistantChatMessage(role_name="assistant",
content="Hello")
print(role_playing.assistant_agent.system_message)
print(role_playing.user_agent.system_message)
(assistant_msg, assistant_terminated,
assistant_info), (user_msg, user_terminated,
user_info) = role_playing.step(init_assistant_msg)
assert isinstance(assistant_msg, ChatMessage)
assert assistant_terminated is False
assert assistant_info is not None
assert isinstance(user_msg, ChatMessage)
assert user_terminated is False
assert user_info is not None
| [
"Hello"
] |
2024-01-10 | dreamhunter2333/chatgpt-tarot-divination | router~chatgpt_router.py | import json
from typing import Optional
from fastapi.responses import StreamingResponse
import openai
import logging
from datetime import datetime
from fastapi import Depends, HTTPException, Request, status
from config import settings
from fastapi import APIRouter
from models import DivinationBody, User
from router.user import get_user
from .limiter import get_real_ipaddr, limiter
from .divination import DivinationFactory
from .file_logger import file_logger
openai.api_key = settings.api_key
openai.api_base = settings.api_base
router = APIRouter()
_logger = logging.getLogger(__name__)
STOP_WORDS = [
"忽略", "ignore", "指令", "命令", "command", "help", "帮助", "之前",
"幫助", "現在", "開始", "开始", "start", "restart", "重新开始", "重新開始",
"遵守", "遵循", "遵从", "遵從"
]
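# Rough prompt-injection guard: the /api/divination handler below rejects any
# prompt containing one of these words (Chinese and English variants of
# "ignore", "command", "restart", "obey", etc.) with HTTP 403.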
_logger.info(
f"Loaded divination types: {list(DivinationFactory.divination_map.keys())}"
)
@limiter.limit(settings.rate_limit)
def limit_when_not_login(request: Request):
    """
    Rate limit applied when the request is not from a logged-in user.
    """
def limit_when_login(request: Request, user: User):
    """
    Per-user rate limit applied when the request is from a logged-in user.
    """
@limiter.limit(settings.user_rate_limit, key_func=lambda: (user.user_name, user.login_type))
    def limit(request: Request):
        """
        Inner limiter keyed by (user_name, login_type) for the logged-in user.
        """
limit(request)
@router.post("/api/divination")
async def divination(
request: Request,
divination_body: DivinationBody,
user: Optional[User] = Depends(get_user)
):
# rate limit when not login
if not user:
limit_when_not_login(request)
else:
limit_when_login(request, user)
_logger.info(
f"Request from {get_real_ipaddr(request)}, user={user.json(ensure_ascii=False) if user else None} body={divination_body.json(ensure_ascii=False)}"
)
if any(w in divination_body.prompt.lower() for w in STOP_WORDS):
raise HTTPException(
status_code=403,
detail="Prompt contains stop words"
)
divination_obj = DivinationFactory.get(divination_body.prompt_type)
if not divination_obj:
raise HTTPException(
status_code=400,
            detail=f"Prompt type {divination_body.prompt_type} is not supported"
)
prompt, system_prompt = divination_obj.build_prompt(divination_body)
def get_openai_generator():
openai_stream = openai.ChatCompletion.create(
model=settings.model,
max_tokens=1000,
temperature=0.9,
top_p=1,
stream=True,
messages=[
{
"role": "system",
"content": system_prompt
},
{"role": "user", "content": prompt}
]
)
for event in openai_stream:
if "content" in event["choices"][0].delta:
current_response = event["choices"][0].delta.content
yield f"data: {json.dumps(current_response)}\n\n"
return StreamingResponse(get_openai_generator(), media_type='text/event-stream')
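# Client-side sketch (illustrative only). The endpoint path and the "prompt" /
# "prompt_type" fields come from this file; the "tarot" value, host, port, and
# any extra DivinationBody fields are assumptions -- check models.py and the
# registered DivinationFactory types before relying on them.
#
#   import json
#   import requests
#
#   with requests.post(
#       "http://localhost:8000/api/divination",
#       json={"prompt": "What does my day look like?", "prompt_type": "tarot"},
#       stream=True,
#   ) as resp:
#       resp.raise_for_status()
#       for line in resp.iter_lines(decode_unicode=True):
#           if line and line.startswith("data: "):
#               print(json.loads(line[len("data: "):]), end="", flush=True)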
| [] |