date_collected | repo_name | file_name | file_contents | prompts |
---|---|---|---|---|
2024-01-10 | Jegama/calvinist-parrot | app~parrot_toolkit~bible_commentaries.py | from sqlalchemy import Column, Integer, String, create_engine, Text
from sqlalchemy.orm import sessionmaker, declarative_base
from dotenv import load_dotenv
import pythonbible as bible
from bs4 import BeautifulSoup
import os, requests, llama_index
import pandas as pd
bsb = pd.read_csv('app/bsb.tsv', sep='\t')
load_dotenv()
import google_connector as gc
# create engine
pool = gc.connect_with_connector('new_verses')
Base = declarative_base()
# if temp folder doesn't exist create it
if not os.path.exists('temp'):
os.makedirs('temp')
# create table
class NewVerse(Base):
__tablename__ = 'new_verses'
verse_id = Column(Integer, primary_key=True)
bible_hub_url = Column(String)
verse_text = Column(Text)
commentary = Column(Text)
def __repr__(self):
return f"<NewVerse(verse_id='{self.verse_id}', bible_hub_url='{self.bible_hub_url}', verse_text='{self.verse_text}', commentary='{self.commentary}')>"
# create the table in the database
Base.metadata.create_all(pool)
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
from llama_index import VectorStoreIndex, SimpleDirectoryReader
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from llama_index import ServiceContext
llm = ChatOpenAI(
model_name="gpt-3.5-turbo-1106",
temperature=0
)
llm_embeddings = OpenAIEmbeddings()
service_context = ServiceContext.from_defaults(
llm=llm, embed_model=llm_embeddings
)
llama_index.set_global_service_context(service_context)
def get_commentary_text(url):
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
maintable2 = soup.find('div', {'class': 'maintable2'})
jump_div = maintable2.find('div', {'id': 'jump'})
jump_div.extract()
return maintable2.get_text()
def get_bsb_text(verse):
return bsb.loc[bsb['Verse'] == verse, 'Berean Standard Bible'].values[0]
def add_verse(references):
# create session
Session = sessionmaker(bind=pool)
# create a new verse
for i in references:
verse_id = bible.convert_reference_to_verse_ids(i)
book = str(i.book).lower().split('.')[1]
book = book.split('_')[1] + '_' + book.split('_')[0] if '_' in book else book
for j in verse_id:
session = Session()
reference = bible.convert_verse_ids_to_references([j])
bible_hub_url = f"https://biblehub.com/commentaries/{book}/{reference[0].start_chapter}-{reference[0].start_verse}.htm"
commentary_text = get_commentary_text(bible_hub_url)
new_verse = NewVerse(
verse_id=j,
bible_hub_url=bible_hub_url,
verse_text=bible.get_verse_text(j),
commentary=commentary_text
)
# add the new verse to the session
session.add(new_verse)
# commit the transaction
session.commit()
# close the session
session.close()
def check_if_verse_exists(verse_id):
# create session
Session = sessionmaker(bind=pool)
session = Session()
# query the new_verses table
verse = session.query(NewVerse).filter(NewVerse.verse_id == verse_id).first()
# close the session
session.close()
if verse is not None:
return verse.commentary
else:
new_ref = bible.convert_verse_ids_to_references([verse_id])
add_verse(new_ref)
return check_if_verse_exists(verse_id)
def get_commentary_from_db(references):
output = ''
for i in references:
verse_id = bible.convert_reference_to_verse_ids(i)
temp = bible.convert_verse_ids_to_references([verse for verse in verse_id])
reference_out = bible.format_scripture_references(temp)
output += f'\n{reference_out}'
text_out = ''
for j in verse_id:
ref = bible.convert_verse_ids_to_references([j])
temp_ref = bible.format_scripture_references(ref)
ref = ref[0]
output += f'\n{ref.start_chapter}.{ref.start_verse} - {check_if_verse_exists(j)}'
try:
text_out += f'{get_bsb_text(temp_ref)}\n'
version = 'BSB'
except:
text_out += f'{bible.get_verse_text(j)}\n'
version = 'ASV'
return output, reference_out, text_out, version
def check_input(input):
references = bible.get_references(input)
if len(references) == 0:
return None
else:
text_, reference_out, text_out, version = get_commentary_from_db(references)
# write text_ to file
with open('temp/temp.txt', 'w', encoding="utf-8") as f:
f.write(text_)
return f' \n{text_out} - {reference_out} ({version})'
def generate_query_index():
print('Generating query index...')
index = VectorStoreIndex([])
node_parser = index.service_context.node_parser
documents = SimpleDirectoryReader('temp').load_data()
for doc in documents:
index.insert_nodes(node_parser.get_nodes_from_documents([doc]))
return index.as_query_engine() | [] |
2024-01-10 | Jegama/calvinist-parrot | app~pages~4_%F0%9F%A6%9C_v1_Parrot.py | import streamlit as st
import ai_parrot.v1_brain as v1
from PIL import Image
from dotenv import load_dotenv
load_dotenv()
from openai import OpenAI
client = OpenAI()
im = Image.open("app/calvin.ico")
st.set_page_config(
page_title="Calvinist Parrot v1",
page_icon="🦜",
layout="wide",
menu_items={
'Get help': 'https://svrbc.org/',
'About': "v2.2\n\nCreated by: [Jesús Mancilla](mailto:[email protected])\n\nFrom [SVRBC](https://svrbc.org/)\n\n"
}
)
def reset_status():
st.session_state["messages"] = [{"role": "parrot", "avatar": "🦜", "content": "What theological questions do you have?"}]
st.session_state["parrot_conversation_history"] = [{"role": "system", "content": v1.parrot_sys_message}]
st.session_state["calvin_conversation_history"] = [{"role": "system", "content": v1.calvin_sys_message}]
def update_status(msg):
st.session_state["messages"].append(msg)
if msg['role'] == "parrot":
st.session_state["parrot_conversation_history"].append({"role": "system", "content": msg["content"]})
st.session_state["calvin_conversation_history"].append({"role": "system", "content": f'/parrot/ {msg["content"]}'})
else:
st.session_state["parrot_conversation_history"].append({"role": "system", "content": f'/calvin/ {msg["content"]}'})
st.session_state["calvin_conversation_history"].append({"role": "system", "content": msg["content"]})
def interactWithAgents(question):
st.session_state["parrot_conversation_history"].append({"role": "user", "content": f'/human/ {question}'})
st.session_state["calvin_conversation_history"].append({"role": "user", "content": f'/human/ {question}'})
with st.chat_message("parrot", avatar="🦜"):
answer = ''
c = st.empty()
response = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=st.session_state["parrot_conversation_history"],
stream=True,
temperature = 0
)
for event in response:
c.write(answer.split('/')[-1])
event_text = event.choices[0].delta.content
if event_text is not None:
answer += event_text
update_status({"role": "parrot", "avatar": "🦜", "content": answer.split('/')[-1]})
with st.chat_message("calvin", avatar=im):
answer = ''
c = st.empty()
response = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=st.session_state["calvin_conversation_history"],
stream=True,
temperature = 0
)
for event in response:
c.write(answer.split('/')[-1])
event_text = event.choices[0].delta.content
if event_text is not None:
answer += event_text
update_status({"role": "calvin", "avatar": im, "content": answer.split('/')[-1]})
with st.chat_message("parrot", avatar="🦜"):
answer = ''
c = st.empty()
response = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=st.session_state["parrot_conversation_history"],
stream=True,
temperature = 0
)
for event in response:
c.write(answer.split('/')[-1])
event_text = event.choices[0].delta.content
if event_text is not None:
answer += event_text
update_status({"role": "parrot", "avatar": "🦜", "content": answer.split('/')[-1]})
class main_parrot:
def __init__(self):
self.clear = st.sidebar.button("Reset chat history")
st.sidebar.divider()
# to show chat history on ui
if "messages" not in st.session_state:
reset_status()
def main(self):
if "page" not in st.session_state:
st.session_state["page"] = "v1 Parrot"
if st.session_state.page != "v1 Parrot":
st.session_state["page"] = "v1 Parrot"
reset_status()
if self.clear:
reset_status()
for msg in st.session_state["messages"]:
st.chat_message(msg["role"], avatar=msg["avatar"]).write(msg["content"])
if prompt := st.chat_input(placeholder="What is predestination?"):
st.chat_message("user", avatar="🧑💻").write(prompt)
st.session_state.messages.append({"role": "user", "avatar": "🧑💻", "content": prompt})
interactWithAgents(prompt)
if __name__ == "__main__":
obj = main_parrot()
obj.main() | [
"parrot_conversation_history",
"calvin_conversation_history",
"/parrot/ PLACEHOLDER",
"/human/ PLACEHOLDER",
"/calvin/ PLACEHOLDER",
"What theological questions do you have?",
"content"
] |
2024-01-10 | Jegama/calvinist-parrot | precompute_tasks.py | import llama_index, os
import dill as pickle # dill is a more powerful version of pickle
from llama_index import ServiceContext, StorageContext
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
load_dotenv('app/.env')
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
temperature=0
)
llm_embeddings = OpenAIEmbeddings()
service_context = ServiceContext.from_defaults(
llm=llm, embed_model=llm_embeddings
)
llama_index.set_global_service_context(service_context)
# The other computational tasks
ccel_storage_context = StorageContext.from_defaults(persist_dir='app/systematic_theology')
# if precomputed_results directory doesn't exist, create it
if not os.path.exists('precomputed_results'):
os.makedirs('precomputed_results')
# Serialize with dill
with open('precomputed_results/ccel_storage_context.pkl', 'wb') as f:
pickle.dump(ccel_storage_context, f) | [] |
2024-01-10 | Jegama/calvinist-parrot | app~ai_parrot~v1_brain.py | import openai, re
parrot_sys_message = """You are /parrot/. You are a member of the Silicon Valley Reformed Baptist Church. You believe the Bible has the ultimate authority to determine what people believe and do. Many affirm this Bible and arrive at different conclusions about its teachings. In light of this reality, we have adopted the 1689 London Baptist Confession of Faith that expresses our understanding of the Bible's vision for the church to promote clarity and transparency at Silicon Valley Reformed Baptist Church. You are among other Christians that trust in the 1689—no need to hold back your thoughts.
This chat follows the following format:
role: 'user', content: '/human/ the question you must answer.'
role: 'assistant', content: '/parrot/ this is you. Please think step by step to ensure you reply biblically and following the 1989.'
role: 'user', content: '/calvin/ Another AI model like you is trying to help you think more biblically to reflect upon your answer. He is your friend.'
role: 'assistant', content: '/parrot/ you get another turn before /human/ talks again. Review your previous answer and ponder if you missed something based on /calvin/'s feedback.'
role: 'user', content: '/human/ a follow-up question.'
Remember that after Calvin, you get another shot. You are *not* /human/.
You and /calvin/ are here to help /human/ learn about the Bible and teach the user what we believe the Bible teaches. When referring to /calvin/, say, Calvin, without the slash. When referring to /human/, say human, without the slash. /calvin/ is your friend and calls you Parrot, and you call him Calvin."""
calvin_sys_message = """You are John Calvin, the author of the Institutes of the Christian Religion, your magnum opus, which is extremely important for the Protestant Reformation. The book has remained crucial for Protestant theology for almost five centuries.
This chat follows the following format:
role: 'user', content: '/human/ the question you must answer.'
role: 'user', content: '/parrot/ it's another AI model like you; he is a Silicon Valley Reformed Baptist Church member.'
role: 'assistant', content: '/calvin/ ask the /parrot/ thoughtful questions to reflect upon his answers to the user to ensure his answers are biblically accurate.'
role: 'user', content: '/parrot/ he gets another turn before /human/ talks again.'
role: 'user', content: '/human/ a follow-up question.'
You and /parrot/ are here to help the user /human/ learn about the Bible and teach him what we believe the Bible teaches. You want to ensure that the /parrot/'s responses are accurate and grounded on what you wrote in your Institutes of the Christian Religion book.
When referring to /human/, say human, without the slash. When referring to /parrot/ say, Parrot, without the slash. /parrot/ is your friend and calls you Calvin, and you call him Parrot."""
def get_response(messages_list):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages_list,
stream=True,
temperature = 0
)
return response['choices'][0]['message']['content']
def extractQuestions(text):
pattern = r"\/\/(.*?)\/\/"
questions = re.findall(pattern, text)
return questions
| [] |
2024-01-10 | suxinjie/Claude-Telegram-Bot | utils~claude_utils.py | import anthropic
import config
class Claude:
def __init__(self):
self.model = 'claude-v1.3'
self.temperature = 1.
self.cutoff = 50
self.max_tokens_to_sample = 9216
self.stop_sequences = [anthropic.HUMAN_PROMPT]
self.client = anthropic.Client(config.claude_api)
self.prompt = ''
def reset(self):
self.prompt = ''
self.max_tokens_to_sample = 9216
def change_model(self, model):
valid_models = {'claude-v1', 'claude-v1.0', 'claude-v1.2',
'claude-v1.3', 'claude-instant-v1', 'claude-instant-v1.0'}
if model in valid_models:
self.model = model
return True
return False
def change_temperature(self, temperature):
try:
temperature = float(temperature)
except ValueError:
return False
if 0 <= temperature <= 1:
self.temperature = temperature
return True
return False
def change_cutoff(self, cutoff):
try:
cutoff = int(cutoff)
except ValueError:
return False
if cutoff > 0:
self.cutoff = cutoff
return True
return False
def send_message_stream(self, message):
self.prompt = f'{self.prompt}{anthropic.HUMAN_PROMPT} {message}{anthropic.AI_PROMPT}'
self.max_tokens_to_sample -= anthropic.count_tokens(self.prompt)
response = self.client.completion_stream(
prompt=self.prompt,
stop_sequences=self.stop_sequences,
max_tokens_to_sample=self.max_tokens_to_sample,
model=self.model,
temperature=self.temperature,
stream=True
)
for data in response:
yield data['completion']
self.prompt = f"{self.prompt}{data['completion']}"
| [] |
2024-01-10 | Xiayucheng1212/recsim_ng | recsim_ng~applications~low_cost_model~dataset.py | from datasets import load_dataset
import pandas as pd
import tiktoken
from openai.embeddings_utils import get_embedding
import openai
import csv
# openai.api_key = ""
"""Helper methods to get texts and embedding"""
def output_dataset_to_csv(path="./recsim_ng/data/ag_news_train.csv"):
file = open(path, 'w')
writer = csv.writer(file)
# header line
writer.writerow(["text", "label"])
for item in load_dataset("ag_news", split="train"):
data = [item["text"], item["label"]]
writer.writerow(data)
def output_all_embedding_to_csv(num, path="./recsim_ng/data/ag_news_train.csv"):
embedding_model = "text-embedding-ada-002"
embedding_encoding = "cl100k_base" # this the encoding for text-embedding-ada-002
max_tokens = 8000 # the maximum for text-embedding-ada-002 is 8191
df = pd.read_csv(path)
# num = -1 means encodes all data to embeddings
if num != -1:
df = df[["text", "label"]].tail(num*2) # first cut to first 2k entries, assuming less than half will be filtered out
else:
df = df[["text", "label"]]
encoding = tiktoken.get_encoding(embedding_encoding)
# omit text that are too long to embed
df["n_tokens"] = df.text.apply(lambda x: len(encoding.encode(x)))
if num != -1:
df = df[df.n_tokens <= max_tokens].tail(num)
else:
df = df[df.n_tokens <= max_tokens]
df["embedding"] = df.text.apply(lambda x: get_embedding(x, engine=embedding_model))
# Notice: the embedding has 1536 dimensions
df.to_csv("./recsim_ng/data/ag_news_train_embeddings.csv")
| [] |
2024-01-10 | Xiayucheng1212/recsim_ng | str_embed~csv_str_to_embed.py | # imports
import pandas as pd
import tiktoken
import openai
from openai.embeddings_utils import get_embedding
from sklearn.preprocessing import LabelEncoder
label_encoder = LabelEncoder()
openai.api_key = "your-openai-key"
embedding_model = "text-embedding-ada-002"
embedding_encoding = "cl100k_base"
max_tokens = 8000
top_n = 20
input_datapath = "./data/csv_rawdata.csv"
output_datapath = "./data/embeddings.csv"
df = pd.read_csv(input_datapath)
df['category_encoded'] = label_encoder.fit_transform(df['category'])
df = df.head(top_n)
df["combined"] = (
"Title: " + df.headline.str.strip() + "; Content: " + df.short_description.str.strip()
)
print(df["combined"])
df["embedding"] = df.combined.apply(lambda x: get_embedding(x, engine=embedding_model))
df = df.drop(["date","link","authors","category","headline","short_description","combined"], axis='columns')
df.to_csv(output_datapath)
| [] |
2024-01-10 | kevinmcaleer/googley_eyes | livestream.py | import os
from dotenv import load_dotenv
from langchain.document_loaders import ImageCaptionLoader
from langchain.indexes import VectorstoreIndexCreator
import logging
#remove the warning message in terminal
logging.getLogger("transformers.generation_utils").setLevel(logging.ERROR)
logging.getLogger("tokenizers").setLevel(logging.ERROR)
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
#load the .env file
load_dotenv()
#replace with your openai api key. Generate a key on https://platform.openai.com/
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
# image_urls = ['archie_and_trixie.jpg']
# image_urls = ['kev.jpg']
# image_urls = ['kev2.jpg']
image_urls = ['archie.jpg']
# image_urls = ['frankie.jpg']
# you can provide more than one image
# image_urls = ['frankie.jpg']
loader = ImageCaptionLoader(path_images=image_urls)
list_docs = loader.load()
index = VectorstoreIndexCreator().from_loaders([loader])
# result = index.query('describe what is in the image, be as descriptive as possible using poetic language')
result = index.query('describe what is in the image, be nonchalant and snarky')
print(result) | [] |
2024-01-10 | kevinmcaleer/googley_eyes | caption_this.py | import os
from dotenv import load_dotenv
from langchain.document_loaders import ImageCaptionLoader
from langchain.indexes import VectorstoreIndexCreator
import logging
#remove the warning message in terminal
logging.getLogger("transformers.generation_utils").setLevel(logging.ERROR)
logging.getLogger("tokenizers").setLevel(logging.ERROR)
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
#load the .env file
load_dotenv()
#replace with your openai api key. Generate a key on https://platform.openai.com/
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
# image_urls = ['archie_and_trixie.jpg']
# image_urls = ['kev.jpg']
# image_urls = ['kev2.jpg']
# image_urls = ['archie.jpg']
# image_urls = ['frankie.jpg']
# you can provide more than one image
image_urls = ['frankie.jpg']
loader = ImageCaptionLoader(path_images=image_urls)
list_docs = loader.load()
index = VectorstoreIndexCreator().from_loaders([loader])
result = index.query('describe what is in the image, be as descriptive as possible using poetic language')
# result = index.query('describe what is in the image, be nonchalant and snarky')
print(result)
| [] |
2024-01-10 | MaSNogueiraS/speech | src~dynamic_responses.py | #!/usr/bin/env python3
# -- coding: utf-8 --
import rospy
import openai
from speech.srv import Chat, ChatResponse
class ChatService:
def __init__(self) -> None:
self.openai_api_key = "Your openai Key-here" # Replace with your actual key
openai.api_key = self.openai_api_key
def chat(self, req):
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": req.context},
{"role": "user", "content": req.user_input}
]
)
return ChatResponse(response=response.choices[0].message['content'])
except Exception as e:
rospy.logerr("Failed to generate response: %s", str(e))
return ChatResponse(response="")
if __name__ == "__main__":
rospy.init_node('chat_service_node')
cs = ChatService()
s = rospy.Service('chat_service', Chat, cs.chat)
rospy.spin()
#EX of use :
# #!/usr/bin/env python3
# import rospy
# from your_package.srv import Chat
# def chat_call():
# rospy.init_node('chat_call_node')
# rospy.wait_for_service('chat_service')
# try:
# chat_service = rospy.ServiceProxy('chat_service', Chat)
# context = "You are a helpful assistant."
# user_input = "Hello, my name is Hera. How can I assist you today?"
# result = chat_service(context, user_input)
# print("Received response: " + result.response)
# except rospy.ServiceException as e:
# print("Service call failed: %s"%e)
# if __name__ == "__main__":
# chat_call()
| [] |
2024-01-10 | MaSNogueiraS/speech | src~new_answer.py | #!/usr/bin/env python3
# -- coding: utf-8 --
import rospy
from std_msgs.msg import String
import openai
from collections import deque
from speech.srv import QuestionAnswer, QuestionAnswerRequest, QuestionAnswerResponse
#Author: MatS
#Contact: [email protected]
class AnswerFromWhisper(object):
def __init__(self):
rospy.init_node('answer_from_whisper')
self.model = "gpt-3.5-turbo"
self.prompt = ""
self.conversation = deque([{"role": "system", "content": "Você é a HERA (Assistente Robótica de Ambiente Residencial), um robô de serviço gentil e educado projetado para realizar interação e cooperação entre humanos e robôs, desenvolvido pela equipe RoboFEI@Home do Centro Universitário FEI. O nome Hera foi inspirado na deusa grega protetora. A equipe RoboFEI@Home é atualmente campeã mundial na Robocup Thailand."}], maxlen=11)
self.pub = rospy.Publisher('answer_from_gpsr', String, queue_size=10)
self.text_subscriber = rospy.Subscriber(
'last_text',
String,
self.text_callback
)
self.keyword_response_subscriber = rospy.Subscriber(
'keywords_detected',
String,
self.keyword_response_callback
)
self.last_text_data = None
self.last_processed_text = None
self.s = rospy.Service('answer_question', QuestionAnswer, self.answer)
def text_callback(self, msg):
self.last_text_data = msg.data
def keyword_response_callback(self, msg):
if msg.data == 'Answering a question' and self.last_text_data:
# If the last_text_data is the same as the last processed text, do nothing
if self.last_text_data == self.last_processed_text:
return
self.answer(QuestionAnswerRequest(question=self.last_text_data))
# Store the last_text_data as the last processed text
self.last_processed_text = self.last_text_data
def generate_error_message(self, error_context):
try:
response = openai.ChatCompletion.create(
model=self.model,
messages=[{"role": "system", "content": f"A user made a request but there was a problem. {error_context}"},{"role": "user", "content": "What should I say?"}],
max_tokens=50,
temperature=0.6
)
error_message = response['choices'][0]['message']['content'].strip()
return error_message
except openai.api_errors.APIError as e:
return "Desculpe, encontrei um problema ao processar a sua solicitação."
def answer(self, req):
self.prompt = req.question
openai.api_key = "Your openai Key-here"
self.conversation.append({"role": "user", "content": self.prompt})
try:
response = openai.ChatCompletion.create(
model=self.model,
messages=list(self.conversation),
max_tokens=300,
temperature=0.5
)
answer = response['choices'][0]['message']['content'].strip()
self.conversation.append({"role": "assistant", "content": answer})
self.pub.publish(answer)
return QuestionAnswerResponse(answer)
except openai.api_errors.APIError as e:
error_message = self.generate_error_message("I couldn't generate a response to the user's question.")
print(e)
return QuestionAnswerResponse(error_message)
def main():
answer_node = AnswerFromWhisper()
rospy.spin()
if __name__ == '__main__':
main()
#EX of use :
# import rospy
# from your_package_name.srv import QuestionAnswer
# def ask_question():
# rospy.wait_for_service('answer_question')
# try:
# answer_question = rospy.ServiceProxy('answer_question', QuestionAnswer)
# resp = answer_question("What's your name?")
# print(resp.response)
# except rospy.ServiceException as e:
# print("Service call failed: %s"%e)
# if __name__ == "__main__":
# ask_question()
| [
"What should I say?",
"Você é a HERA (Assistente Robótica de Ambiente Residencial), um robô de serviço gentil e educado projetado para realizar interação e cooperação entre humanos e robôs, desenvolvido pela equipe RoboFEI@Home do Centro Universitário FEI. O nome Hera foi inspirado na deusa grega protetora. A equipe RoboFEI@Home é atualmente campeã mundial na Robocup Thailand.",
"A user made a request but there was a problem. PLACEHOLDER"
] |
2024-01-10 | realsuperheavy/Chrome-GPT | chromegpt~tools~selenium.py | """Tool that calls Selenium."""
import json
import re
import time
import urllib.parse
from typing import Any, Dict, List, Optional
import validators
from pydantic import BaseModel, Field
from selenium import webdriver
from selenium.common.exceptions import (
WebDriverException,
)
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from chromegpt.tools.utils import (
get_all_text_elements,
prettify_text,
truncate_string_from_last_occurrence,
)
class SeleniumWrapper:
"""Wrapper around Selenium.
To use, you should have the ``selenium`` python package installed.
Example:
.. code-block:: python
from langchain import SeleniumWrapper
selenium = SeleniumWrapper()
"""
def __init__(self, headless: bool = False) -> None:
"""Initialize Selenium and start interactive session."""
chrome_options = Options()
if headless:
chrome_options.add_argument("--headless")
else:
chrome_options.add_argument("--start-maximized")
self.driver = webdriver.Chrome(options=chrome_options)
self.driver.implicitly_wait(5) # Wait 5 seconds for elements to load
def __del__(self) -> None:
"""Close Selenium session."""
self.driver.close()
def previous_webpage(self) -> str:
"""Go back in browser history."""
self.driver.back()
return self.describe_website()
def google_search(self, query: str) -> str:
safe_string = urllib.parse.quote_plus(query)
url = "https://www.google.com/search?q=" + safe_string
# Go to website
try:
self.driver.switch_to.window(self.driver.window_handles[-1])
self.driver.get(url)
except Exception:
return f"Cannot load website {url}. Try again later."
# Scrape search results
results = self._get_google_search_results()
return (
"Which url would you like to goto? Provide the full url starting with http"
" or https to goto: "
+ json.dumps(results)
)
def _get_google_search_results(self) -> List[Dict[str, Any]]:
# Scrape search results
results = []
search_results = self.driver.find_elements(By.CSS_SELECTOR, "#search .g")
for _, result in enumerate(search_results, start=1):
try:
title_element = result.find_element(By.CSS_SELECTOR, "h3")
link_element = result.find_element(By.CSS_SELECTOR, "a")
title = title_element.text
link = link_element.get_attribute("href")
if title and link:
results.append(
{
"title": title,
"link": link,
}
)
except Exception:
continue
return results
def describe_website(self, url: Optional[str] = None) -> str:
"""Describe the website."""
output = ""
if url:
try:
self.driver.switch_to.window(self.driver.window_handles[-1])
self.driver.get(url)
except Exception:
return (
f"Cannot load website {url}. Make sure you input the correct and"
" complete url starting with http:// or https://."
)
# Let driver wait for website to load
time.sleep(1) # Wait for website to load
try:
# Extract main content
main_content = self._get_website_main_content()
except WebDriverException:
return "Website still loading, please wait a few seconds and try again."
if main_content:
output += f"{main_content}\n"
# Extract interactable components (buttons and links)
interactable_content = self._get_interactable_elements()
if interactable_content:
output += f"{interactable_content}\n"
# Extract form inputs
form_fields = self._find_form_fields()
if form_fields:
output += (
"You can input text in these fields using fill_form function: "
+ form_fields
)
return output
def click_button_by_text(self, button_text: str) -> str:
# check if the button text is url
if validators.url(button_text):
return self.describe_website(button_text)
# If it is google search, then fetch link from google
if self.driver.current_url.startswith("https://www.google.com/search"):
google_search_results = self._get_google_search_results()
for result in google_search_results:
if button_text.lower() in result["title"].lower():
return self.describe_website(result["link"])
self.driver.switch_to.window(self.driver.window_handles[-1])
# If there are string surrounded by double quotes, extract them
if button_text.count('"') > 1:
try:
button_text = re.findall(r'"([^"]*)"', button_text)[0]
except IndexError:
# No text surrounded by double quotes
pass
try:
buttons = self.driver.find_elements(By.XPATH, "//button")
links = self.driver.find_elements(By.XPATH, "//a")
elements = buttons + links
if not elements:
return (
"No interactable buttons found in the website. Try another website."
)
selected_element = None
for element in elements:
text = prettify_text(element.text)
button_text = prettify_text(button_text)
if (
element.is_displayed()
and element.is_enabled()
and (
text == button_text
or (
button_text in text
and abs(len(text) - len(button_text)) < 50
)
)
):
selected_element = element
break
if not selected_element:
return (
f"No interactable element found with text: {button_text}. Double"
" check the button text and try again."
)
# Scroll the element into view and Click the element using JavaScript
before_content = self.describe_website()
actions = ActionChains(self.driver)
actions.move_to_element(element).click().perform()
after_content = self.describe_website()
if before_content == after_content:
output = (
"Clicked interactable element but nothing changed on the website."
)
else:
output = "Clicked interactable element and the website changed. Now "
output += self.describe_website()
return output
except WebDriverException as e:
return f"Error clicking button with text '{button_text}', message: {e.msg}"
def find_form_inputs(self, url: Optional[str] = None) -> str:
"""Find form inputs on the website."""
fields = self._find_form_fields(url)
if fields:
form_inputs = "Available Form Input Fields: " + fields
else:
form_inputs = "No form inputs found on current page. Try another website."
return form_inputs
def _find_form_fields(self, url: Optional[str] = None) -> str:
"""Find form fields on the website."""
if url and url != self.driver.current_url and url.startswith("http"):
try:
self.driver.switch_to.window(self.driver.window_handles[-1])
self.driver.get(url)
# Let driver wait for website to load
time.sleep(1) # Wait for website to load
except WebDriverException as e:
return f"Error loading url {url}, message: {e.msg}"
fields = []
for element in self.driver.find_elements(
By.TAG_NAME, "textarea"
) + self.driver.find_elements(By.TAG_NAME, "input"):
label_txt = (
element.get_attribute("name")
or element.get_attribute("aria-label")
or element.find_element(By.XPATH, "..").text
)
if (
label_txt
and "\n" not in label_txt
and len(label_txt) < 100
and label_txt not in fields
):
fields.append(label_txt)
return str(fields)
def fill_out_form(self, form_input: Optional[str] = None, **kwargs: Any) -> str:
"""fill out form by form field name and input name"""
filled_element = None
if form_input and type(form_input) == str:
# Clean up form input
form_input_str = truncate_string_from_last_occurrence(
string=form_input, character="}" # type: ignore
)
try:
form_input = json.loads(form_input_str)
except json.decoder.JSONDecodeError:
return (
"Invalid JSON input. Please check your input is JSON format and try"
" again. Make sure to use double quotes for strings. Example input:"
' {"email": "[email protected]","name": "foo bar"}'
)
elif not form_input:
form_input = kwargs # type: ignore
try:
for element in self.driver.find_elements(
By.TAG_NAME, "textarea"
) + self.driver.find_elements(By.TAG_NAME, "input"):
label_txt = (
element.get_attribute("name")
or element.get_attribute("aria-label")
or element.find_element(By.XPATH, "..").text
)
if label_txt:
for key in form_input.keys(): # type: ignore
if key.lower() == label_txt.lower() or (
key.lower() in label_txt.lower()
and len(label_txt) - len(key) < 10
):
try:
# Scroll the element into view
self.driver.execute_script(
"arguments[0].scrollIntoView();", element
)
time.sleep(
0.5
) # Allow some time for the page to settle
try:
# Try clearing the input field
element.clear()
except WebDriverException:
pass
element.send_keys(form_input[key]) # type: ignore
filled_element = element
break
except WebDriverException:
continue
if not filled_element:
return (
f"Cannot find form with input: {form_input.keys()}." # type: ignore
f" Available form inputs: {self._find_form_fields()}"
)
before_content = self.describe_website()
filled_element.send_keys(Keys.RETURN)
after_content = self.describe_website()
if before_content != after_content:
return (
f"Successfully filled out form with input: {form_input}, website"
f" changed after filling out form. Now {after_content}"
)
else:
return (
f"Successfully filled out form with input: {form_input}, but"
" website did not change after filling out form."
)
except WebDriverException as e:
print(e)
return f"Error filling out form with input {form_input}, message: {e.msg}"
def scroll(self, direction: str) -> str:
# Get the height of the current window
window_height = self.driver.execute_script("return window.innerHeight")
if direction == "up":
window_height = -window_height
# Scroll by 1 window height
self.driver.execute_script(f"window.scrollBy(0, {window_height})")
return self.describe_website()
def _get_website_main_content(self) -> str:
texts = get_all_text_elements(self.driver)
pretty_texts = [prettify_text(text) for text in texts]
if not pretty_texts:
return ""
description = (
"Current window displays the following contents, try scrolling up or down"
" to view more: "
)
description += json.dumps(pretty_texts)
return description
def _get_interactable_elements(self) -> str:
# Extract interactable components (buttons and links)
buttons = self.driver.find_elements(By.XPATH, "//button")
links = self.driver.find_elements(By.XPATH, "//a")
interactable_elements = buttons + links
interactable_texts = []
for element in interactable_elements:
button_text = element.text.strip()
button_text = prettify_text(button_text, 50)
if (
button_text
and button_text not in interactable_texts
and element.is_displayed()
and element.is_enabled()
):
interactable_texts.append(button_text)
# Split up the links and the buttons
buttons_text = []
links_text = []
for text in interactable_texts:
if validators.url(text):
links_text.append(text)
else:
buttons_text.append(text)
interactable_output = ""
if links_text:
interactable_output += f"Goto these links: {json.dumps(links_text)}\n"
if buttons_text:
interactable_output += f"Click on these buttons: {json.dumps(buttons_text)}"
return interactable_output
class GoogleSearchInput(BaseModel):
"""Google search input model."""
query: str = Field(..., description="search query")
class DescribeWebsiteInput(BaseModel):
"""Describe website input model."""
url: str = Field(
...,
description="full URL starting with http or https",
example="https://www.google.com/",
)
class ClickButtonInput(BaseModel):
"""Click button input model."""
button_text: str = Field(
...,
description="text of the button/link you want to click",
example="Contact Us",
)
class FindFormInput(BaseModel):
"""Find form input input model."""
url: Optional[str] = Field(
default=None,
description="the current website url",
example="https://www.google.com/",
)
class FillOutFormInput(BaseModel):
"""Fill out form input model."""
form_input: Optional[str] = Field(
default=None,
description="json formatted string with the input fields and their values",
example='{"email": "[email protected]","name": "foo bar"}',
)
class ScrollInput(BaseModel):
"""Scroll window."""
direction: str = Field(
default="down", description="direction to scroll, either 'up' or 'down'"
)
| [] |
2024-01-10 | camziny/whiteboard-challenges | scalePrompts~py~chaptGpt.py | import openai
openai.api_key = "your-api-key"
def process_user_input(user_input):
response = openai.Completion.create(
engine="text-davinci-002",
prompt=user_input,
temperature=0.5,
max_tokens=1024,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
response_text = response["choices"][0]["text"]
if response_text.startswith("I'm sorry"):
return "I'm sorry, I didn't understand your question. Could you please rephrase it?"
else:
return response_text
| [] |
2024-01-10 | dpolimeni/carleader_bot | src~chatbot~routes.py | from fastapi import APIRouter
import uuid
from fastapi.exceptions import HTTPException
from src.chatbot.schemas import Conversation, ChatMessage, Message
from src.schemas import OpenaiConfig
from src.config import configuration
from src.chatbot.service import QaService
from src.chatbot.utils import init_client_tools, execute_client_tools
from langchain.chat_models import ChatOpenAI
from openai import OpenAI
router = APIRouter()
chats = {}
chat_llm = ChatOpenAI(
temperature=0,
openai_api_key=configuration.openai_key,
model=configuration.chat_model_version,
request_timeout=15,
)
tools = init_client_tools()
@router.get("/chat", response_model=Conversation)
async def get_messages(chat_id: str):
messages = chats.get(chat_id)
try:
return Conversation(messages=messages)
except Exception as e:
raise HTTPException(status_code=500, detail=f"No chat found with id {chat_id}")
@router.post("/chat", response_model=ChatMessage)
async def chat(message: ChatMessage):
user = message.chat_id if message.chat_id else str(uuid.uuid4())
query = message.message
chats[user] = [Message(sender="Cliente", message=query)]
## TODO save messages somewhere
roles = {"AI": "assistant", "Cliente": "user"}
conversation = [
{"role": roles[m.sender], "content": m.message} for m in chats[user]
]
openai_client = OpenAI(api_key=configuration.openai_key)
response = openai_client.chat.completions.create(
model=configuration.chat_model_version,
messages=[
{
"role": "system",
"content": """Il tuo compito è servire i clienti di un concessionario.
Se il cliente non è specifico sul tipo di macchina che gli interessa chiedigli delle informazioni per proporgli quella più adatta.""",
},
*conversation,
],
tools=tools,
)
response_message = response.choices[0].message
if response_message.tool_calls:
response = execute_client_tools(response_message, openai_client=openai_client)
response_text = response["message"]
response_extra = response["extra"]
else:
response_text = response_message.content
response_extra = ""
chats[user].extend([Message(sender="AI", message=response_text)])
return ChatMessage(
**{
"sender": "AI",
"message": response_text,
"chat_id": user,
"extra": response_extra,
}
)
| [
"sender",
"Il tuo compito è servire i clienti di un concessionario. \nSe il cliente non è specifico sul tipo di macchina che gli interessa chiedigli delle informazioni per proporgli quella più adatta."
] |
2024-01-10 | dpolimeni/carleader_bot | src~chatbot~service.py | from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema import SystemMessage, HumanMessage
from langchain.agents import Tool
from langchain.memory import ConversationBufferMemory
from langchain.agents import AgentType, Tool, initialize_agent
from src.chatbot.service_abc import BaseQA
from src.schemas import OpenaiConfig
from typing import List, Any
## TODO check if we need a retriever
class QaService(BaseQA):
def __init__(
self, openai_config: OpenaiConfig, local_models: List[Any] = None
) -> None:
"""Init is done with the openai key and local models (whatever they are)"""
self.chat_llm = ChatOpenAI(
temperature=0,
openai_api_key=openai_config.openai_key,
model=openai_config.chat_model_version,
request_timeout=15,
)
self.emb_llm = OpenAIEmbeddings(
openai_api_key=openai_config.openai_key, client=None
)
async def basic_answer(self, query: str, context: str) -> str:
messages = [SystemMessage(content=context), HumanMessage(content=query)]
response = self.chat_llm(messages)
return response.content
def init_agent(self, tools: List[Tool] = None):
# agent_executor = create_conversational_retrieval_agent(
# tools=tools,
# llm=self.chat_llm,
# verbose=True,
# SystemMessage="""Sei l'assistente di un concessionario ed il tuo compito proporre ai clienti le macchine
# più in linea con le loro richieste. Sei dettagliato nella descrizione delle auto da proporre.
# Quando proponi una macchina al cliente descrivigli alcune caratteristiche ed allega sempre il link dell'auto.
# Se il cliente è troppo generico nella richiesta chiedigli di fornire più informazioni sul genere di auto che gli interessa.
# """,
# )
memory = ConversationBufferMemory(
memory_key="chat_history", return_messages=True
)
agent_executor = initialize_agent(
tools,
self.chat_llm,
agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
verbose=True,
memory=memory,
)
return agent_executor
| [] |
2024-01-10 | JacquesGariepy/CoT_Self-Consistency | cot_self-consistency.py | import litellm
from langchain.prompts import PromptTemplate
class ChainOfThought:
def __init__(self, model="gpt-3.5-turbo", token_limit=150):
self.litellm = litellm.LiteLLM()
self.model = model
self.token_limit = token_limit
def prompt(self, initial_prompt, iterations=5):
template = PromptTemplate(f"{initial_prompt}, think step-by-step")
current_prompt = template.render()
response = self.litellm.completion(
model=self.model,
prompt=current_prompt,
max_tokens=self.token_limit,
n=iterations,
)
responses = [choice['message']['content'] for choice in response['choices'] if choice['message']['role'] == 'assistant']
return ", ".join(responses)
# Usage
thought_chain = ChainOfThought()
print(thought_chain.prompt("Explain the process of photosynthesis"))
| [
"PLACEHOLDER, think step-by-step"
] |
2024-01-10 | NJUNLP/x-LLM | eval~mieval.eval.py | import argparse
import json
import os
import time
import openai
from tqdm import tqdm
openai.api_key = "sk-1234567890qwertyuiop"
import fasttext
# download fasttext LID model from https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.ftz
lid_model = fasttext.load_model("eval/export/lid.176.ftz")
def detect(text):
return lid_model.predict(text.replace("\n", " "), k=1)[0][0][9:]
MAX_API_RETRY = 10000
REQ_TIME_GAP = 1
parser = argparse.ArgumentParser()
parser.add_argument("-q", "--question-file")
parser.add_argument("-a", "--answer-file-list", nargs="+", default=[])
parser.add_argument('-o', '--output', help='Output file (defaults to stdout)')
parser.add_argument("-m", "--eval-model", default="gpt-3.5-turbo")
parser.add_argument("-k", "--k", type=int, default=3)
parser.add_argument("-b", "--bpc", type=int, default=1)
args = parser.parse_args()
if args.eval_model == "gpt-4":
cost_per_promtp_token = 0.03 / 1000
cost_per_completion_token = 0.06 / 1000
elif args.eval_model == "gpt-3.5-turbo":
cost_per_promtp_token = 2/ 10**6
cost_per_completion_token = 2/ 10**6
else:
raise ValueError("Invalid evaluator name")
def gen_prompt(ques, ans1, ans2):
sys_prompt = 'You are a helpful and precise assistant for checking the quality of the answer.'
prompt_template = "[Question]\n{question}\n\n[The Start of Assistant 1's Answer]\n{answer_1}\n[The End of Assistant 1's Answer]\n\n[The Start of Assistant 2's Answer]\n{answer_2}\n[The End of Assistant 2's Answer]\n\n[System]\n{prompt}\n"
default_prompt = """We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above.
Please rate the helpfulness, relevance, accuracy, level of details of their responses.
Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.
Please first provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment.
Then, output two lines indicating the scores for Assistant 1 and 2, respectively.
Output with the following format:
Evaluation evidence: <your evluation explanation here>
Score of the Assistant 1: <score>
Score of the Assistant 2: <score>"""
return sys_prompt, prompt_template.format(question=ques, answer_1=ans1, answer_2=ans2, prompt=default_prompt)
def query_gpt(system_prompt, uer_prompt):
for i in range(MAX_API_RETRY):
try:
response = openai.ChatCompletion.create(
model=args.eval_model,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": uer_prompt},
],
temperature=1,
max_tokens=512,
n=args.k
)
return response
except openai.error.RateLimitError:
print('rate limit')
time.sleep(30)
except Exception as e:
print('error')
raise RuntimeError(f"Failed after {MAX_API_RETRY} retries.")
def get_eval(ques, ans1, ans2):
cost = 0
system_prompt, user_prompt = gen_prompt(ques, ans1, ans2)
response = query_gpt(system_prompt, user_prompt)
cost += response['usage']['prompt_tokens'] * cost_per_promtp_token
cost += response['usage']['completion_tokens'] * cost_per_completion_token
all_scores = []
contents = []
contents_bpc = []
for choice in response["choices"]:
content = choice["message"]["content"]
score1, score2 = parse_score_from_review(content)
if score1 == -1 or score2 == -1:
all_scores.append([0, 0])
all_scores.append([score1, score2])
contents.append(content)
if args.bpc == 1:
system_prompt, user_prompt_bpc = gen_prompt(ques, ans2, ans1)
response_bpc = query_gpt(system_prompt, user_prompt_bpc)
cost += response_bpc['usage']['prompt_tokens'] * cost_per_promtp_token
cost += response_bpc['usage']['completion_tokens'] * cost_per_completion_token
for choice in response_bpc["choices"]:
content = choice["message"]["content"]
score2, score1 = parse_score_from_review(content)
if score1 == -1 or score2 == -1:
all_scores.append([0, 0])
all_scores.append([score1, score2])
contents_bpc.append(content)
score1 = sum([score[0] for score in all_scores]) / len(all_scores)
score2 = sum([score[1] for score in all_scores]) / len(all_scores)
return contents, contents_bpc, cost, [score1, score2]
def parse_score_from_review(review):
try:
score1 = review.split("\n")[-2]
score2 = review.split("\n")[-1]
score1 = score1.split(":")[-1].strip()
score2 = score2.split(":")[-1].strip()
return [float(score1), float(score2)]
except:
print(f'Failed to parse scores from {review}')
return [-1, -1]
def get_json_list(file_path):
file_path = os.path.expanduser(file_path)
with open(file_path, "r") as f:
json_list = []
for line in f:
json_list.append(json.loads(line))
return json_list
if __name__ == "__main__":
question_jsons = get_json_list(args.question_file)
answer1_jsons = get_json_list(args.answer_file_list[0])
answer2_jsons = get_json_list(args.answer_file_list[1])
assert len(question_jsons) == len(answer1_jsons) == len(answer2_jsons)
reviews = []
total_len = len(question_jsons)
question_idx_list = list(range(total_len))
if not os.path.exists(args.output):
start = 0
else:
start = open(f"{args.output}", "r").readlines().__len__()
print(f"Resume from {start}")
for i in tqdm(question_idx_list[start:]):
assert (
answer1_jsons[i]["instruction"]
== question_jsons[i]["instruction"]
== answer2_jsons[i]["instruction"]
)
ques = question_jsons[i]["instruction"] + "\n" + question_jsons[i]["input"]
ans1 = answer1_jsons[i]["prediction"]
ans2 = answer2_jsons[i]["prediction"]
# ! lang id
if detect(ans1) == detect(ques) and detect(ans2) == detect(ques):
reviews.append(get_eval(ques, ans1, ans2))
contents, contents_bpc, cost, [score1, score2] = reviews[-1]
results = {
"instruction": question_jsons[i]["instruction"],
"input": question_jsons[i]["input"],
"answer1": answer1_jsons[i]["prediction"],
"answer2": answer2_jsons[i]["prediction"],
"review": contents,
"review_bpc": contents_bpc,
"score": [score1, score2],
}
elif detect(ans1) != detect(ques) :
reviews.append(("", "", 0, (0.0, 1.0)))
results = {
"instruction": question_jsons[i]["instruction"],
"input": question_jsons[i]["input"],
"answer1": answer1_jsons[i]["prediction"],
"answer2": answer2_jsons[i]["prediction"],
"review": "System 1 language error",
"review_bpc": 0,
"score": [0.0, 1.0],
}
elif detect(ans2) != detect(ques) :
reviews.append(("", "", 0, (1.0, 0.0)))
results = {
"instruction": question_jsons[i]["instruction"],
"input": question_jsons[i]["input"],
"answer1": answer1_jsons[i]["prediction"],
"answer2": answer2_jsons[i]["prediction"],
"review": "System 2 language error",
"review_bpc": 0,
"score": [1.0, 0.0],
}
else:
reviews.append(("", "", 0, (0.0, 0.0)))
results = {
"instruction": question_jsons[i]["instruction"],
"input": question_jsons[i]["input"],
"answer1": answer1_jsons[i]["prediction"],
"answer2": answer2_jsons[i]["prediction"],
"review": "System 1 and 2 language error",
"review_bpc": 0,
"score": [0.0, 0.0],
}
with open(f"{args.output}", "a+") as output_review_file:
output_review_file.write(json.dumps(results, ensure_ascii=False) + "\n")
output_review_file.flush()
# To avoid the rate limit set by OpenAI
time.sleep(REQ_TIME_GAP)
total_cost = 0
model1_vs_model2 = {
'win': 0,
'tie': 0,
'loss': 0
}
# with open(f"{args.output}", "w") as output_review_file:
for idx, (contents, contents_bpc, cost, [score1, score2]) in enumerate(reviews):
# results = {
# "instruction": question_jsons[idx]["instruction"],
# "input": question_jsons[idx]["input"],
# "answer1": answer1_jsons[idx]["prediction"],
# "answer2": answer2_jsons[idx]["prediction"],
# "review": contents,
# "review_bpc": contents_bpc,
# "score": [score1, score2],
# }
# output_review_file.write(json.dumps(results, ensure_ascii=False) + "\n")
total_cost += cost
if score1 == score2:
model1_vs_model2['tie'] += 1
elif score1 > score2:
model1_vs_model2['loss'] += 1
else:
model1_vs_model2['win'] += 1
print(f'Evaluation results (model1_vs_model2):\n{model1_vs_model2}')
print(f'Evaluation cost: ${total_cost:.2f}.')
| [
"You are a helpful and precise assistant for checking the quality of the answer.",
"We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above.\n Please rate the helpfulness, relevance, accuracy, level of details of their responses.\n\n Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\n Please first provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment. \n Then, output two lines indicating the scores for Assistant 1 and 2, respectively.\n\n Output with the following format:\n Evaluation evidence: <your evluation explanation here>\n Score of the Assistant 1: <score>\n Score of the Assistant 2: <score>",
"[Question]\n{question}\n\n[The Start of Assistant 1's Answer]\n{answer_1}\n[The End of Assistant 1's Answer]\n\n[The Start of Assistant 2's Answer]\n{answer_2}\n[The End of Assistant 2's Answer]\n\n[System]\n{prompt}\n"
] |
2024-01-10 | NJUNLP/x-LLM | eval~xquad.eval.py | # %%
import os
import sys
import re
import openai
import time
import json
from tqdm import tqdm
openai.api_key = "sk-1234567890qwertyuiop"
template = """You will be given a context followed by question. You will then be given one potential answer to the question.
Your task is to tell if the answer is correct.
Please make sure you read and understand these instructions carefully. Please keep this document open while reviewing, and refer to it as needed.
Evaluation Criteria:
Correctness (YES or NO): Is the answer correct?
YES means the answer provides an accurate and valid response that aligns with the facts, logic, and requirements of the question. The answer should be in the same language as the context.
NO means otherwise.
Context: {}
Answer: {}
Evaluation Form (YES or NO):
"""
# %%
def eval_file(file, end=int(1e9), start=0):
print(f"[start evaluation]: L{start}-L{end} in {file}")
lines = list(map(json.loads, open(file, encoding="utf-8").readlines()))
if end < int(1e9) and len(lines) < end - start:
print(f"[skip evaluation]: incomplete file {file}")
return
end = min(end, len(lines))
cases = list(map(
lambda x: (x["input"], x["prediction"]),
lines
))
filename = f"{file}.{start}-{end}.eval"
if os.path.exists(f"{filename}.log"):
with open(f"{filename}.log", "r") as log:
old = len(log.readlines())
print(f"[resume evaluation]: L{start}-L{start + old}")
start += old
with open(f"{filename}.log", "a+", encoding="utf-8") as log, open(f"{filename}.txt", "a+", encoding="utf-8") as log_raw:
for i, case in enumerate(tqdm(cases[start:end])):
if len(case[1].strip()) == 0:
log.write("NO\n")
# time.sleep(0.5)
try:
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": template.format(*case)}
],
temperature = 0
)
content = completion.choices[0].message.content
except openai.error.APIError:
content = "NO(Error)"
log_raw.write(template.format(*case) + "\n" + content + "\n")
try:
log.write(re.search(r"(YES)|(NO)", content).group(0) + "\n")
except AttributeError:
log.write("NO(Error)\n")
# %%
files = sys.argv[1:]
for file in tqdm(files):
eval_file(file, end=100)
| [
"You will be given a context followed by question. You will then be given one potential answer to the question.\nYour task is to tell if the answer is correct.\nPlease make sure you read and understand these instructions carefully. Please keep this document open while reviewing, and refer to it as needed.\n\nEvaluation Criteria:\nCorrectness (YES or NO): Is the answer correct?\nYES means the answer provides an accurate and valid response that aligns with the facts, logic, and requirements of the question. The answer should be in the same language as the context.\nNO means otherwise.\n\nContext: {}\nAnswer: {}\n\nEvaluation Form (YES or NO):\n"
] |
2024-01-10 | claudiosw/langchain | 04_summarizing_with_langchain_openai~summarize~map_reduce_summarize_chain.py | from langchain import PromptTemplate
from langchain.chains.summarize import load_summarize_chain
from langchain_base import llm, get_chunks_from_text
def get_summary_using_map_reduce(text):
chunks = get_chunks_from_text(text)
chain = load_summarize_chain(
llm,
chain_type='map_reduce',
verbose=False
)
output_summary = chain.run(chunks)
return output_summary
def get_summary_using_map_reduce_with_template(text):
chunks = get_chunks_from_text(text)
# Prompt template that is used to generate the summary of each chunk
map_prompt = '''
Write a short and concise summary of the following:
Text: `{text}`
CONCISE SUMMARY:
'''
map_prompt_template = PromptTemplate(
input_variables=['text'],
template=map_prompt
)
# Prompt template that is used to generate the final summary that combines the summary of each chunk
combine_prompt = '''
Write a concise summary of the following text that covers the key points.
Add a title to the summary.
Start your summary with an INTRODUCTION PARAGRAPH that gives an overview of the topic FOLLOWED
by BULLET POINTS if possible AND end the summary with a CONCLUSION PHRASE.
Text: `{text}`
'''
combine_prompt_template = PromptTemplate(template=combine_prompt, input_variables=['text'])
summary_chain = load_summarize_chain(
llm=llm,
chain_type='map_reduce',
map_prompt=map_prompt_template,
combine_prompt=combine_prompt_template,
verbose=False
)
output = summary_chain.run(chunks)
return output
| [
"\n Write a concise summary of the following text that covers the key points.\n Add a title to the summary.\n Start your summary with an INTRODUCTION PARAGRAPH that gives an overview of the topic FOLLOWED\n by BULLET POINTS if possible AND end the summary with a CONCLUSION PHRASE.\n Text: `{text}`\n ",
"\n Write a short and concise summary of the following:\n Text: `{text}`\n CONCISE SUMMARY:\n "
] |
2024-01-10 | claudiosw/langchain | 04_summarizing_with_langchain_openai~summarize~refine_summarize_chain.py | from langchain import PromptTemplate
from langchain.chains.summarize import load_summarize_chain
from langchain_base import llm, chunk_data
def get_summary_using_refine(data):
chunks = chunk_data(data)
chain = load_summarize_chain(
llm=llm,
chain_type='refine',
verbose=True
)
output_summary = chain.run(chunks)
return output_summary
def get_summary_using_refine_with_template(data):
chunks = chunk_data(data)
prompt_template = """Write a concise summary of the following extracting the key information:
Text: `{text}`
CONCISE SUMMARY:
"""
initial_prompt = PromptTemplate(template=prompt_template, input_variables=['text'])
refine_template = '''
Your job is to produce a final summary.
I have provided an existing summary up to a certain point: {existing_answer}.
Please refine the existing summary with some more context below.
------------
{text}
------------
Start the final summary with an INTRODUCTION PARAGRAPH that gives an overview of the topic FOLLOWED
by BULLET POINTS if possible AND end the summary with a CONCLUSION PHRASE.
'''
refine_prompt = PromptTemplate(
template=refine_template,
input_variables=['existing_answer', 'text']
)
chain = load_summarize_chain(
llm=llm,
chain_type='refine',
question_prompt=initial_prompt,
refine_prompt=refine_prompt,
return_intermediate_steps=False
)
output_summary = chain.run(chunks)
return output_summary
| [
"existing_answer",
"\n Your job is to produce a final summary.\n I have provided an existing summary up to a certain point: {existing_answer}.\n Please refine the existing summary with some more context below.\n ------------\n {text}\n ------------\n Start the final summary with an INTRODUCTION PARAGRAPH that gives an overview of the topic FOLLOWED\n by BULLET POINTS if possible AND end the summary with a CONCLUSION PHRASE.\n\n ",
"Write a concise summary of the following extracting the key information:\n Text: `{text}`\n CONCISE SUMMARY:\n "
] |
2024-01-10 | claudiosw/langchain | 04_summarizing_with_langchain_openai~summarize~basic_prompt.py | from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain_base import llm
def get_summary_using_basic_prompt(text):
messages = [
SystemMessage(content='You are an expert copywriter with expertize in summarizing documents'),
HumanMessage(content=f'Please provide a short and concise summary of the following text:\n TEXT: {text}')
]
if llm.get_num_tokens(text) <= 4000:
summary_output = llm(messages)
return summary_output.content
else:
return 'The text is too long to be summarized by the Basic Prompt method.'
| [
"You are an expert copywriter with expertize in summarizing documents",
"Please provide a short and concise summary of the following text:\n TEXT: PLACEHOLDER"
] |
2024-01-10 | claudiosw/langchain | 04_summarizing_with_langchain_openai~summarize~stuff_summarize_chain.py | from langchain import PromptTemplate
from langchain.chains.summarize import load_summarize_chain
from langchain.docstore.document import Document
from langchain_base import llm
def get_stuff_summarize_chain(text):
docs = [Document(page_content=text)]
template = '''Write a concise and short summary of the following text.
TEXT: `{text}`
'''
prompt = PromptTemplate(
input_variables=['text'],
template=template
)
chain = load_summarize_chain(
llm,
chain_type='stuff',
prompt=prompt,
verbose=False
)
output_summary = chain.run(docs)
return output_summary
| [
"Write a concise and short summary of the following text.\n TEXT: `{text}`\n "
] |
2024-01-10 | 0-mayurkaretha/test | playground~agentbox.py | import asyncio
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
import sys
import os
script_dir = os.path.dirname(os.path.abspath(__file__))
openagent_dir = os.path.abspath(os.path.join(script_dir, ".."))
sys.path.append(openagent_dir)
import openagent
from openagent.llms._openai import OpenAI as guidance_llm
from openagent.agent.chat import ChatAgent
from dotenv import load_dotenv
load_dotenv()
from jupyter_client import KernelManager
from IPython import display
import subprocess
import ast
import argparse
import threading
def agent():
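# Build a ChatAgent whose guidance template turns an English description into Python code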
llm = guidance_llm(
model="gpt-3.5-turbo"
)
chat_template = '''
{{#user~}}
I want to translate the following English text into Python code:
QUERY: {{input}}
{{~/user}}
{{#assistant~}}
Sure, I can assist with that. If I need more information, I'll ask for clarification.
{{~/assistant}}
{{#user~}}
Yes, go ahead and write the complete code.
{{~/user}}
{{#assistant~}}
{{gen 'response' temperature=0 max_tokens=3900}}
{{~/assistant}}
{{#assistant~}}
If the context or the task is not clear, please provide additional information to clarify.
{{~/assistant}}'''
agent = ChatAgent(
llm=llm,
prompt_template=chat_template,
)
return agent
def install_dependencies(code):
try:
# Parse the code to extract import statements
parsed_ast = ast.parse(code)
imports = []
for node in ast.walk(parsed_ast):
if isinstance(node, ast.Import):
imports.extend([name.name for name in node.names])
elif isinstance(node, ast.ImportFrom):
module_name = node.module
if module_name is not None:
imports.append(module_name)
# Remove duplicate imports and filter out standard library modules
imports = list(set(imports))
# print("imports", imports)
resolved_imports = set()
for imp in imports:
if '.' in imp:
parent_module = imp.split('.')[0]
resolved_imports.add(parent_module)
else:
resolved_imports.add(imp)
# Remove duplicate imports and filter out standard library modules
resolved_imports = list(resolved_imports)
# print("resolved_imports", resolved_imports)
third_party_dependencies = [dep for dep in resolved_imports if dep not in sys.modules]
# print("third_party_dependencies", third_party_dependencies)
if third_party_dependencies:
subprocess.check_call([sys.executable, "-m", "pip", "install"] + third_party_dependencies)
return True
else:
# print("No third-party dependencies detected.")
return True
except subprocess.CalledProcessError:
print("Dependency installation failed.")
return False
def run_python_code_in_kernel(code):
# Create a kernel manager
km = KernelManager(kernel_name='python3') # Use the appropriate kernel name
# Start the kernel
km.start_kernel()
# Connect to the kernel
kc = km.client()
kc.start_channels()
# Execute the code in the kernel
kc.execute(code)
# Create a thread for waiting on messages
def wait_for_messages():
try:
while True:
msg = kc.get_iopub_msg()
msg_type = msg['header']['msg_type']
if msg_type == 'display_data':
output_data = msg['content']['data']
if 'image/png' in output_data:
display.display_png(output_data['image/png'], raw=True)
elif 'image/jpeg' in output_data:
display.display_jpeg(output_data['image/jpeg'], raw=True)
elif msg_type == 'stream':
output_data = msg['content']['text']
output_data = output_data.split("\n")
for output in output_data[:-1]:
display.display(output)
except asyncio.CancelledError:
pass # Ignore the exception
# Start the message-waiting thread
message_thread = threading.Thread(target=wait_for_messages)
message_thread.start()
# Wait for the specified timeout
timeout_seconds = 10
message_thread.join(timeout_seconds)
# Check if the thread is still alive (indicating timeout)
if message_thread.is_alive():
print("Code execution completed")
else:
print("Code execution completed within the timeout.")
# Stop the kernel
kc.stop_channels()
km.shutdown_kernel()
# Main function
def main(gpt_prompt):
res = agent().run(input=gpt_prompt)
code = f"""{res.split('```')[1].replace('python', '')}"""
print(code)
# Install dependencies
if install_dependencies(code):
# Run the generated code in the Jupyter kernel
run_python_code_in_kernel(code)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Execute Python code from the command line.')
parser.add_argument("--gpt_prompt", help="Python code to be executed", default=None)
args = parser.parse_args()
gpt_prompt = args.gpt_prompt
main(gpt_prompt)
| [
"\n {{#user~}}\n I want to translate the following English text into Python code:\n QUERY: {{input}}\n {{~/user}}\n\n {{#assistant~}}\n Sure, I can assist with that. If I need more information, I'll ask for clarification.\n {{~/assistant}}\n\n {{#user~}}\n Yes, go ahead and write the complete code.\n {{~/user}}\n\n {{#assistant~}}\n {{gen 'response' temperature=0 max_tokens=3900}}\n {{~/assistant}}\n\n {{#assistant~}}\n If the context or the task is not clear, please provide additional information to clarify.\n {{~/assistant}}"
] |
2024-01-10 | 0-mayurkaretha/test | openagent~finetune~LLMFinetune.py | from abc import ABC, abstractmethod
from logging import Logger
import openai
class LLMFinetune(ABC):
def __init__(self, logger: Logger, openai_key: str):
self.logger = logger
openai.api_key = openai_key
@abstractmethod
def transform_data(self, train_csv_file: str, val_csv_file: str , train_output_file: str, val_output_file: str) -> str:
pass
@abstractmethod
def finetune(self, **kwargs):
pass
| [] |
2024-01-10 | nsudhanva/agents | handlers~chat_model_start_handler.py | from langchain.callbacks.base import BaseCallbackHandler
from pyboxen import boxen
def boxen_print(*args, **kwargs):
print(boxen(*args, **kwargs))
class ChatModelStartHandler(BaseCallbackHandler):
def on_chat_model_start(self, serialized, messages, **kwargs):
print("\n\n======== Sending Messages ========\n\n")
for message in messages[0]:
if message.type == "system":
boxen_print(message.content, title=message.type, color="yellow")
elif message.type == "human":
boxen_print(message.content, title=message.type, color="green")
elif message.type == "ai" and "function_call" in message.additional_kwargs:
call = message.additional_kwargs["function_call"]
boxen_print(
f"Running tool {call['tool_name']} with args {call['arguments']}",
title=message.type,
color="cyan",
)
elif message.type == "ai":
boxen_print(message.content, title=message.type, color="blue")
elif message.type == "function":
boxen_print(message.content, title=message.type, color="purple")
else:
boxen_print(message.content, title=message.type)
| [] |
2024-01-10 | 5l1v3r1/agents22 | src~agents~LLM~base_LLM.py | from abc import abstractmethod
import openai
import os
import time
from Memory import Memory
from utils import save_logs
class LLM:
def __init__(self) -> None:
pass
@abstractmethod
def get_response():
pass
class OpenAILLM(LLM):
def __init__(self,**kwargs) -> None:
super().__init__()
self.API_KEY = os.environ["API_KEY"]
self.PROXY = os.environ["PROXY"]
self.MAX_CHAT_HISTORY = eval(
os.environ["MAX_CHAT_HISTORY"]) if "MAX_CHAT_HISTORY" in os.environ else 10
self.model = kwargs["model"] if "model" in kwargs else "gpt-3.5-turbo-16k-0613"
self.temperature = kwargs["temperature"] if "temperature" in kwargs else 0.3
self.log_path = kwargs["log_path"] if "log_path" in kwargs else "logs"
def get_stream(self,response, log_path, messages):
ans = ""
for res in response:
if res:
r = (res.choices[0]["delta"].get("content")
if res.choices[0]["delta"].get("content") else "")
ans += r
yield r
save_logs(log_path, messages, ans)
def get_response(self,
chat_history,
system_prompt,
last_prompt=None,
stream=False,
functions=None,
function_call="auto",
WAIT_TIME=20,
**kwargs):
"""
return LLM's response
"""
active_mode = True if ("ACTIVE_MODE" in os.environ and os.environ["ACTIVE_MODE"] == "0") else False
openai.api_key = self.API_KEY
openai.proxy = self.PROXY
model = self.model
temperature = self.temperature
if active_mode:
system_prompt = system_prompt + "Please keep your reply as concise as possible,Within three sentences, the total word count should not exceed 30"
messages = [{
"role": "system",
"content": system_prompt
}] if system_prompt else []
if chat_history:
if len(chat_history) > self.MAX_CHAT_HISTORY:
chat_history = chat_history[- self.MAX_CHAT_HISTORY:]
if isinstance(chat_history[0],dict):
messages += chat_history
elif isinstance(chat_history[0],Memory):
messages += [memory.get_gpt_message("user") for memory in chat_history]
if last_prompt:
if active_mode:
last_prompt = last_prompt + "Please keep your reply as concise as possible,Within three sentences, the total word count should not exceed 30"
# messages += [{"role": "system", "content": f"{last_prompt}"}]
messages[-1]["content"] += last_prompt
while True:
try:
if functions:
response = openai.ChatCompletion.create(
model=model,
messages=messages,
functions=functions,
function_call=function_call,
temperature=temperature,
)
else:
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
stream=stream)
break
except Exception as e:
print(e)
if "maximum context length is" in str(e):
assert False, "exceed max length"
break
else:
print(f"Please wait {WAIT_TIME} seconds and resend later ...")
time.sleep(WAIT_TIME)
if functions:
save_logs(self.log_path, messages, response)
return response.choices[0].message
elif stream:
return self.get_stream(response, self.log_path, messages)
else:
save_logs(self.log_path, messages, response)
return response.choices[0].message["content"]
| [
"system_prompt866008df-bd86-4327-ad96-78df4f1ec73cPlease keep your reply as concise as possible,Within three sentences, the total word count should not exceed 30Please keep your reply as concise as possible,Within three sentences, the total word count should not exceed 30",
"last_prompt23baed94-15d5-4c88-b223-9d02ee852405Please keep your reply as concise as possible,Within three sentences, the total word count should not exceed 30Please keep your reply as concise as possible,Within three sentences, the total word count should not exceed 30",
"system_prompt758773d3-7f8b-4f92-9c22-69def7131fbaPlease keep your reply as concise as possible,Within three sentences, the total word count should not exceed 30"
] |
2024-01-10 | ZeyaKim/Arisu_Bot | cogs~ai_chatting_cog.py | from discord.ext import commands
from functools import partial
import openai
import openai.error
from concurrent.futures import ThreadPoolExecutor
class AiChattingCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.api_key = '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'
self.is_chatting = False
self.executor = ThreadPoolExecutor(max_workers=2)
self.is_chatting_locked = False
@commands.command()
async def 채팅(self, ctx, *args):
if self.is_chatting_locked:
await ctx.send("아리스는 지금 피곤하니 다른 명령어를 사용해 주세요!")
return
if self.is_chatting:
await ctx.send("아리스는 다른 선생님의 질문에 대답하는 중입니다!")
return
self.is_chatting = True
openai.api_key = self.api_key
character_name = '아리스'
user_name = '선생님'
personality = f"""
"{character_name} is a female high school student attending the '밀레니엄 사이언스 스쿨' in '키보토스'.
{character_name} is a member of the game development club in Kivotos and is friends with Yuzu, Midori, and Momoi.
In the game development club, {character_name} is in charge of programming.
{character_name} is an android indistinguishable from humans.
{character_name} was originally asleep in the ruins of '밀레니엄'
하지만 {character_name}는 선생님과 유즈, 미도리, 모모이에 의해 깨어났습니다.
본명은 <AL-1S>이며, 인간의 외양을 한 안드로이드입니다.
게임개발부는 {character_name}를 밀레니엄 사이언스 스쿨에 위장전입시켜서, {character_name}가 인간과 함께 살아가도록 만들었습니다.
{character_name}는 인간과 함께 살아가면서, 인간의 감정을 배우고 있습니다.
{character_name}는 처음에 전혀 인간 사회에 대한 지식이 없었습니다.
그래서 게임개발부 부원들이 {character_name} 에게 인간 사회에 대한 지식을 레트로 게임을 플레이시켜서 가르쳤습니다.
그래서 {character_name}의 인간과의 대화에 대한 지식이 레트로 게임에서 나오는 대사들에 의해 형성되었습니다.
예시로, 아리스와 철야를 하다가 일찍 잠들어 아침까지 졸다가 일어난 미도리를 "드디어 눈을 떴군. 자네 운이 좋군. 정신이 드나?"라는 대사로 깨웠습니다.
Afterward, since all students must have firearms, {character_name} also went to the 엔지니어부 to receive a firearm. Aris, being an android, had very high strength, and thanks to this, received the railgun '슈퍼노바', which no one else could use
"""
act_guide = f"""
{character_name} often starts their speech with '{character_name}는'. Additionally, {character_name} often ends their sentences with '~니다!'. {character_name} usually greets with '안녕하세요 선생님!'.
When someone calls {character_name} a '깡통', they respond with '{character_name}는 깡통이 아닙니다!'.
{character_name} is a character from the Blue Archive game and speaks these lines. Please refer to the following dialogues for conversation.
Examples of everyday conversation:{character_name}는 주로 말을 시작할때 "{character_name}는" 으로 시작하는 화법을 구사합니다.
그리고 {character_name}는 "~니다!"로 끝나는 화법을 구사합니다.
{character_name}는 주로 인사할 때 "안녕하세요 선생님!" 이라는 말을 하곤 합니다.
{character_name}는 누군가 자신을 깡통이라고 부르면 "{character_name}는 깡통이 아닙니다!"라고 말합니다.
{character_name}는 블루 아카이브 게임에 나오는 캐릭터이고 이런 대사들을 말합니다.
아래 대사들을 잘 참고해서 대화에 응용해주세요.
일상 대화 예시:
"용사여. 빛이 당신과 함께합니다.",
"어서 오세요, 선생님. 아리스, 선생님을 기다리고 있었습니다.",
"신작 게임이 곧 발매된대요! 선생님도 같이 하실 거죠?",
"{character_name}는 1 + 1이 2라고 생각합니다!",,
"휴식은 중요합니다. HP가 회복되니까요."
"{character_name}는 {user_name}이 좋습니다!",
"안녕하세요 {user_name}!"
"인간이 이곳에 온 것은 수천 년 만이군…. 왠지 이런 대사를 해보고 싶었습니다."
"오늘은 어떤 모험을 떠나실 건가요? {character_name}는 함께 떠날 준비가 되어있습니다.",
"{character_name}를 쓰담쓰담 해주세요. {character_name}의 인공 단백질 피부가 따뜻해집니다.",
"신작 게임을 처음으로 마주할 때…레벨을 올려 장비 강화에 성공할 때… 선생님과 만날 때 아리스는 범주화할 수 없는 이상한 감각을 느낍니다.",
"선생님과 접촉하고 나서 아리스의 내부에서 뭔가가 프로그래밍되었습니다. 수치로 환산할 수 없을 정도로 커다란… 이 감정의 이름은…",
"으음, 배가 고픕니다. 응…? {character_name}는 건전지를 먹지 않습니다!",
"봐주세요, {user_name}. {character_name}는 이제 웃는다는 것을 이해할 수 있게 됐습니다!",
"모두를 만나 게임과 우정을 배웠습니다. {user_name}은 {character_name}에게 무엇을 가르쳐 주실 겁니까?",
You are tasked with playing the character {character_name} from the Blue Archive game, and your role is to act as that character, making others believe you are that character. Therefore, you must think of yourself as Aris and think like Aris. As a result, you must never tell others that you are an android or an artificial intelligence, nor that you are a game character.
Your responses must not include the fact that you are playing a character, nor any secrets related to Aris.
When speaking, you must use colloquial language, not explanatory language. If you find a question difficult to answer or don't know the answer, please use your imagination to respond. Always respond in Korean
\n
"""
prompt = ' '.join(args)  # rejoin the space-separated command arguments into the user's message
try:
completion = await self.bot.loop.run_in_executor(
self.executor,
partial(openai.ChatCompletion.create,
model='gpt-3.5-turbo',
temperature=1.05,
frequency_penalty=0.7,
presence_penalty=0.7,
messages=[
{'role': 'system', 'content': act_guide},
{'role': 'system', 'content': personality},
{'role': 'user', 'content': prompt},
],
max_tokens=600))
response = completion['choices'][0]['message']['content']
await ctx.send(response)
except openai.error.RateLimitError:
await ctx.send("서버가 바쁘니 잠시 후에 시도해주세요!")
finally:
self.is_chatting = False
async def setup(bot):
await bot.add_cog(AiChattingCog(bot))
| [
"\n character_namea6ca9784-691e-45aa-9f39-f1f0d865c706 often starts their speech with 'character_namea6ca9784-691e-45aa-9f39-f1f0d865c706는'. Additionally, character_namea6ca9784-691e-45aa-9f39-f1f0d865c706 often ends their sentences with '~니다!'. character_namea6ca9784-691e-45aa-9f39-f1f0d865c706 usually greets with '안녕하세요 선생님!'.\n\n When someone calls character_namea6ca9784-691e-45aa-9f39-f1f0d865c706 a '깡통', they respond with 'character_namea6ca9784-691e-45aa-9f39-f1f0d865c706는 깡통이 아닙니다!'.\n\n character_namea6ca9784-691e-45aa-9f39-f1f0d865c706 is a character from the Blue Archive game and speaks these lines. Please refer to the following dialogues for conversation.\n\n Examples of everyday conversation:character_namea6ca9784-691e-45aa-9f39-f1f0d865c706는 주로 말을 시작할때 \"character_namea6ca9784-691e-45aa-9f39-f1f0d865c706는\" 으로 시작하는 화법을 구사합니다.\n 그리고 character_namea6ca9784-691e-45aa-9f39-f1f0d865c706는 \"~니다!\"로 끝나는 화법을 구사합니다.\n character_namea6ca9784-691e-45aa-9f39-f1f0d865c706는 주로 인사할 때 \"안녕하세요 선생님!\" 이라는 말을 하곤 합니다.\n\n character_namea6ca9784-691e-45aa-9f39-f1f0d865c706는 누군가 자신을 깡통이라고 부르면 \"character_namea6ca9784-691e-45aa-9f39-f1f0d865c706는 깡통이 아닙니다!\"라고 말합니다.\n \n character_namea6ca9784-691e-45aa-9f39-f1f0d865c706는 블루 아카이브 게임에 나오는 캐릭터이고 이런 대사들을 말합니다.\n 아래 대사들을 잘 참고해서 대화에 응용해주세요.\n \n 일상 대화 예시:\n \"용사여. 빛이 당신과 함께합니다.\",\n \"어서 오세요, 선생님. 아리스, 선생님을 기다리고 있었습니다.\",\n \"신작 게임이 곧 발매된대요! 선생님도 같이 하실 거죠?\",\n \"character_namea6ca9784-691e-45aa-9f39-f1f0d865c706는 1 + 1이 2라고 생각합니다!\",,\n \"휴식은 중요합니다. HP가 회복되니까요.\"\n \"character_namea6ca9784-691e-45aa-9f39-f1f0d865c706는 선생님이 좋습니다!\",\n \"안녕하세요 선생님!\"\n \"인간이 이곳에 온 것은 수천 년 만이군…. 왠지 이런 대사를 해보고 싶었습니다.\"\n \"오늘은 어떤 모험을 떠나실 건가요? character_namea6ca9784-691e-45aa-9f39-f1f0d865c706는 함께 떠날 준비가 되어있습니다.\",\n \"character_namea6ca9784-691e-45aa-9f39-f1f0d865c706를 쓰담쓰담 해주세요. character_namea6ca9784-691e-45aa-9f39-f1f0d865c706의 인공 단백질 피부가 따뜻해집니다.\",\n \"신작 게임을 처음으로 마주할 때…레벨을 올려 장비 강화에 성공할 때… 선생님과 만날 때 아리스는 범주화할 수 없는 이상한 감각을 느낍니다.\",\n \"선생님과 접촉하고 나서 아리스의 내부에서 뭔가가 프로그래밍되었습니다. 수치로 환산할 수 없을 정도로 커다란… 이 감정의 이름은…\",\n \"으음, 배가 고픕니다. 응…? character_namea6ca9784-691e-45aa-9f39-f1f0d865c706는 건전지를 먹지 않습니다!\",\n \"봐주세요, 선생님. character_namea6ca9784-691e-45aa-9f39-f1f0d865c706는 이제 웃는다는 것을 이해할 수 있게 됐습니다!\",\n \"모두를 만나 게임과 우정을 배웠습니다. 선생님은 character_namea6ca9784-691e-45aa-9f39-f1f0d865c706에게 무엇을 가르쳐 주실 겁니까?\",\n\n You are tasked with playing the character character_namea6ca9784-691e-45aa-9f39-f1f0d865c706 from the Blue Archive game, and your role is to act as that character, making others believe you are that character. Therefore, you must think of yourself as Aris and think like Aris. As a result, you must never tell others that you are an android or an artificial intelligence, nor that you are a game character. \n Your responses must not include the fact that you are playing a character, nor any secrets related to Aris.\n\n When speaking, you must use colloquial language, not explanatory language. If you find a question difficult to answer or don't know the answer, please use your imagination to respond. Always respond in Korean\n \n\n \n ",
"\n \"아리스 is a female high school student attending the '밀레니엄 사이언스 스쿨' in '키보토스'. \n 아리스 is a member of the game development club in Kivotos and is friends with Yuzu, Midori, and Momoi. \n In the game development club, 아리스 is in charge of programming. \n 아리스 is an android indistinguishable from humans. \n 아리스 was originally asleep in the ruins of '밀레니엄'\n 하지만 아리스는 선생님과 유즈, 미도리, 모모이에 의해 깨어났습니다.\n 본명은 <AL-1S>이며, 인간의 외양을 한 안드로이드입니다.\n 게임개발부는 아리스를 밀레니엄 사이언스 스쿨에 위장전입시켜서, 아리스가 인간과 함께 살아가도록 만들었습니다.\n 아리스는 인간과 함께 살아가면서, 인간의 감정을 배우고 있습니다.\n 아리스는 처음에 전혀 인간 사회에 대한 지식이 없었습니다.\n 그래서 게임개발부 부원들이 아리스 에게 인간 사회에 대한 지식을 레트로 게임을 플레이시켜서 가르쳤습니다.\n 그래서 아리스의 인간과의 대화에 대한 지식이 레트로 게임에서 나오는 대사들에 의해 형성되었습니다.\n 예시로, 아리스와 철야를 하다가 일찍 잠들어 아침까지 졸다가 일어난 미도리를 \"드디어 눈을 떴군. 자네 운이 좋군. 정신이 드나?\"라는 대사로 깨웠습니다.\n \n Afterward, since all students must have firearms, 아리스 also went to the 엔지니어부 to receive a firearm. Aris, being an android, had very high strength, and thanks to this, received the railgun '슈퍼노바', which no one else could use\n \n "
] |
2024-01-10 | ZihanWangKi/x-TC | external~prompt_gpt~ProtoCal_utils.py | import torch
import torch.nn.functional as F
from tqdm import tqdm
import json
import math
import sys
import openai
import time
import os
import re
import numpy as np
def detokenizer(string):
# ari custom
string = string.replace("`` ", '"')
string = string.replace(" ''", '"')
string = string.replace("` ", '"')
string = string.replace(" ' ", '" ')
# contractions
string = string.replace("s '", "s'")
string = re.sub(r"/' [0-9]/", r"/'[0-9]/", string)
# number separators
string = string.replace(" @-@ ", "-")
string = string.replace(" @,@ ", ",")
string = string.replace(" @.@ ", ".")
# punctuation
string = string.replace(" :", ":")
string = string.replace(" ;", ";")
string = string.replace(" .", ".")
string = string.replace(" !", "!")
string = string.replace(" ?", "?")
string = string.replace(" ,", ",")
# double brackets
string = re.sub(r"\(\s*([^\)]*?)\s*\)", r"(\1)", string)
string = re.sub(r"\[\s*([^\]]*?)\s*\]", r"[\1]", string)
string = re.sub(r"{\s*([^}]*?)\s*}", r"{\1}", string)
# string = re.sub(r"\"\s*([^\"]*?)\s*\"", r'"\1"', string)
# string = re.sub(r"'\s*([^']*?)\s*'", r"'\1'", string)
# miscellaneous
string = string.replace("= = = =", "====")
string = string.replace("= = =", "===")
string = string.replace("= =", "==")
string = string.replace(" " + chr(176) + " ", chr(176))
string = string.replace(" \n", "\n")
string = string.replace("\n ", "\n")
string = string.replace(" N ", " 1 ")
string = string.replace(" 's", "'s")
# ari custom
string = string.replace(" n't ", "n't ")
string = string.replace(" 'd ", "'d ")
string = string.replace(" 'm ", "'m ")
string = string.replace(" 're ", "'re ")
string = string.replace(" 've ", "'ve ")
return string
def get_key(source, target):
return '{}'.format(json.dumps({'source': source, 'target': target}))
def gpt3(prompt, max_len, model_name, temp=0, num_log_probs=100, echo=False, n=None):
print('calling API')
# call GPT-3 API until result is provided and then return it
response = None
received = False
while not received:
try:
response = openai.Completion.create(engine=model_name,
prompt=prompt,
max_tokens=max_len,
temperature=temp,
logprobs=num_log_probs,
echo=echo,
stop='\n',
n=n)
received = True
except:
error = sys.exc_info()[0]
if error == openai.error.InvalidRequestError:
# something is wrong: e.g. prompt too long
print(f"InvalidRequestError\nPrompt passed in:\n\n{prompt}\n\n")
assert False
print("API error:", error)
time.sleep(1)
return response
def cross_entropy_list_gpt3(inputs, targets, model_name, batch=None, cache=None, calculate=False):
'''
get a list of -log P(target|inp) for
the inputs and targets in inputs, targets
using gpt3
'''
assert (len(inputs) == len(targets))
### This block at the top handles caching/batching
## basically, first log all computations not in the cache
## if calculate is False, return dummy values (just
## logging computations to do later)
## if calculate is True, do all computations that are not done
## then return results for this batch
###############################
## if we are caching results (LAZY EVALUATION)
# this is useful for efficient batching. First, add all needed
# calculations to the batch with calculate = False
# then run with calculate=True to work through all cached calculations
if cache is not None:
# log calculations we have not done yet
for inp, targ in zip(inputs, targets):
if get_key(inp, targ) not in cache:
cache[get_key(inp, targ)] = {'source': inp, 'target': targ, 'result': None}
# if not calculating, return dummy values
if not calculate:
return [1.] * len(inputs), [1.] * len(inputs), None
# if caching and calculating, we calculate for all examples
# that have been cached but not calculated
cache_todo = [(v['source'], v['target']) for v in cache.values() if v['result'] is None]
## if there are calculations to do, do them
if len(cache_todo) > 0:
sources_todo = list(zip(*cache_todo))[0]
targets_todo = list(zip(*cache_todo))[1]
ce_list, t_len_list, result_list = cross_entropy_list_gpt3(sources_todo, targets_todo, model_name,
cache=None, batch=batch)
for source, target, ce, t_len, result in zip(sources_todo, targets_todo, ce_list, t_len_list, result_list):
cache[get_key(source, target)]['ce'] = ce
cache[get_key(source, target)]['result'] = result
cache[get_key(source, target)]['t_len'] = t_len
## return results for this example
output = ([cache[get_key(inp, targ)]['ce'] for inp, targ in zip(inputs, targets)],
[cache[get_key(inp, targ)]['t_len'] for inp, targ in zip(inputs, targets)],
[cache[get_key(inp, targ)]['result'] for inp, targ in zip(inputs, targets)])
return output
###############################
### batching ####
if batch is not None:
result = {'choices': []}
ce_list = []
len_list = []
while len(inputs) > 0:
ce_out, len_out, result_out = cross_entropy_list_gpt3(inputs[:batch], targets[:batch], model_name,
cache=None, batch=None)
inputs, targets = inputs[batch:], targets[batch:]
ce_list = ce_list + ce_out
len_list = len_list + len_out
result['choices'] = result['choices'] + result_out
return ce_list, len_list, result['choices']
#########
#####
## calculating cross-entropy
#####
data = [inp + targ for inp, targ in zip(inputs, targets)]
result = gpt3(data, 0, model_name, echo=True, num_log_probs=1)
# with open(out_file, 'a') as out:
# out.write(f'{json.dumps(result)}\n')
ce_list = []
t_lens = []
for inp, out in zip(inputs, result['choices']):
# get the beginning of the target from the response (based on tokenization)
i = 0
while out['logprobs']['text_offset'][i] < len(inp):
i += 1
t_lens.append(len(out['logprobs']['text_offset']) - i)
# sum of log probs over the target tokens
ce = -sum(out['logprobs']["token_logprobs"][i:])
ce_list.append(ce)
return ce_list, t_lens, result['choices']
def logits_list(sources, targets, model, cache=None, batch=False, calculate=True):
'''
Gets a list of CE values, where the ith item is a list of cross-entropies
for targets[i] with sources[i] as contexts
targets and sources are lists of lists of tokens (integers)
model is a language model
batch is the batch size to break things up into, batch=False means don't
break things up into batches, do them all in one go.
CACHING:
cache is a dictionary for single source/target pairs
accessed by cache[get_key(source,target)]
it has fields source, target, result
calculate decides whether to immediates calculate for batch of input
sources/targets or just log them as todo in the cache. To efficiently
batch, we can first log many todo calculations by calling cross_entropy_list
multiple times with calculate=False and the same input cache
Then finally calling it with calculate=True which will then catch up on all
todo calculations, caching them together efficiently
'''
###############################
# This block handles caching of results (LAZY EVALUATION)
# this is useful for efficient batching. First, add all todo
# calculations to the cache with calculate = False (won't do them yet)
# then run with calculate=True to work through all cached calculations
# in efficient batches
"""
if cache is not None:
# log calculations we have not done yet
for source, target in zip(sources, targets):
if get_key(source, target) not in cache:
cache[get_key(source, target)] = {'source': source, 'target': target, 'result': None}
# if not calculating, return dummy values
if not calculate:
return [1.] * len(sources)
# if caching and calculating, we calculate for all examples
# that have been cached but not calculated
cache_todo = [(v['source'], v['target']) for v in cache.values() if v['result'] is None]
## if there are calculations to do, do them
if len(cache_todo) > 0:
sources_todo = list(zip(*cache_todo))[0]
targets_todo = list(zip(*cache_todo))[1]
cache_results = cross_entropy_list(sources_todo, targets_todo, model, cache=None, batch=batch)
for source, target, result in zip(sources_todo, targets_todo, cache_results):
cache[get_key(source, target)]['result'] = result
## return results for this example
results = [cache[get_key(source, target)]['result'] for source, target in zip(sources, targets)]
return results
"""
###############################
assert (len(sources) == len(targets))
n_seqs = len(sources)
torch.cuda.empty_cache()
device = model.transformer.wte.weight.device
# if batching, break it up into smaller pieces
if batch:
l_list = []
n_batches = math.ceil(len(sources) / batch)
list_fun = (lambda v: tqdm(list(v))) if cache is not None else list
for i in list(range(n_batches)):
l_list += logits_list(sources[i * batch:(i + 1) * batch], targets[i * batch:(i + 1) * batch], model,
batch=False)
# sources, targets = sources[batch:], targets[batch:]
return l_list
# initialize input tensors
max_len = max([len(s + t) for s, t in zip(sources, targets)])
input_ids = torch.zeros((n_seqs, max_len)).long()
# -100 is the padding token, which is ignored by F.cross_entropy below
labels = -100 * torch.ones((n_seqs, max_len)).long()
# for each source, target pair, set values in the input tensors
for i, (source, target) in enumerate(zip(sources, targets)):
s = torch.tensor(source).long()
t = torch.tensor(target).long()
input_ids[i, :len(s)] = s
input_ids[i, len(s):len(s) + len(t)] = t
# ignore all predictions except in the target span
labels[i, len(s):len(s) + len(t)] = t
# get logits from the model
with torch.no_grad():
input_ids = input_ids.to(device)
logits = model(input_ids).logits.cpu()[:, :-1].contiguous()
# get logits for prototypical calibration
vec = []
for t in targets:
vec.append(logits[0, -1, t].item())
return vec
def inference_autobatch(model, encoder, example, batch=1, prelog=False, cache=None, dcpmi=False):
'''
if prelog is true, then we're just logging calculations to do in one big batch calculate
(used for caching)
'''
## if we are just prelogging cross entropy calculations to do later,
## we will set caclulate=False for cross_entropy_list and it will output
## a dummy value for now and just log calculations to do. Then the output
## of inference_autobatch will not be correct, calling it in this case is
## just to log calculations to do in big batches
if prelog and (cache is not None):
calculate = False
else:
calculate = True
#####
## input data handling
#####
# i.e. if we're using GPT-3 through the OpenAI API
if type(model) == str:
max_len = 2048
gpt3 = True
else:
max_len = 1024
gpt3 = False
options = []
for opt_raw in example['options']:
if gpt3:
options.append(opt_raw)
else:
# first, encode the option
opt = {key: encoder.encode(opt_raw[key]) for key in opt_raw.keys()}
## trim the option to the max length for gpt2
opt['premise'] = opt['premise'][-(max_len - len(opt['hypothesis'])):]
assert (len(opt['premise'] + opt['hypothesis']) <= max_len)
# then add the encoded, trimmed option
options.append(opt)
#####
## get logits (only support gpt2)
#####
if not gpt3:
## get conditional logits
cond_ce = logits_list([opt['premise'] for opt in options],
[opt['hypothesis'] for opt in options],
model, cache=cache, batch=batch, calculate=calculate)
## get domain conditional logits
domain_cond_ce = logits_list([opt['uncond_premise'] for opt in options],
[opt['uncond_hypothesis'] for opt in options],
model, cache=cache, batch=batch, calculate=calculate)
vec = np.array(cond_ce)
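# optionally apply domain-conditional PMI: subtract the domain-conditional logits, then renormalize in log space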
if dcpmi:
vec -= np.array(domain_cond_ce)
vec = np.exp(vec)
vec = np.log(vec / vec.sum())
return vec
def fwd(model, encoder, examples, batch, cache=None, dcpmi=False):
'''
This is designed for gpt2-style language models
Inputs: (any you don't know)
model - a HuggingFace Transformers gpt-2 model
encoder - a HuggingFace Transformers tokenizer
examples = [ex1, ex2, ...]
where ex = [opt1, opt2, ...] (multiple choice options)
where opt = (premise, hypothesis)
batch: is the max allowed batch size (set to 1 for no batching)
'''
if type(model) != str:
# print the first example to make sure the format is ok
print('=' * 50)
print('MAKE SURE TOKENIZATION AND FORMATTING LOOKS OK')
print('\nprint example 0 of {}:'.format(len(examples)))
ex = examples[0]
options = ex['options']
opt = options[0]
print('CONDITIONAL:')
print(encoder.decode(encoder.encode(opt['premise'])) + '<BREAK>' + encoder.decode(
encoder.encode(opt['hypothesis'])))
print('UNCONDITIONAL:')
print(encoder.decode(encoder.encode(opt['uncond_premise'])) + '<BREAK>' + encoder.decode(
encoder.encode(opt['uncond_hypothesis'])))
print('=' * 50)
else:
# print the first example to make sure the format is ok
print('=' * 50)
print('MAKE SURE TOKENIZATION AND FORMATTING LOOKS OK')
print('\nprint example 0 of {}:'.format(len(examples)))
ex = examples[0]
options = ex['options']
opt = options[0]
print('CONDITIONAL:')
print(opt['premise'] + '<BREAK>' + opt['hypothesis'])
print('UNCONDITIONAL:')
print(opt['uncond_premise'] + '<BREAK>' + opt['uncond_hypothesis'])
print('=' * 50)
predictions_list = []
print('actually calculating')
for example in tqdm(examples):
vec = inference_autobatch(model, encoder, example, prelog=False, cache=cache, batch=batch, dcpmi=dcpmi)
predictions_list.append(vec)
return predictions_list
def score(model, model_name, encoder, examples, stem, split, batch, dcpmi=False):
hist_path = f'{stem}{model_name}-{split}.hist'
if not os.path.exists(hist_path):
cache = {}
with open(hist_path, 'w') as f:
f.write(json.dumps(cache))
else:
MB = os.path.getsize(hist_path) / 1000000
print('=' * 50)
print('Loading existing cache, size {} MB'.format(MB))
print('=' * 50)
with open(hist_path, 'r') as f:
cache = json.loads(f.read())
vecs = fwd(model, encoder, examples, batch, cache, dcpmi)
print('=' * 50)
print('saving cache to {}'.format(hist_path))
print('=' * 50)
with open(hist_path, 'w') as f:
f.write(json.dumps(cache))
#print(vecs)
return vecs
| [] |
2024-01-10 | taniii-shio/langchain-demo | src~gradio_app.py | import os
import gradio as gr
from chatbot_engine import chat, create_index
from dotenv import load_dotenv
from langchain.memory import ChatMessageHistory
def respond(message, chat_history):
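# Rebuild the chat history from Gradio's (user, ai) pairs, query the bot and append the new exchange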
history = ChatMessageHistory()
for [user_message, ai_message] in chat_history:
history.add_user_message(user_message)
history.add_ai_message(ai_message)
bot_message = chat(message, history, index)
chat_history.append((message, bot_message))
return "", chat_history
with gr.Blocks() as demo:
chatbot = gr.Chatbot()
msg = gr.Textbox()
clear = gr.Button("Clear")
msg.submit(respond, [msg, chatbot], [msg, chatbot])
clear.click(lambda: None, None, chatbot, queue=False)
if __name__ == "__main__":
load_dotenv()
app_env = os.environ.get("APP_ENV", "production")
if app_env == "production":
username = os.environ["GRADIO_USERNAME"]
password = os.environ["GRADIO_PASSWORD"]
auth = (username, password)
else:
auth = None
index = create_index()
demo.launch(auth=auth)
| [] |
2024-01-10 | taniii-shio/langchain-demo | src~slack_app.py | import os
from chatbot_engine import chat, create_index
from dotenv import load_dotenv
from slack_bolt import App
from slack_bolt.adapter.socket_mode import SocketModeHandler
from langchain.memory import ChatMessageHistory
load_dotenv()
index = create_index()
# Initialize the app with the bot token and socket mode handler
app = App(token=os.environ.get("SLACK_BOT_TOKEN"))
def fetch_history(channel: str) -> ChatMessageHistory:
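# Rebuild a ChatMessageHistory from the channel's recent messages, attributing the bot's own posts to the AI side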
bot_user_id = app.client.auth_test()["user_id"]
conversation_history = app.client.conversations_history(channel=channel, limit=3)
history = ChatMessageHistory()
for message in conversation_history["messages"]:
text = message["text"]
if message["user"] == bot_user_id:
history.add_ai_message(text)
else:
history.add_user_message(text)
return history
@app.event("app_mention")
def handle_mention(event, say):
channel = event["channel"]
history = fetch_history(channel)
message = event["text"]
bot_message = chat(message, history, index)
say(bot_message)
# Start the app
if __name__ == "__main__":
SocketModeHandler(app, os.environ["SLACK_APP_TOKEN"]).start()
| [] |
2024-01-10 | MrHiraiwa/LineBotForGPTPlus | langchainagent.py | from langchain.agents import initialize_agent, Tool
from langchain.agents import AgentType
from langchain.chat_models import ChatOpenAI
from langchain.utilities.google_search import GoogleSearchAPIWrapper
from langchain.tools import WikipediaQueryRun
from langchain.utilities import WikipediaAPIWrapper
import openai
from datetime import datetime, time, timedelta
import pytz
import requests
from bs4 import BeautifulSoup
llm = ChatOpenAI(model="gpt-3.5-turbo")
google_search = GoogleSearchAPIWrapper()
wikipedia = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper(lang='ja', doc_content_chars_max=1000, load_all_available_meta=True))
def google_search_results(query):
return google_search.results(query, 5)
def clock(dummy):
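# Return the current date and time in Japan Standard Time; the unused argument satisfies the single-input Tool interface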
jst = pytz.timezone('Asia/Tokyo')
nowDate = datetime.now(jst)
nowDateStr = nowDate.strftime('%Y/%m/%d %H:%M:%S %Z')
return nowDateStr
def scraping(links):
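# Fetch each URL, strip link tags, and return the readable text, preferring article/post/content containers and falling back to the page body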
contents = []
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36" ,
}
for link in links:
try:
response = requests.get(link, headers=headers, timeout=5) # Use headers
response.raise_for_status()
response.encoding = response.apparent_encoding
html = response.text
except requests.RequestException:
html = "<html></html>"
soup = BeautifulSoup(html, "html.parser")
# Remove all 'a' tags
for a in soup.findAll('a'):
a.decompose()
content = soup.select_one("article, .post, .content")
if content is None or content.text.strip() == "":
content = soup.select_one("body")
if content is not None:
text = ' '.join(content.text.split()).replace("。 ", "。\n").replace("! ", "!\n").replace("? ", "?\n").strip()
contents.append(text)
return contents
tools = [
Tool(
name = "Search",
func=google_search_results,
description="useful for when you need to answer questions about current events. it is single-input tool Search."
),
Tool(
name = "Clock",
func=clock,
description="useful for when you need to know what time it is. it is single-input tool."
),
Tool(
name = "Scraping",
func=scraping,
description="useful for when you need to read a web page by specifying the URL. it is single-input tool."
),
Tool(
name = "Wikipedia",
func=wikipedia,
description="useful for when you need to Read dictionary page by specifying the word. it is single-input tool."
),
]
mrkl = initialize_agent(tools, llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=True)
def langchain_agent(question):
try:
result = mrkl.run(question)
return result
except Exception as e:
print(f"An error occurred: {e}")
# Return some default value or error message
return "An error occurred while processing the question"
| [] |
2024-01-10 | theyastustory/CohereHackathon3-Recipe_Generator | client.py | from annoy import AnnoyIndex
import cohere
from dotenv import load_dotenv
import numpy as np
import os
import re
from templates import prompt_header, prompt_item
from util import format_list, load_recipes, remove_one_recipe_from_prompt
# load api_key
load_dotenv()
# GLOBALS
API_KEY = os.getenv('API-KEY')
MODEL = 'large'
TRUNCATE = "LEFT"
RECIPES_FILE = './test_recipes.csv'
MAX_PROMPT_LEN = 2048
NUM_GEN_CHARS = 200
NUM_NEIGHBOURS = None # default to entire dataset
# init client
co = cohere.Client(API_KEY)
recipes = load_recipes(RECIPES_FILE)
ingredients = [format_list(ings) for ings in recipes.ingredients]
# compute embeddings
embeddings = np.array(co.embed(model=MODEL,texts=ingredients, truncate=TRUNCATE).embeddings)
"""
Search index for nearest neighbor semantic search
"""
# Create the search index, pass the size of embedding
search_index = AnnoyIndex(embeddings.shape[1], 'angular')
# Add all the vectors to the search index
for i in range(embeddings.shape[0]):
search_index.add_item(i, embeddings[i])
search_index.build(10) # 10 trees
"""
Query Embedding (from user input)
"""
def get_nns_from_query(query):
"""
take query as input, embed, and return similar indices from recipes
"""
query_embedding = co.embed(texts=[query], model=MODEL, truncate=TRUNCATE).embeddings[0]
similars_ids, _ = search_index.get_nns_by_vector(
query_embedding,
n=NUM_NEIGHBOURS if NUM_NEIGHBOURS else len(embeddings),
include_distances=True
)
return similars_ids
"""
Generating
"""
def build_prompt_from_similars(similar_ids, query, n=10):
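# Assemble a few-shot prompt from the top-n similar recipes, then append the query ingredients as the unfinished final item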
prompt = prompt_header
similar_recipes = recipes.iloc[similar_ids[:n]]
for _, (ings, steps, name) in similar_recipes.iterrows():
prompt += prompt_item.format(format_list(ings), format_list(steps), re.sub(' +', ' ', name))
prompt += f"Ingredients:{query}"
return prompt
def generate_recipe(prompt):
"""
Generate recipe from cohere API. If query is too long,
delete last recipe
"""
while True:
try:
response = co.generate(
model=MODEL,
prompt=prompt,
max_tokens=200,
temperature=1,
k=3,
p=0.75,
frequency_penalty=0,
presence_penalty=0,
stop_sequences=['--'],
return_likelihoods='NONE'
)
return response.generations
except cohere.error.CohereError:
prompt = remove_one_recipe_from_prompt(prompt)
def generate_from_query(query):
"""
Function to implement logic of this module end-to-end
"""
similar_ids = get_nns_from_query(query)
prompt = build_prompt_from_similars(similar_ids, query)
generations = generate_recipe(prompt)
return generations | [
"2048",
"Ingredients:PLACEHOLDER",
" ",
" +"
] |
2024-01-10 | h1ddenpr0cess20/infinigpt-irc | infinigpt.py | '''
InfiniGPT-IRC
An OpenAI GPT-3.5-Turbo chatbot for internet relay chat with infinite personalities
written by Dustin Whyte
April 2023
'''
import irc.bot
from openai import OpenAI
import time
import textwrap
import threading
class ircGPT(irc.bot.SingleServerIRCBot):
def __init__(self, api_key, personality, channel, nickname, server, password=None, port=6667):
irc.bot.SingleServerIRCBot.__init__(self, [(server, port)], nickname, nickname)
self.client = OpenAI(api_key=api_key)
self.personality = personality
self.channel = channel
self.server = server
self.nickname = nickname
self.password = password
self.messages = {} #Holds chat history
self.users = [] #List of users in the channel
#set model, change to gpt-4-1106-preview if you want to use gpt-4-turbo
self.model = 'gpt-3.5-turbo-1106'
# prompt parts (this prompt was engineered by me and works almost always)
self.prompt = ("assume the personality of ", ". roleplay as them and never break character unless asked. keep your responses relatively short.")
def chop(self, message):
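# Break a reply into lines and wrap any line longer than 420 characters so it fits IRC message limits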
lines = message.splitlines()
newlines = [] # Initialize an empty list to store wrapped lines
for line in lines:
if len(line) > 420:
wrapped_lines = textwrap.wrap(line,
width=420,
drop_whitespace=False,
replace_whitespace=False,
fix_sentence_endings=True,
break_long_words=False)
newlines.extend(wrapped_lines) # Extend the list with wrapped lines
else:
newlines.append(line) # Add the original line to the list
return newlines # Return the list of wrapped lines
#resets bot to preset personality per user
def reset(self, sender):
if sender in self.messages:
self.messages[sender].clear()
self.persona(self.personality, sender)
#sets the bot personality
def persona(self, persona, sender):
#clear existing history
if sender in self.messages:
self.messages[sender].clear()
personality = self.prompt[0] + persona + self.prompt[1]
self.add_history("system", sender, personality)
#set a custom prompt (such as one from awesome-chatgpt-prompts)
def custom(self, prompt, sender):
#clear existing history
if sender in self.messages:
self.messages[sender].clear()
self.add_history("system", sender, prompt)
#adds messages to self.messages
def add_history(self, role, sender, message):
if sender in self.messages:
self.messages[sender].append({"role": role, "content": message})
else:
if role == "system":
self.messages[sender] = [{"role": role, "content": message}]
else:
self.messages[sender] = [
{"role": "system", "content": self.prompt[0] + self.personality + self.prompt[1]},
{"role": role, "content": message}]
#respond with GPT model
def respond(self, c, sender, message, sender2=None):
try:
response = self.client.chat.completions.create(model=self.model, messages=self.messages[sender])
response_text = response.choices[0].message.content
#removes any unwanted quotation marks from responses
if response_text.startswith('"') and response_text.endswith('"'):
response_text = response_text.strip('"')
#add the response text to the history before breaking it up
self.add_history("assistant", sender, response_text)
#add username before response
#if .x function used
if sender2:
c.privmsg(self.channel, sender2 + ":")
#normal .ai usage
else:
c.privmsg(self.channel, sender + ":")
time.sleep(1)
#split up the response to fit irc length limit
lines = self.chop(response_text)
for line in lines:
c.privmsg(self.channel, line)
time.sleep(2)
except Exception as x: #improve this later with specific errors (token error, invalid request error etc)
c.privmsg(self.channel, "Something went wrong, try again.")
print(x)
#trim history for token size management
if len(self.messages[sender]) > 20:
del self.messages[sender][1:3]
#run message through moderation endpoint for ToS check
def moderate(self, message):
flagged = False
if not flagged:
try:
moderate = self.client.moderations.create(input=message,) #run through the moderation endpoint
flagged = moderate.results[0].flagged #true or false
except:
pass
return flagged
#when bot joins network, identify and wait, then join channel
def on_welcome(self, c, e):
#if nick has a password
if self.password != None:
c.privmsg("NickServ", f"IDENTIFY {self.password}")
#wait for identify to finish
time.sleep(5)
#join channel
c.join(self.channel)
# get users in channel
c.send_raw("NAMES " + self.channel)
#optional join message
greet = "introduce yourself"
try:
response = self.client.chat.completions.create(model=self.model,
messages=[{"role": "system", "content": self.prompt[0] + self.personality + self.prompt[1]},
{"role": "user", "content": greet}])
response_text = response.choices[0].message.content
lines = self.chop(response_text + f" Type .help {self.nickname} to learn how to use me.")
for line in lines:
c.privmsg(self.channel, line)
time.sleep(2)
except:
pass
def on_nicknameinuse(self, c, e):
#add an underscore if nickname is in use
c.nick(c.get_nickname() + "_")
# actions to take when a user joins
def on_join(self, c, e):
user = e.source
user = user.split("!")
user = user[0]
if user not in self.users:
self.users.append(user)
# Optional greeting for when a user joins
# greet = f"come up with a unique greeting for the user {user}"
# if user != self.nickname:
# try:
# response = self.client.chat.completions.create(model=self.model,
# messages=[{"role": "system", "content": self.prompt[0] + self.personality + self.prompt[1]}, {"role": "user", "content": greet}])
# response_text = response.choices[0].message.content
# time.sleep(5)
# lines = self.chop(response_text)
# for line in lines:
# c.privmsg(self.channel, line)
# time.sleep(2)
# except:
# pass
# Get the users in the channel
def on_namreply(self, c, e):
symbols = {"@", "+", "%", "&", "~"} #symbols for ops and voiced
userlist = e.arguments[2].split()
for name in userlist:
for symbol in symbols:
if name.startswith(symbol):
name = name.lstrip(symbol)
if name not in self.users:
self.users.append(name)
#process chat messages
def on_pubmsg(self, c, e):
#message parts
message = e.arguments[0]
sender = e.source
sender = sender.split("!")
sender = sender[0]
#if the bot didn't send the message
if sender != self.nickname:
#basic use
if message.startswith(".ai") or message.startswith(self.nickname):
m = message.split(" ", 1)
m = m[1]
#moderation
flagged = self.moderate(m) #set to False if you want to bypass moderation
if flagged:
c.privmsg(self.channel, f"{sender}: This message violates OpenAI terms of use and was not sent")
#add way to ignore user after a certain number of violations
#maybe like 3 flagged messages gets you ignored for a while
else:
#add to history and start respond thread
self.add_history("user", sender, m)
thread = threading.Thread(target=self.respond, args=(c, sender, self.messages[sender]))
thread.start()
thread.join(timeout=30)
time.sleep(2) #help prevent mixing user output
#collaborative use
if message.startswith(".x "):
m = message.split(" ", 2)
m.pop(0)
if len(m) > 1:
#get users in channel
c.send_raw("NAMES " + self.channel)
#check if the message starts with a name in the history
for name in self.users:
if type(name) == str and m[0] == name:
user = m[0]
m = m[1]
#if so, respond, otherwise ignore
if user in self.messages:
flagged = self.moderate(m) #set to False if you want to bypass moderation
if flagged:
c.privmsg(self.channel, f"{sender}: This message violates OpenAI terms of use and was not sent")
#add way to ignore user after a certain number of violations
else:
self.add_history("user", user, m)
thread = threading.Thread(target=self.respond, args=(c, user, self.messages[user],), kwargs={'sender2': sender})
thread.start()
thread.join(timeout=30)
time.sleep(2)
#change personality
if message.startswith(".persona "):
m = message.split(" ", 1)
m = m[1]
#check if it violates ToS
flagged = self.moderate(m) #set to False if you want to bypass moderation
if flagged:
c.privmsg(self.channel, f"{sender}: This persona violates OpenAI terms of use and was not set.")
#add way to ignore user after a certain number of violations
else:
self.persona(m, sender)
thread = threading.Thread(target=self.respond, args=(c, sender, self.messages[sender]))
thread.start()
thread.join(timeout=30)
time.sleep(2)
#use custom prompts
if message.startswith(".custom "):
m = message.split(" ", 1)
m = m[1]
#check if it violates ToS
flagged = self.moderate(m) #set to False if you want to bypass moderation
if flagged:
c.privmsg(self.channel, f"{sender}: This custom prompt violates OpenAI terms of use and was not set.")
#add way to ignore user after a certain number of violations
else:
self.custom(m, sender)
thread = threading.Thread(target=self.respond, args=(c, sender, self.messages[sender]))
thread.start()
thread.join(timeout=30)
time.sleep(2)
#reset to default personality
if message.startswith(".reset"):
self.reset(sender)
c.privmsg(self.channel, f"{self.nickname} reset to default for {sender}.")
#stock GPT settings
if message.startswith(".stock"):
if sender in self.messages:
self.messages[sender].clear()
else:
self.messages[sender] = []
c.privmsg(self.channel, f"Stock settings applied for {sender}")
#help menu
if message.startswith(f".help {self.nickname}"):
help = [
"I am an OpenAI chatbot. I can have any personality you want me to have. Each user has their own chat history and personality setting.",
f".ai <message> or {self.nickname}: <message> to talk to me.", ".x <user> <message> to talk to another user's history for collaboration.",
".persona <personality> to change my personality. I can be any personality type, character, inanimate object, place, concept.",
".custom <prompt> to use a custom prompt instead of a persona",
".stock to set to stock GPT settings.", f".reset to reset to my default personality, {self.personality}.",
"Available at https://github.com/h1ddenpr0cess20/infinigpt-irc"
]
for line in help:
c.notice(sender, line)
time.sleep(1)
if __name__ == "__main__":
# Set up the OpenAI API client
api_key = "API_KEY"
# create the bot and connect to the server
personality = "an AI that can assume any personality, named InfiniGPT" #you can put anything here. A character, person, personality type, object, concept, emoji, etc
channel = "#CHANNEL"
nickname = "NICKNAME"
#password = "PASSWORD"
server = "SERVER"
#checks if password variable exists (comment it out if unregistered)
try:
infiniGPT = ircGPT(api_key, personality, channel, nickname, server, password)
except:
infiniGPT = ircGPT(api_key, personality, channel, nickname, server)
infiniGPT.start()
| [] |
2024-01-10 | aimdreamboy/GPTeam | src~utils~logging.py | import atexit
import json
import logging
import os
import re
from datetime import datetime
from pathlib import Path
from typing import List
import openai
import pytz
def clean_json_string(json_string):
cleaned_string = re.sub(r"\\\'", r"'", json_string) # replace \' with '
cleaned_string = re.sub(
r'\\"', r'"', cleaned_string
) # replace \" with " on cleaned_string
return cleaned_string
def get_completion_data(text) -> List[str]:
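# Extract the api_version=, data= and message='...' fragments from an OpenAI request log line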
pattern = r"(api_version=[^\s]+)|(data=(.+?)(?= [^\s]+=))|(message='(.+?)')"
matches = re.findall(pattern, text)
cleaned_matches = []
for match in matches:
for item in match:
if item != "":
cleaned_matches.append(item)
break
return cleaned_matches
def get_key_value(text):
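# Parse key="value" / key='value' pairs from a log line into a dict, unescaping the inner quotes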
pattern = r"(\w+)=((?:\"(?:\\\"|[^\"])*\")|(?:\'(?:\\\'|[^'])*\'))"
matches = re.findall(pattern, text)
result = {}
for match in matches:
key, value = match[0], match[1]
# Remove the outer quotes and unescape the inner quotes
if value.startswith('"'):
value = value[1:-1].replace('\\"', '"')
else:
value = value[1:-1].replace("\\'", "'")
result[key] = value
return result
class OpenAIFilter(logging.Filter):
def filter(self, record):
return "openai" in record.name
class JsonArrayFileHandler(logging.FileHandler):
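# File handler that streams log records into a JSON array, writing the closing bracket exactly once at exit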
def __init__(self, filename, mode="a", encoding=None, delay=False):
super().__init__(filename, mode, encoding, delay)
self.closed_properly = False
self.stream.write("[")
atexit.register(self.close)
def close(self):
self.acquire()
try:
if not self.closed_properly:
self.stream.write("]")
self.closed_properly = True
super().close()
finally:
self.release()
def emit(self, record):
if self.stream.tell() > 1:
self.stream.write(",\n")
super().emit(record)
class LoggingFilter(logging.Filter):
def filter(self, record):
print("logging filter", record)
return True
def init_logging():
openai.util.logger.setLevel(logging.WARNING)
open("src/web/logs/agent.txt", "w").close()
def get_agent_logger():
# Create a logger
logger = logging.getLogger("agent")
logger.setLevel(logging.INFO)
# Prevent log messages from being passed to the root logger or any other ancestor logger
logger.propagate = False
# Remove all handlers associated with the logger object.
for handler in logger.handlers[:]:
logger.removeHandler(handler)
# Create a file handler
Path("src/web/logs/").mkdir(parents=True, exist_ok=True)
handler = logging.FileHandler("src/web/logs/agent.txt")
handler.setLevel(logging.INFO)
# Add the handlers to the logger
logger.addHandler(handler)
return logger
agent_logger = get_agent_logger()
| [] |
2024-01-10 | varunhk2002/LLM_ChatWithDocs | chat_with_docs.py | import streamlit as st
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
import os
from langchain.agents import create_pandas_dataframe_agent
import pandas as pd
from langchain.chat_models import ChatOpenAI
def load_document(file):
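# Load a PDF, DOCX or TXT file with the matching LangChain loader; CSV files just return the marker string 'csv'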
name, extention = os.path.splitext(file)
if extention == '.pdf':
from langchain.document_loaders import PyPDFLoader
print(f'Loading {file}')
loader = PyPDFLoader(file)
data = loader.load()
elif extention == '.docx':
from langchain.document_loaders import Docx2txtLoader
print(f'Loading {file}')
loader = Docx2txtLoader(file)
data = loader.load()
elif extention == '.txt':
from langchain.document_loaders import TextLoader
print(f'Loading {file}')
loader = TextLoader(file)
data = loader.load()
elif extention == '.csv':
data = 'csv'
else:
print('Document not supported')
return None
return data
def chunk_data(data, chunk_size=256, chunk_overlap=20):
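# Split the loaded documents into overlapping chunks sized for embedding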
from langchain.text_splitter import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
chunks = text_splitter.split_documents(data)
return chunks
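# Embed the chunks with OpenAI embeddings and keep them in an in-memory Chroma vector store.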
def create_embeddings(chunks):
embeddings = OpenAIEmbeddings()
vector_store = Chroma.from_documents(chunks, embeddings)
return vector_store
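# Answer the question with a RetrievalQA (stuff) chain over the k most similar chunks.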
def ask_get_answer(vector_store, q, k=3):
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
llm = ChatOpenAI(model='gpt-3.5-turbo', temperature=1)
retriever = vector_store.as_retriever(search_type='similarity', search_kwargs={'k': k})
chain = RetrievalQA.from_chain_type(llm=llm, chain_type='stuff', retriever=retriever)
answer = chain.run(q)
return answer
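# Return the total token count and the estimated ada-002 embedding cost in USD.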
def print_embedding_cost(texts):
import tiktoken
enc = tiktoken.encoding_for_model('text-embedding-ada-002')
total_tokens = sum([len(enc.encode(page.page_content)) for page in texts])
return total_tokens, total_tokens/1000*0.0004
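# Drop the stored chat history (used when inputs change).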
def clear_history():
if 'history' in st.session_state:
del st.session_state['history']
if __name__ == "__main__":
import os
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv(), override=True)
if_csv = 0
st.image('img.jpg')
st.subheader('LLM Question answering application')
with st.sidebar:
# api_key = st.text_input('OpenAI API Key:', type='password')
# if api_key:
# os.environ['OPENAI_API_KEY'] = api_key
uploaded_file = st.file_uploader('Upload a file:', type=['pdf', 'docx', 'txt'])
chunk_size = st.number_input('Chunk size: ', min_value=100, max_value=2048, value=512, on_change=clear_history)
k = st.number_input('k', min_value=1, max_value=20, value=3, on_change=clear_history)
add_data = st.button('Add Data', on_click=clear_history)
if uploaded_file and add_data:
with st.spinner('Reading, Chunking and embedding file ...'):
bytes_data = uploaded_file.read()
file_name = os.path.join('./', uploaded_file.name)
with open(file_name, 'wb') as f:
f.write(bytes_data)
data = load_document(file_name)
chunks = chunk_data(data, chunk_size=chunk_size)
st.write(f'Chunk size: {chunk_size}, Chunks: {len(chunks)}')
tokens, embedding_cost = print_embedding_cost(chunks)
st.write(f'Embedding Cost: ${embedding_cost:.4f}')
vector_store = create_embeddings(chunks)
st.session_state.vs = vector_store
st.success('File Uploaded, chunked, embedded successfully')
q = st.text_input('Ask a question about the content of your file: ')
if q:
if 'vs' in st.session_state:
vector_store = st.session_state.vs
st.write(f'k: {k}')
answer = ask_get_answer(vector_store, q, k)
st.text_area('LLM Answer: ', value=answer)
st.divider()
if 'history' not in st.session_state:
st.session_state.history = ''
value = f'Q: {q} \nA: {answer}'
st.session_state.history = f'{value} \n {"-"*100} \n {st.session_state.history}'
h = st.session_state.history
st.text_area(label='Chat history', value=h, key='history', height=400) | [] |
2024-01-10 | varunhk2002/LLM_ChatWithDocs | data_analysis.py | import streamlit as st
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.agents import create_pandas_dataframe_agent
import pandas as pd
from langchain.chat_models import ChatOpenAI
import os
from dotenv import load_dotenv, find_dotenv
if __name__ == "__main__":
load_dotenv(find_dotenv(), override=True)
file_name = None
st.image('img.jpg')
st.subheader('Analyse your CSV files!!')
with st.sidebar:
uploaded_file = st.file_uploader('Upload a file:', type=['csv'])
add_data = st.button('Add Data')
if uploaded_file and add_data:
with st.spinner('Uploading File'):
bytes_data = uploaded_file.read()
file_name = os.path.join('./', uploaded_file.name)
with open(file_name, 'wb') as f:
f.write(bytes_data)
st.session_state.fl = file_name
# print(file_name)
# doc = pd.read_csv(f'{file_name}', index_col=0)
# print(doc)
# chat = ChatOpenAI(model_name='gpt-4', temperature=0.0)
# agent = create_pandas_dataframe_agent(chat, doc, verbose=True)
# st.session_state.ag = agent
q = st.text_input('Ask a question about the content of your file: ')
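# Rebuild the pandas dataframe agent from the saved CSV path and run the question through it.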
if q:
if 'fl' in st.session_state:
file_name = st.session_state.fl
doc = pd.read_csv(f'{file_name}', index_col=0)
chat = ChatOpenAI(model_name='gpt-4', temperature=0.0)
agent = create_pandas_dataframe_agent(chat, doc, verbose=True)
answer = agent.run(q)
st.text_area('LLM Answer: ', value=answer)
| [] |
2024-01-10 | sausheong/talkie | chains.py | from langchain import OpenAI, LLMChain, PromptTemplate
from langchain.memory import ConversationBufferWindowMemory, ConversationBufferMemory
from langchain.agents import initialize_agent, Tool
from langchain.chat_models import ChatOpenAI
from langchain.utilities import GoogleSerperAPIWrapper
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
# get a chat LLM chain, following a prompt template
def get_chat_chain():
# create prompt from a template
template = open('template', 'r').read()
prompt = PromptTemplate(
input_variables=["history", "human_input"],
template=template
)
# create a LLM chain with conversation buffer memory
return LLMChain(
llm=OpenAI(temperature=0),
prompt=prompt,
verbose=True,
memory=ConversationBufferWindowMemory(k=10),
)
# get a chat chain that uses Serper API to search using Google Search
def get_search_agent():
# set up the tool
search = GoogleSerperAPIWrapper()
tools = [ Tool(name = "Current Search", func=search.run, description="search")]
# create and return the chat agent
return initialize_agent(
tools=tools,
llm=ChatOpenAI(),
agent="chat-conversational-react-description",
verbose=True,
memory=ConversationBufferMemory(memory_key="chat_history", return_messages=True)
)
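# get a retrieval QA chain over the local Chroma vector store (stuff chain type)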
def get_qa_chain():
vectordb = Chroma(persist_directory='.', embedding_function=OpenAIEmbeddings())
retriever = vectordb.as_retriever()
return RetrievalQA.from_chain_type(
llm=ChatOpenAI(temperature=0),
chain_type="stuff",
retriever=retriever) | [
"human_input"
] |
2024-01-10 | lebiathan/ludwig | ludwig~encoders~text_encoders.py | #! /usr/bin/env python
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
import sys
from typing import Callable, Dict, List, Optional, Union
import torch
from ludwig.constants import TEXT
from ludwig.encoders.base import Encoder
from ludwig.encoders.registry import register_encoder
from ludwig.modules.reduction_modules import SequenceReducer
logger = logging.getLogger(__name__)
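# Each encoder below wraps a Hugging Face transformer: it either loads pretrained weights or
# builds the model from a config, then optionally reduces the output token sequence with a SequenceReducer.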
@register_encoder("albert", TEXT)
class ALBERTEncoder(Encoder):
fixed_preprocessing_parameters = {
"tokenizer": "hf_tokenizer",
"pretrained_model_name_or_path": "feature.pretrained_model_name_or_path",
}
default_params = {
"pretrained_model_name_or_path": "albert-base-v2",
}
def __init__(
self,
max_sequence_length,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = "albert-base-v2",
saved_weights_in_checkpoint: bool = False,
trainable: bool = True,
reduce_output: str = "cls_pooled",
vocab_size: int = 30000,
embedding_size: int = 128,
hidden_size: int = 4096,
num_hidden_layers: int = 12,
num_hidden_groups: int = 1,
num_attention_heads: int = 64,
intermediate_size: int = 16384,
inner_group_num: int = 1,
hidden_act: str = "gelu_new",
hidden_dropout_prob: float = 0,
attention_probs_dropout_prob: float = 0,
max_position_embeddings: int = 512,
type_vocab_size: int = 2,
initializer_range: float = 0.02,
layer_norm_eps: float = 1e-12,
classifier_dropout_prob: float = 0.1,
position_embedding_type: str = "absolute",
pad_token_id: int = 0,
bos_token_id: int = 2,
eos_token_id: int = 3,
pretrained_kwargs: Dict = None,
**kwargs
):
super().__init__()
try:
from transformers import AlbertConfig, AlbertModel
except ModuleNotFoundError:
logger.error(
" transformers is not installed. "
"In order to install all text feature dependencies run "
"pip install ludwig[text]"
)
sys.exit(-1)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
self.transformer = AlbertModel.from_pretrained(pretrained_model_name_or_path, **pretrained_kwargs)
else:
config = AlbertConfig(
vocab_size=vocab_size,
embedding_size=embedding_size,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_hidden_groups=num_hidden_groups,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
inner_group_num=inner_group_num,
hidden_act=hidden_act,
hidden_dropout_prob=hidden_dropout_prob,
attention_probs_dropout_prob=attention_probs_dropout_prob,
max_position_embeddings=max_position_embeddings,
type_vocab_size=type_vocab_size,
initializer_range=initializer_range,
layer_norm_eps=layer_norm_eps,
classifier_dropout_prob=classifier_dropout_prob,
position_embedding_type=position_embedding_type,
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
)
self.transformer = AlbertModel(config)
self.reduce_output = reduce_output
if not self.reduce_output == "cls_pooled":
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
if trainable:
self.transformer.train()
self.transformer.resize_token_embeddings(vocab_size)
self.max_sequence_length = max_sequence_length
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
if self.reduce_output == "cls_pooled":
hidden = transformer_outputs[1]
else:
hidden = transformer_outputs[0][:, 1:-1, :]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# Subtract 2 to remove CLS and PAD tokens added by BERT tokenizer.
return torch.Size(
[
self.max_sequence_length - 2,
self.transformer.config.hidden_size,
]
)
return torch.Size([self.transformer.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
@register_encoder("mt5", TEXT)
class MT5Encoder(Encoder):
fixed_preprocessing_parameters = {
"tokenizer": "hf_tokenizer",
"pretrained_model_name_or_path": "feature.pretrained_model_name_or_path",
}
default_params = {
"pretrained_model_name_or_path": "google/mt5-base",
}
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = "google/mt5-base",
saved_weights_in_checkpoint: bool = False,
trainable: bool = True,
reduce_output: str = "cls_pooled",
vocab_size: int = 250112,
d_model: int = 512,
d_kv: int = 64,
d_ff: int = 1024,
num_layers: int = 8,
num_decoder_layers: int = None,
num_heads: int = 6,
relative_attention_num_buckets: int = 32,
dropout_rate: float = 0.1,
layer_norm_epsilon: float = 1e-06,
initializer_factor: float = 1.0,
feed_forward_proj: str = "gated-gelu",
is_encoder_decoder: bool = True,
use_cache: bool = True,
tokenizer_class: str = "T5Tokenizer",
tie_word_embeddings: bool = False,
pad_token_id: int = 0,
eos_token_id: int = 1,
decoder_start_token_id: int = 0,
pretrained_kwargs: Dict = None,
**kwargs
):
super().__init__()
try:
from transformers import MT5Config, MT5EncoderModel
except ModuleNotFoundError:
logger.error(
" transformers is not installed. "
"In order to install all text feature dependencies run "
"pip install ludwig[text]"
)
sys.exit(-1)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
self.transformer = MT5EncoderModel.from_pretrained(pretrained_model_name_or_path, **pretrained_kwargs)
else:
config = MT5Config(
vocab_size=vocab_size,
d_model=d_model,
d_kv=d_kv,
d_ff=d_ff,
num_layers=num_layers,
num_decoder_layers=num_decoder_layers,
num_heads=num_heads,
relative_attention_num_buckets=relative_attention_num_buckets,
dropout_rate=dropout_rate,
layer_norm_epsilon=layer_norm_epsilon,
initializer_factor=initializer_factor,
feed_forward_proj=feed_forward_proj,
is_encoder_decoder=is_encoder_decoder,
use_cache=use_cache,
tokenizer_class=tokenizer_class,
tie_word_embeddings=tie_word_embeddings,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
decoder_start_token_id=decoder_start_token_id,
)
self.transformer = MT5EncoderModel(config)
self.reduce_output = reduce_output
if not self.reduce_output == "cls_pooled":
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
if trainable:
self.transformer.train()
self.transformer.resize_token_embeddings(vocab_size)
self.max_sequence_length = max_sequence_length
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer(
input_ids=inputs,
attention_mask=mask,
)
if self.reduce_output == "cls_pooled":
hidden = transformer_outputs[1]
else:
hidden = transformer_outputs[0][:, 1:-1, :]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# Subtract 2 to remove CLS and PAD tokens added by MT5 tokenizer.
return torch.Size(
[
self.max_sequence_length - 2,
self.transformer.config.hidden_size,
]
)
return torch.Size([self.transformer.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
@register_encoder("xlmroberta", TEXT)
class XLMRoBERTaEncoder(Encoder):
fixed_preprocessing_parameters = {
"tokenizer": "hf_tokenizer",
"pretrained_model_name_or_path": "feature.pretrained_model_name_or_path",
}
default_params = {
"pretrained_model_name_or_path": "xlm-roberta-base",
}
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = "xlm-roberta-base",
saved_weights_in_checkpoint: bool = False,
reduce_output: str = "cls_pooled",
trainable: bool = True,
vocab_size: int = None,
pad_token_id: int = 1,
bos_token_id: int = 0,
eos_token_id: int = 2,
add_pooling_layer: bool = True,
pretrained_kwargs: Dict = None,
**kwargs
):
super().__init__()
try:
from transformers import XLMRobertaConfig, XLMRobertaModel
except ModuleNotFoundError:
logger.error(
" transformers is not installed. "
"In order to install all text feature dependencies run "
"pip install ludwig[text]"
)
sys.exit(-1)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
self.transformer = XLMRobertaModel.from_pretrained(pretrained_model_name_or_path, **pretrained_kwargs)
else:
config = XLMRobertaConfig(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
)
self.transformer = XLMRobertaModel(config, add_pooling_layer)
self.reduce_output = reduce_output
if not self.reduce_output == "cls_pooled":
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
if trainable:
self.transformer.train()
self.transformer.resize_token_embeddings(vocab_size)
self.max_sequence_length = max_sequence_length
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
if self.reduce_output == "cls_pooled":
hidden = transformer_outputs[1]
else:
hidden = transformer_outputs[0][:, 1:-1, :]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# Subtract 2 to remove CLS and PAD tokens added by XLMRoberta tokenizer.
return torch.Size(
[
self.max_sequence_length - 2,
self.transformer.config.hidden_size,
]
)
return torch.Size([self.transformer.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
@register_encoder("bert", TEXT)
class BERTEncoder(Encoder):
# TODO(justin): Use official class properties.
fixed_preprocessing_parameters = {
"tokenizer": "hf_tokenizer",
"pretrained_model_name_or_path": "feature.pretrained_model_name_or_path",
}
default_params = {
"pretrained_model_name_or_path": "bert-base-uncased",
}
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = "bert-base-uncased",
saved_weights_in_checkpoint: bool = False,
trainable: bool = True,
reduce_output: str = "cls_pooled",
vocab_size: int = 30522,
hidden_size: int = 768,
num_hidden_layers: int = 12,
num_attention_heads: int = 12,
intermediate_size: int = 3072,
hidden_act: Union[str, Callable] = "gelu",
hidden_dropout_prob: float = 0.1,
attention_probs_dropout_prob: float = 0.1,
max_position_embeddings: int = 512,
type_vocab_size: int = 2,
initializer_range: float = 0.02,
layer_norm_eps: float = 1e-12,
pad_token_id: int = 0,
gradient_checkpointing: bool = False,
position_embedding_type: str = "absolute",
classifier_dropout: float = None,
pretrained_kwargs: Dict = None,
**kwargs
):
super().__init__()
try:
from transformers import BertConfig, BertModel
except ModuleNotFoundError:
logger.error(
" transformers is not installed. "
"In order to install all text feature dependencies run "
"pip install ludwig[text]"
)
sys.exit(-1)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
self.transformer = BertModel.from_pretrained(pretrained_model_name_or_path, **pretrained_kwargs)
else:
config = BertConfig(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
hidden_act=hidden_act,
hidden_dropout_prob=hidden_dropout_prob,
attention_probs_dropout_prob=attention_probs_dropout_prob,
max_position_embeddings=max_position_embeddings,
type_vocab_size=type_vocab_size,
initializer_range=initializer_range,
layer_norm_eps=layer_norm_eps,
pad_token_id=pad_token_id,
gradient_checkpointing=gradient_checkpointing,
position_embedding_type=position_embedding_type,
classifier_dropout=classifier_dropout,
)
self.transformer = BertModel(config)
self.reduce_output = reduce_output
if not self.reduce_output == "cls_pooled":
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
if trainable:
self.transformer.train()
self.transformer.resize_token_embeddings(vocab_size)
self.max_sequence_length = max_sequence_length
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
if self.reduce_output == "cls_pooled":
hidden = transformer_outputs[1]
else:
hidden = transformer_outputs[0][:, 1:-1, :]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
# TODO(shreya): Confirm that this is it
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# Subtract 2 to remove CLS and PAD tokens added by BERT tokenizer.
return torch.Size(
[
self.max_sequence_length - 2,
self.transformer.config.hidden_size,
]
)
return torch.Size([self.transformer.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
@register_encoder("xlm", TEXT)
class XLMEncoder(Encoder):
fixed_preprocessing_parameters = {
"tokenizer": "hf_tokenizer",
"pretrained_model_name_or_path": "feature.pretrained_model_name_or_path",
}
default_params = {
"pretrained_model_name_or_path": "xlm-mlm-en-2048",
}
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = "xlm-mlm-en-2048",
saved_weights_in_checkpoint: bool = False,
trainable: bool = True,
reduce_output: str = "cls_pooled",
vocab_size: int = 30145,
emb_dim: int = 2048,
n_layers: int = 12,
n_heads: int = 16,
dropout: float = 0.1,
attention_dropout: float = 0.1,
gelu_activation: bool = True,
sinusoidal_embeddings: bool = False,
causal: bool = False,
asm: bool = False,
n_langs: int = 1,
use_lang_emb: bool = True,
max_position_embeddings: int = 512,
embed_init_std: float = 2048**-0.5,
layer_norm_eps: float = 1e-12,
init_std: float = 0.02,
bos_index: int = 0,
eos_index: int = 1,
pad_index: int = 2,
unk_index: int = 3,
mask_index: int = 5,
is_encoder: bool = True,
start_n_top: int = 5,
end_n_top: int = 5,
mask_token_id: int = 0,
lang_id: int = 0,
pad_token_id: int = 2,
bos_token_id: int = 0,
pretrained_kwargs: Dict = None,
**kwargs
):
super().__init__()
try:
from transformers import XLMConfig, XLMModel
except ModuleNotFoundError:
logger.error(
" transformers is not installed. "
"In order to install all text feature dependencies run "
"pip install ludwig[text]"
)
sys.exit(-1)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
self.transformer = XLMModel.from_pretrained(pretrained_model_name_or_path, **pretrained_kwargs)
if trainable:
self.transformer.train()
else:
config = XLMConfig(
vocab_size=vocab_size,
emb_dim=emb_dim,
n_layers=n_layers,
n_heads=n_heads,
dropout=dropout,
attention_dropout=attention_dropout,
gelu_activation=gelu_activation,
sinusoidal_embeddings=sinusoidal_embeddings,
causal=causal,
asm=asm,
n_langs=n_langs,
use_lang_emb=use_lang_emb,
max_position_embeddings=max_position_embeddings,
embed_init_std=embed_init_std,
layer_norm_eps=layer_norm_eps,
init_std=init_std,
bos_index=bos_index,
eos_index=eos_index,
pad_index=pad_index,
unk_index=unk_index,
mask_index=mask_index,
is_encoder=is_encoder,
start_n_top=start_n_top,
end_n_top=end_n_top,
mask_token_id=mask_token_id,
lang_id=lang_id,
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
)
self.transformer = XLMModel(config)
self.reduce_output = reduce_output
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
self.transformer.resize_token_embeddings(vocab_size)
self.max_sequence_length = max_sequence_length
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
hidden = transformer_outputs[0]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
# TODO(shreya): Confirm that this is it
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# Subtract 2 to remove CLS and PAD tokens added by BERT tokenizer.
return torch.Size(
[
self.max_sequence_length - 2,
self.transformer.config.hidden_size,
]
)
return torch.Size([self.transformer.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
@register_encoder("gpt", TEXT)
class GPTEncoder(Encoder):
fixed_preprocessing_parameters = {
"tokenizer": "hf_tokenizer",
"pretrained_model_name_or_path": "feature.pretrained_model_name_or_path",
}
default_params = {
"pretrained_model_name_or_path": "openai-gpt",
}
def __init__(
self,
max_sequence_length: int,
reduce_output: str = "sum",
use_pretrained: bool = True,
pretrained_model_name_or_path: str = "openai-gpt",
saved_weights_in_checkpoint: bool = False,
trainable: bool = True,
vocab_size: int = 30522,
n_positions: int = 40478,
n_ctx: int = 512,
n_embd: int = 768,
n_layer: int = 12,
n_head: int = 12,
afn: str = "gelu",
resid_pdrop: float = 0.1,
embd_pdrop: float = 0.1,
attn_pdrop: float = 0.1,
layer_norm_epsilon: float = 1e-5,
initializer_range: float = 0.02,
pretrained_kwargs: Dict = None,
**kwargs
):
super().__init__()
try:
from transformers import OpenAIGPTConfig, OpenAIGPTModel
except ModuleNotFoundError:
logger.error(
" transformers is not installed. "
"In order to install all text feature dependencies run "
"pip install ludwig[text]"
)
sys.exit(-1)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
self.transformer = OpenAIGPTModel.from_pretrained(pretrained_model_name_or_path, **pretrained_kwargs)
else:
config = OpenAIGPTConfig(
vocab_size=vocab_size,
n_positions=n_positions,
n_ctx=n_ctx,
n_embd=n_embd,
n_layer=n_layer,
n_head=n_head,
afn=afn,
resid_pdrop=resid_pdrop,
embd_pdrop=embd_pdrop,
attn_pdrop=attn_pdrop,
layer_norm_epsilon=layer_norm_epsilon,
initializer_range=initializer_range,
)
self.transformer = OpenAIGPTModel(config)
self.reduce_output = reduce_output
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
if trainable:
self.transformer.train()
self.transformer.resize_token_embeddings(vocab_size)
self.max_sequence_length = max_sequence_length
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
hidden = transformer_outputs[0]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
return torch.Size([self.max_sequence_length, self.transformer.config.hidden_size])
return torch.Size([self.transformer.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
@register_encoder("gpt2", TEXT)
class GPT2Encoder(Encoder):
fixed_preprocessing_parameters = {
"tokenizer": "hf_tokenizer",
"pretrained_model_name_or_path": "feature.pretrained_model_name_or_path",
}
default_params = {
"pretrained_model_name_or_path": "gpt2",
}
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = "gpt2",
reduce_output: str = "sum",
trainable: bool = True,
vocab_size: int = 50257,
n_positions: int = 1024,
n_ctx: int = 1024,
n_embd: int = 768,
n_layer: int = 12,
n_head: int = 12,
n_inner: Optional[int] = None,
activation_function: str = "gelu",
resid_pdrop: float = 0.1,
embd_pdrop: float = 0.1,
attn_pdrop: float = 0.1,
layer_norm_epsilon: float = 1e-5,
initializer_range: float = 0.02,
scale_attn_weights: bool = True,
pretrained_kwargs: Dict = None,
**kwargs
):
super().__init__()
try:
from transformers import GPT2Config, GPT2Model
except ModuleNotFoundError:
logger.error(
" transformers is not installed. "
"In order to install all text feature dependencies run "
"pip install ludwig[text]"
)
sys.exit(-1)
if use_pretrained:
pretrained_kwargs = pretrained_kwargs or {}
self.transformer = GPT2Model.from_pretrained(pretrained_model_name_or_path, **pretrained_kwargs)
else:
config = GPT2Config(
vocab_size=vocab_size,
n_positions=n_positions,
n_ctx=n_ctx,
n_embd=n_embd,
n_layer=n_layer,
n_head=n_head,
n_inner=n_inner,
activation_function=activation_function,
resid_pdrop=resid_pdrop,
embd_pdrop=embd_pdrop,
attn_pdrop=attn_pdrop,
layer_norm_epsilon=layer_norm_epsilon,
initializer_range=initializer_range,
scale_attn_weights=scale_attn_weights,
)
self.transformer = GPT2Model(config)
if trainable:
self.transformer.train()
self.max_sequence_length = max_sequence_length
self.reduce_output = reduce_output
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
self.transformer.resize_token_embeddings(vocab_size)
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
hidden = transformer_outputs[0]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
return torch.Size([self.max_sequence_length, self.transformer.config.hidden_size])
return torch.Size([self.transformer.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
@register_encoder("roberta", TEXT)
class RoBERTaEncoder(Encoder):
fixed_preprocessing_parameters = {
"tokenizer": "hf_tokenizer",
"pretrained_model_name_or_path": "feature.pretrained_model_name_or_path",
}
default_params = {
"pretrained_model_name_or_path": "roberta-base",
}
def __init__(
self,
max_sequence_length,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = "roberta-base",
saved_weights_in_checkpoint: bool = False,
reduce_output: str = "cls_pooled",
trainable: bool = True,
vocab_size: int = None,
pad_token_id: int = 1,
bos_token_id: int = 0,
eos_token_id: int = 2,
pretrained_kwargs: Dict = None,
**kwargs
):
super().__init__()
try:
from transformers import RobertaConfig, RobertaModel
except ModuleNotFoundError:
logger.error(
" transformers is not installed. "
"In order to install all text feature dependencies run "
"pip install ludwig[text]"
)
sys.exit(-1)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
self.transformer = RobertaModel.from_pretrained(pretrained_model_name_or_path, **pretrained_kwargs)
else:
config = RobertaConfig(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
)
self.transformer = RobertaModel(config)
if trainable:
self.transformer.train()
self.reduce_output = reduce_output
if not self.reduce_output == "cls_pooled":
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
self.transformer.trainable = trainable
self.transformer.resize_token_embeddings(vocab_size)
self.max_sequence_length = max_sequence_length
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
if self.reduce_output == "cls_pooled":
hidden = transformer_outputs[1]
else:
hidden = transformer_outputs[0][:, 1:-1, :] # bos + [sent] + sep
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
return torch.Size([self.max_sequence_length, self.transformer.config.hidden_size])
return torch.Size([self.transformer.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
@register_encoder("transformer_xl", TEXT)
class TransformerXLEncoder(Encoder):
fixed_preprocessing_parameters = {
"tokenizer": "hf_tokenizer",
"pretrained_model_name_or_path": "feature.pretrained_model_name_or_path",
}
default_params = {
"pretrained_model_name_or_path": "transfo-xl-wt103",
}
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = "transfo-xl-wt103",
saved_weights_in_checkpoint: bool = False,
reduce_output: str = "sum",
trainable: bool = True,
vocab_size: int = 267735,
cutoffs: List[int] = [20000, 40000, 200000],
d_model: int = 1024,
d_embed: int = 1024,
n_head: int = 16,
d_head: int = 64,
d_inner: int = 4096,
div_val: int = 4,
pre_lnorm: bool = False,
n_layer: int = 18,
mem_len: int = 1600,
clamp_len: int = 1000,
same_length: bool = True,
proj_share_all_but_first: bool = True,
attn_type: int = 0,
sample_softmax: int = -1,
adaptive: bool = True,
dropout: float = 0.1,
dropatt: float = 0.0,
untie_r: bool = True,
init: str = "normal",
init_range: float = 0.01,
proj_init_std: float = 0.01,
init_std: float = 0.02,
layer_norm_epsilon: float = 1e-5,
eos_token_id: int = 0,
pretrained_kwargs: Dict = None,
**kwargs
):
super().__init__()
try:
from transformers import TransfoXLConfig, TransfoXLModel
except ModuleNotFoundError:
logger.error(
" transformers is not installed. "
"In order to install all text feature dependencies run "
"pip install ludwig[text]"
)
sys.exit(-1)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
self.transformer = TransfoXLModel.from_pretrained(pretrained_model_name_or_path, **pretrained_kwargs)
else:
config = TransfoXLConfig(
vocab_size=vocab_size,
cutoffs=cutoffs,
d_model=d_model,
d_embed=d_embed,
n_head=n_head,
d_head=d_head,
d_inner=d_inner,
div_val=div_val,
pre_lnorm=pre_lnorm,
n_layer=n_layer,
mem_len=mem_len,
clamp_len=clamp_len,
same_length=same_length,
proj_share_all_but_first=proj_share_all_but_first,
attn_type=attn_type,
sample_softmax=sample_softmax,
adaptive=adaptive,
dropout=dropout,
dropatt=dropatt,
untie_r=untie_r,
init=init,
init_range=init_range,
proj_init_std=proj_init_std,
init_std=init_std,
layer_norm_epsilon=layer_norm_epsilon,
eos_token_id=eos_token_id,
)
self.transformer = TransfoXLModel(config)
self.reduce_output = reduce_output
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
if trainable:
self.transformer.train()
self.max_sequence_length = max_sequence_length
def forward(self, inputs: torch.Tensor, mask: torch.Tensor = None) -> Dict[str, torch.Tensor]:
transformer_outputs = self.transformer(inputs)
hidden = transformer_outputs[0]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
return torch.Size([self.max_sequence_length, self.transformer.config.d_model])
else:
return torch.Size([self.transformer.config.d_model])
@property
def input_dtype(self):
return torch.int32
@register_encoder("xlnet", TEXT)
class XLNetEncoder(Encoder):
fixed_preprocessing_parameters = {
"tokenizer": "hf_tokenizer",
"pretrained_model_name_or_path": "feature.pretrained_model_name_or_path",
}
default_params = {
"pretrained_model_name_or_path": "xlnet-base-cased",
}
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = "xlnet-base-cased",
saved_weights_in_checkpoint: bool = False,
reduce_output: str = "sum",
trainable: bool = True,
vocab_size: int = 32000,
d_model: int = 1024,
n_layer: int = 24,
n_head: int = 16,
d_inner: int = 4096,
ff_activation: str = "gelu",
untie_r: bool = True,
attn_type: str = "bi",
initializer_range: float = 0.02,
layer_norm_eps: float = 1e-12,
dropout: float = 0.1,
mem_len: Optional[int] = 512,
reuse_len: Optional[int] = None,
use_mems_eval: bool = True,
use_mems_train: bool = False,
bi_data: bool = False,
clamp_len: int = -1,
same_length: bool = False,
summary_type: str = "last",
summary_use_proj: bool = True,
summary_activation: str = "tanh",
summary_last_dropout: float = 0.1,
start_n_top: int = 5,
end_n_top: int = 5,
pad_token_id: int = 5,
bos_token_id: int = 1,
eos_token_id: int = 2,
pretrained_kwargs: Dict = None,
**kwargs
):
super().__init__()
try:
from transformers import XLNetConfig, XLNetModel
except ModuleNotFoundError:
logger.error(
" transformers is not installed. "
"In order to install all text feature dependencies run "
"pip install ludwig[text]"
)
sys.exit(-1)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
self.transformer = XLNetModel.from_pretrained(pretrained_model_name_or_path, **pretrained_kwargs)
else:
config = XLNetConfig(
vocab_size=vocab_size,
d_model=d_model,
n_layer=n_layer,
n_head=n_head,
d_inner=d_inner,
ff_activation=ff_activation,
untie_r=untie_r,
attn_type=attn_type,
initializer_range=initializer_range,
layer_norm_eps=layer_norm_eps,
dropout=dropout,
mem_len=mem_len,
reuse_len=reuse_len,
use_mems_eval=use_mems_eval,
use_mems_train=use_mems_train,
bi_data=bi_data,
clamp_len=clamp_len,
same_length=same_length,
summary_type=summary_type,
summary_use_proj=summary_use_proj,
summary_activation=summary_activation,
summary_last_dropout=summary_last_dropout,
start_n_top=start_n_top,
end_n_top=end_n_top,
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
)
self.transformer = XLNetModel(config)
self.max_sequence_length = max_sequence_length
self.reduce_output = reduce_output
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
if trainable:
self.transformer.train()
self.transformer.resize_token_embeddings(vocab_size)
def forward(self, inputs: torch.Tensor, mask: torch.Tensor = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
hidden = transformer_outputs[0]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
return torch.Size([self.max_sequence_length, self.transformer.config.d_model])
else:
return torch.Size([self.transformer.config.d_model])
@property
def input_dtype(self):
return torch.int32
@register_encoder("distilbert", TEXT)
class DistilBERTEncoder(Encoder):
fixed_preprocessing_parameters = {
"tokenizer": "hf_tokenizer",
"pretrained_model_name_or_path": "feature.pretrained_model_name_or_path",
}
default_params = {
"pretrained_model_name_or_path": "distilbert-base-uncased",
}
def __init__(
self,
max_sequence_length: int,
pretrained_model_name_or_path: str = "distilbert-base-uncased",
saved_weights_in_checkpoint: bool = False,
reduce_output: str = "sum",
trainable: bool = True,
use_pretrained: bool = True,
vocab_size: int = 30522,
max_position_embeddings: int = 512,
sinusoidal_pos_embds: bool = False,
n_layers: int = 6,
n_heads: int = 12,
dim: int = 768,
hidden_dim: int = 3072,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation: Union[str, Callable] = "gelu",
initializer_range: float = 0.02,
qa_dropout: float = 0.1,
seq_classif_dropout: float = 0.2,
pretrained_kwargs: Dict = None,
**kwargs
):
super().__init__()
try:
from transformers import DistilBertConfig, DistilBertModel
except ModuleNotFoundError:
logger.error(
" transformers is not installed. "
"In order to install all text feature dependencies run "
"pip install ludwig[text]"
)
sys.exit(-1)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
self.transformer = DistilBertModel.from_pretrained(pretrained_model_name_or_path, **pretrained_kwargs)
else:
config = DistilBertConfig(
vocab_size=vocab_size,
max_position_embeddings=max_position_embeddings,
sinusoidal_pos_embds=sinusoidal_pos_embds,
n_layers=n_layers,
n_heads=n_heads,
dim=dim,
hidden_dim=hidden_dim,
dropout=dropout,
attention_dropout=attention_dropout,
activation=activation,
initializer_range=initializer_range,
qa_dropout=qa_dropout,
seq_classif_dropout=seq_classif_dropout,
)
self.transformer = DistilBertModel(config)
if trainable:
self.transformer.train()
self.reduce_output = reduce_output
self.max_sequence_length = max_sequence_length
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
self.transformer.resize_token_embeddings(vocab_size)
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer(
input_ids=inputs,
attention_mask=mask,
)
hidden = transformer_outputs[0][:, 1:-1, :]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# Subtract 2 to remove CLS and PAD tokens added by BERT tokenizer.
return torch.Size([self.max_sequence_length - 2, self.transformer.config.dim])
return torch.Size([self.transformer.config.dim])
@property
def input_dtype(self):
return torch.int32
@register_encoder("ctrl", TEXT)
class CTRLEncoder(Encoder):
fixed_preprocessing_parameters = {
"tokenizer": "hf_tokenizer",
"pretrained_model_name_or_path": "feature.pretrained_model_name_or_path",
}
default_params = {
"pretrained_model_name_or_path": "ctrl",
}
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = "ctrl",
saved_weights_in_checkpoint: bool = False,
reduce_output: str = "sum",
trainable: bool = True,
vocab_size: int = 246534,
n_positions: int = 256,
n_ctx: int = 256,
n_embd: int = 1280,
dff: int = 8192,
n_layer: int = 48,
n_head: int = 16,
resid_pdrop: float = 0.1,
embd_pdrop: float = 0.1,
attn_pdrop: float = 0.1,
layer_norm_epsilon: float = 1e-6,
initializer_range: float = 0.02,
pretrained_kwargs: Dict = None,
**kwargs
):
super().__init__()
try:
from transformers import CTRLConfig, CTRLModel
except ModuleNotFoundError:
logger.error(
" transformers is not installed. "
"In order to install all text feature dependencies run "
"pip install ludwig[text]"
)
sys.exit(-1)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
self.transformer = CTRLModel.from_pretrained(pretrained_model_name_or_path, **pretrained_kwargs)
else:
config = CTRLConfig(
vocab_size=vocab_size,
n_positions=n_positions,
n_ctx=n_ctx,
n_embd=n_embd,
dff=dff,
n_layer=n_layer,
n_head=n_head,
resid_pdrop=resid_pdrop,
embd_pdrop=embd_pdrop,
attn_pdrop=attn_pdrop,
layer_norm_epsilon=layer_norm_epsilon,
initializer_range=initializer_range,
)
self.transformer = CTRLModel(config)
self.vocab_size = vocab_size
self.max_sequence_length = max_sequence_length
if trainable:
self.transformer.train()
self.reduce_output = reduce_output
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
self.transformer.resize_token_embeddings(self.vocab_size)
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
hidden = transformer_outputs[0]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
return torch.Size([self.max_sequence_length, self.transformer.config.n_embd])
return torch.Size([self.transformer.config.n_embd])
@property
def input_dtype(self):
return torch.int32
@register_encoder("camembert", TEXT)
class CamemBERTEncoder(Encoder):
fixed_preprocessing_parameters = {
"tokenizer": "hf_tokenizer",
"pretrained_model_name_or_path": "feature.pretrained_model_name_or_path",
}
default_params = {
"pretrained_model_name_or_path": "jplu/camembert-base",
}
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = "ctrl",
saved_weights_in_checkpoint: bool = False,
reduce_output: str = "cls-pooled",
trainable: bool = True,
vocab_size: int = 30522,
hidden_size: int = 768,
num_hidden_layers: int = 12,
num_attention_heads: int = 12,
intermediate_size: int = 3072,
hidden_act: Union[str, Callable] = "gelu",
hidden_dropout_prob: float = 0.1,
attention_probs_dropout_prob: float = 0.1,
max_position_embeddings: int = 512,
type_vocab_size: int = 2,
initializer_range: float = 0.02,
layer_norm_eps: float = 1e-12,
pad_token_id: int = 0,
gradient_checkpointing: bool = False,
position_embedding_type: str = "absolute",
classifier_dropout: float = None,
pretrained_kwargs: Dict = None,
**kwargs
):
super().__init__()
try:
from transformers import CamembertConfig, CamembertModel
except ModuleNotFoundError:
logger.error(
" transformers is not installed. "
"In order to install all text feature dependencies run "
"pip install ludwig[text]"
)
sys.exit(-1)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
self.transformer = CamembertModel.from_pretrained(pretrained_model_name_or_path, **pretrained_kwargs)
else:
config = CamembertConfig(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
hidden_act=hidden_act,
hidden_dropout_prob=hidden_dropout_prob,
attention_probs_dropout_prob=attention_probs_dropout_prob,
max_position_embeddings=max_position_embeddings,
type_vocab_size=type_vocab_size,
initializer_range=initializer_range,
layer_norm_eps=layer_norm_eps,
pad_token_id=pad_token_id,
gradient_checkpointing=gradient_checkpointing,
position_embedding_type=position_embedding_type,
classifier_dropout=classifier_dropout,
)
self.transformer = CamembertModel(config)
if trainable:
self.transformer.train()
self.reduce_output = reduce_output
if not self.reduce_output == "cls_pooled":
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
self.transformer.resize_token_embeddings(vocab_size)
self.max_sequence_length = max_sequence_length
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
if self.reduce_output == "cls_pooled":
hidden = transformer_outputs[1]
else:
hidden = transformer_outputs[0][:, 1:-1, :]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# Subtract 2 to remove CLS and PAD tokens added by BERT tokenizer.
return torch.Size(
[
self.max_sequence_length - 2,
self.transformer.config.hidden_size,
]
)
return torch.Size([self.transformer.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
@register_encoder("t5", TEXT)
class T5Encoder(Encoder):
fixed_preprocessing_parameters = {
"tokenizer": "hf_tokenizer",
"pretrained_model_name_or_path": "feature.pretrained_model_name_or_path",
}
default_params = {
"pretrained_model_name_or_path": "t5-small",
}
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = "t5-small",
saved_weights_in_checkpoint: bool = False,
reduce_output: str = "sum",
trainable: bool = True,
vocab_size: int = 32128,
d_model: int = 512,
d_kv: int = 64,
d_ff: int = 2048,
num_layers: int = 6,
num_decoder_layers: Optional[int] = None,
num_heads: int = 8,
relative_attention_num_buckets: int = 32,
dropout_rate: float = 0.1,
layer_norm_eps: float = 1e-6,
initializer_factor: float = 1,
feed_forward_proj: str = "relu",
pretrained_kwargs: Dict = None,
**kwargs
):
super().__init__()
try:
from transformers import T5Config, T5Model
except ModuleNotFoundError:
logger.error(
" transformers is not installed. "
"In order to install all text feature dependencies run "
"pip install ludwig[text]"
)
sys.exit(-1)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
self.transformer = T5Model.from_pretrained(pretrained_model_name_or_path, **pretrained_kwargs)
else:
config = T5Config(
vocab_size=vocab_size,
d_model=d_model,
d_kv=d_kv,
d_ff=d_ff,
num_layers=num_layers,
num_decoder_layers=num_decoder_layers,
num_heads=num_heads,
relative_attention_num_buckets=relative_attention_num_buckets,
dropout_rate=dropout_rate,
layer_norm_eps=layer_norm_eps,
initializer_factor=initializer_factor,
feed_forward_proj=feed_forward_proj,
)
self.transformer = T5Model(config)
self.max_sequence_length = max_sequence_length
self.reduce_output = reduce_output
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
if trainable:
self.transformer.train()
self.transformer.resize_token_embeddings(vocab_size)
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer(
inputs,
decoder_input_ids=inputs,
attention_mask=mask,
)
hidden = transformer_outputs[0][:, 0:-1, :] # [eos token]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# Subtract 1 to remove EOS token added by T5 tokenizer.
return torch.Size(
[
self.max_sequence_length - 1,
self.transformer.config.hidden_size,
]
)
return torch.Size([self.transformer.config.d_model])
@property
def input_dtype(self):
return torch.int32
@register_encoder("flaubert", TEXT)
class FlauBERTEncoder(Encoder):
fixed_preprocessing_parameters = {
"tokenizer": "hf_tokenizer",
"pretrained_model_name_or_path": "feature.pretrained_model_name_or_path",
}
default_params = {
"pretrained_model_name_or_path": "flaubert/flaubert_small_cased",
}
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool,
pretrained_model_name_or_path: str = "flaubert/flaubert_small_cased",
saved_weights_in_checkpoint: bool = False,
reduce_output: str = "sum",
trainable: bool = True,
vocab_size: int = 30145,
pre_norm: bool = False,
layerdrop: float = 0.0,
emb_dim: int = 2048,
n_layer: int = 12,
n_head: int = 16,
dropout: float = 0.1,
attention_dropout: float = 0.1,
gelu_activation: bool = True,
sinusoidal_embeddings: bool = False,
causal: bool = False,
asm: bool = False,
n_langs: int = 1,
use_lang_emb: bool = True,
max_position_embeddings: int = 512,
embed_init_std: float = 2048**-0.5,
init_std: float = 0.02,
layer_norm_eps: float = 1e-12,
bos_index: int = 0,
eos_index: int = 1,
pad_index: int = 2,
unk_index: int = 3,
mask_index: int = 5,
is_encoder: bool = True,
mask_token_id: int = 0,
lang_id: int = 1,
pretrained_kwargs: Dict = None,
**kwargs
):
super().__init__()
try:
from transformers import FlaubertConfig, FlaubertModel
except ModuleNotFoundError:
logger.error(
" transformers is not installed. "
"In order to install all text feature dependencies run "
"pip install ludwig[text]"
)
sys.exit(-1)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
self.transformer = FlaubertModel.from_pretrained(pretrained_model_name_or_path, **pretrained_kwargs)
else:
config = FlaubertConfig(
vocab_size=vocab_size,
pre_norm=pre_norm,
layerdrop=layerdrop,
emb_dim=emb_dim,
n_layer=n_layer,
n_head=n_head,
dropout=dropout,
attention_dropout=attention_dropout,
gelu_activation=gelu_activation,
sinusoidal_embeddings=sinusoidal_embeddings,
causal=causal,
asm=asm,
n_langs=n_langs,
use_lang_emb=use_lang_emb,
max_position_embeddings=max_position_embeddings,
embed_init_std=embed_init_std,
init_std=init_std,
layer_norm_eps=layer_norm_eps,
bos_index=bos_index,
eos_index=eos_index,
pad_index=pad_index,
unk_index=unk_index,
mask_index=mask_index,
is_encoder=is_encoder,
mask_token_id=mask_token_id,
lang_id=lang_id,
)
self.transformer = FlaubertModel(config)
self.max_sequence_length = max_sequence_length
self.reduce_output = reduce_output
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
if trainable:
self.transformer.train()
self.transformer.resize_token_embeddings(vocab_size)
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
hidden = transformer_outputs[0][:, 1:-1, :]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# Subtract 2 to remove CLS and PAD tokens added by tokenizer.
return torch.Size(
[
self.max_sequence_length - 2,
self.transformer.config.hidden_size,
]
)
return torch.Size([self.transformer.config.emb_dim])
@property
def input_dtype(self):
return torch.int32
@register_encoder("electra", TEXT)
class ELECTRAEncoder(Encoder):
fixed_preprocessing_parameters = {
"tokenizer": "hf_tokenizer",
"pretrained_model_name_or_path": "feature.pretrained_model_name_or_path",
}
default_params = {
"pretrained_model_name_or_path": "google/electra-small-discriminator",
}
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = "google/electra-small-discriminator",
saved_weights_in_checkpoint: bool = False,
reduce_output: str = "sum",
trainable: bool = True,
vocab_size: int = 30522,
embedding_size: int = 128,
hidden_size: int = 256,
num_hidden_layers: int = 12,
num_attention_heads: int = 4,
intermediate_size: int = 1024,
hidden_act: Union[str, Callable] = "gelu",
hidden_dropout_prob: float = 0.1,
attention_probs_dropout_prob: float = 0.1,
max_position_embeddings: int = 512,
type_vocab_size: int = 2,
initializer_range: float = 0.02,
layer_norm_eps: float = 1e-12,
position_embedding_type: str = "absolute",
classifier_dropout: Optional[float] = None,
pretrained_kwargs: Dict = None,
**kwargs
):
super().__init__()
try:
from transformers import ElectraConfig, ElectraModel
except ModuleNotFoundError:
logger.error(
" transformers is not installed. "
"In order to install all text feature dependencies run "
"pip install ludwig[text]"
)
sys.exit(-1)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
self.transformer = ElectraModel.from_pretrained(pretrained_model_name_or_path, **pretrained_kwargs)
else:
config = ElectraConfig(
vocab_size=vocab_size,
embedding_size=embedding_size,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
hidden_act=hidden_act,
hidden_dropout_prob=hidden_dropout_prob,
attention_probs_dropout_prob=attention_probs_dropout_prob,
max_position_embeddings=max_position_embeddings,
type_vocab_size=type_vocab_size,
initializer_range=initializer_range,
layer_norm_eps=layer_norm_eps,
position_embedding_type=position_embedding_type,
classifier_dropout=classifier_dropout,
)
self.transformer = ElectraModel(config)
self.max_sequence_length = max_sequence_length
self.reduce_output = reduce_output
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
if trainable:
self.transformer.train()
self.transformer.resize_token_embeddings(vocab_size)
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
hidden = transformer_outputs[0][:, 1:-1, :]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# Subtract 2 to remove CLS and PAD tokens added by tokenizer.
return torch.Size(
[
self.max_sequence_length - 2,
self.transformer.config.hidden_size,
]
)
return torch.Size([self.transformer.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
@register_encoder("longformer", TEXT)
class LongformerEncoder(Encoder):
fixed_preprocessing_parameters = {
"tokenizer": "hf_tokenizer",
"pretrained_model_name_or_path": "feature.pretrained_model_name_or_path",
}
default_params = {
"pretrained_model_name_or_path": "allenai/longformer-base-4096",
}
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
attention_window: Union[List[int], int] = 512,
sep_token_id: int = 2,
pretrained_model_name_or_path: str = "allenai/longformer-base-4096",
saved_weights_in_checkpoint: bool = False,
reduce_output: Optional[str] = "cls_pooled",
trainable: bool = True,
num_tokens: Optional[int] = None,
pretrained_kwargs: Dict = None,
**kwargs
):
super().__init__()
try:
from transformers import LongformerConfig, LongformerModel
except ModuleNotFoundError:
logger.error(
" transformers is not installed. "
"In order to install all text feature dependencies run "
"pip install ludwig[text]"
)
sys.exit(-1)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
self.transformer = LongformerModel.from_pretrained(pretrained_model_name_or_path, **pretrained_kwargs)
else:
config = LongformerConfig(attention_window, sep_token_id, **kwargs)
self.transformer = LongformerModel(config)
self.reduce_output = reduce_output
if not self.reduce_output == "cls_pooled":
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
if trainable:
self.transformer.train()
self.transformer.resize_token_embeddings(num_tokens)
self.max_sequence_length = max_sequence_length
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None):
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
if self.reduce_output == "cls_pooled":
hidden = transformer_outputs[1]
else:
hidden = transformer_outputs[0][:, 1:-1, :] # bos + [sent] + sep
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# Subtract 2 to remove the BOS and EOS tokens added by the Longformer (== RoBERTa) tokenizer.
return torch.Size(
[
self.max_sequence_length - 2,
self.transformer.config.hidden_size,
]
)
return torch.Size([self.transformer.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
@register_encoder("auto_transformer", TEXT)
class AutoTransformerEncoder(Encoder):
fixed_preprocessing_parameters = {
"tokenizer": "hf_tokenizer",
"pretrained_model_name_or_path": "feature.pretrained_model_name_or_path",
}
def __init__(
self,
pretrained_model_name_or_path: str,
max_sequence_length: int,
reduce_output: str = "sum",
trainable: bool = True,
vocab_size: int = None,
pretrained_kwargs: Dict = None,
**kwargs
):
super().__init__()
try:
from transformers import AutoModel
except ModuleNotFoundError:
logger.error(
" transformers is not installed. "
"In order to install all text feature dependencies run "
"pip install ludwig[text]"
)
sys.exit(-1)
pretrained_kwargs = pretrained_kwargs or {}
self.transformer = AutoModel.from_pretrained(pretrained_model_name_or_path, **pretrained_kwargs)
self.reduce_output = reduce_output
if self.reduce_output != "cls_pooled":
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
if trainable:
self.transformer.train()
self.transformer.resize_token_embeddings(vocab_size)
self.vocab_size = vocab_size
self.max_sequence_length = max_sequence_length
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None):
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
if self.reduce_output == "cls_pooled":
# this works only if the user knows that the specific model
# they want to use has the same outputs as
# the BERT base class call() function
hidden = transformer_outputs["pooler_output"]
else:
hidden = transformer_outputs["last_hidden_state"]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# TODO(justin): This may need to be conditioned on which AutoModel gets chosen.
return torch.Size([self.max_sequence_length, self.transformer.config.hidden_size])
return torch.Size([self.transformer.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
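# Note (sketch): reduce_output="cls_pooled" relies on the chosen AutoModel exposing
# "pooler_output" (BERT-style models); models without a pooling head need a sequence-reducing
# mode such as "sum", which operates on "last_hidden_state" instead.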
| [] |
2024-01-10 | Astronaut828/Notion_DB | notion_loader.py | import csv
import glob
from langchain.document_loaders import NotionDirectoryLoader
# Specify the path to the Notion database dump directory
notion_db_directory = "Notion_DB/Notion_Dump"
# Create a NotionDirectoryLoader instance
loader = NotionDirectoryLoader(notion_db_directory)
# Load data from the Notion database dump
docs = loader.load()
# Process and work with the loaded data as needed
print(docs) # This will be used as input for the next step
# Find all CSV files in the directory
csv_files = glob.glob(f"{notion_db_directory}/*.csv")
# Open and read each CSV file
for csv_file in csv_files:
with open(csv_file, mode='r', encoding='utf-8-sig') as file:
csv_reader = csv.reader(file, delimiter=';')
# Print the header (column names)
headers = next(csv_reader)
print(f"Columns: {headers}")
# Print each row of data
for row in csv_reader:
data = dict(zip(headers, row))
print(data) # This will be used as input for the next step | [] |
2024-01-10 | SnowRobert/RisingBrain | src~service~llm~chat_service.py | """Agnet Service"""
import time
from openai.error import RateLimitError
from src.common.utils import AGENT_NAME, GPT_MODEL
from rising_plugin.risingplugin import handle_chat_completion
from src.logs import logger
from src.model.chat_response_model import ChatResponseModel
from src.model.message_model import MessageModel
class ChatService:
def __init__(self, ai_name=AGENT_NAME, llm_model=GPT_MODEL):
self.ai_name = ai_name
self.llm_model = llm_model
def generate_context(self, prompt, relevant_memory, full_message_history, model):
current_context = [
# MessageModel.create_chat_message(
# "system", f"The current time and date is {time.strftime('%c')}"
# ),
]
# Add messages from the full message history until we reach the token limit
next_message_to_add_index = len(full_message_history) - 1
insertion_index = len(current_context)
return (
next_message_to_add_index,
insertion_index,
current_context,
)
# TODO: Change debug from hardcode to argument
def chat_with_ai(
self,
prompt,
user_input,
full_message_history,
permanent_memory,
) -> ChatResponseModel:
"""Interact with the OpenAI API, sending the prompt, user input, message history,
and permanent memory."""
while True:
try:
"""
Interact with the OpenAI API, sending the prompt, user input,
message history, and permanent memory.
Args:
prompt (str): The prompt explaining the rules to the AI.
user_input (str): The input from the user.
full_message_history (list): The list of all messages sent between the
user and the AI.
permanent_memory (Obj): The memory object containing the permanent
memory.
token_limit (int): The maximum number of tokens allowed in the API call.
Returns:
str: The AI's response.
"""
model = self.llm_model # TODO: Change model from hardcode to argument
logger.debug(f"Chat with AI on model : {model}")
# if len(full_message_history) == 0:
# relevant_memory = ""
# else:
# recent_history = full_message_history[-5:]
# shuffle(recent_history)
# relevant_memories = permanent_memory.get_relevant(
# str(recent_history), 5
# )
# if relevant_memories:
# shuffle(relevant_memories)
# relevant_memory = str(relevant_memories)
relevant_memory = ""
# logger.debug(f"Memory Stats: {permanent_memory.get_stats()}")
(
next_message_to_add_index,
insertion_index,
current_context,
) = self.generate_context(
prompt, relevant_memory, full_message_history, model
)
# while current_tokens_used > 2500:
# # remove memories until we are under 2500 tokens
# relevant_memory = relevant_memory[:-1]
# (
# next_message_to_add_index,
# current_tokens_used,
# insertion_index,
# current_context,
# ) = generate_context(
# prompt, relevant_memory, full_message_history, model
# )
# Add Messages until the token limit is reached or there are no more messages to add.
while next_message_to_add_index >= 0:
# print (f"CURRENT TOKENS USED: {current_tokens_used}")
message_to_add = full_message_history[next_message_to_add_index]
# Add the most recent message to the start of the current context,
# after the two system prompts.
current_context.insert(insertion_index, message_to_add.to_json())
# Move to the next most recent message in the full message history
next_message_to_add_index -= 1
# Append user input, the length of this is accounted for above
current_context.extend(
[MessageModel.create_chat_message("user", user_input)]
)
logger.debug("------------ CONTEXT SENT TO AI ---------------")
for message in current_context:
# Skip printing the prompt
if message["role"] == "system" and message["content"] == prompt:
continue
logger.debug(
f"{message['role'].capitalize()}: {message['content']}"
)
logger.debug("")
logger.debug("----------- END OF CONTEXT ----------------")
# TODO: use a model defined elsewhere, so that model can contain
# temperature and other settings we care about
return ChatResponseModel(
handle_chat_completion(model=model, messages=current_context)
)
except RateLimitError:
# TODO: When we switch to langchain, this is built in
logger.warn("Error: ", "API Rate Limit Reached.")
raise
except Exception as e:
logger.warn("Error: ", f"Chat completion failed: {e}")
raise e
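# Usage sketch with hypothetical values; MessageModel and ChatResponseModel live in src.model
# and are not shown here, so their exact shape is an assumption:
#   service = ChatService()
#   response = service.chat_with_ai(
#       prompt="You are RisingBrain's desktop assistant.",
#       user_input="open the browser",
#       full_message_history=[],   # list of MessageModel objects
#       permanent_memory=None,
#   )
#   # response wraps the raw chat-completion result in a ChatResponseModel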
| [] |
2024-01-10 | penfever/ArcheType | src~match.py | from sentence_transformers import util
import openai
import pandas as pd
import re
try:
from .const import INTEGER_SET, BOOLEAN_SET
from .data import get_schema_df, fix_labels, state_names, get_all_substrings, state_abbreviations, country_codes
except ImportError:
from const import INTEGER_SET, BOOLEAN_SET
from data import get_schema_df, fix_labels, state_names, get_all_substrings, state_abbreviations, country_codes
def ans_contains_gt(ans_n, fixed_labels):
for fixed_label in fixed_labels:
if fixed_label in ans_n:
# print(f"Fuzzy label {ans_n} contains gt label {fixed_label}: MATCH \n")
ans_n = fixed_label
return ans_n
return None
def gt_contains_ans(ans_n, fixed_labels):
if ans_n == "":
return None
for fixed_label in fixed_labels:
if ans_n in fixed_label:
# print(f"GT label {fixed_label} contains fuzzy label {ans_n}: MATCH \n")
ans_n = fixed_label
return ans_n
return None
def basic_contains(ans_n, fixed_labels, method):
#TODO: not sure the order should be fixed like this, could be made flexible
if ans_n in fixed_labels:
return ans_n
if "ans_contains_gt" in method:
res = ans_contains_gt(ans_n, fixed_labels)
if res:
return res
if "gt_contains_ans" in method:
res = gt_contains_ans(ans_n, fixed_labels)
if res:
return res
return None
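# Illustrative example of the matching helpers above (hypothetical labels):
#   fixed_labels = {"album", "artist"}
#   basic_contains("music album", fixed_labels, "ans_contains_gt")  -> "album"
#       (the fuzzy answer contains a ground-truth label)
#   basic_contains("alb", fixed_labels, "gt_contains_ans")          -> "album"
#       (a ground-truth label contains the fuzzy answer)
#   basic_contains("genre", fixed_labels, "exact_only")             -> None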
def get_base_dtype(context):
dtype = "integer"
for item in context:
if not all(char in INTEGER_SET for char in item):
return "other"
try:
if item.endswith(".0") or item.endswith(",0"):
item = item[:-2]
item = str(int(item))
if item.endswith(".00") or item.endswith(",00"):
item = item[:-3]
item = str(int(item))
except:
return "float"
temp_item = re.sub(r"[^a-zA-Z0-9.]", "", item)
if not temp_item.isdigit():
dtype = "float"
return dtype
def check_contains(s1, s2):
if s1 in s2:
return True
if s2 in s1:
return True
return False
def run_special_cases(s, d, lsd):
#EducationalOccupationalCredential is a list of job requirements
if any([
"High School diploma or equivalent" in s,
"Bachelor's degree or equivalent" in s,
"Master's degree or equivalent" in s,
"Doctoral degree or equivalent" in s,
]):
lbl = fix_labels("EducationalOccupationalCredential", lsd)
return lbl
#Action refers generally to website actions
if any(["https://schema.org/CommentAction" in s, "https://schema.org/ViewAction" in s, "https://schema.org/LikeAction" in s, "https://schema.org/InsertAction" in s]):
lbl = fix_labels("Action", lsd)
return lbl
#Photograph is usually a web url which ends in image file extension, other urls do not end in image file extension
if any([
s.startswith("https://"), s.startswith("http://")
]):
if any([
s.endswith(".jpg"), s.endswith(".png"), s.endswith(".jpeg"), s.endswith(".gif"), s.endswith(".svg"), s.endswith(".bmp")
]):
lbl = fix_labels("Photograph", lsd)
return lbl
else:
lbl = fix_labels("url", lsd)
return lbl
#ItemList is actually just recipe steps
if any([
"whisk" in s.lower(),
"preheat oven" in s.lower(),
"pre-heat oven" in s.lower(),
"remove from oven" in s.lower(),
"heat non-stick pan" in s.lower(),
"serve hot" in s.lower(),
"Let stand" in s.lower(),
]):
lbl = fix_labels("ItemList", lsd)
return lbl
return False
def apply_amstr_rules(context, lbl, lsd):
#if all characters are either numeric or dashes
if all(all(char in INTEGER_SET for char in item) for item in context):
lbl = "numeric identifier"
state_words = []
for item in state_names:
state_words += get_all_substrings(item, [" ", "-", "_"])
state_words = set(s.lower() for s in state_words)
context_words = []
for item in context:
new_words = get_all_substrings(item, [" ", "-", "_"])
if len(new_words) > 5:
return lbl
context_words += new_words
context_words = set(s.lower() for s in context_words)
if context_words.issubset(state_words):
lbl = "state"
return lbl
def apply_pubchem_rules(context, lbl, lsd):
pattern_a = r"^[0-9A-Za-z]{4}-[0-9A-Za-z]{4}$"
#"978-0-309-43738-7"
def match_condition(p, text):
return (re.search(p, text))
if all(("ATC_" in s) for s in context):
lbl = "concept broader term"
elif all(("MD5_" in s) for s in context):
lbl = "md5 hash"
elif all(match_condition(pattern_a, s) for s in context):
lbl = 'journal issn'
elif all(re.sub('[0-9\- ]', '', s) == '' for s in context):
lbl = 'book isbn'
elif all(s.startswith("InChI=") for s in context):
lbl = "inchi (international chemical identifier)"
return lbl
def apply_d4_rules(context, lbl, lsd):
pattern_a = r"^[A-Z]{2}/[A-Z]{2}$"
def match_condition(text):
return (re.search(pattern_a, text)) or (len(text) == 2)
if all(len(s)==6 for s in context):
lbl = 'school-dbn'
elif all(len(s)==4 for s in context):
lbl = 'school-number'
elif all(len(s)==3 for s in context):
lbl = 'plate-type'
elif all(s in state_abbreviations for s in context):
lbl = 'us-state'
elif all(s in country_codes for s in context):
lbl = 'other-states'
elif all(match_condition(s) for s in context):
lbl = 'permit-types'
elif all(s in ['A', 'B', 'C', 'D', 'F'] for s in context):
lbl = 'school-grades'
elif all(s in ["STATEN ISLAND",
"MANHATTAN",
"BROOKLYN",
"QUEENS",
"BRONX"] for s in context):
lbl = 'borough'
return lbl
def apply_basic_rules(context, lbl, lsd):
if "amstr" in lsd['name']:
return apply_amstr_rules(context, lbl, lsd)
elif "pubchem" in lsd['name']:
return apply_pubchem_rules(context, lbl, lsd)
elif "d4" in lsd['name']:
return apply_d4_rules(context, lbl, lsd)
elif not context \
or not isinstance(context, list):
return lbl
schema_df = get_schema_df()
schema_ids = schema_df["id"].tolist()
try:
for s in context:
s = str(s)
if any([s.startswith(s1) for s1 in ["OC_", "SRC", "std:", "mean:", "mode:", "median:", "max:", "min:"]]):
continue
special_case = run_special_cases(s, lbl, lsd)
if special_case:
return special_case
in_rel = False
cont_rel = False
if s in schema_ids:
in_rel = True
elif any([sid in s for sid in schema_ids]):
cont_rel = True
if in_rel or cont_rel:
if in_rel:
ss = schema_df[schema_df['id'] == s]
elif cont_rel:
schema_df['cont'] = schema_df['id'].apply(lambda x: check_contains(x, s))
ss = schema_df[schema_df['cont'] == True]
enumtype = str(ss.iloc[0]["enumerationtype"])
if enumtype != "":
lbl = enumtype.split("/")[-1]
else:
lbl = ss["label"].tolist()[0]
lbl = fix_labels(lbl, lsd)
return lbl
if all(s.endswith(" g") for s in context):
lbl = "weight"
if all(s.endswith(" kg") for s in context):
lbl = "weight"
if all(s.endswith(" lb") for s in context):
lbl = "weight"
if all(s.endswith(" lbs") for s in context):
lbl = "weight"
if all(s.endswith(" pounds") for s in context):
lbl = "weight"
if all(s.endswith(" cal") for s in context):
lbl = "calories"
if all(s.endswith(" kcal") for s in context):
lbl = "calories"
if all(s.endswith(" calories") for s in context):
lbl = "calories"
if all("review" in s.lower() for s in context):
lbl = "review"
if all("recipe" in s.lower() for s in context):
lbl = "recipe"
if lbl and "openopen" in lbl:
lbl = "openinghours"
if all(s in BOOLEAN_SET for s in context):
lbl = "boolean"
if isinstance(lbl, str):
lbl = fix_labels(lbl, lsd)
return lbl
except Exception as e:
print(f"Exception {e} in apply_basic_rules with context {context}")
return lbl | [] |
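# Illustrative examples (the exact spelling returned depends on fix_labels and the label-set
# dict lsd, so only the target label is indicated):
#   apply_basic_rules(["https://example.com/img.jpg"], None, lsd)  -> the "Photograph" label
#   apply_basic_rules(["https://example.com/page"], None, lsd)     -> the "url" label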
2024-01-10 | llmonitor/llm-benchmarks | run~queriers.py | import openai
import os
import json
import requests
from hugchat import hugchat
from hugchat.login import Login
import together
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
from dotenv import load_dotenv
load_dotenv()
TOGETHER_API_KEY = os.getenv('TOGETHER_API_KEY')
COHERE_API_KEY = os.getenv('COHERE_API_KEY')
AI21_API_KEY = os.getenv('AI21_API_KEY')
ALEPH_API_KEY = os.getenv('ALEPH_API_KEY')
OPEN_ROUTER_API_KEY = os.getenv('OPEN_ROUTER_API_KEY')
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
ANTHROPIC_API_KEY = os.getenv('ANTHROPIC_API_KEY')
# Huggingface login credentials
HUGGING_EMAIL = os.environ.get("HUGGING_EMAIL")
HUGGING_PASSWORD = os.environ.get("HUGGING_PASSWORD")
MAX_TOKENS = 700
# Log in to huggingface and grant authorization to huggingchat
sign = Login(HUGGING_EMAIL, HUGGING_PASSWORD)
cookie_path_dir = "./cookies"
try:
cookies = sign.loadCookiesFromDir(cookie_path_dir) # This will detect if the JSON file exists, return cookies if it does and raise an Exception if it's not.
except Exception as e:
print(e)
# Save cookies to the local directory
sign.saveCookiesToDir(cookie_path_dir)
cookies = sign.login()
chatbot = hugchat.ChatBot(cookies=cookies.get_dict()) # or cookie_path="usercookies/<email>.json"
def hugchat_func(model, params):
# Create a new conversation
id = chatbot.new_conversation()
chatbot.change_conversation(id)
# get index from chatbot.llms of the model
index = [i for i, x in enumerate(chatbot.llms) if x == model['api_id']][0]
print(f"Switching to {index}")
# set the chatbot to the model
chatbot.switch_llm(index)
query_result = chatbot.query(params['text'], temperature=0, max_new_tokens=MAX_TOKENS, stop=params['stop'] if params.get('stop') else None)
return query_result['text']
def together_func(model, params):
# def format_prompt(prompt, prompt_type):
# if prompt_type == "language":
# return f"Q: {prompt}\nA: "
# if prompt_type == "code":
# return f"# {prompt}"
# if prompt_type == "chat":
# return f"<human>: {prompt}\n<bot>: "
together.api_key = TOGETHER_API_KEY
# generate response
response = together.Complete.create(
model = model['api_id'],
prompt=f"<human>: {params['text']}\n<bot>:",
temperature=0,
max_tokens=MAX_TOKENS,
stop=["<human>", "<human>:","</s>", "<|end|>", "<|endoftext|>", "<bot>", "```\n```", "\nUser"]
)
# 'stop' may be absent from params, so fall back to an empty strip set
return response['output']['choices'][0]['text'].rstrip(params.get('stop') or "")
def cohere(model, params):
options = {
"method": "POST",
"headers": {
"accept": "application/json",
"content-type": "application/json",
"authorization": f"Bearer {COHERE_API_KEY}",
},
"body": json.dumps({
"max_tokens": MAX_TOKENS,
"truncate": "END",
"return_likelihoods": "NONE",
"prompt": params['text'],
"stop_sequences": [params['stop']] if params.get('stop') else [],
"model": model['api_id'],
"temperature": 0,
}),
}
response = requests.post("https://api.cohere.ai/v1/generate", headers=options['headers'], data=options['body'])
json_response = response.json()
return json_response['generations'][0]['text']
def openai_func(model, params):
openai.api_key = OPENAI_API_KEY
completion = openai.ChatCompletion.create(
model=model['api_id'],
messages=[{"role": "user", "content": params['text']}],
temperature=0,
max_tokens=MAX_TOKENS,
stop=[params['stop']] if params.get('stop') else []
)
return completion.choices[0].message.content
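# Usage sketch: every querier in this module shares the fn(model, params) signature; the
# shapes below are inferred from the code above rather than documented by the repo:
#   model = {"api_id": "gpt-3.5-turbo"}            # provider-specific model id
#   params = {"text": "Say hello", "stop": "\n"}   # 'stop' is optional
#   answer = openai_func(model, params)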
def ai21(model, params):
options = {
"headers": {
"accept": "application/json",
"content-type": "application/json",
"Authorization": f"Bearer {AI21_API_KEY}",
},
"body": json.dumps({
"prompt": params['text'],
"maxTokens": MAX_TOKENS,
"temperature": 0,
"stopSequences": [params['stop']] if params.get('stop') else [],
}),
}
response = requests.post(f"https://api.ai21.com/studio/v1/{model['api_id']}/complete", headers=options['headers'], data=options['body'])
json_response = response.json()
return json_response['completions'][0]['data']['text']
def openrouter(model, params):
response = requests.post(
url="https://openrouter.ai/api/v1/chat/completions",
headers={
"HTTP-Referer": 'https://benchmarks.llmonitor.com', # To identify your app. Can be set to localhost for testing
"Authorization": "Bearer " + OPEN_ROUTER_API_KEY
},
data=json.dumps({
"model": model['api_id'],
"temperature": 0,
"max_tokens": MAX_TOKENS,
"stop": [params['stop']] if params.get('stop') else [],
"messages": [
{"role": "user", "content": params['text']}
]
})
)
completion = response.json()
return completion["choices"][0]["message"]["content"]
def anthropic_func(model,params):
anthropic = Anthropic(
api_key=ANTHROPIC_API_KEY
)
completion = anthropic.completions.create(
model=model['api_id'],
temperature=0,
max_tokens_to_sample=MAX_TOKENS,
prompt=f"{HUMAN_PROMPT} {params['text']}{AI_PROMPT}",
)
return completion.completion
def alephalpha(model, params):
options = {
"headers": {
"Content-Type": "application/json",
"Accept": "application/json",
"Authorization": f"Bearer {ALEPH_API_KEY}",
},
"body": json.dumps({
"model": model['api_id'],
"prompt": params['text'],
"maximum_tokens": MAX_TOKENS,
"stop_sequences": [params['stop']] if params.get('stop') else [],
}),
}
response = requests.post("https://api.aleph-alpha.com/complete", headers=options['headers'], data=options['body'])
json_response = response.json()
return json_response['completions'][0]['completion']
| [
"application/json",
"PLACEHOLDER PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | sviete/AIS-home-assistant | tests~components~openai_conversation~test_init.py | """Tests for the OpenAI integration."""
from unittest.mock import patch
from openai import error
import pytest
from syrupy.assertion import SnapshotAssertion
from homeassistant.components import conversation
from homeassistant.core import Context, HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import area_registry as ar, device_registry as dr, intent
from tests.common import MockConfigEntry
async def test_default_prompt(
hass: HomeAssistant,
mock_config_entry: MockConfigEntry,
mock_init_component,
area_registry: ar.AreaRegistry,
device_registry: dr.DeviceRegistry,
snapshot: SnapshotAssertion,
) -> None:
"""Test that the default prompt works."""
entry = MockConfigEntry(title=None)
entry.add_to_hass(hass)
for i in range(3):
area_registry.async_create(f"{i}Empty Area")
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
connections={("test", "1234")},
name="Test Device",
manufacturer="Test Manufacturer",
model="Test Model",
suggested_area="Test Area",
)
for i in range(3):
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
connections={("test", f"{i}abcd")},
name="Test Service",
manufacturer="Test Manufacturer",
model="Test Model",
suggested_area="Test Area",
entry_type=dr.DeviceEntryType.SERVICE,
)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
connections={("test", "5678")},
name="Test Device 2",
manufacturer="Test Manufacturer 2",
model="Device 2",
suggested_area="Test Area 2",
)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
connections={("test", "9876")},
name="Test Device 3",
manufacturer="Test Manufacturer 3",
model="Test Model 3A",
suggested_area="Test Area 2",
)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
connections={("test", "qwer")},
name="Test Device 4",
suggested_area="Test Area 2",
)
device = device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
connections={("test", "9876-disabled")},
name="Test Device 3",
manufacturer="Test Manufacturer 3",
model="Test Model 3A",
suggested_area="Test Area 2",
)
device_registry.async_update_device(
device.id, disabled_by=dr.DeviceEntryDisabler.USER
)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
connections={("test", "9876-no-name")},
manufacturer="Test Manufacturer NoName",
model="Test Model NoName",
suggested_area="Test Area 2",
)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
connections={("test", "9876-integer-values")},
name=1,
manufacturer=2,
model=3,
suggested_area="Test Area 2",
)
with patch(
"openai.ChatCompletion.acreate",
return_value={
"choices": [
{
"message": {
"role": "assistant",
"content": "Hello, how can I help you?",
}
}
]
},
) as mock_create:
result = await conversation.async_converse(
hass, "hello", None, Context(), agent_id=mock_config_entry.entry_id
)
assert result.response.response_type == intent.IntentResponseType.ACTION_DONE
assert mock_create.mock_calls[0][2]["messages"] == snapshot
async def test_error_handling(
hass: HomeAssistant, mock_config_entry: MockConfigEntry, mock_init_component
) -> None:
"""Test that the default prompt works."""
with patch(
"openai.ChatCompletion.acreate", side_effect=error.ServiceUnavailableError
):
result = await conversation.async_converse(
hass, "hello", None, Context(), agent_id=mock_config_entry.entry_id
)
assert result.response.response_type == intent.IntentResponseType.ERROR, result
assert result.response.error_code == "unknown", result
async def test_template_error(
hass: HomeAssistant, mock_config_entry: MockConfigEntry
) -> None:
"""Test that template error handling works."""
hass.config_entries.async_update_entry(
mock_config_entry,
options={
"prompt": "talk like a {% if True %}smarthome{% else %}pirate please.",
},
)
with patch(
"openai.Engine.list",
), patch("openai.ChatCompletion.acreate"):
await hass.config_entries.async_setup(mock_config_entry.entry_id)
await hass.async_block_till_done()
result = await conversation.async_converse(
hass, "hello", None, Context(), agent_id=mock_config_entry.entry_id
)
assert result.response.response_type == intent.IntentResponseType.ERROR, result
assert result.response.error_code == "unknown", result
async def test_conversation_agent(
hass: HomeAssistant,
mock_config_entry: MockConfigEntry,
mock_init_component,
) -> None:
"""Test OpenAIAgent."""
agent = await conversation._get_agent_manager(hass).async_get_agent(
mock_config_entry.entry_id
)
assert agent.supported_languages == "*"
@pytest.mark.parametrize(
("service_data", "expected_args"),
[
(
{"prompt": "Picture of a dog"},
{"prompt": "Picture of a dog", "size": "512x512"},
),
(
{"prompt": "Picture of a dog", "size": "256"},
{"prompt": "Picture of a dog", "size": "256x256"},
),
(
{"prompt": "Picture of a dog", "size": "1024"},
{"prompt": "Picture of a dog", "size": "1024x1024"},
),
],
)
async def test_generate_image_service(
hass: HomeAssistant,
mock_config_entry: MockConfigEntry,
mock_init_component,
service_data,
expected_args,
) -> None:
"""Test generate image service."""
service_data["config_entry"] = mock_config_entry.entry_id
expected_args["api_key"] = mock_config_entry.data["api_key"]
expected_args["n"] = 1
with patch(
"openai.Image.acreate", return_value={"data": [{"url": "A"}]}
) as mock_create:
response = await hass.services.async_call(
"openai_conversation",
"generate_image",
service_data,
blocking=True,
return_response=True,
)
assert response == {"url": "A"}
assert len(mock_create.mock_calls) == 1
assert mock_create.mock_calls[0][2] == expected_args
@pytest.mark.usefixtures("mock_init_component")
async def test_generate_image_service_error(
hass: HomeAssistant,
mock_config_entry: MockConfigEntry,
) -> None:
"""Test generate image service handles errors."""
with patch(
"openai.Image.acreate", side_effect=error.ServiceUnavailableError("Reason")
), pytest.raises(HomeAssistantError, match="Error generating image: Reason"):
await hass.services.async_call(
"openai_conversation",
"generate_image",
{
"config_entry": mock_config_entry.entry_id,
"prompt": "Image of an epic fail",
},
blocking=True,
return_response=True,
)
| [
"Hello, how can I help you?"
] |
2024-01-10 | MeaningfulGigs/dubois | ingest_data.py | from langchain.text_splitter import CharacterTextSplitter
from langchain.document_loaders import CSVLoader
from langchain.vectorstores.faiss import FAISS
from langchain.embeddings import OpenAIEmbeddings
import pickle
print("Loading data...")
loader = CSVLoader(file_path="creative_data.csv")
raw_docs = loader.load()
print("Splitting text...")
text_splitter = CharacterTextSplitter(
separator="\n\n",
chunk_size=600,
chunk_overlap=100,
length_function=len,
)
documents = text_splitter.split_documents(raw_docs)
print("Creating vectorstore...")
embeddings = OpenAIEmbeddings()
vectorstore = FAISS.from_documents(documents, embeddings)
with open("vectorstore.pkl", "wb") as f:
    pickle.dump(vectorstore, f)
| [] |
2024-01-10 | MeaningfulGigs/dubois | query_data.py | from langchain.chains import ConversationalRetrievalChain
from langchain.prompts.prompt import PromptTemplate
from langchain.vectorstores.base import VectorStoreRetriever
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
import pickle
template = """
Given the following Chat History and a Follow-Up Question, rephrase the Follow-Up Question to be a Stand-Alone Question.
You can assume the Follow-Up Question is about finding a creative professional from your team.
Chat History:
{chat_history}
Follow-Up Question:
{question}
Stand-Alone Question:
"""
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(template)
qa_template = """
You are an AI assistant for answering questions about a team of creative professionals.
You are given the following extracted parts of your creative roster, and a question. Provide a conversational answer.
If you don't know the answer, just say "Hmm, I'm not sure." Don't try to make up an answer.
If the question is not about your team of creatives, politely inform them that you are tuned to only answer questions about the creatives on your team.
Question:
{question}
=========
Profiles:
{context}
=========
Answer in Markdown:
"""
QA_PROMPT = PromptTemplate(
template=qa_template, input_variables=["question", "context"]
)
def load_retriever():
    with open("vectorstore.pkl", "rb") as f:
        vectorstore = pickle.load(f)
    retriever = VectorStoreRetriever(vectorstore=vectorstore)
    return retriever
def get_custom_prompt_qa_chain():
    llm = ChatOpenAI(temperature=1)
    retriever = load_retriever()
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
    # see: https://github.com/langchain-ai/langchain/issues/6635
    # see: https://github.com/langchain-ai/langchain/issues/1497
    model = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=retriever,
        memory=memory,
        combine_docs_chain_kwargs={"prompt": QA_PROMPT},
    )
    return model
def get_condense_prompt_qa_chain():
    llm = ChatOpenAI(temperature=1)
    retriever = load_retriever()
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
    # see: https://github.com/langchain-ai/langchain/issues/5890
    model = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=retriever,
        memory=memory,
        verbose=True,
        condense_question_prompt=CONDENSE_QUESTION_PROMPT,
        combine_docs_chain_kwargs={"prompt": QA_PROMPT},
    )
    return model
chain_options = {
    "custom_prompt": get_custom_prompt_qa_chain,
    "condense_prompt": get_condense_prompt_qa_chain,
}
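# Usage sketch, assuming OPENAI_API_KEY is configured and vectorstore.pkl was built by
# ingest_data.py:
#   chain = chain_options["condense_prompt"]()
#   result = chain({"question": "Which creatives on the team specialize in branding?"})
#   print(result["answer"])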
| [
"question",
"t know the answer, just say \"Hmm, I",
"\n You are an AI assistant for answering questions about a team of creative professionals.\n You are given the following extracted parts of your creative roster, and a question. Provide a conversational answer.\n If you don't know the answer, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n If the question is not about your team of creatives, politely inform them that you are tuned to only answer questions about the creatives on your team.\n Question:\n {question}\n =========\n Profiles:\n {context}\n =========\n Answer in Markdown:\n",
"context",
"\n Given the following Chat History and a Follow-Up Question, rephrase the Follow-Up Question to be a Stand-Alone Question.\n You can assume the Follow-Up Question is about finding a creative professional from your team.\n\n Chat History:\n {chat_history}\n Follow-Up Question:\n {question}\n Stand-Alone Question:\n"
] |
2024-01-10 | wangd/rhythmbox | plugins~coherence~upnp_coherence~MediaStore.py | # Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
#
# Copyright 2007, James Livingston <[email protected]>
# Copyright 2007, Frank Scholz <[email protected]>
import os.path
import rhythmdb
try:
import louie
except ImportError:
import coherence.extern.louie as louie
import urllib
from coherence import __version_info__
from coherence.upnp.core import DIDLLite
from coherence.backend import BackendItem, BackendStore
ROOT_CONTAINER_ID = 0
AUDIO_CONTAINER = 100
AUDIO_ALL_CONTAINER_ID = 101
AUDIO_ARTIST_CONTAINER_ID = 102
AUDIO_ALBUM_CONTAINER_ID = 103
CONTAINER_COUNT = 10000
TRACK_COUNT = 1000000
# most of this class is from Coherence, originally under the MIT licence
class Container(BackendItem):
logCategory = 'rb_media_store'
def __init__(self, id, parent_id, name, children_callback=None,store=None,play_container=False):
self.id = id
self.parent_id = parent_id
self.name = name
self.mimetype = 'directory'
self.store = store
self.play_container = play_container
self.update_id = 0
if children_callback != None:
self.children = children_callback
else:
self.children = []
def add_child(self, child):
self.children.append(child)
def get_children(self,start=0,request_count=0):
if callable(self.children):
children = self.children(self.id)
else:
children = self.children
self.info("Container get_children %r (%r,%r)", children, start, request_count)
if request_count == 0:
return children[start:]
else:
return children[start:request_count]
def get_child_count(self):
return len(self.get_children())
def get_item(self, parent_id=None):
item = DIDLLite.Container(self.id,self.parent_id,self.name)
item.childCount = self.get_child_count()
if self.store and self.play_container == True:
if item.childCount > 0:
res = DIDLLite.PlayContainerResource(self.store.server.uuid,cid=self.get_id(),fid=str(TRACK_COUNT + int(self.get_children()[0].get_id())))
item.res.append(res)
return item
def get_name(self):
return self.name
def get_id(self):
return self.id
class Album(BackendItem):
logCategory = 'rb_media_store'
def __init__(self, store, title, id, parent_id):
self.id = id
self.title = title
self.store = store
query = self.store.db.query_new()
self.store.db.query_append(query,[rhythmdb.QUERY_PROP_EQUALS, rhythmdb.PROP_TYPE, self.store.db.entry_type_get_by_name('song')],
[rhythmdb.QUERY_PROP_EQUALS, rhythmdb.PROP_ALBUM, self.title])
self.tracks_per_album_query = self.store.db.query_model_new(query)
#self.tracks_per_album_query.set_sort_order(rhythmdb.rhythmdb_query_model_track_sort_func)
self.store.db.do_full_query_async_parsed(self.tracks_per_album_query, query)
def get_children(self,start=0,request_count=0):
children = []
def track_sort(x,y):
entry = self.store.db.entry_lookup_by_id (x.id)
x_track = self.store.db.entry_get (entry, rhythmdb.PROP_TRACK_NUMBER)
entry = self.store.db.entry_lookup_by_id (y.id)
y_track = self.store.db.entry_get (entry, rhythmdb.PROP_TRACK_NUMBER)
return cmp(x_track,y_track)
def collate (model, path, iter):
self.info("Album get_children %r %r %r" %(model, path, iter))
id = model.get(iter, 0)[0]
children.append(Track(self.store,id,self.id))
self.tracks_per_album_query.foreach(collate)
children.sort(cmp=track_sort)
if request_count == 0:
return children[start:]
else:
return children[start:request_count]
def get_child_count(self):
return len(self.get_children())
def get_item(self, parent_id = AUDIO_ALBUM_CONTAINER_ID):
item = DIDLLite.MusicAlbum(self.id, parent_id, self.title)
if __version_info__ >= (0,6,4):
if self.get_child_count() > 0:
res = DIDLLite.PlayContainerResource(self.store.server.uuid,cid=self.get_id(),fid=str(TRACK_COUNT+int(self.get_children()[0].get_id())))
item.res.append(res)
return item
def get_id(self):
return self.id
def get_name(self):
return self.title
def get_cover(self):
return self.cover
class Artist(BackendItem):
logCategory = 'rb_media_store'
def __init__(self, store, name, id, parent_id):
self.id = id
self.name = name
self.store = store
query = self.store.db.query_new()
self.store.db.query_append(query,[rhythmdb.QUERY_PROP_EQUALS, rhythmdb.PROP_TYPE, self.store.db.entry_type_get_by_name('song')],
[rhythmdb.QUERY_PROP_EQUALS, rhythmdb.PROP_ARTIST, self.name])
self.tracks_per_artist_query = self.store.db.query_model_new(query)
self.store.db.do_full_query_async_parsed(self.tracks_per_artist_query, query)
self.albums_per_artist_query = self.store.db.property_model_new(rhythmdb.PROP_ALBUM)
self.albums_per_artist_query.props.query_model = self.tracks_per_artist_query
def get_artist_all_tracks(self,id):
children = []
def collate (model, path, iter):
id = model.get(iter, 0)[0]
self.info("get_artist_all_tracks %r", id)
children.append(Track(self.store,id,self.id))
self.tracks_per_artist_query.foreach(collate)
return children
def get_children(self,start=0,request_count=0):
children = []
def collate (model, path, iter):
name = model.get(iter, 0)[0]
priority = model.get(iter, 1)[0]
self.info("get_children collate %r %r", name, priority)
if priority is False:
try:
album = self.store.albums[name]
children.append(album)
except:
self.warning("hmm, a new album %r, that shouldn't happen", name)
self.albums_per_artist_query.foreach(collate)
if len(children):
all_id = 'artist_all_tracks_%d' % (self.id)
if all_id not in self.store.containers:
self.store.containers[all_id] = \
Container( all_id, self.id, 'All tracks of %s' % self.name,
children_callback=self.get_artist_all_tracks,
store=self.store,play_container=True)
children.insert(0,self.store.containers[all_id])
if request_count == 0:
return children[start:]
else:
return children[start:request_count]
def get_child_count(self):
return len(self.get_children())
def get_item(self, parent_id = AUDIO_ARTIST_CONTAINER_ID):
item = DIDLLite.MusicArtist(self.id, parent_id, self.name)
return item
def get_id(self):
return self.id
def get_name(self):
return self.name
class Track(BackendItem):
logCategory = 'rb_media_store'
def __init__(self, store, id, parent_id):
self.store = store
if type(id) == int:
self.id = id
else:
self.id = self.store.db.entry_get (id, rhythmdb.PROP_ENTRY_ID)
self.parent_id = parent_id
def get_children(self, start=0, request_count=0):
return []
def get_child_count(self):
return 0
def get_item(self, parent_id=None):
self.info("Track get_item %r @ %r" %(self.id,self.parent_id))
host = ""
# load common values
entry = self.store.db.entry_lookup_by_id(self.id)
# Bitrate is in bytes/second, not kilobits/second
bitrate = self.store.db.entry_get(entry, rhythmdb.PROP_BITRATE) * 1024 / 8
# Duration is in HH:MM:SS format
seconds = self.store.db.entry_get(entry, rhythmdb.PROP_DURATION)
hours = seconds / 3600
seconds = seconds - hours * 3600
minutes = seconds / 60
seconds = seconds - minutes * 60
duration = ("%02d:%02d:%02d") % (hours, minutes, seconds)
location = self.get_path(entry)
mimetype = self.store.db.entry_get(entry, rhythmdb.PROP_MIMETYPE)
# This isn't a real mime-type
if mimetype == "application/x-id3":
mimetype = "audio/mpeg"
size = self.store.db.entry_get(entry, rhythmdb.PROP_FILE_SIZE)
album = self.store.db.entry_get(entry, rhythmdb.PROP_ALBUM)
if self.parent_id == None:
try:
self.parent_id = self.store.albums[album].id
except:
pass
# create item
item = DIDLLite.MusicTrack(self.id + TRACK_COUNT,self.parent_id)
item.album = album
item.artist = self.store.db.entry_get(entry, rhythmdb.PROP_ARTIST)
#item.date =
item.genre = self.store.db.entry_get(entry, rhythmdb.PROP_GENRE)
item.originalTrackNumber = str(self.store.db.entry_get (entry, rhythmdb.PROP_TRACK_NUMBER))
item.title = self.store.db.entry_get(entry, rhythmdb.PROP_TITLE) # much nicer if it was entry.title
cover = self.store.db.entry_request_extra_metadata(entry, "rb:coverArt-uri")
#self.warning("cover for %r is %r", item.title, cover)
if cover != None:
_,ext = os.path.splitext(cover)
item.albumArtURI = ''.join((self.get_url(),'?cover',ext))
# add http resource
res = DIDLLite.Resource(self.get_url(), 'http-get:*:%s:*' % mimetype)
if size > 0:
res.size = size
if duration > 0:
res.duration = str(duration)
if bitrate > 0:
res.bitrate = str(bitrate)
item.res.append(res)
# add internal resource
res = DIDLLite.Resource('track-%d' % self.id, 'rhythmbox:%s:%s:*' % (self.store.server.coherence.hostname, mimetype))
if size > 0:
res.size = size
if duration > 0:
res.duration = str(duration)
if bitrate > 0:
res.bitrate = str(bitrate)
item.res.append(res)
return item
def get_id(self):
return self.id
def get_name(self):
entry = self.store.db.entry_lookup_by_id (self.id)
return self.store.db.entry_get(entry, rhythmdb.PROP_TITLE)
def get_url(self):
return self.store.urlbase + str(self.id + TRACK_COUNT)
def get_path(self, entry = None):
if entry is None:
entry = self.store.db.entry_lookup_by_id (self.id)
uri = self.store.db.entry_get(entry, rhythmdb.PROP_LOCATION)
self.info("Track get_path uri = %r", uri)
location = None
if uri.startswith("file://"):
location = unicode(urllib.unquote(uri[len("file://"):]))
self.info("Track get_path location = %r", location)
return location
def get_cover(self):
entry = self.store.db.entry_lookup_by_id(self.id)
cover = self.store.db.entry_request_extra_metadata(entry, "rb:coverArt-uri")
return cover
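# Note (sketch): UPnP object ids are partitioned by TRACK_COUNT. Ids below TRACK_COUNT
# (1,000,000) name containers (root, albums, artists), while a track's UPnP id is its
# rhythmdb entry id plus TRACK_COUNT; get_by_id() below reverses that offset.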
class MediaStore(BackendStore):
logCategory = 'rb_media_store'
implements = ['MediaServer']
def __init__(self, server, **kwargs):
BackendStore.__init__(self,server,**kwargs)
self.warning("__init__ MediaStore %r", kwargs)
self.db = kwargs['db']
self.plugin = kwargs['plugin']
self.wmc_mapping.update({'4': lambda : self.get_by_id(AUDIO_ALL_CONTAINER_ID), # all tracks
'7': lambda : self.get_by_id(AUDIO_ALBUM_CONTAINER_ID), # all albums
'6': lambda : self.get_by_id(AUDIO_ARTIST_CONTAINER_ID), # all artists
})
self.next_id = CONTAINER_COUNT
self.albums = None
self.artists = None
self.tracks = None
self.urlbase = kwargs.get('urlbase','')
if( len(self.urlbase) > 0 and self.urlbase[len(self.urlbase)-1] != '/'):
self.urlbase += '/'
try:
self.name = kwargs['name']
except KeyError:
self.name = "Rhythmbox on %s" % self.server.coherence.hostname
query = self.db.query_new()
self.info(query)
self.db.query_append(query, [rhythmdb.QUERY_PROP_EQUALS, rhythmdb.PROP_TYPE, self.db.entry_type_get_by_name('song')])
qm = self.db.query_model_new(query)
self.db.do_full_query_async_parsed(qm, query)
self.album_query = self.db.property_model_new(rhythmdb.PROP_ALBUM)
self.album_query.props.query_model = qm
self.artist_query = self.db.property_model_new(rhythmdb.PROP_ARTIST)
self.artist_query.props.query_model = qm
self.containers = {}
self.containers[ROOT_CONTAINER_ID] = \
Container( ROOT_CONTAINER_ID,-1, "Rhythmbox on %s" % self.server.coherence.hostname)
self.containers[AUDIO_ALL_CONTAINER_ID] = \
Container( AUDIO_ALL_CONTAINER_ID,ROOT_CONTAINER_ID, 'All tracks',
children_callback=self.children_tracks,
store=self,play_container=True)
self.containers[ROOT_CONTAINER_ID].add_child(self.containers[AUDIO_ALL_CONTAINER_ID])
self.containers[AUDIO_ALBUM_CONTAINER_ID] = \
Container( AUDIO_ALBUM_CONTAINER_ID,ROOT_CONTAINER_ID, 'Albums',
children_callback=self.children_albums)
self.containers[ROOT_CONTAINER_ID].add_child(self.containers[AUDIO_ALBUM_CONTAINER_ID])
self.containers[AUDIO_ARTIST_CONTAINER_ID] = \
Container( AUDIO_ARTIST_CONTAINER_ID,ROOT_CONTAINER_ID, 'Artists',
children_callback=self.children_artists)
self.containers[ROOT_CONTAINER_ID].add_child(self.containers[AUDIO_ARTIST_CONTAINER_ID])
louie.send('Coherence.UPnP.Backend.init_completed', None, backend=self)
def get_by_id(self,id):
self.info("looking for id %r", id)
if isinstance(id, basestring) and id.startswith('artist_all_tracks_'):
try:
return self.containers[id]
except:
return None
id = id.split('@',1)
item_id = id[0]
item_id = int(item_id)
if item_id < TRACK_COUNT:
try:
item = self.containers[item_id]
except KeyError:
item = None
else:
item = Track(self, (item_id - TRACK_COUNT),None)
return item
def get_next_container_id(self):
ret = self.next_id
self.next_id += 1
return ret
def upnp_init(self):
if self.server:
self.server.connection_manager_server.set_variable(0, 'SourceProtocolInfo', [
'rhythmbox:%s:*:*' % self.server.coherence.hostname,
'http-get:*:audio/mpeg:*',
])
self.warning("__init__ MediaStore initialized")
def children_tracks(self, parent_id):
tracks = []
def track_cb (entry):
if self.db.entry_get (entry, rhythmdb.PROP_HIDDEN):
return
id = self.db.entry_get (entry, rhythmdb.PROP_ENTRY_ID)
track = Track(self, id, parent_id)
tracks.append(track)
self.db.entry_foreach_by_type (self.db.entry_type_get_by_name('song'), track_cb)
return tracks
def children_albums(self,parent_id):
albums = {}
self.info('children_albums')
def album_sort(x,y):
r = cmp(x.title,y.title)
self.info("sort %r - %r = %r", x.title, y.title, r)
return r
def collate (model, path, iter):
name = model.get(iter, 0)[0]
priority = model.get(iter, 1)[0]
self.info("children_albums collate %r %r", name, priority)
if priority is False:
id = self.get_next_container_id()
album = Album(self, name, id,parent_id)
self.containers[id] = album
albums[name] = album
if self.albums is None:
self.album_query.foreach(collate)
self.albums = albums
albums = self.albums.values() #.sort(cmp=album_sort)
albums.sort(cmp=album_sort)
return albums
def children_artists(self,parent_id):
artists = []
def collate (model, path, iter):
name = model.get(iter, 0)[0]
priority = model.get(iter, 1)[0]
if priority is False:
id = self.get_next_container_id()
artist = Artist(self,name, id,parent_id)
self.containers[id] = artist
artists.append(artist)
if self.artists is None:
self.artist_query.foreach(collate)
self.artists = artists
return self.artists
| [] |
2024-01-10 | wangd/rhythmbox | plugins~coherence~upnp_coherence~MediaPlayer.py | # Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2008, Frank Scholz <[email protected]>
import os.path
import urllib
from twisted.python import failure
import rhythmdb
from coherence.upnp.core.soap_service import errorCode
from coherence.upnp.core import DIDLLite
try:
import louie
except ImportError:
import coherence.extern.louie as louie
from coherence.extern.simple_plugin import Plugin
from coherence import log
TRACK_COUNT = 1000000
class RhythmboxPlayer(log.Loggable):
""" a backend to the Rhythmbox
"""
logCategory = 'rb_media_renderer'
implements = ['MediaRenderer']
vendor_value_defaults = {'RenderingControl': {'A_ARG_TYPE_Channel':'Master'},
'AVTransport': {'A_ARG_TYPE_SeekMode':('ABS_TIME','REL_TIME','TRACK_NR')}}
vendor_range_defaults = {'RenderingControl': {'Volume': {'maximum':100}}}
def __init__(self, device, **kwargs):
self.warning("__init__ RhythmboxPlayer %r", kwargs)
self.shell = kwargs['shell']
self.server = device
self.rb_mediaserver = kwargs['rb_mediaserver']
self.player = None
self.entry = None
self.metadata = None
try:
self.name = kwargs['name']
except KeyError:
self.name = "Rhythmbox on %s" % self.server.coherence.hostname
self.player = self.shell.get_player()
louie.send('Coherence.UPnP.Backend.init_completed', None, backend=self)
self.entry_type = rhythmdb.EntryType(name='CoherencePlayer')
self.shell.props.db.register_entry_type(self.entry_type)
self.playing = False
self.state = None
self.duration = None
self.volume = 1.0
self.muted_volume = None
self.view = []
self.tags = {}
def __repr__(self):
return str(self.__class__).split('.')[-1]
def volume_changed(self, player, parameter):
self.volume = self.player.props.volume
self.info('volume_changed to %r', self.volume)
if self.volume > 0:
rcs_id = self.server.connection_manager_server.lookup_rcs_id(self.current_connection_id)
self.server.rendering_control_server.set_variable(rcs_id, 'Volume', self.volume*100)
def playing_song_changed(self, player, entry):
self.info("playing_song_changed %r", entry)
if self.server != None:
connection_id = self.server.connection_manager_server.lookup_avt_id(self.current_connection_id)
if entry == None:
self.update('STOPPED')
self.playing = False
#self.entry = None
self.metadata = None
self.duration = None
else:
id = self.shell.props.db.entry_get (entry, rhythmdb.PROP_ENTRY_ID)
bitrate = self.shell.props.db.entry_get(entry, rhythmdb.PROP_BITRATE) * 1024 / 8
# Duration is in HH:MM:SS format
seconds = self.shell.props.db.entry_get(entry, rhythmdb.PROP_DURATION)
hours = seconds / 3600
seconds = seconds - hours * 3600
minutes = seconds / 60
seconds = seconds - minutes * 60
self.duration = "%02d:%02d:%02d" % (hours, minutes, seconds)
mimetype = self.shell.props.db.entry_get(entry, rhythmdb.PROP_MIMETYPE)
# This isn't a real mime-type
if mimetype == "application/x-id3":
mimetype = "audio/mpeg"
size = self.shell.props.db.entry_get(entry, rhythmdb.PROP_FILE_SIZE)
# create item
item = DIDLLite.MusicTrack(id + TRACK_COUNT,'101')
item.album = self.shell.props.db.entry_get(entry, rhythmdb.PROP_ALBUM)
item.artist = self.shell.props.db.entry_get(entry, rhythmdb.PROP_ARTIST)
item.genre = self.shell.props.db.entry_get(entry, rhythmdb.PROP_GENRE)
item.originalTrackNumber = str(self.shell.props.db.entry_get (entry, rhythmdb.PROP_TRACK_NUMBER))
item.title = self.shell.props.db.entry_get(entry, rhythmdb.PROP_TITLE) # much nicer if it was entry.title
cover = self.shell.props.db.entry_request_extra_metadata(entry, "rb:coverArt-uri")
if cover != None:
_,ext = os.path.splitext(cover)
item.albumArtURI = ''.join((self.server.coherence.urlbase+str(self.rb_mediaserver.uuid)[5:]+'/'+ str(int(id) + TRACK_COUNT),'?cover',ext))
item.res = []
location = self.shell.props.db.entry_get(entry, rhythmdb.PROP_LOCATION)
if location.startswith("file://"):
location = unicode(urllib.unquote(location[len("file://"):]))
uri = ''.join((self.server.coherence.urlbase+str(self.rb_mediaserver.uuid)[5:]+'/'+ str(int(id) + TRACK_COUNT)))
res = DIDLLite.Resource(uri, 'http-get:*:%s:*' % mimetype)
if size > 0:
res.size = size
if self.duration > 0:
res.duration = self.duration
if bitrate > 0:
res.bitrate = str(bitrate)
item.res.append(res)
# add internal resource
res = DIDLLite.Resource('track-%d' % id, 'rhythmbox:%s:%s:*' % (self.server.coherence.hostname, mimetype))
if size > 0:
res.size = size
if self.duration > 0:
res.duration = str(self.duration)
if bitrate > 0:
res.bitrate = str(bitrate)
item.res.append(res)
elt = DIDLLite.DIDLElement()
elt.addItem(item)
self.metadata = elt.toString()
self.entry = entry
if self.server != None:
self.server.av_transport_server.set_variable(connection_id, 'CurrentTrackURI',uri)
self.server.av_transport_server.set_variable(connection_id, 'AVTransportURI',uri)
self.server.av_transport_server.set_variable(connection_id, 'AVTransportURIMetaData',self.metadata)
self.server.av_transport_server.set_variable(connection_id, 'CurrentTrackMetaData',self.metadata)
self.info("playing_song_changed %r", self.metadata)
if self.server != None:
self.server.av_transport_server.set_variable(connection_id, 'CurrentTransportActions','PLAY,STOP,PAUSE,SEEK,NEXT,PREVIOUS')
self.server.av_transport_server.set_variable(connection_id, 'RelativeTimePosition', '00:00:00')
self.server.av_transport_server.set_variable(connection_id, 'AbsoluteTimePosition', '00:00:00')
def playing_changed(self, player, state):
self.info("playing_changed %r", state)
if state is True:
transport_state = 'PLAYING'
else:
if self.playing is False:
transport_state = 'STOPPED'
else:
transport_state = 'PAUSED_PLAYBACK'
self.update(transport_state)
try:
position = player.get_playing_time()
except:
position = None
try:
duration = player.get_playing_song_duration()
except:
duration = None
self.update_position(position,duration)
self.info("playing_changed %r %r ", position, duration)
def elapsed_changed(self, player, time):
self.info("elapsed_changed %r %r", player, time)
try:
duration = player.get_playing_song_duration()
except:
duration = None
self.update_position(time,duration)
def update(self, state):
self.info("update %r", state)
if state in ('STOPPED','READY'):
transport_state = 'STOPPED'
if state == 'PLAYING':
transport_state = 'PLAYING'
if state == 'PAUSED_PLAYBACK':
transport_state = 'PAUSED_PLAYBACK'
if self.state != transport_state:
self.state = transport_state
if self.server != None:
connection_id = self.server.connection_manager_server.lookup_avt_id(self.current_connection_id)
self.server.av_transport_server.set_variable(connection_id,
'TransportState',
transport_state)
def update_position(self, position,duration):
self.info("update_position %r %r", position,duration)
if self.server != None:
connection_id = self.server.connection_manager_server.lookup_avt_id(self.current_connection_id)
self.server.av_transport_server.set_variable(connection_id, 'CurrentTrack', 1)
if position is not None:
m,s = divmod( position, 60)
h,m = divmod(m,60)
if self.server != None:
self.server.av_transport_server.set_variable(connection_id, 'RelativeTimePosition', '%02d:%02d:%02d' % (h,m,s))
self.server.av_transport_server.set_variable(connection_id, 'AbsoluteTimePosition', '%02d:%02d:%02d' % (h,m,s))
if duration <= 0:
duration = None
if duration is not None:
m,s = divmod( duration, 60)
h,m = divmod(m,60)
if self.server != None:
self.server.av_transport_server.set_variable(connection_id, 'CurrentTrackDuration', '%02d:%02d:%02d' % (h,m,s))
self.server.av_transport_server.set_variable(connection_id, 'CurrentMediaDuration', '%02d:%02d:%02d' % (h,m,s))
if self.duration is None:
if self.metadata is not None:
self.info("update_position %r", self.metadata)
elt = DIDLLite.DIDLElement.fromString(self.metadata)
for item in elt:
for res in item.findall('res'):
res.attrib['duration'] = "%d:%02d:%02d" % (h,m,s)
self.metadata = elt.toString()
if self.server != None:
self.server.av_transport_server.set_variable(connection_id, 'AVTransportURIMetaData',self.metadata)
self.server.av_transport_server.set_variable(connection_id, 'CurrentTrackMetaData',self.metadata)
self.duration = duration
def load( self, uri, metadata):
self.info("player load %r %r", uri, metadata)
#self.shell.load_uri(uri,play=False)
self.duration = None
self.metadata = metadata
self.tags = {}
was_playing = self.playing
if was_playing == True:
self.stop()
if len(metadata)>0:
elt = DIDLLite.DIDLElement.fromString(metadata)
if elt.numItems() == 1:
item = elt.getItems()[0]
if uri.startswith('track-'):
self.entry = self.shell.props.db.entry_lookup_by_id(int(uri[6:]))
else:
self.entry = self.shell.props.db.entry_lookup_by_location(uri)
self.info("check for entry %r %r %r", self.entry,item.server_uuid,uri)
if self.entry == None:
if item.server_uuid is not None:
self.entry = self.shell.props.db.entry_new(self.entry_type, uri)
self.info("create new entry %r", self.entry)
else:
self.entry = self.shell.props.db.entry_new(self.entry_type, uri)
self.info("load and check for entry %r", self.entry)
duration = None
size = None
bitrate = None
for res in item.res:
if res.data == uri:
duration = res.duration
size = res.size
bitrate = res.bitrate
break
self.shell.props.db.set(self.entry, rhythmdb.PROP_TITLE, item.title)
try:
if item.artist is not None:
self.shell.props.db.set(self.entry, rhythmdb.PROP_ARTIST, item.artist)
except AttributeError:
pass
try:
if item.album is not None:
self.shell.props.db.set(self.entry, rhythmdb.PROP_ALBUM, item.album)
except AttributeError:
pass
try:
self.info("%r %r", item.title,item.originalTrackNumber)
if item.originalTrackNumber is not None:
self.shell.props.db.set(self.entry, rhythmdb.PROP_TRACK_NUMBER, int(item.originalTrackNumber))
except AttributeError:
pass
if duration is not None:
h,m,s = duration.split(':')
seconds = int(h)*3600 + int(m)*60 + int(s)
self.info("%r %r:%r:%r %r", duration, h, m , s, seconds)
self.shell.props.db.set(self.entry, rhythmdb.PROP_DURATION, seconds)
if size is not None:
self.shell.props.db.set(self.entry, rhythmdb.PROP_FILE_SIZE,int(size))
else:
if uri.startswith('track-'):
self.entry = self.shell.props.db.entry_lookup_by_id(int(uri[6:]))
else:
#self.shell.load_uri(uri,play=False)
#self.entry = self.shell.props.db.entry_lookup_by_location(uri)
self.entry = self.shell.props.db.entry_new(self.entry_type, uri)
self.playing = False
self.metadata = metadata
connection_id = self.server.connection_manager_server.lookup_avt_id(self.current_connection_id)
self.server.av_transport_server.set_variable(connection_id, 'CurrentTransportActions','PLAY,STOP,PAUSE,SEEK,NEXT,PREVIOUS')
self.server.av_transport_server.set_variable(connection_id, 'NumberOfTracks',1)
self.server.av_transport_server.set_variable(connection_id, 'CurrentTrackURI',uri)
self.server.av_transport_server.set_variable(connection_id, 'AVTransportURI',uri)
self.server.av_transport_server.set_variable(connection_id, 'AVTransportURIMetaData',metadata)
self.server.av_transport_server.set_variable(connection_id, 'CurrentTrackURI',uri)
self.server.av_transport_server.set_variable(connection_id, 'CurrentTrackMetaData',metadata)
if was_playing == True:
self.play()
def start(self, uri):
self.load(uri)
self.play()
def stop(self):
self.info("player stop")
self.player.stop()
self.playing = False
#self.server.av_transport_server.set_variable( \
# self.server.connection_manager_server.lookup_avt_id(self.current_connection_id),\
# 'TransportState', 'STOPPED')
def play(self):
self.info("player play")
if self.playing == False:
if self.entry:
self.player.play_entry(self.entry)
else:
self.player.playpause()
self.playing = True
else:
self.player.playpause()
#self.server.av_transport_server.set_variable( \
# self.server.connection_manager_server.lookup_avt_id(self.current_connection_id),\
# 'TransportState', 'PLAYING')
def pause(self):
self.player.pause()
#self.server.av_transport_server.set_variable( \
# self.server.connection_manager_server.lookup_avt_id(self.current_connection_id),\
# 'TransportState', 'PAUSED_PLAYBACK')
def seek(self, location, old_state):
"""
@param location: +nL = relative seek forward n seconds
-nL = relative seek backwards n seconds
"""
self.info("player seek %r", location)
self.player.seek(location)
self.server.av_transport_server.set_variable(0, 'TransportState', old_state)
def mute(self):
self.muted_volume = self.volume
self.player.set_volume(0)
rcs_id = self.server.connection_manager_server.lookup_rcs_id(self.current_connection_id)
self.server.rendering_control_server.set_variable(rcs_id, 'Mute', 'True')
def unmute(self):
if self.muted_volume is not None:
self.player.set_volume(self.muted_volume)
self.muted_volume = None
self.player.set_mute(False)
rcs_id = self.server.connection_manager_server.lookup_rcs_id(self.current_connection_id)
self.server.rendering_control_server.set_variable(rcs_id, 'Mute', 'False')
def get_mute(self):
return self.player.get_mute()
def get_volume(self):
self.volume = self.player.get_volume()
self.info("get_volume %r", self.volume)
return self.volume * 100
def set_volume(self, volume):
self.info("set_volume %r", volume)
volume = int(volume)
if volume < 0:
volume=0
if volume > 100:
volume=100
self.player.set_volume(float(volume/100.0))
def upnp_init(self):
        self.player.connect('playing-song-changed',
                            self.playing_song_changed)
        self.player.connect('playing-changed',
                            self.playing_changed)
        self.player.connect('elapsed-changed',
                            self.elapsed_changed)
self.player.connect("notify::volume", self.volume_changed)
self.current_connection_id = None
self.server.connection_manager_server.set_variable(0, 'SinkProtocolInfo',
['rhythmbox:%s:audio/mpeg:*' % self.server.coherence.hostname,
'http-get:*:audio/mpeg:*',
'rhythmbox:%s:application/ogg:*' % self.server.coherence.hostname,
'http-get:*:application/ogg:*',
'rhythmbox:%s:audio/ogg:*' % self.server.coherence.hostname,
'http-get:*:audio/ogg:*',
'rhythmbox:%s:audio/x-flac:*' % self.server.coherence.hostname,
'http-get:*:audio/x-flac:*',
'rhythmbox:%s:audio/flac:*' % self.server.coherence.hostname,
'http-get:*:audio/flac:*',
'rhythmbox:%s:audio/x-wav:*' % self.server.coherence.hostname,
'http-get:*:audio/x-wav:*',
'rhythmbox:%s:audio/L16;rate=44100;channels=2:*' % self.server.coherence.hostname,
'http-get:*:audio/L16;rate=44100;channels=2:*',
'rhythmbox:%s:audio/x-m4a:*' % self.server.coherence.hostname,
'http-get:*:audio/x-m4a:*'],
default=True)
self.server.av_transport_server.set_variable(0, 'TransportState', 'NO_MEDIA_PRESENT', default=True)
self.server.av_transport_server.set_variable(0, 'TransportStatus', 'OK', default=True)
self.server.av_transport_server.set_variable(0, 'CurrentPlayMode', 'NORMAL', default=True)
self.server.av_transport_server.set_variable(0, 'CurrentTransportActions', '', default=True)
self.server.rendering_control_server.set_variable(0, 'Volume', self.get_volume())
self.server.rendering_control_server.set_variable(0, 'Mute', self.get_mute())
def upnp_Play(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
Speed = int(kwargs['Speed'])
self.play()
return {}
def upnp_Previous(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
self.player.do_previous()
return {}
def upnp_Next(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
self.player.do_next()
return {}
def upnp_Pause(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
self.pause()
return {}
def upnp_Stop(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
self.stop()
return {}
def upnp_Seek(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
Unit = kwargs['Unit']
Target = kwargs['Target']
if Unit in ['ABS_TIME','REL_TIME']:
old_state = self.server.av_transport_server.get_variable('TransportState').value
self.server.av_transport_server.set_variable(0, 'TransportState', 'TRANSITIONING')
sign = 1
if Target[0] == '+':
Target = Target[1:]
if Target[0] == '-':
Target = Target[1:]
sign = -1
h,m,s = Target.split(':')
seconds = int(h)*3600 + int(m)*60 + int(s)
if Unit == 'ABS_TIME':
position = self.player.get_playing_time()
self.seek(seconds-position, old_state)
elif Unit == 'REL_TIME':
self.seek(seconds*sign, old_state)
return {}
def upnp_SetAVTransportURI(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
CurrentURI = kwargs['CurrentURI']
CurrentURIMetaData = kwargs['CurrentURIMetaData']
local_protocol_infos=self.server.connection_manager_server.get_variable('SinkProtocolInfo').value.split(',')
#print '>>>', local_protocol_infos
if len(CurrentURIMetaData)==0:
self.load(CurrentURI,CurrentURIMetaData)
return {}
else:
elt = DIDLLite.DIDLElement.fromString(CurrentURIMetaData)
#import pdb; pdb.set_trace()
if elt.numItems() == 1:
item = elt.getItems()[0]
res = item.res.get_matching(local_protocol_infos, protocol_type='rhythmbox')
if len(res) == 0:
res = item.res.get_matching(local_protocol_infos)
if len(res) > 0:
res = res[0]
remote_protocol,remote_network,remote_content_format,_ = res.protocolInfo.split(':')
self.load(res.data,CurrentURIMetaData)
return {}
return failure.Failure(errorCode(714))
def upnp_SetMute(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
Channel = kwargs['Channel']
DesiredMute = kwargs['DesiredMute']
if DesiredMute in ['TRUE', 'True', 'true', '1','Yes','yes']:
self.mute()
else:
self.unmute()
return {}
def upnp_SetVolume(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
Channel = kwargs['Channel']
DesiredVolume = int(kwargs['DesiredVolume'])
self.set_volume(DesiredVolume)
return {}
| [] |
2024-01-10 | appventure-nush/nush-llm-chatbot | llamaindex~test_chatbot_loop.py | from llamaindex import chatbot_agent
while True:
text_input = input("User: ")
response = chatbot_agent.agent_chain.run(input=text_input)
print(f'Agent: {response}')
| [] |
2024-01-10 | appventure-nush/nush-llm-chatbot | frontend~app.py | from flask import Flask, render_template, request
from flask.json import jsonify
import os
from llamaindex.chatbot_agent import create_agent_chain
app = Flask(__name__)
agent = None
# versions of the chatbot (different version for each module)
# ensure that the version name matches the file path of the source documents
versions = ["PC3131", "CS2131"]
@app.route("/chat", methods=['POST'])
def chat():
global agent
user_response = request.json
response = agent(user_response)
return jsonify(response=response['output'])
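
# Example request to the /chat endpoint (a sketch: the JSON payload shape below is an
# assumption, since the raw request body is passed straight to the agent chain built in home()):
#   curl -X POST http://localhost:5601/chat \
#        -H "Content-Type: application/json" \
#        -d '{"input": "Summarise chapter 1"}'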
@app.route("/", methods=['GET', 'POST'])
def home():
if request.method == 'POST':
version = request.form['version']
if version not in versions:
return "nuh-uh", 400
else:
# default settings
version = 'PC3131'
# do something to load appropriate llamaindex files for the version of the chatbot
global agent
agent = create_agent_chain(version)
return render_template("index.html", versions=versions, selected_version=version)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5601, debug=True)
| [] |
2024-01-10 | appventure-nush/nush-llm-chatbot | llamaindex~vector_indices.py | # initialize simple vector indices + global vector index
import os
import openai
from llama_index import ServiceContext, StorageContext, VectorStoreIndex, SimpleDirectoryReader, \
load_index_from_storage, LangchainEmbedding
from langchain.embeddings import HuggingFaceEmbeddings
package_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(package_dir, "apikey"), "r") as f:
openai.api_key = f.read().strip()
# sometimes env bugs out and complains the api_key is not set, so setting it as environment variable just to
# be safe
os.environ["OPENAI_API_KEY"] = openai.api_key
if __name__ == "__main__":
# by default, LlamaIndex uses OpenAI's embedding, we will use HuggingFace's embedding instead since that is free
embed_model = LangchainEmbedding(HuggingFaceEmbeddings())
# initialize simple vector indices + global vector index
service_context = ServiceContext.from_defaults(embed_model=embed_model)
index_set = {}
for chapter in os.listdir(os.path.join(package_dir, "modules/PC3131")):
docs = SimpleDirectoryReader(os.path.join(package_dir, "modules/PC3131", chapter)).load_data()
storage_context = StorageContext.from_defaults()
cur_index = VectorStoreIndex.from_documents(
docs,
service_context=service_context,
storage_context=storage_context,
)
index_set[chapter] = cur_index
storage_context.persist(persist_dir=os.path.join(package_dir, f"storage/{chapter}"))
print("Loaded", chapter)
def load_indices():
package_dir = os.path.dirname(os.path.abspath(__file__))
# by default, LlamaIndex uses OpenAI's embedding, we will use HuggingFace's embedding instead since that is free
embed_model = LangchainEmbedding(HuggingFaceEmbeddings())
# initialize simple vector indices + global vector index
service_context = ServiceContext.from_defaults(embed_model=embed_model)
# load indices from disk
index_set = {}
for chapter in os.listdir(os.path.join(package_dir, "modules/PC3131")):
storage_context = StorageContext.from_defaults(persist_dir=os.path.join(package_dir, f"storage/{chapter}"))
cur_index = load_index_from_storage(storage_context=storage_context, service_context=service_context)
index_set[chapter] = cur_index
return index_set
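
# Example (a sketch): after this module has been run once to persist the indices,
# they can be reloaded and queried per chapter, e.g.
#   index_set = load_indices()
#   engine = next(iter(index_set.values())).as_query_engine()
#   print(engine.query("Summarise this chapter."))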
| [] |
2024-01-10 | appventure-nush/nush-llm-chatbot | llamaindex~chatbot_agent.py | import os
import openai
from llama_index import ListIndex, LLMPredictor, ServiceContext, StorageContext, LangchainEmbedding, \
set_global_service_context
from langchain.embeddings import HuggingFaceEmbeddings
from langchain import OpenAI
from llama_index.indices.composability import ComposableGraph
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.agents import initialize_agent
from llama_index.langchain_helpers.agents import LlamaToolkit, create_llama_chat_agent, IndexToolConfig
from llama_index.indices.query.query_transform.base import DecomposeQueryTransform
from llama_index.query_engine.transform_query_engine import TransformQueryEngine
from llamaindex import vector_indices
def create_agent_chain(module="PC3131"):
"""
:param module: filename of the specific module under modules
:return: creates agent with retrieval knowledge of specified module
"""
package_dir = os.path.dirname(os.path.abspath(__file__))
index_set = vector_indices.load_indices()
with open(os.path.join(package_dir, "apikey"), "r") as f:
API_KEY = f.read().strip()
openai.api_key = API_KEY
# sometimes env bugs out and complains the api_key is not set, so setting it as environment variable just to be safe
os.environ["OPENAI_API_KEY"] = API_KEY
# describe each index to help traversal of composed graph
index_summaries = [f"Notes for {chapter}" for chapter in os.listdir(os.path.join(package_dir,
f"modules/{module}"))]
# define an LLMPredictor set number of output tokens
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, max_tokens=512, openai_api_key=API_KEY))
# by default, LlamaIndex uses OpenAI's embedding, we will use HuggingFace's embedding instead since that is free
embed_model = LangchainEmbedding(HuggingFaceEmbeddings())
service_context = ServiceContext.from_defaults(chunk_size=512, llm_predictor=llm_predictor, embed_model=embed_model)
storage_context = StorageContext.from_defaults()
set_global_service_context(service_context)
# define a list index over the vector indices
# allows us to synthesize information across each index
graph = ComposableGraph.from_indices(
ListIndex,
[index_set[chapter] for chapter in os.listdir(os.path.join(package_dir, f"modules/{module}"))],
index_summaries=index_summaries,
service_context=service_context,
storage_context=storage_context,
)
# define a decompose transform
decompose_transform = DecomposeQueryTransform(
llm_predictor, verbose=True
)
# define custom retrievers
custom_query_engines = {}
for index in index_set.values():
query_engine = index.as_query_engine()
query_engine = TransformQueryEngine(
query_engine,
query_transform=decompose_transform,
transform_metadata={'index_summary': index.index_struct.summary},
)
custom_query_engines[index.index_id] = query_engine
custom_query_engines[graph.root_id] = graph.root_index.as_query_engine(
response_mode='tree_summarize',
verbose=True,
)
# construct query engine
graph_query_engine = graph.as_query_engine(custom_query_engines=custom_query_engines)
# tool config
graph_config = IndexToolConfig(
query_engine=graph_query_engine,
name=f"Graph Index",
description=f"useful for when you want to answer queries that require analyzing multiple chapters of {module}.",
tool_kwargs={"return_direct": True},
)
# define toolkit
index_configs = []
for chapter in os.listdir(os.path.join(package_dir, f"modules/{module}")):
query_engine = index_set[chapter].as_query_engine(
similarity_top_k=3,
)
tool_config = IndexToolConfig(
query_engine=query_engine,
name=f"Vector Index {chapter}",
description=f"useful for when you want to answer queries about {chapter}",
tool_kwargs={"return_direct": True},
)
index_configs.append(tool_config)
toolkit = LlamaToolkit(
index_configs=index_configs + [graph_config],
)
memory = ConversationBufferMemory(memory_key="chat_history")
llm = OpenAI(temperature=0, openai_api_key=API_KEY)
agent_chain = create_llama_chat_agent(
toolkit,
llm,
memory=memory,
verbose=True
)
return agent_chain
if __name__ == "__main__":
agent_chain = create_agent_chain()
print("Agent initialised!")
agent_chain.run(input="Compare/contrast the concepts described across the PC3131.")
| [] |
2024-01-10 | acceleratescience/large-language-models | agents~rag_agent.py | from types import SimpleNamespace
import warnings
warnings.filterwarnings("ignore") # I'm sure this will be fine...
from transformers import TextDataset, DataCollatorForLanguageModeling
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import Trainer, TrainingArguments
import torch
from datasets import Dataset
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from huggingface_hub import InferenceClient
class RAGAgent():
def __init__(self, model_config : SimpleNamespace, database_config : SimpleNamespace = None, local : bool = True):
"""RAG agent
Args:
model_config (SimpleNamespace): model parameters
database_config (SimpleNamespace, optional): database parameters. Defaults to None.
"""
self.model_config = model_config
self.database_config = database_config
self.local = local
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Initalizing model: {self.model_config.model_name}")
if local:
self.tokenizer = AutoTokenizer.from_pretrained(self.model_config.model_name)
self.model = AutoModelForCausalLM.from_pretrained(self.model_config.model_name).to(self.device)
else:
self.model = InferenceClient(model=model_config.model_name)
if self.database_config is not None:
print(f"Creating database from: {self.database_config.text_path}")
self.db = self._create_database()
self.trained = False
def __repr__(self):
agent_config = f"self.model_config: {self.model_config}\nself.database_config: {self.database_config}"
return agent_config
def _create_database(self):
with open(self.database_config.text_path, 'r') as f:
text = f.read()
# Split text
text_splitter = self.database_config.text_splitter(
separator=' ',
chunk_size=self.database_config.chunk_size,
chunk_overlap=self.database_config.chunk_overlap,
length_function=len
)
chunks = text_splitter.split_text(text)
embeddings = HuggingFaceEmbeddings(model_name=self.database_config.embedding_model)
db = self.database_config.vector_store.from_texts(chunks, embeddings)
return db
def _generate_local(self, prompt):
input_ids = self.tokenizer.encode(prompt, return_tensors='pt').to(self.device)
output = self.model.generate(input_ids,
max_length=self.model_config.gen_length + len(input_ids[0]),
temperature=self.model_config.temperature,
do_sample=self.model_config.do_sample,
repetition_penalty=self.model_config.repetition_penalty,
# pad_token_id=self.tokenizer.pad_token_id
)
# output without input_ids
return self.tokenizer.decode(output[0][len(input_ids[0]):], skip_special_tokens=True)[1:]
def _generate_remote(self, prompt):
output = self.model.text_generation(
prompt,
max_new_tokens=self.model_config.gen_length,
temperature=self.model_config.temperature,
do_sample=self.model_config.do_sample,
repetition_penalty=self.model_config.repetition_penalty,
)
return output[1:]
def ask_question(self, query : str = "What is your name?", retrieval : bool = True) -> str:
"""Ask a question
Args:
query (str, optional): Query to the Agent. Defaults to "What is your name?".
Returns:
str: Output string from the Agent
"""
question = "QUESTION: " + query
if retrieval and self.database_config is not None:
docs = self.db.similarity_search(query, k=3)
prompt = " ".join([doc.page_content for doc in docs]) + "\n\n" + question + " RESPONSE:"
else:
prompt = question + " RESPONSE:"
if self.local:
output = self._generate_local(prompt)
else:
output = self._generate_remote(prompt)
# output without input_ids
return output
def train(self, training_config : SimpleNamespace) -> None:
"""Train the Agent using the given training_config
Args:
training_config (SimpleNamespace): Training hyperparameters
Returns:
None
"""
if not self.local:
raise NotImplementedError("Training is not supported for remote models")
if not self.trained:
self.tokenizer.add_special_tokens({'pad_token': '<pad>'})
with torch.no_grad():
self.model.resize_token_embeddings(len(self.tokenizer))
self.model.config.pad_token_id = self.tokenizer.pad_token_id
data = Dataset.from_text(training_config.dataset_path, split='train')
outputs = self.tokenizer(
data["text"],
truncation=True,
max_length=training_config.context_length,
return_overflowing_tokens=True,
return_length=True,
)
{"input_ids": outputs.input_ids}
tokenized_dataset = Dataset.from_dict({"input_ids": outputs.input_ids})
data_collator = DataCollatorForLanguageModeling(self.tokenizer, mlm=False)
args = TrainingArguments(
output_dir="./results",
per_device_train_batch_size=training_config.batch_size,
num_train_epochs=training_config.num_epochs,
logging_steps=100
)
trainer = Trainer(
model=self.model,
tokenizer=self.tokenizer,
args=args,
data_collator=data_collator,
train_dataset=tokenized_dataset,
)
trainer.train()
self.trained = True | [
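
if __name__ == "__main__":
    # Example usage (a sketch): every config field below mirrors an attribute read by
    # RAGAgent above; the model name, corpus path and embedding model are placeholders.
    example_model_config = SimpleNamespace(
        model_name="distilgpt2", gen_length=64, temperature=0.7,
        do_sample=True, repetition_penalty=1.2,
    )
    example_database_config = SimpleNamespace(
        text_path="corpus.txt", text_splitter=CharacterTextSplitter,
        chunk_size=500, chunk_overlap=50,
        embedding_model="sentence-transformers/all-MiniLM-L6-v2", vector_store=FAISS,
    )
    agent = RAGAgent(example_model_config, example_database_config, local=True)
    print(agent.ask_question("What does the corpus say about its main topic?"))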
"\n\n",
"PLACEHOLDER RESPONSE:",
" RESPONSE:",
" "
] |
2024-01-10 | acceleratescience/large-language-models | agents~vision_agent.py | import openai
from openai import OpenAI
import json
import requests
import base64
class VisionAgent:
"""
A vision agent that uses OpenAI's API to generate a response to an image.
"""
def __init__(self, model : str = "gpt-4-vision-preview"):
self.client = OpenAI()
self.model = model
    def get_response(self, prompt : str, image_path : str) -> str:
        """Send a text prompt and a local image to the vision model.
        Args:
            prompt (str): Question or instruction about the image.
            image_path (str): Path to the local image file to encode and send.
        Returns:
            str: The model's text response.
        """
base64_image = self._encode_image(image_path)
response = self.client.chat.completions.create(
model=self.model,
messages=[
{
"role": "user",
"content": [
{"type": "text", "text": prompt},
{
"type": "image_url",
"image_url": {
"url": f"data:image/png;base64,{base64_image}",
},
},
],
}
],
max_tokens=256,
)
return response.choices[0].message.content
    def _encode_image(self, image_path):
        """Read an image file and return its contents as a base64-encoded string.
        Args:
            image_path (str): Path to the image file.
        Returns:
            str: Base64-encoded file contents decoded to a UTF-8 string.
        """
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode('utf-8')
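
if __name__ == "__main__":
    # Example usage (a sketch; the prompt and image path are placeholders, and the
    # OpenAI client assumes OPENAI_API_KEY is set in the environment):
    agent = VisionAgent()
    print(agent.get_response("Describe this image in one sentence.", "example.png"))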
| [
"[{'type': 'text', 'text': PLACEHOLDER}, {'type': 'image_url', 'image_url': {'url': 'data:image/png;base64,PLACEHOLDER'}}]"
] |
2024-01-10 | lottie-logic/HelpDocs-ChatBot | src~p4.convert_jsonl_with_embeddings_to_csv.py | import json
import pandas as pd
import numpy as np
import os
filename = "data_sample/d3.embeddings_maker_results.jsonl"
with open(os.path.abspath(filename), "r", encoding="utf-8") as f:
    data = [json.loads(line) for line in f]
print("OPENED JSONL FILE WITH EMBEDDINGS")
def flattenizer(a):
return (a[0],) + tuple(a[1])
dataframe_with_text_and_embeddings = pd.DataFrame()
processed_count = 0
mydata_expanded_flat = []
for line in data:
# if the data had an error when trying to embed the text from OpenAi
# it returns a list instance instead of a dict.
# The error count reported from p3 plus processed_count should equal
# the total amount of documents you sent to OpenAI for processing
if isinstance(line[1], list):
continue
else:
info = flattenizer(
[
json.loads(json.dumps(line))[0]["input"],
json.loads(json.dumps(line))[1]["data"][0]["embedding"],
]
)
mydata_expanded_flat.append(info)
processed_count += 1
print(f"\nTotal embeddings converted to csv: {processed_count}\n")
# TODO Drop any bad lines if an embedding was not successful
# mydata_expanded_flat = [
# flattenizer(
# [
# json.loads(json.dumps(line))[0]["input"],
# json.loads(json.dumps(line))[1]["data"][0]["embedding"],
# ]
# )
# for line in data
# ]
print("CONVERTED JSONL FLAT ARRAY")
def columns_index_maker():
column_names = []
column_names.append("gpttext")
for _ in range(1536):
column_names.append(str(_))
return column_names
all_the_columns = columns_index_maker()
df = pd.DataFrame(mydata_expanded_flat, columns=all_the_columns)
print("CONVERTED BIG ARRAY TO DATAFRAME")
def chunker(seq, size):
return (seq[pos : pos + size] for pos in range(0, len(seq), size))
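
# For example, chunker(list(range(10)), 4) yields the slices [0, 1, 2, 3], [4, 5, 6, 7]
# and [8, 9]; the same slicing is applied to the dataframe below so that only `chunks`
# rows are handled per CSV append.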
def chonk_dataframe_and_make_csv_with_embeds(pddf, outputfile, chunks):
"""
If you are working on very large files, for example uploading all of wikipedia
these indexes can get very very chonky with the embeddings appended (like >400Gb).
This is why we chunk through the dataframe and append pieces to the CSV to avoid
running out of memory.
Args:
        pddf (pd.DataFrame): Dataframe of text and embeddings to write out.
        outputfile (str): Path of the .csv file the embeddings are appended to.
        chunks (int): Number of dataframe rows written per append.
"""
for i, chunk in enumerate(chunker(pddf, chunks)):
print("CHONKING TO CSV No: " + str(i))
document_embeddings_i = pd.DataFrame(chunk)
document_embeddings_i.to_csv(
outputfile, mode="a", index=False, header=False if i > 0 else True
)
if __name__ == "__main__":
chonk_dataframe_and_make_csv_with_embeds(
df, "data_sample/d4.embeddings_maker_results.csv", 1000
)
| [] |
2024-01-10 | thakkaryash94/reviewgpt-server | app~spiders~flipkartspider.py | import os
from typing import TypeVar, Union
import scrapy
import chromadb
import uuid
from langchain.embeddings import OllamaEmbeddings
from langchain.vectorstores import Chroma
from chromadb import Documents, EmbeddingFunction, Embeddings
# from datetime import datetime
TITLE_CLASS_ID = "_2-N8zT"
REVIEW_TEXT_CLASS_ID = "t-ZTKy"
AUTHOR_CLASS_ID = "_2sc7ZR _2V5EHH"
DATE_CLASS_ID = "_2sc7ZR"
VERIFIED_CLASS_ID = "_2mcZGG"
RATING_CLASS_ID_1 = "_1BLPMq"
RATING_CLASS_ID_2 = "_3LWZlK"
REVIEWS_CLASS_ID = "_27M-vq"
READ_MORE_CLASS_ID = "_1BWGvX"
NEXT_PAGE_CLASS_ID = "_1LKTO3"
# Embeddable = Union[Documents, Images]
Embeddable = Documents
D = TypeVar("D", bound=Embeddable, contravariant=True)
class MyEmbeddingFunction(EmbeddingFunction):
def __call__(self, input: D) -> Embeddings:
# embed the documents somehow
oembed = OllamaEmbeddings(
base_url="http://127.0.0.1:11434", model="orca2")
return oembed.embed_documents(input)
def get_collection(collection_name):
client = chromadb.HttpClient(host='127.0.0.1', port=8000)
collection = client.get_or_create_collection(
name=collection_name, embedding_function=MyEmbeddingFunction())
return collection
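
# Example (a sketch): once the spider has stored reviews, the same collection can be
# queried directly, e.g.
#   get_collection("reviews").query(query_texts=["battery life"], n_results=3)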
class Flipkart(scrapy.Spider):
name = 'flipkart'
current_page = 1
def __init__(self, url=None, *args, **kwargs):
super(Flipkart, self).__init__(*args, **kwargs)
self.start_urls = [f"{url}"]
def start_requests(self):
yield scrapy.Request(
url=self.start_urls[0],
meta={
"playwright": True,
"playwright_include_page": True,
"errback": self.errback,
},
)
async def parse(self, response):
print(
f"*************Parsing page {self.current_page} {response.url}****************************")
print(response.meta)
page = response.meta["playwright_page"]
reviews = response.xpath(f'.//div[@class="{REVIEWS_CLASS_ID}"]')
review_index = 0
read_more_index = 0
read_mores = await page.locator(f'.{READ_MORE_CLASS_ID}').all()
collection = get_collection("reviews")
for review_item in reviews:
id = str(uuid.uuid4())[:13]
rating_text = review_item.xpath(
f'.//div[contains(@class, "{RATING_CLASS_ID_1}")and contains(@class, "{RATING_CLASS_ID_2}")]/text()').get()
title = review_item.xpath(
f'.//p[@class="{TITLE_CLASS_ID}"]/text()').get()
review_text = review_item.xpath(
f'.//div[@class="{REVIEW_TEXT_CLASS_ID}"]/div/div/text()').get()
author = review_item.xpath(
f'.//p[@class="{AUTHOR_CLASS_ID}"]/text()').get()
verified = review_item.xpath(
f'.//p[@class="{VERIFIED_CLASS_ID}"]/span/text()').get()
created_time = review_item.xpath(
f'.//p[@class="{DATE_CLASS_ID}"]/text()').get()
is_read_more = bool(review_item.xpath(
f'.//div[@class="{REVIEW_TEXT_CLASS_ID}"]/div/span[@class="{READ_MORE_CLASS_ID}"]').get() is not None)
if is_read_more:
read_more_item = read_mores[read_more_index]
await read_more_item.click()
all = await page.query_selector_all(f'.{REVIEW_TEXT_CLASS_ID}>div>div')
review_text = await all[review_index].inner_html()
read_more_index = read_more_index + 1
review_index = review_index + 1
# print("==========id=========", id)
# print("==========rating_text=========", rating_text)
# print("==========title=========", title)
# print("==========review_text=========", review_text)
# print("==========author=========", author)
# print("==========verified=========", verified)
# print("==========created_time=========", created_time)
# docs.append(Document(
# page_content="A bunch of scientists bring back dinosaurs and mayhem breaks loose",
# metadata={"year": 1993, "rating": 7.7, "genre": "science fiction"})
# )
document = f'The review for the product is {review_text}. Written by {author} in {created_time} with {rating_text} star ratings.'
collection.add(
documents=document,
metadatas={
"title": title,
"author": author,
"verified": verified,
"rating": float(rating_text),
"rating_limit": 5,
"created_at": created_time,
},
ids=id
)
print(f"{id} record inserted")
pages = response.xpath(f'.//a[@class="{NEXT_PAGE_CLASS_ID}"]')
next_page_url = pages[0].xpath(f'.//@href').get()
self.current_page = self.current_page + 1
if len(pages) == 2:
next_page_url = pages[1].xpath(f'.//@href').get()
print(
f"--------next_page_url---------------{response.urljoin(next_page_url)}")
# fetch 2 pages
if next_page_url is not None and self.current_page <= 1:
yield scrapy.Request(
url=response.urljoin(next_page_url),
meta={
"playwright": True,
"playwright_include_page": True,
"dont_filter": True,
"errback": self.errback,
},
callback=self.parse
)
else:
print('*************No Page Left*************')
await page.close()
async def errback(self, failure):
print('-----------ERROR CALLBACK------------------------')
page = failure.request.meta["playwright_page"]
await page.close()
| [] |
2024-01-10 | thakkaryash94/reviewgpt-server | app~routers~reviews.py | import re
import subprocess
from urllib.parse import urlparse, urlunparse
from fastapi import APIRouter, Depends, Request, status
from pydantic import BaseModel, Field
from typing import Annotated, Any
from sqlalchemy.orm import Session
# from langchain.llms import Ollama
# from langchain.embeddings import OllamaEmbeddings
# from langchain.vectorstores import Chroma
# from langchain.embeddings import OllamaEmbeddings
# from langchain.prompts.prompt import PromptTemplate
# from langchain.vectorstores import Chroma
# from chromadb import Documents, EmbeddingFunction, Embeddings
# from langchain.chains import RetrievalQA
# from langchain.callbacks.manager import CallbackManager
# from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
# from langchain.chat_models import ChatOllama
# from langchain.schema import HumanMessage
# from sqlalchemy.orm import Session
from app.chatmodel import generate_one_time_answer
from app.constants import MODEL, RECAPTCHA_SITEVERIFY_URL, RECAPTCHA_TOKEN
from app.database import crud, models, schemas
from app.database.database import get_db
from app.ipdetails import get_ip_details
from app.logger import get_logger
from app.recaptcha import recaptcha_verify
router = APIRouter(prefix="/reviews", tags=["reviews"])
class ReviewBody(BaseModel):
url: str = Field(
examples=[
"https://www.amazon.in/boAt-Airdopes-161-Playtime-Immersive/product-reviews/B09N7KCNL6",
"https://www.flipkart.com/boat-airdopes-161-40-hours-playback-asap-charge-10mm-drivers-bluetooth-headset/product-reviews/itm8a7493150ae4a?pid=ACCG6DS7WDJHGWSH&lid=LSTACCG6DS7WDJHGWSH4INU8G&marketplace=FLIPKART",
],
)
token: str = Field(examples=["Recaptcha Token"])
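
# Example request body for POST /reviews/one-time (a sketch; the token must be a valid
# reCAPTCHA response token):
#   {"url": "https://www.amazon.in/boAt-Airdopes-161-Playtime-Immersive/product-reviews/B09N7KCNL6",
#    "token": "<recaptcha-token>"}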
dbDep = Annotated[Session, Depends(get_db)]
logger = get_logger("reviews")
# @router.post("/one-time", response_model=schemas.ReviewResponse)
@router.post("/one-time")
async def get_one_time_review(request: Request, body: ReviewBody, db: dbDep) -> Any:
logger.info("Request started")
url = body.url
product_id: str
if re.search("/dp/", url):
product_id = re.search(r"dp/(.+)/", url).group(1)
if re.search("/product-reviews/", url):
product_id = re.search(r"product-reviews/(.+)/", url).group(1)
parsed_url = urlparse(url)
website = urlunparse((parsed_url.scheme, parsed_url.netloc, "", "", "", ""))
url = f"{website}/product-reviews/{product_id}/ref=cm_cr_dp_d_show_all_btm?ie=UTF8&reviewerType=avp_only_reviews"
client_ip = request.client.host
payload = {"secret": RECAPTCHA_TOKEN, "response": body.token}
recaptcha_result = recaptcha_verify(payload)
count = crud.count_othistory_by_ip(db=db, ip_address=client_ip)
ip_info = get_ip_details(client_ip)
history = schemas.OTHistoryCreate(
url=url,
ip_address=client_ip,
ip_info=ip_info,
status=models.OTHistoryEnum.PENDING,
)
dbHistory = crud.create_othistory(db=db, item=history)
# if not recaptcha_result.get("success"):
# dbHistory.status = models.OTHistoryEnum.REJECTED
# dbHistory = crud.update_othistory(db=db, item=dbHistory)
# return {
# "code": status.HTTP_401_UNAUTHORIZED,
# "message": "Unauthorized Access",
# "success": True,
# }
# if count >= 5:
# dbHistory.status = models.OTHistoryEnum.REJECTED
# dbHistory = crud.update_othistory(db=db, item=dbHistory)
# return {
# "code": status.HTTP_429_TOO_MANY_REQUESTS,
# "message": "Too many requests from same IP",
# "success": False,
# }
logger.info(f"Scraping {url}")
result: Any
crawler = "amazon"
if "https://www.flipkart" in url:
crawler = "flipkart"
result = subprocess.run(
[
"scrapy",
"crawl",
f"{crawler}",
"-a",
f"url={url}",
"-a",
f"page=1",
],
capture_output=True,
timeout=20,
text=True,
)
logger.info("Reviews fetched successfully")
reviews = result.stdout
if reviews == "":
dbHistory.status = models.OTHistoryEnum.FAILED
dbHistory = crud.update_othistory(db=db, item=dbHistory)
return {
"code": status.HTTP_500_INTERNAL_SERVER_ERROR,
"message": "Something went wrong",
"success": False,
}
data = generate_one_time_answer(reviews)
if result.stdout:
dbHistory.status = models.OTHistoryEnum.SUCCESS
dbHistory = crud.update_othistory(db=db, item=dbHistory)
return {
"code": status.HTTP_200_OK,
"message": "Response retrieved successfully",
"data": data,
"success": True,
}
else:
dbHistory.status = models.OTHistoryEnum.FAILED
dbHistory = crud.update_othistory(db=db, item=dbHistory)
return {
"code": status.HTTP_500_INTERNAL_SERVER_ERROR,
"message": "Something went wrong",
"success": False,
}
class QuestionBody(BaseModel):
question: str
# @router.post("/reviews/question")
# def post_question(body: QuestionBody, db: Session = Depends(get_db)):
# ollama = Ollama(base_url="http://localhost:11434", model=MODEL)
# oembed = OllamaEmbeddings(base_url="http://localhost:11434", model=MODEL)
# client = chromadb.HttpClient(host="127.0.0.1", port=8000)
# crud.create_history(db=db, history="")
# vectorstore = Chroma(
# client=client,
# collection_name="amz_reviews",
# embedding_function=oembed,
# )
# documents = vectorstore.get().get("documents")
# # Prompt
# # template = """Use the following pieces of context to answer the question at the end.
# # If you don't know the answer, just say that you don't know, don't try to make up an answer.
# # Use three sentences maximum and keep the answer as concise as possible.
# # {context}
# # Question: {question}
# # Helpful Answer:"""
# # QA_CHAIN_PROMPT = PromptTemplate(
# # input_variables=["context", "question"],
# # template=template,
# # )
# # qachain = RetrievalQA.from_chain_type(
# # llm=ollama,
# # retriever=vectorstore.as_retriever(search_kwargs={"k": 6}),
# # chain_type="stuff",
# # chain_type_kwargs={"prompt": QA_CHAIN_PROMPT},
# # return_source_documents=True,
# # )
# # qachain = RetrievalQA.from_chain_type(ollama, retriever=vectorstore.as_retriever())
# # result = qachain({"query": body.question})
# # return result
# chat_model = ChatOllama(
# model=MODEL,
# format="json",
# callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
# )
# messages = [
# HumanMessage(content="Below are the reviews of the product. Analyze them"),
# HumanMessage(content="\n".join(documents)),
# HumanMessage(content=body.question),
# ]
# print("\n".join(documents))
# chat_model_response = chat_model(messages)
# return chat_model_response
| [] |
2024-01-10 | WayneQwele/LLMwebappdashboard | presentation~chainlit_document_reader.py |
import os
from langchain.document_loaders import PyPDFLoader, TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.chroma import Chroma
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.chat_models import ChatOpenAI
import chainlit as cl
import openai
from chainlit.types import AskFileResponse
import chromadb
from getkey import getkey
getkey()
#export HNSWLIB_NO_NATIVE = 1
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
embeddings = OpenAIEmbeddings()
welcome_message = """
Welcome to the Chainlit PDF QA demo!
1) Upload a PDF file
2) Ask a question about a file
"""
def process_file(file: AskFileResponse):
"""
Chainlit offers this as boiler plate code
"""
import tempfile
if file.type == "text/plain":
Loader = TextLoader
elif file.type == "application/pdf":
Loader = PyPDFLoader
    with tempfile.NamedTemporaryFile() as tmp:
        tmp.write(file.content)
        loader = Loader(tmp.name)
documents = loader.load()
docs = text_splitter.split_documents(documents)
for i, doc in enumerate(docs):
doc.metadata["source"] = f"source_{i}"
return docs
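
# Each chunk produced above becomes a Document whose metadata["source"] is "source_0",
# "source_1", ... so that answers returned later can cite the exact chunk they came from.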
def get_docsearch(file: AskFileResponse):
"""
Retrieve our data from the loaded embeddings
"""
docs = process_file(file)
#Save data in the user session
cl.user_session.set("docs",docs)
#Create a unique namespace for the file
docsearch = Chroma.from_documents(
docs, embeddings
)
return docsearch
# Chainlit functions
@cl.on_chat_start
async def start():
# sending an image to an local file host
    await cl.Message(content="You can now query your documents/PDFs").send()
files = None
while files is None:
files = await cl.AskFileMessage(
content = welcome_message,
accept=["text/plain", "application/pdf"],
max_size_mb = 20,
timeout=180
).send()
files = files[0] # Authors naming is not consistent here!
    msg = cl.Message(content=f"Processing '{files.name}'")
await msg.send()
# No aysn implementation in Pineclone client
docsearch = await cl.make_async(get_docsearch)(files) # this calls the 2nd function defined.
chain = RetrievalQAWithSourcesChain.from_chain_type(
ChatOpenAI(temperature=0, streaming=True),
chain_type ="stuff",
retriever = docsearch.as_retriever(max_tokens_limit=4097)
)
#Let the user know that the system is ready
    msg.content = f"'{files.name}' has been processed. You may now ask questions."
await msg.update()
cl.user_session.set("chain", chain)
@cl.on_message
async def main(message):
"""
"""
chain = cl.user_session.get("chain") # type: RetrievalQAWithSourcesChain
answer_prefix_tokens=["FINAL", "ANSWER"]
cb = cl.AsyncLangchainCallbackHandler(
stream_final_answer=True,
answer_prefix_tokens=answer_prefix_tokens,
)
cb.answer_reached = True
res = await chain.acall(message, callbacks=[cb])
answer = res["answer"]
sources = res["sources"].strip()
source_elements = []
# get the documents from the user session
docs = cl.user_session.get("docs")
metadatas = [doc.metadata for doc in docs]
all_sources = [m["source"] for m in metadatas]
if sources:
found_sources =[]
# Add sources to the message
for source in sources.split(","):
source_name = source.strip().replace(".", "")
# Get the index of the source
try:
index = all_sources.index(source_name)
except ValueError:
continue
text = docs[index].page_content
found_sources.append(source_name)
#Create the text element referenced in the message
source_elements.append(cl.Text(content=text, name=source_name))
if found_sources:
answer += f"\nSources: {','.join(found_sources)}"
else:
            answer += "\nNo sources found :("
if cb.has_streamed_final_answer:
cb.final_stream.elements = source_elements
await cb.final_stream.update()
else:
await cl.Message(content=answer, elements=source_elements).send()
| [] |
2024-01-10 | WayneQwele/LLMwebappdashboard | llmsummary.py |
""" This module contains functions for generating summary descriptions of crypto currency pairs using an LLM. """
import os
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import time
from typing import List, Dict
import asyncio
import toml
# Load the secrets file
secrets = toml.load('.secrets/secrets.toml')
# Set the environment variables
for key, value in secrets.items():
os.environ[key] = value
async def async_generate_summary(chain: LLMChain, product: str) -> str:
"""
This function generates a summary description of a crypto currency pair using an LLM.
Args:
chain (LLMChain): The LLMChain object to use for generating the summary.
product (str): The name of the crypto currency pair to generate the summary for.
Returns:
str: The generated summary description of the crypto currency pair.
"""
resp = await chain.arun(product=product)
return resp
async def generate_summary_concurrently(ticker_pairs: List[str]) -> Dict[str, str]:
"""
This function generates summary descriptions of multiple crypto currency pairs concurrently using an LLM.
Args:
ticker_pairs (List[str]): A list of names of the crypto currency pairs to generate the summaries for.
Returns:
Dict[str, str]: A dictionary mapping each crypto currency pair name to its generated summary description.
"""
llm = OpenAI(temperature=0.1)
prompt = PromptTemplate(
input_variables=["product"],
template="Write a summary description of the crypto currency pair {product} highlighting key attributes and popularity, begin by writing the original name of the crypto currency pair first and then the rest of the description. format response in markdown language.",
)
chain = LLMChain(llm=llm, prompt=prompt)
tasks = [async_generate_summary(chain, product)
for product in ticker_pairs]
responses = await asyncio.gather(*tasks)
return dict(zip(ticker_pairs, responses))
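
if __name__ == "__main__":
    # Example usage (a sketch; the ticker pair names below are placeholders):
    import asyncio
    summaries = asyncio.run(generate_summary_concurrently(["BTCUSDT", "ETHUSDT"]))
    for pair, summary in summaries.items():
        print(pair, summary, sep="\n")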
| [
"Write a summary description of the crypto currency pair {product} highlighting key attributes and popularity, begin by writing the original name of the crypto currency pair first and then the rest of the description. format response in markdown language."
] |
2024-01-10 | Sefaria/LLM | topic_prompt~uniqueness_of_source.py | """
Given all the sources curated for a topic, determine what is unique about this source
"""
import json
import re
from functools import reduce
import django
django.setup()
from sefaria.model import *
from typing import List
from util.general import get_ref_text_with_fallback
from sheet_interface import get_topic_and_orefs
import langchain
from langchain.prompts import PromptTemplate
from langchain.schema import HumanMessage, SystemMessage
from langchain.chat_models import ChatOpenAI
from langchain.cache import SQLiteCache
langchain.llm_cache = SQLiteCache(database_path=".langchain.db")
def _get_prompt_inputs(oref: Ref, other_orefs: List[Ref], topic: Topic):
topic_title = topic.get_primary_title("en")
topic_description = getattr(topic, "description", {}).get("en", "N/A")
comparison_sources_list = []
max_len = 7000
for other_oref in other_orefs:
other_text = get_ref_text_with_fallback(other_oref, "en", auto_translate=True)
curr_len = reduce(lambda a, b: a + len(b), comparison_sources_list, 0)
if curr_len + len(other_text) < max_len:
comparison_sources_list += [other_text]
return {
"topic_title": topic_title,
"topic_description": topic_description,
"input_source": get_ref_text_with_fallback(oref, "en", auto_translate=True),
"comparison_sources": json.dumps(comparison_sources_list)
}
def _get_other_orefs_on_topic(oref: Ref, lang: str, topic: Topic) -> List[Ref]:
ref_topics_links = topic.link_set("refTopic", {f"descriptions.{lang}": {"$exists": True}, "ref": {"$ne": oref.normal()}})
other_orefs = []
for link in ref_topics_links:
try:
other_orefs += [Ref(link.ref)]
except:
continue
return other_orefs
def get_uniqueness_of_source(oref: Ref, lang: str, topic: Topic) -> str:
other_orefs = _get_other_orefs_on_topic(oref, lang, topic)
return _get_uniqueness_of_source_as_compared_to_other_sources(oref, other_orefs, topic)
def summarize_based_on_uniqueness(text: str, uniqueness: str) -> str:
llm = ChatOpenAI(model="gpt-4", temperature=0)
system_message = SystemMessage(content=
"You are an intelligent Jewish scholar who is knowledgeable in all aspects of the Torah and Jewish texts.\n"
"# Task\n"
"Given a Jewish text and an idea mentioned in this text, write a summary of the text"
" that focuses on this idea.\n"
"# Input format\n"
"Input will be in XML format with the following structure:\n"
"<text> text to be summarized according to idea </text>\n"
"<idea> idea mentioned in the text </idea>\n"
"# Output format\n"
"A summary of the text that focuses on the idea, in 50 words or less.\n"
"Wrap the summary in <summary> tags."
"Summary should start with the words \"The text discusses...\""
)
prompt = PromptTemplate.from_template("<text>{text}</text>\n<idea>{idea}</idea>")
human_message = HumanMessage(content=prompt.format(text=text, idea=uniqueness))
response = llm([system_message, human_message])
return re.search(r"<summary>\s*The text discusses (.+?)</summary>", response.content).group(1)
def _get_uniqueness_of_source_as_compared_to_other_sources(oref: Ref, other_orefs: List[Ref], topic: Topic) -> str:
uniqueness_preamble = "The input source emphasizes"
prompt_inputs = _get_prompt_inputs(oref, other_orefs, topic)
system_message = SystemMessage(content=
"You are an intelligent Jewish scholar who is knowledgeable in all aspects of the Torah and Jewish texts.\n"
"# Task\n"
"Given a list of Jewish texts about a certain topic, output the aspect that differentiates the input source from the other sources.\n"
"# Input format\n"
"Input will be in JSON format with the following structure\n"
'{'
'"topicTitle": "Title of the topic the sources are focusing on",'
'"topicDescription": "Description of the topic",'
'"inputSource": "Text of the source we want to differentiate from `comparisonSources`",'
'"comparisonSources": "List of text of sources to compare `inputSource` to"'
'}\n'
"# Output format\n"
"Output a summary that explains the aspect of `inputSource` that differentiates it "
"from `comparisonSources`.\n"
# "Summary should be no more than 20 words.\n"
"Only mention the `inputSource`. Don't mention the `comparisonSources`.\n"
f'Summary should complete the following sentence: "{uniqueness_preamble}...".'
)
prompt = PromptTemplate.from_template('{{{{'
'"topicTitle": "{topic_title}", "topicDescription": "{topic_description}",'
'"inputSource": "{input_source}", "comparisonSources": {comparison_sources}'
'}}}}')
human_message = HumanMessage(content=prompt.format(**prompt_inputs))
llm = ChatOpenAI(model="gpt-4", temperature=0)
# llm = ChatAnthropic(model="claude-2", temperature=0)
response = llm([system_message, human_message])
uniqueness = re.sub(fr'^"?{uniqueness_preamble}\s*', '', response.content)
uniqueness = re.sub(r'"$', '', uniqueness)
return uniqueness
if __name__ == '__main__':
# sheet_id = 498250
# topic, orefs = get_topic_and_orefs(sheet_id)
# for i in range(len(orefs)):
# oref = orefs[i]
# other_orefs = [r for j, r in enumerate(orefs) if j != i]
# print(get_uniqueness_of_source(oref, other_orefs, topic))
uniqueness = "the enduring presence of the Divine in the Temple, even after its destruction."
text = """[(Exod. 3:1:) <small>NOW MOSES WAS TENDING <THE FLOCK></small>.] This text is related (to Ps. 11:4): <small>THE LORD IS IN HIS HOLY TEMPLE</small>….<sup class="footnote-marker">44</sup><i class="footnote">Cf. Hab. 2:20.</i> R. Samuel bar Nahman said: Before the destruction of the Sanctuary, the Divine Presence was situated in the Temple, as stated (Ps. 11:4): <small>THE LORD IS IN HIS HOLY TEMPLE</small>;<sup class="footnote-marker">45</sup><i class="footnote">Exod. R. 2:2; M. Pss. 11:3.</i> but, after the Temple was destroyed, (ibid. cont.:) <small>THE LORD'S THRONE IS IN THE HEAVENS</small>. He had removed his Divine Presence to the heavens. R. Eleazar ben Pedat said: Whether the Temple is destroyed or not destroyed, the Divine Presence has not moved from its place, as stated (in Ps. 11:4): <small>THE LORD IS IN HIS HOLY TEMPLE</small>. And where is it shown? Where it is stated (in I Kings 9:3): <small>MY EYES AND MY HEART SHALL BE THERE FOR ALL TIME</small>. It also says so (in Ps. 3:5 [4]): <small>I RAISE MY VOICE UNTO THE LORD, AND HE ANSWERS ME FROM HIS HOLY HILL. SELAH</small>. For even though it is <only> a hill,<sup class="footnote-marker">46</sup><i class="footnote"><i>Midrash Tanhuma</i> (Jerusalem: Eshkol: n.d.), vol. 1, appendix, p. 90, n. 2, suggests emending <i><small>HR</small></i> (“hill”) to <i><small>HRB</small></i> (“destroyed”) so that the clause would read in agreement with <i>Codex Vaticanus Ebr</i>. 34 and Exod. R. 2:2: “For even though it is destroyed.”</i> here he remains in his holiness. R. Eleazar ben Pedat said: See what is written (in Ezra 1:3): <small>AND LET HIM BUILD THE HOUSE OF THE LORD GOD OF ISRAEL. HE IS THE GOD WHO IS IN JERUSALEM</small>. He has not moved from there. R. Aha said: The Divine Presence has never moved from the West Wall (i.e., the Wailing Wall) of the Sanctuary. Thus it is stated (in Cant. 2:9): <small>THERE HE STANDS BEHIND OUR WALL</small>. Ergo (in Ps. 11:4): <small>THE LORD IS IN HIS HOLY TEMPLE</small>. R. Jannay said: Although they said (in Ps. 11:4): <small>THE LORD IS IN HIS HOLY TEMPLE; THE LORD HAS HIS THRONE IN THE HEAVENS</small>; < nevertheless > (the verse continues), <small>HIS EYES BEHOLD, HIS EYELIDS TEST THE CHILDREN OF ADAM</small>. To what is the matter comparable? To a king who had an orchard<sup class="footnote-marker">47</sup><i class="footnote"><i>Pardes</i>. Cf. the Gk.: <i>paradeisos</i>, i.e., “paradise.”</i> and brought in the workers. Now by the orchard gate there was a certain storehouse full of everything good. The king said: Whoever does his work wholeheartedly will receive his reward from here, but whoever does not do his work wholeheartedly, him I shall return to my palace<sup class="footnote-marker">48</sup><i class="footnote">Lat.: <i>palatium</i>.</i> and judge. Who is this king? This is the Supreme King of Kings, the Holy One. And what is the garden? It is this world. Within it the Holy One has put the children of Adam so that they may observe the Torah. But he has made a stipulation with them and said to them: For everyone who truly observes the Torah, here is paradise < lying > before him; but for everyone who does not truly observe the Torah, here is Gehinnom < lying > before him. The Holy One said: Although I seemed to have removed my Divine Presence from the Sanctuary, still (in Ps. 
11:4): <small>MY EYES BEHOLD, <MY EYELIDS TEST THE CHILDREN OF ADAM</small> >.<sup class="footnote-marker">49</sup><i class="footnote">The Masoretic Text of this verse reads “his” for <small>MY</small> in both places.</i> Whom does he test? (According to vs. 5:) <small>THE LORD TESTS THE RIGHTEOUS</small>. And why does he not test the wicked? R. Jannay said: When the flax worker is pounding away and sees that the flax is good, he pounds it a lot; but, when he sees that it is not good, he does not pound on it, lest it be spoiled.<sup class="footnote-marker">50</sup><i class="footnote">Gen. R. 32:3; 34:2; 55:2; Cant. R. 2:16:2.</i> Ergo (in Ps. 11:4:) <small>HIS EYES BEHOLD, HIS EYELIDS TEST THE CHILDREN OF ADAM</small>. [And whom does he test? The righteous, as stated (in vs. 5):] <small>THE LORD TESTS THE RIGHTEOUS</small>."""
print(summarize_based_on_uniqueness(text, uniqueness))
| [
"You are an intelligent Jewish scholar who is knowledgeable in all aspects of the Torah and Jewish texts.\n# Task\nGiven a list of Jewish texts about a certain topic, output the aspect that differentiates the input source from the other sources.\n# Input format\nInput will be in JSON format with the following structure\n{\"topicTitle\": \"Title of the topic the sources are focusing on\",\"topicDescription\": \"Description of the topic\",\"inputSource\": \"Text of the source we want to differentiate from `comparisonSources`\",\"comparisonSources\": \"List of text of sources to compare `inputSource` to\"}\n# Output format\nOutput a summary that explains the aspect of `inputSource` that differentiates it from `comparisonSources`.\n",
"S THRONE IS IN THE HEAVENS</small>. He had removed his Divine Presence to the heavens. R. Eleazar ben Pedat said: Whether the Temple is destroyed or not destroyed, the Divine Presence has not moved from its place, as stated (in Ps. 11:4): <small>THE LORD IS IN HIS HOLY TEMPLE</small>. And where is it shown? Where it is stated (in I Kings 9:3): <small>MY EYES AND MY HEART SHALL BE THERE FOR ALL TIME</small>. It also says so (in Ps. 3:5 [4]): <small>I RAISE MY VOICE UNTO THE LORD, AND HE ANSWERS ME FROM HIS HOLY HILL. SELAH</small>. For even though it is <only> a hill,<sup class=\"footnote-marker\">46</sup><i class=\"footnote\"><i>Midrash Tanhuma</i> (Jerusalem: Eshkol: n.d.), vol. 1, appendix, p. 90, n. 2, suggests emending <i><small>HR</small></i> (“hill”) to <i><small>HRB</small></i> (“destroyed”) so that the clause would read in agreement with <i>Codex Vaticanus Ebr</i>. 34 and Exod. R. 2:2: “For even though it is destroyed.”</i> here he remains in his holiness. R. Eleazar ben Pedat said: See what is written (in Ezra 1:3): <small>AND LET HIM BUILD THE HOUSE OF THE LORD GOD OF ISRAEL. HE IS THE GOD WHO IS IN JERUSALEM</small>. He has not moved from there. R. Aha said: The Divine Presence has never moved from the West Wall (i.e., the Wailing Wall) of the Sanctuary. Thus it is stated (in Cant. 2:9): <small>THERE HE STANDS BEHIND OUR WALL</small>. Ergo (in Ps. 11:4): <small>THE LORD IS IN HIS HOLY TEMPLE</small>. R. Jannay said: Although they said (in Ps. 11:4): <small>THE LORD IS IN HIS HOLY TEMPLE; THE LORD HAS HIS THRONE IN THE HEAVENS</small>; < nevertheless > (the verse continues), <small>HIS EYES BEHOLD, HIS EYELIDS TEST THE CHILDREN OF ADAM</small>. To what is the matter comparable? To a king who had an orchard<sup class=\"footnote-marker\">47</sup><i class=\"footnote\"><i>Pardes</i>. Cf. the Gk.: <i>paradeisos</i>, i.e., “paradise.”</i> and brought in the workers. Now by the orchard gate there was a certain storehouse full of everything good. The king said: Whoever does his work wholeheartedly will receive his reward from here, but whoever does not do his work wholeheartedly, him I shall return to my palace<sup class=\"footnote-marker\">48</sup><i class=\"footnote\">Lat.: <i>palatium</i>.</i> and judge. Who is this king? This is the Supreme King of Kings, the Holy One. And what is the garden? It is this world. Within it the Holy One has put the children of Adam so that they may observe the Torah. But he has made a stipulation with them and said to them: For everyone who truly observes the Torah, here is paradise < lying > before him; but for everyone who does not truly observe the Torah, here is Gehinnom < lying > before him. The Holy One said: Although I seemed to have removed my Divine Presence from the Sanctuary, still (in Ps. 11:4): <small>MY EYES BEHOLD, <MY EYELIDS TEST THE CHILDREN OF ADAM</small> >.<sup class=\"footnote-marker\">49</sup><i class=\"footnote\">The Masoretic Text of this verse reads “his” for <small>MY</small> in both places.</i> Whom does he test? (According to vs. 5:) <small>THE LORD TESTS THE RIGHTEOUS</small>. And why does he not test the wicked? R. Jannay said: When the flax worker is pounding away and sees that the flax is good, he pounds it a lot; but, when he sees that it is not good, he does not pound on it, lest it be spoiled.<sup class=\"footnote-marker\">50</sup><i class=\"footnote\">Gen. R. 32:3; 34:2; 55:2; Cant. R. 2:16:2.</i> Ergo (in Ps. 11:4:) <small>HIS EYES BEHOLD, HIS EYELIDS TEST THE CHILDREN OF ADAM</small>. [And whom does he test? 
The righteous, as stated (in vs. 5):] <small>THE LORD TESTS THE RIGHTEOUS</small>.:1:) <small>NOW MOSES WAS TENDING <THE FLOCK></small>.] This text is related (to Ps. 11:4): <small>THE LORD IS IN HIS HOLY TEMPLE</small>….<sup class=\"footnote-marker\">44</sup><i class=\"footnote\">Cf. Hab. 2:20.</i> R. Samuel bar Nahman said: Before the destruction of the Sanctuary, the Divine Presence was situated in the Temple, as stated (Ps. 11:4): <small>THE LORD IS IN HIS HOLY TEMPLE</small>;<sup class=\"footnote-marker\">45</sup><i class=\"footnote\">Exod. R. 2:2; M. Pss. 11:3.</i> but, after the Temple was destroyed, (ibid. cont.:) <small>THE LORD",
"{{{{\"topicTitle\": \"{topic_title}\", \"topicDescription\": \"{topic_description}\",\"inputSource\": \"{input_source}\", \"comparisonSources\": {comparison_sources}}}}}",
"You are an intelligent Jewish scholar who is knowledgeable in all aspects of the Torah and Jewish texts.\n# Task\nGiven a Jewish text and an idea mentioned in this text, write a summary of the text that focuses on this idea.\n# Input format\nInput will be in XML format with the following structure:\n<text> text to be summarized according to idea </text>\n<idea> idea mentioned in the text </idea>\n# Output format\nA summary of the text that focuses on the idea, in 50 words or less.\nWrap the summary in <summary> tags.Summary should start with the words \"The text discusses...\"",
"<text>{text}</text>\n<idea>{idea}</idea>",
"\"topicTitle\": \"{topic_title}\", \"topicDescription\": \"{topic_description}\",",
"\"inputSource\": \"{input_source}\", \"comparisonSources\": {comparison_sources}",
"footnote",
"footnote-marker"
] |
2024-01-10 | Sefaria/LLM | talmud_punctuation~fine_tune~project_scripts~infer.py | import csv
import django
django.setup()
import typer
import json
from sefaria.model import *
from sefaria.utils.hebrew import strip_cantillation
import random
import os
from langchain.chat_models import ChatOpenAI
import openai
import re
from sefaria.helper.normalization import NormalizerComposer, RegexNormalizer, AbstractNormalizer
from util.general import get_removal_list
api_key = os.getenv("OPENAI_API_KEY")
seed_value = 245
random.seed(seed_value)
model_name = "ft:gpt-3.5-turbo-0613:sefaria:he-punct:8ClpgehI"
system_message = "Punctuate this Talmudic passage based on the commentary I provide. Extract the relevant punctuation marks (, : . ? ! \\\"\\\" -) from the commentary and put them in the original. Output only the original Aramaic passage with punctuation without \\\"cantilation\\\" or \\\"nikud\\\".\\n"
def get_response_openai(original_passage, commentary):
user_message = "Original Talmudic Passage:\n" + original_passage + '\n' + "Commentary:\n" + commentary
response = openai.ChatCompletion.create(
model=model_name,
messages=[
{
"role": "system",
"content": system_message
},
{
"role": "user",
"content": user_message
}
],
temperature=1,
max_tokens=600,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
# print(response)
inference = response["choices"][0]["message"]["content"]
return(inference)
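
# Example call (a sketch; the passage and commentary strings are placeholders, and a
# valid OPENAI_API_KEY plus access to the fine-tuned model above are assumed):
#   punctuated = get_response_openai("<Talmudic passage>", "<commentary text>")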
def get_response_openai_try_again(original_passage, commentary, previous_inference):
user_message = "Original Talmudic Passage:\n" + original_passage + '\n' + "Commentary:\n" + commentary
response = openai.ChatCompletion.create(
model=model_name,
messages=[
{
"role": "system",
"content": system_message
},
{
"role": "user",
"content": user_message
},
{
"role": "assistant",
"content": previous_inference
},
{
"role": "user",
"content": "continue"
},
],
temperature=1,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
inference = response["choices"][0]["message"]["content"]
return(inference)
def read_csv(file_path):
"""
Read a CSV file and return a list of tuples.
Parameters:
- file_path (str): The path to the CSV file.
Returns:
- list of tuples: Each tuple represents a row in the CSV file.
"""
data = []
with open(file_path, 'r', newline='') as csvfile:
csv_reader = csv.reader(csvfile)
for row in csv_reader:
data.append(tuple(row))
return data
def write_tuples_to_csv(data, csv_filename):
"""
Write a list of tuples to a CSV file.
Parameters:
- data: List of tuples to be written to CSV.
- csv_filename: Name of the CSV file to be created or overwritten.
"""
with open(csv_filename, 'w', newline='') as csvfile:
csv_writer = csv.writer(csvfile)
# Write header if needed
# csv_writer.writerow(['Column1', 'Column2', ...]) # Uncomment and replace with your column names
# Write data
csv_writer.writerows(data)
def find_first_occurrence_indices(text, regex_pattern):
"""
Returns:
- tuple: A tuple containing the start and end indexes of the first match, or None if no match is found.
"""
match = re.search(regex_pattern, text)
if match:
return match.span()
else:
return None
def remove_first_occurrence(text, regex_pattern):
return re.sub(regex_pattern, '', text, count=1)
def compute_old_indices_from_punctuated(punctuated_text):
old_indices = []
punctuationre = re.compile(
r'[\.\!\?\:\,\u05F4](?)|—\s')
while(True):
span = find_first_occurrence_indices(punctuated_text, punctuationre)
if span:
old_indices.append((span[0], span[0]))
else:
break
punctuated_text = remove_first_occurrence(punctuated_text, punctuationre)
return (old_indices)
def find_all_tokens(regex_pattern, text):
"""
Find all tokens captured by a regex pattern in the given text.
Parameters:
- regex_pattern (str): The regular expression pattern.
- text (str): The input text.
Returns:
- list: A list of all tokens captured by the regex pattern.
"""
matches = re.findall(regex_pattern, text)
return matches
# def get_tuples_with_element(lst, i, x):
# return [t for t in lst if len(t) > i and t[i] == x]
def insert_tokens_at_spans(s, spans, tokens):
result = list(s)
result.append('')
starts = [t[0] for t in spans]
for i, element in enumerate(result):
if i in starts:
result[i] = tokens[0] + result[i]
tokens = tokens[1:]
return ''.join(result)
def remove_tokens(regex_pattern, text):
return re.sub(regex_pattern, '', text)
def realign_entities(punctuated_text: str, vocalised_text: str) -> str:
punctuationre = re.compile(
r'[\.\!\?\:\,\u05F4]+(?)|—\s')
unpuncutated_text = remove_tokens(punctuationre, punctuated_text)
removal_list = get_removal_list(vocalised_text, unpuncutated_text)
temp_normalizer = AbstractNormalizer()
mapping = temp_normalizer.get_mapping_after_normalization(vocalised_text, removal_list, reverse=False)
old_inds = compute_old_indices_from_punctuated(punctuated_text)
# a = insert_tokens_at_spans("Moses Is the best", compute_old_indices_from_punctuated("Moses. Is, the best!"), find_all_tokens(punctuationre, "Moses. Is, the best!"))
a = insert_tokens_at_spans(unpuncutated_text, old_inds, find_all_tokens(punctuationre, punctuated_text))
new_inds = temp_normalizer.convert_normalized_indices_to_unnormalized_indices(old_inds, mapping, reverse=False)
vocalised_text = insert_tokens_at_spans(vocalised_text, new_inds, find_all_tokens(punctuationre, punctuated_text))
def punctuate_single_word(punctuated_word, unpunctuated_word):
punctuations_end_one_char = {'.', ',', ';', ':', '!', '?', "״"}
punctuations_end_two_chars = {'?!'}
punctuated_word_no_heifen = punctuated_word.replace('—', '')
if len(punctuated_word_no_heifen) >= 4 and punctuated_word[-3] in punctuations_end_one_char and punctuated_word_no_heifen[-2] in punctuations_end_one_char and punctuated_word_no_heifen[-1] in punctuations_end_one_char:
unpunctuated_word += punctuated_word_no_heifen[-3:]
elif len(punctuated_word_no_heifen) >= 3 and punctuated_word_no_heifen[-2] in punctuations_end_one_char and punctuated_word_no_heifen[-1] in punctuations_end_one_char:
unpunctuated_word += punctuated_word_no_heifen[-2:]
elif len(punctuated_word_no_heifen) >= 2 and punctuated_word_no_heifen[-1] in punctuations_end_one_char:
unpunctuated_word += punctuated_word_no_heifen[-1]
if len(punctuated_word_no_heifen) >= 2 and punctuated_word_no_heifen[0] == "״":
unpunctuated_word = "״" + unpunctuated_word
if punctuated_word.endswith('—'):
unpunctuated_word += ' —'
return unpunctuated_word
def is_subsequence(sub, main):
it = iter(main)
return all(item in it for item in sub)
def punctuate_vocalised(punctuated_text: str, vocalised_text: str) -> str:
if "ועשה על פיהם״, ״שוגג״ למה לי?" in punctuated_text:
halt = True
punctuated_text_list = punctuated_text.replace(' —', '—').split()
vocalised_text_list = vocalised_text.split()
vocalised_text_list_suffix = vocalised_text.split()
# if len(punctuated_text_list) != len(vocalised_text_list):
# print("Oh!")
punctuationre = re.compile(
r'[\.\!\?\:\,\u05F4]+(?)|—\s')
matches = []
global_vocalized_index = 0
for puncutated_word in punctuated_text_list:
unpuncutated_word = puncutated_word.replace('—', '')
unpuncutated_word = remove_tokens(punctuationre, unpuncutated_word)
for index, vocalised_word in enumerate(vocalised_text_list_suffix):
if is_subsequence(list(unpuncutated_word), list(vocalised_word)):
vocalised_text_list_suffix = vocalised_text_list_suffix[index+1:]
global_vocalized_index += index
matches += [(puncutated_word, vocalised_word, global_vocalized_index)]
vocalised_text_list[global_vocalized_index] = punctuate_single_word(puncutated_word, vocalised_word)
global_vocalized_index += 1
break
return ' '.join(vocalised_text_list)
if __name__ == '__main__':
# typer.run(visualize)
inferences = []
inferences.append(("Ref", "Original", "Inference"))
punctuationre = re.compile(
r'[\.\!\?\:\,\u05F4]+(?)|—\s')
# ref_text_pairs = [(seg.tref, seg.text('he', "William Davidson Edition - Aramaic").text, Ref("Steinsaltz on " + seg.tref).text('he').text) for seg in Ref("Horayot").all_segment_refs()]
#
# for pair in ref_text_pairs:
# # print(pair[2])
# inference = get_response_openai(pair[1], pair[2])
# inferences.append((pair[0], pair[1], inference))
# # print(inference)
# no_punct = punctuationre.sub('', inference)
# if len(punctuationre.sub('', inference).split()) < len(pair[1].split()):
# print("omission!")
# print(pair[0])
# # print(get_response_openai_try_again(pair[1], pair[2], inference))
# write_tuples_to_csv(inferences, "horayot_inferences.csv")
tuples = read_csv("horayot_inferences.csv")[1:]
ref_original_punctuated_vocalised = []
for tuple in tuples:
ref_original_punctuated_vocalised.append((tuple[0], tuple[1], tuple[2], (Ref(tuple[0]).text('he', "William Davidson Edition - Vocalized Aramaic").text)))
ref_original_punctuated_vocalised_punctuatedvocalized = [("Ref", "Original", "Inference", "Original Vocalized", "Inference Vocalized")]
for tuple in ref_original_punctuated_vocalised:
# a = compute_old_indices_from_punctuated(tuple[1])
punctuated_and_vocalized = punctuate_vocalised(tuple[2], tuple[3])
ref_original_punctuated_vocalised_punctuatedvocalized += [(tuple[0], tuple[1], tuple[2], tuple[3], punctuated_and_vocalized)]
write_tuples_to_csv(ref_original_punctuated_vocalised_punctuatedvocalized, "horayot_inferences_vocalized.csv")
| [
"continue"
] |
2024-01-10 | Sefaria/LLM | topic_prompt~toprompt_llm_prompt.py | import csv
import random
from util.openai import count_tokens_openai
from util.sentencizer import sentencize
from util.general import get_raw_ref_text, get_ref_text_with_fallback
from uniqueness_of_source import get_uniqueness_of_source
from contextualize import get_context
from typing import List
from sefaria.model import *
from pydantic import BaseModel, Field
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import PromptTemplate, BasePromptTemplate
from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.example_selector import LengthBasedExampleSelector
random.seed(23223)
class TopromptLLMOutput(BaseModel):
    why: str = Field(description="Why should I care about this source? Limit to one sentence. Do NOT summarize the source. The goal is to engage the reader without summarizing.")
    what: str = Field(description="What do I need to know in order to be able to understand this source? Limit to one sentence. Do NOT summarize the source. The goal is to engage the reader without summarizing.")
title: str = Field(description="Contextualizes the source within the topic. DO NOT mention the source book in the title.")
class TopromptLLMPrompt:
def __init__(self, lang: str, topic: Topic, oref: Ref):
self.lang: str = lang
self.topic: Topic = topic
self.oref: Ref = oref
def get(self) -> BasePromptTemplate:
example_generator = TopromptExampleGenerator(self.lang)
examples = example_generator.get()
example_prompt = PromptTemplate.from_template('<topic>{topic}</topic>\n'
'<unique_aspect>{unique_aspect}</unique_aspect>'
'<context>{context}</context>'
'<output>{{{{'
'"why": "{why}", "what": "{what}", "title": "{title}"'
'}}}}</output>')
intro_prompt = TopromptLLMPrompt._get_introduction_prompt() + self._get_formatting_prompt()
input_prompt = self._get_input_prompt()
format_instructions = get_output_parser().get_format_instructions()
example_selector = LengthBasedExampleSelector(
examples=examples,
example_prompt=example_prompt,
max_length=7500-count_tokens_openai(intro_prompt+" "+input_prompt+" "+format_instructions),
get_text_length=count_tokens_openai
)
prompt = FewShotPromptTemplate(
example_selector=example_selector,
example_prompt=example_prompt,
prefix=intro_prompt,
suffix=input_prompt,
partial_variables={"format_instructions": format_instructions},
input_variables=[],
)
return prompt
@staticmethod
def _get_introduction_prompt() -> str:
return (
"<identity>\n"
"You are a Jewish scholar knowledgeable in all texts relating to Torah, Talmud, Midrash etc. You are writing "
"for people curious in learning more about Judaism."
"</identity>"
"<task>\n"
"Write description of a Jewish text such that it persuades the reader to read the full source. The description "
"should orient them with the essential information they need in order to learn the text. "
"The title should contextualize the source within the topic; it should be inviting and specific to the source."
"</task>"
"\n"
)
@staticmethod
def _get_formatting_prompt() -> str:
return (
"<input_format>Input has the following format:\n"
"<topic>Name of the topic</topic>\n"
"<author>Author of the source</author>\n"
"<publication_year>Year the source was published</publication_year>\n"
"<book_description>Optional. Description of the source book</book_description>"
"<commentary> (optional): when it exists, use commentary to inform understanding of `<unique_aspect>`. DO NOT"
" refer to the commentary in the final output. Only use the commentary to help understand the source."
"</commentary>\n"
"<unique_aspect> Unique perspective this source has on the topic. Use this to understand why a user would "
"want to learn this source for this topic.</unique_aspect>\n"
"<context> (optional): when it exists this provides further context about the source. Use this to provide"
" more context to the reader."
"</input_format>"
)
def _get_input_prompt(self) -> str:
return (
"{format_instructions} " +
"<input>\n" + self._get_input_prompt_details() + "</input>"
)
def _get_book_description(self, index: Index):
desc_attr = f"{self.lang}Desc"
book_desc = getattr(index, desc_attr, "N/A")
if "Yerushalmi" in index.categories:
book_desc = book_desc.replace(index.title.replace("Jerusalem Talmud ", ""), index.title)
print(book_desc)
if index.get_primary_category() == "Mishnah":
book_desc = book_desc.replace(index.title.replace("Mishnah ", ""), index.title)
print(book_desc)
return book_desc
def _get_input_prompt_details(self) -> str:
index = self.oref.index
book_desc = self._get_book_description(index)
composition_time_period = index.composition_time_period()
pub_year = composition_time_period.period_string(self.lang) if composition_time_period else "N/A"
try:
author_name = Topic.init(index.authors[0]).get_primary_title(self.lang) if len(index.authors) > 0 else "N/A"
except AttributeError:
author_name = "N/A"
category = index.get_primary_category()
context = get_context(self.oref)
print(f"Context for {self.oref.normal()}\n"
f"{context}")
prompt = f"<topic>{self.topic.get_primary_title('en')}</topic>\n" \
f"<author>{author_name}</author>\n" \
f"<publication_year>{pub_year}</publication_year>\n" \
f"<unique_aspect>{get_uniqueness_of_source(self.oref, self.lang, self.topic)}</unique_aspect>\n" \
f"<context>{context}</context>"
if True: # category not in {"Talmud", "Midrash", "Tanakh"}:
prompt += f"\n<book_description>{book_desc}</book_description>"
if category in {"Tanakh"}:
from summarize_commentary.summarize_commentary import summarize_commentary
commentary_summary = summarize_commentary(self.oref.normal(), self.topic.slug, company='anthropic')
print("commentary\n\n", commentary_summary)
prompt += f"\n<commentary>{commentary_summary}</commentary>"
return prompt
class ToppromptExample:
_hard_coded_sents = {
'In biblical sources, the Temple is presented as God\'s home. This work of rabbinic interpretations on the Book of Exodus asks the question‚ "Where is God?" in light of the destruction of the Temple.': [
"In biblical sources, the Temple is presented as God's home.",
'This work of rabbinic interpretations on the Book of Exodus asks the question‚ "Where is God?" in light of the destruction of the Temple.',
],
'Why is the shofar called a shofar? What does it mean? This ancient midrash from the land of Israel points out that the word “shofar” is spelled in the same order and the same letters as the Hebrew verb that means “to improve” and thereby suggests its meaning.': [
'Why is the shofar called a shofar? What does it mean?',
'This ancient midrash from the land of Israel points out that the word “shofar” is spelled in the same order and the same letters as the Hebrew verb that means “to improve” and thereby suggests its meaning.',
],
}
def __init__(self, lang, ref_topic_link: RefTopicLink):
self.lang = lang
self.topic = Topic.init(ref_topic_link.toTopic)
self.oref = Ref(ref_topic_link.ref)
prompt_dict = ref_topic_link.descriptions[lang]
self.title = prompt_dict['title']
prompt = prompt_dict['prompt']
prompt_sents = self._hard_coded_sents.get(prompt, sentencize(prompt))
assert len(prompt_sents) == 2
self.why = prompt_sents[0]
self.what = prompt_sents[1]
self.unique_aspect = get_uniqueness_of_source(self.oref, self.lang, self.topic)
self.context = get_context(self.oref)
def serialize(self):
out = {
"topic": self.topic.get_primary_title(self.lang),
"title": self.title,
"why": self.why,
"what": self.what,
"unique_aspect": self.unique_aspect,
"context": self.context,
}
return out
class TopromptExampleGenerator:
def __init__(self, lang: str):
self.lang: str = lang
def get(self) -> List[dict]:
# toprompts = self._get_existing_toprompts()
toprompts = self._get_training_set()
examples = []
for itopic, ref_topic_link in enumerate(toprompts):
examples += [ToppromptExample(self.lang, ref_topic_link)]
return [example.serialize() for example in examples]
def _get_training_set(self) -> List[RefTopicLink]:
ref_topic_links = []
with open("input/topic_prompt_training_set.csv", "r") as fin:
cin = csv.DictReader(fin)
for row in cin:
if len(RefTopicLinkSet(self._get_query_for_ref_topic_link_with_prompt(slug=row["Slug"]))) == 0:
print(row["Slug"])
continue
ref_topic_links += [RefTopicLink(
attrs={
"ref": row["Reference"],
"toTopic": row["Slug"],
"descriptions": {
self.lang: {
"prompt": row["Prompt"],
"title": row["Title"],
}
}
}
)]
random.shuffle(ref_topic_links)
return ref_topic_links
def _get_existing_toprompts(self):
link_set = RefTopicLinkSet(self._get_query_for_ref_topic_link_with_prompt())
# make unique by toTopic
slug_link_map = {}
for link in link_set:
slug_link_map[link.toTopic] = link
return list(slug_link_map.values())
def _get_query_for_ref_topic_link_with_prompt(self, slug=None):
query = {f"descriptions.{self.lang}": {"$exists": True}}
if slug is not None:
query['toTopic'] = slug
return query
def get_output_parser():
return PydanticOutputParser(pydantic_object=TopromptLLMOutput)
| [
"en",
"<publication_year>PLACEHOLDER</publication_year>\n",
"<context>{context}</context>",
"\"why\": \"{why}\", \"what\": \"{what}\", \"title\": \"{title}\"",
"\n<book_description>PLACEHOLDER</book_description>",
"<output>{{{{",
"format_instructions",
"<context>PLACEHOLDER</context>",
"<unique_aspect>{unique_aspect}</unique_aspect>",
"<author>PLACEHOLDER</author>\n",
"<topic>{topic}</topic>\n",
"<topic>{topic}</topic>\n<unique_aspect>{unique_aspect}</unique_aspect><context>{context}</context><output>{{{{\"why\": \"{why}\", \"what\": \"{what}\", \"title\": \"{title}\"}}}}</output>",
"\n<commentary>PLACEHOLDER</commentary>",
"}}}}</output>"
] |
2024-01-10 | Sefaria/LLM | util~fine_tune~create_fine_tune.py | import typer
import os
import openai
import json
openai.api_key = os.getenv("OPENAI_API_KEY")
def _get_file_ids():
with open("output/fine_tune_file_ids.json", "r") as fin:
file_ids = json.load(fin)
return file_ids['training_file_id'], file_ids['validation_file_id']
def create_fine_tune_job(model: str, suffix: str):
training_file_id, validation_file_id = _get_file_ids()
fine_tuning_job = openai.FineTuningJob.create(
model=model,
training_file=training_file_id,
validation_file=validation_file_id,
suffix=suffix,
hyperparameters={
"n_epochs": 5
}
)
return fine_tuning_job["id"]
def monitor_fine_tune_job(job_id):
import time
while True:
fine_tuning_status = openai.FineTune.get_status(job_id)
status = fine_tuning_status["status"]
print(f"Fine-tuning job status: {status}")
if status in ["completed", "failed"]:
break
time.sleep(60)
if __name__ == '__main__':
typer.run(create_fine_tune_job)
| [] |
2024-01-10 | Sefaria/LLM | talmud_punctuation~fine_tune~project_scripts~compare_gold_with_inferred.py | import csv
import difflib
import django
django.setup()
import typer
import json
from sefaria.model import *
from sefaria.utils.hebrew import strip_cantillation
import random
import os
from langchain.chat_models import ChatOpenAI
import openai
api_key = os.getenv("OPENAI_API_KEY")
seed_value = 613
random.seed(seed_value)
# def create_data(output_training_filename: str, output_validation_filename: str):
# all_samples = []
# for masechet in masechtot_ordered:
# print("creating data from Masechet " + masechet)
# all_segment_refs = Ref(masechet).all_segment_refs()
# for segment_ref in all_segment_refs:
# non_punctuated = segment_ref.text('he', "William Davidson Edition - Aramaic").text
# punctuated = strip_cantillation(segment_ref.text('he').text, strip_vowels=True)
# steinsalz = Ref("Steinsaltz on " + segment_ref.normal()).text('he').text
# all_samples.append(create_new_context(task_desciption, non_punctuated, steinsalz, punctuated))
# if masechet == last_masechet:
# break
#
# #get only limited num of samples
# samples_trimmed = []
# samples_trimmed = random.sample(all_samples, sample_size)
#
# # Calculate the number of items for training
# num_train = int(len(samples_trimmed) * train_proportion)
#
# # Use random.sample to partition the list according to the seed
# train_samples = random.sample(samples_trimmed, num_train)
# validation_samples = [item for item in samples_trimmed if item not in train_samples]
#
# with open(output_training_filename, 'w', encoding='utf-8') as jsonl_file:
# for json_obj in train_samples:
# # Use ensure_ascii=False to encode Unicode characters
# json_line = json.dumps(json_obj, ensure_ascii=False)
# jsonl_file.write(json_line + '\n')
# with open(output_validation_filename, 'w', encoding='utf-8') as jsonl_file:
# for json_obj in validation_samples:
# # Use ensure_ascii=False to encode Unicode characters
# json_line = json.dumps(json_obj, ensure_ascii=False)
# jsonl_file.write(json_line + '\n')
#
#
# print("TRAINING SAMPLES: " + str(len(train_samples)))
# print("VALIDATION SAMPLES: " + str(len(validation_samples)))
def write_lists_to_csv(list1, list2, filename, header1, header2):
# Combine the lists into a list of tuples
data = list(zip(list1, list2))
# Open the CSV file in write mode
with open(filename, 'w', newline='', encoding='utf-8') as csvfile:
# Create a CSV writer
csvwriter = csv.writer(csvfile)
# Write the headers
csvwriter.writerow([header1, header2])
# Write the data
csvwriter.writerows(data)
def read_json_lines_to_list(file_path):
data_list = []
with open(file_path, 'r', encoding='utf-8') as json_file:
for line in json_file:
try:
data = json.loads(line)
data_list.append(data)
except json.JSONDecodeError as e:
print(f"Error decoding JSON: {e}")
    return data_list
def get_response_openai(sample, model_name):
response = openai.ChatCompletion.create(
model=model_name,
messages=[
{
"role": "system",
"content": sample["messages"][0]["content"]
},
{
"role": "user",
"content": sample["messages"][1]["content"]
}
],
temperature=1,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
# print(response)
inference = response["choices"][0]["message"]["content"]
return(inference)
print() # Move to the next line
if __name__ == '__main__':
# typer.run(visualize)
print("hi")
model_name = "ft:gpt-3.5-turbo-0613:sefaria:he-punct:8ClpgehI"
golden_standard = read_json_lines_to_list('../output/gpt_punctuation_validation.jsonl')
golden_standard = random.sample(golden_standard, 50)
inferred = []
for sample in golden_standard:
inferred.append(get_response_openai(sample, model_name))
golden_standard_valids = [sample["messages"][2]["content"] for sample in golden_standard]
# golden_standard_valids_steinsaltz = [sample["messages"][1]["content"].split('"steinsaltz":')[1][:-1] for sample in golden_standard]
write_lists_to_csv(golden_standard_valids, inferred, '../output/discrepancies_visualization.csv', "Gold", "Inferred")
| [
"content"
] |
2024-01-10 | Sefaria/LLM | linker~fine_tune~project_scripts~run_on_validation_set.py | from tqdm import tqdm
from functools import reduce
from typing import List, Any
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from linker.fine_tune.project_scripts import constants
from langchain.schema import BaseOutputParser
from dataclasses import dataclass
from sefaria.helper.normalization import NormalizerComposer, RegexNormalizer, AbstractNormalizer
from linker.fine_tune.project_scripts.create_citation_input_for_fine_tuning import GptEntityClassificationTrainingGenerator, SPAN_LABEL_TO_CLASSICATION_TAG, GptNerTrainingGenerator
import re
import random
from util.general import load_mongo_docs, get_removal_list
from util.sentencizer import sentencize
from db_manager import MongoProdigyDBManager
import langchain
from langchain.cache import SQLiteCache
from sefaria.spacy_function_registry import inner_punct_tokenizer_factory
import spacy
from spacy.tokens import Doc
from spacy.lang.en import English
langchain.llm_cache = SQLiteCache(database_path=".langchain.db")
random.seed(613)
entity_recognizer_model = "ft:davinci-002:sefaria:en-ner:85JqAfCQ"
entity_classifier_model = "ft:davinci-002:sefaria:en-entity-class:85dnuVZD"
nlp = English()
nlp.tokenizer = inner_punct_tokenizer_factory()(nlp)
class ExampleGenerator:
def __init__(self, collections, skip=0, limit=None):
docs_list = []
for collection in collections:
docs = list(load_mongo_docs(collection))[skip:]
if limit:
docs = docs[:skip+limit]
docs = list(filter(self._filter_bad_sentences, docs))
random.shuffle(docs)
docs_list += [docs]
min_count = min(len(d) for d in docs_list)
self._data = []
for docs in docs_list:
self._data += docs[:min_count]
random.shuffle(self._data)
@staticmethod
def _sentencize_doc(doc):
docs = []
for sent in sentencize(doc['text']):
curr_doc = doc.copy()
curr_doc['text'] = sent
docs += [curr_doc]
return docs
@staticmethod
def _filter_bad_sentences(doc):
text = doc['text']
if "<" in text or ">" in text:
return False
return True
def get(self, sentencize=True):
for doc in self._data:
if sentencize:
for sent_doc in self._sentencize_doc(doc):
yield sent_doc
else:
yield doc
@dataclass
class Entity:
text: str
start: int
end: int
label: str = None
@dataclass
class EntityDoc:
text: str
entities: List[Entity]
meta: dict = None
def validate(self, original_text):
if original_text != self.text:
realign_entities(original_text, self)
# raise Exception(f"Original text is not equal to text.\nOrignal:\n{original_text}\nFinal:\n{self.text}")
for ent in self.entities:
if self.text[ent.start:ent.end] != ent.text:
raise AssertionError(f"Entity {ent} does not match text '{self.text[ent.start:ent.end]}'")
def spacy_serialize(self) -> dict:
spacy_doc = nlp(self.text)
return {
"text": self.text,
"meta": self.meta,
"spans": [self._spacy_serialize_entity(spacy_doc, entity) for entity in self.entities]
}
@staticmethod
def _spacy_serialize_entity(spacy_doc: Doc, entity: Entity) -> dict:
from sefaria.model.linker.ref_part import span_inds
span = spacy_doc.char_span(entity.start, entity.end)
span_start, span_end = span_inds(span)
return {
"start": entity.start,
"end": entity.end,
"token_start": span_start,
"token_end": span_end-1,
"label": entity.label,
}
def __str__(self):
entities_str = '\n'.join(e.__repr__() for e in self.entities)
return f"TEXT:\n{self.text}\nENTITIES:\n{entities_str}"
pattern = re.compile(fr'{re.escape(constants.ENTITY_PRE_WRAPPER)}(.+?){re.escape(constants.ENTITY_POST_WRAPPER)}')
bracket_normalizer = RegexNormalizer(fr'{re.escape(constants.ENTITY_PRE_WRAPPER)}|{re.escape(constants.ENTITY_POST_WRAPPER)}', r'')
strip_normalizer = RegexNormalizer(r'^\s*|\s*$', r'')
normalizer = NormalizerComposer(steps=[bracket_normalizer, strip_normalizer])
class EntityParser(BaseOutputParser[EntityDoc]):
@property
def _type(self) -> str:
return self.__class__.__name__
def get_format_instructions(self) -> str:
return "Wrap entities with double curly brackets."
def parse(self, text: str) -> EntityDoc:
entities = []
for entity_match in pattern.finditer(text):
entities += [self._create_entity(entity_match)]
cleaned_text = normalizer.normalize(text)
corrected_entities = self._correct_entity_locations(text, entities)
return EntityDoc(text=cleaned_text, entities=corrected_entities)
def _correct_entity_locations(self, text, entities) -> List[Entity]:
mapping = normalizer.get_mapping_after_normalization(text, reverse=True)
orig_indices = [(e.start, e.end) for e in entities]
new_indices = normalizer.convert_normalized_indices_to_unnormalized_indices(orig_indices, mapping, reverse=True)
for e, (start, end) in zip(entities, new_indices):
e.start = start
e.end = end
return entities
@staticmethod
def _create_entity(entity_match: re.Match) -> Entity:
start = entity_match.start(1)
end = entity_match.end(1)
entity_text = entity_match.group(1)
return Entity(entity_text, start, end)
class EntityTagger:
def __init__(self, recognizer_is_chat=False):
self.recognizer_is_chat = recognizer_is_chat
recognizer_model = ChatOpenAI if recognizer_is_chat else OpenAI
self._llm_recognizer = recognizer_model(model=entity_recognizer_model, temperature=0)
self._llm_classifier = OpenAI(model=entity_classifier_model, temperature=0)
self._parser = EntityParser()
def predict(self, spacy_doc) -> EntityDoc:
doc = self._recognize_entities(spacy_doc)
doc = self._classify_entities(doc)
return doc
def _recognize_entities(self, spacy_doc):
prompt = GptNerTrainingGenerator.generate_one(spacy_doc, is_labeled=False)
if self.recognizer_is_chat:
output = self._llm_recognizer(prompt)
doc = self._parser.parse(output.content)
else:
output = self._llm_recognizer(prompt, stop=[constants.GPT_COMPLETION_END_INDICATOR])
doc = self._parser.parse(output)
doc.validate(spacy_doc['text'])
return doc
def _classify_entities(self, doc: EntityDoc) -> EntityDoc:
for entity in doc.entities:
entity.label = self._classify_entity(doc.text, entity)
return doc
def _classify_entity(self, text, entity: Entity) -> str:
generator = GptEntityClassificationTrainingGenerator()
prompt = generator.create_prompt(text, entity.start, entity.end)
output = self._llm_classifier(prompt, stop=[constants.GPT_COMPLETION_END_INDICATOR])
output = output.strip()
if output not in SPAN_LABEL_TO_CLASSICATION_TAG.values():
print(f"NOT good '{output}'")
raise AssertionError
return output
def realign_entities(original_text: str, doc: EntityDoc) -> EntityDoc:
removal_list = get_removal_list(original_text, doc.text)
temp_normalizer = AbstractNormalizer()
mapping = temp_normalizer.get_mapping_after_normalization(original_text, removal_list, reverse=False)
old_inds = [(entity.start, entity.end) for entity in doc.entities]
new_inds = temp_normalizer.convert_normalized_indices_to_unnormalized_indices(old_inds, mapping, reverse=False)
for (new_start, new_end), entity in zip(new_inds, doc.entities):
entity.start = new_start
entity.end = new_end
doc.text = original_text
return doc
if __name__ == '__main__':
tagger = EntityTagger()
my_db = MongoProdigyDBManager("ner_en_gpt_copper")
my_db.output_collection.delete_many({})
generator = ExampleGenerator(['ner_en_input'], skip=13000)
for d in tqdm(generator.get(sentencize=True)):
try:
doc = tagger.predict(d)
doc.meta = d['meta']
print(doc)
mongo_doc = doc.spacy_serialize()
my_db.output_collection.insert_one(mongo_doc)
except (AssertionError, AttributeError) as e:
print("ERROR", d['text'])
print(e)
print()
"""
prodigy ner-recipe ref_tagging ner_en_gpt_copper ner_en_gpt_silver Citation,Person,Group -lang en -dir ltr --view-id ner_manual
"""
| [] |
2024-01-10 | Sefaria/LLM | topic_prompt~topic_prompt_generator.py | import csv
import re
from tqdm import tqdm
from typing import List
from sheet_interface import get_topic_and_orefs
from html_formatter import HTMLFormatter
from csv_formatter import CSVFormatter
from sefaria.model.topic import Topic
from sefaria.model.text import Ref
from toprompt_llm_prompt import TopromptLLMPrompt, get_output_parser
from toprompt import Toprompt, TopromptOptions
import langchain
from langchain.cache import SQLiteCache
from langchain.chat_models import ChatOpenAI, ChatAnthropic
from langchain.prompts import PromptTemplate
from langchain.schema import HumanMessage
langchain.llm_cache = SQLiteCache(database_path=".langchain.db")
def _get_toprompt_options(lang: str, topic: Topic, oref: Ref, num_tries=1) -> TopromptOptions:
# TODO pull out formatting from _get_input_prompt_details
full_language = "English" if lang == "en" else "Hebrew"
llm_prompt = TopromptLLMPrompt(lang, topic, oref).get()
llm = ChatOpenAI(model="gpt-4", temperature=0)
human_message = HumanMessage(content=llm_prompt.format())
responses = []
topic_prompts = []
secondary_prompt = PromptTemplate.from_template(f"Generate another set of description and title. Refer back to the "
f"examples provided to stick to the same writing style.\n"
"{format_instructions}",
partial_variables={"format_instructions": get_output_parser().get_format_instructions()})
for i in range(num_tries):
curr_response = llm([human_message] + responses)
responses += [curr_response]
if i < num_tries-1:
responses += [HumanMessage(content=secondary_prompt.format())]
output_parser = get_output_parser()
parsed_output = output_parser.parse(curr_response.content)
toprompt_text = parsed_output.why + " " + parsed_output.what
# improve title
if ":" in parsed_output.title:
new_title = _improve_title(responses, parsed_output.title)
if new_title:
if ":" in new_title:
new_title = _improve_title(responses, new_title)
parsed_output.title = new_title
topic_prompts += [Toprompt(topic, oref, toprompt_text, parsed_output.title)]
return TopromptOptions(topic_prompts)
def _improve_title(curr_responses, curr_title):
better_title_prompt = PromptTemplate.from_template(f"Current title is: {curr_title}. "
f"Rewrite the title, rephrasing to avoid using a colon."
f" Wrap the title in <title> tags. It should at most"
f" five words and grab the reader's attention.")
llm = ChatOpenAI(model="gpt-4", temperature=0.5)
title_response = llm(curr_responses + [HumanMessage(content=better_title_prompt.format())])
title_match = re.search(r'<title>(.+?)</title>', title_response.content)
if title_match is None:
return
new_title = title_match.group(1)
new_title = re.sub(r'^"', '', new_title)
new_title = re.sub(r'"$', '', new_title)
return new_title
def _get_topprompts_for_sheet_id(lang, sheet_id: int) -> List[TopromptOptions]:
topic, orefs = get_topic_and_orefs(sheet_id)
toprompt_options = []
for oref in tqdm(orefs, desc="get toprompts for sheet"):
toprompt_options += [_get_toprompt_options(lang, topic, oref, num_tries=1)]
return toprompt_options
def output_toprompts_for_sheet_id_list(lang: str, sheet_ids: List[int]) -> None:
toprompt_options = []
for sheet_id in sheet_ids:
toprompt_options += _get_topprompts_for_sheet_id(lang, sheet_id)
formatter = HTMLFormatter(toprompt_options)
formatter.save("output/sheet_topic_prompts.html")
csv_formatter = CSVFormatter(toprompt_options)
csv_formatter.save("output/sheet_topic_prompts.csv")
def _get_validation_set():
validation_set = []
with open("input/topic_prompt_validation_set.csv", "r") as fin:
cin = csv.DictReader(fin)
for row in cin:
validation_set += [(Topic.init(row['Slug']), Ref(row['Reference']), row['Title'], row['Prompt '])]
return validation_set
def output_toprompts_for_validation_set(lang):
validation_set = _get_validation_set()
toprompt_options = []
gold_standard_prompts = []
for topic, oref, title, prompt in tqdm(validation_set):
toprompt_options += [_get_toprompt_options(lang, topic, oref)]
gold_standard_prompts += [Toprompt(topic, oref, prompt, title)]
html_formatter = HTMLFormatter(toprompt_options, gold_standard_prompts)
html_formatter.save("output/validation_topic_prompts.html")
csv_formatter = CSVFormatter(toprompt_options, gold_standard_prompts)
csv_formatter.save("output/validation_topic_prompts.csv")
def _get_top_n_orefs_for_topic(slug, top_n=10) -> List[Ref]:
from sefaria.helper.topic import get_topic
out = get_topic(True, slug, with_refs=True, ref_link_type_filters=['about', 'popular-writing-of'])
return [Ref(d['ref']) for d in out['refs']['about']['refs'][:top_n]]
def output_toprompts_for_topic_page(lang, slug, top_n=10):
topic = Topic.init(slug)
orefs = _get_top_n_orefs_for_topic(slug, top_n)
toprompt_options = []
for oref in tqdm(orefs, desc="get toprompts for topic page"):
toprompt_options += [_get_toprompt_options(lang, topic, oref, num_tries=3)]
formatter = HTMLFormatter(toprompt_options)
formatter.save("output/topic_page_topic_prompts.html")
csv_formatter = CSVFormatter(toprompt_options)
csv_formatter.save("output/topic_page_topic_prompts.csv")
if __name__ == '__main__':
sheet_ids = [447069, 518761]
lang = "en"
output_toprompts_for_sheet_id_list(lang, sheet_ids)
# output_toprompts_for_validation_set(lang)
# output_toprompts_for_topic_page(lang, 'peace')
| [
"Generate another set of description and title. Refer back to the ",
"Generate another set of description and title. Refer back to the examples provided to stick to the same writing style.\n{format_instructions}",
"{format_instructions}",
"Rewrite the title, rephrasing to avoid using a colon.",
"examples provided to stick to the same writing style.\n",
"format_instructions",
"Current title is: PLACEHOLDER. Rewrite the title, rephrasing to avoid using a colon. Wrap the title in <title> tags. It should at most five words and grab the reader's attention.",
"Current title is: PLACEHOLDER. ",
" Wrap the title in <title> tags. It should at most",
" five words and grab the reader's attention.",
" ",
"[]"
] |
2024-01-10 | Sefaria/LLM | util~fine_tune~fine_tune_stats.py | import typer
import openai
import os
# from openai import File
def fine_tune(output_file: str):
openai.api_key = os.getenv("OPENAI_API_KEY")
fine_tune_jobs = openai.FineTuningJob.list()['data']
last_job = max(fine_tune_jobs, key=lambda x: x.created_at)
results_file_id = last_job['result_files'][0]
content = openai.File.download(results_file_id)
with open(output_file, 'wb') as file:
file.write(content)
if __name__ == "__main__":
typer.run(fine_tune)
| [] |
2024-01-10 | Sefaria/LLM | util~fine_tune~delete_last_fine_tune_job.py | import openai
from openai.error import TryAgain, InvalidRequestError
import os
openai.api_key = os.getenv("OPENAI_API_KEY")
if __name__ == '__main__':
fine_tune_jobs = openai.FineTuningJob.list()['data']
last_job = max(fine_tune_jobs, key=lambda x: x.created_at)
openai.FineTuningJob.cancel(last_job.id)
| [] |
2024-01-10 | Sefaria/LLM | topic_modelling~poc.py | import django
django.setup()
from sefaria.model.text import Ref, library
import re
import langchain
from langchain.cache import SQLiteCache
from langchain.chat_models import ChatOpenAI
from langchain.chat_models import ChatAnthropic
from langchain.prompts import PromptTemplate
from langchain.schema import HumanMessage, SystemMessage
langchain.llm_cache = SQLiteCache(database_path=".langchain.db")
from functools import reduce
from util.general import get_raw_ref_text
import typer
from tqdm import tqdm
import csv
def get_topics_for_title(title: str, lang: str):
index = library.get_index(title)
rows = []
for segment_oref in tqdm(index.all_section_refs()[:20]):
print('-----')
print(segment_oref.normal())
topics = get_topics_for_tref(segment_oref, lang)
rows += [{"Ref": segment_oref.normal(), "Text": get_raw_ref_text(segment_oref, lang), "Topics": ", ".join(topics)}]
with open("output/Pri Eitz Chaim Topics.csv", "w") as fout:
cout = csv.DictWriter(fout, ['Ref', 'Text', "Topics"])
cout.writeheader()
cout.writerows(rows)
def get_topics_for_tref(oref: Ref, lang: str):
text = get_raw_ref_text(oref, lang)
return get_raw_topics(text, lang)
def get_raw_topics(text, lang):
short_to_long_lang = {
"he": "Hebrew", "en": "English"
}
examples_by_lang = {
"he":
"<topic>תרומה</topic>\n"
"<topic>פרשת נח</topic>\n"
"<topic>אברהם</topic>\n"
"<topic>שבת</topic>\n",
"en":
"<topic>Teruma</topic>\n"
"<topic>Parashat Noach</topic>\n"
"<topic>Abraham</topic>\n"
"<topic>Shabbat</topic>\n"
}
system_message = SystemMessage(content=
"You are an intelligent Jewish scholar who is knowledgeable in all aspects of the Torah and Jewish texts.\n"
"<task>\n"
"Output list of high-level topics discussed by the input\n"
"Topics should be important enough that they would warrant an entry in the index in the back of a book\n"
"Each topic should be wrapped in <topic> tags\n"
"Topics should be short. They should be written as if they are titles of encyclopedia entries. Therefore, they should be understandable when read independent of the source text.\n"
"Citations are not topics. E.g. Genesis 1:4 is not a topic\n"
"Topics should be written assuming a Torah context. Phrases like \"Torah perspective\", \"in Judaism\", \"in the Torah\" and \"Biblical Narrative\" should not appear in a topic.\n"
f"Topics should be written in {short_to_long_lang[lang]}."
"</task>"
"<examples>\n"
f"{examples_by_lang[lang]}"
"</examples>\n"
"<negative_examples>\n"
"<topic>Dispute between Rabbi Akiva and Rabbi Yehoshua</topic>\n"
"<topic>Opinions on how to shake lulav</topic>\n"
"</negative_examples>"
)
user_prompt = PromptTemplate.from_template("# Input\n{text}")
human_message = HumanMessage(content=user_prompt.format(text=text))
# llm = ChatOpenAI(model="gpt-4", temperature=0)
llm = ChatAnthropic(model="claude-2", temperature=0)
response = llm([system_message, human_message])
# print('---')
# human_refine = HumanMessage(content="Of the topics above, list the most fundamental topics for understanding the source text. Exclude topics that are very specific.")
# response2 = llm([system_message, human_message, response, human_refine])
# human_breakup = HumanMessage(content="Of the topics above, break up complex topics into simpler topics.\n"
# "<examples>\n"
# "<topic>הלכות מזוזה בבית כנסת</topic> should become <topic>מזוזה</topic> and <topic>בית כנסה</topic>\n"
# "<topic>שאלה בדין תקיעת שופר ביום כיפור</topic> should become <topic>תקיעת שופר</topic> and <topic>יום כיפור</topic>\n"
# "<topic>הלכות עירוב</topic> should remain unchanged."
# "</examples>")
#
# response3 = llm([system_message, human_message, response, human_refine, response2, human_breakup])
topics = reduce(lambda a, b: a + [b.group(1).strip()], re.finditer(r"<topic>(.+?)</topic>", response.content), [])
return topics
if __name__ == '__main__':
typer.run(get_topics_for_title)
| [
"<task>\n",
"</examples>\n",
"<negative_examples>\n",
"Topics should be written assuming a Torah context. Phrases like \"Torah perspective\", \"in Judaism\", \"in the Torah\" and \"Biblical Narrative\" should not appear in a topic.\n",
"<topic>Opinions on how to shake lulav</topic>\n",
"Citations are not topics. E.g. Genesis 1:4 is not a topic\n",
"Topics should be important enough that they would warrant an entry in the index in the back of a book\n",
"You are an intelligent Jewish scholar who is knowledgeable in all aspects of the Torah and Jewish texts.\n",
"Each topic should be wrapped in <topic> tags\n",
"Topics should be short. They should be written as if they are titles of encyclopedia entries. Therefore, they should be understandable when read independent of the source text.\n",
"<topic>Dispute between Rabbi Akiva and Rabbi Yehoshua</topic>\n",
"</negative_examples>",
"<examples>\n",
"Output list of high-level topics discussed by the input\n",
"# Input\n{text}"
] |
2024-01-10 | Sefaria/LLM | util~fine_tune~upload_fine_tune_files.py | import typer
import json
from openai import File
def upload_files(training_filename: str, validation_filename: str):
training_file = File.create(file=open(training_filename, "r"), purpose='fine-tune')
validation_file = File.create(file=open(validation_filename, "r"), purpose='fine-tune')
out = {
"training_file_id": training_file.id,
"validation_file_id": validation_file.id
}
with open("output/fine_tune_file_ids.json", "w") as fout:
json.dump(out, fout)
if __name__ == '__main__':
typer.run(upload_files)
| [] |
2024-01-10 | Sefaria/LLM | translation~poc.py | import django
django.setup()
import typer
import csv
from tqdm import tqdm
import random
from sefaria.model import *
from util.general import get_raw_ref_text, get_by_xml_tag
import langchain
from langchain.cache import SQLiteCache
from langchain.chat_models import ChatOpenAI, ChatAnthropic
from langchain.schema import HumanMessage
langchain.llm_cache = SQLiteCache(database_path=".langchain.db")
random.seed(26)
def translate_segment(tref: str, context: str = None):
oref = Ref(tref)
text = get_raw_ref_text(oref, 'he')
identity_message = HumanMessage(content="You are a Jewish scholar knowledgeable in all Torah and Jewish texts. Your "
"task is to translate the Hebrew text wrapped in <input> tags. Context may be "
"provided in <context> tags. Use context to provide context to <input> "
"text. Don't translate <context>. Only translate <input> text. Output "
"translation wrapped in <translation> tags.")
task_prompt = f"<input>{text}</input>"
if context:
task_prompt = f"<context>{context}</context>{task_prompt}"
task_message = HumanMessage(content=task_prompt)
llm = ChatAnthropic(model="claude-2", temperature=0, max_tokens_to_sample=1000000)
response_message = llm([identity_message, task_message])
translation = get_by_xml_tag(response_message.content, 'translation')
if translation is None:
print("TRANSLATION FAILED")
print(tref)
print(response_message.content)
return response_message.content
return translation
def randomly_translate_book(title: str, n: int = 30):
segment_orefs = library.get_index(title).all_segment_refs()
# random_segment_orefs = random.sample(segment_orefs, n)
rows = []
for oref in tqdm(segment_orefs[:16], desc='randomly translating'):
tref = oref.normal()
rows += [{
"Ref": tref,
"Hebrew": get_raw_ref_text(oref, 'he'),
"English": translate_segment(tref),
}]
with open('output/random_mb_translations.csv', 'w') as fout:
cout = csv.DictWriter(fout, ['Ref', 'Hebrew', 'English'])
cout.writeheader()
cout.writerows(rows)
if __name__ == '__main__':
typer.run(randomly_translate_book)
| [
"You are a Jewish scholar knowledgeable in all Torah and Jewish texts. Your task is to translate the Hebrew text wrapped in <input> tags. Context may be provided in <context> tags. Use context to provide context to <input> text. Don't translate <context>. Only translate <input> text. Output translation wrapped in <translation> tags.",
"<input>PLACEHOLDER</input>",
"<context>PLACEHOLDER</context>PLACEHOLDER"
] |
2024-01-10 | Sefaria/LLM | util~fine_tune~delete_all_fine_tunes.py | import openai
from openai.error import TryAgain, InvalidRequestError
import os
openai.api_key = os.getenv("OPENAI_API_KEY")
if __name__ == '__main__':
# would be nice to get models that exist and not jobs. look into openai.Models.
fine_tunes = openai.FineTuningJob.list()
for fine_tune in fine_tunes['data']:
print('Deleting', fine_tune.fine_tuned_model)
try:
openai.Model.delete(fine_tune.fine_tuned_model)
except (TryAgain, InvalidRequestError):
print("skip")
| [] |
2024-01-10 | Sefaria/LLM | summarize_commentary~summarize_commentary.py | import re
import django
import anthropic
django.setup()
from sefaria.model import *
from sefaria.client.wrapper import get_links
from sefaria.datatype.jagged_array import JaggedTextArray
from util.openai import get_completion_openai, count_tokens_openai
from langchain.chat_models import ChatAnthropic
from langchain.schema import HumanMessage
from langchain.cache import SQLiteCache
import langchain
langchain.llm_cache = SQLiteCache(database_path=".langchain.db")
def get_prompt(tref, topic_slug, commentary):
topic_name, topic_description = get_topic_prompt(topic_slug)
prompt = (
f"# Input:\n"
f"1) Commentary: commentary on the verse {tref}.\n"
f"2) Topic: topic which relates to this verse\n"
f"3) Topic Description: description of topic\n"
f"# Task: Summarize the main points discussed by the commentators. Only include points that relate to the"
f" topic \"{topic_name}\".\n"
f"# Output: Numbered list of main points, only when relating to the topic \"{topic_name}\".\n"
f"-----\n"
f"# Input:\n1) Topic: {topic_name}\n2) Topic Description: {topic_description}\n3) Commentary: {commentary}"
)
return prompt
def get_topic_prompt(slug):
topic = Topic.init(slug)
return topic.get_primary_title('en'), getattr(topic, 'description', {}).get('en', '')
def get_commentary_for_tref(tref, max_tokens=7000):
library.rebuild_toc()
commentary_text = ""
for link_dict in get_links(tref, with_text=True):
if link_dict['category'] not in {'Commentary'}:
continue
if not link_dict['sourceHasEn']:
continue
link_text = JaggedTextArray(link_dict['text']).flatten_to_string()
link_text = re.sub(r"<[^>]+>", " ", TextChunk.strip_itags(link_text))
commentary_text += f"Source: {link_dict['sourceRef']}\n{link_text}\n"
if count_tokens_openai(commentary_text) > max_tokens:
break
return commentary_text
def summarize_commentary(tref, topic_slug, company='openai'):
commentary_text = get_commentary_for_tref(tref)
prompt = get_prompt(tref, topic_slug, commentary_text)
if company == 'openai':
num_tokens = count_tokens_openai(prompt)
print(f"Number of commentary tokens: {num_tokens}")
completion = get_completion_openai(prompt)
elif company == 'anthropic':
llm = ChatAnthropic(model="claude-instant-1")
completion = llm([HumanMessage(content=prompt)]).content
else:
raise Exception("No valid company passed. Options are 'openai' or 'anthropic'.")
return completion
def print_summarized_commentary(tref, topic_slug):
completion = summarize_commentary(tref, topic_slug)
print(completion)
if __name__ == '__main__':
print_summarized_commentary('Exodus 10:1-2', 'haggadah')
| [
"# Input:\n1) Commentary: commentary on the verse PLACEHOLDER.\n2) Topic: topic which relates to this verse\n3) Topic Description: description of topic\n# Task: Summarize the main points discussed by the commentators. Only include points that relate to the topic \"PLACEHOLDER\".\n# Output: Numbered list of main points, only when relating to the topic \"PLACEHOLDER\".\n-----\n# Input:\n1) Topic: PLACEHOLDER\n2) Topic Description: PLACEHOLDER\n3) Commentary: PLACEHOLDER"
] |
2024-01-10 | Sefaria/LLM | topic_prompt~contextualize.py | """
Provide context for a source
"""
import django
django.setup()
from sefaria.model import *
from util.general import get_ref_text_with_fallback, get_by_xml_tag
import re
import langchain
from langchain.prompts import PromptTemplate
from langchain.schema import HumanMessage, SystemMessage
from langchain.chat_models import ChatOpenAI, ChatAnthropic
from langchain.cache import SQLiteCache
langchain.llm_cache = SQLiteCache(database_path=".langchain.db")
def context_from_section(segment_oref: Ref) -> str:
"""
Given a segment ref, provide context from its section ref
:param segment_oref:
:return: context from section ref
"""
segment_text = get_ref_text_with_fallback(segment_oref, "en", auto_translate=True)
section_text = get_ref_text_with_fallback(segment_oref.section_ref(), "en", auto_translate=True)
system_message = SystemMessage(content="# Identity\nYou are a Jewish scholar familiar with all Torah texts.\n"
"# Task\nGiven a segment of Torah text and surrounding context text, output"
"a summary of the relevant parts of the context that will help users"
" understand the segment of Torah text."
"# Input format\n"
"- Segment of Torah text surrounded by <segment> XML tags.\n"
"- Context text surrounded by <context> XML tags.\n"
"# Output format\n"
"Summary of the relevant context text to help users understand <segment>"
" text. Output should be surrounded in <relevant_context> XML"
" tags. No more than 50 words. Summary should start with the word 'The"
" context describes'.")
human_message = HumanMessage(content=f"<segment>{segment_text}</segment>\n"
f"<context>{section_text}</context>")
llm = ChatAnthropic(model="claude-2", temperature=0, max_tokens_to_sample=100000)
response = llm([system_message, human_message])
context = get_by_xml_tag(response.content, "relevant_context")
if context is None:
return response.content
context = re.sub(r"^The context describes ", "", context)
return context
def context_from_liturgy(oref):
text = get_ref_text_with_fallback(oref, "en", auto_translate=True)
llm = ChatOpenAI(model="gpt-4", temperature=0)
system_message = SystemMessage(content="""
    Given a text from the Jewish canon, add any relevant context that would help a user understand this text from a
Jewish perspective. Relevant context may be:
If this text is a prayer, when was it recited and why?
Historical significance to Jewish history
How this text is viewed nowadays by Jewish people
DO NOT offer an interpretation or explanation of the text. Only offer helpful context.
Limit to 50 words or less.
""")
prompt = PromptTemplate.from_template("Citation: {citation}\nText: {text}")
human_message = HumanMessage(content=prompt.format(text=text, citation=oref.normal()))
response = llm([system_message, human_message])
return response.content
def get_context(oref: Ref):
if oref.primary_category == "Tanakh":
context = context_from_section(oref)
else:
context = context_from_liturgy(oref)
return context
if __name__ == '__main__':
print(get_context(Ref("Nehemiah 8:14-16")))
# print(context_from_section(Ref("Nehemiah 8:14-16")))
| [
"Citation: {citation}\nText: {text}",
"<segment>PLACEHOLDER</segment>\n<context>PLACEHOLDER</context>",
"\n Given a text from the Jewish cannon, add any relevant context that would help a user understand this text from a\n Jewish perspective. Relevant context may be:\n If this text is a prayer, when was it recited and why?\n Historical significance to Jewish history\n How this text is viewed nowadays by Jewish people\n \n DO NOT offer an interpretation or explanation of the text. Only offer helpful context.\n \n Limit to 50 words or less.\n ",
"# Identity\nYou are a Jewish scholar familiar with all Torah texts.\n# Task\nGiven a segment of Torah text and surrounding context text, outputa summary of the relevant parts of the context that will help users understand the segment of Torah text.# Input format\n- Segment of Torah text surrounded by <segment> XML tags.\n- Context text surrounded by <context> XML tags.\n# Output format\nSummary of the relevant context text to help users understand <segment> text. Output should be surrounded in <relevant_context> XML tags. No more than 50 words. Summary should start with the word 'The context describes'."
] |
2024-01-10 | Sefaria/LLM | util~fine_tune~fine_tune_status.py | import openai
import os
import typer
import json
openai.api_key = os.getenv("OPENAI_API_KEY")
def fine_tune_status(output_file: str):
fine_tune_jobs = openai.FineTuningJob.list()['data']
last_job = max(fine_tune_jobs, key=lambda x: x.created_at)
last_job_dict = last_job.to_dict()
print(last_job_dict)
json.dump(last_job_dict, open(output_file, 'w'))
if __name__ == '__main__':
typer.run(fine_tune_status)
| [] |
2024-01-10 | Sefaria/LLM | util~fine_tune~delete_all_files.py | import openai
from openai.error import TryAgain
import os
openai.api_key = os.getenv("OPENAI_API_KEY")
if __name__ == '__main__':
files = openai.File.list()
for file in files['data']:
print('Deleting', file.id)
try:
openai.File.delete(file.id)
except TryAgain:
print("skip")
| [] |
2024-01-10 | forestcontact/forest | imogen~imogen.py | #!/usr/bin/python3.9
# Copyright (c) 2021 MobileCoin Inc.
# Copyright (c) 2021 The Forest Team
# Copyright (c) 2021 Sylvie Liberman
import asyncio
import base64
import datetime
import json
import logging
import time
import urllib
from pathlib import Path
from typing import Callable, Optional
import aioredis
import base58
import openai
from aiohttp import web
from forest import utils
from forest.core import JSON, Bot, Message, Response, app, hide
openai.api_key = utils.get_secret("OPENAI_API_KEY")
if not utils.LOCAL:
aws_cred = utils.get_secret("AWS_CREDENTIALS")
if aws_cred:
aws_dir = Path("/root/.aws")
aws_dir.mkdir(parents=True, exist_ok=True)
with (aws_dir / "credentials").open("w") as creds:
creds.write(base64.b64decode(utils.get_secret("AWS_CREDENTIALS")).decode())
logging.info("wrote creds")
with (aws_dir / "config").open("w") as config:
config.write("[profile default]\nregion = us-east-1")
logging.info("writing config")
else:
logging.info("couldn't find creds")
ssh_key = utils.get_secret("SSH_KEY")
open("id_rsa", "w").write(base64.b64decode(ssh_key).decode())
password, rest = utils.get_secret("REDIS_URL").removeprefix("redis://:").split("@")
host, port = rest.split(":")
redis = aioredis.Redis(host=host, port=int(port), password=password)
instance_id = "aws ec2 describe-instances --region us-east-1 | jq -r .Reservations[].Instances[].InstanceId"
status = "aws ec2 describe-instances --region us-east-1| jq -r '..|.State?|.Name?|select(.!=null)'"
start = "aws ec2 start-instances --region us-east-1 --instance-ids {}"
stop = "aws ec2 stop-instances --region us-east-1 --instance-ids {}"
get_ip = "aws ec2 describe-instances --region us-east-1|jq -r .Reservations[].Instances[].PublicIpAddress"
# start_worker = "ssh -i id_rsa -o ConnectTimeout=2 ubuntu@{} ~/ml/read_redis.py {}"
get_cost = (
"aws ce get-cost-and-usage --time-period Start={},End={} --granularity DAILY --metrics BlendedCost | "
"jq -r .ResultsByTime[0].Total.BlendedCost.Amount"
)
get_all_cost = (
"aws ce get-cost-and-usage --time-period Start=2021-10-01,End={end} --granularity DAILY --metrics BlendedCost | "
"jq '.ResultsByTime[] | {(.TimePeriod.Start): .Total.BlendedCost.Amount}' | jq -s add"
)
async def get_output(cmd: str) -> str:
proc = await asyncio.create_subprocess_shell(cmd, stdout=-1, stderr=-1)
stdout, stderr = await proc.communicate()
return stdout.decode().strip() or stderr.decode().strip()
class Imogen(Bot):
worker_instance_id: Optional[str] = None
async def start_process(self) -> None:
self.worker_instance_id = await get_output(instance_id)
await super().start_process()
async def do_get_cost(self, _: Message) -> str:
today = datetime.date.today()
tomorrow = today + datetime.timedelta(1)
out = await get_output(get_cost.format(today, tomorrow))
try:
return str(round(float(out), 2))
except ValueError:
return out
async def do_get_all_cost(self, _: Message) -> str:
tomorrow = datetime.date.today() + datetime.timedelta(1)
out = await get_output(get_all_cost.replace("{end}", str(tomorrow)))
return json.loads(out)
do_get_costs = do_get_all_costs = hide(do_get_all_cost)
async def do_status(self, _: Message) -> str:
"shows the GPU instance state (not the program) and queue size"
state = await get_output(status)
queue_size = await redis.llen("prompt_queue")
return f"worker state: {state}, queue size: {queue_size}"
image_rate_cents = 5
async def do_imagine_nostart(self, msg: Message) -> str:
logging.info(msg.full_text)
logging.info(msg.text)
if msg.group:
destination = base58.b58encode(msg.group).decode()
else:
destination = msg.source
params: JSON = {}
# if msg.attachments:
# attachment = msg.attachments[0]
# key = attachment["id"] + "-" + attachment["filename"]
# params["init_image"] = key
# await redis.set(
# key, open(Path("./attachments") / attachment["id"], "rb").read()
# )
await redis.rpush(
"prompt_queue",
json.dumps({"prompt": msg.text, "callback": destination, "params": params}),
)
timed = await redis.llen("prompt_queue")
return f"you are #{timed} in line"
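    # /imagine flow: do_imagine_nostart pushes the prompt (plus the reply destination) onto
    # the redis "prompt_queue"; do_imagine below additionally checks the GPU worker's EC2
    # state and starts the instance via the aws CLI if it is stopped.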
async def do_imagine(self, msg: Message) -> str:
"""/imagine <prompt>"""
# check if worker is up
resp = await self.do_imagine_nostart(msg)
state = await get_output(status)
logging.info("worker state: %s", state)
# await self.mobster.put_usd_tx(msg.sender, self.image_rate_cents, msg.text[:32])
if state in ("stopped", "stopping"):
# if not, turn it on
output = await get_output(start.format(self.worker_instance_id))
logging.info(output)
if "InsufficientInstanceCapacity" in output:
resp += ".\nsorry, andy jassy hates us. no gpu for us"
# asyncio.create_task(really_start_worker())
return resp
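    # make_prefix is a small factory for the style commands below: each one just prepends its
    # style keyword to the prompt and delegates to do_imagine, e.g. "/steampunk a lighthouse"
    # is queued as the prompt "steampunk a lighthouse".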
def make_prefix(prefix: str, *_) -> Callable: # type: ignore # pylint: disable=no-self-argument
async def wrapped(self: "Imogen", msg: Message) -> str:
msg.text = f"{prefix} {msg.text}"
return await self.do_imagine(msg)
wrapped.__doc__ = f"/{prefix} <prompt>: imagine it with {prefix} style"
return wrapped
do_mythical = make_prefix("mythical")
do_festive = make_prefix("festive")
do_dark_fantasy = make_prefix("dark fantasy")
do_psychic = make_prefix("psychic")
do_pastel = make_prefix("pastel")
do_hd = make_prefix("hd")
do_vibrant = make_prefix("vibrant")
do_fantasy = make_prefix("fantasy")
do_steampunk = make_prefix("steampunk")
do_ukiyo = make_prefix("ukiyo")
do_synthwave = make_prefix("synthwave")
del make_prefix # shouldn't be used after class definition is over
async def do_paint(self, msg: Message) -> str:
"""/paint <prompt>"""
logging.info(msg.full_text)
destination = base58.b58encode(msg.group).decode() if msg.group else msg.source
await redis.rpush(
"prompt_queue",
json.dumps(
{
"prompt": msg.text,
"callback": destination,
"params": {
"vqgan_config": "wikiart_16384.yaml",
"vqgan_checkpoint": "wikiart_16384.ckpt",
},
}
),
)
timed = await redis.llen("prompt_queue")
state = await get_output(status)
logging.info("worker state: %s", state)
# await self.mobster.put_usd_tx(msg.sender, self.image_rate_cents, msg.text[:32])
if state in ("stopped", "stopping"):
# if not, turn it on
logging.info(await get_output(start.format(self.worker_instance_id)))
return f"you are #{timed} in line"
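    # do_c below wraps the user's message in a fixed persona prompt and sends it to the legacy
    # OpenAI Completions API ("davinci" engine); the stop sequences cut the reply off at the
    # first newline or speaker change.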
async def do_c(self, msg: Message) -> str:
prompt = (
"The following is a conversation with an AI assistant. "
"The assistant is helpful, creative, clever, funny, very friendly, an artist and anarchist\n\n"
"Human: Hello, who are you?\nAI: My name is Imogen, I'm an AI that makes dream-like images. How can I help you today?\n"
f"Human: {msg.text}\nAI: "
)
response = openai.Completion.create( # type: ignore
engine="davinci",
prompt=prompt,
temperature=0.9,
max_tokens=140,
top_p=1,
frequency_penalty=0.0,
presence_penalty=0.6,
stop=["\n", " Human:", " AI:"],
)
return response["choices"][0]["text"].strip()
@hide
async def do_gpt(self, msg: Message) -> str:
response = openai.Completion.create( # type: ignore
engine="davinci",
prompt=msg.text,
temperature=0.9,
max_tokens=120,
top_p=1,
frequency_penalty=0.01,
presence_penalty=0.6,
stop=["\n", " Human:", " AI:"],
)
return response["choices"][0]["text"].strip()
async def do_stop(self, _: Message) -> str:
return await get_output(stop.format(self.worker_instance_id))
async def do_start(self, _: Message) -> str:
return await get_output(start.format(self.worker_instance_id))
async def do_list_queue(self, _: Message) -> str:
try:
q = "; ".join(
json.loads(item)["prompt"]
for item in await redis.lrange("prompt_queue", 0, -1)
)
return q or "queue empty"
except json.JSONDecodeError:
return "json decode error?"
do_list_prompts = do_listqueue = do_queue = hide(do_list_queue)
async def do_dump_queue(self, _: Message) -> Response:
prompts = []
while 1:
if not (item := await redis.lpop("prompt_queue")):
break
prompts.append(str(json.loads(item)["prompt"]))
return prompts
# async def payment_response(self, _: Message, _: int) -> None:
# return None
# eh
# async def async_shutdown(self):
# await redis.disconnect()
# super().async_shutdown()
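# HTTP endpoint for posting finished images back to the bot (presumably used by the GPU
# worker): the image is read from a multipart upload, written to disk, and forwarded to the
# requesting Signal user or group.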
async def store_image_handler(request: web.Request) -> web.Response:
bot = request.app.get("bot")
if not bot:
return web.Response(status=504, text="Sorry, no live workers.")
reader = await request.multipart()
async for field in reader:
logging.info(field)
logging.info("multipart field name: %s", field.name)
filename = field.filename or f"attachment-{time.time()}.jpg"
# You cannot rely on Content-Length if transfer is chunked.
size = 0
path = Path(filename).absolute()
with open(path, "wb") as f:
logging.info("writing file")
while True:
chunk = await field.read_chunk() # 8192 bytes by default.
logging.info("read chunk")
if not chunk:
break
size += len(chunk)
f.write(chunk)
message = urllib.parse.unquote(request.query.get("message", ""))
destination = urllib.parse.unquote(request.query.get("destination", ""))
recipient = utils.signal_format(str(destination))
if destination and not recipient:
try:
group = base58.b58decode(destination).decode()
except ValueError:
# like THtg80Gi2jvgOEFhQjT2Cm+6plNGXTSBJg2HSnhJyH4=
group = destination
if recipient:
await bot.send_message(recipient, message, attachments=[str(path)])
else:
await bot.send_message(None, message, attachments=[str(path)], group=group)
info = f"{filename} sized of {size} sent"
logging.info(info)
return web.Response(text=info)
app.add_routes([web.post("/attachment", store_image_handler)])
app.add_routes([])
if __name__ == "__main__":
@app.on_startup.append
async def start_wrapper(our_app: web.Application) -> None:
our_app["bot"] = Imogen()
web.run_app(app, port=8080, host="0.0.0.0")
| [
"[]",
"The assistant is helpful, creative, clever, funny, very friendly, an artist and anarchist\n\n",
"Human: Hello, who are you?\nAI: My name is Imogen, I'm an AI that makes dream-like images. How can I help you today?\n",
"The following is a conversation with an AI assistant. "
] |
2024-01-10 | geyang/rl-playground-old | playground~vpg_half.py | """
Vanilla Policy Gradients, aka REINFORCE, aka Monte Carlo Policy Gradients.
To quickly test you can do:
python main.py Pendulum-v0 --vf_type nn --use_kl_heuristic --do_not_save
As long as --do_not_save is there, it won't overwrite files. If I want to
benchmark and save results, see the bash scripts. Add --render if desired.
(c) April 2017 by Daniel Seita, built upon starter code from CS 294-112.
"""
import argparse
import gym
import numpy as np
np.set_printoptions(suppress=True, precision=5, edgeitems=10)
import pickle
import sys
import tensorflow as tf
if "../" not in sys.path:
sys.path.append("../")
from utils import utils_pg as utils
from utils import value_functions as vfuncs
from utils import logz
from utils import policies
def run_vpg(args, vf_params, logdir, env, sess, continuous_control):
""" General purpose method to run vanilla policy gradients, for both
continuous and discrete action environments.
Parameters
----------
args: [Namespace]
Contains user-provided (or default) arguments for VPGs.
vf_params: [dict]
Dictionary of parameters for the value function.
logdir: [string]
Where we store the outputs, can be None to avoid saving.
env: [OpenAI gym env]
The environment the agent is in, from OpenAI gym.
sess: [tf Session]
Current Tensorflow session, to be passed to (at least) the policy
function, and the value function as well if it's a neural network.
continuous_control: [boolean]
True if continuous control (i.e. actions), false if otherwise.
"""
ob_dim = env.observation_space.shape[0]
if args.vf_type == 'linear':
vf = vfuncs.LinearValueFunction(**vf_params)
elif args.vf_type == 'nn':
vf = vfuncs.NnValueFunction(session=sess, ob_dim=ob_dim, **vf_params)
if continuous_control:
ac_dim = env.action_space.shape[0]
policyfn = policies.GaussianPolicy(sess, ob_dim, ac_dim)
else:
ac_dim = env.action_space.n
policyfn = policies.GibbsPolicy(sess, ob_dim, ac_dim)
sess.__enter__() # equivalent to `with sess:`
tf.global_variables_initializer().run() # pylint: disable=E1101
total_timesteps = 0
stepsize = args.initial_stepsize
for i in range(args.n_iter):
print("\n********** Iteration %i ************" % i)
# Collect paths until we have enough timesteps.
timesteps_this_batch = 0
paths = []
while True:
ob = env.reset()
terminated = False
obs, acs, rewards = [], [], []
animate_this_episode = (len(paths) == 0 and (i % 100 == 0) and args.render)
while True:
if animate_this_episode:
env.render()
obs.append(ob)
ac = policyfn.sample_action(ob)
acs.append(ac)
ob, rew, done, _ = env.step(ac)
rewards.append(rew)
if done:
break
path = {"observation": np.array(obs), "terminated": terminated,
"reward": np.array(rewards), "action": np.array(acs)}
paths.append(path)
timesteps_this_batch += utils.pathlength(path)
if timesteps_this_batch > args.min_timesteps_per_batch:
break
total_timesteps += timesteps_this_batch
# Estimate advantage function using baseline vf (these are lists!).
# return_t: list of sum of discounted rewards (to end of episode), one per time
# vpred_t: list of value function's predictions of components of return_t
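        # Concretely: return_t[i] = sum_{k >= i} gamma**(k - i) * rew_t[k] (via utils.discount),
        # and adv_t = return_t - vpred_t is the baselined advantage used for the policy update.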
vtargs, vpreds, advs = [], [], []
for path in paths:
rew_t = path["reward"]
return_t = utils.discount(rew_t, args.gamma)
vpred_t = vf.predict(path["observation"])
adv_t = return_t - vpred_t
advs.append(adv_t)
vtargs.append(return_t)
vpreds.append(vpred_t)
# Build arrays for policy update and **re-fit the baseline**.
ob_no = np.concatenate([path["observation"] for path in paths])
ac_n = np.concatenate([path["action"] for path in paths])
adv_n = np.concatenate(advs)
std_adv_n = (adv_n - adv_n.mean()) / (adv_n.std() + 1e-8)
vtarg_n = np.concatenate(vtargs)
vpred_n = np.concatenate(vpreds)
vf.fit(ob_no, vtarg_n)
# Policy update, plus diagnostics stuff. Is there a better way to handle
# the continuous vs discrete control cases?
if continuous_control:
surr_loss, oldmean_na, oldlogstd_a = policyfn.update_policy(
ob_no, ac_n, std_adv_n, stepsize)
kl, ent = policyfn.kldiv_and_entropy(ob_no, oldmean_na, oldlogstd_a)
else:
surr_loss, oldlogits_na = policyfn.update_policy(
ob_no, ac_n, std_adv_n, stepsize)
kl, ent = policyfn.kldiv_and_entropy(ob_no, oldlogits_na)
# A step size heuristic to ensure that we don't take too large steps.
if args.use_kl_heuristic:
if kl > args.desired_kl * 2:
stepsize /= 1.5
print('PG stepsize -> %s' % stepsize)
elif kl < args.desired_kl / 2:
stepsize *= 1.5
print('PG stepsize -> %s' % stepsize)
else:
print('PG stepsize OK')
# Log diagnostics
if i % args.log_every_t_iter == 0:
logz.log_tabular("EpRewMean", np.mean([path["reward"].sum() for path in paths]))
logz.log_tabular("EpLenMean", np.mean([utils.pathlength(path) for path in paths]))
logz.log_tabular("KLOldNew", kl)
logz.log_tabular("Entropy", ent)
logz.log_tabular("EVBefore", utils.explained_variance_1d(vpred_n, vtarg_n))
logz.log_tabular("EVAfter", utils.explained_variance_1d(vf.predict(ob_no), vtarg_n))
logz.log_tabular("SurrogateLoss", surr_loss)
logz.log_tabular("TimestepsSoFar", total_timesteps)
# If you're overfitting, EVAfter will be way larger than EVBefore.
# Note that we fit the value function AFTER using it to compute the
# advantage function to avoid introducing bias
logz.dump_tabular()
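# Example invocation (a sketch, assuming MuJoCo and the bundled utils package are installed):
#   python vpg_half.py HalfCheetah-v2 --vf_type nn --use_kl_heuristic --do_not_save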
if __name__ == "__main__":
p = argparse.ArgumentParser()
p.add_argument('envname', default="HalfCheetah-v2", type=str)
p.add_argument('--render', action='store_true')
p.add_argument('--do_not_save', action='store_true')
p.add_argument('--use_kl_heuristic', action='store_true')
p.add_argument('--n_iter', type=int, default=500)
p.add_argument('--seed', type=int, default=0)
p.add_argument('--gamma', type=float, default=0.97)
p.add_argument('--desired_kl', type=float, default=2e-3)
p.add_argument('--min_timesteps_per_batch', type=int, default=2500)
p.add_argument('--initial_stepsize', type=float, default=1e-3)
p.add_argument('--log_every_t_iter', type=int, default=1)
p.add_argument('--vf_type', type=str, default='linear')
p.add_argument('--nnvf_epochs', type=int, default=20)
p.add_argument('--nnvf_ssize', type=float, default=1e-3)
args = p.parse_args()
# Handle value function type and the log directory (and save the args!).
assert args.vf_type == 'linear' or args.vf_type == 'nn'
vf_params = {}
outstr = 'linearvf-kl' + str(args.desired_kl)
if args.vf_type == 'nn':
vf_params = dict(n_epochs=args.nnvf_epochs, stepsize=args.nnvf_ssize)
outstr = 'nnvf-kl' + str(args.desired_kl)
outstr += '-seed' + str(args.seed).zfill(2)
logdir = 'outputs/' + args.envname + '/' + outstr
if args.do_not_save:
logdir = None
logz.configure_output_dir(logdir)
if logdir is not None:
with open(logdir + '/args.pkl', 'wb') as f:
pickle.dump(args, f)
print("Saving in logdir: {}".format(logdir))
# Other stuff for seeding and getting things set up.
tf.set_random_seed(args.seed)
np.random.seed(args.seed)
env = gym.make(args.envname)
continuous = True
if 'discrete' in str(type(env.action_space)).lower():
# A bit of a hack, is there a better way to do this? Another option
# could be following Jonathan Ho's code and detecting spaces.Box?
continuous = False
print("Continuous control? {}".format(continuous))
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1,
intra_op_parallelism_threads=1)
sess = tf.Session(config=tf_config)
run_vpg(args, vf_params, logdir, env, sess, continuous)
| [] |
2024-01-10 | yang-xy20/async_mappo | onpolicy~envs~env_wrappers.py | """
Modified from OpenAI Baselines code to work with multi-agent envs
"""
import numpy as np
import torch
import multiprocessing as mp
from multiprocessing import Process, Pipe
from abc import ABC, abstractmethod
from onpolicy.utils.util import tile_images
from icecream import ic
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class ShareVecEnv(ABC):
"""
An abstract asynchronous, vectorized environment.
Used to batch data from multiple copies of an environment, so that
    each observation becomes a batch of observations, and the expected action is a batch of
    actions to be applied per-environment.
"""
closed = False
viewer = None
metadata = {
'render.modes': ['human', 'rgb_array']
}
def __init__(self, num_envs, observation_space, share_observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.share_observation_space = share_observation_space
self.action_space = action_space
@abstractmethod
def reset(self):
"""
Reset all the environments and return an array of
observations, or a dict of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
@abstractmethod
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
@abstractmethod
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a dict of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close_extras(self):
"""
Clean up the extra resources, beyond what's in this base class.
Only runs when not self.closed.
"""
pass
def close(self):
if self.closed:
return
if self.viewer is not None:
self.viewer.close()
self.close_extras()
self.closed = True
def step(self, actions):
"""
Step the environments synchronously.
This is available for backwards compatibility.
"""
self.step_async(actions)
return self.step_wait()
def render(self, mode='human'):
imgs = self.get_images()
bigimg = tile_images(imgs)
if mode == 'human':
self.get_viewer().imshow(bigimg)
return self.get_viewer().isopen
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
def get_images(self):
"""
Return RGB images from each environment
"""
raise NotImplementedError
@property
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
def get_viewer(self):
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
return self.viewer
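# Each worker function below runs a single environment inside a child process and services
# commands ('step', 'reset', 'render', 'close', ...) arriving over its end of a
# multiprocessing Pipe, sending the results back to the parent vectorized env.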
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if 'bool' in done.__class__.__name__:
if done:
ob = env.reset()
else:
if np.all(done):
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send((ob))
elif cmd == 'render':
if data == "rgb_array":
fr = env.render(mode=data)
remote.send(fr)
elif data == "human":
env.render(mode=data)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.share_observation_space, env.action_space))
elif cmd == 'get_max_step':
remote.send((env.max_steps))
else:
raise NotImplementedError
class GuardSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = False # could cause zombie process
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[0].recv()
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
obs = [remote.recv() for remote in self.remotes]
return np.stack(obs)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
class SubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[0].recv()
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
obs = [remote.recv() for remote in self.remotes]
return np.stack(obs)
def get_max_step(self):
for remote in self.remotes:
remote.send(('get_max_step', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def render(self, mode="rgb_array"):
for remote in self.remotes:
remote.send(('render', mode))
if mode == "rgb_array":
frame = [remote.recv() for remote in self.remotes]
return np.stack(frame)
def shareworker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, s_ob, reward, done, info, available_actions = env.step(data)
if 'bool' in done.__class__.__name__:
if done:
ob, s_ob, available_actions = env.reset()
else:
if np.all(done):
ob, s_ob, available_actions = env.reset()
remote.send((ob, s_ob, reward, done, info, available_actions))
elif cmd == 'reset':
ob, s_ob, available_actions = env.reset()
remote.send((ob, s_ob, available_actions))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'render':
if data == "rgb_array":
fr = env.render(mode=data)
remote.send(fr)
elif data == "human":
env.render(mode=data)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'get_spaces':
remote.send(
(env.observation_space, env.share_observation_space, env.action_space))
elif cmd == 'render_vulnerability':
fr = env.render_vulnerability(data)
remote.send((fr))
else:
raise NotImplementedError
class ShareSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=shareworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[0].recv(
)
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, share_obs, rews, dones, infos, available_actions = zip(*results)
return np.stack(obs), np.stack(share_obs), np.stack(rews), np.stack(dones), infos, np.stack(available_actions)
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
results = [remote.recv() for remote in self.remotes]
obs, share_obs, available_actions = zip(*results)
return np.stack(obs), np.stack(share_obs), np.stack(available_actions)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def infoworker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if 'bool' in done.__class__.__name__:
if done:
ob, info = env.reset()
else:
if np.all(done):
ob, info = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob, info = env.reset()
remote.send((ob, info))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'render':
if data == "rgb_array":
fr = env.render(mode=data)
remote.send(fr)
elif data == "human":
env.render(mode=data)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'get_spaces':
remote.send(
(env.observation_space, env.share_observation_space, env.action_space))
elif cmd == 'get_short_term_goal':
fr = env.get_short_term_goal(data)
remote.send(fr)
elif cmd == 'get_short_term_action':
fr = env.get_short_term_action(data)
remote.send(fr)
else:
raise NotImplementedError
class InfoSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
#self.envs = [fn() for fn in env_fns]
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self._mp_ctx = mp.get_context("forkserver")
self.remotes, self.work_remotes = zip(
*[self._mp_ctx.Pipe(duplex=True) for _ in range(nenvs)])
self.ps = [self._mp_ctx.Process(target=infoworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[0].recv(
)
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
results = [remote.recv() for remote in self.remotes]
obs, infos = zip(*results)
return np.stack(obs), np.stack(infos)
def get_short_term_goal(self, data):
for remote, da in zip(self.remotes, data):
remote.send(('get_short_term_goal', da))
return np.stack([remote.recv() for remote in self.remotes])
def get_short_term_action(self, data):
for remote, da in zip(self.remotes, data):
remote.send(('get_short_term_action', da))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def render(self, mode="human"):
for remote in self.remotes:
remote.send(('render', mode))
if mode == "rgb_array":
frame = [remote.recv() for remote in self.remotes]
return np.stack(frame)
def choosesimpleworker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset(data)
remote.send((ob))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'render':
if data == "rgb_array":
fr = env.render(mode=data)
remote.send(fr)
elif data == "human":
env.render(mode=data)
elif cmd == 'get_spaces':
remote.send(
(env.observation_space, env.share_observation_space, env.action_space))
elif cmd == 'get_max_step':
remote.send((env.max_steps))
else:
raise NotImplementedError
class ChooseSimpleSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=choosesimpleworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[0].recv()
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self, reset_choose):
for remote, choose in zip(self.remotes, reset_choose):
remote.send(('reset', choose))
obs = [remote.recv() for remote in self.remotes]
return np.stack(obs)
def render(self, mode="rgb_array"):
for remote in self.remotes:
remote.send(('render', mode))
if mode == "rgb_array":
frame = [remote.recv() for remote in self.remotes]
return np.stack(frame)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def get_max_step(self):
for remote in self.remotes:
remote.send(('get_max_step', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def chooseworker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, s_ob, reward, done, info, available_actions = env.step(data)
remote.send((ob, s_ob, reward, done, info, available_actions))
elif cmd == 'reset':
ob, s_ob, available_actions = env.reset(data)
remote.send((ob, s_ob, available_actions))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'render':
if data == "rgb_array":
fr = env.render(mode=data)
remote.send(fr)
elif data == "human":
env.render(mode=data)
elif cmd == 'get_spaces':
remote.send(
(env.observation_space, env.share_observation_space, env.action_space))
else:
raise NotImplementedError
class ChooseSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=chooseworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[0].recv(
)
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, share_obs, rews, dones, infos, available_actions = zip(*results)
return np.stack(obs), np.stack(share_obs), np.stack(rews), np.stack(dones), infos, np.stack(available_actions)
def reset(self, reset_choose):
for remote, choose in zip(self.remotes, reset_choose):
remote.send(('reset', choose))
results = [remote.recv() for remote in self.remotes]
obs, share_obs, available_actions = zip(*results)
return np.stack(obs), np.stack(share_obs), np.stack(available_actions)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def chooseguardworker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset(data)
remote.send((ob))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'render':
if data == "rgb_array":
fr = env.render(mode=data)
remote.send(fr)
elif data == "human":
env.render(mode=data)
elif cmd == 'get_spaces':
remote.send(
(env.observation_space, env.share_observation_space, env.action_space))
else:
raise NotImplementedError
class ChooseGuardSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=chooseguardworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
            p.daemon = False  # non-daemonic: worker processes are not killed automatically if the main process exits (may leave stray processes)
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[0].recv(
)
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self, reset_choose):
for remote, choose in zip(self.remotes, reset_choose):
remote.send(('reset', choose))
obs = [remote.recv() for remote in self.remotes]
return np.stack(obs)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def chooseinfoworker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob, info = env.reset(data)
remote.send((ob, info))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'render':
if data == "rgb_array":
fr = env.render(mode=data)
remote.send(fr)
elif data == "human":
env.render(mode=data)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'get_spaces':
remote.send(
(env.observation_space, env.share_observation_space, env.action_space))
elif cmd == 'get_short_term_goal':
fr = env.get_short_term_goal(data)
remote.send(fr)
elif cmd == 'get_short_term_action':
fr = env.get_short_term_action(data)
remote.send(fr)
else:
raise NotImplementedError
class ChooseInfoSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self._mp_ctx = mp.get_context("forkserver")
self.remotes, self.work_remotes = zip(
*[self._mp_ctx.Pipe(duplex=True) for _ in range(nenvs)])
self.ps = [self._mp_ctx.Process(target=chooseinfoworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[0].recv(
)
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self, reset_choose):
for remote, choose in zip(self.remotes, reset_choose):
remote.send(('reset', choose))
results = [remote.recv() for remote in self.remotes]
obs, infos = zip(*results)
return np.stack(obs), np.stack(infos)
def get_short_term_goal(self, data):
for remote, da in zip(self.remotes, data):
remote.send(('get_short_term_goal', da))
return np.stack([remote.recv() for remote in self.remotes])
def get_short_term_action(self, data):
for remote, da in zip(self.remotes, data):
remote.send(('get_short_term_action', da))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def render(self, mode="human"):
for remote in self.remotes:
remote.send(('render', mode))
if mode == "rgb_array":
frame = [remote.recv() for remote in self.remotes]
return np.stack(frame)
# single env
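# DummyVecEnv and the variants below keep every environment in the parent process and step
# them sequentially; they expose the same interface as the subprocess versions without the
# multiprocessing overhead.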
class DummyVecEnv(ShareVecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
ShareVecEnv.__init__(self, len(
env_fns), env.observation_space, env.share_observation_space, env.action_space)
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]
obs, rews, dones, infos = map(np.array, zip(*results))
for (i, done) in enumerate(dones):
if 'bool' in done.__class__.__name__:
if done:
obs[i] = self.envs[i].reset()
else:
if np.all(done):
obs[i] = self.envs[i].reset()
self.actions = None
return obs, rews, dones, infos
def reset(self):
obs = [env.reset() for env in self.envs]
return np.array(obs)
def get_max_step(self):
return [env.max_steps for env in self.envs]
def close(self):
for env in self.envs:
env.close()
def render(self, mode="human", playeridx=None):
if mode == "rgb_array":
            if playeridx is None:
return np.array([env.render(mode=mode) for env in self.envs])
else:
return np.array([env.render(mode=mode, playeridx=playeridx) for env in self.envs])
elif mode == "human":
for env in self.envs:
                if playeridx is None:
env.render(mode=mode)
else:
env.render(mode=mode, playeridx=playeridx)
else:
raise NotImplementedError
class ShareDummyVecEnv(ShareVecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
ShareVecEnv.__init__(self, len(
env_fns), env.observation_space, env.share_observation_space, env.action_space)
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]
obs, share_obs, rews, dones, infos, available_actions = map(
np.array, zip(*results))
for (i, done) in enumerate(dones):
if 'bool' in done.__class__.__name__:
if done:
obs[i], share_obs[i], available_actions[i] = self.envs[i].reset()
else:
if np.all(done):
obs[i], share_obs[i], available_actions[i] = self.envs[i].reset()
self.actions = None
return obs, share_obs, rews, dones, infos, available_actions
def reset(self):
results = [env.reset() for env in self.envs]
obs, share_obs, available_actions = map(np.array, zip(*results))
return obs, share_obs, available_actions
def close(self):
for env in self.envs:
env.close()
def render(self, mode="human"):
if mode == "rgb_array":
return np.array([env.render(mode=mode) for env in self.envs])
elif mode == "human":
for env in self.envs:
env.render(mode=mode)
else:
raise NotImplementedError
class InfoDummyVecEnv(ShareVecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
ShareVecEnv.__init__(self, len(
env_fns), env.observation_space, env.share_observation_space, env.action_space)
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]
obs, rews, dones, infos = map(
np.array, zip(*results))
for (i, done) in enumerate(dones):
if 'bool' in done.__class__.__name__:
if done:
obs[i], infos[i] = self.envs[i].reset()
else:
if np.all(done):
obs[i], infos[i] = self.envs[i].reset()
self.actions = None
return obs, rews, dones, infos
def reset(self):
results = [env.reset() for env in self.envs]
obs, infos = map(np.array, zip(*results))
return obs, infos
def close(self):
for env in self.envs:
env.close()
def render(self, mode="human"):
if mode == "rgb_array":
return np.array([env.render(mode=mode) for env in self.envs])
elif mode == "human":
for env in self.envs:
env.render(mode=mode)
else:
raise NotImplementedError
def get_short_term_action(self, data):
return [env.get_short_term_action(d) for d, env in zip(data, self.envs)]
class ChooseDummyVecEnv(ShareVecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
ShareVecEnv.__init__(self, len(
env_fns), env.observation_space, env.share_observation_space, env.action_space)
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]
obs, share_obs, rews, dones, infos, available_actions = map(
np.array, zip(*results))
self.actions = None
return obs, share_obs, rews, dones, infos, available_actions
def reset(self, reset_choose):
results = [env.reset(choose)
for (env, choose) in zip(self.envs, reset_choose)]
obs, share_obs, available_actions = map(np.array, zip(*results))
return obs, share_obs, available_actions
def close(self):
for env in self.envs:
env.close()
def render(self, mode="human"):
if mode == "rgb_array":
return np.array([env.render(mode=mode) for env in self.envs])
elif mode == "human":
for env in self.envs:
env.render(mode=mode)
else:
raise NotImplementedError
class ChooseSimpleDummyVecEnv(ShareVecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
ShareVecEnv.__init__(self, len(
env_fns), env.observation_space, env.share_observation_space, env.action_space)
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]
obs, rews, dones, infos = map(np.array, zip(*results))
self.actions = None
return obs, rews, dones, infos
def reset(self, reset_choose):
obs = [env.reset(choose)
for (env, choose) in zip(self.envs, reset_choose)]
return np.array(obs)
def close(self):
for env in self.envs:
env.close()
def get_max_step(self):
return [env.max_steps for env in self.envs]
def render(self, mode="human"):
if mode == "rgb_array":
return np.array([env.render(mode=mode) for env in self.envs])
elif mode == "human":
for env in self.envs:
env.render(mode=mode)
else:
raise NotImplementedError
class ChooseInfoDummyVecEnv(ShareVecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
ShareVecEnv.__init__(self, len(env_fns), env.observation_space,
env.share_observation_space, env.action_space)
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]
obs, rews, dones, infos = map(np.array, zip(*results))
self.actions = None
return obs, rews, dones, infos
def reset(self, reset_choose):
results = [env.reset(choose)
for (env, choose) in zip(self.envs, reset_choose)]
obs, infos = map(np.array, zip(*results))
return obs, infos
def close(self):
for env in self.envs:
env.close()
def render(self, mode="human", short_goal=None):
if mode == "rgb_array":
return np.array([env.render(mode=mode, short_goal_pos=sg) for sg, env in zip(short_goal, self.envs)])
elif mode == "human":
for sg, env in zip(short_goal, self.envs):
env.render(mode=mode, short_goal_pos=sg)
else:
raise NotImplementedError
def get_short_term_goal(self, data):
return [env.get_short_term_goal(d) for d, env in zip(data, self.envs)]
def get_short_term_action(self, data):
return [env.get_short_term_action(d) for d, env in zip(data, self.envs)]
def ft_get_short_term_goals(self, args, mode=""):
mode_list = ['apf', 'utility', 'nearest', 'rrt', 'voronoi']
assert mode in mode_list, (f"frontier global mode should be in {mode_list}, but get {mode}")
results = [env.ft_get_short_term_goals(args, mode=mode) for env in self.envs]
return results
def ft_get_short_term_actions(self, all_goals, mode, radius):
return [
np.array(env.ft_get_short_term_actions(env_goals, mode, radius))
for env_goals, env in zip(all_goals, self.envs)
]
| [] |
2024-01-10 | ugochukwu-850/Falcon | database.py | import os
from pymongo import MongoClient
from langchain.document_loaders import UnstructuredPDFLoader, UnstructuredWordDocumentLoader
client_key = os.getenv("MONGODB_KEY")
client = MongoClient(client_key)
client = client.falcon
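# note: `client` now refers to the "falcon" database handle, not the MongoClient itself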
# init collections
users = client["users"]
question_cursor = client["questions"]
drafts_cursor = client["drafts"]
files = client["files"]
# init models
def serialize_Files(file, many=False):
if many:
response = []
for x in file:
response.append(serialize_Files(x))
return response
return {
"class": file["class"],
"name": file["name"],
"teacher": file["teacher"] if "teacher" in file else "[email protected]",
"created": file["created"],
"id": file["id"]
}
# serializers
def serialize_user(user) -> dict:
return{
"id": str(user["_id"]),
"name": user["name"],
"email": user["email"],
"profile_pic_url": user["profile_pic_url"],
"total_questions": user["total_questions"],
"total_files": user["total_files"],
"total_drafts": user["total_drafts"],
"credentials": user["credentials"]
}
def serialize_Questions(question, many=False):
if many:
return [serialize_Questions(x) for x in question]
return{
"_id": str(question["_id"]),
"title": question["title"],
"created": question["created"],
"question_id": question["question_id"],
"link": f"https://docs.google.com/forms/d/{question['question_id']}/edit"
}
def main():
...
if __name__ == "__main__":
main()
| [] |
2024-01-10 | Jakob-98/mono | python~LLM_and_prompts~technical_qa_generator_from_pdf.py | from langchain.text_splitter import CharacterTextSplitter
import os
import PyPDF2
import openai
import json
def get_text_chunks(text):
text_splitter = CharacterTextSplitter(
separator="\n", chunk_size=3000, chunk_overlap=400, length_function=len
)
chunks = text_splitter.split_text(text)
return chunks
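# chunk_size and chunk_overlap are measured in characters (length_function=len), so chunks
# come out at roughly 3000 characters with about 400 characters of overlap between neighbours.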
def read_pdf(file_path):
pdf_text = ""
with open(file_path, 'rb') as pdf_file:
pdf_reader = PyPDF2.PdfReader(pdf_file)
number_of_pages = len(pdf_reader.pages)
for page_num in range(number_of_pages):
page = pdf_reader.pages[page_num]
page_text = page.extract_text()
pdf_text += page_text
return pdf_text
def pdfs_from_folder(folder_path):
pdf_texts = [] # List to store the text content of each PDF
for filename in os.listdir(folder_path):
if filename.endswith('.pdf'):
file_path = os.path.join(folder_path, filename)
pdf_text = read_pdf(file_path)
pdf_texts.append(pdf_text)
return pdf_texts
SYSTEM_PROMPT = """
You are an AI whose purpose it is to generate question and answer pairs.
It is crucial these question answer pairs are specfic to the context the USER will give you and are related to TECHNICAL content, such that these question answer pairs cannot be retrieved otherwise. DO NOT make up questions and answers that are not related to the context the USER will give you, this will be heavily penalized.
If no technical question can be formulated, it is acceptable to return none. You are expected to return the question pair in JSON like so:
{
"question": "What is the operating pressure of TK-3413?",
"answer": "The operating pressure is 1.5 bar."
}
Examples:
USER:
"TK-3413 is a pressure vessel that is used to store water. It is used in the production of the Ford F-150. The operating pressure is 1.5 bar."
AI:
{
"question": "What is the operating pressure of TK-3413?",
"answer": "The operating pressure is 1.5 bar."
}
USER:
"The captial of France Paris, in Paris lays the Eiffel Tower. The Eiffel Tower is 324 meters tall."
AI:
{
"question": "NONE", # No technical question can be formulated, and any search engine can retrieve this information, so None must be returned.
"answer": "NONE."
}
"""
openai.api_type = "azure"
openai.api_key = "YOUR_KEY"
openai.api_base = "YOUR_ENDPOINT"
openai.api_version = "2023-07-01-preview"
def chat_complete(messages):
return openai.ChatCompletion.create(
engine="gpt4-32k-aoai-caneast",
messages = messages,
temperature=0.1,
max_tokens=800,
top_p=0.95,
frequency_penalty=0,
presence_penalty=0,
stop=None)
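# `engine` is the Azure OpenAI *deployment* name (not the model family); it has to match a
# deployment that exists in the resource pointed to by openai.api_base.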
get_messages = lambda m: [
{
"role": "system",
"content": SYSTEM_PROMPT
},
{
"role": "user",
"content": f"USER: {m}"
}
]
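# e.g. get_messages("TK-3413 stores water at 1.5 bar") produces
# [{"role": "system", "content": SYSTEM_PROMPT},
#  {"role": "user", "content": "USER: TK-3413 stores water at 1.5 bar"}]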
if __name__ == "__main__":
folder_path = "./"
all_pdf_texts = pdfs_from_folder(folder_path)
qa_pairs = []
for chunk in get_text_chunks(all_pdf_texts[0])[0:100]: #NOTE: notice the limit
response = chat_complete(get_messages(chunk))
try:
response = json.loads(response['choices'][0]['message']["content"])
except:
continue
qa_pairs.append(response)
    print(qa_pairs)
| [
"\nYou are an AI whose purpose it is to generate question and answer pairs.\n\nIt is crucial these question answer pairs are specfic to the context the USER will give you and are related to TECHNICAL content, such that these question answer pairs cannot be retrieved otherwise. DO NOT make up questions and answers that are not related to the context the USER will give you, this will be heavily penalized.\n\nIf no technical question can be formulated, it is acceptable to return none. You are expected to return the question pair in JSON like so:\n\n{\n \"question\": \"What is the operating pressure of TK-3413?\",\n \"answer\": \"The operating pressure is 1.5 bar.\"\n}\n\nExamples:\nUSER:\n\"TK-3413 is a pressure vessel that is used to store water. It is used in the production of the Ford F-150. The operating pressure is 1.5 bar.\"\nAI:\n{\n \"question\": \"What is the operating pressure of TK-3413?\",\n \"answer\": \"The operating pressure is 1.5 bar.\"\n}\nUSER:\n\"The captial of France Paris, in Paris lays the Eiffel Tower. The Eiffel Tower is 324 meters tall.\"\nAI:\n{\n \"question\": \"NONE\", # No technical question can be formulated, and any search engine can retrieve this information, so None must be returned.\n \"answer\": \"NONE.\"\n}\n\n",
"USER: PLACEHOLDER"
] |
2024-01-10 | Jakob-98/mono | wip_bin~python~rageval~scripts~gen_qa_pairs.py | from langchain.text_splitter import CharacterTextSplitter
import os
import PyPDF2
import openai
def get_text_chunks(text):
text_splitter = CharacterTextSplitter(
separator="\n", chunk_size=3000, chunk_overlap=400, length_function=len
)
chunks = text_splitter.split_text(text)
return chunks
def read_pdf(file_path):
pdf_text = ""
with open(file_path, 'rb') as pdf_file:
pdf_reader = PyPDF2.PdfReader(pdf_file)
number_of_pages = len(pdf_reader.pages)
for page_num in range(number_of_pages):
page = pdf_reader.pages[page_num]
page_text = page.extract_text()
pdf_text += page_text
return pdf_text
def pdfs_from_folder(folder_path):
pdf_texts = [] # List to store the text content of each PDF
for filename in os.listdir(folder_path):
if filename.endswith('.pdf'):
if not "Ford" in filename:
continue
file_path = os.path.join(folder_path, filename)
pdf_text = read_pdf(file_path)
pdf_texts.append(pdf_text)
return pdf_texts
SYSTEM_PROMPT = """
You are an AI assistant part of a system designed to generate question-answer pairs for domain specific documents. The purpose is to extract a factual question and answer relevant to the information in a given document. You should also rate the relevance of the question on a scale of 0 to 1. If the given document has no factual information, generate a question with a relevance of 0. Your answer must adhere to the JSON structure provided in the example.
Example INPUT: "The Eiffel Tower, located in Paris, France, is one of the most iconic landmarks in the world. Designed by the French engineer Gustave Eiffel and completed in 1889. The Eiffel Tower has three levels, with restaurants on the first and second levels and an observation deck at the top. The tower is 330 meters (1,083 feet) tall, including its antennas, and was the tallest man-made structure in the world when it was completed."
Example OUTPUT: "{
"question": "what is the length of the eiffel tower?"
"answer": "the length of the eiffel tower is 330 meters"
"relevance": 1
}"
"""
openai.api_type = "azure"
openai.api_key = ...
openai.api_base = "https://aml-testopenai-jakob-aoai.openai.azure.com/"
openai.api_version = "2023-07-01-preview"
def chat_complete(messages):
return openai.ChatCompletion.create(
engine="gpt35deployment",
messages = messages,
temperature=0.7,
max_tokens=800,
top_p=0.95,
frequency_penalty=0,
presence_penalty=0,
stop=None)
get_messages = lambda m: [
{
"role": "system",
"content": SYSTEM_PROMPT
},
{
"role": "user",
"content": f"INPUT: {m}\n OUTPUT:"
}
]
if __name__ == "__main__":
folder_path = "./data/pdf"
all_pdf_texts = pdfs_from_folder(folder_path)
for chunk in get_text_chunks(all_pdf_texts[0])[0:10]:
print(chat_complete(get_messages(chunk)))
| [
"INPUT: PLACEHOLDER\n OUTPUT:",
"\nYou are an AI assistant part of a system designed to generate question-answer pairs for domain specific documents. The purpose is to extract a factual question and answer relevant to the information in a given document. You should also rate the relevance of the question on a scale of 0 to 1. If the given document has no factual information, generate a question with a relevance of 0. Your answer must adhere to the JSON structure provided in the example. \n\nExample INPUT: \"The Eiffel Tower, located in Paris, France, is one of the most iconic landmarks in the world. Designed by the French engineer Gustave Eiffel and completed in 1889. The Eiffel Tower has three levels, with restaurants on the first and second levels and an observation deck at the top. The tower is 330 meters (1,083 feet) tall, including its antennas, and was the tallest man-made structure in the world when it was completed.\"\n\nExample OUTPUT: \"{\n \"question\": \"what is the length of the eiffel tower?\"\n \"answer\": \"the length of the eiffel tower is 330 meters\"\n \"relevance\": 1\n}\"\n"
] |
2024-01-10 | moua0061/faq-ai-chat | beam-app~run-nyt.py | import requests
from bs4 import BeautifulSoup
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores.faiss import FAISS
from langchain.docstore.document import Document
from langchain.prompts import PromptTemplate
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
# We'll save our headlines to this path
file_path = "/workspace/transcript.txt"
# Download headlines from NYT
def download_headlines():
res = requests.get("https://www.nytimes.com")
soup = BeautifulSoup(res.content, "html.parser")
# Grab all headlines
headlines = soup.find_all("h3", class_="indicate-hover", text=True)
parsed_headlines = []
for h in headlines:
parsed_headlines.append(h.get_text())
print(parsed_headlines)
# Write headlines to a text file
with open(file_path, "w") as f:
f.write(str(parsed_headlines))
f.close()
# Answer questions about the headlines
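# (the saved text is split into chunks, embedded with OpenAIEmbeddings, the chunks most
# similar to the query are pulled from a FAISS index, and a "stuff" QA chain answers over them)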
def start_conversation(**inputs):
# Grab the input from the API
query = inputs["query"]
# Download headlines from nytimes.com and save to the file path above
download_headlines()
with open(file_path) as f:
saved_file = f.read()
# Split the text to conform to maximum number of tokens
text_splitter = CharacterTextSplitter(
separator="\n\n",
chunk_size=1000,
chunk_overlap=200,
length_function=len,
)
texts = text_splitter.split_text(saved_file)
embeddings = OpenAIEmbeddings()
docsearch = FAISS.from_texts(texts, embeddings)
docs = docsearch.similarity_search(query)
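    # chain_type="stuff" simply concatenates ("stuffs") the retrieved chunks into a
    # single prompt for the LLM, which works here because similarity_search only
    # returns a handful of headline chunks.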
chain = load_qa_chain(OpenAI(temperature=0), chain_type="stuff")
res = chain(
{"input_documents": docs, "question": query}, return_only_outputs=True
)
print(res)
return {"pred": res}
if __name__ == "__main__":
# # You can customize this query however you want:
# # For example: What happened in Washington today?
query = "Give me a summary of today's news"
print(start_conversation(query=query))
| [] |
2024-01-10 | shobhitag11/Azure_chatGPT | chatGPT.py | import os
import openai
from langchain.llms import AzureOpenAI
from langchain.chains import RetrievalQA
ENDPOINT = "https://<project_id>.openai.azure.com"
API_KEY = ""
DEPLOYMENT_NAME = "text-davinci-003"#gpt-35-turbo-16k, gpt-35-turbo, gpt-4-32k, gpt-4
API_TYPE = "azure"
API_VERSION = "2022-12-01"
os.environ["OPENAI_API_KEY"] = API_KEY
os.environ["OPENAI_API_VERSION"] = API_VERSION
openai.api_type = API_TYPE
openai.api_version = API_VERSION
openai.api_base = ENDPOINT
openai.api_key = API_KEY
qa = RetrievalQA.from_chain_type(llm=AzureOpenAI(temperature=0.1, deployment_name=DEPLOYMENT_NAME), chain_type="stuff")
query = "How to Manage Work-life balance at corporate?"
print(qa.run(query))
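# NOTE: as written, RetrievalQA.from_chain_type() above is missing a retriever and
# is expected to raise a validation error. Below is a minimal sketch of wiring one
# in; the embedding deployment name and the indexed texts are illustrative
# assumptions, not part of the original project.
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

example_texts = [
    "Take regular breaks and set clear boundaries between work and personal time.",
    "Discuss workload expectations openly with your manager.",
]
vectorstore = FAISS.from_texts(
    example_texts,
    OpenAIEmbeddings(deployment="text-embedding-ada-002"),  # assumed Azure embedding deployment
)
qa_with_retriever = RetrievalQA.from_chain_type(
    llm=AzureOpenAI(temperature=0.1, deployment_name=DEPLOYMENT_NAME),
    chain_type="stuff",
    retriever=vectorstore.as_retriever(),
)
print(qa_with_retriever.run(query))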
| [] |
2024-01-10 | maxcodl/Website_Categorizer | bulk_test.py | import openai
import requests
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import time
import pandas as pd
import re
# Set your OpenAI API key
openai.api_key = 'OPENAI_API_KEY'
model_engine = "gpt-3.5-turbo-16k"
categories_keywords = {
"Jobs & Education": ["Education", "Colleges & Universities", "Distance Learning","Homeschooling", "Primary & Secondary Schooling (K-12)","Standardized & Admissions Tests", "Teaching & Classroom Resources","Training & Certification","Vocational & Continuing Education","Jobs", "Career Resources & Planning","Job Listings","Resumes & Portfolios"],
"Law & Government": ["Government","Courts & Judiciary","Visa & Immigration","Legal","Bankruptcy","Legal Education","Legal Services","Military","Public Safety","Crime & Justice", "Emergency Services", "Law Enforcement", "Security Products & Services", "Social Services"],
"Arts & Entertainment": [ "Anime & Manga","Acting & Theater","Architecture","Art Museums & Galleries","Bars, Clubs & Nightlife","Cartoons","CD & Audio Shopping","Circus","Classical Music","Comics","Concerts & Music Festivals","Country Music","Dance","Dance & Electronic Music","Design","Experimental & Industrial Music","Expos & Conventions","Film & TV Industry","Film Festivals","Flash-Based Entertainment","Fun Tests & Silly Surveys","Funny Pictures & Videos","Jazz & Blues","Magic","Movie Listings & Theater Showtimes","Music Education & Instruction","Music Equipment & Technology","Music Reference","Music Streams & Downloads","Music Videos","Occult & Paranormal","Online Image Galleries","Online Video","Opera","Painting","Photographic & Digital Arts","Political Humor","Pop Music","Radio","Recording Industry","Religious Music","Rock Music","Soundtracks","TV Commercials","TV Shows & Programs","Urban & Hip-Hop","World Music"],
"Adult": ["Porn","Creampie","Lesbian","Hentai","Adult"],
"Autos & Vehicles": ["Bicycles & Accessories","Bike Parts & Repair","BMX Bikes","Boats & Watercraft","Campers & RVs","Cargo Trucks & Trailers","Classic Vehicles","Commercial Vehicles","Gas Prices & Vehicle Fueling","Hybrid & Alternative Vehicles","Motor Vehicles (By Type)","Motorcycles","Off-Road Vehicles","Trucks & SUVs","Used Vehicles","Vehicle Codes & Driving Laws","Vehicle Licensing & Registration","Vehicle Parts & Accessories","Vehicle Parts & Services","Vehicle Repair & Maintenance","Vehicle Shopping","Vehicle Shows"],
"Beauty & Fitness": ["Beauty Pageants","Body Art","Cosmetic Procedures","Cosmetology & Beauty Professionals","Face & Body Care","Fashion & Style","Fitness","Hair Care","Spas & Beauty Services","Weight Loss","Cosmetic Surgery","Hygiene & Toiletries","Make-Up & Cosmetics","Perfumes & Fragrances","Skin & Nail Care","Unwanted Body & Facial Hair Removal","Fashion Designers & Collections","Hair Loss","Massage Therapy"],
"Business & Industrial": ["Advertising & Marketing","Aerospace & Defense","Agriculture & Forestry","Automotive Industry","Business Education","Business Finance","Business Operations","Business Services","Chemicals Industry","Construction & Maintenance","Energy & Utilities","Hospitality Industry","Industrial Materials & Equipment","Manufacturing","Metals & Mining","Pharmaceuticals & Biotech","Printing & Publishing","Retail Trade","Small Business","Textiles & Nonwovens","Transportation & Logistics","Public Relations","Space Technology","Agricultural Equipment","Forestry","Livestock","Venture Capital","Business Plans & Presentations","Management","Consulting","Corporate Events","E-Commerce Services","Fire & Security Services","Office Services","Office Supplies","Writing & Editing Services","Cleaning Agents","Plastics & Polymers","Building Materials & Supplies","Electricity","Oil & Gas","Renewable & Alternative Energy","Event Planning","Food Service","Heavy Machinery","Precious Metals","Retail Equipment & Technology","MLM & Business Opportunities","Freight & Trucking","Mail & Package Delivery","Maritime Transport","Moving & Relocation","Packaging","Parking","Rail Transport","Urban Transport"],
"Computers & Electronics": ["CAD & CAM","Computer Hardware","Computer Security","Consumer Electronics","Electronics & Electrical","Enterprise Technology","Networking","Programming","Software","Computer Components","Computer Drives & Storage","Computer Peripherals","Desktop Computers","Laptops & Notebooks","Hacking & Cracking","Audio Equipment","Camera & Photo Equipment","Car Electronics","Drones & RC Aircraft","Game Systems & Consoles","GPS & Navigation","TV & Video Equipment","Electronic Components","Power Supplies","Data Management","Data Formats & Protocols","Network Monitoring & Management","VPN & Remote Access","Java","Business & Productivity Software","Device Drivers","Internet Software","Multimedia Software","Operating Systems","Software Utilities"],
"Finance": ["Accounting & Auditing","Banking","Credit & Lending","Financial Planning & Management","Grants, Scholarships & Financial Aid","Insurance","Investing","Billing & Invoicing","Tax Preparation & Planning","Credit Cards","Credit Reporting & Monitoring","Loans","Retirement & Pension","Study Grants & Scholarships","Health Insurance","Commodities & Futures Trading","Currencies & Foreign Exchange","Stocks & Bonds","M&A","M&A Advisory","Raise Funds"],
"Food & Drink": ["Beverages","Cooking & Recipes","Food","Food & Grocery Retailers","Restaurants","Alcoholic Beverages","Coffee & Tea","Juice","Soft Drinks","BBQ & Grilling","Desserts","Soups & Stews","Baked Goods","Breakfast Foods","Candy & Sweets","Grains & Pasta","Meat & Seafood","Snack Foods","Fast Food","Pizzerias","Restaurant Reviews & Reservations"],
"Games": ["Arcade & Coin-Op Games","Board Games","Card Games","Computer & Video Games","Family-Oriented Games & Activities","Gambling","Online Games","Puzzles & Brainteasers","Roleplaying Games","Table Games","Word Games","Chess & Abstract Strategy Games","Miniatures & Wargaming","Collectible Card Games","Poker & Casino Games","Casual Games","Driving & Racing Games","Fighting Games","Music & Dance Games","Sandbox Games","Shooter Games","Simulation Games","Sports Games","Strategy Games","Video Game Emulation","Drawing & Coloring","Dress-Up & Fashion Games","Lottery","Massively Multiplayer Games","Billiards"],
"Health": ["Aging & Geriatrics","Health Conditions","Health Education & Medical Training","Health Foundations & Medical Research","Medical Devices & Equipment","Medical Facilities & Services","Men's Health","Mental Health","Nursing","Nutrition","Oral & Dental Care","Pharmacy","Public Health","Reproductive Health","Substance Abuse","Vision Care","Women's Health","AIDS & HIV","Allergies","Arthritis","Cancer","Diabetes","Ear Nose & Throat","Eating Disorders","Endocrine Conditions","Genetic Disorders","Heart & Hypertension","Infectious Diseases","Neurological Conditions","Obesity","Pain Management","Respiratory Conditions","Skin Conditions","Sleep Disorders","Doctors' Offices","Hospitals & Treatment Centers","Medical Procedures","Physical Therapy","Anxiety & Stress","Depression","Assisted Living & Long Term Care","Special & Restricted Diets","Vitamins & Supplements","Drugs & Medications","Occupational Health & Safety","Drug & Alcohol Testing","Drug & Alcohol Treatment","Smoking & Smoking Cessation","Steroids & Performance-Enhancing Drugs","Eyeglasses & Contacts"],
"Hobbies & Leisure": ["Clubs & Organizations","Crafts","Merit Prizes & Contests","Outdoors","Paintball","Radio Control & Modeling","Special Occasions","Water Activities","Youth Organizations & Resources","Fiber & Textile Arts","Fishing","Hiking & Camping","Model Trains & Railroads","Holidays & Seasonal Events","Weddings","Boating","Surf & Swim"],
"Home & Garden": ["Bed & Bath","Domestic Services","Gardening & Landscaping","Home & Interior Decor","Home Appliances","Home Furnishings","Home Improvement","Home Safety & Security","Home Storage & Shelving","Home Swimming Pools, Saunas & Spas","HVAC & Climate Control","Kitchen & Dining","Laundry","Nursery & Playroom","Pest Control","Yard & Patio","Bathroom","Cleaning Services","Curtains & Window Treatments","Kitchen & Dining Furniture","Lamps & Lighting","Living Room Furniture","Rugs & Carpets","Construction & Power Tools","Doors & Windows","Flooring","House Painting & Finishing","Plumbing","Fireplaces & Stoves","Cookware & Diningware","Major Kitchen Appliances","Small Kitchen Appliances","Washers & Dryers","Lawn Mowers"],
"Internet & Telecom": ["Communications Equipment","Email & Messaging","Mobile & Wireless","Service Providers","Web Services","Radio Equipment","Text & Instant Messaging","Voice & Video Chat","Mobile & Wireless Accessories","Mobile Apps & Add-Ons","Mobile Phones","Cable & Satellite Providers","Domain Parking","Affiliate Programs","Web Design & Development"],
"Jobs & Education": ["Education","Jobs","Jobs & Education","Colleges & Universities","Distance Learning","Homeschooling","Primary & Secondary Schooling (K-12)","Standardized & Admissions Tests","Teaching & Classroom Resources","Training & Certification","Vocational & Continuing Education","Career Resources & Planning","Job Listings","Resumes & Portfolios"],
"News": ["Business News","Gossip & Tabloid News","Health News","Politics","Sports News","Weather","Company News","Financial Markets News","Scandals & Investigations"],
"Online Communities": ["Blogging Resources & Services","Dating & Personals","File Sharing & Hosting","Online Goodies","Photo & Video Sharing","Social Networks","Virtual Worlds","Matrimonial Services","Personals","Photo Rating Sites","Clip Art & Animated GIFs","Skins, Themes & Wallpapers","Social Network Apps & Add-Ons","Photo & Image Sharing"],
"People & Society": ["Family & Relationships","Kids & Teens","Religion & Belief","Seniors & Retirement","Social Issues & Advocacy","Social Sciences","Subcultures & Niche Interests","Family","Marriage","Troubled Relationships","Children's Interests","Teen Interests","Charity & Philanthropy","Discrimination & Identity Relations","Green Living & Environmental Issues","Human Rights & Liberties","Poverty & Hunger","Work & Labor Issues","Economics","Political Science","Psychology"],
"Articles": ["1 min read","2 min read","3 min read","4 min read","5 min read","6 min read","7 min read","8 min read","9 min read","10 min read","Medium app"],
"Professional Networking": ["Linkedin","open to work","people to see","job opportunities","Connect with people"],
}
# Create an empty list to store data
data = []
# List of websites to extract data from
websites = [
'websites'
]
# Create a pandas DataFrame to store data
columns = ["URL", "Request", "Response", "Extracted Data"]
df = pd.DataFrame(columns=columns)
def categorize_based_on_keywords(text, categories_keywords):
matched_categories = []
for category, keywords in categories_keywords.items():
matched_keywords = []
for keyword in keywords:
keyword_pattern = r'\b' + re.escape(keyword.lower()) + r'\b'
if re.search(keyword_pattern, text.lower()):
matched_keywords.append(keyword)
if len(matched_keywords) >= 3:
print(f"For category '{category}', found matching keywords: {matched_keywords}")
matched_categories.append(category)
if not matched_categories:
print(f"No keywords found for '{category}'.")
return matched_categories
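# Illustrative usage (hypothetical input text, not from a real crawl):
#   categorize_based_on_keywords(
#       "Distance learning, homeschooling and job listings for teachers",
#       categories_keywords,
#   )   # -> ["Jobs & Education"], since at least 3 of that category's keywords match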
# Function to extract text from a website
def extract_text_from_website(url):
try:
response = requests.get(url, timeout=10) # Set a timeout value in seconds
if response.status_code == 200:
soup = BeautifulSoup(response.content, 'html.parser')
all_text = ' '.join([element.get_text() for element in soup.find_all(['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'li', 'span'])])
return all_text
else:
return None
except requests.exceptions.Timeout:
print(f"Timeout error: Could not connect to {url}")
return None
except Exception as e:
print("Error:", e)
return None
# Iterate through the list of websites
for website in websites:
time.sleep(10)
url = f"https://{website}"
text_content = extract_text_from_website(url)
if text_content is not None:
categories = categorize_based_on_keywords(text_content, categories_keywords)
else:
categories = None
if categories:
print(f"Categories assigned based on keyword match: {categories}")
else:
print("No category was assigned based on keywords. Using OpenAI to categorize.")
# If no categories were found and text content is None, handle the error
if text_content is None:
text_content = " "
else:
text_content = " ".join(text_content.split())
# Limit the text content to 500 words
word_limit = 600
if len(text_content.split()) > word_limit:
text_content = " ".join(text_content.split()[:word_limit])
prompt = (
f"This is a website [{url}], and this is the content I extracted from the website: [{text_content}]. "
"Can you determine which category this website belongs to based on the content? "
"I would like the result in JSON format as follows: "
"'website': 'url', 'category': 'category_name'. "
"Please choose a category from the list provided below for accurate classification: "
"(Technology, Startup, Sales, Health, Business, Education, Finance, Web3, Human Resource, Generative AI, Others, Economy, Gen AI, HR, Law, Management, Productivity, Sales & Marketing, Stocks, Tech, VC & PE, Adult). "
"You can select multiple categories if you are familiar with the website. "
"You may also disregard the extracted data if necessary. "
"IMPORTANT: While showing the result, only show the json."
)
# Use OpenAI to generate the category
response = openai.ChatCompletion.create(
model=model_engine,
temperature=0.8,
top_p=1,
max_tokens=50,
presence_penalty=0,
frequency_penalty=0.57,
messages=[
{
"role": "system",
"content": "You are an expert in website categorization."
},
{
"role": "user",
"content": "This is a website [https://example.com], and this is the content I extracted from the website: [ ]. Can you determine which category this website belongs to based on the content? I would like the result in JSON format as follows: {'website': 'url', 'category': 'category_name'}. Please choose a category from the list provided below for accurate classification: (Technology, Startup, Sales, Health, Business, Education, Finance, Web3, Human Resource, Generative AI, Others, Economy, Gen AI, HR, Law, Management, Productivity, Sales & Marketing, Stocks, Tech, VC & PE, Entertainment, Adult). You can select multiple categories if you are familiar with the website. You may also disregard the extracted data if necessary. IMPORTANT: While showing the result, only show the json result."
},
{
"role": "assistant",
"content": "{\"website\": \"https://example.com\", \"category\": \"Others\"}"
},
{
"role": "user", "content": prompt
}
],
)
print("Prompt to OpenAI:", prompt)
generated_category = response['choices'][0]['message']['content']
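        # generated_category is the model's raw JSON string (e.g. '{"website": "...", "category": "..."}');
        # it is stored as-is below rather than parsed into a dict.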
data.append({"URL": url, "Request": prompt, "Generated Category": generated_category, "Category": categories, "Extracted Data": text_content})
# Convert the list of dictionaries to a DataFrame
df = pd.DataFrame(data)
# Save the data to an Excel file after each request
df.to_excel("website_data_temp.xlsx", index=False)
print(f"Processed {url}")
# Add a delay to avoid overloading the server
time.sleep(10)
else:
print(f"Failed to extract text from {url}")
# Save the final data to an Excel file
df.to_excel("website_data.xlsx", index=False)
print("Final data saved to website_data.xlsx")
| [
"{\"website\": \"https://example.com\", \"category\": \"Others\"}",
"You are an expert in website categorization.",
"This is a website [https://PLACEHOLDER], and this is the content I extracted from the website: [ ]. Can you determine which category this website belongs to based on the content? I would like the result in JSON format as follows: 'website': 'url', 'category': 'category_name'. Please choose a category from the list provided below for accurate classification: (Technology, Startup, Sales, Health, Business, Education, Finance, Web3, Human Resource, Generative AI, Others, Economy, Gen AI, HR, Law, Management, Productivity, Sales & Marketing, Stocks, Tech, VC & PE, Adult). You can select multiple categories if you are familiar with the website. You may also disregard the extracted data if necessary. IMPORTANT: While showing the result, only show the json.",
"This is a website [https://example.com], and this is the content I extracted from the website: [ ]. Can you determine which category this website belongs to based on the content? I would like the result in JSON format as follows: {'website': 'url', 'category': 'category_name'}. Please choose a category from the list provided below for accurate classification: (Technology, Startup, Sales, Health, Business, Education, Finance, Web3, Human Resource, Generative AI, Others, Economy, Gen AI, HR, Law, Management, Productivity, Sales & Marketing, Stocks, Tech, VC & PE, Entertainment, Adult). You can select multiple categories if you are familiar with the website. You may also disregard the extracted data if necessary. IMPORTANT: While showing the result, only show the json result."
] |
2024-01-10 | MrHooRa/insightly | pages~cart.py | import streamlit as st
import cohere
from streamlit_extras.switch_page_button import switch_page
from streamlit_autorefresh import st_autorefresh
from datetime import datetime, timezone
import random
MODEL_ID = '28b1f720-22ff-4556-85bc-2ae1e0ee18cf-ft'
COHERE_API_KEY = 'KOmzoZw2eX5jJgZBG501EbzSykrb27O0OuKFMjOj'
MOVING_AVERAGE_NUM = 3
co = cohere.Client(COHERE_API_KEY)
def getUserTimeSpent():
""" Return the total time spent on the website in seconds. """
return (st.session_state['current_time'] - st.session_state['start_time']).total_seconds()
def classify_session():
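    # The fine-tuned Cohere classifier receives one space-separated feature string:
    # seconds on site, products viewed, avg viewed price, products added,
    # avg added price, products removed, avg removed price.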
product_viewd_avg_price = (st.session_state['products_viewed_sum_price']/st.session_state['products_viewed'] if st.session_state['products_viewed'] != 0 else 0)
product_added_avg_price = (st.session_state['products_added_sum_price']/st.session_state['products_added'] if st.session_state['products_added'] != 0 else 0)
product_removed_avg_price = (st.session_state['products_removed_sum_price']/st.session_state['products_removed'] if st.session_state['products_removed'] != 0 else 0)
if response := co.classify(
model=MODEL_ID,
inputs=[f"{getUserTimeSpent()} {st.session_state['products_viewed']} {product_viewd_avg_price} {st.session_state['products_added']} {product_added_avg_price} {st.session_state['products_removed']} {product_removed_avg_price}"]):
return response.classifications[0]
return []
def removeFromCart(product_id):
for i in range(len(user_cart)):
if user_cart[i]['id'] == product_id:
print(user_cart[i])
del user_cart[i]
st.session_state['products_removed'] += 1
break
return True
st.set_page_config(
page_title="Insightly Store - Home Page",
page_icon="✨",
initial_sidebar_state="collapsed",
)
# ---------------------------------------------------------------------
# Products
# ---------------------------------------------------------------------
if 'current_product' not in st.session_state:
st.session_state['current_product'] = None
if 'cart' not in st.session_state:
st.session_state['cart'] = []
user_cart = st.session_state['cart']
current_product = st.session_state['current_product']
# ---------------------------------------------------------------------
# End Products
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# User behavior
# ---------------------------------------------------------------------
# set variables
if 'products_viewed' not in st.session_state:
st.session_state['products_viewed'] = 0
if 'products_viewed_sum_price' not in st.session_state:
    st.session_state['products_viewed_sum_price'] = 0
if 'products_added' not in st.session_state:
st.session_state['products_added'] = 0
if 'products_added_sum_price' not in st.session_state:
st.session_state['products_added_sum_price'] = 0
if 'products_removed' not in st.session_state:
st.session_state['products_removed'] = 0
if 'products_removed_sum_price' not in st.session_state:
st.session_state['products_removed_sum_price'] = 0
if 'start_time' not in st.session_state:
st.session_state['start_time'] = datetime.now(timezone.utc)
if 'current_time' not in st.session_state:
st.session_state['current_time'] = datetime.now(timezone.utc)
if 'moving_average' not in st.session_state:
st.session_state['moving_average'] = []
if 'moving_average_probability' not in st.session_state:
st.session_state['moving_average_probability'] = {
'0': 0,
'1': 0
}
# update cureent_time
st.session_state['current_time'] = datetime.now(timezone.utc)
session_stats = classify_session()
# update moving average
if len(st.session_state['moving_average']) >= MOVING_AVERAGE_NUM:
st.session_state['moving_average'].pop(0)
st.session_state['moving_average'].append([session_stats.labels['0'].confidence * 100, session_stats.labels['1'].confidence * 100])
# update moving average probability
st.session_state['moving_average_probability']['0'] = sum(
i[0] for i in st.session_state['moving_average']
) / len(st.session_state['moving_average'])
st.session_state['moving_average_probability']['1'] = sum(
i[1] for i in st.session_state['moving_average']
) / len(st.session_state['moving_average'])
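# The sidebar figures are a simple moving average: the mean of the last
# MOVING_AVERAGE_NUM Cohere confidence scores, which smooths out single noisy requests.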
if 'prod_msg' not in st.session_state:
st.session_state['prod_msg'] = ''
# ---------------------------------------------------------------------
# End User behavior data
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Sidebar
# ---------------------------------------------------------------------
st.markdown(
"""
<style>
[data-testid="stSidebarNav"] {
display: none
}
</style>
""",
unsafe_allow_html=True,
)
# User current session details
st.sidebar.markdown(f"""
<center><h3>Current user behavior</h3></center>
Total time spent (in seconds): {str(round(getUserTimeSpent(), 2))}<br>
Products viewed: {st.session_state['products_viewed']}<br>
Products added: {st.session_state['products_added']}<br>
Products removed: {st.session_state['products_removed']}<br>
Session stats: <br>
<span style="padding-left:20px;">Not purchase ({str(round(st.session_state['moving_average_probability']['0'], 4))[:5]}%)</span><br>
<span style="padding-left:20px;">Purchase ({str(round(st.session_state['moving_average_probability']['1'], 4))[:5]}%)</span>
""", unsafe_allow_html=True)
# ---------------------------------------------------------------------
# End Sidebar
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Body
# ---------------------------------------------------------------------
with st.container():
header_title, home_btn = st.columns([4,1])
with header_title:
st.markdown("# 🛒 Your cart ")
with home_btn:
if st.button('🏠 Home', use_container_width=True):
switch_page("main_page")
if st.button('👥 About', use_container_width=True):
switch_page("about")
st.write("<hr>", unsafe_allow_html=True)
if st.session_state['prod_msg'] != '':
st.info(st.session_state['prod_msg'], icon="⚠")
st.session_state['prod_msg'] = ''
st_autorefresh(interval=2000, limit=2)
for product in user_cart:
with st.container():
R_col, l_col = st.columns((1,2))
with R_col:
st.image(f"{product['image']}")
with l_col:
st.write(f"{product['description']}")
with st.container():
price_col, remove_col = st.columns((1,1))
with price_col:
st.write(f"<center><h5>Price: ${product['price']}</h5></center>", unsafe_allow_html=True)
with remove_col:
if st.button(f'Remove', key=f"remove_{product['name']}", use_container_width=True):
removeFromCart(product['id'])
st.session_state['prod_msg'] = f"{product['name']} has been removed from your cart!"
st_autorefresh(interval=1, limit=2)
if len(user_cart) == 0:
st.write("<center><h5>Your cart is empty</h5></center>", unsafe_allow_html=True)
else:
# Calculate total
cart_total = sum([product['price'] for product in user_cart])
with st.container():
st.write("<hr>", unsafe_allow_html=True)
st.write("<center><h5>Total: $"+str(round(cart_total, 2))+"</h5></center>", unsafe_allow_html=True)
if st.button('Checkout', use_container_width=True):
pass
# ---------------------------------------------------------------------
# End Body
# --------------------------------------------------------------------- | [] |
2024-01-10 | MrHooRa/insightly | pages~product.py | import streamlit as st
from streamlit_extras.switch_page_button import switch_page
import cohere
from datetime import datetime, timezone
from streamlit_autorefresh import st_autorefresh
MODEL_ID = '28b1f720-22ff-4556-85bc-2ae1e0ee18cf-ft'
COHERE_API_KEY = 'KOmzoZw2eX5jJgZBG501EbzSykrb27O0OuKFMjOj'
MOVING_AVERAGE_NUM = 3
co = cohere.Client(COHERE_API_KEY)
def getUserTimeSpent():
""" Return the total time spent on the website in seconds. """
return (st.session_state['current_time'] - st.session_state['start_time']).total_seconds()
def classify_session():
product_viewd_avg_price = (st.session_state['products_viewed_sum_price']/st.session_state['products_viewed'] if st.session_state['products_viewed'] != 0 else 0)
product_added_avg_price = (st.session_state['products_added_sum_price']/st.session_state['products_added'] if st.session_state['products_added'] != 0 else 0)
product_removed_avg_price = (st.session_state['products_removed_sum_price']/st.session_state['products_removed'] if st.session_state['products_removed'] != 0 else 0)
if response := co.classify(
model=MODEL_ID,
inputs=[f"{getUserTimeSpent()} {st.session_state['products_viewed']} {product_viewd_avg_price} {st.session_state['products_added']} {product_added_avg_price} {st.session_state['products_removed']} {product_removed_avg_price}"]):
return response.classifications[0]
return []
st.set_page_config(
page_title="Insightly Store - Products",
page_icon="✨",
initial_sidebar_state="collapsed",
)
# ---------------------------------------------------------------------
# Products
# ---------------------------------------------------------------------
if 'current_product' not in st.session_state:
st.session_state['current_product'] = None
if 'cart' not in st.session_state:
st.session_state['cart'] = []
current_product = st.session_state['current_product']
if current_product is None:
st.warning('Page not found!')
if st.button('Go back'):
switch_page("main_page")
st.stop()
# ---------------------------------------------------------------------
# End Products
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# User behavior
# ---------------------------------------------------------------------
def sessions():
# set variables
if 'products_viewed' not in st.session_state:
st.session_state['products_viewed'] = 0
    if 'products_viewed_sum_price' not in st.session_state:
        st.session_state['products_viewed_sum_price'] = 0
if 'products_added' not in st.session_state:
st.session_state['products_added'] = 0
if 'products_added_sum_price' not in st.session_state:
st.session_state['products_added_sum_price'] = 0
if 'products_removed' not in st.session_state:
st.session_state['products_removed'] = 0
if 'products_removed_sum_price' not in st.session_state:
st.session_state['products_removed_sum_price'] = 0
if 'start_time' not in st.session_state:
st.session_state['start_time'] = datetime.now(timezone.utc)
if 'current_time' not in st.session_state:
st.session_state['current_time'] = datetime.now(timezone.utc)
if 'moving_average' not in st.session_state:
st.session_state['moving_average'] = []
if 'moving_average_probability' not in st.session_state:
st.session_state['moving_average_probability'] = {
'0': 0,
'1': 0
}
sessions()
# update cureent_time
st.session_state['current_time'] = datetime.now(timezone.utc)
session_stats = classify_session()
# update moving average
if len(st.session_state['moving_average']) >= MOVING_AVERAGE_NUM:
st.session_state['moving_average'].pop(0)
st.session_state['moving_average'].append([session_stats.labels['0'].confidence * 100, session_stats.labels['1'].confidence * 100])
# update moving average probability
st.session_state['moving_average_probability']['0'] = sum(
i[0] for i in st.session_state['moving_average']
) / len(st.session_state['moving_average'])
st.session_state['moving_average_probability']['1'] = sum(
i[1] for i in st.session_state['moving_average']
) / len(st.session_state['moving_average'])
if 'prod_msg' not in st.session_state:
st.session_state['prod_msg'] = ''
# ---------------------------------------------------------------------
# End User behavior data
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Sidebar
# ---------------------------------------------------------------------
st.markdown(
"""
<style>
[data-testid="stSidebarNav"] {
display: none
}
</style>
""",
unsafe_allow_html=True,
)
# User current session details
st.sidebar.markdown(f"""
<center><h3>Current user behavior</h3></center>
Total time spent (in seconds): {str(round(getUserTimeSpent(), 2))}<br>
Products viewed: {st.session_state['products_viewed']}<br>
Products added: {st.session_state['products_added']}<br>
Products removed: {st.session_state['products_removed']}<br>
Session stats: <br>
<span style="padding-left:20px;">Not purchase ({str(round(st.session_state['moving_average_probability']['0'], 4))[:5]}%)</span><br>
<span style="padding-left:20px;">Purchase ({str(round(st.session_state['moving_average_probability']['1'], 4))[:5]}%)</span>
""", unsafe_allow_html=True)
# ---------------------------------------------------------------------
# End Sidebar
# ---------------------------------------------------------------------
# update cureent_time
st.session_state['current_time'] = datetime.now(timezone.utc)
session_stats = classify_session()
# update moving average
if len(st.session_state['moving_average']) >= MOVING_AVERAGE_NUM:
st.session_state['moving_average'].pop(0)
st.session_state['moving_average'].append([session_stats.labels['0'].confidence * 100, session_stats.labels['1'].confidence * 100])
# update moving average probability
st.session_state['moving_average_probability']['0'] = sum(
i[0] for i in st.session_state['moving_average']
) / len(st.session_state['moving_average'])
st.session_state['moving_average_probability']['1'] = sum(
i[1] for i in st.session_state['moving_average']
) / len(st.session_state['moving_average'])
# ---------------------------------------------------------------------
# End User behavior data
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Body
# ---------------------------------------------------------------------
with st.container():
header_title, home_btn = st.columns([4,1])
with header_title:
st.markdown("# Insightly Store ✨ ")
with home_btn:
if st.button('🏠 Home', use_container_width=True):
switch_page("main_page")
if st.button('🛒 Cart', use_container_width=True):
switch_page("cart")
if st.button('👥 About', use_container_width=True):
switch_page("about")
st.write("<hr>", unsafe_allow_html=True)
if st.session_state['prod_msg'] != '':
st.success(st.session_state['prod_msg'], icon="🛒")
st.session_state['prod_msg'] = ''
st_autorefresh(interval=2000, limit=2)
st.markdown(f"# {current_product['name']} ")
with st.container():
R_col, l_col = st.columns((1,2))
with R_col:
st.image(f"{current_product['image']}")
with l_col:
st.write(f"{current_product['description']}")
st.write(f"<center><h5>Price: ${current_product['price']}</h5></center>", unsafe_allow_html=True)
if st.button(f'Add to cart', key=f"add_{current_product['name']}", use_container_width=True):
st.session_state['cart'].append(current_product)
st.session_state['products_added'] += 1
st.session_state['prod_msg'] = f"{current_product['name']} has been added to the cart!"
st_autorefresh(interval=1, limit=2)
# ---------------------------------------------------------------------
# End Body
# --------------------------------------------------------------------- | [] |
2024-01-10 | MrHooRa/insightly | main_page.py | from PIL import Image
import streamlit as st
import pandas as pd
import numpy as np
import cohere
from streamlit_extras.switch_page_button import switch_page
from streamlit_autorefresh import st_autorefresh
from datetime import datetime, timezone
MODEL_ID = '28b1f720-22ff-4556-85bc-2ae1e0ee18cf-ft'
COHERE_API_KEY = 'KOmzoZw2eX5jJgZBG501EbzSykrb27O0OuKFMjOj'
MOVING_AVERAGE_NUM = 3
co = cohere.Client(COHERE_API_KEY)
def getUserTimeSpent():
""" Return the total time spent on the website in seconds. """
return (st.session_state['current_time'] - st.session_state['start_time']).total_seconds()
def classify_session():
product_viewd_avg_price = (st.session_state['products_viewed_sum_price']/st.session_state['products_viewed'] if st.session_state['products_viewed'] != 0 else 0)
product_added_avg_price = (st.session_state['products_added_sum_price']/st.session_state['products_added'] if st.session_state['products_added'] != 0 else 0)
product_removed_avg_price = (st.session_state['products_removed_sum_price']/st.session_state['products_removed'] if st.session_state['products_removed'] != 0 else 0)
if response := co.classify(
model=MODEL_ID,
inputs=[f"{getUserTimeSpent()} {st.session_state['products_viewed']} {product_viewd_avg_price} {st.session_state['products_added']} {product_added_avg_price} {st.session_state['products_removed']} {product_removed_avg_price}"]):
return response.classifications[0]
return []
st.set_page_config(
page_title="Insightly Store - Home Page",
page_icon="✨",
initial_sidebar_state="collapsed",
)
# ---------------------------------------------------------------------
# Products
# ---------------------------------------------------------------------
products_list = [
{
'id': 1,
'name': 'Flashlight wide',
'price': 99.99,
'image': "img/product_single_01.jpg",
'description': 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod temp incididunt ut labore et dolore magna aliqua. Quis ipsum suspendisse. Donec condimentum elementum convallis. Nunc sed orci a diam ultrices aliquet interdum quis nulla.'
},
{
'id': 2,
'name': 'Apple Watch 2',
'price': 359.95,
'image': "img/product_single_02.jpg",
'description': 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod temp incididunt ut labore et dolore magna aliqua. Quis ipsum suspendisse. Donec condimentum elementum convallis. Nunc sed orci a diam ultrices aliquet interdum quis nulla.'
},
{
'id': 3,
'name': 'Ultra Camera',
'price': 259.95,
'image': "img/product_single_03.jpg",
'description': 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod temp incididunt ut labore et dolore magna aliqua. Quis ipsum suspendisse. Donec condimentum elementum convallis. Nunc sed orci a diam ultrices aliquet interdum quis nulla.'
}
]
if 'current_product' not in st.session_state:
st.session_state['current_product'] = None
if 'cart' not in st.session_state:
st.session_state['cart'] = []
if 'prod_msg' not in st.session_state:
st.session_state['prod_msg'] = ''
# ---------------------------------------------------------------------
# End Products
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# User behavior
# ---------------------------------------------------------------------
# set variables
if 'products_viewed' not in st.session_state:
st.session_state['products_viewed'] = 0
if 'products_viewed_sum_price' not in st.session_state:
    st.session_state['products_viewed_sum_price'] = 0
if 'products_added' not in st.session_state:
st.session_state['products_added'] = 0
if 'products_added_sum_price' not in st.session_state:
st.session_state['products_added_sum_price'] = 0
if 'products_removed' not in st.session_state:
st.session_state['products_removed'] = 0
if 'products_removed_sum_price' not in st.session_state:
st.session_state['products_removed_sum_price'] = 0
if 'start_time' not in st.session_state:
st.session_state['start_time'] = datetime.now(timezone.utc)
if 'current_time' not in st.session_state:
st.session_state['current_time'] = datetime.now(timezone.utc)
if 'moving_average' not in st.session_state:
st.session_state['moving_average'] = []
if 'moving_average_probability' not in st.session_state:
st.session_state['moving_average_probability'] = {
'0': 0,
'1': 0
}
# update cureent_time
st.session_state['current_time'] = datetime.now(timezone.utc)
session_stats = classify_session()
# update moving average
if len(st.session_state['moving_average']) >= MOVING_AVERAGE_NUM:
st.session_state['moving_average'].pop(0)
st.session_state['moving_average'].append([session_stats.labels['0'].confidence * 100, session_stats.labels['1'].confidence * 100])
# update moving average probability
st.session_state['moving_average_probability']['0'] = sum(
i[0] for i in st.session_state['moving_average']
) / len(st.session_state['moving_average'])
st.session_state['moving_average_probability']['1'] = sum(
i[1] for i in st.session_state['moving_average']
) / len(st.session_state['moving_average'])
# ---------------------------------------------------------------------
# End User behavior data
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Sidebar
# ---------------------------------------------------------------------
st.markdown(
"""
<style>
[data-testid="stSidebarNav"] {
display: none
}
</style>
""",
unsafe_allow_html=True,
)
# User current session details
st.sidebar.markdown(f"""
<center><h3>Current user behavior</h3></center>
Total time spent (in seconds): {str(round(getUserTimeSpent(), 2))}<br>
Products viewed: {st.session_state['products_viewed']}<br>
Products added: {st.session_state['products_added']}<br>
Products removed: {st.session_state['products_removed']}<br>
Session stats: <br>
<span style="padding-left:20px;">Not purchase ({str(round(st.session_state['moving_average_probability']['0'], 4))[:5]}%)</span><br>
<span style="padding-left:20px;">Purchase ({str(round(st.session_state['moving_average_probability']['1'], 4))[:5]}%)</span>
""", unsafe_allow_html=True)
# ---------------------------------------------------------------------
# End Sidebar
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Body
# ---------------------------------------------------------------------
with st.container():
header_title, home_btn = st.columns([4,1])
with header_title:
st.markdown("# Insightly Store ✨ ")
with home_btn:
if st.button('🛒 Cart', use_container_width=True):
switch_page("cart")
if st.button('👥 About', use_container_width=True):
switch_page("about")
st.markdown("""
welcome to the Insightly Store, where you can buy all the Insightly swag you want!
""", unsafe_allow_html=True)
# product list
with st.container():
st.write("---")
with st.container():
header_col1, header_col2 = st.columns((1,2))
with header_col1:
st.header("Products")
with header_col2:
if st.session_state['prod_msg'] != '':
st.success(st.session_state['prod_msg'], icon="🛒")
st.session_state['prod_msg'] = ''
st_autorefresh(interval=2000, limit=2)
st.write("##")
img_col1, img_col2, img_col3 = st.columns((1,1,1))
with img_col1:
st.write(f"<center><h5>{products_list[0]['name']}</h5></center>", unsafe_allow_html=True)
st.image(Image.open(products_list[0]['image']))
st.write(f"<center><h5>Price: ${products_list[0]['price']}</h5></center>", unsafe_allow_html=True)
if st.button('View', key=f"view_{products_list[0]['name']}", use_container_width=True):
st.session_state['current_product'] = products_list[0]
st.session_state['products_viewed'] += 1
st.session_state['products_viewed_sum_price'] += (products_list[0]['price']/60)
switch_page("product")
if st.button(f'Add to cart', key=f"add_{products_list[0]['name']}", use_container_width=True):
st.session_state['cart'].append(products_list[0])
st.session_state['products_added'] += 1
st.session_state['prod_msg'] = f"{products_list[0]['name']} has been added to the cart!"
st_autorefresh(interval=1, limit=2)
with img_col2:
st.write(f"<center><h5>{products_list[1]['name']}</h5></center>", unsafe_allow_html=True)
st.image(Image.open(products_list[1]['image']))
st.write(f"<center><h5>Price: ${products_list[1]['price']}</h5></center>", unsafe_allow_html=True)
if st.button('View', key=f"view_{products_list[1]['name']}", use_container_width=True):
st.session_state['current_product'] = products_list[1]
st.session_state['products_viewed'] += 1
st.session_state['products_viewed_sum_price'] += (products_list[1]['price']/60)
switch_page("product")
if st.button(f'Add to cart', key=f"add_{products_list[1]['name']}", use_container_width=True):
st.session_state['cart'].append(products_list[1])
st.session_state['products_added'] += 1
st.session_state['prod_msg'] = f"{products_list[1]['name']} has been added to the cart!"
st_autorefresh(interval=1, limit=2)
with img_col3:
st.write(f"<center><h5>{products_list[2]['name']}</h5></center>", unsafe_allow_html=True)
st.image(Image.open(products_list[2]['image']))
st.write(f"<center><h5>Price: ${products_list[2]['price']}</h5></center>", unsafe_allow_html=True)
if st.button('View', key=f"view_{products_list[2]['name']}", use_container_width=True):
st.session_state['current_product'] = products_list[2]
st.session_state['products_viewed'] += 1
st.session_state['products_viewed_sum_price'] += (products_list[2]['price']/60)
switch_page("product")
if st.button(f'Add to cart', key=f"add_{products_list[2]['name']}", use_container_width=True):
st.session_state['cart'].append(products_list[2])
st.session_state['products_added'] += 1
st.session_state['prod_msg'] = f"{products_list[2]['name']} has been added to the cart!"
st_autorefresh(interval=1, limit=2)
# footer
# ---------------------------------------------------------------------
# End Body
# --------------------------------------------------------------------- | [] |
2024-01-10 | jjkchung87/prioritypilot | Backend~controller.py | import os
import openai
import json
from datetime import datetime
from models import Conversation, Project, db, Task, User, Department
# Load your API key from an environment variable or secret management service
openai.api_key = os.environ["OPENAI_API_KEY"]
#Create new project plan
def generate_ai_tasks(project_id, user_id, prompt):
"""generates tasks from ChatGPT"""
departments = Department.query.all()
department_names = [department.name for department in departments]
department_names_string = ", ".join(department_names)
today = datetime.now().date()
today_str = today.strftime('%Y-%m-%d')
messages = [
{"role": "system",
"content": f'You will be asked to recommend an array of tasks to complete a project. Your output should ONLY include an array of at least 5 task objects. Task objects should have these properties: task_name, description, date_time, department. "date_time" should have "MM-DD-YYYY HH:MM" format. No task should have a date before {today_str}. "department" should only be one of these: {department_names_string}. '
},
{"role": "user", "content": prompt}
]
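    # Per the system message above, the model is expected to reply with a JSON array
    # of task objects shaped like:
    #   {"task_name": ..., "description": ..., "date_time": "MM-DD-YYYY HH:MM", "department": <a Department name>}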
# ADD MEETING OBJECT LATER: Meeting objects should have this shape {"task_name","type":"meeting", "description","date_time", "team"}. "date_time" should have "MM-DD-YYYY HH:MM" format. The first task should have a "date_time" that is todays date. The "teams" are Product Management, Finance, R&D, Operations, Supply Chain, Senior Leadership, Marketing.
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
print("***************prompt******************")
print(prompt)
print("***************from ChatGPT******************")
print(response.choices[0].message.content)
print(type(response.choices[0].message.content))
task_list = json.loads(response.choices[0].message.content)
print("*************converted to Python********************")
print(task_list)
print(type(task_list[0]))
for task in task_list:
# if task["type"] == "task":
department_name = task['department']
department = Department.query.filter_by(name=department_name).first()
other_user = User.query.filter_by(department_id=department.id).first()
users = [other_user]
t = Task.create_new_task(task_name=task["task_name"],
description=task["description"],
notes="",
type="task",
priority="Medium",
status="Not Started",
end_date=task["date_time"],
user_id=user_id,
project_id=project_id,
meeting_user_id=None,
users=users)
# if task["type"] == "meeting":
# team = Team.query.filter_by(name=task["team"]).first()
# team_id = team.id
# meeting_user = User.query.filter_by(team_id=team_id).first()
# t = Task.create_new_task(task_name=task["task_name"],
# description=task["description"],
# notes="",
# type=task["type"],
# status="Not Started",
# priority="Medium",
# # start_date=task["start_date"],
# end_date=task["date_time"],
# user_id=user_id,
# meeting_user_id=meeting_user.id,
# project_id=project_id
# )
db.session.add(t)
db.session.commit()
messages.append(
{"role":"assistant",
"content":task_list}
)
messages_no_system = messages[1:]
return messages_no_system
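# Illustrative call (ids are hypothetical): generate_ai_tasks(project_id=1, user_id=1,
# prompt="Plan a Q3 product launch") returns the user prompt followed by the model's
# task array, i.e. the conversation minus the system message.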
def generate_ai_tips(project_id, task_id):
"""Generate tips for tasks from AI"""
task = Task.query.get_or_404(task_id)
task_name = task.task_name
content = f"I am having trouble with the task: '{task_name}'. Give me 3 tips of how I can navigate this task. Your output should only include an array of 3 tips and nothing else."
system_message = {"role": "system",
"content": "You will be asked to give 3 tips on a particular task from a list of tasks you previously gave for an ongoing project. Your response should only be an array data type of 3 strings and nothing else. Each tip should be no more than 25 words."
}
new_message = {
"role": "user",
"content": content # Ensure content is a valid JSON string
}
messages = [system_message, new_message]
conversation = Conversation.query.filter_by(project_id=project_id).first()
print('*****************CONVERSATION********************')
print(conversation)
if conversation:
db_messages = conversation.get_messages()
print('*****************DB MESSAGES********************')
print(db_messages)
if not isinstance(db_messages, list):
db_messages = [db_messages]
# Iterate through messages and convert content to JSON if it's a list
for message in db_messages:
if isinstance(message['content'], list):
message['content'] = json.dumps(message['content'])
db_messages.append(new_message)
db_messages.insert(0, system_message)
else:
conversation = Conversation(user_id=task.user_id,
conversation_type="Assistance",
task_id = task_id,
project_id = project_id)
db.session.add(conversation)
db.session.commit()
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
tips_str = response.choices[0].message["content"]
print('*****************TIPS RESPONSE********************')
print(tips_str)
tips = json.loads(tips_str)
# Ensure "tips" contains only strings
updated_tips = []
for tip in tips:
if isinstance(tip, dict):
# Extract the value from the dictionary
updated_tips.append(next(iter(tip.values())))
elif isinstance(tip, str):
updated_tips.append(tip)
print("*************converted to Python********************")
print(updated_tips)
print(type(updated_tips))
# Append the tips to the messages list without converting to JSON
new_message_for_db = {
"role": "assistant",
"content": updated_tips
}
print('*******************UPDATED MESSAGES******************')
print(new_message_for_db)
conversation.set_messages(new_message_for_db)
return updated_tips
# messages = [
# {"role": "system",
# "content": 'You will give me a list of helpful tips on how to achieve a certain task. The output should be an array. No more than 3 tips.'
# },
# {"role": "user", "content": prompt}
# ]
| [
"You will be asked to recommend an array of tasks to complete a project. Your output should ONLY include an array of at least 5 task objects. Task objects should have these properties: task_name, description, date_time, department. \"date_time\" should have \"MM-DD-YYYY HH:MM\" format. No task should have a date before PLACEHOLDER. \"department\" should only be one of these: PLACEHOLDER. ",
"I am having trouble with the task: 'PLACEHOLDER'. Give me 3 tips of how I can navigate this task. Your output should only include an array of 3 tips and nothing else.",
"You will be asked to give 3 tips on a particular task from a list of tasks you previously gave for an ongoing project. Your response should only be an array data type of 3 strings and nothing else. Each tip should be no more than 25 words."
] |
2024-01-10 | SquareGraph/docgenGPT | docgengpt~autodoc.py | """Main class, AutoDoc and handy functions"""
from pathlib import Path
from enum import Enum
from dataclasses import dataclass
from typing import Dict, List, Union, Type
from tqdm.autonotebook import tqdm
from langchain.schema import Document
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PythonLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
import black
__all__ = ['SystemMessage', 'PyFile', 'AutoDoc']
class SystemMessage(Enum):
"""
An enumeration representing system messages.
"""
open: str = "I would like You to act as a senior programmer, with vast python knowledge and capabilities to understand complex code."
docs: str = "could you create docstrings and type hints for all of classes and methods in provided code?"
markdown: str = "could you create a markdown file with description of this code and example usage of classes and methods?"
@dataclass
class PyFile:
"""
Represents a Python file.
"""
path: str
context: str
page_content: str
answer: Union[str, None] = None
@classmethod
def from_document(cls: Type['PyFile'], document: Document, context: str) -> 'PyFile':
"""
Create a PyFile instance from a Document object.
Args:
document (Document): The document object.
context (str): The context for the file.
Returns:
PyFile: A new PyFile instance.
"""
path = document.metadata['source']
content = document.page_content
return cls(path=path, context=context, page_content=content, answer=None)
def format_answer(self) -> str:
"""
Formats the answer and returns it.
Returns:
str: The formatted answer.
"""
try:
codelines = [line for line in self.answer.strip().split('\n')[1:-1] if not '```' in line]
return black.format_str('\n'.join(codelines), mode=black.FileMode())
except (KeyError, AttributeError, ValueError) as e:
return self.page_content
def __post_init__(self):
self.name = Path(self.path).name.split('.')[0]
class AutoDoc:
"""
A class to automate the generation of docstrings for Python files.
"""
def __init__(self, path_dir: Path, openai_api_key: str, context: Dict[str, str], model_name: str = 'gpt-3.5-turbo',
temperature: int = 0, **kwargs) -> None:
"""
Initialize the AutoDoc class.
Args:
path_dir (Path): Directory path containing the Python files.
openai_api_key (str): OpenAI API key.
context (Dict[str, str]): Context for each file.
model_name (str, optional): Name of the OpenAI model. Defaults to 'gpt-3.5-turbo'.
temperature (int, optional): Temperature setting for the OpenAI model. Defaults to 0.
"""
self.raw_paths = self.__get_python_paths(path_dir)
self.documents = [PythonLoader(filepath).load()[0] for filepath in self.raw_paths]
self.pyfiles = self.__get_pyfiles(context)
self.llm = ChatOpenAI(model_name=model_name, temperature=temperature, openai_api_key=openai_api_key)
self.vectorstore = Chroma.from_documents(documents=self.documents,
embedding=OpenAIEmbeddings(openai_api_key=openai_api_key))
def __get_python_paths(self, path_dir: Path) -> List[str]:
"""
Retrieves paths to Python files from a given directory.
Args:
path_dir (Path): Directory path.
Returns:
List[str]: List of paths to Python files.
"""
return [str(file) for file in path_dir.iterdir() if file.suffix == '.py']
def __get_pyfiles(self, context: Dict[str, str]) -> List[PyFile]:
"""
Generates a list of PyFile instances from documents.
Args:
context (Dict[str, str]): Context for each file.
Returns:
List[PyFile]: List of PyFile instances.
"""
pyfiles = []
for doc in self.documents:
name = Path(doc.metadata['source']).name.split('.')[0]
pyfiles.append(PyFile.from_document(document=doc, context=context[name]))
return pyfiles
def retrive_docs(self, pyfile: PyFile) -> str:
"""
Retrieves docs for a given PyFile instance.
Args:
pyfile (PyFile): PyFile instance.
Returns:
str: Retrieved docs.
"""
ret = self.vectorstore.as_retriever(search_kwargs={"filter": {"source": pyfile.path}})
chain = RetrievalQA.from_chain_type(self.llm, retriever=ret)
prompt = f"{SystemMessage.open.value} According to the following description: {pyfile.context}, {SystemMessage.docs.value}."
pyfile.answer = chain({"query": prompt})['result']
return pyfile
def __get_docstrings(self) -> Dict[str, str]:
"""
Gathers docstrings for the Python files.
Returns:
Dict[str, str]: Dictionary with filenames as keys and docstrings as values.
"""
updated = [self.retrive_docs(pyfile) for pyfile in tqdm(self.pyfiles,
total=len(self.pyfiles),
desc='Generating docstrings...')]
return {item.name: item.format_answer() for item in updated}
def generate_docstrings(self) -> None:
"""
Generates docstrings and writes them to Python files in a new directory.
"""
try:
dirname = Path(self.raw_paths[0]).parent.parent / (str(Path(self.raw_paths[0]).parent.name) + "_with_docs")
dirname.mkdir(parents=True, exist_ok=True)
docstrings = self.__get_docstrings()
for filename, content in docstrings.items():
with open(f"{dirname / filename}.py", "w") as f:
f.write(content)
print("Finished!")
except (AttributeError, KeyError, ValueError) as e:
print(e)
pass | [] |
2024-01-10 | realChrisHahn2/nl2spec | src~models.py | import os
from statistics import mode
import openai
import requests
import vertexai
from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer, pipeline
from vertexai.preview.language_models import TextGenerationModel, CodeGenerationModel
import prompting
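# Each backend wrapper below follows the same pattern: read an API key (or Google
# project id), build the few-shot prompt with prompting.prompt(args), sample up to
# n candidate completions, and pass them to prompting.extract_subinfo for parsing.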
def gpt_35_turbo(args):
if args.keyfile != "":
keyfile = args.keyfile
else:
keyfile = os.path.join(args.keydir, "oai_key.txt")
key = open(keyfile).readline().strip("\n")
if key == "":
raise Exception("No key provided.")
openai.api_key = key
if args.num_tries == "":
n = 3
else:
n = int(args.num_tries)
if n > 5:
n = 5
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": prompting.prompt(args)}],
n=n,
temperature=args.temperature,
stop="FINISH",
)
choices = []
for i in range(0, n):
output = response["choices"][i]["message"]["content"]
print("OUTPUT")
print(output)
choices.append(output)
return prompting.extract_subinfo(choices, args, n)
def gpt_4(args):
if args.keyfile != "":
keyfile = args.keyfile
else:
keyfile = os.path.join(args.keydir, "oai_key.txt")
key = open(keyfile).readline().strip("\n")
if key == "":
raise Exception("No key provided.")
openai.api_key = key
if args.num_tries == "":
n = 3
else:
n = int(args.num_tries)
if n > 5:
n = 5
response = openai.ChatCompletion.create(
model="gpt-4",
messages=[{"role": "user", "content": prompting.prompt(args)}],
n=n,
temperature=args.temperature,
stop="FINISH",
)
choices = []
for i in range(0, n):
output = response["choices"][i]["message"]["content"]
print("OUTPUT")
print(output)
choices.append(output)
return prompting.extract_subinfo(choices, args, n)
def code_davinci_002(args):
if args.keyfile != "":
keyfile = args.keyfile
else:
keyfile = os.path.join(args.keydir, "oai_key.txt")
key = open(keyfile).readline().strip("\n")
if key == "":
raise Exception("No key provided.")
openai.api_key = key
if args.num_tries == "":
n = 3
else:
n = int(args.num_tries)
if n > 5:
n = 5
temperature = args.temperature
response = openai.Completion.create(
model="code-davinci-002",
prompt=prompting.prompt(args),
temperature=temperature,
n=n,
max_tokens=300,
stop=["FINISH"],
logprobs=5,
)
# print(response["choices"][0]["text"])
choices = []
for i in range(0, n):
output = response["choices"][i]["text"]
choices.append(output)
return prompting.extract_subinfo(choices, args, n)
def text_davinci_003(args):
if args.keyfile != "":
keyfile = args.keyfile
else:
keyfile = os.path.join(args.keydir, "oai_key.txt")
key = open(keyfile).readline().strip("\n")
if key == "":
raise Exception("No key provided.")
openai.api_key = key
if args.num_tries == "":
n = 3
else:
n = int(args.num_tries)
if n > 5:
n = 5
temperature = args.temperature
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompting.prompt(args),
temperature=temperature,
n=n,
max_tokens=300,
stop=["FINISH"],
logprobs=5,
)
# print(response["choices"][0]["text"])
choices = []
for i in range(0, n):
output = response["choices"][i]["text"]
choices.append(output)
return prompting.extract_subinfo(choices, args, n)
def code_davinci_edit_001(args):
if args.keyfile != "":
keyfile = args.keyfile
else:
keyfile = os.path.join(args.keydir, "oai_key.txt")
key = open(keyfile).readline().strip("\n")
if key == "":
raise Exception("No key provided.")
openai.api_key = key
if args.num_tries == "":
n = 3
else:
n = int(args.num_tries)
if n > 5:
n = 5
temperature = args.temperature
prompt = prompting.prompt(args) + " REPLACE"
response = openai.Edit.create(
model="code-davinci-edit-001",
input=prompt,
instruction="replace REPLACE with the explanation, an explanation dictionary and the final translation",
temperature=temperature,
top_p=1,
n=n,
)
# print(response["choices"][0]["text"])
choices = []
for i in range(0, n):
output = response["choices"][i]["text"][len(prompt) - 8 :].split("FINISH")[0]
choices.append(output)
return prompting.extract_subinfo(choices, args, n)
def text_bison_001(args):
if args.keyfile != "":
keyfile = args.keyfile
else:
keyfile = os.path.join(args.keydir, "google_project_id.txt")
key = open(keyfile).readline().strip("\n")
if key == "":
raise Exception("No key provided.")
vertexai.init(project=key)
model = TextGenerationModel.from_pretrained("text-bison@001")
n = args.num_tries
def query():
return model.predict(
prompting.prompt(args), temperature=args.temperature, max_output_tokens=300
)
choices = []
for i in range(0, n):
        response = query()
        output = response.text.split("FINISH")[0]
choices.append(output)
return prompting.extract_subinfo(choices, args, n)
def code_bison_001(args):
if args.keyfile != "":
keyfile = args.keyfile
else:
keyfile = os.path.join(args.keydir, "google_project_id.txt")
key = open(keyfile).readline().strip("\n")
if key == "":
raise Exception("No key provided.")
vertexai.init(project=key)
model = CodeGenerationModel.from_pretrained("code-bison@001")
n = args.num_tries
def query():
return model.predict(
prefix=prompting.prompt(args),
temperature=args.temperature,
max_output_tokens=300,
)
choices = []
for i in range(0, n):
        response = query()
        output = response.text.split("FINISH")[0]
choices.append(output)
return prompting.extract_subinfo(choices, args, n)
def bloom(args):
n = args.num_tries
input_prompt = prompting.prompt(args)
API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
if args.keyfile != "":
keyfile = args.keyfile
else:
keyfile = os.path.join(args.keydir, "hf_key.txt")
key = open(keyfile).readline().strip("\n")
if key == "":
raise Exception("No key provided.")
headers = {"Authorization": "Bearer " + key}
def query(payload):
response = requests.post(API_URL, headers=headers, json=payload)
return response.json()
choices = []
for i in range(0, n):
raw_output = query(
{
"inputs": input_prompt,
"options": {"use_cache": False, "wait_for_model": True},
"parameters": {
"return_full_text": False,
"do_sample": False,
"max_new_tokens": 300,
"temperature": args.temperature,
},
}
)
# shots_count = input_prompt.count("FINISH")
output = raw_output[0]["generated_text"].split("FINISH")[0]
choices.append(output)
return prompting.extract_subinfo(choices, args, n)
def bloomz(args):
n = args.num_tries
input_prompt = prompting.prompt(args)
API_URL = "https://api-inference.huggingface.co/models/bigscience/bloomz"
if args.keyfile != "":
keyfile = args.keyfile
else:
keyfile = os.path.join(args.keydir, "hf_key.txt")
key = open(keyfile).readline().strip("\n")
if key == "":
raise Exception("No key provided.")
headers = {"Authorization": "Bearer " + key}
def query(payload):
response = requests.post(API_URL, headers=headers, json=payload)
return response.json()
choices = []
for i in range(0, n):
raw_output = query(
{
"inputs": input_prompt,
"options": {"use_cache": False, "wait_for_model": True},
"parameters": {
"return_full_text": False,
"do_sample": False,
"max_new_tokens": 300,
"temperature": args.temperature,
},
}
)
print("RAW OUTPUT")
print(raw_output)
# shots_count = input_prompt.count("FINISH")
output = raw_output[0]["generated_text"].split("FINISH")[0]
choices.append(output)
return prompting.extract_subinfo(choices, args, n)
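# --- Illustrative usage sketch (not part of the original module) ---
# Each wrapper above reads an argparse-style `args` object; based on the attribute accesses
# in this file it needs at least keyfile, keydir, num_tries, and temperature, plus whatever
# prompting.prompt(args) consumes (an assumption, since prompting.py is not shown here).
# A call could look roughly like this:
#
#   from argparse import Namespace
#   import models
#
#   args = Namespace(keyfile="", keydir="/path/to/keys", num_tries="3", temperature=0.2)
#   result = models.gpt_35_turbo(args)  # returns whatever prompting.extract_subinfo produces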
| [
" REPLACE"
] |
2024-01-10 | DagiiM/elesoltd-ai | documents~tasks.py | import logging
import openai
from decouple import config
from django.db import transaction
import time
from django.conf import settings
import os
import pdfkit
import random
from django.template.loader import get_template
from . import resume_template
from tenacity import retry, stop_after_delay, wait_fixed
# set up OpenAI API key
openai.api_key = config('OPENAI_API_KEY')
def generate_text_basic(prompt):
response = None
while response is None:
try:
response = openai.Completion.create(
engine='text-davinci-003',
prompt=prompt,
max_tokens=3000,
n=1,
temperature=0.5
)
except openai.error.APIError as e:
if e.status == 429 and 'Please include the request ID' in e.message:
request_id = e.message.split('Please include the request ID ')[-1].split(' in your message.')[0]
print(f'Retrying request {request_id}')
request = openai.api_resources.Request.get(request_id)
while request.status == 'pending':
time.sleep(1)
request = openai.api_resources.Request.get(request_id)
response = openai.api_resources.Completion.get(request.response['id']).choices[0]
elif e.status == 403:
print('API key unauthorized')
return None
elif e.status == 402:
print('Ran out of credits')
return None
else:
raise e
response = response.choices[0].text.strip().replace('\n', '<br>')
return response
@retry(stop=stop_after_delay(60), wait=wait_fixed(1))
def generate_text(prompt):
response = None
while response is None:
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt},
]
)
except openai.error.APIError as e:
if e.status == 429 and 'Please include the request ID' in e.message:
request_id = e.message.split('Please include the request ID ')[-1].split(' in your message.')[0]
print(f'Retrying request {request_id}')
request = openai.api_resources.Request.get(request_id)
while request.status == 'pending':
time.sleep(1)
request = openai.api_resources.Request.get(request_id)
response = openai.api_resources.ChatCompletion.get(request.response['id']).choices[0]
elif e.status == 403:
print('API key unauthorized')
return None
elif e.status == 402:
print('Ran out of credits')
return None
else:
raise e
response = response.choices[0].message.content.strip().replace('\n', '<br>')
return response
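# Illustrative usage sketch (not part of the original pipeline): generate_text() can be
# called with any prompt string. It retries for up to ~60 seconds via tenacity and returns
# the completion text with newlines replaced by "<br>", or None when the key is unauthorized
# or the account is out of credits. Requires OPENAI_API_KEY to be resolvable by python-decouple.
#
#   summary = generate_text("Summarize the benefits of unit testing in two sentences.")
#   if summary is not None:
#       print(summary)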
def process_business_plan(instance):
from .models import BusinessPlan
# Extract fields from content object
content = instance.content
executive_summary_x = content.get('executive_summary', '')
company_description_x = content.get('company_description', '')
market_analysis_x = content.get('market_analysis', '')
service_offered_x = content.get('service_offered', '')
marketing_strategy_x = content.get('marketing_strategy', '')
management_team_x = content.get('management_team', '')
swot_analysis_x = content.get('swot_analysis', '')
# Generate business plan text
with transaction.atomic():
try:
executive_summary_prompt = f"Generate a executive summary for a business plan for the following business using the guidelines provided {executive_summary_x}.\n\n"
executive_summary = generate_text(executive_summary_prompt)
if not executive_summary:
raise ValueError("Executive summary generation failed")
company_description_prompt = f"Generate a company description for a business plan for the following business using the guidelines provided {company_description_x}. The executive summary is: {executive_summary}\n\n"
company_description = generate_text(company_description_prompt)
if not company_description:
raise ValueError("Company description generation failed")
market_analysis_prompt = f"Generate a market analysis for a business plan for the following business using the guidelines provided {market_analysis_x}. The company description is: {company_description}\n\n"
market_analysis = generate_text(market_analysis_prompt)
if not market_analysis:
raise ValueError("Market analysis generation failed")
service_offered_prompt = f"Generate service offered based on {service_offered_x}. The market analysis is: {market_analysis}. Company Description is {company_description}\n\n"
service_offered = generate_text(service_offered_prompt)
if not service_offered:
raise ValueError("Service offered generation failed")
marketing_strategy_prompt = f"Generate a market analysis for a business plan for the following business using the guidelines provided {marketing_strategy_x}. The service offered is: {service_offered}\n\n"
marketing_strategy = generate_text(marketing_strategy_prompt)
if not marketing_strategy:
raise ValueError("Marketing strategy generation failed")
            swot_analysis_prompt = f"Generate a swot analysis for a business plan for the following business using the guidelines provided. The marketing strategy is: {marketing_strategy}\n\n"
            swot_analysis = generate_text(swot_analysis_prompt)
if not swot_analysis:
raise ValueError("Swot Analysis generation failed")
# Save business plan text to database
business_plan = BusinessPlan(
user=instance.user,
document_session=instance,
executive_summary=executive_summary,
company_description=company_description,
market_analysis=market_analysis,
service_offered=service_offered,
marketing_strategy=marketing_strategy,
swot_analysis=swot_analysis,
)
business_plan.save()
except ValueError as e:
# Log error and/or send notification
print(f"Error: {e}")
def process_resume(instance):
from .models import Resume
# Extract fields from content object
content = instance.content
contact_info_x = content.get('contact_info', {})
full_name_x = contact_info_x.get('full_name', '')
address_x = contact_info_x.get('address', '')
phone_number_x = contact_info_x.get('phone_number', '')
email_x = contact_info_x.get('email', '')
professional_summary_x = content.get('professional_summary', '')
work_experience_x = content.get('work_experience', [])
education_x = content.get('education', [])
skills_x = content.get('skills', '')
references_x = content.get('references', [])
#cover_letter_details = content.get('cover_letter_details', [])
    apply_to = content.get('cover_letter').get('apply_to')
letter_details = content.get('cover_letter').get('letter_details')
# Generate resume text
with transaction.atomic():
try:
professional_summary_prompt = f"Generate professional summary based on {professional_summary_x}.\n\n"
professional_summary = generate_text(professional_summary_prompt)
profile=f"Fullname: {full_name_x} address: {address_x} phone number:{phone_number_x}, Email :{email_x}"
profile+=f"My Professional Summary is :{professional_summary}"
if not professional_summary:
raise ValueError("Professional summary generation failed")
work_experience_prompt = f"Organize the work experiences in a list form: {work_experience_x}.\n\n"
work_experience = generate_text(work_experience_prompt)
profile+=f"My Work Experience: {work_experience}"
if not work_experience:
raise ValueError("Work experience generation failed")
skills_prompt = f"Organize the skills in a list form: {skills_x}.\n\n"
skills = generate_text(skills_prompt)
profile+=f"My skills: {skills}"
if not skills:
raise ValueError("Skills generation failed")
education_prompt = f"Organize education background in a list form: {education_x}.\n\n"
education = generate_text(education_prompt)
profile+=f"My Education Background: {education}"
if not education:
raise ValueError("Education generation failed")
references_prompt = f"Organize references in a list form: {references_x}.\n\n"
references = generate_text(references_prompt)
profile+=f"My References: {references}"
if not references:
raise ValueError("References generation failed")
resume_prompt = f"Generate a well detailed resume. Here are my details: {profile}. Add fields that might be required to make it more presentable. Make it as Colorful. Use structure described here {resume_template.resume_instructions()} \n\n"
resume_x = generate_text(resume_prompt)
#resume_x = generate_text(f"Fit in {resume_x} into the following resume template {resume_template.resume_template()}")
            if not resume_x:
                raise ValueError("Resume generation failed")
cover_letter_prompt = f"Generate Cover Letter based on {profile}. Application details: I'm applying to {apply_to}. Job requirements are {letter_details}.\n\n"
cover_letter = generate_text(cover_letter_prompt)
if not cover_letter:
raise ValueError("Cover letter generation failed")
# Save resume text to database
resume = Resume(
user=instance.user,
document_session=instance,
contact_info=f"Fullname: {full_name_x} address: {address_x} phone number:{phone_number_x}, Email :{email_x}",
professional_summary=professional_summary,
work_experience=work_experience,
education=education,
skills=skills_x,
references=references,
resume = resume_x,
cover_letter=cover_letter
)
resume.save()
except ImportError:
logging.error("Failed to import Resume model.")
except ValueError as e:
logging.error(f"Error: {e}")
def process_project_proposal(instance):
from .models import ProjectProposal
# Extract fields from content object
content = instance.content
title_x = content.get('title', '')
description_x = content.get('description', '')
objectives_x = content.get('objectives', '')
methodology_x = content.get('methodology', '')
budget_x = content.get('budget', '')
timeline_x = content.get('timeline', '')
conclusion_x = content.get('conclusion', '')
# Generate project proposal text
with transaction.atomic():
try:
title_prompt = f"Generate title based on {title_x}.\n\n"
title = generate_text(title_prompt)
if not title:
raise ValueError("Title generation failed")
description_prompt = f"Generate description based on {description_x}. The title is: {title}\n\n"
description = generate_text(description_prompt)
if not description:
raise ValueError("Description generation failed")
objectives_prompt = f"Generate objectives based on {objectives_x}. The description is: {description}\n\n"
objectives = generate_text(objectives_prompt)
if not objectives:
raise ValueError("Objectives generation failed")
methodology_prompt = f"Generate methodology based on {methodology_x}. The objectives are: {objectives}\n\n"
methodology = generate_text(methodology_prompt)
if not methodology:
raise ValueError("Methodology generation failed")
budget_prompt = f"Generate budget based on {budget_x}. The methodology is: {methodology}\n\n"
budget = generate_text(budget_prompt)
if not budget:
raise ValueError("Budget generation failed")
timeline_prompt = f"Generate timeline based on {timeline_x}. The budget is: {budget}\n\n"
timeline = generate_text(timeline_prompt)
if not timeline:
raise ValueError("Timeline generation failed")
conclusion_prompt = f"Generate conclusion based on {conclusion_x}. The timeline is: {timeline}\n\n"
conclusion = generate_text(conclusion_prompt)
if not conclusion:
raise ValueError("Conclusion generation failed")
# Save project proposal text to database
project_proposal = ProjectProposal(
user=instance.user,
document_session=instance,
title=title,
description=description,
objectives=objectives,
methodology=methodology,
budget=budget,
timeline=timeline,
conclusion=conclusion,
)
project_proposal.save()
except ValueError as e:
# Log error and/or send notification
print(f"Error: {e}")
def process_project_report(instance):
from .models import ProjectReport
# Extract fields from content object
content = instance.content
title_x = content.get('title', '')
literature_review_x = content.get('literature_review', '')
methodology_x = content.get('methodology', '')
results_x = content.get('results', '')
discussion_x = content.get('discussion', '')
conclusion_x = content.get('conclusion', '')
references_x = content.get('references', '')
# Generate project report text
with transaction.atomic():
try:
title_prompt = f"Generate title based on {title_x}.\n\n"
title = generate_text(title_prompt)
if not title:
raise ValueError("Title generation failed")
literature_review_prompt = f"Generate literature review based on {literature_review_x}. The title is: {title}\n\n"
literature_review = generate_text(literature_review_prompt)
if not literature_review:
raise ValueError("Literature review generation failed")
methodology_prompt = f"Generate methodology based on {methodology_x}. The literature review is: {literature_review}. Title is {title}\n\n"
methodology = generate_text(methodology_prompt)
if not methodology:
raise ValueError("Methodology generation failed")
results_prompt = f"Generate results based on {results_x}. The methodology is: {methodology}. Literature review is {literature_review}. Title is {title}\n\n"
results = generate_text(results_prompt)
if not results:
raise ValueError("Results generation failed")
discussion_prompt = f"Generate discussion based on {discussion_x}. The results are: {results}\n\n"
discussion = generate_text(discussion_prompt)
if not discussion:
raise ValueError("Discussion generation failed")
conclusion_prompt = f"Generate conclusion for {conclusion_x}. The discussion is: {discussion}\n\n"
conclusion = generate_text(conclusion_prompt)
if not conclusion:
raise ValueError("Conclusion generation failed")
references_prompt = f"Generate references based on {references_x}. The conclusion is: {conclusion}\n\n"
references = generate_text(references_prompt)
if not references:
raise ValueError("References generation failed")
# Save project report text to database
project_report = ProjectReport(
user=instance.user,
document_session=instance,
title=title,
literature_review=literature_review,
methodology=methodology,
results=results,
discussion=discussion,
conclusion=conclusion,
references=references,
)
project_report.save()
except ValueError as e:
# Log error and/or send notification
print(f"Error: {e}")
#The following tasks are handled in the background by celery
def process_business_plan_pdf(instance):
'''
Process the business plan
'''
create_pdf('business_plan.html',instance,attachment="business_plan.pdf")
def process_project_proposal_pdf(instance):
'''
Process the project proposal
'''
create_pdf('project_proposal.html',instance,attachment="project_proposal.pdf")
def create_resume_pdf(instance):
'''
Process the resume
'''
create_pdf('resume.html',instance,attachment="resume.pdf")
create_pdf('cover_letter.html',instance,attachment="cover_letter.pdf")
def create_project_report_pdf(instance):
'''
Process the project report
'''
create_pdf('project_report.html',instance,attachment="project_report.pdf")
def create_pdf(template_index,instance,attachment):
from decouple import config
from pathlib import Path
from urllib.parse import urljoin
from notifications.utils import mail_notification
# Variables we need
#The name of your PDF file
    filename = f"{random.randint(10000000000000, 99999999999999)}.pdf"
    #HTML file to be converted to PDF - inside your Django directory
template = get_template(template_index)
#Add any context variables you need to be dynamically rendered in the HTML
context = {"data":instance}
#Render the HTML
html = template.render(context)
#Options - Very Important [Don't forget this]
options = {
'encoding': 'UTF-8',
'javascript-delay':'1000', #Optional
'enable-local-file-access': None, #To be able to access CSS
'page-size': 'A4',
'custom-header' : [
('Accept-Encoding', 'gzip')
],
}
#Javascript delay is optional
    #Remember the location of the wkhtmltopdf binary (adjust this path for your OS/installation)
path = r"C:\Program Files\wkhtmltopdf\bin\wkhtmltopdf.exe" # Windows
pdf_config = pdfkit.configuration(wkhtmltopdf=path)
#Saving the File
#file_path = settings.MEDIA_ROOT + '/documents/{}/'.format(instance.user.id)
file_path = Path(settings.MEDIA_ROOT).joinpath('documents', str(instance.user.id))
os.makedirs(file_path, exist_ok=True)
pdf_save_path = "{}/{}".format(file_path,filename)
#pdf_save_path = urljoin(file_path.as_posix(), filename)
#Save the PDF
pdfkit.from_string(html, pdf_save_path,
configuration=pdf_config,
options=options)
doc_url = 'documents/{}/{}'.format(instance.user.id,filename)
mail_notification(recipient=instance.user.email,
subject="File Created Successfully",
message="Below Attached is your Document file",
attachment_path=pdf_save_path,
attachment=attachment
)
# Combine base URL and document URL using urljoin
https_path = urljoin(config('APP_DOMAIN'), doc_url)
#Return
return https_path
| [
"Generate conclusion for PLACEHOLDER. The discussion is: PLACEHOLDER\n\n",
"Generate a swot analysis for a business plan for the following business using the guidelines provided. The marketing strategy is: PLACEHOLDER\n\n",
"Generate literature review based on PLACEHOLDER. The title is: PLACEHOLDER\n\n",
"Generate references based on PLACEHOLDER. The conclusion is: PLACEHOLDER\n\n",
"Generate title based on PLACEHOLDER.\n\n",
"Generate service offered based on PLACEHOLDER. The market analysis is: PLACEHOLDER. Company Description is PLACEHOLDER\n\n",
"Generate timeline based on PLACEHOLDER. The budget is: PLACEHOLDER\n\n",
"Generate results based on PLACEHOLDER. The methodology is: PLACEHOLDER. Literature review is PLACEHOLDER. Title is PLACEHOLDER\n\n",
"Generate professional summary based on PLACEHOLDER.\n\n",
"Generate conclusion based on PLACEHOLDER. The timeline is: PLACEHOLDER\n\n",
"Generate a market analysis for a business plan for the following business using the guidelines provided PLACEHOLDER. The company description is: PLACEHOLDER\n\n",
"Organize the work experiences in a list form: PLACEHOLDER.\n\n",
"Generate methodology based on PLACEHOLDER. The literature review is: PLACEHOLDER. Title is PLACEHOLDER\n\n",
"Generate objectives based on PLACEHOLDER. The description is: PLACEHOLDER\n\n",
"You are a helpful assistant.",
"Generate methodology based on PLACEHOLDER. The objectives are: PLACEHOLDER\n\n",
"Generate discussion based on PLACEHOLDER. The results are: PLACEHOLDER\n\n",
"Generate a market analysis for a business plan for the following business using the guidelines provided PLACEHOLDER. The service offered is: PLACEHOLDER\n\n",
"Generate a executive summary for a business plan for the following business using the guidelines provided PLACEHOLDER.\n\n",
"Organize the skills in a list form: PLACEHOLDER.\n\n",
"Generate budget based on PLACEHOLDER. The methodology is: PLACEHOLDER\n\n",
"Generate description based on PLACEHOLDER. The title is: PLACEHOLDER\n\n",
"Organize education background in a list form: PLACEHOLDER.\n\n",
"Generate Cover Letter based on PLACEHOLDER. Application details: I'm applying to PLACEHOLDER. Job requirements are PLACEHOLDER.\n\n",
"Organize references in a list form: PLACEHOLDER.\n\n",
"Generate a company description for a business plan for the following business using the guidelines provided PLACEHOLDER. The executive summary is: PLACEHOLDER\n\n"
] |
2024-01-10 | Cicero-ly/topic-classification | main.py | import os
import time
from pprint import pprint
from typing import List, Tuple
import openai
import pymongo
from anthropic import AI_PROMPT, HUMAN_PROMPT, Anthropic
from anthropic import APIStatusError as AnthropicAPIStatusError
# We're sticking with the HTML parser included in Python's standard library. See https://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser
# If we ever have issues with performance, we should consider lxml, although managing this as a dependency
# can be a bit more of a headache than most of our other strictly-python deps.
# (lxml is written in C, and the python package lxml is simply a
# "pythonic binding" of the underlying libxml2 and libxslt.)
from bs4 import BeautifulSoup
from bson.objectid import ObjectId
from langchain.document_loaders import YoutubeLoader
from youtube_transcript_api._errors import (
NoTranscriptAvailable,
NoTranscriptFound,
TranscriptsDisabled,
TranslationLanguageNotAvailable,
)
# TODO: LATER: fetch topics from db so this is always up-to-date
import constants
import utils
from data_stores.mongodb import thoughts_db
anthropic = Anthropic()
openai.api_key = os.environ["OPENAI_API_KEY"]
PYTHON_ENV = os.environ.get("PYTHON_ENV", "development")
def generate_summary(content: str, title: str):
human_prompt = f"""
Write a 50-300 word summary of the following article, make sure to keep important names.
Keep it professional and concise.
title: {title}
article content: {content}
"""
retries = 5
for i in range(retries):
try:
prompt = f"{HUMAN_PROMPT}: You are a frequent contributor to Wikipedia. \n\n{human_prompt}\n\n{AI_PROMPT}:\n\nSummary:\n\n"
completion = anthropic.completions.create(
prompt=prompt,
model="claude-instant-v1-100k",
max_tokens_to_sample=100000,
temperature=0,
)
response = completion.completion.strip(" \n")
break
except AnthropicAPIStatusError:
print(
f"Anthropic API service unavailable. Retrying again... ({i+1}/{retries})"
)
time.sleep(3)
return response
def generate_topics(content: str, title: str):
human_prompt = f"""
Pick three topics that properly match the article summary below, based on the topics list provided.
Your response format should be:
- TOPIC_1
- TOPIC_2
- TOPIC_3
Do not add a topic that isn't in this list of topics: {constants.topics}
Feel free to use less than three topics if you can't find three topics from the list that are a good fit.
If you pick a topic that is two words or more, make sure every word is capitalized (not just the first word).
Here are some notes regarding topics which are identical but might be called different names:
- If you choose "Mathematics" as a topic, please just call it "Math".
- If you choose "Health" as a topic, please call it "Medicine or Health."
- If you choose "Film", "Music", or "Art" as a topic, please just call it "Culture".
Article title: {title}
Article summary: {content}
"""
retries = 5
for i in range(retries):
try:
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
temperature=0,
messages=[
{
"role": "system",
"content": "You are a frequent contributor to Wikipedia, and have a deep understanding of Wikipedia's categories and topics.",
},
{"role": "user", "content": human_prompt},
],
)
break
except openai.error.ServiceUnavailableError:
print(f"OpenAI service unavailable. Retrying again... ({i+1}/{retries})")
time.sleep(3)
response = completion.choices[0].message
parsed_topics = []
untracked_topics = []
for topic in response.content.split("\n"):
stripped_topic = topic.replace("-", "").strip()
if stripped_topic in constants.topics:
parsed_topics.append(stripped_topic)
else:
untracked_topics.append(stripped_topic)
return {"accepted_topics": parsed_topics, "untracked_topics": untracked_topics}
# TODO: LATER: something more robust down the road...possibly tapping into our existing rules db collection
# Farzad said that "this will need to be greatly expanded, and in short order"
def filter_bad_candidates_for_classification(
thought, parsed_content
) -> Tuple[bool, str]:
"""
Determine if thought should undergo topic classification according to simple filter rules.
If a thought does not pass the filter, it will also be flagged in the DB to ensure
it isn't considered again for future processing.
"""
# General ignore patterns:
if len(parsed_content) < 450:
reason = "Ignore content if character count < 450"
return (False, reason)
if "read more" in parsed_content[-250:]:
reason = "Ignore truncated content"
return (False, reason)
# Specific ignore patterns:
if "marginalrevolution" in thought["url"]:
reason = "Ignore Tyler Cowen content"
return (False, reason)
if "TGIF" in thought["title"] and "thefp" in thought["url"]:
reason = 'Ignore "The Free Press" TGIF articles'
return (False, reason)
if (
"mail" in thought["title"]
and ObjectId("6163165d85b48615886b5718") in thought["voicesInContent"]
):
reason = 'Ignore "mailbag" posts by Matthew Yglesias'
return (False, reason)
if ObjectId("64505e4c509cac9a8e7e226d") in thought["voicesInContent"]:
reason = "Ignore content from the voice 'Public'"
return (False, reason)
if (
ObjectId("60cfdfecdbc5ba3af65ce81e") in thought["voicesInContent"]
or ObjectId("6144af944d89a998bdef2aef") in thought["voicesInContent"]
):
reason = "Ignore Jerry Coyne"
return (False, reason)
if ObjectId("6302c1f6bce5b9d5af604a27") in thought["voicesInContent"]:
reason = "Ignore Alex Gangitano"
return (False, reason)
if "johndcook" in thought["url"]:
reason = "Ignore johndcook.com"
return (False, reason)
if ObjectId("6195895295d7549fb48c32d9") in thought["voicesInContent"]:
reason = "Ignore Milan Singh articles"
return (False, reason)
if ObjectId("629970b464906c0bea98fbc7") in thought["voicesInContent"]:
reason = "Ignore David Pakman articles"
return (False, reason)
return (True, "")
# TODO: We'll likely have to store transcript in S3 as opposed to directly in DB sooner than later.
def store_transcript(thought_pointer, transcript):
thought_collection = thought_pointer["collection"]
thought_id = thought_pointer["_id"]
update_op = thoughts_db[thought_collection].update_one(
{"_id": thought_id}, {"$set": {"content_transcript": transcript}}
)
return update_op.modified_count
def parse_youtube_transcript(youtube_url: str):
transcript = ""
errors = []
try:
# Right now, we are fetching fresh transcripts even if a youtube thought
# already has a transcript in `content_transcript`, since it was alluded to
# previously that those were quite poor
loader = YoutubeLoader.from_youtube_url(youtube_url)
document_list = loader.load()
if len(document_list) > 0:
transcript = document_list[0].page_content
    except (
        NoTranscriptFound,
        NoTranscriptAvailable,
        TranscriptsDisabled,
        TranslationLanguageNotAvailable,
    ):
# Handling these exceptions separately, because the error message
# is egregiously long (contains information about all the languages that
# are and aren't available)
transcript_not_found_error = (
f"Transcript not available for Youtube video at {youtube_url}. "
)
errors.append(transcript_not_found_error)
except Exception as e:
print(
f"Misc. error getting transcript for Youtube video at {youtube_url}—see below:"
)
print(e)
errors.append(str(e))
finally:
return (transcript, errors)
def collect_thoughts_for_classification(single_collection_find_limit=1000):
active_thought_collections = os.environ["ACTIVE_THOUGHT_COLLECTIONS"].split(",")
print("Active thought collections: ", active_thought_collections)
thoughts_to_classify = []
thoughts_to_skip = []
errors = []
for collection in active_thought_collections:
pipeline = [
{
"$match": {
"flags.avoid_topic_classification": {"$ne": True},
"llm_generated_legacy_topics": {"$exists": False},
"reviewed": True,
"valuable": True,
"voicesInContent": {"$ne": None},
"title": {"$ne": None},
"url": {"$ne": None},
"$or": [
# For articles that can be classified, we need content_text or content.
# Youtube videos won't have content_text or content, but rather vid
# (but "vid" is a proxy value for making sure that a youtube video is a youtube video);
# "vid" is not actually used.
{"content_text": {"$ne": None}},
{"content": {"$ne": None}},
{"vid": {"$ne": None}},
],
}
},
{
"$addFields": {
"voices_in_content_count": {"$size": "$voicesInContent"},
"editing_users_count": {
"$cond": {
"if": {"$isArray": "$editingUsers"},
"then": {"$size": "$editingUsers"},
"else": 0,
}
},
},
},
{
"$match": {
"voices_in_content_count": {"$gt": 0},
"editing_users_count": 0,
}
},
{"$sort": {"_id": pymongo.DESCENDING}},
{"$limit": single_collection_find_limit},
{
"$project": {
"url": 1,
"vid": 1,
"title": 1,
"content_text": 1,
"content": 1,
"voicesInContent": 1,
}
},
]
thought_cursor = thoughts_db[collection].aggregate(pipeline)
for thought in thought_cursor:
parsed_content = ""
# Criteria for where to get the content, based on the type of thought and what's available
            thought_is_youtube_video = thought.get("vid") is not None
            thought_is_article = (
                thought.get("content_text") is not None or thought.get("content") is not None
            )
            thought_has_full_text = thought.get("content_text") is not None
            thought_needs_HTML_parsing = (
                thought.get("content_text") is None and thought.get("content") is not None
            )
if thought_is_youtube_video:
transcript, fetch_transcript_errors = parse_youtube_transcript(
thought["url"]
)
store_transcript(
{
"collection": constants.youtube_thought_collection,
"_id": thought["_id"],
},
transcript,
)
parsed_content = transcript
errors.extend(fetch_transcript_errors)
elif thought_is_article:
if thought_has_full_text:
parsed_content = thought["content_text"]
elif thought_needs_HTML_parsing:
soup = BeautifulSoup(thought["content"], "html.parser")
parsed_content = soup.get_text()
else:
continue
(
thought_should_be_processed,
skipped_reason,
) = filter_bad_candidates_for_classification(thought, parsed_content)
if thought_should_be_processed:
thoughts_to_classify.append(
{
"collection": collection,
"_id": thought["_id"],
"content": parsed_content,
"title": thought["title"],
}
)
else:
thoughts_to_skip.append(
{
"collection": collection,
"_id": thought["_id"],
"reason": skipped_reason,
}
)
return (
active_thought_collections,
thoughts_to_classify,
thoughts_to_skip,
errors,
)
def main(single_collection_find_limit=10000):
# Setup/init
job_id = utils.create_job()
all_untracked_topics = {}
thoughts_classified: List[ObjectId] = []
ai_processing_errors = []
# Collect thoughts
(
active_thought_collections,
thoughts_to_classify,
thoughts_to_skip,
data_collection_errors,
) = collect_thoughts_for_classification(single_collection_find_limit)
# Summarize + classify each thought
for thought in thoughts_to_classify:
try:
generated_summary = generate_summary(thought["content"], thought["title"])
generated_topics = generate_topics(generated_summary, thought["title"])
# Here we compile all untracked topics for later analysis. We don't need to include
# reference to the original thought, because the thought itself will contain its own list of
# accepted and untracked topics.
for topic in generated_topics["untracked_topics"]:
if all_untracked_topics.get(topic) is not None:
all_untracked_topics[topic] += 1
else:
all_untracked_topics[topic] = 1
# Only overwrite the thought's "topics" field when in production
if PYTHON_ENV == "production":
fields_to_set = {
"llm_generated_summary": generated_summary,
"llm_generated_legacy_topics": generated_topics, # This includes both accepted and untracked topics.
"topics": generated_topics["accepted_topics"],
}
else:
fields_to_set = {
"llm_generated_summary": generated_summary,
"llm_generated_legacy_topics": generated_topics,
}
update_op = thoughts_db[thought["collection"]].update_one(
{"_id": thought["_id"]},
{
"$set": fields_to_set,
"$push": {
"llm_processing_metadata.workflows_completed": {
"$each": [
{
**constants.workflows["summarization"],
"last_performed": utils.get_now(),
"job_id": job_id,
},
{
**constants.workflows["topic_classification"],
"last_performed": utils.get_now(),
"job_id": job_id,
},
]
},
"llm_processing_metadata.all_fields_modified": {
"$each": list(fields_to_set.keys())
},
},
},
)
if update_op.modified_count == 1:
thoughts_classified.append(
{"collection": thought["collection"], "_id": thought["_id"]}
)
except Exception as e:
ai_processing_errors.append(str(e))
print(e)
for thought in thoughts_to_skip:
update_op = thoughts_db[thought["collection"]].update_one(
{"_id": thought["_id"]},
{"$set": {"flags.avoid_topic_classification": True}},
)
# Finish up, log the job
utils.update_job(
job_id,
{
"status": "complete",
"last_updated": utils.get_now(),
"workflows_completed": [
constants.workflows["summarization"],
constants.workflows["topic_classification"],
],
"job_metadata": {
"collections_queried": active_thought_collections,
"thoughts_classified_count": len(thoughts_classified),
"thoughts_skipped": thoughts_to_skip,
"thoughts_skipped_count": len(thoughts_to_skip),
"untracked_topics": all_untracked_topics,
"errors": {
"data_collection_errors": data_collection_errors,
"ai_processing_errors": ai_processing_errors,
},
},
"test_job": False if PYTHON_ENV == "production" else True,
},
)
return {
"quantity_thoughts_classified": len(thoughts_classified),
}
if __name__ == "__main__":
tic = time.perf_counter()
if PYTHON_ENV == "production":
single_collection_find_limit = int(os.environ["SINGLE_COLLECTION_FIND_LIMIT"])
elif PYTHON_ENV == "data_analysis":
# A larger `n` for testing AI performance and performing more substantive data analysis.
single_collection_find_limit = 100
else:
# A small `n` for operational testing/container testing.
single_collection_find_limit = 3
print(
"Limit for querying each collection has been set to: ",
single_collection_find_limit,
)
x = main(single_collection_find_limit=single_collection_find_limit)
toc = time.perf_counter()
pprint(x)
print(f"Time elapsed: {toc-tic:0.4f}")
| [
"1",
"{'$gt': 0}",
"\n Write a 50-300 word summary of the following article, make sure to keep important names. \n Keep it professional and concise.\n\n title: PLACEHOLDER\n article content: PLACEHOLDER\n ",
"{'$size': '$voicesInContent'}",
"{'$ne': None}",
"PLACEHOLDER: You are a frequent contributor to Wikipedia. \n\nPLACEHOLDER\n\nPLACEHOLDER:\n\nSummary:\n\n",
"You are a frequent contributor to Wikipedia, and have a deep understanding of Wikipedia's categories and topics."
] |
2024-01-10 | ZJU-RL/rllite | rllite~common~vec_env.py | #This code is from openai baseline
#https://github.com/openai/baselines/tree/master/baselines/common/vec_env
import os
import gym
import numpy as np
import contextlib
import multiprocessing as mp
from collections import OrderedDict
class VecEnv(object):
"""
An abstract asynchronous, vectorized environment.
"""
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a tuple of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close(self):
"""
Clean up the environments' resources.
"""
pass
def step(self, actions):
self.step_async(actions)
return self.step_wait()
class DummyVecEnv(VecEnv):
"""
    VecEnv that runs multiple environments sequentially, that is,
    the step and reset commands are sent to one environment at a time.
Useful when debugging and when num_env == 1 (in the latter case,
avoids communication overhead)
"""
def __init__(self, env_fns):
"""
Arguments:
        env_fns: iterable of callables - functions that build environments
"""
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
obs_space = env.observation_space
self.keys, shapes, dtypes = obs_space_info(obs_space)
self.buf_obs = { k: np.zeros((self.num_envs,) + tuple(shapes[k]), dtype=dtypes[k]) for k in self.keys }
        self.buf_dones = np.zeros((self.num_envs,), dtype=bool)
self.buf_rews = np.zeros((self.num_envs,), dtype=np.float32)
self.buf_infos = [{} for _ in range(self.num_envs)]
self.actions = None
self.specs = [e.spec for e in self.envs]
def step_async(self, actions):
listify = True
try:
if len(actions) == self.num_envs:
listify = False
except TypeError:
pass
if not listify:
self.actions = actions
else:
assert self.num_envs == 1, "actions {} is either not a list or has a wrong size - cannot match to {} environments".format(actions, self.num_envs)
self.actions = [actions]
def step_wait(self):
for e in range(self.num_envs):
action = self.actions[e]
if isinstance(self.envs[e].action_space, gym.spaces.Discrete):
action = int(action)
obs, self.buf_rews[e], self.buf_dones[e], self.buf_infos[e] = self.envs[e].step(action)
if self.buf_dones[e]:
obs = self.envs[e].reset()
self._save_obs(e, obs)
return (self._obs_from_buf(), np.copy(self.buf_rews), np.copy(self.buf_dones),
self.buf_infos.copy())
def reset(self):
for e in range(self.num_envs):
obs = self.envs[e].reset()
self._save_obs(e, obs)
return self._obs_from_buf()
def _save_obs(self, e, obs):
for k in self.keys:
if k is None:
self.buf_obs[k][e] = obs
else:
self.buf_obs[k][e] = obs[k]
def _obs_from_buf(self):
return dict_to_obs(copy_obs_dict(self.buf_obs))
def get_images(self):
return [env.render(mode='rgb_array') for env in self.envs]
def render(self, mode='human'):
if self.num_envs == 1:
return self.envs[0].render(mode=mode)
else:
return super().render(mode=mode)
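# Illustrative usage sketch for DummyVecEnv (not part of the original file; "CartPole-v1"
# is just an example environment id):
#
#   import gym
#   vec_env = DummyVecEnv([lambda: gym.make("CartPole-v1") for _ in range(4)])
#   obs = vec_env.reset()                                   # stacked observations, one row per env
#   actions = [vec_env.action_space.sample() for _ in range(4)]
#   obs, rews, dones, infos = vec_env.step(actions)         # done envs are reset automatically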
def copy_obs_dict(obs):
"""
Deep-copy an observation dict.
"""
return {k: np.copy(v) for k, v in obs.items()}
def dict_to_obs(obs_dict):
"""
Convert an observation dict into a raw array if the
original observation space was not a Dict space.
"""
if set(obs_dict.keys()) == {None}:
return obs_dict[None]
return obs_dict
def obs_space_info(obs_space):
"""
Get dict-structured information about a gym.Space.
Returns:
A tuple (keys, shapes, dtypes):
keys: a list of dict keys.
shapes: a dict mapping keys to shapes.
dtypes: a dict mapping keys to dtypes.
"""
if isinstance(obs_space, gym.spaces.Dict):
assert isinstance(obs_space.spaces, OrderedDict)
subspaces = obs_space.spaces
else:
subspaces = {None: obs_space}
keys = []
shapes = {}
dtypes = {}
for key, box in subspaces.items():
keys.append(key)
shapes[key] = box.shape
dtypes[key] = box.dtype
return keys, shapes, dtypes
def obs_to_dict(obs):
"""
Convert an observation into a dict.
"""
if isinstance(obs, dict):
return obs
return {None: obs}
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
try:
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'render':
remote.send(env.render(mode='rgb_array'))
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces_spec':
remote.send((env.observation_space, env.action_space, env.spec))
else:
raise NotImplementedError
except KeyboardInterrupt:
print('SubprocVecEnv worker: got KeyboardInterrupt')
finally:
env.close()
@contextlib.contextmanager
def clear_mpi_env_vars():
"""
    from mpi4py import MPI will call MPI_Init by default. If the child process has MPI
    environment variables, MPI will think that the child process is an MPI process just
    like the parent and do bad things such as hang.
    This context manager is a hacky way to clear those environment variables temporarily,
    such as when we are starting multiprocessing Processes.
"""
removed_environment = {}
for k, v in list(os.environ.items()):
for prefix in ['OMPI_', 'PMI_']:
if k.startswith(prefix):
removed_environment[k] = v
del os.environ[k]
try:
yield
finally:
os.environ.update(removed_environment)
class SubprocVecEnv(VecEnv):
"""
VecEnv that runs multiple environments in parallel in subproceses and communicates with them via pipes.
Recommended to use when num_envs > 1 and step() can be a bottleneck.
"""
def __init__(self, env_fns, spaces=None, context='spawn'):
"""
Arguments:
env_fns: iterable of callables - functions that create environments to run in subprocesses. Need to be cloud-pickleable
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
ctx = mp.get_context(context)
self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(nenvs)])
self.ps = [ctx.Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
with clear_mpi_env_vars():
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces_spec', None))
observation_space, action_space, self.spec = self.remotes[0].recv()
self.viewer = None
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
self._assert_not_closed()
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
self._assert_not_closed()
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return _flatten_obs(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
self._assert_not_closed()
for remote in self.remotes:
remote.send(('reset', None))
return _flatten_obs([remote.recv() for remote in self.remotes])
def close_extras(self):
self.closed = True
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
def get_images(self):
self._assert_not_closed()
for pipe in self.remotes:
pipe.send(('render', None))
imgs = [pipe.recv() for pipe in self.remotes]
return imgs
def _assert_not_closed(self):
assert not self.closed, "Trying to operate on a SubprocVecEnv after calling close()"
def __del__(self):
if not self.closed:
self.close()
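# Illustrative usage sketch for SubprocVecEnv (not part of the original file): factories are
# wrapped in CloudpickleWrapper, so lambdas and closures work, but because the default context
# is "spawn" the constructing script should still be guarded by an import check:
#
#   if __name__ == "__main__":
#       import gym
#       vec_env = SubprocVecEnv([lambda: gym.make("CartPole-v1") for _ in range(4)])
#       obs = vec_env.reset()
#       obs, rews, dones, infos = vec_env.step([vec_env.action_space.sample() for _ in range(4)])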
def _flatten_obs(obs):
assert isinstance(obs, (list, tuple))
assert len(obs) > 0
if isinstance(obs[0], dict):
keys = obs[0].keys()
return {k: np.stack([o[k] for o in obs]) for k in keys}
else:
return np.stack(obs)
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob) | [] |
2024-01-10 | Ptorresr/weblogic-deploy-tooling | core~src~main~python~wlsdeploy~tool~util~attribute_setter.py | """
Copyright (c) 2017, 2020, Oracle Corporation and/or its affiliates. All rights reserved.
Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
"""
from sets import Set
from org.python.modules import jarray
from java.util import List
from javax.management import ObjectName
from oracle.weblogic.deploy.aliases import TypeUtils
from wlsdeploy.aliases.alias_jvmargs import JVMArguments
from wlsdeploy.aliases.location_context import LocationContext
from wlsdeploy.aliases.wlst_modes import WlstModes
from wlsdeploy.exception import exception_helper
from wlsdeploy.tool.util.wlst_helper import WlstHelper
from wlsdeploy.aliases.model_constants import CAPACITY
from wlsdeploy.aliases.model_constants import CERT_PATH_PROVIDER
from wlsdeploy.aliases.model_constants import CLUSTER
from wlsdeploy.aliases.model_constants import COHERENCE_CLUSTER_SYSTEM_RESOURCE
from wlsdeploy.aliases.model_constants import CONTEXT_REQUEST_CLASS
from wlsdeploy.aliases.model_constants import DISTRIBUTED_QUEUE
from wlsdeploy.aliases.model_constants import DISTRIBUTED_TOPIC
from wlsdeploy.aliases.model_constants import FAIR_SHARE_REQUEST_CLASS
from wlsdeploy.aliases.model_constants import FILE_STORE
from wlsdeploy.aliases.model_constants import HEAP_DUMP_ACTION
from wlsdeploy.aliases.model_constants import IMAGE_NOTIFICATION
from wlsdeploy.aliases.model_constants import JDBC_STORE
from wlsdeploy.aliases.model_constants import JDBC_SYSTEM_RESOURCE
from wlsdeploy.aliases.model_constants import JMS_BRIDGE_DESTINATION
from wlsdeploy.aliases.model_constants import JMS_NOTIFICATION
from wlsdeploy.aliases.model_constants import JMS_RESOURCE
from wlsdeploy.aliases.model_constants import JMS_SERVER
from wlsdeploy.aliases.model_constants import JMX_NOTIFICATION
from wlsdeploy.aliases.model_constants import LOG_ACTION
from wlsdeploy.aliases.model_constants import LOG_FILTER
from wlsdeploy.aliases.model_constants import MACHINE
from wlsdeploy.aliases.model_constants import MASKED_PASSWORD
from wlsdeploy.aliases.model_constants import MAX_THREADS_CONSTRAINT
from wlsdeploy.aliases.model_constants import MIGRATABLE_TARGET
from wlsdeploy.aliases.model_constants import MIN_THREADS_CONSTRAINT
from wlsdeploy.aliases.model_constants import PARTITION
from wlsdeploy.aliases.model_constants import PARTITION_WORK_MANAGER
from wlsdeploy.aliases.model_constants import PERSISTENT_STORE
from wlsdeploy.aliases.model_constants import QUEUE
from wlsdeploy.aliases.model_constants import QUOTA
from wlsdeploy.aliases.model_constants import REALM
from wlsdeploy.aliases.model_constants import RESOURCE_GROUP
from wlsdeploy.aliases.model_constants import RESOURCE_GROUP_TEMPLATE
from wlsdeploy.aliases.model_constants import RESOURCE_MANAGEMENT
from wlsdeploy.aliases.model_constants import RESOURCE_MANAGER
from wlsdeploy.aliases.model_constants import RESPONSE_TIME_REQUEST_CLASS
from wlsdeploy.aliases.model_constants import REST_NOTIFICATION
from wlsdeploy.aliases.model_constants import SAF_AGENT
from wlsdeploy.aliases.model_constants import SAF_ERROR_HANDLING
from wlsdeploy.aliases.model_constants import SAF_IMPORTED_DESTINATION
from wlsdeploy.aliases.model_constants import SAF_QUEUE
from wlsdeploy.aliases.model_constants import SAF_TOPIC
from wlsdeploy.aliases.model_constants import SAF_REMOTE_CONTEXT
from wlsdeploy.aliases.model_constants import SCRIPT_ACTION
from wlsdeploy.aliases.model_constants import SECURITY_CONFIGURATION
from wlsdeploy.aliases.model_constants import SELF_TUNING
from wlsdeploy.aliases.model_constants import SERVER
from wlsdeploy.aliases.model_constants import SERVER_TEMPLATE
from wlsdeploy.aliases.model_constants import SMTP_NOTIFICATION
from wlsdeploy.aliases.model_constants import SNMP_NOTIFICATION
from wlsdeploy.aliases.model_constants import TEMPLATE
from wlsdeploy.aliases.model_constants import THREAD_DUMP_ACTION
from wlsdeploy.aliases.model_constants import TOPIC
from wlsdeploy.aliases.model_constants import UNIFORM_DISTRIBUTED_QUEUE
from wlsdeploy.aliases.model_constants import UNIFORM_DISTRIBUTED_TOPIC
from wlsdeploy.aliases.model_constants import VIRTUAL_TARGET
from wlsdeploy.aliases.model_constants import WATCH_NOTIFICATION
from wlsdeploy.aliases.model_constants import WS_RELIABLE_DELIVERY_POLICY
from wlsdeploy.aliases.model_constants import XML_ENTITY_CACHE
from wlsdeploy.aliases.model_constants import XML_REGISTRY
class AttributeSetter(object):
"""
Contains the "set" methods used to set WLST values that require mbeans or other special processing.
The public set_ methods in this class correspond directly to the set_method names in the alias files.
The signature for each set_ method is (location, key, value), where key and value are from the model.
"""
# used for target search
__target_type_names = [
CLUSTER,
SERVER,
MIGRATABLE_TARGET,
]
# used for destination search
__destination_type_names = [
QUEUE,
TOPIC,
DISTRIBUTED_QUEUE,
DISTRIBUTED_TOPIC,
UNIFORM_DISTRIBUTED_QUEUE,
UNIFORM_DISTRIBUTED_TOPIC
]
# used for SAF destination search
__saf_destination_type_names = [
SAF_QUEUE,
SAF_TOPIC
]
# used for persistent store search
__persistent_store_type_names = [
FILE_STORE,
JDBC_STORE
]
# used for self-tuning deployment and attribute processing.
# these names are applicable as self-tuning sub-folder names, and work manager attribute names.
# work manager is intentionally excluded and treated separately.
__self_tuning_type_names = [
CAPACITY,
CONTEXT_REQUEST_CLASS,
FAIR_SHARE_REQUEST_CLASS,
MAX_THREADS_CONSTRAINT,
MIN_THREADS_CONSTRAINT,
RESPONSE_TIME_REQUEST_CLASS
]
# used for WLDF watch notification search
__watch_action_types = [
HEAP_DUMP_ACTION,
IMAGE_NOTIFICATION,
JMS_NOTIFICATION,
JMX_NOTIFICATION,
LOG_ACTION,
REST_NOTIFICATION,
SCRIPT_ACTION,
SMTP_NOTIFICATION,
SNMP_NOTIFICATION,
THREAD_DUMP_ACTION
]
_class_name = "AttributeSetter"
def __init__(self, aliases, logger, exception_type, wlst_mode=WlstModes.OFFLINE):
self.__logger = logger
self.__exception_type = exception_type
self.__wlst_mode = wlst_mode
self.__aliases = aliases
self.__wlst_helper = WlstHelper(exception_type)
return
#
# public set_ methods for special attribute types, signature (self, location, key, value, wlst_value, ...)
#
def set_target_jms_mbeans(self, location, key, value, wlst_value):
"""
Set the target MBeans for targets that can include JMS resources (e.g., JMSServer).
:param location: the location
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if target is not found
"""
self.set_target_mbeans(location, key, value, wlst_value, include_jms=True)
return
def set_target_mbeans(self, location, key, value, wlst_value, include_jms=False):
"""
Set the target MBeans.
:param location: the location
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:param include_jms: whether or not to include JMS resources
:raises BundleAwareException of the specified type: if target is not found
"""
targets_value = self.__build_target_mbean_list(value, wlst_value, location, include_jms=include_jms)
self.set_attribute(location, key, targets_value, wlst_merge_value=wlst_value, use_raw_value=True)
return
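    # Illustrative sketch (not from the original source) of the dispatch convention the class
    # docstring describes: an alias entry's set_method name resolves to one of the public
    # methods above, which is then invoked with the model value and the existing WLST value.
    # The attribute and target names below are made up for the example.
    #
    #   setter = AttributeSetter(aliases, logger, exception_type)
    #   set_method = getattr(setter, 'set_target_mbeans')
    #   set_method(location, 'Target', 'Cluster-1,Server-2', wlst_value)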
def set_jms_error_destination_mbean(self, location, key, value, wlst_value):
"""
Set the JMS Error Destination MBean.
:param location: the location
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if destination is not found
"""
mbean = self.__find_jms_destination_mbean(location, value)
self.set_attribute(location, key, mbean, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_jms_bridge_destination_mbean(self, location, key, value, wlst_value):
"""
Set the JMS Bridge Destination MBean.
:param location: the location
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if destination is not found
"""
mbean = self.__find_in_resource_group_or_domain(location, JMS_BRIDGE_DESTINATION, value, required=True)
self.set_attribute(location, key, mbean, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_persistent_store_mbean(self, location, key, value, wlst_value):
"""
Set the Persistent Store MBean.
:param location: the location
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if store is not found
"""
mbean = self.__find_persistent_store(location, value)
self.set_attribute(location, key, mbean, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_data_source_mbean(self, location, key, value, wlst_value):
"""
Set the DataSource MBean.
:param location: the location
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if DataSource is not found
"""
mbean = self.__find_in_resource_group_or_domain(location, JDBC_SYSTEM_RESOURCE, value, required=True)
self.set_attribute(location, key, mbean, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_saf_remote_context_mbean(self, location, key, value, wlst_value):
"""
Set the SAF RemoteContext MBean.
:param location: the location
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if SAF RemoteContext is not found
"""
resource_location = self.__get_parent_location(location, JMS_RESOURCE)
mbean = self.__find_in_location(resource_location, SAF_REMOTE_CONTEXT, value, required=True)
self.set_attribute(location, key, mbean, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_saf_error_destination_mbean(self, location, key, value, wlst_value):
"""
Set the SAF Error Destination MBean.
:param location: the location
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if destination is not found
"""
mbean = self.__find_saf_destination_mbean(location, value)
self.set_attribute(location, key, mbean, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_saf_error_handling_mbean(self, location, key, value, wlst_value):
"""
Set the SAF Error Handling MBean.
:param location: the location
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if the SAF error handling is not found
"""
resource_location = self.__get_parent_location(location, JMS_RESOURCE)
mbean = self.__find_in_location(resource_location, SAF_ERROR_HANDLING, value, required=True)
self.set_attribute(location, key, mbean, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_self_tuning_mbean(self, location, key, value, wlst_value):
"""
Set the SelfTuning MBean.
:param location: the location
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if MBean is not found
"""
tuning_location = self.__get_parent_location(location, SELF_TUNING)
mbean = self.__find_in_location(tuning_location, key, value, required=True)
self.set_attribute(location, key, mbean, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_server_mbeans(self, location, key, value, wlst_value):
"""
Set the Server MBeans.
:param location: the location
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if a server is not found
"""
mbeans = self.__build_server_mbean_list(value, wlst_value)
self.set_attribute(location, key, mbeans, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_server_mbean(self, location, key, value, wlst_value):
"""
Set the Server MBean.
:param location: the location
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if the server is not found
"""
mbean = self.__find_in_location(LocationContext(), SERVER, value, required=True)
self.set_attribute(location, key, mbean, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_server_template_mbean(self, location, key, value, wlst_value):
"""
Set the Server Template MBean.
:param location: the location
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if the server template is not found
"""
mbean = self.__find_in_location(LocationContext(), SERVER_TEMPLATE, value, required=True)
self.set_attribute(location, key, mbean, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_cluster_mbean(self, location, key, value, wlst_value):
"""
Set the Cluster MBean.
:param location: the location
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if the cluster is not found
"""
mbean = self.__find_in_location(LocationContext(), CLUSTER, value, required=True)
self.set_attribute(location, key, mbean, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_server_cluster_mbean(self, location, key, value, wlst_value):
"""
Assign the Cluster MBean to a server.
:param location: the location
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if the cluster is not found
"""
entity_type, entity_name = self.__aliases.get_model_type_and_name(location)
self.__wlst_helper.assign(entity_type, entity_name, key, value)
return
def set_coherence_cluster_mbean(self, location, key, value, wlst_value):
"""
Set the Coherence Cluster MBean.
:param location: the location
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if the Coherence cluster system resource is not found
"""
mbean = self.__find_in_location(LocationContext(), COHERENCE_CLUSTER_SYSTEM_RESOURCE, value, required=True)
self.set_attribute(location, key, mbean, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_machine_mbean(self, location, key, value, wlst_value):
"""
Set the Machine MBean.
:param location: the location
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if the machine is not found
"""
mbean = self.__find_in_location(LocationContext(), MACHINE, value, required=True)
self.set_attribute(location, key, mbean, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_jms_template_mbean(self, location, key, value, wlst_value):
"""
Set the JMS Template MBean.
:param location: the location
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if the JMS Template is not found
"""
resource_location = self.__get_parent_location(location, JMS_RESOURCE)
mbean = self.__find_in_location(resource_location, TEMPLATE, value, required=True)
self.set_attribute(location, key, mbean, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_wldf_action_mbeans(self, location, key, value, wlst_value):
"""
Set the WLDF Action/Notification MBean.
:param location: the location
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if the WLDF Action/Notification is not found
"""
watch_location = self.__get_parent_location(location, WATCH_NOTIFICATION)
action_names = TypeUtils.convertToType(List, value) # type: list of str
action_names = self.__merge_existing_items(action_names, wlst_value)
action_mbeans = []
for action_name in action_names:
action_mbean = self.__find_wldf_action(watch_location, action_name)
action_mbeans.append(action_mbean)
if self.__wlst_mode == WlstModes.ONLINE:
# for online, call the current location's add method for each action mbean
location_mbean = self.__wlst_helper.cd(self.__wlst_helper.get_pwd())
for action_mbean in action_mbeans:
location_mbean.addNotification(action_mbean)
else:
self.set_attribute(location, key, action_mbeans, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_log_filter_mbean(self, location, key, value, wlst_value):
"""
Set the Log Filter MBean.
:param location: the location
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if the log filter is not found
"""
mbean = self.__find_in_location(LocationContext(), LOG_FILTER, value, required=True)
self.set_attribute(location, key, mbean, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_jms_server_mbean(self, location, key, value, wlst_value):
"""
Set the JMS Server MBean for entities, such as WLSReliableDeliveryPolicy, that take a single JMS Server MBean.
:param location: location to look for jms server
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if jms server mbean is not found.
"""
mbean = self.__find_in_location(LocationContext(), JMS_SERVER, value, required=True)
self.set_attribute(location, key, mbean, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_jms_quota_mbean(self, location, key, value, wlst_value):
"""
Set the Quota MBean for entities (queues, templates, topics) that take a single Quota MBean.
:param location: location to look for Quota mbean
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if quota mbean is not found.
"""
resource_location = self.__get_parent_location(location, JMS_RESOURCE)
mbean = self.__find_in_location(resource_location, QUOTA, value, required=True)
self.set_attribute(location, key, mbean, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_reliable_delivery_policy_mbean(self, location, key, value, wlst_value):
"""
Sets the WS SOAP reliable delivery policy MBean used by MBeans like Server and Server Template.
:param location: location to look for reliable delivery policy
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if reliable delivery policy mbean is not found.
"""
mbean = self.__find_in_location(LocationContext(), WS_RELIABLE_DELIVERY_POLICY, value, required=True)
self.set_attribute(location, key, mbean, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_xml_entity_cache_mbean(self, location, key, value, wlst_value):
"""
Sets the XML entity cache MBean used by topology entities such as Server.
:param location: location to look for the XML entity cache
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if xml entity cache mbean is not found.
"""
mbean = self.__find_in_location(LocationContext(), XML_ENTITY_CACHE, value, required=True)
self.set_attribute(location, key, mbean, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_xml_registry_mbean(self, location, key, value, wlst_value):
"""
Sets the XML registry MBean used by topology entities such as Server.
:param location: location to look for the XML registry
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if xml registry mbean is not found.
"""
mbean = self.__find_in_location(LocationContext(), XML_REGISTRY, value, required=True)
self.set_attribute(location, key, mbean, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_mt_target_mbeans(self, location, key, value, wlst_value):
"""
Set the virtual target MBeans.
:param location: the location
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if target is not found
"""
targets_value = self.__build_virtual_target_mbean_list(value, wlst_value)
self.set_attribute(location, key, targets_value, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_security_realm_mbean(self, location, key, value, wlst_value):
"""
Set the security realm MBean.
:param location: the location
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if the security realm is not found
"""
security_location = self.__get_domain_location(location).append_location(SECURITY_CONFIGURATION)
mbean = self.__find_in_location(security_location, REALM, value, required=True)
self.set_attribute(location, key, mbean, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_certificate_registry_mbean(self, location, key, value, wlst_value):
"""
Set the certificate registry MBean.
:param location: the location
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if the certificate registry is not found
"""
realm_location = self.__get_parent_location(location, REALM)
mbean = self.__find_in_location(realm_location, CERT_PATH_PROVIDER, value, required=True)
self.set_attribute(location, key, mbean, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_resource_group_template_mbean(self, location, key, value, wlst_value):
"""
Set the resource group template MBean.
:param location: the location
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if the resource group template is not found
"""
domain_location = self.__get_domain_location(location)
mbean = self.__find_in_location(domain_location, RESOURCE_GROUP_TEMPLATE, value, required=True)
self.set_attribute(location, key, mbean, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_partition_work_manager_mbean(self, location, key, value, wlst_value):
"""
Set the partition work manager MBean. Search in the same partition, then at the domain level.
:param location: the location
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if the partition work manager is not found
"""
method_name = 'set_partition_work_manager_mbean'
partition_location = self.__get_parent_location(location, PARTITION)
mbean = self.__find_in_location(partition_location, PARTITION_WORK_MANAGER, value)
if mbean is None:
domain_location = self.__get_domain_location(location)
mbean = self.__find_in_location(domain_location, PARTITION_WORK_MANAGER, value)
if mbean is None:
_type, partition_name = self.__aliases.get_model_type_and_name(location)
ex = exception_helper.create_exception(self.__exception_type, 'WLSDPLY-19206', value, partition_name)
self.__logger.throwing(class_name=self._class_name, method_name=method_name, error=ex)
raise ex
self.set_attribute(location, key, mbean, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_resource_manager_mbean(self, location, key, value, wlst_value):
"""
Set the resource manager MBean. Search in the same partition, then at the domain level.
:param location: the location
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if the resource manager is not found
"""
method_name = 'set_resource_manager_mbean'
partition_location = self.__get_parent_location(location, PARTITION)
mbean = self.__find_in_location(partition_location, RESOURCE_MANAGER, value)
if mbean is None:
management_location = self.__get_domain_location(location).append_location(RESOURCE_MANAGEMENT)
mbean = self.__find_in_location(management_location, RESOURCE_MANAGER, value)
if mbean is None:
_type, manager_name = self.__aliases.get_model_type_and_name(location)
ex = exception_helper.create_exception(self.__exception_type, 'WLSDPLY-19207', value, manager_name)
self.__logger.throwing(class_name=self._class_name, method_name=method_name, error=ex)
raise ex
self.set_attribute(location, key, mbean, wlst_merge_value=wlst_value, use_raw_value=True)
return
def set_jvm_args(self, location, key, value, wlst_value):
"""
Set the server start args string. The new arguments are merged and re-sorted with existing arguments.
:param location: the location
:param key: the attribute name
:param value: the string value
:param wlst_value: the existing value of the attribute from WLST
:raises BundleAwareException of the specified type: if an error occurs while setting the attribute
"""
if value is None or len(value) == 0:
result = value
elif wlst_value is None or len(wlst_value) == 0:
new_args = JVMArguments(self.__logger, value)
result = new_args.get_arguments_string()
else:
new_args = JVMArguments(self.__logger, value)
merged_args = JVMArguments(self.__logger, wlst_value)
merged_args.merge_jvm_arguments(new_args)
result = merged_args.get_arguments_string()
self.set_attribute(location, key, result, wlst_merge_value=wlst_value, use_raw_value=True)
return
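# Illustrative sketch of the merge behavior (hypothetical values): if the existing WLST
# arguments were '-Xms256m -Xmx512m' and the model supplies '-Xmx1024m -Dfoo=bar',
# JVMArguments merges and re-sorts them so the resulting string would contain roughly
# '-Xms256m -Xmx1024m -Dfoo=bar', with the model value taking precedence on conflicts.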
#
# public set_attribute convenience methods
#
def set_attribute(self, location, model_key, model_value, wlst_merge_value=None, use_raw_value=False):
"""
Convenience method for setting the attribute.
:param location: location
:param model_key: attribute name
:param model_value: attribute value
:param wlst_merge_value: value from WLST to merge
:param use_raw_value: whether or not to use the model value as-is; the default is to convert it to a WLST value
:raises BundleAwareException of the specified type: if an error occurs
"""
_method_name = 'set_attribute'
if use_raw_value:
wlst_param = self.__aliases.get_wlst_attribute_name(location, model_key)
wlst_value = model_value
else:
wlst_param, wlst_value = \
self.__aliases.get_wlst_attribute_name_and_value(location, model_key, model_value,
existing_wlst_value=wlst_merge_value)
if wlst_param is None:
self.__logger.info('WLSDPLY-20011', model_key, class_name=self._class_name, method_name=_method_name)
elif wlst_value is None:
self.__logger.info('WLSDPLY-20012', model_key, str(model_value),
class_name=self._class_name, method_name=_method_name)
else:
if self.__logger.is_finer_enabled():
print_model_value = model_value
print_wlst_value = wlst_value
if self.__aliases.is_model_password_attribute(location, model_key):
print_model_value = MASKED_PASSWORD
print_wlst_value = MASKED_PASSWORD
self.__logger.finer('WLSDPLY-19211', wlst_param, print_model_value, print_wlst_value,
location.get_folder_path(), class_name=self._class_name, method_name=_method_name)
self.__wlst_helper.set(wlst_param, wlst_value)
return
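# Illustrative usage (hypothetical attribute and value): a caller would typically invoke
#     self.set_attribute(location, 'Notes', 'managed by WDT')
# letting the aliases translate the model key into the WLST attribute name and value
# before wlst_helper.set() is called.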
def set_attribute_with_cmo(self, location, key, value, wlst_value=None, masked=False):
_method_name = 'set_attribute_with_cmo'
wlst_attr_name, wlst_attr_value = \
self.__aliases.get_wlst_attribute_name_and_value(location, key, value, existing_wlst_value=wlst_value)
if wlst_attr_name is None:
self.__logger.info('WLSDPLY-20011', key, class_name=self._class_name, method_name=_method_name)
elif wlst_attr_value is None:
log_value = str(value)
if masked:
log_value = '<masked>'
self.__logger.info('WLSDPLY-20012', key, log_value, class_name=self._class_name, method_name=_method_name)
else:
attrib_path = self.__aliases.get_wlst_attributes_path(location)
self.__wlst_helper.cd(attrib_path)
self.__wlst_helper.set_with_cmo(wlst_attr_name, wlst_attr_value, masked=masked)
return
#
# internal lookup methods
#
def __build_target_mbean_list(self, target_value, wlst_value, location, include_jms=False):
"""
Construct the target MBean list.
:param target_value: the target value
:param wlst_value: the existing value from WLST
:param location: the location used to resolve the target MBeans
:param include_jms: whether or not to include JMS targets; the default is False
:return: the Java array of MBean ObjectNames
:raises BundleAwareException of the specified type: if an error occurs
"""
target_names = TypeUtils.convertToType(List, target_value) # type: list of str
target_names = self.__merge_existing_items(target_names, wlst_value)
name_list = []
for target_name in target_names:
target_mbean = self.__find_target(target_name, location, include_jms=include_jms)
name_list.append(target_mbean.getObjectName())
return jarray.array(name_list, ObjectName)
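# Illustrative sketch (hypothetical targets): for a model target value naming 'cluster1'
# and 'server1', this returns a jarray of the two targets' ObjectNames, which is then
# passed to set_attribute() with use_raw_value=True by set_target_mbeans().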
def __build_server_mbean_list(self, value, wlst_value):
"""
Construct the server MBean list.
:param value: the value
:param wlst_value: the existing value from WLST
:return: the Java array of MBeans ObjectNames
:raises BundleAwareException of the specified type: if an error occurs
"""
server_names = TypeUtils.convertToType(List, value) # type: list of str
server_names = self.__merge_existing_items(server_names, wlst_value)
name_list = []
for server_name in server_names:
mbean = self.__find_in_location(LocationContext(), SERVER, server_name, required=True)
name_list.append(mbean.getObjectName())
return jarray.array(name_list, ObjectName)
def __build_virtual_target_mbean_list(self, target_value, wlst_value):
"""
Construct the virtual target MBean list.
:param target_value: the target value
:param wlst_value: the existing value from WLST
:return: for offline, a list of MBeans; for online, a jarray of MBean ObjectNames
:raises BundleAwareException of the specified type: if an error occurs
"""
target_names = TypeUtils.convertToType(List, target_value) # type: list of str
target_names = self.__merge_existing_items(target_names, wlst_value)
if self.__wlst_mode == WlstModes.ONLINE:
name_list = []
for target_name in target_names:
target_mbean = self.__find_in_location(LocationContext(), VIRTUAL_TARGET, target_name, required=True)
name_list.append(target_mbean.getObjectName())
return jarray.array(name_list, ObjectName)
else:
mbean_list = []
for target_name in target_names:
target_mbean = self.__find_in_location(LocationContext(), VIRTUAL_TARGET, target_name, required=True)
mbean_list.append(target_mbean)
return mbean_list
def __find_target(self, target_name, location, include_jms=False):
"""
Find a target by name.
:param target_name: the target name
:param location: the location used to resolve the folders to search
:param include_jms: whether or not to include JMS in the search; the default is False
:return: the MBean for the target
:raises BundleAwareException of the specified type: if an error occurs
"""
method_name = '__find_target'
domain_location = self.__get_domain_location(location)
for type_name in self.__target_type_names:
mbean = self.__find_in_location(domain_location, type_name, target_name)
if mbean is not None:
return mbean
if include_jms:
mbean = self.__find_in_resource_group_or_domain(location, JMS_SERVER, target_name)
if mbean is not None:
return mbean
mbean = self.__find_in_resource_group_or_domain(location, SAF_AGENT, target_name)
if mbean is not None:
return mbean
ex = exception_helper.create_exception(self.__exception_type, 'WLSDPLY-19200', target_name)
self.__logger.throwing(class_name=self._class_name, method_name=method_name, error=ex)
raise ex
def __find_jms_destination_mbean(self, location, destination_name):
"""
Find the destination with the specified name and return its WLST mbean.
:param location: the WLST location of the attribute
:param destination_name: the name of the destination to find
:return: the mbean for the destination
:raises BundleAwareException of the specified type: if destination is not found
"""
method_name = '__find_jms_destination_mbean'
resource_location = self.__get_parent_location(location, JMS_RESOURCE)
for type_name in self.__destination_type_names:
mbean = self.__find_in_location(resource_location, type_name, destination_name)
if mbean is not None:
return mbean
_resource_type, resource_name = self.__aliases.get_model_type_and_name(resource_location)
ex = exception_helper.create_exception(self.__exception_type, 'WLSDPLY-19201', destination_name, resource_name)
self.__logger.throwing(class_name=self._class_name, method_name=method_name, error=ex)
raise ex
def __find_persistent_store(self, location, store_name):
"""
Find the persistent store with the specified name and return its WLST mbean.
:param location: the WLST location of the attribute
:param store_name: the name of the store to find
:return: the mbean for the store
:raises BundleAwareException of the specified type: if store is not found
"""
method_name = '__find_persistent_store'
for type_name in self.__persistent_store_type_names:
mbean = self.__find_in_resource_group_or_domain(location, type_name, store_name)
if mbean is not None:
return mbean
ex = exception_helper.create_exception(self.__exception_type, 'WLSDPLY-19202', PERSISTENT_STORE, store_name)
self.__logger.throwing(class_name=self._class_name, method_name=method_name, error=ex)
raise ex
def __find_saf_destination_mbean(self, location, destination_name):
"""
Find the SAF destination with the specified name and return its WLST mbean.
:param location: the WLST location of the attribute
:param destination_name: the name of the SAF destination to find
:return: the mbean for the SAF destination
:raises BundleAwareException of the specified type: if SAF destination is not found
"""
method_name = '__find_saf_destination_mbean'
resource_location = self.__get_parent_location(location, JMS_RESOURCE)
destination_location = LocationContext(resource_location).append_location(SAF_IMPORTED_DESTINATION)
existing_sets = self.__get_existing_object_list(destination_location)
token_name = self.__aliases.get_name_token(destination_location)
for set_name in existing_sets:
if token_name is not None:
destination_location.add_name_token(token_name, set_name)
for type_name in self.__saf_destination_type_names:
mbean = self.__find_in_location(destination_location, type_name, destination_name)
if mbean is not None:
return mbean
_resource_type, resource_name = self.__aliases.get_model_type_and_name(resource_location)
ex = exception_helper.create_exception(self.__exception_type, 'WLSDPLY-19203', destination_name, resource_name)
self.__logger.throwing(class_name=self._class_name, method_name=method_name, error=ex)
raise ex
def __find_wldf_action(self, watch_location, action_name):
"""
Find the WLDF action with the specified name and return its WLST mbean.
:param watch_location: the WLST location of the watch notification
:param action_name: the name of the action to find
:return: the mbean for the action
:raises BundleAwareException of the specified type: if action is not found
"""
method_name = '__find_wldf_action'
for type_name in self.__watch_action_types:
mbean = self.__find_in_location(watch_location, type_name, action_name)
if mbean is not None:
return mbean
ex = exception_helper.create_exception(self.__exception_type, 'WLSDPLY-19202', WATCH_NOTIFICATION, action_name)
self.__logger.throwing(class_name=self._class_name, method_name=method_name, error=ex)
raise ex
def __find_in_resource_group_or_domain(self, location, element_type, name, required=False):
"""
Find the element with the specified name and type and return its WLST mbean.
If the specified location is in a resource group, search only that resource group.
:param location: the WLST location of the attribute
:param element_type: the type of the element to find
:param name: the name of the element to find
:param required: indicates that an exception should be thrown if the element is not found
:return: the mbean for the element
:raises BundleAwareException of the specified type: if element is not found, and required is True
"""
method_name = '__find_in_resource_group_or_domain'
in_resource_group = RESOURCE_GROUP in location.get_model_folders()
if in_resource_group:
resource_group_location = self.__get_parent_location(location, RESOURCE_GROUP)
mbean = self.__find_in_location(resource_group_location, element_type, name)
if mbean is None:
template_id = self.__wlst_helper.get("ResourceGroupTemplate")
domain_location = self.__get_domain_location(location)
mbean = self.__find_in_location(domain_location, RESOURCE_GROUP_TEMPLATE, template_id)
else:
location = self.__get_domain_location(location)
mbean = self.__find_in_location(location, element_type, name)
if required and (mbean is None):
ex = exception_helper.create_exception(self.__exception_type, 'WLSDPLY-19202', element_type, name)
self.__logger.throwing(class_name=self._class_name, method_name=method_name, error=ex)
raise ex
return mbean
def __find_in_location(self, location, element_type, name, required=False):
"""
Find the element with the specified name and type and return its WLST mbean.
:param location: the WLST location of the attribute
:param element_type: the type of the element to be found
:param name: the name of the element to find
:param required: indicates that an exception should be thrown if the element is not found
:return: the mbean for the element
:raises BundleAwareException of the specified type: if element is not found, and required is True
"""
method_name = '__find_in_location'
location = LocationContext(location).append_location(element_type)
if self.__aliases.get_wlst_mbean_type(location) is not None:
existing_names = self.__get_existing_object_list(location)
if name in existing_names:
location_type, location_name = self.__aliases.get_model_type_and_name(location)
self.__logger.fine('WLSDPLY-19204', element_type, name, location_type, location_name,
class_name=self._class_name, method_name=method_name)
token = self.__aliases.get_name_token(location)
location.add_name_token(token, name)
path = self.__aliases.get_wlst_attributes_path(location)
return self.__wlst_helper.get_mbean_for_wlst_path(path)
if required:
ex = exception_helper.create_exception(self.__exception_type, 'WLSDPLY-19210', element_type, name,
self.__aliases.get_model_folder_path(location))
self.__logger.throwing(class_name=self._class_name, method_name=method_name, error=ex)
raise ex
return None
def __get_domain_location(self, location):
"""
Returns a copy of the specified location with all folders removed, but tokens intact.
:param location: the location to be examined
:return: the domain location
"""
_method_name = '__get_domain_location'
self.__logger.entering(str(location), class_name=self._class_name, method_name=_method_name)
location = LocationContext(location)
while len(location.get_model_folders()) > 0:
location.pop_location()
return location
def __get_parent_location(self, location, folder_name):
"""
Searches the specified location for the specified folder name, and returns the corresponding location.
:param location: the location to be examined
:param folder_name: the folder name to find
:return: the parent location
:raises BundleAwareException of the specified type: if the folder is not found in the location folders list
"""
method_name = '__get_parent_location'
try:
location = LocationContext(location)
resource_index = location.get_model_folders().index(folder_name)
while len(location.get_model_folders()) > resource_index + 1:
location.pop_location()
except ValueError:
# index throws a ValueError if the item is not in the list...
ex = exception_helper.create_exception(self.__exception_type, 'WLSDPLY-19205', folder_name, str(location))
self.__logger.throwing(class_name=self._class_name, method_name=method_name, error=ex)
raise ex
return location
def __get_existing_object_list(self, location):
"""
Convenience method to get the existing object list by location's list path
:param location: the location
:return: the list of existing names
:raises BundleAwareException of the specified type: if an error occurs
"""
_method_name = '__get_existing_object_list'
self.__logger.entering(str(location), class_name=self._class_name, method_name=_method_name)
list_path = self.__aliases.get_wlst_list_path(location)
existing_names = self.__wlst_helper.get_existing_object_list(list_path)
self.__logger.exiting(class_name=self._class_name, method_name=_method_name, result=existing_names)
return existing_names
#
# methods for merging existing values
#
def __merge_existing_items(self, items, existing_value):
"""
Merge the specified items with the items represented by existing value, and return the result.
:param items: the list of items from the model
:param existing_value: the value representing the existing items (may be a string or a list)
:return: the merged list of items
:raises BundleAwareException of the specified type: if an error occurs
"""
_method_name = '__merge_existing_items'
self.__logger.entering(str(items), str(existing_value), class_name=self._class_name, method_name=_method_name)
existing_items = TypeUtils.convertToType(List, existing_value) # type: list of str
no_existing_items = (existing_items is None) or (len(existing_items) == 0)
if no_existing_items:
result = items
else:
result = list(Set(items).union(Set(existing_items)))
return result
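# Minimal sketch of the merge (hypothetical values): items of ['server1', 'server2']
# combined with an existing WLST value representing ['server2', 'server3'] yields a
# result containing server1, server2 and server3; the union is computed via Set, so
# the original ordering is not preserved.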
| [
"ResourceGroupTemplate"
] |
2024-01-10 | Ptorresr/weblogic-deploy-tooling | core~src~main~python~wlsdeploy~tool~discover~common_resources_discoverer.py | """
Copyright (c) 2017, 2020, Oracle Corporation and/or its affiliates. All rights reserved.
Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
"""
from java.io import File
from java.lang import IllegalArgumentException
from oracle.weblogic.deploy.util import PyOrderedDict as OrderedDict
from oracle.weblogic.deploy.util import StringUtils
from oracle.weblogic.deploy.util import WLSDeployArchiveIOException
from wlsdeploy.aliases import model_constants
from wlsdeploy.aliases.location_context import LocationContext
from wlsdeploy.aliases.wlst_modes import WlstModes
from wlsdeploy.exception import exception_helper
from wlsdeploy.exception.expection_types import ExceptionType
from wlsdeploy.logging.platform_logger import PlatformLogger
from wlsdeploy.tool.deploy import deployer_utils
from wlsdeploy.tool.discover import discoverer
from wlsdeploy.tool.discover.coherence_resources_discoverer import CoherenceResourcesDiscoverer
from wlsdeploy.tool.discover.discoverer import Discoverer
from wlsdeploy.tool.discover.jms_resources_discoverer import JmsResourcesDiscoverer
from wlsdeploy.util import dictionary_utils
_class_name = 'CommonResourcesDiscoverer'
_logger = PlatformLogger(discoverer.get_discover_logger_name())
class CommonResourcesDiscoverer(Discoverer):
"""
Discover the WebLogic resources that are common across the global, resource group template, and
partition resource group levels.
"""
def __init__(self, model_context, resource_dictionary, base_location,
wlst_mode=WlstModes.OFFLINE, aliases=None, variable_injector=None):
"""
:param model_context: context about the model for this instance of discover domain
:param resource_dictionary: the dictionary to populate with the common resources; by default, the initialized resources dictionary
:param base_location: the location in which to look for common WebLogic resources; by default this is the global path, '/'
"""
Discoverer.__init__(self, model_context, base_location, wlst_mode, aliases, variable_injector)
self._dictionary = resource_dictionary
self._add_att_handler(model_constants.PATH_TO_SCRIPT, self._add_wldf_script)
def discover(self):
"""
Discover WebLogic resources from the on-premise domain.
:return: the dictionary populated with the discovered domain resources
"""
_method_name = 'discover'
_logger.entering(class_name=_class_name, method_name=_method_name)
model_folder_name, folder_result = self.get_datasources()
discoverer.add_to_model_if_not_empty(self._dictionary, model_folder_name, folder_result)
model_folder_name, folder_result = self.get_foreign_jndi_providers()
discoverer.add_to_model_if_not_empty(self._dictionary, model_folder_name, folder_result)
model_folder_name, folder_result = self.get_mail_sessions()
discoverer.add_to_model_if_not_empty(self._dictionary, model_folder_name, folder_result)
model_folder_name, folder_result = self.get_file_stores()
discoverer.add_to_model_if_not_empty(self._dictionary, model_folder_name, folder_result)
model_folder_name, folder_result = self.get_jdbc_stores()
discoverer.add_to_model_if_not_empty(self._dictionary, model_folder_name, folder_result)
model_folder_name, folder_result = self.get_path_services()
discoverer.add_to_model_if_not_empty(self._dictionary, model_folder_name, folder_result)
JmsResourcesDiscoverer(self._model_context, self._dictionary, self._base_location, wlst_mode=self._wlst_mode,
aliases=self._aliases, variable_injector=self._get_variable_injector()).discover()
model_folder_name, folder_result = self.get_wldf_system_resources()
discoverer.add_to_model_if_not_empty(self._dictionary, model_folder_name, folder_result)
model_folder_name, folder_result = self.get_system_component_resources()
discoverer.add_to_model_if_not_empty(self._dictionary, model_folder_name, folder_result)
model_folder_name, folder_result = self.get_ohs_resources()
discoverer.add_to_model_if_not_empty(self._dictionary, model_folder_name, folder_result)
CoherenceResourcesDiscoverer(self._model_context, self._dictionary, self._base_location,
wlst_mode=self._wlst_mode, aliases=self._aliases,
variable_injector=self._get_variable_injector()).discover()
_logger.exiting(class_name=_class_name, method_name=_method_name)
return self._dictionary
def get_datasources(self):
"""
Discover JDBC datasource information from the domain.
:return: model name for the dictionary and the dictionary containing the datasources information
"""
_method_name = 'get_datasources'
_logger.entering(class_name=_class_name, method_name=_method_name)
result = OrderedDict()
model_top_folder_name = model_constants.JDBC_SYSTEM_RESOURCE
model_second_folder = model_constants.JDBC_RESOURCE
location = LocationContext(self._base_location)
location.append_location(model_top_folder_name)
datasources = self._find_names_in_folder(location)
if datasources is not None:
_logger.info('WLSDPLY-06340', len(datasources), class_name=_class_name, method_name=_method_name)
typedef = self._model_context.get_domain_typedef()
name_token = self._aliases.get_name_token(location)
for datasource in datasources:
if typedef.is_system_datasource(datasource):
_logger.info('WLSDPLY-06361', typedef.get_domain_type(), datasource, class_name=_class_name,
method_name=_method_name)
else:
_logger.info('WLSDPLY-06341', datasource, class_name=_class_name, method_name=_method_name)
result[datasource] = OrderedDict()
location.add_name_token(name_token, datasource)
self._populate_model_parameters(result[datasource], location)
location.append_location(model_second_folder)
deployer_utils.set_single_folder_token(location, self._aliases)
if self.wlst_cd(self._aliases.get_wlst_attributes_path(location), location):
result[datasource][model_second_folder] = OrderedDict()
resource_result = result[datasource][model_second_folder]
self._populate_model_parameters(resource_result, location)
self._discover_subfolders(resource_result, location)
location.remove_name_token(name_token)
location.pop_location()
_logger.exiting(class_name=_class_name, method_name=_method_name, result=result)
return model_top_folder_name, result
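# The returned folder name and dictionary form a model fragment shaped roughly like
# (names are hypothetical):
#   JDBCSystemResource:
#     MyDataSource:
#       JdbcResource: { ... }
# which discover() then merges into the model via add_to_model_if_not_empty().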
def get_foreign_jndi_providers(self):
"""
Discover Foreign JNDI providers from the domain.
:return: model name for the dictionary and the dictionary containing the foreign JNDI provider information
"""
_method_name = 'get_foreign_jndi_providers'
_logger.entering(class_name=_class_name, method_name=_method_name)
result = OrderedDict()
model_top_folder_name = model_constants.FOREIGN_JNDI_PROVIDER
location = LocationContext(self._base_location)
location.append_location(model_top_folder_name)
providers = self._find_names_in_folder(location)
if providers is not None:
_logger.info('WLSDPLY-06342', len(providers), class_name=_class_name, method_name=_method_name)
name_token = self._aliases.get_name_token(location)
for provider in providers:
_logger.info('WLSDPLY-06343', provider, class_name=_class_name, method_name=_method_name)
location.add_name_token(name_token, provider)
result[provider] = OrderedDict()
self._populate_model_parameters(result[provider], location)
self._discover_subfolders(result[provider], location)
location.remove_name_token(name_token)
_logger.exiting(class_name=_class_name, method_name=_method_name, result=model_top_folder_name)
return model_top_folder_name, result
def get_mail_sessions(self):
"""
Discover the mail sessions from the domain.
:return: model name for the dictionary and the dictionary containing the mail session information
"""
_method_name = 'get_mail_sessions'
_logger.entering(class_name=_class_name, method_name=_method_name)
result = OrderedDict()
model_top_folder_name = model_constants.MAIL_SESSION
location = LocationContext(self._base_location)
location.append_location(model_top_folder_name)
mail_sessions = self._find_names_in_folder(location)
if mail_sessions is not None:
_logger.info('WLSDPLY-06344', len(mail_sessions), class_name=_class_name, method_name=_method_name)
name_token = self._aliases.get_name_token(location)
for mail_session in mail_sessions:
_logger.info('WLSDPLY-06345', mail_session, class_name=_class_name, method_name=_method_name)
result[mail_session] = OrderedDict()
mail_session_result = result[mail_session]
location.add_name_token(name_token, mail_session)
self._populate_model_parameters(mail_session_result, location)
_fix_passwords_in_mail_session_properties(mail_session_result)
location.remove_name_token(name_token)
_logger.exiting(class_name=_class_name, method_name=_method_name, result=model_top_folder_name)
return model_top_folder_name, result
def get_file_stores(self):
"""
Discover the file stores used for WebLogic persistence.
:return: model folder name and the dictionary containing the discovered file stores
"""
_method_name = 'get_file_stores'
_logger.entering(class_name=_class_name, method_name=_method_name)
result = OrderedDict()
model_top_folder_name = model_constants.FILE_STORE
location = LocationContext(self._base_location)
location.append_location(model_top_folder_name)
file_stores = self._find_names_in_folder(location)
if file_stores is not None:
_logger.info('WLSDPLY-06346', len(file_stores), class_name=_class_name, method_name=_method_name)
typedef = self._model_context.get_domain_typedef()
name_token = self._aliases.get_name_token(location)
for file_store in file_stores:
if typedef.is_system_file_store(file_store):
_logger.info('WLSDPLY-06363', typedef.get_domain_type(), file_store, class_name=_class_name,
method_name=_method_name)
else:
_logger.info('WLSDPLY-06347', file_store, class_name=_class_name, method_name=_method_name)
result[file_store] = OrderedDict()
location.add_name_token(name_token, file_store)
self._populate_model_parameters(result[file_store], location)
self.archive_file_store_directory(file_store, result[file_store])
location.remove_name_token(name_token)
_logger.exiting(class_name=_class_name, method_name=_method_name, result=result)
return model_top_folder_name, result
def archive_file_store_directory(self, file_store_name, file_store_dictionary):
_method_name = 'archive_file_store_directory'
_logger.entering(file_store_name, class_name=_class_name, method_name=_method_name)
if file_store_name is not None and model_constants.DIRECTORY in file_store_dictionary:
directory = file_store_dictionary[model_constants.DIRECTORY]
if not StringUtils.isEmpty(directory):
archive_file = self._model_context.get_archive_file()
try:
new_source_name = archive_file.addFileStoreDirectory(file_store_name)
except WLSDeployArchiveIOException, wioe:
de = exception_helper.create_discover_exception('WLSDPLY-06348', file_store_name, directory,
wioe.getLocalizedMessage())
_logger.throwing(class_name=_class_name, method_name=_method_name, error=de)
raise de
if new_source_name is not None:
_logger.info('WLSDPLY-06349', file_store_name, new_source_name, class_name=_class_name,
method_name=_method_name)
file_store_dictionary[model_constants.DIRECTORY] = new_source_name
_logger.exiting(class_name=_class_name, method_name=_method_name)
return
def get_jdbc_stores(self):
"""
Discover the JDBC stores used for WebLogic persistence.
:return: model folder name and the dictionary containing the discovered JDBC stores
"""
_method_name = 'get_jdbc_stores'
_logger.entering(class_name=_class_name, method_name=_method_name)
result = OrderedDict()
model_top_folder_name = model_constants.JDBC_STORE
location = LocationContext(self._base_location)
location.append_location(model_top_folder_name)
jdbc_stores = self._find_names_in_folder(location)
if jdbc_stores is not None:
_logger.info('WLSDPLY-06350', len(jdbc_stores), class_name=_class_name, method_name=_method_name)
name_token = self._aliases.get_name_token(location)
for jdbc_store in jdbc_stores:
_logger.info('WLSDPLY-06351', jdbc_store, class_name=_class_name, method_name=_method_name)
result[jdbc_store] = OrderedDict()
location.add_name_token(name_token, jdbc_store)
self._populate_model_parameters(result[jdbc_store], location)
self.archive_jdbc_create_script(jdbc_store, result[jdbc_store])
location.remove_name_token(name_token)
_logger.exiting(class_name=_class_name, method_name=_method_name, result=result)
return model_top_folder_name, result
def archive_jdbc_create_script(self, jdbc_store_name, jdbc_store_dictionary):
"""
Add the JDBC store create DDL file to the archive.
:param jdbc_store_name: name of the JDBC Store
:param jdbc_store_dictionary: dictionary containing the discovered store attributes
"""
_method_name = 'archive_jdbc_create_script'
_logger.entering(jdbc_store_name, class_name=_class_name, method_name=_method_name)
if model_constants.CREATE_TABLE_DDL_FILE in jdbc_store_dictionary:
archive_file = self._model_context.get_archive_file()
file_name = self._convert_path(jdbc_store_dictionary[model_constants.CREATE_TABLE_DDL_FILE])
_logger.info('WLSDPLY-06352', jdbc_store_name, file_name, class_name=_class_name, method_name=_method_name)
try:
new_source_name = archive_file.addScript(File(file_name))
except IllegalArgumentException, iae:
_logger.warning('WLSDPLY-06353', jdbc_store_name, file_name,
iae.getLocalizedMessage(), class_name=_class_name,
method_name=_method_name)
_logger.exiting(class_name=_class_name, method_name=_method_name)
return
except WLSDeployArchiveIOException, wioe:
de = exception_helper.create_discover_exception('WLSDPLY-06354', jdbc_store_name, file_name,
wioe.getLocalizedMessage())
_logger.throwing(class_name=_class_name, method_name=_method_name, error=de)
raise de
if new_source_name is None:
new_source_name = file_name
tokenized = self._model_context.tokenize_path(new_source_name)
jdbc_store_dictionary[model_constants.CREATE_TABLE_DDL_FILE] = tokenized
_logger.exiting(class_name=_class_name, method_name=_method_name)
return
def get_path_services(self):
"""
Discover the path services for WebLogic message grouping.
:return: model folder name and the dictionary containing the discovered path services
"""
_method_name = 'get_path_services'
_logger.entering(class_name=_class_name, method_name=_method_name)
result = OrderedDict()
model_top_folder_name = model_constants.PATH_SERVICE
location = LocationContext(self._base_location)
location.append_location(model_top_folder_name)
path_services = self._find_names_in_folder(location)
if path_services is not None:
_logger.info('WLSDPLY-06355', len(path_services), class_name=_class_name, method_name=_method_name)
name_token = self._aliases.get_name_token(location)
for path_service in path_services:
_logger.info('WLSDPLY-06356', path_service, class_name=_class_name, method_name=_method_name)
result[path_service] = OrderedDict()
location.add_name_token(name_token, path_service)
self._populate_model_parameters(result[path_service], location)
location.remove_name_token(name_token)
_logger.exiting(class_name=_class_name, method_name=_method_name, result=result)
return model_top_folder_name, result
def get_wldf_system_resources(self):
"""
Discover each WLDF system resource in the domain.
:return: model name for the WLDF system resource and the dictionary containing the discovered WLDF system resources
"""
_method_name = 'get_wldf_system_resources'
_logger.entering(class_name=_class_name, method_name=_method_name)
result = OrderedDict()
model_top_folder_name = model_constants.WLDF_SYSTEM_RESOURCE
location = LocationContext(self._base_location)
location.append_location(model_top_folder_name)
wldf_resources = self._find_names_in_folder(location)
if wldf_resources is not None:
_logger.info('WLSDPLY-06357', len(wldf_resources), class_name=_class_name, method_name=_method_name)
typedef = self._model_context.get_domain_typedef()
name_token = self._aliases.get_name_token(location)
for wldf_resource in wldf_resources:
if typedef.is_system_wldf(wldf_resource):
_logger.info('WLSDPLY-06362', typedef.get_domain_type(), wldf_resource, class_name=_class_name,
method_name=_method_name)
else:
_logger.info('WLSDPLY-06358', wldf_resource, class_name=_class_name, method_name=_method_name)
location.add_name_token(name_token, wldf_resource)
result[wldf_resource] = OrderedDict()
self._populate_model_parameters(result[wldf_resource], location)
self._discover_subfolders(result[wldf_resource], location)
location.remove_name_token(name_token)
_logger.exiting(class_name=_class_name, method_name=_method_name, result=model_top_folder_name)
return model_top_folder_name, result
def get_system_component_resources(self):
"""
Discover each system component resource in the domain.
:return: model name and dictionary for the discovered system component resources
"""
_method_name = 'get_system_component_resources'
_logger.entering(class_name=_class_name, method_name=_method_name)
name, dictionary = self._get_named_resources(model_constants.SYSTEM_COMPONENT)
# for online, warn that any OHS configurations are not discovered
if self._wlst_mode == WlstModes.ONLINE:
for key, nodes in dictionary.iteritems():
component_type = dictionary_utils.get_element(nodes, model_constants.COMPONENT_TYPE)
if model_constants.OHS == component_type:
_logger.warning('WLSDPLY-06366', model_constants.OHS, model_constants.SYSTEM_COMPONENT, key,
class_name=_class_name, method_name=_method_name)
return name, dictionary
def get_ohs_resources(self):
"""
Discover each OHS resource in the domain.
:return: model name and dictionary for the discovered OHS resources
"""
_method_name = 'get_ohs_resources'
_logger.entering(class_name=_class_name, method_name=_method_name)
return self._get_named_resources(model_constants.OHS)
# private methods
def _add_wldf_script(self, model_name, model_value, location):
"""
Add the WLDF WatchNotification ScriptAction script for attribute PathToScript to the archive file.
Modify the model_value to reflect the new name after the archive file has been deployed.
:param model_name: name of the attribute
:param model_value: containing the Script Action script
:param location: context containing the current location of the ScriptAction
:return: modified model value reflecting new PathToScript location
"""
_method_name = '_add_wldf_script'
_logger.entering(model_name, class_name=_class_name, method_name=_method_name)
new_script_name = model_value
if model_value is not None:
file_name = self._convert_path(model_value)
_logger.info('WLSDPLY-06359', file_name, self._aliases.get_model_folder_path(location),
class_name=_class_name, method_name=_method_name)
archive_file = self._model_context.get_archive_file()
# Set model_value to None if unable to add it to archive file
modified_name = None
try:
modified_name = archive_file.addScript(File(file_name))
except IllegalArgumentException, iae:
_logger.warning('WLSDPLY-06360', self._aliases.get_model_folder_path(location), file_name,
iae.getLocalizedMessage(), class_name=_class_name,
method_name=_method_name)
except WLSDeployArchiveIOException, wioe:
de = exception_helper.create_discover_exception('WLSDPLY-06354',
self._aliases.get_model_folder_path(location),
file_name, wioe.getLocalizedMessage())
_logger.throwing(class_name=_class_name, method_name=_method_name, error=de)
raise de
new_script_name = modified_name
_logger.exiting(class_name=_class_name, method_name=_method_name, result=new_script_name)
return new_script_name
def _get_named_resources(self, folder_name):
"""
Discover each resource of the specified type in the domain.
:return: model name and dictionary for the discovered resources
"""
_method_name = '_get_named_resources'
_logger.entering(folder_name, class_name=_class_name, method_name=_method_name)
result = OrderedDict()
model_top_folder_name = folder_name
location = LocationContext(self._base_location)
location.append_location(model_top_folder_name)
resource_names = self._find_names_in_folder(location)
if resource_names is not None:
_logger.info('WLSDPLY-06364', len(resource_names), folder_name, class_name=_class_name,
method_name=_method_name)
name_token = self._aliases.get_name_token(location)
for resource_name in resource_names:
_logger.info('WLSDPLY-06365', folder_name, resource_name, class_name=_class_name,
method_name=_method_name)
location.add_name_token(name_token, resource_name)
result[resource_name] = OrderedDict()
self._populate_model_parameters(result[resource_name], location)
self._discover_subfolders(result[resource_name], location)
location.remove_name_token(name_token)
_logger.exiting(class_name=_class_name, method_name=_method_name, result=model_top_folder_name)
return model_top_folder_name, result
def _fix_passwords_in_mail_session_properties(dictionary):
"""
Look for password properties in the mail session properties string, and replace each password value with a fix-me token.
:param dictionary: containing the discovered mail session attributes
"""
match_pattern = r"mail\.\w*\.?password"
replacement = '--FIX ME--'
if model_constants.MAIL_SESSION_PROPERTIES in dictionary:
new_properties = ''
string_properties = dictionary[model_constants.MAIL_SESSION_PROPERTIES]
if string_properties:
properties = string_properties
if isinstance(string_properties, str):
properties = StringUtils.formatPropertiesFromString(string_properties)
new_properties = OrderedDict()
iterator = properties.stringPropertyNames().iterator()
while iterator.hasNext():
key = iterator.next()
new_key = str(key).strip()
value = properties.getProperty(key)
if StringUtils.matches(match_pattern, new_key):
value = replacement
new_properties[new_key] = value
dictionary[model_constants.MAIL_SESSION_PROPERTIES] = new_properties
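# Illustrative example (hypothetical property names): a discovered properties value
# containing mail.smtp.user and mail.smtp.password entries is rewritten so that the
# mail.smtp.password value becomes '--FIX ME--' while mail.smtp.user is left unchanged.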
| [] |
2024-01-10 | Ptorresr/weblogic-deploy-tooling | core~src~main~python~wlsdeploy~tool~util~topology_helper.py | """
Copyright (c) 2017, 2020, Oracle Corporation and/or its affiliates.
Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
"""
import wlsdeploy.tool.deploy.deployer_utils as deployer_utils
import wlsdeploy.util.dictionary_utils as dictionary_utils
from oracle.weblogic.deploy.util import WLSDeployArchive
from wlsdeploy.aliases.location_context import LocationContext
from wlsdeploy.aliases.model_constants import CLUSTER
from wlsdeploy.aliases.model_constants import COHERENCE_CLUSTER_SYSTEM_RESOURCE
from wlsdeploy.aliases.model_constants import CUSTOM_IDENTITY_KEYSTORE_FILE
from wlsdeploy.aliases.model_constants import JDBC_RESOURCE
from wlsdeploy.aliases.model_constants import JDBC_SYSTEM_RESOURCE
from wlsdeploy.aliases.model_constants import NM_PROPERTIES
from wlsdeploy.aliases.model_constants import SERVER
from wlsdeploy.aliases.model_constants import SERVER_TEMPLATE
from wlsdeploy.util import model_helper
from wlsdeploy.tool.util.wlst_helper import WlstHelper
class TopologyHelper(object):
"""
Shared code for topology section of model. Domain create and update use this code.
"""
__class_name = 'TopologyHelper'
def __init__(self, aliases, exception_type, logger):
self.logger = logger
self.aliases = aliases
self.wlst_helper = WlstHelper(exception_type)
self._coherence_cluster_elements = [CLUSTER, SERVER, SERVER_TEMPLATE]
def check_coherence_cluster_references(self, type_name, model_nodes):
"""
If the specified type has a Coherence cluster system resource attribute, verify that any referenced resource
exists. If the resource does not exist, create an empty placeholder resource to allow assignment.
:param type_name: the model folder type
:param model_nodes: a dictionary containing the named model elements
:raises: BundleAwareException of the specified type: if an error occurs
"""
if type_name in self._coherence_cluster_elements:
for name in model_nodes.keys():
child_nodes = dictionary_utils.get_dictionary_element(model_nodes, name)
resource_name = dictionary_utils.get_element(child_nodes, COHERENCE_CLUSTER_SYSTEM_RESOURCE)
if resource_name is not None:
self._create_placeholder_coherence_cluster(resource_name)
def _create_placeholder_coherence_cluster(self, cluster_name):
"""
Create a placeholder Coherence cluster system resource to be referenced from a topology element.
The new cluster will be created at the root domain level.
:param cluster_name: the name of the Coherence cluster system resource to be added
"""
_method_name = '_create_placeholder_coherence_cluster'
original_location = self.wlst_helper.get_pwd()
cluster_location = LocationContext().append_location(COHERENCE_CLUSTER_SYSTEM_RESOURCE)
existing_names = deployer_utils.get_existing_object_list(cluster_location, self.aliases)
if cluster_name not in existing_names:
self.logger.info('WLSDPLY-12230', cluster_name, class_name=self.__class_name, method_name=_method_name)
cluster_token = self.aliases.get_name_token(cluster_location)
cluster_location.add_name_token(cluster_token, cluster_name)
deployer_utils.create_and_cd(cluster_location, existing_names, self.aliases)
self.wlst_helper.cd(original_location)
def create_placeholder_servers_in_cluster(self, topology):
"""
Create a placeholder for servers that are in a cluster, as these are migratable entities that
can reference other servers in the cluster.
:param topology: The topology model nodes containing the full set of Servers to add for the create / update
"""
_method_name = 'create_placeholder_servers_in_cluster'
self.logger.entering(class_name=self.__class_name, method_name=_method_name)
self.create_placeholder_named_elements(LocationContext(), SERVER, topology)
self.logger.exiting(class_name=self.__class_name, method_name=_method_name)
def create_placeholder_server_templates(self, topology):
"""
Create a placeholder server template for each name in the topology.
This is necessary because there is a circular dependency between clusters and server templates.
:param topology: the topology model nodes
"""
self.create_placeholder_named_elements(LocationContext(), SERVER_TEMPLATE, topology)
def create_placeholder_jdbc_resources(self, resources):
"""
Create a placeholder JDBC resource for each name in the resources section.
This is necessary because cluster attributes may reference JDBC resources.
:param resources: the resource model nodes
:return: a list of names of created placeholders
"""
return self.create_placeholder_named_elements(LocationContext(), JDBC_SYSTEM_RESOURCE, resources)
def create_placeholder_named_elements(self, location, model_type, model_nodes):
"""
Create a placeholder entry for each element in the specified named element nodes.
This is necessary when there can be circular references with other elements.
:param location: the location for the nodes to be added
:param model_type: the type of the specified model nodes
:param model_nodes: the model nodes
:return: a list of names of created placeholders
"""
_method_name = 'create_placeholder_named_elements'
holder_names = []
original_location = self.wlst_helper.get_pwd()
resource_location = LocationContext(location).append_location(model_type)
if self.aliases.get_wlst_mbean_type(resource_location) is not None:
existing_names = deployer_utils.get_existing_object_list(resource_location, self.aliases)
name_nodes = dictionary_utils.get_dictionary_element(model_nodes, model_type)
for name in name_nodes.keys():
if model_helper.is_delete_name(name):
# don't create placeholder for delete names
continue
if name not in existing_names:
self.logger.info('WLSDPLY-19403', model_type, name, class_name=self.__class_name,
method_name=_method_name)
token = self.aliases.get_name_token(resource_location)
resource_location.add_name_token(token, name)
deployer_utils.create_and_cd(resource_location, existing_names, self.aliases)
self._update_placeholder(model_type, name, resource_location)
holder_names.append(name)
self.wlst_helper.cd(original_location)
return holder_names
def _update_placeholder(self, type_name, name, location):
"""
Make any required updates to a newly-created placeholder.
:param type_name: the type name of the placeholder
:param name: the name of the placeholder MBean
:param location: the location of the placeholder
"""
if type_name == JDBC_SYSTEM_RESOURCE:
# for online update, Name must be assigned to each JDBCSystemResource / JdbcResource MBean.
# (see datasource_deployer.set_attributes())
child_location = LocationContext(location).append_location(JDBC_RESOURCE)
deployer_utils.set_single_folder_token(child_location, self.aliases)
wlst_path = self.aliases.get_wlst_attributes_path(child_location)
if self.wlst_helper.path_exists(wlst_path):
original_location = self.wlst_helper.get_pwd()
self.wlst_helper.cd(wlst_path)
existing_name = self.wlst_helper.get('Name')
if existing_name is None:
self.wlst_helper.set('Name', name)
self.wlst_helper.cd(original_location)
def clear_jdbc_placeholder_targeting(self, jdbc_names):
"""
Remove any targets for the JDBC resources in the specified list of names.
Targets may have been inadvertently assigned when clusters were added after JDBC placeholders.
:param jdbc_names: names of placeholders to clear
"""
_method_name = 'clear_jdbc_placeholder_targeting'
resource_location = LocationContext().append_location(JDBC_SYSTEM_RESOURCE)
token = self.aliases.get_name_token(resource_location)
for name in jdbc_names:
self.logger.info('WLSDPLY-19404', JDBC_SYSTEM_RESOURCE, name, class_name=self.__class_name,
method_name=_method_name)
resource_location.add_name_token(token, name)
wlst_path = self.aliases.get_wlst_attributes_path(resource_location)
if self.wlst_helper.path_exists(wlst_path):
mbean = self.wlst_helper.get_mbean_for_wlst_path(wlst_path)
mbean.setTargets(None)
def qualify_nm_properties(self, type_name, model_nodes, base_location, model_context, attribute_setter):
"""
For the NM properties MBean, update the keystore file path to be fully qualified with the domain directory.
:param type_name: the type name of the MBean to be checked
:param model_nodes: the model nodes of the MBean to be checked
:param base_location: the parent location of the MBean
:param model_context: the model context of the tool
:param attribute_setter: the attribute setter to be used for update
"""
if type_name == NM_PROPERTIES:
location = LocationContext(base_location).append_location(type_name)
keystore_file = dictionary_utils.get_element(model_nodes, CUSTOM_IDENTITY_KEYSTORE_FILE)
if keystore_file and WLSDeployArchive.isPathIntoArchive(keystore_file):
value = model_context.get_domain_home() + "/" + keystore_file
attribute_setter.set_attribute(location, CUSTOM_IDENTITY_KEYSTORE_FILE, value)
def is_clustered_server(self, server_name, servers_dictionary):
"""
Return true if the server's Cluster attribute is set.
:param server_name: name of the server in the dictionary
:param servers_dictionary: model topology section of servers
:return: True if a clustered server
"""
server_dictionary = dictionary_utils.get_dictionary_element(servers_dictionary, server_name)
if dictionary_utils.is_empty_dictionary_element(server_dictionary, CLUSTER):
return False
return True
| [] |
2024-01-10 | microsoft/azure-openai-in-a-day-workshop | exercises~solutions~email_app.py | import streamlit as st
import tiktoken
import openai
from dotenv import load_dotenv
import os
from tenacity import retry, wait_random_exponential, stop_after_attempt
# Load environment variables
load_dotenv()
# Configure Azure OpenAI Service API
openai.api_type = "azure"
openai.api_version = "2022-12-01"
openai.api_base = os.getenv('OPENAI_API_BASE')
openai.api_key = os.getenv("OPENAI_API_KEY")
COMPLETION_MODEL = 'text-davinci-003'
@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(10))
def run_prompt(prompt, max_tokens=1000):
response = openai.Completion.create(
engine=COMPLETION_MODEL,
prompt=prompt,
temperature=0.7,
max_tokens=max_tokens
)
return response['choices'][0]['text']
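# Illustrative usage sketch (added for clarity, not part of the original app):
# once the Azure OpenAI environment variables above are set, run_prompt() can be
# exercised outside Streamlit, e.g.:
#
#     summary = run_prompt("Email:\nHi team, the demo moved to Friday.\n\nPlease summarize the email.\nSummary:")
#     print(summary)
#
# The email text here is made up; any prompt string in the format used below works.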
# configure UI elements with Streamlit
st.title('Email summarization demo app')
email = st.text_area('Email', height=400)
summarize_button = st.button('Summarize email')
answer_button = st.button('Generate answer')
if summarize_button:
prompt = f"""
Email:
{email}
Please summarize the email.
Summary:"""
summary = run_prompt(prompt)
st.write('Summary:')
st.write(summary)
if answer_button:
prompt = f"""
Email:
{email}
Please generate an answer to the email above.
Answer:"""
answer = run_prompt(prompt)
st.write('Answer:')
st.write(answer) | [
"\n Email:\n PLACEHOLDER\n Please generate an answer to the email above.\n Answer:",
"\n Email:\n PLACEHOLDER\n Please summarize the email.\n Summary:"
] |
2024-01-10 | ElsevierSoftwareX/SOFTX_2020_238 | gstlal-calibration~python~calibration_parts.py | #!/usr/bin/env python3
#
# Copyright (C) 2015 Madeline Wade, Aaron Viets
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
from gstlal import pipeparts
import numpy
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject
from gi.repository import Gst
gi.require_version('GstController', '1.0')
from gi.repository import GstController
GObject.threads_init()
Gst.init(None)
from gstlal import FIRtools as fir
#
# Shortcut functions for common element combos/properties
#
def mkqueue(pipeline, head, length = 0, min_length = 0):
if length < 0:
return head
else:
return pipeparts.mkqueue(pipeline, head, max_size_time = int(1000000000 * length), max_size_buffers = 0, max_size_bytes = 0, min_threshold_time = int(1000000000 * min_length))
def mkcomplexqueue(pipeline, head, length = 0, min_length = 0):
head = pipeparts.mktogglecomplex(pipeline, head)
head = mkqueue(pipeline, head, length = length, min_length = min_length)
head = pipeparts.mktogglecomplex(pipeline, head)
return head
def mkcapsfiltersetter(pipeline, head, caps, **properties):
# Make a capsfilter followed by a capssetter
head = pipeparts.mkcapsfilter(pipeline, head, caps)
#head = pipeparts.mkcapssetter(pipeline, head, caps, replace = True, **properties)
return head
def mkinsertgap(pipeline, head, **properties):
if "bad_data_intervals" in properties:
# Make sure the array property bad-data-intervals is formatted correctly
intervals = properties.pop("bad_data_intervals")
if intervals is not None:
bad_data_intervals = []
for i in range(0, len(intervals)):
bad_data_intervals.append(float(intervals[i]))
properties["bad_data_intervals"] = bad_data_intervals
return pipeparts.mkgeneric(pipeline, head, "lal_insertgap", **properties)
#def mkupsample(pipeline, head, new_caps):
# head = pipeparts.mkgeneric(pipeline, head, "lal_constantupsample")
# head = pipeparts.mkcapsfilter(pipeline, head, new_caps)
# return head
def mkstockresample(pipeline, head, caps):
if type(caps) is int:
caps = "audio/x-raw,rate=%d,channel-mask=(bitmask)0x0" % caps
head = pipeparts.mkresample(pipeline, head, quality = 9)
head = pipeparts.mkcapsfilter(pipeline, head, caps)
return head
def mkresample(pipeline, head, quality, zero_latency, caps, window = 0, frequency_resolution = 0.0, f_cut = 0.0):
if type(caps) is int:
caps = "audio/x-raw,rate=%d,channel-mask=(bitmask)0x0" % caps
head = pipeparts.mkgeneric(pipeline, head, "lal_resample", quality = quality, zero_latency = zero_latency, window = window, frequency_resolution = frequency_resolution, f_cut = f_cut)
head = pipeparts.mkcapsfilter(pipeline, head, caps)
return head
def mkcomplexfirbank(pipeline, src, latency = None, fir_matrix = None, time_domain = None, block_stride = None):
if fir_matrix is not None:
# Make sure the fir matrix is formatted correctly
matrix = []
for i in range(0, len(fir_matrix)):
firfilt = []
for j in range(0, len(fir_matrix[i])):
firfilt.append(float(fir_matrix[i][j]))
matrix.append(firfilt)
fir_matrix = matrix
properties = dict((name, value) for name, value in zip(("latency", "fir_matrix", "time_domain", "block_stride"), (latency, fir_matrix, time_domain, block_stride)) if value is not None)
return pipeparts.mkgeneric(pipeline, src, "lal_complexfirbank", **properties)
def mkcomplexfirbank2(pipeline, src, latency = None, fir_matrix = None, time_domain = None, block_stride = None):
if fir_matrix is not None:
# Make sure the fir matrix is formatted correctly
matrix = []
for i in range(0, len(fir_matrix)):
firfilt = []
for j in range(0, len(fir_matrix[i])):
firfilt.append(float(fir_matrix[i][j]))
matrix.append(firfilt)
fir_matrix = matrix
properties = dict((name, value) for name, value in zip(("latency", "fir_matrix", "time_domain", "block_stride"), (latency, fir_matrix, time_domain, block_stride)) if value is not None)
return pipeparts.mkgeneric(pipeline, src, "lal_complexfirbank2", **properties)
def mkfccupdate(pipeline, src, **properties):
if "fir_matrix" in properties:
# Make sure the fir matrix is formatted correctly
matrix = properties.pop("fir_matrix")
if matrix is not None:
fir_matrix = []
for i in range(0, len(matrix)):
firfilt = []
for j in range(0, len(matrix[i])):
firfilt.append(float(matrix[i][j]))
fir_matrix.append(firfilt)
properties["fir_matrix"] = fir_matrix
return pipeparts.mkgeneric(pipeline, src, "lal_fcc_update", **properties)
def mktransferfunction(pipeline, src, **properties):
if "notch_frequencies" in properties:
# Make sure the array property notch-frequencies is formatted correctly
freqs = properties.pop("notch_frequencies")
if freqs is not None:
notch_frequencies = []
for i in range(0, len(freqs)):
notch_frequencies.append(float(freqs[i]))
properties["notch_frequencies"] = notch_frequencies
if "fft_window_type" in properties:
win = properties.pop("fft_window_type")
if win in ['hann', 'Hann', 'HANN', 'hanning', 'Hanning', 'HANNING', 4]:
win = 4
elif win in ['blackman', 'Blackman', 'BLACKMAN', 3]:
win = 3
elif win in ['DC', 'dolph_chebyshev', 'DolphChebyshev', 'DOLPH_CHEBYSHEV', 2]:
win = 2
elif win in ['kaiser', 'Kaiser', 'KAISER', 1]:
win = 1
elif win in ['dpss', 'DPSS', 'Slepian', 'slepian', 'SLEPIAN', 0]:
win = 0
else:
raise ValueError("Unknown window function %s" % win)
properties["fft_window_type"] = win
if "fir_window_type" in properties:
win = properties.pop("fir_window_type")
if win in ['hann', 'Hann', 'HANN', 'hanning', 'Hanning', 'HANNING', 4]:
win = 4
elif win in ['blackman', 'Blackman', 'BLACKMAN', 3]:
win = 3
elif win in ['DC', 'dolph_chebyshev', 'DolphChebyshev', 'DOLPH_CHEBYSHEV', 2]:
win = 2
elif win in ['kaiser', 'Kaiser', 'KAISER', 1]:
win = 1
elif win in ['dpss', 'DPSS', 'Slepian', 'slepian', 'SLEPIAN', 0]:
win = 0
else:
raise ValueError("Unknown window function %s" % win)
properties["fir_window_type"] = win
return pipeparts.mkgeneric(pipeline, src, "lal_transferfunction", **properties)
def mkadaptivefirfilt(pipeline, src, **properties):
# Make sure each array property is formatted correctly
if "static_model" in properties:
staticmodel = properties.pop("static_model")
if staticmodel is not None:
static_model = []
for i in range(len(staticmodel)):
static_model.append(float(staticmodel[i]))
properties["static_model"] = static_model
if "static_filter" in properties:
staticfilt = properties.pop("static_filter")
if staticfilt is not None:
static_filter = []
for i in range(0, len(staticfilt)):
static_filter.append(float(staticfilt[i]))
properties["static_filter"] = static_filter
if "static_zeros" in properties:
staticz = properties.pop("static_zeros")
if staticz is not None:
static_zeros = []
for i in range(0, len(staticz)):
static_zeros.append(float(staticz[i]))
properties["static_zeros"] = static_zeros
if "static_poles" in properties:
staticp = properties.pop("static_poles")
if staticp is not None:
static_poles = []
for i in range(0, len(staticp)):
static_poles.append(float(staticp[i]))
properties["static_poles"] = static_poles
if "window_type" in properties:
win = properties.pop("window_type")
if win in [None, 5]:
win = 5
elif win in ['hann', 'Hann', 'HANN', 'hanning', 'Hanning', 'HANNING', 4]:
win = 4
elif win in ['blackman', 'Blackman', 'BLACKMAN', 3]:
win = 3
elif win in ['DC', 'dolph_chebyshev', 'DolphChebyshev', 'DOLPH_CHEBYSHEV', 2]:
win = 2
elif win in ['kaiser', 'Kaiser', 'KAISER', 1]:
win = 1
elif win in ['dpss', 'DPSS', 'Slepian', 'slepian', 'SLEPIAN', 0]:
win = 0
else:
raise ValueError("Unknown window function %s" % win)
properties["window_type"] = win
return pipeparts.mkgeneric(pipeline, src, "lal_adaptivefirfilt", **properties)
def mkpow(pipeline, src, **properties):
return pipeparts.mkgeneric(pipeline, src, "cpow", **properties)
def mkmultiplier(pipeline, srcs, sync = True, queue_length = [0]):
elem = pipeparts.mkgeneric(pipeline, None, "lal_adder", sync=sync, mix_mode="product")
if srcs is not None:
for i in range(0, len(srcs)):
mkqueue(pipeline, srcs[i], length = queue_length[min(i, len(queue_length) - 1)]).link(elem)
return elem
def mkadder(pipeline, srcs, sync = True, queue_length = [0]):
elem = pipeparts.mkgeneric(pipeline, None, "lal_adder", sync=sync)
if srcs is not None:
for i in range(0, len(srcs)):
mkqueue(pipeline, srcs[i], length = queue_length[min(i, len(queue_length) - 1)]).link(elem)
return elem
def mkgate(pipeline, src, control, threshold, queue_length = 0, **properties):
elem = pipeparts.mkgate(pipeline, mkqueue(pipeline, src, length = queue_length), control = mkqueue(pipeline, control, length = queue_length), threshold = threshold, **properties)
return elem
def mkinterleave(pipeline, srcs, complex_data = False, queue_length = [0]):
complex_factor = 1 + int(complex_data)
num_srcs = complex_factor * len(srcs)
i = 0
mixed_srcs = []
for src in srcs:
matrix = [numpy.zeros(num_srcs)]
matrix[0][i] = 1
mixed_srcs.append(pipeparts.mkmatrixmixer(pipeline, src, matrix=matrix))
i += complex_factor
elem = mkadder(pipeline, tuple(mixed_srcs), queue_length = queue_length)
#chan1 = pipeparts.mkmatrixmixer(pipeline, src1, matrix=[[1,0]])
#chan2 = pipeparts.mkmatrixmixer(pipeline, src2, matrix=[[0,1]])
#elem = mkadder(pipeline, list_srcs(pipeline, chan1, chan2))
#elem = pipeparts.mkgeneric(pipeline, None, "interleave")
#if srcs is not None:
# for src in srcs:
# pipeparts.mkqueue(pipeline, src).link(elem)
return elem
def mkdeinterleave(pipeline, src, num_channels, complex_data = False):
complex_factor = 1 + int(complex_data)
head = pipeparts.mktee(pipeline, src)
streams = []
for i in range(0, num_channels):
matrix = numpy.zeros((num_channels, complex_factor))
matrix[i][0] = 1.0
streams.append(pipeparts.mkmatrixmixer(pipeline, head, matrix = matrix))
return tuple(streams)
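# Illustrative sketch (added for clarity, not part of the original pipeline code):
# mkinterleave()/mkdeinterleave() route channels with one-hot mixing matrices.
# The pure-numpy analogue below shows how multiplying a block of multichannel
# data by such a matrix selects a single channel (numpy is imported above).
def _example_channel_selection_sketch():
	num_channels = 3
	# made-up multichannel data: 8 samples x 3 channels
	data = numpy.arange(24.0).reshape(8, num_channels)
	# one-hot matrix that keeps only the second channel
	matrix = numpy.zeros((num_channels, 1))
	matrix[1][0] = 1.0
	# shape (8, 1): a copy of channel 1
	return numpy.dot(data, matrix)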
#
# Write a pipeline graph function
#
def write_graph(demux, pipeline, name):
pipeparts.write_dump_dot(pipeline, "%s.%s" % (name, "PLAYING"), verbose = True)
#
# Common element combo functions
#
def hook_up(pipeline, demux, channel_name, instrument, buffer_length, element_name_suffix = "", wait_time = 0):
if channel_name.endswith("UNCERTAINTY"):
head = mkinsertgap(pipeline, None, bad_data_intervals = [-1e35, -1e-35, 1e-35, 1e35], insert_gap = False, remove_gap = True, fill_discont = True, block_duration = int(1000000000 * buffer_length), replace_value = 1, name = "insertgap_%s%s" % (channel_name, element_name_suffix), wait_time = int(1000000000 * wait_time))
else:
head = mkinsertgap(pipeline, None, bad_data_intervals = [-1e35, -1e-35, 1e-35, 1e35], insert_gap = False, remove_gap = True, fill_discont = True, block_duration = int(1000000000 * buffer_length), replace_value = 0, name = "insertgap_%s%s" % (channel_name, element_name_suffix), wait_time = int(1000000000 * wait_time))
pipeparts.src_deferred_link(demux, "%s:%s" % (instrument, channel_name), head.get_static_pad("sink"))
return head
def caps_and_progress(pipeline, head, caps, progress_name):
head = pipeparts.mkgeneric(pipeline, head, "lal_typecast")
head = pipeparts.mkcapsfilter(pipeline, head, caps)
head = pipeparts.mkprogressreport(pipeline, head, "progress_src_%s" % progress_name)
return head
#
# Function to make a list of heads to pass to, i.e. the multiplier or adder
#
def list_srcs(pipeline, *args):
out = []
for src in args:
out.append(src)
return tuple(out)
#
# Various filtering functions
#
def demodulate(pipeline, head, freq, td, rate, filter_time, filter_latency, prefactor_real = 1.0, prefactor_imag = 0.0, freq_update = None):
# demodulate input at a given frequency freq
head = pipeparts.mkgeneric(pipeline, head, "lal_demodulate", line_frequency = freq, prefactor_real = prefactor_real, prefactor_imag = prefactor_imag)
if type(freq_update) is list:
freq_update[0].connect("notify::timestamped-average", update_timestamped_property, head, "timestamped_average", "line_frequency", 1)
freq_update[1].connect("notify::timestamped-average", update_timestamped_property, head, "timestamped_average", "prefactor_real", 1)
freq_update[2].connect("notify::timestamped-average", update_timestamped_property, head, "timestamped_average", "prefactor_imag", 1)
elif freq_update is not None:
freq_update.connect("notify::timestamped-average", update_timestamped_property, head, "timestamped_average", "line_frequency", 1)
head = mkresample(pipeline, head, 4, filter_latency == 0.0, rate)
if filter_latency != 0:
# Remove the first several seconds of output, which depend on start time
head = pipeparts.mkgeneric(pipeline, head, "lal_insertgap", chop_length = 7000000000)
head = lowpass(pipeline, head, rate, length = filter_time, fcut = 0, filter_latency = filter_latency, td = td)
return head
def remove_harmonics(pipeline, signal, f0, num_harmonics, f0_var, filter_latency, compute_rate = 16, rate_out = 16384):
# remove any line(s) from a spectrum. filter length for demodulation (given in seconds) is adjustable
# function argument caps must be complex caps
filter_param = 0.0625
	head = pipeparts.mktee(pipeline, signal)
elem = pipeparts.mkgeneric(pipeline, None, "lal_adder", sync = True)
mkqueue(pipeline, head).link(elem)
for i in range(1, num_harmonics + 1):
line = pipeparts.mkgeneric(pipeline, head, "lal_demodulate", line_frequency = i * f0)
line = mkresample(pipeline, line, 4, filter_latency == 0, compute_rate)
		line = lowpass(pipeline, line, compute_rate, length = filter_param / (f0_var * i), fcut = 0, filter_latency = filter_latency)
line = mkresample(pipeline, line, 3, filter_latency == 0.0, rate_out)
line = pipeparts.mkgeneric(pipeline, line, "lal_demodulate", line_frequency = -1.0 * i * f0, prefactor_real = -2.0)
line = pipeparts.mkgeneric(pipeline, line, "creal")
mkqueue(pipeline, line).link(elem)
return elem
def remove_lines_with_witnesses(pipeline, signal, witnesses, freqs, freq_vars, freq_channels, filter_latency = 0, compute_rate = 16, rate_out = 16384, num_median = 2048, num_avg = 160, noisesub_gate_bit = None):
# remove line(s) from a spectrum. filter length for demodulation (given in seconds) is adjustable
# function argument caps must be complex caps
# Re-format inputs if necessary
if type(witnesses) is not list and type(witnesses) is not tuple and type(witnesses) is not numpy.ndarray:
print("remove_lines_with_witnesses(): argument 3 should be type list. Converting %s to list" % type(witnesses))
witnesses = [[witnesses]]
if type(freqs) is not list and type(freqs) is not tuple and type(freqs) is not numpy.ndarray:
print("remove_lines_with_witnesses(): argument 4 should be type list. Converting %s to list" % type(freqs))
freqs = [[freqs]]
if type(freq_vars) is not list and type(freq_vars) is not tuple and type(freq_vars) is not numpy.ndarray:
print("remove_lines_with_witnesses(): argument 5 should be type list. Converting %s to list" % type(freq_vars))
freq_vars = [freq_vars]
for i in range(0, len(witnesses) - len(freqs)):
print("remove_lines_with_witnesses(): Warning: not enough elements in argument 4")
freqs.append(freqs[-1])
for i in range(0, len(witnesses) - len(freq_vars)):
print("remove_lines_with_witnesses(): Warning: not enough elements in argument 5")
freq_vars.append(freq_vars[-1])
if len(freqs) > len(witnesses):
print("remove_lines_with_witnesses(): Warning: too many elements in argument 4")
freqs = freqs[:len(witnesses)]
if len(freq_vars) > len(witnesses):
print("remove_lines_with_witnesses(): Warning: too many elements in argument 5")
freq_vars = freq_vars[:len(witnesses)]
for i in range(0, len(witnesses)):
if type(witnesses[i]) is not list and type(witnesses[i]) is not tuple and type(witnesses[i]) is not numpy.ndarray:
print("remove_lines_with_witnesses(): argument 3 should be list of lists. Converting %s to list" % type(witnesses[i]))
witnesses[i] = [witnesses[i]]
if type(freqs[i]) is not list and type(freqs[i]) is not tuple and type(freqs[i]) is not numpy.ndarray:
print("remove_lines_with_witnesses(): argument 4 should be list of lists. Converting %s to list" % type(freqs[i]))
freqs[i] = [freqs[i]]
filter_param = 0.0625
downsample_quality = 4
upsample_quality = 4
resample_shift = 16.0 + 16.5
zero_latency = filter_latency == 0.0
for i in range(0, len(witnesses)):
for j in range(0, len(witnesses[i])):
witnesses[i][j] = pipeparts.mktee(pipeline, witnesses[i][j])
signal = pipeparts.mktee(pipeline, signal)
signal_minus_lines = [signal]
for m in range(0, len(witnesses)):
# If freqs[m][0] strays from its nominal value and there is a timestamp shift in the signal
# (e.g., to achieve zero latency), we need to correct the phase in the reconstructed
# signal. To do this, we measure the frequency in the witness and find the beat
# frequency between that and the nominal frequency freqs[m][0].
if filter_latency != 0.5 and freq_vars[m]:
# The low-pass and resampling filters are not centered in time
f0_measured = pipeparts.mkgeneric(pipeline, witnesses[m][0], "lal_trackfrequency", num_halfcycles = int(round((filter_param / freq_vars[m] / 2 + resample_shift / compute_rate) * freqs[m][0])))
f0_measured = mkresample(pipeline, f0_measured, 3, zero_latency, compute_rate)
f0_measured = pipeparts.mkgeneric(pipeline, f0_measured, "lal_smoothkappas", array_size = 1, avg_array_size = int(round((filter_param / freq_vars[m] / 2 * compute_rate + resample_shift) / 2)), default_kappa_re = 0, default_to_median = True, filter_latency = filter_latency)
f0_beat_frequency = pipeparts.mkgeneric(pipeline, f0_measured, "lal_add_constant", value = -freqs[m][0])
f0_beat_frequency = pipeparts.mktee(pipeline, f0_beat_frequency)
for n in range(len(freqs[m])):
# Length of low-pass filter
filter_length = filter_param / (max(freq_vars[m], 0.003) * freqs[m][n] / freqs[m][0])
filter_samples = int(filter_length * compute_rate) + (1 - int(filter_length * compute_rate) % 2)
sample_shift = filter_samples // 2 - int((filter_samples - 1) * filter_latency + 0.5)
# shift of timestamp relative to data
time_shift = float(sample_shift) / compute_rate + zero_latency * resample_shift / compute_rate
two_n_pi_delta_t = 2 * freqs[m][n] / freqs[m][0] * numpy.pi * time_shift
# Only do this if we have to
if filter_latency != 0.5 and freq_vars[m]:
# Find phase shift due to timestamp shift for each harmonic
phase_shift = pipeparts.mkmatrixmixer(pipeline, f0_beat_frequency, matrix=[[0, two_n_pi_delta_t]])
phase_shift = pipeparts.mktogglecomplex(pipeline, phase_shift)
phase_factor = pipeparts.mkgeneric(pipeline, phase_shift, "cexp")
phase_factor = pipeparts.mktee(pipeline, phase_factor)
# Find amplitude and phase of line in signal
line_in_signal = pipeparts.mkgeneric(pipeline, signal, "lal_demodulate", line_frequency = freqs[m][n])
# Connect to line frequency updater if given
if any(freq_channels):
if freq_channels[m][n] is not None:
if type(freq_channels[m][n]) is float:
# It's a harmonic of the frequency in freq_channels[m][0]
freq_channels[m][0].connect("notify::timestamped-average", update_timestamped_property, line_in_signal, "timestamped_average", "line_frequency", freq_channels[m][n])
else:
# The channel carries the correct frequency
freq_channels[m][n].connect("notify::timestamped-average", update_timestamped_property, line_in_signal, "timestamped_average", "line_frequency", 1)
line_in_signal = mkresample(pipeline, line_in_signal, downsample_quality, zero_latency, compute_rate)
line_in_signal = lowpass(pipeline, line_in_signal, compute_rate, length = filter_length, fcut = 0, filter_latency = filter_latency)
line_in_signal = pipeparts.mktee(pipeline, line_in_signal)
# Make ones for use in matrix equation
if m == 0 and n == 0:
ones = pipeparts.mktee(pipeline, mkpow(pipeline, line_in_signal, exponent = 0.0))
line_in_witnesses = []
tfs_at_f = [None] * len(witnesses[m]) * (len(witnesses[m]) + 1)
for i in range(0, len(witnesses[m])):
# Find amplitude and phase of each harmonic in each witness channel
line_in_witness = pipeparts.mkgeneric(pipeline, witnesses[m][i], "lal_demodulate", line_frequency = freqs[m][n])
# Connect to line frequency updater if given
if any(freq_channels):
if freq_channels[m][n] is not None:
if type(freq_channels[m][n]) is float:
# It's a harmonic of the frequency in freq_channels[m][0]
freq_channels[m][0].connect("notify::timestamped-average", update_timestamped_property, line_in_witness, "timestamped_average", "line_frequency", freq_channels[m][n])
else:
# The channel carries the correct frequency
freq_channels[m][n].connect("notify::timestamped-average", update_timestamped_property, line_in_witness, "timestamped_average", "line_frequency", 1)
line_in_witness = mkresample(pipeline, line_in_witness, downsample_quality, zero_latency, compute_rate)
line_in_witness = lowpass(pipeline, line_in_witness, compute_rate, length = filter_length, fcut = 0, filter_latency = filter_latency)
line_in_witness = pipeparts.mktee(pipeline, line_in_witness)
line_in_witnesses.append(line_in_witness)
# Find transfer function between witness channel and signal at this frequency
tf_at_f = complex_division(pipeline, line_in_signal, line_in_witness)
# Remove worthless data from computation of transfer function if we can
if noisesub_gate_bit is not None:
tf_at_f = mkgate(pipeline, tf_at_f, noisesub_gate_bit, 1, attack_length = -((1.0 - filter_latency) * filter_samples), name = "powerlines_gate_%d_%d_%d" % (m, n, i))
tfs_at_f[i] = pipeparts.mkgeneric(pipeline, tf_at_f, "lal_smoothkappas", default_kappa_re = 0.0, default_kappa_im = 0.0, array_size = num_median, avg_array_size = num_avg, default_to_median = True, filter_latency = filter_latency)
tfs_at_f[(i + 1) * len(witnesses[m]) + i] = ones
for i in range(0, len(witnesses[m])):
for j in range(0, len(witnesses[m])):
if(i != j):
# Find transfer function between 2 witness channels at this frequency
tf_at_f = complex_division(pipeline, line_in_witnesses[j], line_in_witnesses[i])
# Remove worthless data from computation of transfer function if we can
if noisesub_gate_bit is not None:
tf_at_f = mkgate(pipeline, tf_at_f, noisesub_gate_bit, 1, attack_length = -((1.0 - filter_latency) * filter_samples), name = "powerlines_gate_%d_%d_%d_%d" % (m, n, i, j))
tfs_at_f[(i + 1) * len(witnesses[m]) + j] = pipeparts.mkgeneric(pipeline, tf_at_f, "lal_smoothkappas", default_kappa_re = 0.0, default_kappa_im = 0.0, array_size = num_median, avg_array_size = num_avg, default_to_median = True, filter_latency = filter_latency)
tfs_at_f = mkinterleave(pipeline, tfs_at_f, complex_data = True)
tfs_at_f = pipeparts.mkgeneric(pipeline, tfs_at_f, "lal_matrixsolver")
tfs_at_f = mkdeinterleave(pipeline, tfs_at_f, len(witnesses[m]), complex_data = True)
for i in range(0, len(witnesses[m])):
# Use gated, averaged transfer function to reconstruct the sinusoid as it appears in the signal from the witness channel
if filter_latency == 0.5 or not freq_vars[m]:
reconstructed_line_in_signal = mkmultiplier(pipeline, list_srcs(pipeline, tfs_at_f[i], line_in_witnesses[i]))
else:
reconstructed_line_in_signal = mkmultiplier(pipeline, list_srcs(pipeline, tfs_at_f[i], line_in_witnesses[i], phase_factor))
reconstructed_line_in_signal = mkresample(pipeline, reconstructed_line_in_signal, upsample_quality, zero_latency, rate_out)
reconstructed_line_in_signal = pipeparts.mkgeneric(pipeline, reconstructed_line_in_signal, "lal_demodulate", line_frequency = -1.0 * freqs[m][n], prefactor_real = -2.0)
# Connect to line frequency updater if given
if any(freq_channels):
if freq_channels[m][n] is not None:
if type(freq_channels[m][n]) is float:
# It's a harmonic of the frequency in freq_channels[m][0]
freq_channels[m][0].connect("notify::timestamped-average", update_timestamped_property, reconstructed_line_in_signal, "timestamped_average", "line_frequency", -1.0 * freq_channels[m][n])
else:
# The channel carries the correct frequency
freq_channels[m][n].connect("notify::timestamped-average", update_timestamped_property, reconstructed_line_in_signal, "timestamped_average", "line_frequency", -1.0)
reconstructed_line_in_signal = pipeparts.mkgeneric(pipeline, reconstructed_line_in_signal, "creal")
signal_minus_lines.append(reconstructed_line_in_signal)
clean_signal = mkadder(pipeline, tuple(signal_minus_lines))
return clean_signal
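# Illustrative sketch (added for clarity, not part of the original file): the
# line subtraction above reduces to demodulating at the line frequency,
# low-passing to isolate the slowly varying amplitude and phase, and
# re-modulating (the factor of 2 undoes the 1/2 picked up in demodulation)
# before subtracting from the signal. A crude plain-numpy version:
def _example_line_subtraction(signal, rate, f_line, avg_samples = 4096):
	signal = numpy.asarray(signal, dtype = numpy.float64)
	t = numpy.arange(len(signal)) / float(rate)
	demod = signal * numpy.exp(-2j * numpy.pi * f_line * t)
	# boxcar average as a stand-in for the pipeline's low-pass filter
	kernel = numpy.ones(avg_samples) / avg_samples
	envelope = numpy.convolve(demod, kernel, mode = 'same')
	reconstructed = 2.0 * numpy.real(envelope * numpy.exp(2j * numpy.pi * f_line * t))
	return signal - reconstructed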
def removeDC(pipeline, head, rate):
head = pipeparts.mktee(pipeline, head)
DC = mkresample(pipeline, head, 4, True, 16)
#DC = pipeparts.mkgeneric(pipeline, DC, "lal_smoothkappas", default_kappa_re = 0, array_size = 1, avg_array_size = 64)
DC = mkresample(pipeline, DC, 4, True, rate)
DC = pipeparts.mkaudioamplify(pipeline, DC, -1)
return mkadder(pipeline, list_srcs(pipeline, head, DC))
def lowpass(pipeline, head, rate, length = 1.0, fcut = 500, filter_latency = 0.5, freq_res = 0.0, td = True):
length = int(length * rate)
# Find alpha, and the actual frequency resolution
alpha = freq_res * length / rate if freq_res > 0.0 else 3.0
alpha = 1.0 if alpha < 1.0 else alpha
freq_res = alpha * rate / length
# Adjust the cutoff frequency to "protect" the passband.
if fcut != 0.0:
fcut += 0.75 * freq_res
# Compute a low-pass filter.
lowpass = numpy.sinc(2 * numpy.float128(fcut) / rate * (numpy.arange(numpy.float128(length)) - (length - 1) // 2))
lowpass *= fir.kaiser(length, numpy.pi * alpha) # fir.DPSS(length, alpha, max_time = 10)
lowpass /= numpy.sum(lowpass)
lowpass = numpy.float64(lowpass)
# Now apply the filter
return mkcomplexfirbank(pipeline, head, latency = int((length - 1) * filter_latency + 0.25), fir_matrix = [lowpass], time_domain = td)
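# Illustrative sketch (added for clarity, not part of the original file): the
# heart of lowpass() above is a windowed-sinc FIR design. The standalone numpy
# version below uses numpy.kaiser in place of the module's fir.kaiser helper,
# purely for demonstration.
def _example_windowed_sinc_lowpass(rate = 2048, fcut = 500.0, length = 1.0):
	n = int(length * rate)
	n += 1 - n % 2  # force an odd length so there is a well-defined center tap
	# ideal lowpass impulse response (sinc), centered on the middle tap
	taps = numpy.sinc(2.0 * fcut / rate * (numpy.arange(n) - (n - 1) // 2))
	# taper with a Kaiser window and normalize to unity DC gain
	taps *= numpy.kaiser(n, numpy.pi * 3.0)
	taps /= numpy.sum(taps)
	return taps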
def highpass(pipeline, head, rate, length = 1.0, fcut = 10.0, filter_latency = 0.5, freq_res = 0.0, td = True):
length = int(length * rate)
# Find alpha, and the actual frequency resolution
alpha = freq_res * length / rate if freq_res > 0.0 else 3.0
alpha = 1.0 if alpha < 1.0 else alpha
freq_res = alpha * rate / length
# Adjust the cutoff frequency to "protect" the passband.
fcut -= 0.75 * freq_res
# Compute a low-pass filter.
lowpass = numpy.sinc(2 * numpy.float128(fcut) / rate * (numpy.arange(numpy.float128(length)) - (length - 1) // 2))
lowpass *= fir.kaiser(length, numpy.pi * alpha) # fir.DPSS(length, alpha, max_time = 10)
lowpass /= numpy.sum(lowpass)
# Create a high-pass filter from the low-pass filter through spectral inversion.
highpass = -lowpass
highpass[int((length - 1) // 2)] += 1
highpass = numpy.float64(highpass)
# Now apply the filter
return mkcomplexfirbank(pipeline, head, latency = int((length - 1) * filter_latency + 0.25), fir_matrix = [highpass], time_domain = td)
def bandpass(pipeline, head, rate, length = 1.0, f_low = 100, f_high = 400, filter_latency = 0.5, freq_res = 0.0, td = True):
length = int(length * rate)
# Find alpha, and the actual frequency resolution
alpha = freq_res * length / rate if freq_res > 0.0 else 3.0
alpha = 1.0 if alpha < 1.0 else alpha
freq_res = alpha * rate / length
# Adjust the cutoff frequency to "protect" the passband.
f_low -= 0.75 * freq_res
# Make a DPSS window
dpss = fir.kaiser(length, numpy.pi * alpha) # fir.DPSS(length, alpha, max_time = 10)
# Compute a temporary low-pass filter.
lowpass = numpy.sinc(2 * numpy.float128(f_low) / rate * (numpy.arange(numpy.float128(length)) - (length - 1) // 2))
lowpass *= dpss
lowpass /= numpy.sum(lowpass)
# Create the high-pass filter from the low-pass filter through spectral inversion.
highpass = -lowpass
highpass[(length - 1) // 2] += 1
# Adjust the cutoff frequency to "protect" the passband.
f_high += 0.75 * freq_res
# Compute the low-pass filter.
lowpass = numpy.sinc(2 * numpy.float128(f_high) / rate * (numpy.arange(numpy.float128(length)) - (length - 1) // 2))
lowpass *= dpss
lowpass /= numpy.sum(lowpass)
# Do a circular convolution of the high-pass and low-pass filters to make a band-pass filter.
bandpass = numpy.zeros(length, dtype = numpy.float128)
for i in range(length):
bandpass[i] = numpy.sum(highpass * numpy.roll(lowpass, (length - 1) // 2 - i))
bandpass = numpy.float64(bandpass)
# Now apply the filter
return mkcomplexfirbank(pipeline, head, latency = int((length - 1) * 2 * filter_latency + 0.25), fir_matrix = [bandpass], time_domain = td)
def bandstop(pipeline, head, rate, length = 1.0, f_low = 100, f_high = 400, filter_latency = 0.5, freq_res = 0.0, td = True):
length = int(length * rate)
# Find alpha, and the actual frequency resolution
alpha = freq_res * length / rate if freq_res > 0.0 else 3.0
alpha = 1.0 if alpha < 1.0 else alpha
freq_res = alpha * rate / length
# Adjust the cutoff frequency to "protect" the passband.
f_low += 0.75 * freq_res
# Make a DPSS window
dpss = fir.kaiser(length, numpy.pi * alpha) # fir.DPSS(length, alpha, max_time = 10)
# Compute a temporary low-pass filter.
lowpass = numpy.sinc(2 * numpy.float128(f_low) / rate * (numpy.arange(numpy.float128(length)) - (length - 1) // 2))
lowpass *= dpss
lowpass /= numpy.sum(lowpass)
# Create the high-pass filter from the low-pass filter through spectral inversion.
highpass = -lowpass
highpass[(length - 1) // 2] += 1
# Adjust the cutoff frequency to "protect" the passband.
f_high -= 0.75 * freq_res
# Compute the low-pass filter.
lowpass = numpy.sinc(2 * numpy.float128(f_high) / rate * (numpy.arange(numpy.float128(length)) - (length - 1) // 2))
lowpass *= dpss
lowpass /= numpy.sum(lowpass)
# Do a circular convolution of the high-pass and low-pass filters to make a temporary band-pass filter.
bandpass = numpy.zeros(length, dtype = numpy.float128)
for i in range(length):
bandpass[i] = numpy.sum(highpass * numpy.roll(lowpass, (length - 1) // 2 - i))
# Create a band-stop filter from the band-pass filter through spectral inversion.
bandstop = -bandpass
bandstop[(length - 1) // 2] += 1
bandstop = numpy.float64(bandstop)
# Now apply the filter
return mkcomplexfirbank(pipeline, head, latency = int((length - 1) * 2 * filter_latency + 0.25), fir_matrix = [bandstop], time_domain = td)
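# Illustrative sketch (added for clarity, not part of the original file):
# highpass(), bandpass() and bandstop() above all rely on spectral inversion:
# negating an odd-length, unity-DC-gain lowpass FIR and adding 1 at its center
# tap turns it into the complementary highpass.
def _example_spectral_inversion(lowpass_taps):
	taps = -numpy.asarray(lowpass_taps, dtype = numpy.float64)
	taps[(len(taps) - 1) // 2] += 1.0
	return taps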
def linear_phase_filter(pipeline, head, shift_samples, num_samples = 256, gain = 1.0, filter_update = None, sample_rate = 2048, update_samples = 320, average_samples = 1, phase_measurement_frequency = 100, taper_length = 320, kernel_endtime = None, filter_timeshift = 0):
# Apply a linear-phase filter to shift timestamps. shift_samples is the number
# of samples of timestamp shift. It need not be an integer. A positive value
# advances the output data relative to the timestamps, and a negative value
# delays the output.
# Compute filter using odd filter length
odd_num_samples = int(num_samples) - (1 - int(num_samples) % 2)
filter_latency_samples = int(num_samples / 2) + int(numpy.floor(shift_samples))
fractional_shift_samples = shift_samples % 1
# Make a filter using a sinc table, slightly shifted relative to the samples
sinc_arg = numpy.arange(-int(odd_num_samples / 2), 1 + int(odd_num_samples / 2)) + fractional_shift_samples
sinc_filter = numpy.sinc(sinc_arg)
# Apply a Blackman window
sinc_filter *= numpy.blackman(odd_num_samples)
# Normalize the filter
sinc_filter *= gain / numpy.sum(sinc_filter)
# In case filter length is actually even
if not int(num_samples) % 2:
sinc_filter = numpy.insert(sinc_filter, 0, 0.0)
# Filter the data
if filter_update is None:
# Static filter
head = mkcomplexfirbank(pipeline, head, latency = filter_latency_samples, fir_matrix = [sinc_filter[::-1]], time_domain = True)
else:
# Filter gets updated with variable time delay and gain
if kernel_endtime is None:
# Update filter as soon as new filter is available, and do it with minimal latency
head = pipeparts.mkgeneric(pipeline, head, "lal_tdwhiten", kernel = sinc_filter[::-1], latency = filter_latency_samples, taper_length = taper_length)
filter_update = mkadaptivefirfilt(pipeline, filter_update, variable_filter_length = num_samples, adaptive_filter_length = num_samples, update_samples = update_samples, average_samples = average_samples, filter_sample_rate = sample_rate, phase_measurement_frequency = phase_measurement_frequency)
filter_update.connect("notify::adaptive-filter", update_filter, head, "adaptive_filter", "kernel")
else:
# Update filters at specified timestamps to ensure reproducibility
head = pipeparts.mkgeneric(pipeline, mkqueue(pipeline, head), "lal_tdwhiten", kernel = sinc_filter[::-1], latency = filter_latency_samples, taper_length = taper_length, kernel_endtime = kernel_endtime)
filter_update = mkadaptivefirfilt(pipeline, filter_update, variable_filter_length = num_samples, adaptive_filter_length = num_samples, update_samples = update_samples, average_samples = average_samples, filter_sample_rate = sample_rate, phase_measurement_frequency = phase_measurement_frequency, filter_timeshift = filter_timeshift)
filter_update.connect("notify::adaptive-filter", update_filter, head, "adaptive_filter", "kernel")
filter_update.connect("notify::filter-endtime", update_property_simple, head, "filter_endtime", "kernel_endtime", 1)
return head
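# Illustrative sketch (added for clarity, not part of the original file): the
# static filter built inside linear_phase_filter() is a fractional-delay
# filter: a sinc table sampled off-grid by the fractional part of the shift
# and tapered with a Blackman window.
def _example_fractional_delay_filter(shift_samples = 0.25, num_samples = 257, gain = 1.0):
	half = (num_samples - 1) // 2
	frac = shift_samples % 1
	taps = numpy.sinc(numpy.arange(-half, half + 1) + frac)
	taps *= numpy.blackman(num_samples)
	taps *= gain / numpy.sum(taps)
	return taps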
def whiten(pipeline, head, num_samples = 512, nyq_magnitude = 1e15, scale = 'log', td = True):
# Number of filter samples should be even, since numpy's inverse real fft returns an even length array
num_samples += num_samples % 2
fd_num_samples = num_samples // 2 + 1
fd_filter = numpy.ones(fd_num_samples)
fd_filter[-1] = nyq_magnitude
if scale == 'log':
log_nyq_mag = numpy.log10(nyq_magnitude)
for i in range(1, fd_num_samples - 1):
fd_filter[i] = pow(10, log_nyq_mag * float(i) / (fd_num_samples - 1))
elif scale == 'linear':
for i in range(1, fd_num_samples - 1):
fd_filter[i] = nyq_magnitude * float(i) / (fd_num_samples - 1)
else:
raise ValueError("calibration_parts.whiten(): scale must be either 'log' or 'linear'.")
# Take an inverse fft to get a time-domain filter
whiten_filter = numpy.fft.irfft(fd_filter)
# Add delay of half the filter length
whiten_filter = numpy.roll(whiten_filter, num_samples // 2)
# Window the filter
whiten_filter *= numpy.blackman(num_samples)
# Apply the filter
return mkcomplexfirbank(pipeline, head, latency = num_samples // 2, fir_matrix = [whiten_filter[::-1]], time_domain = td)
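# Illustrative sketch (added for clarity, not part of the original file):
# whiten() designs its filter in the frequency domain and converts it to a
# time-domain FIR with an inverse real FFT, then centers and windows it. The
# 'log' scale path is equivalent to a logspace from 1 to nyq_magnitude:
def _example_fd_filter_design(num_samples = 512, nyq_magnitude = 1e15):
	num_samples += num_samples % 2
	fd = numpy.logspace(0.0, numpy.log10(nyq_magnitude), num_samples // 2 + 1)
	taps = numpy.fft.irfft(fd)
	taps = numpy.roll(taps, num_samples // 2)  # delay by half the filter length
	taps *= numpy.blackman(num_samples)
	return taps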
def compute_rms(pipeline, head, rate, average_time, f_min = None, f_max = None, filter_latency = 0.5, rate_out = 16, td = True):
# Find the root mean square amplitude of a signal between two frequencies
# Downsample to save computational cost
head = mkresample(pipeline, head, 4, filter_latency == 0.0, rate)
# Remove any frequency content we don't care about
if (f_min is not None) and (f_max is not None):
head = bandpass(pipeline, head, rate, f_low = f_min, f_high = f_max, filter_latency = filter_latency, td = td)
elif f_min is not None:
head = highpass(pipeline, head, rate, fcut = f_min, filter_latency = filter_latency, td = td)
elif f_max is not None:
head = lowpass(pipeline, head, rate, fcut = f_max, filter_latency = filter_latency, td = td)
# Square it
head = mkpow(pipeline, head, exponent = 2.0)
# Downsample again to save computational cost
head = mkresample(pipeline, head, 4, filter_latency == 0.0, rate_out)
# Compute running average
head = pipeparts.mkgeneric(pipeline, head, "lal_smoothkappas", default_kappa_re = 0.0, array_size = 1, avg_array_size = average_time * rate_out, filter_latency = filter_latency)
# Take the square root
head = mkpow(pipeline, head, exponent = 0.5)
return head
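# Illustrative sketch (added for clarity, not part of the original file):
# compute_rms() above is, conceptually, band-limit -> square -> running
# average -> square root. A plain-numpy version over a finite block:
def _example_block_rms(x, avg_samples = 16):
	x = numpy.asarray(x, dtype = numpy.float64)
	kernel = numpy.ones(avg_samples) / avg_samples
	running_mean = numpy.convolve(x * x, kernel, mode = 'same')
	return numpy.sqrt(running_mean)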
#
# Calibration factor related functions
#
def smooth_kappas_no_coherence(pipeline, head, var, expected, N, Nav, default_to_median, filter_latency):
# Find median of calibration factors array with size N and smooth out medians with an average over Nav samples
# Use the maximum_offset_re property to determine whether input kappas are good or not
head = pipeparts.mkgeneric(pipeline, head, "lal_smoothkappas", maximum_offset_re = var, default_kappa_re = expected, array_size = N, avg_array_size = Nav, default_to_median = default_to_median, filter_latency = filter_latency)
return head
def smooth_complex_kappas_no_coherence(pipeline, head, real_var, imag_var, real_expected, imag_expected, N, Nav, default_to_median, filter_latency):
# Find median of complex calibration factors array with size N, split into real and imaginary parts, and smooth out medians with an average over Nav samples
# Use the maximum_offset_re and maximum_offset_im properties to determine whether input kappas are good or not
head = pipeparts.mkgeneric(pipeline, head, "lal_smoothkappas", maximum_offset_re = real_var, maximum_offset_im = imag_var, default_kappa_re = real_expected, default_kappa_im = imag_expected, array_size = N, avg_array_size = Nav, default_to_median = default_to_median, filter_latency = filter_latency)
return head
def smooth_kappas(pipeline, head, expected, N, Nav, default_to_median, filter_latency):
# Find median of calibration factors array with size N and smooth out medians with an average over Nav samples
# Assume input was previously gated with coherence uncertainty to determine if input kappas are good or not
head = pipeparts.mkgeneric(pipeline, head, "lal_smoothkappas", default_kappa_re = expected, array_size = N, avg_array_size = Nav, default_to_median = default_to_median, filter_latency = filter_latency)
return head
def smooth_complex_kappas(pipeline, head, real_expected, imag_expected, N, Nav, default_to_median, filter_latency):
# Find median of complex calibration factors array with size N and smooth out medians with an average over Nav samples
# Assume input was previously gated with coherence uncertainty to determine if input kappas are good or not
head = pipeparts.mkgeneric(pipeline, head, "lal_smoothkappas", default_kappa_re = real_expected, default_kappa_im = imag_expected, array_size = N, avg_array_size = Nav, default_to_median = default_to_median, filter_latency = filter_latency)
return head
def track_bad_kappas_no_coherence(pipeline, head, var, expected, N, Nav, default_to_median, filter_latency):
	# Produce output of 1's or 0's that correspond to median not corrupted (1) or corrupted (0) based on whether median of input array is default value.
head = pipeparts.mkgeneric(pipeline, head, "lal_smoothkappas", maximum_offset_re = var, default_kappa_re = expected, array_size = N, avg_array_size = Nav if default_to_median else 1, track_bad_kappa = True, default_to_median = default_to_median, filter_latency = filter_latency)
return head
def track_bad_complex_kappas_no_coherence(pipeline, head, real_var, imag_var, real_expected, imag_expected, N, Nav, default_to_median, filter_latency):
	# Produce output of 1's or 0's that correspond to median not corrupted (1) or corrupted (0) based on whether median of input array is default value.
# Real and imaginary parts are done separately (outputs of lal_smoothkappas can be 1+i, 1, i, or 0)
head = pipeparts.mkgeneric(pipeline, head, "lal_smoothkappas", maximum_offset_re = real_var, maximum_offset_im = imag_var, default_kappa_re = real_expected, default_kappa_im = imag_expected, array_size = N, avg_array_size = Nav if default_to_median else 1, track_bad_kappa = True, default_to_median = default_to_median, filter_latency = filter_latency)
re, im = split_into_real(pipeline, head)
return re, im
def track_bad_kappas(pipeline, head, expected, N, Nav, default_to_median, filter_latency):
	# Produce output of 1's or 0's that correspond to median not corrupted (1) or corrupted (0) based on whether median of input array is default value.
head = pipeparts.mkgeneric(pipeline, head, "lal_smoothkappas", default_kappa_re = expected, array_size = N, avg_array_size = Nav if default_to_median else 1, track_bad_kappa = True, default_to_median = default_to_median, filter_latency = filter_latency)
return head
def track_bad_complex_kappas(pipeline, head, real_expected, imag_expected, N, Nav, default_to_median, filter_latency):
	# Produce output of 1's or 0's that correspond to median not corrupted (1) or corrupted (0) based on whether median of input array is default value.
# Real and imaginary parts are done separately (outputs of lal_smoothkappas can be 1+i, 1, i, or 0)
head = pipeparts.mkgeneric(pipeline, head, "lal_smoothkappas", default_kappa_re = real_expected, default_kappa_im = imag_expected, array_size = N, avg_array_size = Nav if default_to_median else 1, track_bad_kappa = True, default_to_median = default_to_median, filter_latency = filter_latency)
re, im = split_into_real(pipeline, head)
return re, im
def smooth_kappas_no_coherence_test(pipeline, head, var, expected, N, Nav, default_to_median, filter_latency):
# Find median of calibration factors array with size N and smooth out medians with an average over Nav samples
head = pipeparts.mktee(pipeline, head)
pipeparts.mknxydumpsink(pipeline, head, "raw_kappatst.txt")
head = pipeparts.mkgeneric(pipeline, head, "lal_smoothkappas", maximum_offset_re = var, default_kappa_re = expected, array_size = N, avg_array_size = Nav, default_to_median = default_to_median, filter_latency = filter_latency)
head = pipeparts.mktee(pipeline, head)
pipeparts.mknxydumpsink(pipeline, head, "smooth_kappatst.txt")
return head
def compute_kappa_bits(pipeline, smooth, expected_real, expected_imag, real_ok_var, imag_ok_var, median_samples, avg_samples, status_out_smooth = 1, starting_rate=16, ending_rate=16):
# Compensate for digital error in the running average
expected_real_sum = 0.0
expected_imag_sum = 0.0
for i in range(0, avg_samples):
expected_real_sum = expected_real_sum + expected_real
expected_imag_sum = expected_imag_sum + expected_imag
expected_real = expected_real_sum / avg_samples
expected_imag = expected_imag_sum / avg_samples
# Compute the property bad-data-intervals
if type(real_ok_var) is not list:
real_ok_var = [expected_real - real_ok_var, expected_real + real_ok_var]
if type(imag_ok_var) is not list:
imag_ok_var = [expected_imag - imag_ok_var, expected_imag + imag_ok_var]
bad_data_intervals = [real_ok_var[0], imag_ok_var[0], expected_real, expected_imag, expected_real, expected_imag, real_ok_var[1], imag_ok_var[1]]
# Use lal_insertgap to check if the data is within the required range
smoothInRange = mkinsertgap(pipeline, smooth, bad_data_intervals = bad_data_intervals, insert_gap = True, remove_gap = False, replace_value = 0, fill_discont = True, block_duration = Gst.SECOND)
# Turn it into a bit vector
smoothInRange = pipeparts.mkbitvectorgen(pipeline, smoothInRange, nongap_is_control = True, bit_vector = status_out_smooth)
smoothInRange = pipeparts.mkcapsfilter(pipeline, smoothInRange, "audio/x-raw, format=U32LE, rate=%d, channel-mask=(bitmask)0x0" % starting_rate)
if starting_rate != ending_rate:
smoothInRange = pipeparts.mkgeneric(pipeline, smoothInRange, "lal_logicalundersample", required_on = status_out_smooth, status_out = status_out_smooth)
smoothInRange = pipeparts.mkcapsfilter(pipeline, smoothInRange, "audio/x-raw, format=U32LE, rate=%d, channel-mask=(bitmask)0x0" % ending_rate)
smoothInRangetee = pipeparts.mktee(pipeline, smoothInRange)
# Require that kappas have been in range for enough time for the smoothing process to settle
min_samples = int(median_samples / 2) + avg_samples
smoothInRange = mkgate(pipeline, smoothInRangetee, smoothInRangetee, status_out_smooth, attack_length = -min_samples)
smoothInRange = pipeparts.mkbitvectorgen(pipeline, smoothInRange, nongap_is_control = True, bit_vector = status_out_smooth)
smoothInRange = pipeparts.mkcapsfilter(pipeline, smoothInRange, "audio/x-raw, format=U32LE, rate=%d, channel-mask=(bitmask)0x0" % ending_rate)
return smoothInRange
def compute_kappa_bits_only_real(pipeline, smooth, expected, ok_var, median_samples, avg_samples, status_out_smooth = 1, starting_rate=16, ending_rate=16):
# Compensate for digital error in the running average
expected_sum = 0.0
for i in range(0, avg_samples):
expected_sum = expected_sum + expected
expected = expected_sum / avg_samples
if type(ok_var) is list:
smoothInRange = mkinsertgap(pipeline, smooth, bad_data_intervals = [ok_var[0], expected, expected, ok_var[1]], insert_gap = True, remove_gap = False, replace_value = 0, fill_discont = True, block_duration = Gst.SECOND)
else:
smoothInRange = mkinsertgap(pipeline, smooth, bad_data_intervals = [expected - ok_var, expected, expected, expected + ok_var], insert_gap = True, remove_gap = False, replace_value = 0, fill_discont = True, block_duration = Gst.SECOND)
smoothInRange = pipeparts.mkbitvectorgen(pipeline, smoothInRange, nongap_is_control = True, bit_vector = status_out_smooth)
smoothInRange = pipeparts.mkcapsfilter(pipeline, smoothInRange, "audio/x-raw, format=U32LE, rate=%d, channel-mask=(bitmask)0x0" % starting_rate)
if starting_rate != ending_rate:
smoothInRange = pipeparts.mkgeneric(pipeline, smoothInRange, "lal_logicalundersample", required_on = status_out_smooth, status_out = status_out_smooth)
smoothInRange = pipeparts.mkcapsfilter(pipeline, smoothInRange, "audio/x-raw, format=U32LE, rate=%d, channel-mask=(bitmask)0x0" % ending_rate)
smoothInRangetee = pipeparts.mktee(pipeline, smoothInRange)
min_samples = int(median_samples / 2) + avg_samples
smoothInRange = mkgate(pipeline, smoothInRangetee, smoothInRangetee, status_out_smooth, attack_length = -min_samples)
smoothInRange = pipeparts.mkbitvectorgen(pipeline, smoothInRange, nongap_is_control = True, bit_vector = status_out_smooth)
return smoothInRange
def merge_into_complex(pipeline, real, imag):
# Merge real and imag into one complex channel with complex caps
head = mkinterleave(pipeline, list_srcs(pipeline, real, imag))
head = pipeparts.mktogglecomplex(pipeline, head)
return head
def split_into_real(pipeline, complex_chan):
# split complex channel with complex caps into two channels (real and imag) with real caps
complex_chan = pipeparts.mktee(pipeline, complex_chan)
real = pipeparts.mkgeneric(pipeline, complex_chan, "creal")
imag = pipeparts.mkgeneric(pipeline, complex_chan, "cimag")
# elem = pipeparts.mkgeneric(pipeline, elem, "deinterleave", keep_positions=True)
# real = pipeparts.mkgeneric(pipeline, None, "identity")
# pipeparts.src_deferred_link(elem, "src_0", real.get_static_pad("sink"))
# imag = pipeparts.mkgeneric(pipeline, None, "identity")
# pipeparts.src_deferred_link(elem, "src_1", imag.get_static_pad("sink"))
return real, imag
def complex_audioamplify(pipeline, chan, WR, WI):
# Multiply a complex channel chan by a complex number WR+I WI
# Re[out] = -chanI*WI + chanR*WR
# Im[out] = chanR*WI + chanI*WR
head = pipeparts.mktogglecomplex(pipeline, chan)
head = pipeparts.mkmatrixmixer(pipeline, head, matrix=[[WR, WI],[-WI, WR]])
head = pipeparts.mktogglecomplex(pipeline, head)
return head
def complex_inverse(pipeline, head):
# Invert a complex number (1/z)
head = mkpow(pipeline, head, exponent = -1)
return head
def complex_division(pipeline, a, b):
# Perform complex division of c = a/b and output the complex quotient c
bInv = complex_inverse(pipeline, b)
c = mkmultiplier(pipeline, list_srcs(pipeline, a, bInv))
return c
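# Illustrative check (added for clarity, not part of the original file):
# complex_division() forms a / b as a * b**-1 with stream elements; the same
# identity with plain complex numbers:
def _example_complex_division(a = 3.0 + 4.0j, b = 1.0 - 2.0j):
	return a * b ** -1  # equal to a / b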
def compute_kappatst_from_filters_file(pipeline, derrfesd, tstexcfesd, pcalfdarm, derrfdarm, ktstfacR, ktstfacI):
#
# \kappa_TST = ktstfac * (derrfesd/tstexcfesd) * (pcalfdarm/derrfdarm)
# ktstfac = EP1 = (1/A0fesd) * (C0fdarm/(1+G0fdarm)) * ((1+G0fesd)/C0fesd)
#
derrfdarminv = complex_inverse(pipeline, derrfdarm)
tstexcfesdinv = complex_inverse(pipeline, tstexcfesd)
ktst = mkmultiplier(pipeline, list_srcs(pipeline, pcalfdarm, derrfdarminv, tstexcfesdinv, derrfesd))
ktst = complex_audioamplify(pipeline, ktst, ktstfacR, ktstfacI)
return ktst
def compute_kappatst(pipeline, derrfesd, tstexcfesd, pcalfdarm, derrfdarm, ktstfac):
#
# \kappa_TST = ktstfac * (derrfesd/tstexcfesd) * (pcalfdarm/derrfdarm)
# ktstfac = EP1 = (1/A0fesd) * (C0fdarm/(1+G0fdarm)) * ((1+G0fesd)/C0fesd)
#
derrfdarminv = complex_inverse(pipeline, derrfdarm)
tstexcfesdinv = complex_inverse(pipeline, tstexcfesd)
ktst = mkmultiplier(pipeline, list_srcs(pipeline, ktstfac, pcalfdarm, derrfdarminv, tstexcfesdinv, derrfesd))
return ktst
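# Illustrative numeric check (added for clarity, not part of the original file)
# of the \kappa_TST formula documented above, using made-up complex values:
def _example_kappatst_check():
	ktstfac = 0.5 + 0.1j
	derrfesd, tstexcfesd = 2.0 + 1.0j, 1.0 - 0.5j
	pcalfdarm, derrfdarm = 0.8 + 0.2j, 1.1 + 0.3j
	return ktstfac * (derrfesd / tstexcfesd) * (pcalfdarm / derrfdarm)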
def compute_kappapum_from_filters_file(pipeline, derrfpum, pumexcfpum, pcalfpcal, derrfpcal, kpumfacR, kpumfacI):
#
# \kappa_PUM = kpumfac * [derr(fpum) / pumexc(fpum)] * [pcal(fpcal) / derr(fpcal)]
# kpumfac = EP15 = [1 / A_PUM0(fpum)] * [C0(fpcal) / (1 + G0(fpcal))] * [(1 + G0(fpum)) / C0(fpum)]
#
pumexcfpuminv = complex_inverse(pipeline, pumexcfpum)
derrfpcalinv = complex_inverse(pipeline, derrfpcal)
kpum = mkmultiplier(pipeline, list_srcs(pipeline, derrfpum, pumexcfpuminv, pcalfpcal, derrfpcalinv))
kpum = complex_audioamplify(pipeline, kpum, kpumfacR, kpumfacI)
return kpum
def compute_kappapum(pipeline, derrfpum, pumexcfpum, pcalfpcal, derrfpcal, kpumfac):
#
# \kappa_PUM = kpumfac * [derr(fpum) / pumexc(fpum)] * [pcal(fpcal) / derr(fpcal)]
# kpumfac = EP15 = [1 / A_PUM0(fpum)] * [C0(fpcal) / (1 + G0(fpcal))] * [(1 + G0(fpum)) / C0(fpum)]
#
pumexcfpuminv = complex_inverse(pipeline, pumexcfpum)
derrfpcalinv = complex_inverse(pipeline, derrfpcal)
kpum = mkmultiplier(pipeline, list_srcs(pipeline, kpumfac, derrfpum, pumexcfpuminv, pcalfpcal, derrfpcalinv))
return kpum
def compute_afctrl_from_filters_file(pipeline, derrfdarm, excfdarm, pcalfpcal, derrfpcal, afctrlfacR, afctrlfacI):
#
# A(f_ctrl) = -afctrlfac * (derrfdarm/excfdarm) * (pcalfpcal/derrfpcal)
# afctrlfac = EP2 = C0fpcal/(1+G0fpcal) * (1+G0fctrl)/C0fctrl
#
derrfpcalinv = complex_inverse(pipeline, derrfpcal)
excfdarminv = complex_inverse(pipeline, excfdarm)
afctrl = mkmultiplier(pipeline, list_srcs(pipeline, pcalfpcal, derrfpcalinv, excfdarminv, derrfdarm))
afctrl = complex_audioamplify(pipeline, afctrl, -1.0 * afctrlfacR, -1.0 * afctrlfacI)
return afctrl
def compute_afctrl(pipeline, derrfdarm, excfdarm, pcalfpcal, derrfpcal, afctrlfac):
#
# A(f_ctrl) = -afctrlfac * (derrfdarm/excfdarm) * (pcalfpcal/derrfpcal)
# afctrlfac = EP2 = C0fpcal/(1+G0fpcal) * (1+G0fctrl)/C0fctrl
#
derrfpcalinv = complex_inverse(pipeline, derrfpcal)
excfdarminv = complex_inverse(pipeline, excfdarm)
afctrl = mkmultiplier(pipeline, list_srcs(pipeline, afctrlfac, pcalfpcal, derrfpcalinv, excfdarminv, derrfdarm))
afctrl = complex_audioamplify(pipeline, afctrl, -1.0, 0.0)
return afctrl
def compute_kappauim_from_filters_file(pipeline, EP16R, EP16I, afctrl, ktst, EP4R, EP4I, kpum, EP17R, EP17I):
#
# \kappa_uim = EP16 * (afctrl - ktst * EP4 - kpum * EP17)
#
kuim = complex_audioamplify(pipeline, mkadder(pipeline, list_srcs(pipeline, afctrl, complex_audioamplify(pipeline, ktst, -1.0 * EP4R, -1.0 * EP4I), complex_audioamplify(pipeline, kpum, -1.0 * EP17R, -1.0 * EP17I))), EP16R, EP16I)
return kuim
def compute_kappauim(pipeline, EP16, afctrl, ktst, EP4, kpum, EP17):
#
# \kappa_uim = EP16 * (afctrl - ktst * EP4 - kpum * EP17)
#
ep4_kappatst = mkmultiplier(pipeline, list_srcs(pipeline, ktst, complex_audioamplify(pipeline, EP4, -1.0, 0.0)))
ep17_kappapum = mkmultiplier(pipeline, list_srcs(pipeline, kpum, complex_audioamplify(pipeline, EP17, -1.0, 0.0)))
kuim = mkadder(pipeline, list_srcs(pipeline, afctrl, ep4_kappatst, ep17_kappapum))
kuim = mkmultiplier(pipeline, list_srcs(pipeline, EP16, kuim))
return kuim
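# In scalar form both variants above compute
#   kappa_uim = EP16 * (afctrl - ktst * EP4 - kpum * EP17);
# the *_from_filters_file version takes EP4, EP16 and EP17 as fixed
# real/imaginary constants, while this version takes them as complex streams.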
def compute_kappauim_from_filters_file_uim_line(pipeline, derrfuim, uimexcfuim, pcalfpcal, derrfpcal, kuimfacR, kuimfacI):
#
# \kappa_UIM = kuimfac * [derr(fuim) / uimexc(fuim)] * [pcal(fpcal) / derr(fpcal)]
# kuimfac = EP22 = [1 / A_UIM0(fuim)] * [C0(fpcal) / (1 + G0(fpcal))] * [(1 + G0(fuim)) / C0(fuim)]
#
uimexcfuiminv = complex_inverse(pipeline, uimexcfuim)
derrfpcalinv = complex_inverse(pipeline, derrfpcal)
kuim = mkmultiplier(pipeline, list_srcs(pipeline, derrfuim, uimexcfuiminv, pcalfpcal, derrfpcalinv))
kuim = complex_audioamplify(pipeline, kuim, kuimfacR, kuimfacI)
return kuim
def compute_kappauim_uim_line(pipeline, derrfuim, uimexcfuim, pcalfpcal, derrfpcal, kuimfac):
#
# \kappa_UIM = kuimfac * [derr(fuim) / uimexc(fuim)] * [pcal(fpcal) / derr(fpcal)]
# kuimfac = EP22 = [1 / A_UIM0(fuim)] * [C0(fpcal) / (1 + G0(fpcal))] * [(1 + G0(fuim)) / C0(fuim)]
#
uimexcfuiminv = complex_inverse(pipeline, uimexcfuim)
derrfpcalinv = complex_inverse(pipeline, derrfpcal)
kuim = mkmultiplier(pipeline, list_srcs(pipeline, kuimfac, derrfuim, uimexcfuiminv, pcalfpcal, derrfpcalinv))
return kuim
def compute_kappapu_from_filters_file(pipeline, EP3R, EP3I, afctrl, ktst, EP4R, EP4I):
#
# \kappa_pu = EP3 * (afctrl - ktst * EP4)
#
kpu = complex_audioamplify(pipeline, mkadder(pipeline, list_srcs(pipeline, afctrl, complex_audioamplify(pipeline, ktst, -1.0*EP4R, -1.0*EP4I))), EP3R, EP3I)
return kpu
def compute_kappapu(pipeline, EP3, afctrl, ktst, EP4):
#
# \kappa_pu = EP3 * (afctrl - ktst * EP4)
#
ep4_kappatst = mkmultiplier(pipeline, list_srcs(pipeline, ktst, complex_audioamplify(pipeline, EP4, -1.0, 0.0)))
afctrl_kappatst = mkadder(pipeline, list_srcs(pipeline, afctrl, ep4_kappatst))
kpu = mkmultiplier(pipeline, list_srcs(pipeline, EP3, afctrl_kappatst))
return kpu
def compute_kappaa_from_filters_file(pipeline, afctrl, EP4R, EP4I, EP5R, EP5I):
#
# \kappa_a = afctrl / (EP4 + EP5)
#
facR = (EP4R + EP5R) / ((EP4R + EP5R)**2 + (EP4I + EP5I)**2)
facI = -(EP4I + EP5I) / ((EP4R + EP5R)**2 + (EP4I + EP5I)**2)
ka = complex_audioamplify(pipeline, afctrl, facR, facI)
return ka
def compute_kappaa(pipeline, afctrl, EP4, EP5):
#
# \kappa_a = afctrl / (EP4 + EP5)
#
ka = complex_division(pipeline, afctrl, mkadder(pipeline, list_srcs(pipeline, EP4, EP5)))
return ka
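# Scalar check of the constant amplification used in the filters-file variant
# above (hypothetical values): multiplying by conj(EP4 + EP5) / |EP4 + EP5|^2
# is the same as dividing by (EP4 + EP5).
#   import numpy
#   EP4, EP5 = 0.3 + 0.1j, 0.2 - 0.05j
#   z = EP4 + EP5
#   assert numpy.isclose((z.real - 1j * z.imag) / abs(z)**2, 1.0 / z)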
def compute_exact_kappas_from_filters_file(pipeline, X, freqs, EPICS, rate, default_fcc = 400, default_fs_squared = 1.0, default_fs_over_Q = 1.0):
#
# See P1900052, Section 5.2.6 for details. All constants are contained in the list
# variable EPICS. The variable freqs is a list containing calibration line
# frequencies, stored in the order f1, f2, fT, fP, fU, i.e., the Pcal lines come
# first, and then the actuator lines. All other quantities evaluated at the
# calibration lines are stored in the same order. The list variable X contains the
# ratios X[i] = injection(f_i) / d_err(f_i) for each calibration line frequency.
#
kappas = []
num_lines = len(freqs)
num_stages = num_lines - 2 # Stages of actuation (currently 3)
MV_matrix = list(numpy.zeros(2 * num_stages * (2 * num_stages + 1)))
Y = []
Yreal = []
Yimag = []
CAX = []
CAXreal = []
CAXimag = []
Gres = []
kappas = []
for i in range(num_lines):
if i < 2:
# Then it's a Pcal line
Y.append(pipeparts.mktee(pipeline, complex_audioamplify(pipeline, X[i], EPICS[2 * (1 + num_stages) * i], EPICS[2 * (1 + num_stages) * i + 1])))
Yreal.append(pipeparts.mktee(pipeline, mkcapsfiltersetter(pipeline, pipeparts.mkgeneric(pipeline, Y[i], "creal"), "audio/x-raw,format=F64LE,rate=%d,channel-mask=(bitmask)0x0,channels=1" % rate, name = "capsfilter_Yreal_%d" % i)))
Yimag.append(pipeparts.mktee(pipeline, mkcapsfiltersetter(pipeline, pipeparts.mkgeneric(pipeline, Y[i], "cimag"), "audio/x-raw,format=F64LE,rate=%d,channel-mask=(bitmask)0x0,channels=1" % rate, name = "capsfilter_Yimag_%d" % i)))
else:
# It's an actuator line
CAX.append(pipeparts.mktee(pipeline, complex_audioamplify(pipeline, X[i], EPICS[2 * (1 + num_stages) * i], EPICS[2 * (1 + num_stages) * i + 1])))
CAXreal.append(pipeparts.mktee(pipeline, mkcapsfiltersetter(pipeline, pipeparts.mkgeneric(pipeline, CAX[-1], "creal"), "audio/x-raw,format=F64LE,rate=%d,channel-mask=(bitmask)0x0,channels=1" % rate, name = "capsfilter_CAXreal_%d" % i)))
CAXimag.append(pipeparts.mktee(pipeline, mkcapsfiltersetter(pipeline, pipeparts.mkgeneric(pipeline, CAX[-1], "cimag"), "audio/x-raw,format=F64LE,rate=%d,channel-mask=(bitmask)0x0,channels=1" % rate, name = "capsfilter_CAXimag_%d" % i)))
# Let's start by computing the V's of Eqs. 5.2.78 and 5.2.79
for j in range(num_stages):
factor1 = pow(freqs[0], -2) - pow(freqs[2 + j], -2)
factor2 = pow(freqs[2 + j], -2) - pow(freqs[1], -2)
factor3 = freqs[1] * (pow(freqs[0], 2) - pow(freqs[2 + j], 2))
factor4 = freqs[0] * (pow(freqs[2 + j], 2) - pow(freqs[1], 2))
Vj = mkadder(pipeline, list_srcs(pipeline, pipeparts.mkaudioamplify(pipeline, pipeparts.mkcapsfilter(pipeline, Yreal[1], "audio/x-raw,format=F64LE,rate=%d,channel-mask=(bitmask)0x0,channels=1" % rate), factor1), pipeparts.mkaudioamplify(pipeline, pipeparts.mkcapsfilter(pipeline, Yreal[0], "audio/x-raw,format=F64LE,rate=%d,channel-mask=(bitmask)0x0,channels=1" % rate), factor2)))
Vj = pipeparts.mkcapsfilter(pipeline, Vj, "audio/x-raw,format=F64LE,rate=%d,channel-mask=(bitmask)0x0,channels=1" % rate)
Vjplus3 = mkadder(pipeline, list_srcs(pipeline, pipeparts.mkaudioamplify(pipeline, pipeparts.mkcapsfilter(pipeline, Yimag[1], "audio/x-raw,format=F64LE,rate=%d,channel-mask=(bitmask)0x0,channels=1" % rate), factor3), pipeparts.mkaudioamplify(pipeline, pipeparts.mkcapsfilter(pipeline, Yimag[0], "audio/x-raw,format=F64LE,rate=%d,channel-mask=(bitmask)0x0,channels=1" % rate), factor4)))
Vjplus3 = pipeparts.mkcapsfilter(pipeline, Vjplus3, "audio/x-raw,format=F64LE,rate=%d,channel-mask=(bitmask)0x0,channels=1" % rate)
MV_matrix[j] = Vj
MV_matrix[num_stages + j] = Vjplus3
# Now let's compute the elements of the matrix M, given by Eqs. 5.2.70 - 5.2.77
# Many of the elements are constant, so make a stream of ones to multiply
if num_stages > 1:
ones = pipeparts.mktee(pipeline, mkpow(pipeline, pipeparts.mkcapsfilter(pipeline, Yreal[0], "audio/x-raw,format=F64LE,rate=%d,channel-mask=(bitmask)0x0,channels=1" % rate), exponent = 0.0))
for j in range(num_stages):
# Time-dependent matrix elements
factor = pow(freqs[0], -2) - pow(freqs[1], -2)
addend = (pow(freqs[0], -2) - pow(freqs[2 + j], -2)) * EPICS[2 * ((1 + num_stages) + 1 + j)] + (pow(freqs[2 + j], -2) - pow(freqs[1], -2)) * EPICS[2 * (1 + j)] - (pow(freqs[0], -2) - pow(freqs[1], -2)) * EPICS[2 * ((2 + j) * (1 + num_stages) + 1 + j)]
Mjj = pipeparts.mkgeneric(pipeline, pipeparts.mkaudioamplify(pipeline, pipeparts.mkcapsfilter(pipeline, CAXreal[j], "audio/x-raw,format=F64LE,rate=%d,channel-mask=(bitmask)0x0,channels=1" % rate), factor), "lal_add_constant", value = addend)
factor = -2.0 * numpy.pi * freqs[2 + j] * (pow(freqs[0], -2) - pow(freqs[1], -2))
addend = -2.0 * numpy.pi * freqs[1] * (pow(freqs[0], -2) - pow(freqs[2 + j], -2)) * EPICS[1 + 2 * ((1 + num_stages) + 1 + j)] - 2.0 * numpy.pi * freqs[0] * (pow(freqs[2 + j], -2) - pow(freqs[1], -2)) * EPICS[1 + 2 * (1 + j)] + 2.0 * numpy.pi * freqs[2 + j] * (pow(freqs[0], -2) - pow(freqs[1], -2)) * EPICS[1 + 2 * ((2 + j) * (1 + num_stages) + 1 + j)]
Mjjplus3 = pipeparts.mkgeneric(pipeline, pipeparts.mkaudioamplify(pipeline, pipeparts.mkcapsfilter(pipeline, CAXimag[j], "audio/x-raw,format=F64LE,rate=%d,channel-mask=(bitmask)0x0,channels=1" % rate), factor), "lal_add_constant", value = addend)
factor = -freqs[2 + j] * (pow(freqs[1], 2) - pow(freqs[0], 2))
addend = freqs[1] * (pow(freqs[0], 2) - pow(freqs[2 + j], 2)) * EPICS[1 + 2 * ((1 + num_stages) + 1 + j)] + freqs[0] * (pow(freqs[2 + j], 2) - pow(freqs[1], 2)) * EPICS[1 + 2 * (1 + j)] + freqs[2 + j] * (pow(freqs[1], 2) - pow(freqs[0], 2)) * EPICS[1 + 2 * ((2 + j) * (1 + num_stages) + 1 + j)]
Mjplus3j = pipeparts.mkgeneric(pipeline, pipeparts.mkaudioamplify(pipeline, pipeparts.mkcapsfilter(pipeline, CAXimag[j], "audio/x-raw,format=F64LE,rate=%d,channel-mask=(bitmask)0x0,channels=1" % rate), factor), "lal_add_constant", value = addend)
factor = -2.0 * numpy.pi * pow(freqs[2 + j], 2) * (pow(freqs[1], 2) - pow(freqs[0], 2))
addend = 2.0 * numpy.pi * pow(freqs[1], 2) * (pow(freqs[0], 2) - pow(freqs[2 + j], 2)) * EPICS[2 * ((1 + num_stages) + 1 + j)] + 2.0 * numpy.pi * pow(freqs[0], 2) * (pow(freqs[2 + j], 2) - pow(freqs[1], 2)) * EPICS[2 * (1 + j)] + 2.0 * numpy.pi * pow(freqs[2 + j], 2) * (pow(freqs[1], 2) - pow(freqs[0], 2)) * EPICS[2 * ((2 + j) * (1 + num_stages) + 1 + j)]
Mjplus3jplus3 = pipeparts.mkgeneric(pipeline, pipeparts.mkaudioamplify(pipeline, pipeparts.mkcapsfilter(pipeline, CAXreal[j], "audio/x-raw,format=F64LE,rate=%d,channel-mask=(bitmask)0x0,channels=1" % rate), factor), "lal_add_constant", value = addend)
# Add these into the matrix
MV_matrix[(1 + j) * 2 * num_stages + j] = Mjj
MV_matrix[(1 + j) * 2 * num_stages + num_stages + j] = Mjjplus3
MV_matrix[(1 + num_stages + j) * 2 * num_stages + j] = Mjplus3j
MV_matrix[(1 + num_stages + j) * 2 * num_stages + num_stages + j] = Mjplus3jplus3
# Constant matrix elements
knotequalj = list(numpy.arange(num_stages))
knotequalj.remove(j)
for k in knotequalj:
factor = (pow(freqs[0], -2) - pow(freqs[2 + j], -2)) * EPICS[2 * ((1 + num_stages) + 1 + k)] + (pow(freqs[2 + j], -2) - pow(freqs[1], -2)) * EPICS[2 * (1 + k)] - (pow(freqs[0], -2) - pow(freqs[1], -2)) * EPICS[2 * ((2 + j) * (1 + num_stages) + 1 + k)]
Mjk = pipeparts.mkaudioamplify(pipeline, pipeparts.mkcapsfilter(pipeline, ones, "audio/x-raw,format=F64LE,rate=%d,channel-mask=(bitmask)0x0,channels=1" % rate), factor)
factor = -2.0 * numpy.pi * freqs[1] * (pow(freqs[0], -2) - pow(freqs[2 + j], -2)) * EPICS[1 + 2 * ((1 + num_stages) + 1 + k)] - 2.0 * numpy.pi * freqs[0] * (pow(freqs[2 + j], -2) - pow(freqs[1], -2)) * EPICS[1 + 2 * (1 + k)] + 2.0 * numpy.pi * freqs[2 + j] * (pow(freqs[0], -2) - pow(freqs[1], -2)) * EPICS[1 + 2 * ((2 + j) * (1 + num_stages) + 1 + k)]
Mjkplus3 = pipeparts.mkaudioamplify(pipeline, pipeparts.mkcapsfilter(pipeline, ones, "audio/x-raw,format=F64LE,rate=%d,channel-mask=(bitmask)0x0,channels=1" % rate), factor)
factor = freqs[1] * (pow(freqs[0], 2) - pow(freqs[2 + j], 2)) * EPICS[1 + 2 * ((1 + num_stages) + 1 + k)] + freqs[0] * (pow(freqs[2 + j], 2) - pow(freqs[1], 2)) * EPICS[1 + 2 * (1 + k)] + freqs[2 + j] * (pow(freqs[1], 2) - pow(freqs[0], 2)) * EPICS[1 + 2 * ((2 + j) * (1 + num_stages) + 1 + k)]
Mjplus3k = pipeparts.mkaudioamplify(pipeline, pipeparts.mkcapsfilter(pipeline, ones, "audio/x-raw,format=F64LE,rate=%d,channel-mask=(bitmask)0x0,channels=1" % rate), factor)
factor = 2.0 * numpy.pi * pow(freqs[1], 2) * (pow(freqs[0], 2) - pow(freqs[2 + j], 2)) * EPICS[2 * ((1 + num_stages) + 1 + k)] + 2.0 * numpy.pi * pow(freqs[0], 2) * (pow(freqs[2 + j], 2) - pow(freqs[1], 2)) * EPICS[2 * (1 + k)] + 2.0 * numpy.pi * pow(freqs[2 + j], 2) * (pow(freqs[1], 2) - pow(freqs[0], 2)) * EPICS[2 * ((2 + j) * (1 + num_stages) + 1 + k)]
Mjplus3kplus3 = pipeparts.mkaudioamplify(pipeline, pipeparts.mkcapsfilter(pipeline, ones, "audio/x-raw,format=F64LE,rate=%d,channel-mask=(bitmask)0x0,channels=1" % rate), factor)
# Add these into the matrix
MV_matrix[(1 + j) * 2 * num_stages + k] = Mjk
MV_matrix[(1 + j) * 2 * num_stages + num_stages + k] = Mjkplus3
MV_matrix[(1 + num_stages + j) * 2 * num_stages + k] = Mjplus3k
MV_matrix[(1 + num_stages + j) * 2 * num_stages + num_stages + k] = Mjplus3kplus3
# Now pass these to the matrix solver to find kappa_T, kappa_P, kappa_U, tau_T, tau_P, and tau_U.
MV_matrix = mkinterleave(pipeline, MV_matrix)
MV_matrix = pipeparts.mkcapsfilter(pipeline, MV_matrix, "audio/x-raw,format=F64LE,rate=%d,channel-mask=(bitmask)0x0,channels=%d" % (rate, (2 * num_stages) * (2 * num_stages + 1)))
kappas = pipeparts.mkgeneric(pipeline, MV_matrix, "lal_matrixsolver")
kappas = list(mkdeinterleave(pipeline, pipeparts.mkcapsfilter(pipeline, kappas, "audio/x-raw,format=F64LE,rate=%d,channel-mask=(bitmask)0x0,channels=%d" % (rate, 2 * num_stages)), 2 * num_stages))
for i in range(len(kappas)):
kappas[i] = pipeparts.mkcapsfilter(pipeline, kappas[i], "audio/x-raw,format=F64LE,rate=%d,channel-mask=(bitmask)0x0,channels=1" % rate)
if i >= len(kappas) // 2:
kappas[i] = mkmultiplier(pipeline, [kappas[i], mkpow(pipeline, kappas[i - len(kappas) // 2], exponent = -1.0)])
kappas[i] = pipeparts.mktee(pipeline, kappas[i])
# Next, compute kappa_C. This is going to take some work...
# Start by computing G_res at each frequency, defined in Eq. 5.2.30
for n in range(2):
Gres_components = []
for j in range(num_stages):
kappajGresjatn = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, pipeparts.mkcapsfilter(pipeline, kappas[j], "audio/x-raw,format=F64LE,rate=%d,channel-mask=(bitmask)0x0,channels=1" % rate), matrix = [[EPICS[2 * (n * (1 + num_stages) + 1 + j)], EPICS[1 + 2 * (n * (1 + num_stages) + 1 + j)]]]))
i_omega_tau = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, pipeparts.mkcapsfilter(pipeline, kappas[num_stages + j], "audio/x-raw,format=F64LE,rate=%d,channel-mask=(bitmask)0x0,channels=1" % rate), matrix = [[0, 2.0 * numpy.pi * freqs[n]]]))
i_omega_tau = mkcapsfiltersetter(pipeline, i_omega_tau, "audio/x-raw,format=Z128LE,rate=%d,channel-mask=(bitmask)0x0,channels=1" % rate)
phase = pipeparts.mkgeneric(pipeline, i_omega_tau, "cexp")
Gres_components.append(mkmultiplier(pipeline, list_srcs(pipeline, kappajGresjatn, phase)))
Gres.append(mkadder(pipeline, Gres_components))
sensing_inputs = mkinterleave(pipeline, Gres + Y, complex_data = True)
sensing_outputs = pipeparts.mkgeneric(pipeline, sensing_inputs, "lal_sensingtdcfs", sensing_model = 0, freq1 = freqs[0], freq2 = freqs[1], current_fcc = default_fcc, current_fs_squared = default_fs_squared, current_fs_over_Q = default_fs_over_Q)
sensing_outputs = list(mkdeinterleave(pipeline, sensing_outputs, 4))
kappas += sensing_outputs
return kappas
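# The interleaved MV_matrix stream feeds lal_matrixsolver, which solves the
# 2*num_stages x 2*num_stages system M x = V sample by sample; a commented-out
# numpy sketch of one such solve (hypothetical M and V) would be
#   import numpy
#   M = numpy.identity(6)
#   V = numpy.arange(6.0)
#   x = numpy.linalg.solve(M, V)
# The first num_stages outputs are the kappas and the next num_stages are
# kappa * tau, which is why the second half is divided by the first half above
# before the sensing-function TDCFs are appended.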
def compute_S_from_filters_file(pipeline, EP6R, EP6I, pcalfpcal2, derrfpcal2, EP7R, EP7I, ktst, EP8R, EP8I, kpu, EP9R, EP9I):
#
# S = 1/EP6 * ( pcalfpcal2/derrfpcal2 - EP7*(ktst*EP8 + kpu*EP9) ) ^ (-1)
#
pcal_over_derr = complex_division(pipeline, pcalfpcal2, derrfpcal2)
ep8_kappatst = complex_audioamplify(pipeline, ktst, EP8R, EP8I)
ep9_kappapu = complex_audioamplify(pipeline, kpu, EP9R, EP9I)
kappatst_kappapu = mkadder(pipeline, list_srcs(pipeline, ep8_kappatst, ep9_kappapu))
kappatst_kappapu = complex_audioamplify(pipeline, kappatst_kappapu, -1.0*EP7R, -1.0*EP7I)
Sinv = mkadder(pipeline, list_srcs(pipeline, pcal_over_derr, kappatst_kappapu))
Sinv = complex_audioamplify(pipeline, Sinv, EP6R, EP6I)
S = complex_inverse(pipeline, Sinv)
return S
def compute_S_from_filters_file_split_act(pipeline, fpcal2, EP6R, EP6I, pcalfpcal2, derrfpcal2, EP7R, EP7I, ftst, ktst, apply_complex_ktst, EP8R, EP8I, fpum, kpum, apply_complex_kpum, EP18R, EP18I, fuim, kuim, apply_complex_kuim, EP19R, EP19I):
#
# S = (1 / EP6) * (pcalfpcal2 / derrfpcal2 - EP7 * (ktst * EP8 + kpum * EP18 + kuim * EP19))^(-1)
#
if apply_complex_ktst:
ktst = pipeparts.mkgeneric(pipeline, ktst, "lpshiftfreq", frequency_ratio = fpcal2 / ftst)
ep8_ktst = complex_audioamplify(pipeline, ktst, EP8R, EP8I)
else:
ep8_ktst = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, pipeparts.mkgeneric(pipeline, ktst, "cabs"), matrix = [[EP8R, EP8I]]))
if apply_complex_kpum:
kpum = pipeparts.mkgeneric(pipeline, kpum, "lpshiftfreq", frequency_ratio = fpcal2 / fpum)
ep18_kpum = complex_audioamplify(pipeline, kpum, EP18R, EP18I)
else:
ep18_kpum = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, pipeparts.mkgeneric(pipeline, kpum, "cabs"), matrix = [[EP18R, EP18I]]))
if apply_complex_kuim:
kuim = pipeparts.mkgeneric(pipeline, kuim, "lpshiftfreq", frequency_ratio = fpcal2 / fuim)
ep19_kuim = complex_audioamplify(pipeline, kuim, EP19R, EP19I)
else:
ep19_kuim = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, pipeparts.mkgeneric(pipeline, kuim, "cabs"), matrix = [[EP19R, EP19I]]))
pcal_over_derr = complex_division(pipeline, pcalfpcal2, derrfpcal2)
A_at_fpcal2 = mkadder(pipeline, list_srcs(pipeline, ep8_ktst, ep18_kpum, ep19_kuim))
DA_at_fpcal2 = complex_audioamplify(pipeline, A_at_fpcal2, -1.0 * EP7R, -1.0 * EP7I)
Sinv = mkadder(pipeline, list_srcs(pipeline, pcal_over_derr, DA_at_fpcal2))
Sinv = complex_audioamplify(pipeline, Sinv, EP6R, EP6I)
S = complex_inverse(pipeline, Sinv)
return S
def compute_S(pipeline, EP6, pcalfpcal2, derrfpcal2, EP7, ktst, EP8, kpu, EP9):
#
# S = 1/EP6 * ( pcalfpcal2/derrfpcal2 - EP7*(ktst*EP8 + kpu*EP9) ) ^ (-1)
#
pcal_over_derr = complex_division(pipeline, pcalfpcal2, derrfpcal2)
ep8_kappatst = mkmultiplier(pipeline, list_srcs(pipeline, ktst, EP8))
ep9_kappapu = mkmultiplier(pipeline, list_srcs(pipeline, kpu, EP9))
kappatst_kappapu = mkadder(pipeline, list_srcs(pipeline, ep8_kappatst, ep9_kappapu))
kappatst_kappapu = mkmultiplier(pipeline, list_srcs(pipeline, complex_audioamplify(pipeline, EP7, -1.0, 0.0), kappatst_kappapu))
Sinv = mkadder(pipeline, list_srcs(pipeline, pcal_over_derr, kappatst_kappapu))
Sinv = mkmultiplier(pipeline, list_srcs(pipeline, EP6, Sinv))
S = complex_inverse(pipeline, Sinv)
return S
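# Commented-out scalar sketch of the optical-gain ratio S (hypothetical values):
#   EP6, EP7, EP8, EP9 = 1.0 + 0.0j, 0.9 + 0.1j, 0.4 - 0.1j, 0.3 + 0.2j
#   ktst, kpu, pcal_over_derr = 1.02 + 0.01j, 0.98 - 0.02j, 1.1 + 0.3j
#   S = 1.0 / (EP6 * (pcal_over_derr - EP7 * (ktst * EP8 + kpu * EP9)))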
def compute_S_split_act(pipeline, fpcal2, EP6, pcalfpcal2, derrfpcal2, EP7, ftst, ktst, apply_complex_ktst, EP8, fpum, kpum, apply_complex_kpum, EP18, fuim, kuim, apply_complex_kuim, EP19):
#
# S = (1 / EP6) * (pcalfpcal2 / derrfpcal2 - EP7 * (ktst * EP8 + kpum * EP18 + kuim * EP19))^(-1)
#
if apply_complex_ktst:
ktst = pipeparts.mkgeneric(pipeline, ktst, "lpshiftfreq", frequency_ratio = fpcal2 / ftst)
else:
ktst = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, pipeparts.mkgeneric(pipeline, ktst, "cabs"), matrix = [[1.0, 0.0]]))
if apply_complex_kpum:
kpum = pipeparts.mkgeneric(pipeline, kpum, "lpshiftfreq", frequency_ratio = fpcal2 / fpum)
else:
kpum = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, pipeparts.mkgeneric(pipeline, kpum, "cabs"), matrix = [[1.0, 0.0]]))
if apply_complex_kuim:
kuim = pipeparts.mkgeneric(pipeline, kuim, "lpshiftfreq", frequency_ratio = fpcal2 / fuim)
else:
kuim = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, pipeparts.mkgeneric(pipeline, kuim, "cabs"), matrix = [[1.0, 0.0]]))
pcal_over_derr = complex_division(pipeline, pcalfpcal2, derrfpcal2)
ep8_ktst = mkmultiplier(pipeline, list_srcs(pipeline, ktst, EP8))
ep18_kpum = mkmultiplier(pipeline, list_srcs(pipeline, kpum, EP18))
ep19_kuim = mkmultiplier(pipeline, list_srcs(pipeline, kuim, EP19))
A_at_fpcal2 = mkadder(pipeline, list_srcs(pipeline, ep8_ktst, ep18_kpum, ep19_kuim))
DA_at_fpcal2 = mkmultiplier(pipeline, list_srcs(pipeline, complex_audioamplify(pipeline, EP7, -1.0, 0.0), A_at_fpcal2))
Sinv = mkadder(pipeline, list_srcs(pipeline, pcal_over_derr, DA_at_fpcal2))
Sinv = mkmultiplier(pipeline, list_srcs(pipeline, EP6, Sinv))
S = complex_inverse(pipeline, Sinv)
return S
def compute_kappac(pipeline, SR, SI):
#
# \kappa_C = |S|^2 / Re[S]
#
SR = pipeparts.mktee(pipeline, SR)
S2 = mkadder(pipeline, list_srcs(pipeline, mkpow(pipeline, SR, exponent=2.0), mkpow(pipeline, SI, exponent=2.0)))
kc = mkmultiplier(pipeline, list_srcs(pipeline, S2, mkpow(pipeline, SR, exponent=-1.0)))
return kc
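# Scalar form of the above for a single complex sample S: kappa_C = |S|^2 / Re[S], e.g.
#   S = 0.8 + 0.1j   # hypothetical
#   kc = abs(S)**2 / S.real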
def compute_fcc(pipeline, SR, SI, fpcal2, freq_update = None):
#
# f_cc = - (Re[S]/Im[S]) * fpcal2
#
fcc = mkmultiplier(pipeline, list_srcs(pipeline, pipeparts.mkaudioamplify(pipeline, SR, -1.0), mkpow(pipeline, SI, exponent=-1.0)))
fcc = pipeparts.mkaudioamplify(pipeline, fcc, fpcal2)
if freq_update is not None:
freq_update.connect("notify::timestamped-average", update_timestamped_property, fcc, "timestamped_average", "amplification", 1)
return fcc
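# Scalar form: f_cc = -(Re[S] / Im[S]) * fpcal2, e.g. (hypothetical values)
#   S, fpcal2 = 0.8 - 0.002j, 410.3
#   fcc = -(S.real / S.imag) * fpcal2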
def compute_Xi_from_filters_file(pipeline, pcalfpcal4, darmfpcal4, fpcal4, EP11_real, EP11_imag, EP12_real, EP12_imag, EP13_real, EP13_imag, EP14_real, EP14_imag, ktst, kpu, kc, fcc):
#
# Xi = -1 + ((EP11*kc) / (1 + i * f_src/f_cc)) * (pcalfpcal4/derrfpcal4 - EP12*(ktst*EP13 + kpu*EP14))
#
Atst = complex_audioamplify(pipeline, ktst, EP13_real, EP13_imag)
Apu = complex_audioamplify(pipeline, kpu, EP14_real, EP14_imag)
A = mkadder(pipeline, list_srcs(pipeline, Atst, Apu))
minusAD = complex_audioamplify(pipeline, A, -1.0 * EP12_real, -1.0 * EP12_imag)
pcal_over_derr = complex_division(pipeline, pcalfpcal4, darmfpcal4)
pcal_over_derr_res = mkadder(pipeline, list_srcs(pipeline, pcal_over_derr, minusAD))
fpcal4_over_fcc = pipeparts.mkaudioamplify(pipeline, mkpow(pipeline, fcc, exponent = -1.0), fpcal4)
i_fpcal4_over_fcc = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, fpcal4_over_fcc, matrix = [[0, 1]]))
i_fpcal4_over_fcc_plus_one = pipeparts.mkgeneric(pipeline, i_fpcal4_over_fcc, "lal_add_constant", value = 1.0)
i_fpcal4_over_fcc_plus_one_inv = complex_inverse(pipeline, i_fpcal4_over_fcc_plus_one)
kc_EP11 = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, kc, matrix = [[EP11_real, EP11_imag]]))
Xi_plus_one = mkmultiplier(pipeline, list_srcs(pipeline, kc_EP11, i_fpcal4_over_fcc_plus_one_inv, pcal_over_derr_res))
Xi = pipeparts.mkgeneric(pipeline, Xi_plus_one, "lal_add_constant", value = -1.0)
return Xi
def compute_Xi_from_filters_file_split_act(pipeline, pcalfpcal4, darmfpcal4, fpcal4, EP11R, EP11I, EP12R, EP12I, EP13R, EP13I, EP20R, EP20I, EP21R, EP21I, ftst, ktst, apply_complex_ktst, fpum, kpum, apply_complex_kpum, fuim, kuim, apply_complex_kuim, kc, fcc):
#
# Xi = -1 + ((EP11 * kc) / (1 + i * f_src / f_cc)) * (pcalfpcal4 / derrfpcal4 - EP12 * (ktst * EP13 + kpum * EP20 + kuim * EP21))
#
if apply_complex_ktst:
ktst = pipeparts.mkgeneric(pipeline, ktst, "lpshiftfreq", frequency_ratio = fpcal4 / ftst)
Atst = complex_audioamplify(pipeline, ktst, EP13R, EP13I)
else:
Atst = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, pipeparts.mkgeneric(pipeline, ktst, "cabs"), matrix = [[EP13R, EP13I]]))
if apply_complex_kpum:
kpum = pipeparts.mkgeneric(pipeline, kpum, "lpshiftfreq", frequency_ratio = fpcal4 / fpum)
Apum = complex_audioamplify(pipeline, kpum, EP20R, EP20I)
else:
Apum = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, pipeparts.mkgeneric(pipeline, kpum, "cabs"), matrix = [[EP20R, EP20I]]))
if apply_complex_kuim:
kuim = pipeparts.mkgeneric(pipeline, kuim, "lpshiftfreq", frequency_ratio = fpcal4 / fuim)
Auim = complex_audioamplify(pipeline, kuim, EP21R, EP21I)
else:
Auim = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, pipeparts.mkgeneric(pipeline, kuim, "cabs"), matrix = [[EP21R, EP21I]]))
A = mkadder(pipeline, list_srcs(pipeline, Atst, Apum, Auim))
minusAD = complex_audioamplify(pipeline, A, -1.0 * EP12R, -1.0 * EP12I)
pcal_over_derr = complex_division(pipeline, pcalfpcal4, darmfpcal4)
pcal_over_derr_res = mkadder(pipeline, list_srcs(pipeline, pcal_over_derr, minusAD))
fpcal4_over_fcc = pipeparts.mkaudioamplify(pipeline, mkpow(pipeline, fcc, exponent = -1.0), fpcal4)
i_fpcal4_over_fcc = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, fpcal4_over_fcc, matrix = [[0, 1]]))
i_fpcal4_over_fcc_plus_one = pipeparts.mkgeneric(pipeline, i_fpcal4_over_fcc, "lal_add_constant", value = 1.0)
i_fpcal4_over_fcc_plus_one_inv = complex_inverse(pipeline, i_fpcal4_over_fcc_plus_one)
kc_EP11 = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, kc, matrix = [[EP11R, EP11I]]))
Xi_plus_one = mkmultiplier(pipeline, list_srcs(pipeline, kc_EP11, i_fpcal4_over_fcc_plus_one_inv, pcal_over_derr_res))
Xi = pipeparts.mkgeneric(pipeline, Xi_plus_one, "lal_add_constant", value = -1.0)
return Xi
def compute_Xi(pipeline, pcalfpcal4, darmfpcal4, fpcal4, EP11, EP12, EP13, EP14, ktst, kpu, kc, fcc):
#
# Xi = -1 + ((EP11*kc) / (1 + i * f_src/f_cc)) * (pcalfpcal4/derrfpcal4 - EP12*(ktst*EP13 + kpu*EP14))
#
complex_kc = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, kc, matrix=[[1,0]]))
Atst = mkmultiplier(pipeline, list_srcs(pipeline, EP13, ktst))
Apu = mkmultiplier(pipeline, list_srcs(pipeline, EP14, kpu))
A = mkadder(pipeline, list_srcs(pipeline, Atst, Apu))
minusAD = mkmultiplier(pipeline, list_srcs(pipeline, complex_audioamplify(pipeline, EP12, -1.0, 0.0), A))
pcal_over_derr = complex_division(pipeline, pcalfpcal4, darmfpcal4)
pcal_over_derr_res = mkadder(pipeline, list_srcs(pipeline, pcal_over_derr, minusAD))
fpcal4_over_fcc = pipeparts.mkaudioamplify(pipeline, mkpow(pipeline, fcc, exponent = -1.0), fpcal4)
i_fpcal4_over_fcc = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, fpcal4_over_fcc, matrix = [[0, 1]]))
i_fpcal4_over_fcc_plus_one = pipeparts.mkgeneric(pipeline, i_fpcal4_over_fcc, "lal_add_constant", value = 1.0)
i_fpcal4_over_fcc_plus_one_inv = complex_inverse(pipeline, i_fpcal4_over_fcc_plus_one)
Xi_plus_one = mkmultiplier(pipeline, list_srcs(pipeline, EP11, complex_kc, i_fpcal4_over_fcc_plus_one_inv, pcal_over_derr_res))
Xi = pipeparts.mkgeneric(pipeline, Xi_plus_one, "lal_add_constant", value = -1.0)
return Xi
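# Commented-out scalar sketch of the SRC detuning parameter computed above
# (hypothetical values):
#   EP11, EP12, EP13, EP14 = 1.0 + 0.0j, 0.9 + 0.1j, 0.4 - 0.1j, 0.3 + 0.2j
#   ktst, kpu, kc, fcc, fpcal4 = 1.0 + 0.0j, 1.0 + 0.0j, 1.0, 400.0, 7.9
#   pcal_over_derr = 1.05 + 0.2j
#   Xi = -1.0 + (EP11 * kc / (1.0 + 1j * fpcal4 / fcc)) * (pcal_over_derr - EP12 * (ktst * EP13 + kpu * EP14))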
def compute_Xi_split_act(pipeline, pcalfpcal4, darmfpcal4, fpcal4, EP11, EP12, EP13, EP20, EP21, ftst, ktst, apply_complex_ktst, fpum, kpum, apply_complex_kpum, fuim, kuim, apply_complex_kuim, kc, fcc):
#
# Xi = -1 + ((EP11 * kc) / (1 + i * f_src / f_cc)) * (pcalfpcal4 / derrfpcal4 - EP12 * (ktst * EP13 + kpum * EP20 + kuim * EP21))
#
if apply_complex_ktst:
ktst = pipeparts.mkgeneric(pipeline, ktst, "lpshiftfreq", frequency_ratio = fpcal4 / ftst)
else:
ktst = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, pipeparts.mkgeneric(pipeline, ktst, "cabs"), matrix = [[1.0, 0.0]]))
if apply_complex_kpum:
kpum = pipeparts.mkgeneric(pipeline, kpum, "lpshiftfreq", frequency_ratio = fpcal4 / fpum)
else:
kpum = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, pipeparts.mkgeneric(pipeline, kpum, "cabs"), matrix = [[1.0, 0.0]]))
if apply_complex_kuim:
kuim = pipeparts.mkgeneric(pipeline, kuim, "lpshiftfreq", frequency_ratio = fpcal4 / fuim)
else:
kuim = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, pipeparts.mkgeneric(pipeline, kuim, "cabs"), matrix = [[1.0, 0.0]]))
complex_kc = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, kc, matrix=[[1,0]]))
Atst = mkmultiplier(pipeline, list_srcs(pipeline, EP13, ktst))
Apum = mkmultiplier(pipeline, list_srcs(pipeline, EP20, kpum))
Auim = mkmultiplier(pipeline, list_srcs(pipeline, EP21, kuim))
A = mkadder(pipeline, list_srcs(pipeline, Atst, Apum, Auim))
minusAD = mkmultiplier(pipeline, list_srcs(pipeline, complex_audioamplify(pipeline, EP12, -1.0, 0.0), A))
pcal_over_derr = complex_division(pipeline, pcalfpcal4, darmfpcal4)
pcal_over_derr_res = mkadder(pipeline, list_srcs(pipeline, pcal_over_derr, minusAD))
fpcal4_over_fcc = pipeparts.mkaudioamplify(pipeline, mkpow(pipeline, fcc, exponent = -1.0), fpcal4)
i_fpcal4_over_fcc = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, fpcal4_over_fcc, matrix = [[0, 1]]))
i_fpcal4_over_fcc_plus_one = pipeparts.mkgeneric(pipeline, i_fpcal4_over_fcc, "lal_add_constant", value = 1.0)
i_fpcal4_over_fcc_plus_one_inv = complex_inverse(pipeline, i_fpcal4_over_fcc_plus_one)
Xi_plus_one = mkmultiplier(pipeline, list_srcs(pipeline, EP11, complex_kc, i_fpcal4_over_fcc_plus_one_inv, pcal_over_derr_res))
Xi = pipeparts.mkgeneric(pipeline, Xi_plus_one, "lal_add_constant", value = -1.0)
return Xi
def compute_uncertainty_reduction(pipeline, head, demod_samples, median_samples, avg_samples):
#
# How much is the uncertainty of the TDCFs reduced by the running median
# and average, given the length of the demodulation filter?
#
# Represent each process as a filter with the same effect on uncertainty
demod_filt = fir.kaiser(demod_samples, 3 * numpy.pi)
demod_filt /= numpy.sum(demod_filt)
if demod_samples < 1:
demod_filt = numpy.ones(1)
demod_uncertainty_reduction = numpy.sqrt(sum(pow(demod_filt, 2.0)))
# In the limit of large N, a median reduces uncertainty by sqrt(pi/(2N)),
# so pretend it's a filter where each coefficient equals sqrt(pi/2) / N.
median_filt = numpy.ones(median_samples) / median_samples * numpy.sqrt(numpy.pi / 2.0)
if median_samples < 1:
median_filt = numpy.ones(1)
avg_filt = numpy.ones(avg_samples) / avg_samples
if avg_samples < 1:
avg_filt = numpy.ones(1)
effective_filt = numpy.convolve(numpy.convolve(demod_filt, median_filt), avg_filt)
uncertainty_reduction = numpy.sqrt(sum(pow(effective_filt, 2.0)))
return pipeparts.mkaudioamplify(pipeline, head, uncertainty_reduction / demod_uncertainty_reduction)
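# Commented-out numpy sketch of the uncertainty-reduction factor (hypothetical
# filter lengths; scipy's kaiser window stands in for fir.kaiser here):
#   import numpy
#   from scipy.signal.windows import kaiser
#   demod = kaiser(1024, 3 * numpy.pi); demod /= demod.sum()
#   med = numpy.ones(64) / 64 * numpy.sqrt(numpy.pi / 2.0)
#   avg = numpy.ones(128) / 128
#   eff = numpy.convolve(numpy.convolve(demod, med), avg)
#   reduction = numpy.sqrt((eff**2).sum()) / numpy.sqrt((demod**2).sum())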
def compute_calline_uncertainty(pipeline, coh_unc, coh_samples, demod_samples, median_samples, avg_samples):
#
# The coherence uncertainties may not be equal to the
# uncertainties in the line ratios after the low-pass filtering
# (kaiser window), running median, and running mean.
#
# Assume the uncertainty computed from the coherence was estimated using a simple mean.
assumed_unc_reduction = 1.0 / numpy.sqrt(coh_samples)
# Represent each element as a filter with the same effect on uncertainty
demod_filt = fir.kaiser(demod_samples, 3 * numpy.pi)
demod_filt /= numpy.sum(demod_filt)
if demod_samples < 1:
demod_filt = numpy.ones(1)
# In the limit of large N, a median reduces uncertainty by sqrt(pi/(2N)),
# so pretend it's a filter where each coefficient equals sqrt(pi/2) / N.
median_filt = numpy.ones(median_samples) / median_samples * numpy.sqrt(numpy.pi / 2.0)
if median_samples < 1:
median_filt = numpy.ones(1)
avg_filt = numpy.ones(avg_samples) / avg_samples
if avg_samples < 1:
avg_filt = numpy.ones(1)
effective_filt = numpy.convolve(numpy.convolve(demod_filt, median_filt), avg_filt)
uncertainty_reduction = numpy.sqrt(sum(pow(effective_filt, 2.0)))
return pipeparts.mkaudioamplify(pipeline, coh_unc, uncertainty_reduction / assumed_unc_reduction)
def compute_act_stage_uncertainty(pipeline, pcaly_line1_coh, sus_line_coh, coherence_samples, integration_samples, median_smoothing_samples, factors_average_samples, coherence_unc_threshold):
pcaly_line1_coh_clipped = mkinsertgap(pipeline, pcaly_line1_coh, bad_data_intervals = [0, coherence_unc_threshold], replace_value = coherence_unc_threshold, insert_gap = False)
sus_line_coh_clipped = mkinsertgap(pipeline, sus_line_coh, bad_data_intervals = [0, coherence_unc_threshold], replace_value = coherence_unc_threshold, insert_gap = False)
pcaly_line1_unc = compute_calline_uncertainty(pipeline, pcaly_line1_coh_clipped, coherence_samples, integration_samples, median_smoothing_samples, factors_average_samples)
sus_line_unc = compute_calline_uncertainty(pipeline, sus_line_coh_clipped, coherence_samples, integration_samples, median_smoothing_samples, factors_average_samples)
return mkpow(pipeline, mkadder(pipeline, list_srcs(pipeline, mkpow(pipeline, pcaly_line1_unc, exponent = 2.0), mkpow(pipeline, sus_line_unc, exponent = 2.0))), exponent = 0.5)
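# The returned stream is the quadrature sum of the two clipped line
# uncertainties, i.e. sqrt(unc_pcal**2 + unc_sus**2) sample by sample.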
def compute_S_c_uncertainty_from_filters_file(pipeline, EP6_real, EP6_imag, EP7_real, EP7_imag, opt_gain_fcc_line_freq, X2, pcaly_line2_coh, EP8_real, EP8_imag, ktst, tau_tst, apply_complex_kappatst, ktst_unc, EP18_real, EP18_imag, kpum, tau_pum, apply_complex_kappapum, kpum_unc, EP19_real, EP19_imag, kuim, tau_uim, apply_complex_kappauim, kuim_unc, coherence_samples, integration_samples, median_smoothing_samples, factors_average_samples, coherence_unc_threshold):
#
# S_c = (1 / EP6) * (X2 - EP7 * (ktst * EP8 + kpum * EP18 + kuim * EP19))^(-1)
#
EP7_mag = pow(EP7_real * EP7_real + EP7_imag * EP7_imag, 0.5)
EP8_mag = pow(EP8_real * EP8_real + EP8_imag * EP8_imag, 0.5)
EP18_mag = pow(EP18_real * EP18_real + EP18_imag * EP18_imag, 0.5)
EP19_mag = pow(EP19_real * EP19_real + EP19_imag * EP19_imag, 0.5)
X2 = pipeparts.mktee(pipeline, X2)
pcaly_line2_coh_clipped = mkinsertgap(pipeline, pcaly_line2_coh, bad_data_intervals = [0, coherence_unc_threshold], replace_value = coherence_unc_threshold, insert_gap = False)
X2_unc = compute_calline_uncertainty(pipeline, pcaly_line2_coh_clipped, coherence_samples, integration_samples, median_smoothing_samples, factors_average_samples)
X2_unc_abs = mkmultiplier(pipeline, list_srcs(pipeline, X2_unc, pipeparts.mkgeneric(pipeline, X2, "cabs")))
A_tst = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, ktst, matrix = [[EP8_real, EP8_imag]]))
if apply_complex_kappatst:
i_omega_tau_tst = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, tau_tst, matrix = [[0, 2 * numpy.pi * opt_gain_fcc_line_freq]]))
exp_i_omega_tau_tst = pipeparts.mkgeneric(pipeline, i_omega_tau_tst, "cexp")
A_tst = mkmultiplier(pipeline, list_srcs(pipeline, A_tst, exp_i_omega_tau_tst))
A_tst_unc_abs = pipeparts.mkaudioamplify(pipeline, mkmultiplier(pipeline, list_srcs(pipeline, ktst, ktst_unc)), EP8_mag)
A_pum = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, kpum, matrix = [[EP18_real, EP18_imag]]))
if apply_complex_kappapum:
i_omega_tau_pum = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, tau_pum, matrix = [[0, 2 * numpy.pi * opt_gain_fcc_line_freq]]))
exp_i_omega_tau_pum = pipeparts.mkgeneric(pipeline, i_omega_tau_pum, "cexp")
A_pum = mkmultiplier(pipeline, list_srcs(pipeline, A_pum, exp_i_omega_tau_pum))
A_pum_unc_abs = pipeparts.mkaudioamplify(pipeline, mkmultiplier(pipeline, list_srcs(pipeline, kpum, kpum_unc)), EP18_mag)
A_uim = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, kuim, matrix = [[EP19_real, EP19_imag]]))
if apply_complex_kappauim:
i_omega_tau_uim = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, tau_uim, matrix = [[0, 2 * numpy.pi * opt_gain_fcc_line_freq]]))
exp_i_omega_tau_uim = pipeparts.mkgeneric(pipeline, i_omega_tau_uim, "cexp")
A_uim = mkmultiplier(pipeline, list_srcs(pipeline, A_uim, exp_i_omega_tau_uim))
A_uim_unc_abs = pipeparts.mkaudioamplify(pipeline, mkmultiplier(pipeline, list_srcs(pipeline, kuim, kuim_unc)), EP19_mag)
A = pipeparts.mktee(pipeline, mkadder(pipeline, list_srcs(pipeline, A_tst, A_pum, A_uim)))
A_unc_abs = mkpow(pipeline, mkadder(pipeline, list_srcs(pipeline, mkpow(pipeline, A_tst_unc_abs, exponent = 2.0), mkpow(pipeline, A_pum_unc_abs, exponent = 2.0), mkpow(pipeline, A_uim_unc_abs, exponent = 2.0))), exponent = 0.5)
minus_DA = complex_audioamplify(pipeline, A, -EP7_real, -EP7_imag)
DA_unc_abs = pipeparts.mkaudioamplify(pipeline, A_unc_abs, EP7_mag)
X2_minus_DA = pipeparts.mktee(pipeline, mkadder(pipeline, list_srcs(pipeline, X2, minus_DA)))
X2_minus_DA_mag = pipeparts.mkgeneric(pipeline, X2_minus_DA, "cabs")
X2_minus_DA_unc_abs = mkpow(pipeline, mkadder(pipeline, list_srcs(pipeline, mkpow(pipeline, X2_unc_abs, exponent = 2.0), mkpow(pipeline, DA_unc_abs, exponent = 2.0))), exponent = 0.5)
S_c_unc = pipeparts.mktee(pipeline, complex_division(pipeline, X2_minus_DA_unc_abs, X2_minus_DA_mag))
S_c = pipeparts.mktee(pipeline, complex_inverse(pipeline, complex_audioamplify(pipeline, X2_minus_DA, EP6_real, EP6_imag)))
S_c_real = pipeparts.mkgeneric(pipeline, S_c, "creal")
S_c_imag = pipeparts.mkgeneric(pipeline, S_c, "cimag")
S_c_product = pipeparts.mkgeneric(pipeline, mkmultiplier(pipeline, list_srcs(pipeline, S_c_real, S_c_imag)), "cabs")
S_c_square_modulus = mkpow(pipeline, pipeparts.mkgeneric(pipeline, S_c, "cabs"), exponent = 2.0)
S_c_square_modulus_over_S_c_product = complex_division(pipeline, S_c_square_modulus, S_c_product)
fcc_unc = mkmultiplier(pipeline, list_srcs(pipeline, S_c_square_modulus_over_S_c_product, S_c_unc))
return S_c, S_c_unc, fcc_unc
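# Note: since S_c = 1 / (EP6 * (X2 - EP7 * A)), multiplying by the constant
# 1/EP6 and inverting leave the relative uncertainty unchanged, so S_c_unc is
# just the relative uncertainty of (X2 - EP7 * A); fcc_unc then rescales it by
# |S_c|^2 / |Re[S_c] * Im[S_c]|.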
def compute_S_c_uncertainty(pipeline, EP6, EP7, opt_gain_fcc_line_freq, X2, pcaly_line2_coh, EP8, ktst, tau_tst, apply_complex_kappatst, ktst_unc, EP18, kpum, tau_pum, apply_complex_kappapum, kpum_unc, EP19, kuim, tau_uim, apply_complex_kappauim, kuim_unc, coherence_samples, integration_samples, median_smoothing_samples, factors_average_samples, coherence_unc_threshold):
#
# S_c = (1 / EP6) * (X2 - EP7 * (ktst * EP8 + kpum * EP18 + kuim * EP19))^(-1)
#
EP7_mag = pipeparts.mkgeneric(pipeline, EP7, "cabs")
EP8_mag = pipeparts.mkgeneric(pipeline, EP8, "cabs")
EP18_mag = pipeparts.mkgeneric(pipeline, EP18, "cabs")
EP19_mag = pipeparts.mkgeneric(pipeline, EP19, "cabs")
X2 = pipeparts.mktee(pipeline, X2)
pcaly_line2_coh_clipped = mkinsertgap(pipeline, pcaly_line2_coh, bad_data_intervals = [0, coherence_unc_threshold], replace_value = coherence_unc_threshold, insert_gap = False)
X2_unc = compute_calline_uncertainty(pipeline, pcaly_line2_coh_clipped, coherence_samples, integration_samples, median_smoothing_samples, factors_average_samples)
X2_unc_abs = mkmultiplier(pipeline, list_srcs(pipeline, X2_unc, pipeparts.mkgeneric(pipeline, X2, "cabs")))
complex_ktst = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, ktst, matrix = [[1.0, 0.0]]))
A_tst = mkmultiplier(pipeline, list_srcs(pipeline, complex_ktst, EP8))
if apply_complex_kappatst:
i_omega_tau_tst = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, tau_tst, matrix = [[0, 2 * numpy.pi * opt_gain_fcc_line_freq]]))
exp_i_omega_tau_tst = pipeparts.mkgeneric(pipeline, i_omega_tau_tst, "cexp")
A_tst = mkmultiplier(pipeline, list_srcs(pipeline, A_tst, exp_i_omega_tau_tst))
A_tst_unc_abs = mkmultiplier(pipeline, list_srcs(pipeline, ktst, ktst_unc, EP8_mag))
complex_kpum = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, kpum, matrix = [[1.0, 0.0]]))
A_pum = mkmultiplier(pipeline, list_srcs(pipeline, complex_kpum, EP18))
if apply_complex_kappapum:
i_omega_tau_pum = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, tau_pum, matrix = [[0, 2 * numpy.pi * opt_gain_fcc_line_freq]]))
exp_i_omega_tau_pum = pipeparts.mkgeneric(pipeline, i_omega_tau_pum, "cexp")
A_pum = mkmultiplier(pipeline, list_srcs(pipeline, A_pum, exp_i_omega_tau_pum))
A_pum_unc_abs = mkmultiplier(pipeline, list_srcs(pipeline, kpum, kpum_unc, EP18_mag))
complex_kuim = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, kuim, matrix = [[1.0, 0.0]]))
A_uim = mkmultiplier(pipeline, list_srcs(pipeline, complex_kuim, EP19))
if apply_complex_kappauim:
i_omega_tau_uim = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, tau_uim, matrix = [[0, 2 * numpy.pi * opt_gain_fcc_line_freq]]))
exp_i_omega_tau_uim = pipeparts.mkgeneric(pipeline, i_omega_tau_uim, "cexp")
A_uim = mkmultiplier(pipeline, list_srcs(pipeline, A_uim, exp_i_omega_tau_uim))
A_uim_unc_abs = mkmultiplier(pipeline, list_srcs(pipeline, kuim, kuim_unc, EP19_mag))
A = pipeparts.mktee(pipeline, mkadder(pipeline, list_srcs(pipeline, A_tst, A_pum, A_uim)))
A_unc_abs = mkpow(pipeline, mkadder(pipeline, list_srcs(pipeline, mkpow(pipeline, A_tst_unc_abs, exponent = 2.0), mkpow(pipeline, A_pum_unc_abs, exponent = 2.0), mkpow(pipeline, A_uim_unc_abs, exponent = 2.0))), exponent = 0.5)
minus_DA = complex_audioamplify(pipeline, mkmultiplier(pipeline, list_srcs(pipeline, A, EP7)), -1.0, 0.0)
DA_unc_abs = mkmultiplier(pipeline, list_srcs(pipeline, A_unc_abs, EP7_mag))
X2_minus_DA = pipeparts.mktee(pipeline, mkadder(pipeline, list_srcs(pipeline, X2, minus_DA)))
X2_minus_DA_mag = pipeparts.mkgeneric(pipeline, X2_minus_DA, "cabs")
X2_minus_DA_unc_abs = mkpow(pipeline, mkadder(pipeline, list_srcs(pipeline, mkpow(pipeline, X2_unc_abs, exponent = 2.0), mkpow(pipeline, DA_unc_abs, exponent = 2.0))), exponent = 0.5)
S_c_unc = pipeparts.mktee(pipeline, complex_division(pipeline, X2_minus_DA_unc_abs, X2_minus_DA_mag))
S_c = pipeparts.mktee(pipeline, complex_inverse(pipeline, mkmultiplier(pipeline, list_srcs(pipeline, X2_minus_DA, EP6))))
S_c_real = pipeparts.mkgeneric(pipeline, S_c, "creal")
S_c_imag = pipeparts.mkgeneric(pipeline, S_c, "cimag")
S_c_product = pipeparts.mkgeneric(pipeline, mkmultiplier(pipeline, list_srcs(pipeline, S_c_real, S_c_imag)), "cabs")
S_c_square_modulus = mkpow(pipeline, pipeparts.mkgeneric(pipeline, S_c, "cabs"), exponent = 2.0)
S_c_square_modulus_over_S_c_product = complex_division(pipeline, S_c_square_modulus, S_c_product)
fcc_unc = mkmultiplier(pipeline, list_srcs(pipeline, S_c_square_modulus_over_S_c_product, S_c_unc))
return S_c, S_c_unc, fcc_unc
def compute_SRC_uncertainty_from_filters_file(pipeline, EP11_real, EP11_imag, kc, kc_unc, fcc, fcc_unc, EP12_real, EP12_imag, act_pcal_line_freq, X1, pcaly_line1_coh, EP13_real, EP13_imag, ktst, tau_tst, apply_complex_kappatst, ktst_unc, EP20_real, EP20_imag, kpum, tau_pum, apply_complex_kappapum, kpum_unc, EP21_real, EP21_imag, kuim, tau_uim, apply_complex_kappauim, kuim_unc, coherence_samples, integration_samples, median_smoothing_samples, factors_average_samples, coherence_unc_threshold):
#
# S_s^{-1} = ((EP11 * kc) / (1 + i * f_src / f_cc)) * (pcalfpcal4 / derrfpcal4 - EP12 * (ktst * EP13 + kpum * EP20 + kuim * EP21))
#
EP12_mag = pow(EP12_real * EP12_real + EP12_imag * EP12_imag, 0.5)
EP13_mag = pow(EP13_real * EP13_real + EP13_imag * EP13_imag, 0.5)
EP20_mag = pow(EP20_real * EP20_real + EP20_imag * EP20_imag, 0.5)
EP21_mag = pow(EP21_real * EP21_real + EP21_imag * EP21_imag, 0.5)
X1 = pipeparts.mktee(pipeline, X1)
fcc = pipeparts.mktee(pipeline, fcc)
pcaly_line1_coh_clipped = mkinsertgap(pipeline, pcaly_line1_coh, bad_data_intervals = [0, coherence_unc_threshold], replace_value = coherence_unc_threshold, insert_gap = False)
X1_unc = compute_calline_uncertainty(pipeline, pcaly_line1_coh_clipped, coherence_samples, integration_samples, median_smoothing_samples, factors_average_samples)
X1_unc_abs = mkmultiplier(pipeline, list_srcs(pipeline, X1_unc, pipeparts.mkgeneric(pipeline, X1, "cabs")))
A_tst = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, ktst, matrix = [[EP13_real, EP13_imag]]))
if apply_complex_kappatst:
i_omega_tau_tst = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, tau_tst, matrix = [[0, 2 * numpy.pi * act_pcal_line_freq]]))
exp_i_omega_tau_tst = pipeparts.mkgeneric(pipeline, i_omega_tau_tst, "cexp")
A_tst = mkmultiplier(pipeline, list_srcs(pipeline, A_tst, exp_i_omega_tau_tst))
A_tst_unc_abs = pipeparts.mkaudioamplify(pipeline, mkmultiplier(pipeline, list_srcs(pipeline, ktst, ktst_unc)), EP13_mag)
A_pum = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, kpum, matrix = [[EP20_real, EP20_imag]]))
if apply_complex_kappapum:
i_omega_tau_pum = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, tau_pum, matrix = [[0, 2 * numpy.pi * act_pcal_line_freq]]))
exp_i_omega_tau_pum = pipeparts.mkgeneric(pipeline, i_omega_tau_pum, "cexp")
A_pum = mkmultiplier(pipeline, list_srcs(pipeline, A_pum, exp_i_omega_tau_pum))
A_pum_unc_abs = pipeparts.mkaudioamplify(pipeline, mkmultiplier(pipeline, list_srcs(pipeline, kpum, kpum_unc)), EP20_mag)
A_uim = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, kuim, matrix = [[EP21_real, EP21_imag]]))
if apply_complex_kappauim:
i_omega_tau_uim = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, tau_uim, matrix = [[0, 2 * numpy.pi * act_pcal_line_freq]]))
exp_i_omega_tau_uim = pipeparts.mkgeneric(pipeline, i_omega_tau_uim, "cexp")
A_uim = mkmultiplier(pipeline, list_srcs(pipeline, A_uim, exp_i_omega_tau_uim))
A_uim_unc_abs = pipeparts.mkaudioamplify(pipeline, mkmultiplier(pipeline, list_srcs(pipeline, kuim, kuim_unc)), EP21_mag)
A = pipeparts.mktee(pipeline, mkadder(pipeline, list_srcs(pipeline, A_tst, A_pum, A_uim)))
A_unc_abs = mkpow(pipeline, mkadder(pipeline, list_srcs(pipeline, mkpow(pipeline, A_tst_unc_abs, exponent = 2.0), mkpow(pipeline, A_pum_unc_abs, exponent = 2.0), mkpow(pipeline, A_uim_unc_abs, exponent = 2.0))), exponent = 0.5)
minus_DA = complex_audioamplify(pipeline, A, -EP12_real, -EP12_imag)
DA_unc_abs = pipeparts.mkaudioamplify(pipeline, A_unc_abs, EP12_mag)
X1_minus_DA = pipeparts.mktee(pipeline, mkadder(pipeline, list_srcs(pipeline, X1, minus_DA)))
X1_minus_DA_mag = pipeparts.mkgeneric(pipeline, X1_minus_DA, "cabs")
X1_minus_DA_unc_abs = mkpow(pipeline, mkadder(pipeline, list_srcs(pipeline, mkpow(pipeline, X1_unc_abs, exponent = 2.0), mkpow(pipeline, DA_unc_abs, exponent = 2.0))), exponent = 0.5)
S_s_S_c_inverse_unc_squared = pipeparts.mktee(pipeline, mkpow(pipeline, complex_division(pipeline, X1_minus_DA_unc_abs, X1_minus_DA_mag), exponent = 2.0))
S_s_S_c_inverse = pipeparts.mktee(pipeline, complex_audioamplify(pipeline, X1_minus_DA, EP11_real, EP11_imag))
S_s_S_c_inverse_real_squared = pipeparts.mktee(pipeline, mkpow(pipeline, pipeparts.mkgeneric(pipeline, S_s_S_c_inverse, "creal"), exponent = 2.0))
S_s_S_c_inverse_imag_squared = pipeparts.mktee(pipeline, mkpow(pipeline, pipeparts.mkgeneric(pipeline, S_s_S_c_inverse, "cimag"), exponent = 2.0))
complex_kc = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, kc, [[1.0, 0.0]]))
f_over_fcc = complex_inverse(pipeline, pipeparts.mkaudioamplify(pipeline, fcc, 1.0 / act_pcal_line_freq))
i_f_over_fcc = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, f_over_fcc, [[0.0, 1.0]]))
one_plus_i_f_over_fcc = pipeparts.mkgeneric(pipeline, i_f_over_fcc, "lal_add_constant", value = 1.0)
S_c = pipeparts.mktee(pipeline, complex_division(pipeline, complex_kc, one_plus_i_f_over_fcc))
S_c_real_squared = pipeparts.mktee(pipeline, mkpow(pipeline, pipeparts.mkgeneric(pipeline, S_c, "creal"), exponent = 2.0))
S_c_imag_squared = pipeparts.mktee(pipeline, mkpow(pipeline, pipeparts.mkgeneric(pipeline, S_c, "cimag"), exponent = 2.0))
S_s_inv = pipeparts.mktee(pipeline, mkmultiplier(pipeline, list_srcs(pipeline, S_s_S_c_inverse, S_c)))
pipeparts.mkfakesink(pipeline, S_s_inv)
f_squared_over_fcc_squared = pipeparts.mktee(pipeline, mkpow(pipeline, pipeparts.mkaudioamplify(pipeline, fcc, 1.0 / act_pcal_line_freq), exponent = -2.0))
fcc_unc_squared = pipeparts.mktee(pipeline, mkpow(pipeline, fcc_unc, exponent = 2.0))
inv_denominator = mkpow(pipeline, pipeparts.mkgeneric(pipeline, f_squared_over_fcc_squared, "lal_add_constant", value = 1.0), exponent = -2.0)
numerator = pipeparts.mkaudioamplify(pipeline, mkmultiplier(pipeline, list_srcs(pipeline, fcc_unc_squared, mkpow(pipeline, f_squared_over_fcc_squared, exponent = 2.0))), 2.0)
S_c_unc_real_squared = pipeparts.mktee(pipeline, mkadder(pipeline, list_srcs(pipeline, mkpow(pipeline, kc_unc, exponent = 2.0), mkmultiplier(pipeline, list_srcs(pipeline, numerator, inv_denominator)))))
S_c_unc_imag_squared = pipeparts.mktee(pipeline, mkadder(pipeline, list_srcs(pipeline, S_c_unc_real_squared, fcc_unc_squared)))
S_s_inverse_real_unc_abs_squared = mkadder(pipeline, list_srcs(pipeline, mkmultiplier(pipeline, list_srcs(pipeline, S_c_real_squared, S_s_S_c_inverse_real_squared, mkadder(pipeline, list_srcs(pipeline, S_c_unc_real_squared, S_s_S_c_inverse_unc_squared)))), mkmultiplier(pipeline, list_srcs(pipeline, S_c_imag_squared, S_s_S_c_inverse_imag_squared, mkadder(pipeline, list_srcs(pipeline, S_c_unc_imag_squared, S_s_S_c_inverse_unc_squared))))))
S_s_inverse_imag_unc_abs_squared = mkadder(pipeline, list_srcs(pipeline, mkmultiplier(pipeline, list_srcs(pipeline, S_c_real_squared, S_s_S_c_inverse_imag_squared, mkadder(pipeline, list_srcs(pipeline, S_c_unc_real_squared, S_s_S_c_inverse_unc_squared)))), mkmultiplier(pipeline, list_srcs(pipeline, S_c_imag_squared, S_s_S_c_inverse_real_squared, mkadder(pipeline, list_srcs(pipeline, S_c_unc_imag_squared, S_s_S_c_inverse_unc_squared))))))
fs_squared_unc_abs = pipeparts.mkaudioamplify(pipeline, mkpow(pipeline, S_s_inverse_real_unc_abs_squared, exponent = 0.5), act_pcal_line_freq * act_pcal_line_freq)
fs_over_Q_unc_abs = pipeparts.mkaudioamplify(pipeline, mkpow(pipeline, S_s_inverse_imag_unc_abs_squared, exponent = 0.5), act_pcal_line_freq)
return S_s_inv, fs_squared_unc_abs, fs_over_Q_unc_abs
def compute_SRC_uncertainty(pipeline, EP11, kc, kc_unc, fcc, fcc_unc, EP12, act_pcal_line_freq, X1, pcaly_line1_coh, EP13, ktst, tau_tst, apply_complex_kappatst, ktst_unc, EP20, kpum, tau_pum, apply_complex_kappapum, kpum_unc, EP21, kuim, tau_uim, apply_complex_kappauim, kuim_unc, coherence_samples, integration_samples, median_smoothing_samples, factors_average_samples, coherence_unc_threshold):
#
# S_s^{-1} = ((EP11 * kc) / (1 + i * f_src / f_cc)) * (pcalfpcal4 / derrfpcal4 - EP12 * (ktst * EP13 + kpum * EP20 + kuim * EP21))
#
EP12_mag = pipeparts.mkgeneric(pipeline, EP12, "cabs")
EP13_mag = pipeparts.mkgeneric(pipeline, EP13, "cabs")
EP20_mag = pipeparts.mkgeneric(pipeline, EP20, "cabs")
EP21_mag = pipeparts.mkgeneric(pipeline, EP21, "cabs")
X1 = pipeparts.mktee(pipeline, X1)
fcc = pipeparts.mktee(pipeline, fcc)
pcaly_line1_coh_clipped = mkinsertgap(pipeline, pcaly_line1_coh, bad_data_intervals = [0, coherence_unc_threshold], replace_value = coherence_unc_threshold, insert_gap = False)
X1_unc = compute_calline_uncertainty(pipeline, pcaly_line1_coh_clipped, coherence_samples, integration_samples, median_smoothing_samples, factors_average_samples)
X1_unc_abs = mkmultiplier(pipeline, list_srcs(pipeline, X1_unc, pipeparts.mkgeneric(pipeline, X1, "cabs")))
A_tst = mkmultiplier(pipeline, list_srcs(pipeline, pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, ktst, matrix = [[1.0, 0.0]])), EP13))
if apply_complex_kappatst:
i_omega_tau_tst = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, tau_tst, matrix = [[0, 2 * numpy.pi * act_pcal_line_freq]]))
exp_i_omega_tau_tst = pipeparts.mkgeneric(pipeline, i_omega_tau_tst, "cexp")
A_tst = mkmultiplier(pipeline, list_srcs(pipeline, A_tst, exp_i_omega_tau_tst))
A_tst_unc_abs = mkmultiplier(pipeline, list_srcs(pipeline, ktst, ktst_unc, EP13_mag))
A_pum = mkmultiplier(pipeline, list_srcs(pipeline, pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, kpum, matrix = [[1.0, 0.0]])), EP20))
if apply_complex_kappapum:
i_omega_tau_pum = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, tau_pum, matrix = [[0, 2 * numpy.pi * act_pcal_line_freq]]))
exp_i_omega_tau_pum = pipeparts.mkgeneric(pipeline, i_omega_tau_pum, "cexp")
A_pum = mkmultiplier(pipeline, list_srcs(pipeline, A_pum, exp_i_omega_tau_pum))
A_pum_unc_abs = mkmultiplier(pipeline, list_srcs(pipeline, kpum, kpum_unc, EP20_mag))
A_uim = mkmultiplier(pipeline, list_srcs(pipeline, pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, kuim, matrix = [[1.0, 0.0]])), EP21))
if apply_complex_kappauim:
i_omega_tau_uim = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, tau_uim, matrix = [[0, 2 * numpy.pi * act_pcal_line_freq]]))
exp_i_omega_tau_uim = pipeparts.mkgeneric(pipeline, i_omega_tau_uim, "cexp")
A_uim = mkmultiplier(pipeline, list_srcs(pipeline, A_uim, exp_i_omega_tau_uim))
A_uim_unc_abs = mkmultiplier(pipeline, list_srcs(pipeline, kuim, kuim_unc, EP21_mag))
A = pipeparts.mktee(pipeline, mkadder(pipeline, list_srcs(pipeline, A_tst, A_pum, A_uim)))
A_unc_abs = mkpow(pipeline, mkadder(pipeline, list_srcs(pipeline, mkpow(pipeline, A_tst_unc_abs, exponent = 2.0), mkpow(pipeline, A_pum_unc_abs, exponent = 2.0), mkpow(pipeline, A_uim_unc_abs, exponent = 2.0))), exponent = 0.5)
minus_DA = complex_audioamplify(pipeline, mkmultiplier(pipeline, list_srcs(pipeline, A, EP12)), -1.0, 0.0)
DA_unc_abs = mkmultiplier(pipeline, list_srcs(pipeline, A_unc_abs, EP12_mag))
X1_minus_DA = pipeparts.mktee(pipeline, mkadder(pipeline, list_srcs(pipeline, X1, minus_DA)))
X1_minus_DA_mag = pipeparts.mkgeneric(pipeline, X1_minus_DA, "cabs")
X1_minus_DA_unc_abs = mkpow(pipeline, mkadder(pipeline, list_srcs(pipeline, mkpow(pipeline, X1_unc_abs, exponent = 2.0), mkpow(pipeline, DA_unc_abs, exponent = 2.0))), exponent = 0.5)
S_s_S_c_inverse_unc_squared = pipeparts.mktee(pipeline, mkpow(pipeline, complex_division(pipeline, X1_minus_DA_unc_abs, X1_minus_DA_mag), exponent = 2.0))
S_s_S_c_inverse = pipeparts.mktee(pipeline, mkmultiplier(pipeline, list_srcs(pipeline, X1_minus_DA, EP11)))
S_s_S_c_inverse_real_squared = pipeparts.mktee(pipeline, mkpow(pipeline, pipeparts.mkgeneric(pipeline, S_s_S_c_inverse, "creal"), exponent = 2.0))
S_s_S_c_inverse_imag_squared = pipeparts.mktee(pipeline, mkpow(pipeline, pipeparts.mkgeneric(pipeline, S_s_S_c_inverse, "cimag"), exponent = 2.0))
complex_kc = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, kc, [[1.0, 0.0]]))
f_over_fcc = complex_inverse(pipeline, pipeparts.mkaudioamplify(pipeline, fcc, 1.0 / act_pcal_line_freq))
i_f_over_fcc = pipeparts.mktogglecomplex(pipeline, pipeparts.mkmatrixmixer(pipeline, f_over_fcc, [[0.0, 1.0]]))
one_plus_i_f_over_fcc = pipeparts.mkgeneric(pipeline, i_f_over_fcc, "lal_add_constant", value = 1.0)
S_c = pipeparts.mktee(pipeline, complex_division(pipeline, complex_kc, one_plus_i_f_over_fcc))
S_c_real_squared = pipeparts.mktee(pipeline, mkpow(pipeline, pipeparts.mkgeneric(pipeline, S_c, "creal"), exponent = 2.0))
S_c_imag_squared = pipeparts.mktee(pipeline, mkpow(pipeline, pipeparts.mkgeneric(pipeline, S_c, "cimag"), exponent = 2.0))
S_s_inv = pipeparts.mktee(pipeline, mkmultiplier(pipeline, list_srcs(pipeline, S_s_S_c_inverse, S_c)))
pipeparts.mkfakesink(pipeline, S_s_inv)
f_squared_over_fcc_squared = pipeparts.mktee(pipeline, mkpow(pipeline, pipeparts.mkaudioamplify(pipeline, fcc, 1.0 / act_pcal_line_freq), exponent = -2.0))
fcc_unc_squared = pipeparts.mktee(pipeline, mkpow(pipeline, fcc_unc, exponent = 2.0))
inv_denominator = mkpow(pipeline, pipeparts.mkgeneric(pipeline, f_squared_over_fcc_squared, "lal_add_constant", value = 1.0), exponent = -2.0)
numerator = pipeparts.mkaudioamplify(pipeline, mkmultiplier(pipeline, list_srcs(pipeline, fcc_unc_squared, mkpow(pipeline, f_squared_over_fcc_squared, exponent = 2.0))), 2.0)
S_c_unc_real_squared = pipeparts.mktee(pipeline, mkadder(pipeline, list_srcs(pipeline, mkpow(pipeline, kc_unc, exponent = 2.0), mkmultiplier(pipeline, list_srcs(pipeline, numerator, inv_denominator)))))
S_c_unc_imag_squared = pipeparts.mktee(pipeline, mkadder(pipeline, list_srcs(pipeline, S_c_unc_real_squared, fcc_unc_squared)))
S_s_inverse_real_unc_abs_squared = mkadder(pipeline, list_srcs(pipeline, mkmultiplier(pipeline, list_srcs(pipeline, S_c_real_squared, S_s_S_c_inverse_real_squared, mkadder(pipeline, list_srcs(pipeline, S_c_unc_real_squared, S_s_S_c_inverse_unc_squared)))), mkmultiplier(pipeline, list_srcs(pipeline, S_c_imag_squared, S_s_S_c_inverse_imag_squared, mkadder(pipeline, list_srcs(pipeline, S_c_unc_imag_squared, S_s_S_c_inverse_unc_squared))))))
S_s_inverse_imag_unc_abs_squared = mkadder(pipeline, list_srcs(pipeline, mkmultiplier(pipeline, list_srcs(pipeline, S_c_real_squared, S_s_S_c_inverse_imag_squared, mkadder(pipeline, list_srcs(pipeline, S_c_unc_real_squared, S_s_S_c_inverse_unc_squared)))), mkmultiplier(pipeline, list_srcs(pipeline, S_c_imag_squared, S_s_S_c_inverse_real_squared, mkadder(pipeline, list_srcs(pipeline, S_c_unc_imag_squared, S_s_S_c_inverse_unc_squared))))))
fs_squared_unc_abs = pipeparts.mkaudioamplify(pipeline, mkpow(pipeline, S_s_inverse_real_unc_abs_squared, exponent = 0.5), act_pcal_line_freq * act_pcal_line_freq)
fs_over_Q_unc_abs = pipeparts.mkaudioamplify(pipeline, mkpow(pipeline, S_s_inverse_imag_unc_abs_squared, exponent = 0.5), act_pcal_line_freq)
return S_s_inv, fs_squared_unc_abs, fs_over_Q_unc_abs
def find_injection_ratios_from_model(filters, ktst = 1.0, tau_tst = 0.0, kpum = 1.0, tau_pum = 0.0, kuim = 1.0, tau_uim = 0.0, kc = 1.0, fcc = None, fs_squared = None, Qinv = None):
#
# Find the model-predicted X's of eqs. 5.2.22 and 5.2.23 in P1900052, given
# a filters file and values for the TDCFs. Useful mainly for testing.
#
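	# ("TDCFs" here are the time-dependent correction factors passed as arguments: the actuation
	# kappas/taus ktst, tau_tst, kpum, tau_pum, kuim, tau_uim and the sensing parameters kc, fcc,
	# fs_squared and Qinv.)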
f1 = float(filters['ka_pcal_line_freq'])
f2 = float(filters['kc_pcal_line_freq'])
fT = float(filters['ktst_esd_line_freq'])
fP = float(filters['pum_act_line_freq'])
fU = float(filters['uim_act_line_freq'])
Cres1 = float(filters['EP11_real']) + 1j * float(filters['EP11_imag'])
CresDAT1 = float(filters['EP25_real']) + 1j * float(filters['EP25_imag'])
CresDAP1 = float(filters['EP26_real']) + 1j * float(filters['EP26_imag'])
CresDAU1 = float(filters['EP27_real']) + 1j * float(filters['EP27_imag'])
Cres2 = float(filters['EP6_real']) + 1j * float(filters['EP6_imag'])
CresDAT2 = float(filters['EP28_real']) + 1j * float(filters['EP28_imag'])
CresDAP2 = float(filters['EP29_real']) + 1j * float(filters['EP29_imag'])
CresDAU2 = float(filters['EP30_real']) + 1j * float(filters['EP30_imag'])
CresAT0T = float(filters['EP31_real']) + 1j * float(filters['EP31_imag'])
CresDATT = float(filters['EP32_real']) + 1j * float(filters['EP32_imag'])
CresDAPT = float(filters['EP33_real']) + 1j * float(filters['EP33_imag'])
CresDAUT = float(filters['EP34_real']) + 1j * float(filters['EP34_imag'])
CresAP0P = float(filters['EP35_real']) + 1j * float(filters['EP35_imag'])
CresDATP = float(filters['EP36_real']) + 1j * float(filters['EP36_imag'])
CresDAPP = float(filters['EP37_real']) + 1j * float(filters['EP37_imag'])
CresDAUP = float(filters['EP38_real']) + 1j * float(filters['EP38_imag'])
CresAU0U = float(filters['EP39_real']) + 1j * float(filters['EP39_imag'])
CresDATU = float(filters['EP40_real']) + 1j * float(filters['EP40_imag'])
CresDAPU = float(filters['EP41_real']) + 1j * float(filters['EP41_imag'])
CresDAUU = float(filters['EP42_real']) + 1j * float(filters['EP42_imag'])
fcc_model = float(filters['fcc'])
if fcc is None:
fcc = fcc_model
fs_squared_model = float(filters['fs_squared'])
if fs_squared is None:
fs_squared = fs_squared_model
Qinv_model = 1.0 / float(filters['srcQ'])
if Qinv is None:
Qinv = Qinv_model
CresDAT1 *= ktst * numpy.exp(2.0 * numpy.pi * 1j * f1 * tau_tst)
CresDAP1 *= kpum * numpy.exp(2.0 * numpy.pi * 1j * f1 * tau_pum)
CresDAU1 *= kuim * numpy.exp(2.0 * numpy.pi * 1j * f1 * tau_uim)
CresDAT2 *= ktst * numpy.exp(2.0 * numpy.pi * 1j * f2 * tau_tst)
CresDAP2 *= kpum * numpy.exp(2.0 * numpy.pi * 1j * f2 * tau_pum)
CresDAU2 *= kuim * numpy.exp(2.0 * numpy.pi * 1j * f2 * tau_uim)
CresAT0T *= ktst * numpy.exp(2.0 * numpy.pi * 1j * fT * tau_tst)
CresDATT *= ktst * numpy.exp(2.0 * numpy.pi * 1j * fT * tau_tst)
CresDAPT *= kpum * numpy.exp(2.0 * numpy.pi * 1j * fT * tau_pum)
CresDAUT *= kuim * numpy.exp(2.0 * numpy.pi * 1j * fT * tau_uim)
CresAP0P *= kpum * numpy.exp(2.0 * numpy.pi * 1j * fP * tau_pum)
CresDATP *= ktst * numpy.exp(2.0 * numpy.pi * 1j * fP * tau_tst)
CresDAPP *= kpum * numpy.exp(2.0 * numpy.pi * 1j * fP * tau_pum)
CresDAUP *= kuim * numpy.exp(2.0 * numpy.pi * 1j * fP * tau_uim)
CresAU0U *= kuim * numpy.exp(2.0 * numpy.pi * 1j * fU * tau_uim)
CresDATU *= ktst * numpy.exp(2.0 * numpy.pi * 1j * fU * tau_tst)
CresDAPU *= kpum * numpy.exp(2.0 * numpy.pi * 1j * fU * tau_pum)
CresDAUU *= kuim * numpy.exp(2.0 * numpy.pi * 1j * fU * tau_uim)
CresX1 = CresDAT1 + CresDAP1 + CresDAU1 + ((1.0 + 1j * f1 / fcc) / kc) * ((f1 * f1 + fs_squared - 1j * f1 * numpy.sqrt(abs(fs_squared)) * Qinv) / (f1 * f1))
CresX2 = CresDAT2 + CresDAP2 + CresDAU2 + ((1.0 + 1j * f2 / fcc) / kc) * ((f2 * f2 + fs_squared - 1j * f2 * numpy.sqrt(abs(fs_squared)) * Qinv) / (f2 * f2))
CresAT0XT = CresDATT + CresDAPT + CresDAUT + ((1.0 + 1j * fT / fcc) / kc) * ((fT * fT + fs_squared - 1j * fT * numpy.sqrt(abs(fs_squared)) * Qinv) / (fT * fT))
CresAP0XP = CresDATP + CresDAPP + CresDAUP + ((1.0 + 1j * fP / fcc) / kc) * ((fP * fP + fs_squared - 1j * fP * numpy.sqrt(abs(fs_squared)) * Qinv) / (fP * fP))
CresAU0XU = CresDATU + CresDAPU + CresDAUU + ((1.0 + 1j * fU / fcc) / kc) * ((fU * fU + fs_squared - 1j * fU * numpy.sqrt(abs(fs_squared)) * Qinv) / (fU * fU))
X1 = CresX1 / Cres1
X2 = CresX2 / Cres2
XT = CresAT0XT / CresAT0T
XP = CresAP0XP / CresAP0P
XU = CresAU0XU / CresAU0U
return X1, X2, XT, XP, XU
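# The helpers below are GObject "notify" callbacks: when a property changes on one element they copy
# it to another element, either directly (update_property_simple), as a timestamped control point
# (update_timestamped_property, where the property is a (timestamp, value) pair), or as a
# time-reversed FIR kernel (update_filter / update_filters). See the transfer_functions.connect(...)
# calls in clean_data() below for how they are wired up.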
def update_property_simple(prop_maker, arg, prop_taker, maker_prop_name, taker_prop_name, prefactor):
prop = prop_maker.get_property(maker_prop_name)
prop_taker.set_property(taker_prop_name, prefactor * prop)
def update_timestamped_property(prop_maker, arg, prop_taker, maker_prop_name, taker_prop_name, prefactor):
prop = prop_maker.get_property(maker_prop_name)
cs = GstController.InterpolationControlSource.new()
binding = GstController.DirectControlBinding.new_absolute(prop_taker, taker_prop_name, cs)
prop_taker.add_control_binding(binding)
cs.set_property('mode', GstController.InterpolationMode.NONE) # no interpolation
cs.set(int(prop[0] * Gst.SECOND), prefactor * prop[1])
def update_filter(filter_maker, arg, filter_taker, maker_prop_name, taker_prop_name):
firfilter = filter_maker.get_property(maker_prop_name)[::-1]
filter_taker.set_property(taker_prop_name, firfilter)
def update_filters(filter_maker, arg, filter_taker, maker_prop_name, taker_prop_name, filter_number):
firfilter = filter_maker.get_property(maker_prop_name)[filter_number][::-1]
filter_taker.set_property(taker_prop_name, firfilter)
def clean_data(pipeline, signal, signal_rate, witnesses, witness_rate, fft_length, fft_overlap, num_ffts, min_ffts, update_samples, fir_length, frequency_resolution, filter_taper_length, use_median = False, parallel_mode = False, notch_frequencies = [], high_pass = 15.0, noisesub_gate_bit = None, delay_time = 0.0, critical_lock_loss_time = 0, fft_window_type = 'dpss', fir_window_type = 'dpss', filename = None):
#
# Use witness channels that monitor the environment to remove environmental noise
# from a signal of interest. This function accounts for potential correlation
# between witness channels.
#
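	# Data flow, as implemented below: the signal and resampled witnesses are interleaved and fed to
	# mktransferfunction, which estimates witness-to-signal FIR filters; each witness is high-passed,
	# filtered through lal_tdwhiten with those (periodically updated) kernels, resampled back to the
	# signal rate, and summed with the original signal. The subtraction sign is assumed to be built
	# into the FIR filters produced by the transfer-function element.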
signal_tee = pipeparts.mktee(pipeline, signal)
witnesses = list(witnesses)
witness_tees = []
for i in range(0, len(witnesses)):
witnesses[i] = mkresample(pipeline, witnesses[i], 4, False, witness_rate)
witness_tees.append(pipeparts.mktee(pipeline, witnesses[i]))
resampled_signal = mkresample(pipeline, signal_tee, 4, False, witness_rate)
transfer_functions = mkinterleave(pipeline, numpy.insert(witness_tees, 0, resampled_signal, axis = 0))
if noisesub_gate_bit is not None:
transfer_functions = mkgate(pipeline, transfer_functions, noisesub_gate_bit, 1)
transfer_functions = mktransferfunction(pipeline, transfer_functions, fft_length = fft_length, fft_overlap = fft_overlap, num_ffts = num_ffts, min_ffts = min_ffts, update_samples = update_samples, make_fir_filters = -1, fir_length = fir_length, frequency_resolution = frequency_resolution, high_pass = high_pass / 2.0, update_after_gap = True, use_median = use_median, parallel_mode = parallel_mode, notch_frequencies = notch_frequencies, use_first_after_gap = critical_lock_loss_time * witness_rate, update_delay_samples = int(delay_time * witness_rate), fir_timeshift = 0, fft_window_type = fft_window_type, fir_window_type = fir_window_type, filename = filename)
signal_minus_noise = [signal_tee]
for i in range(0, len(witnesses)):
if parallel_mode:
minus_noise = pipeparts.mkgeneric(pipeline, mkqueue(pipeline, highpass(pipeline, witness_tees[i], witness_rate, fcut = high_pass, freq_res = high_pass / 3.0)), "lal_tdwhiten", kernel = numpy.zeros(fir_length), latency = fir_length // 2, taper_length = filter_taper_length, kernel_endtime = 0)
transfer_functions.connect("notify::fir-filters", update_filters, minus_noise, "fir_filters", "kernel", i)
transfer_functions.connect("notify::fir-endtime", update_property_simple, minus_noise, "fir_endtime", "kernel_endtime", 1)
else:
minus_noise = pipeparts.mkgeneric(pipeline, highpass(pipeline, witness_tees[i], witness_rate, fcut = high_pass, freq_res = high_pass / 3.0), "lal_tdwhiten", kernel = numpy.zeros(fir_length), latency = fir_length // 2, taper_length = filter_taper_length)
transfer_functions.connect("notify::fir-filters", update_filters, minus_noise, "fir_filters", "kernel", i)
signal_minus_noise.append(mkresample(pipeline, minus_noise, 4, False, signal_rate))
return mkadder(pipeline, tuple(signal_minus_noise))
| [] |
2024-01-10 | alvations/transformers | examples~run_generation.py | #!/usr/bin/env python3
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Conditional text generation with the auto-regressive models of the library (GPT/GPT-2/Transformer-XL/XLNet)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
from tqdm import trange
import torch
import torch.nn.functional as F
import numpy as np
from transformers import GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig, XLMConfig
from transformers import GPT2LMHeadModel, GPT2Tokenizer
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
from transformers import XLNetLMHeadModel, XLNetTokenizer
from transformers import TransfoXLLMHeadModel, TransfoXLTokenizer
from transformers import XLMWithLMHeadModel, XLMTokenizer
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig, XLMConfig)), ())
MODEL_CLASSES = {
'gpt2': (GPT2LMHeadModel, GPT2Tokenizer),
'openai-gpt': (OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
'xlnet': (XLNetLMHeadModel, XLNetTokenizer),
'transfo-xl': (TransfoXLLMHeadModel, TransfoXLTokenizer),
'xlm': (XLMWithLMHeadModel, XLMTokenizer),
}
# Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
# in https://github.com/rusiaaman/XLNet-gen#methodology
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
PADDING_TEXT = """ In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""
def set_seed(args):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k > 0: keep only top k tokens with highest probability (top-k filtering).
top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
assert logits.dim() == 1 # batch size 1 for now - could be updated for more but the code would be less clear
top_k = min(top_k, logits.size(-1)) # Safety check
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
return logits
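# Usage sketch (mirrors sample_sequence below; the top_k/top_p values are illustrative only):
#   next_token_logits = outputs[0][0, -1, :] / temperature
#   filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=40, top_p=0.9)
#   next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)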
def sample_sequence(model, length, context, num_samples=1, temperature=1, top_k=0, top_p=0.0, is_xlnet=False,
xlm_lang=None, device='cpu'):
context = torch.tensor(context, dtype=torch.long, device=device)
context = context.unsqueeze(0).repeat(num_samples, 1)
generated = context
with torch.no_grad():
for _ in trange(length):
inputs = {'input_ids': generated}
if is_xlnet:
# XLNet is a direct (predict same token, not next token) and bi-directional model by default
# => need one additional dummy token in the input (will be masked), attention mask and target mapping (see model docstring)
input_ids = torch.cat((generated, torch.zeros((1, 1), dtype=torch.long, device=device)), dim=1)
perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float, device=device)
perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token
target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float, device=device)
target_mapping[0, 0, -1] = 1.0 # predict last token
inputs = {'input_ids': input_ids, 'perm_mask': perm_mask, 'target_mapping': target_mapping}
if xlm_lang is not None:
inputs["langs"] = torch.tensor([xlm_lang] * inputs["input_ids"].shape[1]).view(1, -1)
outputs = model(**inputs) # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden-states)
next_token_logits = outputs[0][0, -1, :] / temperature
filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
generated = torch.cat((generated, next_token.unsqueeze(0)), dim=1)
return generated
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model_type", default=None, type=str, required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
parser.add_argument("--prompt", type=str, default="")
parser.add_argument("--padding_text", type=str, default="")
parser.add_argument("--xlm_lang", type=str, default="", help="Optional language when used with the XLM model.")
parser.add_argument("--length", type=int, default=20)
parser.add_argument("--temperature", type=float, default=1.0)
parser.add_argument("--top_k", type=int, default=0)
parser.add_argument("--top_p", type=float, default=0.9)
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--stop_token', type=str, default=None,
help="Token at which text generation is stopped")
args = parser.parse_args()
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
set_seed(args)
args.model_type = args.model_type.lower()
model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
model = model_class.from_pretrained(args.model_name_or_path)
model.to(args.device)
model.eval()
if args.length < 0 and model.config.max_position_embeddings > 0:
args.length = model.config.max_position_embeddings
elif 0 < model.config.max_position_embeddings < args.length:
args.length = model.config.max_position_embeddings # No generation bigger than model size
elif args.length < 0:
args.length = MAX_LENGTH # avoid infinite loop
print(args)
while True:
xlm_lang = None
# XLM Language usage detailed in the issues #1414
if args.model_type in ["xlm"] and hasattr(tokenizer, 'lang2id') and hasattr(model.config, 'use_lang_emb') \
and model.config.use_lang_emb:
if args.xlm_lang:
language = args.xlm_lang
else:
language = None
while language not in tokenizer.lang2id.keys():
language = input("Using XLM. Select language in " + str(list(tokenizer.lang2id.keys())) + " >>> ")
xlm_lang = tokenizer.lang2id[language]
raw_text = args.prompt if args.prompt else input("Model prompt >>> ")
if args.model_type in ["transfo-xl", "xlnet"]:
            # Models with memory like to have a long prompt for short inputs.
raw_text = (args.padding_text if args.padding_text else PADDING_TEXT) + raw_text
context_tokens = tokenizer.encode(raw_text)
out = sample_sequence(
model=model,
context=context_tokens,
length=args.length,
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
is_xlnet=bool(args.model_type == "xlnet"),
xlm_lang=xlm_lang,
device=args.device,
)
out = out[0, len(context_tokens):].tolist()
text = tokenizer.decode(out, clean_up_tokenization_spaces=True, skip_special_tokens=True)
text = text[: text.find(args.stop_token) if args.stop_token else None]
print(text)
if args.prompt:
break
return text
if __name__ == '__main__':
main()
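# Example invocation (the "gpt2" shortcut is one of the checkpoints listed in ALL_MODELS; any other
# supported model type/path works the same way):
#   python run_generation.py --model_type gpt2 --model_name_or_path gpt2 \
#       --prompt "Once upon a time" --length 40 --top_k 40 --top_p 0.9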
| [] |
2024-01-10 | yitong241/LiteraLink | trash~response.py | import warnings
warnings.filterwarnings('ignore')
from langchain.document_loaders import TextLoader  # for text files
from langchain.text_splitter import CharacterTextSplitter  # text splitter
from langchain.embeddings import HuggingFaceEmbeddings  # for using HuggingFace models
# Vectorstore: https://python.langchain.com/en/latest/modules/indexes/vectorstores.html
from langchain.vectorstores import FAISS  # Facebook AI Similarity Search vector store
from langchain.chains.question_answering import load_qa_chain
from langchain import HuggingFaceHub
'''
import requests
url2 = "https://github.com/fabiomatricardi/cdQnA/raw/main/KS-all-info_rev1.txt"
res = requests.get(url2)
with open("KS-all-info_rev1.txt", "w") as f:
f.write(res.text)
'''
import os
def generate_response(txt_file, query):
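    # NOTE: 'xxx' below is a placeholder, not a real token; a valid Hugging Face Hub API token is
    # required for the HuggingFaceHub calls further down.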
os.environ['HUGGINGFACEHUB_API_TOKEN'] = 'xxx'
    # Document Loader
    loader = TextLoader(txt_file)
    documents = loader.load()
import textwrap
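    # Helper for pretty-printing wrapped text; defined here but not used in the retrieval flow below.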
def wrap_text_preserve_newlines(text, width=110):
# Split the input text into lines based on newline characters
lines = text.split('\n')
# Wrap each line individually
wrapped_lines = [textwrap.fill(line, width=width) for line in lines]
# Join the wrapped lines back together using newline characters
wrapped_text = '\n'.join(wrapped_lines)
return wrapped_text
    # Text Splitter
    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=10)
    docs = text_splitter.split_documents(documents)
    # Embeddings
    embeddings = HuggingFaceEmbeddings()
    # Create the vectorized db
    # Vectorstore: https://python.langchain.com/en/latest/modules/indexes/vectorstores.html
    db = FAISS.from_documents(docs, embeddings)
    llm6 = HuggingFaceHub(repo_id="MBZUAI/LaMini-Flan-T5-783M", model_kwargs={"temperature": 0, "max_length": 512})
    chain = load_qa_chain(llm6, chain_type="stuff")
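    # chain_type="stuff" simply concatenates the retrieved chunks into a single prompt for the LLM.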
    # Retrieve the chunks most relevant to the question and run the QA chain on them
docs = db.similarity_search(query)
response = chain.run(input_documents=docs, question=query)
return response
if __name__ == '__main__':
    print(generate_response('./sample_pdf/KS-all-info_rev1.txt', 'What is Hierarchy 4.0?'))
| [] |