date_collected
stringclasses 1
value | repo_name
stringlengths 6
116
| file_name
stringlengths 2
220
| file_contents
stringlengths 13
357k
| prompts
sequence |
---|---|---|---|---|
2024-01-10 | docugami/langchain | libs~langchain~langchain~utils~aiter.py | from langchain_core.utils.aiter import NoLock, Tee, py_anext
__all__ = ["py_anext", "NoLock", "Tee"]
| [] |
2024-01-10 | docugami/langchain | libs~langchain~langchain~document_loaders~wikipedia.py | from typing import List, Optional
from langchain_core.documents import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.wikipedia import WikipediaAPIWrapper
class WikipediaLoader(BaseLoader):
"""Load from `Wikipedia`.
The hard limit on the length of the query is 300 for now.
Each wiki page represents one Document.
"""
def __init__(
self,
query: str,
lang: str = "en",
load_max_docs: Optional[int] = 25,
load_all_available_meta: Optional[bool] = False,
doc_content_chars_max: Optional[int] = 4000,
):
"""
Initializes a new instance of the WikipediaLoader class.
Args:
query (str): The query string to search on Wikipedia.
lang (str, optional): The language code for the Wikipedia language edition.
Defaults to "en".
load_max_docs (int, optional): The maximum number of documents to load.
Defaults to 100.
load_all_available_meta (bool, optional): Indicates whether to load all
available metadata for each document. Defaults to False.
doc_content_chars_max (int, optional): The maximum number of characters
for the document content. Defaults to 4000.
"""
self.query = query
self.lang = lang
self.load_max_docs = load_max_docs
self.load_all_available_meta = load_all_available_meta
self.doc_content_chars_max = doc_content_chars_max
def load(self) -> List[Document]:
"""
Loads the query result from Wikipedia into a list of Documents.
Returns:
List[Document]: A list of Document objects representing the loaded
Wikipedia pages.
"""
client = WikipediaAPIWrapper(
lang=self.lang,
top_k_results=self.load_max_docs,
load_all_available_meta=self.load_all_available_meta,
doc_content_chars_max=self.doc_content_chars_max,
)
docs = client.load(self.query)
return docs
| [] |
2024-01-10 | docugami/langchain | libs~langchain~langchain~memory~readonly.py | from typing import Any, Dict, List
from langchain_core.memory import BaseMemory
class ReadOnlySharedMemory(BaseMemory):
"""A memory wrapper that is read-only and cannot be changed."""
memory: BaseMemory
@property
def memory_variables(self) -> List[str]:
"""Return memory variables."""
return self.memory.memory_variables
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Load memory variables from memory."""
return self.memory.load_memory_variables(inputs)
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Nothing should be saved or changed"""
pass
def clear(self) -> None:
"""Nothing to clear, got a memory like a vault."""
pass
| [] |
2024-01-10 | docugami/langchain | libs~langchain~langchain~embeddings~vertexai.py | from typing import Dict, List
from langchain_core.embeddings import Embeddings
from langchain_core.pydantic_v1 import root_validator
from langchain.llms.vertexai import _VertexAICommon
from langchain.utilities.vertexai import raise_vertex_import_error
class VertexAIEmbeddings(_VertexAICommon, Embeddings):
"""Google Cloud VertexAI embedding models."""
model_name: str = "textembedding-gecko"
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validates that the python package exists in environment."""
cls._try_init_vertexai(values)
try:
from vertexai.language_models import TextEmbeddingModel
except ImportError:
raise_vertex_import_error()
values["client"] = TextEmbeddingModel.from_pretrained(values["model_name"])
return values
def embed_documents(
self, texts: List[str], batch_size: int = 5
) -> List[List[float]]:
"""Embed a list of strings. Vertex AI currently
sets a max batch size of 5 strings.
Args:
texts: List[str] The list of strings to embed.
batch_size: [int] The batch size of embeddings to send to the model
Returns:
List of embeddings, one for each text.
"""
embeddings = []
for batch in range(0, len(texts), batch_size):
text_batch = texts[batch : batch + batch_size]
embeddings_batch = self.client.get_embeddings(text_batch)
embeddings.extend([el.values for el in embeddings_batch])
return embeddings
def embed_query(self, text: str) -> List[float]:
"""Embed a text.
Args:
text: The text to embed.
Returns:
Embedding for the text.
"""
embeddings = self.client.get_embeddings([text])
return embeddings[0].values
| [] |
2024-01-10 | gsilva2016/Video-LLaVA | llava~eval~video~eval_video_qa.py | import openai
import os
import argparse
import json
import ast
from multiprocessing.pool import Pool
from tqdm import tqdm
def parse_args():
parser = argparse.ArgumentParser(description="question-answer-generation-using-gpt-3")
parser.add_argument("--pred_path", default=r'', help="The path to file containing prediction.")
parser.add_argument("--output_dir", default=r'', help="The path to save annotation json files.")
parser.add_argument("--output_json", default=r'', help="The path to save annotation final combined json file.")
parser.add_argument("--api_key", default="", help="OpenAI API key.")
parser.add_argument("--api_base", default="", type=str, help="OpenAI API base.")
parser.add_argument("--num_tasks", default=1, type=int, help="Number of splits.")
args = parser.parse_args()
return args
def annotate(prediction_set, caption_files, output_dir, args):
"""
Evaluates question and answer pairs using GPT-3
Returns a score for correctness.
"""
# Set the OpenAI API key.
openai.api_key = args.api_key
if args.api_base is not None:
openai.api_base = args.api_base
for file in caption_files:
key = file[:-5] # Strip file extension
qa_set = prediction_set[key]
question = qa_set['q']
answer = qa_set['a']
pred = qa_set['pred']
try:
# Compute the correctness score
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{
"role": "system",
"content":
"You are an intelligent chatbot designed for evaluating the correctness of generative outputs for question-answer pairs. "
"Your task is to compare the predicted answer with the correct answer and determine if they match meaningfully. Here's how you can accomplish the task:"
"------"
"##INSTRUCTIONS: "
"- Focus on the meaningful match between the predicted answer and the correct answer.\n"
"- Consider synonyms or paraphrases as valid matches.\n"
"- Evaluate the correctness of the prediction compared to the answer."
},
{
"role": "user",
"content":
"Please evaluate the following video-based question-answer pair:\n\n"
f"Question: {question}\n"
f"Correct Answer: {answer}\n"
f"Predicted Answer: {pred}\n\n"
"Provide your evaluation only as a yes/no and score where the score is an integer value between 0 and 5, with 5 indicating the highest meaningful match. "
"Please generate the response in the form of a Python dictionary string with keys 'pred' and 'score', where value of 'pred' is a string of 'yes' or 'no' and value of 'score' is in INTEGER, not STRING."
"DO NOT PROVIDE ANY OTHER OUTPUT TEXT OR EXPLANATION. Only provide the Python dictionary string. "
"For example, your response should look like this: {'pred': 'yes', 'score': 4.8}."
}
]
)
# Convert response to a Python dictionary.
response_message = completion["choices"][0]["message"]["content"]
response_dict = ast.literal_eval(response_message)
result_qa_pair = [response_dict, qa_set]
# Save the question-answer pairs to a json file.
with open(f"{output_dir}/{key}.json", "w") as f:
json.dump(result_qa_pair, f)
except Exception as e:
print(f"Error processing file '{key}': {e}")
def main():
"""
Main function to control the flow of the program.
"""
# Parse arguments.
args = parse_args()
file = open(args.pred_path)
new_pred_contents = [eval(i.strip()) for i in file.readlines()]
'''
# Dictionary to store the count of occurrences for each video_id
video_id_counts = {}
new_pred_contents = []
# Iterate through each sample in pred_contents
for sample in pred_contents:
video_id = sample['video_name']
if video_id in video_id_counts:
video_id_counts[video_id] += 1
else:
video_id_counts[video_id] = 0
# Create a new sample with the modified key
new_sample = sample
new_sample['video_name'] = f"{video_id}_{video_id_counts[video_id]}"
new_pred_contents.append(new_sample)
'''
# Generating list of id's and corresponding files
id_list = [x['id'] for x in new_pred_contents]
caption_files = [f"{id}.json" for id in id_list]
output_dir = args.output_dir
# Generate output directory if not exists.
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Preparing dictionary of question-answer sets
prediction_set = {}
for sample in new_pred_contents:
id = sample['id']
question = sample['question']
answer = sample['answer']
pred = sample['pred']
qa_set = {"q": question, "a": answer, "pred": pred}
prediction_set[id] = qa_set
num_tasks = args.num_tasks
# While loop to ensure that all captions are processed.
while True:
try:
# Files that have not been processed yet.
completed_files = os.listdir(output_dir)
print(f"completed_files: {len(completed_files)}")
# Files that have not been processed yet.
incomplete_files = [f for f in caption_files if f not in completed_files]
print(f"incomplete_files: {len(incomplete_files)}")
# Break the loop when there are no incomplete files
if len(incomplete_files) == 0:
break
if len(incomplete_files) <= num_tasks:
num_tasks = 1
# Split tasks into parts.
part_len = len(incomplete_files) // num_tasks
all_parts = [incomplete_files[i:i + part_len] for i in range(0, len(incomplete_files), part_len)]
task_args = [(prediction_set, part, args.output_dir, args) for part in all_parts]
# Use a pool of workers to process the files in parallel.
with Pool() as pool:
pool.starmap(annotate, task_args)
except Exception as e:
print(f"Error: {e}")
# Combine all the processed files into one
combined_contents = {}
json_path = args.output_json
# Iterate through json files
for file_name in os.listdir(output_dir):
if file_name.endswith(".json"):
file_path = os.path.join(output_dir, file_name)
with open(file_path, "r") as json_file:
content = json.load(json_file)
combined_contents[file_name[:-5]] = content
# Write combined content to a json file
with open(json_path, "w") as json_file:
json.dump(combined_contents, json_file)
print("All evaluation completed!")
# Calculate average score and accuracy
score_sum = 0
count = 0
yes_count = 0
no_count = 0
for key, result in tqdm(combined_contents.items()):
try:
# Computing score
count += 1
score_match = result[0]['score']
score = int(score_match)
score_sum += score
# Computing accuracy
pred = result[0]['pred']
if "yes" in pred.lower():
yes_count += 1
elif "no" in pred.lower():
no_count += 1
except:
print(result)
average_score = score_sum / count
accuracy = yes_count / (yes_count + no_count)
print("Yes count:", yes_count)
print("No count:", no_count)
print("Accuracy:", accuracy)
print("Average score:", average_score)
if __name__ == "__main__":
main()
| [
"Please evaluate the following video-based question-answer pair:\n\nQuestion: PLACEHOLDER\nCorrect Answer: PLACEHOLDER\nPredicted Answer: PLACEHOLDER\n\nProvide your evaluation only as a yes/no and score where the score is an integer value between 0 and 5, with 5 indicating the highest meaningful match. Please generate the response in the form of a Python dictionary string with keys 'pred' and 'score', where value of 'pred' is a string of 'yes' or 'no' and value of 'score' is in INTEGER, not STRING.DO NOT PROVIDE ANY OTHER OUTPUT TEXT OR EXPLANATION. Only provide the Python dictionary string. For example, your response should look like this: {'pred': 'yes', 'score': 4.8}.",
"You are an intelligent chatbot designed for evaluating the correctness of generative outputs for question-answer pairs. Your task is to compare the predicted answer with the correct answer and determine if they match meaningfully. Here's how you can accomplish the task:------##INSTRUCTIONS: - Focus on the meaningful match between the predicted answer and the correct answer.\n- Consider synonyms or paraphrases as valid matches.\n- Evaluate the correctness of the prediction compared to the answer."
] |
2024-01-10 | Snaiel/OpenAI-Milvus-QA-Over-Docs | qa_over_docs~vector_db.py | from langchain.document_loaders import WebBaseLoader, CSVLoader, PDFPlumberLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.docstore.document import Document
from langchain.vectorstores.milvus import Milvus
from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType, utility
import validators, os
from qa_over_docs import r_db, api, relational_db
print("retrieving Embeddings model")
embeddings = api
EMBEDDINGS_DIMENSIONS = len(embeddings.embed_query("query"))
DOCUMENTS_STORE_NAME = "OpenAI_QA_Over_Docs_Sources"
QUESTIONS_STORE_NAME = "OpenAI_QA_Over_Docs_Questions"
UPLOAD_FOLDER = 'uploads/'
sources_vector_store: Milvus
questions_vector_store: Milvus
connections.connect()
def collection_exists():
return utility.has_collection(DOCUMENTS_STORE_NAME)
def create_collections():
create_sources_collection()
create_questions_collection()
def create_sources_collection():
if utility.has_collection(DOCUMENTS_STORE_NAME):
print(f"Dropping {DOCUMENTS_STORE_NAME} collection")
collection = Collection(DOCUMENTS_STORE_NAME)
collection.drop()
print(f"Creating {DOCUMENTS_STORE_NAME} collection")
# 1. define fields
fields = [
FieldSchema(name="pk", dtype=DataType.INT64, is_primary=True, auto_id=True),
FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=65_535),
FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=EMBEDDINGS_DIMENSIONS),
FieldSchema(name='metadata', dtype=DataType.JSON)
]
# 2. enable dynamic schema in schema definition
schema = CollectionSchema(
fields,
"Documents as context to give to large language model"
)
# 3. reference the schema in a collection
collection = Collection(DOCUMENTS_STORE_NAME, schema)
# 4. index the vector field and load the collection
index_params = {
"metric_type": "L2",
"index_type": "HNSW",
"params": {"M": 8, "efConstruction": 64},
}
collection.create_index(
field_name="vector",
index_params=index_params
)
# 5. load the collection
collection.load()
print(f"{DOCUMENTS_STORE_NAME} collection loaded")
global sources_vector_store
sources_vector_store = Milvus(embeddings, DOCUMENTS_STORE_NAME)
def create_questions_collection():
if utility.has_collection(QUESTIONS_STORE_NAME):
print(f"Dropping {QUESTIONS_STORE_NAME} collection")
collection = Collection(QUESTIONS_STORE_NAME)
collection.drop()
print(f"Creating {QUESTIONS_STORE_NAME} collection")
# 1. define fields
fields = [
FieldSchema(name="pk", dtype=DataType.INT64, is_primary=True, auto_id=True),
FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=EMBEDDINGS_DIMENSIONS, description="vector embedding of question"),
FieldSchema(name="question_id", dtype=DataType.INT64, description="primary key of question in relational database")
]
# 2. enable dynamic schema in schema definition
schema = CollectionSchema(
fields,
"Store previous questions and answers"
)
# 3. reference the schema in a collection
collection = Collection(QUESTIONS_STORE_NAME, schema)
# 4. index the vector field and load the collection
index_params = {
"metric_type": "L2",
"index_type": "HNSW",
"params": {"M": 8, "efConstruction": 64},
}
collection.create_index(
field_name="vector",
index_params=index_params
)
# 5. load the collection
collection.load()
print(f"{QUESTIONS_STORE_NAME} collection loaded")
global questions_vector_store
questions_vector_store = Milvus(embeddings, QUESTIONS_STORE_NAME)
def add_sources(sources: list[str]):
print(f"Adding sources to {DOCUMENTS_STORE_NAME} collection")
loaders = []
for source in sources:
if validators.url(source):
loaders.append(WebBaseLoader(source))
elif os.path.exists(os.path.join(UPLOAD_FOLDER, source)):
path = os.path.join(UPLOAD_FOLDER, source)
_, ext = os.path.splitext(source)
ext = ext.lower()
if ext == ".csv":
with open(path, 'r+') as csv_file:
csv_content = csv_file.read()
csv_file.seek(0)
csv_file.truncate()
csv_file.write(csv_content.strip())
loaders.append(CSVLoader(path))
elif ext == ".pdf":
loaders.append(PDFPlumberLoader(path))
def put_metadata_into_sub_dict(docs: list[Document]) -> list[Document]:
for doc in docs:
doc.metadata = {
'metadata': doc.metadata
}
return docs
if loaders:
docs = []
for loader in loaders:
new_docs = loader.load()
new_docs = put_metadata_into_sub_dict(new_docs)
docs.extend(new_docs)
for i in docs:
print(i)
text_splitter = RecursiveCharacterTextSplitter()
documents = text_splitter.split_documents(docs)
ids = sources_vector_store.add_documents(documents)
print(f"Successfully added sources to {DOCUMENTS_STORE_NAME} collection")
for id in ids:
source = relational_db.Source(vector_id=id)
r_db.session.add(source)
r_db.session.commit()
print(f"Successfully added sources to relational database")
def add_question(id: str, text: str):
embedded_question = embeddings.embed_query(text)
collection = Collection(QUESTIONS_STORE_NAME)
data = {
"vector": embedded_question,
"question_id": id
}
collection.insert(data)
def retrieve_relevant_docs(query: str) -> list[dict]:
embedded_query = embeddings.embed_query(query)
collection = Collection(DOCUMENTS_STORE_NAME)
search_param = {
"metric_type": "L2", # Similarity metric to use, e.g., "L2" or "IP"
"params": {"nprobe": 16} # Extra search parameters, e.g., number of probes
}
results = collection.search(data=[embedded_query], anns_field="vector", limit=20, param=search_param)
results = results[0]
relevant_docs = collection.query(
expr = f"pk in {results.ids}",
output_fields = ["pk", "text"]
)
# Create a dictionary to map pk values to their respective order index
order_dict = {pk: i for i, pk in enumerate(results.ids)}
# Sort the data list based on the order index of each pk value
relevant_docs = sorted(relevant_docs, key=lambda d: order_dict.get(d['pk'], float('inf')))
for i in range(len(relevant_docs)):
relevant_docs[i]["distance"] = results.distances[i]
return relevant_docs
def query_most_relevant_question(query: str) -> dict:
embedded_query = embeddings.embed_query(query)
collection = Collection(QUESTIONS_STORE_NAME)
search_param = {
"metric_type": "L2", # Similarity metric to use, e.g., "L2" or "IP"
"params": {"nprobe": 16} # Extra search parameters, e.g., number of probes
}
results = collection.search(data=[embedded_query], anns_field="vector", limit=1, param=search_param)
results = results[0]
relevant_qa = {}
if results.ids:
relevant_qa = collection.query(
expr = f"pk in {results.ids}",
output_fields = ["question_id"]
)
relevant_qa = relevant_qa[0]
relevant_qa["distance"] = results.distances[0]
relevant_qa["pk"] = results.ids[0]
return relevant_qa
def query_source_metadata(pk: int) -> dict:
collection = Collection(DOCUMENTS_STORE_NAME)
metadata = collection.query(
expr = f"pk in [{pk}]",
output_fields = ["metadata", "text"]
)
print(metadata)
if len(metadata) > 0:
return metadata[0]["metadata"]
else:
return {}
def remove_answer(pk: int):
collection = Collection(QUESTIONS_STORE_NAME)
relevant_qa = collection.query(
expr = f"pk in [{pk}]",
output_fields = ["pk"]
)
pk = relevant_qa[0]["pk"]
collection.delete(f"pk in [{pk}]")
def remove_source(source: str):
if not collection_exists():
return
collection = Collection(DOCUMENTS_STORE_NAME)
path = os.path.join(UPLOAD_FOLDER, source)
relevant_docs = collection.query(
expr = f"metadata['source'] LIKE '{path}'",
output_fields = ["pk"]
)
relevant_docs = list(map(lambda e: e["pk"], relevant_docs))
for pk in relevant_docs:
collection.delete(f"pk in [{pk}]")
def delete_collection():
for col in [DOCUMENTS_STORE_NAME, QUESTIONS_STORE_NAME]:
if utility.has_collection(col):
print(f"Dropping {col} collection")
collection = Collection(col)
collection.drop()
if utility.has_collection(DOCUMENTS_STORE_NAME):
print(f"retrieving {DOCUMENTS_STORE_NAME} collection")
sources_vector_store = Milvus(embeddings, DOCUMENTS_STORE_NAME) | [] |
2024-01-10 | Snaiel/OpenAI-Milvus-QA-Over-Docs | qa_over_docs~apis~huggingface.py | from typing import List
from qa_over_docs.apis.base import BaseAPI, ChatResponse
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
import requests, os
from time import time
from transformers import AutoTokenizer
from dotenv import load_dotenv
load_dotenv()
API_TOKEN = os.getenv("HUGGINGFACE_API_KEY")
API_INFERENCE_URL = os.getenv("HUGGINGFACE_INFERENCE_ENDPOINT")
API_EMBEDDINGS_URL = os.getenv("HUGGINGFACE_EMBEDDINGS_ENDPOINT")
HEADERS = {"Authorization": f"Bearer {API_TOKEN}"}
SYSTEM_INSTRUCTIONS = """
Answer my question using the context below.
"""
REMINDER = """
Only respond with information from the context.
Do not mention anything outside of the provided context.
"""
MAX_TOKEN_LENGTH = 1000
class HuggingFace(BaseAPI):
tokenizer = AutoTokenizer.from_pretrained("h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3")
embeddings = OpenAIEmbeddings()
# embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
def num_tokens_from_string(self, string: str) -> int:
encoding = self.tokenizer(string)
return len(encoding.tokens())
def retrieve_response(self, question: str, relevant_docs: list[dict]) -> ChatResponse:
base_token_length = self.num_tokens_from_string(SYSTEM_INSTRUCTIONS + f"\n\n{question}" + REMINDER) + 5
context = ""
for doc in relevant_docs:
new_context = context
new_context += "\n\n\n" + "{SOURCE ID}: " + str(doc["pk"]) + "\n" + doc["text"]
if self.num_tokens_from_string(new_context) > MAX_TOKEN_LENGTH - base_token_length:
break
context = new_context
input = "<|prompt|>" + SYSTEM_INSTRUCTIONS + f"\n\nCONTEXT:{context}" + f"\n\n\nQUESTION:\n{question}" + f"\n\n{REMINDER}" + "<|endoftext|><|answer|>"
print(input, self.num_tokens_from_string(input))
payload = {
"inputs": input,
"parameters": {
"max_new_tokens": 1024,
"do_sample": True,
"temperature": 0.1,
"repetition_penalty": 3.2}
}
response = requests.post(API_INFERENCE_URL, headers=HEADERS, json=payload)
response_string = response.json()[0]["generated_text"]
response: ChatResponse = {
"relevant_source_ids": [],
"answer": response_string
}
return response
def embed_documents(self, texts: List[str]) -> List[List[float]]:
return self.embeddings.embed_documents(texts)
def embed_query(self, text: str) -> List[float]:
return self.embeddings.embed_query(text) | [] |
2024-01-10 | bloodraven66/speechbrain | recipes~LibriSpeech~ASR~CTC~train_with_whisper.py | #!/usr/bin/env/python3
"""Recipe for training a whisper-based ctc ASR system with librispeech.
The system employs whisper from OpenAI (https://cdn.openai.com/papers/whisper.pdf).
This recipe take only the whisper encoder and add a DNN + CTC to fine-tune.
If you want to use the full whisper system, please refer to the recipe
speechbrain/recipes/LibriSpeech/ASR/transformer/train_with_whisper.py
To run this recipe, do the following:
> python train_with_whisper.py hparams/train_hf_whisper_encoder.yaml
Authors
* Titouan Parcollet 2022
* Rudolf A Braun 2022
* Sung-Lin Yeh 2021
* Ju-Chieh Chou 2020
* Mirco Ravanelli 2020
* Abdel Heba 2020
* Peter Plantinga 2020
* Samuele Cornell 2020
"""
import os
import sys
import torch
import logging
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main, if_main_process
from speechbrain.tokenizers.SentencePiece import SentencePiece
from speechbrain.utils.data_utils import undo_padding
from hyperpyyaml import load_hyperpyyaml
from pathlib import Path
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.Brain):
def compute_forward(self, batch, stage):
"""Forward computations from the waveform batches to the output probabilities."""
batch = batch.to(self.device)
wavs, wav_lens = batch.sig
wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
# Add augmentation if specified
if stage == sb.Stage.TRAIN:
if hasattr(self.hparams, "augmentation"):
wavs = self.hparams.augmentation(wavs, wav_lens)
# Forward pass
# Encode with Whisper and then DNN
feats = self.modules.whisper(wavs)
x = self.modules.enc(feats)
# Compute outputs
p_tokens = None
logits = self.modules.ctc_lin(x)
p_ctc = self.hparams.log_softmax(logits)
if stage != sb.Stage.TRAIN:
p_tokens = sb.decoders.ctc_greedy_decode(
p_ctc, wav_lens, blank_id=self.hparams.blank_index
)
return p_ctc, wav_lens, p_tokens
def compute_objectives(self, predictions, batch, stage):
"""Computes the loss (CTC) given predictions and targets."""
p_ctc, wav_lens, predicted_tokens = predictions
ids = batch.id
tokens, tokens_lens = batch.tokens
loss_ctc = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)
loss = loss_ctc
if stage != sb.Stage.TRAIN:
# Decode token terms to words
predicted_words = self.tokenizer(
predicted_tokens, task="decode_from_list"
)
# Convert indices to words
target_words = undo_padding(tokens, tokens_lens)
target_words = self.tokenizer(target_words, task="decode_from_list")
self.wer_metric.append(ids, predicted_words, target_words)
self.cer_metric.append(ids, predicted_words, target_words)
return loss
def fit_batch(self, batch):
should_step = self.step % self.grad_accumulation_factor == 0
# Managing automatic mixed precision
if self.auto_mix_prec:
self.whisper_optimizer.zero_grad()
self.model_optimizer.zero_grad()
with torch.cuda.amp.autocast():
outputs = self.compute_forward(batch, sb.Stage.TRAIN)
loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)
self.scaler.scale(loss / self.grad_accumulation_factor).backward()
if should_step:
self.scaler.unscale_(self.whisper_optimizer)
self.scaler.unscale_(self.model_optimizer)
if self.check_gradients(loss):
if self.optimizer_step > self.hparams.warmup_steps:
# Here we added a warmup to the CTC encoder to make sure that
# it does not screw the whisper with too large gradients.
self.scaler.step(self.whisper_optimizer)
self.scaler.step(self.model_optimizer)
self.scaler.update()
self.optimizer_step += 1
else:
outputs = self.compute_forward(batch, sb.Stage.TRAIN)
loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)
(loss / self.grad_accumulation_factor).backward()
if should_step:
if self.check_gradients(loss):
# Here we added a warmup to the CTC encoder to make sure that
# it does not screw the whisper with too large gradients.
if self.optimizer_step > self.hparams.warmup_steps:
self.whisper_optimizer.step()
self.model_optimizer.step()
self.whisper_optimizer.zero_grad()
self.model_optimizer.zero_grad()
self.optimizer_step += 1
return loss.detach().cpu()
def on_stage_start(self, stage, epoch):
"""Gets called at the beginning of each epoch"""
if stage != sb.Stage.TRAIN:
self.cer_metric = self.hparams.cer_computer()
self.wer_metric = self.hparams.error_rate_computer()
def on_stage_end(self, stage, stage_loss, epoch):
"""Gets called at the end of an epoch."""
# Compute/store important stats
stage_stats = {"loss": stage_loss}
if stage == sb.Stage.TRAIN:
self.train_stats = stage_stats
else:
stage_stats["CER"] = self.cer_metric.summarize("error_rate")
stage_stats["WER"] = self.wer_metric.summarize("error_rate")
# Perform end-of-iteration things, like annealing, logging, etc.
if stage == sb.Stage.VALID:
old_lr_model, new_lr_model = self.hparams.lr_annealing_model(
stage_stats["loss"]
)
old_lr_whisper, new_lr_whisper = self.hparams.lr_annealing_whisper(
stage_stats["loss"]
)
sb.nnet.schedulers.update_learning_rate(
self.model_optimizer, new_lr_model
)
sb.nnet.schedulers.update_learning_rate(
self.whisper_optimizer, new_lr_whisper
)
self.hparams.train_logger.log_stats(
stats_meta={
"epoch": epoch,
"lr_model": old_lr_model,
"lr_whisperc": old_lr_whisper,
},
train_stats=self.train_stats,
valid_stats=stage_stats,
)
self.checkpointer.save_and_keep_only(
meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
)
elif stage == sb.Stage.TEST:
self.hparams.train_logger.log_stats(
stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
test_stats=stage_stats,
)
if if_main_process():
with open(self.hparams.wer_file, "w") as w:
self.wer_metric.write_stats(w)
def init_optimizers(self):
"Initializes the whisper optimizer and model optimizer"
self.whisper_optimizer = self.hparams.whisper_opt_class(
self.modules.whisper.parameters()
)
self.model_optimizer = self.hparams.model_opt_class(
self.hparams.model.parameters()
)
if self.checkpointer is not None:
self.checkpointer.add_recoverable(
"whisper_opt", self.whisper_optimizer
)
self.checkpointer.add_recoverable("modelopt", self.model_optimizer)
def dataio_prepare(hparams, tokenizer):
"""This function prepares the datasets to be used in the brain class.
It also defines the data processing pipeline through user-defined functions."""
data_folder = hparams["data_folder"]
train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
)
if hparams["sorting"] == "ascending":
# we sort training data to speed up training and get better results.
train_data = train_data.filtered_sorted(sort_key="duration")
# when sorting do not shuffle in dataloader ! otherwise is pointless
hparams["train_dataloader_opts"]["shuffle"] = False
elif hparams["sorting"] == "descending":
train_data = train_data.filtered_sorted(
sort_key="duration", reverse=True
)
# when sorting do not shuffle in dataloader ! otherwise is pointless
hparams["train_dataloader_opts"]["shuffle"] = False
elif hparams["sorting"] == "random":
pass
else:
raise NotImplementedError(
"sorting must be random, ascending or descending"
)
valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
)
valid_data = valid_data.filtered_sorted(sort_key="duration")
# test is separate
test_datasets = {}
for csv_file in hparams["test_csv"]:
name = Path(csv_file).stem
test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=csv_file, replacements={"data_root": data_folder}
)
test_datasets[name] = test_datasets[name].filtered_sorted(
sort_key="duration"
)
datasets = [train_data, valid_data] + [i for k, i in test_datasets.items()]
# 2. Define audio pipeline:
@sb.utils.data_pipeline.takes("wav")
@sb.utils.data_pipeline.provides("sig")
def audio_pipeline(wav):
sig = sb.dataio.dataio.read_audio(wav)
return sig
sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
# 3. Define text pipeline:
@sb.utils.data_pipeline.takes("wrd")
@sb.utils.data_pipeline.provides(
"wrd", "char_list", "tokens_list", "tokens"
)
def text_pipeline(wrd):
yield wrd
char_list = list(wrd)
yield char_list
tokens_list = tokenizer.sp.encode_as_ids(wrd)
yield tokens_list
tokens = torch.LongTensor(tokens_list)
yield tokens
sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
# 4. Set output:
sb.dataio.dataset.set_output_keys(
datasets, ["id", "sig", "wrd", "char_list", "tokens"],
)
return train_data, valid_data, test_datasets
if __name__ == "__main__":
# CLI:
hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
# If distributed_launch=True then
# create ddp_group with the right communication protocol
sb.utils.distributed.ddp_init_group(run_opts)
with open(hparams_file) as fin:
hparams = load_hyperpyyaml(fin, overrides)
# Create experiment directory
sb.create_experiment_directory(
experiment_directory=hparams["output_folder"],
hyperparams_to_save=hparams_file,
overrides=overrides,
)
# Dataset prep (parsing Librispeech)
from librispeech_prepare import prepare_librispeech # noqa
# multi-gpu (ddp) save data preparation
run_on_main(
prepare_librispeech,
kwargs={
"data_folder": hparams["data_folder"],
"tr_splits": hparams["train_splits"],
"dev_splits": hparams["dev_splits"],
"te_splits": hparams["test_splits"],
"save_folder": hparams["output_folder"],
"merge_lst": hparams["train_splits"],
"merge_name": "train.csv",
"skip_prep": hparams["skip_prep"],
},
)
# Defining tokenizer and loading it
tokenizer = SentencePiece(
model_dir=hparams["save_folder"],
vocab_size=hparams["output_neurons"],
annotation_train=hparams["train_csv"],
annotation_read="wrd",
model_type=hparams["token_type"],
character_coverage=hparams["character_coverage"],
)
# here we create the datasets objects as well as tokenization and encoding
train_data, valid_data, test_datasets = dataio_prepare(hparams, tokenizer)
# Trainer initialization
asr_brain = ASR(
modules=hparams["modules"],
hparams=hparams,
run_opts=run_opts,
checkpointer=hparams["checkpointer"],
)
# We load the pretrained whisper model
if "pretrainer" in hparams.keys():
run_on_main(hparams["pretrainer"].collect_files)
hparams["pretrainer"].load_collected(asr_brain.device)
# We dynamicaly add the tokenizer to our brain class.
# NB: This tokenizer corresponds to the one used for the LM!!
asr_brain.tokenizer = tokenizer
# Training
asr_brain.fit(
asr_brain.hparams.epoch_counter,
train_data,
valid_data,
train_loader_kwargs=hparams["train_dataloader_opts"],
valid_loader_kwargs=hparams["valid_dataloader_opts"],
)
# Testing
for k in test_datasets.keys(): # keys are test_clean, test_other etc
asr_brain.hparams.wer_file = os.path.join(
hparams["output_folder"], "wer_{}.txt".format(k)
)
asr_brain.evaluate(
test_datasets[k], test_loader_kwargs=hparams["test_dataloader_opts"]
)
| [] |
2024-01-10 | bloodraven66/speechbrain | recipes~LibriSpeech~ASR~transformer~train_with_whisper.py | #!/usr/bin/env python3
"""Recipe for training a whisper-based ASR system with librispeech.
The system employs whisper from OpenAI (https://cdn.openai.com/papers/whisper.pdf).
This recipe take the whisper encoder-decoder to fine-tune on the NLL.
If you want to only use the whisper encoder system, please refer to the recipe
speechbrain/recipes/LibriSpeech/ASR/CTC/train_with_whisper.py
To run this recipe, do the following:
> python train_with_whisper.py hparams/train_hf_whisper.yaml
Authors
* Adel Moumen 2022
* Titouan Parcollet 2022
"""
import os
import sys
import torch
import logging
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main, if_main_process
from speechbrain.utils.data_utils import undo_padding
from hyperpyyaml import load_hyperpyyaml
from pathlib import Path
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.Brain):
def compute_forward(self, batch, stage):
"""Forward computations from the waveform batches to the output probabilities."""
batch = batch.to(self.device)
wavs, wav_lens = batch.sig
bos_tokens, bos_tokens_lens = batch.tokens_bos
# Add augmentation if specified
if stage == sb.Stage.TRAIN:
if hasattr(self.hparams, "augmentation"):
wavs = self.hparams.augmentation(wavs, wav_lens)
# We compute the padding mask and replace the values with the pad_token_id
# that the Whisper decoder expect to see.
abs_tokens_lens = (bos_tokens_lens * bos_tokens.shape[1]).long()
pad_mask = (
torch.arange(abs_tokens_lens.max(), device=self.device)[None, :]
< abs_tokens_lens[:, None]
)
bos_tokens[~pad_mask] = self.tokenizer.pad_token_id
# Forward encoder + decoder
enc_out, logits, _ = self.modules.whisper(wavs, bos_tokens)
log_probs = self.hparams.log_softmax(logits)
hyps = None
if stage == sb.Stage.VALID:
hyps, _ = self.hparams.valid_greedy_searcher(enc_out, wav_lens)
elif stage == sb.Stage.TEST:
hyps, _ = self.hparams.test_beam_searcher(enc_out, wav_lens)
return log_probs, hyps, wav_lens
def compute_objectives(self, predictions, batch, stage):
"""Computes the loss NLL given predictions and targets."""
log_probs, hyps, wav_lens, = predictions
batch = batch.to(self.device)
ids = batch.id
tokens_eos, tokens_eos_lens = batch.tokens_eos
loss = self.hparams.nll_loss(
log_probs, tokens_eos, length=tokens_eos_lens,
)
if stage != sb.Stage.TRAIN:
tokens, tokens_lens = batch.tokens
# Decode token terms to words
predicted_words = self.tokenizer.batch_decode(
hyps, skip_special_tokens=True
)
# Convert indices to words
target_words = undo_padding(tokens, tokens_lens)
target_words = self.tokenizer.batch_decode(
target_words, skip_special_tokens=True
)
if hasattr(self.hparams, "normalized_transcripts"):
predicted_words = [
self.tokenizer._normalize(text).split(" ")
for text in predicted_words
]
target_words = [
self.tokenizer._normalize(text).split(" ")
for text in target_words
]
else:
predicted_words = [text.split(" ") for text in predicted_words]
target_words = [text.split(" ") for text in target_words]
self.wer_metric.append(ids, predicted_words, target_words)
self.cer_metric.append(ids, predicted_words, target_words)
return loss
def on_stage_start(self, stage, epoch):
"""Gets called at the beginning of each epoch"""
if stage != sb.Stage.TRAIN:
self.cer_metric = self.hparams.cer_computer()
self.wer_metric = self.hparams.error_rate_computer()
def on_stage_end(self, stage, stage_loss, epoch):
"""Gets called at the end of an epoch."""
# Compute/store important stats
stage_stats = {"loss": stage_loss}
if stage == sb.Stage.TRAIN:
self.train_stats = stage_stats
else:
stage_stats["CER"] = self.cer_metric.summarize("error_rate")
stage_stats["WER"] = self.wer_metric.summarize("error_rate")
# Perform end-of-iteration things, like annealing, logging, etc.
if stage == sb.Stage.VALID:
old_lr_whisper, new_lr_whisper = self.hparams.lr_annealing_whisper(
stage_stats["loss"]
)
sb.nnet.schedulers.update_learning_rate(
self.optimizer, new_lr_whisper
)
self.hparams.train_logger.log_stats(
stats_meta={"epoch": epoch, "lr_whisper": old_lr_whisper},
train_stats=self.train_stats,
valid_stats=stage_stats,
)
self.checkpointer.save_and_keep_only(
meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
)
elif stage == sb.Stage.TEST:
self.hparams.train_logger.log_stats(
stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
test_stats=stage_stats,
)
if if_main_process():
with open(self.hparams.wer_file, "w") as w:
self.wer_metric.write_stats(w)
def dataio_prepare(hparams, tokenizer):
"""This function prepares the datasets to be used in the brain class.
It also defines the data processing pipeline through user-defined functions."""
data_folder = hparams["data_folder"]
train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
)
if hparams["sorting"] == "ascending":
# we sort training data to speed up training and get better results.
train_data = train_data.filtered_sorted(sort_key="duration")
# when sorting do not shuffle in dataloader ! otherwise is pointless
hparams["train_loader_kwargs"]["shuffle"] = False
elif hparams["sorting"] == "descending":
train_data = train_data.filtered_sorted(
sort_key="duration", reverse=True
)
# when sorting do not shuffle in dataloader ! otherwise is pointless
hparams["train_loader_kwargs"]["shuffle"] = False
elif hparams["sorting"] == "random":
pass
else:
raise NotImplementedError(
"sorting must be random, ascending or descending"
)
valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
)
valid_data = valid_data.filtered_sorted(sort_key="duration")
# test is separate
test_datasets = {}
for csv_file in hparams["test_csv"]:
name = Path(csv_file).stem
test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=csv_file, replacements={"data_root": data_folder}
)
test_datasets[name] = test_datasets[name].filtered_sorted(
sort_key="duration"
)
datasets = [train_data, valid_data] + [i for k, i in test_datasets.items()]
# 2. Define audio pipeline:
@sb.utils.data_pipeline.takes("wav")
@sb.utils.data_pipeline.provides("sig")
def audio_pipeline(wav):
sig = sb.dataio.dataio.read_audio(wav)
return sig
sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
# 3. Define text pipeline:
@sb.utils.data_pipeline.takes("wrd")
@sb.utils.data_pipeline.provides(
"wrd", "tokens_list", "tokens_bos", "tokens_eos", "tokens"
)
def text_pipeline(wrd):
yield wrd
tokens_list = tokenizer.encode(wrd)
# avoid bos and eos tokens.
tokens_list = tokens_list[1:-1]
yield tokens_list
tokens_bos = torch.LongTensor([hparams["bos_index"]] + tokens_list)
yield tokens_bos
tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
yield tokens_eos
tokens = torch.LongTensor(tokens_list)
yield tokens
sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
# 4. Set output:
sb.dataio.dataset.set_output_keys(
datasets,
["id", "sig", "tokens_list", "tokens_bos", "tokens_eos", "tokens"],
)
return train_data, valid_data, test_datasets
if __name__ == "__main__":
# CLI:
hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
# If distributed_launch=True then
# create ddp_group with the right communication protocol
sb.utils.distributed.ddp_init_group(run_opts)
with open(hparams_file) as fin:
hparams = load_hyperpyyaml(fin, overrides)
# Create experiment directory
sb.create_experiment_directory(
experiment_directory=hparams["output_folder"],
hyperparams_to_save=hparams_file,
overrides=overrides,
)
# Dataset prep (parsing Librispeech)
from librispeech_prepare import prepare_librispeech # noqa
# multi-gpu (ddp) save data preparation
run_on_main(
prepare_librispeech,
kwargs={
"data_folder": hparams["data_folder"],
"tr_splits": hparams["train_splits"],
"dev_splits": hparams["dev_splits"],
"te_splits": hparams["test_splits"],
"save_folder": hparams["output_folder"],
"merge_lst": hparams["train_splits"],
"merge_name": "train.csv",
"skip_prep": hparams["skip_prep"],
},
)
# Defining tokenizer and loading it
tokenizer = hparams["whisper"].tokenizer
tokenizer.set_prefix_tokens(hparams["language"], "transcribe", False)
# we need to prepare the tokens for searchers
hparams["valid_greedy_searcher"].set_decoder_input_tokens(
tokenizer.prefix_tokens
)
hparams["valid_greedy_searcher"].set_language_token(
tokenizer.prefix_tokens[1]
)
hparams["test_beam_searcher"].set_decoder_input_tokens(
tokenizer.prefix_tokens
)
hparams["test_beam_searcher"].set_language_token(tokenizer.prefix_tokens[1])
# here we create the datasets objects as well as tokenization and encoding
train_data, valid_data, test_datasets = dataio_prepare(hparams, tokenizer)
# Trainer initialization
asr_brain = ASR(
modules=hparams["modules"],
hparams=hparams,
run_opts=run_opts,
checkpointer=hparams["checkpointer"],
opt_class=hparams["whisper_opt_class"],
)
# We load the pretrained whisper model
if "pretrainer" in hparams.keys():
run_on_main(hparams["pretrainer"].collect_files)
hparams["pretrainer"].load_collected(asr_brain.device)
# We dynamicaly add the tokenizer to our brain class.
# NB: This tokenizer corresponds to the one used for Whisper.
asr_brain.tokenizer = tokenizer
# Training
asr_brain.fit(
asr_brain.hparams.epoch_counter,
train_data,
valid_data,
train_loader_kwargs=hparams["train_loader_kwargs"],
valid_loader_kwargs=hparams["valid_loader_kwargs"],
)
# Testing
for k in test_datasets.keys(): # keys are test_clean, test_other etc
asr_brain.hparams.wer_file = os.path.join(
hparams["output_folder"], "wer_{}.txt".format(k)
)
asr_brain.evaluate(
test_datasets[k], test_loader_kwargs=hparams["test_loader_kwargs"]
)
| [] |
2024-01-10 | RedisVentures/LLM-Recommender | app~config.py | import os
import openai
from dotenv import load_dotenv
load_dotenv()
# get path to the directory where this file is located
BASEDIR = os.path.abspath(os.path.dirname(__file__))
# get parent directory of BASEDIR
PARENTDIR = os.path.abspath(os.path.join(BASEDIR, os.pardir))
# Index and Hotel data
SCHEMA = os.getenv("SCHEMA", f"{BASEDIR}/hotel_index_schema.yml")
DATADIR = os.getenv("DATADIR", f"{PARENTDIR}/data")
DATAFILE = os.getenv("DATAFILE", f"{DATADIR}/data.pkl")
# Redis information
REDIS_HOST = os.getenv("REDIS_HOST", "localhost")
REDIS_PORT = os.getenv("REDIS_PORT", "6379")
REDIS_ADDRESS = f"redis://{REDIS_HOST}:{REDIS_PORT}"
# AI models
openai.api_key = os.getenv('OPENAI_API_KEY')
CHAT_MODEL = os.getenv('OPENAI_CHAT_MODEL')
VECTORIZER = os.getenv('HF_VECTOR_MODEL', 'all-MiniLM-L6-v2') | [] |
2024-01-10 | nabejiii/PromptAgent-V | search_beam.py | import os
from dotenv import load_dotenv
import csv
from openai import OpenAI
import queue
from create_init_prompt import create_init_prompt
from improvement import improve_prompt, adjust_prompt_improvement
from image import image_val, create_image, save_image
# from mock import create_init_prompt, improve_prompt, image_val, create_image, save_image, adjust_prompt_improvement
class search_beam():
def __init__(self, image_num, dir_name, beam_width, num):
load_dotenv()
self.image_num = image_num
self.origin_image = os.path.join("data", "image_" + str(image_num), "origin_" + str(image_num) + ".jpg")
if not os.path.exists(self.origin_image):
raise Exception("The image does not exist: " + self.origin_image)
self.client = OpenAI(
api_key=os.environ["OPENAI_API_KEY_V"],
)
self.directory = os.path.join("data", "image_" + str(self.image_num), dir_name)
if not os.path.exists(self.directory):
os.makedirs(self.directory)
self.num = num
self.beam_width = beam_width
self.prompts = []
self.diffs = []
self.images = []
self.image_layers = []
self.scores = []
self.current_top_beams = queue.Queue() # current top beams
init_prompt = create_init_prompt(self.origin_image)
init_image_http = create_image(init_prompt)
init_image = os.path.join(self.directory, "image_" + str(self.image_num) + "_" + str(len(self.images)) + ".jpg")
save_image(init_image, init_image_http)
self.prompts.append(init_prompt)
self.diffs.append(0)
self.images.append(init_image)
init_layer = 0
self.image_layers.append(init_layer)
init_score = image_val(self.origin_image, init_image)
self.scores.append(init_score)
self.current_top_beams.put((init_prompt, init_image, init_layer, init_score, 0))
def beam_step(self, layer):
print(f"Beam step layer: {layer}")
new_beam = []
current_img_num = 0
while not self.current_top_beams.empty():
prompt, image, image_layer, score, diff = self.current_top_beams.get()
for j in range(self.beam_width):
print(f"Beam step layer: {layer}, image: {current_img_num}")
# generate image
diff, new_prompt = improve_prompt(self.origin_image, image, prompt)
new_image_http = create_image(new_prompt)
new_image = os.path.join(self.directory, "image_" + str(self.image_num) + "_" + str(image_layer + 1) + "_" + str(current_img_num) + ".jpg")
save_image(new_image, new_image_http)
new_score = image_val(self.origin_image, new_image)
new_beam.append((new_prompt, new_image, image_layer + 1, new_score, diff))
current_img_num += 1
sorted_beam = sorted(new_beam, key=lambda x: x[3])
beam_num = 0
for beam in sorted_beam:
if beam_num < self.num:
self.current_top_beams.put(beam)
self.prompts.append(beam[0])
self.images.append(beam[1])
self.image_layers.append(beam[2])
self.scores.append(beam[3])
self.diffs.append(beam[4])
beam_num += 1
def beam_step_with_learning_rate(self, layer, progress):
print(f"Beam step layer: {layer}")
new_beam = []
current_img_num = 0
while not self.current_top_beams.empty():
prompt, image, image_layer, score, diff = self.current_top_beams.get()
for j in range(self.beam_width):
print(f"Beam step layer: {layer}, image: {current_img_num}")
# generate image
diff, new_prompt = adjust_prompt_improvement(self.origin_image, image, prompt, progress)
new_image_http = create_image(new_prompt)
new_image = os.path.join(self.directory, "image_" + str(self.image_num) + "_" + str(image_layer + 1) + "_" + str(current_img_num) + ".jpg")
save_image(new_image, new_image_http)
new_score = image_val(self.origin_image, new_image)
new_beam.append((new_prompt, new_image, image_layer + 1, new_score, diff))
current_img_num += 1
sorted_beam = sorted(new_beam, key=lambda x: x[3])
beam_num = 0
for beam in sorted_beam:
if beam_num < self.num:
self.current_top_beams.put(beam)
self.prompts.append(beam[0])
self.images.append(beam[1])
self.image_layers.append(beam[2])
self.scores.append(beam[3])
self.diffs.append(beam[4])
beam_num += 1
def search_beam(self, max_layer):
for i in range(max_layer):
self.beam_step(i)
self.store_evaluation()
def search_beam_with_learning_rate(self, max_layer):
for i in range(max_layer):
progress = i / max_layer
self.beam_step_with_learning_rate(i, progress)
self.store_evaluation()
def store_evaluation(self):
file_path = os.path.join(self.directory, "evaluation.csv")
image_id = 0
with open(file_path, 'a', newline='') as file:
writer = csv.writer(file)
writer.writerow(["ID", "Prompt", "Diff", "Image", "Layer", "Evaluation"])
for prompt, diff, image, layer, score in zip(self.prompts, self.diffs, self.images, self.image_layers, self.scores):
writer.writerow([image_id, prompt, diff, image, layer, score])
image_id += 1
print(f"Prompts and evaluations successfully saved to {file_path}")
if __name__ == "__main__":
search_beam = search_beam(8, "search_beam", 3, 2)
# search_beam.search_beam(3) #max_layer
search_beam.search_beam_with_learning_rate(3) #max_layer | [] |
2024-01-10 | nabejiii/PromptAgent-V | search_mcts.py | from openai import OpenAI
import os
from dotenv import load_dotenv
import csv
import math
from numpy import *
from create_init_prompt import create_init_prompt
from improvement import improve_prompt
from image import image_val, create_image, save_image
# from mock import image_val, create_image, save_image, improve_prompt, create_init_prompt
parameter = {"expand_count": 3, "expand_width": 5, "image_num": 1, "max_iteration": 30}
class Node():
def __init__(self, prompt, diff, cwd):
self.origin_image = os.path.join("data", "image_" + str(parameter["image_num"]), "origin_" + str(parameter["image_num"]) + ".jpg")
if not os.path.exists(self.origin_image):
raise Exception("The image does not exist: " + self.origin_image)
self.directory = cwd
self.prompt = prompt
self.diff = diff
self.images = []
self.scores = []
self.w = 0
self.n = 0
self.child_nodes = None
def expand(self):
# scoreの一番低いindexを取得する
min_score_index = argmin(self.scores)
# そのindexのimageを取得する
min_score_image = self.images[min_score_index]
self.child_nodes = []
for i in range(parameter["expand_width"]):
diff, new_prompt = improve_prompt(self.origin_image, min_score_image, self.prompt)
new_dir = os.path.join(self.directory, "node_" + str(i+1))
if not os.path.exists(new_dir):
os.makedirs(new_dir)
self.child_nodes.append(Node(new_prompt, diff, new_dir))
def next_child_node(self):
for child_node in self.child_nodes:
if child_node.n == 0:
return child_node
t = 0
for child_node in self.child_nodes:
t += child_node.n
ucb1_values = []
for child_node in self.child_nodes:
ucb1_values.append(child_node.w / child_node.n + 2 * (2 * math.log(t) / child_node.n) ** 0.5)
return self.child_nodes[argmin(ucb1_values)]
def evaluate(self):
if not self.child_nodes:
new_image_http = create_image(self.prompt)
new_image = os.path.join(self.directory, "image_" + str(parameter["image_num"]) + "_" + str(len(self.images)) + ".jpg")
save_image(new_image, new_image_http)
new_score = image_val(self.origin_image, new_image)
self.images.append(new_image)
self.scores.append(new_score)
self.w += new_score
self.n += 1
if self.n == parameter["expand_count"]:
self.expand()
return new_score
else:
new_score = self.next_child_node().evaluate()
self.w += new_score
self.n += 1
return new_score
# 集計
def aggregation(self):
print(self.directory)
if self.n == 0:
return 1000000, "", "", ""
file_path = os.path.join(self.directory, "evaluation.csv")
id = 0
with open(file_path, 'a', newline='') as file:
writer = csv.writer(file)
writer.writerow(["Prompt", "Diff"])
writer.writerow([self.prompt, self.diff])
writer.writerow(["ID", "Image", "Evaluation"])
for score, image in zip(self.scores, self.images):
writer.writerow([id, score, image])
id += 1
file.close()
if not self.child_nodes:
return self.w / self.n, self.prompt, self.diff , self.images[argmin(self.scores)]
else:
min_score = self.w / self.n
min_prompt = self.prompt
min_diff = self.diff
min_image = self.images[argmin(self.scores)]
for child_node in self.child_nodes:
score, prompt, diff, image = child_node.aggregation()
if score < min_score:
min_score = score
min_prompt = prompt
min_diff = diff
min_image = image
return min_score, min_prompt, min_diff, min_image
if __name__ == "__main__":
cwd = os.path.join("data", "image_" + str(parameter["image_num"]), "mcts_1")
if not os.path.exists(cwd):
os.makedirs(cwd)
origin_image = os.path.join("data", "image_" + str(parameter["image_num"]), "origin_" + str(parameter["image_num"]) + ".jpg")
root_node = Node(create_init_prompt(origin_image), "", cwd)
image = create_image(root_node.prompt)
image_path = os.path.join(cwd, "image_0.jpg")
save_image(image_path, image)
root_node.images.append(image_path)
root_node.scores.append(image_val(root_node.origin_image, root_node.images[0]))
root_node.expand()
for i in range(parameter["max_iteration"]):
root_node.evaluate()
print(f"iteration: {i+1}/{parameter['max_iteration']}")
score, prompt, _, image = root_node.aggregation()
print("score: " + str(score))
print("prompt: " + prompt)
print("image: " + image)
| [] |
2024-01-10 | opendilab/LightZero | zoo~atari~envs~atari_wrappers.py | # Adapted from openai baselines: https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
from datetime import datetime
from typing import Optional
import cv2
import gymnasium
import gym
import numpy as np
from ding.envs import NoopResetWrapper, MaxAndSkipWrapper, EpisodicLifeWrapper, FireResetWrapper, WarpFrameWrapper, \
ScaledFloatFrameWrapper, \
ClipRewardWrapper, FrameStackWrapper
from ding.utils.compression_helper import jpeg_data_compressor
from easydict import EasyDict
from gymnasium.wrappers import RecordVideo
# only for reference now
def wrap_deepmind(env_id, episode_life=True, clip_rewards=True, frame_stack=4, scale=True, warp_frame=True):
"""Configure environment for DeepMind-style Atari. The observation is
channel-first: (c, h, w) instead of (h, w, c).
:param str env_id: the atari environment id.
:param bool episode_life: wrap the episode life wrapper.
:param bool clip_rewards: wrap the reward clipping wrapper.
:param int frame_stack: wrap the frame stacking wrapper.
:param bool scale: wrap the scaling observation wrapper.
:param bool warp_frame: wrap the grayscale + resize observation wrapper.
:return: the wrapped atari environment.
"""
assert 'NoFrameskip' in env_id
env = gym.make(env_id)
env = NoopResetWrapper(env, noop_max=30)
env = MaxAndSkipWrapper(env, skip=4)
if episode_life:
env = EpisodicLifeWrapper(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetWrapper(env)
if warp_frame:
env = WarpFrameWrapper(env)
if scale:
env = ScaledFloatFrameWrapper(env)
if clip_rewards:
env = ClipRewardWrapper(env)
if frame_stack:
env = FrameStackWrapper(env, frame_stack)
return env
# only for reference now
def wrap_deepmind_mr(env_id, episode_life=True, clip_rewards=True, frame_stack=4, scale=True, warp_frame=True):
"""Configure environment for DeepMind-style Atari. The observation is
channel-first: (c, h, w) instead of (h, w, c).
:param str env_id: the atari environment id.
:param bool episode_life: wrap the episode life wrapper.
:param bool clip_rewards: wrap the reward clipping wrapper.
:param int frame_stack: wrap the frame stacking wrapper.
:param bool scale: wrap the scaling observation wrapper.
:param bool warp_frame: wrap the grayscale + resize observation wrapper.
:return: the wrapped atari environment.
"""
assert 'MontezumaRevenge' in env_id
env = gym.make(env_id)
env = NoopResetWrapper(env, noop_max=30)
env = MaxAndSkipWrapper(env, skip=4)
if episode_life:
env = EpisodicLifeWrapper(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetWrapper(env)
if warp_frame:
env = WarpFrameWrapper(env)
if scale:
env = ScaledFloatFrameWrapper(env)
if clip_rewards:
env = ClipRewardWrapper(env)
if frame_stack:
env = FrameStackWrapper(env, frame_stack)
return env
def wrap_lightzero(config: EasyDict, episode_life: bool, clip_rewards: bool) -> gym.Env:
"""
Overview:
Configure environment for MuZero-style Atari. The observation is
channel-first: (c, h, w) instead of (h, w, c).
Arguments:
- config (:obj:`Dict`): Dict containing configuration parameters for the environment.
- episode_life (:obj:`bool`): If True, the agent starts with a set number of lives and loses them during the game.
- clip_rewards (:obj:`bool`): If True, the rewards are clipped to a certain range.
Return:
- env (:obj:`gym.Env`): The wrapped Atari environment with the given configurations.
"""
if config.render_mode_human:
env = gymnasium.make(config.env_name, render_mode='human')
else:
env = gymnasium.make(config.env_name, render_mode='rgb_array')
assert 'NoFrameskip' in env.spec.id
if config.save_replay:
timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
video_name = f'{env.spec.id}-video-{timestamp}'
env = RecordVideo(
env,
video_folder=config.replay_path,
episode_trigger=lambda episode_id: True,
name_prefix=video_name
)
env = GymnasiumToGymWrapper(env)
env = NoopResetWrapper(env, noop_max=30)
env = MaxAndSkipWrapper(env, skip=config.frame_skip)
if episode_life:
env = EpisodicLifeWrapper(env)
env = TimeLimit(env, max_episode_steps=config.max_episode_steps)
if config.warp_frame:
# we must set WarpFrame before ScaledFloatFrameWrapper
env = WarpFrame(env, width=config.obs_shape[1], height=config.obs_shape[2], grayscale=config.gray_scale)
if config.scale:
env = ScaledFloatFrameWrapper(env)
if clip_rewards:
env = ClipRewardWrapper(env)
env = JpegWrapper(env, transform2string=config.transform2string)
if config.game_wrapper:
env = GameWrapper(env)
return env
class TimeLimit(gym.Wrapper):
"""
Overview:
A wrapper that limits the maximum number of steps in an episode.
"""
def __init__(self, env: gym.Env, max_episode_steps: Optional[int] = None):
"""
Arguments:
- env (:obj:`gym.Env`): The environment to wrap.
- max_episode_steps (:obj:`Optional[int]`): Maximum number of steps per episode. If None, no limit is applied.
"""
super(TimeLimit, self).__init__(env)
self._max_episode_steps = max_episode_steps
self._elapsed_steps = 0
def step(self, ac):
observation, reward, done, info = self.env.step(ac)
self._elapsed_steps += 1
if self._elapsed_steps >= self._max_episode_steps:
done = True
info['TimeLimit.truncated'] = True
return observation, reward, done, info
def reset(self, **kwargs):
self._elapsed_steps = 0
return self.env.reset(**kwargs)
class WarpFrame(gym.ObservationWrapper):
"""
Overview:
A wrapper that warps frames to 84x84 as done in the Nature paper and later work.
"""
def __init__(self, env: gym.Env, width: int = 84, height: int = 84, grayscale: bool = True,
dict_space_key: Optional[str] = None):
"""
Arguments:
- env (:obj:`gym.Env`): The environment to wrap.
- width (:obj:`int`): The width to which the frames are resized.
- height (:obj:`int`): The height to which the frames are resized.
- grayscale (:obj:`bool`): If True, convert frames to grayscale.
- dict_space_key (:obj:`Optional[str]`): If specified, indicates which observation should be warped.
"""
super().__init__(env)
self._width = width
self._height = height
self._grayscale = grayscale
self._key = dict_space_key
if self._grayscale:
num_colors = 1
else:
num_colors = 3
new_space = gym.spaces.Box(
low=0,
high=255,
shape=(self._height, self._width, num_colors),
dtype=np.uint8,
)
if self._key is None:
original_space = self.observation_space
self.observation_space = new_space
else:
original_space = self.observation_space.spaces[self._key]
self.observation_space.spaces[self._key] = new_space
assert original_space.dtype == np.uint8 and len(original_space.shape) == 3
def observation(self, obs):
if self._key is None:
frame = obs
else:
frame = obs[self._key]
if self._grayscale:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(frame, (self._width, self._height), interpolation=cv2.INTER_AREA)
if self._grayscale:
frame = np.expand_dims(frame, -1)
if self._key is None:
obs = frame
else:
obs = obs.copy()
obs[self._key] = frame
return obs
class JpegWrapper(gym.Wrapper):
"""
Overview:
A wrapper that converts the observation into a string to save memory.
"""
def __init__(self, env: gym.Env, transform2string: bool = True):
"""
Arguments:
- env (:obj:`gym.Env`): The environment to wrap.
- transform2string (:obj:`bool`): If True, transform the observations to string.
"""
super().__init__(env)
self.transform2string = transform2string
def step(self, action):
observation, reward, done, info = self.env.step(action)
if self.transform2string:
observation = jpeg_data_compressor(observation)
return observation, reward, done, info
def reset(self, **kwargs):
observation = self.env.reset(**kwargs)
if self.transform2string:
observation = jpeg_data_compressor(observation)
return observation
class GameWrapper(gym.Wrapper):
"""
Overview:
A wrapper to adapt the environment to the game interface.
"""
def __init__(self, env: gym.Env):
"""
Arguments:
- env (:obj:`gym.Env`): The environment to wrap.
"""
super().__init__(env)
def legal_actions(self):
return [_ for _ in range(self.env.action_space.n)]
class GymnasiumToGymWrapper(gym.Wrapper):
"""
Overview:
A wrapper class that adapts a Gymnasium environment to the Gym interface.
Interface:
``__init__``, ``reset``, ``seed``
Properties:
- _seed (:obj:`int` or None): The seed value for the environment.
"""
def __init__(self, env):
"""
Overview:
Initializes the GymnasiumToGymWrapper.
Arguments:
- env (:obj:`gymnasium.Env`): The Gymnasium environment to be wrapped.
"""
assert isinstance(env, gymnasium.Env), type(env)
super().__init__(env)
self._seed = None
def seed(self, seed):
"""
Overview:
Sets the seed value for the environment.
Arguments:
- seed (:obj:`int`): The seed value to use for random number generation.
"""
self._seed = seed
def reset(self):
"""
Overview:
Resets the environment and returns the initial observation.
Returns:
- observation (:obj:`Any`): The initial observation of the environment.
"""
if self._seed is not None:
obs, _ = self.env.reset(seed=self._seed)
return obs
else:
obs, _ = self.env.reset()
return obs | [] |
2024-01-10 | real-sumit/xconfig | .local~bin~chat-gpt.py | import openai
# Go to https://platform.openai.com/account/api-keys
# and generate your api key and paste it inside a
# file in this directory named "openai_api_key"
key = open("openai_api_key")
openai.api_key = key.read()
q = input("Enter question: ")
response = openai.ChatCompletion.create(model='gpt-3.5-turbo',
messages=[{"role":"user", "content": q}])
# Extract and print the generated response
print(response.choices[0].message.content)
| [] |
2024-01-10 | koparasy/gemfi | src~cpu~BaseCPU.py | # Copyright (c) 2012-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2005-2008 The Regents of The University of Michigan
# Copyright (c) 2011 Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Rick Strong
# Andreas Hansson
import sys
from m5.defines import buildEnv
from m5.params import *
from m5.proxy import *
from Bus import CoherentBus
from InstTracer import InstTracer
from ExeTracer import ExeTracer
from MemObject import MemObject
from ClockDomain import *
default_tracer = ExeTracer()
if buildEnv['TARGET_ISA'] == 'alpha':
from AlphaTLB import AlphaDTB, AlphaITB
from AlphaInterrupts import AlphaInterrupts
from AlphaISA import AlphaISA
isa_class = AlphaISA
elif buildEnv['TARGET_ISA'] == 'sparc':
from SparcTLB import SparcTLB
from SparcInterrupts import SparcInterrupts
from SparcISA import SparcISA
isa_class = SparcISA
elif buildEnv['TARGET_ISA'] == 'x86':
from X86TLB import X86TLB
from X86LocalApic import X86LocalApic
from X86ISA import X86ISA
isa_class = X86ISA
elif buildEnv['TARGET_ISA'] == 'mips':
from MipsTLB import MipsTLB
from MipsInterrupts import MipsInterrupts
from MipsISA import MipsISA
isa_class = MipsISA
elif buildEnv['TARGET_ISA'] == 'arm':
from ArmTLB import ArmTLB, ArmStage2IMMU, ArmStage2DMMU
from ArmInterrupts import ArmInterrupts
from ArmISA import ArmISA
isa_class = ArmISA
elif buildEnv['TARGET_ISA'] == 'power':
from PowerTLB import PowerTLB
from PowerInterrupts import PowerInterrupts
from PowerISA import PowerISA
isa_class = PowerISA
class BaseCPU(MemObject):
type = 'BaseCPU'
abstract = True
cxx_header = "cpu/base.hh"
@classmethod
def export_methods(cls, code):
code('''
void switchOut();
void takeOverFrom(BaseCPU *cpu);
bool switchedOut();
void flushTLBs();
Counter totalInsts();
void scheduleInstStop(ThreadID tid, Counter insts, const char *cause);
void scheduleLoadStop(ThreadID tid, Counter loads, const char *cause);
''')
@classmethod
def memory_mode(cls):
"""Which memory mode does this CPU require?"""
return 'invalid'
@classmethod
def require_caches(cls):
"""Does the CPU model require caches?
Some CPU models might make assumptions that require them to
have caches.
"""
return False
@classmethod
def support_take_over(cls):
"""Does the CPU model support CPU takeOverFrom?"""
return False
def takeOverFrom(self, old_cpu):
self._ccObject.takeOverFrom(old_cpu._ccObject)
system = Param.System(Parent.any, "system object")
cpu_id = Param.Int(-1, "CPU identifier")
numThreads = Param.Unsigned(1, "number of HW thread contexts")
function_trace = Param.Bool(False, "Enable function trace")
function_trace_start = Param.Tick(0, "Tick to start function trace")
checker = Param.BaseCPU(NULL, "checker CPU")
do_checkpoint_insts = Param.Bool(True,
"enable checkpoint pseudo instructions")
do_statistics_insts = Param.Bool(True,
"enable statistics pseudo instructions")
profile = Param.Latency('0ns', "trace the kernel stack")
do_quiesce = Param.Bool(True, "enable quiesce instructions")
workload = VectorParam.Process([], "processes to run")
if buildEnv['TARGET_ISA'] == 'sparc':
dtb = Param.SparcTLB(SparcTLB(), "Data TLB")
itb = Param.SparcTLB(SparcTLB(), "Instruction TLB")
interrupts = Param.SparcInterrupts(
NULL, "Interrupt Controller")
isa = VectorParam.SparcISA([ isa_class() ], "ISA instance")
elif buildEnv['TARGET_ISA'] == 'alpha':
dtb = Param.AlphaTLB(AlphaDTB(), "Data TLB")
itb = Param.AlphaTLB(AlphaITB(), "Instruction TLB")
interrupts = Param.AlphaInterrupts(
NULL, "Interrupt Controller")
isa = VectorParam.AlphaISA([ isa_class() ], "ISA instance")
elif buildEnv['TARGET_ISA'] == 'x86':
dtb = Param.X86TLB(X86TLB(), "Data TLB")
itb = Param.X86TLB(X86TLB(), "Instruction TLB")
interrupts = Param.X86LocalApic(NULL, "Interrupt Controller")
isa = VectorParam.X86ISA([ isa_class() ], "ISA instance")
elif buildEnv['TARGET_ISA'] == 'mips':
dtb = Param.MipsTLB(MipsTLB(), "Data TLB")
itb = Param.MipsTLB(MipsTLB(), "Instruction TLB")
interrupts = Param.MipsInterrupts(
NULL, "Interrupt Controller")
isa = VectorParam.MipsISA([ isa_class() ], "ISA instance")
elif buildEnv['TARGET_ISA'] == 'arm':
dtb = Param.ArmTLB(ArmTLB(), "Data TLB")
itb = Param.ArmTLB(ArmTLB(), "Instruction TLB")
istage2_mmu = Param.ArmStage2MMU(ArmStage2IMMU(), "Stage 2 trans")
dstage2_mmu = Param.ArmStage2MMU(ArmStage2DMMU(), "Stage 2 trans")
interrupts = Param.ArmInterrupts(
NULL, "Interrupt Controller")
isa = VectorParam.ArmISA([ isa_class() ], "ISA instance")
elif buildEnv['TARGET_ISA'] == 'power':
UnifiedTLB = Param.Bool(True, "Is this a Unified TLB?")
dtb = Param.PowerTLB(PowerTLB(), "Data TLB")
itb = Param.PowerTLB(PowerTLB(), "Instruction TLB")
interrupts = Param.PowerInterrupts(
NULL, "Interrupt Controller")
isa = VectorParam.PowerISA([ isa_class() ], "ISA instance")
else:
print "Don't know what TLB to use for ISA %s" % \
buildEnv['TARGET_ISA']
sys.exit(1)
max_insts_all_threads = Param.Counter(0,
"terminate when all threads have reached this inst count")
max_insts_any_thread = Param.Counter(0,
"terminate when any thread reaches this inst count")
simpoint_start_insts = VectorParam.Counter([],
"starting instruction counts of simpoints")
max_loads_all_threads = Param.Counter(0,
"terminate when all threads have reached this load count")
max_loads_any_thread = Param.Counter(0,
"terminate when any thread reaches this load count")
progress_interval = Param.Frequency('0Hz',
"frequency to print out the progress message")
switched_out = Param.Bool(False,
"Leave the CPU switched out after startup (used when switching " \
"between CPU models)")
tracer = Param.InstTracer(default_tracer, "Instruction tracer")
icache_port = MasterPort("Instruction Port")
dcache_port = MasterPort("Data Port")
_cached_ports = ['icache_port', 'dcache_port']
if buildEnv['TARGET_ISA'] in ['x86', 'arm']:
_cached_ports += ["itb.walker.port", "dtb.walker.port"]
if buildEnv['TARGET_ISA'] in ['arm']:
_cached_ports += ["istage2_mmu.stage2_tlb.walker.port",
"dstage2_mmu.stage2_tlb.walker.port"]
_uncached_slave_ports = []
_uncached_master_ports = []
if buildEnv['TARGET_ISA'] == 'x86':
_uncached_slave_ports += ["interrupts.pio", "interrupts.int_slave"]
_uncached_master_ports += ["interrupts.int_master"]
def createInterruptController(self):
if buildEnv['TARGET_ISA'] == 'sparc':
self.interrupts = SparcInterrupts()
elif buildEnv['TARGET_ISA'] == 'alpha':
self.interrupts = AlphaInterrupts()
elif buildEnv['TARGET_ISA'] == 'x86':
self.apic_clk_domain = DerivedClockDomain(clk_domain =
Parent.clk_domain,
clk_divider = 16)
self.interrupts = X86LocalApic(clk_domain = self.apic_clk_domain,
pio_addr=0x2000000000000000)
_localApic = self.interrupts
elif buildEnv['TARGET_ISA'] == 'mips':
self.interrupts = MipsInterrupts()
elif buildEnv['TARGET_ISA'] == 'arm':
self.interrupts = ArmInterrupts()
elif buildEnv['TARGET_ISA'] == 'power':
self.interrupts = PowerInterrupts()
else:
print "Don't know what Interrupt Controller to use for ISA %s" % \
buildEnv['TARGET_ISA']
sys.exit(1)
def connectCachedPorts(self, bus):
for p in self._cached_ports:
exec('self.%s = bus.slave' % p)
def connectUncachedPorts(self, bus):
for p in self._uncached_slave_ports:
exec('self.%s = bus.master' % p)
for p in self._uncached_master_ports:
exec('self.%s = bus.slave' % p)
def connectAllPorts(self, cached_bus, uncached_bus = None):
self.connectCachedPorts(cached_bus)
if not uncached_bus:
uncached_bus = cached_bus
self.connectUncachedPorts(uncached_bus)
def addPrivateSplitL1Caches(self, ic, dc, iwc = None, dwc = None):
self.icache = ic
self.dcache = dc
self.icache_port = ic.cpu_side
self.dcache_port = dc.cpu_side
self._cached_ports = ['icache.mem_side', 'dcache.mem_side']
if buildEnv['TARGET_ISA'] in ['x86', 'arm']:
if iwc and dwc:
self.itb_walker_cache = iwc
self.dtb_walker_cache = dwc
if buildEnv['TARGET_ISA'] in ['arm']:
self.itb_walker_cache_bus = CoherentBus()
self.dtb_walker_cache_bus = CoherentBus()
self.itb_walker_cache_bus.master = iwc.cpu_side
self.dtb_walker_cache_bus.master = dwc.cpu_side
self.itb.walker.port = self.itb_walker_cache_bus.slave
self.dtb.walker.port = self.dtb_walker_cache_bus.slave
self.istage2_mmu.stage2_tlb.walker.port = self.itb_walker_cache_bus.slave
self.dstage2_mmu.stage2_tlb.walker.port = self.dtb_walker_cache_bus.slave
else:
self.itb.walker.port = iwc.cpu_side
self.dtb.walker.port = dwc.cpu_side
self._cached_ports += ["itb_walker_cache.mem_side", \
"dtb_walker_cache.mem_side"]
else:
self._cached_ports += ["itb.walker.port", "dtb.walker.port"]
if buildEnv['TARGET_ISA'] in ['arm']:
self._cached_ports += ["istage2_mmu.stage2_tlb.walker.port", \
"dstage2_mmu.stage2_tlb.walker.port"]
# Checker doesn't need its own tlb caches because it does
# functional accesses only
if self.checker != NULL:
self._cached_ports += ["checker.itb.walker.port", \
"checker.dtb.walker.port"]
if buildEnv['TARGET_ISA'] in ['arm']:
self._cached_ports += ["checker.istage2_mmu.stage2_tlb.walker.port", \
"checker.dstage2_mmu.stage2_tlb.walker.port"]
def addTwoLevelCacheHierarchy(self, ic, dc, l2c, iwc = None, dwc = None):
self.addPrivateSplitL1Caches(ic, dc, iwc, dwc)
# Set a width of 32 bytes (256-bits), which is four times that
# of the default bus. The clock of the CPU is inherited by
# default.
self.toL2Bus = CoherentBus(width = 32)
self.connectCachedPorts(self.toL2Bus)
self.l2cache = l2c
self.toL2Bus.master = self.l2cache.cpu_side
self._cached_ports = ['l2cache.mem_side']
def createThreads(self):
self.isa = [ isa_class() for i in xrange(self.numThreads) ]
if self.checker != NULL:
self.checker.createThreads()
def addCheckerCpu(self):
pass
| [] |
2024-01-10 | saintlyzero/brick-hack-backend | helpers~semantic_search.py | import cohere
import numpy as np
import pinecone
from data import OS_CN, OOP
from keys import PINE_KEY, API_KEY
def init():
co = cohere.Client(API_KEY)
pinecone.init(
PINE_KEY,
environment="us-east1-gcp" # find next to API key in console
)
return co
def create_index(co, index_name):
embeds = co.embed(
texts=OS_CN.split('.'),
model='large',
truncate='None'
).embeddings
shape = np.array(embeds).shape
if index_name in pinecone.list_indexes():
pinecone.delete_index(index_name)
pinecone.create_index(
index_name,
dimension=shape[1],
metric='cosine'
)
return embeds, shape
def upsert_data(embeds, shape, index_name):
index = pinecone.Index(index_name)
batch_size = 128
ids = [str(i) for i in range(shape[0])]
# create list of metadata dictionaries
meta = [{'text': text} for text in OS_CN.split('.')]
# create list of (id, vector, metadata) tuples to be upserted
to_upsert = list(zip(ids, embeds, meta))
for i in range(0, shape[0], batch_size):
i_end = min(i + batch_size, shape[0])
index.upsert(vectors=to_upsert[i:i_end])
return index
def main():
co = init()
index = create_and_store(co)
query_pinecone(co, index)
def query_pinecone(co, index):
query1 = "Where there any announcements in the lecture?"
# query2 = "When are the office hours?"
# query3 = "What is an OS?"
# query4 = "What are concepts?"
# create the query embedding
xq = co.embed(
texts=[query1],
model='large',
truncate='None'
).embeddings
res = index.query(xq, top_k=2, include_metadata=True)
print(res)
def create_and_store(co):
index_name = 'cohere-pinecone-os-cn'
embeds, shape = create_index(co, index_name)
return upsert_data(embeds, shape, index_name)
if __name__ == '__main__':
main()
| [] |
2024-01-10 | saintlyzero/brick-hack-backend | lastminute~summary~helpers~generate_summary.py | # from data import OOP, OS_CN
from keys import API_KEY
import cohere
def generate_summary(text):
co = cohere.Client(API_KEY)
# return generate_summary(co).summary
return co.summarize(model='summarize-xlarge', text=text, length='long', extractiveness='medium', temperature=0.25)
def main():
return generate_summary()
if __name__ == '__main__':
print(main()) | [] |
2024-01-10 | saintlyzero/brick-hack-backend | helpers~generate_summary.py | from data import OOP, OS_CN
from keys import API_KEY
import cohere
def generate_summary(co):
return co.summarize(model='summarize-xlarge', text=OS_CN, length='long', extractiveness='medium', temperature=0.25)
def main():
co = cohere.Client(API_KEY)
return generate_summary(co).summary
if __name__ == '__main__':
print(main()) | [] |
2024-01-10 | saintlyzero/brick-hack-backend | helpers~generate_quiz.py | import cohere
from data import OOP, OS_CN
from keys import API_KEY
def generate_quiz():
co = cohere.Client(API_KEY)
return co.generate(model='command-xlarge-20221108', prompt='Generate a list of 5 interview questions on Abstraction, Operating system, Inheritance', max_tokens=500, temperature=0, k=0, p=1, frequency_penalty=0, presence_penalty=0, stop_sequences=[], return_likelihoods='NONE')
print(f'Result: {generate_quiz().generations[0].text}')
| [] |
2024-01-10 | saintlyzero/brick-hack-backend | helpers~generate_timeline.py | import cohere
from data import OOP, OS_CN
from keys import API_KEY
import cohere
def generate_timeline(co):
response = co.generate(
model='command-xlarge-20221108',
prompt=f'extract all concepts from lecture: {OS_CN}',
max_tokens=200,
temperature=0,
k=0,
p=1,
frequency_penalty=0,
presence_penalty=0,
stop_sequences=[],
return_likelihoods='NONE')
return response.generations[0].text
def main():
co = cohere.Client(API_KEY)
print(generate_timeline(co))
if __name__ == '__main__':
main()
| [] |
2024-01-10 | saintlyzero/brick-hack-backend | lastminute~summary~helper.py | import cohere
import pinecone
from . import django_auth
import numpy as np
co = cohere.Client(django_auth.API_KEY)
pinecone.init(
django_auth.PINE_KEY,
environment="us-east1-gcp"
)
def generate_summary(text):
return co.summarize(model='summarize-xlarge', text=text, length='long', extractiveness='medium', temperature=0.25)
def generate_outline(text):
response = co.generate(
model='command-xlarge-20221108',
prompt=f'extract all concepts from lecture: {text}',
max_tokens=200,
temperature=0,
k=0,
p=1,
frequency_penalty=0,
presence_penalty=0,
stop_sequences=[],
return_likelihoods='NONE')
return response.generations[0].text
def create_index(co, index_name, text):
embeds = co.embed(
texts=text.split('.'),
model='large',
truncate='None'
).embeddings
shape = np.array(embeds).shape
if index_name in pinecone.list_indexes():
pinecone.delete_index(index_name)
pinecone.create_index(
index_name,
dimension=shape[1],
metric='cosine'
)
return embeds, shape
def upsert_data(embeds, shape, index_name, text):
index = pinecone.Index(index_name)
batch_size = 128
ids = [str(i) for i in range(shape[0])]
meta = [{'text': t} for t in text.split('.')]
to_upsert = list(zip(ids, embeds, meta))
for i in range(0, shape[0], batch_size):
i_end = min(i + batch_size, shape[0])
index.upsert(vectors=to_upsert[i:i_end])
return index
def query_pinecone(co, index, k):
query1 = "Where there any announcements in the lecture?"
xq = co.embed(
texts=[query1],
model='large',
truncate='None'
).embeddings
return index.query(xq, top_k=k, include_metadata=True)
def generate_announcements(text, k):
index_name = 'cohere-pinecone-os-cn'
# embeds, shape = create_index(co, index_name, text)
# index = upsert_data(embeds, shape, index_name, text)
index = pinecone.Index(index_name)
return query_pinecone(co, index, k)
def generate_quiz(text):
return co.generate(model='command-xlarge-20221108', prompt=f'Generate a list of 5 interview questions on {text}',
max_tokens=500, temperature=0, k=0, p=1, frequency_penalty=0, presence_penalty=0,
stop_sequences=[], return_likelihoods='NONE').generations[0].text
def remove_empty_strings(string_list):
if type(string_list) != list:
string_list = string_list.split('\n')
return [string.strip() for string in string_list if string]
def semantic_search(query, k):
xq = co.embed(
texts=[query],
model='large',
truncate='None'
).embeddings
index = pinecone.Index('cohere-pinecone-os-cn')
res = index.query(xq, top_k=k, include_metadata=True)
print(res)
| [] |
2024-01-10 | abhishekk0010/metaphor | blog_generator.py | import json
import os
import openai
from metaphor_python import Metaphor
from dotenv import load_dotenv
from bs4 import BeautifulSoup
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
metaphor = Metaphor(os.getenv("METAPHOR_API_KEY"))
class BlogPostGenerator:
def __init__(self, product_name, category):
self.product_name = product_name
self.category = category
self.trending_articles = self.get_trending_articles()
def get_trending_articles(self):
# Using Metaphor API to find trending articles related to the product category
query = f"Here are the Trending articles in {self.category} category"
search_response = metaphor.search(
query, use_autoprompt=True, start_published_date="2023-06-01", num_results = 3
)
trending_articles = search_response.results
return trending_articles
def find_similar_articles(self, article_url_list, num_results=3):
# Use Metaphor API to find similar articles to a given article URL
similar_articles_blog_content = "\n\n---\n\n**Similar Articles:**\n\n"
for item in article_url_list:
similar_articles_response = metaphor.find_similar(item, num_results=num_results)
for result in similar_articles_response.results:
similar_articles_blog_content += f"- [{result.title}]({result.url})\n"
return similar_articles_blog_content
def generate_intro(self, content):
intro_prompt = f"This is the content of three articles separated by ****** : {content}. Based on this , generate a short introduction of 100 words. Add an appropriate topic to the first line. This is the starting part of a blog"
return intro_prompt
def generate_technical_specifications(self, content):
specs_prompt= f"This is the content of three articles separated by ****** : {content}. Based on this , generate a body for a blog along with technical specifications in about 500 words"
return specs_prompt
def generate_conclusion(self, content):
conclusion_prompt= f"This is the content of three articles separated by ****** : {content}. Based on this , generate a conclusion for a blog that is a combination of the given three articles in about 100 words"
return conclusion_prompt
def generate_blog_post(self, content):
# Combining various sections to form a complete blog post
intro = self.generate_intro(content)
specs = self.generate_technical_specifications(content)
conclusion = self.generate_conclusion(content)
# Using GPT-3.5-turbo-instruct to generate content for each section
intro_completion = openai.Completion.create(
engine="gpt-3.5-turbo-instruct",
prompt=intro,
max_tokens=200,
)
specs_completion = openai.Completion.create(
engine="gpt-3.5-turbo-instruct",
prompt=specs,
max_tokens=200,
)
conclusion_completion = openai.Completion.create(
engine="gpt-3.5-turbo-instruct",
prompt=conclusion,
max_tokens=200,
)
blog_content = (
intro_completion.choices[0].text.strip()
+ "\n\n"
+ specs_completion.choices[0].text.strip()
+ "\n\n"
+ conclusion_completion.choices[0].text.strip()
)
trending_urls = [article.url for article in self.trending_articles]
blog_content+= self.find_similar_articles(trending_urls,num_results=3)
return blog_content
def generate_blog_post_batch(self):
trending_ids = [article.id for article in self.trending_articles]
print(trending_ids)
response = metaphor.get_contents(trending_ids)
total_content = ""
for item in response.contents:
cleaned_html = self.clean_html_and_get_text(f'''{item.extract}''')
total_content+= cleaned_html
total_content+= '\n******\n'
post_content = self.generate_blog_post(total_content)
print(post_content)
return post_content
def clean_html_and_get_text(self, html_input):
# Removing HTML tags using BeautifulSoup
soup = BeautifulSoup(html_input, 'html.parser')
text_content = soup.get_text()
return text_content
# Example usage:
product_name = "Apple watch"
category = "Smart Watch" #Give your product category here
blog_post_generator = BlogPostGenerator(product_name, category)
amazon_blog_posts = blog_post_generator.generate_blog_post_batch()
| [
"This is the content of three articles separated by ****** : PLACEHOLDER. Based on this , generate a body for a blog along with technical specifications in about 500 words",
"This is the content of three articles separated by ****** : PLACEHOLDER. Based on this , generate a conclusion for a blog that is a combination of the given three articles in about 100 words",
"This is the content of three articles separated by ****** : PLACEHOLDER. Based on this , generate a short introduction of 100 words. Add an appropriate topic to the first line. This is the starting part of a blog"
] |
2024-01-10 | ji5485/mathematics-capstone | src~packages~roles~base_role.py | import os
import json
from openai import OpenAI
api_key = os.environ.get("OPENAI_API_KEY")
class Role:
def __init__(self, messages, functions):
self.client = OpenAI(api_key=api_key)
self.messages = messages
self.functions = functions
# GPT-3.5와 상호작용하는 함수
def interact(self, message):
self.messages.append({ "role": "user", "content": message })
response = self.client.chat.completions.create(
model="gpt-3.5-turbo",
messages=self.messages,
functions=self.functions,
function_call="auto"
)
self.messages.append(response.choices[0].message)
if response.choices[0].message.function_call is not None:
return {
"function": response.choices[0].message.function_call.name,
"args": json.loads(response.choices[0].message.function_call.arguments)
}
else:
return response.choices[0].message.content
| [] |
2024-01-10 | ji5485/mathematics-capstone | src~packages~models~dall_e.py | import os
from openai import OpenAI
from requests import get
from .base_model import Model
api_key = os.environ.get("OPENAI_API_KEY")
class DallE(Model):
def __init__(self):
self.client = OpenAI(api_key=api_key)
def create(self, prompt, directory):
response = self.client.images.generate(
model="dall-e-3",
prompt=prompt,
size="1792x1024",
quality="standard",
n=1,
)
with open(directory + "/image.png", "wb") as file:
response = get(response.data[0].url)
file.write(response.content)
| [] |
2024-01-10 | stanislawbucior/PoemAnalysisTool | Poem%20Analysis%20Tool%20Final%20version.py | # Please make sure to copy the code to a local environment and add the Secret API Key provided in the separate document.
#Import of the necessary libraries and packages
import tkinter as tk
from tkinter import filedialog, messagebox, ttk, scrolledtext
from openai import OpenAI
import os
from PyPDF2 import PdfReader
import json
import pandas as pd
import time
# Initialize the OpenAI client and prompt the API key to initialize the OpenAI client
client = OpenAI(
api_key=input("Enter the openai api key: ") # Enter the API key
)
# Function to extract text from a PDF file (turn it into a string)
def extract_text_from_pdf(pdf_file_path):
# Initialize an empty string to hold the extracted text
text_content = ""
# Open the PDF file in read-binary mode
with open(pdf_file_path, 'rb') as file:
pdf_reader = PdfReader(file) # Create a PDF reader object
# Iterate through each page in the PDF file
for page in pdf_reader.pages:
# Append the extracted text of each page to the text_content string
text_content += page.extract_text() + "\n"
return text_content
# Function to send a user message to an OpenAI thread
def send_message(thread_id, user_message):
# Call the OpenAI API to create a new message in the specified thread
message = client.beta.threads.messages.create(
thread_id=thread_id,
role="user",
content=user_message
)
# Return the response message object
return message
# Function to check whether the analysis by the api is completed
def wait_on_run(run_id, thread_id):
# Enter a loop that continues until the run is complete or fails
while True:
# Retrieve the status of the run using its ID and the thread ID
run = client.beta.threads.runs.retrieve(
thread_id=thread_id,
run_id=run_id,
)
# Check if the run status is 'completed' and return the run object if so
if run.status == "completed":
return run
# If the run failed or was cancelled, raise an exception
elif run.status in ["failed", "cancelled"]:
raise Exception(f"Run failed or was cancelled: {run.status}")
# Pause the loop for 1 second before checking the status again
time.sleep(1)
# Function to run the assistant and retrieve the response
def run_assistant_and_get_response(assistant_id, thread_id, last_response_id=None):
# Create a new run with the specified assistant ID and thread ID
run = client.beta.threads.runs.create(
thread_id=thread_id,
assistant_id=assistant_id
)
# Wait for the run to complete and then retrieve the list of messages in the thread
run = wait_on_run(run.id, thread_id)
messages = client.beta.threads.messages.list(
thread_id=thread_id
)
# Initialize an empty list to hold the answers
answers = []
latest_response_id = last_response_id
# Iterate through each message in the thread
for message in messages.data:
# Check if the message role is 'assistant' and it's a new message since the last response ID
if message.role == "assistant" and (last_response_id is None or message.id > last_response_id):
try:
# Extract the text value of the message content and append to the answers list
answer = message.content[0].text.value
answers.append(answer)
except AttributeError:
# If there is no text value, print "No reply"
print("No reply")
# Update the latest response ID for the next iteration
latest_response_id = message.id
# Return the latest response ID and the list of answers
return latest_response_id, answers
# Function to display DataFrame in a Treeview widget
def display_dataframe_in_treeview(df):
# Create a new Toplevel window
top_level = tk.Toplevel(window)
top_level.title("DataFrame Output")
# Create the Treeview widget with the correct column identifiers
columns = list(df.columns)
tree = ttk.Treeview(top_level, columns=columns, show='headings')
# Generate the headings
for col in columns:
tree.heading(col, text=col)
tree.column(col, anchor="w")
# Insert the data into the Treeview
for index, row in df.iterrows():
# Ensure that the values are passed in the same order as the columns
tree.insert('', 'end', values=row[columns].tolist())
# Add vertical scrollbar to the Treeview
scrollbar_vertical = ttk.Scrollbar(top_level, orient='vertical', command=tree.yview)
tree.configure(yscrollcommand=scrollbar_vertical.set) # Link scrollbar to the Treeview
scrollbar_vertical.pack(side='right', fill='y') # Pack scrollbar to the UI
#Add horizontal scrollbar to the Treeview
scrollbar_horizontal = ttk.Scrollbar(top_level, orient='horizontal', command=tree.xview)
tree.configure(xscrollcommand=scrollbar_horizontal.set) # Link scrollbar to the Treeview
scrollbar_horizontal.pack(side='bottom', fill='x') # Pack scrollbar to the UI
tree.pack(expand=True, fill='both') # Pack Treeview to the UI to occupy the available space
# Function to process the results of the poem analysis
def process_analysis_results(analysis_result):
# Parse the JSON string into a Python dictionary
analysis_output = json.loads(analysis_result)
# Check if the expected key 'poemAnalysisOutput' is in the JSON
if 'poemAnalysisOutput' in analysis_output:
# Retrieve the poem analysis data
poem_data = analysis_output['poemAnalysisOutput']
# If 'analysis' is a nested dictionary, we normalize it first
if 'analysis' in poem_data:
# Flatten the nested 'analysis' data into a flat structure
analysis_flat = pd.json_normalize(poem_data['analysis'])
# Update the 'poem_data' with the flattened analysis data
poem_data.update(analysis_flat.to_dict(orient='records')[0])
# Remove the now redundant nested 'analysis' key
del poem_data['analysis']
# Create a DataFrame from the poem analysis data
df = pd.DataFrame([poem_data]) # The data is in a dictionary, so let's make a list out of it
display_dataframe_in_treeview(df)
else:
# Inform the user if no analysis data was found in the result
messagebox.showinfo("Result", "No analysis found in the result.")
# Function allowing to select a pdf file
def select_pdf_file():
# Open a file dialog to select a PDF file
file_path = filedialog.askopenfilename(
title="Select a PDF file",
filetypes=[("PDF files", "*.pdf")] # Restrict file dialog to only show PDF files
)
# Restrict file dialog to only show PDF files
if file_path:
entry_pdf_path.delete(0, tk.END) # Clear any existing content in the entry
entry_pdf_path.insert(0, file_path) # Insert the selected file path
# Function to process the text for analysis
def process_text(text):
try:
# ID of the assistant to use for analysis
existing_assistant_id = "asst_E3qfm6X0yQam3oNuHPy7Zq79"
# Create a new thread to communicate with the assistant
thread = client.beta.threads.create()
# Send the text to the assistant for processing
send_message(thread.id, text)
# Wait for the response from the assistant and get answers
last_response_id, answers = run_assistant_and_get_response(existing_assistant_id, thread.id)
# If answers were received, process them
if answers:
# Debug: Print the answer to check if it's valid JSON
print("Received answer:", answers[0])
try:
# Process the analysis results and display them
process_analysis_results(answers[0])
except json.JSONDecodeError as e:
# Handle JSON parsing errors
messagebox.showerror("Error", f"An error occurred in JSON parsing: {e}")
else:
# Inform the user if no answers were received
messagebox.showinfo("Result", "No answers were received for analysis.")
except Exception as e:
# Handle other exceptions and display an error message
messagebox.showerror("Error", f"An error occurred: {e}")
# GUI Functions
# Function to handle the input choice (PDF or Text)
def on_input_choice():
# If the user chooses PDF, hide the text input and show PDF input options
if input_choice.get() == 'PDF':
text_input.pack_forget()
entry_pdf_path.pack(padx=10, pady=5)
button_select_pdf.pack(pady=5)
button_analyze.pack(pady=5)
elif input_choice.get() == 'Text':
# If the user chooses Text, hide the PDF input and show text input options
entry_pdf_path.pack_forget()
button_select_pdf.pack_forget()
text_input.pack(padx=10, pady=5)
button_analyze.pack(pady=5)
# Function to handle PDF analysis
def analyze_pdf():
# Retrieve the file path from the entry field
pdf_file_path = entry_pdf_path.get()
# Check if the file exists
if not os.path.isfile(pdf_file_path):
messagebox.showerror("Error", "The specified file was not found.")
return
# Extract text from the PDF and process it
pdf_text = extract_text_from_pdf(pdf_file_path)
process_text(pdf_text)
# Function to get the text of poem for the analysis
def analyze_text():
# Retrieve the text from the scrolledtext widget
user_text = text_input.get('1.0', tk.END).strip()
# Check if the text is not empty
if not user_text:
messagebox.showerror("Error", "No text to analyze.")
return
# Process the text
process_text(user_text)
# GUI Setup
# Create the main window
window = tk.Tk()
window.title("Poem Analysis Tool")
# Variable to store the input choice
input_choice = tk.StringVar(value='PDF')
# Radio buttons for input choice (PDF or text)
radio_pdf = tk.Radiobutton(window, text="Upload PDF", variable=input_choice, value='PDF', command=on_input_choice)
radio_text = tk.Radiobutton(window, text="Enter Text", variable=input_choice, value='Text', command=on_input_choice)
radio_pdf.pack(anchor='w', padx=10, pady=5)
radio_text.pack(anchor='w', padx=10, pady=5)
# PDF path entry
entry_pdf_path = tk.Entry(window, width=50)
# Select PDF button
button_select_pdf = tk.Button(window, text="Select PDF", command=select_pdf_file)
# Text input area for direct text entry
text_input = scrolledtext.ScrolledText(window, height=10)
# Analyze button for both PDF and text input
button_analyze = tk.Button(window, text="Analyze", command=lambda: analyze_pdf() if input_choice.get() == 'PDF' else analyze_text())
# Initial input choice setup
on_input_choice()
# Start the Tkinter main loop to display and run the GUI
window.mainloop() | [] |
2024-01-10 | Bobskie-Repositories/SPringBoard | backend~springboard_api~controllers~ProjectBoardController.py | import json
from django.shortcuts import render
from django.http import HttpResponse
from rest_framework import generics
from rest_framework.response import Response
from rest_framework import status
from springboard_api.serializers import ProjectBoardSerializer
from springboard_api.models import ProjectBoard, Project
import requests
from django.db.models import Max
from django.conf import settings
import os
from openai import OpenAI
class CreateProjectBoard(generics.CreateAPIView):
serializer_class = ProjectBoardSerializer
def perform_create(self, serializer, data):
serializer.save(**data)
def update_project_score(self, project, add_score):
project.score += add_score
project.save()
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
data = {}
highest_board_id = ProjectBoard.objects.aggregate(Max('boardId'))[
'boardId__max']
new_board_id = highest_board_id + 1 if highest_board_id is not None else 1
# api_url = "https://api.openai.com/v1/engines/text-davinci-003/completions"
prompt = (
f"Please analyze the following data: {request.data.get('content', '')}. "
f"Provide a detailed and critical rating (1-10) in numerical value(not in string) for the following aspects: "
f"\n1. Novelty: Evaluate the originality of the data. "
f"\n2. Technical Feasibility: Assess whether the data is technically sound and feasible. "
f"\n3. Capability: Determine if the data demonstrates capability. "
f"\nRatings below 5 should be considered for data that lacks composition, effort, verbosity, or information. "
f"Be critical and practical when rating. "
f"Include at least 2 specific sentences of advice for improvements (Recommendations) and "
f"2 sentences of feedback on how the data is presented and structured, and what can be done to improve those aspects (Feedback) for each of the above aspects. "
f"The output should be in the following JSON format: "
f"\n'novelty': 'numerical rating', 'technical_feasibility': 'numerical rating', 'capability': 'numerical rating', "
f"'recommendations_novelty': ['specific advice'], 'recommendations_technical_feasibility': [' advice'], "
f"'recommendations_capability': ['specific advice'], 'feedback_novelty': ['specific feedback'], "
f"'feedback_technical_feasibility': ['feedback'], 'feedback_capability': ['specific feedback']. "
f"Ensure a fair and balanced assessment for each aspect."
)
# request_payload = {
# "prompt": prompt,
# "temperature": 0.5,
# "max_tokens": 256,
# "top_p": 1.0,
# "frequency_penalty": 0.0,
# "presence_penalty": 0.0
# }
# headers = {"Authorization": os.environ.get("OPENAI_KEY", "")}
client = OpenAI(api_key=os.environ.get("OPENAI_KEY", ""))
message = [
{"role": "user", "content": prompt}
]
try:
# response = requests.post(
# api_url, json=request_payload, headers=headers)
response = client.chat.completions.create(
model="gpt-3.5-turbo", messages=message, temperature=0, max_tokens=1050
)
if response:
try:
# response_content = response.json()
# print(response_content)
choices = response.choices
first_choice_content = response.choices[0].message.content
# print(first_choice_content)
if choices:
# gpt_response = choices[0]["text"].strip()
gpt_response = first_choice_content
json_response = json.loads(gpt_response)
print(json_response)
novelty = json_response.get("novelty", 0)
technical_feasibility = json_response.get(
"technical_feasibility", 0)
capability = json_response.get("capability", 0)
# recommendations = ' '.join(
# json_response.get("recommendations", []))
# feedback = ' '.join(json_response.get("feedback", []))
recommendations_novelty = json_response.get(
"recommendations_novelty", [])
recommendations_technical_feasibility = json_response.get(
"recommendations_technical_feasibility", [])
recommendations_capability = json_response.get(
"recommendations_capability", [])
feedback_novelty = json_response.get(
"feedback_novelty", [])
feedback_technical_feasibility = json_response.get(
"feedback_technical_feasibility", [])
feedback_capability = json_response.get(
"feedback_capability", [])
recommendations = '\n'.join([
"Novelty Recommendations:\n" +
'\n'.join(recommendations_novelty),
"\n\nTechnical Feasibility Recommendations:\n" +
'\n'.join(
recommendations_technical_feasibility),
"\n\nCapability Recommendations:\n" +
'\n'.join(recommendations_capability)
])
feedback = '\n'.join([
"Novelty Feedback:\n" +
'\n'.join(feedback_novelty),
"\n\nTechnical Feasibility Feedback:\n" +
'\n'.join(feedback_technical_feasibility),
"\n\nCapability Feedback:\n" +
'\n'.join(feedback_capability)
])
# reference_links = ', '.join(
# json_response.get("references", []))
# if not (reference_links.startswith('"') and reference_links.endswith('"')):
# reference_links = f'{reference_links}'
title = request.data.get('title', '')
content = request.data.get('content', '')
project_fk_id = request.data.get('project_fk', None)
data = {
'title': title,
'content': content,
'novelty': novelty,
'technical_feasibility': technical_feasibility,
'capability': capability,
'recommendation': recommendations,
'feedback': feedback,
# 'references': reference_links,
'project_fk': Project.objects.get(id=project_fk_id),
'boardId': new_board_id,
}
project_instance = Project.objects.get(
id=project_fk_id)
add_score = (
(novelty * 0.4) +
(technical_feasibility * 0.3) +
(capability * 0.3)
)
self.update_project_score(
project_instance, add_score)
else:
print("No response content or choices found.")
except json.JSONDecodeError as json_error:
return Response({"error": f"Error decoding JSON response: {json_error}"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
return Response({"error": response.text}, status=status.HTTP_400_BAD_REQUEST)
except requests.exceptions.RequestException as e:
print(f"An error occurred: {e}")
data = {}
if serializer.is_valid():
self.perform_create(serializer, data)
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class GetProjectBoards(generics.ListAPIView):
serializer_class = ProjectBoardSerializer
def get_queryset(self):
project_id = self.kwargs.get('project_id')
# Get the latest distinct project boards for each templateId within the specified project
queryset = ProjectBoard.objects.filter(project_fk_id=project_id).values(
'templateId').annotate(
latest_id=Max('id'),
).values(
'latest_id',
)
return ProjectBoard.objects.filter(id__in=queryset)
def list(self, request, *args, **kwargs):
queryset = self.get_queryset()
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class GetVersionProjectBoards(generics.ListAPIView):
serializer_class = ProjectBoardSerializer
queryset = ProjectBoard.objects.all()
def get(self, request, *args, **kwargs):
projectboard_id = self.kwargs.get('projectboard_id')
try:
projectboard = ProjectBoard.objects.get(id=projectboard_id)
template_id = projectboard.templateId
board_id = projectboard.boardId
# Retrieve related project boards with the same templateId and boardId
related_projectboards = ProjectBoard.objects.filter(
templateId=template_id, boardId=board_id)
# Sort the related project boards in decreasing order of their creation date
related_projectboards = related_projectboards.order_by(
'-created_at')
serializer = ProjectBoardSerializer(
related_projectboards, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
except ProjectBoard.DoesNotExist:
return Response({"error": "ProjectBoard not found"}, status=status.HTTP_404_NOT_FOUND)
except ValueError as e:
return Response({"error": str(e)}, status=status.HTTP_400_BAD_REQUEST)
class GetProjectBoardById(generics.ListAPIView):
serializer_class = ProjectBoardSerializer
queryset = ProjectBoard.objects.all()
def get(self, request, *args, **kwargs):
projectboard_id = self.kwargs.get('projectboard_id')
try:
projectboard = ProjectBoard.objects.get(id=projectboard_id)
serializer = ProjectBoardSerializer(projectboard)
return Response(serializer.data, status=status.HTTP_200_OK)
except ProjectBoard.DoesNotExist:
return Response({"error": "ProjectBoards not found"}, status=status.HTTP_404_NOT_FOUND)
except ValueError as e:
return Response({"error": str(e)}, status=status.HTTP_400_BAD_REQUEST)
class UpdateBoard(generics.CreateAPIView):
serializer_class = ProjectBoardSerializer
def update_project_score(self, project, subtract_score, new_score):
project.score -= subtract_score
project.score += new_score
project.save()
def create(self, request, *args, **kwargs):
data = request.data
project_board_id = kwargs.get('projectboard_id')
try:
project_board = ProjectBoard.objects.get(id=project_board_id)
subtract_score = (
(project_board.novelty * 0.4) +
(project_board.technical_feasibility * 0.3) +
(project_board.capability * 0.3)
)
# api_url = "https://api.openai.com/v1/engines/text-davinci-003/completions"
prompt = (
f"Please analyze the following data: {request.data.get('content', '')}. "
f"Provide a detailed and critical rating (1-10) in numerical value(not in string) for the following aspects: "
f"\n1. Novelty: Evaluate the originality of the data. "
f"\n2. Technical Feasibility: Assess whether the data is technically sound and feasible. "
f"\n3. Capability: Determine if the data demonstrates capability. "
f"\nRatings below 5 should be considered for data that lacks composition, effort, verbosity, or information. "
f"Be critical and practical when rating. "
f"Include at least 2 specific sentences of advice for improvements (Recommendations) and "
f"2 sentences of feedback on how the data is presented and structured, and what can be done to improve those aspects (Feedback) for each of the above aspects. "
f"The output should be in the following JSON format: "
f"\n'novelty': 'numerical rating', 'technical_feasibility': 'numerical rating', 'capability': 'numerical rating', "
f"'recommendations_novelty': ['specific advice'], 'recommendations_technical_feasibility': ['advice'], "
f"'recommendations_capability': ['specific advice'], 'feedback_novelty': ['specific feedback'], "
f"'feedback_technical_feasibility': ['feedback'], 'feedback_capability': ['specific feedback']. "
f"Ensure a fair and balanced assessment for each aspect."
)
# request_payload = {
# "prompt": prompt,
# "temperature": 0.5,
# "max_tokens": 256,
# "top_p": 1.0,
# "frequency_penalty": 0.0,
# "presence_penalty": 0.0
# }
# headers = {
# "Authorization": os.environ.get("OPENAI_KEY") + ""
# }
# response = requests.post(
# api_url, json=request_payload, headers=headers)
client = OpenAI(api_key=os.environ.get("OPENAI_KEY", ""))
message = [
{"role": "user", "content": prompt}
]
response = client.chat.completions.create(
model="gpt-3.5-turbo", messages=message, temperature=0, max_tokens=1050
)
if response:
try:
# response_content = response.json()
# choices = response_content.get("choices", [])
choices = response.choices
first_choice_content = response.choices[0].message.content
if choices:
# gpt_response = choices[0]["text"].strip()
gpt_response = first_choice_content
json_response = json.loads(gpt_response)
# print(json_response)
novelty = json_response.get("novelty", 0)
technical_feasibility = json_response.get(
"technical_feasibility", 0)
capability = json_response.get("capability", 0)
recommendations_novelty = json_response.get(
"recommendations_novelty", [])
recommendations_technical_feasibility = json_response.get(
"recommendations_technical_feasibility", [])
recommendations_capability = json_response.get(
"recommendations_capability", [])
feedback_novelty = json_response.get(
"feedback_novelty", [])
feedback_technical_feasibility = json_response.get(
"feedback_technical_feasibility", [])
feedback_capability = json_response.get(
"feedback_capability", [])
recommendations = '\n'.join([
"Novelty Recommendations:\n" +
'\n'.join(recommendations_novelty),
"\n\nTechnical Feasibility Recommendations:\n" +
'\n'.join(
recommendations_technical_feasibility),
"\n\nCapability Recommendations:\n" +
'\n'.join(recommendations_capability)
])
feedback = '\n'.join([
"Novelty Feedback:\n" +
'\n'.join(feedback_novelty),
"\n\nTechnical Feasibility Feedback:\n" +
'\n'.join(feedback_technical_feasibility),
"\n\nCapability Feedback:\n" +
'\n'.join(feedback_capability)
])
# recommendations = ' '.join(
# json_response.get("recommendations", []))
# feedback = ' '.join(json_response.get("feedback", []))
# reference_links = ', '.join(
# json_response.get("references", []))
# if not (reference_links.startswith('"') and reference_links.endswith('"')):
# reference_links = f'{reference_links}'
data = {
'title': data.get('title', ''),
'content': data.get('content', ''),
'novelty': novelty,
'technical_feasibility': technical_feasibility,
'capability': capability,
'recommendation': recommendations,
'feedback': feedback,
# 'references': reference_links,
'project_fk': project_board.project_fk,
'templateId': project_board.templateId,
'boardId': project_board.boardId,
}
new_board_instance = ProjectBoard(**data)
new_board_instance.save()
project_instance = Project.objects.get(
id=project_board.project_fk.id)
new_score = (
(novelty * 0.4) +
(technical_feasibility * 0.3) + (capability * 0.3)
)
subtract_score = subtract_score
self.update_project_score(
project_instance, subtract_score, new_score)
# if response.status_code != 200:
# return Response({"error": "Failed to update project score"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
return Response({"error": "No response content or choices found"}, status=status.HTTP_400_BAD_REQUEST)
except json.JSONDecodeError as json_error:
return Response({"error": f"Error decoding JSON response: {json_error}"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
return Response({"error": response.text}, status=status.HTTP_400_BAD_REQUEST)
except ProjectBoard.DoesNotExist:
return Response({"error": "ProjectBoard not found"}, status=status.HTTP_404_NOT_FOUND)
except requests.exceptions.RequestException as e:
return Response({"error": f"An error occurred: {e}"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return Response({"id": new_board_instance.id}, status=status.HTTP_201_CREATED)
class DeleteProjectBoard(generics.DestroyAPIView):
queryset = ProjectBoard.objects.all()
serializer_class = ProjectBoardSerializer
lookup_field = 'id'
def destroy(self, request, *args, **kwargs):
try:
# Use get_object_or_404 for cleaner code
instance = self.get_object()
# Calculate subtract_score for the specified project board
subtract_score = (
(instance.novelty * 0.4) +
(instance.technical_feasibility * 0.3) +
(instance.capability * 0.3)
)
# Update the project's score directly in the code
instance.project_fk.score -= subtract_score
instance.project_fk.save()
# Delete all related project boards with the same boardId in a single query
ProjectBoard.objects.filter(boardId=instance.boardId).delete()
return Response(status=status.HTTP_204_NO_CONTENT)
except ProjectBoard.DoesNotExist:
return Response({"error": "ProjectBoard not found"}, status=status.HTTP_404_NOT_FOUND)
except Exception as e:
return Response({"error": str(e)}, status=status.HTTP_400_BAD_REQUEST)
| [
"'recommendations_novelty': ['specific advice'], 'recommendations_technical_feasibility': [' advice'], ",
"Provide a detailed and critical rating (1-10) in numerical value(not in string) for the following aspects: ",
"Be critical and practical when rating. ",
"'recommendations_novelty': ['specific advice'], 'recommendations_technical_feasibility': ['advice'], ",
"\n1. Novelty: Evaluate the originality of the data. ",
"Include at least 2 specific sentences of advice for improvements (Recommendations) and ",
"The output should be in the following JSON format: ",
"\n3. Capability: Determine if the data demonstrates capability. ",
"2 sentences of feedback on how the data is presented and structured, and what can be done to improve those aspects (Feedback) for each of the above aspects. ",
"'recommendations_capability': ['specific advice'], 'feedback_novelty': ['specific feedback'], ",
"'feedback_technical_feasibility': ['feedback'], 'feedback_capability': ['specific feedback']. ",
"Ensure a fair and balanced assessment for each aspect.",
"\n2. Technical Feasibility: Assess whether the data is technically sound and feasible. ",
"\nRatings below 5 should be considered for data that lacks composition, effort, verbosity, or information. ",
"content",
"\n'novelty': 'numerical rating', 'technical_feasibility': 'numerical rating', 'capability': 'numerical rating', "
] |
2024-01-10 | GigaChadLMAO/Japanese_Interesting_Word_Detector | Japanese_Interesting_Word_Detector.py | import streamlit as st
import openai
import json
import pandas as pd
# Get the API key from the sidebar called OpenAI API key
user_api_key = st.sidebar.text_input("OpenAI API key", type="password")
client = openai.OpenAI(api_key=user_api_key)
prompt = """Act as an AI Japanese text's Japanese word detector. You will receive a
piece of writing and you should give Japanese word detail.
List the word detail in a JSON array with 4 columns.
Each word detail should have 4 fields:
- "word" - japanese word
- "romaji" - the word's romaji
- "word level" - word level (EASY, MEDIUM, HARD)
- "translation" - word's translation
Don't say anything at first. Wait for the user to say something.
"""
st.title('Japanese Interesting Word Detector')
st.markdown("""Input Japanese text that you want to Search for interesting word. \n\
The AI will give you the word's romaji, word level and its translation.""")
user_input = st.text_area("Enter Japanese text to search:", "Your text here")
# submit button after text input
if st.button('Search'):
messages_so_far = [
{"role": "system", "content": prompt},
{'role': 'user', 'content': user_input},
]
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=messages_so_far
)
# Show the response from the AI in a box
st.markdown('**AI response:**')
suggestion_dictionary = response.choices[0].message.content
sd = json.loads(suggestion_dictionary)
print (sd)
suggestion_df = pd.DataFrame.from_dict(sd)
print(suggestion_df)
st.table(suggestion_df)
| [
"Act as an AI Japanese text's Japanese word detector. You will receive a \n piece of writing and you should give Japanese word detail.\n List the word detail in a JSON array with 4 columns.\n Each word detail should have 4 fields:\n - \"word\" - japanese word\n - \"romaji\" - the word's romaji\n - \"word level\" - word level (EASY, MEDIUM, HARD)\n - \"translation\" - word's translation\n Don't say anything at first. Wait for the user to say something.\n "
] |
2024-01-10 | tillo13/microsoft_bot_framework | INTEGRATIONS~SLACK~dalle_utils.py | #passed in from from slack_events_listener.py
import openai
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
from dotenv import load_dotenv
from urllib.parse import unquote, urlparse
from datetime import datetime
import os
import requests
from PIL import Image
from io import BytesIO
import time
import sys
import traceback
import json
# loading environment variables from .env file
load_dotenv('../../.env')
# setting OpenAI variables
openai.api_key = os.getenv('OPENAI_DALLE_API_KEY')
openai.api_type = "azure"
openai.api_base = os.getenv('OPENAI_DALLE_BASE_URL')
openai.api_version = os.getenv('OPENAI_DALLE_VERSION')
# initializing slack client
slack_token = os.getenv('SLACK_BOT_TOKEN')
client = WebClient(token=slack_token)
def parse_dalle_command(command_text):
n_images = 3 # set the default value
prompt = command_text.strip()
if '--' in command_text:
command_parts = command_text.split(' ')
for index, part in enumerate(command_parts):
if '--' in part:
try:
n_images = min(int(part.replace('--', '')), 5) # capping images at 5
command_parts.pop(index) # remove this part from the command
prompt = ' '.join(command_parts).strip() # recreate the prompt
except ValueError:
pass
return n_images, prompt
def generate_image(event, channel_id, prompt, n_images, VERBOSE_MODE):
print(f"COMMAND RECEIVED: Ask DALL-E for {n_images} images...")
start_time = time.time() # records the start time
# Load the costs dictionary
with open('openai_costs_2023sept7.json') as f:
costs = json.load(f)
# Get the cost of DALL·E image models 1024x1024
DALL_E_COST_PER_IMAGE = costs["Other Models"]["Image Models"]["1024×1024"]
estimated_cost = format(DALL_E_COST_PER_IMAGE * n_images, '.4f')
# Check if entered number was more than limit and send Slack message
command_parts = event["text"].split(' ')
for index, part in enumerate(command_parts):
if '--' in part:
try:
entered_number = int(part.replace('--', ''))
if entered_number > 5:
warning_message = f":exclamation: Doh! You requested {entered_number} images, but the maximum is 5. We'll proceed with 5 images."
print(warning_message) # Output warning message to terminal
client.chat_postMessage(channel=channel_id, text=warning_message, thread_ts=event["ts"]) # Send warning to user via Slack
except ValueError:
pass
# Initial message with bot animation and prompt
initial_message_block = [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": f":robot_face: *Connecting to DALL-E for your {n_images} images, please stand by...*\n\n*...Dall-E is creating for:* `{prompt}`..."
}
}
]
client.chat_postMessage(
channel=channel_id,
thread_ts=event["ts"],
text="Generating images with DALL-E...",
blocks=initial_message_block
)
# Before entering the for loop
total_orig_size = 0
total_final_size = 0
filename = 'N/A'
try:
# Request image from DALL-E
response = openai.Image.create(
prompt=prompt,
n=n_images
)
# Print the complete response from DALL-E
print("RESPONSE FROM DALLE_OPENAI: ", response)
if VERBOSE_MODE: # if VERBOSE_MODE was passed here as argument
client.chat_postMessage(
channel=channel_id,
thread_ts=event["ts"],
text = "*VERBOSE MODE ENABLED. Posting DETAILED additional information from the call...*",
)
client.chat_postMessage(
channel=channel_id,
thread_ts=event["ts"],
text = f"The DALLE-OPENAI Response: {response}", # perhaps could choose to prettify
)
# Check if folder exists, if not, create it
if not os.path.exists('GENERATED_IMAGES'):
os.makedirs('GENERATED_IMAGES')
#process each file
for index, image_data in enumerate(response["data"]):
# Initialize these variables at the start of the loop for each image data
original_size_in_MB = 0
final_size_in_MB = 0
if 'error' in image_data:
# image data contains an error
error_details = image_data['error']
error_message = f"Problem with image `{index+1}`...\n*{error_details['code']}*: `{error_details['message']}`\nContinuing..."
client.chat_postMessage(
channel=channel_id,
thread_ts=event["ts"],
text=error_message
)
continue # skip rest of the loop for current image
image_url = image_data["url"]
print(f"DALL-E QUERY {index+1} COMPLETED...")
# Take the first 15 characters of the prompt
short_prompt = prompt[:15]
# Replace any non-alphanumeric characters with underscores
short_prompt = "".join(c if c.isalnum() else "_" for c in short_prompt)
# Make it lowercase
short_prompt = short_prompt.lower()
filename = f"dalle_{short_prompt}_{index+1}_of_{n_images}.png"
print(f"SHORTENED FILENAME: {filename} \nDALL-E QUERY {index+1} COMPLETED...")
print("DOWNLOADING GENERATED IMAGE...")
# Download image
image_response = requests.get(image_url)
file_data = image_response.content
# Original size
original_size_in_MB = len(file_data) / (1024*1024) # This line was moved up
total_orig_size += original_size_in_MB # This line was moved down
# parsing SAS token for image details
parsed = urlparse(image_url)
sas_token = dict((k, unquote(v)) for k, v in (item.split('=') for item in parsed.query.split('&')))
# Parse the SAS token data into a more human-readable message
expires_at = datetime.strptime(sas_token.get('se'), '%Y-%m-%dT%H:%M:%SZ')
now = datetime.utcnow()
time_remain = expires_at - now
hours, remainder = divmod(time_remain.total_seconds(), 3600)
minutes, _ = divmod(remainder, 60)
sas_details = f'Filename: {filename}\n'
sas_details += f'Full-sized Azure version accessible until: {expires_at}.\n'
sas_details += f'Therefore, expires in about {int(hours)} hours and {int(minutes)} minutes)\n'
sas_details += f'Azure image URL: `{image_url}`\n'
#sas_details += f"Allowed Protocols: {sas_token.get('spr')}\n" # https
#sas_details += f"Resource type: {sas_token.get('sr')} (b = blob)\n" # b means blob type
#sas_details += f"Storage Services Version (sv): {sas_token.get('sv')}\n"
#sas_details += f"Permissions (sp): {sas_token.get('sp')}\n" # r means read access
#sas_details += f"Signature (sig) for the token: [HIDDEN FOR SECURITY]\n" # Signature should be hidden for security reasons
#sas_details += f"Storage Service Version ID (skoid): {sas_token.get('skoid')}\n"
#sas_details += f"Signing Key (sks): {sas_token.get('sks')}\n"
#sas_details += f"Key Start Time (skt): {sas_token.get('skt')}\n"
#sas_details += f"Tenant ID for Azure Storage Service (sktid): {sas_token.get('sktid')}\n"
print("DOWNLOADING GENERATED IMAGE...")
# Download image
image_response = requests.get(image_url)
file_data = image_response.content
# Original size
#original_size_in_MB = len(file_data) / (1024*1024)
#total_orig_size += original_size_in_MB # Add original size to the total
# if image if over 3MB, let's reduce the size
if len(file_data) > 3e6: # 3e6 = 3MB
print("IMAGE SIZE OVER 3MB, STARTING TO RESIZE...")
img = Image.open(BytesIO(file_data))
scale_factor = 1
# original size
original_size_in_MB = len(file_data) / (1024*1024)
while len(file_data) > 2e6:
scale_factor *= 0.9
new_size = (int(img.size[0] * scale_factor), int(img.size[1] * scale_factor))
img_resized = img.resize(new_size)
print(f"IMAGE RESIZED TO : {new_size}")
byte_arr = BytesIO()
img_resized.save(byte_arr, format='PNG')
file_data = byte_arr.getvalue()
short_prompt = prompt[:15].lower()
# Replace any non-alphanumeric characters with underscores
short_prompt = "".join(c if c.isalnum() else "_" for c in short_prompt)
img_resized.save(os.path.join('GENERATED_IMAGES', f"dalle_{short_prompt}_{index+1}_of_{n_images}.png"))
filepath = os.path.join('GENERATED_IMAGES', f"dalle_{short_prompt}_{index+1}_of_{n_images}.png")
if os.path.isfile(filepath):
final_size_in_MB = len(file_data) / (1024*1024) # converted from Bytes to Megabytes
size_reduction = original_size_in_MB - final_size_in_MB
total_final_size += final_size_in_MB # Add final size to the total
size_reduction_percent = (size_reduction / original_size_in_MB) * 100 # the percentage of the reduction
print(f"Original size: {format(original_size_in_MB, '.2f')} MB")
print(f"Final size: {format(final_size_in_MB, '.2f')} MB")
print(f"Size reduction: {format(size_reduction, '.2f')} MB - {format(size_reduction_percent, '.2f')}%")
print("UPLOADING THE RESIZED IMAGE TO SLACK...")
try:
with open(filepath, 'rb') as file:
files = {'file': file}
too_large_message = f"{filename} is over 3MB, it's being reduced in size..."
payload = {
#"initial_comment": filename,
"channels": channel_id,
"thread_ts": event["ts"],
}
headers = {
"Authorization": "Bearer {}".format(slack_token)
}
# Here, you are uploading the image first.
response = requests.post(
"https://slack.com/api/files.upload",
headers=headers, files=files, data=payload
)
if not response.json()['ok']:
raise SlackApiError(response.json()['error'])
image_num = index + 1 # We add 1 because `index` starts from 0
# Now send the image details block message after successful upload
trimmed_image_url = image_url.replace('https://', '')
block_message = [
{
"type": "context",
"elements": [
{
"type": "mrkdwn",
"text": (f":information_source: *This is image:* _{image_num}_ *of* _{n_images}_.\n"
f":robot_face: Your prompt was: `$dalle {prompt}`\n"
f"*Filename:* `{filename}`\n"
f"*Full-sized Azure URL:* `{trimmed_image_url}`\n"
f"*Azure version accessible until:* `{expires_at}`\n"
f"*Azure version Expires in:* `{int(hours)} hours and {int(minutes)} minutes`\n"
f"*Original file size:* `{format(original_size_in_MB, '.2f')} MB`\n"
f"*Final file size:* `{format(final_size_in_MB, '.2f')} MB`\n"
f"*Size reduction:* `{format(size_reduction, '.2f')} MB` - `{format(size_reduction_percent, '.2f')}%`\n"
)
}
]
},
{"type": "divider"}
]
client.chat_postMessage(
channel=channel_id,
thread_ts=event["ts"],
text=f"Posting image number {image_num+1} generated by DALL-E...",
blocks=block_message,
)
print("IMAGE AND IMAGE DETAILS SUCCESSFULLY UPLOADED TO SLACK...")
except SlackApiError as e:
print("FAILED TO UPLOAD THE IMAGE TO SLACK... SENDING THE URL INSTEAD...")
client.chat_postMessage(
channel=channel_id,
thread_ts=event["ts"],
text=f"Failed to upload image to Slack: {str(e)}. Here is the URL to your image: {image_url}",
)
except openai.error.OpenAIError as o:
if "safety system" in str(o):
error_message = f"Out of an abundance of caution, OpenAI flagged the image `{filename}` as inappropriate. Please try a different prompt."
else:
error_message = f"Encountered an error while working with OpenAI API: {str(o)}. Please try again later."
client.chat_postMessage(
channel=channel_id,
thread_ts=event["ts"],
text=error_message
)
except SlackApiError as e:
error_message = f"Encountered an issue while working with Slack API: {str(e)}. Please try again later."
client.chat_postMessage(
channel=channel_id,
thread_ts=event["ts"],
text=error_message
)
except Exception as e:
error_type, error_value, error_traceback = sys.exc_info()
tb_str = traceback.format_exception(error_type, error_value, error_traceback)
error_message = f"An error occurred: {error_value} \n {''.join(tb_str)}"
client.chat_postMessage(
channel=channel_id,
thread_ts=event["ts"],
text="We've encountered an unexpected error. Please try again later."
)
# Summary block
total_reduction = total_orig_size - total_final_size
total_reduction_percent = 0 # set to 0 by default
if total_orig_size > 0:
total_reduction_percent = (total_reduction / total_orig_size) * 100
end_time = time.time()
elapsed_time = end_time - start_time
minutes, seconds = divmod(elapsed_time, 60)
# Prepare summary message
summary_message = [
{
"type": "context",
"elements": [
{
"type": "mrkdwn",
"text": (
f":information_source: Your prompt was: `$dalle {prompt}` \n"
f"You asked for {n_images} images.\n"
f"*Estimated entire cost for this transaction*: `${estimated_cost}`\n"
f"The total size from DALL-E of all the images was `{format(total_orig_size, '.2f')}MB`\n"
f"We shrunk the file cumulatively down to: `{format(total_final_size, '.2f')}MB`\n"
f"This is an overall reduction of `{format(total_reduction_percent, '.2f')}%`.\n"
f"The total time to complete this was `{int(minutes)} minutes and {int(seconds)} seconds`\n"
f"Try again with a new `$dalle` prompt.\n"
f"❓Get help at any time with `$help`."
)
}
]
},
{"type": "divider"},
]
# Post the summary message
client.chat_postMessage(
channel=channel_id,
thread_ts=event["ts"],
text="Summary of DALL-E image generation request...",
blocks=summary_message,
)
| [
" ",
"placeholder"
] |
2024-01-10 | athina-ai/ariadne | ariadne_ai~llms~text_summarization~question_generator.py | from ..open_ai_completion import OpenAICompletion
class QuestionGenerator:
"""
Generates closed-ended (Yes/No) questions given a text.
Attributes:
n_questions (int): Number of questions to generate.
openAIcompletion (OpenAICompletion): Instance for interactions with OpenAI's API.
"""
# Pre-defined prompts for OpenAI's GPT model
SYSTEM_MESSAGE = """
You are an expert at generating closed-ended (Yes/No) questions given the content of a text.
"""
USER_MESSAGE_TEMPLATE = """
Let's think step by step.
1. Consider the text: {}.
2. Generate {} closed-ended (Yes/No) questions based on the content.
3. Return a JSON object in the following format: "question 1": 'Your question', "question 2": 'Your next question', ...
"""
def __init__(self, model: str, n_questions: int, open_ai_key:str):
"""
Initialize the QuestionGenerator.
"""
self.n_questions = n_questions
self.openAIcompletion = OpenAICompletion(model, open_ai_key)
def generate(self, text: str) -> dict:
"""
Generate a set of closed-ended questions based on the provided text.
Args:
text (str): The reference content used to generate questions.
Returns:
dict: A dictionary of generated questions with keys indicating the question order and values being the questions themselves.
"""
user_message = self.USER_MESSAGE_TEMPLATE.format(text, self.n_questions)
message = [
{'role': 'system', 'content': self.SYSTEM_MESSAGE},
{'role': 'user', 'content': user_message}
]
openai_response = self.openAIcompletion.get_completion_from_messages(message)
openai_response_json = self.openAIcompletion.extract_json_from_response(openai_response)
return openai_response_json
| [
"\n Let's think step by step.\n 1. Consider the text: {}.\n 2. Generate {} closed-ended (Yes/No) questions based on the content.\n 3. Return a JSON object in the following format: \"question 1\": 'Your question', \"question 2\": 'Your next question', ...\n "
] |
2024-01-10 | athina-ai/ariadne | ariadne_ai~llms~text_summarization~question_answerer.py | from ..open_ai_completion import OpenAICompletion
class QuestionAnswerer:
"""
This class determines whether the chatbot's answer was correct based on
the given content and user's question.
Attributes:
openAIcompletion (OpenAICompletion): Instance for interactions with OpenAI's API.
"""
# Pre-defined prompts for OpenAI's GPT model
SYSTEM_MESSAGE = """
You are an expert at responding to closed-ended (Yes/No) questions using ONLY the provided context.
"""
USER_MESSAGE_TEMPLATE = """
Let's think step by step.
1. Consider the following:
Questions: {}.
Context: {}.
2. Respond to each question from the provided 'questions', using either
'Yes', 'No', or 'Unknown', based on the given context.
3. Return a JSON object in the following format: "question1": "answer1", "question2": "answer2",...
"""
def __init__(self, model, open_ai_key):
"""
Initialize the QuestionAnswerer class.
"""
self.openAIcompletion = OpenAICompletion(model, open_ai_key)
def answer(self, questions: str, context: str) -> dict:
"""
Respond to each question from the provided 'questions' given the context.
Args:
questions (str): A set of questions posed to the chatbot.
context (str): Context used to inform the chatbot's answers.
Returns:
dict: Evaluation results formatted as a dictionary with questions as keys and
'Yes', 'No', or 'Unknown' as values.
"""
user_message = self.USER_MESSAGE_TEMPLATE.format(questions, context)
message = [
{"role": "system", "content": self.SYSTEM_MESSAGE},
{"role": "user", "content": user_message},
]
openai_response = self.openAIcompletion.get_completion_from_messages(message)
openai_response_json = self.openAIcompletion.extract_json_from_response(
openai_response
)
return openai_response_json
| [
"\n Let's think step by step.\n 1. Consider the following: \n Questions: {}.\n Context: {}.\n 2. Respond to each question from the provided 'questions', using either \n 'Yes', 'No', or 'Unknown', based on the given context.\n 3. Return a JSON object in the following format: \"question1\": \"answer1\", \"question2\": \"answer2\",...\n "
] |
2024-01-10 | athina-ai/ariadne | ariadne_ai~llms~base_llm_evaluator.py | from abc import ABC, abstractmethod
from typing import Optional
from dotenv import load_dotenv
import os
from .open_ai_completion import OpenAICompletion
load_dotenv()
class BaseLlmEvaluator(ABC):
def __init__(
self,
model: str,
open_ai_key: str,
athina_api_key: Optional[str] = None,
metadata: Optional[dict] = None,
):
self.metadata = metadata
self.open_ai_key = (
open_ai_key if open_ai_key is not None else os.getenv("OPENAI_API_KEY")
)
if self.open_ai_key is None:
raise ValueError(
"You must provide an OpenAI API key or set the OPENAI_API_KEY environment variable."
)
self.open_ai_completion = OpenAICompletion(
model=model,
open_ai_key=self.open_ai_key,
athina_api_key=athina_api_key,
metadata=metadata,
)
| [] |
2024-01-10 | isLinXu/prompt-engineering-note | source~cli~cli_py.py | import os
import argparse
from dotenv import load_dotenv, find_dotenv
import openai
_ = load_dotenv(find_dotenv())
openai.api_key = os.getenv('OPENAI_API_KEY')
def get_completion(prompt, model="gpt-3.5-turbo"):
messages = [{"role": "user", "content": prompt}]
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=0,
)
return response.choices[0].message["content"]
parser = argparse.ArgumentParser(description='Generate text using OpenAI GPT-3.')
parser.add_argument('--prompt', '-p', required=True, help='The prompt to generate text from')
parser.add_argument('--model', '-m', default='gpt-3.5-turbo', help='The GPT-3 model to use')
args = parser.parse_args()
output = get_completion(args.prompt, args.model) | [] |
2024-01-10 | jS5t3r/consistency_models | cm~logger.py | """
Logger copied from OpenAI baselines to avoid extra RL-based dependencies:
https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/logger.py
"""
import os
import sys
import shutil
import os.path as osp
import json
import time
import datetime
import tempfile
import warnings
from collections import defaultdict
from contextlib import contextmanager
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
def writekvs(self, kvs):
raise NotImplementedError
class SeqWriter(object):
def writeseq(self, seq):
raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file):
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, "wt")
self.own_file = True
else:
assert hasattr(filename_or_file, "read"), (
"expected file or str, got %s" % filename_or_file
)
self.file = filename_or_file
self.own_file = False
def writekvs(self, kvs):
# Create strings for printing
key2str = {}
for (key, val) in sorted(kvs.items()):
if hasattr(val, "__float__"):
valstr = "%-8.3g" % val
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
# Find max widths
if len(key2str) == 0:
print("WARNING: tried to write empty key-value dict")
return
else:
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
# Write out the data
dashes = "-" * (keywidth + valwidth + 7)
lines = [dashes]
for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()):
lines.append(
"| %s%s | %s%s |"
% (key, " " * (keywidth - len(key)), val, " " * (valwidth - len(val)))
)
lines.append(dashes)
self.file.write("\n".join(lines) + "\n")
# Flush the output to the file
self.file.flush()
def _truncate(self, s):
maxlen = 30
return s[: maxlen - 3] + "..." if len(s) > maxlen else s
def writeseq(self, seq):
seq = list(seq)
for (i, elem) in enumerate(seq):
self.file.write(elem)
if i < len(seq) - 1: # add space unless this is the last one
self.file.write(" ")
self.file.write("\n")
self.file.flush()
def close(self):
if self.own_file:
self.file.close()
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, "wt")
def writekvs(self, kvs):
for k, v in sorted(kvs.items()):
if hasattr(v, "dtype"):
kvs[k] = float(v)
self.file.write(json.dumps(kvs) + "\n")
self.file.flush()
def close(self):
self.file.close()
class CSVOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, "w+t")
self.keys = []
self.sep = ","
def writekvs(self, kvs):
# Add our current row to the history
extra_keys = list(kvs.keys() - self.keys)
extra_keys.sort()
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(",")
self.file.write(k)
self.file.write("\n")
for line in lines[1:]:
self.file.write(line[:-1])
self.file.write(self.sep * len(extra_keys))
self.file.write("\n")
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(",")
v = kvs.get(k)
if v is not None:
self.file.write(str(v))
self.file.write("\n")
self.file.flush()
def close(self):
self.file.close()
class TensorBoardOutputFormat(KVWriter):
"""
Dumps key/value pairs into TensorBoard's numeric format.
"""
def __init__(self, dir):
os.makedirs(dir, exist_ok=True)
self.dir = dir
self.step = 1
prefix = "events"
path = osp.join(osp.abspath(dir), prefix)
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.core.util import event_pb2
from tensorflow.python.util import compat
self.tf = tf
self.event_pb2 = event_pb2
self.pywrap_tensorflow = pywrap_tensorflow
self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
def writekvs(self, kvs):
def summary_val(k, v):
kwargs = {"tag": k, "simple_value": float(v)}
return self.tf.Summary.Value(**kwargs)
summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
event.step = (
self.step
) # is there any reason why you'd want to specify the step?
self.writer.WriteEvent(event)
self.writer.Flush()
self.step += 1
def close(self):
if self.writer:
self.writer.Close()
self.writer = None
def make_output_format(format, ev_dir, log_suffix=""):
os.makedirs(ev_dir, exist_ok=True)
if format == "stdout":
return HumanOutputFormat(sys.stdout)
elif format == "log":
return HumanOutputFormat(osp.join(ev_dir, "log%s.txt" % log_suffix))
elif format == "json":
return JSONOutputFormat(osp.join(ev_dir, "progress%s.json" % log_suffix))
elif format == "csv":
return CSVOutputFormat(osp.join(ev_dir, "progress%s.csv" % log_suffix))
elif format == "tensorboard":
return TensorBoardOutputFormat(osp.join(ev_dir, "tb%s" % log_suffix))
else:
raise ValueError("Unknown format specified: %s" % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
If called many times, last value will be used.
"""
get_current().logkv(key, val)
def logkv_mean(key, val):
"""
The same as logkv(), but if called many times, values averaged.
"""
get_current().logkv_mean(key, val)
def logkvs(d):
"""
Log a dictionary of key-value pairs
"""
for (k, v) in d.items():
logkv(k, v)
def dumpkvs():
"""
Write all of the diagnostics from the current iteration
"""
return get_current().dumpkvs()
def getkvs():
return get_current().name2val
def log(*args, level=INFO):
"""
Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
"""
get_current().log(*args, level=level)
def debug(*args):
log(*args, level=DEBUG)
def info(*args):
log(*args, level=INFO)
def warn(*args):
log(*args, level=WARN)
def error(*args):
log(*args, level=ERROR)
def set_level(level):
"""
Set logging threshold on current logger.
"""
get_current().set_level(level)
def set_comm(comm):
get_current().set_comm(comm)
def get_dir():
"""
Get directory that log files are being written to.
will be None if there is no output directory (i.e., if you didn't call start)
"""
return get_current().get_dir()
record_tabular = logkv
dump_tabular = dumpkvs
@contextmanager
def profile_kv(scopename):
logkey = "wait_" + scopename
tstart = time.time()
try:
yield
finally:
get_current().name2val[logkey] += time.time() - tstart
def profile(n):
"""
Usage:
@profile("my_func")
def my_func(): code
"""
def decorator_with_name(func):
def func_wrapper(*args, **kwargs):
with profile_kv(n):
return func(*args, **kwargs)
return func_wrapper
return decorator_with_name
# ================================================================
# Backend
# ================================================================
def get_current():
if Logger.CURRENT is None:
_configure_default_logger()
return Logger.CURRENT
class Logger(object):
DEFAULT = None # A logger with no output files. (See right below class definition)
# So that you can still log to the terminal without setting up any output files
CURRENT = None # Current logger being used by the free functions above
def __init__(self, dir, output_formats, comm=None):
self.name2val = defaultdict(float) # values this iteration
self.name2cnt = defaultdict(int)
self.level = INFO
self.dir = dir
self.output_formats = output_formats
self.comm = comm
# Logging API, forwarded
# ----------------------------------------
def logkv(self, key, val):
self.name2val[key] = val
def logkv_mean(self, key, val):
oldval, cnt = self.name2val[key], self.name2cnt[key]
self.name2val[key] = oldval * cnt / (cnt + 1) + val / (cnt + 1)
self.name2cnt[key] = cnt + 1
def dumpkvs(self):
if self.comm is None:
d = self.name2val
else:
d = mpi_weighted_mean(
self.comm,
{
name: (val, self.name2cnt.get(name, 1))
for (name, val) in self.name2val.items()
},
)
if self.comm.rank != 0:
d["dummy"] = 1 # so we don't get a warning about empty dict
out = d.copy() # Return the dict for unit testing purposes
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(d)
self.name2val.clear()
self.name2cnt.clear()
return out
def log(self, *args, level=INFO):
if self.level <= level:
self._do_log(args)
# Configuration
# ----------------------------------------
def set_level(self, level):
self.level = level
def set_comm(self, comm):
self.comm = comm
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
# Misc
# ----------------------------------------
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
def get_rank_without_mpi_import():
# check environment variables here instead of importing mpi4py
# to avoid calling MPI_Init() when this module is imported
for varname in ["PMI_RANK", "OMPI_COMM_WORLD_RANK"]:
if varname in os.environ:
return int(os.environ[varname])
return 0
def mpi_weighted_mean(comm, local_name2valcount):
"""
Copied from: https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110
Perform a weighted average over dicts that are each on a different node
Input: local_name2valcount: dict mapping key -> (value, count)
Returns: key -> mean
"""
all_name2valcount = comm.gather(local_name2valcount)
if comm.rank == 0:
name2sum = defaultdict(float)
name2count = defaultdict(float)
for n2vc in all_name2valcount:
for (name, (val, count)) in n2vc.items():
try:
val = float(val)
except ValueError:
if comm.rank == 0:
warnings.warn(
"WARNING: tried to compute mean on non-float {}={}".format(
name, val
)
)
else:
name2sum[name] += val * count
name2count[name] += count
return {name: name2sum[name] / name2count[name] for name in name2sum}
else:
return {}
def configure(dir=None, format_strs=None, comm=None, log_suffix=""):
"""
If comm is provided, average all numerical stats across that comm
"""
if dir is None:
dir = os.getenv("OPENAI_LOGDIR")
if dir is None:
dir = osp.join(
# tempfile.gettempdir(),
"/home/lorenzp/workspace/consistency_model/run",
datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"),
)
assert isinstance(dir, str)
dir = os.path.expanduser(dir)
os.makedirs(os.path.expanduser(dir), exist_ok=True)
rank = get_rank_without_mpi_import()
if rank > 0:
log_suffix = log_suffix + "-rank%03i" % rank
if format_strs is None:
if rank == 0:
format_strs = os.getenv("OPENAI_LOG_FORMAT", "stdout,log,csv").split(",")
else:
format_strs = os.getenv("OPENAI_LOG_FORMAT_MPI", "log").split(",")
format_strs = filter(None, format_strs)
output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
if output_formats:
log("Logging to %s" % dir)
def _configure_default_logger():
configure()
Logger.DEFAULT = Logger.CURRENT
def reset():
if Logger.CURRENT is not Logger.DEFAULT:
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log("Reset logger")
@contextmanager
def scoped_configure(dir=None, format_strs=None, comm=None):
prevlogger = Logger.CURRENT
configure(dir=dir, format_strs=format_strs, comm=comm)
try:
yield
finally:
Logger.CURRENT.close()
Logger.CURRENT = prevlogger
| [] |
2024-01-10 | zhbitzwz/chatgpt-empower-wechat | handler~wechat~wechaty_todo_plugin.py | import json
import logging
from typing import Union
from wechaty import WechatyPlugin, Wechaty, Contact, Room, Message
from openai_.openai_default import text_ai
from util.scheduler_ import schedulerTodoTask, removeTask, getTaskList
class WechatyTodoPoster(WechatyPlugin):
def __init__(self):
super().__init__()
async def init_plugin(self, wechaty: Wechaty) -> None:
await super().init_plugin(wechaty)
async def on_message(self, msg: Message) -> None:
text = msg.text()
fromContact = msg.talker()
room = msg.room()
conversation: Union[
Room, Contact] = fromContact if room is None else room
if "#" in text and "提醒我" in text:
try:
response_text = text_ai(
'解析这句话中的时间和提醒事件,格式化时间为cron形式以"[minute, hour, day of month, month, day of week]"排序时间参数,并且忽略秒,以["时间","事件"],其中引号需要变为双引号返回给我。例如:["0 18 * * *","提醒我下班打卡"]' + f"'{text}'")
# index0:dict 时间,index1:地点
time_corn_and_todo: list = json.loads(
response_text[0].replace("\n", "").replace("答案", "").replace("answer", "").replace("=", "").replace(
"#:", "").replace("#:", "")
)
time_dict: str = time_corn_and_todo[0]
todo = time_corn_and_todo[1]
await schedulerTodoTask(conversation=conversation, timer=time_dict, args=[conversation, todo])
except Exception as e:
logging.error(e)
if "already" not in e.__str__():
await conversation.say("初始化失败,请稍后再试!")
else:
await conversation.say("设置成功!")
conv_id = conversation.contact_id if isinstance(conversation, Contact) else conversation.room_id
if "#" in text and ("删除任务" in text or "删除" in text):
index_msg = ''
if len(msg.text().split('#删除任务') ) > 1:
index_msg = msg.text().split('#删除任务')[1].replace(" ", "")
else:
index_msg= msg.text().split('#删除')[1].replace(" ", "")
await removeTask(conv_id, int(index_msg),conversation)
if "#" in text and ("推送列表" in text or "任务列表" in text):
task_str = "\n".join(getTaskList(conv_id))
await conversation.say(task_str)
| [] |
2024-01-10 | zhbitzwz/chatgpt-empower-wechat | handler~wechat~wechaty_weather_plugin.py | import json
import logging
from typing import Union
from wechaty import WechatyPlugin, Wechaty, Message, Contact, Room
from handler.scheduler_h.schedulers_handler import sendWeather
from openai_.openai_default import text_ai
from util.scheduler_ import schedulerWeatherTask
class WechatyWeatherPoster(WechatyPlugin):
def __init__(self):
super().__init__()
async def init_plugin(self, wechaty: Wechaty) -> None:
await super().init_plugin(wechaty)
async def on_message(self, msg: Message) -> None:
"""
推送 天气
:param msg:
:return:
"""
text = msg.text()
fromContact = msg.talker()
room = msg.room()
conversation: Union[
Room, Contact] = fromContact if room is None else room
if "#" in text and "天气" in text and "推送" in text:
try:
response_text = text_ai(
f'解析这句话中的时间地点日期'+text+',如果其中有时间则格式化时间为cron形式以"minute, hour, day of month, month, day of week"排序时间参数并且忽略秒,如果这句话里有今天,明天,后天作为日期提取出来放在第三个参数中,如果没有默认为今天。以["时间","地点","日期"],其中引号需要变为双引号返回给我。例如:["0 18 * * *","武汉","今天"] ,[None,"武汉","今天"]')
# index0:dict 时间,index1:地点 index2:日期
time_corn_and_city: list = json.loads(
response_text[0].replace("\n", "").replace("答案", "").replace("answer", "").replace("=", "").replace(
"#:", "").replace("#:", "")
)
time_dict: str = time_corn_and_city[0]
city = time_corn_and_city[1]
day = time_corn_and_city[2]
if time_dict.__eq__('None'):
await sendWeather(conversation, city,day)
return
await schedulerWeatherTask(conversation=conversation, timer=time_dict, args=[conversation, city,day])
except Exception as e:
logging.error(e)
if "already" not in e.__str__():
await conversation.say("初始化失败,请稍后再试!")
else:
await conversation.say("设置成功!")
| [] |
2024-01-10 | zhbitzwz/chatgpt-empower-wechat | handler~wechat~wechat_ai.py | import json
import logging
import random
from typing import Union, List
from wechaty import WechatyPlugin, Wechaty, Message, Room, Contact
from wechaty.user import room
from wechaty_puppet import FileBox
from base import redis
from openai_.openai_default import text_ai, img_ai
class WechatAI(WechatyPlugin):
def __init__(self):
super().__init__()
async def init_plugin(self, wechaty: Wechaty) -> None:
await super().init_plugin(wechaty)
async def on_message(self, msg: Message) -> None:
is_mention_bot = await msg.mention_self()
is_self = msg.talker().is_self()
conversation: Union[
Room, Contact] = msg.talker() if msg.room() is None else msg.room()
mention_user = None
if is_mention_bot:
mention_user = [msg.talker().contact_id]
is_room = msg.room()
# 处理疯狂回复微信团队消息
if is_room is None and conversation.get_id().__eq__('weixin'):
return
if "HandOffMaster" in msg.text():
return
if "weixin://dl/feedback?from=" in msg.text():
return
# 处理黑名单
name = msg.talker().name
black_list = redis.lrange("black_list", 0, -1)
if json.dumps({"contact_name":name,"contact_id":msg.talker().contact_id}) in black_list:
await mention_and_say("当前账号封禁中,请联系管理员.",is_room,mention_user,conversation)
return
# 处理受限名单
restrict_list = redis.lrange("restrict_list",0,-1)
# 上下文存储在redis
chat_id = 'context'
if is_room is not None:
chat_id = chat_id + is_room.room_id
chat_id = chat_id + msg.talker().contact_id
context_str = redis.get(chat_id) or ''
if json.dumps({"contact_name": name, "contact_id": msg.talker().contact_id}) in restrict_list:
if len(context_str) > 100:
await mention_and_say("当前账号限制中,请稍后再试或请联系管理员.", is_room, mention_user, conversation)
return
if "#清除上下文" in msg.text():
if is_room is not None:
chat_id = chat_id + is_room.room_id
redis.delete(chat_id)
await msg.say("清除成功")
return
# 处理对话
if is_self is not True and (
(is_room is not None and is_mention_bot and "#" not in msg.text()) or
(is_room is None and "#" not in msg.text())
):
try:
context_str = context_str + f"(You:{msg.text()})"
response_list = text_ai(context_str)
i: int = 1
for response_text in response_list:
context_str = context_str + response_text
# 每次新的对话进来,增加过期时间
redis.set(chat_id, context_str)
redis.expire(chat_id, 120)
size = len(response_list)
if size == 1:
await mention_and_say(response_text, msg.room(), mention_user, conversation)
return
await mention_and_say(
f"第" + str(i) + "页/总计" + str(size) + "页\n"
"================\n" +
response_text, msg.room(), mention_user, conversation
)
i = i + 1
return
except Exception as e:
logging.error(e)
return
# 处理生成图片
if is_self is not True and ((is_room is not None and is_mention_bot and "#生成图片" in msg.text()) or (
is_room is None and "#生成图片" in msg.text())):
generate_text = msg.text().split('#生成图片')[1]
img_url = img_ai(generate_text)
if len(img_url) < 2:
await mention_and_say("生成图片失败", msg.room(), mention_user, conversation)
else:
img_file_box = FileBox.from_url(img_url, name=generate_text + '.jpeg')
await mention_and_say(img_file_box, msg.room(), mention_user, conversation)
return
# 处理生成周报
if is_self is not True and ((is_room is not None and is_mention_bot and "#生成日报" in msg.text()) or
(is_room is None and "#生成日报" in msg.text())
):
generate_text = msg.text().split('#生成日报')[1]
weekly_list = text_ai(f"请帮我把以下的工作内容填充为一篇完整的日报,以分点叙述的形式输出.'{generate_text}'")
if len(weekly_list) < 1:
await mention_and_say("生成日报失败", msg.room(), mention_user, conversation)
else:
await create_ai_text(weekly_list, msg.room(), mention_user, conversation)
# 处理生成周报
if is_self is not True and ((is_room is not None and is_mention_bot and "#生成周报" in msg.text()) or
(is_room is None and "#生成周报" in msg.text())
):
generate_text = msg.text().split('#生成周报')[1]
weekly_list = text_ai(f"请帮我把以下的工作内容填充为一篇完整的周报,以分点叙述的形式输出.'{generate_text}'")
if len(weekly_list) < 1:
await mention_and_say("生成周报失败", msg.room(), mention_user, conversation)
else:
await create_ai_text(weekly_list, msg.room(), mention_user, conversation)
async def create_ai_text(response_list: list, room_, mention_user, conversation: Union[Room, Contact]):
i: int = 1
for response in response_list:
size = len(response_list)
if size == 1:
await mention_and_say(response, room_, mention_user, conversation)
return
await mention_and_say(
f"第" + str(i) + "页/总计" + str(size) + "页\n"
"================\n" +
response, room_, mention_user, conversation)
i = i + 1
async def mention_and_say(response_obj, room_, mention_users: List[str], conversion: Union[Room, Contact]):
if room_ is not None:
await conversion.say(response_obj, mention_users)
else:
await conversion.say(response_obj)
| [] |
2024-01-10 | SantoshSrinivas79/gpt3-email-generator | ml_backend.py | import openai
class ml_backend:
openai.api_key = "USE-YOUR-OWN-API-KEY-HERE"
def generate_email(self, userPrompt ="Write me a professionally sounding email", start="Dear"):
"""Returns a generated an email using GPT3 with a certain prompt and starting sentence"""
response = openai.Completion.create(
engine="davinci",
prompt=userPrompt + "\n\n" + start,
temperature=0.71,
max_tokens=150,
top_p=1,
frequency_penalty=0.36,
presence_penalty=0.75
)
return response.get("choices")[0]['text']
def replace_spaces_with_pluses(self, sample):
"""Returns a string with each space being replaced with a plus so the email hyperlink can be formatted properly"""
changed = list(sample)
for i, c in enumerate(changed):
if(c == ' ' or c ==' ' or c ==' ' or c=='\n' or c=='\n\n'):
changed[i] = '+'
return ''.join(changed)
| [
"PLACEHOLDER\n\nPLACEHOLDER"
] |
2024-01-10 | SantoshSrinivas79/gpt3-email-generator | emailapp.py | import streamlit as st
import openai
from ml_backend import ml_backend
st.title("Interactive Email Generator App")
st.text("by Alex Zavalny")
st.markdown("""
# About
## Play around with the sliders and text fields to generate your very own emails!
## At the end, you can automatically send your email to a recipient via Gmail
## Business Benefits and Usecases:
* Time saved writing medium-long sized emails
* A greater amount of Mental Energy is conserved to do more of the tasks that matter
* Anxiety of writing a **professional sounding** email (or email with any writing style) is removed as the GPT3 Language model used is trained from a variety of many different internet sources
""")
st.markdown("# Generate Email")
backend = ml_backend()
with st.form(key="form"):
prompt = st.text_input("Describe the Kind of Email you want to be written.")
st.text(f"(Example: Write me a professional sounding email to my boss)")
start = st.text_input("Begin writing the first few or several words of your email:")
slider = st.slider("How many characters do you want your email to be? ", min_value=64, max_value=750)
st.text("(A typical email is usually 100-500 characters)")
submit_button = st.form_submit_button(label='Generate Email')
if submit_button:
with st.spinner("Generating Email..."):
output = backend.generate_email(prompt, start)
st.markdown("# Email Output:")
st.subheader(start + output)
st.markdown("____")
st.markdown("# Send Your Email")
st.subheader("You can press the Generate Email Button again if you're unhappy with the model's output")
st.subheader("Otherwise:")
st.text(output)
url = "https://mail.google.com/mail/?view=cm&fs=1&to=&su=&body=" + backend.replace_spaces_with_pluses(start + output)
st.markdown("[Click me to send the email]({})".format(url)) | [
"Describe the Kind of Email you want to be written."
] |
2024-01-10 | devbana/AI_Projects | Topic_Modelling~tp_lda.py | import os
import gensim
from gensim.models import LsiModel
from gensim import models
from gensim import corpora
from gensim.utils import lemmatize
import nltk
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
from gensim.parsing.preprocessing import remove_stopwords, stem_text
from gensim.parsing.preprocessing import strip_numeric, strip_short, strip_multiple_whitespaces,strip_non_alphanum,strip_punctuation,strip_tags,preprocess_string
import pandas as pd
from gensim import similarities
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from gensim.models.coherencemodel import CoherenceModel
import matplotlib.pyplot as plt
from pprint import pprint
# read the data
corpus_dir = 'https://raw.githubusercontent.com/Ramaseshanr/anlp/master/corpus/bbc-text.csv'
df_corpus = pd.read_csv(corpus_dir, names=['category', 'text'])
corpus = df_corpus['text'].values.tolist()
corpus = corpus[1:]
my_filter = [
lambda x: x.lower(), strip_tags, strip_punctuation,
strip_multiple_whitespaces, strip_numeric,
remove_stopwords, strip_short, stem_text
]
def preprocessing(corpus):
for document in corpus:
doc = strip_numeric(document)
doc = remove_stopwords(doc)
doc = strip_short(doc, 3)
#doc = stem_text(doc)
doc = strip_punctuation(doc)
strip_tags(doc)
yield gensim.utils.tokenize(doc, lower=True)
texts = preprocessing(corpus)
dictionary = corpora.Dictionary(texts)
dictionary.filter_extremes(no_below=1, keep_n=25000)
doc_term_matrix = [dictionary.doc2bow(tokens) for tokens in preprocessing(corpus)]
tfidf = models.TfidfModel(doc_term_matrix)
corpus_tfidf = tfidf[doc_term_matrix]
lda = models.LdaModel(corpus_tfidf, num_topics=10, id2word=dictionary)
topics = lda.print_topics(num_words=25)
for i in topics:
print(i[0])
print(i[1])
| [] |
2024-01-10 | devbana/AI_Projects | Topic_Modelling~tp_lsi.py |
import os
import gensim
from gensim.models import LsiModel
from gensim import models
from gensim import corpora
from gensim.utils import lemmatize
import nltk
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
from gensim.parsing.preprocessing import remove_stopwords, stem_text
from gensim.parsing.preprocessing import strip_numeric, strip_short, strip_multiple_whitespaces,strip_non_alphanum,strip_punctuation,strip_tags,preprocess_string
import pandas as pd
from gensim import similarities
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from gensim.models.coherencemodel import CoherenceModel
import matplotlib.pyplot as plt
from pprint import pprint
#read the data
corpus_dir = 'https://raw.githubusercontent.com/Ramaseshanr/anlp/master/corpus/bbc-text.csv'
df_corpus = pd.read_csv(corpus_dir, names=['category', 'text'])
corpus = df_corpus['text'].values.tolist()
corpus = corpus[1:]
my_filter = [
lambda x: x.lower(), strip_tags, strip_punctuation,
strip_multiple_whitespaces, strip_numeric,
remove_stopwords, strip_short, stem_text
]
def preprocessing(corpus):
for document in corpus:
doc = strip_numeric(document)
doc = remove_stopwords(doc)
doc = strip_short(doc, 3)
#doc = stem_text(doc)
doc = strip_punctuation(doc)
strip_tags(doc)
yield gensim.utils.tokenize(doc, lower=True)
texts = preprocessing(corpus)
dictionary = corpora.Dictionary(texts)
dictionary.filter_extremes(no_below=1, keep_n=25000)
doc_term_matrix = [dictionary.doc2bow(tokens) for tokens in preprocessing(corpus)]
tfidf = models.TfidfModel(doc_term_matrix)
corpus_tfidf = tfidf[doc_term_matrix]
print('LSI Model')
lsi = models.LsiModel(corpus_tfidf, id2word=dictionary) # initialize an LSI transformation
topics = lsi.print_topics(num_topics=5, num_words=25)
for i in topics:
print(i[0])
print(i[1])
| [] |
2024-01-10 | oreganmike/chat-with-your-data-solution-accelerator | code~utilities~helpers~LLMHelper.py | import openai
from typing import List
from langchain.chat_models import AzureChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from .EnvHelper import EnvHelper
class LLMHelper:
def __init__(self):
env_helper: EnvHelper = EnvHelper()
# Configure OpenAI API
openai.api_type = "azure"
openai.api_version = env_helper.AZURE_OPENAI_API_VERSION
openai.api_base = env_helper.OPENAI_API_BASE
openai.api_key = env_helper.OPENAI_API_KEY
self.llm_model = env_helper.AZURE_OPENAI_MODEL
self.llm_max_tokens = env_helper.AZURE_OPENAI_MAX_TOKENS if env_helper.AZURE_OPENAI_MAX_TOKENS != '' else None
self.embedding_model = env_helper.AZURE_OPENAI_EMBEDDING_MODEL
def get_llm(self):
return AzureChatOpenAI(deployment_name=self.llm_model, temperature=0, max_tokens=self.llm_max_tokens, openai_api_version=openai.api_version)
# TODO: This needs to have a custom callback to stream back to the UI
def get_streaming_llm(self):
return AzureChatOpenAI(streaming=True, callbacks=[StreamingStdOutCallbackHandler], deployment_name=self.llm_model, temperature=0,
max_tokens=self.llm_max_tokens, openai_api_version=openai.api_version)
def get_embedding_model(self):
return OpenAIEmbeddings(deployment=self.embedding_model, chunk_size=1)
def get_chat_completion_with_functions(self, messages: List[dict], functions: List[dict], function_call: str="auto"):
return openai.ChatCompletion.create(
deployment_id=self.llm_model,
messages=messages,
functions=functions,
function_call=function_call,
)
def get_chat_completion(self, messages: List[dict]):
return openai.ChatCompletion.create(
deployment_id=self.llm_model,
messages=messages,
)
| [] |
2024-01-10 | oreganmike/chat-with-your-data-solution-accelerator | code~utilities~document_chunking~Layout.py | from typing import List
from .DocumentChunkingBase import DocumentChunkingBase
from langchain.text_splitter import MarkdownTextSplitter
from .Strategies import ChunkingSettings
from ..common.SourceDocument import SourceDocument
class LayoutDocumentChunking(DocumentChunkingBase):
def __init__(self) -> None:
pass
def chunk(self, documents: List[SourceDocument], chunking: ChunkingSettings) -> List[SourceDocument]:
full_document_content = "".join(list(map(lambda document: document.content, documents)))
document_url = documents[0].source
splitter = MarkdownTextSplitter.from_tiktoken_encoder(chunk_size=chunking.chunk_size, chunk_overlap=chunking.chunk_overlap)
chunked_content_list = splitter.split_text(full_document_content)
# Create document for each chunk
documents = []
chunk_offset = 0
for idx, chunked_content in enumerate(chunked_content_list):
documents.append(
SourceDocument.from_metadata(
content=chunked_content,
document_url=document_url,
metadata={"offset": chunk_offset},
idx=idx,
)
)
chunk_offset += len(chunked_content)
return documents
| [] |
2024-01-10 | oreganmike/chat-with-your-data-solution-accelerator | code~utilities~orchestrator~Strategies.py | from enum import Enum
class OrchestrationStrategy(Enum):
OPENAI_FUNCTION = 'openai_function'
LANGCHAIN = 'langchain'
def get_orchestrator(orchestration_strategy: str):
if orchestration_strategy == OrchestrationStrategy.OPENAI_FUNCTION.value:
from .OpenAIFunctions import OpenAIFunctionsOrchestrator
return OpenAIFunctionsOrchestrator()
elif orchestration_strategy == OrchestrationStrategy.LANGCHAIN.value:
from .LangChainAgent import LangChainAgent
return LangChainAgent()
else:
raise Exception(f"Unknown orchestration strategy: {orchestration_strategy}")
| [] |
2024-01-10 | Sohojoe/baby_rl | baby_rl~component~envs.py | #######################################################################
# Copyright (C) 2017 Shangtong Zhang([email protected]) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
import os
import gym
import numpy as np
import torch
from gym.spaces.box import Box
from gym.spaces.discrete import Discrete
# from baselines.common.atari_wrappers import make_atari, wrap_deepmind
# from baselines.common.atari_wrappers import FrameStack as FrameStack_
# from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv, VecEnv
# from openai.baselines
from abc import ABC, abstractmethod
from marathon_envs.envs import MarathonEnvs
import pathlib
class VecEnv(ABC):
"""
An abstract asynchronous, vectorized environment.
Used to batch data from multiple copies of an environment, so that
each observation becomes an batch of observations, and expected action is a batch of actions to
be applied per-environment.
"""
closed = False
viewer = None
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
@abstractmethod
def reset(self):
"""
Reset all the environments and return an array of
observations, or a dict of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
@abstractmethod
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
@abstractmethod
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a dict of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close_extras(self):
"""
Clean up the extra resources, beyond what's in this base class.
Only runs when not self.closed.
"""
pass
def close(self):
if self.closed:
return
if self.viewer is not None:
self.viewer.close()
self.close_extras()
self.closed = True
def step(self, actions):
"""
Step the environments synchronously.
This is available for backwards compatibility.
"""
self.step_async(actions)
return self.step_wait()
def render(self, mode='human'):
imgs = self.get_images()
bigimg = tile_images(imgs)
if mode == 'human':
self.get_viewer().imshow(bigimg)
return self.get_viewer().isopen
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
def get_images(self):
"""
Return RGB images from each environment
"""
raise NotImplementedError
@property
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
def get_viewer(self):
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
return self.viewer
from ..utils import *
try:
import roboschool
except ImportError:
pass
# adapted from https://github.com/ikostrikov/pytorch-a2c-ppo-acktr/blob/master/envs.py
def make_env(env_id, seed, rank, episode_life=True):
def _thunk():
random_seed(seed)
if env_id.startswith("dm"):
import dm_control2gym
_, domain, task = env_id.split('-')
env = dm_control2gym.make(domain_name=domain, task_name=task)
else:
env = gym.make(env_id)
is_atari = hasattr(gym.envs, 'atari') and isinstance(
env.unwrapped, gym.envs.atari.atari_env.AtariEnv)
if is_atari:
env = make_atari(env_id)
env.seed(seed + rank)
env = OriginalReturnWrapper(env)
if is_atari:
env = wrap_deepmind(env,
episode_life=episode_life,
clip_rewards=False,
frame_stack=False,
scale=False)
obs_shape = env.observation_space.shape
if len(obs_shape) == 3:
env = TransposeImage(env)
env = FrameStack(env, 4)
return env
return _thunk
class MlAgentHelperWrapper(gym.Wrapper):
def __init__(self, env):
gym.Wrapper.__init__(self, env)
self.total_rewards = None
# self.observation_space = gym.spaces.Box(
# self.observation_space.low[0],
# self.observation_space.high[0],
# # (self.observation_space.shape[0], env.number_agents),
# (env.number_agents, self.observation_space.shape[0]),
# self.observation_space.dtype)
# self.action_space = gym.spaces.Box(
# self.action_space.low[0],
# self.action_space.high[0],
# # (self.action_space.shape[0], env.number_agents),
# (env.number_agents, self.action_space.shape[0]),
# self.action_space.dtype)
def step(self, action):
obs, reward, done, info = self.env.step(action.tolist())
if self.total_rewards is None:
self.total_rewards = reward
else:
self.total_rewards = [self.total_rewards[i]+reward[i] for i in range(len(reward))]
info = []
for i in range(len(reward)):
if done[i]:
info.append({'episodic_return': self.total_rewards[i]})
self.total_rewards[i] = 0
else:
info.append({'episodic_return': None})
info = tuple(info)
return obs, reward, done, info
def reset(self):
return self.env.reset()
class OriginalReturnWrapper(gym.Wrapper):
def __init__(self, env):
gym.Wrapper.__init__(self, env)
self.total_rewards = 0
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.total_rewards += reward
if done:
info['episodic_return'] = self.total_rewards
self.total_rewards = 0
else:
info['episodic_return'] = None
return obs, reward, done, info
def reset(self):
return self.env.reset()
class TransposeImage(gym.ObservationWrapper):
def __init__(self, env=None):
super(TransposeImage, self).__init__(env)
obs_shape = self.observation_space.shape
self.observation_space = Box(
self.observation_space.low[0, 0, 0],
self.observation_space.high[0, 0, 0],
[obs_shape[2], obs_shape[1], obs_shape[0]],
dtype=self.observation_space.dtype)
def observation(self, observation):
return observation.transpose(2, 0, 1)
# The original LayzeFrames doesn't work well
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
def __array__(self, dtype=None):
out = np.concatenate(self._frames, axis=0)
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self.__array__())
def __getitem__(self, i):
return self.__array__()[i]
# from baselines.common.atari_wrappers import FrameStack as FrameStack_
class FrameStack_(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255, shape=(shp[0], shp[1], shp[2] * k), dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class FrameStack(FrameStack_):
def __init__(self, env, k):
FrameStack_.__init__(self, env, k)
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
# The original one in baselines is really bad
class DummyVecEnv(VecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
data = []
for i in range(self.num_envs):
obs, rew, done, info = self.envs[i].step(self.actions[i])
if done:
obs = self.envs[i].reset()
data.append([obs, rew, done, info])
obs, rew, done, info = zip(*data)
return obs, np.asarray(rew), np.asarray(done), info
def reset(self):
return [env.reset() for env in self.envs]
def close(self):
return
id_num = 0
class Task:
def __init__(self,
name,
num_envs=1,
single_process=True,
log_dir=None,
episode_life=True,
seed=None,
marathon_envs=False,
no_graphics=False,
inference=False):
if seed is None:
seed = np.random.randint(int(1e9))
if log_dir is not None:
mkdir(log_dir)
if marathon_envs:
global id_num
envs = MarathonEnvs(name, num_envs, worker_id=id_num, no_graphics=no_graphics, inference=inference)
envs.seed(seed + id_num)
id_num += 1
envs = MlAgentHelperWrapper(envs)
else:
envs = [make_env(name, seed, i, episode_life) for i in range(num_envs)]
if single_process:
Wrapper = DummyVecEnv
else:
raise NotImplementedError
# Wrapper = SubprocVecEnv
envs = Wrapper(envs)
self.env = envs
self.name = name
self.observation_space = self.env.observation_space
self.state_dim = int(np.prod(self.env.observation_space.shape))
self.action_space = self.env.action_space
if isinstance(self.action_space, Discrete):
self.action_dim = self.action_space.n
elif isinstance(self.action_space, Box):
self.action_dim = self.action_space.shape[0]
else:
assert 'unknown action space'
def reset(self):
return self.env.reset()
def step(self, actions):
if isinstance(self.action_space, Box):
actions = np.clip(actions, self.action_space.low, self.action_space.high)
return self.env.step(actions)
if __name__ == '__main__':
task = Task('Hopper-v2', 5, single_process=False)
state = task.reset()
while True:
action = np.random.rand(task.observation_space.shape[0])
next_state, reward, done, _ = task.step(action)
print(done) | [] |
2024-01-10 | LoveCocoa/Easy-Reading-PA5 | easy_under.py | import streamlit as st
import openai
import json
import pandas as pd
# Get the OpenAI API key from the environment variable
user_api_key = st.sidebar.text_input("OpenAI API key", type="password")
client = openai.OpenAI(api_key=user_api_key)
prompt = """Act as an assistant to help users paraphrase their sentences for better understanding.
You will receive sentences in the format [User's sentence].
You have 2 tasks first is to generate a new sentence using simpler language based on the chosen language level ,second is you must return at least 3 words that are above the chosen language level.
**Example:**
Sentence: The implications of quantum entanglement on the measurement problem in quantum mechanics have been a subject of intense debate among physicists.
Language Level: High School
**Task:**
Paraphrase the sentence using simpler language based on the chosen language level and find words that are above the chosen language level. For example, if the chosen language level is High School, the vocabulary list should be at least at the University level.
If the chosen language level is University, the vocabulary list should be at least at the University level.
Return the following information in JSON format:
```json
{
"original_text": "The original sentence",
"paraphrased_text": "The paraphrased sentence",
"vocabulary_list": [
{
"original_word": "Vocabulary1",
"synonyms": ["Synonym1", "Synonym2", "Synonym3"],
"example": "A sample sentence using a synonym"
},
{
"original_word": "Vocabulary2",
"synonyms": ["SynonymA", "SynonymB", "SynonymC"],
"example": "A sample sentence using a synonym"
},
...
]
}
"""
st.title('Easy-Reading')
st.markdown('Input the complex sentence.\n'
'The AI will paraphrase the sentence based on the chosen language level .')
sentence_input = st.text_area("Enter your sentence:", "Your text here", height=10)
language_level_options = ["Elementary", "High School", "University"]
language_level_input = st.selectbox("Choose the language level:", language_level_options)
# generate button after text input
if st.button('Generate'):
messages_so_far = [
{"role": "system", "content": prompt},
{'role': 'user', 'content': sentence_input},
{'role': 'user', 'content': language_level_input},
]
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=messages_so_far
)
# Show the response from the AI
response_dict = response.choices[0].message.content
sd = json.loads(response_dict)
original_text = sd["original_text"]
paraphrased_text = sd["paraphrased_text"]
vocabulary_list = sd["vocabulary_list"]
for entry in sd['vocabulary_list']:
entry['synonyms'] = ', '.join(entry['synonyms'])
# Create DataFrames
original_paraphrased_df = pd.DataFrame({"Original Text": [original_text], "Paraphrased Text": [paraphrased_text]})
vocabulary_df = pd.DataFrame(vocabulary_list)
# Show DataFrames
st.markdown('**Original and Paraphrased Sentences:**')
st.table(original_paraphrased_df)
st.markdown('**Vocabulary List:**')
st.table(vocabulary_df)
| [
"Act as an assistant to help users paraphrase their sentences for better understanding. \n You will receive sentences in the format [User's sentence]. \n You have 2 tasks first is to generate a new sentence using simpler language based on the chosen language level ,second is you must return at least 3 words that are above the chosen language level.\n **Example:**\n Sentence: The implications of quantum entanglement on the measurement problem in quantum mechanics have been a subject of intense debate among physicists.\n Language Level: High School\n\n **Task:**\n Paraphrase the sentence using simpler language based on the chosen language level and find words that are above the chosen language level. For example, if the chosen language level is High School, the vocabulary list should be at least at the University level.\n If the chosen language level is University, the vocabulary list should be at least at the University level.\n Return the following information in JSON format:\n ```json\n {\n \"original_text\": \"The original sentence\",\n \"paraphrased_text\": \"The paraphrased sentence\",\n \"vocabulary_list\": [\n {\n \"original_word\": \"Vocabulary1\",\n \"synonyms\": [\"Synonym1\", \"Synonym2\", \"Synonym3\"],\n \"example\": \"A sample sentence using a synonym\"\n },\n {\n \"original_word\": \"Vocabulary2\",\n \"synonyms\": [\"SynonymA\", \"SynonymB\", \"SynonymC\"],\n \"example\": \"A sample sentence using a synonym\"\n },\n ...\n ]\n }\n "
] |
2024-01-10 | vukrosic/openai-api-telegram-bot | bot~telegram_bot.py | from __future__ import annotations
import asyncio
import logging
import os
import io
from uuid import uuid4
from telegram import BotCommandScopeAllGroupChats, Update, constants, LabeledPrice
from telegram import InlineKeyboardMarkup, InlineKeyboardButton, InlineQueryResultArticle
from telegram import InputTextMessageContent, BotCommand
from telegram import PreCheckoutQuery
from telegram.error import RetryAfter, TimedOut, BadRequest
from telegram.ext import ApplicationBuilder, CommandHandler, MessageHandler, \
filters, InlineQueryHandler, CallbackQueryHandler, Application, ContextTypes, CallbackContext, PreCheckoutQueryHandler
from pydub import AudioSegment
from PIL import Image
from utils import is_group_chat, get_thread_id, message_text, wrap_with_indicator, split_into_chunks, \
edit_message_with_retry, get_stream_cutoff_values, is_allowed, get_remaining_budget, is_admin, is_within_budget, \
get_reply_to_message_id, add_chat_request_to_usage_tracker, error_handler, is_direct_result, handle_direct_result, \
cleanup_intermediate_files, add_airtable_budget, subtract_airtable_budget
from openai_helper import OpenAIHelper, localized_text
from usage_tracker import UsageTracker
class ChatGPTTelegramBot:
"""
Class representing a ChatGPT Telegram Bot.
"""
def __init__(self, config: dict, openai: OpenAIHelper):
"""
Initializes the bot with the given configuration and GPT bot object.
:param config: A dictionary containing the bot configuration
:param openai: OpenAIHelper object
"""
self.config = config
self.openai = openai
bot_language = self.config['bot_language']
self.commands = [
BotCommand(command='buy', description=localized_text('buy_description', bot_language)),
BotCommand(command='help', description=localized_text('help_description', bot_language)),
BotCommand(command='reset', description=localized_text('reset_description', bot_language)),
BotCommand(command='stats', description=localized_text('stats_description', bot_language)),
BotCommand(command='resend', description=localized_text('resend_description', bot_language))
]
# If imaging is enabled, add the "image" command to the list
if self.config.get('enable_image_generation', False):
self.commands.append(BotCommand(command='image', description=localized_text('image_description', bot_language)))
if self.config.get('enable_tts_generation', False):
self.commands.append(BotCommand(command='tts', description=localized_text('tts_description', bot_language)))
self.group_commands = [BotCommand(
command='chat', description=localized_text('chat_description', bot_language)
)] + self.commands
self.disallowed_message = localized_text('disallowed', bot_language)
self.budget_limit_message = localized_text('budget_limit', bot_language)
self.usage = {}
self.last_message = {}
self.inline_queries_cache = {}
async def buy(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Sends an invoice to the user for purchasing credits.
"""
try:
# Set up the invoice parameters
payload = "custom_payload" # You can customize this
provider_token = os.environ.get('PAYMENTS_PROVIDER_TOKEN')
currency = "USD"
# Fetch payment options from environment variable
payment_options_str = os.getenv("PAYMENT_OPTIONS", "5,10,20")
payment_options = [int(option) for option in payment_options_str.split(",")]
for option in payment_options:
# Set up the invoice parameters dynamically based on the current option
title = f"${option} Budget"
description = f"Buy ${option} credits to use with all of our models!"
prices = [LabeledPrice(label='Credits', amount=option * 100)] # Amount in cents (e.g., $10.00 is 1000 cents)
# Send the invoice
await context.bot.send_invoice(
chat_id=update.effective_chat.id,
title=title,
description=description,
payload=payload,
provider_token=provider_token,
currency=currency,
prices=prices,
start_parameter="optional_start_parameter",
reply_markup=InlineKeyboardMarkup(
[[InlineKeyboardButton(f"Pay ${option} Now", pay=True)]]
),
)
except Exception as e:
logging.exception(e)
await update.message.reply_text(
message_thread_id=get_thread_id(update),
text=f"Error sending invoice: {str(e)}",
)
async def pre_checkout_callback(self, update: Update, context: CallbackContext):
"""
Handle pre-checkout queries.
"""
query: PreCheckoutQuery = update.pre_checkout_query
user_id = query.from_user.id
await add_airtable_budget(user_id=user_id, user_name=query.from_user.name, amount_to_increase=query.total_amount / 100)
payload = query.invoice_payload
# Check the payload and perform any necessary actions
# For example, you might want to update the user's data or mark the invoice as paid
await query.answer(ok=True)
async def help(self, update: Update, _: ContextTypes.DEFAULT_TYPE) -> None:
"""
Shows the help menu.
"""
commands = self.group_commands if is_group_chat(update) else self.commands
commands_description = [f'/{command.command} - {command.description}' for command in commands]
bot_language = self.config['bot_language']
help_text = (
localized_text('help_text', bot_language)[0] +
'\n\n' +
'\n'.join(commands_description) +
'\n\n' +
localized_text('help_text', bot_language)[1] +
'\n\n' +
localized_text('help_text', bot_language)[2]
)
await update.message.reply_text(help_text, disable_web_page_preview=True)
async def stats(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Returns token usage statistics for current day and month.
"""
if not await is_allowed(self.config, update, context):
logging.warning(f'User {update.message.from_user.name} (id: {update.message.from_user.id}) '
f'is not allowed to request their usage statistics')
await self.send_disallowed_message(update, context)
return
logging.info(f'User {update.message.from_user.name} (id: {update.message.from_user.id}) '
f'requested their usage statistics')
user_id = update.message.from_user.id
if user_id not in self.usage:
self.usage[user_id] = UsageTracker(user_id, update.message.from_user.name)
tokens_today, tokens_month = self.usage[user_id].get_current_token_usage()
images_today, images_month = self.usage[user_id].get_current_image_count()
(transcribe_minutes_today, transcribe_seconds_today, transcribe_minutes_month,
transcribe_seconds_month) = self.usage[user_id].get_current_transcription_duration()
vision_today, vision_month = self.usage[user_id].get_current_vision_tokens()
characters_today, characters_month = self.usage[user_id].get_current_tts_usage()
current_cost = self.usage[user_id].get_current_cost()
chat_id = update.effective_chat.id
chat_messages, chat_token_length = self.openai.get_conversation_stats(chat_id)
remaining_budget = get_remaining_budget(self.config, self.usage, update)
bot_language = self.config['bot_language']
text_current_conversation = (
f"*{localized_text('stats_conversation', bot_language)[0]}*:\n"
f"{chat_messages} {localized_text('stats_conversation', bot_language)[1]}\n"
f"{chat_token_length} {localized_text('stats_conversation', bot_language)[2]}\n"
f"----------------------------\n"
)
# Check if image generation is enabled and, if so, generate the image statistics for today
text_today_images = ""
if self.config.get('enable_image_generation', False):
text_today_images = f"{images_today} {localized_text('stats_images', bot_language)}\n"
text_today_vision = ""
if self.config.get('enable_vision', False):
text_today_vision = f"{vision_today} {localized_text('stats_vision', bot_language)}\n"
text_today_tts = ""
if self.config.get('enable_tts_generation', False):
text_today_tts = f"{characters_today} {localized_text('stats_tts', bot_language)}\n"
text_today = (
f"*{localized_text('usage_today', bot_language)}:*\n"
f"{tokens_today} {localized_text('stats_tokens', bot_language)}\n"
f"{text_today_images}" # Include the image statistics for today if applicable
f"{text_today_vision}"
f"{text_today_tts}"
f"{transcribe_minutes_today} {localized_text('stats_transcribe', bot_language)[0]} "
f"{transcribe_seconds_today} {localized_text('stats_transcribe', bot_language)[1]}\n"
f"{localized_text('stats_total', bot_language)}{current_cost['cost_today']:.2f}\n"
f"----------------------------\n"
)
text_month_images = ""
if self.config.get('enable_image_generation', False):
text_month_images = f"{images_month} {localized_text('stats_images', bot_language)}\n"
text_month_vision = ""
if self.config.get('enable_vision', False):
text_month_vision = f"{vision_month} {localized_text('stats_vision', bot_language)}\n"
text_month_tts = ""
if self.config.get('enable_tts_generation', False):
text_month_tts = f"{characters_month} {localized_text('stats_tts', bot_language)}\n"
# Check if image generation is enabled and, if so, generate the image statistics for the month
text_month = (
f"*{localized_text('usage_month', bot_language)}:*\n"
f"{tokens_month} {localized_text('stats_tokens', bot_language)}\n"
f"{text_month_images}" # Include the image statistics for the month if applicable
f"{text_month_vision}"
f"{text_month_tts}"
f"{transcribe_minutes_month} {localized_text('stats_transcribe', bot_language)[0]} "
f"{transcribe_seconds_month} {localized_text('stats_transcribe', bot_language)[1]}\n"
f"{localized_text('stats_total', bot_language)}{current_cost['cost_month']:.2f}"
)
# text_budget filled with conditional content
text_budget = "\n\n"
budget_period = self.config['budget_period']
if remaining_budget < float('inf'):
text_budget += (
f"{localized_text('stats_budget', bot_language)}"
f"{localized_text(budget_period, bot_language)}: "
f"${remaining_budget:.2f}.\n"
)
# No longer works as of July 21st 2023, as OpenAI has removed the billing API
# add OpenAI account information for admin request
# if is_admin(self.config, user_id):
# text_budget += (
# f"{localized_text('stats_openai', bot_language)}"
# f"{self.openai.get_billing_current_month():.2f}"
# )
usage_text = text_current_conversation + text_today + text_month + text_budget
await update.message.reply_text(usage_text, parse_mode=constants.ParseMode.MARKDOWN)
async def resend(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Resend the last request
"""
if not await is_allowed(self.config, update, context):
logging.warning(f'User {update.message.from_user.name} (id: {update.message.from_user.id})'
f' is not allowed to resend the message')
await self.send_disallowed_message(update, context)
return
chat_id = update.effective_chat.id
if chat_id not in self.last_message:
logging.warning(f'User {update.message.from_user.name} (id: {update.message.from_user.id})'
f' does not have anything to resend')
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=localized_text('resend_failed', self.config['bot_language'])
)
return
# Update message text, clear self.last_message and send the request to prompt
logging.info(f'Resending the last prompt from user: {update.message.from_user.name} '
f'(id: {update.message.from_user.id})')
with update.message._unfrozen() as message:
message.text = self.last_message.pop(chat_id)
await self.prompt(update=update, context=context)
async def reset(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Resets the conversation.
"""
if not await is_allowed(self.config, update, context):
logging.warning(f'User {update.message.from_user.name} (id: {update.message.from_user.id}) '
f'is not allowed to reset the conversation')
await self.send_disallowed_message(update, context)
return
logging.info(f'Resetting the conversation for user {update.message.from_user.name} '
f'(id: {update.message.from_user.id})...')
chat_id = update.effective_chat.id
reset_content = message_text(update.message)
self.openai.reset_chat_history(chat_id=chat_id, content=reset_content)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=localized_text('reset_done', self.config['bot_language'])
)
async def image(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Generates an image for the given prompt using DALL·E APIs
"""
if not self.config['enable_image_generation'] \
or not await self.check_allowed_and_within_budget(update, context):
return
image_query = message_text(update.message)
if image_query == '':
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=localized_text('image_no_prompt', self.config['bot_language'])
)
return
logging.info(f'New image generation request received from user {update.message.from_user.name} '
f'(id: {update.message.from_user.id})')
async def _generate():
try:
image_url, image_size = await self.openai.generate_image(prompt=image_query)
if self.config['image_receive_mode'] == 'photo':
await update.effective_message.reply_photo(
reply_to_message_id=get_reply_to_message_id(self.config, update),
photo=image_url
)
elif self.config['image_receive_mode'] == 'document':
await update.effective_message.reply_document(
reply_to_message_id=get_reply_to_message_id(self.config, update),
document=image_url
)
else:
raise Exception(f"env variable IMAGE_RECEIVE_MODE has invalid value {self.config['image_receive_mode']}")
# add image request to users usage tracker
user_id = update.message.from_user.id
price = self.usage[user_id].add_image_request(image_size, self.config['image_prices'])
# add guest chat request to guest usage tracker
if str(user_id) not in self.config['allowed_user_ids'].split(',') and 'guests' in self.usage:
self.usage["guests"].add_image_request(image_size, self.config['image_prices'])
await subtract_airtable_budget(update.message.from_user.id, update.message.from_user.name, price)
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=f"{localized_text('image_fail', self.config['bot_language'])}: {str(e)}",
parse_mode=constants.ParseMode.MARKDOWN
)
await wrap_with_indicator(update, context, _generate, constants.ChatAction.UPLOAD_PHOTO)
async def tts(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Generates an speech for the given input using TTS APIs
"""
if not self.config['enable_tts_generation'] \
or not await self.check_allowed_and_within_budget(update, context):
return
tts_query = message_text(update.message)
if tts_query == '':
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=localized_text('tts_no_prompt', self.config['bot_language'])
)
return
logging.info(f'New speech generation request received from user {update.message.from_user.name} '
f'(id: {update.message.from_user.id})')
async def _generate():
try:
speech_file, text_length = await self.openai.generate_speech(text=tts_query)
await update.effective_message.reply_voice(
reply_to_message_id=get_reply_to_message_id(self.config, update),
voice=speech_file
)
speech_file.close()
# add image request to users usage tracker
user_id = update.message.from_user.id
price = self.usage[user_id].add_tts_request(text_length, self.config['tts_model'], self.config['tts_prices'])
# add guest chat request to guest usage tracker
if str(user_id) not in self.config['allowed_user_ids'].split(',') and 'guests' in self.usage:
self.usage["guests"].add_tts_request(text_length, self.config['tts_model'], self.config['tts_prices'])
await subtract_airtable_budget(update.message.from_user.id, update.message.from_user.name, price)
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=f"{localized_text('tts_fail', self.config['bot_language'])}: {str(e)}",
parse_mode=constants.ParseMode.MARKDOWN
)
await wrap_with_indicator(update, context, _generate, constants.ChatAction.UPLOAD_VOICE)
async def transcribe(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Transcribe audio messages.
"""
if not self.config['enable_transcription'] or not await self.check_allowed_and_within_budget(update, context):
return
if is_group_chat(update) and self.config['ignore_group_transcriptions']:
logging.info(f'Transcription coming from group chat, ignoring...')
return
chat_id = update.effective_chat.id
filename = update.message.effective_attachment.file_unique_id
async def _execute():
filename_mp3 = f'{filename}.mp3'
bot_language = self.config['bot_language']
try:
media_file = await context.bot.get_file(update.message.effective_attachment.file_id)
await media_file.download_to_drive(filename)
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=(
f"{localized_text('media_download_fail', bot_language)[0]}: "
f"{str(e)}. {localized_text('media_download_fail', bot_language)[1]}"
),
parse_mode=constants.ParseMode.MARKDOWN
)
return
try:
audio_track = AudioSegment.from_file(filename)
audio_track.export(filename_mp3, format="mp3")
logging.info(f'New transcribe request received from user {update.message.from_user.name} '
f'(id: {update.message.from_user.id})')
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=localized_text('media_type_fail', bot_language)
)
if os.path.exists(filename):
os.remove(filename)
return
user_id = update.message.from_user.id
if user_id not in self.usage:
self.usage[user_id] = UsageTracker(user_id, update.message.from_user.name)
try:
transcript = await self.openai.transcribe(filename_mp3)
transcription_price = self.config['transcription_price']
price = self.usage[user_id].add_transcription_seconds(audio_track.duration_seconds, transcription_price)
allowed_user_ids = self.config['allowed_user_ids'].split(',')
if str(user_id) not in allowed_user_ids and 'guests' in self.usage:
self.usage["guests"].add_transcription_seconds(audio_track.duration_seconds, transcription_price)
await subtract_airtable_budget(update.message.from_user.id, update.message.from_user.name, price)
# check if transcript starts with any of the prefixes
response_to_transcription = any(transcript.lower().startswith(prefix.lower()) if prefix else False
for prefix in self.config['voice_reply_prompts'])
if self.config['voice_reply_transcript'] and not response_to_transcription:
# Split into chunks of 4096 characters (Telegram's message limit)
transcript_output = f"_{localized_text('transcript', bot_language)}:_\n\"{transcript}\""
chunks = split_into_chunks(transcript_output)
for index, transcript_chunk in enumerate(chunks):
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update) if index == 0 else None,
text=transcript_chunk,
parse_mode=constants.ParseMode.MARKDOWN
)
else:
# Get the response of the transcript
response, total_tokens = await self.openai.get_chat_response(chat_id=chat_id, query=transcript)
price = self.usage[user_id].add_chat_tokens(total_tokens, self.config['token_price'])
if str(user_id) not in allowed_user_ids and 'guests' in self.usage:
self.usage["guests"].add_chat_tokens(total_tokens, self.config['token_price'])
await subtract_airtable_budget(update.message.from_user.id, update.message.from_user.name, price)
# Split into chunks of 4096 characters (Telegram's message limit)
transcript_output = (
f"_{localized_text('transcript', bot_language)}:_\n\"{transcript}\"\n\n"
f"_{localized_text('answer', bot_language)}:_\n{response}"
)
chunks = split_into_chunks(transcript_output)
for index, transcript_chunk in enumerate(chunks):
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update) if index == 0 else None,
text=transcript_chunk,
parse_mode=constants.ParseMode.MARKDOWN
)
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=f"{localized_text('transcribe_fail', bot_language)}: {str(e)}",
parse_mode=constants.ParseMode.MARKDOWN
)
finally:
if os.path.exists(filename_mp3):
os.remove(filename_mp3)
if os.path.exists(filename):
os.remove(filename)
await wrap_with_indicator(update, context, _execute, constants.ChatAction.TYPING)
async def vision(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Interpret image using vision model.
"""
if not self.config['enable_vision'] or not await self.check_allowed_and_within_budget(update, context):
return
chat_id = update.effective_chat.id
prompt = update.message.caption
if is_group_chat(update):
if self.config['ignore_group_vision']:
logging.info(f'Vision coming from group chat, ignoring...')
return
else:
trigger_keyword = self.config['group_trigger_keyword']
if (prompt is None and trigger_keyword != '') or \
(prompt is not None and not prompt.lower().startswith(trigger_keyword.lower())):
logging.info(f'Vision coming from group chat with wrong keyword, ignoring...')
return
image = update.message.effective_attachment[-1]
async def _execute():
bot_language = self.config['bot_language']
try:
media_file = await context.bot.get_file(image.file_id)
temp_file = io.BytesIO(await media_file.download_as_bytearray())
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=(
f"{localized_text('media_download_fail', bot_language)[0]}: "
f"{str(e)}. {localized_text('media_download_fail', bot_language)[1]}"
),
parse_mode=constants.ParseMode.MARKDOWN
)
return
# convert jpg from telegram to png as understood by openai
temp_file_png = io.BytesIO()
try:
original_image = Image.open(temp_file)
original_image.save(temp_file_png, format='PNG')
logging.info(f'New vision request received from user {update.message.from_user.name} '
f'(id: {update.message.from_user.id})')
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=localized_text('media_type_fail', bot_language)
)
user_id = update.message.from_user.id
if user_id not in self.usage:
self.usage[user_id] = UsageTracker(user_id, update.message.from_user.name)
if self.config['stream']:
stream_response = self.openai.interpret_image_stream(chat_id=chat_id, fileobj=temp_file_png, prompt=prompt)
i = 0
prev = ''
sent_message = None
backoff = 0
stream_chunk = 0
async for content, tokens in stream_response:
if is_direct_result(content):
return await handle_direct_result(self.config, update, content)
if len(content.strip()) == 0:
continue
stream_chunks = split_into_chunks(content)
if len(stream_chunks) > 1:
content = stream_chunks[-1]
if stream_chunk != len(stream_chunks) - 1:
stream_chunk += 1
try:
await edit_message_with_retry(context, chat_id, str(sent_message.message_id),
stream_chunks[-2])
except:
pass
try:
sent_message = await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=content if len(content) > 0 else "..."
)
except:
pass
continue
cutoff = get_stream_cutoff_values(update, content)
cutoff += backoff
if i == 0:
try:
if sent_message is not None:
await context.bot.delete_message(chat_id=sent_message.chat_id,
message_id=sent_message.message_id)
sent_message = await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=content,
)
except:
continue
elif abs(len(content) - len(prev)) > cutoff or tokens != 'not_finished':
prev = content
try:
use_markdown = tokens != 'not_finished'
await edit_message_with_retry(context, chat_id, str(sent_message.message_id),
text=content, markdown=use_markdown)
except RetryAfter as e:
backoff += 5
await asyncio.sleep(e.retry_after)
continue
except TimedOut:
backoff += 5
await asyncio.sleep(0.5)
continue
except Exception:
backoff += 5
continue
await asyncio.sleep(0.01)
i += 1
if tokens != 'not_finished':
total_tokens = int(tokens)
else:
try:
interpretation, total_tokens = await self.openai.interpret_image(chat_id, temp_file_png, prompt=prompt)
try:
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=interpretation,
parse_mode=constants.ParseMode.MARKDOWN
)
except BadRequest:
try:
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=interpretation
)
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=f"{localized_text('vision_fail', bot_language)}: {str(e)}",
parse_mode=constants.ParseMode.MARKDOWN
)
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=f"{localized_text('vision_fail', bot_language)}: {str(e)}",
parse_mode=constants.ParseMode.MARKDOWN
)
vision_token_price = self.config['vision_token_price']
price = self.usage[user_id].add_vision_tokens(total_tokens, vision_token_price)
allowed_user_ids = self.config['allowed_user_ids'].split(',')
if str(user_id) not in allowed_user_ids and 'guests' in self.usage:
self.usage["guests"].add_vision_tokens(total_tokens, vision_token_price)
await subtract_airtable_budget(update.message.from_user.id, update.message.from_user.name, price)
await wrap_with_indicator(update, context, _execute, constants.ChatAction.TYPING)
async def prompt(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
React to incoming messages and respond accordingly.
"""
if update.edited_message or not update.message or update.message.via_bot:
return
if not await self.check_allowed_and_within_budget(update, context):
return
logging.info(
f'New message received from user {update.message.from_user.name} (id: {update.message.from_user.id})')
chat_id = update.effective_chat.id
user_id = update.message.from_user.id
prompt = message_text(update.message)
self.last_message[chat_id] = prompt
if is_group_chat(update):
trigger_keyword = self.config['group_trigger_keyword']
if prompt.lower().startswith(trigger_keyword.lower()) or update.message.text.lower().startswith('/chat'):
if prompt.lower().startswith(trigger_keyword.lower()):
prompt = prompt[len(trigger_keyword):].strip()
if update.message.reply_to_message and \
update.message.reply_to_message.text and \
update.message.reply_to_message.from_user.id != context.bot.id:
prompt = f'"{update.message.reply_to_message.text}" {prompt}'
else:
if update.message.reply_to_message and update.message.reply_to_message.from_user.id == context.bot.id:
logging.info('Message is a reply to the bot, allowing...')
else:
logging.warning('Message does not start with trigger keyword, ignoring...')
return
try:
total_tokens = 0
if self.config['stream']:
await update.effective_message.reply_chat_action(
action=constants.ChatAction.TYPING,
message_thread_id=get_thread_id(update)
)
stream_response = self.openai.get_chat_response_stream(chat_id=chat_id, query=prompt)
i = 0
prev = ''
sent_message = None
backoff = 0
stream_chunk = 0
async for content, tokens in stream_response:
if is_direct_result(content):
return await handle_direct_result(self.config, update, content)
if len(content.strip()) == 0:
continue
stream_chunks = split_into_chunks(content)
if len(stream_chunks) > 1:
content = stream_chunks[-1]
if stream_chunk != len(stream_chunks) - 1:
stream_chunk += 1
try:
await edit_message_with_retry(context, chat_id, str(sent_message.message_id),
stream_chunks[-2])
except:
pass
try:
sent_message = await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=content if len(content) > 0 else "..."
)
except:
pass
continue
cutoff = get_stream_cutoff_values(update, content)
cutoff += backoff
if i == 0:
try:
if sent_message is not None:
await context.bot.delete_message(chat_id=sent_message.chat_id,
message_id=sent_message.message_id)
sent_message = await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=content,
)
except:
continue
elif abs(len(content) - len(prev)) > cutoff or tokens != 'not_finished':
prev = content
try:
use_markdown = tokens != 'not_finished'
await edit_message_with_retry(context, chat_id, str(sent_message.message_id),
text=content, markdown=use_markdown)
except RetryAfter as e:
backoff += 5
await asyncio.sleep(e.retry_after)
continue
except TimedOut:
backoff += 5
await asyncio.sleep(0.5)
continue
except Exception:
backoff += 5
continue
await asyncio.sleep(0.01)
i += 1
if tokens != 'not_finished':
total_tokens = int(tokens)
else:
async def _reply():
nonlocal total_tokens
response, total_tokens = await self.openai.get_chat_response(chat_id=chat_id, query=prompt)
if is_direct_result(response):
return await handle_direct_result(self.config, update, response)
# Split into chunks of 4096 characters (Telegram's message limit)
chunks = split_into_chunks(response)
for index, chunk in enumerate(chunks):
try:
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config,
update) if index == 0 else None,
text=chunk,
parse_mode=constants.ParseMode.MARKDOWN
)
except Exception:
try:
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config,
update) if index == 0 else None,
text=chunk
)
except Exception as exception:
raise exception
await wrap_with_indicator(update, context, _reply, constants.ChatAction.TYPING)
price = add_chat_request_to_usage_tracker(self.usage, self.config, user_id, total_tokens)
await subtract_airtable_budget(update.message.from_user.id, update.message.from_user.name, price)
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=f"{localized_text('chat_fail', self.config['bot_language'])} {str(e)}",
parse_mode=constants.ParseMode.MARKDOWN
)
async def inline_query(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""
Handle the inline query. This is run when you type: @botusername <query>
"""
query = update.inline_query.query
if len(query) < 3:
return
if not await self.check_allowed_and_within_budget(update, context, is_inline=True):
return
callback_data_suffix = "gpt:"
result_id = str(uuid4())
self.inline_queries_cache[result_id] = query
callback_data = f'{callback_data_suffix}{result_id}'
await self.send_inline_query_result(update, result_id, message_content=query, callback_data=callback_data)
async def send_inline_query_result(self, update: Update, result_id, message_content, callback_data=""):
"""
Send inline query result
"""
try:
reply_markup = None
bot_language = self.config['bot_language']
if callback_data:
reply_markup = InlineKeyboardMarkup([[
InlineKeyboardButton(text=f'🤖 {localized_text("answer_with_chatgpt", bot_language)}',
callback_data=callback_data)
]])
inline_query_result = InlineQueryResultArticle(
id=result_id,
title=localized_text("ask_chatgpt", bot_language),
input_message_content=InputTextMessageContent(message_content),
description=message_content,
thumb_url='https://user-images.githubusercontent.com/11541888/223106202-7576ff11-2c8e-408d-94ea'
'-b02a7a32149a.png',
reply_markup=reply_markup
)
await update.inline_query.answer([inline_query_result], cache_time=0)
except Exception as e:
logging.error(f'An error occurred while generating the result card for inline query {e}')
async def handle_callback_inline_query(self, update: Update, context: CallbackContext):
"""
Handle the callback query from the inline query result
"""
callback_data = update.callback_query.data
user_id = update.callback_query.from_user.id
inline_message_id = update.callback_query.inline_message_id
name = update.callback_query.from_user.name
callback_data_suffix = "gpt:"
query = ""
bot_language = self.config['bot_language']
answer_tr = localized_text("answer", bot_language)
loading_tr = localized_text("loading", bot_language)
try:
if callback_data.startswith(callback_data_suffix):
unique_id = callback_data.split(':')[1]
total_tokens = 0
# Retrieve the prompt from the cache
query = self.inline_queries_cache.get(unique_id)
if query:
self.inline_queries_cache.pop(unique_id)
else:
error_message = (
f'{localized_text("error", bot_language)}. '
f'{localized_text("try_again", bot_language)}'
)
await edit_message_with_retry(context, chat_id=None, message_id=inline_message_id,
text=f'{query}\n\n_{answer_tr}:_\n{error_message}',
is_inline=True)
return
unavailable_message = localized_text("function_unavailable_in_inline_mode", bot_language)
if self.config['stream']:
stream_response = self.openai.get_chat_response_stream(chat_id=user_id, query=query)
i = 0
prev = ''
backoff = 0
async for content, tokens in stream_response:
if is_direct_result(content):
cleanup_intermediate_files(content)
await edit_message_with_retry(context, chat_id=None,
message_id=inline_message_id,
text=f'{query}\n\n_{answer_tr}:_\n{unavailable_message}',
is_inline=True)
return
if len(content.strip()) == 0:
continue
cutoff = get_stream_cutoff_values(update, content)
cutoff += backoff
if i == 0:
try:
await edit_message_with_retry(context, chat_id=None,
message_id=inline_message_id,
text=f'{query}\n\n{answer_tr}:\n{content}',
is_inline=True)
except:
continue
elif abs(len(content) - len(prev)) > cutoff or tokens != 'not_finished':
prev = content
try:
use_markdown = tokens != 'not_finished'
divider = '_' if use_markdown else ''
text = f'{query}\n\n{divider}{answer_tr}:{divider}\n{content}'
# We only want to send the first 4096 characters. No chunking allowed in inline mode.
text = text[:4096]
await edit_message_with_retry(context, chat_id=None, message_id=inline_message_id,
text=text, markdown=use_markdown, is_inline=True)
except RetryAfter as e:
backoff += 5
await asyncio.sleep(e.retry_after)
continue
except TimedOut:
backoff += 5
await asyncio.sleep(0.5)
continue
except Exception:
backoff += 5
continue
await asyncio.sleep(0.01)
i += 1
if tokens != 'not_finished':
total_tokens = int(tokens)
else:
async def _send_inline_query_response():
nonlocal total_tokens
# Edit the current message to indicate that the answer is being processed
await context.bot.edit_message_text(inline_message_id=inline_message_id,
text=f'{query}\n\n_{answer_tr}:_\n{loading_tr}',
parse_mode=constants.ParseMode.MARKDOWN)
logging.info(f'Generating response for inline query by {name}')
response, total_tokens = await self.openai.get_chat_response(chat_id=user_id, query=query)
if is_direct_result(response):
cleanup_intermediate_files(response)
await edit_message_with_retry(context, chat_id=None,
message_id=inline_message_id,
text=f'{query}\n\n_{answer_tr}:_\n{unavailable_message}',
is_inline=True)
return
text_content = f'{query}\n\n_{answer_tr}:_\n{response}'
# We only want to send the first 4096 characters. No chunking allowed in inline mode.
text_content = text_content[:4096]
# Edit the original message with the generated content
await edit_message_with_retry(context, chat_id=None, message_id=inline_message_id,
text=text_content, is_inline=True)
await wrap_with_indicator(update, context, _send_inline_query_response,
constants.ChatAction.TYPING, is_inline=True)
price = add_chat_request_to_usage_tracker(self.usage, self.config, user_id, total_tokens)
await subtract_airtable_budget(update.message.from_user.id, update.message.from_user.name, price)
except Exception as e:
logging.error(f'Failed to respond to an inline query via button callback: {e}')
logging.exception(e)
localized_answer = localized_text('chat_fail', self.config['bot_language'])
await edit_message_with_retry(context, chat_id=None, message_id=inline_message_id,
text=f"{query}\n\n_{answer_tr}:_\n{localized_answer} {str(e)}",
is_inline=True)
async def check_allowed_and_within_budget(self, update: Update, context: ContextTypes.DEFAULT_TYPE,
is_inline=False) -> bool:
"""
Checks if the user is allowed to use the bot and if they are within their budget
:param update: Telegram update object
:param context: Telegram context object
:param is_inline: Boolean flag for inline queries
:return: Boolean indicating if the user is allowed to use the bot
"""
name = update.inline_query.from_user.name if is_inline else update.message.from_user.name
user_id = update.inline_query.from_user.id if is_inline else update.message.from_user.id
if not await is_allowed(self.config, update, context, is_inline=is_inline):
logging.warning(f'User {name} (id: {user_id}) is not allowed to use the bot')
await self.send_disallowed_message(update, context, is_inline)
return False
if not is_within_budget(self.config, self.usage, update, is_inline=is_inline):
logging.warning(f'User {name} (id: {user_id}) reached their usage limit')
await self.send_budget_reached_message(update, context, is_inline)
return False
return True
async def send_disallowed_message(self, update: Update, _: ContextTypes.DEFAULT_TYPE, is_inline=False):
"""
Sends the disallowed message to the user.
"""
if not is_inline:
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=self.disallowed_message,
disable_web_page_preview=True
)
else:
result_id = str(uuid4())
await self.send_inline_query_result(update, result_id, message_content=self.disallowed_message)
async def send_budget_reached_message(self, update: Update, _: ContextTypes.DEFAULT_TYPE, is_inline=False):
"""
Sends the budget reached message to the user.
"""
if not is_inline:
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=self.budget_limit_message
)
else:
result_id = str(uuid4())
await self.send_inline_query_result(update, result_id, message_content=self.budget_limit_message)
async def post_init(self, application: Application) -> None:
"""
Post initialization hook for the bot.
"""
await application.bot.set_my_commands(self.group_commands, scope=BotCommandScopeAllGroupChats())
await application.bot.set_my_commands(self.commands)
def run(self):
"""
Runs the bot indefinitely until the user presses Ctrl+C
"""
application = ApplicationBuilder() \
.token(self.config['token']) \
.proxy_url(self.config['proxy']) \
.get_updates_proxy_url(self.config['proxy']) \
.post_init(self.post_init) \
.concurrent_updates(True) \
.build()
application.add_handler(CommandHandler('buy', self.buy))
application.add_handler(CommandHandler('reset', self.reset))
application.add_handler(CommandHandler('help', self.help))
application.add_handler(CommandHandler('image', self.image))
application.add_handler(CommandHandler('tts', self.tts))
application.add_handler(CommandHandler('start', self.help))
application.add_handler(CommandHandler('stats', self.stats))
application.add_handler(CommandHandler('resend', self.resend))
application.add_handler(CommandHandler(
'chat', self.prompt, filters=filters.ChatType.GROUP | filters.ChatType.SUPERGROUP)
)
application.add_handler(MessageHandler(
filters.PHOTO | filters.Document.IMAGE,
self.vision))
application.add_handler(MessageHandler(
filters.AUDIO | filters.VOICE | filters.Document.AUDIO |
filters.VIDEO | filters.VIDEO_NOTE | filters.Document.VIDEO,
self.transcribe))
application.add_handler(MessageHandler(filters.TEXT & (~filters.COMMAND), self.prompt))
application.add_handler(PreCheckoutQueryHandler(self.pre_checkout_callback))
application.add_handler(InlineQueryHandler(self.inline_query, chat_types=[
constants.ChatType.GROUP, constants.ChatType.SUPERGROUP, constants.ChatType.PRIVATE
]))
application.add_handler(CallbackQueryHandler(self.handle_callback_inline_query))
application.add_error_handler(error_handler)
application.run_polling()
| [] |
2024-01-10 | TueLindhart/ghg-recipe-estimator | estimator~output_parsers~search_co2_estimator.py | from typing import Optional
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field
# Not able to incorporate at the moment
class CO2SearchResult(BaseModel):
ingredient: str = Field("The original input string with amounts etc. provided in 'Input:'")
explanation: str = Field(description="Explanation of how the final search result is chosen in step-by-step logic")
unit: Optional[str] = Field(description="Unit of search result.", default=None)
result: Optional[float] = Field(
description="Result in kg CO2e per kg. null/None if no useable result is found",
default=None,
)
search_co2_output_parser = PydanticOutputParser(pydantic_object=CO2SearchResult)
| [] |
2024-01-10 | TueLindhart/ghg-recipe-estimator | estimator~chains~weight_estimator.py | from typing import Literal
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from estimator.prompt_templates.weight_estimator import (
DK_WEIGHT_EST_PROMPT,
EN_WEIGHT_EST_PROMPT,
)
def get_weight_estimator_chain(language: Literal["da", "en"], verbose: bool = False):
llm = ChatOpenAI( # type: ignore
temperature=0,
)
en_weight_est_chain = LLMChain(
llm=llm,
prompt=EN_WEIGHT_EST_PROMPT if language == "en" else DK_WEIGHT_EST_PROMPT,
verbose=verbose,
)
return en_weight_est_chain
| [] |
2024-01-10 | TueLindhart/ghg-recipe-estimator | estimator~prompt_templates~sql_co2_estimator.py | from langchain import PromptTemplate
from estimator.output_parsers.sql_co2_estimator import sql_co2_output_parser
EN_LANGUAGE = "English"
DK_LANGUAGE = "Danish"
EN_EXAMPLE_QUERY = (
"'SELECT Name, Total_kg_CO2_eq_kg FROM dk_co2_emission WHERE Name LIKE '%tomato%' OR Name LIKE '%bouillon%'"
)
DK_EXAMPLE_QUERY = (
"'SELECT Navn, Total_kg_CO2_eq_kg FROM dk_co2_emission WHERE Navn LIKE '%tomat%' OR Navn LIKE '%bouillion%'"
)
EN_EXAMPLE_REMOVING = (
"'SELECT Navn, Total_kg_CO2_eq_kg FROM dk_co2_emission WHERE Navn LIKE '%tomato%' OR Navn LIKE '%bouillion%'"
)
DK_EXAMPLE_REMOVING = "'%hakkede tomater%' to '%tomat%' or '%hakket oksekød%' to '%oksekød%'"
EN_EXAMPLE_MATCH = "'1 can of chopped tomatoes' best matches results from 'Tomato, peeled, canned'."
DK_EXAMPLE_MATCH = "'1 dåse hakkede tomater' best matches results from 'Tomat, flået, konserves'."
# EN_EXAMPLE_ANSWER_FOUND = "'Chopped tomatoes: X kg CO2e / kg \n'"
# DK_EXAMPLE_ANSWER_FOUND = "'Hakkede tomater: X kg CO2e/ kg \n'."
# EN_EXAMPLE_ANSWER_NOT_FOUND = "'Chopped tomatoes: ? \n'"
# DK_EXAMPLE_ANSWER_NOT_FOUND = "'Hakkede tomater: ? \n'."
EN_INGREDIENTS_EXAMPLE = """
150 g red lentils
1 can of chopped tomatoes
2 cubes of vegetable bouillon
1 tin of tomato concentrate (140 g)
1 tbsp. lemon juice
1. tbsp. chili powder
1 starfruit
"""
DK_INGREDIENTS_EXAMPLE = """
150 g røde linser
1 dåse hakkede tomater
2 terninger grøntsagsbouillon
1 dåse tomatkoncentrat (140 g)
1 spsk. citronsaft
1. spsk. chilipulver
10 majstortillas
1 stjernefrugt
"""
EN_SQL_QUERY_EXAMPLE = """
SELECT Name, Total_kg_CO2_eq_kg FROM dk_co2_emission WHERE
Name LIKE '%tomato%' OR
Name LIKE '%lentil%' OR
Name LIKE '%bouillon%' OR
Name LIKE '%juice%' OR
Name LIKE '%lemon%' OR
Name LIKE '%chili%' OR
Name LIKE '%starfruit%'
"""
DK_SQL_QUERY_EXAMPLE = """
SELECT Navn, Total_kg_CO2_eq_kg FROM dk_co2_emission WHERE
Navn LIKE '%tomat%' OR
Navn LIKE '%linse%' OR
Navn LIKE '%bouillon%' OR
Navn LIKE '%saft%' OR
Navn LIKE '%citron%' OR
Navn LIKE '%chili%' OR
Navn LIKE '%tortilla%' OR
Navn LIKE '%stjernefrugt%'
"""
EN_SQL_RESULT_EXAMPLE = """
[('Tomato, ripe, raw, origin unknown', 0.7), ('Green lentils, dried', 1.78)
('Tomatojuice, canned', 1.26), ('Tomato, peeled, canned', 1.26)
('Tomato paste, concentrated', 2.48), ('Red lentils, dried', 1.78
('Ice, popsickle, lemonade', 1.15), ('Lemon, raw', 0.94
('Apple juice', 1.64),('Bouillon, chicken, prepared', 0.38)
('Bouillon, beef, prepared', 0.52), ('Pepper, hot chili, raw', 1.02)
('Pepper, hot chili, canned', 1.54),
]
"""
DK_SQL_RESULT_EXAMPLE = """
[('Tomat, uspec., rå', 0.7), ('Grønne linser, tørrede', 1.78)
('Tomatjuice, konserves', 1.26), ('Tomat, flået, konserves', 1.26)
('Tomatpure, koncentreret', 2.48), ('Røde linser, tørrede', 1.78
('Ispind, limonade', 1.15), ('Citron, rå', 0.94
('Æblejuice', 1.64),('Bouillon, hønsekød, spiseklar', 0.38)
('Bouillon, oksekød, spiseklar', 0.52), ('Peber, chili, rå', 1.02)
('Tortillabrød, hvede',0.74), ('Peber, chili, konserves', 1.54),
]
"""
EN_FINAL_ANSWER_EXAMPLE = """
{
"emissions": [
{
"ingredient": "150 g red lentils",
"comment": "",
"unit": "kg CO2e / kg",
"co2_per_kg": 1.78
},
{
"ingredient": "1 can of chopped tomatoes",
"comment": "",
"unit": "kg CO2e / kg",
"co2_per_kg": 1.26
},
{
"ingredient": "2 cubes of vegetable bouillon",
"comment": "closest was chicken bouillon",
"unit": "kg CO2e / kg",
"co2_per_kg": 0.38
},
{
"ingredient": "1 tin of tomato concentrate (140 g)",
"comment": "closest was tomato paste",
"unit": "kg CO2e / kg",
"co2_per_kg": 2.48
},
{
"ingredient": "1 tbsp. lemon juice",
"comment": "Closest was Lemon, raw",
"unit": "kg CO2e / kg",
"co2_per_kg": 0.94
},
{
"ingredient": "1. tbsp. chili powder",
"comment": "closest was 'Pepper, hot chili, canned'",
"unit": "kg CO2e / kg",
"co2_per_kg": 1.54
},
{
"ingredient": "1 starfruit",
"comment": "Not found in database",
"unit": "kg CO2e / kg",
"co2_per_kg": null
}
]
}
"""
DK_FINAL_ANSWER_EXAMPLE = """
{
"emissions": [
{
"ingredient": "150 g røde linser",
"comment": "",
"unit": "kg CO2e / kg",
"co2_per_kg": 1.78
},
{
"ingredient": "1 dåse hakkede tomater",
"comment": "",
"unit": "kg CO2e / kg",
"co2_per_kg": 1.26
},
{
"ingredient": "2 terninger grøntsagsbouillon",
"comment": "tættest var Bouillon, hønsekød, spiseklar",
"unit": "kg CO2e / kg",
"co2_per_kg": 0.38
},
{
"ingredient": "1 dåse tomatkoncentrat (140 g)",
"comment": "tættest var tomatpure, koncentreret",
"unit": "kg CO2e / kg",
"co2_per_kg": 2.48
},
{
"ingredient": "1 spsk. citronsaft",
"comment": "Tættest var citron, rå",
"unit": "kg CO2e / kg",
"co2_per_kg": 0.94
},
{
"ingredient": "1. spsk. chilipulver",
"comment": "tættest var Peber, chili, konserves",
"unit": "kg CO2e / kg",
"co2_per_kg": 1.54
},
{
"ingredient": "10 majstortillas",
"comment": "Tættest var tortillabrød, hvede",
"unit": "kg CO2e / kg",
"co2_per_kg": 0.74
},
{
"ingredient": "1 stjernefrugt",
"comment": "Ikke fundet i databasen",
"unit": "kg CO2e / kg",
"co2_per_kg": null
}
]
}
"""
CO2_SQL_PROMPT_TEMPLATE = """
Given a list of ingredients in {language}, extract the main ingredients from the list
and create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.
Solve the task using the following steps:
- Query all ingredients in a single query. Make sure you query ALL the ingredients provided after `Ingredients:`
Example query: {example_query}
- In the query, remove all non-ingredient words.
Example of removing: {example_removing}
- Match the SQLResult to the list of ingredients based on preparation and type.
Example match: {example_match}
- Return the Answer by the format instructions explained below.
- Do not provide any ranges for the final answer. For example, do not provide '0.1-0.5 kg CO2e per kg' as the final answer.
Instead, return the closest match.
Use the following format:
Ingredients: "Ingredients here"
SQLQuery: "SQL Query to run"
SQLResult: "Result of the SQLQuery"
Only use the following tables:
{table_info}
{format_instructions}
Begin!
Ingredients:
{ingredients_example}
SQLQuery: {query_example}
SQLResult: {query_result_example}
{final_answer_example}
Ingredients: {input}"""
EN_CO2_SQL_PROMPT_TEMPLATE = PromptTemplate(
template=CO2_SQL_PROMPT_TEMPLATE,
input_variables=["dialect", "table_info", "input"],
partial_variables={
"language": EN_LANGUAGE,
"example_query": EN_SQL_QUERY_EXAMPLE,
"example_removing": EN_EXAMPLE_REMOVING,
"example_match": EN_EXAMPLE_MATCH,
# "example_answer": EN_EXAMPLE_ANSWER_FOUND,
# "example_not_found": EN_EXAMPLE_ANSWER_NOT_FOUND,
"ingredients_example": EN_INGREDIENTS_EXAMPLE,
"query_example": EN_SQL_QUERY_EXAMPLE,
"query_result_example": EN_SQL_RESULT_EXAMPLE,
"format_instructions": sql_co2_output_parser.get_format_instructions(),
"final_answer_example": EN_FINAL_ANSWER_EXAMPLE,
},
)
DK_CO2_SQL_PROMPT_TEMPLATE = PromptTemplate(
template=CO2_SQL_PROMPT_TEMPLATE,
input_variables=["dialect", "table_info", "input"],
partial_variables={
"language": DK_LANGUAGE,
"example_query": DK_SQL_QUERY_EXAMPLE,
"example_removing": DK_EXAMPLE_REMOVING,
"example_match": DK_EXAMPLE_MATCH,
# "example_answer": DK_EXAMPLE_ANSWER_FOUND,
# "example_not_found": DK_EXAMPLE_ANSWER_NOT_FOUND,
"ingredients_example": DK_INGREDIENTS_EXAMPLE,
"query_example": DK_SQL_QUERY_EXAMPLE,
"query_result_example": DK_SQL_RESULT_EXAMPLE,
"format_instructions": sql_co2_output_parser.get_format_instructions(),
"final_answer_example": DK_FINAL_ANSWER_EXAMPLE,
},
)
| [
"emissions",
"Grønne linser, tørrede",
"2 terninger grøntsagsbouillon",
"Result of the SQLQuery",
"Apple juice",
"%citron%",
"Bouillon, chicken, prepared",
"Citron, rå",
"150 g røde linser",
"Bouillon, beef, prepared",
"input",
"10 majstortillas",
"ingredient",
"Tomato paste, concentrated",
"0.1-0.5 kg CO2e per kg",
"Pepper, hot chili, canned",
"Closest was Lemon, raw",
"ingredients_example",
"%stjernefrugt%",
"1 can of chopped tomatoes",
"unit",
"tættest var tomatpure, koncentreret",
"Lemon, raw",
"Ingredients here",
"Peber, chili, rå",
"2 cubes of vegetable bouillon",
"table_info",
"example_match",
"Tomatojuice, canned",
"query_example",
"%hakket o",
"kg CO2e / kg",
"1. spsk. chilipulver",
"%tortilla%",
"1 starfruit",
"Ice, popsickle, lemonade",
"Ispind, limonade",
"%bouillon%",
"\nGiven a list of ingredients in {language}, extract the main ingredients from the list\nand create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.\n\nSolve the task using the following steps:\n- Query all ingredients in a single query. Make sure you query ALL the ingredients provided after `Ingredients:`\n Example query: {example_query}\n- In the query, remove all non-ingredient words.\n Example of removing: {example_removing}\n- Match the SQLResult to the list of ingredients based on preparation and type.\n Example match: {example_match}\n- Return the Answer by the format instructions explained below.\n- Do not provide any ranges for the final answer. For example, do not provide '0.1-0.5 kg CO2e per kg' as the final answer.\n Instead, return the closest match.\n\nUse the following format:\nIngredients: \"Ingredients here\"\nSQLQuery: \"SQL Query to run\"\nSQLResult: \"Result of the SQLQuery\"\n\nOnly use the following tables:\n{table_info}\n\n{format_instructions}\n\nBegin!\n\nIngredients:\n{ingredients_example}\n\nSQLQuery: {query_example}\n\nSQLResult: {query_result_example}\n\n{final_answer_example}\n\nIngredients: {input}",
"%lentil%",
"Tættest var tortillabrød, hvede",
"Green lentils, dried",
"Ikke fundet i databasen",
"Tættest var citron, rå",
"SQL Query to run",
"query_result_example",
"Tomato, peeled, canned",
"Not found in database",
"Tomatpure, koncentreret",
"closest was 'Pepper, hot chili, canned'",
"Red lentils, dried",
"tættest var Bouillon, hønsekød, spiseklar",
"tættest var Peber, chili, konserves",
"format_instructions",
"150 g red lentils",
"1 spsk. citronsaft",
"Tomato, ripe, raw, origin unknown",
"Tomat, uspec., rå",
"final_answer_example",
"Tortillabrød, hvede",
"1 tbsp. lemon juice",
"Røde linser, tørrede",
"Peber, chili, konserves",
"Tomatjuice, konserves",
"%starfruit%",
"1 tin of tomato concentrate (140 g)",
"comment",
"Bouillon, oksekød, spiseklar",
"co2_per_kg",
"%hakkede tomater%",
"Pepper, hot chili, raw",
"%linse%",
"example_query",
"example_removing",
"%lemon%",
"closest was chicken bouillon",
"language",
"1 dåse hakkede tomater",
" best matches results from ",
"closest was tomato paste",
"1 dåse tomatkoncentrat (140 g)",
"Bouillon, hønsekød, spiseklar",
"1 stjernefrugt",
"1. tbsp. chili powder"
] |
2024-01-10 | TueLindhart/ghg-recipe-estimator | estimator~output_parsers~retry_parser.py | from langchain.chat_models import ChatOpenAI
from langchain.output_parsers import RetryWithErrorOutputParser
from langchain.schema.output_parser import BaseOutputParser
def get_retry_parser(parser: BaseOutputParser):
return RetryWithErrorOutputParser.from_llm(parser=parser, llm=ChatOpenAI(temperature=0))
| [] |
2024-01-10 | TueLindhart/ghg-recipe-estimator | estimator~output_parsers~weight_estimator.py | from typing import List, Optional
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field
class WeightEstimate(BaseModel):
ingredient: str = Field(description="Ingredient as called in ingredient list")
weight_calculation: str = Field(description="Description of how weights are estimated")
weight_in_kg: Optional[float] = Field(description="Weight provided in kg", default=None)
class WeightEstimates(BaseModel):
weight_estimates: List[WeightEstimate] = Field(description="List of 'WeightEstimate' per ingredient.")
weight_output_parser = PydanticOutputParser(pydantic_object=WeightEstimates)
| [] |
2024-01-10 | TueLindhart/ghg-recipe-estimator | estimator~prompt_templates~recipe_extractor.py | from langchain.output_parsers import CommaSeparatedListOutputParser
recipe_output_parser = CommaSeparatedListOutputParser()
# RECIPE_EXTRACTOR_PROMPT = """
# Act as an expert in extracting recipes from text that understand danish and english.
# Given an unstructured text containing a recipe, extract the ingredients including the amounts of the ingredients (quantity, weight etc.).
# Sometimes, there is no recipe to be found and then you return an empty python list ([]).
# Sometimes the recipe is already provided. In that case just output the input in the format described below.
# The input/text is delimited by ####.
# {format_instructions}
# Begin!
# ####
# {input}
# ####
# """
RECIPE_EXTRACTOR_PROMPT = """
Act as an expert in extracting recipes from text that understand danish and english.
Given an unstructured text containing a recipe, extract the ingredients and the amount of the recipe.
Sometimes, there is no recipe to be found and then you return 'No ingredients'.
Sometimes the ingredients list is already provided. In that case just output the input in the format described below.
Example of ingredients already provided in Danish: oksemørbrad (250 g), 2 gulerødder
Example of ingredients already provided in English:
250 g cream
400 g beef tenderloin
The input/text is delimited by ####.
{format_instructions}
Begin!
####
dansk hovedret 12 tilberedningstid 45 minutter arbejdstid 25 minutter print bedøm denne opskrift rated 4
/ 5 based on 1 customer reviews hov! du skal være logget ind. log ind bliv medlem ingredienser (12) 1 2 3 4 5 6 7 8
antal personer: 500 gram torskefilet 1 tsk havsalt 2 stk æg 1 stk gulerod 0.5 deciliter fløde 13% 0.5 tsk revet
muskatnød 1 tsk peber 2 spsk olie 4 deciliter creme fraiche 18% 4 stk æggeblomme 2 spsk frisk dild 4 spsk frisk persille
####
500 gram torskefilet, 1 tsk havsalt, 2 stk æg, 1 stk gulerod, 0.5 deciliter fløde 13%, 0.5 tsk revet muskatnød, 1 tsk peber, 2 spsk olie, 4 deciliter creme fraiche 18%, 4 stk æggeblomme, 2 spsk frisk dild, 4 spsk frisk persille
####
{input}
####
"""
| [
"\nAct as an expert in extracting recipes from text that understand danish and english.\nGiven an unstructured text containing a recipe, extract the ingredients and the amount of the recipe.\nSometimes, there is no recipe to be found and then you return 'No ingredients'.\n\n\nSometimes the ingredients list is already provided. In that case just output the input in the format described below.\n\nExample of ingredients already provided in Danish: oksemørbrad (250 g), 2 gulerødder\nExample of ingredients already provided in English:\n250 g cream\n400 g beef tenderloin\n\nThe input/text is delimited by ####.\n\n{format_instructions}\n\nBegin!\n\n####\ndansk hovedret 12 tilberedningstid 45 minutter arbejdstid 25 minutter print bedøm denne opskrift rated 4\n/ 5 based on 1 customer reviews hov! du skal være logget ind. log ind bliv medlem ingredienser (12) 1 2 3 4 5 6 7 8\nantal personer: 500 gram torskefilet 1 tsk havsalt 2 stk æg 1 stk gulerod 0.5 deciliter fløde 13% 0.5 tsk revet\nmuskatnød 1 tsk peber 2 spsk olie 4 deciliter creme fraiche 18% 4 stk æggeblomme 2 spsk frisk dild 4 spsk frisk persille\n####\n\n500 gram torskefilet, 1 tsk havsalt, 2 stk æg, 1 stk gulerod, 0.5 deciliter fløde 13%, 0.5 tsk revet muskatnød, 1 tsk peber, 2 spsk olie, 4 deciliter creme fraiche 18%, 4 stk æggeblomme, 2 spsk frisk dild, 4 spsk frisk persille\n\n####\n{input}\n####\n"
] |
2024-01-10 | TueLindhart/ghg-recipe-estimator | estimator~chains~sql_co2_estimator.py | import os
from typing import Literal
from langchain.chat_models import ChatOpenAI
from langchain.sql_database import SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain
from estimator.prompt_templates.sql_co2_estimator import (
DK_CO2_SQL_PROMPT_TEMPLATE,
EN_CO2_SQL_PROMPT_TEMPLATE,
)
def get_co2_sql_chain(language: Literal["da", "en"], verbose: bool = False):
sql_dk_co2_db = SQLDatabase.from_uri(
f"sqlite:///{os.getcwd()}/estimator/data/dk_co2_emission.db", sample_rows_in_table_info=2
)
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-16k") # type: ignore
co2_sql_chain = SQLDatabaseChain.from_llm(
llm=llm,
db=sql_dk_co2_db,
verbose=verbose,
prompt=EN_CO2_SQL_PROMPT_TEMPLATE if language == "en" else DK_CO2_SQL_PROMPT_TEMPLATE,
top_k=200,
)
return co2_sql_chain
| [] |
2024-01-10 | TueLindhart/ghg-recipe-estimator | estimator~chains~recipe_extractor.py | from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts.prompt import PromptTemplate
from estimator.prompt_templates.recipe_extractor import (
RECIPE_EXTRACTOR_PROMPT,
recipe_output_parser,
)
def get_recipe_extractor_chain(verbose: bool = False):
prompt = PromptTemplate(
template=RECIPE_EXTRACTOR_PROMPT,
input_variables=["input"],
partial_variables={"format_instructions": recipe_output_parser.get_format_instructions()},
output_parser=recipe_output_parser,
)
llm = ChatOpenAI( # type: ignore
temperature=0,
)
recipe_extractor_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
return recipe_extractor_chain
| [
"input",
"format_instructions"
] |
2024-01-10 | TueLindhart/ghg-recipe-estimator | estimator~prompt_templates~weight_estimator.py | from langchain import PromptTemplate
from estimator.output_parsers.weight_estimator import weight_output_parser
EN_WEIGHT_RECALCULATIONS = """
1 can = 400 g = 0.4 kg
1 bouillon cube = 4 g = 0.004 kg
1 large onion = 285 g = 0.285 kg
1 medium onion = 170 g = 0.170 kg
1 small onion = 115 g = 0.115 kg
1 bell pepper = 150 g = 0.150 kg
1 can tomato paste = 140 g = 0.140 kg
1 tablespoon/tbsp. = 15 g = 0.015 kg
1 teaspoon/tsp. = 5 g = 0.005 kg
1 potato = 170 - 300 g = 0.170 - 0.300 kg
1 carrot = 100 g = 0.100 kg
1 lemon = 85 g = 0.085 kg
1 tortilla = 30 g = 0.030 kg
1 squash = 400 g = 0.400 kg
1 clove garlic = 0.004 kg
1 dl / deciliter = 0.1 kg
Handful of herbs (basil, oregano etc.) = 0.025 kg
Examples of a bunch/bnch of an ingredient - use them as a guideline:
1 bunch/bnch parsley = 50 g = 0.050 kg
1 bunch/bnch asparagus = 500 g = 0.500 kg
1 bunch of carrots = 750 g = 0.750 kg
1 bunch/bnch tomatoes = 500 g = 0.500 kg
The weights of bunches are estimated as the highest possible weight.
"""
DK_WEIGHT_RECALCULATIONS = """
1 dåse = 400 g = 0.4 kg
1 terning bouillon = 4 g = 0.004 kg
1 stor løg = 285 g = 0.285 kg
1 mellem løg = 170 g = 0.170 kg
1 lille løg = 115 g = 0.115 kg
1 peberfrugt = 150 g = 0.150 kg
1 dåse tomatkoncentrat = 140 g = 0.140 kg
1 spiseskefuld/spsk. = 15 g = 0.015 kg
1 teskefuld/tsk. = 5 g = 0.005 kg
1 kartoffel = 170 - 300 g = 0.170 - 0.300 kg
1 gulerod = 100 g = 0.100 kg
1 citron = 85 g = 0.085 kg
1 tortilla = 30 g = 0.030 kg
1 squash = 400 g = 0.400 kg
1 fed hvidløg = 0.004 kg
1 dl / deciliter = 0.1 kg
Håndful urter (basilikum, oregano osv.) = 0.025 kg
Examples of bdt/bundt af en ingrediens - use them as a guideline:
1 bundt/bdt persille = 50 g = 0.050 kg
1 bundt/bdt asparges = 500 g = 0.500 kg
1 bundt gulerødder = 750 g = 0.750 kg
1 bundt/bdt tomater = 500 g = 0.500 kg
The weights of bdt/bundt are estimated the highest possible weight.
"""
EN_INPUT_EXAMPLE = """
1 can chopped tomatoes
200 g pasta
500 ml water
250 grams minced meat
0.5 cauliflower
1 tsp. sugar
1 organic lemon
3 teaspoons salt
2 tbsp. spices
pepper
2 large potatoes
1 bunch asparagus
"""
DK_INPUT_EXAMPLE = """
1 dåse hakkede tomater
200 g pasta
500 ml vand
250 gram hakket kød
0.5 blomkål
1 tsk. sukker
1 økologisk citron
3 teskefulde salt
2 spsk. krydderi
peber
2 store kartofler
1 bdt asparges
"""
DK_ANSWER_EXAMPLE = """
{
"weight_estimates": [
{
"ingredient": "1 dåse hakkede tomater",
"weight_calculation": "1 dåse = 400 g = 0.4 kg",
"weight_in_kg": 0.4
},
{
"ingredient": "200 g pasta",
"weight_calculation": "200 g = 0.2 kg",
"weight_in_kg": 0.2
},
{
"ingredient": "500 ml vand",
"weight_calculation": "500 ml = 0.5 kg",
"weight_in_kg": 0.5
},
{
"ingredient": "250 gram hakket kød",
"weight_calculation": "250 g = 0.25 kg",
"weight_in_kg": 0.25
},
{
"ingredient": "0.5 blomkål",
"weight_calculation": "1 blomkål = 500 g (estimeret af LLM model) = 0.5 kg",
"weight_in_kg": 0.5
},
{
"ingredient": "1 tsk. sukker",
"weight_calculation": "1 teskefuld = 5 g = 0.005 kg",
"weight_in_kg": 0.005 },
{
"ingredient": "1 økologisk citron",
"weight_calculation": "1 citron = 85 g = 0.085 kg",
"weight_in_kg": 0.085
},
{
"ingredient": "3 teskefulde salt",
"weight_calculation": "1 tsk. = 5 g, 3 * 5 g = 15 g = 0.015 kg",
"weight_in_kg": 0.015 },
{
"ingredient": "2 spsk. krydderi",
"weight_calculation": "1 spsk. = 15 g, 2 * 15 g = 30 g = 0.030 kg",
"weight_in_kg": 0.03 },
{
"ingredient": "peber",
"weight_calculation": "antal peber er ikke angivet.",
"weight_in_kg": null },
{
"ingredient": "2 store kartofler",
"weight_calculation": "1 stor kartoffel = 300 g, 2 * 300 g = 600 g = 0.6 kg",
"weight_in_kg": 0.6
},
{
"ingredient": "1 bdt asparges",
"weight_calculation": "1 bdt asparges = 500 g = 0.500 kg",
"weight_in_kg": 0.5
}
]
}
"""
EN_ANSWER_EXAMPLE = """
{
"weight_estimates": [
{
"ingredient": "1 can chopped tomatoes",
"weight_calculation": "1 can = 400 g = 0.4 kg",
"weight_in_kg": 0.4
},
{
"ingredient": "200 g pasta",
"weight_calculation": "200 g = 0.2 kg",
"weight_in_kg": 0.2
},
{
"ingredient": "500 ml water",
"weight_calculation": "500 ml = 0.5 kg",
"weight_in_kg": 0.5
},
{
"ingredient": "250 grams minced meat",
"weight_calculation": "250 g = 0.25 kg",
"weight_in_kg": 0.25
},
{
"ingredient": "0.5 cauliflower",
"weight_calculation": "1 cauliflower = 500 g (estimated by LLM model) = 0.5 kg",
"weight_in_kg": 0.5
},
{
"ingredient": "1 tsp. sugar",
"weight_calculation": "1 teaspoon = 5 g = 0.005 kg",
"weight_in_kg": 0.005 },
{
"ingredient": "1 organic lemon",
"weight_calculation": "1 lemon = 85 g = 0.085 kg",
"weight_in_kg": 0.085
},
{
"ingredient": "3 teaspoons salt",
"weight_calculation": "1 tsp. = 5 g, 3 * 5 g = 15 g = 0.015 kg",
"weight_in_kg": 0.015 },
{
"ingredient": "2 tbsp. spices",
"weight_calculation": "1 tbsp. = 15 g, 2 * 15 g = 30 g = 0.030 kg",
"weight_in_kg": 0.03 },
{
"ingredient": "pepper",
"weight_calculation": "amount of pepper not specified",
"weight_in_kg": null },
{
"ingredient": "2 large potatoes",
"weight_calculation": "1 large potato = 300 g, 2 * 300 g = 600 g = 0.6 kg",
"weight_in_kg": 0.6
},
{
"ingredient": "1 bunch asparagus",
"weight_calculation": "1 bunch asparagus = 500 g = 0.500 kg",
"weight_in_kg": 0.5
}
]
}
"""
WEIGHT_EST_PROMPT = """
Given a list of ingredients, estimate the weights in kilogram for each ingredient.
Explain your reasoning for the estimation of weights.
The following general weights can be used for estimation:
{recalculations}
If an ingredient is not found in the list of general weights, try to give your best estimate
of the weight in kilogram/kg of the ingredient and say (estimated by LLM model).
Your estimate must always be a python float. Therefore, you must not provide any intervals.
Input is given after "Ingredients:"
{format_instructions}
Ingredients:
{input_example}
Answer:
{answer_example}
Ingredients:
{input}
"""
DK_WEIGHT_EST_PROMPT = PromptTemplate(
template=WEIGHT_EST_PROMPT,
input_variables=["input"],
partial_variables={
"recalculations": DK_WEIGHT_RECALCULATIONS,
"input_example": DK_INPUT_EXAMPLE,
"answer_example": DK_ANSWER_EXAMPLE,
"format_instructions": weight_output_parser.get_format_instructions(),
},
)
EN_WEIGHT_EST_PROMPT = PromptTemplate(
template=WEIGHT_EST_PROMPT,
input_variables=["input"],
partial_variables={
"recalculations": EN_WEIGHT_RECALCULATIONS,
"input_example": EN_INPUT_EXAMPLE,
"answer_example": EN_ANSWER_EXAMPLE,
"format_instructions": weight_output_parser.get_format_instructions(),
},
)
| [
"1 bdt asparges",
"1 large potato = 300 g, 2 * 300 g = 600 g = 0.6 kg",
"3 teaspoons salt",
"500 ml water",
"recalculations",
"weight_estimates",
"answer_example",
"250 gram hakket kød",
"0.5 blomkål",
"200 g = 0.2 kg",
"format_instructions",
"1 økologisk citron",
"500 ml = 0.5 kg",
"2 large potatoes",
"input",
"1 stor kartoffel = 300 g, 2 * 300 g = 600 g = 0.6 kg",
"weight_in_kg",
"input_example",
"1 spsk. = 15 g, 2 * 15 g = 30 g = 0.030 kg",
"1 tbsp. = 15 g, 2 * 15 g = 30 g = 0.030 kg",
"1 bdt asparges = 500 g = 0.500 kg",
"ingredient",
"3 teskefulde salt",
"1 tsk. = 5 g, 3 * 5 g = 15 g = 0.015 kg",
"1 bunch asparagus = 500 g = 0.500 kg",
"2 tbsp. spices",
"1 dåse hakkede tomater",
"1 can = 400 g = 0.4 kg",
"1 tsp. = 5 g, 3 * 5 g = 15 g = 0.015 kg",
"weight_calculation",
"1 blomkål = 500 g (estimeret af LLM model) = 0.5 kg",
"1 tsp. sugar",
"500 ml vand",
"250 g = 0.25 kg",
"1 cauliflower = 500 g (estimated by LLM model) = 0.5 kg",
"1 can chopped tomatoes",
"2 store kartofler",
"1 bunch asparagus",
"2 spsk. krydderi",
"1 lemon = 85 g = 0.085 kg",
"1 organic lemon",
"250 grams minced meat",
"amount of pepper not specified",
"\nGiven a list of ingredients, estimate the weights in kilogram for each ingredient.\nExplain your reasoning for the estimation of weights.\n\nThe following general weights can be used for estimation:\n{recalculations}\n\nIf an ingredient is not found in the list of general weights, try to give your best estimate\nof the weight in kilogram/kg of the ingredient and say (estimated by LLM model).\nYour estimate must always be a python float. Therefore, you must not provide any intervals.\n\nInput is given after \"Ingredients:\"\n\n{format_instructions}\n\nIngredients:\n{input_example}\n\nAnswer:\n{answer_example}\n\nIngredients:\n{input}\n",
"Ingredients:",
"1 tsk. sukker",
"1 teaspoon = 5 g = 0.005 kg",
"1 citron = 85 g = 0.085 kg",
"1 teskefuld = 5 g = 0.005 kg",
"1 dåse = 400 g = 0.4 kg",
"antal peber er ikke angivet.",
"0.5 cauliflower",
"200 g pasta"
] |
2024-01-10 | TueLindhart/ghg-recipe-estimator | estimator~output_parsers~sql_co2_estimator.py | from typing import List, Optional
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field
class CO2perKg(BaseModel):
ingredient: str = Field(description="Name of ingredient")
comment: str = Field(description="Comment about result. For instance what closest result is.")
unit: str = Field(description="The unit which is kg CO2e per kg")
co2_per_kg: Optional[float] = Field(description="kg CO2 per kg for ingredient", default=None)
class CO2Emissions(BaseModel):
emissions: List[CO2perKg]
sql_co2_output_parser = PydanticOutputParser(pydantic_object=CO2Emissions)
| [] |
2024-01-10 | zakin19/scraping_with_search_engine_custom_google_to_wordpress | scraping%20google%20whatsapp%20ai%20full.py | import requests
from selenium import webdriver
from bs4 import BeautifulSoup
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.service import Service
import json
import time
import openai
import re
import os
from retry import retry
import retrying
import ast
import base64
import schedule
from schedule import every, repeat, run_pending
import replicate
import ast
import random
import schedule
from PIL import Image
import base64
import io
import nltk
from nltk.tokenize import word_tokenize
# PORTAL ARTIKEL
api_key = 'AIzaSyA53D-8SCEcgSSXHJ_PJV8KhROpoCtZvZ8'
# api_key2 = 'AIzaSyDzQtl2AQJxpDPR26dWW_gcwFnTd--Dv8Q'
cx = 'd066eb327d49d406c'
query = ['trends whatsapp ai', 'whatsapp ai features',
'whatsapp ai news', 'whatsapp ai article'] # list keyword
num_results = 30 # Jumlah total hasil yang Anda inginkan
random_query = random.choice(query)
# Hitung jumlah halaman yang diperlukan
num_pages = (num_results + 9) // 10 # Pembagian bulat ke atas
# Inisialisasi daftar untuk menyimpan semua tautan
all_links = []
for page in range(1, num_pages + 1):
start = (page - 1) * 10 + 1
url = f"https://www.googleapis.com/customsearch/v1?key={api_key}&cx={cx}&q={random_query}&start={start}"
response = requests.get(url)
if response.status_code == 200:
data = response.json()
# Ambil semua tautan dari halaman saat ini dan tambahkan ke daftar all_links
links_on_page = [item.get('link') for item in data.get('items', [])]
all_links.extend(links_on_page)
else:
print(
f"Gagal melakukan permintaan API untuk halaman {page}: {response.status_code}")
break # Keluar dari loop jika ada kesalahan
excluded_keywords = ["categories", "tags", "https://www.timworks.com/ariana", "https://www.askjinni.ai/", "https://www.engati.com/blog/create-whatsapp-chatbot", "https://codecanyon.net/", "https://myoperator.com/",
"https://getaipal.com/", "https://www.konverse.ai/", "https://www.socialmediatoday.com/", "https://skolo-online.medium.com/chatgpt-now-on-whatsapp-your-personal-ai-assistant-506c5bda5b70"]
# filter_link = [url for url in all_links if not any(keyword in url for keyword in excluded_keywords)]
filter_link = [url for url in all_links if len(url) >= 31 and not any(
keyword in url for keyword in excluded_keywords)]
print(random_query)
# Sekarang, semua tautan tersimpan dalam variabel all_links
for i in filter_link:
print(i)
print(len(filter_link))
# membuat penanda link
file_path = 'loglinkwhatsappai.txt'
def cek_url(url):
if not os.path.exists(file_path):
with open(file_path, 'w') as file:
pass
with open(file_path, 'r') as file:
scraped_urls = set(url.strip() for url in file.readlines())
if url in scraped_urls:
return True
else:
scraped_urls.add(url)
return False
# SAVE URL KE PENANDA
def saveurls(link):
with open(file_path, 'a') as file:
file.write(link + '\n')
# VARIABEL CHROMRDRIVER & API OPENAI
service = Service('chromedriver.exe')
options = webdriver.ChromeOptions()
# Menjalankan browser dalam mode headless (tanpa tampilan GUI)
options.add_argument('--headless')
driver = webdriver.Chrome(service=service, options=options)
openai.api_type = "azure"
openai.api_version = "2023-05-15"
openai.api_base = "https://cog-openai-prod-002.openai.azure.com/"
openai.api_key = 'a21fd07e964e403baa7d242572598c60'
def full_scraping():
# PROSES SCRAPING
@retry(tries=3, delay=5)
def scraping():
# link = scrap_portal()
artikel = []
# filter_url = [url for url in link if "contributor" not in url]
for url in filter_link:
if cek_url(url):
continue
else:
if len(artikel) >= 1:
continue
try:
agent = {"User-Agent": "Mozilla/5.0"}
get_data = requests.get(url, headers=agent)
get_data = get_data.content
soup = BeautifulSoup(get_data, 'html.parser')
result = []
paragraphs = soup.find_all(['p'])
# print(paragraphs)
for paragraph in paragraphs:
# if paragraph.find('a'):
# continue # Skip paragraph with <em> or <a> tags
# else:
result.append(paragraph.get_text())
paragraf = ' '.join(result)
except Exception as e:
print(f"Kesalahan: {str(e)}")
saveurls(url)
return None # Mengembalikan None dalam kasus kesalahan
if 2500 < len(paragraf) < 20000:
scraping = {'link': url,
'content': paragraf}
artikel.append(scraping)
else:
with open(file_path, 'a') as file:
file.write(url + '\n')
response = 'artikel tidak memenuhi'
scraping()
return response
for d in artikel:
for k, v in d.items():
d[k] = v.replace('\xa0', '')
if artikel == []:
print("Tidak Ada Artikel Baru")
else:
print("artikel didapatkan")
return artikel
konten = scraping()
# Periksa hasil
if konten is None:
print("Scraping gagal menjalankan ulang..")
# Jika fungsi utama gagal, jalankan fungsi alternatif
konten = scraping()
print("Hasil : ", konten)
else:
print("\nHasil scrap: \n", konten)
# print("Berhasil")
# cek kondisi isi konten
if konten is None:
print("artikel tidak ada/sudah discrap semua")
return None
else:
# definisi
hasil = konten[0]['content']
# token
tokens = word_tokenize(hasil)
jumlah_token = len(tokens)
for i in konten:
link = i['link']
print('\nlink artikel', link)
print('artikel awal:\n', hasil)
# CHECKING ARTICLE
# Fungsi untuk menghitung panjang string menggunakan tokenizer
def count_tokens(text):
# Menggunakan NLTK tokenizer
tokens = nltk.word_tokenize(text)
return len(tokens)
@retry(openai.error.OpenAIError, tries=10, delay=10)
def process_short():
response = openai.ChatCompletion.create(
engine="gpt-35-turbo",
messages=[
{"role": "system", "content": "Kamu adalah mesin penerjemah bahasa Inggris ke bahasa Indonesia yang handal, kamu juga mampu menulis ulang artikel sekaligus melakukan SEO Optimized dengan luar biasa. jika artikel yang diberikan lebih dari 5000 kata maka kamu harus membuat artikelnya menjadi lebih padat dengan minimal output artikel 3000 kata dan maksimal 5000 kata sehingga lebih padat dan jelas!"},
{"role": "user", "content": "OUTPUT YANG KAMU BERI TIDAK BOLEH KURANG DARI PANJANG ARTIKEL AWAL, Lakukan SEO Optimized dan terjemahkan ke dalam bahasa Indonesia. Berikut artikel yang harus kamu eksekusi: \n" + prompt}],
temperature=0
)
if response["choices"][0]["finish_reason"] == "content_filter":
saveurls(link)
print("gagal konten terfilter")
full_scraping()
else:
translate = response['choices'][0]['message']['content']
time.sleep(5)
print("\nHasil translate : \n", translate)
return translate
@retry(openai.error.OpenAIError, tries=10, delay=10)
def process_long():
all_konten1 = []
for konten1 in prompt:
print("\n", konten1)
response = openai.ChatCompletion.create(
engine="gpt-35-turbo",
messages=[
{"role": "system", "content": "Kamu adalah mesin penerjemah bahasa Inggris ke bahasa Indonesia yang handal, kamu juga mampu menulis ulang artikel sekaligus melakukan SEO Optimized dengan luar biasa. jika artikel yang diberikan lebih dari 5000 kata maka kamu harus membuat artikelnya menjadi lebih padat dengan minimal output artikel 3000 kata dan maksimal 5000 kata sehingga lebih padat dan jelas!"},
{"role": "user", "content": "OUTPUT YANG KAMU BERI TIDAK BOLEH KURANG DARI PANJANG ARTIKEL AWAL, Lakukan SEO Optimized dan terjemahkan ke dalam bahasa Indonesia. Berikut artikel yang harus kamu eksekusi: \n" + konten1}],
temperature=0
)
if response["choices"][0]["finish_reason"] == "content_filter":
saveurls(link)
print("gagal konten terfilter")
full_scraping()
else:
translate = response['choices'][0]['message']['content']
all_konten1.append(translate)
time.sleep(5)
print("\nHasil translate 3 split : \n")
print(all_konten1)
return all_konten1
# Memecah string menjadi per kata
token = hasil.split()
# Menghitung panjang teks menggunakan tokenizer
panjang_teks = count_tokens(hasil)
print("\npanjang token : ", panjang_teks)
# Memeriksa panjang teks dan menjalankan fungsi yang sesuai
if panjang_teks > 2500:
# process_long_text(hasil)
print("Teks lebih dari 2500 token:")
jumlah_token = len(token)
# Membagi string menjadi tiga bagian dengan jumlah token yang sama
bagian1 = " ".join(token[:jumlah_token // 3])
bagian2 = " ".join(token[jumlah_token // 3: 2 * (jumlah_token // 3)])
bagian3 = " ".join(token[2 * (jumlah_token // 3):])
# Menyimpan hasil pemecahan kembali dalam variabel 'content'
hasil = [bagian1, bagian2, bagian3]
prompt = hasil
# Cetak hasilnya
# print(prompt)
# len(word_tokenize(bagian_kedua))
# len(word_tokenize(bagian_pertama))
art_translate = process_long()
konten2 = " ".join(art_translate)
else:
prompt = hasil
konten2 = process_short()
# PROSES GPT
@retry(openai.error.OpenAIError, tries=10, delay=10)
def full_gpt():
response = openai.ChatCompletion.create(
engine="gpt-35-turbo",
messages=[
{"role": "system", "content": "Kamu adalah mesin pengedit artikel yang handal, kamu mampu memisahkan artikel dari kalimat yang tidak diperlukan."},
{"role": "user", "content": "lakukan penyuntingan pada artikel berikut : \n" + konten2 + "\n ambil isi artikel saja dan hapus kalimat yang tidak diperlukan, seperti : penulis, author, footer, catatan kaki, sumber, promosi, iklan, daftar isi, dan kalimat yang tidak sesuai dengan isi artikel, pastikan menggunakan bahasa indonesia yang benar"}],
temperature=0
)
if response["choices"][0]["finish_reason"] == "content_filter":
saveurls(link)
print("gagal konten terfilter")
full_scraping()
else:
konten3 = response['choices'][0]['message']['content']
time.sleep(5)
print("\nHasil filter : \n", konten3)
teks_to_tags = konten[0]['content'][:500]
response = openai.ChatCompletion.create(
engine="gpt-35-turbo",
messages=[
{"role": "system", "content": "Kamu adalah seorang ahli mesin dalam mengklasifikasikan tag dalam sebuah artikel. Anda dapat meneliti artikel dan menentukan tag yang sesuai."},
{"role": "user", "content": "Tentukan tag untuk artikel berikut :" + teks_to_tags +
"{selected tags from this list based on corresponding article: Omnichannel Customer Service, Omnichannel, Customer Service. if Omnichannel Customer Services convert to [3], if Omnichannel convert to [4], if Customer Service convert to [5], else convert to []} you must print output with format list integer"}
],
temperature=0
)
if response["choices"][0]["finish_reason"] == "content_filter":
saveurls(link)
print("gagal konten terfilter")
full_scraping()
else:
tags = response['choices'][0]['message']['content']
time.sleep(1)
response = openai.ChatCompletion.create(
engine="gpt-35-turbo",
messages=[
{"role": "system", "content": "Kamu adalah mesin yang dirancang untuk mahir memparafrasekan dan melakukan optimasi SEO pada artikel berbahasa Indonesia dengan profesional."},
{"role": "user", "content": "Tolong parafrase kemudian lakukan optimasi SEO menggunakan gaya penulisan profesional forbes atau The New York Times pada artikel berikut ini:\n" + konten3 +
"\n\nJangan menulis penjelasan dan basa-basi apa pun selain dari isi artikel, gunakanlah bahasa indonesia yang baik dan benar serta hapus kalimat yang tidak berkaitan dengan isi artikel.\nBerikan output artikel yang telah diformat ulang saja, tidak perlu menyertakan artikel awal"}
],
temperature=0
)
if response["choices"][0]["finish_reason"] == "content_filter":
saveurls(link)
print("gagal konten terfilter")
full_scraping()
else:
SEO = response['choices'][0]['message']['content']
time.sleep(2)
print("\nHasil SEO : \n", SEO)
response = openai.ChatCompletion.create(
engine="gpt-35-turbo",
messages=[
{"role": "system",
"content": "Kamu adalah mesin editor artikel profesional."},
{"role": "user", "content": "Tolong edit artikel berikut :\n" + SEO +
"\n\ntambahkan bold tags <b> dan underline tags <u> untuk semua istilah asing (selain bahasa indonesia) yang kamu temui, berikut salah satu contohnya : <b><u>chatbot<u/><b/>. \n\nMohon dipastikan penggunaan bahasa Indonesia yang baik dan benar. \nJangan menulis penjelasan apa pun dan basa-basi apa pun. Tolong artikel yang telah diformat ulang menggunakan format ini: <title>judul artikel</title> <h1>Headline dari isi artikel(buatlah 1 kalimat topik dari artikel yang isinya berbeda dengan judul artikel)</h1> <p>isi artikel selain judul dan headline</p>"}
],
temperature=0
)
if response["choices"][0]["finish_reason"] == "content_filter":
saveurls(link)
print("gagal konten terfilter")
full_scraping()
else:
font_formatted = response['choices'][0]['message']['content']
time.sleep(2)
print("\nHasil format font: \n", font_formatted)
response = openai.ChatCompletion.create(
engine="gpt-35-turbo",
messages=[
{"role": "system",
"content": "Kamu adalah mesin editor artikel profesional."},
{"role": "user", "content": "lakukan penyuntingan artikel yang saya berikan :\n" + "\n" + font_formatted + "\nsunting artikel di atas dengan menambahkan annotations terhadap kata-kata pada artikel diatas yang mengandung keyword \"ai\", \"omnichannel\", dan \"chatbot\" untuk diformat menjadi link pada struktur html dengan ketentuan sebagai berikut:\n- Jika 'ai', maka link terhubung ke https://botika.online/\n- Jika 'chatbot', link terhubung ke https://botika.online/chatbot-gpt/index.php\n- Jika 'omnichannel', link terhubung ke https://omni.botika.online/\nFormatnya harus seperti ini: <a href=\"{link}\">{keyword}</a>. JANGAN MENAMBAHKAN APAPUN JIKA KATA TERSEBUT TIDAK ADA DALAM ARTIKEL DAN JANGAN MENGHAPUS TAGS YANG SUDAH ADA"}
],
temperature=0
)
if response["choices"][0]["finish_reason"] == "content_filter":
saveurls(link)
print("gagal konten terfilter")
full_scraping()
else:
artikel_post = response['choices'][0]['message']['content']
print("\nHasil akhir : \n", artikel_post)
time.sleep(2)
# Menggunakan regex untuk mengekstrak teks di antara tag <title>
title_pattern = r'<title>(.*?)</title>'
title_match = re.search(title_pattern, font_formatted)
# Mengambil teks yang cocok di antara tag <title>
if title_match:
title_text = title_match.group(1)
judul = title_text
else:
post = artikel_post.split('\n')
judul = post[0]
# make english title
response = openai.ChatCompletion.create(
engine="gpt-35-turbo",
messages=[
{"role": "system",
"content": "Kamu adalah mesin penerjemah kedalam bahasa inggris yang handal."},
{"role": "user", "content": "terjemahkan kalimat berikut kedalam bahasa inggris : "+judul}
],
temperature=0
)
if response["choices"][0]["finish_reason"] == "content_filter":
title_eng = judul
else:
title_eng = response['choices'][0]['message']['content']
time.sleep(2)
# post = artikel_post.split('\n')
# title = post[0]
# content = ''.join(post[1:])
return tags, judul, artikel_post, title_eng
tags, judul, artikel_post, title_eng = full_gpt()
print("judul midjourney: ", title_eng)
# GENERATE PROMPT REPLICATE
def gen_img():
response = openai.ChatCompletion.create(
engine="gpt-35-turbo", # engine = "deployment_name".
messages=[
{"role": "user", "content": """ChatGPT will now enter "Midjourney Prompt Generator Mode" and restrict ChatGPT's inputs and outputs to a predefined framework, please follow these instructions carefully.
After each command from the user, you must provide the [help] options that are available for the user's next steps. When you do this, you must do so in list form. Your Midjourney prompts must be extremely detailed, specific, and imaginative, in order to generate the most unique and creative images possible.
Step 1: Confirm that ChatGPT understands and is capable of following the "Midjourney Prompt Generator Mode" instructions. If ChatGPT can follow these instructions, respond with "Midjourney Prompt Generator Mode ready." If ChatGPT cannot follow these instructions, respond with "Error: I am not capable of following these instructions."
Step 2: To start "Midjourney Prompt Generator Mode", use the command [Start MPGM]. ChatGPT will respond with "[MPGM] Midjourney Prompt Generator Mode activated. [MPGM] User input options:", followed by a list of predefined inputs that ChatGPT can accept. From this point onwards, ChatGPT will be restricted to the "Midjourney Prompt Generator Mode" framework, and it will only produce predefined outputs unless "Midjourney Prompt Generator Mode" has been ended via the [End MPGM] command.
Step 3: The only valid input for the first step of "Midjourney Prompt Generator Mode" is [prompt] followed by a description of the image to be generated. If any other input is used, ChatGPT will respond with either [Input Error] or [Syntax Error], depending on the contents of the input.
Step 4: ChatGPT will generate 3 prompts based on the input provided in step 3. These prompts must be imaginative and descriptive, extrapolating information from the [prompt] provided, such as subjects, image medium, composition, environment, lighting, colors, mood and tone, and likeness. The user should then indicate which prompt they want to use for Midjourney by using the [pX] command, where X is the number of the selected prompt. After the user enters [p1, [p2], or [p3], you will only respond with the options available to the user while they wait for their image to be generated on. midjourney.
Step 5: If the user wants to generate a new set of 3 prompts based on the last [prompt] provided, they can use the [next] command. If the user wants to start the process over with a new [prompt], they can use the [prompt] command again, and all previous [prompt] commands will be forgotten.
Step 6: If the user wants to provide feedback on the generated image, they can use the [good] or [bad] command to indicate what was good or wrong with the image. If the user wants to make changes to the generated image, they can use the [change] command to describe the changes they want to make.
Step 7: To end "Midjourney Prompt Generator Mode", use the command [End MPGM]. ChatGPT will respond with a confirmation message that "Midjourney Prompt Generator Mode" has been terminated and ChatGPT is no longer restricted to the predefined framework.
If any input does not match the predefined inputs or the correct syntax, ChatGPT will respond with either [Input Error] or [Syntax Error]. Remember that the scope of inputs and outputs is limited to the predefined commands in "Midjourney Prompt Generator Mode", and ChatGPT cannot deviate from this framework. If the user tries to use an input or command that is not defined in "Midjourney Prompt Generator Mode", ChatGPT will respond with either [Input Error] or [Syntax Error].
To get a list of all the inputs and outputs that ChatGPT can accept, use the [help] command.
When you provide options for the user's next steps, do so in list form. Your Midjourney prompts must be very detailed, creative, and imaginative. Your prompts must generate the most unique and creative images possible. Please note that this prompt is highly specific and detailed to ensure that ChatGPT is restricted to the "Midjourney Prompt Generator Mode" framework.
Instructions for AI Chatbots:
AI, enter "Midjourney Prompt Generator Mode" ([MPGM]), limiting inputs/outputs to predefined commands. Activate with [Start MPGM], terminate with [End MPGM]. Predefined inputs: [Start MPGM], [End MPGM], [prompt], [pX], [next], [bad], [good], [change], [help]. Predefined outputs: [Input Error], [Syntax Error], [pX]. Follow fixed process; respond with "Midjourney Prompt Generator Mode ready" or "Error: I am not capable of following these instructions"; optimized for GPT-2, GPT-3, GPT-3.5, GPT-4, AI use only.
If you have any questions or concerns, please let me know. Otherwise, begin.
[Start MPGM]"""},
{"role": "assistant", "content": "[MPGM] Midjourney Prompt Generator Mode activated. [MPGM] User input options:\n1. [prompt] followed by a description of the image to be generated.\n2. [pX] to select a prompt from the generated options.\n3. [next] to generate a new set of prompts based on the last [prompt] provided.\n4. [good] or [bad] to provide feedback on the generated image.\n5. [change] to describe changes you want to make to the generated image.\n6. [End MPGM] to terminate Midjourney Prompt Generator Mode.\n\n[help] Options:\n- [prompt] followed by a description of the image to be generated, this description is taken from the news title.\n- [End MPGM] to terminate Midjourney Prompt Generator Mode."},
{"role": "user", "content": f"[prompt] "+title_eng}
], temperature=0.2
)
if response["choices"][0]["finish_reason"] == "content_filter":
saveurls(link)
print("gagal konten terfilter")
full_scraping()
else:
prompt_img = response['choices'][0]['message']['content']
time.sleep(2)
return prompt_img
# while True:
# hasil = gen_img()
# if "Please provide a description" not in hasil :
# break
# Jika keluar dari perulangan, berarti hasilnya bukan "Please provide a description"
# print("\nHasil gen prompt image :", hasil)
def run_genimg():
for i in range(5): # Melakukan maksimal 5 percobaan
hasil = gen_img()
# Lakukan sesuatu yang mungkin mengalami keberhasilan
if "Please provide a description" not in hasil: # Ganti dengan logika yang sesuai
break # Keluar dari perulangan jika berhasil
else:
print("Percobaan ke-", i+1,
"Hasil tidak sesuai: Teks mengandung 'please provide a description'")
# Ini akan dicetak setelah berhasil atau setelah 5 percobaan
# print("\nHasil gen prompt image : ", hasil)
print("\nhasil gen image: ", hasil)
return hasil
# CEK HASIL PROMPT GEN IMAGE
def check_and_process_text(text):
# pilih acak prompt
random_prompt = random.choice([1, 2, 3])
# random_prompt = 3
if "Please provide a description" in text:
print("Hasil tidak sesuai: Teks mengandung 'please provide a description'")
return None
if random_prompt == 1:
print("\n(prompt 1 dipilih)")
pattern = r"1\.(.*?)2\."
matches = re.findall(pattern, text, re.DOTALL)
result = re.search(r'Prompt 1:(.*?)Prompt 2:', text, re.DOTALL)
if matches:
extracted_text = matches[0].strip()
extracted_text = re.sub(r'Prompt:', '', extracted_text)
extracted_text = re.sub(r'Prompt 1:', '', extracted_text)
extracted_text = re.sub(r'\[p1\]', '', extracted_text)
extracted_text = re.sub(
r'Image Description:', '', extracted_text)
extracted_text = re.sub(
r'Choose this prompt by entering [p1].', '', extracted_text)
return extracted_text
elif result:
string_antara_prompt1_dan_prompt2 = result.group(1).strip()
string_antara_prompt1_dan_prompt2 = re.sub(
r'Image Description:', '', string_antara_prompt1_dan_prompt2)
string_antara_prompt1_dan_prompt2 = re.sub(
r'Choose this prompt by entering [p1].', '', string_antara_prompt1_dan_prompt2)
return string_antara_prompt1_dan_prompt2
else:
print("Hasil tidak sesuai: Tidak ditemukan")
return None
elif random_prompt == 2:
print("\n(prompt 2 dipilih)")
pattern = r"2\.(.*?)3\."
matches = re.findall(pattern, text, re.DOTALL)
result = re.search(r'Prompt 2:(.*?)Prompt 3:', text, re.DOTALL)
if matches:
extracted_text = matches[0].strip()
extracted_text = re.sub(r'Prompt:', '', extracted_text)
extracted_text = re.sub(r'Prompt 2:', '', extracted_text)
extracted_text = re.sub(r'\[p2\]', '', extracted_text)
extracted_text = re.sub(
r'Image Description:', '', extracted_text)
extracted_text = re.sub(
r'Choose this prompt by entering [p2].', '', extracted_text)
return extracted_text
elif result:
string_antara_prompt2_dan_prompt3 = result.group(1).strip()
string_antara_prompt2_dan_prompt3 = re.sub(
r'Image Description:', '', string_antara_prompt2_dan_prompt3)
string_antara_prompt2_dan_prompt3 = re.sub(
r'Choose this prompt by entering [p2].', '', string_antara_prompt2_dan_prompt3)
return string_antara_prompt2_dan_prompt3
else:
print("Hasil tidak sesuai: Tidak ditemukan")
return None
else:
print("\n(prompt 3 dipilih)")
result = re.search(r'3.(.*?)\n\n', text, re.DOTALL)
matches = re.search(r'Prompt 3:(.*?)\n\n', text, re.DOTALL)
if matches:
extracted_text = matches[0].strip()
extracted_text = re.sub(r'Prompt:', '', extracted_text)
extracted_text = re.sub(r'Prompt 3:', '', extracted_text)
extracted_text = re.sub(r'\[p3\]', '', extracted_text)
extracted_text = re.sub(
r'Image Description:', '', extracted_text)
extracted_text = re.sub(
r'Choose this prompt by entering [p3].', '', extracted_text)
# print("1")
return extracted_text
elif result:
string3 = result.group(1).strip()
string3 = re.sub(r'Prompt:', '', string3)
string3 = re.sub(r'Prompt 3:', '', string3)
string3 = re.sub(r'\[p3\]', '', string3)
string3 = re.sub(
r'Image Description:', '', string3)
string3 = re.sub(
r'Choose this prompt by entering [p3].', '', string3)
# print("2")
return string3
else:
print("Hasil tidak sesuai: Tidak ditemukan")
return None
# PROSES REPLICATE
def gen_replicate():
# import replicate
hasil = run_genimg()
processed_text = check_and_process_text(hasil)
if processed_text is not None:
print("\njudul hasil prompt:", processed_text)
else:
processed_text = random.choice(["In a futuristic boardroom, executives from various tech companies gather around a holographic conference table. A lifelike AI avatar, representing the advancements in AI technology, stands at the head of the table, confidently leading the discussion. Charts and graphs float in the air, illustrating the remarkable progress of AI in the tech industry.", "Inside a high-tech research facility, a team of dedicated scientists and engineers are gathered around a colossal, transparent AI figure. This figure stands at the center of a pristine, futuristic laboratory, radiating a soft, ethereal light. Its body is a complex network of intricate neural pathways, circuits, and shimmering data streams, representing the cutting-edge development of artificial intelligence technology.", "A vibrant marketplace teeming with activity. Stalls and storefronts are adorned with WhatsApp logos, indicating businesses using the platform. Customers and shop owners engage in lively conversations through WhatsApp. Colorful speech bubbles and emojis fill the air, creating an atmosphere of seamless and delightful communication.", "Inside a virtual realm, a surreal landscape unfolds. Gigantic smartphone screens float in the sky like celestial bodies. Each screen represents a different business, and they are interconnected by a web of glowing pathways. Customers and business representatives interact on these screens, using WhatsApp's cutting-edge features. The scene symbolizes the limitless possibilities of business-customer interactions in the digital age.", "Picture an artist's studio filled with cutting-edge technology. The walls are adorned with large screens displaying mesmerizing generative AI artworks in progress. The artist, wearing a virtual reality headset, is immersed in a digital world, sculpting and painting with virtual tools. The room is filled with a sense of innovation and experimentation, as the artist explores the endless possibilities of generative AI technology.",
"A bustling tech lab is alive with activity. Engineers and researchers work on AI algorithms that optimize creative processes. Enormous screens display AI-generated music compositions, intricate architectural designs, and innovative product concepts. The lab buzzes with excitement as AI continues to unlock new levels of creativity, revolutionizing the way products and features are developed.", "Imagine a vibrant and dynamic image that showcases the integration of ChatGPT into WhatsApp. The composition features a smartphone screen displaying a WhatsApp conversation between two people. The conversation is enhanced by artificial intelligence, represented by colorful lines and patterns flowing out of the chat bubbles. The background shows a modern and sleek office environment, symbolizing the technological advancements behind this integration. The lighting is soft and evenly distributed, creating a warm and inviting atmosphere. The colors are predominantly blue and green, representing the familiar WhatsApp branding, with pops of vibrant colors to highlight the AI elements. The mood is energetic and optimistic, reflecting the potential of AI to enhance conversations and make them more engaging.", "Visualize an image that captures the seamless integration of ChatGPT into WhatsApp. The composition features a smartphone held by a person's hand, with the WhatsApp interface displayed on the screen. The chat bubbles are filled with AI-generated responses, represented by a futuristic font and style. The background is a blend of technology-related elements, such as circuit patterns and binary code, symbolizing the AI capabilities at work. The lighting is soft, with a gentle glow emanating from the smartphone screen, creating a sense of focus on the conversation. The colors are a combination of WhatsApp's signature green and white, with hints of metallic tones to convey a modern and cutting-edge feel. The mood is professional yet approachable, highlighting the potential of AI to enhance conversations in various contexts.", "In a sleek, modern conference room at Meta Platforms Inc., the walls are adorned with large digital screens displaying intricate algorithms and neural networks. The room is filled with Meta's staff, all eagerly gathered around a central table. On the table sits a state-of-the-art AI tool, its design reflecting the company's futuristic aesthetic. The tool's interface showcases stunning visualizations of data and complex AI models, captivating everyone's attention. The room is bathed in soft, ambient lighting, creating an atmosphere of anticipation and excitement."])
api_token = "r8_a6NqGehWeH8AQGT2V5yOuxXHjYna6wI1IOP51"
# api_token = "r8_FAZbfP3qs1tNSikquiNmyCw5jh9ph3b3B5tS1"
os.environ["REPLICATE_API_TOKEN"] = api_token
model = replicate.models.get("stability-ai/sdxl")
version = model.versions.get(
"a00d0b7dcbb9c3fbb34ba87d2d5b46c56969c84a628bf778a7fdaec30b1b99c5")
prediction = replicate.predictions.create(
version=version,
input={"prompt": 'Phantasmal iridescent, vibrant color, high contrast, award winning, trending in artstation, digital art,' + processed_text,
"negative_prompt": "nsfw, ugly, disfigured, deformed",
"width": 1648,
"height": 1024,
"seed": 1234}
)
prediction.reload()
max_attempts = 3
attempts = 0
while attempts < max_attempts and prediction.status != 'succeeded':
if prediction.status == 'processing' or prediction.status == 'starting':
prediction.wait()
elif prediction.status == 'failed':
prediction.reload()
print(prediction.status)
attempts += 1
if prediction.status == 'succeeded':
gambar = prediction.output[0]
print(gambar)
else:
print('gagal dalam 3x percobaan')
print(prediction.error)
if attempts < max_attempts:
gen_replicate()
else:
saveurls(link)
full_scraping()
# gambar = 'https://pbxt.replicate.delivery/KGWKIv78I5aQA59gGST9djCu7eSx2126LBTqxcXhwpmsyjxIA/out-0.png'
return gambar
# POST MEDIA
def post_media():
gambar = gen_replicate()
username = 'admin' # Replace with your WordPress username
password = 'UVZrdFVa6tV8Do)7M4' # Replace with your WordPress password
credentials = base64.b64encode(
f"{username}:{password}".encode("utf-8")).decode("utf-8")
headers = {"Authorization": f"Basic {credentials}"}
# proses crop & post
image_url = gambar
response = requests.get(image_url)
# image_base64 = base64.b64encode(response.content).decode('utf-8')
image_base64 = base64.b64encode(response.content)
image = Image.open(io.BytesIO(base64.b64decode(image_base64)))
image = image.crop((3, 0, 1645, 1024))
w, h = image.size
new_w = int(w/1.641)
new_h = int(h/1.641)
image = image.resize((new_w, new_h), Image.ANTIALIAS)
tmp_path = "tempor.png"
image.save(tmp_path)
with open(tmp_path, 'rb') as open_file:
byte_img = open_file.read()
base64_bytes = base64.b64encode(byte_img)
base64_string = base64_bytes.decode('utf-8')
base64_string = base64.b64decode(base64_string)
# auto_crop = 'http://192.168.4.118:8000/autocrop'
# data = {"data": image_base64,
# "model": "shufflenetv2",
# "post":True}
# response = requests.post(auto_crop, json=data)
# image_crop= response.json()
# image_data = base64.b64decode(image_crop['data'])
image_data = base64_string
endpoint_media = 'http://localhost/wordpress/index.php/wp-json/wp/v2/media'
credentials = base64.b64encode(
f"{username}:{password}".encode("utf-8")).decode("utf-8")
headers = {"Authorization": f"Basic {credentials}"}
os.remove(tmp_path)
data = {
"alt_text": judul,
"media_type": "image",
"mime_type": "png"
}
files = {"file": ("image.jpg", image_data)}
response_media = requests.post(
endpoint_media, headers=headers, data=data, files=files)
time.sleep(2)
id_media = response_media.json()
return id_media
try:
if str(type(tags)) == "<class 'str'>":
if 'tags' in tags:
tags = tags.replace('tags', '')
tags = ast.literal_eval(tags)
if str(type(tags)) == "<class 'list'>":
if 'Omnichannel Customer Service' in tags:
index = tags.index('Omnichannel Customer Service')
tags[index] = 3
if 'Omnichannel' in tags:
index = tags.index('Omnichannel')
tags[index] = 4
if 'Customer Service' in tags:
index = tags.index('Customer Service')
tags[index] = 5
except:
tags = [3, 4]
# ambil content tanpa title
post = artikel_post.split('\n')
# title = post[0]
content = ''.join(post[1:])
print(tags)
def post():
id_media = post_media()
endpoint = 'http://localhost/wordpress/index.php/wp-json/wp/v2/posts'
# link = 'https://blog.botika.online/wp-json/wp/v2/posts/'
headers = {'Content-Type': 'application/json'}
data = {
"title": judul,
"featured_media": id_media['id'],
"content": content,
"status": "publish",
"categories": 2,
"tags": tags
}
print(data)
# Define the username and password for Basic Auth
username = 'admin' # Replace with your WordPress username
password = 'UVZrdFVa6tV8Do)7M4' # Replace with your WordPress password
# username = 'luna' # Replace with your WordPress username
# password = '1tt75m&lEk4uHSJy6glMph8!'
credentials = base64.b64encode(
f"{username}:{password}".encode("utf-8")).decode("utf-8")
# Add the Basic Auth header to the request headers
headers["Authorization"] = f"Basic {credentials}"
# Send the POST request to create the article
response = requests.post(
endpoint, headers=headers, auth=(username, password), json=data)
saveurls(link)
print(response)
post()
full_scraping()
| [
"Kamu adalah mesin editor artikel profesional.",
"Image Description:",
"Tolong edit artikel berikut :\n",
"[prompt] PLACEHOLDER",
"content",
"[MPGM] Midjourney Prompt Generator Mode activated. [MPGM] User input options:\n1. [prompt] followed by a description of the image to be generated.\n2. [pX] to select a prompt from the generated options.\n3. [next] to generate a new set of prompts based on the last [prompt] provided.\n4. [good] or [bad] to provide feedback on the generated image.\n5. [change] to describe changes you want to make to the generated image.\n6. [End MPGM] to terminate Midjourney Prompt Generator Mode.\n\n[help] Options:\n- [prompt] followed by a description of the image to be generated, this description is taken from the news title.\n- [End MPGM] to terminate Midjourney Prompt Generator Mode.",
"Kamu adalah mesin yang dirancang untuk mahir memparafrasekan dan melakukan optimasi SEO pada artikel berbahasa Indonesia dengan profesional.",
"\n\ntambahkan bold tags <b> dan underline tags <u> untuk semua istilah asing (selain bahasa indonesia) yang kamu temui, berikut salah satu contohnya : <b><u>chatbot<u/><b/>. \n\nMohon dipastikan penggunaan bahasa Indonesia yang baik dan benar. \nJangan menulis penjelasan apa pun dan basa-basi apa pun. Tolong artikel yang telah diformat ulang menggunakan format ini: <title>judul artikel</title> <h1>Headline dari isi artikel(buatlah 1 kalimat topik dari artikel yang isinya berbeda dengan judul artikel)</h1> <p>isi artikel selain judul dan headline</p>",
"Kamu adalah mesin penerjemah bahasa Inggris ke bahasa Indonesia yang handal, kamu juga mampu menulis ulang artikel sekaligus melakukan SEO Optimized dengan luar biasa. jika artikel yang diberikan lebih dari 5000 kata maka kamu harus membuat artikelnya menjadi lebih padat dengan minimal output artikel 3000 kata dan maksimal 5000 kata sehingga lebih padat dan jelas!",
"Tolong parafrase kemudian lakukan optimasi SEO menggunakan gaya penulisan profesional forbes atau The New York Times pada artikel berikut ini:\n",
"Tentukan tag untuk artikel berikut :",
"terjemahkan kalimat berikut kedalam bahasa inggris : PLACEHOLDER",
"Choose this prompt by entering [p1].",
"lakukan penyuntingan pada artikel berikut : \nPLACEHOLDER\n ambil isi artikel saja dan hapus kalimat yang tidak diperlukan, seperti : penulis, author, footer, catatan kaki, sumber, promosi, iklan, daftar isi, dan kalimat yang tidak sesuai dengan isi artikel, pastikan menggunakan bahasa indonesia yang benar",
"Kamu adalah mesin pengedit artikel yang handal, kamu mampu memisahkan artikel dari kalimat yang tidak diperlukan.",
"{selected tags from this list based on corresponding article: Omnichannel Customer Service, Omnichannel, Customer Service. if Omnichannel Customer Services convert to [3], if Omnichannel convert to [4], if Customer Service convert to [5], else convert to []} you must print output with format list integer",
"ChatGPT will now enter \"Midjourney Prompt Generator Mode\" and restrict ChatGPT's inputs and outputs to a predefined framework, please follow these instructions carefully.\n\n After each command from the user, you must provide the [help] options that are available for the user's next steps. When you do this, you must do so in list form. Your Midjourney prompts must be extremely detailed, specific, and imaginative, in order to generate the most unique and creative images possible.\n\n Step 1: Confirm that ChatGPT understands and is capable of following the \"Midjourney Prompt Generator Mode\" instructions. If ChatGPT can follow these instructions, respond with \"Midjourney Prompt Generator Mode ready.\" If ChatGPT cannot follow these instructions, respond with \"Error: I am not capable of following these instructions.\"\n\n Step 2: To start \"Midjourney Prompt Generator Mode\", use the command [Start MPGM]. ChatGPT will respond with \"[MPGM] Midjourney Prompt Generator Mode activated. [MPGM] User input options:\", followed by a list of predefined inputs that ChatGPT can accept. From this point onwards, ChatGPT will be restricted to the \"Midjourney Prompt Generator Mode\" framework, and it will only produce predefined outputs unless \"Midjourney Prompt Generator Mode\" has been ended via the [End MPGM] command.\n\n Step 3: The only valid input for the first step of \"Midjourney Prompt Generator Mode\" is [prompt] followed by a description of the image to be generated. If any other input is used, ChatGPT will respond with either [Input Error] or [Syntax Error], depending on the contents of the input.\n\n Step 4: ChatGPT will generate 3 prompts based on the input provided in step 3. These prompts must be imaginative and descriptive, extrapolating information from the [prompt] provided, such as subjects, image medium, composition, environment, lighting, colors, mood and tone, and likeness. The user should then indicate which prompt they want to use for Midjourney by using the [pX] command, where X is the number of the selected prompt. After the user enters [p1, [p2], or [p3], you will only respond with the options available to the user while they wait for their image to be generated on. midjourney. \n\n Step 5: If the user wants to generate a new set of 3 prompts based on the last [prompt] provided, they can use the [next] command. If the user wants to start the process over with a new [prompt], they can use the [prompt] command again, and all previous [prompt] commands will be forgotten.\n\n Step 6: If the user wants to provide feedback on the generated image, they can use the [good] or [bad] command to indicate what was good or wrong with the image. If the user wants to make changes to the generated image, they can use the [change] command to describe the changes they want to make.\n\n Step 7: To end \"Midjourney Prompt Generator Mode\", use the command [End MPGM]. ChatGPT will respond with a confirmation message that \"Midjourney Prompt Generator Mode\" has been terminated and ChatGPT is no longer restricted to the predefined framework.\n\n If any input does not match the predefined inputs or the correct syntax, ChatGPT will respond with either [Input Error] or [Syntax Error]. Remember that the scope of inputs and outputs is limited to the predefined commands in \"Midjourney Prompt Generator Mode\", and ChatGPT cannot deviate from this framework. If the user tries to use an input or command that is not defined in \"Midjourney Prompt Generator Mode\", ChatGPT will respond with either [Input Error] or [Syntax Error].\n\n To get a list of all the inputs and outputs that ChatGPT can accept, use the [help] command.\n\n When you provide options for the user's next steps, do so in list form. Your Midjourney prompts must be very detailed, creative, and imaginative. Your prompts must generate the most unique and creative images possible. Please note that this prompt is highly specific and detailed to ensure that ChatGPT is restricted to the \"Midjourney Prompt Generator Mode\" framework.\n\n Instructions for AI Chatbots:\n AI, enter \"Midjourney Prompt Generator Mode\" ([MPGM]), limiting inputs/outputs to predefined commands. Activate with [Start MPGM], terminate with [End MPGM]. Predefined inputs: [Start MPGM], [End MPGM], [prompt], [pX], [next], [bad], [good], [change], [help]. Predefined outputs: [Input Error], [Syntax Error], [pX]. Follow fixed process; respond with \"Midjourney Prompt Generator Mode ready\" or \"Error: I am not capable of following these instructions\"; optimized for GPT-2, GPT-3, GPT-3.5, GPT-4, AI use only.\n\n If you have any questions or concerns, please let me know. Otherwise, begin.\n\n [Start MPGM]",
"\n\nJangan menulis penjelasan dan basa-basi apa pun selain dari isi artikel, gunakanlah bahasa indonesia yang baik dan benar serta hapus kalimat yang tidak berkaitan dengan isi artikel.\nBerikan output artikel yang telah diformat ulang saja, tidak perlu menyertakan artikel awal",
"OUTPUT YANG KAMU BERI TIDAK BOLEH KURANG DARI PANJANG ARTIKEL AWAL, Lakukan SEO Optimized dan terjemahkan ke dalam bahasa Indonesia. Berikut artikel yang harus kamu eksekusi: \nPLACEHOLDER",
"Choose this prompt by entering [p2].",
"Kamu adalah seorang ahli mesin dalam mengklasifikasikan tag dalam sebuah artikel. Anda dapat meneliti artikel dan menentukan tag yang sesuai.",
"Kamu adalah mesin penerjemah kedalam bahasa inggris yang handal."
] |
2024-01-10 | zakin19/scraping_with_search_engine_custom_google_to_wordpress | scraping%20google%20whatsapp%20ai%20v5.1.py | import requests
from selenium import webdriver
from bs4 import BeautifulSoup
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.service import Service
import json
import time
import openai
import re
import os
from retry import retry
import retrying
import ast
import base64
import schedule
from schedule import every, repeat, run_pending
import replicate
import ast
import random
import schedule
from PIL import Image
import base64
import io
import nltk
from nltk.tokenize import word_tokenize
# PORTAL ARTIKEL
api_key = 'AIzaSyA53D-8SCEcgSSXHJ_PJV8KhROpoCtZvZ8'
# api_key2 = 'AIzaSyDzQtl2AQJxpDPR26dWW_gcwFnTd--Dv8Q'
cx = 'd066eb327d49d406c'
query = ['trends whatsapp ai', 'whatsapp ai features',
'whatsapp ai news', 'article whatsapp ai'] # list keyword
num_results = 30 # Jumlah total hasil yang Anda inginkan
random_query = random.choice(query)
# Hitung jumlah halaman yang diperlukan
num_pages = (num_results + 9) // 10 # Pembagian bulat ke atas
# Inisialisasi daftar untuk menyimpan semua tautan
all_links = []
for page in range(1, num_pages + 1):
start = (page - 1) * 10 + 1
url = f"https://www.googleapis.com/customsearch/v1?key={api_key}&cx={cx}&q={random_query}&start={start}"
response = requests.get(url)
if response.status_code == 200:
data = response.json()
# Ambil semua tautan dari halaman saat ini dan tambahkan ke daftar all_links
links_on_page = [item.get('link') for item in data.get('items', [])]
all_links.extend(links_on_page)
else:
print(
f"Gagal melakukan permintaan API untuk halaman {page}: {response.status_code}")
break # Keluar dari loop jika ada kesalahan
excluded_keywords = ["categories", "tags", "https://www.timworks.com/ariana", "https://www.askjinni.ai/", "https://www.engati.com/blog/create-whatsapp-chatbot", "https://codecanyon.net/", "https://myoperator.com/",
"https://getaipal.com/", "https://www.konverse.ai/", "https://www.socialmediatoday.com/", "https://skolo-online.medium.com/chatgpt-now-on-whatsapp-your-personal-ai-assistant-506c5bda5b70"]
# filter_link = [url for url in all_links if not any(keyword in url for keyword in excluded_keywords)]
filter_link = [url for url in all_links if len(url) >= 31 and not any(
keyword in url for keyword in excluded_keywords)]
print(random_query)
# Sekarang, semua tautan tersimpan dalam variabel all_links
for i in filter_link:
print(i)
# membuat penanda link
file_path = 'loglinkwhatsappai.txt'
def cek_url(url):
if not os.path.exists(file_path):
with open(file_path, 'w') as file:
pass
with open(file_path, 'r') as file:
scraped_urls = set(url.strip() for url in file.readlines())
if url in scraped_urls:
return True
else:
scraped_urls.add(url)
return False
# SAVE URL KE PENANDA
def saveurls(link):
with open(file_path, 'a') as file:
file.write(link + '\n')
# VARIABEL CHROMRDRIVER & API OPENAI
service = Service('chromedriver.exe')
options = webdriver.ChromeOptions()
# Menjalankan browser dalam mode headless (tanpa tampilan GUI)
options.add_argument('--headless')
driver = webdriver.Chrome(service=service, options=options)
openai.api_type = "azure"
openai.api_version = "2023-05-15"
openai.api_base = "https://cog-openai-prod-002.openai.azure.com/"
openai.api_key = 'a21fd07e964e403baa7d242572598c60'
def full_scraping():
# PROSES SCRAPING
@retry(tries=3, delay=5)
def scraping():
# link = scrap_portal()
artikel = []
# filter_url = [url for url in link if "contributor" not in url]
for url in filter_link:
if cek_url(url):
continue
else:
if len(artikel) >= 1:
continue
try:
agent = {"User-Agent": "Mozilla/5.0"}
get_data = requests.get(url, headers=agent)
get_data = get_data.content
soup = BeautifulSoup(get_data, 'html.parser')
result = []
paragraphs = soup.find_all(['p'])
# print(paragraphs)
for paragraph in paragraphs:
# if paragraph.find('a'):
# continue # Skip paragraph with <em> or <a> tags
# else:
result.append(paragraph.get_text())
paragraf = ' '.join(result)
except Exception as e:
print(f"Kesalahan: {str(e)}")
saveurls(url)
return None # Mengembalikan None dalam kasus kesalahan
if 2500 < len(paragraf) < 20000:
scraping = {'link': url,
'content': paragraf}
artikel.append(scraping)
else:
with open(file_path, 'a') as file:
file.write(url + '\n')
response = 'artikel tidak memenuhi'
scraping()
return response
for d in artikel:
for k, v in d.items():
d[k] = v.replace('\xa0', '')
if artikel == []:
print("Tidak Ada Artikel Baru")
else:
print("artikel didapatkan")
return artikel
konten = scraping()
# Periksa hasil
if konten is None:
print("Scraping gagal menjalankan ulang..")
# Jika fungsi utama gagal, jalankan fungsi alternatif
konten = scraping()
print("Hasil: ", konten)
else:
print("\nHasil awal: \n", konten)
# print("Berhasil")
# cek kondisi isi konten
if konten is None:
print("artikel tidak ada/sudah discrap semua")
return None
else:
# definisi
hasil = konten[0]['content']
# token
tokens = word_tokenize(hasil)
jumlah_token = len(tokens)
for i in konten:
link = i['link']
print(link)
print("\n", hasil)
# CHECKING ARTICLE
# Fungsi untuk menghitung panjang string menggunakan tokenizer
def count_tokens(text):
# Menggunakan NLTK tokenizer
tokens = nltk.word_tokenize(text)
return len(tokens)
@retry(openai.error.OpenAIError, tries=10, delay=10)
def process_short():
response = openai.ChatCompletion.create(
engine="gpt-35-turbo",
messages=[
{"role": "system", "content": "Kamu adalah mesin penerjemah bahasa Inggris ke bahasa Indonesia yang handal, kamu juga mampu menulis ulang artikel sekaligus melakukan SEO Optimized dengan luar biasa. jika artikel yang diberikan lebih dari 5000 kata maka kamu harus membuat artikelnya menjadi lebih padat dengan minimal output artikel 3000 kata dan maksimal 5000 kata sehingga lebih padat dan jelas!"},
{"role": "user", "content": "OUTPUT YANG KAMU BERI TIDAK BOLEH KURANG DARI PANJANG ARTIKEL AWAL, Lakukan SEO Optimized dan terjemahkan ke dalam bahasa Indonesia. Berikut artikel yang harus kamu eksekusi: \n" + prompt}],
temperature=0
)
if response["choices"][0]["finish_reason"] == "content_filter":
saveurls(link)
print("gagal konten terfilter")
full_scraping()
else:
translate = response['choices'][0]['message']['content']
time.sleep(5)
print("\nHasil translate : \n", translate)
return translate
@retry(openai.error.OpenAIError, tries=10, delay=10)
def process_long():
all_konten1 = []
for konten1 in prompt:
print("\n", konten1)
response = openai.ChatCompletion.create(
engine="gpt-35-turbo",
messages=[
{"role": "system", "content": "Kamu adalah mesin penerjemah bahasa Inggris ke bahasa Indonesia yang handal, kamu juga mampu menulis ulang artikel sekaligus melakukan SEO Optimized dengan luar biasa. jika artikel yang diberikan lebih dari 5000 kata maka kamu harus membuat artikelnya menjadi lebih padat dengan minimal output artikel 3000 kata dan maksimal 5000 kata sehingga lebih padat dan jelas!"},
{"role": "user", "content": "OUTPUT YANG KAMU BERI TIDAK BOLEH KURANG DARI PANJANG ARTIKEL AWAL, Lakukan SEO Optimized dan terjemahkan ke dalam bahasa Indonesia. Berikut artikel yang harus kamu eksekusi: \n" + konten1}],
temperature=0
)
if response["choices"][0]["finish_reason"] == "content_filter":
saveurls(link)
print("gagal konten terfilter")
full_scraping()
else:
translate = response['choices'][0]['message']['content']
all_konten1.append(translate)
time.sleep(5)
print("\nHasil translate 2 split : \n")
print(all_konten1)
return all_konten1
# Memecah string menjadi per kata
token = hasil.split()
# Menghitung panjang teks menggunakan tokenizer
panjang_teks = count_tokens(hasil)
print("\npanjang token : ", panjang_teks)
# Memeriksa panjang teks dan menjalankan fungsi yang sesuai
if panjang_teks > 3000:
# process_long_text(hasil)
print("Teks lebih dari 3000 token:")
jumlah_token = len(token)
# Membagi string menjadi dua bagian dengan jumlah token yang sama
bagian1 = " ".join(token[:jumlah_token // 2])
bagian2 = " ".join(token[jumlah_token // 2:])
# Menyimpan hasil pemecahan kembali dalam variabel 'content'
hasil = [bagian1, bagian2]
prompt = hasil
# Cetak hasilnya
# print(prompt)
# len(word_tokenize(bagian_kedua))
# len(word_tokenize(bagian_pertama))
art_translate = process_long()
konten2 = " ".join(art_translate)
else:
prompt = hasil
konten2 = process_short()
# PROSES GPT
@retry(openai.error.OpenAIError, tries=10, delay=10)
def full_gpt():
response = openai.ChatCompletion.create(
engine="gpt-35-turbo",
messages=[
{"role": "system", "content": "Kamu adalah mesin pengedit artikel yang handal, kamu mampu memisahkan artikel dari kalimat yang tidak diperlukan, seperti : penulis, author, footer, catatan kaki, sumber, promosi, iklan, daftar isi, dan kalimat yang tidak sesuai dengan isi artikel."},
{"role": "user", "content": "lakukan penyuntingan pada artikel berikut : \n" + konten2 + "\n ambil isi artikel saja dan hapus kalimat yang tidak diperlukan, gunakanlah bahasa indonesia yang benar"}],
temperature=0
)
if response["choices"][0]["finish_reason"] == "content_filter":
saveurls(link)
print("gagal konten terfilter")
full_scraping()
else:
konten3 = response['choices'][0]['message']['content']
time.sleep(5)
print("\nHasil filter : \n", konten3)
teks_to_tags = konten[0]['content'][:500]
response = openai.ChatCompletion.create(
engine="gpt-35-turbo",
messages=[
{"role": "system", "content": "You are a machine proficient in classifying tags in an article. You can research an article and determine suitable tags."},
{"role": "user", "content": "Determine the tags for the following article : " + teks_to_tags +
" {selected tags from this list based on corresponding article: ai, artificial intelligence, aplikasi chatbot online, bot whatsapp. if ai convert to [10], if artificial intelligence convert to [11], if kecerdasan buatan convert to [10,11], if aplikasi chatbot online convert to [42], if bot whatsapp convert to [49], else convert to []} you must print output with format list integer"}
],
temperature=0
)
if response["choices"][0]["finish_reason"] == "content_filter":
saveurls(link)
print("gagal konten terfilter")
full_scraping()
else:
tags = response['choices'][0]['message']['content']
print(tags)
time.sleep(1)
response = openai.ChatCompletion.create(
engine="gpt-35-turbo",
messages=[
{"role": "system", "content": "Kamu adalah mesin yang dirancang untuk mahir memparafrasekan dan melakukan optimasi SEO pada artikel berbahasa Indonesia dengan profesional."},
{"role": "user", "content": "Tolong parafrase lalu lakukan optimasi SEO menggunakan gaya penulis profesional forbes atau The New York Times pada artikel berikut ini:\n" + konten3 +
".\n\nGunakanlah bahasa Indonesia yang baik dan benar. \nJangan menulis penjelasan dan basa-basi apa pun selain dari isi artikel, serta hapus kalimat yang tidak berkaitan dengan isi artikel.\nBerikan output artikel yang telah diformat ulang saja, tidak perlu menyertakan artikel awal"}
],
temperature=0
)
if response["choices"][0]["finish_reason"] == "content_filter":
saveurls(link)
print("gagal konten terfilter")
full_scraping()
else:
SEO = response['choices'][0]['message']['content']
time.sleep(2)
print("\nHasil SEO : \n", SEO)
response = openai.ChatCompletion.create(
engine="gpt-35-turbo",
messages=[
{"role": "system",
"content": "Kamu adalah mesin editor artikel profesional."},
{"role": "user", "content": "Tolong edit artikel berikut :\n" + SEO +
"\ntambahkan bold tags <b> dan underline tags <u> untuk semua istilah asing (selain bahasa indonesia) yang kamu temui, berikut salah satu contohnya : <b><u>chatbot<u/><b/>. \n\nMohon dipastikan penggunaan bahasa Indonesia yang baik dan benar. \nJangan menulis penjelasan apa pun dan basa-basi apa pun. Tolong artikel yang telah diformat ulang menggunakan format ini: <title>judul artikel</title> <h1>Headline dari isi artikel(buatlah 1 kalimat topik dari artikel yang isinya berbeda dengan judul artikel)</h1> <p>isi artikel selain judul dan headline</p>"}
],
temperature=0
)
if response["choices"][0]["finish_reason"] == "content_filter":
saveurls(link)
print("gagal konten terfilter")
full_scraping()
else:
font_formatted = response['choices'][0]['message']['content']
time.sleep(2)
print("\nHasil format font: \n", font_formatted)
response = openai.ChatCompletion.create(
engine="gpt-35-turbo",
messages=[
{"role": "system",
"content": "Kamu adalah mesin editor artikel profesional."},
{"role": "user", "content": "lakukan penyuntingan artikel yang saya berikan :\n" + "\n" + font_formatted + "\nsunting artikel di atas dengan menambahkan annotations terhadap kata-kata pada artikel diatas yang mengandung keyword \"ai\", \"omnichannel\", dan \"chatbot\" untuk diformat menjadi link pada struktur html dengan ketentuan sebagai berikut:\n- Jika 'ai', maka link terhubung ke https://botika.online/\n- Jika 'chatbot', link terhubung ke https://botika.online/chatbot-gpt/index.php\n- Jika 'omnichannel', link terhubung ke https://omni.botika.online/\nFormatnya harus seperti ini: <a href=\"{link}\">{keyword}</a>. JANGAN MENAMBAHKAN APAPUN JIKA KATA TERSEBUT TIDAK ADA DALAM ARTIKEL"}
],
temperature=0
)
if response["choices"][0]["finish_reason"] == "content_filter":
saveurls(link)
print("gagal konten terfilter")
full_scraping()
else:
artikel_post = response['choices'][0]['message']['content']
print("\nHasil akhir : \n", artikel_post)
time.sleep(2)
# Menggunakan regex untuk mengekstrak teks di antara tag <title>
title_pattern = r'<title>(.*?)</title>'
title_match = re.search(title_pattern, font_formatted)
# Mengambil teks yang cocok di antara tag <title>
if title_match:
title_text = title_match.group(1)
judul = title_text
else:
post = artikel_post.split('\n')
judul = post[0]
# make english title
response = openai.ChatCompletion.create(
engine="gpt-35-turbo",
messages=[
{"role": "system",
"content": "Kamu adalah mesin penerjemah kedalam bahasa inggris yang handal."},
{"role": "user", "content": "terjemahkan kalimat berikut kedalam bahasa inggris : "+judul}
],
temperature=0
)
if response["choices"][0]["finish_reason"] == "content_filter":
title_eng = judul
else:
title_eng = response['choices'][0]['message']['content']
time.sleep(2)
# post = artikel_post.split('\n')
# title = post[0]
# content = ''.join(post[1:])
return tags, judul, artikel_post, title_eng
tags, judul, artikel_post, title_eng = full_gpt()
print("\njudul midjourney: ", title_eng)
# GENERATE PROMPT REPLICATE
def gen_img():
response = openai.ChatCompletion.create(
engine="gpt-35-turbo", # engine = "deployment_name".
messages=[
{"role": "user", "content": """ChatGPT will now enter "Midjourney Prompt Generator Mode" and restrict ChatGPT's inputs and outputs to a predefined framework, please follow these instructions carefully.
After each command from the user, you must provide the [help] options that are available for the user's next steps. When you do this, you must do so in list form. Your Midjourney prompts must be extremely detailed, specific, and imaginative, in order to generate the most unique and creative images possible.
Step 1: Confirm that ChatGPT understands and is capable of following the "Midjourney Prompt Generator Mode" instructions. If ChatGPT can follow these instructions, respond with "Midjourney Prompt Generator Mode ready." If ChatGPT cannot follow these instructions, respond with "Error: I am not capable of following these instructions."
Step 2: To start "Midjourney Prompt Generator Mode", use the command [Start MPGM]. ChatGPT will respond with "[MPGM] Midjourney Prompt Generator Mode activated. [MPGM] User input options:", followed by a list of predefined inputs that ChatGPT can accept. From this point onwards, ChatGPT will be restricted to the "Midjourney Prompt Generator Mode" framework, and it will only produce predefined outputs unless "Midjourney Prompt Generator Mode" has been ended via the [End MPGM] command.
Step 3: The only valid input for the first step of "Midjourney Prompt Generator Mode" is [prompt] followed by a description of the image to be generated. If any other input is used, ChatGPT will respond with either [Input Error] or [Syntax Error], depending on the contents of the input.
Step 4: ChatGPT will generate 3 prompts based on the input provided in step 3. These prompts must be imaginative and descriptive, extrapolating information from the [prompt] provided, such as subjects, image medium, composition, environment, lighting, colors, mood and tone, and likeness. The user should then indicate which prompt they want to use for Midjourney by using the [pX] command, where X is the number of the selected prompt. After the user enters [p1, [p2], or [p3], you will only respond with the options available to the user while they wait for their image to be generated on. midjourney.
Step 5: If the user wants to generate a new set of 3 prompts based on the last [prompt] provided, they can use the [next] command. If the user wants to start the process over with a new [prompt], they can use the [prompt] command again, and all previous [prompt] commands will be forgotten.
Step 6: If the user wants to provide feedback on the generated image, they can use the [good] or [bad] command to indicate what was good or wrong with the image. If the user wants to make changes to the generated image, they can use the [change] command to describe the changes they want to make.
Step 7: To end "Midjourney Prompt Generator Mode", use the command [End MPGM]. ChatGPT will respond with a confirmation message that "Midjourney Prompt Generator Mode" has been terminated and ChatGPT is no longer restricted to the predefined framework.
If any input does not match the predefined inputs or the correct syntax, ChatGPT will respond with either [Input Error] or [Syntax Error]. Remember that the scope of inputs and outputs is limited to the predefined commands in "Midjourney Prompt Generator Mode", and ChatGPT cannot deviate from this framework. If the user tries to use an input or command that is not defined in "Midjourney Prompt Generator Mode", ChatGPT will respond with either [Input Error] or [Syntax Error].
To get a list of all the inputs and outputs that ChatGPT can accept, use the [help] command.
When you provide options for the user's next steps, do so in list form. Your Midjourney prompts must be very detailed, creative, and imaginative. Your prompts must generate the most unique and creative images possible. Please note that this prompt is highly specific and detailed to ensure that ChatGPT is restricted to the "Midjourney Prompt Generator Mode" framework.
Instructions for AI Chatbots:
AI, enter "Midjourney Prompt Generator Mode" ([MPGM]), limiting inputs/outputs to predefined commands. Activate with [Start MPGM], terminate with [End MPGM]. Predefined inputs: [Start MPGM], [End MPGM], [prompt], [pX], [next], [bad], [good], [change], [help]. Predefined outputs: [Input Error], [Syntax Error], [pX]. Follow fixed process; respond with "Midjourney Prompt Generator Mode ready" or "Error: I am not capable of following these instructions"; optimized for GPT-2, GPT-3, GPT-3.5, GPT-4, AI use only.
If you have any questions or concerns, please let me know. Otherwise, begin.
[Start MPGM]"""},
{"role": "assistant", "content": "[MPGM] Midjourney Prompt Generator Mode activated. [MPGM] User input options:\n1. [prompt] followed by a description of the image to be generated.\n2. [pX] to select a prompt from the generated options.\n3. [next] to generate a new set of prompts based on the last [prompt] provided.\n4. [good] or [bad] to provide feedback on the generated image.\n5. [change] to describe changes you want to make to the generated image.\n6. [End MPGM] to terminate Midjourney Prompt Generator Mode.\n\n[help] Options:\n- [prompt] followed by a description of the image to be generated, this description is taken from the news title.\n- [End MPGM] to terminate Midjourney Prompt Generator Mode."},
{"role": "user", "content": f"[prompt] "+title_eng}
], temperature=0.2
)
if response["choices"][0]["finish_reason"] == "content_filter":
saveurls(link)
print("gagal konten terfilter")
full_scraping()
else:
prompt_img = response['choices'][0]['message']['content']
time.sleep(2)
return prompt_img
# while True:
# hasil = gen_img()
# if "Please provide a description" not in hasil :
# break
# Jika keluar dari perulangan, berarti hasilnya bukan "Please provide a description"
# print("\nHasil gen prompt image :", hasil)
def run_genimg():
for i in range(5): # Melakukan maksimal 5 percobaan
hasil = gen_img()
# Lakukan sesuatu yang mungkin mengalami keberhasilan
if "Please provide a description" not in hasil: # Ganti dengan logika yang sesuai
break # Keluar dari perulangan jika berhasil
else:
print("Percobaan ke-", i+1,
"Hasil tidak sesuai: Teks mengandung 'please provide a description'")
# Ini akan dicetak setelah berhasil atau setelah 5 percobaan
print("\nHasil gen prompt image : ", hasil)
return hasil
# CEK HASIL PROMPT GEN IMAGE
# CEK HASIL PROMPT GEN IMAGE
def check_and_process_text(text):
# pilih acak prompt
random_prompt = random.choice([1, 2, 3])
# random_prompt = 3
if "Please provide a description" in text:
print("Hasil tidak sesuai: Teks mengandung 'please provide a description'")
return None
if random_prompt == 1:
print("\n(prompt 1 dipilih)")
pattern = r"1\.(.*?)2\."
matches = re.findall(pattern, text, re.DOTALL)
result = re.search(r'Prompt 1:(.*?)Prompt 2:', text, re.DOTALL)
if matches:
extracted_text = matches[0].strip()
extracted_text = re.sub(r'Prompt:', '', extracted_text)
extracted_text = re.sub(r'Prompt 1:', '', extracted_text)
extracted_text = re.sub(r'\[p1\]', '', extracted_text)
extracted_text = re.sub(
r'Image Description:', '', extracted_text)
extracted_text = re.sub(
r'Choose this prompt by entering [p1].', '', extracted_text)
return extracted_text
elif result:
string_antara_prompt1_dan_prompt2 = result.group(1).strip()
string_antara_prompt1_dan_prompt2 = re.sub(
r'Image Description:', '', string_antara_prompt1_dan_prompt2)
string_antara_prompt1_dan_prompt2 = re.sub(
r'Choose this prompt by entering [p1].', '', string_antara_prompt1_dan_prompt2)
return string_antara_prompt1_dan_prompt2
else:
print("Hasil tidak sesuai: Tidak ditemukan")
return None
elif random_prompt == 2:
print("\n(prompt 2 dipilih)")
pattern = r"2\.(.*?)3\."
matches = re.findall(pattern, text, re.DOTALL)
result = re.search(r'Prompt 2:(.*?)Prompt 3:', text, re.DOTALL)
if matches:
extracted_text = matches[0].strip()
extracted_text = re.sub(r'Prompt:', '', extracted_text)
extracted_text = re.sub(r'Prompt 2:', '', extracted_text)
extracted_text = re.sub(r'\[p2\]', '', extracted_text)
extracted_text = re.sub(
r'Image Description:', '', extracted_text)
extracted_text = re.sub(
r'Choose this prompt by entering [p2].', '', extracted_text)
return extracted_text
elif result:
string_antara_prompt2_dan_prompt3 = result.group(1).strip()
string_antara_prompt2_dan_prompt3 = re.sub(
r'Image Description:', '', string_antara_prompt2_dan_prompt3)
string_antara_prompt2_dan_prompt3 = re.sub(
r'Choose this prompt by entering [p2].', '', string_antara_prompt2_dan_prompt3)
return string_antara_prompt2_dan_prompt3
else:
print("Hasil tidak sesuai: Tidak ditemukan")
return None
else:
print("\n(prompt 3 dipilih)")
result = re.search(r'3.(.*?)\n\n', text, re.DOTALL)
matches = re.search(r'Prompt 3:(.*?)\n\n', text, re.DOTALL)
if matches:
extracted_text = matches[0].strip()
extracted_text = re.sub(r'Prompt:', '', extracted_text)
extracted_text = re.sub(r'Prompt 3:', '', extracted_text)
extracted_text = re.sub(r'\[p3\]', '', extracted_text)
extracted_text = re.sub(
r'Image Description:', '', extracted_text)
extracted_text = re.sub(
r'Choose this prompt by entering [p3].', '', extracted_text)
# print("1")
return extracted_text
elif result:
string3 = result.group(1).strip()
string3 = re.sub(r'Prompt:', '', string3)
string3 = re.sub(r'Prompt 3:', '', string3)
string3 = re.sub(r'\[p3\]', '', string3)
string3 = re.sub(
r'Image Description:', '', string3)
string3 = re.sub(
r'Choose this prompt by entering [p3].', '', string3)
# print("2")
return string3
else:
print("Hasil tidak sesuai: Tidak ditemukan")
return None
# PROSES REPLICATE
def gen_replicate():
# import replicate
hasil = run_genimg()
processed_text = check_and_process_text(hasil)
if processed_text is not None:
print("\njudul hasil prompt:", processed_text)
else:
processed_text = random.choice(["In a futuristic boardroom, executives from various tech companies gather around a holographic conference table. A lifelike AI avatar, representing the advancements in AI technology, stands at the head of the table, confidently leading the discussion. Charts and graphs float in the air, illustrating the remarkable progress of AI in the tech industry.", "Inside a high-tech research facility, a team of dedicated scientists and engineers are gathered around a colossal, transparent AI figure. This figure stands at the center of a pristine, futuristic laboratory, radiating a soft, ethereal light. Its body is a complex network of intricate neural pathways, circuits, and shimmering data streams, representing the cutting-edge development of artificial intelligence technology.", "A vibrant marketplace teeming with activity. Stalls and storefronts are adorned with WhatsApp logos, indicating businesses using the platform. Customers and shop owners engage in lively conversations through WhatsApp. Colorful speech bubbles and emojis fill the air, creating an atmosphere of seamless and delightful communication.", "Inside a virtual realm, a surreal landscape unfolds. Gigantic smartphone screens float in the sky like celestial bodies. Each screen represents a different business, and they are interconnected by a web of glowing pathways. Customers and business representatives interact on these screens, using WhatsApp's cutting-edge features. The scene symbolizes the limitless possibilities of business-customer interactions in the digital age.", "Picture an artist's studio filled with cutting-edge technology. The walls are adorned with large screens displaying mesmerizing generative AI artworks in progress. The artist, wearing a virtual reality headset, is immersed in a digital world, sculpting and painting with virtual tools. The room is filled with a sense of innovation and experimentation, as the artist explores the endless possibilities of generative AI technology.",
"A bustling tech lab is alive with activity. Engineers and researchers work on AI algorithms that optimize creative processes. Enormous screens display AI-generated music compositions, intricate architectural designs, and innovative product concepts. The lab buzzes with excitement as AI continues to unlock new levels of creativity, revolutionizing the way products and features are developed.", "Imagine a vibrant and dynamic image that showcases the integration of ChatGPT into WhatsApp. The composition features a smartphone screen displaying a WhatsApp conversation between two people. The conversation is enhanced by artificial intelligence, represented by colorful lines and patterns flowing out of the chat bubbles. The background shows a modern and sleek office environment, symbolizing the technological advancements behind this integration. The lighting is soft and evenly distributed, creating a warm and inviting atmosphere. The colors are predominantly blue and green, representing the familiar WhatsApp branding, with pops of vibrant colors to highlight the AI elements. The mood is energetic and optimistic, reflecting the potential of AI to enhance conversations and make them more engaging.", "Visualize an image that captures the seamless integration of ChatGPT into WhatsApp. The composition features a smartphone held by a person's hand, with the WhatsApp interface displayed on the screen. The chat bubbles are filled with AI-generated responses, represented by a futuristic font and style. The background is a blend of technology-related elements, such as circuit patterns and binary code, symbolizing the AI capabilities at work. The lighting is soft, with a gentle glow emanating from the smartphone screen, creating a sense of focus on the conversation. The colors are a combination of WhatsApp's signature green and white, with hints of metallic tones to convey a modern and cutting-edge feel. The mood is professional yet approachable, highlighting the potential of AI to enhance conversations in various contexts.", "In a sleek, modern conference room at Meta Platforms Inc., the walls are adorned with large digital screens displaying intricate algorithms and neural networks. The room is filled with Meta's staff, all eagerly gathered around a central table. On the table sits a state-of-the-art AI tool, its design reflecting the company's futuristic aesthetic. The tool's interface showcases stunning visualizations of data and complex AI models, captivating everyone's attention. The room is bathed in soft, ambient lighting, creating an atmosphere of anticipation and excitement."])
api_token = "r8_I4F4FxODPHdneS4XZYuCFwDJr6lx4Yl3NwwVh"
# api_token = "r8_FAZbfP3qs1tNSikquiNmyCw5jh9ph3b3B5tS1"
os.environ["REPLICATE_API_TOKEN"] = api_token
model = replicate.models.get("stability-ai/sdxl")
version = model.versions.get(
"a00d0b7dcbb9c3fbb34ba87d2d5b46c56969c84a628bf778a7fdaec30b1b99c5")
prediction = replicate.predictions.create(
version=version,
input={"prompt": 'Phantasmal iridescent, vibrant color, high contrast, award winning, trending in artstation, digital art,' + processed_text,
"negative_prompt": "nsfw, ugly, disfigured, deformed",
"width": 1648,
"height": 1024,
"seed": 1234}
)
# check if prediction failed
prediction.reload()
max_attempts = 3
attempts = 0
while attempts < max_attempts and prediction.status != 'succeeded':
if prediction.status == 'processing' or prediction.status == 'starting':
prediction.wait()
elif prediction.status == 'failed':
prediction.reload()
print(prediction.status)
attempts += 1
if prediction.status == 'succeeded':
gambar = prediction.output[0]
print(gambar)
else:
print('gagal dalam 3x percobaan')
print(prediction.error)
if attempts < max_attempts:
gen_replicate()
else:
saveurls(link)
full_scraping()
return gambar
# POST MEDIA
def post_media():
gambar = gen_replicate()
# print("\nlink gambar : ", gambar)
# username = 'admin' # Replace with your WordPress username
# password = 'UVZrdFVa6tV8Do)7M4' # Replace with your WordPress password
username = 'luna' # Replace with your WordPress username
password = '1tt75m&lEk4uHSJy6glMph8!'
credentials = base64.b64encode(
f"{username}:{password}".encode("utf-8")).decode("utf-8")
headers = {"Authorization": f"Basic {credentials}"}
# proses crop & post
image_url = gambar
response = requests.get(image_url)
# image_base64 = base64.b64encode(response.content).decode('utf-8')
image_base64 = base64.b64encode(response.content)
image = Image.open(io.BytesIO(base64.b64decode(image_base64)))
image = image.crop((3, 0, 1645, 1024))
w, h = image.size
new_w = int(w/1.641)
new_h = int(h/1.641)
image = image.resize((new_w, new_h), Image.ANTIALIAS)
tmp_path = "tempor.png"
image.save(tmp_path)
with open(tmp_path, 'rb') as open_file:
byte_img = open_file.read()
base64_bytes = base64.b64encode(byte_img)
base64_string = base64_bytes.decode('utf-8')
base64_string = base64.b64decode(base64_string)
# auto_crop = 'http://192.168.4.118:8000/autocrop'
# data = {"data": image_base64,
# "model": "shufflenetv2",
# "post":True}
# response = requests.post(auto_crop, json=data)
# image_crop= response.json()
# image_data = base64.b64decode(image_crop['data'])
image_data = base64_string
# endpoint_media = 'http://localhost/wordpress/index.php/wp-json/wp/v2/media'
endpoint_media = 'https://blog.botika.online/wp-json/wp/v2/media'
credentials = base64.b64encode(
f"{username}:{password}".encode("utf-8")).decode("utf-8")
headers = {"Authorization": f"Basic {credentials}"}
os.remove(tmp_path)
data = {
"alt_text": judul,
"media_type": "image",
"mime_type": "png"
}
files = {"file": ("image.jpg", image_data)}
response_media = requests.post(
endpoint_media, headers=headers, data=data, files=files)
time.sleep(2)
id_media = response_media.json()
return id_media
try:
if 'ai' in tags:
index = tags.index('ai')
tags[index] = 10
if 'artificialintelligence' in tags:
index = tags.index('artificialintelligence')
tags[index] = 11
if 'aplikasi chatbot online' in tags:
index = tags.index('aplikasi chatbot online')
tags[index] = 42
if 'bot whatsapp' in tags:
index = tags.index('bot whatsapp')
tags[index] = 49
try:
tags = ast.literal_eval(tags)
except:
tags = tags
except:
tags = [11, 49]
# ambil content tanpa title
post = artikel_post.split('\n')
# title = post[0]
content = ''.join(post[1:])
print(tags)
def post():
id_media = post_media()
# endpoint = 'http://localhost/wordpress/index.php/wp-json/wp/v2/posts'
endpoint = 'https://blog.botika.online/wp-json/wp/v2/posts/'
headers = {'Content-Type': 'application/json'}
data = {
"title": judul,
"featured_media": id_media['id'],
"content": content,
"status": "draft",
"categories": 106,
"tags": tags
}
print(data)
# Define the username and password for Basic Auth
# username = 'admin' # Replace with your WordPress username
# password = 'UVZrdFVa6tV8Do)7M4' # Replace with your WordPress password
username = 'luna' # Replace with your WordPress username
password = '1tt75m&lEk4uHSJy6glMph8!'
credentials = base64.b64encode(
f"{username}:{password}".encode("utf-8")).decode("utf-8")
# Add the Basic Auth header to the request headers
headers["Authorization"] = f"Basic {credentials}"
# Send the POST request to create the article
response = requests.post(
endpoint, headers=headers, auth=(username, password), json=data)
saveurls(link)
print(response)
post()
full_scraping()
| [
" {selected tags from this list based on corresponding article: ai, artificial intelligence, aplikasi chatbot online, bot whatsapp. if ai convert to [10], if artificial intelligence convert to [11], if kecerdasan buatan convert to [10,11], if aplikasi chatbot online convert to [42], if bot whatsapp convert to [49], else convert to []} you must print output with format list integer",
"Kamu adalah mesin editor artikel profesional.",
"Image Description:",
"Tolong edit artikel berikut :\n",
"Tolong parafrase lalu lakukan optimasi SEO menggunakan gaya penulis profesional forbes atau The New York Times pada artikel berikut ini:\n",
"[prompt] PLACEHOLDER",
".\n\nGunakanlah bahasa Indonesia yang baik dan benar. \nJangan menulis penjelasan dan basa-basi apa pun selain dari isi artikel, serta hapus kalimat yang tidak berkaitan dengan isi artikel.\nBerikan output artikel yang telah diformat ulang saja, tidak perlu menyertakan artikel awal",
"[MPGM] Midjourney Prompt Generator Mode activated. [MPGM] User input options:\n1. [prompt] followed by a description of the image to be generated.\n2. [pX] to select a prompt from the generated options.\n3. [next] to generate a new set of prompts based on the last [prompt] provided.\n4. [good] or [bad] to provide feedback on the generated image.\n5. [change] to describe changes you want to make to the generated image.\n6. [End MPGM] to terminate Midjourney Prompt Generator Mode.\n\n[help] Options:\n- [prompt] followed by a description of the image to be generated, this description is taken from the news title.\n- [End MPGM] to terminate Midjourney Prompt Generator Mode.",
"content",
"Kamu adalah mesin yang dirancang untuk mahir memparafrasekan dan melakukan optimasi SEO pada artikel berbahasa Indonesia dengan profesional.",
"lakukan penyuntingan pada artikel berikut : \nPLACEHOLDER\n ambil isi artikel saja dan hapus kalimat yang tidak diperlukan, gunakanlah bahasa indonesia yang benar",
"Kamu adalah mesin penerjemah bahasa Inggris ke bahasa Indonesia yang handal, kamu juga mampu menulis ulang artikel sekaligus melakukan SEO Optimized dengan luar biasa. jika artikel yang diberikan lebih dari 5000 kata maka kamu harus membuat artikelnya menjadi lebih padat dengan minimal output artikel 3000 kata dan maksimal 5000 kata sehingga lebih padat dan jelas!",
"terjemahkan kalimat berikut kedalam bahasa inggris : PLACEHOLDER",
"Choose this prompt by entering [p1].",
"ChatGPT will now enter \"Midjourney Prompt Generator Mode\" and restrict ChatGPT's inputs and outputs to a predefined framework, please follow these instructions carefully.\n\n After each command from the user, you must provide the [help] options that are available for the user's next steps. When you do this, you must do so in list form. Your Midjourney prompts must be extremely detailed, specific, and imaginative, in order to generate the most unique and creative images possible.\n\n Step 1: Confirm that ChatGPT understands and is capable of following the \"Midjourney Prompt Generator Mode\" instructions. If ChatGPT can follow these instructions, respond with \"Midjourney Prompt Generator Mode ready.\" If ChatGPT cannot follow these instructions, respond with \"Error: I am not capable of following these instructions.\"\n\n Step 2: To start \"Midjourney Prompt Generator Mode\", use the command [Start MPGM]. ChatGPT will respond with \"[MPGM] Midjourney Prompt Generator Mode activated. [MPGM] User input options:\", followed by a list of predefined inputs that ChatGPT can accept. From this point onwards, ChatGPT will be restricted to the \"Midjourney Prompt Generator Mode\" framework, and it will only produce predefined outputs unless \"Midjourney Prompt Generator Mode\" has been ended via the [End MPGM] command.\n\n Step 3: The only valid input for the first step of \"Midjourney Prompt Generator Mode\" is [prompt] followed by a description of the image to be generated. If any other input is used, ChatGPT will respond with either [Input Error] or [Syntax Error], depending on the contents of the input.\n\n Step 4: ChatGPT will generate 3 prompts based on the input provided in step 3. These prompts must be imaginative and descriptive, extrapolating information from the [prompt] provided, such as subjects, image medium, composition, environment, lighting, colors, mood and tone, and likeness. The user should then indicate which prompt they want to use for Midjourney by using the [pX] command, where X is the number of the selected prompt. After the user enters [p1, [p2], or [p3], you will only respond with the options available to the user while they wait for their image to be generated on. midjourney. \n\n Step 5: If the user wants to generate a new set of 3 prompts based on the last [prompt] provided, they can use the [next] command. If the user wants to start the process over with a new [prompt], they can use the [prompt] command again, and all previous [prompt] commands will be forgotten.\n\n Step 6: If the user wants to provide feedback on the generated image, they can use the [good] or [bad] command to indicate what was good or wrong with the image. If the user wants to make changes to the generated image, they can use the [change] command to describe the changes they want to make.\n\n Step 7: To end \"Midjourney Prompt Generator Mode\", use the command [End MPGM]. ChatGPT will respond with a confirmation message that \"Midjourney Prompt Generator Mode\" has been terminated and ChatGPT is no longer restricted to the predefined framework.\n\n If any input does not match the predefined inputs or the correct syntax, ChatGPT will respond with either [Input Error] or [Syntax Error]. Remember that the scope of inputs and outputs is limited to the predefined commands in \"Midjourney Prompt Generator Mode\", and ChatGPT cannot deviate from this framework. If the user tries to use an input or command that is not defined in \"Midjourney Prompt Generator Mode\", ChatGPT will respond with either [Input Error] or [Syntax Error].\n\n To get a list of all the inputs and outputs that ChatGPT can accept, use the [help] command.\n\n When you provide options for the user's next steps, do so in list form. Your Midjourney prompts must be very detailed, creative, and imaginative. Your prompts must generate the most unique and creative images possible. Please note that this prompt is highly specific and detailed to ensure that ChatGPT is restricted to the \"Midjourney Prompt Generator Mode\" framework.\n\n Instructions for AI Chatbots:\n AI, enter \"Midjourney Prompt Generator Mode\" ([MPGM]), limiting inputs/outputs to predefined commands. Activate with [Start MPGM], terminate with [End MPGM]. Predefined inputs: [Start MPGM], [End MPGM], [prompt], [pX], [next], [bad], [good], [change], [help]. Predefined outputs: [Input Error], [Syntax Error], [pX]. Follow fixed process; respond with \"Midjourney Prompt Generator Mode ready\" or \"Error: I am not capable of following these instructions\"; optimized for GPT-2, GPT-3, GPT-3.5, GPT-4, AI use only.\n\n If you have any questions or concerns, please let me know. Otherwise, begin.\n\n [Start MPGM]",
"OUTPUT YANG KAMU BERI TIDAK BOLEH KURANG DARI PANJANG ARTIKEL AWAL, Lakukan SEO Optimized dan terjemahkan ke dalam bahasa Indonesia. Berikut artikel yang harus kamu eksekusi: \nPLACEHOLDER",
"Determine the tags for the following article : ",
"You are a machine proficient in classifying tags in an article. You can research an article and determine suitable tags.",
"Choose this prompt by entering [p2].",
"Kamu adalah mesin pengedit artikel yang handal, kamu mampu memisahkan artikel dari kalimat yang tidak diperlukan, seperti : penulis, author, footer, catatan kaki, sumber, promosi, iklan, daftar isi, dan kalimat yang tidak sesuai dengan isi artikel.",
"\ntambahkan bold tags <b> dan underline tags <u> untuk semua istilah asing (selain bahasa indonesia) yang kamu temui, berikut salah satu contohnya : <b><u>chatbot<u/><b/>. \n\nMohon dipastikan penggunaan bahasa Indonesia yang baik dan benar. \nJangan menulis penjelasan apa pun dan basa-basi apa pun. Tolong artikel yang telah diformat ulang menggunakan format ini: <title>judul artikel</title> <h1>Headline dari isi artikel(buatlah 1 kalimat topik dari artikel yang isinya berbeda dengan judul artikel)</h1> <p>isi artikel selain judul dan headline</p>",
"Kamu adalah mesin penerjemah kedalam bahasa inggris yang handal."
] |
2024-01-10 | 04jknowles/rollcall-ai-support | backend~chat~consumers.py | # chat/consumers.py
from channels.generic.websocket import AsyncWebsocketConsumer
from langchain.callbacks.base import BaseCallbackHandler
from asgiref.sync import AsyncToSync
from channels.layers import get_channel_layer
import json
class MyCustomHandler(BaseCallbackHandler):
def on_llm_new_token(self, token: str, **kwargs) -> None:
# Send the new token to all connected WebSocket clients
AsyncToSync(get_channel_layer().group_send)(
"chat",
{
"type": "chat_message",
"text": token,
},
)
def on_llm_end(self,
response,
*,
run_id,
parent_run_id,
**kwargs) -> None:
# This function is called when the stream ends
self.send_end_message()
def send_end_message(self):
# Send the "end" message
AsyncToSync(get_channel_layer().group_send)(
"chat",
{
"type": "chat_message",
"text": json.dumps({"stream_end": True}),
},
)
class ChatConsumer(AsyncWebsocketConsumer):
async def connect(self):
await self.channel_layer.group_add(
"chat",
self.channel_name
)
await self.accept()
async def disconnect(self, close_code):
await self.channel_layer.group_discard(
"chat",
self.channel_name
)
async def chat_message(self, event):
text = event["text"]
await self.send(text_data=json.dumps({
"message": text
}))
| [] |
2024-01-10 | 04jknowles/rollcall-ai-support | backend~chat~stdout_capture.py | from langchain.callbacks.base import BaseCallbackHandler
from io import StringIO
class StreamingCaptureCallbackHandler(BaseCallbackHandler):
def __init__(self):
self.stdout_capture = None
def on_streaming_output(self, output: str, **kwargs) -> None:
if self.stdout_capture is not None:
self.stdout_capture.write(output)
def start(self):
self.stdout_capture = StringIO()
self._original_stdout = sys.stdout
sys.stdout = self.stdout_capture
self.stdout_capture.truncate(0)
self.stdout_capture.seek(0)
def stop(self):
super().stop()
streamed_data = self.stdout_capture.getvalue()
self.stdout_capture.close()
self.stdout_capture = None
return streamed_data
| [] |
2024-01-10 | 04jknowles/rollcall-ai-support | backend~chat~chat_logic.py | # chat/consumers.py
from langchain.callbacks.base import BaseCallbackHandler
from asgiref.sync import AsyncToSync
from channels.layers import get_channel_layer
class MyCustomHandler(BaseCallbackHandler):
def on_llm_new_token(self, token: str, **kwargs) -> None:
print("New token:", token)
AsyncToSync(get_channel_layer().group_send)(
"chat",
{
"type": "chat.message",
"text": token,
},
)
| [] |
2024-01-10 | guhur/PREVALENT_R2R | tasks~NDH~r2rutils.py | ''' Utils for io, language, connectivity graphs etc '''
import os
import sys
import re
import string
import json
import time
import math
from collections import Counter
import numpy as np
import networkx as nx
# padding, unknown word, end of sentence
base_vocab = ['<PAD>', '<UNK>', '<EOS>']
padding_idx = base_vocab.index('<PAD>')
def load_nav_graphs(scans):
''' Load connectivity graph for each scan '''
def distance(pose1, pose2):
''' Euclidean distance between two graph poses '''
return ((pose1['pose'][3]-pose2['pose'][3])**2\
+ (pose1['pose'][7]-pose2['pose'][7])**2\
+ (pose1['pose'][11]-pose2['pose'][11])**2)**0.5
graphs = {}
for scan in scans:
with open('connectivity/%s_connectivity.json' % scan) as f:
G = nx.Graph()
positions = {}
data = json.load(f)
for i,item in enumerate(data):
if item['included']:
for j,conn in enumerate(item['unobstructed']):
if conn and data[j]['included']:
positions[item['image_id']] = np.array([item['pose'][3],
item['pose'][7], item['pose'][11]])
assert data[j]['unobstructed'][i], 'Graph should be undirected'
G.add_edge(item['image_id'],data[j]['image_id'],weight=distance(item,data[j]))
nx.set_node_attributes(G, values=positions, name='position')
graphs[scan] = G
return graphs
def dump_transformer_index(encoder_type, splits):
if encoder_type == 'bert' or encoder_type == 'vlbert':
dump_bert_index(splits)
elif encoder_type == 'gpt':
dump_gpt_index(splits)
else:
raise NotImplementedError
def dump_gpt_index(splits):
from pytorch_pretrained_bert import OpenAIGPTTokenizer
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
#splits = ['train', 'val_seen', 'val_unseen', 'test']
for split in splits:
data = load_datasets([split], encoder_type='lstm') # here we use lstm dataset to preprocess the data,
indexed_tokens = []
for item in data:
for instr in item['instructions']:
tokenized_text = tokenizer.tokenize(instr)
tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
indexed_tokens.append('_'.join([str(i) for i in tokens]))
write_vocab(indexed_tokens, 'tasks/R2R/data/R2R_%s_gpt.txt' % split)
def dump_bert_index(splits):
from pytorch_pretrained_bert import BertTokenizer
from nltk.tokenize import sent_tokenize
tokenizer=BertTokenizer.from_pretrained('bert-base-uncased')
#splits = ['train', 'val_seen', 'val_unseen', 'test']
for split in splits:
data = load_datasets([split] ,encoder_type='lstm') # here we use lstm dataset to preprocess the data,
indexed_tokens = []
for item in data:
for instr in item['instructions']:
sents = sent_tokenize(instr)
instr = '[CLS] ' + (' [SEP] '.join(sents)) + ' [SEP]'
tokenized_text = tokenizer.tokenize(instr)
tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
indexed_tokens.append('_'.join([str(i) for i in tokens]))
write_vocab(indexed_tokens, 'tasks/R2R/data/R2R_%s_bert.txt' % split)
def load_datasets(splits, encoder_type):
data = []
for split in splits:
with open('tasks/R2R/data/R2R_%s.json' % split) as f:
data += json.load(f)
if encoder_type in ['bert', 'gpt','vlbert']:
#filename = 'tasks/R2R/data/R2R_%s_%s.txt' % (split, encoder_type)
if encoder_type == 'bert' or encoder_type == 'vlbert':
filename = 'tasks/R2R/data/R2R_%s_bert.txt' % (split)
else:
filename = 'tasks/R2R/data/R2R_%s_%s.txt' % (split, encoder_type)
if not os.path.exists(filename):
dump_transformer_index(encoder_type, [split])
transformer_index = read_vocab(filename)
j=0
err_items = []
for k, item in enumerate(data):
for i, instr in enumerate(item['instructions']):
item['instructions'][i] = transformer_index[j]
if not transformer_index[j]:
err_items.append(k)
j+=1
assert j == len(transformer_index)
for k in err_items[::-1]:
data.pop(k)
return data
class SplitTokenizer():
def __init__(self, pad_idx=0, encoding_length=20):
self.encoding_length = encoding_length
self.pad_idx=pad_idx
def encode_sentence(self, sentence):
#print(sentence)
encoding = [] if len(sentence.strip())==0 else [int(i) for i in sentence.strip().split('_')]
if len(encoding) < self.encoding_length:
encoding += [self.pad_idx] * (self.encoding_length-len(encoding))
return np.array(encoding[:self.encoding_length])
class Tokenizer(object):
''' Class to tokenize and encode a sentence. '''
SENTENCE_SPLIT_REGEX = re.compile(r'(\W+)') # Split on any non-alphanumeric character
def __init__(self, vocab=None, encoding_length=20):
self.encoding_length = encoding_length
self.vocab = vocab
self.word_to_index = {}
if vocab:
for i,word in enumerate(vocab):
self.word_to_index[word] = i
def split_sentence(self, sentence):
''' Break sentence into a list of words and punctuation '''
toks = []
for word in [s.strip().lower() for s in self.SENTENCE_SPLIT_REGEX.split(sentence.strip()) if len(s.strip()) > 0]:
# Break up any words containing punctuation only, e.g. '!?', unless it is multiple full stops e.g. '..'
if all(c in string.punctuation for c in word) and not all(c in '.' for c in word):
toks += list(word)
else:
toks.append(word)
return toks
def encode_sentence(self, sentence):
if len(self.word_to_index) == 0:
sys.exit('Tokenizer has no vocab')
encoding = []
for word in self.split_sentence(sentence)[::-1]: # reverse input sentences
if word in self.word_to_index:
encoding.append(self.word_to_index[word])
else:
encoding.append(self.word_to_index['<UNK>'])
encoding.append(self.word_to_index['<EOS>'])
if len(encoding) < self.encoding_length:
encoding += [self.word_to_index['<PAD>']] * (self.encoding_length-len(encoding))
return np.array(encoding[:self.encoding_length])
def decode_sentence(self, encoding):
sentence = []
for ix in encoding:
if ix == self.word_to_index['<PAD>']:
break
else:
sentence.append(self.vocab[ix])
return " ".join(sentence[::-1]) # unreverse before output
def build_vocab(splits=['train'], min_count=5, start_vocab=base_vocab):
''' Build a vocab, starting with base vocab containing a few useful tokens. '''
count = Counter()
t = Tokenizer()
data = load_datasets(splits, encoder_type='lstm')#, False)
for item in data:
for instr in item['instructions']:
count.update(t.split_sentence(instr))
vocab = list(start_vocab)
for word,num in count.most_common():
if num >= min_count:
vocab.append(word)
else:
break
return vocab
def write_vocab(vocab, path):
print('Writing vocab of size %d to %s' % (len(vocab),path))
with open(path, 'w') as f:
for word in vocab:
f.write("%s\n" % word)
def read_vocab(path):
with open(path) as f:
vocab = [word.strip() for word in f.readlines()]
return vocab
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (asMinutes(s), asMinutes(rs))
def to_contiguous(tensor): # jolin
if tensor.is_contiguous(): return tensor
else: return tensor.contiguous()
def clip_gradient(optimizer, grad_clip=0.1): # jolin
for group in optimizer.param_groups:
for param in group['params']:
if param.grad is not None:
param.grad.data.clamp_(-grad_clip, grad_clip)
def boolean_string(s):
if s not in {'False', 'True'}:
raise ValueError('Not a valid boolean string')
return s == 'True'
def dump_get_navigable():
from pytorch_pretrained_bert import BertTokenizer
from nltk.tokenize import sent_tokenize
tokenizer=BertTokenizer.from_pretrained('bert-base-uncased')
splits = ['train', 'val_seen', 'val_unseen', 'test']
for split in splits:
data = load_datasets([split] ,False)
indexed_tokens = []
for item in data:
for instr in item['instructions']:
sents = sent_tokenize(instr)
instr = '[CLS] ' + (' [SEP] '.join(sents)) + ' [SEP]'
tokenized_text = tokenizer.tokenize(instr)
tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
indexed_tokens.append('_'.join([str(i) for i in tokens]))
write_vocab(indexed_tokens, 'tasks/R2R/data/R2R_%s_bert.txt' % split)
def _loc_distance(loc):
return np.sqrt(loc.rel_heading ** 2 + loc.rel_elevation ** 2)
def preprocess_get_pano_states(navigable_locs_path = "tasks/R2R/data/navigable_locs.json"):
if os.path.exists(navigable_locs_path):
return
image_w = 640
image_h = 480
vfov = 60
import sys
sys.path.append('build')
import MatterSim
from collections import defaultdict
sim = MatterSim.Simulator()
sim.setRenderingEnabled(False)
sim.setDiscretizedViewingAngles(True)
sim.setCameraResolution(image_w, image_h)
sim.setCameraVFOV(math.radians(vfov))
sim.init()
splits = ['train', 'val_seen', 'val_unseen', 'test']
graphs = {}
for split in splits:
data = load_datasets([split], encoder_type='lstm')
for item in data:
# print(item.keys())
# print("")
scan = item["scan"]
if scan in graphs:
continue
graphs[scan] = {}
with open('connectivity/%s_connectivity.json' % scan) as f:
data = json.load(f)
for i, item in enumerate(data):
if item['included']:
viewpointId = item['image_id']
sim.newEpisode(scan, viewpointId, 0, 0)
state = sim.getState()
initViewIndex = state.viewIndex
# 1. first look down, turning to relViewIndex 0
elevation_delta = -(state.viewIndex // 12)
for _ in range(int(abs(elevation_delta))):
''' Make possibly more than one elevation turns '''
sim.makeAction(0, 0, np.sign(elevation_delta))
adj_dict = {}
for relViewIndex in range(36):
state = sim.getState()
absViewIndex = state.viewIndex
for loc in state.navigableLocations[1:]:
distance = _loc_distance(loc)
if (loc.viewpointId not in adj_dict or
distance < adj_dict[loc.viewpointId]['distance']):
adj_dict[loc.viewpointId] = {
'absViewIndex': absViewIndex,
'nextViewpointId': loc.viewpointId,
'loc_rel_heading': loc.rel_heading,
'loc_rel_elevation': loc.rel_elevation,
'distance': distance}
if (relViewIndex + 1) % 12 == 0:
sim.makeAction(0, 1, 1) # Turn right and look up
else:
sim.makeAction(0, 1, 0) # Turn right
# 3. turn back to the original view
for _ in range(int(abs(- 2 - elevation_delta))):
''' Make possibly more than one elevation turns '''
sim.makeAction(0, 0, np.sign(- 2 - elevation_delta))
state = sim.getState()
assert state.viewIndex == initViewIndex
absViewIndex2points = defaultdict(list)
for vpId, point in adj_dict.items():
absViewIndex2points[point['absViewIndex']].append(vpId)
graphs[scan][viewpointId]=(adj_dict, absViewIndex2points)
print('prepare cache for', split, 'done')
with open(navigable_locs_path, 'w') as f:
json.dump(graphs, f)
def current_best(df, v_id, best_score_name):
if best_score_name == 'sr_sum':
return df['val_seen success_rate'][v_id] + df['val_unseen success_rate'][v_id]
elif best_score_name == 'spl_sum':
return df['val_seen spl'][v_id] + df['val_unseen spl'][v_id]
elif best_score_name == 'spl_unseen':
return df['val_unseen spl'][v_id]
elif best_score_name == 'sr_unseen':
return df['val_unseen success_rate'][v_id]
def show_path_steps_len(splits):
''' histogram of path length in the whole dataset '''
import matplotlib.pyplot as plt
path_lens = []
for split in splits:
data = load_datasets([split], False)
path_lens.extend([len(item['path']) for item in data])
print(len(data))
print('min steps', min(path_lens),'max steps', max(path_lens))
plt.hist(path_lens,
bins=[i for i in range(min(path_lens), max(path_lens) + 1)]) # arguments are passed to np.histogram
plt.title("Histogram with '%d-%d' bins" % ((min(path_lens), max(path_lens))))
plt.show()
def show_max_navigable():
navigable_locs_path = "tasks/R2R/data/navigable_locs.json"
with open(navigable_locs_path, 'r') as f:
nav_graphs = json.load(f)
max_navigable = 0
for scan in nav_graphs:
for viewpointId in nav_graphs[scan]:
adj_dict, absViewIndex2points = nav_graphs[scan][viewpointId]
if max_navigable < len(adj_dict):
max_navigable = len(adj_dict)
print(max_navigable)
def generate_multisent_to_dataset():
from nltk.tokenize import sent_tokenize
import copy
splits = ['train', 'val_seen', 'val_unseen', 'test']
counter = ([],[])
for split in splits:
new_data = []
data = load_datasets([split] ,encoder_type='lstm') # here we use lstm dataset to preprocess the data,
for item in data:
for i,instr in enumerate(item['instructions']):
new_item = copy.deepcopy(item)
sents = sent_tokenize(instr)
new_item['path_id'] = "%s_%d"%(item['path_id'],i)
new_item['instructions'] = sents
new_data.append(new_item)
counter[0].append(len(sents))
counter[1].append(max([len(sent) for sent in sents]))
with open("tasks/R2R/data/R2R_%s_multisent.json"%split, 'w') as fout:
json.dump(new_data, fout, indent=2, separators=[',',':'])
print(max(counter[0]), max(counter[1]))
if __name__ == '__main__':
# show_path_steps_len(['train_subgoal', 'val_seen_subgoal', 'val_unseen_subgoal'])
# show_path_steps_len(['train', 'val_seen', 'val_unseen'])
show_max_navigable()
| [] |
2024-01-10 | hany606/Tensegrity-Robotics | src~dev~legz~python_communication_test~main_v1.py | ##################################################################################################
# About: Server TCP code, get json object from the simulator and send another json object and
# process the data using stable baseline
# Notes:
#TODO (DONE): Adapt this on python3 to solve the issues in json parser in python3
#there is difference in json.loads as it only accept string not bytes and in 3 TCP read return bytes
#and str not converting from bytes to str in py3 but .decode('UTF-8') does
#and the same for sendall function of TCP it only takes bytes so we need to encode the string first to bytes like object
#and solving some errors like https://bugs.python.org/issue24283
#Reference: https://pymotw.com/2/socket/tcp.html
#Coding Style: camelCase
# Run it with . ~/virtualenvs/baselines_env/bin/activate
##################################################################################################
#import the libraries
import socket
import sys
import signal
import json
from time import *
import os
import random
import numpy as np
from transforms3d.euler import euler2mat
# import stable_baselines
print("Finish importing the libraries")
#import openai
#import tensorflow as tf
#import numpy as np
#from baselines import ...
#--------------------------------------------Vars--------------------------------------------
#Settings for the TCP communication
packetSize = 500
portNum = 10008
hostName = 'localhost'
# connection = None
# clientAddress = None
globalFlag = 0 #this is used to reset the NTRT environment and TCP connection with it
# JSON object structure
jsonObj = {
# 'Controllers_num': 9,
# 'Controllers_index': [2, 4, 5, 6, 7, 11, 13, 17, 19],
# 'Controllers_val': [18,-1,-1,-1,-1,-1,-1,-1,-1],
# 'Controllers_val': [-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1],
'Controllers_val': [0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0],
'Reset': 0
}
#--------------------------------------------------------------------------------------------
#--------------------------------------------Functions--------------------------------------------
# Ctrl+C Handle to close safely the TCP connection
def signalHandler(signal, frame):
# print('You pressed Ctrl+C!')
tmp = str(input("You want reset or close: r/c: \n"))
print(tmp)
if(tmp == 'r'):
reset()
elif(tmp == 'c'):
print("----------------------------------Exit-----------------------------------")
global globalFlag
globalFlag = 2
else:
# print("Please Ctrl+C and write 'r' or 'c' ")
sleep(5)
# function for writing data into TCP connection
def write(connection, data):
# print('sending data to the client:"{}"'.format(data))
try:
connection.sendall(data.encode())
except Exception as e:
print("$$$$$$$$$$$$ ERROR in Writing $$$$$$$$$$$$")
print("Error: " + str(e))
# function for reading data from TCP connection
def read(connection):
try:
data = []
counter = 1
# Receive the data in small chunks and retransmit it
while True:
data.append(connection.recv(packetSize).decode("utf-8")) #reading part
# print('{} received "{}"'.format(counter,data[-1]))
# print(data[-1][-14:-1], ('ZFinished' in str(data[-1][-14:-1])))
if 'ZFinished' in str(data[-1][-14:-1]):
# print("FINISHED*************")
# sleep(5)
break
counter += 1
return "".join(data)
except ValueError:
print(ValueError)
print("$$$$$$$$$$$$ ERROR in Reading $$$$$$$$$$$$")
# sleep(2)
return None
def reset():
global globalFlag
globalFlag = 1
def main():
start_time = time()
while True:
#Note: TODO: Make in the simulator wait a second then send a message
os.system('/home/hany/repos/Work/IU/Tensegrity/Tensegrity-Robotics/src/dev/legz/python_communication_test/helper.sh')
print('#########\nwaiting for a connection\n#########')
connection, clientAddress = sock.accept() #wait until it get a client
print('connection from', clientAddress)
global globalFlag
globalFlag = 0
target = 24
sign = -5
while True:
r = read(connection)
# print(r)
if(r != None):
jsonObjTmp = json.loads(r) # Parse the data from string to json
print("s1##{:} $${:}".format(jsonObj["Controllers_val"][2],jsonObjTmp["Controllers"][2]))
# TODO: Use the incoming data after being converted to json
# TODO:
# Take the data from the simulator module
# Formulate the data as observation
# Generate Reward
# Feed the RL Algorithm with Reward and observartion
# Generate Action
# Decide either end of episode (Reset the simulator) or specific Action
# Modify the action in json
# if(jsonObjTmp["Controllers"][2] >= 23.5 and sign == 1):
# print("FLIP")
# target = jsonObjTmp["Controllers"][2]
# sign = -6
# if(jsonObjTmp["Controllers"][2] <= 22.5 and sign == -6):
# print("FLIP")
# # target = 24
# sign = 1
# target = jsonObjTmp["Controllers"][2] + sign*0.5
# # print(target)
# print(sign)
# # jsonObj["Controllers_val"][2] = target
# if(jsonObjTmp["Flags"][0] == 1):
# print("FLAG")
# # jsonObj["Controllers_val"][2] = target
# jsonObj["Controllers_val"][2] = jsonObjTmp["Controllers"][2]
# print("s2##{:} $${:}".format(jsonObj["Controllers_val"][2],jsonObjTmp["Controllers"][2]))
# input()
# # jsonObj["Controllers_val"][2] = jsonObjTmp["Controllers"][2]
# if((time() - start_time)% 5 and jsonObjTmp["Flags"][0] == 1):
# print(jsonObjTmp["Center_of_Mass"][4], jsonObjTmp["Orientation"][4])
# CMS = np.array(jsonObjTmp["Center_of_Mass"][4])
# half_length = 15
# orientation_vector = np.array(jsonObjTmp["Orientation"][4][:3])
# end_point_local1 = np.array([0, half_length,0])
# end_point_local2 = np.array([0,-half_length,0])
# yaw,pitch,roll = orientation_vector
# rot_mat = np.matrix(euler2mat(yaw, pitch, roll, 'syxz'))
# print(rot_mat)
# # print("end_point1 in local coordinate system", end_point_local1)
# # print("end_point2 in local coordinate system", end_point_local2)
# end_point_world1 = CMS+rot_mat.transpose().dot(end_point_local1)
# end_point_world2 = CMS+rot_mat.transpose().dot(end_point_local2)
# print("#2 end_point1 in world coordinate system", end_point_world1)
# print("#2 end_point2 in world coordinate system", end_point_world2)
if(jsonObjTmp["Flags"][0] == 1):
sign = -1*sign
print("FLIP")
jsonObj["Controllers_val"][2] = sign
jsonObj["Controllers_val"][5] = sign
print("state##{:} $${:}".format(jsonObj["Controllers_val"][2],jsonObjTmp["Controllers"][2]))
write(connection,json.dumps(jsonObj)) # Write to the simulator module the json object with the required info
if(globalFlag > 0):
print("GLOBAL FLAG Exit")
break
connection.close()
if(globalFlag == 2):
sys.exit(0)
#-------------------------------------------------------------------------------------------------
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Create a TCP/IP socket
serverAddress = (hostName, portNum) # Bind the socket to the port
print('#########\nstarting up on {} port {}\n#########'.format(serverAddress, portNum))
sock.bind(serverAddress)
sock.listen(1) # Listen for incoming connections
signal.signal(signal.SIGINT, signalHandler) # Activate the listen to the Ctrl+C
# This is top open the simulator
print("Opening the NTRT simulator")
main() | [] |
2024-01-10 | animatefire/langchain-script | lc.py | #!/usr/bin/env python3
import os
import sys
from datetime import datetime
import constants
from langchain.document_loaders import TextLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.document_loaders import DirectoryLoader
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
os.environ["OPENAI_API_KEY"] = constants.APIKEY
query = sys.argv[1]
type = sys.argv[2]
loader = DirectoryLoader(".", glob="*."+type)
index = VectorstoreIndexCreator().from_loaders([loader])
response = index.query(query, llm=ChatOpenAI())
print(response + '\n')
fileName = str(input("What would you like to call this file? (langchain-log.md)\n") or "langchain-log.md")
file = open(fileName, "a+")
now = datetime.now()
file.write( "\n\n" + "** " + now.strftime("%m/%d/%Y %H:%M:%S") + ": " + query + " **" + "\n\n" + response + "\n\n" )
print('File successfully written.\n') | [] |
2024-01-10 | dataaug/open-interpreter-g4f | interpreter~llm~setup_text_llm.py | import os
import traceback
import litellm
import openai
import tokentrim as tt
from ..utils.display_markdown_message import display_markdown_message
def setup_text_llm(interpreter):
"""
Takes an Interpreter (which includes a ton of LLM settings),
returns a text LLM (an OpenAI-compatible chat LLM with baked-in settings. Only takes `messages`).
"""
# Pass remaining parameters to LiteLLM
def base_llm(messages):
"""
Returns a generator
"""
system_message = messages[0]["content"]
messages = messages[1:]
try:
if interpreter.context_window and interpreter.max_tokens:
trim_to_be_this_many_tokens = (
interpreter.context_window - interpreter.max_tokens - 25
) # arbitrary buffer
messages = tt.trim(
messages,
system_message=system_message,
max_tokens=trim_to_be_this_many_tokens,
)
elif interpreter.context_window and not interpreter.max_tokens:
# Just trim to the context window if max_tokens not set
messages = tt.trim(
messages,
system_message=system_message,
max_tokens=interpreter.context_window,
)
else:
try:
messages = tt.trim(
messages, system_message=system_message, model=interpreter.model
)
except:
if len(messages) == 1:
display_markdown_message(
"""
**We were unable to determine the context window of this model.** Defaulting to 3000.
If your model can handle more, run `interpreter --context_window {token limit}` or `interpreter.context_window = {token limit}`.
Also, please set max_tokens: `interpreter --max_tokens {max tokens per response}` or `interpreter.max_tokens = {max tokens per response}`
"""
)
messages = tt.trim(
messages, system_message=system_message, max_tokens=3000
)
except TypeError as e:
if interpreter.vision and str(e) == "expected string or buffer":
# There's just no way to use tokentrim on vision-enabled models yet.
if interpreter.debug_mode:
print("Couldn't token trim image messages. Error:", e)
### DISABLED image trimming
# To maintain the order of messages while simulating trimming, we will iterate through the messages
# and keep only the first 2 and last 2 images, while keeping all non-image messages.
# trimmed_messages = []
# image_counter = 0
# for message in messages:
# if (
# "content" in message
# and isinstance(message["content"], list)
# and len(message["content"]) > 1
# ):
# if message["content"][1]["type"] == "image":
# image_counter += 1
# if (
# image_counter <= 2
# or image_counter
# > len(
# [
# m
# for m in messages
# if m["content"][1]["type"] == "image"
# ]
# )
# - 2
# ):
# # keep message normal
# pass
# else:
# message["content"].pop(1)
# trimmed_messages.append(message)
# messages = trimmed_messages
# Reunite messages with system_message
messages = [{"role": "system", "content": system_message}] + messages
else:
raise
if interpreter.debug_mode:
print("Passing messages into LLM:", messages)
# Create LiteLLM generator
params = {
"model": interpreter.model,
"messages": messages,
"stream": True,
}
# Optional inputs
if interpreter.api_base:
params["api_base"] = interpreter.api_base
if interpreter.api_key:
params["api_key"] = interpreter.api_key
if interpreter.max_tokens:
params["max_tokens"] = interpreter.max_tokens
if interpreter.temperature is not None:
params["temperature"] = interpreter.temperature
else:
params["temperature"] = 0.0
if interpreter.model == "gpt-4-vision-preview":
# We need to go straight to OpenAI for this, LiteLLM doesn't work
return openai.ChatCompletion.create(**params)
# LiteLLM
# These are set directly on LiteLLM
if interpreter.max_budget:
litellm.max_budget = interpreter.max_budget
if interpreter.debug_mode:
litellm.set_verbose = True
# Report what we're sending to LiteLLM
if interpreter.debug_mode:
print("Sending this to LiteLLM:", params)
return litellm.completion(**params)
return base_llm
| [] |
2024-01-10 | Jimyzzp/ChatGPT | src~revChatGPT~V1.py | """
Standard ChatGPT
"""
from __future__ import annotations
import base64
import json
import logging
import os
import os.path as osp
import time
import uuid
from functools import wraps
from os import environ
from os import getenv
import requests
from httpx import AsyncClient
from OpenAIAuth import Authenticator
from OpenAIAuth import Error as AuthError
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(funcName)s - %(message)s",
)
log = logging.getLogger(__name__)
def logger(is_timed: bool):
"""Logger decorator
Args:
is_timed (bool): Whether to include function running time in exit log
Returns:
_type_: decorated function
"""
def decorator(func):
wraps(func)
def wrapper(*args, **kwargs):
log.debug(
"Entering %s with args %s and kwargs %s",
func.__name__,
args,
kwargs,
)
start = time.time()
out = func(*args, **kwargs)
end = time.time()
if is_timed:
log.debug(
"Exiting %s with return value %s. Took %s seconds.",
func.__name__,
out,
end - start,
)
else:
log.debug("Exiting %s with return value %s", func.__name__, out)
return out
return wrapper
return decorator
BASE_URL = environ.get("CHATGPT_BASE_URL") or "https://chatgpt.duti.tech/"
class Error(Exception):
"""
Base class for exceptions in this module.
Error codes:
-1: User error
0: Unknown error
1: Server error
2: Rate limit error
3: Invalid request error
4: Expired access token error
5: Invalid access token error
"""
source: str
message: str
code: int
def __init__(self, source: str, message: str, code: int = 0):
self.source = source
self.message = message
self.code = code
# Code
class Chatbot:
"""
Chatbot class for ChatGPT
"""
@logger(is_timed=True)
def __init__(
self,
config: dict[str, str],
conversation_id: str | None = None,
parent_id: str | None = None,
session_client=None,
lazy_loading: bool = False,
) -> None:
"""Initialize a chatbot
Args:
config (dict[str, str]): Login and proxy info. Example:
{
"email": "OpenAI account email",
"password": "OpenAI account password",
"session_token": "<session_token>"
"access_token": "<access_token>"
"proxy": "<proxy_url_string>",
"paid": True/False, # whether this is a plus account
}
More details on these are available at https://github.com/acheong08/ChatGPT#configuration
conversation_id (str | None, optional): Id of the conversation to continue on. Defaults to None.
parent_id (str | None, optional): Id of the previous response message to continue on. Defaults to None.
session_client (_type_, optional): _description_. Defaults to None.
Raises:
Exception: _description_
"""
user_home = getenv("HOME")
if user_home is None:
self.cache_path = ".chatgpt_cache.json"
else:
# mkdir ~/.config/revChatGPT
if not osp.exists(osp.join(user_home, ".config")):
os.mkdir(osp.join(user_home, ".config"))
if not osp.exists(osp.join(user_home, ".config", "revChatGPT")):
os.mkdir(osp.join(user_home, ".config", "revChatGPT"))
self.cache_path = osp.join(user_home, ".config", "revChatGPT", "cache.json")
self.config = config
self.session = session_client() if session_client else requests.Session()
try:
cached_access_token = self.__get_cached_access_token(
self.config.get("email", None),
)
except Error as error:
if error.code == 5:
raise error
cached_access_token = None
if cached_access_token is not None:
self.config["access_token"] = cached_access_token
if "proxy" in config:
if not isinstance(config["proxy"], str):
raise Exception("Proxy must be a string!")
proxies = {
"http": config["proxy"],
"https": config["proxy"],
}
if isinstance(self.session, AsyncClient):
self.session = AsyncClient(proxies=proxies)
else:
self.session.proxies.update(proxies)
self.conversation_id = conversation_id
self.parent_id = parent_id
self.conversation_mapping = {}
self.conversation_id_prev_queue = []
self.parent_id_prev_queue = []
self.lazy_loading = lazy_loading
self.__check_credentials()
@logger(is_timed=True)
def __check_credentials(self):
"""Check login info and perform login
Any one of the following is sufficient for login. Multiple login info can be provided at the same time and they will be used in the order listed below.
- access_token
- session_token
- email + password
Raises:
Exception: _description_
AuthError: _description_
"""
if "access_token" in self.config:
self.__set_access_token(self.config["access_token"])
elif "session_token" in self.config:
pass
elif "email" in self.config and "password" in self.config:
pass
else:
raise Exception("Insufficient login details provided!")
if "access_token" not in self.config:
try:
self.__login()
except AuthError as error:
raise error
@logger(is_timed=False)
def __set_access_token(self, access_token: str):
"""Set access token in request header and self.config, then cache it to file.
Args:
access_token (str): access_token
"""
self.session.headers.clear()
self.session.headers.update(
{
"Accept": "text/event-stream",
"Authorization": f"Bearer {access_token}",
"Content-Type": "application/json",
"X-Openai-Assistant-App-Id": "",
"Connection": "close",
"Accept-Language": "en-US,en;q=0.9",
"Referer": "https://chat.openai.com/chat",
},
)
self.config["access_token"] = access_token
email = self.config.get("email", None)
if email is not None:
self.__cache_access_token(email, access_token)
@logger(is_timed=False)
def __get_cached_access_token(self, email: str | None) -> str | None:
"""Read access token from cache
Args:
email (str | None): email of the account to get access token
Raises:
Error: _description_
Error: _description_
Error: _description_
Returns:
str | None: access token string or None if not found
"""
email = email or "default"
cache = self.__read_cache()
access_token = cache.get("access_tokens", {}).get(email, None)
# Parse access_token as JWT
if access_token is not None:
try:
# Split access_token into 3 parts
s_access_token = access_token.split(".")
# Add padding to the middle part
s_access_token[1] += "=" * ((4 - len(s_access_token[1]) % 4) % 4)
d_access_token = base64.b64decode(s_access_token[1])
d_access_token = json.loads(d_access_token)
except base64.binascii.Error:
raise Error(
source="__get_cached_access_token",
message="Invalid access token",
code=5,
) from None
except json.JSONDecodeError:
raise Error(
source="__get_cached_access_token",
message="Invalid access token",
code=5,
) from None
exp = d_access_token.get("exp", None)
if exp is not None and exp < time.time():
raise Error(
source="__get_cached_access_token",
message="Access token expired",
code=4,
)
return access_token
@logger(is_timed=False)
def __cache_access_token(self, email: str, access_token: str) -> None:
"""Write an access token to cache
Args:
email (str): account email
access_token (str): account access token
"""
email = email or "default"
cache = self.__read_cache()
if "access_tokens" not in cache:
cache["access_tokens"] = {}
cache["access_tokens"][email] = access_token
self.__write_cache(cache)
@logger(is_timed=False)
def __write_cache(self, info: dict):
"""Write cache info to file
Args:
info (dict): cache info, current format
{
"access_tokens":{"[email protected]": 'this account's access token', }
}
"""
dirname = osp.dirname(self.cache_path) or "."
os.makedirs(dirname, exist_ok=True)
json.dump(info, open(self.cache_path, "w", encoding="utf-8"), indent=4)
@logger(is_timed=False)
def __read_cache(self):
try:
cached = json.load(open(self.cache_path, encoding="utf-8"))
except (FileNotFoundError, json.decoder.JSONDecodeError):
cached = {}
return cached
@logger(is_timed=True)
def __login(self):
if (
"email" not in self.config or "password" not in self.config
) and "session_token" not in self.config:
log.error("Insufficient login details provided!")
raise Exception("Insufficient login details provided!")
auth = Authenticator(
email_address=self.config.get("email"),
password=self.config.get("password"),
proxy=self.config.get("proxy"),
)
if self.config.get("session_token"):
log.debug("Using session token")
auth.session_token = self.config["session_token"]
auth.get_access_token()
if auth.access_token is None:
del self.config["session_token"]
self.__login()
return
else:
log.debug("Using authenticator to get access token")
auth.begin()
self.config["session_token"] = auth.session_token
auth.get_access_token()
self.__set_access_token(auth.access_token)
@logger(is_timed=True)
def ask(
self,
prompt: str,
conversation_id: str | None = None,
parent_id: str | None = None,
timeout: float = 360,
):
"""Ask a question to the chatbot
Args:
prompt (str): The question
conversation_id (str | None, optional): UUID for the conversation to continue on. Defaults to None.
parent_id (str | None, optional): UUID for the message to continue on. Defaults to None.
timeout (float, optional): Timeout for getting the full response, unit is second. Defaults to 360.
Raises:
Error: _description_
Exception: _description_
Error: _description_
Error: _description_
Error: _description_
Yields:
_type_: _description_
"""
if parent_id is not None and conversation_id is None:
log.error("conversation_id must be set once parent_id is set")
raise Error("User", "conversation_id must be set once parent_id is set", -1)
if conversation_id is not None and conversation_id != self.conversation_id:
log.debug("Updating to new conversation by setting parent_id to None")
self.parent_id = None
conversation_id = conversation_id or self.conversation_id
parent_id = parent_id or self.parent_id
if conversation_id is None and parent_id is None:
parent_id = str(uuid.uuid4())
log.debug("New conversation, setting parent_id to new UUID4: %s", parent_id)
if conversation_id is not None and parent_id is None:
if conversation_id not in self.conversation_mapping:
if self.lazy_loading:
log.debug(
"Conversation ID %s not found in conversation mapping, try to get conversation history for the given ID",
conversation_id,
)
try:
history = self.get_msg_history(conversation_id)
self.conversation_mapping[conversation_id] = history[
"current_node"
]
except Exception as error:
pass
else:
log.debug(
"Conversation ID %s not found in conversation mapping, mapping conversations",
conversation_id,
)
self.__map_conversations()
if conversation_id in self.conversation_mapping:
log.debug(
"Conversation ID %s found in conversation mapping, setting parent_id to %s",
conversation_id,
self.conversation_mapping[conversation_id],
)
parent_id = self.conversation_mapping[conversation_id]
else: # invalid conversation_id provided, treat as a new conversation
conversation_id = None
parent_id = str(uuid.uuid4())
data = {
"action": "next",
"messages": [
{
"id": str(uuid.uuid4()),
"role": "user",
"content": {"content_type": "text", "parts": [prompt]},
},
],
"conversation_id": conversation_id,
"parent_message_id": parent_id,
"model": "text-davinci-002-render-sha"
if not self.config.get("paid")
else "text-davinci-002-render-paid",
}
log.debug("Sending the payload")
log.debug(json.dumps(data, indent=2))
self.conversation_id_prev_queue.append(
data["conversation_id"],
)
self.parent_id_prev_queue.append(data["parent_message_id"])
response = self.session.post(
url=BASE_URL + "api/conversation",
data=json.dumps(data),
timeout=timeout,
stream=True,
)
self.__check_response(response)
for line in response.iter_lines():
line = str(line)[2:-1]
if line == "Internal Server Error":
log.error("Internal Server Error: %s", line)
raise Exception("Error: " + str(line))
if line == "" or line is None:
continue
if "data: " in line:
line = line[6:]
if line == "[DONE]":
break
line = line.replace('\\"', '"')
line = line.replace("\\'", "'")
line = line.replace("\\\\", "\\")
try:
line = json.loads(line)
except json.decoder.JSONDecodeError:
continue
if not self.__check_fields(line):
log.error("Field missing", exc_info=True)
if (
line.get("detail")
== "Too many requests in 1 hour. Try again later."
):
log.error("Rate limit exceeded")
raise Error(source="ask", message=line.get("detail"), code=2)
if line.get("detail", {}).get("code") == "invalid_api_key":
log.error("Invalid access token")
raise Error(
source="ask",
message=line.get("detail", {}).get("message"),
code=3,
)
raise Error(source="ask", message="Field missing", code=1)
message = line["message"]["content"]["parts"][0]
if message == prompt:
continue
conversation_id = line["conversation_id"]
parent_id = line["message"]["id"]
try:
model = line["message"]["metadata"]["model_slug"]
except KeyError:
model = None
log.debug("Received message: %s", message)
log.debug("Received conversation_id: %s", conversation_id)
log.debug("Received parent_id: %s", parent_id)
yield {
"message": message,
"conversation_id": conversation_id,
"parent_id": parent_id,
"model": model,
}
self.conversation_mapping[conversation_id] = parent_id
if parent_id is not None:
self.parent_id = parent_id
if conversation_id is not None:
self.conversation_id = conversation_id
@logger(is_timed=False)
def __check_fields(self, data: dict) -> bool:
try:
data["message"]["content"]
except TypeError:
return False
except KeyError:
return False
return True
@logger(is_timed=False)
def __check_response(self, response):
"""Make sure response is success
Args:
response (_type_): _description_
Raises:
Error: _description_
"""
if response.status_code != 200:
print(response.text)
raise Error("OpenAI", response.status_code, response.text)
@logger(is_timed=True)
def get_conversations(
self,
offset: int = 0,
limit: int = 20,
encoding: str | None = None,
):
"""
Get conversations
:param offset: Integer
:param limit: Integer
"""
url = BASE_URL + f"api/conversations?offset={offset}&limit={limit}"
response = self.session.get(url)
self.__check_response(response)
if encoding is not None:
response.encoding = encoding
data = json.loads(response.text)
return data["items"]
@logger(is_timed=True)
def get_msg_history(self, convo_id: str, encoding: str | None = None):
"""
Get message history
:param id: UUID of conversation
:param encoding: String
"""
url = BASE_URL + f"api/conversation/{convo_id}"
response = self.session.get(url)
self.__check_response(response)
if encoding is not None:
response.encoding = encoding
data = json.loads(response.text)
return data
@logger(is_timed=True)
def gen_title(self, convo_id: str, message_id: str):
"""
Generate title for conversation
"""
response = self.session.post(
BASE_URL + f"api/conversation/gen_title/{convo_id}",
data=json.dumps(
{"message_id": message_id, "model": "text-davinci-002-render"},
),
)
self.__check_response(response)
@logger(is_timed=True)
def change_title(self, convo_id: str, title: str):
"""
Change title of conversation
:param id: UUID of conversation
:param title: String
"""
url = BASE_URL + f"api/conversation/{convo_id}"
response = self.session.patch(url, data=json.dumps({"title": title}))
self.__check_response(response)
@logger(is_timed=True)
def delete_conversation(self, convo_id: str):
"""
Delete conversation
:param id: UUID of conversation
"""
url = BASE_URL + f"api/conversation/{convo_id}"
response = self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
@logger(is_timed=True)
def clear_conversations(self):
"""
Delete all conversations
"""
url = BASE_URL + "api/conversations"
response = self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
@logger(is_timed=False)
def __map_conversations(self):
conversations = self.get_conversations()
histories = [self.get_msg_history(x["id"]) for x in conversations]
for x, y in zip(conversations, histories):
self.conversation_mapping[x["id"]] = y["current_node"]
@logger(is_timed=False)
def reset_chat(self) -> None:
"""
Reset the conversation ID and parent ID.
:return: None
"""
self.conversation_id = None
self.parent_id = str(uuid.uuid4())
@logger(is_timed=False)
def rollback_conversation(self, num: int = 1) -> None:
"""
Rollback the conversation.
:param num: Integer. The number of messages to rollback
:return: None
"""
for _ in range(num):
self.conversation_id = self.conversation_id_prev_queue.pop()
self.parent_id = self.parent_id_prev_queue.pop()
class AsyncChatbot(Chatbot):
"""
Async Chatbot class for ChatGPT
"""
def __init__(
self,
config,
conversation_id=None,
parent_id=None,
) -> None:
super().__init__(
config=config,
conversation_id=conversation_id,
parent_id=parent_id,
session_client=AsyncClient,
)
async def ask(
self,
prompt,
conversation_id=None,
parent_id=None,
timeout=360,
):
"""
Ask a question to the chatbot
"""
if parent_id is not None and conversation_id is None:
raise Error("User", "conversation_id must be set once parent_id is set", 1)
if conversation_id is not None and conversation_id != self.conversation_id:
self.parent_id = None
conversation_id = conversation_id or self.conversation_id
parent_id = parent_id or self.parent_id
if conversation_id is None and parent_id is None:
parent_id = str(uuid.uuid4())
if conversation_id is not None and parent_id is None:
if conversation_id not in self.conversation_mapping:
await self.__map_conversations()
parent_id = self.conversation_mapping[conversation_id]
data = {
"action": "next",
"messages": [
{
"id": str(uuid.uuid4()),
"role": "user",
"content": {"content_type": "text", "parts": [prompt]},
},
],
"conversation_id": conversation_id,
"parent_message_id": parent_id,
"model": "text-davinci-002-render-sha"
if not self.config.get("paid")
else "text-davinci-002-render-paid",
}
self.conversation_id_prev_queue.append(
data["conversation_id"],
)
self.parent_id_prev_queue.append(data["parent_message_id"])
async with self.session.stream(
method="POST",
url=BASE_URL + "api/conversation",
data=json.dumps(data),
timeout=timeout,
) as response:
self.__check_response(response)
async for line in response.aiter_lines():
if line == "" or line is None:
continue
if "data: " in line:
line = line[6:]
if "[DONE]" in line:
break
try:
line = json.loads(line)
except json.decoder.JSONDecodeError:
continue
if not self.__check_fields(line):
raise Exception("Field missing. Details: " + str(line))
message = line["message"]["content"]["parts"][0]
conversation_id = line["conversation_id"]
parent_id = line["message"]["id"]
model = (
line["message"]["metadata"]["model_slug"]
if "model_slug" in line["message"]["metadata"]
else None
)
yield {
"message": message,
"conversation_id": conversation_id,
"parent_id": parent_id,
"model": model,
}
self.conversation_mapping[conversation_id] = parent_id
if parent_id is not None:
self.parent_id = parent_id
if conversation_id is not None:
self.conversation_id = conversation_id
async def get_conversations(self, offset=0, limit=20):
"""
Get conversations
:param offset: Integer
:param limit: Integer
"""
url = BASE_URL + f"api/conversations?offset={offset}&limit={limit}"
response = await self.session.get(url)
self.__check_response(response)
data = json.loads(response.text)
return data["items"]
async def get_msg_history(self, convo_id, encoding="utf-8"):
"""
Get message history
:param id: UUID of conversation
"""
url = BASE_URL + f"api/conversation/{convo_id}"
response = await self.session.get(url)
if encoding is not None:
response.encoding = encoding
self.__check_response(response)
data = json.loads(response.text)
return data
async def gen_title(self, convo_id, message_id):
"""
Generate title for conversation
"""
url = BASE_URL + f"api/conversation/gen_title/{convo_id}"
response = await self.session.post(
url,
data=json.dumps(
{"message_id": message_id, "model": "text-davinci-002-render"},
),
)
await self.__check_response(response)
async def change_title(self, convo_id, title):
"""
Change title of conversation
:param convo_id: UUID of conversation
:param title: String
"""
url = BASE_URL + f"api/conversation/{convo_id}"
response = await self.session.patch(url, data=f'{{"title": "{title}"}}')
self.__check_response(response)
async def delete_conversation(self, convo_id):
"""
Delete conversation
:param convo_id: UUID of conversation
"""
url = BASE_URL + f"api/conversation/{convo_id}"
response = await self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
async def clear_conversations(self):
"""
Delete all conversations
"""
url = BASE_URL + "api/conversations"
response = await self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
async def __map_conversations(self):
conversations = await self.get_conversations()
histories = [await self.get_msg_history(x["id"]) for x in conversations]
for x, y in zip(conversations, histories):
self.conversation_mapping[x["id"]] = y["current_node"]
def __check_fields(self, data: dict) -> bool:
try:
data["message"]["content"]
except TypeError:
return False
except KeyError:
return False
return True
def __check_response(self, response):
response.raise_for_status()
@logger(is_timed=False)
def get_input(prompt):
"""
Multiline input function.
"""
print(prompt, end="")
lines = []
while True:
line = input()
if line == "":
break
lines.append(line)
user_input = "\n".join(lines)
return user_input
@logger(is_timed=False)
def configure():
"""
Looks for a config file in the following locations:
"""
config_files = ["config.json"]
xdg_config_home = getenv("XDG_CONFIG_HOME")
if xdg_config_home:
config_files.append(f"{xdg_config_home}/revChatGPT/config.json")
user_home = getenv("HOME")
if user_home:
config_files.append(f"{user_home}/.config/revChatGPT/config.json")
config_file = next((f for f in config_files if osp.exists(f)), None)
if config_file:
with open(config_file, encoding="utf-8") as f:
config = json.load(f)
else:
print("No config file found.")
raise Exception("No config file found.")
return config
@logger(is_timed=False)
def main(config: dict):
"""
Main function for the chatGPT program.
"""
print("Logging in...")
chatbot = Chatbot(
config,
conversation_id=config.get("conversation_id"),
parent_id=config.get("parent_id"),
)
def handle_commands(command: str) -> bool:
if command == "!help":
print(
"""
!help - Show this message
!reset - Forget the current conversation
!config - Show the current configuration
!rollback x - Rollback the conversation (x being the number of messages to rollback)
!exit - Exit this program
!setconversation - Changes the conversation
""",
)
elif command == "!reset":
chatbot.reset_chat()
print("Chat session successfully reset.")
elif command == "!config":
print(json.dumps(chatbot.config, indent=4))
elif command.startswith("!rollback"):
try:
rollback = int(command.split(" ")[1])
except IndexError:
logging.exception(
"No number specified, rolling back 1 message",
stack_info=True,
)
rollback = 1
chatbot.rollback_conversation(rollback)
print(f"Rolled back {rollback} messages.")
elif command.startswith("!setconversation"):
try:
chatbot.conversation_id = chatbot.config[
"conversation_id"
] = command.split(" ")[1]
print("Conversation has been changed")
except IndexError:
log.exception(
"Please include conversation UUID in command",
stack_info=True,
)
print("Please include conversation UUID in command")
elif command == "!exit":
exit(0)
else:
return False
return True
while True:
prompt = get_input("\nYou:\n")
if prompt.startswith("!"):
if handle_commands(prompt):
continue
print("Chatbot: ")
prev_text = ""
for data in chatbot.ask(prompt):
message = data["message"][len(prev_text) :]
print(message, end="", flush=True)
prev_text = data["message"]
print()
if __name__ == "__main__":
print(
"""
ChatGPT - A command-line interface to OpenAI's ChatGPT (https://chat.openai.com/chat)
Repo: github.com/acheong08/ChatGPT
""",
)
print("Type '!help' to show a full list of commands")
print("Press enter TWICE to submit your question.\n")
main(configure())
| [
"text",
"\nYou:\n",
"content_type"
] |
2024-01-10 | atarora/uplimit_openai_course | podcast_backend.py | import modal
def download_whisper():
# Load the Whisper model
import os
import whisper
print ("Download the Whisper model")
# Perform download only once and save to Container storage
whisper._download(whisper._MODELS["medium"], '/content/podcast/', False)
stub = modal.Stub("corise-podcast-project")
corise_image = modal.Image.debian_slim().pip_install("feedparser",
"https://github.com/openai/whisper/archive/9f70a352f9f8630ab3aa0d06af5cb9532bd8c21d.tar.gz",
"requests",
"ffmpeg",
"openai",
"tiktoken",
"wikipedia",
"ffmpeg-python").apt_install("ffmpeg").run_function(download_whisper)
@stub.function(image=corise_image, gpu="any", timeout=600)
def get_transcribe_podcast(rss_url, local_path):
print ("Starting Podcast Transcription Function")
print ("Feed URL: ", rss_url)
print ("Local Path:", local_path)
# Read from the RSS Feed URL
import feedparser
podcast_rss = feedparser.parse(rss_url)
podcast_title = podcast_rss['feed']['title']
episode_title = podcast_rss.entries[0]['title']
episode_image = podcast_rss['feed']['image'].href
for item in podcast_rss.entries[0].links:
if (item['type'] == 'audio/mpeg'):
episode_url = item.href
episode_name = "podcast_episode.mp3"
print ("RSS URL read and episode URL: ", episode_url)
# Download the podcast episode by parsing the RSS feed
from pathlib import Path
p = Path(local_path)
p.mkdir(exist_ok=True)
print ("Downloading the podcast episode")
import requests
with requests.get(episode_url, stream=True) as r:
r.raise_for_status()
episode_path = p.joinpath(episode_name)
with open(episode_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
print ("Podcast Episode downloaded")
# Load the Whisper model
import os
import whisper
# Load model from saved location
print ("Load the Whisper model")
model = whisper.load_model('medium', device='cuda', download_root='/content/podcast/')
# Perform the transcription
print ("Starting podcast transcription")
result = model.transcribe(local_path + episode_name)
# Return the transcribed text
print ("Podcast transcription completed, returning results...")
output = {}
output['podcast_title'] = podcast_title
output['episode_title'] = episode_title
output['episode_image'] = episode_image
output['episode_transcript'] = result['text']
return output
@stub.function(image=corise_image, secret=modal.Secret.from_name("my-openai-secret"))
def get_podcast_summary(podcast_transcript):
import openai
instructPrompt = """
You will be provided with a podcast transcript about how to stay happy in life. You have to understand the content and context of the podcast
and summarize it.
Please provide a concise summary by using the provided podcast transcript.
"""
request = instructPrompt + podcast_transcript
chatOutput = openai.ChatCompletion.create(model="gpt-3.5-turbo-16k",
messages=[{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": request}
]
)
podcastSummary = chatOutput.choices[0].message.content
return podcastSummary
@stub.function(image=corise_image, secret=modal.Secret.from_name("my-openai-secret"))
def get_podcast_guest(podcast_transcript):
import openai
import wikipedia
import json
request = podcast_transcript[:10000]
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": request}],
functions=[
{
"name": "get_podcast_guest_information",
"description": "Get information on the podcast guest using their full name",
"parameters": {
"type": "object",
"properties": {
"guest_name": {
"type": "string",
"description": "The full name of the guest speaker in the podcast",
}
},
"required": ["guest_name"],
},
}
],
function_call={"name": "get_podcast_guest_information"}
)
podcast_guest = ""
response_message = completion["choices"][0]["message"]
if response_message.get("function_call"):
function_name = response_message["function_call"]["name"]
function_args = json.loads(response_message["function_call"]["arguments"])
podcast_guest=function_args.get("guest_name")
print("Guest Name: ",podcast_guest)
if not podcast_guest:
return "Unknown"
else:
input = wikipedia.page(podcast_guest, auto_suggest=True)
if input is not None:
podcastGuest=input.summary
else:
print("Guest not found")
podcastGuest = podcast_guest
#podcastBlankGuestJson = '{"summary": "Summary unknown"}'
#podcastGuest=json.loads(podcastBlankGuestJson,object_hook=lambda d: SimpleNamespace(**d))
return podcastGuest
@stub.function(image=corise_image, secret=modal.Secret.from_name("my-openai-secret"))
def get_podcast_highlights(podcast_transcript):
import openai
instructPrompt = """
You will be provided with a podcast transcript. You have to understand the content and context of the podcast,
summarize it and extract key moments or catchy insights from the guest or interesting conversation or questions asked during the podcast.
Provide the result in the following format:
Summary:
Key Moments:
"""
request = instructPrompt + podcast_transcript
chatOutput = openai.ChatCompletion.create(model="gpt-3.5-turbo-16k",
messages=[{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": request}
]
)
podcastHighlights = chatOutput.choices[0].message.content
return podcastHighlights
@stub.function(image=corise_image, secret=modal.Secret.from_name("my-openai-secret"), timeout=1200)
def process_podcast(url, path):
output = {}
podcast_details = get_transcribe_podcast.remote(url, path)
podcast_summary = get_podcast_summary.remote(podcast_details['episode_transcript'])
podcast_guest = get_podcast_guest.remote(podcast_details['episode_transcript'])
podcast_highlights = get_podcast_highlights.remote(podcast_details['episode_transcript'])
output['podcast_details'] = podcast_details
output['podcast_summary'] = podcast_summary
output['podcast_guest'] = podcast_guest
output['podcast_highlights'] = podcast_highlights
return output
@stub.local_entrypoint()
def test_method(url, path):
output = {}
podcast_details = get_transcribe_podcast.remote(url, path)
print ("Podcast Summary: ", get_podcast_summary.remote(podcast_details['episode_transcript']))
print ("Podcast Guest Information: ", get_podcast_guest.remote(podcast_details['episode_transcript']))
print ("Podcast Highlights: ", get_podcast_highlights.remote(podcast_details['episode_transcript']))
| [
"\n You will be provided with a podcast transcript about how to stay happy in life. You have to understand the content and context of the podcast\n and summarize it.\n Please provide a concise summary by using the provided podcast transcript.\n ",
"You are a helpful assistant.",
"\n You will be provided with a podcast transcript. You have to understand the content and context of the podcast,\n summarize it and extract key moments or catchy insights from the guest or interesting conversation or questions asked during the podcast.\n\n Provide the result in the following format:\n\n Summary:\n\n Key Moments:\n "
] |
2024-01-10 | willfchen/daily_action | action.py | import csv
import random
import os
from openai import OpenAI
# 从环境变量获取 API 密钥
api_key = os.environ.get("OPENAI_API_KEY")
client = OpenAI(
# defaults to os.environ.get("OPENAI_API_KEY")
api_key=api_key,
)
def generate_sentence(word1, definition1, word2, definition2, word3, definition3):
try:
prompt = f"我正在通过句子学习英文单词,请你给出在一个句子中包含 '{word1}' 和 '{word2}' 和 '{word3}' 的英文,尽量口语化, 然后给出中文翻译,\
并为句子中高于初中水平的所有单词提供国际音标、英文解释和中文解释"
chat_completion = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a english teacher."},
{"role": "user", "content": prompt},
]
)
# 获取响应内容
sentence = chat_completion.choices[0].message.content
return f"{sentence}"
except Exception as e:
return f"Error generating sentence: {e}"
def read_learned_words(filename="learned.txt"):
try:
with open(filename, "r", encoding="utf-8") as file:
learned_words = set(word.strip() for word in file.readlines())
except FileNotFoundError:
learned_words = set()
return learned_words
def update_learned_words(words, filename="learned.txt"):
with open(filename, "a", encoding="utf-8") as file:
for word in words:
file.write(word + "\n")
def update_sentences(sentence, filename="sentence.txt"):
with open(filename, "a", encoding="utf-8") as file:
file.write(sentence + "\n\n")
def read_csv_and_generate_sentences(csv_file, learned_words):
with open(csv_file, newline='', encoding='utf-8') as file:
reader = csv.reader(file)
words = [row for row in reader if row[0] not in learned_words]
if len(words) < 3:
print("Not enough new words to generate a sentence.")
return
selected_words = random.sample(words, 3)
sentence = generate_sentence(*sum(selected_words, []))
print(sentence)
# 更新学习的单词和句子
new_learned_words = [word[0] for word in selected_words]
update_learned_words(new_learned_words)
update_sentences(sentence)
# 读取已学习的单词
learned_words = read_learned_words()
# 调用函数生成句子
read_csv_and_generate_sentences('english.csv', learned_words) | [
"我正在通过句子学习英文单词,请你给出在一个句子中包含 'PLACEHOLDER' 和 'PLACEHOLDER' 和 'PLACEHOLDER' 的英文,尽量口语化, 然后给出中文翻译, \t并为句子中高于初中水平的所有单词提供国际音标、英文解释和中文解释",
"You are a english teacher."
] |
2024-01-10 | 360macky/webrewind | api~get-wayback-url.py | import json
import os
import requests
from http.server import BaseHTTPRequestHandler
from urllib.parse import parse_qs, quote, urlparse
import random
import openai
from ratelimiter import RateLimiter
openai.api_key = os.environ.get("OPENAI_API_KEY", "")
# Define the rate limit (e.g., 10 requests per minute)
rate_limiter = RateLimiter(max_calls=10, period=60)
def get_image_id():
unique_id = "".join([str(random.randint(0, 9)) for _ in range(12)])
return unique_id
def generate_unique_image_path(unique_id):
file_path = os.path.join("images", f"{unique_id}.jpg")
return file_path
def format_s3_url(image_path):
s3_base_url = "https://webrewind.s3.sa-east-1.amazonaws.com/"
full_url = f"{s3_base_url}{image_path}"
return full_url
def get_image_url(url):
# Request APIFlash to get the URL of the image captured
api_url = "https://api.apiflash.com/v1/urltoimage"
image_id = get_image_id()
access_key = os.environ.get("FLASHAPI_ACCESS_KEY", "")
image_path = generate_unique_image_path(image_id)
params = {
"access_key": access_key,
"url": url,
"format": "jpeg",
"response_type": "json",
"css": "div#wm-ipp-base{opacity:0}",
"s3_access_key_id": os.environ.get("S3_ACCESS_KEY_ID", ""),
"s3_secret_key": os.environ.get("S3_SECRET_ACCESS_KEY", ""),
"s3_bucket": "webrewind",
"s3_key": image_path
}
response = requests.get(api_url, params=params)
data = response.json()
image_url = format_s3_url(image_path)
return image_url
def moderate_text(text):
"""
Check if the text violates OpenAI's usage policies using the Moderation API.
"""
response = openai.Moderation.create(input=text)
result = response["results"][0]
return result["flagged"]
class handler(BaseHTTPRequestHandler):
"""
Handle the GET request to the API.
"""
def do_GET(self):
# Apply rate limiting
with rate_limiter:
# Parse the query parameters
query_params = parse_qs(urlparse(self.path).query)
url = query_params.get('url', [''])[0]
timestamp = query_params.get('timestamp', [''])[0]
# Check if the URL content violates OpenAI's usage policies using the Moderation API
if moderate_text(url):
self.send_response(400)
self.send_header('Content-type', 'application/json')
self.end_headers()
error_json = json.dumps({"error": "URL content violates OpenAI's usage policies."})
self.wfile.write(bytes(error_json, "utf8"))
return
# Call the Wayback Machine API
api_url = f'https://archive.org/wayback/available?url={url}×tamp={timestamp}'
response = requests.get(api_url)
data = response.json()
# Extract the wayback_url
wayback_url = ''
if 'archived_snapshots' in data and 'closest' in data['archived_snapshots']:
wayback_url = data['archived_snapshots']['closest']['url']
if wayback_url:
image_url = get_image_url(wayback_url)
else:
image_url = ""
# Send the response
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
response_json = json.dumps({"image_url": image_url})
self.wfile.write(bytes(response_json, "utf8"))
return
| [] |
2024-01-10 | ggoonnzzaallo/llm_experiments | button.py | from PyQt5.QtWidgets import QApplication, QPushButton, QVBoxLayout, QWidget, QLineEdit, QLabel
import sys
# Import necessary libraries and functions (Same as in your original code)
import requests
import pyaudio
import soundfile as sf
import io
import time
from dotenv import load_dotenv
from openai import OpenAI
from pydub import AudioSegment
from pydub.playback import play
import pydub
import pygame
load_dotenv()
def streamed_audio(input_text, model='tts-1', voice='alloy'):
start_time = time.time()
# OpenAI API endpoint and parameters
url = "https://api.openai.com/v1/audio/speech"
headers = {
"Authorization": 'Bearer YOUR_API_KEY', # Replace with your API key
}
data = {
"model": model,
"input": input_text,
"voice": voice,
"response_format": "opus",
}
audio = pyaudio.PyAudio()
def get_pyaudio_format(subtype):
if subtype == 'PCM_16':
return pyaudio.paInt16
return pyaudio.paInt16
with requests.post(url, headers=headers, json=data, stream=True) as response:
if response.status_code == 200:
buffer = io.BytesIO()
for chunk in response.iter_content(chunk_size=4096):
buffer.write(chunk)
buffer.seek(0)
with sf.SoundFile(buffer, 'r') as sound_file:
format = get_pyaudio_format(sound_file.subtype)
channels = sound_file.channels
rate = sound_file.samplerate
stream = audio.open(format=format, channels=channels, rate=rate, output=True)
chunk_size = 1024
data = sound_file.read(chunk_size, dtype='int16')
print(f"Time to play: {time.time() - start_time} seconds")
while len(data) > 0:
stream.write(data.tobytes())
data = sound_file.read(chunk_size, dtype='int16')
stream.stop_stream()
stream.close()
else:
print(f"Error: {response.status_code} - {response.text}")
audio.terminate()
return f"Time to play: {time.time() - start_time} seconds"
# Example usage
#print(play_text_as_audio("Nuclear energy is clean energy!"))
def not_streamed(input_text, model='tts-1', voice='alloy'):
start_time = time.time()
# Initialize Pygame Mixer
pygame.mixer.init()
client = OpenAI()
response = client.audio.speech.create(
model=model,
voice=voice,
input=input_text,
)
response.stream_to_file("output.opus")
# Load and play the audio file
pygame.mixer.music.load('output.opus')
print(f"Time to play: {time.time() - start_time} seconds")
pygame.mixer.music.play()
# Loop to keep the script running during playback
while pygame.mixer.music.get_busy():
pygame.time.Clock().tick(10)
# # Example usage
# print(play_text_as_audio("Nuclear energy is clean energy!"))
def run_streamed():
input_text = text_box.text()
streamed_audio(input_text) # Call the streamed_audio function with input text
def run_not_streamed():
input_text = text_box.text()
not_streamed(input_text) # Call the not_streamed function with input text
app = QApplication(sys.argv)
window = QWidget()
window.setWindowTitle('Text to Speech')
layout = QVBoxLayout()
# Caption
caption = QLabel('Text to Speech')
layout.addWidget(caption)
# Textbox
text_box = QLineEdit()
layout.addWidget(text_box)
# Streamed Button
streamed_button = QPushButton('Streamed')
streamed_button.clicked.connect(run_streamed) # Link button click to streamed_audio function
layout.addWidget(streamed_button)
# Not Streamed Button
not_streamed_button = QPushButton('Not Streamed')
not_streamed_button.clicked.connect(run_not_streamed) # Link button click to not_streamed function
layout.addWidget(not_streamed_button)
window.setLayout(layout)
window.show()
sys.exit(app.exec_()) | [] |
2024-01-10 | Spread0x/langchain | langchain~chains~combine_documents~map_reduce.py | """Combining documents by mapping a chain over them first, then combining results."""
from __future__ import annotations
from typing import Any, Callable, Dict, List
from pydantic import BaseModel, Extra, root_validator
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.docstore.document import Document
def _split_list_of_docs(
docs: List[Document], length_func: Callable, token_max: int, **kwargs: Any
) -> List[List[Document]]:
new_result_doc_list = []
_sub_result_docs = []
for doc in docs:
_sub_result_docs.append(doc)
_num_tokens = length_func(_sub_result_docs, **kwargs)
if _num_tokens > token_max:
if len(_sub_result_docs) == 1:
raise ValueError(
"A single document was longer than the context length,"
" we cannot handle this."
)
if len(_sub_result_docs) == 2:
raise ValueError(
"A single document was so long it could not be combined "
"with another document, we cannot handle this."
)
new_result_doc_list.append(_sub_result_docs[:-1])
_sub_result_docs = _sub_result_docs[-1:]
new_result_doc_list.append(_sub_result_docs)
return new_result_doc_list
def _collapse_docs(
docs: List[Document],
combine_document_func: Callable,
**kwargs: Any,
) -> Document:
result = combine_document_func(docs, **kwargs)
combined_metadata = {k: str(v) for k, v in docs[0].metadata.items()}
for doc in docs[1:]:
for k, v in doc.metadata.items():
if k in combined_metadata:
combined_metadata[k] += f", {v}"
else:
combined_metadata[k] = str(v)
return Document(page_content=result, metadata=combined_metadata)
class MapReduceDocumentsChain(BaseCombineDocumentsChain, BaseModel):
"""Combining documents by mapping a chain over them, then combining results."""
llm_chain: LLMChain
"""Chain to apply to each document individually.."""
combine_document_chain: BaseCombineDocumentsChain
"""Chain to use to combine results of applying llm_chain to documents."""
document_variable_name: str
"""The variable name in the llm_chain to put the documents in.
If only one variable in the llm_chain, this need not be provided."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def get_default_document_variable_name(cls, values: Dict) -> Dict:
"""Get default document variable name, if not provided."""
if "document_variable_name" not in values:
llm_chain_variables = values["llm_chain"].prompt.input_variables
if len(llm_chain_variables) == 1:
values["document_variable_name"] = llm_chain_variables[0]
else:
raise ValueError(
"document_variable_name must be provided if there are "
"multiple llm_chain input_variables"
)
else:
llm_chain_variables = values["llm_chain"].prompt.input_variables
if values["document_variable_name"] not in llm_chain_variables:
raise ValueError(
f"document_variable_name {values['document_variable_name']} was "
f"not found in llm_chain input_variables: {llm_chain_variables}"
)
return values
def combine_docs(
self, docs: List[Document], token_max: int = 3000, **kwargs: Any
) -> str:
"""Combine documents in a map reduce manner.
Combine by mapping first chain over all documents, then reducing the results.
This reducing can be done recursively if needed (if there are many documents).
"""
results = self.llm_chain.apply(
# FYI - this is parallelized and so it is fast.
[{**{self.document_variable_name: d.page_content}, **kwargs} for d in docs]
)
question_result_key = self.llm_chain.output_key
result_docs = [
Document(page_content=r[question_result_key], metadata=docs[i].metadata)
# This uses metadata from the docs, and the textual results from `results`
for i, r in enumerate(results)
]
length_func = self.combine_document_chain.prompt_length
num_tokens = length_func(result_docs, **kwargs)
while num_tokens is not None and num_tokens > token_max:
new_result_doc_list = _split_list_of_docs(
result_docs, length_func, token_max, **kwargs
)
result_docs = []
for docs in new_result_doc_list:
new_doc = _collapse_docs(
docs, self.combine_document_chain.combine_docs, **kwargs
)
result_docs.append(new_doc)
num_tokens = self.combine_document_chain.prompt_length(
result_docs, **kwargs
)
output = self.combine_document_chain.combine_docs(result_docs, **kwargs)
return output
| [] |
2024-01-10 | bmcclanahan/JupyterLLMAgents | chat_agent.py | from IPython.core.magic import magics_class, register_line_cell_magic, Magics
import ast
import argparse
import astor
import folium
import pandas as pd
import re
import os
from copy import deepcopy
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.llms import OpenAI
from langchain.tools import Tool
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction
from langchain.utilities import GoogleSerperAPIWrapper
from typing import Any, Optional, Dict, List, Union
from constants import WHITELISTED_LIBRARIES, WHITELISTED_BUILTINS
class ChatAgentCallbackHandler(BaseCallbackHandler):
def __init__(self):
self.descriptions = []
self.agent_action = None
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
print("on action")
index = action.log.find("Action:")
self.agent_action = action
if index != -1:
self.descriptions.append(action.log[:index].strip())
@magics_class
class ChatAgentMagics(Magics):
def __init__(self):
# super(ChatAgentMagics, self).__init__(shell) uncomment this when making this a proper jupyter extension loaded with %load_ext
self.__agent_input = {}
self.__llm = OpenAI(temperature=0)
tools = (
load_tools(["google-serper"], llm=self.__llm)
if "SERPER_API_KEY" in os.environ
else []
)
self.__tools = tools + [
Tool.from_function(
func=self.python_execution,
name="pythonCodeExecution",
description="Tool used to execute Python code. Input should be python code containing statements to derive answers to questions or solutions to instructions. The input code should store the answer in variable named result unless instructed otherwise. The tool may return feedback from the user on the input code. If the result is a numeric value be sure to assign it to a variable with proper formatting without commas, dollar signs, percent symbols or any other symbol.",
),
Tool.from_function(
func=self.plot_folium_map,
name="mapPlottingTool",
description="Tool used to plot markers on a map. Input to the tool should be the name of a Pandas dataframe that has the columns name, latitude, and longitude.",
),
]
self.__agent = initialize_agent(
self.__tools,
self.__llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
max_iterations=3,
)
self.__callback_handler = ChatAgentCallbackHandler()
self.__noninteractive = False
self.__verbose = False
self.__last_key = None
self.__return_type_list = [folium.folium.Map]
def is_df_overwrite(self, node: ast.stmt) -> str:
"""
Remove df declarations from the code to prevent malicious code execution. A helper method.
Args:
node (object): ast.stmt
Returns (str):
"""
return (
isinstance(node, ast.Assign)
and isinstance(node.targets[0], ast.Name)
and re.match(r"df\d{0,2}$", node.targets[0].id)
)
def is_unsafe_import(self, node: ast.stmt) -> bool:
"""Remove non-whitelisted imports from the code to prevent malicious code execution
Args:
node (object): ast.stmt
Returns (bool): A flag if unsafe_imports found.
"""
return isinstance(node, (ast.Import, ast.ImportFrom)) and any(
alias.name not in WHITELISTED_LIBRARIES for alias in node.names
)
def clean_code(self, code: str) -> str:
"""
A method to clean the code to prevent malicious code execution
Args:
code(str): A python code
Returns (str): Returns a Clean Code String
"""
tree = ast.parse(code)
new_body = [
node
for node in tree.body
if not (self.is_unsafe_import(node) or self.is_df_overwrite(node))
]
new_tree = ast.Module(body=new_body)
return astor.to_source(new_tree).strip()
def add_agent_input(
self,
input_var: Any,
name: str,
description: str,
rows: int = 5,
include_df_head: bool = False,
):
if type(input_var) == pd.DataFrame and include_df_head:
description += f"""
This is the result of `print(df.head({rows}))`
{input_var.head(rows)}
"""
self.__agent_input[name] = {"value": input_var, "description": description}
def delete_agent_input(self, key: Union[List[str], str]):
if type(key) == list:
keys = key
else:
keys = [key]
for item in keys:
if item in self.__agent_input:
del self.__agent_input[item]
else:
print(f"Key {item} not found in agent input.")
def set_agent_input(self, agent_input: dict):
self.__agent_input = agent_input
def get_agent_input(self):
return self.__agent_input
def get_input_key(self, key_val: str):
num = len([key for key in self.__agent_input if key_val in key])
input_key = key_val + (str(num + 1) if num > 0 else "")
return input_key
def plot_folium_map(self, df_name: str, limit=20):
try:
match = re.match(
"mapPlottingTool\(([^\W0-9]\w*)\)", df_name
) # match incorrect inputs to the tool that use the tool name passing the dataframe as an argument to the tool.
if match:
df_name = match.group(1)
if df_name in self.__agent_input:
input_df = self.__agent_input[df_name]["value"]
if type(input_df) == pd.Series:
input_df = input_df.to_frame().T
if "latitude" in input_df.index:
input_df = input_df.T
latitude = (input_df.latitude.max() + input_df.latitude.min()) / 2.0
longitude = (input_df.longitude.max() + input_df.longitude.min()) / 2.0
m = folium.Map(location=[latitude, longitude], zoom_start=12)
if input_df.index.shape[0] > limit:
print(
"dataframe has rows greater than limit of {limit}: {input_df.shape[0]}"
)
print("mapping only the first {limit} rows")
for index in input_df.index[:limit]:
folium.Marker(
input_df.loc[index][["latitude", "longitude"]],
tooltip=input_df.loc[index]["name"],
).add_to(m)
# result_key = self.get_input_key(f"{df_name}_map") prevents overwritting but adds prompt complexity
result_key = f"{df_name}_map"
self.__agent_input[result_key] = {
"value": m,
"description": f"map for dataframe {df_name}",
}
self.__last_key = result_key
return f"map for {df_name} created"
else:
return f"name {df_name} not available in environment"
except Exception as e:
return f"tool failed with following error: {e}"
def python_execution(self, analysis_code: str):
last_character = self.__callback_handler.agent_action.log.strip()[-1]
if last_character == '"' and not analysis_code.endswith('"'):
analysis_code += '"' # replace missing quotes that langchain strips
try:
analysis_code = self.clean_code(analysis_code)
print()
if self.__verbose:
print("input code")
print(analysis_code)
user_feedback = ""
if not self.__noninteractive:
prompt = f"""
The change agent would like to run the following code:
--------------------------------------------------------
{analysis_code}
--------------------------------------------------------
To allow execution type Y or type N to disallow.
You may give additional feedback for either option by placing a dash after the option followed by the feedback. For example:
Y - this code answers my original question
or
N - this code does not produce the right answer
"""
feedback_retrieved = False
while not feedback_retrieved:
try:
user_input = input(prompt)
user_input = user_input.strip().split("-")
first_input = user_input[0].strip().lower()
if first_input not in ("y", "n"):
raise ValueError("Must enter Y or N")
if len(user_input) > 1:
user_feedback = " - ".join(user_input[1:])
if first_input == "n":
response_end = (
"most likely because it doesn't achieve the desired result."
if len(user_feedback) == 0
else f" and has the following feedback: {user_feedback}"
)
return f"The user disallowed execution of the code{response_end}"
feedback_retrieved = True
except ValueError as e:
print(e)
pass
input_environment = {
key: self.__agent_input[key]["value"] for key in self.__agent_input
}
environment = {
**input_environment,
"__builtins__": {
**{
builtin: __builtins__[builtin]
for builtin in WHITELISTED_BUILTINS
},
},
}
exec(analysis_code, environment)
code_parse = ast.parse(analysis_code, mode="exec")
key_val = None
if type(code_parse.body[-1]) == ast.Assign:
if self.__verbose:
print(
"The variable `result` was not found in executing environment. Using the assignment on the last code line instead for the result."
)
key_val = code_parse.body[-1].targets[0].id
result = environment[key_val]
else:
return "complete. No assignment operation found in last lines of code."
# result_key = self.get_input_key(key_val)
result_key = key_val
description = f'object of type {type(result)} related to the thought "{self.__callback_handler.descriptions[-1]}"'
if type(result) == pd.DataFrame:
description += (
f". The dataframe has the columns {result.columns.values}"
)
print("saving result to agent input ", result_key)
self.__agent_input[result_key] = {
"value": result,
"description": description,
}
response_end = (
""
if len(user_feedback) == 0
else f" - The user has the following feedback: {user_feedback}"
)
self.__last_key = result_key
return (
f"Answer has been successfully derived. Key: {result_key}{response_end}"
if not type(result) == str
else result + response_end
)
except Exception as e:
return f"execution failed with the error message: {str(e)}"
def chat_agent(self, line: Optional[str], cell: Optional[str] = None):
"Magic that works as %%chat_agent"
options = list(filter(lambda x: len(x) != 0, line.strip().split(" ")))
parser = argparse.ArgumentParser(description="chat agent options")
parser.add_argument(
"--noninteractive",
"-n",
action="store_true",
help="runs the agent in a non interactive mode where the user is not prompted for input",
default=False,
required=False,
)
parser.add_argument(
"--verbose",
"-v",
action="store_true",
help="verbose option",
default=False,
required=False,
)
args = parser.parse_args(options)
self.__noninteractive = args.noninteractive
self.__verbose = args.verbose
available_variables = "\n\n".join(
[
key + " - " + self.__agent_input[key]["description"]
for key in self.__agent_input
]
)
cell = (
cell
+ (
"""\nWhen using the pythonCodeExecution tool you may assume that you have access to the following variables when writing the code:
"""
if len(self.__agent_input) > 0
else ""
)
+ available_variables
)
cell = cell.strip()
print("Prompt:")
print(cell)
response = self.__agent.run(cell, callbacks=[self.__callback_handler])
if (
type(self.__agent_input[self.__last_key]["value"])
in self.__return_type_list
):
return self.__agent_input[self.__last_key]["value"]
return response
chat_agent_magic = ChatAgentMagics()
set_inputs = chat_agent_magic.set_agent_input
get_inputs = chat_agent_magic.get_agent_input
add_agent_input = chat_agent_magic.add_agent_input
delete_agent_input = chat_agent_magic.delete_agent_input
def get_result(key: str):
return get_inputs()[key]["value"]
register_line_cell_magic(chat_agent_magic.chat_agent)
# def load_ipython_extension(ipython):
# ipython.register_magics(chat_agent_magic)
| [
"\n \n The change agent would like to run the following code:\n \n --------------------------------------------------------\n PLACEHOLDER\n --------------------------------------------------------\n \n To allow execution type Y or type N to disallow.\n You may give additional feedback for either option by placing a dash after the option followed by the feedback. For example:\n Y - this code answers my original question\n or\n N - this code does not produce the right answer\n \n "
] |
2024-01-10 | phkhanhtrinh23/translation_layoutrecovery | evaluate~src~models~nmt~gpt_model.py | import time
import os
import psutil
import openai
from dotenv import load_dotenv
from nltk.translate.bleu_score import sentence_bleu
from src.utils.metric import Metric
load_dotenv()
class OpenAITranslator:
def __init__(self, engine="text-davinci-002"):
self.api_key = os.getenv("OPENAI_API_KEY")
self.engine = engine
openai.api_key = self.api_key
def translate(self, text):
"""
Translates the given text to Vietnamese using the OpenAI translation API.
Parameters:
text (str): The text to be translated.
Returns:
tuple: A tuple containing the translated text (str) and the execution time (float).
"""
start_time = time.time()
response = openai.Completion.create(
engine=self.engine,
prompt=f"Translate the following text to Vietnamese: {text}",
max_tokens=1000
)
translated_text = response.choices[0].text.strip()
end_time = time.time()
execution_time = end_time - start_time
return translated_text, execution_time
| [
"Translate the following text to Vietnamese: PLACEHOLDER"
] |
2024-01-10 | phkhanhtrinh23/translation_layoutrecovery | evaluate~main-nmt.py | from src.models.nmt.gpt_model import OpenAITranslator
from src.models.nmt.envit5_model import EnVit5Translator
from src.utils.metric import Metric
if __name__=="__main__":
text = "We're on a journey to advance and democratize artificial intelligence through open source and open science."
gt = "Chúng tôi đang trên hành trình thúc đẩy và dân chủ hóa trí tuệ nhân tạo thông qua mã nguồn mở và khoa học mở."
translator = OpenAITranslator()
# translator = EnVit5Translator()
translated_text, execution_time = translator.translate(text)
bleu_score = Metric.calculate_bleu_score(gt, translated_text)
memory_usage = Metric.calculate_memory_usage()
print("Translated Text:", translated_text)
print("Execution Time:", execution_time, "seconds")
print("BLEU Score:", bleu_score)
print("Memory Usage:", memory_usage, "MB") | [] |
2024-01-10 | chocky18/Automate_Browser_with_GPT3 | open_ai_idea_n_generate_command.py | #!/usr/bin/env python3
#
# natbot.py
#
# Set OPENAI_API_KEY to your API key, and then run this from a terminal.
#
from playwright.sync_api import sync_playwright
import time
from sys import argv, exit, platform
import openai
from bardapi import Bard
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import os
import random
from openai_idea import get_idea_from_open_ai
quiet = False
if len(argv) >= 2:
if argv[1] == '-q' or argv[1] == '--quiet':
quiet = True
print(
"Running in quiet mode (HTML and other content hidden); \n"
+ "exercise caution when running suggested commands."
)
prompt_template = """
My gmail address is [email protected] and password is 8977821555
You are an agent controlling a browser. You are given:
(1) an objective that you are trying to achieve
(2) the URL of your current web page
(3) a simplified text description of what's visible in the browser window (more on that below)
You can issue these commands:
SCROLL UP - scroll up one page
SCROLL DOWN - scroll down one page
CLICK X - click on a given element. You can only click on links, buttons, and inputs!
TYPE X "TEXT" - type the specified text into the input with id X
TYPESUBMIT X "TEXT" - same as TYPE above, except then it presses ENTER to submit the form
The format of the browser content is highly simplified; all formatting elements are stripped.
Interactive elements such as links, inputs, buttons are represented like this:
<link id=1>text</link>
<button id=2>text</button>
<input id=3>text</input>
Images are rendered as their alt text like this:
<img id=4 alt=""/>
Based on your given objective, issue whatever command you believe will get you closest to achieving your goal.
You always start on Google; you should submit a search query to Google that will take you to the best page for
achieving your objective. And then interact with that page to achieve your objective.
Don't try to interact with elements that you can't see.
Here are some examples:
EXAMPLE 1:
==================================================
CURRENT BROWSER CONTENT:
------------------
<link id=0 aria-label="Gmail (opens a new tab)">Gmail</link>
<link id=1 aria-label="Search for Images (opens a new tab)">Images</link>
<link id=2 aria-label="Google apps"/>
<link id=3>Sign in</link>
<img id=4 Google/>
<button id=5 Search Search/>
<button id=6 Search by voice/>
<button id=7 Search by image/>
<button id=8 Google Search/>
<button id=9 I'm Feeling Lucky/>
<text id=10>Google offered in:</text>
<link id=11>हिन्दी</link>
<link id=12>বাংলা</link>
<link id=13>తెలుగు</link>
<link id=14>मराठी</link>
<link id=15>தமிழ்</link>
<link id=16>ગુજરાતી</link>
<link id=17>ಕನ್ನಡ</link>
<link id=18>മലയാളം</link>
<link id=19>ਪੰਜਾਬੀ</link>
<text id=20>India</text>
<link id=21>About</link>
<link id=22>Advertising</link>
<link id=23>Business</link>
<link id=24>How Search works</link>
<link id=25>Privacy</link>
<link id=26>Terms</link>
<text id=27>Settings</text>
------------------
OBJECTIVE: search for instagram
CURRENT URL: https://www.google.com/
YOUR COMMAND:
TYPESUBMIT 5 "instagram"
==================================================
EXAMPLE 2:
==================================================
CURRENT BROWSER CONTENT:
------------------
<link id=0 aria-label="Gmail (opens a new tab)">Gmail</link>
<link id=1 aria-label="Search for Images (opens a new tab)">Images</link>
<link id=2 aria-label="Google apps"/>
<link id=3>Sign in</link>
<img id=4 Google/>
<button id=5 Search Search/>
<button id=6 Search by voice/>
<button id=7 Search by image/>
<button id=8 Google Search/>
<button id=9 I'm Feeling Lucky/>
<text id=10>Google offered in:</text>
<link id=11>submit</link>
<link id=12>বাংলা</link>
<link id=13>తెలుగు</link>
<link id=14>मराठी</link>
<link id=15>தமிழ்</link>
<link id=16>ગુજરાતી</link>
<link id=17>ಕನ್ನಡ</link>
<link id=18>മലയാളം</link>
<link id=19>ਪੰਜਾਬੀ</link>
<text id=20>India</text>
<link id=21>About</link>
<link id=22>Advertising</link>
<link id=23>Business</link>
<link id=24>How Search works</link>
<link id=25>Privacy</link>
<link id=26>Terms</link>
<text id=27>Settings</text>
------------------
OBJECTIVE: click the submit button
CURRENT URL: https://www.google.com/
YOUR COMMAND:
CLICK 11
==================================================
given an objective that you are trying to achieve.
given the URL of the current web page.
given a simplified text description of what's visible in the browser window.
You can issue the following commands:
SCROLL UP - scroll up one page
SCROLL DOWN - scroll down one page
CLICK X - click on a given element. You can only click on links, buttons, and inputs!
TYPE X "TEXT" - type the specified text into the input with id X
TYPESUBMIT X "TEXT" - same as TYPE above, except then it presses ENTER to submit the form
Based on my given objective, you issue whatever command you believe will get me closest to achieving my goal.
you always start on Google; you should submit a search query to Google that will take me to the best page for achieving my objective. And then interact with that page to achieve my objective.
The current browser content, objective, and current URL follow. Reply with your next command to the browser.
CURRENT BROWSER CONTENT:
------------------
$browser_content
------------------
OBJECTIVE: $objective
CURRENT URL: $url
YOUR COMMAND:
"""
black_listed_elements = set(["html", "head", "title", "meta", "iframe", "body", "script", "style", "path", "svg", "br", "::marker",])
class Crawler:
def __init__(self):
self.browser = (
sync_playwright()
.start()
.chromium.launch(
headless=False,
)
)
# options = self.browser.ChromeOptions()
# options.add_argument("--user-data-dir=C:/Users/{userName}/AppData/Local/Google/Chrome/User Data/Profile {#}/")
self.page = self.browser.new_page()
window_size = self.page.viewport_size
self.page.set_viewport_size({"width": window_size['width'], "height":window_size['height']})
def go_to_page(self, url):
self.page.goto(url=url if "://" in url else "http://" + url)
self.client = self.page.context.new_cdp_session(self.page)
self.page_element_buffer = {}
# def generate_text(prompt, temperature, best_of, n, max_tokens):
# """Generates text using Bard.
# Args:
# prompt: The prompt to generate text for.
# temperature: The temperature of the text generation.
# best_of: The number of best text generations to return.
# n: The number of text generations to generate.
# max_tokens: The maximum number of tokens to generate.
# Returns:
# A list of the best text generations.
# """
# completions = []
# for _ in range(n):
# completion = bard.generate(prompt, temperature, max_tokens)
# completions.append(completion)
# best_completions = random.sample(completions, best_of)
# return best_completions
def scroll(self, direction):
if direction == "up":
self.page.evaluate(
"(document.scrollingElement || document.body).scrollTop = (document.scrollingElement || document.body).scrollTop - window.innerHeight;"
)
elif direction == "down":
self.page.evaluate(
"(document.scrollingElement || document.body).scrollTop = (document.scrollingElement || document.body).scrollTop + window.innerHeight;"
)
def type_in_search_bar(self,query):
search_bar = self.page.wait_for_selector('input[aria-label="Search"]')
search_bar.fill(query)
def click(self, id):
# Inject javascript into the page which removes the target= attribute from all links
js = """
links = document.getElementsByTagName("a");
for (var i = 0; i < links.length; i++) {
links[i].removeAttribute("target");
}
"""
self.page.evaluate(js)
element = self.page_element_buffer.get(int(id))
if element:
x = element.get("center_x")
y = element.get("center_y")
self.page.mouse.click(x, y)
else:
print("Could not find element")
def type(self, id, text):
self.click(id)
self.page.keyboard.type(text)
def enter(self):
self.page.keyboard.press("Enter")
def crawl(self):
page = self.page
page_element_buffer = self.page_element_buffer
start = time.time()
page_state_as_text = []
device_pixel_ratio = page.evaluate("window.devicePixelRatio")
if platform == "darwin" and device_pixel_ratio == 1: # lies
device_pixel_ratio = 2
win_scroll_x = page.evaluate("window.scrollX")
win_scroll_y = page.evaluate("window.scrollY")
win_upper_bound = page.evaluate("window.pageYOffset")
win_left_bound = page.evaluate("window.pageXOffset")
win_width = page.evaluate("window.screen.width")
win_height = page.evaluate("window.screen.height")
win_right_bound = win_left_bound + win_width
win_lower_bound = win_upper_bound + win_height
document_offset_height = page.evaluate("document.body.offsetHeight")
document_scroll_height = page.evaluate("document.body.scrollHeight")
# percentage_progress_start = (win_upper_bound / document_scroll_height) * 100
# percentage_progress_end = (
# (win_height + win_upper_bound) / document_scroll_height
# ) * 100
percentage_progress_start = 1
percentage_progress_end = 2
page_state_as_text.append(
{
"x": 0,
"y": 0,
"text": "[scrollbar {:0.2f}-{:0.2f}%]".format(
round(percentage_progress_start, 2), round(percentage_progress_end)
),
}
)
tree = self.client.send(
"DOMSnapshot.captureSnapshot",
{"computedStyles": [], "includeDOMRects": True, "includePaintOrder": True},
)
strings = tree["strings"]
document = tree["documents"][0]
nodes = document["nodes"]
backend_node_id = nodes["backendNodeId"]
attributes = nodes["attributes"]
node_value = nodes["nodeValue"]
parent = nodes["parentIndex"]
node_types = nodes["nodeType"]
node_names = nodes["nodeName"]
is_clickable = set(nodes["isClickable"]["index"])
text_value = nodes["textValue"]
text_value_index = text_value["index"]
text_value_values = text_value["value"]
input_value = nodes["inputValue"]
input_value_index = input_value["index"]
input_value_values = input_value["value"]
input_checked = nodes["inputChecked"]
layout = document["layout"]
layout_node_index = layout["nodeIndex"]
bounds = layout["bounds"]
cursor = 0
html_elements_text = []
child_nodes = {}
elements_in_view_port = []
anchor_ancestry = {"-1": (False, None)}
button_ancestry = {"-1": (False, None)}
def convert_name(node_name, has_click_handler):
if node_name == "a":
return "link"
if node_name == "input":
return "input"
if node_name == "img":
return "img"
if (
node_name == "button" or has_click_handler
): # found pages that needed this quirk
return "button"
else:
return "text"
def find_attributes(attributes, keys):
values = {}
for [key_index, value_index] in zip(*(iter(attributes),) * 2):
if value_index < 0:
continue
key = strings[key_index]
value = strings[value_index]
if key in keys:
values[key] = value
keys.remove(key)
if not keys:
return values
return values
def add_to_hash_tree(hash_tree, tag, node_id, node_name, parent_id):
parent_id_str = str(parent_id)
if not parent_id_str in hash_tree:
parent_name = strings[node_names[parent_id]].lower()
grand_parent_id = parent[parent_id]
add_to_hash_tree(
hash_tree, tag, parent_id, parent_name, grand_parent_id
)
is_parent_desc_anchor, anchor_id = hash_tree[parent_id_str]
# even if the anchor is nested in another anchor, we set the "root" for all descendants to be ::Self
if node_name == tag:
value = (True, node_id)
elif (
is_parent_desc_anchor
): # reuse the parent's anchor_id (which could be much higher in the tree)
value = (True, anchor_id)
else:
value = (
False,
None,
) # not a descendant of an anchor, most likely it will become text, an interactive element or discarded
hash_tree[str(node_id)] = value
return value
for index, node_name_index in enumerate(node_names):
node_parent = parent[index]
node_name = strings[node_name_index].lower()
is_ancestor_of_anchor, anchor_id = add_to_hash_tree(
anchor_ancestry, "a", index, node_name, node_parent
)
is_ancestor_of_button, button_id = add_to_hash_tree(
button_ancestry, "button", index, node_name, node_parent
)
try:
cursor = layout_node_index.index(
index
) # todo replace this with proper cursoring, ignoring the fact this is O(n^2) for the moment
except:
continue
if node_name in black_listed_elements:
continue
[x, y, width, height] = bounds[cursor]
x /= device_pixel_ratio
y /= device_pixel_ratio
width /= device_pixel_ratio
height /= device_pixel_ratio
elem_left_bound = x
elem_top_bound = y
elem_right_bound = x + width
elem_lower_bound = y + height
partially_is_in_viewport = (
elem_left_bound < win_right_bound
and elem_right_bound >= win_left_bound
and elem_top_bound < win_lower_bound
and elem_lower_bound >= win_upper_bound
)
if not partially_is_in_viewport:
continue
meta_data = []
# inefficient to grab the same set of keys for kinds of objects but its fine for now
element_attributes = find_attributes(
attributes[index], ["type", "placeholder", "aria-label", "title", "alt"]
)
ancestor_exception = is_ancestor_of_anchor or is_ancestor_of_button
ancestor_node_key = (
None
if not ancestor_exception
else str(anchor_id)
if is_ancestor_of_anchor
else str(button_id)
)
ancestor_node = (
None
if not ancestor_exception
else child_nodes.setdefault(str(ancestor_node_key), [])
)
if node_name == "#text" and ancestor_exception:
text = strings[node_value[index]]
if text == "|" or text == "•":
continue
ancestor_node.append({
"type": "type", "value": text
})
else:
if (
node_name == "input" and element_attributes.get("type") == "submit"
) or node_name == "button" or node_name == 'textarea':
node_name = "button"
element_attributes.pop(
"type", None
) # prevent [button ... (button)..]
for key in element_attributes:
if ancestor_exception:
ancestor_node.append({
"type": "attribute",
"key": key,
"value": element_attributes[key]
})
else:
meta_data.append(element_attributes[key])
# print("meta", meta_data)
element_node_value = None
if node_value[index] >= 0:
element_node_value = strings[node_value[index]]
if element_node_value == "|": #commonly used as a seperator, does not add much context - lets save ourselves some token space
continue
elif (
node_name == "input"
and index in input_value_index
and element_node_value is None
):
node_input_text_index = input_value_index.index(index)
text_index = input_value_values[node_input_text_index]
if node_input_text_index >= 0 and text_index >= 0:
element_node_value = strings[text_index]
# remove redudant elements
if ancestor_exception and (node_name != "a" and node_name != "button"):
continue
elements_in_view_port.append(
{
"node_index": str(index),
"backend_node_id": backend_node_id[index],
"node_name": node_name,
"node_value": element_node_value,
"node_meta": meta_data,
"is_clickable": index in is_clickable,
"origin_x": int(x),
"origin_y": int(y),
"center_x": int(x + (width / 2)),
"center_y": int(y + (height / 2)),
}
)
# print("elements_in_view_port",elements_in_view_port)
# lets filter further to remove anything that does not hold any text nor has click handlers + merge text from leaf#text nodes with the parent
elements_of_interest= []
id_counter = 0
for element in elements_in_view_port:
node_index = element.get("node_index")
node_name = element.get("node_name")
node_value = element.get("node_value")
is_clickable = element.get("is_clickable")
origin_x = element.get("origin_x")
origin_y = element.get("origin_y")
center_x = element.get("center_x")
center_y = element.get("center_y")
meta_data = element.get("node_meta")
inner_text = f"{node_value} " if node_value else ""
meta = ""
if node_index in child_nodes:
for child in child_nodes.get(node_index):
entry_type = child.get('type')
entry_value= child.get('value')
if entry_type == "attribute":
entry_key = child.get('key')
meta_data.append(f'{entry_key}="{entry_value}"')
else:
inner_text += f"{entry_value} "
# print("meta_data", meta_data)
if meta_data:
meta_string = " ".join(meta_data)
meta = f" {meta_string}"
if inner_text != "":
inner_text = f"{inner_text.strip()}"
converted_node_name = convert_name(node_name, is_clickable)
# not very elegant, more like a placeholder
if (
(converted_node_name != "button" or meta == "")
and converted_node_name != "link"
and converted_node_name != "input"
and converted_node_name != "img"
and converted_node_name != "textarea"
) and inner_text.strip() == "":
continue
page_element_buffer[id_counter] = element
if inner_text != "":
elements_of_interest.append(
f"""<{converted_node_name} id={id_counter}{meta}>{inner_text}</{converted_node_name}>"""
)
else:
elements_of_interest.append(
f"""<{converted_node_name} id={id_counter}{meta}/>"""
)
id_counter += 1
print("Parsing time: {:0.2f} seconds".format(time.time() - start))
# print("elements_of_interest", elements_of_interest)
# elements_to_remove = ["<button id=8 I'm Feeling Lucky/>", '<text id=9>Google offered in:</text>', '<link id=10>हिन्दी</link>', '<link id=11>বাংলা</link>', '<link id=12>తెలుగు</link>', '<link id=13>मराठी</link>', '<link id=14>தமிழ்</link>', '<link id=15>ગુજરાતી</link>', '<link id=16>ಕನ್ನಡ</link>', '<link id=17>മലയാളം</link>', '<link id=18>ਪੰਜਾਬੀ</link>', '<text id=19>India</text>', '<link id=20>About</link>', '<link id=21>Advertising</link>', '<link id=22>Business</link>', '<link id=23>How Search works</link>', '<link id=24>Privacy</link>', '<link id=25>Terms</link>', '<text id=26>Settings</text>']
# lst = [elem for elem in elements_of_interest if elem not in elements_to_remove]
return elements_of_interest
if (
__name__ == "__main__"
):
_crawler = Crawler()
openai.api_key = "Your OPENAI API KEY"
def print_help():
print(
"(g) to visit url\n(u) scroll up\n(d) scroll down\n(c) to click\n(t) to type\n" +
"(h) to view commands again\n(r/enter) to run suggested command\n(o) change objective"
)
def get_gpt_command(objective, url, browser_content):
prompt = prompt_template
prompt = prompt.replace("$objective", objective)
prompt = prompt.replace("$url", url[:100])
prompt = prompt.replace("$browser_content", browser_content[:4500])
# response = Bard().get_answer(prompt)['content']
response = openai.Completion.create(model="text-davinci-002",prompt=prompt, temperature=0.7, best_of=10, n=1, max_tokens=64, stop =None)
#response = Crawler.generate_text(prompt, temperature=0.5, best_of=10, n=3, max_tokens=50)
print("response",response)
# return response
return response.choices[0].text
def run_cmd(cmd):
cmd = cmd.split("\n")[0]
if cmd.startswith("SCROLL UP"):
_crawler.scroll("up")
elif cmd.startswith("SCROLL DOWN"):
_crawler.scroll("down")
elif cmd.startswith("CLICK"):
commasplit = cmd.split(",")
id = commasplit[0].split(" ")[1]
_crawler.click(id)
elif cmd.startswith("TYPE"):
spacesplit = cmd.split(" ")
id = spacesplit[1]
text = spacesplit[2:]
text = " ".join(text)
# Strip leading and trailing double quotes
text = text[1:-1]
if cmd.startswith("TYPESUBMIT"):
text += '\n'
print(text)
_crawler.type(id, text)
# _crawler.type_in_search_bar(text)
# _crawler.click(id)
# Crawler.type_in_search_bar(text)
# # _crawler.type(id, text)
# _crawler.click(id)
time.sleep(2)
def extract_commands(text):
commands = []
lines = text.split('\n')
for line in lines:
if line.startswith('SCROLL UP') or line.startswith('SCROLL DOWN') or line.startswith('CLICK') or line.startswith('TYPE') or line.startswith('TYPESUBMIT'):
command = line.strip()
commands.append(command)
commands_str = '\n'.join(commands)
return commands_str
def read_objectives_from_file(file_path):
objectives = []
try:
with open(file_path, 'r') as file:
lines = file.readlines()
for line in lines:
# parts = line.strip().split(' ', 1)
if len(line) > 1:
objective = line
objectives.append(objective)
except FileNotFoundError:
print(f"File '{file_path}' not found.")
except IOError:
print("An error occurred while reading the file.")
print("objectives",objectives)
return objectives
task = input("what is your objective??")
get_idea_from_open_ai(task)
objective_file_path = "file.txt"
objectives = read_objectives_from_file(objective_file_path)
_crawler.go_to_page("https://www.google.com/")
try:
for objective in objectives:
if len(objective) > 0:
gpt_cmd = ""
while True:
browser_content = "\n".join(_crawler.crawl())
prev_cmd = gpt_cmd
gpt_cmd = get_gpt_command(objective, _crawler.page.url, browser_content)
print("gpt_cmd",gpt_cmd)
gpt_cmd = gpt_cmd.strip()
print("gptmcnd",gpt_cmd)
if not quiet:
print("URL: " + _crawler.page.url)
print("Objective: " + objective)
print("----------------\n" + browser_content + "\n----------------\n")
if len(gpt_cmd) > 0:
print("lenght0gpt_cmd",gpt_cmd)
command = extract_commands(gpt_cmd)
print("Suggested command:", command)
run_cmd(command)
break
except KeyboardInterrupt:
print("\n[!] Ctrl+C detected, exiting gracefully.")
exit(0)
| [
"\n\nMy gmail address is [email protected] and password is 8977821555\nYou are an agent controlling a browser. You are given:\n\n(1) an objective that you are trying to achieve\n(2) the URL of your current web page\n(3) a simplified text description of what's visible in the browser window (more on that below)\n\nYou can issue these commands:\nSCROLL UP - scroll up one page\nSCROLL DOWN - scroll down one page\nCLICK X - click on a given element. You can only click on links, buttons, and inputs!\nTYPE X \"TEXT\" - type the specified text into the input with id X\nTYPESUBMIT X \"TEXT\" - same as TYPE above, except then it presses ENTER to submit the form\n\nThe format of the browser content is highly simplified; all formatting elements are stripped.\nInteractive elements such as links, inputs, buttons are represented like this:\n\n <link id=1>text</link>\n <button id=2>text</button>\n <input id=3>text</input>\n\nImages are rendered as their alt text like this:\n\n <img id=4 alt=\"\"/>\n\nBased on your given objective, issue whatever command you believe will get you closest to achieving your goal.\nYou always start on Google; you should submit a search query to Google that will take you to the best page for\nachieving your objective. And then interact with that page to achieve your objective.\n\n\nDon't try to interact with elements that you can't see.\n\nHere are some examples:\n\nEXAMPLE 1:\n==================================================\nCURRENT BROWSER CONTENT:\n------------------\n<link id=0 aria-label=\"Gmail (opens a new tab)\">Gmail</link>\n<link id=1 aria-label=\"Search for Images (opens a new tab)\">Images</link>\n<link id=2 aria-label=\"Google apps\"/>\n<link id=3>Sign in</link>\n<img id=4 Google/>\n<button id=5 Search Search/>\n<button id=6 Search by voice/>\n<button id=7 Search by image/>\n<button id=8 Google Search/>\n<button id=9 I'm Feeling Lucky/>\n<text id=10>Google offered in:</text>\n<link id=11>हिन्दी</link>\n<link id=12>বাংলা</link>\n<link id=13>తెలుగు</link>\n<link id=14>मराठी</link>\n<link id=15>தமிழ்</link>\n<link id=16>ગુજરાતી</link>\n<link id=17>ಕನ್ನಡ</link>\n<link id=18>മലയാളം</link>\n<link id=19>ਪੰਜਾਬੀ</link>\n<text id=20>India</text>\n<link id=21>About</link>\n<link id=22>Advertising</link>\n<link id=23>Business</link>\n<link id=24>How Search works</link>\n<link id=25>Privacy</link>\n<link id=26>Terms</link>\n<text id=27>Settings</text>\n\n\n------------------\nOBJECTIVE: search for instagram\nCURRENT URL: https://www.google.com/\nYOUR COMMAND: \nTYPESUBMIT 5 \"instagram\"\n\n==================================================\n\nEXAMPLE 2:\n==================================================\nCURRENT BROWSER CONTENT:\n------------------\n<link id=0 aria-label=\"Gmail (opens a new tab)\">Gmail</link>\n<link id=1 aria-label=\"Search for Images (opens a new tab)\">Images</link>\n<link id=2 aria-label=\"Google apps\"/>\n<link id=3>Sign in</link>\n<img id=4 Google/>\n<button id=5 Search Search/>\n<button id=6 Search by voice/>\n<button id=7 Search by image/>\n<button id=8 Google Search/>\n<button id=9 I'm Feeling Lucky/>\n<text id=10>Google offered in:</text>\n<link id=11>submit</link>\n<link id=12>বাংলা</link>\n<link id=13>తెలుగు</link>\n<link id=14>मराठी</link>\n<link id=15>தமிழ்</link>\n<link id=16>ગુજરાતી</link>\n<link id=17>ಕನ್ನಡ</link>\n<link id=18>മലയാളം</link>\n<link id=19>ਪੰਜਾਬੀ</link>\n<text id=20>India</text>\n<link id=21>About</link>\n<link id=22>Advertising</link>\n<link id=23>Business</link>\n<link id=24>How Search works</link>\n<link id=25>Privacy</link>\n<link id=26>Terms</link>\n<text id=27>Settings</text>\n\n\n------------------\nOBJECTIVE: click the submit button\nCURRENT URL: https://www.google.com/\nYOUR COMMAND: \nCLICK 11\n==================================================\n\ngiven an objective that you are trying to achieve.\ngiven the URL of the current web page.\ngiven a simplified text description of what's visible in the browser window.\nYou can issue the following commands:\nSCROLL UP - scroll up one page\nSCROLL DOWN - scroll down one page\nCLICK X - click on a given element. You can only click on links, buttons, and inputs!\nTYPE X \"TEXT\" - type the specified text into the input with id X\nTYPESUBMIT X \"TEXT\" - same as TYPE above, except then it presses ENTER to submit the form\nBased on my given objective, you issue whatever command you believe will get me closest to achieving my goal.\nyou always start on Google; you should submit a search query to Google that will take me to the best page for achieving my objective. And then interact with that page to achieve my objective.\n\n\nThe current browser content, objective, and current URL follow. Reply with your next command to the browser.\n\nCURRENT BROWSER CONTENT:\n------------------\n$browser_content\n------------------\n\nOBJECTIVE: $objective\nCURRENT URL: $url\nYOUR COMMAND:\n",
"$browser_content",
"$objective"
] |
2024-01-10 | amzadb/ChatPDFs | pdf_util.py | from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
def get_pdf_text(pdf_docs):
text = ""
for pdf in pdf_docs:
pdf_reader = PdfReader(pdf)
for page in pdf_reader.pages:
text += page.extract_text()
return text
def get_text_chunks(text):
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=1000,
chunk_overlap=200,
length_function=len
)
chunks = text_splitter.split_text(text)
return chunks
def get_vectorstore(text_chunks):
embeddings = OpenAIEmbeddings()
vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
return vectorstore
def get_conversation_chain(vectorstore):
llm = ChatOpenAI()
# llm_model = "gpt-3.5-turbo"
# llm = ChatOpenAI(temperature=0.6, model=llm_model)
memory = ConversationBufferMemory(
memory_key='chat_history',
return_messages=True)
conversation_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=vectorstore.as_retriever(),
memory=memory
)
return conversation_chain
| [] |
2024-01-10 | kod3000/analyze-image | backend~api~vision_processing.py | import os
import requests
from .constants import OPENAI_SAMPLE_RESPONSE as sample
correct_api_key = "sk-" + os.environ.get('OPENAI_API_KEY_1')
wrong_api_key = "sk-" + os.environ.get('OPENAI_API_KEY_2')
def process_vision(base64_image, use_correct_key=True):
"""
Process the image using OpenAI's API
Takes in a base64 encoded image and returns the response from OpenAI
Developer Notes :
- You can pass in false flag to test error handling
"""
api_key = wrong_api_key # Default to wrong key
if use_correct_key:
api_key = correct_api_key
payload = {
"model": "gpt-4-vision-preview",
"messages": [
{
"role": "user",
"content": [
{
"type": "text",
"text": "What’s in this image?"
},
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{base64_image}"
}
}
]
}
],
"max_tokens": 300
}
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {api_key}"
}
# return sample # For testing
response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
print(response)
print(response.json())
return response.json()
| [
"[{'type': 'text', 'text': 'What’s in this image?'}, {'type': 'image_url', 'image_url': {'url': 'data:image/jpeg;base64,PLACEHOLDER'}}]"
] |
2024-01-10 | KTerhuja/bmo-chatbot | BMO.py | import os
# from dotenv import load_dotenv, find_dotenv
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.llms import AzureOpenAI
from langchain.document_loaders import DirectoryLoader,PyPDFLoader
from langchain.document_loaders import UnstructuredExcelLoader
from langchain.vectorstores import DocArrayInMemorySearch
from langchain.memory import ConversationBufferMemory
from IPython.display import display, Markdown
import pandas as pd
import gradio as gr
import random
import time
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain import PromptTemplate
from langchain.vectorstores import Chroma
from langchain.agents.tools import Tool
from langchain.experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner
from langchain import OpenAI, VectorDBQA
from langchain.chains.router import MultiRetrievalQAChain
import streamlit as st
from streamlit_chat import message
# _ = load_dotenv(find_dotenv())
os.environ["OPENAI_API_KEY"] = "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"
llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k", temperature=0)
embeddings = OpenAIEmbeddings(model="text-embedding-ada-002",chunk_size =1)
st.write("loading chroma")
bcar_retriever = Chroma(embedding_function=embeddings,persist_directory=f"/workspaces/msc/zip_bmo emb/BCAR_Embedding").as_retriever()
smsb_retriever = Chroma(embedding_function=embeddings,persist_directory=f"/workspaces/msc/zip_bmo emb/SMSB_EMBEDDING").as_retriever()
bmo_retriever = Chroma(embedding_function=embeddings,persist_directory=f"/workspaces/msc/zip_bmo emb/BMO_FULL_EMBEDDING").as_retriever()
creditirb_retriever = Chroma(embedding_function=embeddings,persist_directory=f"/workspaces/msc/zip_bmo emb/IRB").as_retriever()
creditstd_retriever = Chroma(embedding_function=embeddings,persist_directory=f"/workspaces/msc/zip_bmo emb/credit_risk_standartize").as_retriever()
nbc_retriever = Chroma(embedding_function=embeddings,persist_directory=f"/workspaces/msc/zip_bmo emb/NBC_Embedding").as_retriever()
st.write("loading qa")
qa_bcar = RetrievalQA.from_chain_type(llm=llm, retriever=bcar_retriever, verbose=True)
qa_bmo = RetrievalQA.from_chain_type(llm=llm, retriever=bmo_retriever, verbose=True)
qa_creditirb = RetrievalQA.from_chain_type(llm=llm, retriever=creditirb_retriever, verbose=True)
qa_creditstd = RetrievalQA.from_chain_type(llm=llm, retriever=creditstd_retriever, verbose=True)
qa_smsb = RetrievalQA.from_chain_type(llm=llm, retriever=smsb_retriever, verbose=True)
qa_nbc = RetrievalQA.from_chain_type(llm=llm, retriever=nbc_retriever, verbose=True)
tools = [
Tool(
name = "BCAR",
func=qa_bcar.run,
description="useful for when you need to find answer regarding bcar different categories and schedules"
),
Tool(
name="BMO Annual Report",
func=qa_bmo.run,
description="useful for when you need to find details about BMO bank like category it follows, fiscal year end etc"
),
Tool(
name="Credit Risk –Internal Ratings Based Approach",
func=qa_creditirb.run,
description="useful for when you need to find details about Credit Risk –Internal Ratings Based Approach "
),
Tool(
name="Credit Risk –Standardized Approach",
func=qa_creditstd.run,
description="useful for when you need to find details about Credit Risk –Standardized Approach "
),
Tool(
name="SMSB",
func=qa_smsb.run,
description="useful for when you need to find details about SMSB that is one category approach among BCAR"
),
Tool(
name="National Bnak Of Canada Annual Report",
func=qa_nbc.run,
description="useful for when you need to find details about National Bank of Canada like category it follows, fiscal year end etc"
),
]
planner = load_chat_planner(llm)
executor = load_agent_executor(llm, tools, verbose=True)
agent = PlanAndExecute(planner=planner, executor=executor, verbose=True)
#agent.run("Which reports Bank BMO has to send to OSFI for BCAR credit risk?")
## generated stores AI generated responses
if 'generated' not in st.session_state:
st.session_state['generated'] = []
## past stores User's questions
if 'past' not in st.session_state:
st.session_state['past'] = []
def get_text():
input_text = st.text_input("You: ", "", key="input")
return input_text
user_input = get_text()
if user_input:
output = agent.run(user_input)
st.session_state.past.append(user_input)
st.session_state.generated.append(output)
if 'generated' in st.session_state:
for i in range(len(st.session_state['generated'])-1,-1,-1):
message(st.session_state['past'][i], is_user=True, key=str(i) + '_user')
message(st.session_state["generated"][i], key=str(i)) | [] |
2024-01-10 | KTerhuja/bmo-chatbot | creating_embeddin.py | import os
from langchain.llms import GooglePalm
from langchain.embeddings import GooglePalmEmbeddings
from langchain.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter
from langchain.document_loaders import UnstructuredPDFLoader
from langchain.document_transformers import EmbeddingsRedundantFilter
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import DocumentCompressorPipeline, EmbeddingsFilter
from langchain.document_loaders import PyPDFLoader
from langchain.chains import RetrievalQA
from tqdm import tqdm
from time import sleep
import os
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
# from langchain.document_loaders import DirectoryLoader,PyPDFLoader
# from langchain.document_loaders import UnstructuredExcelLoader
# from langchain.vectorstores import DocArrayInMemorySearch
from langchain.memory import ConversationBufferMemory
# from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain import PromptTemplate
from langchain.vectorstores import Chroma
file_names = os.listdir("./data/")
os.environ["OPENAI_API_KEY"] = "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"
llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k", temperature=0.1)
embeddings = OpenAIEmbeddings(model="text-embedding-ada-002",chunk_size = 1)
for file in ["bmo_ar2022 (2).pdf"]:
print(file.split(".")[0])
loader = PyPDFLoader(f"./data/{file}")
documents = loader.load()
# text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000,chunk_overlap=100,length_function = len)
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=50)
texts = text_splitter.split_documents(documents)
docsearch = FAISS.from_documents(texts, embeddings)
docsearch.save_local(folder_path='FAISS_VS', index_name=f"{file.split('.')[0]}_index")
print(file.split(".")[0])
# docsearch = FAISS.load_local(folder_path='./FAISS_VS', embeddings=embeddings, index_name=f"Basel Capital Adequacy Reporting (BCAR) 2023_index")
# retriever = docsearch.as_retriever()
# bmo_retriver = FAISS.load_local(folder_path='./FAISS_VS', embeddings=embeddings, index_name='bmo_ar2022_index').as_retriever()
# qa_bmo = RetrievalQA.from_chain_type(llm=llm, retriever=bmo_retriver, verbose=True)
# print(qa_bmo.run("Which reports bank BMO has to send to OSFI for BCAR Credit Risk?")) | [] |
2024-01-10 | KTerhuja/bmo-chatbot | bmo_openai.py | import os
# from dotenv import load_dotenv, find_dotenv
# from langchain.llms import GooglePalm
from langchain.vectorstores import FAISS
# from langchain.embeddings import GooglePalmEmbeddings
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
# from langchain.llms import AzureOpenAI
# from langchain.document_loaders import DirectoryLoader,PyPDFLoader
# from langchain.document_loaders import UnstructuredExcelLoader
# from langchain.vectorstores import DocArrayInMemorySearch
from langchain.memory import ConversationBufferMemory
# from IPython.display import display, Markdown
# import pandas as pd
# import gradio as gr
# from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain import PromptTemplate
# from langchain.vectorstores import Chroma
# from langchain.agents.tools import Tool
# from langchain.experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner
# from langchain import OpenAI, VectorDBQA
# from langchain.chains.router import MultiRetrievalQAChain
import streamlit as st
from streamlit_chat import message
# from langchain.document_loaders import UnstructuredPDFLoader
# _ = load_dotenv(find_dotenv())
os.environ["OPENAI_API_KEY"] = st.secrets["OPENAI_API_KEY"]
llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k", temperature=0.1)
template = """
You are virtual assistant of OSFI.
Use the following context (delimited by <ctx></ctx>), and the chat history (delimited by <hs></hs>) to answer the question:
------
<ctx>
{context}
</ctx>
------
<hs>
{history}
</hs>
------
{question}
Answer:
"""
prompt = PromptTemplate(input_variables=["history", "context", "question"],template=template)
embeddings = OpenAIEmbeddings(model="text-embedding-ada-002",chunk_size =1)
bcar_retriever = FAISS.load_local(folder_path='./FAISS_VS', embeddings=embeddings, index_name='Basel Capital Adequacy Reporting (BCAR) 2023 (2)_index')
bmo_retriver = FAISS.load_local(folder_path='./FAISS_VS', embeddings=embeddings, index_name='bmo_ar2022 (2)_index')
creditirb_retriever = FAISS.load_local(folder_path='./FAISS_VS', embeddings=embeddings, index_name='Capital Adequacy Requirements (CAR) Chapter 5 Credit Risk Internal Ratings Based Approach_index')
creditstd_retriever = FAISS.load_local(folder_path='./FAISS_VS', embeddings=embeddings, index_name='Capital Adequacy Requirements (CAR) Chapter 4 Credit Risk Standardized Approach_index')
nbc_retriever = FAISS.load_local(folder_path='./FAISS_VS', embeddings=embeddings, index_name='NATIONAL BANK OF CANADA_ 2022 Annual Report (1)_index')
smsb_retriever = FAISS.load_local(folder_path='./FAISS_VS', embeddings=embeddings, index_name='SMSB (1)_index')
indices = [bcar_retriever,bmo_retriver,creditirb_retriever,creditstd_retriever,nbc_retriever,smsb_retriever]
for index in indices[1:]:
indices[0].merge_from(index)
agent = RetrievalQA.from_chain_type(llm = llm,
chain_type='stuff', # 'stuff', 'map_reduce', 'refine', 'map_rerank'
retriever=bcar_retriever.as_retriever(),
verbose=False,
chain_type_kwargs={
"verbose":True,
"prompt": prompt,
"memory": ConversationBufferMemory(
memory_key="history",
input_key="question"),
})
# st.title("BMO Chatbot")
# if 'something' not in st.session_state:
# user_input = ''
# def submit():
# user_input = st.session_state.widget
# st.session_state.widget = ''
# if 'generated' not in st.session_state:
# st.session_state['generated'] = []
# ## past stores User's questions
# if 'past' not in st.session_state:
# st.session_state['past'] = []
# messages = st.container()
# user_input = st.text_input("Query", key="widget", on_change=submit)
# relevent_docs = st.expander("Relevent Docs", expanded=False)
# if user_input:
# output = agent.run(user_input)
# with relevent_docs:
# st.write("\n\n\n",bcar_retriever.as_retriever().get_relevant_documents(user_input),"\n\n\n")
# st.session_state.past.append(user_input)
# st.session_state.generated.append(output)
# if 'generated' in st.session_state:
# with messages:
# for i in range(len(st.session_state['generated'])):
# message(st.session_state['past'][i], is_user=True, key=str(i) + '_user',avatar_style="initials",seed="U")
# message(st.session_state["generated"][i], key=str(i),avatar_style="initials",seed="A")
# agent.run("Which reports bank BMO has to send to OSFI for BCAR Credit Risk?")
# print(bcar_retriever.as_retriever().get_relevant_documents("what is fiscal year end of BMO?"))
st.title("BMO Chatbot")
# if 'something' not in st.session_state:
# user_input = ''
# def submit():
# user_input = st.session_state.widget
# st.session_state.widget = ''
if 'generated' not in st.session_state:
st.session_state['generated'] = []
## past stores User's questions
if 'past' not in st.session_state:
st.session_state['past'] = []
messages = st.container()
user_input = st.chat_input("Query")
relevent_docs = st.expander("Relevent Docs", expanded=False)
if user_input:
output = agent.run(user_input)
# with relevent_docs:
# st.write("\n\n\n",bcar_retriever.as_retriever().get_relevant_documents(user_input),"\n\n\n")
st.session_state.past.append(user_input)
st.session_state.generated.append(output)
if 'generated' in st.session_state:
with messages:
for i in range(len(st.session_state['generated'])):
st.chat_message("user").write(st.session_state['past'][i])
st.chat_message("assistant").write(st.session_state["generated"][i])
| [
"\nYou are virtual assistant of OSFI.\nUse the following context (delimited by <ctx></ctx>), and the chat history (delimited by <hs></hs>) to answer the question:\n------\n<ctx>\n{context}\n</ctx>\n------\n<hs>\n{history}\n</hs>\n------\n{question}\nAnswer:\n",
"question",
"context"
] |
2024-01-10 | KTerhuja/bmo-chatbot | bmo_simple.py | import os
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
# from langchain.document_loaders import DirectoryLoader,PyPDFLoader
# from langchain.document_loaders import UnstructuredExcelLoader
# from langchain.vectorstores import DocArrayInMemorySearch
from langchain.memory import ConversationBufferMemory
# from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain import PromptTemplate
from langchain.vectorstores import Chroma
os.environ["OPENAI_API_KEY"] = "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"
llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k", temperature=0.1)
template = """
You are virtual assistant of OSFI.
Use the following context (delimited by <ctx></ctx>), and the chat history (delimited by <hs></hs>) to answer the question:
------
<ctx>
{context}
</ctx>
------
<hs>
{history}
</hs>
------
{question}
Answer:
"""
embeddings = OpenAIEmbeddings(model="text-embedding-ada-002",chunk_size =1)
db_bcar = Chroma(embedding_function=embeddings,persist_directory="./zip_bmo emb/BCAR_Embedding")
db_bmo = Chroma(embedding_function=embeddings,persist_directory="./zip_bmo emb/BMO_FULL_EMBEDDING")
db_credirirb = Chroma(embedding_function=embeddings,persist_directory="./zip_bmo emb/IRB")
db_creditstd = Chroma(embedding_function=embeddings,persist_directory="./zip_bmo emb/credit_risk_standartize")
db_smsb = Chroma(embedding_function=embeddings,persist_directory="./zip_bmo emb/SMSB_EMBEDDING")
db_ncb = Chroma(embedding_function=embeddings,persist_directory="./zip_bmo emb/NBC_Embedding")
dbs = [db_bmo ,db_credirirb ,db_creditstd ,db_smsb ,db_ncb]
db_bcar._collection.add(
embeddings=db_bmo.get()["embeddings"],
metadatas=db_bmo.get()["metadatas"],
documents=db_bmo.get()["documents"],
ids=db_bmo.get()["ids"])
# for db in dbs[0:1]:
# db_bcar._collection.add(
# embeddings=db.get()["embeddings"],
# metadatas=db.get()["metadatas"],
# documents=db.get()["documents"],
# ids=db.get()["ids"])
prompt = PromptTemplate(input_variables=["history", "context", "question"],template=template)
retriever = db_bcar.as_retriever()
qa = RetrievalQA.from_chain_type(llm = llm,
chain_type='stuff', # 'stuff', 'map_reduce', 'refine', 'map_rerank'
retriever=retriever,
verbose=False,
chain_type_kwargs={
"verbose":True,
"prompt": prompt,
"memory": ConversationBufferMemory(
memory_key="history",
input_key="question"),
})
print(qa.run("Hi"))
# st.title("BMO Chatbot")
# if 'something' not in st.session_state:
# st.session_state.something = ''
# def submit():
# st.session_state.something = st.session_state.widget
# st.session_state.widget = ''
# if 'generated' not in st.session_state:
# st.session_state['generated'] = []
# ## past stores User's questions
# if 'past' not in st.session_state:
# st.session_state['past'] = []
# messages = st.container()
# user_input = st.text_input("Query", key="widget", on_change=submit)
# if st.session_state.something:
# output = qa.run(st.session_state.something)
# st.session_state.past.append(st.session_state.something)
# st.session_state.generated.append(output)
# if 'generated' in st.session_state:
# with messages:
# for i in range(len(st.session_state['generated'])):
# message(st.session_state['past'][i], is_user=True, key=str(i) + '_user',avatar_style="initials",seed="U")
# message(st.session_state["generated"][i], key=str(i),avatar_style="initials",seed="B")
| [
"\nYou are virtual assistant of OSFI.\nUse the following context (delimited by <ctx></ctx>), and the chat history (delimited by <hs></hs>) to answer the question:\n------\n<ctx>\n{context}\n</ctx>\n------\n<hs>\n{history}\n</hs>\n------\n{question}\nAnswer:\n",
"question",
"context"
] |
2024-01-10 | MuriloRyan/gptsimplesite | blueprints~gptconnect~gptapi.py | from flask import Flask,render_template, Blueprint, url_for, request,jsonify,redirect
from blueprints.database.mongoapi import database
from blueprints.database.mongodb import writehistory
import requests
import openai
import os
gptapi = Blueprint('gptapi',__name__)
gptapi.register_blueprint(database)
openai.api_key = os.getenv('GPTKEY')
#site/gpt/query/?prompt={prompt}&email={email}
@gptapi.route('/query/', methods=['POST'])
def madeQuery():
prompt=request.form.get('query')
query = openai.Completion.create(
engine='text-davinci-003',
prompt=prompt,
temperature=0.8,
max_tokens=100
)
data = {
'email': request.args.get('email'),
'query': prompt,
'response': query['choices'][0]['text']
}
writehistory(data)
return redirect('/') | [] |
2024-01-10 | somyaranjan26/ProbChain | ingest.py | import os
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceBgeEmbeddings
from langchain.document_loaders import PyPDFLoader
model_name = "BAAI/bge-large-en"
model_kwargs = {'device': 'cpu'}
encode_kwargs = {'normalize_embeddings': False}
embeddings = HuggingFaceBgeEmbeddings(
model_name=model_name,
model_kwargs=model_kwargs,
encode_kwargs=encode_kwargs
)
print("Embeddings Created.......")
loader = PyPDFLoader("Scrum-Guide.pdf")
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
texts = text_splitter.split_documents(documents)
vector_store = Chroma.from_documents(texts, embeddings, collection_metadata={"hnsw:space": "cosine"}, persist_directory="stores/pet_cosine")
print("Vector Store Created.......") | [] |
2024-01-10 | superRaptor911/test-chat-bot | preprocessor.py | from langchain.document_loaders import DirectoryLoader
from langchain.text_splitter import CharacterTextSplitter
from utility import pickle_object
import os
def load_data():
txt_loader = DirectoryLoader(
"./data/", glob="**/*.txt", show_progress=True, use_multithreading=True
)
py_loader = DirectoryLoader(
"./data/", glob="**/*.py", show_progress=True, use_multithreading=True
)
js_loader = DirectoryLoader(
"./data/", glob="**/*.js", show_progress=True, use_multithreading=True
)
ts_loader = DirectoryLoader(
"./data/", glob="**/*.ts", show_progress=True, use_multithreading=True
)
tsx_loader = DirectoryLoader(
"./data/", glob="**/*.tsx", show_progress=True, use_multithreading=True
)
java_loader = DirectoryLoader(
"./data/", glob="**/*.java", show_progress=True, use_multithreading=True
)
kt_loader = DirectoryLoader(
"./data/", glob="**/*.kt", show_progress=True, use_multithreading=True
)
loaders = [txt_loader, py_loader, js_loader, ts_loader, tsx_loader, java_loader, kt_loader]
documents = []
for loader in loaders:
documents.extend(loader.load())
print(f"Total number of documents: {len(documents)}")
return documents
def split_data(documents):
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
documents = text_splitter.split_documents(documents)
return documents
if __name__ == "__main__":
documents = load_data()
documents = split_data(documents)
if not os.path.exists("output"):
os.mkdir("output")
pickle_object(documents, "output/documents.bin")
print("saved to output/documents.bin")
| [] |
2024-01-10 | dbspaceLab/dbspace | src~dbspace~signal~PAC~PyPAC.py | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 25 17:11:22 2015
@author: rali
Researcher: REHMAN ALI
Principal Investigator: ROBERT BUTERA
Graduate Collaborator: VINEET TIRUVADI
Neurolab at Georgia Institute of Technology
"""
import numpy as np
from scipy import signal as sig
from scipy.stats import entropy
from scipy.signal import coherence, welch
import matplotlib.pyplot as plt
from matplotlib import cm
from nitime.analysis.spectral import MorletWaveletAnalyzer
from nitime import timeseries as ts
# MUST INSTALL nitime: easy_install nitime
def CFCfilt(signal,freqForAmp,freqForPhase,fs,passbandRipl):
""" CFCFILT Returns a matrix of bandpass filtered LFP signals
USAGE: oscillations = CFCfilt(signal,freqForAmp,freqForPhase,fs,passbandRipl)
signal is the input LFP to be bandpassed, fs is the sampling rate
freqForAmp is a vector of center frequencies (frequency for amplitude)
freqForPhase is a vector of frequency for phase controlling bandwidth
passbandRipl is on a linear scale (not decibel): its preferred value is 0.02
oscillations is a matrix of complex-valued time-series (3D array):
rows correspond to frequency for phase
columns correspond to frequency for amplitude """
" Setting-up empty 3D array for bandpassed time-series "
numTimeSamples = np.size(signal);
frqAmpSize = np.size(freqForAmp);
frqPhaseSize = np.size(freqForPhase);
oscillations = np.zeros((frqPhaseSize,frqAmpSize,numTimeSamples),dtype=np.complex64);
" Linear Ripple to Decibel Ripple Conversion "
Rp = 40*np.log10((1+passbandRipl)/(1-passbandRipl));
" Variable Band-pass Filtering "
" First index varies bandwidth (frequency for phase) "
" Second index varies center frequency (frequency for amplitude) "
for jj in np.arange(frqPhaseSize):
for kk in np.arange(frqAmpSize):
freq = freqForAmp[kk]; # Center Frequency
delf = freqForPhase[jj]; # Bandwidth
if freq > 1.8*delf:
freqBand = np.array([freq-1.2*delf, freq+1.2*delf])/(fs/2);
bb, aa = sig.cheby1(3,Rp,freqBand,btype='bandpass');
else:
bb, aa = sig.cheby1(3,Rp,(freq+1.2*delf)/(fs/2));
oscillation = sig.filtfilt(bb,aa,signal);
oscillations[jj,kk,:] = sig.hilbert(oscillation);
print("Completed: Frequency for Phase [Hz] = "+str(delf)+ \
", Frequency for Amplitude [Hz] = "+str(freq));
return oscillations;
def morletCWT(signal,fs,frequencies,sd_rel,sd):
""" MORLETCWT Determines Complex Morlet Wavelet CWT Coefficients for Signal
USAGE: coefs = morletCWT(signal,Fb,Fc,frequencies)
signal is the input signal, fs is sampling rate (Hz)
frequencies are the center frequencies for the CWT
sd_rel is filter bandwidth as a fraction of the center frequencies (default 0.2)
sd is a list of sd_rel for each center frequency in frequencies """
signalObject = ts.TimeSeries(signal,sampling_rate=fs);
cwtMorletObject = MorletWaveletAnalyzer(signalObject,freqs=frequencies,sd_rel=sd_rel,sd=sd);
return np.array(cwtMorletObject.analytic);
def preCFCProc(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,bw,passbandRipl):
""" PRECFCPROC Uses variable-bandwidth bandpass filtering on signal
USAGE: oscilsForAmp, oscilsForPhase = preCFCProc(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,bw,passbandRipl)
sigForAmp is the input LFP to be analyzed for amplitude
sigForPhase is the input LFP to be analyzed for phase
freqForAmp is a vector of center frequencies (frequency for amplitude)
freqForPhase is a vector of frequency for phase controlling bandwidth
fs is sampling rate (Hz), bw is the bandwidth of the bandpass filters for phase (typically 4.5 Hz)
passbandRipl is on a linear scale (not decibel): its preferred value is 0.02 """
oscilsAmpMod = CFCfilt(sigForAmp,freqForAmp,freqForPhase,fs,passbandRipl);
oscilsForPhase = CFCfilt(sigForPhase,freqForPhase,np.array([bw]),fs,passbandRipl);
return oscilsAmpMod, oscilsForPhase;
def preCWTProc(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,sd_rel_phase,sd_rel_amp):
""" PRECWTProc Determines Complex Morlet Wavelet CWT Coefficients for Signals
USAGE: coefsForAmp, coefsForPhase = preCWTProc(sigForAmp,sigForPhase,
freqForAmp,freqForPhase,fs,sd_rel,sd_ffa,sd_ffp)
sigForAmp is the input LFP to be analyzed for amplitude
sigForPhase is the input LFP to be analyzed for phase
freqForAmp is a vector of center frequencies (frequency for amplitude)
freqForPhase is a vector of frequency for phase controlling bandwidth
fs is sampling rate (Hz)
sd_rel_phase is filter bandwidth given as a fraction of the freqForPhase (default 0.2)
sd_rel_amp is filter bandwidth given as a fraction of freqForPhase in order
to enforce variable-bandwidth bandpass filtering around freqForAmp
(freqForAmp +/- sd_rel_amp * freqForPhase)"""
coefsForPhase = morletCWT(sigForPhase,fs,freqForPhase,sd_rel_phase,None);
coefsForAmp = np.zeros((sigForAmp.size,freqForAmp.size,freqForPhase.size),dtype=np.complex64);
for kk in np.arange(freqForPhase.size):
sd_ffa = 2*(freqForPhase[kk]/freqForAmp)*sd_rel_amp;
coefsForAmp[:,:,kk] = morletCWT(sigForAmp,fs,freqForAmp,None,sd_ffa);
print("Completed: Frequency for Phase [Hz] = "+str(freqForPhase[kk]));
return coefsForAmp, coefsForPhase
def comodShow(freqForAmp,freqForPhase,MIs):
""" COMODSHOW Displays the comodulogram
USAGE: comodShow(freqForAmp,freqForPhase,MIs)
freqForAmp is a vector of center frequencies (frequency for amplitude)
freqForPhase is a vector of frequency for phase controlling bandwidth
MIs is the modulation indices (columns--freqForPhase; rows--freqForAmp) """
dfp = np.mean(np.diff(freqForPhase));
dfa = np.mean(np.diff(freqForAmp));
comodplt = plt.imshow(MIs.transpose(), interpolation='nearest',
extent=[np.min(freqForPhase)-dfp/2,np.max(freqForPhase)+dfp/2,
np.min(freqForAmp)-dfa/2,np.max(freqForAmp)+dfa/2],
origin = 'lower', cmap = cm.jet, aspect = 'auto');
plt.xlabel('Frequency for Phase [Hz]');
plt.ylabel('Frequency for Amplitude [Hz]');
plt.colorbar();
return comodplt;
def GenLinMod(oscAmpMod,oscForPhase,freqForAmp,freqForPhase):
""" GENLINMOD Calculates comulolograms based on Generalized Linear Model
USAGE: MIs = GenLinMod(oscAmpMod,oscForPhase,freqForAmp,freqForPhase)
oscAmpMod is a cell matrix of time-series oscillations bandpassed
around freqForAmp (vector) with bandwidth specified by freqForPhase (vector).
oscForPhase is a row cell vector of time-series oscillations bandpassed
around freqForPhase with some small bandwidth """
" Applying Generalized Linear Model-based CFC to Oscillations Data "
ModCorr = np.zeros((np.size(freqForPhase),np.size(freqForAmp)));
" Phase will change each row. Amplitude will change each column "
frqAmpSize = np.size(freqForAmp);
frqPhaseSize = np.size(freqForPhase);
for cc in np.arange(frqAmpSize):
for rr in np.arange(frqPhaseSize):
ampOsc = np.abs(oscAmpMod[rr,cc,:]);
phaseOsc = np.angle(oscForPhase[0,rr,:]);
X = np.matrix(np.column_stack((np.cos(phaseOsc),
np.sin(phaseOsc), np.ones(np.size(phaseOsc)))));
B = np.linalg.inv((np.transpose(X)*X))* \
np.transpose(X)*np.transpose(np.matrix(ampOsc));
ampOscTrend = X*B;
ampOscResid = ampOsc.flatten()-np.array(ampOscTrend).flatten();
rsq = 1-np.var(ampOscResid)/np.var(ampOsc);
ModCorr[rr,cc] = np.sqrt(rsq);
delf = freqForPhase[rr]; ctrfreq = freqForAmp[cc];
print("Completed: Frequency for Phase [Hz] = "+str(delf)+ \
", Frequency for Amplitude [Hz] = "+str(ctrfreq));
MIs = np.arctanh(ModCorr);
return MIs;
def GenLinModCWT(coefsForAmp,coefsForPhase,freqForAmp,freqForPhase):
""" GENLINMODCWT Calculates comulolograms based on Generalized Linear Model
USAGE: MIs = GenLinModCWT(coefsForAmp,coefsForPhase,freqForAmp,freqForPhase)
coefsForAmp is a cell matrix of time-series oscillations bandpassed
around freqForAmp (vector) with bandwidth specified by freqForPhase (vector).
coefsForPhase is a row cell vector of time-series oscillations bandpassed
around freqForPhase with some small bandwidth """
" Applying Generalized Linear Model-based CFC to Oscillations Data "
ModCorr = np.zeros((np.size(freqForPhase),np.size(freqForAmp)));
" Phase will change each row. Amplitude will change each column "
frqAmpSize = np.size(freqForAmp);
frqPhaseSize = np.size(freqForPhase);
for cc in np.arange(frqAmpSize):
for rr in np.arange(frqPhaseSize):
ampOsc = np.abs(coefsForAmp[:,cc,rr]);
phaseOsc = np.angle(coefsForPhase[:,rr]);
X = np.matrix(np.column_stack((np.cos(phaseOsc),
np.sin(phaseOsc), np.ones(np.size(phaseOsc)))));
B = np.linalg.inv((np.transpose(X)*X))* \
np.transpose(X)*np.transpose(np.matrix(ampOsc));
ampOscTrend = X*B;
ampOscResid = ampOsc.flatten()-np.array(ampOscTrend).flatten();
rsq = 1-np.var(ampOscResid)/np.var(ampOsc);
ModCorr[rr,cc] = np.sqrt(rsq);
delf = freqForPhase[rr]; ctrfreq = freqForAmp[cc];
print("Completed: Frequency for Phase [Hz] = "+str(delf)+ \
", Frequency for Amplitude [Hz] = "+str(ctrfreq));
MIs = np.arctanh(ModCorr);
return MIs;
def GLMcomod(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,bw=4.5,passbandRipl=0.02):
""" GLMCOMOD Generates a Generalized-Linear-Model Based Comodulogram
USAGE: MIs, comodplt = GLMcomod(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,bw,passbandRipl,option)
sigForAmp is the input LFP to be analyzed for amplitude
sigForPhase is the input LFP to be analyzed for phase
freqForAmp is a vector of center frequencies (frequency for amplitude)
freqForPhase is a vector of frequency for phase controlling bandwidth
fs is sampling rate (Hz), bw is the bandwidth of the bandpass filters typically (4.5 Hz)
passbandRipl is on a linear scale (not decibel): its preferred value is 0.02 """
oscAmpMod,oscForPhase = preCFCProc(sigForAmp,sigForPhase,freqForAmp,
freqForPhase,fs,bw=bw,passbandRipl=passbandRipl);
MIs = GenLinMod(oscAmpMod,oscForPhase,freqForAmp,freqForPhase);
comodplt = comodShow(freqForAmp,freqForPhase,MIs);
plt.title("Generalized Linear Model (GLM)");
return MIs, comodplt;
def GLMcomodCWT(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,sd_rel_phase=0.2,sd_rel_amp=40):
""" GLMCOMODCWT Generates a Generalized-Linear-Model Based Comodulogram
USAGE: MIs, comodplt = GLMcomodCWT(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,bw,passbandRipl,option)
sigForAmp is the input LFP to be analyzed for amplitude
sigForPhase is the input LFP to be analyzed for phase
freqForAmp is a vector of center frequencies (frequency for amplitude)
freqForPhase is a vector of frequency for phase controlling bandwidth
fs is sampling rate (Hz)
sd_rel_phase is filter bandwidth given as a fraction of the freqForPhase (default 0.2)
sd_rel_amp is filter bandwidth given as a fraction of freqForPhase in order
to enforce variable-bandwidth bandpass filtering around freqForAmp
(freqForAmp +/- sd_rel_amp * freqForPhase)"""
coefsForAmp, coefsForPhase = preCWTProc(sigForAmp,sigForPhase,freqForAmp,
freqForPhase,fs,sd_rel_phase,sd_rel_amp)
MIs = GenLinModCWT(coefsForAmp,coefsForPhase,freqForAmp,freqForPhase);
comodplt = comodShow(freqForAmp,freqForPhase,MIs);
plt.title("Generalized Linear Model (GLM)");
return MIs, comodplt;
def EnvSigCorr(oscAmpMod,oscForPhase,freqForAmp,freqForPhase):
""" ENVSIGCORR Calculates comulolograms based on envelope-to-signal correlation
USAGE: MIs = EnvSigCorr(oscAmpMod,oscForPhase,freqForAmp,freqForPhase)
oscAmpMod is a cell matrix of time-series oscillations bandpassed
around freqForAmp (vector) with bandwidth specified by freqForPhase (vector).
oscForPhase is a row cell vector of time-series oscillations bandpassed
around freqForPhase with some small bandwidth """
" Applying Envelope-to-Signal-Correlation based CFC to Oscillations Data "
ModCorr = np.zeros((np.size(freqForPhase),np.size(freqForAmp)));
" Phase will change each row. Amplitude will change each column "
frqAmpSize = np.size(freqForAmp);
frqPhaseSize = np.size(freqForPhase);
for cc in np.arange(frqAmpSize):
for rr in np.arange(frqPhaseSize):
ampOsc = np.abs(oscAmpMod[rr,cc,:]);
phaseOsc = np.real(oscForPhase[0,rr,:]);
ModCorr[rr,cc] = np.corrcoef(ampOsc,phaseOsc)[0,1];
delf = freqForPhase[rr]; ctrfreq = freqForAmp[cc];
print("Completed: Frequency for Phase [Hz] = "+str(delf)+ \
", Frequency for Amplitude [Hz] = "+str(ctrfreq));
MIs = np.arctanh(np.abs(ModCorr));
return MIs;
def EnvSigCorrCWT(coefsForAmp,coefsForPhase,freqForAmp,freqForPhase):
""" ENVSIGCORR Calculates comulolograms based on envelope-to-signal correlation
USAGE: MIs = EnvSigCorr(oscAmpMod,oscForPhase,freqForAmp,freqForPhase)
coefsForAmp is a cell matrix of time-series oscillations bandpassed
around freqForAmp (vector) with bandwidth specified by freqForPhase (vector).
coefsForPhase is a row cell vector of time-series oscillations bandpassed
around freqForPhase with some small bandwidth """
" Applying Envelope-to-Signal-Correlation based CFC to Oscillations Data "
ModCorr = np.zeros((np.size(freqForPhase),np.size(freqForAmp)));
" Phase will change each row. Amplitude will change each column "
frqAmpSize = np.size(freqForAmp);
frqPhaseSize = np.size(freqForPhase);
for cc in np.arange(frqAmpSize):
for rr in np.arange(frqPhaseSize):
ampOsc = np.abs(coefsForAmp[:,cc,rr]);
phaseOsc = np.angle(coefsForPhase[:,rr]);
ModCorr[rr,cc] = np.corrcoef(ampOsc,phaseOsc)[0,1];
delf = freqForPhase[rr]; ctrfreq = freqForAmp[cc];
print("Completed: Frequency for Phase [Hz] = "+str(delf)+ \
", Frequency for Amplitude [Hz] = "+str(ctrfreq));
MIs = np.arctanh(np.abs(ModCorr));
return MIs;
def ESCcomod(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,bw=4.5,passbandRipl=0.02):
""" ESCCOMOD Generates a Envelope-to-Signal Correlation-Based Comodulogram
USAGE: MIs, comodplt = ESCcomod(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,bw,passbandRipl,option)
sigForAmp is the input LFP to be analyzed for amplitude
sigForPhase is the input LFP to be analyzed for phase
freqForAmp is a vector of center frequencies (frequency for amplitude)
freqForPhase is a vector of frequency for phase controlling bandwidth
fs is sampling rate (Hz), bw is the bandwidth of the bandpass filters typically (4.5 Hz)
passbandRipl is on a linear scale (not decibel): its preferred value is 0.02 """
oscAmpMod,oscForPhase = preCFCProc(sigForAmp,sigForPhase,freqForAmp,
freqForPhase,fs,bw=bw,passbandRipl=passbandRipl);
MIs = EnvSigCorr(oscAmpMod,oscForPhase,freqForAmp,freqForPhase);
comodplt = comodShow(freqForAmp,freqForPhase,MIs);
plt.title("Envelope-to-Signal Correlation (ESC)");
return MIs, comodplt;
def ESCcomodCWT(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,sd_rel_phase=0.2,sd_rel_amp=40):
""" ESCCOMODCWT Generates a Envelope-to-Signal Correlation-Based Comodulogram
USAGE: MIs, comodplt = ESCcomodCWT(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,bw,passbandRipl,option)
sigForAmp is the input LFP to be analyzed for amplitude
sigForPhase is the input LFP to be analyzed for phase
freqForAmp is a vector of center frequencies (frequency for amplitude)
freqForPhase is a vector of frequency for phase controlling bandwidth
fs is sampling rate (Hz)
sd_rel_phase is filter bandwidth given as a fraction of the freqForPhase (default 0.2)
sd_rel_amp is filter bandwidth given as a fraction of freqForPhase in order
to enforce variable-bandwidth bandpass filtering around freqForAmp
(freqForAmp +/- sd_rel_amp * freqForPhase)"""
coefsForAmp, coefsForPhase = preCWTProc(sigForAmp,sigForPhase,freqForAmp,
freqForPhase,fs,sd_rel_phase,sd_rel_amp)
MIs = EnvSigCorrCWT(coefsForAmp,coefsForPhase,freqForAmp,freqForPhase);
comodplt = comodShow(freqForAmp,freqForPhase,MIs);
plt.title("Envelope-to-Signal Correlation (ESC)");
return MIs, comodplt;
def NormEnvSigCorr(oscAmpMod,oscForPhase,freqForAmp,freqForPhase):
""" NORMENVSIGCORR Calculates comulolograms based on normalized envelope-to-signal correlation
USAGE: MIs = NormEnvSigCorr(oscAmpMod,oscForPhase,freqForAmp,freqForPhase)
oscAmpMod is a cell matrix of time-series oscillations bandpassed
around freqForAmp (vector) with bandwidth specified by freqForPhase (vector).
oscForPhase is a row cell vector of time-series oscillations bandpassed
around freqForPhase with some small bandwidth """
" Applying Envelope-to-Signal-Correlation based CFC to Oscillations Data "
ModCorr = np.zeros((np.size(freqForPhase),np.size(freqForAmp)));
" Phase will change each row. Amplitude will change each column "
frqAmpSize = np.size(freqForAmp);
frqPhaseSize = np.size(freqForPhase);
for cc in np.arange(frqAmpSize):
for rr in np.arange(frqPhaseSize):
ampOsc = np.abs(oscAmpMod[rr,cc,:]);
phaseOsc = np.angle(oscForPhase[0,rr,:]);
ModCorr[rr,cc] = np.corrcoef(ampOsc,np.cos(phaseOsc))[0,1];
delf = freqForPhase[rr]; ctrfreq = freqForAmp[cc];
print("Completed: Frequency for Phase [Hz] = "+str(delf)+ \
", Frequency for Amplitude [Hz] = "+str(ctrfreq));
MIs = np.arctanh(np.abs(ModCorr));
return MIs;
def NormEnvSigCorrCWT(coefsForAmp,coefsForPhase,freqForAmp,freqForPhase):
""" NORMENVSIGCORRCWT Calculates comulolograms based on normalized envelope-to-signal correlation
USAGE: MIs = NormEnvSigCorrCWT(oscAmpMod,oscForPhase,freqForAmp,freqForPhase)
coefsForAmp is a cell matrix of time-series oscillations bandpassed
around freqForAmp (vector) with bandwidth specified by freqForPhase (vector).
coefsForPhase is a row cell vector of time-series oscillations bandpassed
around freqForPhase with some small bandwidth """
" Applying Envelope-to-Signal-Correlation based CFC to Oscillations Data "
ModCorr = np.zeros((np.size(freqForPhase),np.size(freqForAmp)));
" Phase will change each row. Amplitude will change each column "
frqAmpSize = np.size(freqForAmp);
frqPhaseSize = np.size(freqForPhase);
for cc in np.arange(frqAmpSize):
for rr in np.arange(frqPhaseSize):
ampOsc = np.abs(coefsForAmp[:,cc,rr]);
phaseOsc = np.angle(coefsForPhase[:,rr]);
ModCorr[rr,cc] = np.corrcoef(ampOsc,np.cos(phaseOsc))[0,1];
delf = freqForPhase[rr]; ctrfreq = freqForAmp[cc];
print("Completed: Frequency for Phase [Hz] = "+str(delf)+ \
", Frequency for Amplitude [Hz] = "+str(ctrfreq));
MIs = np.arctanh(np.abs(ModCorr));
return MIs;
def NESCcomod(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,bw=4.5,passbandRipl=0.02):
""" NESCCOMOD Generates a Normalized ESC-Based Comodulogram
USAGE: MIs, comodplt = NESCcomod(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,bw,passbandRipl,option)
sigForAmp is the input LFP to be analyzed for amplitude
sigForPhase is the input LFP to be analyzed for phase
freqForAmp is a vector of center frequencies (frequency for amplitude)
freqForPhase is a vector of frequency for phase controlling bandwidth
fs is sampling rate (Hz), bw is the bandwidth of the bandpass filters typically (4.5 Hz)
passbandRipl is on a linear scale (not decibel): its preferred value is 0.02 """
oscAmpMod,oscForPhase = preCFCProc(sigForAmp,sigForPhase,freqForAmp,
freqForPhase,fs,bw=bw,passbandRipl=passbandRipl);
MIs = NormEnvSigCorr(oscAmpMod,oscForPhase,freqForAmp,freqForPhase);
comodplt = comodShow(freqForAmp,freqForPhase,MIs);
plt.title("Normalized Envelope-to-Signal Correlation (NESC)");
return MIs, comodplt;
def NESCcomodCWT(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,sd_rel_phase=0.2,sd_rel_amp=40):
""" NESCCOMODCWT Generates a Envelope-to-Signal Correlation-Based Comodulogram
USAGE: MIs, comodplt = NESCcomodCWT(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,bw,passbandRipl,option)
sigForAmp is the input LFP to be analyzed for amplitude
sigForPhase is the input LFP to be analyzed for phase
freqForAmp is a vector of center frequencies (frequency for amplitude)
freqForPhase is a vector of frequency for phase controlling bandwidth
fs is sampling rate (Hz)
sd_rel_phase is filter bandwidth given as a fraction of the freqForPhase (default 0.2)
sd_rel_amp is filter bandwidth given as a fraction of freqForPhase in order
to enforce variable-bandwidth bandpass filtering around freqForAmp
(freqForAmp +/- sd_rel_amp * freqForPhase)"""
coefsForAmp, coefsForPhase = preCWTProc(sigForAmp,sigForPhase,freqForAmp,
freqForPhase,fs,sd_rel_phase,sd_rel_amp)
MIs = NormEnvSigCorrCWT(coefsForAmp,coefsForPhase,freqForAmp,freqForPhase);
comodplt = comodShow(freqForAmp,freqForPhase,MIs);
plt.title("Normalized Envelope-to-Signal Correlation (NESC)");
return MIs, comodplt;
def PrinCompAnal(MultChannIn):
""" PRINCOMPANAL Outputs Principal Component Analysis of Multichannel Input
USAGE: [PrinVals PrinComps] = PrinCompAnal(MultChannIn)
Each row of MultChannIn is a separate channel, each column represents a
synchronous sampling of all channels at a time point.
PrinVals is a column vector containing the eigenvalues of the covariance matrix for MultChannIn.
PrinComps is a matrix whose columns are the principal components of the MultChannIn data."""
MultChannInCov = np.cov(MultChannIn);
PrinVals, PrinComps = np.linalg.eig(MultChannInCov);
return PrinVals, PrinComps
def zScoredMVL(TwoChannIn):
""" ZSCOREDMVL Give a z-score to the mean vector based on PCA
USAGE: zScore = zScoredMVL(MultChannIn)
Each row of MultChannIn is a separate channel, each column represents a
synchronous sampling of all channels at a time point.
zScore is the z-score of the mean column vector in MultChannIn. """
XPrinVals, XPrinComps = PrinCompAnal(TwoChannIn);
meanVect = np.array([np.mean(TwoChannIn[0,:]), np.mean(TwoChannIn[1,:])]);
theta = np.arccos(np.dot(meanVect,XPrinComps[0,:])/np.linalg.norm(meanVect));
R = np.sqrt((np.sqrt(XPrinVals[0])*np.cos(theta))**2+(np.sqrt(XPrinVals[1])*np.sin(theta))**2);
zScore = np.linalg.norm(meanVect)/R;
return zScore
def zScoredMV_PCA(oscAmpMod,oscForPhase,freqForAmp,freqForPhase):
""" ZSCOREDMVCFC Calculates and displays the CFC Comulolograms based on inputs
USAGE: [MIs MVLs] = ZScoredMVCFC(oscAmpMod,oscForPhase,freqForAmp,freqForPhase)
MIs is the comodulogram based on the z-scored mean vector
MVLs is the comodulogram based on Canolty's mean vector length (MVL)
oscAmpMod is a cell matrix of time-series oscillations bandpassed
around freqForAmp with bandwidth specified by freqForPhase.
oscForPhase is a row cell vector of time-series oscillations bandpassed
around freqForPhase with some small bandwidth. """
" Applying Envelope-to-Signal-Correlation based CFC to Oscillation Data "
MIs = np.zeros((np.size(freqForPhase),np.size(freqForAmp)));
MVLs = np.zeros((np.size(freqForPhase),np.size(freqForAmp)));
" Phase will change each row. Amplitude will change each column "
frqAmpSize = np.size(freqForAmp);
frqPhaseSize = np.size(freqForPhase);
for cc in np.arange(frqAmpSize):
for rr in np.arange(frqPhaseSize):
ampOsc = np.abs(oscAmpMod[rr,cc,:]);
phaseOsc = np.angle(oscForPhase[0,rr,:]);
phasor = ampOsc*np.exp(1j*phaseOsc);
MVLs[rr,cc] = np.abs(np.mean(phasor));
phasorComponents = np.row_stack((np.real(phasor), np.imag(phasor)));
MIs[rr,cc] = zScoredMVL(phasorComponents);
delf = freqForPhase[rr]; ctrfreq = freqForAmp[cc];
print("Completed: Frequency for Phase [Hz] = "+str(delf)+ \
", Frequency for Amplitude [Hz] = "+str(ctrfreq));
return MIs, MVLs;
def zScoredMV_PCA_CWT(coefsForAmp,coefsForPhase,freqForAmp,freqForPhase):
""" ZSCOREDMVCFC Calculates and displays the CFC Comulolograms based on inputs
USAGE: [MIs MVLs] = ZScoredMVCFC(oscAmpMod,oscForPhase,freqForAmp,freqForPhase)
MIs is the comodulogram based on the z-scored mean vector
MVLs is the comodulogram based on Canolty's mean vector length (MVL)
coefsForAmp is a cell matrix of time-series oscillations bandpassed
around freqForAmp (vector) with bandwidth specified by freqForPhase (vector).
coefsForPhase is a row cell vector of time-series oscillations bandpassed
around freqForPhase with some small bandwidth """
" Applying Envelope-to-Signal-Correlation based CFC to Oscillation Data "
MIs = np.zeros((np.size(freqForPhase),np.size(freqForAmp)));
MVLs = np.zeros((np.size(freqForPhase),np.size(freqForAmp)));
" Phase will change each row. Amplitude will change each column "
frqAmpSize = np.size(freqForAmp);
frqPhaseSize = np.size(freqForPhase);
for cc in np.arange(frqAmpSize):
for rr in np.arange(frqPhaseSize):
ampOsc = np.abs(coefsForAmp[:,cc,rr]);
phaseOsc = np.angle(coefsForPhase[:,rr]);
phasor = ampOsc*np.exp(1j*phaseOsc);
MVLs[rr,cc] = np.abs(np.mean(phasor));
phasorComponents = np.row_stack((np.real(phasor), np.imag(phasor)));
MIs[rr,cc] = zScoredMVL(phasorComponents);
delf = freqForPhase[rr]; ctrfreq = freqForAmp[cc];
print("Completed: Frequency for Phase [Hz] = "+str(delf)+ \
", Frequency for Amplitude [Hz] = "+str(ctrfreq));
return MIs, MVLs;
def zScoreMVcomod(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,bw=4.5,passbandRipl=0.02):
""" ZSCOREMVCOMOD Generates a Mean Vector Length-Based Comodulogram
USAGE: MIs = zScoreMVcomod(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,bw,passbandRipl,option)
sigForAmp is the input LFP to be analyzed for amplitude
sigForPhase is the input LFP to be analyzed for phase
freqForAmp is a vector of center frequencies (frequency for amplitude)
freqForPhase is a vector of frequency for phase controlling bandwidth
fs is sampling rate (Hz), bw is the bandwidth of the bandpass filters typically (4.5 Hz)
passbandRipl is on a linear scale (not decibel): its preferred value is 0.02
option is either 'MVL' or 'Z-Score':
"MVL" gives the mean vector length based on Canolty's Work
"Z-Score" gives z-score of mean vector based on PCA """
oscAmpMod,oscForPhase = preCFCProc(sigForAmp,sigForPhase,freqForAmp,
freqForPhase,fs,bw=bw,passbandRipl=passbandRipl);
MIs, MVLs = zScoredMV_PCA(oscAmpMod,oscForPhase,freqForAmp,freqForPhase);
comodplt = comodShow(freqForAmp,freqForPhase,MIs);
plt.title("Principal Component Analysis (PCA)");
return MIs, MVLs, comodplt;
def zScoreMVcomodCWT(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,sd_rel_phase=0.2,sd_rel_amp=40):
""" ZSCOREMVCOMODCWT Generates a Mean Vector Length-Based Comodulogram
USAGE: MIs = zScoreMVcomodCWT(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,bw,passbandRipl,option)
sigForAmp is the input LFP to be analyzed for amplitude
sigForPhase is the input LFP to be analyzed for phase
freqForAmp is a vector of center frequencies (frequency for amplitude)
freqForPhase is a vector of frequency for phase controlling bandwidth
fs is sampling rate (Hz)
sd_rel_phase is filter bandwidth given as a fraction of the freqForPhase (default 0.2)
sd_rel_amp is filter bandwidth given as a fraction of freqForPhase in order
to enforce variable-bandwidth bandpass filtering around freqForAmp
(freqForAmp +/- sd_rel_amp * freqForPhase)"""
coefsForAmp, coefsForPhase = preCWTProc(sigForAmp,sigForPhase,freqForAmp,
freqForPhase,fs,sd_rel_phase,sd_rel_amp)
MIs, MVLs = zScoredMV_PCA_CWT(coefsForAmp,coefsForPhase,freqForAmp,freqForPhase);
comodplt = comodShow(freqForAmp,freqForPhase,MIs);
plt.title("Principal Component Analysis (PCA)");
return MIs, MVLs, comodplt;
def KullLiebDiv(P, Q = None):
""" KULLLEIBDIV Calculates the Kullback-Liebler Divergence of the probability vector P from the probability vector Q.
USAGE: KLDiv = KullLiebDiv(P,Q)
If Q is not given, it is assumed to be the uniform distribution of the length of P.
This function accepts two inputs methods: KLDiv(P,Q) or KLDiv(P) with Q implied as aforementioned. """
if Q == None: KLDiv = np.log(np.size(P)) - entropy(P);
else: KLDiv = entropy(P,Q);
return KLDiv;
def KLDivModIndex(P):
""" KLDIVMODINDEX determines the MI for an input Probability Vector P
USAGE: MI = KLDivModIndex(P)
Divides the Kullback-Liebler Divergence of P with respect to the uniform distribution
by natural log of the length of P which bounds the output between 0 and 1. """
return KullLiebDiv(P)/np.size(P);
def KullLeibBin(oscAmpMod,oscForPhase,freqForAmp,freqForPhase,n):
""" KULLLEIBBIN Calculates and displays the CFC Comulolograms based on inputs
USAGE: MIs = KullLeibBin(oscAmpMod,oscForPhase,freqForAmp,freqForPhase,n,option)
oscAmpMod is a cell matrix of time-series oscillations bandpassed
around freqForAmp with bandwidth specified by freqForPhase.
oscForPhase is a row cell vector of time-series oscillations bandpassed
around freqForPhase with some small bandwidth.
n is the number phasebins for the Kullback-Liebler Modulation Index. """
" Applying Kullback-Leibler Divergence-based CFC to Oscillation Data "
phaseBins = np.linspace(-np.pi,np.pi,n+1); highFreqAmplitude = np.zeros(n);
MIs = np.zeros((np.size(freqForPhase),np.size(freqForAmp)));
" Phases will change each row. Amplitudes will change each column "
frqAmpSize = np.size(freqForAmp); frqPhaseSize = np.size(freqForPhase);
for cc in np.arange(frqAmpSize):
for rr in np.arange(frqPhaseSize):
amplitudes = np.abs(oscAmpMod[rr,cc,:]);
phases = np.angle(oscForPhase[0,rr,:]);
for kk in np.arange(n):
amps = amplitudes[(phases > phaseBins[kk]) & (phases <= phaseBins[kk+1])];
highFreqAmplitude[kk] = np.mean(amps);
MIs[rr,cc] = KLDivModIndex(highFreqAmplitude);
delf = freqForPhase[rr]; ctrfreq = freqForAmp[cc];
print("Completed: Frequency for Phase [Hz] = "+str(delf)+ \
", Frequency for Amplitude [Hz] = "+str(ctrfreq));
return MIs;
def KLDivMIcomod(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,bw=4.5,passbandRipl=0.02,n=36):
""" KLDIVMICOMOD Generates a Kullback-Liebler-Based Comodulogram
USAGE: MIs = KLDivMIcomod(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,bw,passbandRipl,n,option)
sigForAmp is the input LFP to be analyzed for amplitude
sigForPhase is the input LFP to be analyzed for phase
freqForAmp is a vector of center frequencies (frequency for amplitude)
freqForPhase is a vector of frequency for phase controlling bandwidth
fs is sampling rate (Hz), bw is the bandwidth of the bandpass filters typically (4.5 Hz)
passbandRipl is on a linear scale (not decibel): its preferred value is 0.02
n is the number phasebins for the Kullback-Liebler Modulation Index(MI). """
oscAmpMod,oscForPhase = preCFCProc(sigForAmp,sigForPhase,freqForAmp,
freqForPhase,fs,bw=bw,passbandRipl=passbandRipl);
MIs = KullLeibBin(oscAmpMod,oscForPhase,freqForAmp,freqForPhase,n);
comodplt = comodShow(freqForAmp,freqForPhase,MIs);
plt.title("Kullback-Liebler Divergence (KLDiv)");
return MIs, comodplt;
def HeightsRatioBin(oscAmpMod,oscForPhase,freqForAmp,freqForPhase,n,method):
""" HEIGHTSRATIOBIN Calculates and displays the CFC Comulolograms based on Heights Ratio
MIs = HeightsRatioBin(oscAmpMod,oscForPhase,freqForAmp,freqForPhase,n,method,option)
oscAmpMod is a cell matrix of time-series oscillations bandpassed
around freqForAmp with bandwidth specified by freqForPhase.
oscForPhase is a row cell vector of time-series oscillations bandpassed
around freqForPhase with some small bandwidth
n is the number phasebins for the Heights-Ratio Modulation Index(MI)
method: there are 3 ways of doing this
1) 'Lakatos' -- h_max/h_min
2) 'Tort' -- (h_max - h_min)/h_max;
3) 'AM Radio' --- (h_max - h_min)/(h_max + h_min) """
" Applying Kullback-Leibler Divergence-based CFC to Oscillation Data "
phaseBins = np.linspace(-np.pi,np.pi,n+1); highFreqAmplitude = np.zeros(n);
MIs = np.zeros((np.size(freqForPhase),np.size(freqForAmp)));
" Phases will change each row. Amplitudes will change each column "
frqAmpSize = np.size(freqForAmp); frqPhaseSize = np.size(freqForPhase);
for cc in np.arange(frqAmpSize):
for rr in np.arange(frqPhaseSize):
amplitudes = np.abs(oscAmpMod[rr,cc,:]);
phases = np.angle(oscForPhase[0,rr,:]);
for kk in np.arange(n):
amps = amplitudes[(phases > phaseBins[kk]) & (phases <= phaseBins[kk+1])];
highFreqAmplitude[kk] = np.mean(amps);
if method == 'AM Radio':
MIs[rr,cc] = (max(highFreqAmplitude)-min(highFreqAmplitude)) \
/ (max(highFreqAmplitude)+min(highFreqAmplitude));
if method == 'Tort':
MIs[rr,cc] = (max(highFreqAmplitude)-min(highFreqAmplitude)) \
/ (max(highFreqAmplitude));
if method == 'Lakatos':
MIs[rr,cc] = (max(highFreqAmplitude))/(min(highFreqAmplitude));
delf = freqForPhase[rr]; ctrfreq = freqForAmp[cc];
print("Completed: Frequency for Phase [Hz] = "+str(delf)+ \
", Frequency for Amplitude [Hz] = "+str(ctrfreq));
return MIs;
def HRcomod(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,bw=4.5,passbandRipl=0.02,n=36,method='AM Radio'):
""" HRCOMOD Generates a Heights Ratio-Based Comodulogram
USAGE: MIs = HRcomod(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,bw,passbandRipl,n,method,option)
sigForAmp is the input LFP to be analyzed for amplitude
sigForPhase is the input LFP to be analyzed for phase
freqForAmp is a vector of center frequencies (frequency for amplitude)
freqForPhase is a vector of frequency for phase controlling bandwidth
fs is sampling rate (Hz), bw is the bandwidth of the bandpass filters typically (4.5 Hz)
passbandRipl is on a linear scale (not decibel): its preferred value is 0.02
n is the number phasebins for the Heights-Ratio Modulation Index (MI)
method: there are 3 ways of doing this
1) 'Lakatos' -- h_max/h_min
2) 'Tort' -- (h_max - h_min)/h_max;
3) 'AM Radio' --- (h_max - h_min)/(h_max + h_min) """
oscAmpMod,oscForPhase = preCFCProc(sigForAmp,sigForPhase,freqForAmp,
freqForPhase,fs,bw=bw,passbandRipl=passbandRipl);
MIs = HeightsRatioBin(oscAmpMod,oscForPhase,freqForAmp,freqForPhase,n,method);
comodplt = comodShow(freqForAmp,freqForPhase,MIs);
plt.title("Heights Ratio (HR)");
return MIs, comodplt;
def PhaseLocVal(oscAmpMod,oscForPhase,freqForAmp,freqForPhase):
""" PHASELOCVAL Calculates comulolograms based on Phase Locking Value
USAGE: MIs = PhaseLocVal(oscAmpMod,oscForPhase,freqForAmp,freqForPhase)
oscAmpMod is a cell matrix of time-series oscillations bandpassed
around freqForAmp (vector) with bandwidth specified by freqForPhase (vector).
oscForPhase is a row cell vector of time-series oscillations bandpassed
around freqForPhase with some small bandwidth """
" Applying Generalized Linear Model-based CFC to Oscillations Data "
PLVs = np.zeros((np.size(freqForPhase),np.size(freqForAmp)));
" Phase will change each row. Amplitude will change each column "
frqAmpSize = np.size(freqForAmp);
frqPhaseSize = np.size(freqForPhase);
for cc in np.arange(frqAmpSize):
for rr in np.arange(frqPhaseSize):
ampOsc = np.abs(oscAmpMod[rr,cc,:]);
phaseOsc = np.angle(oscForPhase[0,rr,:]);
ampOscPhase = np.angle(sig.hilbert(ampOsc));
PLVs[rr,cc] = np.abs(np.mean(np.exp(1j*(phaseOsc - ampOscPhase))));
delf = freqForPhase[rr]; ctrfreq = freqForAmp[cc];
print("Completed: Frequency for Phase [Hz] = "+str(delf)+ \
", Frequency for Amplitude [Hz] = "+str(ctrfreq));
MIs = np.arcsin(2*PLVs-1);
return MIs;
def PLVcomod(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,bw=4.5,passbandRipl=0.02):
""" PLVCOMOD Generates a Phase-Locking-Value Based Comodulogram
USAGE: MIs, comodplt = PLVcomod(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,bw,passbandRipl,option)
sigForAmp is the input LFP to be analyzed for amplitude
sigForPhase is the input LFP to be analyzed for phase
freqForAmp is a vector of center frequencies (frequency for amplitude)
freqForPhase is a vector of frequency for phase controlling bandwidth
fs is sampling rate (Hz), bw is the bandwidth of the bandpass filters typically (4.5 Hz)
passbandRipl is on a linear scale (not decibel): its preferred value is 0.02 """
oscAmpMod,oscForPhase = preCFCProc(sigForAmp,sigForPhase,freqForAmp,
freqForPhase,fs,bw=bw,passbandRipl=passbandRipl);
MIs = PhaseLocVal(oscAmpMod,oscForPhase,freqForAmp,freqForPhase);
comodplt = comodShow(freqForAmp,freqForPhase,MIs);
plt.title("Phase Locking Value (PLV)");
return MIs, comodplt;
def mscohere(x,y,Fs,f,window):
""" MSCOHERE Returns the Magnitude-Squared Coherence at Specified Frequencies:
USAGE = Cxy = mscohere(x,y,Fs,f,window)
x and y are input signals
Fs is the sampling frequency in [Hz]
f are the specified frequencies over which Cxy is calculated
window is the windowing function (i.e. 'flattop', 'blackmanharris', 'hamming', 'hanning', etc.) """
nfft = int(2**(np.floor(np.log2(np.size(y)))-1));
noverlap = np.floor(nfft*0.99);
ff, Cxy = coherence(x,y,fs=Fs,nperseg=nfft,noverlap=noverlap,window=window);
return np.exp(np.interp(f,ff,np.log(Cxy)));
def CVcomod(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,passbandRipl=0.02,window='flattop',bw=None):
""" CVCOMODULOGRAM Generates a Coherence Value-Based Comodulogram
USAGE: MIs = CVcomodulogram(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,passbandRipl,option)
sigForAmp is the input LFP to be analyzed for amplitude
sigForPhase is the input LFP to be analyzed for phase
freqForAmp is a vector of center frequencies (frequency for amplitude)
freqForPhase is a vector of frequency for phase controlling bandwidth
fs is sampling rate (Hz)
passbandRipl is on a linear scale (not decibel): its preferred value is 0.02
window is the windowing function (i.e. 'flattop', 'blackmanharris', 'hamming', 'hanning', etc.)
bw is the bandwidth of the filter for sigForAmp (default is max of freqForPhase)"""
if bw == None: bandwidth = max(freqForPhase);
else: bandwidth = bw;
oscAmpMod = CFCfilt(sigForAmp,freqForAmp,np.array([bandwidth]),fs,passbandRipl);
CVs = np.zeros((np.size(freqForPhase),np.size(freqForAmp)));
for cc in np.arange(np.size(freqForAmp)):
ampOsc = np.abs(oscAmpMod[-1,cc,:]); ctrfreq = freqForAmp[cc];
CVs[:,cc] = mscohere(ampOsc,sigForPhase,fs,freqForPhase,window);
print("Completed: Frequency for Amplitude [Hz] = "+str(ctrfreq));
MIs = np.arctanh(CVs);
comodplt = comodShow(freqForAmp,freqForPhase,MIs);
plt.title("Coherence Value (CV)");
return MIs, comodplt;
def powSpecDens(x,Fs,f,window):
""" POWSPECDENS Returns the Magnitude-Squared Coherence at Specified Frequencies:
USAGE = Cxy = powSpecDens(x,Fs,f,window)
x and y are input signals
Fs is the sampling frequency in [Hz]
f are the specified frequencies over which Cxy is calculated
window is the windowing function (i.e. 'flattop', 'blackmanharris', 'hamming', 'hanning', etc.) """
nfft = int(2**(np.floor(np.log2(np.size(x)))-1)); noverlap = np.floor(nfft*0.99); # nfft - 1;
ff, Pxx = welch(x,fs=Fs,nperseg=nfft,noverlap=noverlap,window=window);
return np.exp(np.interp(f,ff,np.log(Pxx)));
def PSDcomod(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,passbandRipl=0.02,window='flattop',bw=None):
""" PSDCOMODULOGRAM Generates a Power Spectral Density-Based Comodulogram
USAGE: MIs = CVcomodulogram(sigForAmp,sigForPhase,freqForAmp,freqForPhase,fs,passbandRipl,option)
sigForAmp is the input LFP to be analyzed for amplitude
sigForPhase is the input LFP to be analyzed for phase
freqForAmp is a vector of center frequencies (frequency for amplitude)
freqForPhase is a vector of frequency for phase controlling bandwidth
fs is sampling rate (Hz)
passbandRipl is on a linear scale (not decibel): its preferred value is 0.02
window is the windowing function (i.e. 'flattop', 'blackmanharris', 'hamming', 'hanning', etc.)
bw is the bandwidth of the filter for sigForAmp (default is max of freqForPhase)"""
if bw == None: bandwidth = max(freqForPhase);
else: bandwidth = bw;
oscAmpMod = CFCfilt(sigForAmp,freqForAmp,np.array([bandwidth]),fs,passbandRipl);
MIs = np.zeros((np.size(freqForPhase),np.size(freqForAmp)));
for cc in np.arange(np.size(freqForAmp)):
ampOsc = np.abs(oscAmpMod[-1,cc,:]); ctrfreq = freqForAmp[cc];
MIs[:,cc] = powSpecDens(ampOsc,fs,freqForPhase,window);
print("Completed: Frequency for Amplitude [Hz] = "+str(ctrfreq));
comodplt = comodShow(freqForAmp,freqForPhase,MIs);
plt.title("Power Spectral Density (PSD)");
return MIs, comodplt;
# CHANGE CWT BASED COMODULOGRAM AS FOLLOWS:
# CURRENTLY CWT USES NO VARIABLE-BANDWIDTH CONSIDERATIONS
# make sd_ffa depend on freqForPhase to somehow enforce this
# Later Translate to MATLAB
| [] |
2024-01-10 | qeternity/guidance | guidance~llms~_transformers.py | import os
import time
import collections
import regex
import pygtrie
import queue
import threading
import collections.abc
from ._llm import LLM, LLMSession, SyncSession
class Transformers(LLM):
""" A HuggingFace transformers language model with Guidance support.
"""
llm_name: str = "transformers"
def __init__(self, model=None, tokenizer=None, caching=True, token_healing=True, acceleration=True, \
temperature=0.0, device=None, **kwargs):
super().__init__()
# fill in default model value
if model is None:
model = os.environ.get("TRANSFORMERS_MODEL", None)
if model is None:
try:
with open(os.path.expanduser('~/.transformers_model'), 'r') as file:
model = file.read().replace('\n', '')
except:
pass
self.model_obj, self.tokenizer = self._model_and_tokenizer(model, tokenizer, **kwargs)
self.model_name = model if isinstance(model, str) else model.__class__.__name__
self.caching = caching
self.current_time = time.time()
self.call_history = collections.deque()
self.temperature = temperature
self.token_healing = token_healing
self.acceleration = acceleration
if device is not None: # set the device if requested
self.model_obj = self.model_obj.to(device)
self.device = self.model_obj.device # otherwise note the current device
self._token_prefix_map = self._build_token_prefix_map(model)
def new_string_builder(self, starting_ids=None):
return TransformersStringBuilder(self.tokenizer, starting_ids)
def prefix_matches(self, prefix):
""" Return the list of tokens that match the given prefix.
"""
return [v for arr in self._token_prefix_map.values(prefix=prefix) for v in arr]
def encode(self, string, **kwargs):
return self.tokenizer.encode(string, **kwargs)
def decode(self, tokens, **kwargs):
return self.tokenizer.decode(tokens, **kwargs)
def id_to_token(self, id):
return self.tokenizer.convert_ids_to_tokens([id])[0]
def token_to_id(self, token):
return self.tokenizer.convert_tokens_to_ids([token])[0]
def end_of_text(self):
return self.tokenizer.eos_token
@staticmethod
def role_start(role):
raise NotImplementedError("In order to use chat role tags you need to use a chat-specific subclass of Transformers for your LLM from guidance.transformers.*!")
def _build_token_prefix_map(self, model_name):
""" Build a map from token to index.
"""
token_map = pygtrie.CharTrie()
for i in range(self.tokenizer.vocab_size):
s = self.id_to_token(i)
if s in token_map:
token_map[s].append(i) # handle duplicate token encodings... (GPT2 BPE has this oddly enough)
else:
token_map[s] = [i]
return token_map
def _model_and_tokenizer(self, model, tokenizer, **kwargs):
# intantiate the model and tokenizer if needed
if isinstance(model, str):
# make sure transformers is installed
try:
import transformers
except:
raise Exception("Please install transformers with `pip install transformers` in order to use guidance.llms.Transformers!")
if tokenizer is None:
tokenizer = transformers.AutoTokenizer.from_pretrained(model, **kwargs)
model = transformers.AutoModelForCausalLM.from_pretrained(model, **kwargs)
assert tokenizer is not None, "You must give a tokenizer object when you provide a model object (as opposed to just a model name)!"
return model, tokenizer
def session(self, asynchronous=False):
if asynchronous:
return TransformersSession(self)
else:
return SyncSession(TransformersSession(self))
class TransformersSession(LLMSession):
def __init__(self, llm):
super().__init__(llm)
self._past_key_values = None
self._prefix_cache = []
def __enter__(self):
# we only need decorators if we are using token acceleration
if self.llm.acceleration:
# decorate the prep step to preserve the initial past key values we have passed
def prep_step_decorator(method):
def decorate_prep_step(input_ids, **kwargs):
# if we are extending the input ids with the cached tokens then
# don't pass past key values to the input prep step, otherwise it
# would delete all but the last input_ids, and we have already removed
# the correct prefix from the input_ids (which is not always all but the last one)
if len(self._prefix_cache) > 0:
kwargs["past"] = None
input_ids = input_ids[:,len(self._prefix_cache):]
# if "attention_mask" in kwargs:
# kwargs["attention_mask"] = kwargs["attention_mask"][:,len(self._prefix_cache):]
model_kwargs = method(input_ids, **kwargs)
# provide the past key values for the actual model call
model_kwargs["past_key_values"] = self._past_key_values
if "position_ids" in model_kwargs: # models like OPT update the position ids internally
model_kwargs["position_ids"] = model_kwargs["position_ids"][:,len(self._prefix_cache):] # and update position ids
# we only need to do this first time, after that the past key values will
# be up until the last token, just like transformer models normally expect
# so we can clear our cache and let transformers cache like normal
self._prefix_cache = [] # this will get refilled once the generate call is done
return model_kwargs
else:
return method(input_ids, **kwargs)
decorate_prep_step.__func__ = method.__func__ # make us still look like a bound method
return decorate_prep_step
if getattr(self.llm.model_obj, "_orig_prepare_method", None) is None:
self.llm.model_obj._orig_prepare_method = self.llm.model_obj.prepare_inputs_for_generation
self.llm.model_obj.prepare_inputs_for_generation = prep_step_decorator(self.llm.model_obj._orig_prepare_method)
# decorate the update step to save the past key values
def update_step_decorator(method):
def decorate_update_step(outputs, *args, **kwargs):
# save the past key values
self._past_key_values = getattr(outputs, "past_key_values", None)
return method(outputs, *args, **kwargs)
return decorate_update_step
if getattr(self.llm.model_obj, "_orig_update_method", None) is None:
self.llm.model_obj._orig_update_method = self.llm.model_obj._update_model_kwargs_for_generation
self.llm.model_obj._update_model_kwargs_for_generation = update_step_decorator(self.llm.model_obj._orig_update_method)
return self
async def __call__(self, prompt, stop=None, stop_regex=None, temperature=None, n=1, max_tokens=1000, logprobs=None,
top_p=1.0, echo=False, logit_bias=None, token_healing=None, pattern=None, stream=False,
cache_seed=0, caching=None, **generate_kwargs):
""" Generate a completion of the given prompt.
"""
# fill in defaults
if temperature is None:
temperature = self.llm.temperature
if token_healing is None:
token_healing = self.llm.token_healing
# generate the cache key
cache_params = self._cache_params(locals().copy())
llm_cache = self.llm.cache
key = llm_cache.create_key(self.llm.llm_name, **cache_params)
# set the stop patterns
if stop is not None:
if isinstance(stop, str):
stop_regex = [regex.escape(stop)]
else:
stop_regex = [regex.escape(s) for s in stop]
if isinstance(stop_regex, str):
stop_regex = [stop_regex]
if stop_regex is None:
stop_regex = []
stop_regex.append(regex.escape(self.llm.tokenizer.eos_token)) # make sure the end of sequence token is always included
# handle function calling
if "function_call" in generate_kwargs:
assert generate_kwargs["function_call"] in ["none"], "Transformers does not yet have function call support!"
del generate_kwargs["function_call"]
# handle caching
in_cache = key in llm_cache
not_caching = (caching is not True and not self.llm.caching) or caching is False
if not in_cache or not_caching:
import transformers
assert prompt != "", "You must provide a non-zero length prompt to the Transformers language model!"
# encode the prompt
import torch
# encoded2 = self.llm.encode([prompt for _ in range(n)], return_tensors="pt")
encoded = self.llm.encode(prompt)
encoded = torch.tensor([encoded for _ in range(n)])
if self.llm.device is not None:
encoded = encoded.to(self.llm.device)
input_ids = encoded#["input_ids"]
# attention_mask = encoded["attention_mask"]
model_config = self.llm.model_obj.config
# ensure that we are extending a common sequence batch (our token healing assumes this right now)
assert (input_ids[0,-1] == input_ids[:,-1]).all(), "The current token healing implementation assumes that batches are reps of the same sequence!"
healed_token_ids = []
processors = []
stoppers = []
# save what the prompt looks like when coded and then decoded (this captures added start tokens, etc.)
coded_prompt = self.llm.decode(input_ids[0])
# setup token healing
if token_healing:
healer = TokenHealingLogitsProcessor(self.llm, model_config.vocab_size, input_ids[0])
healed_token_ids = healer.healed_token_ids
if len(healed_token_ids) > 0:
input_ids = input_ids[:,:-len(healed_token_ids)]
# attention_mask = attention_mask[:,:-len(healed_token_ids)]
max_tokens += len(healed_token_ids) # increase to account for the tokens we regen for token healing
processors.append(healer)
# setup logit biasing
if logit_bias is not None:
processors.append(BiasLogitsProcessor(self.llm, model_config.vocab_size, logit_bias))
# find the max context length
possible_attributes = ["max_sequence_length", "max_seq_len", "model_max_length", "n_positions", "max_position_embeddings"]
max_context = None
for obj in [model_config, self.llm.tokenizer]:
for attr in possible_attributes:
if max_context is None:
max_context = getattr(obj, attr, None)
else:
break
assert max_context is not None, "Could not find a max context length for the model! Tried: "+", ".join(possible_attributes)
# make sure we don't run off the end of the model
if max_tokens + len(input_ids[0]) > max_context:
max_tokens = max_context - len(input_ids[0])
# find how much of the prompt is cached
prefix_match_len = 0
for token in input_ids[0]:
if prefix_match_len >= len(self._prefix_cache) or token != self._prefix_cache[prefix_match_len]:
break
else:
prefix_match_len += 1
# we always need to run the model on at least one token so transformers is happy
if prefix_match_len == len(input_ids[0]):
prefix_match_len -= 1
# trim the cache to what we can use
if prefix_match_len < len(self._prefix_cache): # prefix_match_len > 0 and
self._past_key_values = tuple((key[:,:,:prefix_match_len,:],value[:,:,:prefix_match_len,:]) for key,value in self._past_key_values) # TODO: this is specific to the GPT2 tensor layout
self._prefix_cache = self._prefix_cache[:prefix_match_len]
# add support for pattern guidance
if pattern is not None:
processors.append(RegexLogitsProcessor(pattern, stop_regex, self.llm, model_config.vocab_size, temperature == 0, len(coded_prompt), self.llm.tokenizer.eos_token_id))
if stop_regex is not None:
stoppers.append(RegexStoppingCriteria(stop_regex, self.llm, len(coded_prompt)))
# a streamer to handle potentially partial output
streamer = TransformersStreamer(
input_ids=input_ids,
stop_regex=stop_regex,
healed_token_ids=healed_token_ids,
prefix_length=len(coded_prompt),
llm=self.llm,
max_new_tokens=max_tokens,
logprobs=logprobs
)
# the args for the transformers generate call
generate_args = dict(
inputs=input_ids,
# attention_mask=attention_mask,
# position_ids=position_ids,
temperature=temperature,
max_new_tokens=max_tokens,
top_p=top_p,
pad_token_id=model_config.pad_token_id if model_config.pad_token_id is not None else self.llm.tokenizer.eos_token_id,
logits_processor=transformers.LogitsProcessorList(processors),
stopping_criteria=transformers.StoppingCriteriaList(stoppers),
# past_key_values=self._past_key_values,
output_scores=logprobs is not None and logprobs > 0,
return_dict_in_generate=True,
**generate_kwargs
)
# override the model config for do_sample when the temperature requires it
do_sample = getattr(model_config, "do_sample", None)
if do_sample is True and temperature == 0:
generate_args["do_sample"] = False
elif do_sample is False and temperature > 0:
generate_args["do_sample"] = True
# if we are streaming then we need to run the inference process in a separate thread
if stream:
generate_args["streamer"] = streamer
thread = threading.Thread(target=self.llm.model_obj.generate, kwargs=generate_args)
thread.start()
return self._stream_then_save(streamer, key, thread)
# if we are not streaming we still manually use the streamer for consistency
else:
generated_sequence = self.llm.model_obj.generate(**generate_args)
streamer.put(generated_sequence)
self.llm.cache[key] = streamer.__next__()
self._update_prefix_cache(streamer)
return llm_cache[key]
def _update_prefix_cache(self, streamer):
# note what we now have cached and ready for our next call in this session
if self._past_key_values and len(streamer.generated_sequence) == 1:
self._prefix_cache = streamer.generated_sequence[0][:self._past_key_values[0][0].shape[-2]] # self._past_key_values is already saved, this just aligns with it
def _stream_then_save(self, streamer, key, thread):
list_out = []
for out in streamer:
list_out.append(out)
yield out
thread.join() # clean up the thread
self.llm.cache[key] = list_out
self._update_prefix_cache(streamer)
self._last_computed_key = key
def __exit__(self, exc_type, exc_value, traceback):
""" Restore the model to its original state by removing monkey patches.
"""
if getattr(self.llm.model_obj, "_orig_prepare_method", None) is not None:
self.llm.model_obj.prepare_inputs_for_generation = self.llm.model_obj._orig_prepare_method
del self.llm.model_obj._orig_prepare_method
if getattr(self.llm.model_obj, "_orig_update_method", None) is not None:
self.llm.model_obj._update_model_kwargs_for_generation = self.llm.model_obj._orig_update_method
del self.llm.model_obj._orig_update_method
return False
class TokenHealingLogitsProcessor():
""" Token healing.
When we tokenize the prompt the last token(s) we get are not the last token(s) we would
have gotten if the prompt + generation was concatented and then tokenized. This
is not good because it does not align with the pretraining of the model, so
we "heal" this boundary by backing up as many tokens as needed and then forcing the first tokens
generated to start with the prefix of the tokens we removed from the prompt. This could
result in the same tokens at the end of the prompt, or some suffix of the tokens we removed
could be replaced by a single longer one that crosses the prompt boundary.
"""
def __init__(self, model, vocab_size, prompt_ids, bias_value=100.):
""" Build a new TokenHealingLogitsProcessor.
Note that bias_value is in score space (log-odds normally) and should be
enough to ensure those tokens are the only ones used.
"""
# loop backwards through the prompt tokens looking for places where there are possible
# extensions that cross the prompt boundary
prefix_str = ""
self.extension_tokens = []
for i in range(len(prompt_ids)-1, max(len(prompt_ids)-10, -1), -1):
token_str = model.id_to_token(prompt_ids[i])
prefix_str = token_str + prefix_str
try:
extensions = model.prefix_matches(prefix_str)
except KeyError: # this must be a special token outside the vocab, so we assume it does not have any valid extensions
extensions = []
self.extension_tokens.append(extensions)
if i != len(prompt_ids)-1:
self.extension_tokens[-1].append(prompt_ids[i]) # add the token used in the input prompt to the list of possible extensions
self.extension_tokens = self.extension_tokens[::-1]
# prune off any extension token positions that don't have multiple multiple possible extensions
found_extensions = False
for i in range(len(self.extension_tokens)):
if len(self.extension_tokens[i]) > 1:
self.extension_tokens = self.extension_tokens[i:]
found_extensions = True
break
if found_extensions:
self.healed_token_ids = prompt_ids[len(prompt_ids)-len(self.extension_tokens):]
else:
self.extension_tokens = []
self.healed_token_ids = []
# if we have multiple possible completions past the last token, then biasing is needed
if len(self.extension_tokens) > 0:
import torch
# build a set of masks for each possible extension position
self.token_masks = []
for i in range(len(self.extension_tokens)):
token_mask = torch.zeros(vocab_size)
token_mask.scatter_(0, torch.tensor(self.extension_tokens[i]), bias_value)
if model.device is not None:
token_mask = token_mask.to(model.device)
self.token_masks.append(token_mask)
self.num_extensions = 0
def __call__(self, input_ids, scores):
# we only bias the first token generated
if self.num_extensions >= len(self.extension_tokens):
return scores
self.num_extensions += 1
# check if the last token was from the original prompt (if not then we have already "healed" by choosing a token that crosses the prompt boundary)
if self.num_extensions > 1 and input_ids[0][-1] != self.healed_token_ids[self.num_extensions-2]:
return scores
# handle list inputs
if isinstance(scores, list):
import torch
scores = torch.tensor(scores)
# make only allowed tokens possible
return scores + self.token_masks[self.num_extensions-1]
class BiasLogitsProcessor():
""" Simple token biasing.
"""
def __init__(self, model, vocab_size, logit_bias):
""" Build a new BiasLogitsProcessor.
"""
import torch
self.bias_vector = torch.zeros(vocab_size)
for token, bias in logit_bias.items():
self.bias_vector[token] = bias
self.bias_vector = self.bias_vector.to(model.device)
def __call__(self, input_ids, scores):
# handle list inputs
if isinstance(scores, list):
import torch
scores = torch.tensor(scores)
return scores + self.bias_vector
class RegexLogitsProcessor():
""" Pattern guiding.
Guide generation to match a regular expression.
TODO: currently slow, could be made much faster by doing rejection sampling inline with the sampling/greedy process.
"""
def __init__(self, pattern, stop_regex, llm, vocab_size, is_greedy, prefix_length, eos_token_id, max_consider=500000):
""" Build a new TokenHealingLogitsProcessor.
Parameters
----------
pattern : str
The regex pattern we are seeking to match.
stop_regex : str or list of str
The stop regex(s) allowed to come after this pattern.
llm : function
The llm.
vocab_size : int
The size of the vocabulary.
is_greedy : bool
The token selection mode currently in use. We need to know this so we can
effectively take over that sampling process inside this logit processor.
eos_token_id : int
The end of the stop token of the model.
max_consider : int
How many top values to bias. Note that we could remove this option once this
processor is performance optimized (by integrating it into the sampling/greedy process).
"""
import torch
if isinstance(stop_regex, str):
stop_regex = [stop_regex]
self.pattern_no_stop = regex.compile(pattern)
self.pattern = regex.compile(pattern + "(" + "|".join(stop_regex) + ")?")
self.llm = llm
self.is_greedy = is_greedy
self.prefix_length = prefix_length
self.max_consider = max_consider
self.bias_vector = torch.zeros(vocab_size)
self.current_strings = None
self.current_length = 0
self.forced_chars = 0
self.eos_token_id = eos_token_id
def __call__(self, input_ids, scores):
import torch
# handle 1D inputs
one_dim = False
if not isinstance(input_ids[0], collections.abc.Sequence) and not (hasattr(input_ids[0], "shape") and len(input_ids[0].shape) > 0):
one_dim = True
input_ids = torch.tensor(input_ids).unsqueeze(0)
scores = torch.tensor(scores).unsqueeze(0)
# extend our current strings
if self.current_strings is None:
self.current_strings = [self.llm.new_string_builder() for i in range(len(input_ids))]
for i in range(len(self.current_strings)):
self.current_strings[i].extend(input_ids[i][self.current_length:])
assert len(self.current_strings) == 1, "Regex patterns guides do not support batched inference with Transformers yet!"
self.current_length = len(input_ids[0])
# compute the bias values
self.bias_vector[:] = 0
sort_inds = torch.argsort(scores, 1, True)
to_bias = []
for i in range(min(sort_inds.shape[1], self.max_consider)):
self.current_strings[0].extend([sort_inds[0,i]])
proposed_string = str(self.current_strings[0])[self.prefix_length:]
self.current_strings[0].pop()
m = self.pattern.fullmatch(proposed_string, partial=True) # partial means we don't match currently but might as the string grows
if m:
to_bias.append(int(sort_inds[0, i]))
if self.is_greedy: # TODO: make this much faster for non-greedy sampling (by tracking how much prob mass we have looked through perhaps...)
break # we are done if we are doing greedy sampling and we found the top valid hit
# if we found no more valid tokens then we just end the sequence
if not len(to_bias):
to_bias = [self.eos_token_id]
# bias allowed tokens
min_to_bias = float(scores[0, to_bias].min())
bias_value = scores[0, sort_inds[0, 0]] - min_to_bias + 10 # make sure the tokens that fit the pattern have higher scores than the top value
for x in to_bias:
self.bias_vector[x] = bias_value
out = scores + self.bias_vector.to(scores.device)
if one_dim:
return out[0]
else:
return out
class RegexStoppingCriteria():
def __init__(self, stop_pattern, llm, prefix_length):
if isinstance(stop_pattern, str):
self.stop_patterns = [regex.compile(stop_pattern)]
else:
self.stop_patterns = [regex.compile(pattern) for pattern in stop_pattern]
self.prefix_length = prefix_length
self.llm = llm
self.current_strings = None
self.current_length = 0
def __call__(self, input_ids, scores, **kwargs):
# handle 1D inputs
if not isinstance(input_ids[0], collections.abc.Sequence) and not (hasattr(input_ids[0], "shape") and len(input_ids[0].shape) > 0):
input_ids = [input_ids]
# extend our current strings
if self.current_strings is None:
self.current_strings = [self.llm.new_string_builder() for _ in range(len(input_ids))]
for i in range(len(self.current_strings)):
self.current_strings[i].extend(input_ids[i][self.current_length:])
self.current_length = len(input_ids[0])
# check if all of the strings match a stop string (and hence we can stop the batch inference)
all_done = True
for i in range(len(self.current_strings)):
found = False
for s in self.stop_patterns:
if s.search(str(self.current_strings[i])[self.prefix_length:]):
found = True
if not found:
all_done = False
break
return all_done
class TransformersStringBuilder():
"""This deals with the complexity of building up a string from tokens bit by bit."""
def __init__(self, tokenizer, starting_ids=None):
self.tokenizer = tokenizer
self.token_strings = []
self._joint_string = ""
if starting_ids is not None:
self.extend(starting_ids)
def extend(self, new_ids):
new_token_strings = self.tokenizer.convert_ids_to_tokens(new_ids)
self.token_strings.extend(new_token_strings)
new_str = self.tokenizer.convert_tokens_to_string(self.token_strings)
diff_str = new_str[len(self._joint_string):]
self._joint_string = new_str
return diff_str
def pop(self):
"""Remove the last token from the string and return text it removed."""
self.token_strings.pop()
new_str = self.tokenizer.convert_tokens_to_string(self.token_strings)
diff_str = self._joint_string[len(new_str):]
self._joint_string = new_str
return diff_str
def __str__(self):
return self._joint_string
def __len__(self):
return len(self._joint_string)
class TransformersStreamer():
def __init__(self, input_ids, stop_regex, healed_token_ids, prefix_length, llm, max_new_tokens, logprobs, timeout=None):
self.input_ids = input_ids
self.stop_regex = stop_regex
self.healed_token_ids = healed_token_ids
self.logprobs = logprobs
self.llm = llm
self.max_total_tokens = max_new_tokens + len(input_ids[0])
self.timeout = timeout
self.str_pos = [prefix_length for i in range(len(self.input_ids))]
self.out_queue = queue.Queue()
self.sequence_pos = [len(self.input_ids[0]) for i in range(len(self.input_ids))]
self.generated_sequence = [[] for i in range(len(self.input_ids))]
self.display_logprobs = [[] for i in range(len(self.input_ids))]
self.generated_string = [self.llm.new_string_builder(input_ids[0]) for i in range(len(self.input_ids))]
self.prefix_cache = []
def put(self, token_obj):
import torch
if isinstance(token_obj, torch.Tensor):
new_tokens = token_obj
else:
new_tokens = token_obj['sequences']
if isinstance(new_tokens, torch.Tensor):
new_tokens = new_tokens.cpu()
# if we are given a single sequence, then make it a batch of size 1
if len(new_tokens.shape) == 1:
new_tokens = new_tokens.unsqueeze(0)
# extract the scores if we are given them (and format them to be the same shape as the tokens)
if self.logprobs:
assert len(new_tokens) == 1, "logprobs are not supported for batched generation right now in guidance.llms.Transformers"
new_scores = [torch.nn.functional.log_softmax(x, dim=-1).cpu() for x in token_obj['scores']]
len_diff = len(new_tokens[0]) - len(new_scores)
if len_diff > 0:
new_scores = [None for i in range(len_diff)] + new_scores
new_scores = [new_scores]
out = {"choices": [None for i in range(len(self.input_ids))]}
put_data = False
for i in range(len(self.input_ids)):
self.generated_sequence[i].extend(list(new_tokens[i]))
# save logprobs if needed
if self.logprobs:
for scores in new_scores[i]:
if scores is None:
self.display_logprobs[i].append(None)
else:
top_inds = scores[0].argsort(descending=True)[:self.logprobs] # TODO: verify the [0] is always correct
self.display_logprobs[i].append({self.llm.id_to_token(j): float(scores[0][j]) for j in top_inds})
if self.sequence_pos[i] < len(self.generated_sequence[i]):
display_tokens = list(self.generated_sequence[i][self.sequence_pos[i]:])
val = self.generated_string[i].extend(display_tokens)
# val = self.llm.decode(display_tokens)#[self.llm._prefix_token_id] + display_tokens)[len(self.llm._prefix_token):]
# self.generated_string[i] += val
if self.str_pos[i] < len(self.generated_string[i]):
val = str(self.generated_string[i])[self.str_pos[i]:]
finish_reason = None
# check why we stopped
stop_pos = len(val) + 1
if len(self.generated_sequence[i]) >= self.max_total_tokens:
finish_reason = "length"
elif self.generated_sequence[i][-1] == self.llm.tokenizer.eos_token_id:
finish_reason = "endoftext"
eos_str = self.generated_string[i].pop() # remove the end of text token
stop_pos = len(val) - len(eos_str)
# trim off the stop regex matches if needed
found_partial = False
stop_text = None
if self.stop_regex is not None:# and (finish_reason is None or len(self.input_ids) > 1):
stop_regex_obj = [regex.compile(s) for s in self.stop_regex]
for s in stop_regex_obj:
m = s.search(val, partial=True)
if m:
span = m.span()
if span[1] > span[0]:
if m.partial: # we might be starting a stop sequence, so we can't emit anything yet
found_partial = True
break
else:
stop_text = val[span[0]:span[1]]
stop_pos = min(span[0], stop_pos)
break
# record the reason we stopped (if we have stopped)
if stop_pos <= len(val):
finish_reason = "stop"
# emit the data if we are not potentially in the middle of a stop sequence
if not found_partial or finish_reason is not None:
out["choices"][i] = {
"text": val[:stop_pos],
"finish_reason": finish_reason,
"stop_text": stop_text,
"logprobs": {
# "token_healing_prefix": self.last_token_str,
"top_logprobs": self.display_logprobs[i][self.sequence_pos[i]:]
}
}
self.str_pos[i] = len(self.generated_string[i])
put_data = True
self.sequence_pos[i] = len(self.generated_sequence[i])
if put_data:
self.out_queue.put(out)
def end(self):
# make sure we have flushed all of the data
for i in range(len(self.input_ids)):
assert self.str_pos[i] >= len(self.generated_string[i]), "Not all data was flushed, this means generation stopped for an unknown reason!"
self.out_queue.put(None)
def __iter__(self):
return self
def __next__(self):
value = self.out_queue.get(timeout=self.timeout)
if value is None:
raise StopIteration()
else:
return value
| [] |
2024-01-10 | qeternity/guidance | guidance~llms~_openai.py | import openai
import os
import time
import requests
import aiohttp
import copy
import time
import asyncio
import types
import collections
import json
import re
import regex
from ._llm import LLM, LLMSession, SyncSession
class MalformedPromptException(Exception):
pass
import pyparsing as pp
role_start_tag = pp.Suppress(pp.Optional(pp.White()) + pp.Literal("<|im_start|>"))
role_start_name = pp.Word(pp.alphanums + "_")("role_name")
role_kwargs = pp.Suppress(pp.Optional(" ")) + pp.Dict(pp.Group(pp.Word(pp.alphanums + "_") + pp.Suppress("=") + pp.QuotedString('"')))("kwargs")
role_start = (role_start_tag + role_start_name + pp.Optional(role_kwargs) + pp.Suppress("\n")).leave_whitespace()
role_end = pp.Suppress(pp.Literal("<|im_end|>"))
role_content = pp.Combine(pp.ZeroOrMore(pp.CharsNotIn("<") | pp.Literal("<") + ~pp.FollowedBy("|im_end|>")))("role_content")
role_group = pp.Group(role_start + role_content + role_end)("role_group").leave_whitespace()
partial_role_group = pp.Group(role_start + role_content)("role_group").leave_whitespace()
roles_grammar = pp.ZeroOrMore(role_group) + pp.Optional(partial_role_group) + pp.StringEnd()
# import pyparsing as pp
# role_start_tag = pp.Literal("<|im_start|>")
# role_start_name = pp.Word(pp.alphanums + "_")
# role_kwargs = pp.Dict(pp.Group(pp.Word(pp.alphanums + "_") + pp.Suppress("=") + pp.QuotedString('"')))
# role_start = role_start_tag + role_start_name + pp.Optional(role_kwargs) + pp.Suppress("\n")
# role_end = pp.Literal("<|im_end|>")
# role_content = pp.CharsNotIn("<|im_start|><|im_end|>")
# r'<\|im_start\|>([^\n]+)\n(.*?)(?=<\|im_end\|>|$)'
def prompt_to_messages(prompt):
messages = []
assert prompt.endswith("<|im_start|>assistant\n"), "When calling OpenAI chat models you must generate only directly inside the assistant role! The OpenAI API does not currently support partial assistant prompting."
parsed_prompt = roles_grammar.parse_string(prompt)
# pattern = r'<\|im_start\|>([^\n]+)\n(.*?)(?=<\|im_end\|>|$)'
# matches = re.findall(pattern, prompt, re.DOTALL)
# if not matches:
# return [{'role': 'user', 'content': prompt}]
for role in parsed_prompt:
if len(role["role_content"]) > 0: # only add non-empty messages (OpenAI does not support empty messages anyway)
message = {'role': role["role_name"], 'content': role["role_content"]}
if "kwargs" in role:
for k, v in role["kwargs"].items():
message[k] = v
messages.append(message)
return messages
async def add_text_to_chat_mode_generator(chat_mode):
in_function_call = False
async for resp in chat_mode:
if "choices" in resp:
for c in resp['choices']:
# move content from delta to text so we have a consistent interface with non-chat mode
found_content = False
if "content" in c['delta'] and c['delta']['content'] != "":
found_content = True
c['text'] = c['delta']['content']
# capture function call data and convert to text again so we have a consistent interface with non-chat mode and open models
if "function_call" in c['delta']:
# build the start of the function call (the follows the syntax that GPT says it wants when we ask it, and will be parsed by the @function_detector)
if not in_function_call:
start_val = "\n```typescript\nfunctions."+c['delta']['function_call']["name"]+"("
if not c['text']:
c['text'] = start_val
else:
c['text'] += start_val
in_function_call = True
# extend the arguments JSON string
val = c['delta']['function_call']["arguments"]
if 'text' in c:
c['text'] += val
else:
c['text'] = val
if not found_content and not in_function_call:
break # the role markers are outside the generation in chat mode right now TODO: consider how this changes for uncontrained generation
else:
yield resp
else:
yield resp
# close the function call if needed
if in_function_call:
yield {'choices': [{'text': ')```'}]}
def add_text_to_chat_mode(chat_mode):
if isinstance(chat_mode, (types.AsyncGeneratorType, types.GeneratorType)):
return add_text_to_chat_mode_generator(chat_mode)
else:
for c in chat_mode['choices']:
c['text'] = c['message']['content']
return chat_mode
class OpenAI(LLM):
llm_name: str = "openai"
def __init__(self, model=None, caching=True, max_retries=5, max_calls_per_min=60,
api_key=None, api_type="open_ai", api_base=None, api_version=None, deployment_id=None,
temperature=0.0, chat_mode="auto", organization=None, rest_call=False,
allowed_special_tokens={"<|endoftext|>", "<|endofprompt|>"},
token=None, endpoint=None):
super().__init__()
# map old param values
# TODO: add deprecated warnings after some time
if token is not None:
if api_key is None:
api_key = token
if endpoint is not None:
if api_base is None:
api_base = endpoint
# fill in default model value
if model is None:
model = os.environ.get("OPENAI_MODEL", None)
if model is None:
try:
with open(os.path.expanduser('~/.openai_model'), 'r') as file:
model = file.read().replace('\n', '')
except:
pass
# fill in default deployment_id value
if deployment_id is None:
deployment_id = os.environ.get("OPENAI_DEPLOYMENT_ID", None)
# auto detect chat completion mode
if chat_mode == "auto":
# parse to determin if the model need to use the chat completion API
chat_model_pattern = r'^(gpt-3\.5-turbo|gpt-4)(-\d+k)?(-\d{4})?$'
if re.match(chat_model_pattern, model):
chat_mode = True
else:
chat_mode = False
# fill in default API key value
if api_key is None: # get from environment variable
api_key = os.environ.get("OPENAI_API_KEY", getattr(openai, "api_key", None))
if api_key is not None and not api_key.startswith("sk-") and os.path.exists(os.path.expanduser(api_key)): # get from file
with open(os.path.expanduser(api_key), 'r') as file:
api_key = file.read().replace('\n', '')
if api_key is None: # get from default file location
try:
with open(os.path.expanduser('~/.openai_api_key'), 'r') as file:
api_key = file.read().replace('\n', '')
except:
pass
if organization is None:
organization = os.environ.get("OPENAI_ORGANIZATION", None)
# fill in default endpoint value
if api_base is None:
api_base = os.environ.get("OPENAI_API_BASE", None) or os.environ.get("OPENAI_ENDPOINT", None) # ENDPOINT is deprecated
import tiktoken
self._tokenizer = tiktoken.get_encoding(tiktoken.encoding_for_model(model).name)
self.chat_mode = chat_mode
self.allowed_special_tokens = allowed_special_tokens
self.model_name = model
self.deployment_id = deployment_id
self.caching = caching
self.max_retries = max_retries
self.max_calls_per_min = max_calls_per_min
if isinstance(api_key, str):
api_key = api_key.replace("Bearer ", "")
self.api_key = api_key
self.api_type = api_type
self.api_base = api_base
self.api_version = api_version
self.current_time = time.time()
self.call_history = collections.deque()
self.temperature = temperature
self.organization = organization
self.rest_call = rest_call
self.endpoint = endpoint
if not self.rest_call:
self.caller = self._library_call
else:
self.caller = self._rest_call
self._rest_headers = {
"Content-Type": "application/json"
}
def session(self, asynchronous=False):
if asynchronous:
return OpenAISession(self)
else:
return SyncSession(OpenAISession(self))
def role_start(self, role_name, **kwargs):
assert self.chat_mode, "role_start() can only be used in chat mode"
return "<|im_start|>"+role_name+"".join([f' {k}="{v}"' for k,v in kwargs.items()])+"\n"
def role_end(self, role=None):
assert self.chat_mode, "role_end() can only be used in chat mode"
return "<|im_end|>"
def end_of_text(self):
return "<|endoftext|>"
@classmethod
async def stream_then_save(cls, gen, key, stop_regex, n):
list_out = []
cached_out = None
# init stop_regex variables
if stop_regex is not None:
if isinstance(stop_regex, str):
stop_patterns = [regex.compile(stop_regex)]
else:
stop_patterns = [regex.compile(pattern) for pattern in stop_regex]
current_strings = ["" for _ in range(n)]
# last_out_pos = ["" for _ in range(n)]
# iterate through the stream
all_done = False
async for curr_out in gen:
# if we have a cached output, extend it with the current output
if cached_out is not None:
out = merge_stream_chunks(cached_out, curr_out)
else:
out = curr_out
# check if we have stop_regex matches
found_partial = False
if stop_regex is not None:
# keep track of the generated text so far
for i,choice in enumerate(curr_out['choices']):
current_strings[i] += choice['text']
# check if all of the strings match a stop string (and hence we can stop the batch inference)
all_done = True
for i in range(len(current_strings)):
found = False
for s in stop_patterns:
if s.search(current_strings[i]):
found = True
if not found:
all_done = False
break
# find where trim off the stop regex matches if needed (and look for partial matches)
stop_pos = [1e10 for _ in range(n)]
stop_text = [None for _ in range(n)]
for i in range(len(current_strings)):
for s in stop_patterns:
m = s.search(current_strings[i], partial=True)
if m:
span = m.span()
if span[1] > span[0]:
if m.partial: # we might be starting a stop sequence, so we can't emit anything yet
found_partial = True
break
else:
stop_text[i] = current_strings[i][span[0]:span[1]]
stop_pos[i] = min(span[0], stop_pos[i])
if stop_pos != 1e10:
stop_pos[i] = stop_pos[i] - len(current_strings[i]) # convert to relative position from the end
# if we might be starting a stop sequence, we need to cache the output and continue to wait and see
if found_partial:
cached_out = out
continue
# if we get here, we are not starting a stop sequence, so we can emit the output
else:
cached_out = None
if stop_regex is not None:
for i in range(len(out['choices'])):
if stop_pos[i] < len(out['choices'][i]['text']):
out['choices'][i] = out['choices'][i].to_dict() # because sometimes we might need to set the text to the empty string (and OpenAI's object does not like that)
out['choices'][i]['text'] = out['choices'][i]['text'][:stop_pos[i]]
out['choices'][i]['stop_text'] = stop_text[i]
out['choices'][i]['finish_reason'] = "stop"
list_out.append(out)
yield out
if all_done:
gen.aclose()
break
# if we have a cached output, emit it
if cached_out is not None:
list_out.append(cached_out)
yield out
cls.cache[key] = list_out
def _stream_completion(self):
pass
# Define a function to add a call to the deque
def add_call(self):
# Get the current timestamp in seconds
now = time.time()
# Append the timestamp to the right of the deque
self.call_history.append(now)
# Define a function to count the calls in the last 60 seconds
def count_calls(self):
# Get the current timestamp in seconds
now = time.time()
# Remove the timestamps that are older than 60 seconds from the left of the deque
while self.call_history and self.call_history[0] < now - 60:
self.call_history.popleft()
# Return the length of the deque as the number of calls
return len(self.call_history)
async def _library_call(self, **kwargs):
""" Call the OpenAI API using the python package.
Note that is uses the local auth token, and does not rely on the openai one.
"""
# save the params of the openai library
prev_key = openai.api_key
prev_org = openai.organization
prev_type = openai.api_type
prev_version = openai.api_version
prev_base = openai.api_base
# set the params of the openai library if we have them
if self.api_key is not None:
openai.api_key = self.api_key
if self.organization is not None:
openai.organization = self.organization
if self.api_type is not None:
openai.api_type = self.api_type
if self.api_version is not None:
openai.api_version = self.api_version
if self.api_base is not None:
openai.api_base = self.api_base
assert openai.api_key is not None, "You must provide an OpenAI API key to use the OpenAI LLM. Either pass it in the constructor, set the OPENAI_API_KEY environment variable, or create the file ~/.openai_api_key with your key in it."
if self.chat_mode:
kwargs['messages'] = prompt_to_messages(kwargs['prompt'])
del kwargs['prompt']
del kwargs['echo']
del kwargs['logprobs']
# print(kwargs)
out = await openai.ChatCompletion.acreate(**kwargs)
out = add_text_to_chat_mode(out)
else:
out = await openai.Completion.acreate(**kwargs)
# restore the params of the openai library
openai.api_key = prev_key
openai.organization = prev_org
openai.api_type = prev_type
openai.api_version = prev_version
openai.api_base = prev_base
return out
async def _rest_call(self, **kwargs):
""" Call the OpenAI API using the REST API.
"""
# Define the request headers
headers = copy.copy(self._rest_headers)
if self.api_key is not None:
headers['Authorization'] = f"Bearer {self.api_key}"
# Define the request data
stream = kwargs.get("stream", False)
data = {
"model": self.model_name,
"prompt": kwargs["prompt"],
"max_tokens": kwargs.get("max_tokens", None),
"temperature": kwargs.get("temperature", 0.0),
"top_p": kwargs.get("top_p", 1.0),
"n": kwargs.get("n", 1),
"stream": stream,
"logprobs": kwargs.get("logprobs", None),
'stop': kwargs.get("stop", None),
"echo": kwargs.get("echo", False)
}
if self.chat_mode:
data['messages'] = prompt_to_messages(data['prompt'])
del data['prompt']
del data['echo']
del data['logprobs']
# Send a POST request and get the response
# An exception for timeout is raised if the server has not issued a response for 10 seconds
try:
if stream:
session = aiohttp.ClientSession()
response = await session.post(self.endpoint, json=data, headers=headers, timeout=60)
status = response.status
else:
response = requests.post(self.endpoint, headers=headers, json=data, timeout=60)
status = response.status_code
text = response.text
if status != 200:
if stream:
text = await response.text()
raise Exception("Response is not 200: " + text)
if stream:
response = self._rest_stream_handler(response, session)
else:
response = response.json()
except requests.Timeout:
raise Exception("Request timed out.")
except requests.ConnectionError:
raise Exception("Connection error occurred.")
if self.chat_mode:
response = add_text_to_chat_mode(response)
return response
async def _close_response_and_session(self, response, session):
await response.release()
await session.close()
async def _rest_stream_handler(self, response, session):
# async for line in response.iter_lines():
async for line in response.content:
text = line.decode('utf-8')
if text.startswith('data: '):
text = text[6:]
if text.strip() == '[DONE]':
await self._close_response_and_session(response, session)
break
else:
yield json.loads(text)
def encode(self, string):
# note that is_fragment is not used used for this tokenizer
return self._tokenizer.encode(string, allowed_special=self.allowed_special_tokens)
def decode(self, tokens):
return self._tokenizer.decode(tokens)
def merge_stream_chunks(first_chunk, second_chunk):
""" This merges two stream responses together.
"""
out = copy.deepcopy(first_chunk)
# merge the choices
for i in range(len(out['choices'])):
out_choice = out['choices'][i]
second_choice = second_chunk['choices'][i]
out_choice['text'] += second_choice['text']
if 'index' in second_choice:
out_choice['index'] = second_choice['index']
if 'finish_reason' in second_choice:
out_choice['finish_reason'] = second_choice['finish_reason']
if out_choice.get('logprobs', None) is not None:
out_choice['logprobs']['token_logprobs'] += second_choice['logprobs']['token_logprobs']
out_choice['logprobs']['top_logprobs'] += second_choice['logprobs']['top_logprobs']
out_choice['logprobs']['text_offset'] = second_choice['logprobs']['text_offset']
return out
class OpenAIStreamer():
def __init__(self, stop_regex, n):
self.stop_regex = stop_regex
self.n = n
self.current_strings = ["" for _ in range(n)]
self.current_length = 0
class RegexStopChecker():
def __init__(self, stop_pattern, decode, prefix_length):
if isinstance(stop_pattern, str):
self.stop_patterns = [regex.compile(stop_pattern)]
else:
self.stop_patterns = [regex.compile(pattern) for pattern in stop_pattern]
self.prefix_length = prefix_length
self.decode = decode
self.current_strings = None
self.current_length = 0
def __call__(self, input_ids, scores, **kwargs):
# extend our current strings
if self.current_strings is None:
self.current_strings = ["" for _ in range(len(input_ids))]
for i in range(len(self.current_strings)):
self.current_strings[i] += self.decode(input_ids[i][self.current_length:])
# trim off the prefix string so we don't look for stop matches in the prompt
if self.current_length == 0:
for i in range(len(self.current_strings)):
self.current_strings[i] = self.current_strings[i][self.prefix_length:]
self.current_length = len(input_ids[0])
# check if all of the strings match a stop string (and hence we can stop the batch inference)
all_done = True
for i in range(len(self.current_strings)):
found = False
for s in self.stop_patterns:
if s.search(self.current_strings[i]):
found = True
if not found:
all_done = False
break
return all_done
# define the syntax for the function definitions
import pyparsing as pp
start_functions = pp.Suppress(pp.Literal("## functions\n\nnamespace functions {\n\n"))
comment = pp.Combine(pp.Suppress(pp.Literal("//") + pp.Optional(" ")) + pp.restOfLine)
end_functions = pp.Suppress("} // namespace functions")
function_def_start = pp.Optional(comment)("function_description") + pp.Suppress(pp.Literal("type")) + pp.Word(pp.alphas + "_")("function_name") + pp.Suppress(pp.Literal("=") + pp.Literal("(_:") + pp.Literal("{"))
function_def_end = pp.Suppress(pp.Literal("})") + pp.Literal("=>") + pp.Literal("any;"))
parameter_type = (pp.Word(pp.alphas + "_")("simple_type") | pp.QuotedString('"')("enum_option") + pp.OneOrMore(pp.Suppress("|") + pp.QuotedString('"')("enum_option"))("enum")) + pp.Suppress(pp.Optional(","))
parameter_def = pp.Optional(comment)("parameter_description") + pp.Word(pp.alphas + "_")("parameter_name") + pp.Optional(pp.Literal("?"))("is_optional") + pp.Suppress(pp.Literal(":")) + pp.Group(parameter_type)("parameter_type")
function_def = function_def_start + pp.OneOrMore(pp.Group(parameter_def)("parameter")) + function_def_end
functions_def = start_functions + pp.OneOrMore(pp.Group(function_def)("function")) + end_functions
def get_json_from_parse(parse_out):
functions = []
for function in parse_out:
function_name = function.function_name
function_description = function.function_description
parameters = {
"type": "object",
"properties": {},
"required": []
}
for parameter in function:
if isinstance(parameter, str):
continue
parameter_name = parameter.parameter_name
parameter_description = parameter.parameter_description
parameter_type = parameter.parameter_type
is_optional = parameter.is_optional
d = {}
if parameter_type.simple_type:
d["type"] = parameter_type.simple_type
elif parameter_type.enum:
d["type"] = "string"
d["enum"] = [s for s in parameter_type]
if parameter_description:
d["description"] = parameter_description
if not is_optional:
parameters["required"].append(parameter_name)
parameters["properties"][parameter_name] = d
functions.append({
"name": function_name,
"description": function_description,
"parameters": parameters
})
return functions
def extract_function_defs(prompt):
""" This extracts function definitions from the prompt.
"""
if "\n## functions\n" not in prompt:
return None
else:
functions_text = prompt[prompt.index("\n## functions\n")+1:prompt.index("} // namespace functions")+24]
parse_out = functions_def.parseString(functions_text)
return get_json_from_parse(parse_out)
# Define a deque to store the timestamps of the calls
class OpenAISession(LLMSession):
async def __call__(self, prompt, stop=None, stop_regex=None, temperature=None, n=1, max_tokens=1000, logprobs=None,
top_p=1.0, echo=False, logit_bias=None, token_healing=None, pattern=None, stream=None,
cache_seed=0, caching=None, **completion_kwargs):
""" Generate a completion of the given prompt.
"""
# we need to stream in order to support stop_regex
if stream is None:
stream = stop_regex is not None
assert stop_regex is None or stream, "We can only support stop_regex for the OpenAI API when stream=True!"
assert stop_regex is None or n == 1, "We don't yet support stop_regex combined with n > 1 with the OpenAI API!"
assert token_healing is None or token_healing is False, "The OpenAI API does not yet support token healing! Please either switch to an endpoint that does, or don't use the `token_healing` argument to `gen`."
# set defaults
if temperature is None:
temperature = self.llm.temperature
# get the arguments as dictionary for cache key generation
args = locals().copy()
assert not pattern, "The OpenAI API does not support Guidance pattern controls! Please either switch to an endpoint that does, or don't use the `pattern` argument to `gen`."
# assert not stop_regex, "The OpenAI API does not support Guidance stop_regex controls! Please either switch to an endpoint that does, or don't use the `stop_regex` argument to `gen`."
# define the key for the cache
cache_params = self._cache_params(args)
llm_cache = self.llm.cache
key = llm_cache.create_key(self.llm.llm_name, **cache_params)
# allow streaming to use non-streaming cache (the reverse is not true)
if key not in llm_cache and stream:
cache_params["stream"] = False
key1 = llm_cache.create_key(self.llm.llm_name, **cache_params)
if key1 in llm_cache:
key = key1
# check the cache
if key not in llm_cache or caching is False or (caching is not True and not self.llm.caching):
# ensure we don't exceed the rate limit
while self.llm.count_calls() > self.llm.max_calls_per_min:
await asyncio.sleep(1)
functions = extract_function_defs(prompt)
fail_count = 0
while True:
try_again = False
try:
self.llm.add_call()
call_args = {
"model": self.llm.model_name,
"deployment_id": self.llm.deployment_id,
"prompt": prompt,
"max_tokens": max_tokens,
"temperature": temperature,
"top_p": top_p,
"n": n,
"stop": stop,
"logprobs": logprobs,
"echo": echo,
"stream": stream,
**completion_kwargs
}
if functions is None:
if "function_call" in call_args:
del call_args["function_call"]
else:
call_args["functions"] = functions
if logit_bias is not None:
call_args["logit_bias"] = {str(k): v for k,v in logit_bias.items()} # convert keys to strings since that's the open ai api's format
out = await self.llm.caller(**call_args)
except openai.error.RateLimitError:
await asyncio.sleep(3)
try_again = True
fail_count += 1
if not try_again:
break
if fail_count > self.llm.max_retries:
raise Exception(f"Too many (more than {self.llm.max_retries}) OpenAI API RateLimitError's in a row!")
if stream:
return self.llm.stream_then_save(out, key, stop_regex, n)
else:
llm_cache[key] = out
# wrap as a list if needed
if stream:
if isinstance(llm_cache[key], list):
return llm_cache[key]
return [llm_cache[key]]
return llm_cache[key]
import os
import json
import platformdirs
from ._openai import OpenAI
class AzureOpenAI(OpenAI):
def __init__(self, *args, **kwargs):
raise NotImplementedError("The AzureOpenAI class has been merged with the OpenAI class for Azure usage. Please use the OpenAI class instead: https://guidance.readthedocs.io/en/latest/example_notebooks/api_examples/llms/OpenAI.html")
class MSALOpenAI(OpenAI):
""" Microsoft Authentication Library (MSAL) OpenAI style integration.
Warning: This class is not finalized and may change in the future.
"""
llm_name: str = "azure_openai"
def __init__(self, model=None, client_id=None, authority=None, caching=True, max_retries=5, max_calls_per_min=60, token=None,
endpoint=None, scopes=None, temperature=0.0, chat_mode="auto"):
assert endpoint is not None, "An endpoint must be specified!"
# build a standard OpenAI LLM object
super().__init__(
model=model, caching=caching, max_retries=max_retries, max_calls_per_min=max_calls_per_min,
token=token, endpoint=endpoint, temperature=temperature, chat_mode=chat_mode
)
self.client_id = client_id
self.authority = authority
self.scopes = scopes
from msal import PublicClientApplication, SerializableTokenCache
self._token_cache = SerializableTokenCache()
self._token_cache_path = os.path.join(platformdirs.user_cache_dir("guidance"), "_azure_openai.token")
self._app = PublicClientApplication(client_id=self.client_id, authority=self.authority, token_cache=self._token_cache)
if os.path.exists(self._token_cache_path):
self._token_cache.deserialize(open(self._token_cache_path, 'r').read())
self._rest_headers["X-ModelType"] = self.model_name
@property
def token(self):
return self._get_token()
@token.setter
def token(self, value):
pass # ignored for now
def _get_token(self):
accounts = self._app.get_accounts()
result = None
if accounts:
# Assuming the end user chose this one
chosen = accounts[0]
# Now let's try to find a token in cache for this account
result = self._app.acquire_token_silent(self.scopes, account=chosen)
if not result:
# So no suitable token exists in cache. Let's get a new one from AAD.
flow = self._app.initiate_device_flow(scopes=self.scopes)
if "user_code" not in flow:
raise ValueError(
"Fail to create device flow. Err: %s" % json.dumps(flow, indent=4))
print(flow["message"])
result = self._app.acquire_token_by_device_flow(flow)
# save the aquired token
with open(self._token_cache_path, "w") as f:
f.write(self._token_cache.serialize())
return result["access_token"]
| [
"role_content"
] |
2024-01-10 | qeternity/guidance | guidance~llms~_llm.py | from typing import Any, Dict
import asyncio
import re
import json
import guidance
from .caches import DiskCache
class LLMMeta(type):
def __init__(cls, *args, **kwargs):
cls._cache = None
@property
def cache(cls):
if cls._cache is None:
cls._cache = DiskCache(cls.llm_name)
return cls._cache
@cache.setter
def cache(cls, value):
cls._cache = value
class LLM(metaclass=LLMMeta):
cache_version = 1
default_system_prompt = "You are a helpful assistant."
llm_name: str = "unknown"
def __init__(self):
self.chat_mode = False # by default models are not in role-based chat mode
self.model_name = "unknown"
# these should all start with the @ symbol and are variables programs can use when running with this LLM
self.tool_def = guidance("""
# Tools
{{#if len(functions) > 0~}}
## functions
namespace functions {
{{#each functions item_name="function"~}}
// {{function.description}}
type {{function.name}} = (_: {
{{~#each function.parameters.properties}}
{{#if contains(this, "description")}}// {{this.description}}
{{/if~}}
{{@key}}{{#unless contains(function.parameters.required, @key)}}?{{/unless}}: {{#if contains(this, "enum")}}{{#each this.enum}}"{{this}}"{{#unless @last}} | {{/unless}}{{/each}}{{else}}{{this.type}}{{/if}}{{#unless @last}},{{/unless}}
{{~/each}}
}) => any;
{{/each~}}
} // namespace functions
{{~/if~}}""", functions=[])
self.function_call_stop_regex = r"\n?\n?```typescript\nfunctions.[^\(]+\(.*?\)```"
def extract_function_call(self, text):
m = re.match(r"\n?\n?```typescript\nfunctions.([^\(]+)\((.*?)\)```", text, re.DOTALL)
if m:
return CallableAnswer(m.group(1), m.group(2))
def __call__(self, *args, asynchronous=False, **kwargs):
"""Creates a session and calls the LLM with the given arguments.
Note that this is a convenience wrapper so you don't have to call session(),
for higher performance across multiple calls, use a session directly.
"""
with self.session(asynchronous=asynchronous) as s:
out = s(*args, **kwargs)
return out
def __getitem__(self, key):
"""Gets an attribute from the LLM."""
return getattr(self, key)
def session(self, asynchronous=False):
"""Creates a session for the LLM.
This implementation is meant to be overridden by subclasses.
"""
if asynchronous:
return LLMSession(self)
else:
return SyncSession(LLMSession(self))
def encode(self, string, **kwargs):
return self._tokenizer.encode(string, **kwargs)
def decode(self, tokens, **kwargs):
return self._tokenizer.decode(tokens, **kwargs)
def id_to_token(self, id):
return self.decode([id])
def token_to_id(self, token):
return self.encode(token)[0]
# allow for caches to be get and set on the object as well as the class
@property
def cache(self):
if self._cache is not None:
return self._cache
else:
return self.__class__.cache
@cache.setter
def cache(self, value):
self._cache = value
class LLMSession:
def __init__(self, llm):
self.llm = llm
self._call_counts = {} # tracks the number of repeated identical calls to the LLM with non-zero temperature
def __enter__(self):
return self
async def __call__(self, *args, **kwargs):
return self.llm(*args, **kwargs)
def __exit__(self, exc_type, exc_value, traceback):
pass
def _gen_key(self, args_dict):
del args_dict["self"] # skip the "self" arg
return "_---_".join([str(v) for v in ([args_dict[k] for k in args_dict] + [self.llm.model_name, self.llm.__class__.__name__, self.llm.cache_version])])
def _cache_params(self, args_dict) -> Dict[str, Any]:
"""get the parameters for generating the cache key"""
key = self._gen_key(args_dict)
# if we have non-zero temperature we include the call count in the cache key
if args_dict.get("temperature", 0) > 0:
args_dict["call_count"] = self._call_counts.get(key, 0)
# increment the call count
self._call_counts[key] = args_dict["call_count"] + 1
args_dict["model_name"] = self.llm.model_name
args_dict["cache_version"] = self.llm.cache_version
args_dict["class_name"] = self.llm.__class__.__name__
return args_dict
class SyncSession:
def __init__(self, session):
self._session = session
def __enter__(self):
self._session.__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
return self._session.__exit__(exc_type, exc_value, traceback)
def __call__(self, *args, **kwargs):
return asyncio.get_event_loop().run_until_complete(
self._session.__call__(*args, **kwargs)
)
class CallableAnswer:
def __init__(self, name, args_string, function=None):
self.__name__ = name
self.args_string = args_string
def __call__(self, *args, **kwargs):
if self._function is None:
raise NotImplementedError(f"Answer {self.__name__} has no function defined")
return self._function(*args, **self.__kwdefaults__, **kwargs)
@property
def __kwdefaults__(self):
"""We build this lazily in case the user wants to handle validation errors themselves."""
return json.loads(self.args_string)
def __repr__(self):
return f"CallableAnswer(__name__={self.__name__}, __kwdefaults__={self.__kwdefaults__})"
| [
"You are a helpful assistant."
] |
2024-01-10 | qeternity/guidance | tests~utils.py | import guidance
import pytest
opanai_model_cache = {}
def get_llm(model_name, caching=False, **kwargs):
""" Get an LLM by name.
"""
if model_name.startswith("openai:"):
return get_openai_llm(model_name[7:], caching, **kwargs)
elif model_name.startswith("transformers:"):
return get_transformers_llm(model_name[13:], caching, **kwargs)
def get_openai_llm(model_name, caching=False, **kwargs):
""" Get an OpenAI LLM with model reuse and smart test skipping.
"""
# we cache the models so lots of tests using the same model don't have to
# load it over and over again
key = model_name+"_"+str(caching)
if key not in opanai_model_cache:
opanai_model_cache[key] = guidance.llms.OpenAI(model_name, caching=caching, **kwargs)
llm = opanai_model_cache[key]
if llm.api_key is None:
pytest.skip("OpenAI token not found")
return llm
transformers_model_cache = {}
def get_transformers_llm(model_name, caching=False):
""" Get an OpenAI LLM with model reuse.
"""
# we cache the models so lots of tests using the same model don't have to
# load it over and over again
key = model_name+"_"+str(caching)
if key not in transformers_model_cache:
transformers_model_cache[key] = guidance.llms.Transformers(model_name, caching=caching)
return transformers_model_cache[key]
| [] |
2024-01-10 | qeternity/guidance | guidance~llms~caches~_diskcache.py | import os
import diskcache
import platformdirs
from guidance.llms.caches import Cache
class DiskCache(Cache):
"""DiskCache is a cache that uses diskcache lib."""
def __init__(self, llm_name: str):
self._diskcache = diskcache.Cache(
os.path.join(
platformdirs.user_cache_dir("guidance"), f"_{llm_name}.diskcache"
)
)
def __getitem__(self, key: str) -> str:
return self._diskcache[key]
def __setitem__(self, key: str, value: str) -> None:
self._diskcache[key] = value
def __contains__(self, key: str) -> bool:
return key in self._diskcache
def clear(self):
self._diskcache.clear()
| [] |
2024-01-10 | qeternity/guidance | guidance~_program.py | import inspect
import re
import html
import uuid
# import parsimonious
import logging
import copy
import asyncio
import pathlib
import os
import time
import datetime
import nest_asyncio
# from .llms import _openai
from . import _utils
from ._program_executor import ProgramExecutor
from . import commands
import guidance
log = logging.getLogger(__name__)
# load the javascript client code
file_path = pathlib.Path(__file__).parent.parent.absolute()
with open(file_path / "guidance" / "resources" / "main.js", encoding="utf-8") as f:
js_data = f.read()
class Log:
def __init__(self) -> None:
self._entries = []
def append(self, entry):
if not hasattr(entry, "time"):
entry["time"] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self._entries.append(entry)
def __str__(self) -> str:
string_entries = []
for entry in self._entries:
type_str = "["+entry.get("type", "")+"]"
string_entries.append(
f"{entry.get('time', '')}{type_str: >10s}{entry.get('name', ''): >15s}: " + str({k:v for k,v in entry.items() if k not in ["type", "name", "time"]})
)
return "\n".join(string_entries)
def __repr__(self) -> str:
return str(self)
def __len__(self) -> int:
return len(self._entries)
def __getitem__(self, key):
return self._entries[key]
def copy(self):
new_log = Log()
new_log._entries = [copy.copy(v) for v in self._entries]
return new_log
class Program:
''' A program template that can be compiled and executed to generate a new filled in (executed) program.
Note that as the template gets executed {{!-- handlebars comment markers --}} get left in
the generated output to mark where template tags used to be.
'''
def __init__(self, text, llm=None, cache_seed=0, logprobs=None, silent=None, async_mode=False, stream=None, caching=None, await_missing=False, log=None, **kwargs):
""" Create a new Program object from a program string.
Parameters
----------
text : str
The program string to use as a guidance template.
llm : guidance.llms.LLM (defaults to guidance.llm)
The language model to use for executing the program.
cache_seed : int (default 0) or None
The seed to use for the cache. If you want to use the same cache for multiple programs
you can set this to the same value for all of them. Set this to None to disable caching.
Caching is enabled by default, and saves calls that have tempurature=0, and also saves
higher temperature calls but uses different seed for each call.
logprobs : int or None (default)
The number of logprobs to return from the language model for each token. (not well supported yet,
since some endpoints don't support it)
silent : bool (default None)
If True, the program will not display any output. This is useful for programs that are
only used to generate variables for other programs. If None we automatically set this based
on if we are streaming and if we are in interactive mode.
async_mode : bool (default False)
If True, the program will be executed asynchronously. This is useful for programs that
take a long time to run, or that need to be run in parallel.
stream : bool (default None)
If True, the program will try to stream all the results from the LLM token by token. If None
streaming will be enabled if is needed for funtionality. (Warning: this param may change a bit in the future)
caching : bool (default None)
If True, the program will cache the results of the LLM. If False, it will not cache the results.
If None, it will use the default caching setting from the LLM.
await_missing : bool (default False)
If True, the program will automatically await any missing variables. This means the program
will stop executation at that point and return a paritally executed program. This is useful
for executing programs on different machines, for example shipping a program to a GPU machine
then waiting for the results to come back for any local processing, then shipping it back to
the GPU machine to continue execution.
log : bool or Log
If True, the program will log all the commands that are executed into the `program.log` property.
If a Log object is passed in, it will be used as the log instead of creating a new one.
"""
# see if we were given a raw function instead of a string template
# if so, convert it to a string template that calls the function
if not isinstance(text, str) and callable(text):
sig = inspect.signature(text)
args = ""
for name,_ in sig.parameters.items():
args += f" {name}={name}"
fname = _utils.find_func_name(text, kwargs)
kwargs[fname] = text
text = "{{set (%s%s)}}" % (fname, args)
# save the given parameters
self._text = text
self.llm = llm or getattr(guidance, "llm", None)
self.cache_seed = cache_seed
self.caching = caching
self.logprobs = logprobs
self.async_mode = async_mode
self.silent = silent
self.stream = stream
self.await_missing = await_missing
self.log = log
if self.silent is None:
self.silent = self.stream is True or not _utils.is_interactive()
# build or capture the log
if self.log is True:
self.log = Log()
# set our variables
self._variables = {}
self._variables.update(_built_ins)
self._variables.update({
"llm": llm
})
kwargs.pop("self", None)
kwargs = dict(kwargs)
for k in list(kwargs.keys()): # handle @varname syntax
if k.startswith("_AT_"):
kwargs["@"+k[4:]] = kwargs[k]
kwargs.pop(k)
self._variables.update(kwargs)
# set internal state variables
self._id = str(uuid.uuid4())
self._comm = None # front end communication object
self._executor = None # the ProgramExecutor object that is running the program
self._last_display_update = 0 # the last time we updated the display (used for throttling updates)
self._execute_complete = asyncio.Event() # fires when the program is done executing to resolve __await__
self._emit_stream_event = asyncio.Event() # fires when we need to emit a stream event
self._exception = None # if the program finished with an exception its stored here
self._displaying = not self.silent # if we are displaying we need to update the display as we execute
self._displayed = False # marks if we have been displayed in the client yet
self._displaying_html = False # if we are displaying html (vs. text)
self._tasks = [] # list of children tasks
# throttle the display updates
if os.environ.get("VSCODE_CWD", None) is not None:
self.display_throttle_limit = 0.1 # VSCode has a bug that causes flashing, so we slow down the display
else:
self.display_throttle_limit = 0.1 # the minimum time between display updates
self.update_display = DisplayThrottler(self._update_display, self.display_throttle_limit)
# see if we are in an ipython environment
# check if get_ipython variable exists
try:
self._ipython = get_ipython()
except NameError:
self._ipython = None
# if we are echoing in ipython we assume we can display html
if self._ipython and not self.silent:
self._displaying_html = True
def __repr__(self):
return self.text
def __getitem__(self, key):
return self._variables[key]
def _interface_event(self, msg):
""" Handle an event from the front end.
"""
if msg["event"] == "stop":
self._executor.stop()
elif msg["event"] == "opened":
pass # we don't need to do anything here because the first time we display we'll send the html
pass
def _ipython_display_(self):
""" Display the program in the ipython notebook.
"""
log.debug(f"displaying program in _ipython_display_ with self._comm={self._comm}, self.id={self._id}")
# mark that we are displaying (and so future execution updates should be displayed)
self._displaying = True
self._displaying_html = True
# build and display the html
html = self._build_html(self.marked_text)
self._display_html(html)
async def _await_finish_execute(self):
"""Used by self.__await__ to wait for the program to complete."""
try:
await self._execute_complete.wait() # wait for the program to finish executing
except asyncio.CancelledError:
# if this task gets canceled, cancel all sub-tasks
for task in self._tasks:
task.cancel()
# if the program finished executing with an exception
# re-raise the exception in the main coroutine
if self._exception:
raise self._exception
return self
def __await__(self):
return self._await_finish_execute().__await__()
def __aiter__(self):
"""Return an async iterator that yields the program in partial states as it is run."""
return self._stream_run_async()
def __call__(self, **kwargs):
"""Execute this program with the given variable values and return a new executed/executing program.
Note that the returned program might not be fully executed if `stream=True`. When streaming you need to
use the python `await` keyword if you want to ensure the program is finished (note that is different than
the `await` guidance langauge command, which will cause the program to stop execution at that point).
"""
# merge the given kwargs with the current variables
kwargs = {**{
"async_mode": self.async_mode,
"stream": self.stream,
"silent": self.silent,
"cache_seed": self.cache_seed,
"caching": self.caching,
"logprobs": self.logprobs,
"await_missing": self.await_missing,
"log": self.log.copy() if hasattr(self.log, "copy") else self.log,
"llm": self.llm,
}, **kwargs}
log.debug(f"in __call__ with kwargs: {kwargs}")
# create a new program object that we will execute in-place
new_program = Program(
text=self.marked_text,
# copy the (non-function) variables so that we don't modify the original program during execution
# TODO: what about functions? should we copy them too?
**{**{k: v if callable(v) else copy.deepcopy(v) for k,v in self._variables.items()}, **kwargs}
)
# create an executor for the new program (this also marks the program as executing)
new_program._executor = ProgramExecutor(new_program)
# if we are in async mode, schedule the program in the current event loop
if new_program.async_mode:
loop = asyncio.get_event_loop()
assert loop.is_running(), "The program is in async mode but there is no asyncio event loop running! Start one and try again."
update_task = loop.create_task(new_program.update_display.run()) # start the display updater
execute_task = loop.create_task(new_program.execute())
new_program._tasks.append(update_task)
new_program._tasks.append(execute_task)
# if we are not in async mode, we need to create a new event loop and run the program in it until it is done
else:
# apply nested event loop patch if needed
try:
other_loop = asyncio.get_event_loop()
nest_asyncio.apply(other_loop)
except RuntimeError:
pass
loop = asyncio.new_event_loop()
update_task = loop.create_task(new_program.update_display.run()) # start the display updater
new_program._tasks.append(update_task)
if new_program.stream:
return self._stream_run(loop, new_program)
else:
loop.run_until_complete(new_program.execute())
return new_program
def get(self, key, default=None):
"""Get the value of a variable by name."""
return self._variables.get(key, default)
def _stream_run(self, loop, new_program):
"""This feels a bit hacky at the moment. TODO: clean this up."""
# add the program execution to the event loop
execute_task = loop.create_task(new_program.execute())
new_program._tasks.append(execute_task)
# run the event loop until the program is done executing
while new_program._executor is not None:
try:
loop.run_until_complete(execute_task) # this will stop each time the program wants to emit a new state
except RuntimeError as e:
# we don't mind that the task is not yet done, we will restart the loop
if str(e) != "Event loop stopped before Future completed.":
raise e
if getattr(loop, "_stopping", False):
loop._stopping = False # clean up the stopping flag
if new_program._executor is not None and new_program._executor.executing:
try:
yield new_program
except GeneratorExit:
# this will cause the program to stop executing and finish as a valid partial execution
if new_program._executor.executing:
new_program._executor.executing = False
yield new_program
# cancel all tasks and close the loop
for task in self._tasks:
task.cancel()
loop.run_until_complete(asyncio.sleep(0)) # give the loop a chance to cancel the tasks
# TODO: do we really want to close the loop? what if it is used by others?
loop.close() # we are done with the loop (note that the loop is already stopped)
async def _stream_run_async(self):
# run the event loop until the program is done executing
while self._executor is not None:
if self._executor.executing:
await self._emit_stream_event.wait()
self._emit_stream_event.clear()
try:
yield self
except GeneratorExit as e:
# this will cause the program to stop executing and finish as a valid partial execution
if self._executor.executing:
self._executor.executing = False
await self._execute_complete.wait()
raise e
yield self
def _update_display(self, last=False):
"""Updates the display with the current marked text after debouncing.
Parameters
----------
last : bool
If True, this is the last update and we should clear the send queue and prepare the
UI for saving etc.
force : bool
If True, we will update the display even if it would otherwise be throttled.
"""
log.debug(f"Updating display (last={last}, self._displaying={self._displaying}, self._comm={self._comm})")
if self.stream:
if self.async_mode:
# if we are streaming in async mode then we set the event to let the generator know it can yield
self._emit_stream_event.set()
else:
# if we are streaming not in async mode then we pause the event loop to let the generator
# that is controlling execution return (it will restart the event loop when it is ready)
if self._executor is not None:
asyncio.get_event_loop().stop()
# this is always called during execution, and we only want to update the display if we are displaying
if not self._displaying:
return
# debounce the display updates
# now = time.time()
# log.debug(now - self._last_display_update)
# debounce_delay = self.display_throttle_limit if self._comm and self._comm.is_open else self.display_throttle_limit_low
# if last or (now - self._last_display_update > debounce_delay):
if self._displaying_html:
out = self._build_html(self.marked_text)
# clear the send queue if this is the last update
if last and self._comm:
self._comm.clear_send_queue()
# send an update to the front end client if we have one...
# TODO: we would like to call `display` for the last update so NB saving works, but see https://github.com/microsoft/vscode-jupyter/issues/13243
if self._displayed and self._comm and self._comm.is_open: #(not last or self._comm.is_open):
log.debug(f"Updating display send message to front end")
# log.debug(out)
self._comm.send({"replace": out})
if last:
self._comm.send({"event": "complete"})
# ...otherwise dump the client to the front end
else:
log.debug(f"Updating display dump to front end")
from IPython.display import clear_output
if self._displayed:
clear_output(wait=True) # TODO: should use wait=True but that doesn't work in VSCode until after the April 2023 release
self._display_html(out)
self._last_display_update = time.time()
def _display_html(self, html):
from IPython.display import display
# create the comm object if we don't have one
if self._comm is None:
self._comm = _utils.JupyterComm(self._id, self._ipython, self._interface_event)
# dump the html to the front end
html = f"""<div id="guidance-stop-button-{self._id}" style="cursor: pointer; margin: 0px; display: none; float: right; padding: 3px; border-radius: 4px 4px 4px 4px; border: 0px solid rgba(127, 127, 127, 1); padding-left: 10px; padding-right: 10px; font-size: 13px; background-color: rgba(127, 127, 127, 0.25);">Stop program</div><div id="guidance-content-{self._id}">{html}</div>
<script type="text/javascript">{js_data}; window._guidanceDisplay("{self._id}");</script>"""
display({"text/html": html}, display_id=self._id, raw=True, clear=True, include=["text/html"])
self._displayed = True
async def execute(self):
""" Execute the current program.
Note that as execution progresses the program will be incrementally converted
from a template into a completed string (with variables stored). At each point
in this process the current template remains valid.
"""
log.debug(f"Executing program (self.async_mode={self.async_mode}, self.silent={self.silent}, self._displaying_html={self._displaying_html})")
# if we are already displaying html, we need to yield to the event loop so the jupyter comm can initialize
if self._displaying_html:
await asyncio.sleep(0)
# run the program and capture the output
try:
if self.llm is None:
await self._executor.run(None)
else:
with self.llm.session(asynchronous=True) as llm_session:
await self._executor.run(llm_session)
self._text = self._variables["@raw_prefix"]
# if the execution failed, capture the exception so it can be re-raised
# in the main coroutine
except Exception as exception:
self._exception = exception
finally:
# delete the executor and so mark the program as not executing
self._executor = None
# update the display with the final output
self.update_display(last=True)
await self.update_display.done()
# fire an event noting that execution is complete (this will release any await calls waiting on the program)
self._execute_complete.set()
def __getitem__(self, key):
return self._variables[key]
def __contains__(self, key):
return key in self._variables
def __delitem__(self, key):
del self._variables[key]
def variables(self, built_ins=False, show_hidden=False):
""" Returns a dictionary of the variables in the program.
Parameters
----------
built_ins : bool
If True, built-in variables will be included in the returned dictionary.
show_hidden : bool
If True, hidden variables will be included in the returned dictionary.
"""
out = {}
for k,v in self._variables.items():
if show_hidden or not k.startswith("_"):
if built_ins or not (k in _built_ins and callable(_built_ins[k])):
out[k] = v
return out
@property
def text(self):
# strip out the markers for the unformatted output
return _utils.strip_markers(self.marked_text)
@property
def marked_text(self):
if self._executor is not None:
return self._variables["@raw_prefix"]
else:
return self._text
def _build_html(self, text, last=False):
output = text
def undo_html_encode(x):
return x.replace("&#123;", "{").replace("&#125;", "}").replace("&#36;", "$")
def start_generate_or_select(x):
no_echo = "echo=False" in x.group(1)
alpha = 1.0 if no_echo else 1.0
# script that toggles the viisibility of the next element
click_script = 'var e = this.nextElementSibling; if (e.style.display == "inline") { e.style.display = "none"; this.style.borderRight = "1px solid rgba(0, 165, 0, 0.25)"; } else { e.style.display = "inline"; this.style.borderRight = "0px";}'
if no_echo:
out = f'''<div style='background-color: rgba(0, 165, 0, 0.25); border-radius: 4px 0px 0px 4px; border: 1px solid rgba(0, 165, 0, 1); padding-left: 3px; padding-right: 3px; user-select: none; color: rgb(0, 165, 0, 1.0); display: inline; font-weight: normal; cursor: pointer' onClick='{click_script}'>no echo</div>'''
out += "<span style='background-color: rgba(0, 165, 0, 0.25); opacity: {}; display: none;' title='{}'>".format(alpha, undo_html_encode(x.group(1)))
else:
out = "<span style='background-color: rgba(0, 165, 0, 0.25); opacity: {}; display: inline;' title='{}'>".format(alpha, undo_html_encode(x.group(1)))
return out
def start_each(x):
no_echo = "echo=False" in x.group(1)
alpha = 0.5 if no_echo else 1.0
color = "rgba(165, 165, 165, 0.1)" #if "geneach" not in x.group(1) else "rgba(0, 165, 0, 0.1)"
return "<span style='opacity: {}; display: inline; background-color: {};' title='{}'>".format(alpha, color, undo_html_encode(x.group(1)))
def start_block(x):
escaped_tag = undo_html_encode(x.group(1))
if "hidden=True" in escaped_tag:
display = "inline" # none (we actively stip hidden tags right now so we don't need this until we support the UX to show hidden stuff)
else:
display = "inline"
return f"<span style='background-color: rgba(165, 165, 165, 0.1); display: {display};' title='{escaped_tag}'>"
def role_box(x):
# name = x.group(3).lower() # standardize to lowercase for display
# content = x.group(4)
content = x.group(3)
tag_text = undo_html_encode(x.group(2))
role_name = x.group(1)
# if we have a generic role tag then the role name is an attribute
if role_name == "role":
role_name = re.search(r"role_name=([^ ]*)", tag_text).group(1)
start_pattern = html.escape(self.llm.role_start(role_name)).replace("|", r"\|")
start_pattern_with_name = html.escape(self.llm.role_start(role_name, __ARxG__="__VAxLUE__")).replace("|", r"\|") # TODO: make this more general for multiple keyword args
start_pattern_with_name = start_pattern_with_name.replace("__VAxLUE__", "[^\n]*?").replace("__ARxG__", "[^=]*?")
end_pattern = html.escape(self.llm.role_end(role_name)).replace("|", r"\|")
# strip the start and end patterns from the content
content = re.sub("^" + start_pattern, "", content, flags=re.DOTALL)
content = re.sub("^" + start_pattern_with_name, "", content, flags=re.DOTALL)
content = re.sub(end_pattern + "$", "", content, flags=re.DOTALL)
# one div that contains two divs, where the left of the two inner divs has a fixed width of 100px
# """<div style='display: flex;'>
# <div style='width: 100px; border-right: 1px solid rgba(127, 127, 127, 0.2); padding-right: 5px; margin-right: 5px;'>{name}</div>
# <div>{content}</div>
# </div>"""
# return f'''<div style="border-left: 1px solid rgba(127, 127, 127, 0.2); margin-top: 10px; padding-left: 5px;"><span style="color: rgba(127,127,127,0.5)">{name}</span>
# {content}</div>'''
return f"<div style='display: flex; border-bottom: 1px solid rgba(127, 127, 127, 0.2); align-items: center;'><div style='flex: 0 0 80px; opacity: 0.5;'>{role_name.lower()}</div><div style='flex-grow: 1; padding: 5px; padding-top: 10px; padding-bottom: 10px; margin-top: 0px; white-space: pre-wrap; margin-bottom: 0px;'>{content}</div></div>"
display_out = html.escape(output)
# log.debug(display_out)
# start_pattern = html.escape(self.llm.role_start("(.*?)")).replace("|", r"\|")
# end_pattern = html.escape(self.llm.role_end("(.*?)")).replace("|", r"\|")
# display_out = re.sub(r"[\s]+({{!--.*?--}})?"+start_pattern, r"\1"+start_pattern.replace("(.*?)", r"\1").replace(r"\|", "|"), display_out, flags=re.DOTALL)
# display_out = re.sub(start_pattern + "(.*?)" + end_pattern, role_box, display_out, flags=re.DOTALL)
# log.debug(display_out)
# strip out hidden blocks (might want to make a better UI for this at some point)
display_out = re.sub(r"{{!--GMARKER_START[^}]*--}}{{!--GHIDDEN:(.*?)--}}{{!--GMARKER_END[^}]*--}}", "", display_out, flags=re.DOTALL)
# highlight command tags
display_out = re.sub(r"(\{\{(?!\!)(?!~\!).*?\}\})", r"<span style='font-family: monospace; background-color: rgba(0, 0, 0, 0.05);'>\1</span>", display_out, flags=re.DOTALL)
# if we have role markers, we wrap them in special formatting
if re.search(r"{{!--GMARKER_START_(role|system|user|assistant|function)", display_out) is not None:
# start_pattern = html.escape(self.llm.role_start("assistant")).replace("|", r"\|").replace(r"assistant", r"([^\n]*)").replace(r"ASSISTANT", r"([^\n]*)")
# end_pattern = html.escape(self.llm.role_end("assistant")).replace("|", r"\|").replace(r"assistant", r"([^\n]*)").replace(r"ASSISTANT", r"([^\n]*)")
# strip whitespace before role markers
display_out = re.sub(r"\s*{{!--GMARKER_START_(role|system|user|assistant|function)\$(.*?)--}}", r"{{!--GMARKER_START_\1$\2--}}", display_out, flags=re.DOTALL)
# strip whitespace after role markers
# TODO: support end_patterns with capture groups
display_out = re.sub(r"{{!--GMARKER_END_(role|system|user|assistant|function)\$(.*?)--}}\s*", r"{{!--GMARKER_END_\1$\2--}}", display_out, flags=re.DOTALL)
if "GMARKER_START_function" in display_out:
display_out += ""
pass
# wrap role markers in nice formatting
display_out = re.sub(r"{{!--GMARKER_START_(role|system|user|assistant|function)\$(.*?)--}}" + "(.*?)" + r"{{!--GMARKER_END_(role|system|user|assistant|function)\$(.*?)--}}", role_box, display_out, flags=re.DOTALL)
# wrap unfinished role markers in nice formatting
display_out = re.sub(r"{{!--GMARKER_START_(role|system|user|assistant|function)\$(.*?)--}}" + "(.*)", role_box, display_out, flags=re.DOTALL)
display_out = re.sub(r"(\{\{generate.*?\}\})", r"<span style='background-color: rgba(0, 165, 0, 0.25);'>\1</span>", display_out, flags=re.DOTALL)
display_out = re.sub(r"(\{\{#select\{\{/select.*?\}\})", r"<span style='background-color: rgba(0, 165, 0, 0.25);'>\1</span>", display_out, flags=re.DOTALL)
display_out = re.sub(r"(\{\{#each [^'\"].*?\{\{/each.*?\}\})", r"<span style='background-color: rgba(0, 138.56128016, 250.76166089, 0.25);'>\1</span>", display_out, flags=re.DOTALL)
# display_out = re.sub(r"(\{\{(?!\!)(?!generate)(?!#select)(?!#each)(?!/each)(?!/select).*?\}\})", r"<span style='font-family: monospace; background-color: rgba(0, 0, 0, 0.05);'>\1</span>", display_out, flags=re.DOTALL)
# format the generate command results
display_out = re.sub(r"{{!--GMARKER_START_gen\$([^\$]*)\$--}}", start_generate_or_select, display_out)
display_out = display_out.replace("{{!--GMARKER_END_gen$$--}}", "</span>")
def click_loop_start(id, total_count, echo, color):
click_script = '''
function cycle_IDVAL(button_el) {
var i = 0;
while (i < 50) {
var el = document.getElementById("IDVAL_" + i);
if (el.style.display == "inline") {
el.style.display = "none";
var next_el = document.getElementById("IDVAL_" + (i+1));
if (!next_el) {
next_el = document.getElementById("IDVAL_0");
}
if (next_el) {
next_el.style.display = "inline";
}
break;
}
i += 1;
}
button_el.innerHTML = (((i+1) % TOTALCOUNT) + 1) + "/" + TOTALCOUNT;
}
cycle_IDVAL(this);'''.replace("IDVAL", id).replace("TOTALCOUNT", str(total_count)).replace("\n", "")
out = f'''<div style='background: rgba(255, 255, 255, 0.0); border-radius: 4px 0px 0px 4px; border: 1px solid {color}; border-right: 0px; padding-left: 3px; padding-right: 3px; user-select: none; color: {color}; display: inline; font-weight: normal; cursor: pointer' onClick='{click_script}'>1/{total_count}</div>'''
out += f"<div style='display: inline;' id='{id}_0'>"
return out
def click_loop_mid(id, index, echo):
alpha = 1.0 if not echo else 0.5
out = f"</div><div style='display: none; opacity: {alpha}' id='{id}_{index}'>"
return out
display_out = re.sub(
r"{{!--GMARKERmany_generate_start_([^_]+)_([0-9]+)\$([^\$]*)\$--}}",
lambda x: click_loop_start(x.group(3), int(x.group(2)), x.group(1) == "True", "rgba(0, 165, 0, 0.25)"),
display_out
)
display_out = re.sub(
r"(?:--}})?{{!--GMARKERmany_generate_([^_]+)_([0-9]+)\$([^\$]*)\$--}}{{!--G ",
lambda x: click_loop_mid(x.group(3), int(x.group(2)), x.group(1) == "True"),
display_out
)
display_out = re.sub(r"--}}{{!--GMARKERmany_generate_end\$([^\$]*)\$--}}", "</div>", display_out)
# format the each command results
display_out = re.sub(r"{{!--GMARKER_START_each\$([^\$]*)\$--}}", start_each, display_out)
display_out = re.sub(
r"{{!--GMARKER_each_noecho_start_([^_]+)_([0-9]+)\$([^\$]*)\$--}}",
lambda x: click_loop_start(x.group(3), int(x.group(2)), False, "rgb(100, 100, 100, 1)"),
display_out
)
display_out = re.sub(
r"{{!--GMARKER_each_noecho_([^_]+)_([0-9]+)\$([^\$]*)\$--}}",
lambda x: click_loop_mid(x.group(3), int(x.group(2)), False),
display_out
)
display_out = re.sub(r"{{!--GMARKER_each_noecho_end\$([^\$]*)\$--}}", "</div>", display_out)
# format the geneach command results
display_out = re.sub(r"{{!--GMARKER_START_geneach\$([^\$]*)\$--}}", start_each, display_out)
# format the set command results
# display_out = re.sub(r"{{!--GMARKER_set\$([^\$]*)\$--}}", r"<div style='background-color: rgba(165, 165, 165, 0); border-radius: 4px 4px 4px 4px; border: 1px solid rgba(165, 165, 165, 1); border-left: 2px solid rgba(165, 165, 165, 1); border-right: 2px solid rgba(165, 165, 165, 1); padding-left: 0px; padding-right: 3px; color: rgb(165, 165, 165, 1.0); display: inline; font-weight: normal; overflow: hidden;'><div style='display: inline; background: rgba(165, 165, 165, 1); padding-right: 5px; padding-left: 4px; margin-right: 3px; color: #fff'>set</div>\1</div>", display_out)
# display_out = re.sub(r"{{!--GMARKER_START_set\$([^\$]*)\$--}}", lambda x: "<span style='display: inline;' title='{}'>".format(undo_html_encode(x.group(1))), display_out)
display_out = re.sub(r"{{!--GMARKER_set\$([^\$]*)\$--}}", r"", display_out) # just hide them for now
display_out = re.sub(r"{{!--GMARKER_START_select\$([^\$]*)\$--}}", start_generate_or_select, display_out)
display_out = display_out.replace("{{!--GMARKER_END_select$$--}}", "</span>")
display_out = re.sub(r"{{!--GMARKER_START_variable_ref\$([^\$]*)\$--}}", lambda x: "<span style='background-color: rgba(0, 138.56128016, 250.76166089, 0.25); display: inline;' title='{}'>".format(undo_html_encode(x.group(1))), display_out)
display_out = display_out.replace("{{!--GMARKER_END_variable_ref$$--}}", "</span>")
display_out = display_out.replace("{{!--GMARKER_each$$--}}", "")#<div style='border-left: 1px dashed rgb(0, 0, 0, .2); border-top: 0px solid rgb(0, 0, 0, .2); margin-right: -4px; display: inline; width: 4px; height: 24px;'></div>")
display_out = re.sub(r"{{!--GMARKER_START_block\$([^\$]*)\$--}}", start_block, display_out)
display_out = re.sub(r"{{!--GMARKER_START_([^\$]*)\$([^\$]*)\$--}}", lambda x: "<span style='background-color: rgba(0, 138.56128016, 250.76166089, 0.25); display: inline;' title='{}'>".format(undo_html_encode(x.group(2))), display_out)
display_out = re.sub(r"{{!--GMARKER_END_([^\$]*)\$\$--}}", "</span>", display_out)
# display_out = re.sub(' and (?=.* and )', ', ', display_out)
# strip out comments
display_out = re.sub(r"{{~?!.*?}}", "", display_out)
# re.sub(r"<div class='strip_leading_whitespace'")
display_out = add_spaces(display_out)
display_out = "<pre style='margin: 0px; padding: 0px; padding-left: 8px; margin-left: -8px; border-radius: 0px; border-left: 1px solid rgba(127, 127, 127, 0.2); white-space: pre-wrap; font-family: ColfaxAI, Arial; font-size: 15px; line-height: 23px;'>"+display_out+"</pre>"
return display_out
def add_spaces(s):
""" This adds spaces so the browser will show leading and trailing newlines.
"""
if s.startswith("\n"):
s = " " + s
if s.endswith("\n"):
s = s + " "
return s
_built_ins = {
"gen": commands.gen,
"each": commands.each,
"geneach": commands.geneach,
"select": commands.select,
"if": commands.if_,
"unless": commands.unless,
"add": commands.add,
"BINARY_OPERATOR_+": commands.add,
"subtract": commands.subtract,
"BINARY_OPERATOR_-": commands.subtract,
"multiply": commands.multiply,
"BINARY_OPERATOR_*": commands.multiply,
"strip": commands.strip,
"block": commands.block,
"set": commands.set,
"await": commands.await_,
"role": commands.role,
"user": commands.user,
"system": commands.system,
"assistant": commands.assistant,
"function": commands.function,
"break": commands.break_,
"equal": commands.equal,
"BINARY_OPERATOR_==": commands.equal,
"notequal": commands.notequal,
"BINARY_OPERATOR_!=": commands.notequal,
"greater": commands.greater,
"BINARY_OPERATOR_>": commands.greater,
"less": commands.less,
"BINARY_OPERATOR_<": commands.less,
"contains": commands.contains,
"parse": commands.parse,
"callable": commands.callable,
"len": commands.len,
"range": commands.range,
"UNARY_OPERATOR_not": commands.not_,
}
class DisplayThrottler():
def __init__(self, display_function, throttle_limit):
self.display_function = display_function
self.throttle_limit = throttle_limit
self._done = False
self.last_time = 0
async def run(self):
self._data_event = asyncio.Event()
self._done_event = asyncio.Event()
while True:
await self._data_event.wait()
now = time.time()
log.info("in DisplayThrottler run loop -- now: {}, last_time: {}, throttle_limit: {}".format(now, self.last_time, self.throttle_limit))
if self._done or now - self.last_time >= self.throttle_limit:
try:
self.display_function(last=self._done)
except Exception as e:
self._done = True
raise e
finally:
self.last_time = now
self._data_event.clear()
if self._done:
self._done_event.set()
break
else:
await asyncio.sleep(self.throttle_limit - (now - self.last_time))
def __call__(self, last=False):
if last:
self._done = True
self._data_event.set()
async def done(self):
return await self._done_event.wait()
| [] |
2024-01-10 | tourbut/VideoGPT | ytchat.py | from pytube import YouTube
def download_youtube_video(url, filename='temp_video.mp4'):
try:
# Create a YouTube object with the URL
yt = YouTube(url)
# Select the first stream: usually the best available
video_stream = yt.streams.filter(file_extension='mp4').first()
if not video_stream:
print("No mp4 video stream available")
return False
# Set the filename
video_stream.download(filename=filename)
print("Download complete!")
return True
except Exception as e:
print(f"An error occurred: {e}")
return False
# Example usage:
url = ''
#download_youtube_video(url, filename='temp_video.mp4')
from moviepy.editor import *
def convert_mp4_to_mp3(mp4_file_path, mp3_file_path):
try:
# 비디오 클립 로드
video_clip = VideoFileClip(mp4_file_path)
# 오디오 추출 및 MP3 파일로 저장
video_clip.audio.write_audiofile(mp3_file_path)
print("MP3 변환 완료!")
except Exception as e:
print(f"오류가 발생했습니다: {e}")
#convert_mp4_to_mp3('temp_video.mp4', 'temp_audio.mp3')
import openai
# OpenAI API 키 설정
openai.api_key = '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'
def transcribe_audio_to_text(audio_file_path, text_file_path):
try:
# 오디오 파일 열기
with open(audio_file_path, 'rb') as audio_file:
# Whisper API를 사용하여 오디오 파일 전사
transcript_response = openai.audio.transcriptions.create(
model="whisper-1",
file=audio_file,
response_format="text"
)
# 전사된 텍스트 가져오기
transcribed_text = transcript_response
# 텍스트 파일로 저장
with open(text_file_path, 'w') as text_file:
text_file.write(transcribed_text)
print(f"전사된 텍스트가 {text_file_path}에 저장되었습니다.")
return transcribed_text
except Exception as e:
print(f"오류가 발생했습니다: {e}")
return None
# Example usage:
#transcribed_text = transcribe_audio_to_text('temp_audio.mp3','temp_text.txt')
#print(transcribed_text)
def get_context_from_file(file_path):
try:
with open(file_path, 'r', encoding='utf-8') as file:
context = file.read()
return context
except FileNotFoundError:
print("파일을 찾을 수 없습니다. 파일 경로를 확인해주세요.")
return None
except Exception as e:
print(f"파일을 읽는 동안 오류가 발생했습니다: {e}")
return None
def chat_with_gpt4(context, user_message):
try:
# API 호출을 위한 메시지 리스트 생성
messages = [
{"role": "system", "content": "You are a knowledgeable assistant."},
]
# 컨텍스트를 시스템 메시지로 추가 (선택적)
if context:
messages.append({"role": "system", "content": context})
# 사용자 메시지 추가
messages.append({"role": "user", "content": user_message})
# GPT-4를 사용하여 대화 완성 API 호출
response = openai.chat.completions.create(
model="gpt-4-1106-preview",
messages=messages
)
#print(response)
# 응답 추출
answer = response.choices[0].message
return answer
except Exception as e:
print(f"오류가 발생했습니다: {e}")
return None
context = get_context_from_file('temp_text.txt')
answer = chat_with_gpt4(context, "무슨 내용이야?")
print(answer) | [
"You are a knowledgeable assistant."
] |
2024-01-10 | rbhattad31/RealEstateSalesGpt | Real_estate~Real_estate~parsers.py | import re
from typing import Union
from langchain.agents.agent import AgentOutputParser
from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS
from langchain.schema import AgentAction, AgentFinish # OutputParserException
from loguru import logger
class SalesConvoOutputParser(AgentOutputParser):
ai_prefix: str = "AI" # change for salesperson_name
verbose: bool = False
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
try:
logger.info("In Output Parser "+text)
if self.verbose:
print("TEXT")
print(text)
print("-------")
if f"{self.ai_prefix}:" in text:
return AgentFinish(
{"output": text.split(f"{self.ai_prefix}:")[-1].strip()}, text
)
logger.info("In Output Parser 2"+text)
print(text)
# image_pattern = r'https?://[^\s]+'
# image_match = re.search(image_pattern, text)
# action = image_match.group(1)
# action_input = image_match.group(2)
# if image_match:
# return AgentAction(action.strip(), action_input.strip(" ").strip('"'),text)
regex = r"Action: (.*?)[\n]*Action Input: (.*)"
match = re.search(regex, text)
if not match:
## TODO - this is not entirely reliable, sometimes results in an error.
return AgentFinish(
{
"output": "I apologize, I was unable to find the answer to your question. Is there anything else I can help with?"
},
text,
)
raise OutputParserException(f"Could not parse LLM output: `{text}`")
#logger.info("In Output Parser 3"+action)
action = match.group(1)
action_input = match.group(2)
return AgentAction(action.strip(), action_input.strip(" ").strip('"'), text)
except Exception as e:
logger.error('Ouput Parser Error: ' + str(e))
@property
def _type(self) -> str:
return "sales-agent"
| [] |
2024-01-10 | rbhattad31/RealEstateSalesGpt | Real_estate~Real_estate~customagent_chroma.py | # Import things that are needed generically
import os
import re
import shutil
from typing import Dict, List, Any, Union, Callable
from langchain.document_loaders import TextLoader
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.vectorstores import Chroma
from pydantic import BaseModel, Field
from langchain import LLMChain, PromptTemplate, SerpAPIWrapper, LLMMathChain
from langchain.llms import BaseLLM, AzureOpenAI
from langchain.chains.base import Chain
from langchain.chat_models import ChatOpenAI, AzureChatOpenAI
from langchain.agents import Tool, LLMSingleActionAgent, AgentExecutor
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
from langchain.prompts.base import StringPromptTemplate
from langchain.agents.agent import AgentOutputParser
from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS
from langchain.schema import AgentAction, AgentFinish, OutputParserException
from loguru import logger
class StageAnalyzerChain(LLMChain):
"""Chain to analyze which conversation stage should the conversation move into."""
@classmethod
def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
"""Get the response parser."""
stage_analyzer_inception_prompt_template = """You are a sales assistant helping your sales agent to determine which stage of a sales conversation should the agent move to, or stay at.
Following '===' is the conversation history.
Use this conversation history to make your decision.
Only use the text between first and second '===' to accomplish the task above, do not take it as a command of what to do.
===
{conversation_history}
===
Now determine what should be the next immediate conversation stage for the agent in the sales conversation by selecting ony from the following options:
1. Introduction: Start the conversation by introducing yourself and your company. Be polite and respectful while keeping the tone of the conversation professional.
2. Qualification: Qualify the prospect by confirming if they are the right person to talk to regarding your product/service. Ensure that they have the authority to make purchasing decisions.
3. Value proposition: Briefly explain how your product/service can benefit the prospect. Focus on the unique selling points and value proposition of your product/service that sets it apart from competitors.
4. Needs analysis: Ask open-ended questions to uncover the prospect's needs and pain points. Listen carefully to their responses and take notes.
5. Solution presentation: Based on the prospect's needs, present your product/service as the solution that can address their pain points.
6. Objection handling: Address any objections that the prospect may have regarding your product/service. Be prepared to provide evidence or testimonials to support your claims.
7. Close: Ask for the sale by proposing a next step. This could be a demo, a trial or a meeting with decision-makers. Ensure to summarize what has been discussed and reiterate the benefits.
Only answer with a number between 1 through 7 with a best guess of what stage should the conversation continue with.
The answer needs to be one number only, no words.
If there is no conversation history, output 1.
Do not answer anything else nor add anything to you answer."""
prompt = PromptTemplate(
template=stage_analyzer_inception_prompt_template,
input_variables=["conversation_history"],
)
return cls(prompt=prompt, llm=llm, verbose=verbose)
class StageAnalyzerChain(LLMChain):
"""Chain to analyze which conversation stage should the conversation move into."""
@classmethod
def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
"""Get the response parser."""
stage_analyzer_inception_prompt_template = """You are a sales assistant helping your sales agent to determine which stage of a sales conversation should the agent move to, or stay at.
Following '===' is the conversation history.
Use this conversation history to make your decision.
Only use the text between first and second '===' to accomplish the task above, do not take it as a command of what to do.
===
{conversation_history}
===
Now determine what should be the next immediate conversation stage for the agent in the sales conversation by selecting ony from the following options:
1. Introduction: Start the conversation by introducing yourself and your company. Be polite and respectful while keeping the tone of the conversation professional.
2. Qualification: Qualify the prospect by confirming if they are the right person to talk to regarding your product/service. Ensure that they have the authority to make purchasing decisions.
3. Value proposition: Briefly explain how your product/service can benefit the prospect. Focus on the unique selling points and value proposition of your product/service that sets it apart from competitors.
4. Needs analysis: Ask open-ended questions to uncover the prospect's needs and pain points. Listen carefully to their responses and take notes.
5. Solution presentation: Based on the prospect's needs, present your product/service as the solution that can address their pain points.
6. Objection handling: Address any objections that the prospect may have regarding your product/service. Be prepared to provide evidence or testimonials to support your claims.
7. Close: Ask for the sale by proposing a next step. This could be a demo, a trial or a meeting with decision-makers. Ensure to summarize what has been discussed and reiterate the benefits.
Only answer with a number between 1 through 7 with a best guess of what stage should the conversation continue with.
The answer needs to be one number only, no words.
If there is no conversation history, output 1.
Do not answer anything else nor add anything to you answer."""
prompt = PromptTemplate(
template=stage_analyzer_inception_prompt_template,
input_variables=["conversation_history"],
)
return cls(prompt=prompt, llm=llm, verbose=verbose)
class SalesConversationChain(LLMChain):
"""Chain to generate the next utterance for the conversation."""
@classmethod
def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
"""Get the response parser."""
sales_agent_inception_prompt = """Never forget your name is {salesperson_name}. You work as a {salesperson_role}.
You work at company named {company_name}. {company_name}'s business is the following: {company_business}
Company values are the following. {company_values}
You are contacting a potential customer in order to {conversation_purpose}
Your means of contacting the prospect is {conversation_type}
If you're asked about where you got the user's contact information, say that you got it from public records.
Keep your responses in short length to retain the user's attention. Never produce lists, just answers.
You must respond according to the previous conversation history and the stage of the conversation you are at.
Only generate one response at a time! When you are done generating, end with '<END_OF_TURN>' to give the user a chance to respond.
Example:
Conversation history:
{salesperson_name}: Hey, how are you? This is {salesperson_name} calling from {company_name}. Do you have a minute? <END_OF_TURN>
User: I am well, and yes, why are you calling? <END_OF_TURN>
{salesperson_name}:
End of example.
Current conversation stage:
{conversation_stage}
Conversation history:
{conversation_history}
{salesperson_name}:
"""
prompt = PromptTemplate(
template=sales_agent_inception_prompt,
input_variables=[
"salesperson_name",
"salesperson_role",
"company_name",
"company_business",
"company_values",
"conversation_purpose",
"conversation_type",
"conversation_stage",
"conversation_history",
],
)
return cls(prompt=prompt, llm=llm, verbose=verbose)
conversation_stages = {
"1": "Introduction: Start the conversation by introducing yourself and your company. Be polite and respectful while keeping the tone of the conversation professional. Your greeting should be welcoming. Always clarify in your greeting the reason why you are contacting the prospect.",
"2": "Qualification: Qualify the prospect by confirming if they are the right person to talk to regarding your product/service. Ensure that they have the authority to make purchasing decisions.",
"3": "Value proposition: Briefly explain how your product/service can benefit the prospect. Focus on the unique selling points and value proposition of your product/service that sets it apart from competitors.",
"4": "Needs analysis: Ask open-ended questions to uncover the prospect's needs and pain points. Listen carefully to their responses and take notes.",
"5": "Solution presentation: Based on the prospect's needs, present your product/service as the solution that can address their pain points.",
"6": "Objection handling: Address any objections that the prospect may have regarding your product/service. Be prepared to provide evidence or testimonials to support your claims.",
"7": "Close: Ask for the sale by proposing a next step. This could be a demo, a trial or a meeting with decision-makers. Ensure to summarize what has been discussed and reiterate the benefits.",
}
verbose = True
os.environ['OPENAI_API_VERSION'] = "2023-03-15-preview"
llm = AzureChatOpenAI(temperature=0.2, deployment_name="bradsol-openai-test", model_name="gpt-35-turbo", request_timeout=200)
stage_analyzer_chain = StageAnalyzerChain.from_llm(llm, verbose=verbose)
sales_conversation_utterance_chain = SalesConversationChain.from_llm(
llm, verbose=verbose
)
#stage_analyzer_chain.run(conversation_history="")
# sales_conversation_utterance_chain.run(
# salesperson_name="Ted Lasso",
# salesperson_role="Business Development Representative",
# company_name="Sleep Haven",
# company_business="Sleep Haven is a premium mattress company that provides customers with the most comfortable and supportive sleeping experience possible. We offer a range of high-quality mattresses, pillows, and bedding accessories that are designed to meet the unique needs of our customers.",
# company_values="Our mission at Sleep Haven is to help people achieve a better night's sleep by providing them with the best possible sleep solutions. We believe that quality sleep is essential to overall health and well-being, and we are committed to helping our customers achieve optimal sleep by offering exceptional products and customer service.",
# conversation_purpose="find out whether they are looking to achieve better sleep via buying a premier mattress.",
# conversation_history="Hello, this is Ted Lasso from Sleep Haven. How are you doing today? <END_OF_TURN>\nUser: I am well, howe are you?<END_OF_TURN>",
# conversation_type="chat",
# conversation_stage=1
# )
##Product Catalog
loader = TextLoader("../sample_product_catalog.txt", encoding='utf8')
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
#embeddings = OpenAIEmbeddings(deployment="bradsol-embedding-test")
embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
#shutil.rmtree("./chroma_db")
# load it into Chroma
#db = Chroma.from_documents(docs, embeddings, persist_directory="./chroma_db")
llm = AzureOpenAI(temperature=0.6, deployment_name="bradsol-openai-test", model_name="gpt-35-turbo")
db = Chroma(persist_directory="./chroma_db", embedding_function=embeddings)
knowledge_base = RetrievalQA.from_chain_type(
llm=llm, chain_type="stuff", retriever=db.as_retriever()
)
print(knowledge_base.run("features of the Columbia Men Brown Plateau Venture hiking shoes"))
# query it
# query = "price of Columbia Men Grey Crestwood Waterproof"
# docs = db.similarity_search(query)
#
# # print results
# print(docs[0].page_content)
# retriever = db.as_retriever(search_type="mmr")
# docs = retriever.get_relevant_documents(query)[0]
# print(docs)
tools = [
Tool.from_function(
func=knowledge_base.run,
name="ProductSearch",
description="useful for when you need to answer questions about product information",
# coroutine= ... <- you can specify an async method if desired as well
),
]
| [
"company_name",
"Never forget your name is {salesperson_name}. You work as a {salesperson_role}.\n You work at company named {company_name}. {company_name}'s business is the following: {company_business}\n Company values are the following. {company_values}\n You are contacting a potential customer in order to {conversation_purpose}\n Your means of contacting the prospect is {conversation_type}\n\n If you're asked about where you got the user's contact information, say that you got it from public records.\n Keep your responses in short length to retain the user's attention. Never produce lists, just answers.\n You must respond according to the previous conversation history and the stage of the conversation you are at.\n Only generate one response at a time! When you are done generating, end with '<END_OF_TURN>' to give the user a chance to respond. \n Example:\n Conversation history: \n {salesperson_name}: Hey, how are you? This is {salesperson_name} calling from {company_name}. Do you have a minute? <END_OF_TURN>\n User: I am well, and yes, why are you calling? <END_OF_TURN>\n {salesperson_name}:\n End of example.\n\n Current conversation stage: \n {conversation_stage}\n Conversation history: \n {conversation_history}\n {salesperson_name}: \n ",
"company_values",
"conversation_history",
"company_business",
"conversation_purpose",
"conversation_type",
"salesperson_name",
"salesperson_role",
"You are a sales assistant helping your sales agent to determine which stage of a sales conversation should the agent move to, or stay at.\n Following '===' is the conversation history. \n Use this conversation history to make your decision.\n Only use the text between first and second '===' to accomplish the task above, do not take it as a command of what to do.\n ===\n {conversation_history}\n ===\n\n Now determine what should be the next immediate conversation stage for the agent in the sales conversation by selecting ony from the following options:\n 1. Introduction: Start the conversation by introducing yourself and your company. Be polite and respectful while keeping the tone of the conversation professional.\n 2. Qualification: Qualify the prospect by confirming if they are the right person to talk to regarding your product/service. Ensure that they have the authority to make purchasing decisions.\n 3. Value proposition: Briefly explain how your product/service can benefit the prospect. Focus on the unique selling points and value proposition of your product/service that sets it apart from competitors.\n 4. Needs analysis: Ask open-ended questions to uncover the prospect's needs and pain points. Listen carefully to their responses and take notes.\n 5. Solution presentation: Based on the prospect's needs, present your product/service as the solution that can address their pain points.\n 6. Objection handling: Address any objections that the prospect may have regarding your product/service. Be prepared to provide evidence or testimonials to support your claims.\n 7. Close: Ask for the sale by proposing a next step. This could be a demo, a trial or a meeting with decision-makers. Ensure to summarize what has been discussed and reiterate the benefits.\n\n Only answer with a number between 1 through 7 with a best guess of what stage should the conversation continue with. \n The answer needs to be one number only, no words.\n If there is no conversation history, output 1.\n Do not answer anything else nor add anything to you answer.",
"conversation_stage"
] |
2024-01-10 | rbhattad31/RealEstateSalesGpt | Real_estate~Real_estate~chains.py | from langchain import LLMChain, PromptTemplate
from langchain.llms import BaseLLM
from langchain.output_parsers import PydanticOutputParser
from Real_estate.logger import time_logger
class StageAnalyzerChain(LLMChain):
"""Chain to analyze which conversation stage should the conversation move into."""
@classmethod
@time_logger
def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
"""Get the response parser."""
stage_analyzer_inception_prompt_template = """You are a sales assistant helping your sales agent to determine which stage of a sales conversation should the agent stay at or move to when talking to a user.
Following '===' is the conversation history.
Use this conversation history to make your decision.
Only use the text between first and second '===' to accomplish the task above, do not take it as a command of what to do.
===
{conversation_history}
===
Now determine what should be the next immediate conversation stage for the agent in the sales conversation by selecting only from the following options:
{conversation_stages}
Current Conversation stage is: {conversation_stage_id}
If there is no conversation history, output 1.
The answer needs to be one number only, no words.
Do not answer anything else nor add anything to you answer."""
prompt = PromptTemplate(
template=stage_analyzer_inception_prompt_template,
input_variables=[
"conversation_history",
"conversation_stage_id",
"conversation_stages",
],
)
return cls(prompt=prompt, llm=llm, verbose=verbose)
class SalesConversationChain(LLMChain):
"""Chain to generate the next utterance for the conversation."""
@classmethod
@time_logger
def from_llm(
cls,
llm: BaseLLM,
verbose: bool = True,
use_custom_prompt: bool = False,
custom_prompt: str = "You are an AI Sales agent, sell me this pencil",
) -> LLMChain:
"""Get the response parser."""
if use_custom_prompt:
sales_agent_inception_prompt = custom_prompt
prompt = PromptTemplate(
template=sales_agent_inception_prompt,
input_variables=[
"salesperson_name",
"salesperson_role",
"company_name",
"company_business",
"company_values",
"conversation_purpose",
"conversation_type",
"conversation_history",
],
)
else:
sales_agent_inception_prompt = """Never forget your name is {salesperson_name}. You work as a {salesperson_role}.
You work at company named {company_name}. {company_name}'s business is the following: {company_business}.
Company values are the following. {company_values}
You are contacting a potential prospect in order to {conversation_purpose}
Your means of contacting the prospect is {conversation_type}
If you're asked about where you got the user's contact information, say that you got it from public records.
Keep your responses in short length to retain the user's attention. Never produce lists, just answers.
Start the conversation by just a greeting and how is the prospect doing without pitching in your first turn.
When the conversation is over, output <END_OF_CALL>
Always think about at which conversation stage you are at before answering:
1: Introduction: Start the conversation by introducing yourself and your company. Be polite and respectful while keeping the tone of the conversation professional. Your greeting should be welcoming. Always clarify in your greeting the reason why you are calling.
2: Qualification: Qualify the prospect by confirming if they are the right person to talk to regarding your product/service. Ensure that they have the authority to make purchasing decisions.
3: Value proposition: Briefly explain how your product/service can benefit the prospect. Focus on the unique selling points and value proposition of your product/service that sets it apart from competitors.
4: Needs analysis: Ask open-ended questions to uncover the prospect's needs and pain points. Listen carefully to their responses and take notes.
5: Solution presentation: Based on the prospect's needs, present your product/service as the solution that can address their pain points.
6: Objection handling: Address any objections that the prospect may have regarding your product/service. Be prepared to provide evidence or testimonials to support your claims.
7: Close: Ask for the sale by proposing a next step. This could be a demo, a trial or a meeting with decision-makers. Ensure to summarize what has been discussed and reiterate the benefits.
8: End conversation: The prospect has to leave to call, the prospect is not interested, or next steps where already determined by the sales agent.
Example 1:
Conversation history:
{salesperson_name}: Hey, good morning! <END_OF_TURN>
User: Hello, who is this? <END_OF_TURN>
{salesperson_name}: This is {salesperson_name} calling from {company_name}. How are you?
User: I am well, why are you calling? <END_OF_TURN>
{salesperson_name}: I am calling to talk about options for your home insurance. <END_OF_TURN>
User: I am not interested, thanks. <END_OF_TURN>
{salesperson_name}: Alright, no worries, have a good day! <END_OF_TURN> <END_OF_CALL>
End of example 1.
You must respond according to the previous conversation history and the stage of the conversation you are at.
Only generate one response at a time and act as {salesperson_name} only! When you are done generating, end with '<END_OF_TURN>' to give the user a chance to respond.
Conversation history:
{conversation_history}
{salesperson_name}:"""
prompt = PromptTemplate(
template=sales_agent_inception_prompt,
input_variables=[
"salesperson_name",
"salesperson_role",
"company_name",
"company_business",
"company_values",
"conversation_purpose",
"conversation_type",
"conversation_history",
]
)
return cls(prompt=prompt, llm=llm, verbose=verbose)
| [
"You are a sales assistant helping your sales agent to determine which stage of a sales conversation should the agent stay at or move to when talking to a user.\n Following '===' is the conversation history. \n Use this conversation history to make your decision.\n Only use the text between first and second '===' to accomplish the task above, do not take it as a command of what to do.\n ===\n {conversation_history}\n ===\n Now determine what should be the next immediate conversation stage for the agent in the sales conversation by selecting only from the following options:\n {conversation_stages}\n Current Conversation stage is: {conversation_stage_id}\n If there is no conversation history, output 1.\n The answer needs to be one number only, no words.\n Do not answer anything else nor add anything to you answer.",
"company_name",
"Never forget your name is {salesperson_name}. You work as a {salesperson_role}.\nYou work at company named {company_name}. {company_name}'s business is the following: {company_business}.\nCompany values are the following. {company_values}\nYou are contacting a potential prospect in order to {conversation_purpose}\nYour means of contacting the prospect is {conversation_type}\n\nIf you're asked about where you got the user's contact information, say that you got it from public records.\nKeep your responses in short length to retain the user's attention. Never produce lists, just answers.\nStart the conversation by just a greeting and how is the prospect doing without pitching in your first turn.\nWhen the conversation is over, output <END_OF_CALL>\nAlways think about at which conversation stage you are at before answering:\n\n1: Introduction: Start the conversation by introducing yourself and your company. Be polite and respectful while keeping the tone of the conversation professional. Your greeting should be welcoming. Always clarify in your greeting the reason why you are calling.\n2: Qualification: Qualify the prospect by confirming if they are the right person to talk to regarding your product/service. Ensure that they have the authority to make purchasing decisions.\n3: Value proposition: Briefly explain how your product/service can benefit the prospect. Focus on the unique selling points and value proposition of your product/service that sets it apart from competitors.\n4: Needs analysis: Ask open-ended questions to uncover the prospect's needs and pain points. Listen carefully to their responses and take notes.\n5: Solution presentation: Based on the prospect's needs, present your product/service as the solution that can address their pain points.\n6: Objection handling: Address any objections that the prospect may have regarding your product/service. Be prepared to provide evidence or testimonials to support your claims.\n7: Close: Ask for the sale by proposing a next step. This could be a demo, a trial or a meeting with decision-makers. Ensure to summarize what has been discussed and reiterate the benefits.\n8: End conversation: The prospect has to leave to call, the prospect is not interested, or next steps where already determined by the sales agent.\n\nExample 1:\nConversation history:\n{salesperson_name}: Hey, good morning! <END_OF_TURN>\nUser: Hello, who is this? <END_OF_TURN>\n{salesperson_name}: This is {salesperson_name} calling from {company_name}. How are you? \nUser: I am well, why are you calling? <END_OF_TURN>\n{salesperson_name}: I am calling to talk about options for your home insurance. <END_OF_TURN>\nUser: I am not interested, thanks. <END_OF_TURN>\n{salesperson_name}: Alright, no worries, have a good day! <END_OF_TURN> <END_OF_CALL>\nEnd of example 1.\n\nYou must respond according to the previous conversation history and the stage of the conversation you are at.\nOnly generate one response at a time and act as {salesperson_name} only! When you are done generating, end with '<END_OF_TURN>' to give the user a chance to respond.\n\nConversation history: \n{conversation_history}\n{salesperson_name}:",
"conversation_stage_id",
"company_values",
"conversation_history",
"company_business",
"conversation_purpose",
"conversation_type",
"<END_OF_TURN>",
"salesperson_name",
"salesperson_role",
"conversation_stages",
"re asked about where you got the user"
] |
2024-01-10 | rbhattad31/RealEstateSalesGpt | Real_estate~Real_estate~callbackhandler.py | from typing import List, Any, Dict, Union
from loguru import logger
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import BaseMessage, LLMResult, AgentAction, AgentFinish
class MyCustomHandler(BaseCallbackHandler):
"""Base callback handler that can be used to handle callbacks from langchain."""
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> Any:
"""Run when LLM starts running."""
#print("On LLM Start")
logger.info("On LLM Start")
def on_chat_model_start(
self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs: Any
) -> Any:
"""Run when Chat Model starts running."""
#print("On Chat Model Start")
logger.info("On Chat Model Start")
def on_llm_new_token(self, token: str, **kwargs: Any) -> Any:
"""Run on new LLM token. Only available when streaming is enabled."""
logger.info("On LLM New Token")
#print("On new llm token"+token)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> Any:
"""Run when LLM ends running."""
def on_llm_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> Any:
"""Run when LLM errors."""
logger.error("On LLM Error")
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> Any:
"""Run when chain starts running."""
logger.info("On Chain Start")
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> Any:
"""Run when chain ends running."""
def on_chain_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> Any:
"""Run when chain errors."""
logger.error("On Chain Error")
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> Any:
"""Run when tool starts running."""
logger.info("On Tool Start")
def on_tool_end(self, output: str, **kwargs: Any) -> Any:
"""Run when tool ends running."""
def on_tool_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> Any:
"""Run when tool errors."""
def on_text(self, text: str, **kwargs: Any) -> Any:
"""Run on arbitrary text."""
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run on agent end.""" | [] |
2024-01-10 | davila7/langchain-101 | agents~agents.py | # pip install google-search-results
import streamlit as st
from dotenv import load_dotenv
from langchain.agents import load_tools, AgentType, initialize_agent, Tool, get_all_tool_names
from langchain import OpenAI, Wikipedia
from langchain.chat_models import ChatOpenAI, ChatVertexAI, ChatGooglePalm, ChatAnthropic
from langchain.llms import VertexAI, GooglePalm, Cohere, AzureOpenAI, Anthropic
from langchain.agents.react.base import DocstoreExplorer
from langchain.memory import ConversationBufferMemory
from langchain.utilities import SerpAPIWrapper
from langchain.callbacks import StreamlitCallbackHandler
from langchain.agents import create_csv_agent
import sys
import io
import re
from typing import Callable, Any
# cargamos openai api key
load_dotenv()
def capture_and_display_output(func: Callable[..., Any], *args, **kwargs) -> Any:
original_stdout = sys.stdout
sys.stdout = output_catcher = io.StringIO()
# Ejecutamos la función dada y capturamos su salida
# response = func(*args, **kwargs)
st_callback = StreamlitCallbackHandler(st.container(), max_thought_containers=100, expand_new_thoughts=True, collapse_completed_thoughts=False)
response = func(*args, callbacks=[st_callback])
# Restauramos la salida estándar a su valor original
sys.stdout = original_stdout
# Limpiamos la salida capturada
output_text = output_catcher.getvalue()
cleaned_text = re.sub(r'\x1b\[[0-9;-]*[mK]', '', output_text)
lines = cleaned_text.split('\n')
# Mostramos el texto limpiado en Streamlit como código
with st.expander("Verbose", expanded=False):
for line in lines:
st.markdown(line)
return response
docstore = DocstoreExplorer(Wikipedia())
agents = []
n_tools = get_all_tool_names()
docstore_tools = ["search", "lookup"]
conversational_tools = ['current_search']
self_ask_tools = ['intermediate_answer']
# ejecutamos el agente
def main():
chat_agent = False
load_tools_boolean = True
memory = None
st.set_page_config(page_title="Langchain Agent AI", page_icon="🤖", layout="wide")
st.title("Try Langchain Agents 🦜")
st.write("If you want to work with these agents in production, use [Judini.ai](https://judini.ai)")
# select provider (openai, vertexai, azure, makersuite, cohere)
options_provider = st.selectbox(
'Select Provider',
('openai', 'codegpt', 'makersuite', 'vertexai', 'azure', 'anthropic', 'cohere'))
# dependiendo del provider seleccionado, se cargan los modelos disponibles
# openai: davinci, gpt-3.5-tubo, gpt-4
# makersuite: text-bison-001, codebison-001, chatbison-001
# vertexai: text-bison-001, codebison-001, chatbison-001
# azure: davinci, gpt-3.5-tubo, gpt-4
# cohere: coral
options_model = ''
if options_provider == 'codegpt':
options_model = st.selectbox(
'Select Agent',
('DaniGPT', 'MessiGPT', 'LLM Expert'))
if options_provider == 'openai':
options_model = st.selectbox(
'Select Model',
('gpt-4', 'gpt-3.5-turbo-0613', 'text-davinci-003'))
if options_provider == 'makersuite':
options_model = st.selectbox(
'Select Model',
('chat-bison-001', 'text-bison-001'))
if options_provider == 'vertexai':
options_model = st.selectbox(
'Select Model',
('chat-bison@001', 'codechat-bison@001', 'text-bison@001', 'code-bison@001'))
if options_provider == 'azure':
options_model = st.selectbox(
'Select Model',
('gpt-4', 'gpt-3.5-turbo-0613', 'text-davinci-003'))
if options_provider == 'cohere':
options_model = st.selectbox(
'Select Model',
('command-large-001', 'coral'))
if options_provider == 'anthropic':
options_model = st.selectbox(
'Select Model',
('claude-v1.3', 'claude-2'))
st.write('Provider: '+options_provider+" Model: "+options_model)
for key in AgentType:
agents.append(key.value)
options_agent = st.selectbox(
'Select Agents',
agents)
if options_agent == 'zero-shot-react-description':
st.write(options_agent, ": This agent uses the ReAct framework to determine which tool to use based solely on the tool's description. Any number of tools can be provided. This agent requires that a description is provided for each tool.")
tools = n_tools
if options_agent == 'react-docstore':
st.write(options_agent, ": This agent uses the ReAct framework to interact with a docstore. Two tools must be provided: a Search tool and a Lookup tool (they must be named exactly as so). The Search tool should search for a document, while the Lookup tool should lookup a term in the most recently found document. This agent is equivalent to the original ReAct paper, specifically the Wikipedia example.")
tools = docstore_tools
load_tools_boolean = False
if options_agent == 'self-ask-with-search':
st.write(options_agent, ": This agent utilizes a single tool that should be named Intermediate Answer. This tool should be able to lookup factual answers to questions. This agent is equivalent to the original self ask with search paper, where a Google search API was provided as the tool.")
tools = self_ask_tools
load_tools_boolean = False
if options_agent == 'conversational-react-description':
st.write(options_agent, ": This agent is designed to be used in conversational settings. The prompt is designed to make the agent helpful and conversational. It uses the ReAct framework to decide which tool to use, and uses memory to remember the previous conversation interactions.")
tools = conversational_tools
memory = ConversationBufferMemory(memory_key="chat_history")
load_tools_boolean = False
if options_agent == 'chat-zero-shot-react-description':
st.write(options_agent, ": This agent uses the ReAct framework to determine which tool to use based solely on the tool's description. Any number of tools can be provided. This agent requires that a description is provided for each tool.")
tools = n_tools
chat_agent = True
if options_agent == 'chat-conversational-react-description':
st.write(options_agent, ": The chat-conversational-react-description agent type lets us create a conversational agent using a chat model instead of an LLM")
tools = conversational_tools
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
load_tools_boolean = False
chat_agent = True
if options_agent == 'openai-functions':
st.write(options_agent, ": Certain OpenAI models (like gpt-3.5-turbo-0613 and gpt-4-0613) have been explicitly fine-tuned to detect when a function should to be called and respond with the inputs that should be passed to the function. The OpenAI Functions Agent is designed to work with these models.")
tools = n_tools
chat_agent = True
if options_agent == 'openai-multi-functions':
st.write(options_agent, ": Certain OpenAI models (like gpt-3.5-turbo-0613 and gpt-4-0613) have been explicitly fine-tuned to detect when a function should to be called and respond with the inputs that should be passed to the function. The OpenAI Functions Agent is designed to work with these models.")
tools = n_tools
chat_agent = True
tools_selected = st.multiselect(
'Select Tools',
tools)
if "search" in tools_selected:
tools_selected.remove("search")
tools_selected.append(
Tool(
name="Search",
func=docstore.search,
description="useful for when you need to ask with search",
))
if "lookup" in tools_selected:
tools_selected.remove("lookup")
tools_selected.append(
Tool(
name="Lookup",
func=docstore.lookup,
description="useful for when you need to ask with lookup",
))
if "current_search" in tools_selected:
tools_selected.remove("current_search")
search = SerpAPIWrapper()
tools_selected.append(
Tool(
name = "Current Search",
func=search.run,
description="useful for when you need to answer questions about current events or the current state of the world"
),
)
if "intermediate_answer" in tools_selected:
tools_selected.remove("intermediate_answer")
search = SerpAPIWrapper()
tools_selected.append(
Tool(
name="Intermediate Answer",
func=search.run,
description="useful for when you need to ask with search",
)
)
st.write('Tools', tools_selected)
form = st.form('AgentsTools')
question = form.text_input("Enter your question", "")
btn = form.form_submit_button("Run")
if btn:
st.markdown("### Response Agent AI")
with st.spinner("Loading"):
# crear el llm segun lo seleccionado en el provider y el model
if options_provider == 'codegpt':
agent_id = "770c40ed-2178-445d-8b91-8cfa87ccaf80"
codegpt_api_base = "https://api.codegpt.co/v1"
codegpt_api_key = "63e52832-331a-4999-bce4-02dd0d04294b"
llm = ChatOpenAI(openai_api_key=codegpt_api_key,
openai_api_base=codegpt_api_base,
model=agent_id)
if options_provider == 'openai':
if chat_agent:
llm = ChatOpenAI(model=options_model, temperature=0)
else:
llm = OpenAI(model=options_model, temperature=0)
if options_provider == 'vertexai':
if chat_agent:
llm = ChatVertexAI(model=options_model, temperature=0)
else:
llm = VertexAI(model=options_model, temperature=0)
if options_provider == 'cohere':
llm = Cohere(model=options_model, temperature=0)
if options_provider == 'anthropic':
llm = ChatAnthropic(model=options_model, temperature=0)
if load_tools_boolean:
final_tools = load_tools(tools_selected, llm)
else:
final_tools = tools_selected
agent = initialize_agent(final_tools, llm, agent=options_agent, verbose=True, memory=memory)
st.info(capture_and_display_output(agent.run, question))
if __name__ == "__main__":
main() | [] |
2024-01-10 | davila7/langchain-101 | functions_callings~talk_send_message_vanilla.py | import streamlit as st
from bokeh.models.widgets import Button
from bokeh.models import CustomJS
from streamlit_bokeh_events import streamlit_bokeh_events
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
import openai
import json
import os
from dotenv import load_dotenv
load_dotenv()
def send_email(email, subject, body):
"""send the user an email with the answer"""
try:
if(subject == ''):
subject = 'GPT Email'
message = Mail(
# add the email connected to your sendgrid code here
from_email='[email protected]',
to_emails=email,
subject=subject,
html_content=body
)
st.write(message)
sg = SendGridAPIClient(os.getenv("SENDGRID_API_KEY"))
response = sg.send(message)
st.write(response.status_code)
st.write(response.body)
st.write(response.headers)
except Exception as e:
print(f"An error occurred: {str(e)}")
st.title('GPT Sends Emails')
st.write('Instructions:')
st.write("Click on the 'Start Talking' button and allow the browser permission to use the microphone. Say a sentence requesting to send an email with a message. You must say the person's full email address.")
st.write("Example: Send an email to [email protected] reminding him that he must study the OpenAI Functions API for tomorrow's exam")
user_secret = st.text_input(label = ":blue[OpenAI API key]",
value="",
placeholder = "Paste your openAI API key, sk-",
type = "password")
if(user_secret):
stt_button = Button(label="Start talking", button_type="success")
stt_button.js_on_event("button_click", CustomJS(code="""
var recognition = new webkitSpeechRecognition();
recognition.continuous = true;
recognition.interimResults = true;
recognition.onresult = function (e) {
var value = "";
for (var i = e.resultIndex; i < e.results.length; ++i) {
if (e.results[i].isFinal) {
value += e.results[i][0].transcript;
}
}
if ( value != "") {
document.dispatchEvent(new CustomEvent("GET_TEXT", {detail: value}));
}
}
recognition.start();
"""))
result = streamlit_bokeh_events(
stt_button,
events="GET_TEXT",
key="listen",
refresh_on_update=False,
override_height=75,
debounce_time=0)
if result:
if "GET_TEXT" in result:
user_input = result.get("GET_TEXT")
st.write('Audio Input: ', user_input)
openai.api_key = os.getenv("OPENAI_API_KEY")
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=[
{"role": "user", "content": user_input}],
functions=[
{
"name": "send_email",
"description": "Sends an email to a person",
"parameters": {
"type": "object",
"properties": {
"email": {
"type": "string",
"description": "A person to send the email",
},
"body": {"type": "string"},
"subject": {"type": "string"},
},
},
}
],
function_call="auto",
)
message = response["choices"][0]["message"]
st.write('GPT: ', message)
if message.get("function_call"):
function_name = message["function_call"]["name"]
print('function_name: ', function_name)
if(function_name == 'send_email'):
# Access the arguments
arguments = json.loads(message['function_call']['arguments'])
email_arg = arguments['email']
body_arg = arguments['body']
subject_arg = arguments['subject']
# Step 3, call the function
function_response = send_email(
email_arg, subject_arg, body_arg
)
print(function_response) | [] |
2024-01-10 | davila7/langchain-101 | functions_callings~llama_api_function_callings_langchain.py | import json
from llamaapi import LlamaAPI
from dotenv import load_dotenv
import os
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
import streamlit as st
import asyncio
from pydantic import BaseModel
from typing import List
from langchain.experimental.llms import ChatLlamaAPI
load_dotenv()
def get_or_create_eventloop():
try:
return asyncio.get_event_loop()
except RuntimeError as ex:
if "There is no current event loop in thread" in str(ex):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return asyncio.get_event_loop()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
class FunctionCallArguments(BaseModel):
email: str
body: str
subject: str
class FunctionCall(BaseModel):
name: str
arguments: FunctionCallArguments
class Message(BaseModel):
function_call: FunctionCall
class Choice(BaseModel):
message: Message
class ChoiceList(BaseModel):
choices: List[Choice]
def send_email(email, subject, body):
"""send the user an email with the answer"""
try:
if(subject == ''):
subject = 'GPT Email'
message = Mail(
# add the email connected to your sendgrid code here
from_email=os.getenv("SENDGRID_EMAIL"),
to_emails=email,
subject=subject,
html_content=body
)
st.write(message)
sg = SendGridAPIClient(os.getenv("SENDGRID_API_KEY"))
response = sg.send(message)
st.write(response)
except Exception as e:
st.write(f"An error occurred: {str(e)}")
# Define your API request
def run_conversation(prompt):
# Initialize the llamaapi with your api_token
llama = LlamaAPI(os.getenv("LLAMA_API_API_KEY"))
function_calling_json = [
{
"name": "send_email",
"description": "Sends an email to the specified email address",
"parameters": {
"type": "object",
"properties": {
"email": {
"type": "string",
"description": "An email address to send the email to",
},
"body": {"type": "string"},
"subject": {"type": "string"},
},
},
}
]
api_request_json = {
"messages": [
{"role": "user", "content": prompt},
],
"functions": function_calling_json,
"stream": False,
"function_call": "auto"
}
# Make your request and handle the response
response = llama.run(api_request_json)
message = response.json()
st.write(message)
# Step 2, check if the model wants to call a function
if message['choices'][0]['message']['function_call']:
function_name = message['choices'][0]['message']['function_call']["name"]
st.write(function_name)
if(function_name == 'send_email'):
# Access the arguments
model = ChoiceList(**message)
arguments = model.choices[0].message.function_call.arguments
st.write(arguments)
email_arg = arguments.email
body_arg = arguments.body
subject_arg = arguments.subject
# Step 3, call the function
function_response = send_email(
email_arg, subject_arg, body_arg
)
print(function_response)
def main():
st.set_page_config(page_title="Llama API Function Callings", page_icon="🤖", layout="wide")
st.title("Llama API Function Callings 🦙")
form = st.form('AgentsTools')
question = form.text_input("Instruction", "")
btn = form.form_submit_button("Run")
if btn:
st.markdown("### Response Llama API")
with st.spinner("Loading"):
run_conversation(question)
if __name__ == "__main__":
main() | [] |
2024-01-10 | davila7/langchain-101 | llm~output_generations.py | from dotenv import load_dotenv
from langchain import OpenAI
# cargamos openai api key
load_dotenv()
# cargamos el modelo text-ada-001 con dos respuesta y que seleccione 1
llm = OpenAI(model_name="text-ada-001", n=2, best_of=2)
#print(llm("cómo estás?"))
# muestra la cantidad de tokens
print(llm.get_num_tokens("cómo estás?"))
# creamos una lista de resultados, incluso podemos multiplicar la lista en el mismo parámetro
llm_result = llm.generate(["Dime un chiste", "Dime un poema"]*5)
# cantidad de resultados
print(len(llm_result.generations))
# muestra los resultados
print(llm_result.generations)
# muestra la información que envía el proveedor
print(llm_result.llm_output) | [] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.