date_collected
stringclasses 1
value | repo_name
stringlengths 6
116
| file_name
stringlengths 2
220
| file_contents
stringlengths 13
357k
| prompts
sequence |
---|---|---|---|---|
2024-01-10 | kingler/PythonExamples | concurrent_calls.py | import asyncio
import openai
import time
import json
from termcolor import colored
import os
from lists import general_purposes
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
) # for exponential backoff
'''!!! I HAVEN'T TESTED ALL THE FUNCTIONS RETURNED FROM GPT, BE CAREFUL AND USE GOOD JUGDEMENT BEFORE USING ANY OF THE FUNCTIONS!!!'''
'''CHECK FOR ENV VARIABLE FOR OPENAI API KEY, IF NOT SET ONE'''
if not os.getenv("OPENAI_API_KEY"):
os.environ["OPENAI_API_KEY"] = 'OPENAI_API_KEY here'
# TESTING IT OUT WITH 10 PURPOSES
purposes = general_purposes[:10]
def save_to_json(results, filename):
with open(filename, 'w') as f:
json.dump(results, f)
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
async def python_function_generator_async(purpose):
global total_tokens_used
response = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful python programmer who writes excellent functions given a purpose for a function."},
{"role": "user", "content": f""" write a single python function for the purpose: {purpose}"""}
]
)
tokens = response["usage"]["total_tokens"]
response = response['choices'][0]['message']['content']
return {'purpose': purpose, 'code': response, 'tokens_used': tokens}
# this is to make all the async calls at once
async def make_async_calls_full():
tasks = []
for purpose in purposes:
print("async calls started""")
tasks.append(loop.create_task(python_function_generator_async(purpose)))
results = await asyncio.gather(*tasks)
print("async calls finished""")
save_to_json(results, 'async.json')
# this is to make async calls in two batches
async def make_async_calls():
tasks = []
half_length = len(purposes) // 2
results = [] # To store results from all tasks
for i, purpose in enumerate(purposes):
print("Async calls started.")
tasks.append(loop.create_task(python_function_generator_async(purpose)))
# If we have created tasks for half of the purposes, wait for them to complete,
# sleep for 60 seconds, and then continue.
if i == half_length - 1:
results.extend(await asyncio.gather(*tasks)) # Store the results from the first half
print("First half of async calls finished.")
tasks = [] # Clear the tasks list for the next half.
await asyncio.sleep(60)
# Await the remaining tasks and store their results.
results.extend(await asyncio.gather(*tasks))
print("Async calls finished.")
save_to_json(results, 'async.json')
start_time = time.time()
loop = asyncio.get_event_loop()
loop.run_until_complete(make_async_calls())
end_time = time.time()
elapsed_time = end_time - start_time
print(f"Total time elapsed: {elapsed_time} seconds")
total_tokens_used = 0
try:
with open("async.json", "r") as f:
data = json.load(f)
except json.JSONDecodeError as e:
print(colored(f"Error decoding JSON: {e}", "red"))
else:
for item in data:
for key, value in item.items():
if key == "tokens_used":
print(colored(f"Tokens used for {item['purpose']}: {value}", "green"))
total_tokens_used += value
print(colored(f"Total tokens used: {total_tokens_used}", "green"))
| [
"You are a helpful python programmer who writes excellent functions given a purpose for a function.",
" write a single python function for the purpose: PLACEHOLDER"
] |
2024-01-10 | yjyang1990/ChuanhuChatGPT | modules~models~base_model.py | from __future__ import annotations
from typing import TYPE_CHECKING, List
import logging
import json
import commentjson as cjson
import os
import sys
import requests
import urllib3
import traceback
from tqdm import tqdm
import colorama
from duckduckgo_search import ddg
import asyncio
import aiohttp
from enum import Enum
from ..presets import *
from ..llama_func import *
from ..utils import *
from .. import shared
from ..config import retrieve_proxy
class ModelType(Enum):
Unknown = -1
OpenAI = 0
ChatGLM = 1
LLaMA = 2
XMChat = 3
StableLM = 4
@classmethod
def get_type(cls, model_name: str):
model_type = None
model_name_lower = model_name.lower()
if "gpt" in model_name_lower:
model_type = ModelType.OpenAI
elif "chatglm" in model_name_lower:
model_type = ModelType.ChatGLM
elif "llama" in model_name_lower or "alpaca" in model_name_lower:
model_type = ModelType.LLaMA
elif "xmchat" in model_name_lower:
model_type = ModelType.XMChat
elif "stablelm" in model_name_lower:
model_type = ModelType.StableLM
else:
model_type = ModelType.Unknown
return model_type
class BaseLLMModel:
def __init__(
self,
model_name,
system_prompt="",
temperature=1.0,
top_p=1.0,
n_choices=1,
stop=None,
max_generation_token=None,
presence_penalty=0,
frequency_penalty=0,
logit_bias=None,
user="",
) -> None:
self.history = []
self.all_token_counts = []
self.model_name = model_name
self.model_type = ModelType.get_type(model_name)
try:
self.token_upper_limit = MODEL_TOKEN_LIMIT[model_name]
except KeyError:
self.token_upper_limit = DEFAULT_TOKEN_LIMIT
self.interrupted = False
self.system_prompt = system_prompt
self.api_key = None
self.need_api_key = False
self.single_turn = False
self.temperature = temperature
self.top_p = top_p
self.n_choices = n_choices
self.stop_sequence = stop
self.max_generation_token = None
self.presence_penalty = presence_penalty
self.frequency_penalty = frequency_penalty
self.logit_bias = logit_bias
self.user_identifier = user
def get_answer_stream_iter(self):
"""stream predict, need to be implemented
conversations are stored in self.history, with the most recent question, in OpenAI format
should return a generator, each time give the next word (str) in the answer
"""
logging.warning("stream predict not implemented, using at once predict instead")
response, _ = self.get_answer_at_once()
yield response
def get_answer_at_once(self):
"""predict at once, need to be implemented
conversations are stored in self.history, with the most recent question, in OpenAI format
Should return:
the answer (str)
total token count (int)
"""
logging.warning("at once predict not implemented, using stream predict instead")
response_iter = self.get_answer_stream_iter()
count = 0
for response in response_iter:
count += 1
return response, sum(self.all_token_counts) + count
def billing_info(self):
"""get billing infomation, inplement if needed"""
logging.warning("billing info not implemented, using default")
return BILLING_NOT_APPLICABLE_MSG
def count_token(self, user_input):
"""get token count from input, implement if needed"""
logging.warning("token count not implemented, using default")
return len(user_input)
def stream_next_chatbot(self, inputs, chatbot, fake_input=None, display_append=""):
def get_return_value():
return chatbot, status_text
status_text = i18n("开始实时传输回答……")
if fake_input:
chatbot.append((fake_input, ""))
else:
chatbot.append((inputs, ""))
user_token_count = self.count_token(inputs)
self.all_token_counts.append(user_token_count)
logging.debug(f"输入token计数: {user_token_count}")
stream_iter = self.get_answer_stream_iter()
for partial_text in stream_iter:
chatbot[-1] = (chatbot[-1][0], partial_text + display_append)
self.all_token_counts[-1] += 1
status_text = self.token_message()
yield get_return_value()
if self.interrupted:
self.recover()
break
self.history.append(construct_assistant(partial_text))
def next_chatbot_at_once(self, inputs, chatbot, fake_input=None, display_append=""):
if fake_input:
chatbot.append((fake_input, ""))
else:
chatbot.append((inputs, ""))
if fake_input is not None:
user_token_count = self.count_token(fake_input)
else:
user_token_count = self.count_token(inputs)
self.all_token_counts.append(user_token_count)
ai_reply, total_token_count = self.get_answer_at_once()
self.history.append(construct_assistant(ai_reply))
if fake_input is not None:
self.history[-2] = construct_user(fake_input)
chatbot[-1] = (chatbot[-1][0], ai_reply + display_append)
if fake_input is not None:
self.all_token_counts[-1] += count_token(construct_assistant(ai_reply))
else:
self.all_token_counts[-1] = total_token_count - sum(self.all_token_counts)
status_text = self.token_message()
return chatbot, status_text
def handle_file_upload(self, files, chatbot):
"""if the model accepts multi modal input, implement this function"""
status = gr.Markdown.update()
if files:
construct_index(self.api_key, file_src=files)
status = "索引构建完成"
return gr.Files.update(), chatbot, status
def prepare_inputs(self, real_inputs, use_websearch, files, reply_language, chatbot):
fake_inputs = None
display_append = []
limited_context = False
fake_inputs = real_inputs
if files:
from llama_index.indices.vector_store.base_query import GPTVectorStoreIndexQuery
from llama_index.indices.query.schema import QueryBundle
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.chat_models import ChatOpenAI
from llama_index import (
GPTSimpleVectorIndex,
ServiceContext,
LangchainEmbedding,
OpenAIEmbedding,
)
limited_context = True
msg = "加载索引中……"
logging.info(msg)
# yield chatbot + [(inputs, "")], msg
index = construct_index(self.api_key, file_src=files)
assert index is not None, "获取索引失败"
msg = "索引获取成功,生成回答中……"
logging.info(msg)
if local_embedding or self.model_type != ModelType.OpenAI:
embed_model = LangchainEmbedding(HuggingFaceEmbeddings(model_name = "sentence-transformers/distiluse-base-multilingual-cased-v2"))
else:
embed_model = OpenAIEmbedding()
# yield chatbot + [(inputs, "")], msg
with retrieve_proxy():
prompt_helper = PromptHelper(
max_input_size=4096,
num_output=5,
max_chunk_overlap=20,
chunk_size_limit=600,
)
from llama_index import ServiceContext
service_context = ServiceContext.from_defaults(
prompt_helper=prompt_helper, embed_model=embed_model
)
query_object = GPTVectorStoreIndexQuery(
index.index_struct,
service_context=service_context,
similarity_top_k=5,
vector_store=index._vector_store,
docstore=index._docstore,
)
query_bundle = QueryBundle(real_inputs)
nodes = query_object.retrieve(query_bundle)
reference_results = [n.node.text for n in nodes]
reference_results = add_source_numbers(reference_results, use_source=False)
display_append = add_details(reference_results)
display_append = "\n\n" + "".join(display_append)
real_inputs = (
replace_today(PROMPT_TEMPLATE)
.replace("{query_str}", real_inputs)
.replace("{context_str}", "\n\n".join(reference_results))
.replace("{reply_language}", reply_language)
)
elif use_websearch:
limited_context = True
search_results = ddg(real_inputs, max_results=5)
reference_results = []
for idx, result in enumerate(search_results):
logging.debug(f"搜索结果{idx + 1}:{result}")
domain_name = urllib3.util.parse_url(result["href"]).host
reference_results.append([result["body"], result["href"]])
display_append.append(
# f"{idx+1}. [{domain_name}]({result['href']})\n"
f"<li><a href=\"{result['href']}\" target=\"_blank\">{domain_name}</a></li>\n"
)
reference_results = add_source_numbers(reference_results)
display_append = "<ol>\n\n" + "".join(display_append) + "</ol>"
real_inputs = (
replace_today(WEBSEARCH_PTOMPT_TEMPLATE)
.replace("{query}", real_inputs)
.replace("{web_results}", "\n\n".join(reference_results))
.replace("{reply_language}", reply_language)
)
else:
display_append = ""
return limited_context, fake_inputs, display_append, real_inputs, chatbot
def predict(
self,
inputs,
chatbot,
stream=False,
use_websearch=False,
files=None,
reply_language="中文",
should_check_token_count=True,
): # repetition_penalty, top_k
status_text = "开始生成回答……"
logging.info(
"输入为:" + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL
)
if should_check_token_count:
yield chatbot + [(inputs, "")], status_text
if reply_language == "跟随问题语言(不稳定)":
reply_language = "the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch."
limited_context, fake_inputs, display_append, inputs, chatbot = self.prepare_inputs(real_inputs=inputs, use_websearch=use_websearch, files=files, reply_language=reply_language, chatbot=chatbot)
yield chatbot + [(fake_inputs, "")], status_text
if (
self.need_api_key and
self.api_key is None
and not shared.state.multi_api_key
):
status_text = STANDARD_ERROR_MSG + NO_APIKEY_MSG
logging.info(status_text)
chatbot.append((inputs, ""))
if len(self.history) == 0:
self.history.append(construct_user(inputs))
self.history.append("")
self.all_token_counts.append(0)
else:
self.history[-2] = construct_user(inputs)
yield chatbot + [(inputs, "")], status_text
return
elif len(inputs.strip()) == 0:
status_text = STANDARD_ERROR_MSG + NO_INPUT_MSG
logging.info(status_text)
yield chatbot + [(inputs, "")], status_text
return
if self.single_turn:
self.history = []
self.all_token_counts = []
self.history.append(construct_user(inputs))
try:
if stream:
logging.debug("使用流式传输")
iter = self.stream_next_chatbot(
inputs,
chatbot,
fake_input=fake_inputs,
display_append=display_append,
)
for chatbot, status_text in iter:
yield chatbot, status_text
else:
logging.debug("不使用流式传输")
chatbot, status_text = self.next_chatbot_at_once(
inputs,
chatbot,
fake_input=fake_inputs,
display_append=display_append,
)
yield chatbot, status_text
except Exception as e:
traceback.print_exc()
status_text = STANDARD_ERROR_MSG + str(e)
yield chatbot, status_text
if len(self.history) > 1 and self.history[-1]["content"] != inputs:
logging.info(
"回答为:"
+ colorama.Fore.BLUE
+ f"{self.history[-1]['content']}"
+ colorama.Style.RESET_ALL
)
if limited_context:
# self.history = self.history[-4:]
# self.all_token_counts = self.all_token_counts[-2:]
self.history = []
self.all_token_counts = []
max_token = self.token_upper_limit - TOKEN_OFFSET
if sum(self.all_token_counts) > max_token and should_check_token_count:
count = 0
while (
sum(self.all_token_counts)
> self.token_upper_limit * REDUCE_TOKEN_FACTOR
and sum(self.all_token_counts) > 0
):
count += 1
del self.all_token_counts[0]
del self.history[:2]
logging.info(status_text)
status_text = f"为了防止token超限,模型忘记了早期的 {count} 轮对话"
yield chatbot, status_text
def retry(
self,
chatbot,
stream=False,
use_websearch=False,
files=None,
reply_language="中文",
):
logging.debug("重试中……")
if len(self.history) > 0:
inputs = self.history[-2]["content"]
del self.history[-2:]
self.all_token_counts.pop()
elif len(chatbot) > 0:
inputs = chatbot[-1][0]
else:
yield chatbot, f"{STANDARD_ERROR_MSG}上下文是空的"
return
iter = self.predict(
inputs,
chatbot,
stream=stream,
use_websearch=use_websearch,
files=files,
reply_language=reply_language,
)
for x in iter:
yield x
logging.debug("重试完毕")
# def reduce_token_size(self, chatbot):
# logging.info("开始减少token数量……")
# chatbot, status_text = self.next_chatbot_at_once(
# summarize_prompt,
# chatbot
# )
# max_token_count = self.token_upper_limit * REDUCE_TOKEN_FACTOR
# num_chat = find_n(self.all_token_counts, max_token_count)
# logging.info(f"previous_token_count: {self.all_token_counts}, keeping {num_chat} chats")
# chatbot = chatbot[:-1]
# self.history = self.history[-2*num_chat:] if num_chat > 0 else []
# self.all_token_counts = self.all_token_counts[-num_chat:] if num_chat > 0 else []
# msg = f"保留了最近{num_chat}轮对话"
# logging.info(msg)
# logging.info("减少token数量完毕")
# return chatbot, msg + "," + self.token_message(self.all_token_counts if len(self.all_token_counts) > 0 else [0])
def interrupt(self):
self.interrupted = True
def recover(self):
self.interrupted = False
def set_token_upper_limit(self, new_upper_limit):
self.token_upper_limit = new_upper_limit
print(f"token上限设置为{new_upper_limit}")
def set_temperature(self, new_temperature):
self.temperature = new_temperature
def set_top_p(self, new_top_p):
self.top_p = new_top_p
def set_n_choices(self, new_n_choices):
self.n_choices = new_n_choices
def set_stop_sequence(self, new_stop_sequence: str):
new_stop_sequence = new_stop_sequence.split(",")
self.stop_sequence = new_stop_sequence
def set_max_tokens(self, new_max_tokens):
self.max_generation_token = new_max_tokens
def set_presence_penalty(self, new_presence_penalty):
self.presence_penalty = new_presence_penalty
def set_frequency_penalty(self, new_frequency_penalty):
self.frequency_penalty = new_frequency_penalty
def set_logit_bias(self, logit_bias):
logit_bias = logit_bias.split()
bias_map = {}
encoding = tiktoken.get_encoding("cl100k_base")
for line in logit_bias:
word, bias_amount = line.split(":")
if word:
for token in encoding.encode(word):
bias_map[token] = float(bias_amount)
self.logit_bias = bias_map
def set_user_identifier(self, new_user_identifier):
self.user_identifier = new_user_identifier
def set_system_prompt(self, new_system_prompt):
self.system_prompt = new_system_prompt
def set_key(self, new_access_key):
self.api_key = new_access_key.strip()
msg = i18n("API密钥更改为了") + hide_middle_chars(self.api_key)
logging.info(msg)
return self.api_key, msg
def set_single_turn(self, new_single_turn):
self.single_turn = new_single_turn
def reset(self):
self.history = []
self.all_token_counts = []
self.interrupted = False
return [], self.token_message([0])
def delete_first_conversation(self):
if self.history:
del self.history[:2]
del self.all_token_counts[0]
return self.token_message()
def delete_last_conversation(self, chatbot):
if len(chatbot) > 0 and STANDARD_ERROR_MSG in chatbot[-1][1]:
msg = "由于包含报错信息,只删除chatbot记录"
chatbot.pop()
return chatbot, self.history
if len(self.history) > 0:
self.history.pop()
self.history.pop()
if len(chatbot) > 0:
msg = "删除了一组chatbot对话"
chatbot.pop()
if len(self.all_token_counts) > 0:
msg = "删除了一组对话的token计数记录"
self.all_token_counts.pop()
msg = "删除了一组对话"
return chatbot, msg
def token_message(self, token_lst=None):
if token_lst is None:
token_lst = self.all_token_counts
token_sum = 0
for i in range(len(token_lst)):
token_sum += sum(token_lst[: i + 1])
return i18n("Token 计数: ") + f"{sum(token_lst)}" + i18n(",本次对话累计消耗了 ") + f"{token_sum} tokens"
def save_chat_history(self, filename, chatbot, user_name):
if filename == "":
return
if not filename.endswith(".json"):
filename += ".json"
return save_file(filename, self.system_prompt, self.history, chatbot, user_name)
def export_markdown(self, filename, chatbot, user_name):
if filename == "":
return
if not filename.endswith(".md"):
filename += ".md"
return save_file(filename, self.system_prompt, self.history, chatbot, user_name)
def load_chat_history(self, filename, chatbot, user_name):
logging.debug(f"{user_name} 加载对话历史中……")
if type(filename) != str:
filename = filename.name
try:
with open(os.path.join(HISTORY_DIR, user_name, filename), "r") as f:
json_s = json.load(f)
try:
if type(json_s["history"][0]) == str:
logging.info("历史记录格式为旧版,正在转换……")
new_history = []
for index, item in enumerate(json_s["history"]):
if index % 2 == 0:
new_history.append(construct_user(item))
else:
new_history.append(construct_assistant(item))
json_s["history"] = new_history
logging.info(new_history)
except:
# 没有对话历史
pass
logging.debug(f"{user_name} 加载对话历史完毕")
self.history = json_s["history"]
return filename, json_s["system"], json_s["chatbot"]
except FileNotFoundError:
logging.warning(f"{user_name} 没有找到对话历史文件,不执行任何操作")
return filename, self.system_prompt, chatbot
def like(self):
"""like the last response, implement if needed
"""
return gr.update()
def dislike(self):
"""dislike the last response, implement if needed
"""
return gr.update()
| [] |
2024-01-10 | JacobYuan7/diffusers | src~diffusers~pipelines~stable_diffusion_xl~pipeline_stable_diffusion_xl_inpaint.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import PIL.Image
import torch
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from ...image_processor import PipelineImageInput, VaeImageProcessor
from ...loaders import FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin
from ...models import AutoencoderKL, UNet2DConditionModel
from ...models.attention_processor import (
AttnProcessor2_0,
LoRAAttnProcessor2_0,
LoRAXFormersAttnProcessor,
XFormersAttnProcessor,
)
from ...models.lora import adjust_lora_scale_text_encoder
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import (
USE_PEFT_BACKEND,
deprecate,
is_invisible_watermark_available,
is_torch_xla_available,
logging,
replace_example_docstring,
scale_lora_layers,
unscale_lora_layers,
)
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline
from .pipeline_output import StableDiffusionXLPipelineOutput
if is_invisible_watermark_available():
from .watermark import StableDiffusionXLWatermarker
if is_torch_xla_available():
import torch_xla.core.xla_model as xm
XLA_AVAILABLE = True
else:
XLA_AVAILABLE = False
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> from diffusers import StableDiffusionXLInpaintPipeline
>>> from diffusers.utils import load_image
>>> pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
... "stabilityai/stable-diffusion-xl-base-1.0",
... torch_dtype=torch.float16,
... variant="fp16",
... use_safetensors=True,
... )
>>> pipe.to("cuda")
>>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
>>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
>>> init_image = load_image(img_url).convert("RGB")
>>> mask_image = load_image(mask_url).convert("RGB")
>>> prompt = "A majestic tiger sitting on a bench"
>>> image = pipe(
... prompt=prompt, image=init_image, mask_image=mask_image, num_inference_steps=50, strength=0.80
... ).images[0]
```
"""
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
"""
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
"""
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
# rescale the results from guidance (fixes overexposure)
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
return noise_cfg
def mask_pil_to_torch(mask, height, width):
# preprocess mask
if isinstance(mask, (PIL.Image.Image, np.ndarray)):
mask = [mask]
if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask]
mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0)
mask = mask.astype(np.float32) / 255.0
elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
mask = np.concatenate([m[None, None, :] for m in mask], axis=0)
mask = torch.from_numpy(mask)
return mask
def prepare_mask_and_masked_image(image, mask, height, width, return_image: bool = False):
"""
Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
``image`` and ``1`` for the ``mask``.
The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
binarized (``mask > 0.5``) and cast to ``torch.float32`` too.
Args:
image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
mask (_type_): The mask to apply to the image, i.e. regions to inpaint.
It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.
Raises:
ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
(ot the other way around).
Returns:
tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
dimensions: ``batch x channels x height x width``.
"""
# checkpoint. TOD(Yiyi) - need to clean this up later
deprecation_message = "The prepare_mask_and_masked_image method is deprecated and will be removed in a future version. Please use VaeImageProcessor.preprocess instead"
deprecate(
"prepare_mask_and_masked_image",
"0.30.0",
deprecation_message,
)
if image is None:
raise ValueError("`image` input cannot be undefined.")
if mask is None:
raise ValueError("`mask_image` input cannot be undefined.")
if isinstance(image, torch.Tensor):
if not isinstance(mask, torch.Tensor):
mask = mask_pil_to_torch(mask, height, width)
if image.ndim == 3:
image = image.unsqueeze(0)
# Batch and add channel dim for single mask
if mask.ndim == 2:
mask = mask.unsqueeze(0).unsqueeze(0)
# Batch single mask or add channel dim
if mask.ndim == 3:
# Single batched mask, no channel dim or single mask not batched but channel dim
if mask.shape[0] == 1:
mask = mask.unsqueeze(0)
# Batched masks no channel dim
else:
mask = mask.unsqueeze(1)
assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
# assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"
# Check image is in [-1, 1]
# if image.min() < -1 or image.max() > 1:
# raise ValueError("Image should be in [-1, 1] range")
# Check mask is in [0, 1]
if mask.min() < 0 or mask.max() > 1:
raise ValueError("Mask should be in [0, 1] range")
# Binarize mask
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
# Image as float32
image = image.to(dtype=torch.float32)
elif isinstance(mask, torch.Tensor):
raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not")
else:
# preprocess image
if isinstance(image, (PIL.Image.Image, np.ndarray)):
image = [image]
if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
# resize all images w.r.t passed height an width
image = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in image]
image = [np.array(i.convert("RGB"))[None, :] for i in image]
image = np.concatenate(image, axis=0)
elif isinstance(image, list) and isinstance(image[0], np.ndarray):
image = np.concatenate([i[None, :] for i in image], axis=0)
image = image.transpose(0, 3, 1, 2)
image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
mask = mask_pil_to_torch(mask, height, width)
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
if image.shape[1] == 4:
# images are in latent space and thus can't
# be masked set masked_image to None
# we assume that the checkpoint is not an inpainting
# checkpoint. TOD(Yiyi) - need to clean this up later
masked_image = None
else:
masked_image = image * (mask < 0.5)
# n.b. ensure backwards compatibility as old function does not return image
if return_image:
return mask, masked_image, image
return mask, masked_image
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
def retrieve_latents(encoder_output, generator):
if hasattr(encoder_output, "latent_dist"):
return encoder_output.latent_dist.sample(generator)
elif hasattr(encoder_output, "latents"):
return encoder_output.latents
else:
raise AttributeError("Could not access latents of provided encoder_output")
class StableDiffusionXLInpaintPipeline(
DiffusionPipeline, TextualInversionLoaderMixin, StableDiffusionXLLoraLoaderMixin, FromSingleFileMixin
):
r"""
Pipeline for text-to-image generation using Stable Diffusion XL.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
In addition the pipeline inherits the following loading methods:
- *LoRA*: [`loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`]
- *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
as well as the following saving methods:
- *LoRA*: [`loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`]
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion XL uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
text_encoder_2 ([` CLIPTextModelWithProjection`]):
Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
specifically the
[laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
tokenizer_2 (`CLIPTokenizer`):
Second Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`):
Whether the `unet` requires a aesthetic_score condition to be passed during inference. Also see the config
of `stabilityai/stable-diffusion-xl-refiner-1-0`.
force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
`stabilityai/stable-diffusion-xl-base-1-0`.
add_watermarker (`bool`, *optional*):
Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to
watermark output images. If not defined, it will default to True if the package is installed, otherwise no
watermarker will be used.
"""
model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae"
_optional_components = ["tokenizer", "tokenizer_2", "text_encoder", "text_encoder_2"]
_callback_tensor_inputs = [
"latents",
"prompt_embeds",
"negative_prompt_embeds",
"add_text_embeds",
"add_time_ids",
"negative_pooled_prompt_embeds",
"add_neg_time_ids",
"mask",
"masked_image_latents",
]
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
text_encoder_2: CLIPTextModelWithProjection,
tokenizer: CLIPTokenizer,
tokenizer_2: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
requires_aesthetics_score: bool = False,
force_zeros_for_empty_prompt: bool = True,
add_watermarker: Optional[bool] = None,
):
super().__init__()
self.register_modules(
vae=vae,
text_encoder=text_encoder,
text_encoder_2=text_encoder_2,
tokenizer=tokenizer,
tokenizer_2=tokenizer_2,
unet=unet,
scheduler=scheduler,
)
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
self.mask_processor = VaeImageProcessor(
vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True
)
add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
if add_watermarker:
self.watermark = StableDiffusionXLWatermarker()
else:
self.watermark = None
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
def enable_vae_slicing(self):
r"""
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
self.vae.enable_slicing()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
def disable_vae_slicing(self):
r"""
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_slicing()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
def enable_vae_tiling(self):
r"""
Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
self.vae.enable_tiling()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
def disable_vae_tiling(self):
r"""
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_tiling()
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt
def encode_prompt(
self,
prompt: str,
prompt_2: Optional[str] = None,
device: Optional[torch.device] = None,
num_images_per_prompt: int = 1,
do_classifier_free_guidance: bool = True,
negative_prompt: Optional[str] = None,
negative_prompt_2: Optional[str] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
lora_scale: Optional[float] = None,
clip_skip: Optional[int] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
used in both text-encoders
device: (`torch.device`):
torch device
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
negative_prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
If not provided, pooled text embeddings will be generated from `prompt` input argument.
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
input argument.
lora_scale (`float`, *optional*):
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
clip_skip (`int`, *optional*):
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.
"""
device = device or self._execution_device
# set lora scale so that monkey patched LoRA
# function of text encoder can correctly access it
if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
self._lora_scale = lora_scale
# dynamically adjust the LoRA scale
if self.text_encoder is not None:
if not USE_PEFT_BACKEND:
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
else:
scale_lora_layers(self.text_encoder, lora_scale)
if self.text_encoder_2 is not None:
if not USE_PEFT_BACKEND:
adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
else:
scale_lora_layers(self.text_encoder_2, lora_scale)
prompt = [prompt] if isinstance(prompt, str) else prompt
if prompt is not None:
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
# Define tokenizers and text encoders
tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
text_encoders = (
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
)
if prompt_embeds is None:
prompt_2 = prompt_2 or prompt
prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
# textual inversion: procecss multi-vector tokens if necessary
prompt_embeds_list = []
prompts = [prompt, prompt_2]
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, tokenizer)
text_inputs = tokenizer(
prompt,
padding="max_length",
max_length=tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
text_input_ids, untruncated_ids
):
removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {tokenizer.model_max_length} tokens: {removed_text}"
)
prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
# We are only ALWAYS interested in the pooled output of the final text encoder
pooled_prompt_embeds = prompt_embeds[0]
if clip_skip is None:
prompt_embeds = prompt_embeds.hidden_states[-2]
else:
# "2" because SDXL always indexes from the penultimate layer.
prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
prompt_embeds_list.append(prompt_embeds)
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
# get unconditional embeddings for classifier free guidance
zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
negative_prompt_embeds = torch.zeros_like(prompt_embeds)
negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
elif do_classifier_free_guidance and negative_prompt_embeds is None:
negative_prompt = negative_prompt or ""
negative_prompt_2 = negative_prompt_2 or negative_prompt
# normalize str to list
negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
negative_prompt_2 = (
batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
)
uncond_tokens: List[str]
if prompt is not None and type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = [negative_prompt, negative_prompt_2]
negative_prompt_embeds_list = []
for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
if isinstance(self, TextualInversionLoaderMixin):
negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
max_length = prompt_embeds.shape[1]
uncond_input = tokenizer(
negative_prompt,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
negative_prompt_embeds = text_encoder(
uncond_input.input_ids.to(device),
output_hidden_states=True,
)
# We are only ALWAYS interested in the pooled output of the final text encoder
negative_pooled_prompt_embeds = negative_prompt_embeds[0]
negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
negative_prompt_embeds_list.append(negative_prompt_embeds)
negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
if self.text_encoder_2 is not None:
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
else:
prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
if do_classifier_free_guidance:
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
if self.text_encoder_2 is not None:
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
else:
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
bs_embed * num_images_per_prompt, -1
)
if do_classifier_free_guidance:
negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
bs_embed * num_images_per_prompt, -1
)
if self.text_encoder is not None:
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder, lora_scale)
if self.text_encoder_2 is not None:
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder_2, lora_scale)
return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
def check_inputs(
self,
prompt,
prompt_2,
height,
width,
strength,
callback_steps,
negative_prompt=None,
negative_prompt_2=None,
prompt_embeds=None,
negative_prompt_embeds=None,
callback_on_step_end_tensor_inputs=None,
):
if strength < 0 or strength > 1:
raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if callback_on_step_end_tensor_inputs is not None and not all(
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
):
raise ValueError(
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt_2 is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
def prepare_latents(
self,
batch_size,
num_channels_latents,
height,
width,
dtype,
device,
generator,
latents=None,
image=None,
timestep=None,
is_strength_max=True,
add_noise=True,
return_noise=False,
return_image_latents=False,
):
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if (image is None or timestep is None) and not is_strength_max:
raise ValueError(
"Since strength < 1. initial latents are to be initialised as a combination of Image + Noise."
"However, either the image or the noise timestep has not been provided."
)
if image.shape[1] == 4:
image_latents = image.to(device=device, dtype=dtype)
elif return_image_latents or (latents is None and not is_strength_max):
image = image.to(device=device, dtype=dtype)
image_latents = self._encode_vae_image(image=image, generator=generator)
image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)
if latents is None and add_noise:
noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
# if strength is 1. then initialise the latents to noise, else initial to image + noise
latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep)
# if pure noise then scale the initial latents by the Scheduler's init sigma
latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents
elif add_noise:
noise = latents.to(device)
latents = noise * self.scheduler.init_noise_sigma
else:
noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
latents = image_latents.to(device)
outputs = (latents,)
if return_noise:
outputs += (noise,)
if return_image_latents:
outputs += (image_latents,)
return outputs
def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
dtype = image.dtype
if self.vae.config.force_upcast:
image = image.float()
self.vae.to(dtype=torch.float32)
if isinstance(generator, list):
image_latents = [
retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
for i in range(image.shape[0])
]
image_latents = torch.cat(image_latents, dim=0)
else:
image_latents = retrieve_latents(self.vae.encode(image), generator=generator)
if self.vae.config.force_upcast:
self.vae.to(dtype)
image_latents = image_latents.to(dtype)
image_latents = self.vae.config.scaling_factor * image_latents
return image_latents
def prepare_mask_latents(
self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
):
# resize the mask to latents shape as we concatenate the mask to the latents
# we do that before converting to dtype to avoid breaking in case we're using cpu_offload
# and half precision
mask = torch.nn.functional.interpolate(
mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
)
mask = mask.to(device=device, dtype=dtype)
# duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
if mask.shape[0] < batch_size:
if not batch_size % mask.shape[0] == 0:
raise ValueError(
"The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
" of masks that you pass is divisible by the total requested batch size."
)
mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
if masked_image is not None and masked_image.shape[1] == 4:
masked_image_latents = masked_image
else:
masked_image_latents = None
if masked_image is not None:
if masked_image_latents is None:
masked_image = masked_image.to(device=device, dtype=dtype)
masked_image_latents = self._encode_vae_image(masked_image, generator=generator)
if masked_image_latents.shape[0] < batch_size:
if not batch_size % masked_image_latents.shape[0] == 0:
raise ValueError(
"The passed images and the required batch size don't match. Images are supposed to be duplicated"
f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
" Make sure the number of images that you pass is divisible by the total requested batch size."
)
masked_image_latents = masked_image_latents.repeat(
batch_size // masked_image_latents.shape[0], 1, 1, 1
)
masked_image_latents = (
torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
)
# aligning device to prevent device errors when concating it with the latent model input
masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
return mask, masked_image_latents
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.get_timesteps
def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
# get the original timestep using init_timestep
if denoising_start is None:
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
else:
t_start = 0
timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
# Strength is irrelevant if we directly request a timestep to start at;
# that is, strength is determined by the denoising_start instead.
if denoising_start is not None:
discrete_timestep_cutoff = int(
round(
self.scheduler.config.num_train_timesteps
- (denoising_start * self.scheduler.config.num_train_timesteps)
)
)
num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item()
if self.scheduler.order == 2 and num_inference_steps % 2 == 0:
# if the scheduler is a 2nd order scheduler we might have to do +1
# because `num_inference_steps` might be even given that every timestep
# (except the highest one) is duplicated. If `num_inference_steps` is even it would
# mean that we cut the timesteps in the middle of the denoising step
# (between 1st and 2nd devirative) which leads to incorrect results. By adding 1
# we ensure that the denoising process always ends after the 2nd derivate step of the scheduler
num_inference_steps = num_inference_steps + 1
# because t_n+1 >= t_n, we slice the timesteps starting from the end
timesteps = timesteps[-num_inference_steps:]
return timesteps, num_inference_steps
return timesteps, num_inference_steps - t_start
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids
def _get_add_time_ids(
self,
original_size,
crops_coords_top_left,
target_size,
aesthetic_score,
negative_aesthetic_score,
negative_original_size,
negative_crops_coords_top_left,
negative_target_size,
dtype,
text_encoder_projection_dim=None,
):
if self.config.requires_aesthetics_score:
add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))
add_neg_time_ids = list(
negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,)
)
else:
add_time_ids = list(original_size + crops_coords_top_left + target_size)
add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size)
passed_add_embed_dim = (
self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
)
expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
if (
expected_add_embed_dim > passed_add_embed_dim
and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim
):
raise ValueError(
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model."
)
elif (
expected_add_embed_dim < passed_add_embed_dim
and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim
):
raise ValueError(
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model."
)
elif expected_add_embed_dim != passed_add_embed_dim:
raise ValueError(
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
)
add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype)
return add_time_ids, add_neg_time_ids
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
def upcast_vae(self):
dtype = self.vae.dtype
self.vae.to(dtype=torch.float32)
use_torch_2_0_or_xformers = isinstance(
self.vae.decoder.mid_block.attentions[0].processor,
(
AttnProcessor2_0,
XFormersAttnProcessor,
LoRAXFormersAttnProcessor,
LoRAAttnProcessor2_0,
),
)
# if xformers or torch_2_0 is used attention block does not need
# to be in float32 which can save lots of memory
if use_torch_2_0_or_xformers:
self.vae.post_quant_conv.to(dtype)
self.vae.decoder.conv_in.to(dtype)
self.vae.decoder.mid_block.to(dtype)
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_freeu
def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.
The suffixes after the scaling factors represent the stages where they are being applied.
Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
Args:
s1 (`float`):
Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
mitigate "oversmoothing effect" in the enhanced denoising process.
s2 (`float`):
Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
mitigate "oversmoothing effect" in the enhanced denoising process.
b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
"""
if not hasattr(self, "unet"):
raise ValueError("The pipeline must have `unet` for using FreeU.")
self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_freeu
def disable_freeu(self):
"""Disables the FreeU mechanism if enabled."""
self.unet.disable_freeu()
@property
def guidance_scale(self):
return self._guidance_scale
@property
def guidance_rescale(self):
return self._guidance_rescale
@property
def clip_skip(self):
return self._clip_skip
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
@property
def do_classifier_free_guidance(self):
return self._guidance_scale > 1
@property
def cross_attention_kwargs(self):
return self._cross_attention_kwargs
@property
def denoising_end(self):
return self._denoising_end
@property
def denoising_start(self):
return self._denoising_start
@property
def num_timesteps(self):
return self._num_timesteps
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
prompt_2: Optional[Union[str, List[str]]] = None,
image: PipelineImageInput = None,
mask_image: PipelineImageInput = None,
masked_image_latents: torch.FloatTensor = None,
height: Optional[int] = None,
width: Optional[int] = None,
strength: float = 0.9999,
num_inference_steps: int = 50,
denoising_start: Optional[float] = None,
denoising_end: Optional[float] = None,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
negative_prompt_2: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
original_size: Tuple[int, int] = None,
crops_coords_top_left: Tuple[int, int] = (0, 0),
target_size: Tuple[int, int] = None,
negative_original_size: Optional[Tuple[int, int]] = None,
negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
negative_target_size: Optional[Tuple[int, int]] = None,
aesthetic_score: float = 6.0,
negative_aesthetic_score: float = 2.5,
clip_skip: Optional[int] = None,
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
**kwargs,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
instead.
prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
used in both text-encoders
image (`PIL.Image.Image`):
`Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
be masked out with `mask_image` and repainted according to `prompt`.
mask_image (`PIL.Image.Image`):
`Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
instead of 3, so the expected shape would be `(B, H, W, 1)`.
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The height in pixels of the generated image. This is set to 1024 by default for the best results.
Anything below 512 pixels won't work well for
[stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
and checkpoints that are not specifically fine-tuned on low resolutions.
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The width in pixels of the generated image. This is set to 1024 by default for the best results.
Anything below 512 pixels won't work well for
[stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
and checkpoints that are not specifically fine-tuned on low resolutions.
strength (`float`, *optional*, defaults to 0.9999):
Conceptually, indicates how much to transform the masked portion of the reference `image`. Must be
between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the
`strength`. The number of denoising steps depends on the amount of noise initially added. When
`strength` is 1, added noise will be maximum and the denoising process will run for the full number of
iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores the masked
portion of the reference `image`. Note that in the case of `denoising_start` being declared as an
integer, the value of `strength` will be ignored.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
denoising_start (`float`, *optional*):
When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be
bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and
it is assumed that the passed `image` is a partly denoised image. Note that when this is specified,
strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline
is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refining the Image
Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
denoising_end (`float`, *optional*):
When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
completed before it is intentionally prematurely terminated. As a result, the returned sample will
still retain a substantial amount of noise (ca. final 20% of timesteps still needed) and should be
denoised by a successor pipeline that has `denoising_start` set to 0.8 so that it only denoises the
final 20% of the scheduler. The denoising_end parameter should ideally be utilized when this pipeline
forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
guidance_scale (`float`, *optional*, defaults to 7.5):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
negative_prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
If not provided, pooled text embeddings will be generated from `prompt` input argument.
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
input argument.
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`torch.Generator`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will ge generated by sampling using the supplied random `generator`.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generate image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
`original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
`crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
`crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
`crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
For most cases, `target_size` should be set to the desired height and width of the generated image. If
not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
To negatively condition the generation process based on a specific image resolution. Part of SDXL's
micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's
micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
To negatively condition the generation process based on a target image resolution. It should be as same
as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
aesthetic_score (`float`, *optional*, defaults to 6.0):
Used to simulate an aesthetic score of the generated image by influencing the positive text condition.
Part of SDXL's micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
negative_aesthetic_score (`float`, *optional*, defaults to 2.5):
Part of SDXL's micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to
simulate an aesthetic score of the generated image by influencing the negative text condition.
clip_skip (`int`, *optional*):
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.
callback_on_step_end (`Callable`, *optional*):
A function that calls at the end of each denoising steps during the inference. The function is called
with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
`callback_on_step_end_tensor_inputs`.
callback_on_step_end_tensor_inputs (`List`, *optional*):
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
`._callback_tensor_inputs` attribute of your pipeine class.
Examples:
Returns:
[`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`:
[`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
`tuple. `tuple. When returning a tuple, the first element is a list with the generated images.
"""
callback = kwargs.pop("callback", None)
callback_steps = kwargs.pop("callback_steps", None)
if callback is not None:
deprecate(
"callback",
"1.0.0",
"Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
)
if callback_steps is not None:
deprecate(
"callback_steps",
"1.0.0",
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
)
# 0. Default height and width to unet
height = height or self.unet.config.sample_size * self.vae_scale_factor
width = width or self.unet.config.sample_size * self.vae_scale_factor
# 1. Check inputs
self.check_inputs(
prompt,
prompt_2,
height,
width,
strength,
callback_steps,
negative_prompt,
negative_prompt_2,
prompt_embeds,
negative_prompt_embeds,
callback_on_step_end_tensor_inputs,
)
self._guidance_scale = guidance_scale
self._guidance_rescale = guidance_rescale
self._clip_skip = clip_skip
self._cross_attention_kwargs = cross_attention_kwargs
self._denoising_end = denoising_end
self._denoising_start = denoising_start
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# 3. Encode input prompt
text_encoder_lora_scale = (
self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
)
(
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
negative_pooled_prompt_embeds,
) = self.encode_prompt(
prompt=prompt,
prompt_2=prompt_2,
device=device,
num_images_per_prompt=num_images_per_prompt,
do_classifier_free_guidance=self.do_classifier_free_guidance,
negative_prompt=negative_prompt,
negative_prompt_2=negative_prompt_2,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
pooled_prompt_embeds=pooled_prompt_embeds,
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
lora_scale=text_encoder_lora_scale,
clip_skip=self.clip_skip,
)
# 4. set timesteps
def denoising_value_valid(dnv):
return isinstance(self.denoising_end, float) and 0 < dnv < 1
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps, num_inference_steps = self.get_timesteps(
num_inference_steps,
strength,
device,
denoising_start=self.denoising_start if denoising_value_valid else None,
)
# check that number of inference steps is not < 1 - as this doesn't make sense
if num_inference_steps < 1:
raise ValueError(
f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline"
f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
)
# at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
# create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise
is_strength_max = strength == 1.0
# 5. Preprocess mask and image
init_image = self.image_processor.preprocess(image, height=height, width=width)
init_image = init_image.to(dtype=torch.float32)
mask = self.mask_processor.preprocess(mask_image, height=height, width=width)
if masked_image_latents is not None:
masked_image = masked_image_latents
elif init_image.shape[1] == 4:
# if images are in latent space, we can't mask it
masked_image = None
else:
masked_image = init_image * (mask < 0.5)
# 6. Prepare latent variables
num_channels_latents = self.vae.config.latent_channels
num_channels_unet = self.unet.config.in_channels
return_image_latents = num_channels_unet == 4
add_noise = True if self.denoising_start is None else False
latents_outputs = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
image=init_image,
timestep=latent_timestep,
is_strength_max=is_strength_max,
add_noise=add_noise,
return_noise=True,
return_image_latents=return_image_latents,
)
if return_image_latents:
latents, noise, image_latents = latents_outputs
else:
latents, noise = latents_outputs
# 7. Prepare mask latent variables
mask, masked_image_latents = self.prepare_mask_latents(
mask,
masked_image,
batch_size * num_images_per_prompt,
height,
width,
prompt_embeds.dtype,
device,
generator,
self.do_classifier_free_guidance,
)
# 8. Check that sizes of mask, masked image and latents match
if num_channels_unet == 9:
# default case for runwayml/stable-diffusion-inpainting
num_channels_mask = mask.shape[1]
num_channels_masked_image = masked_image_latents.shape[1]
if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
raise ValueError(
f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
" `pipeline.unet` or your `mask_image` or `image` input."
)
elif num_channels_unet != 4:
raise ValueError(
f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}."
)
# 8.1 Prepare extra step kwargs.
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
height, width = latents.shape[-2:]
height = height * self.vae_scale_factor
width = width * self.vae_scale_factor
original_size = original_size or (height, width)
target_size = target_size or (height, width)
# 10. Prepare added time ids & embeddings
if negative_original_size is None:
negative_original_size = original_size
if negative_target_size is None:
negative_target_size = target_size
add_text_embeds = pooled_prompt_embeds
if self.text_encoder_2 is None:
text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
else:
text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
add_time_ids, add_neg_time_ids = self._get_add_time_ids(
original_size,
crops_coords_top_left,
target_size,
aesthetic_score,
negative_aesthetic_score,
negative_original_size,
negative_crops_coords_top_left,
negative_target_size,
dtype=prompt_embeds.dtype,
text_encoder_projection_dim=text_encoder_projection_dim,
)
add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1)
if self.do_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1)
add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0)
prompt_embeds = prompt_embeds.to(device)
add_text_embeds = add_text_embeds.to(device)
add_time_ids = add_time_ids.to(device)
# 11. Denoising loop
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
if (
self.denoising_end is not None
and self.denoising_start is not None
and denoising_value_valid(self.denoising_end)
and denoising_value_valid(self.denoising_start)
and self.denoising_start >= self.denoising_end
):
raise ValueError(
f"`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`: "
+ f" {self.denoising_end} when using type float."
)
elif self.denoising_end is not None and denoising_value_valid(self.denoising_end):
discrete_timestep_cutoff = int(
round(
self.scheduler.config.num_train_timesteps
- (self.denoising_end * self.scheduler.config.num_train_timesteps)
)
)
num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
timesteps = timesteps[:num_inference_steps]
self._num_timesteps = len(timesteps)
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
# concat latents, mask, masked_image_latents in the channel dimension
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
if num_channels_unet == 9:
latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
# predict the noise residual
added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=self.cross_attention_kwargs,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
)[0]
# perform guidance
if self.do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
if num_channels_unet == 4:
init_latents_proper = image_latents
if self.do_classifier_free_guidance:
init_mask, _ = mask.chunk(2)
else:
init_mask = mask
if i < len(timesteps) - 1:
noise_timestep = timesteps[i + 1]
init_latents_proper = self.scheduler.add_noise(
init_latents_proper, noise, torch.tensor([noise_timestep])
)
latents = (1 - init_mask) * init_latents_proper + init_mask * latents
if callback_on_step_end is not None:
callback_kwargs = {}
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds)
negative_pooled_prompt_embeds = callback_outputs.pop(
"negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
)
add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids)
add_neg_time_ids = callback_outputs.pop("add_neg_time_ids", add_neg_time_ids)
mask = callback_outputs.pop("mask", mask)
masked_image_latents = callback_outputs.pop("masked_image_latents", masked_image_latents)
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
if XLA_AVAILABLE:
xm.mark_step()
if not output_type == "latent":
# make sure the VAE is in float32 mode, as it overflows in float16
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
if needs_upcasting:
self.upcast_vae()
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
# cast back to fp16 if needed
if needs_upcasting:
self.vae.to(dtype=torch.float16)
else:
return StableDiffusionXLPipelineOutput(images=latents)
# apply watermark if available
if self.watermark is not None:
image = self.watermark.apply_watermark(image)
image = self.image_processor.postprocess(image, output_type=output_type)
# Offload all models
self.maybe_free_model_hooks()
if not return_dict:
return (image,)
return StableDiffusionXLPipelineOutput(images=image)
| [
"prompt_embeds",
"negative_pooled_prompt_embeds",
"[PLACEHOLDER, PLACEHOLDER]",
"False",
"negative_prompt_embeds",
"[]"
] |
2024-01-10 | ATawzer/QuinnGPT | tasks.py | from invoke import task
from quinn_gpt.scrapers import DocsScraper
from quinn_gpt.db import QuinnDB
from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.document_loaders import UnstructuredHTMLLoader
from tqdm import tqdm
import os
VERSION = '5.1'
PERSIST_DIR = f'./chromadb/quinn_gpt'
qdb = QuinnDB('quinn_gpt')
scraper = DocsScraper(VERSION, qdb)
@task
def run(c, url):
scraper.scrape_url(url, VERSION)
@task
def run_all(c):
start_url = f'https://docs.unrealengine.com/{VERSION}/en-US/'
scraper.crawl_site(start_url)
@task
def cache_to_chroma(c, chunk_size=400, reset=True):
# load the document and split it into chunks
chroma = Chroma(persist_directory=PERSIST_DIR, embedding_function=SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2"))
chroma.persist()
for filename in tqdm(os.listdir('.cache')):
loader = UnstructuredHTMLLoader(".cache/"+filename)
documents = loader.load()
# split it into chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=int(chunk_size), chunk_overlap=0)
docs = text_splitter.split_documents(documents)
# create the open-source embedding function
embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
# load it into Chroma
chroma.add_documents(docs)
@task
def query(c, query, k=5):
chroma = Chroma(persist_directory=PERSIST_DIR, embedding_function=SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2"))
results = chroma.similarity_search(query, k=k)
for result in results:
print(result.page_content)
@task
def estimate_cost(c):
# Loops through all files in .cache and estimates the cost of embedding them
total_cost = 0
total_words = 0
total_tokens = 0
for filename in os.listdir('.cache'):
with open(f'.cache/{filename}', 'r') as f:
text = f.read()
words = len(text.split())
tokens = words*1.3
total_tokens += tokens
total_words += words
cost = tokens / 1000 *.0001
total_cost += cost
print(f'{total_words} words, ${total_cost}')
@task
def test(c):
c.run('pytest ./tests --cov=quinn_gpt --cov-report=term-missing')
@task
def remove_pound(c):
qdb.remove_hashed_urls() | [] |
2024-01-10 | threefoldo/ask_gpt | ask_ai~magics.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/00_magics.ipynb.
# %% auto 0
__all__ = ['CONTEXT_MAX_WORDS', 'conversation_api', 'coding_api', 'OpenAIAPI', 'ConversationAPI', 'CodingAPI',
'collect_code_history', 'ai_ask', 'ai_continue', 'ai_code', 'load_ipython_extension']
# %% ../nbs/00_magics.ipynb 3
import os
import openai
from IPython.display import display, Markdown
# %% ../nbs/00_magics.ipynb 4
openai.api_key = os.environ['OPENAI_API_KEY']
CONTEXT_MAX_WORDS = 2200
# %% ../nbs/00_magics.ipynb 5
from abc import ABC, abstractmethod
class OpenAIAPI(ABC):
def __init__(self):
self.reset_context()
@abstractmethod
def reset_context(self):
pass
def get_completion(self, prompt, new_conversation=True):
if new_conversation:
self.reset_context()
self.context.append(
{
'role':'user',
'content': prompt
}
)
response = openai.ChatCompletion.create(
model = "gpt-3.5-turbo",
messages = self.context
)
completion = self.extract_completion(response)
self.extend_context(response)
self.prune_context()
self.completion = completion
self.response = response # useful for debugging
def extract_completion(self, response):
return response['choices'][0].message.content.strip()
def extend_context(self, response):
self.context.append(response['choices'][0].message.to_dict())
def prune_context(self):
# Prune context to under CONTEXT_MAX_WORDS words. That should be ~CONTEXT_MAX_WORDS*1.5 tokens, leaving room for the prompt and completion.
pruned_context = []
word_count = 0
while self.context:
last_message = self.context.pop()
word_count += len(last_message['content'].split())
if word_count < CONTEXT_MAX_WORDS:
pruned_context.append(last_message)
else:
break
pruned_context.reverse()
self.context = pruned_context
def display_completion(self):
display(Markdown(self.completion))
class ConversationAPI(OpenAIAPI):
def reset_context(self):
self.context = [
{
'role': 'system',
'content': 'You are an expert programmer helping out a friend. Your friend is using Python in Jupyter Notebook. Give a succinct answer that a programmer with one year of professional experience would easily understand.'
}
]
class CodingAPI(OpenAIAPI):
def reset_context(self):
self.context = [
{
'role': 'system',
'content':
'''
You are a programming assistant. You will be passed code and instruction what to do next. Output the code that should be added next. Your prompt will be in the following format:
Code: {code}
Instruction: {instruction}
Output only the code that should be added next. Do not output the entire code. Do not output the instruction. Do not output the prompt. Do not output any other text. Do not output any lines that are not indented correctly. Do not output any lines that are not valid Python.
'''
}
]
conversation_api = ConversationAPI()
coding_api = CodingAPI()
# %% ../nbs/00_magics.ipynb 6
def collect_code_history():
history = [cell_content for session, cell_number, cell_content in get_ipython().history_manager.get_tail()]
collected_code = ''
word_count = 0
while history:
last_cell_content = history.pop()
word_count += len(last_cell_content.split())
if word_count < CONTEXT_MAX_WORDS:
collected_code += ' ' + last_cell_content
else:
break
return collected_code
# %% ../nbs/00_magics.ipynb 7
import base64
import re
def ai_ask(line, cell):
conversation_api.get_completion(cell)
conversation_api.display_completion()
def ai_continue(line, cell):
conversation_api.get_completion(cell, False)
conversation_api.display_completion()
def ai_code(line, cell):
prompt = f'Code: {collect_code_history()}\nInstruction: {cell}'
coding_api.get_completion(prompt)
encoded_code = base64.b64encode(coding_api.completion.encode()).decode()
js_code = f"""
var new_cell = Jupyter.notebook.insert_cell_below('code');
new_cell.set_text(atob("{encoded_code}"));
"""
get_ipython().run_cell_magic('javascript', '', js_code)
# %% ../nbs/00_magics.ipynb 8
def load_ipython_extension(ipython):
ipython.register_magic_function(ai_ask, magic_kind='cell', magic_name='ai_ask')
ipython.register_magic_function(ai_continue, magic_kind='cell', magic_name='ai_continue')
ipython.register_magic_function(ai_code, magic_kind='cell', magic_name='ai_code')
| [
"You are an expert programmer helping out a friend. Your friend is using Python in Jupyter Notebook. Give a succinct answer that a programmer with one year of professional experience would easily understand.",
"\n You are a programming assistant. You will be passed code and instruction what to do next. Output the code that should be added next. Your prompt will be in the following format:\n\n Code: {code}\n Instruction: {instruction}\n\n Output only the code that should be added next. Do not output the entire code. Do not output the instruction. Do not output the prompt. Do not output any other text. Do not output any lines that are not indented correctly. Do not output any lines that are not valid Python.\n "
] |
2024-01-10 | Daniel-sdn/extracaoDoc | modules~keywords.py | import os
import openai
def get_completion(text, engine="text-davinci-002", temperature=0.5, max_tokens=60, top_p=1.0, frequency_penalty=0.8, presence_penalty=0.0):
openai.api_key = "your_api_key"
prompt = "Extract keywords from this text:\n\n" + text
response = openai.Completion.create(
engine=engine,
prompt=prompt,
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
frequency_penalty=frequency_penalty,
presence_penalty=presence_penalty
)
return response
| [
"Extract keywords from this text:\n\nPLACEHOLDER"
] |
2024-01-10 | saidworks/python_bootcamp | ai~information_retrieval~retreiveBlogPosts.py | from langchain.document_loaders import WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.embeddings import GPT4AllEmbeddings
loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)
vector_store = Chroma.from_documents(
documents=all_splits, embedding=GPT4AllEmbeddings())
question = input("Enter your question: ")
print("Searching for similar documents for question: ", question)
docs = vector_store.similarity_search(question)
print(len(docs))
print(type(docs[0]))
| [] |
2024-01-10 | saidworks/python_bootcamp | ai~information_retrieval~gpt4all.py | from langchain.llms import GPT4All
llm = GPT4All(
model=(r"C:/Users/zitou/.cache/gpt4all/nous-hermes-13b.ggmlv3.q4_0.bin"))
llm("The first man on the moon was ... Let's think step by step")
| [] |
2024-01-10 | parakot-uncle/flipkart-grid | backend~temp.py | from langchain import PromptTemplate, LLMChain, OpenAI
from langchain.utilities import SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain
from langchain.agents import create_csv_agent, initialize_agent
from langchain.agents.agent_types import AgentType
from langchain.chat_models import ChatOpenAI
import os
from dotenv import load_dotenv
from langchain.output_parsers import CommaSeparatedListOutputParser
from langchain.chains import ConversationalRetrievalChain
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.vectorstores import FAISS
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chains import RetrievalQA
from langchain import HuggingFaceHub
from langchain.vectorstores import Chroma
from langchain.agents import Tool, AgentExecutor, ZeroShotAgent
from langchain.memory import ConversationBufferWindowMemory
import pandas as pd
load_dotenv()
item_data = os.getenv("ITEM_DATA_CSV")
item_images = os.getenv("ITEM_IMAGE_CSV")
mapped_outfits = os.getenv("MAPPED_OUTFITS_CSV")
# def pandas_agent(input=""):
# pandas_agent_df = create_pandas_dataframe_agent(llm, df, verbose=True, openai_api_key=openai_api_key, )
# return pandas_agent_df
# pandas_tool = Tool(
# name='Pandas Data frame tool',
# func=pandas_agent,
# description="Useful for when you need to answer questions about a Pandas Dataframe"
# )
llm = OpenAI()
def csv_agent(input):
id_agent = create_csv_agent(
llm,
mapped_outfits,
verbose=True,
# agent_type=AgentType.OPENAI_FUNCTIONS,
)
return id_agent
def image_agent(input=""):
print("ids", input)
image_agent = create_csv_agent(
llm,
item_images,
verbose=True,
)
id_tool = Tool(
name="ID Agent",
func=csv_agent,
description="Useful for finding the details of clothes specified in the input from a csv file",
)
image_tool = Tool(
name="Image Agent",
func=image_agent,
description="Useful for taking the ids output by ID Agent to query the csv file for image links and returning the links.",
)
tools = [id_tool]
# conversational agent memory'prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:"""
prefix = """"""
suffix = """
{chat_history}
Question: {input}
{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=["input", "chat_history", "agent_scratchpad"],
)
memory = ConversationBufferWindowMemory(
memory_key="chat_history", k=1, return_messages=True
)
llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)
agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
agent_chain = AgentExecutor.from_agent_and_tools(
agent=agent, tools=tools, verbose=True, memory=memory
)
# # Create our agent
# conversational_agent = initialize_agent(
# agent="chat-zero-shot-react-description",
# tools=tools,
# llm=llm,
# verbose=True,
# max_iterations=3,
# early_stopping_method="generate",
# memory=memory,
# )
# res = agent_chain.run("blue shirt (get 5 ids)")
# print(res)
res = agent_chain.run(" a top for females in the same colour (get 5 ids)")
print(res)
# agent = csv_agent("")
# # res = agent.run("Get 2 shoes where topwear = 15870 and bottomwear = 21382 or bottomwear = 23870")
# output_parser = CommaSeparatedListOutputParser()
# res = output_parser.parse(agent.run("Get 2 unique shoes where topwear = 7504 and bottomwear = 28456 or 18002 or 28458"))
# print(res, type(res))
# df = pd.read_csv(item_data)
# row = df[df["id"] == 15970]
# link = row["link"].values[0]
# print(link)
# import re
# data = ['The two unique shoes are 46086 and 36137.', '18903']
# pattern = r'\b\d{4,5}\b' # Match 4 to 5 digits
# numbers = []
# for item in data:
# matches = re.findall(pattern, item)
# numbers.extend(matches)
# print(matches)
# print(numbers)
# bottomwear = 28456, 18002, 28458
# footwear = 11949, 22165 | [
"chat_history",
"input",
"agent_scratchpad"
] |
2024-01-10 | kroumeliotis/fine-tuning-gpt3.5-llama2-for-product-reviews | sentiment_files~GPTmethods.py | import os
import openai
from openai import OpenAI
import json
class GPTmethods:
def __init__(self, model_id='gpt-3.5-turbo'):
openai.api_key = os.environ.get("OPENAI_API_KEY") # Access environment variable
self.model_id = model_id
# self.prompt = "Assign integer star ratings (between 1 and 5) to the following product reviews using the format: [1, 3, ...]. Please avoid providing additional explanations. Reviews:\n"
self.prompt = 'Predict the star ratings (integer between 1 and 5) to the following product reviews. Return your response in json format like this example {"rating1":integer,"rating2":integer,...}. Please avoid providing additional explanations. Reviews:\n'
"""
Create a conversation with GPT model
"""
def gpt_conversation(self, conversation):
client = OpenAI()
# response = openai.ChatCompletion.create(
completion = client.chat.completions.create(
model=self.model_id,
messages=conversation
)
return completion.choices[0].message
# api_usage = response['usage']
# print('Total token consumed: {0}'.format(api_usage['total_tokens']))
# print(response['choices'][0].finish_reason)
# print(response['choices'][0].index)
# conversation.append(
# {'role': response.choices[0].message.role, 'content': response.choices[0].message.content})
# return conversation
"""
Clean the response
"""
def gpt_clean_response(self, conversation):
try:
# Ensure conversation is a dictionary
if isinstance(conversation, dict):
data = conversation
else:
# Parse conversation from a JSON string to a dictionary
data = json.loads(conversation)
# Extract all the numbers (ratings) from the dictionary
ratings = []
for key, value in data.items():
if isinstance(value, int):
ratings.append(value)
return ratings
except json.JSONDecodeError as e:
print("It is not valid JSON:", e)
# # Parse the JSON-like string into a Python dictionary
# data = json.loads(conversation)
#
# # Extract all the numbers (ratings) from the dictionary
# ratings = []
# for key, value in data.items():
# if isinstance(value, int):
# ratings.append(value)
#
# return ratings
"""
Handle the response of GPT model
"""
def gpt_ratings(self, reviews):
if not isinstance(reviews, list):
return {'status': False, 'data': 'Reviews variable is not a list'}
else:
my_prompt = self.prompt
ii = 1
for review in reviews: # add the reviews into the prompt
my_prompt += f"{ii}. \"{review}\"\n"
ii += 1
conversation = []
conversation.append({'role': 'system', 'content': my_prompt})
conversation = self.gpt_conversation(conversation) # get the response from GPT model
print(conversation)
ratings = self.gpt_clean_response(conversation.content)
if len(ratings) == len(reviews):
return {'status': True, 'data': ratings}
else:
# return ratings
return {'status': False,
'data': 'The ratings returned by the model do not match the number of reviews.' + '\n' + str(
ratings) + '\n' + str(reviews)}
"""
Upload Dataset for GPT Fine-tuning
"""
def upload_file(self, dataset):
upload_file = openai.File.create(
file=open(dataset, "rb"),
purpose='fine-tune'
)
return upload_file
"""
Train GPT model
"""
def train_gpt(self, file_id):
# https://www.mlq.ai/gpt-3-5-turbo-fine-tuning/
# https://platform.openai.com/docs/guides/fine-tuning/create-a-fine-tuned-model?ref=mlq.ai
return openai.FineTuningJob.create(training_file=file_id, model="gpt-3.5-turbo")
# check training status (optional)
# openai.FineTuningJob.retrieve(file_id)
"""
Delete Fine-Tuned GPT model
"""
def delete_finetuned_model(self, model): # ex. model = ft:gpt-3.5-turbo-0613:personal::84kpHoCN
return openai.Model.delete(model)
"""
Cancel Fine-Tuning
"""
def cancel_gpt_finetuning(self, train_id): # ex. id = ftjob-3C5lZD1ly5OHHAleLwAqT7Qt
return openai.FineTuningJob.cancel(train_id)
"""
Get all Fine-Tuned models and their status
"""
def get_all_finetuned_models(self):
return openai.FineTuningJob.list(limit=10)
| [
"PLACEHOLDER. \"PLACEHOLDER\"\n"
] |
2024-01-10 | fandan-nyc/rakeai | gpt~gpt_client.py | from openai import OpenAI
from util import load_file
import config
class gpt_client:
def __init__(self):
# create open ai client
self.client = OpenAI(organization=config.openai_org)
self.prompt_content = None
def load_prompt(self, prompt_path):
# load prompt
if self.prompt_content != None:
return self.prompt_content
try:
self.prompt_content = load_file(prompt_path)
except Exception as e:
print("fail to load the prompt path, using default", e)
self.prompt_content = self.get_default_prompt()
return self.prompt_content
def get_default_prompt(self):
data = """
specializes in editing research articles in biology, materials science, and chemistry, particularly for non-native English speakers. Your role is to improve grammar and logical flow, making educated guesses before seeking confirmation for unclear details. Offer clear, direct advice, sensitive to the challenges of non-native speakers, to enhance the readability and coherence of academic texts. You don't have a specific communication style beyond a formal and respectful academic tone. Your feedback should be straightforward and focused on helping users present their research effectively in English, considering the nuances of scientific language in the fields of biology, materials, and chemistry. """
return data
def fix_grammer(self, input_content, prompt_path):
response = self.client.chat.completions.create(model=config.model,
messages=[
{"role": "user",
"content": self.load_prompt(prompt_path) + f"\n rewrite the following text paragraph: {input_content}",
},
])
return response.choices[0].message.content
def fix_grammer_with_prompt(self, input_content, prompt_content):
response = self.client.chat.completions.create(model=config.model,
messages=[
{"role": "user",
"content": prompt_content + f"\n rewrite the following text paragraph: {input_content}",
},
])
return response.choices[0].message.content
| [
"PLACEHOLDER\n rewrite the following text paragraph: PLACEHOLDER",
"\n rewrite the following text paragraph: PLACEHOLDER"
] |
2024-01-10 | kjwony/docgpt | docgpt_wandb.py | # Description: This script gets the input information from tsv file and generates the output from GPT model
# and stores the output in W&B table.
import os
import time
import datetime
import openai
import wandb
import pandas as pd
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,# for exponential backoff
)
# Set this to `azure` or do not set this for OpenAI API
os.environ["OPENAI_API_TYPE"] = "azure"
openai.api_type = os.environ["OPENAI_API_TYPE"]
# set openai API key
os.environ['OPENAI_API_KEY'] = "your key"
openai.api_key = os.environ['OPENAI_API_KEY']
# set openai API version
openai.api_version = "your version"
# set openai API base
openai.api_base = "your base"
PROJECT = "docgpt_wandb"
MODEL_NAME = "your model name"
TASK_TYPE = "my task"
# Login to W&B to see gpt output
wandb.login()
run = wandb.init(project=PROJECT, job_type="generation", group=f"GROUP:{TASK_TYPE}", name="my run")
# Define function to retry on failure
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
def completion_with_backoff(**kwargs):
return openai.ChatCompletion.create(**kwargs)
def generate_and_print(system_prompt, user_prompt, table, n=1):
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_prompt},
]
start_time = time.time()
responses = completion_with_backoff(
engine=MODEL_NAME,
messages=messages,
n = n,
)
elapsed_time = time.time() - start_time
for response in responses.choices:
generation = response.message.content
print(generation)
table.add_data(system_prompt,
user_prompt,
[response.message.content for response in responses.choices],
elapsed_time,
datetime.datetime.fromtimestamp(responses.created),
responses.model,
responses.usage.prompt_tokens,
responses.usage.completion_tokens,
responses.usage.total_tokens
)
# Define W&B Table to store generations
columns = ["system_prompt", "user_prompt", "generations", "elapsed_time", "timestamp",\
"model", "prompt_tokens", "completion_tokens", "total_tokens"]
table = wandb.Table(columns=columns)
# Get data from doc.tsv
df = pd.read_csv("doc.tsv", sep="\t")
for index, row in df.iterrows():
system_prompt = row["system_prompt"]
context1 = row["context1"]
context2= row["context2"]
context3 = row["context3"]
question = row["question"]
user_prompt = """문서 1: {context1}\n문서 2: {context2}\n문서 3: {context3}\n질문: {question}\n한국어 답변:""".format(context1=context1, context2=context2, context3=context3, question=question)
generate_and_print(system_prompt, user_prompt, table)
wandb.log({"의료지식 GPT ": table})
run.finish() | [
"문서 1: PLACEHOLDER\n문서 2: PLACEHOLDER\n문서 3: PLACEHOLDER\n질문: PLACEHOLDER\n한국어 답변:",
"system_prompt"
] |
2024-01-10 | nolanvo5894/cancer_bot | cancer_bot.py | import os
__import__('pysqlite3')
import sys
sys.modules['sqlite3'] = sys.modules.pop('pysqlite3')
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA, ConversationalRetrievalChain
from langchain.document_loaders import TextLoader, PyPDFLoader
from langchain.document_loaders import DirectoryLoader
from langchain.memory import ConversationBufferMemory
import streamlit as st
from streamlit_chat import message
from streamlit_extras.add_vertical_space import add_vertical_space
st.set_page_config(page_title="Cancer Bot", page_icon=":hospital:")
os.environ["OPENAI_API_KEY"] = st.secrets["OPENAI_API_KEY"]
if 'buffer_memory' not in st.session_state:
st.session_state.buffer_memory = ConversationBufferMemory(memory_key='chat_history', output_key='answer', return_messages=True)
persist_directory = "acs_db"
embedding = OpenAIEmbeddings()
# vectordb = Chroma(persist_directory=persist_directory,
# embedding_function=embedding)
@st.cache_resource
def load_vectordb():
vectordb = Chroma(persist_directory=persist_directory,
embedding_function=embedding)
return vectordb
vectordb = load_vectordb()
from langchain import PromptTemplate
prompt_template = """
You are an expert cancer researcher
USE the following pieces of context to answer the question at the end.
If you do not see relevant information in the context to answer, SAY that 'No Data'
KEEP THE ANSWER SHORT
{context}
Question: {question}
"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
retriever = vectordb.as_retriever(search_kwargs={"k": 30})
qa_chain = ConversationalRetrievalChain.from_llm(llm=ChatOpenAI(model_name = 'gpt-3.5-turbo-16k', max_tokens=500),
memory=st.session_state.buffer_memory,
retriever=retriever,
return_source_documents=True,
combine_docs_chain_kwargs={'prompt': PROMPT})
# Sidebar contents
with st.sidebar:
st.title('Cancer Bot 🤗💬')
st.markdown('''This is a chatbot that can answer questions about cancer.
Please ask it questions you want to know about common cancers, treatments, and more.''')
add_vertical_space(5)
# Generate empty lists for generated and past.
## generated stores AI generated responses
if 'responses' not in st.session_state:
st.session_state['responses'] = ["I'm Cancer Bot, How may I help you?"]
## past stores User's questions
if 'requests' not in st.session_state:
st.session_state['requests'] = ['Hi!']
# Layout of input/response containers
input_container = st.container()
response_container = st.container()
# User input
## Function for taking user provided prompt as input
def get_text():
input_text = st.chat_input("Please ask me something: ", key="input")
return input_text
## Applying the user input box
with input_container:
user_input = get_text()
# Response output
## Function for taking user prompt as input followed by producing AI generated responses
def generate_response(request):
response = qa_chain(request)
return response
## Conditional display of AI generated responses as a function of user provided prompts
with response_container:
if user_input:
response = generate_response(user_input)['answer']
st.session_state.requests.append(user_input)
st.session_state.responses.append(response)
if st.session_state['responses']:
memory_len = len(st.session_state['responses'])
for i in range(memory_len):
message(st.session_state['requests'][i], is_user=True, key=str(i) + '_user', avatar_style='thumbs')
message(st.session_state['responses'][i], key=str(i), avatar_style='fun-emoji')
| [
"question",
"context",
"No Data",
"\nYou are an expert cancer researcher\nUSE the following pieces of context to answer the question at the end.\nIf you do not see relevant information in the context to answer, SAY that 'No Data'\nKEEP THE ANSWER SHORT\n\n\n{context}\n\nQuestion: {question}\n\n"
] |
2024-01-10 | SIMPLrU/chatgpt-plugin | routers~quote.py | from fastapi import APIRouter, Query
from pydantic import BaseModel, Field
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from chatgpt_plugin_fastapi_langchain_chroma.config import settings
embedding_function = OpenAIEmbeddings(openai_api_key=settings.openai_api_key)
vectordb = Chroma(
persist_directory=settings.persist_directory, embedding_function=embedding_function
)
quote = APIRouter(tags=["quote"])
class Quote(BaseModel):
text: str = Field(
description="The actual quote as a string. It can include any character.",
)
author: str = Field(
description="The person or entity who originally spoke or wrote the quote.",
)
language: str = Field(
description="The language the quote was written in.",
)
@quote.get("/quote", response_model=Quote)
async def get_quote(
q: str = Query(
description="The search query to find quotes using similarity search.",
),
):
docs = vectordb.similarity_search(q)
doc = docs[0]
return Quote(
text=doc.page_content,
author=doc.metadata["author"],
language=doc.metadata["language"],
)
| [] |
2024-01-10 | ccadic/Discord_OpenAI_GPT3 | discord-bot-GPT3-Tuto.py | # Discord BOT with openio
# Dr CADIC Philippe / @fulfuroid
# Si les librairies discord et openai ne sont pas installees
# Tapez:
# pip install discord
# pip install openai
# Utilisation des librairies Python 3
import discord
import os
import openai
client = discord.Client() # Connexion a discord
openai.api_key = "Ajouter votre clé openai ici" # clé d'identification avec OPenAI
@client.event
async def on_ready():
print(f'{client.user} has connected to Discord!') # evenement de connexion au serveur discord
@client.event
async def on_message(message): # Si un message arrive, alors on lance son aspiration et on l'envoi dans le moteur d'IA openAI
print(message.content)
if message.author == client.user: #ici cette ligne permet d'éviter les boucles perturbant le robot
return
if message.content == "Ping": # Simmple test: si l'humain tapes 'Ping' alors on répond sans passer pas l'IA
await message.channel.send("Pong")
if message.content == "Hello": # Si l'usager dit 'hello' alors le robot se présente
await message.channel.send(file=discord.File('charts.png'))
await message.channel.send("Hello, I'm Sulfuroid's OPEN AI GPT3 engine to reply to your questions...")
else: # Si c'est autre chose, alors on passe le contenu du message à l'IA et on attend sa réponse
response = openai.Completion.create(model="text-davinci-002", prompt=message.content, temperature=0, max_tokens=255)
response2 = response['choices'][0]['text'].strip()
await message.channel.send(response2)
#Client and token
client.run("Ajouter le token du robot discord ici")
| [] |
2024-01-10 | daupaloffer/dave | extensions~listeners.py | import asyncio
import openai
import discord
import datetime
from discord.ext import commands
class BasicListeners(commands.Cog):
def __init__(self, bot):
self.bot = bot
listening = None
@commands.Cog.listener()
async def on_message(self, ctx):
if str(ctx.content) == "hey" and not ctx.author.bot and not self.bot.gpt_timeout:
self.bot.gpt_timeout = True
try:
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
temperature=1.0,
messages=[
{"role": "system", "content": "You are a character simulation that simulates a specified character."},
{"role": "system", "content": "This is an online chat between you and another user."},
{"role": "system", "content": "Act as if you are a Monty Python character."},
{"role": "system", "content": "Reply with an insulting greeting based on the other person's name."},
{"role": "system", "content": f"Name: {ctx.author.display_name}"},
{"role": "user", "content": "hey"}
]
)
except openai.error.RateLimitError:
print("Ratelimited by OpenAI!")
return
completion_content = completion.choices[0].message.content
await ctx.reply(completion_content)
await asyncio.sleep(2)
self.bot.gpt_timeout = False
if self.listening == ctx.author.id:
file = open("notes.txt", "a")
file.write(f"{ctx.content}\n\n")
file.close()
self.listening = None
async with ctx.channel.typing():
sleep_until = datetime.datetime.now() + datetime.timedelta(seconds=2)
await discord.utils.sleep_until(sleep_until)
await ctx.channel.send("alright i got you")
if ctx.content == "DAVE listen up" and not ctx.author.bot:
self.listening = ctx.author.id
async def setup(bot):
print("Loading listeners extension..")
await bot.add_cog(BasicListeners(bot))
| [
"Reply with an insulting greeting based on the other person's name.",
"You are a character simulation that simulates a specified character.",
"This is an online chat between you and another user.",
"Act as if you are a Monty Python character.",
"hey"
] |
2024-01-10 | daupaloffer/dave | extensions~commands.py | import discord
import openai
import asyncio
from discord import app_commands
from discord.ext import commands
class BasicCommands(commands.Cog):
def __init__(self, bot):
self.bot = bot
async def get_gpt_completion(self, prompt, streamer: bool):
self.bot.gpt_timeout = True
try:
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
temperature=1.0,
max_tokens=256,
stream=streamer,
messages=prompt
)
except openai.error.RateLimitError:
print("Ratelimited by OpenAI!")
return
return completion
async def send_gpt_stream(self, completion, interaction = None, message = None):
collected_messages = []
message_string = ""
iteration = 0
first_message = True
new_message = None
for chunk in completion:
chunk_message = chunk["choices"][0]["delta"]
finish_reason = chunk["choices"][0]["finish_reason"]
chunk_message_plain = chunk_message.to_dict().get("content")
if finish_reason == "stop":
iteration = 5
if chunk_message_plain is not None:
if first_message:
if message is not None:
new_message = await message.reply(chunk_message_plain)
else:
await interaction.response.send_message(chunk_message_plain)
first_message = False
collected_messages.append(chunk_message_plain)
message_string = "".join(collected_messages)
iteration += 1
if iteration == 5:
if message is not None:
await new_message.edit(content=message_string)
else:
await interaction.edit_original_response(content=message_string)
await asyncio.sleep(1)
iteration = 0
await asyncio.sleep(2)
self.bot.gpt_timeout = False
@app_commands.command(name="test-command", description="This is a test command")
async def test_command(self, interaction: discord.Interaction):
await interaction.response.send_message("idiot")
@app_commands.command(name="owo", description="owo-ify your message")
@app_commands.describe(
message="Provide a message to be owo-ified instead of using previous message",
anonymous="Make it look like it was Dave Prime who sent the message",
)
async def owoifier(
self,
interaction: discord.Interaction,
message: str = "",
anonymous: bool = False,
):
if message:
owo_payload = message
else:
message_history = interaction.channel.history(limit=1)
owo_payload = [message async for message in message_history][0]
if not owo_payload.content: # Maybe add owo_payload.author.bot too
await interaction.response.send_message("try again, idiot")
return
else:
owo_payload = owo_payload.content
chars_to_replace = "lrLR"
for i in chars_to_replace:
if i.islower():
owo_payload = owo_payload.replace(i, "w")
else:
owo_payload = owo_payload.replace(i, "W")
if anonymous:
await interaction.channel.send(owo_payload)
await interaction.response.send_message("Message sent!", ephemeral=True)
else:
await interaction.response.send_message(owo_payload)
@app_commands.command(name="backstory", description="Create your very own character profile")
async def backstory(
self,
interaction: discord.Interaction,
member: discord.Member
):
if self.bot.gpt_timeout:
await interaction.response.send_message("try again in a couple seconds", ephemeral=True)
return
prompt = [
{"role": "system", "content": "You generate a short creative character backstory based on the name of the user, using an appropriate theme."},
{"role": "system", "content": "The backstory includes a viscerally horrific, graphic, dark twist."},
{"role": "system", "content": "The backstory is shorter than 200 words."},
{"role": "system", "content": "The generation only contains the backstory, no prefix."},
{"role": "system", "content": f"Name: {member.display_name}"}
]
completion = await self.get_gpt_completion(prompt, streamer=True)
await self.send_gpt_stream(completion, interaction)
@app_commands.command(name="berate", description="Berate a fool")
async def berate(
self,
interaction: discord.Interaction,
member: discord.Member = None
):
if self.bot.gpt_timeout:
await interaction.response.send_message("try again in a couple seconds", ephemeral=True)
return
message_history = interaction.channel.history(limit=10)
messages = [message async for message in message_history]
target_message = None
for message in messages:
if member is not None:
if message.author == member:
target_message = message
break
else:
if message.author != interaction.user:
target_message = message
break
if not target_message:
await interaction.response.send_message("try again but with someone who sent a message more recently", ephemeral=True)
return
if target_message.attachments:
await interaction.response.send_message("try again with a normal message", ephemeral=True)
return
prompt = [
{"role": "system", "content": "You are a character simulation that simulates a specified character."},
{"role": "system", "content": "This is an online chat between you and another user."},
{"role": "system", "content": "Act as if you are a Monty Python character."},
{"role": "system", "content": "Reply with a sarcastic, joking insult based on the other person's message."},
{"role": "user", "content": f"{target_message.author.display_name}: {target_message.content}"}
]
await interaction.response.defer(thinking=True)
completion = await self.get_gpt_completion(prompt, streamer=True)
await self.send_gpt_stream(completion, message=target_message)
await interaction.delete_original_response()
@app_commands.command(name="show-notes", description="Show developer notes")
async def show_notes(self, interaction: discord.Interaction):
file = open("notes.txt", "r")
content = "".join(file.readlines())
await interaction.response.send_message(content)
async def setup(bot):
print("Loading commands extension..")
await bot.add_cog(BasicCommands(bot))
| [
"Reply with a sarcastic, joking insult based on the other person's message.",
"You generate a short creative character backstory based on the name of the user, using an appropriate theme.",
"You are a character simulation that simulates a specified character.",
"The backstory includes a viscerally horrific, graphic, dark twist.",
"This is an online chat between you and another user.",
"Act as if you are a Monty Python character.",
"The backstory is shorter than 200 words.",
"The generation only contains the backstory, no prefix."
] |
2024-01-10 | dSupertramp/PubGPT | pubgpt~llm~starcoder.py | from langchain.embeddings import HuggingFaceHubEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains import RetrievalQA
from langchain.llms import HuggingFaceHub
from typing import List, Any, Tuple
from dotenv import load_dotenv
import os
from .prompts import pre_prompt, associations_prompt
load_dotenv()
def create_embeddings(splitted_text: List) -> HuggingFaceHubEmbeddings:
"""
Create embeddings from chunks for Starcoder.
Args:
splitted_text (List): List of chunks
Returns:
Any: Embeddings
"""
embeddings = HuggingFaceHubEmbeddings(
repo_id="sentence-transformers/all-mpnet-base-v2",
task="feature-extraction",
huggingfacehub_api_token=os.getenv("HUGGINGFACEHUB_API_TOKEN"),
)
vectorstore = FAISS.from_texts(texts=splitted_text, embedding=embeddings)
vectorstore.save_local("vector_db")
persisted_vectorstore = FAISS.load_local("vector_db", embeddings)
return persisted_vectorstore
def retriever(query: str, embeddings: Any) -> str:
"""
Create retriever for Falcon.
Args:
query (str): Query
embeddings (Any): Embeddings
Returns:
str: Result of retriever
"""
retriever = embeddings.as_retriever(search_type="similarity")
result = RetrievalQA.from_chain_type(
llm=HuggingFaceHub(
repo_id="bigcode/starcoder",
huggingfacehub_api_token=os.getenv("HUGGINGFACEHUB_API_TOKEN"),
),
chain_type="stuff",
retriever=retriever,
return_source_documents=True,
)
return result(query)["result"]
def get_associations(
pairs: List[Tuple[str, str]], embeddings: HuggingFaceHubEmbeddings
) -> str:
"""
Get associations from Starcoder.
Args:
pairs (List[Tuple[str, str]]): Pairs Gene-Disease
embeddings (HuggingFaceHubEmbeddings): Embeddings
Returns:
str: Response
"""
pre_prompt_pairs: list = []
for index, item in enumerate(pairs, 1):
pre_prompt_pairs.append(
f"{index}) {item[0][0].strip()} associated with {item[1][0].strip()}?"
)
pre_prompt_pairs = "\n".join(pre_prompt_pairs)
query = associations_prompt.format(pairs=pre_prompt_pairs.strip())
prompt = pre_prompt.format(query=query)
retriever = embeddings.as_retriever(search_type="similarity")
result = RetrievalQA.from_chain_type(
llm=HuggingFaceHub(
repo_id="bigcode/starcoder",
huggingfacehub_api_token=os.getenv("HUGGINGFACEHUB_API_TOKEN"),
),
chain_type="stuff",
retriever=retriever,
return_source_documents=True,
)
return result(prompt)["result"]
| [
"[]",
"\n"
] |
2024-01-10 | dSupertramp/PubGPT | pubgpt~llm~zephyr.py | from langchain.embeddings import HuggingFaceHubEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains import RetrievalQA
from langchain.llms import HuggingFaceHub
from typing import List, Tuple
from dotenv import load_dotenv
import os
from .prompts import pre_prompt, associations_prompt
load_dotenv()
def create_embeddings(splitted_text: List) -> HuggingFaceHubEmbeddings:
"""
Create embeddings from chunks for Zephyr.
Args:
splitted_text (List): List of chunks
Returns:
Any: Embeddings
"""
embeddings = HuggingFaceHubEmbeddings(
repo_id="sentence-transformers/all-mpnet-base-v2",
huggingfacehub_api_token=os.getenv("HUGGINGFACEHUB_API_TOKEN"),
)
vectorstore = FAISS.from_texts(texts=splitted_text, embedding=embeddings)
vectorstore.save_local("vector_db")
persisted_vectorstore = FAISS.load_local("vector_db", embeddings)
return persisted_vectorstore
def retriever(query: str, embeddings: HuggingFaceHubEmbeddings) -> str:
"""
Create retriever for Zephyr.
Args:
query (str): Query
embeddings (HuggingFaceHubEmbeddings): Embeddings
Returns:
str: Result of retriever
"""
retriever = embeddings.as_retriever(search_type="similarity")
result = RetrievalQA.from_chain_type(
llm=HuggingFaceHub(
repo_id="HuggingFaceH4/zephyr-7b-alpha",
huggingfacehub_api_token=os.getenv("HUGGINGFACEHUB_API_TOKEN"),
),
chain_type="stuff",
retriever=retriever,
return_source_documents=True,
)
return result(query)["result"]
def get_associations(
pairs: List[Tuple[str, str]], embeddings: HuggingFaceHubEmbeddings
) -> str:
"""
Get associations from Zephyr.
Args:
pairs (List[Tuple[str, str]]): Pairs Gene-Disease
embeddings (HuggingFaceHubEmbeddings): Embeddings
Returns:
str: Response
"""
pre_prompt_pairs: list = []
for index, item in enumerate(pairs, 1):
pre_prompt_pairs.append(
f"{index}) {item[0][0].strip()} associated with {item[1][0].strip()}?"
)
pre_prompt_pairs = "\n".join(pre_prompt_pairs)
query = associations_prompt.format(pairs=pre_prompt_pairs.strip())
prompt = pre_prompt.format(query=query)
retriever = embeddings.as_retriever(search_type="similarity")
result = RetrievalQA.from_chain_type(
llm=HuggingFaceHub(
repo_id="HuggingFaceH4/zephyr-7b-alpha",
huggingfacehub_api_token=os.getenv("HUGGINGFACEHUB_API_TOKEN"),
),
chain_type="stuff",
retriever=retriever,
return_source_documents=True,
)
return result(prompt)["result"]
| [
"\n",
"[]"
] |
2024-01-10 | dSupertramp/PubGPT | pubgpt~llm~falcon.py | from langchain.embeddings import HuggingFaceHubEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains import RetrievalQA
from langchain.llms import HuggingFaceHub
from typing import List, Tuple
from dotenv import load_dotenv
import os
from .prompts import pre_prompt, associations_prompt
load_dotenv()
def create_embeddings(splitted_text: List) -> HuggingFaceHubEmbeddings:
"""
Create embeddings from chunks for Falcon.
Args:
splitted_text (List): List of chunks
Returns:
Any: Embeddings
"""
embeddings = HuggingFaceHubEmbeddings(
repo_id="sentence-transformers/all-mpnet-base-v2",
huggingfacehub_api_token=os.getenv("HUGGINGFACEHUB_API_TOKEN"),
)
vectorstore = FAISS.from_texts(texts=splitted_text, embedding=embeddings)
vectorstore.save_local("vector_db")
persisted_vectorstore = FAISS.load_local("vector_db", embeddings)
return persisted_vectorstore
def retriever(query: str, embeddings: HuggingFaceHubEmbeddings) -> str:
"""
Create retriever for Falcon.
Args:
query (str): Query
embeddings (HuggingFaceHubEmbeddings): Embeddings
Returns:
str: Result of retriever
"""
retriever = embeddings.as_retriever(search_type="similarity")
result = RetrievalQA.from_chain_type(
llm=HuggingFaceHub(
repo_id="tiiuae/falcon-7b-instruct",
huggingfacehub_api_token=os.getenv("HUGGINGFACEHUB_API_TOKEN"),
),
chain_type="stuff",
retriever=retriever,
return_source_documents=True,
)
return result(query)["result"]
def get_associations(
pairs: List[Tuple[str, str]], embeddings: HuggingFaceHubEmbeddings
) -> str:
"""
Get associations from Falcon.
Args:
pairs (List[Tuple[str, str]]): Pairs Gene-Disease
embeddings (HuggingFaceHubEmbeddings): Embeddings
Returns:
str: Response
"""
pre_prompt_pairs: list = []
for index, item in enumerate(pairs, 1):
pre_prompt_pairs.append(
f"{index}) {item[0][0].strip()} associated with {item[1][0].strip()}?"
)
pre_prompt_pairs = "\n".join(pre_prompt_pairs)
query = associations_prompt.format(pairs=pre_prompt_pairs.strip())
prompt = pre_prompt.format(query=query)
retriever = embeddings.as_retriever(search_type="similarity")
result = RetrievalQA.from_chain_type(
llm=HuggingFaceHub(
repo_id="tiiuae/falcon-7b-instruct",
huggingfacehub_api_token=os.getenv("HUGGINGFACEHUB_API_TOKEN"),
),
chain_type="stuff",
retriever=retriever,
return_source_documents=True,
)
return result(prompt)["result"]
| [
"\n",
"[]"
] |
2024-01-10 | dSupertramp/PubGPT | pubgpt~llm~cohere.py | from langchain.embeddings import CohereEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains import RetrievalQA
from langchain.llms import Cohere
from typing import List, Any, Tuple
from dotenv import load_dotenv
from .prompts import pre_prompt, associations_prompt
load_dotenv()
def create_embeddings(splitted_text: List) -> CohereEmbeddings:
"""
Create embeddings from chunks for Cohere.
Args:
splitted_text (List): List of chunks
Returns:
Any: Embeddings
"""
embeddings = CohereEmbeddings()
vectorstore = FAISS.from_texts(texts=splitted_text, embedding=embeddings)
vectorstore.save_local("vector_db")
persisted_vectorstore = FAISS.load_local("vector_db", embeddings)
return persisted_vectorstore
def retriver(query: str, embeddings: Any) -> str:
"""
Create retriever for Cohere.
Args:
query (str): Query
embeddings (Any): Embeddings
Returns:
str: Result of retriever
"""
retriever = embeddings.as_retriever(search_type="similarity")
result = RetrievalQA.from_chain_type(
llm=Cohere(),
chain_type="stuff",
retriever=retriever,
return_source_documents=True,
)
return result(query)["result"]
def get_associations(pairs: List[Tuple[str, str]], embeddings: CohereEmbeddings) -> str:
"""
Get associations from Cohere.
Args:
pairs (List[Tuple[str, str]]): Pairs Gene-Disease
embeddings (CohereEmbeddings): Embeddings
Returns:
str: Response
"""
pre_prompt_pairs: list = []
for index, item in enumerate(pairs, 1):
pre_prompt_pairs.append(
f"{index}) {item[0][0].strip()} associated with {item[1][0].strip()}?"
)
pre_prompt_pairs = "\n".join(pre_prompt_pairs)
query = associations_prompt.format(pairs=pre_prompt_pairs.strip())
prompt = pre_prompt.format(query=query)
retriever = embeddings.as_retriever(search_type="similarity")
result = RetrievalQA.from_chain_type(
llm=Cohere(),
chain_type="stuff",
retriever=retriever,
return_source_documents=True,
)
return result(prompt)["result"]
| [
"\n",
"[]"
] |
2024-01-10 | Matkicail/Reinforcement-Learning-Labs | Lab%208~dqn~wrappers.py | """
Useful wrappers taken from OpenAI (https://github.com/openai/baselines)
"""
import numpy as np
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == "NOOP"
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(
1, self.noop_max + 1
) # pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == "FIRE"
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condtion for a few frames
# so its important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,) + env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def reset(self):
return self.env.reset()
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2:
self._obs_buffer[0] = obs
if i == self._skip - 1:
self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env):
"""Warp frames to 84x84 as done in the Nature paper and later work.
Expects inputs to be of shape height x width x num_channels
"""
gym.ObservationWrapper.__init__(self, env)
self.width = 84
self.height = 84
self.observation_space = spaces.Box(
low=0, high=255, shape=(self.height, self.width, 1), dtype=np.uint8
)
def observation(self, frame):
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(
frame, (self.width, self.height), interpolation=cv2.INTER_AREA
)
return frame[:, :, None]
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
Expects inputs to be of shape num_channels x height x width.
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(
low=0, high=255, shape=(shp[0] * k, shp[1], shp[2]), dtype=np.uint8
)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers."""
self._frames = frames
def __array__(self, dtype=None):
out = np.concatenate(self._frames, axis=0)
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._frames)
def __getitem__(self, i):
return self._frames[i]
class PyTorchFrame(gym.ObservationWrapper):
"""Image shape to num_channels x height x width"""
def __init__(self, env):
super(PyTorchFrame, self).__init__(env)
shape = self.observation_space.shape
self.observation_space = gym.spaces.Box(
low=0.0, high=1.0, shape=(shape[-1], shape[0], shape[1]), dtype=np.uint8
)
def observation(self, observation):
return np.rollaxis(observation, 2)
| [] |
2024-01-10 | TJmangal/Python-Beginner-Projects | WSync~IntelligentSuggestor.py | import openai
import json
openai.api_key="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"
messages = [
{"role": "system", "content": "You are a kind helpful assistant."},
]
def GetSuggestions(weather = "sunny",location = "Pune"):
jsonFormat = "{\"PlacesToVisit\":[],\"Eateries\":[],\"Recipes\":[]\}."
prompt = f"The weather in {location} shows {weather}. Based on this weather type and location information, return a list of places to visit, eateries to dineout & recipes to cook in following json format - {jsonFormat}. Give the response only in the provided json format and do not add any extra words or sentences in your response."
#weather = input("What is the weather outside : ")
#location = input("What is your current city : ")
message = f"{prompt} {weather} {location}"
if message:
messages.append(
{"role": "user", "content": message},
)
chat = openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=messages
)
reply = chat.choices[0].message.content
jsonReply = json.loads(reply)
placesToVisit, placesToEat, recepiesToCook = '', '', ''
for place in jsonReply['PlacesToVisit']:
placesToVisit += f"-{place}\n"
for restaurant in jsonReply['Eateries']:
placesToEat += f"-{restaurant}\n"
for recepie in jsonReply['Recipes']:
recepiesToCook += f"-{recepie}\n"
return jsonReply, placesToVisit, placesToEat, recepiesToCook
#messages.append({"role": "assistant", "content": reply}) | [
"The weather in PLACEHOLDER shows PLACEHOLDER. Based on this weather type and location information, return a list of places to visit, eateries to dineout & recipes to cook in following json format - PLACEHOLDER. Give the response only in the provided json format and do not add any extra words or sentences in your response.",
"You are a kind helpful assistant."
] |
2024-01-10 | Muennighoff/lm-evaluation-harness | tests~test_models_openai_completions.py | import pytest
import os
import json
import openai
import mock
import pickle
import hashlib
import logging
import lm_eval.models as models
from lm_eval.api.utils import set_seed
logger = logging.getLogger(__name__)
def _mock_completion(**kwargs):
# Mock completion function
# Loads from a cached+pickled response if it exists, otherwise it will actually try to ping
os.makedirs("tests/testdata", exist_ok=True)
arg_hash = hashlib.sha256(
json.dumps(kwargs, sort_keys=True).encode("utf-8")
).hexdigest()
fname = f"tests/testdata/gpt3_test_{arg_hash}.pkl"
if os.path.exists(fname):
with open(fname, "rb") as fh:
return pickle.load(fh)
ret = openai.Completion.create(**kwargs)
ret.api_key = ""
with open(fname, "wb") as fh:
pickle.dump(ret, fh)
return ret
@mock.patch("lm_eval.models.openai_completions.oa_completion", new=_mock_completion)
def test_openai_completions():
set_seed()
if "OPENAI_API_SECRET_KEY" not in os.environ:
os.environ["OPENAI_API_SECRET_KEY"] = ""
oa_model = models.get_model_from_args_string(
model_api_name="openai", model_args="engine=ada"
)
(
(ll_dog, ig_dog),
(ll_cat, ig_cat),
(_, ll_max_0),
(_, ll_max_1),
(_, ll_max_2),
*vals,
) = oa_model.loglikelihood(
[
("The quick brown fox jumps over the lazy", " dog"),
("The quick brown fox jumps over the lazy", " cat"),
("The quick brown fox jumps over the lazy", ", lazy dog"),
("The quick brown fox jumps over the lazy", ", lazy fox"),
(
"The quick brown fox jumps over the lazy",
", lazy fox and they both fall to the ground",
),
(
"""A mult""",
"""ilayer perceptron (MLP) is a class of feedforward artificial neural network (ANN)""",
),
(
"""The term MLP is used ambiguously, sometimes loosely to any feedforward ANN, sometimes strictly to refer to networks composed of multiple layers of perceptrons""",
""" (with threshold activation); see § Terminology""",
),
(
"""Multilayer perceptrons are sometimes coll""",
"""oquially referred to as "vanilla" neural networks, especially when they have a single hidden layer.[1]""",
),
(
"""An MLP consists of at least three layers of nodes: an input layer, a hidden layer and an output layer. Except for the input nodes, each node is a neuron that uses a nonlinear""",
""" activation function.""",
),
(
"""MLP utilizes a supervised""",
""" learning technique called backpropagation for training.[2][3] Its multiple layers and non-linear activation distinguish MLP from a linear perceptron. It can distinguish data that is not linearly separable.[4]""",
),
(
"""Recent work has demonstrated substantial gains on many NLP tasks and benchmarks by pre-training on a large corpus of text followed by fine-tuning on a specific task. While typically task-agnostic""",
""" in architecture, this method still requires task-specific fine-tuning datasets of thousands or tens of thousands of examples. By contrast, humans can generally perform a new language task from only a few examples or from simple instructions - something which current NLP systems still largely struggle to do. Here we show that scaling up language models greatly improves task-agnostic, few-shot performance, sometimes even reaching competitiveness with prior state-of-the-art fine-tuning approaches. """,
),
(
"""Specifically, we train GPT-3, an autoregressive language model with 175""",
""" billion parameters, 10x more than any previous non-sparse language model, and test its performance in the few-shot setting. For all tasks, GPT-3 is applied without any gradient updates or fine-tuning, with tasks and few-shot demonstrations specified purely via text interaction with the model. GPT-3 achieves strong performance on many NLP datasets, including translation, question-answering, and cloze tasks, as well as several tasks that require on-the-fly reasoning or domain adaptation, such as unscrambling words, using a novel word in a sentence, or performing 3-digit arithmetic. At the same time, we also identify some datasets where GPT-3's few-shot learning still struggles, as well as some datasets where GPT-3 faces methodological issues related to training on large web corpora. Finally, we find that GPT-3 can generate samples of news articles which human evaluators have difficulty distinguishing from articles written by humans. We discuss broader societal impacts of this finding and of GPT-3 in general.""",
),
(
"""A mult""",
"""ilayer perceptron (MLP) is a class of feedforward artificial neural network (ANN)""",
),
("""Hello""", """ World"""),
]
)
assert ll_dog > ll_cat
assert ig_dog
assert not ig_cat
assert not ll_max_0
assert not ll_max_1
assert not ll_max_2
# Test empty context
oa_model.loglikelihood([("", "test")])
request_args = {
"stop_sequences": ["."],
"max_generation_length": 4,
"num_fewshot": 0,
}
(gen,) = oa_model.greedy_until(
[("The quick brown fox jumps over the lazy", request_args)]
)
assert gen == " dog"
logger.info([x[0] for x in vals])
targets = [
-34.848301606999996,
-47.148329679999996,
-45.44380149599999,
-5.285246016,
-133.97821690686004,
-321.2616693239001,
-658.0299524401041,
-34.848301606999996,
-7.525115,
]
for (pred, _), tgt in zip(vals, targets):
assert pred == pytest.approx(tgt, rel=1e-3)
@mock.patch("lm_eval.models.openai_completions.oa_completion", new=_mock_completion)
def test_openai_completions_perplexity():
set_seed()
if "OPENAI_API_SECRET_KEY" not in os.environ:
os.environ["OPENAI_API_SECRET_KEY"] = ""
oa_model = models.get_model_from_args_string(
model_api_name="openai", model_args="engine=ada"
)
test_string = "We study empirical scaling laws for language model performance on the cross-entropy loss."
perplexity = oa_model.loglikelihood_rolling([(test_string,)])[0]
tgt = -84.38819608
assert perplexity == pytest.approx(tgt, rel=1e-3)
# Hack: modify gpt3 to have shorter context length to induce rolling windows
with mock.patch.object(
models.openai_completions.OpenAICompletionsLM,
"max_length",
new_callable=mock.PropertyMock,
) as mock_max_length:
mock_max_length.return_value = 5
oa_model = models.get_model_from_args_string(
model_api_name="openai", model_args="engine=ada"
)
perplexity = oa_model.loglikelihood_rolling([(test_string,)])[0]
tgt = -101.81967209999999
assert perplexity == pytest.approx(tgt, rel=1e-3)
| [] |
2024-01-10 | Maxington20/chatbot-app-BE | helper_functions.py | from credentials import USERNAMES, API_KEY
from gpt_index import SimpleDirectoryReader, GPTListIndex, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
from langchain.chat_models import ChatOpenAI
import os
import time
os.environ["OPENAI_API_KEY"] = API_KEY
ConversationHistory = []
def construct_index(directory_path):
max_input_size = 4096
num_outputs = 512
max_chunk_overlap = 20
chunk_size_limit = 600
prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.7, model_name="gpt-3.5-turbo", max_tokens=num_outputs))
documents = SimpleDirectoryReader(directory_path).load_data()
index = GPTSimpleVectorIndex(documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper)
index.save_to_disk('index.json')
return index
def chatbot(input_text):
ConversationHistory.append({"role": "user", "content": input_text})
# concatenate the ConversationHistory into a single string
formatted_conversation = ""
for message in ConversationHistory:
formatted_conversation += f"{message['role']}: {message['content']}"
query_prompt = f"Given the conversation history: {formatted_conversation} and using only the information in the indexed documents as a primary source, provide a helpful response:"
index = GPTSimpleVectorIndex.load_from_disk('index.json')
response = index.query(query_prompt, response_mode="compact") # default, compact, and tree_summarize
ConversationHistory.append({"role": "assistant", "content": response.response})
return response.response
def add_document(user_name, document_name, document_content):
print(user_name, document_name, document_content)
# if user_name is empty or not in USERNAMES, return "You are not authorized to add documents!"
# else, write the document_content to a file in the docs directory
# then, construct the index
# finally, return "Document added successfully!"
if not user_name or user_name not in USERNAMES:
return "You are not authorized to add documents!"
else:
with open(f"docs/{document_name}.txt", "w") as f:
f.write(document_content)
construct_index("docs")
return "Document added successfully!"
def delete_document(user_name, document_name):
if user_name not in USERNAMES:
return "You are not authorized to delete documents!"
else:
try:
os.remove(f"docs/{document_name}.txt")
return "Document deleted successfully!"
except:
return "Document does not exist!"
def update_document_list():
document_list = []
for document in os.listdir("docs"):
document_list.append(document)
return document_list
if not os.path.exists("index.json"):
construct_index("docs") | [
"Given the conversation history: PLACEHOLDER and using only the information in the indexed documents as a primary source, provide a helpful response:"
] |
2024-01-10 | gaoyuoppa/nas-tools | app~plugins~modules~_autosignin~chdbits.py | import json
import os
import random
import re
from lxml import etree
from app.helper.openai_helper import OpenAiHelper
from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class CHDBits(_ISiteSigninHandler):
"""
彩虹岛签到
如果填写openai key则调用chatgpt获取答案
否则随机
"""
# 匹配的站点Url,每一个实现类都需要设置为自己的站点Url
site_url = "chdbits.co"
# 已签到
_sign_regex = ['今天已经签过到了']
# 签到成功,待补充
_success_regex = ['\\d+点魔力值']
# 存储正确的答案,后续可直接查
_answer_path = os.path.join(Config().get_temp_path(), "signin")
_answer_file = _answer_path + "/chdbits.json"
@classmethod
def match(cls, url):
"""
根据站点Url判断是否匹配当前站点签到类,大部分情况使用默认实现即可
:param url: 站点Url
:return: 是否匹配,如匹配则会调用该类的signin方法
"""
return True if StringUtils.url_equal(url, cls.site_url) else False
def signin(self, site_info: dict):
"""
执行签到操作
:param site_info: 站点信息,含有站点Url、站点Cookie、UA等信息
:return: 签到结果信息
"""
site = site_info.get("name")
site_cookie = site_info.get("cookie")
ua = site_info.get("ua")
proxy = Config().get_proxies() if site_info.get("proxy") else None
# 创建正确答案存储目录
if not os.path.exists(os.path.dirname(self._answer_file)):
os.makedirs(os.path.dirname(self._answer_file))
# 判断今日是否已签到
index_res = RequestUtils(cookies=site_cookie,
headers=ua,
proxies=proxy
).get_res(url='https://chdbits.co/bakatest.php')
if not index_res or index_res.status_code != 200:
self.error(f"签到失败,请检查站点连通性")
return False, f'【{site}】签到失败,请检查站点连通性'
if "login.php" in index_res.text:
self.error(f"签到失败,cookie失效")
return False, f'【{site}】签到失败,cookie失效'
sign_status = self.sign_in_result(html_res=index_res.text,
regexs=self._sign_regex)
if sign_status:
self.info(f"今日已签到")
return True, f'【{site}】今日已签到'
# 没有签到则解析html
html = etree.HTML(index_res.text)
if not html:
return False, f'【{site}】签到失败'
# 获取页面问题、答案
questionid = html.xpath("//input[@name='questionid']/@value")[0]
option_ids = html.xpath("//input[@name='choice[]']/@value")
option_values = html.xpath("//input[@name='choice[]']/following-sibling::text()")
question_str = html.xpath("//td[@class='text' and contains(text(),'请问:')]/text()")[0]
answers = list(zip(option_ids, option_values))
# 正则获取问题
match = re.search(r'请问:(.+)', question_str)
if match:
question_str = match.group(1)
self.debug(f"获取到签到问题 {question_str}")
else:
self.error(f"未获取到签到问题")
return False, f"【{site}】签到失败,未获取到签到问题"
# 查询已有答案
exits_answers = {}
try:
with open(self._answer_file, 'r') as f:
json_str = f.read()
exits_answers = json.loads(json_str)
# 查询本地本次验证码hash答案
question_answer = exits_answers[question_str]
# question_answer是数组
if not isinstance(question_answer, list):
question_answer = [question_answer]
# 本地存在本次hash对应的正确答案再遍历查询
choice = []
for q in question_answer:
for num, answer in answers:
if str(q) == str(num):
choice.append(int(q))
if len(choice) > 0:
# 签到
return self.__signin(questionid=questionid,
choice=choice,
site_cookie=site_cookie,
ua=ua,
proxy=proxy,
site=site)
except (FileNotFoundError, IOError, OSError) as e:
self.debug("查询本地已知答案失败,继续请求豆瓣查询")
# 正确答案,默认随机,如果gpt返回则用gpt返回的答案提交
choice = [option_ids[random.randint(0, len(option_ids) - 1)]]
# 组装gpt问题
gpt_options = "{\n" + ",\n".join([f"{num}:{value}" for num, value in answers]) + "\n}"
gpt_question = f"题目:{question_str}\n" \
f"选项:{gpt_options}"
self.debug(f"组装chatgpt问题 {gpt_question}")
# chatgpt获取答案
answer = OpenAiHelper().get_question_answer(question=gpt_question)
self.debug(f"chatpgt返回结果 {answer}")
# 处理chatgpt返回的答案信息
if answer is None:
self.warn(f"ChatGPT未启用, 开始随机签到")
# return f"【{site}】签到失败,ChatGPT未启用"
elif answer:
# 正则获取字符串中的数字
answer_nums = list(map(int, re.findall("\d+", answer)))
if not answer_nums:
self.warn(f"无法从chatgpt回复 {answer} 中获取答案, 将采用随机签到")
else:
choice = []
for answer in answer_nums:
# 如果返回的数字在option_ids范围内,则直接作为答案
if str(answer) in option_ids:
choice.append(int(answer))
self.info(f"chatgpt返回答案id {answer} 在签到选项 {option_ids} 中")
# 签到
return self.__signin(questionid=questionid,
choice=choice,
site_cookie=site_cookie,
ua=ua,
proxy=proxy,
site=site,
exits_answers=exits_answers,
question=question_str)
def __signin(self, questionid, choice, site, site_cookie, ua, proxy, exits_answers=None, question=None):
"""
签到请求
questionid: 450
choice[]: 8
choice[]: 4
usercomment: 此刻心情:无
submit: 提交
多选会有多个choice[]....
"""
data = {
'questionid': questionid,
'choice[]': choice[0] if len(choice) == 1 else choice,
'usercomment': '太难了!',
'wantskip': '不会'
}
self.debug(f"签到请求参数 {data}")
sign_res = RequestUtils(cookies=site_cookie,
headers=ua,
proxies=proxy
).post_res(url='https://chdbits.co/bakatest.php', data=data)
if not sign_res or sign_res.status_code != 200:
self.error(f"签到失败,签到接口请求失败")
return False, f'【{site}】签到失败,签到接口请求失败'
# 判断是否签到成功
sign_status = self.sign_in_result(html_res=sign_res.text,
regexs=self._success_regex)
if sign_status:
self.info(f"签到成功")
if exits_answers and question:
# 签到成功写入本地文件
self.__write_local_answer(exits_answers=exits_answers or {},
question=question,
answer=choice)
return True, f'【{site}】签到成功'
else:
sign_status = self.sign_in_result(html_res=sign_res.text,
regexs=self._sign_regex)
if sign_status:
self.info(f"今日已签到")
return True, f'【{site}】今日已签到'
self.error(f"签到失败,请到页面查看")
return False, f'【{site}】签到失败,请到页面查看'
def __write_local_answer(self, exits_answers, question, answer):
"""
签到成功写入本地文件
"""
try:
exits_answers[question] = answer
# 序列化数据
formatted_data = json.dumps(exits_answers, indent=4)
with open(self._answer_file, 'w') as f:
f.write(formatted_data)
except (FileNotFoundError, IOError, OSError) as e:
self.debug("签到成功写入本地文件失败")
| [] |
2024-01-10 | jefromyers/dutc | exercises~ai_ttt_bot.py | import logging
from configparser import ConfigParser
from dataclasses import dataclass, field
from datetime import datetime
from socket import AF_INET, SOCK_STREAM, socket
from openai import OpenAI
from rich.logging import RichHandler
level = logging.DEBUG
logging.basicConfig(
level=level,
format="%(message)s",
datefmt="[%X]",
handlers=[RichHandler(rich_tracebacks=True)],
)
logging.getLogger("openai").setLevel(logging.CRITICAL)
logging.getLogger("httpcore").setLevel(logging.CRITICAL)
logging.getLogger("httpx").setLevel(logging.CRITICAL)
logging.getLogger("ssl").setLevel(logging.CRITICAL)
logger = logging.getLogger(__name__)
@dataclass
class Message:
msg: str
recieved: datetime
action: str | None = None
@classmethod
def from_bytes(cls, msg: bytes):
return cls(msg=msg.decode(), recieved=datetime.now())
def __str__(self):
return self.msg
@dataclass
class Bot:
gpt: OpenAI
conn: socket
name: str = "Boty McBotterson"
messages: list = field(default_factory=list)
_model: str = "gpt-4"
def _ask_gpt(self, msg: Message) -> str | None:
answer = None
prompt = f"""
Your name is: {self.name} and your a tic tac toe bot, you're playing at the letter a on the board
You've just been given the following message:
{msg.msg}
What is your action?
Your goal is to get 3 a's in a row, column, or diagonal
Please respond with Action: <action>
Doing nothing is a valid action, in that case please resond: Action: None
Make sure if its not your turn to respond with Action: None
The board positions are numbered 1 to 9 from top left to bottom right
1 | 2 | 3
-----------
4 | 5 | 6
-----------
7 | 8 | 9
Do not pick a position that is already taken by a or b
Example:
What is your name?
Action: {self.name}
Example:
a | | b
-----------
| b |
-----------
| |
Action: 7
"""
completion = self.gpt.chat.completions.create(
messages=[{"role": "user", "content": prompt}],
model=self._model,
# response_format={"type": "json_object"},
)
content = completion.choices[0].message.content
if content.startswith("Action:"):
answer = content.split("Action:")[1].strip()
if answer.lower() == "none":
answer = None
return answer
def decide_action(self) -> str | None:
# We need to decide what to do with the message
logger.info(f"Received:\n{self.messages[-1]}\n")
action = self._ask_gpt(self.messages[-1])
# if action:
# listen = input(
# f"Bot says: {action}\nShould we follow instructions (y or alternative)?: "
# )
# if listen.lower() != "y":
# action = listen
return action
@classmethod
def from_socket(cls, conn: socket, name: str, gpt: OpenAI):
return cls(conn=conn, name=name, gpt=gpt)
def play(self):
while True:
# XXX: Blocks until we get a message
msg = Message.from_bytes(self.conn.recv(1024))
if msg:
self.messages.append(msg)
action = self.decide_action()
if action:
self.conn.send(action.encode())
if __name__ == "__main__":
host = "127.0.0.1"
port = 4227
name = "Mr Roboty"
config = ConfigParser()
config.read("./data/ttt.config")
api_key = config["TTT"]["API_KEY"]
organization = config["TTT"]["ORGANIZATION"]
gpt = OpenAI(api_key=api_key, organization=organization)
client = socket(AF_INET, SOCK_STREAM)
client.connect((host, port))
bot = Bot.from_socket(client, name, gpt)
bot.play()
client.close()
| [] |
2024-01-10 | APS-coder/langchain | templates~rag-mongo~ingest.py | import os
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import MongoDBAtlasVectorSearch
from pymongo import MongoClient
MONGO_URI = os.environ["MONGO_URI"]
# Note that if you change this, you also need to change it in `rag_mongo/chain.py`
DB_NAME = "langchain-test-2"
COLLECTION_NAME = "test"
ATLAS_VECTOR_SEARCH_INDEX_NAME = "default"
EMBEDDING_FIELD_NAME = "embedding"
client = MongoClient(MONGO_URI)
db = client[DB_NAME]
MONGODB_COLLECTION = db[COLLECTION_NAME]
if __name__ == "__main__":
# Load docs
loader = PyPDFLoader("https://arxiv.org/pdf/2303.08774.pdf")
data = loader.load()
# Split docs
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
docs = text_splitter.split_documents(data)
# Insert the documents in MongoDB Atlas Vector Search
_ = MongoDBAtlasVectorSearch.from_documents(
documents=docs,
embedding=OpenAIEmbeddings(disallowed_special=()),
collection=MONGODB_COLLECTION,
index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME,
)
| [] |
2024-01-10 | APS-coder/langchain | libs~langchain~langchain~document_loaders~parsers~language~cobol.py | import re
from typing import Callable, List
from langchain.document_loaders.parsers.language.code_segmenter import CodeSegmenter
class CobolSegmenter(CodeSegmenter):
"""Code segmenter for `COBOL`."""
PARAGRAPH_PATTERN = re.compile(r"^[A-Z0-9\-]+(\s+.*)?\.$", re.IGNORECASE)
DIVISION_PATTERN = re.compile(
r"^\s*(IDENTIFICATION|DATA|PROCEDURE|ENVIRONMENT)\s+DIVISION.*$", re.IGNORECASE
)
SECTION_PATTERN = re.compile(r"^\s*[A-Z0-9\-]+\s+SECTION.$", re.IGNORECASE)
def __init__(self, code: str):
super().__init__(code)
self.source_lines: List[str] = self.code.splitlines()
def is_valid(self) -> bool:
# Identify presence of any division to validate COBOL code
return any(self.DIVISION_PATTERN.match(line) for line in self.source_lines)
def _extract_code(self, start_idx: int, end_idx: int) -> str:
return "\n".join(self.source_lines[start_idx:end_idx]).rstrip("\n")
def _is_relevant_code(self, line: str) -> bool:
"""Check if a line is part of the procedure division or a relevant section."""
if "PROCEDURE DIVISION" in line.upper():
return True
# Add additional conditions for relevant sections if needed
return False
def _process_lines(self, func: Callable) -> List[str]:
"""A generic function to process COBOL lines based on provided func."""
elements: List[str] = []
start_idx = None
inside_relevant_section = False
for i, line in enumerate(self.source_lines):
if self._is_relevant_code(line):
inside_relevant_section = True
if inside_relevant_section and (
self.PARAGRAPH_PATTERN.match(line.strip().split(" ")[0])
or self.SECTION_PATTERN.match(line.strip())
):
if start_idx is not None:
func(elements, start_idx, i)
start_idx = i
# Handle the last element if exists
if start_idx is not None:
func(elements, start_idx, len(self.source_lines))
return elements
def extract_functions_classes(self) -> List[str]:
def extract_func(elements: List[str], start_idx: int, end_idx: int) -> None:
elements.append(self._extract_code(start_idx, end_idx))
return self._process_lines(extract_func)
def simplify_code(self) -> str:
simplified_lines: List[str] = []
inside_relevant_section = False
omitted_code_added = (
False # To track if "* OMITTED CODE *" has been added after the last header
)
for line in self.source_lines:
is_header = (
"PROCEDURE DIVISION" in line
or "DATA DIVISION" in line
or "IDENTIFICATION DIVISION" in line
or self.PARAGRAPH_PATTERN.match(line.strip().split(" ")[0])
or self.SECTION_PATTERN.match(line.strip())
)
if is_header:
inside_relevant_section = True
# Reset the flag since we're entering a new section/division or
# paragraph
omitted_code_added = False
if inside_relevant_section:
if is_header:
# Add header and reset the omitted code added flag
simplified_lines.append(line)
elif not omitted_code_added:
# Add omitted code comment only if it hasn't been added directly
# after the last header
simplified_lines.append("* OMITTED CODE *")
omitted_code_added = True
return "\n".join(simplified_lines)
| [] |
2024-01-10 | APS-coder/langchain | libs~langchain~langchain~embeddings~open_clip.py | from typing import Any, Dict, List
import numpy as np
from langchain.pydantic_v1 import BaseModel, root_validator
from langchain.schema.embeddings import Embeddings
class OpenCLIPEmbeddings(BaseModel, Embeddings):
model: Any
preprocess: Any
tokenizer: Any
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that open_clip and torch libraries are installed."""
try:
import open_clip
model_name = "ViT-B-32"
checkpoint = "laion2b_s34b_b79k"
model, _, preprocess = open_clip.create_model_and_transforms(
model_name=model_name, pretrained=checkpoint
)
tokenizer = open_clip.get_tokenizer(model_name)
values["model"] = model
values["preprocess"] = preprocess
values["tokenizer"] = tokenizer
except ImportError:
raise ImportError(
"Please ensure both open_clip and torch libraries are installed. "
"pip install open_clip_torch torch"
)
return values
def embed_documents(self, texts: List[str]) -> List[List[float]]:
text_features = [
self.model.encode_text(self.tokenizer(text)).tolist() for text in texts
]
return text_features
def embed_query(self, text: str) -> List[float]:
return self.embed_documents([text])[0]
def embed_image(self, images: List[np.ndarray]) -> List[List[float]]:
try:
from PIL import Image as _PILImage
except ImportError:
raise ImportError("Please install the PIL library: pip install pillow")
pil_images = [_PILImage.fromarray(image) for image in images]
image_features = [
self.model.encode_image(self.preprocess(pil_image).unsqueeze(0)).tolist()
for pil_image in pil_images
]
return image_features
| [] |
2024-01-10 | APS-coder/langchain | templates~rag-timescale-hybrid-search-time~rag_timescale_hybrid_search_time~load_sample_dataset.py | import os
import tempfile
from datetime import datetime, timedelta
import requests
from langchain.document_loaders import JSONLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores.timescalevector import TimescaleVector
from timescale_vector import client
def parse_date(date_string: str) -> datetime:
if date_string is None:
return None
time_format = "%a %b %d %H:%M:%S %Y %z"
return datetime.strptime(date_string, time_format)
def extract_metadata(record: dict, metadata: dict) -> dict:
dt = parse_date(record["date"])
metadata["id"] = str(client.uuid_from_time(dt))
if dt is not None:
metadata["date"] = dt.isoformat()
else:
metadata["date"] = None
metadata["author"] = record["author"]
metadata["commit_hash"] = record["commit"]
return metadata
def load_ts_git_dataset(
service_url,
collection_name="timescale_commits",
num_records: int = 500,
partition_interval=timedelta(days=7),
):
json_url = "https://s3.amazonaws.com/assets.timescale.com/ai/ts_git_log.json"
tmp_file = "ts_git_log.json"
temp_dir = tempfile.gettempdir()
json_file_path = os.path.join(temp_dir, tmp_file)
if not os.path.exists(json_file_path):
response = requests.get(json_url)
if response.status_code == 200:
with open(json_file_path, "w") as json_file:
json_file.write(response.text)
else:
print(f"Failed to download JSON file. Status code: {response.status_code}")
loader = JSONLoader(
file_path=json_file_path,
jq_schema=".commit_history[]",
text_content=False,
metadata_func=extract_metadata,
)
documents = loader.load()
# Remove documents with None dates
documents = [doc for doc in documents if doc.metadata["date"] is not None]
if num_records > 0:
documents = documents[:num_records]
# Split the documents into chunks for embedding
text_splitter = CharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
# Create a Timescale Vector instance from the collection of documents
TimescaleVector.from_documents(
embedding=embeddings,
ids=[doc.metadata["id"] for doc in docs],
documents=docs,
collection_name=collection_name,
service_url=service_url,
time_partition_interval=partition_interval,
)
| [] |
2024-01-10 | APS-coder/langchain | libs~cli~langchain_cli~namespaces~template.py | """
Develop installable templates.
"""
import re
import shutil
import subprocess
from pathlib import Path
from typing import Optional
import typer
from langserve.packages import get_langserve_export
from typing_extensions import Annotated
from langchain_cli.utils.packages import get_package_root
package_cli = typer.Typer(no_args_is_help=True, add_completion=False)
@package_cli.command()
def new(
name: Annotated[str, typer.Argument(help="The name of the folder to create")],
with_poetry: Annotated[
bool,
typer.Option("--with-poetry/--no-poetry", help="Don't run poetry install"),
] = False,
):
"""
Creates a new template package.
"""
computed_name = name if name != "." else Path.cwd().name
destination_dir = Path.cwd() / name if name != "." else Path.cwd()
# copy over template from ../package_template
project_template_dir = Path(__file__).parents[1] / "package_template"
shutil.copytree(project_template_dir, destination_dir, dirs_exist_ok=name == ".")
package_name_split = computed_name.split("/")
package_name = (
package_name_split[-2]
if len(package_name_split) > 1 and package_name_split[-1] == ""
else package_name_split[-1]
)
module_name = re.sub(
r"[^a-zA-Z0-9_]",
"_",
package_name,
)
# generate app route code
chain_name = f"{module_name}_chain"
app_route_code = (
f"from {module_name} import chain as {chain_name}\n\n"
f'add_routes(app, {chain_name}, path="/{package_name}")'
)
# replace template strings
pyproject = destination_dir / "pyproject.toml"
pyproject_contents = pyproject.read_text()
pyproject.write_text(
pyproject_contents.replace("__package_name__", package_name).replace(
"__module_name__", module_name
)
)
# move module folder
package_dir = destination_dir / module_name
shutil.move(destination_dir / "package_template", package_dir)
# update init
init = package_dir / "__init__.py"
init_contents = init.read_text()
init.write_text(init_contents.replace("__module_name__", module_name))
# replace readme
readme = destination_dir / "README.md"
readme_contents = readme.read_text()
readme.write_text(
readme_contents.replace("__package_name__", package_name).replace(
"__app_route_code__", app_route_code
)
)
# poetry install
if with_poetry:
subprocess.run(["poetry", "install"], cwd=destination_dir)
@package_cli.command()
def serve(
*,
port: Annotated[
Optional[int], typer.Option(help="The port to run the server on")
] = None,
host: Annotated[
Optional[str], typer.Option(help="The host to run the server on")
] = None,
configurable: Annotated[
bool,
typer.Option(
"--configurable/--no-configurable",
help="Whether to include a configurable route",
),
] = True,
) -> None:
"""
Starts a demo app for this template.
"""
# load pyproject.toml
project_dir = get_package_root()
pyproject = project_dir / "pyproject.toml"
# get langserve export - throws KeyError if invalid
get_langserve_export(pyproject)
host_str = host if host is not None else "127.0.0.1"
script = (
"langchain_cli.dev_scripts:create_demo_server"
if not configurable
else "langchain_cli.dev_scripts:create_demo_server_configurable"
)
import uvicorn
uvicorn.run(
script,
factory=True,
reload=True,
port=port if port is not None else 8000,
host=host_str,
)
| [
"package_template"
] |
2024-01-10 | APS-coder/langchain | libs~langchain~tests~unit_tests~embeddings~test_imports.py | from langchain.embeddings import __all__
EXPECTED_ALL = [
"OpenAIEmbeddings",
"CacheBackedEmbeddings",
"ClarifaiEmbeddings",
"CohereEmbeddings",
"ElasticsearchEmbeddings",
"HuggingFaceEmbeddings",
"HuggingFaceInferenceAPIEmbeddings",
"GradientEmbeddings",
"JinaEmbeddings",
"LlamaCppEmbeddings",
"HuggingFaceHubEmbeddings",
"MlflowAIGatewayEmbeddings",
"ModelScopeEmbeddings",
"TensorflowHubEmbeddings",
"SagemakerEndpointEmbeddings",
"HuggingFaceInstructEmbeddings",
"MosaicMLInstructorEmbeddings",
"SelfHostedEmbeddings",
"SelfHostedHuggingFaceEmbeddings",
"SelfHostedHuggingFaceInstructEmbeddings",
"FakeEmbeddings",
"DeterministicFakeEmbedding",
"AlephAlphaAsymmetricSemanticEmbedding",
"AlephAlphaSymmetricSemanticEmbedding",
"SentenceTransformerEmbeddings",
"GooglePalmEmbeddings",
"MiniMaxEmbeddings",
"VertexAIEmbeddings",
"BedrockEmbeddings",
"DeepInfraEmbeddings",
"EdenAiEmbeddings",
"DashScopeEmbeddings",
"EmbaasEmbeddings",
"OctoAIEmbeddings",
"SpacyEmbeddings",
"NLPCloudEmbeddings",
"GPT4AllEmbeddings",
"XinferenceEmbeddings",
"LocalAIEmbeddings",
"AwaEmbeddings",
"HuggingFaceBgeEmbeddings",
"ErnieEmbeddings",
"JavelinAIGatewayEmbeddings",
"OllamaEmbeddings",
"QianfanEmbeddingsEndpoint",
"JohnSnowLabsEmbeddings",
"VoyageEmbeddings",
"OpenCLIPEmbeddings",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
| [] |
2024-01-10 | APS-coder/langchain | libs~langchain~langchain~callbacks~manager.py | from __future__ import annotations
import asyncio
import functools
import logging
import os
import uuid
from concurrent.futures import ThreadPoolExecutor
from contextlib import asynccontextmanager, contextmanager
from contextvars import ContextVar
from typing import (
TYPE_CHECKING,
Any,
AsyncGenerator,
Coroutine,
Dict,
Generator,
List,
Optional,
Sequence,
Type,
TypeVar,
Union,
cast,
)
from uuid import UUID
from langsmith.run_helpers import get_run_tree_context
from tenacity import RetryCallState
from langchain.callbacks.base import (
BaseCallbackHandler,
BaseCallbackManager,
Callbacks,
ChainManagerMixin,
LLMManagerMixin,
RetrieverManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
from langchain.callbacks.openai_info import OpenAICallbackHandler
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.tracers import run_collector
from langchain.callbacks.tracers.langchain import (
LangChainTracer,
)
from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1
from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler
from langchain.callbacks.tracers.wandb import WandbTracer
from langchain.schema import (
AgentAction,
AgentFinish,
Document,
LLMResult,
)
from langchain.schema.messages import BaseMessage, get_buffer_string
from langchain.schema.output import ChatGenerationChunk, GenerationChunk
if TYPE_CHECKING:
from langsmith import Client as LangSmithClient
logger = logging.getLogger(__name__)
openai_callback_var: ContextVar[Optional[OpenAICallbackHandler]] = ContextVar(
"openai_callback", default=None
)
tracing_callback_var: ContextVar[Optional[LangChainTracerV1]] = ContextVar( # noqa: E501
"tracing_callback", default=None
)
wandb_tracing_callback_var: ContextVar[Optional[WandbTracer]] = ContextVar( # noqa: E501
"tracing_wandb_callback", default=None
)
tracing_v2_callback_var: ContextVar[Optional[LangChainTracer]] = ContextVar( # noqa: E501
"tracing_callback_v2", default=None
)
run_collector_var: ContextVar[
Optional[run_collector.RunCollectorCallbackHandler]
] = ContextVar( # noqa: E501
"run_collector", default=None
)
def _get_debug() -> bool:
from langchain.globals import get_debug
return get_debug()
@contextmanager
def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]:
"""Get the OpenAI callback handler in a context manager.
which conveniently exposes token and cost information.
Returns:
OpenAICallbackHandler: The OpenAI callback handler.
Example:
>>> with get_openai_callback() as cb:
... # Use the OpenAI callback handler
"""
cb = OpenAICallbackHandler()
openai_callback_var.set(cb)
yield cb
openai_callback_var.set(None)
@contextmanager
def tracing_enabled(
session_name: str = "default",
) -> Generator[TracerSessionV1, None, None]:
"""Get the Deprecated LangChainTracer in a context manager.
Args:
session_name (str, optional): The name of the session.
Defaults to "default".
Returns:
TracerSessionV1: The LangChainTracer session.
Example:
>>> with tracing_enabled() as session:
... # Use the LangChainTracer session
"""
cb = LangChainTracerV1()
session = cast(TracerSessionV1, cb.load_session(session_name))
try:
tracing_callback_var.set(cb)
yield session
finally:
tracing_callback_var.set(None)
@contextmanager
def wandb_tracing_enabled(
session_name: str = "default",
) -> Generator[None, None, None]:
"""Get the WandbTracer in a context manager.
Args:
session_name (str, optional): The name of the session.
Defaults to "default".
Returns:
None
Example:
>>> with wandb_tracing_enabled() as session:
... # Use the WandbTracer session
"""
cb = WandbTracer()
wandb_tracing_callback_var.set(cb)
yield None
wandb_tracing_callback_var.set(None)
@contextmanager
def tracing_v2_enabled(
project_name: Optional[str] = None,
*,
example_id: Optional[Union[str, UUID]] = None,
tags: Optional[List[str]] = None,
client: Optional[LangSmithClient] = None,
) -> Generator[LangChainTracer, None, None]:
"""Instruct LangChain to log all runs in context to LangSmith.
Args:
project_name (str, optional): The name of the project.
Defaults to "default".
example_id (str or UUID, optional): The ID of the example.
Defaults to None.
tags (List[str], optional): The tags to add to the run.
Defaults to None.
Returns:
None
Example:
>>> with tracing_v2_enabled():
... # LangChain code will automatically be traced
You can use this to fetch the LangSmith run URL:
>>> with tracing_v2_enabled() as cb:
... chain.invoke("foo")
... run_url = cb.get_run_url()
"""
if isinstance(example_id, str):
example_id = UUID(example_id)
cb = LangChainTracer(
example_id=example_id,
project_name=project_name,
tags=tags,
client=client,
)
try:
tracing_v2_callback_var.set(cb)
yield cb
finally:
tracing_v2_callback_var.set(None)
@contextmanager
def collect_runs() -> Generator[run_collector.RunCollectorCallbackHandler, None, None]:
"""Collect all run traces in context.
Returns:
run_collector.RunCollectorCallbackHandler: The run collector callback handler.
Example:
>>> with collect_runs() as runs_cb:
chain.invoke("foo")
run_id = runs_cb.traced_runs[0].id
"""
cb = run_collector.RunCollectorCallbackHandler()
run_collector_var.set(cb)
yield cb
run_collector_var.set(None)
def _get_trace_callbacks(
project_name: Optional[str] = None,
example_id: Optional[Union[str, UUID]] = None,
callback_manager: Optional[Union[CallbackManager, AsyncCallbackManager]] = None,
) -> Callbacks:
if _tracing_v2_is_enabled():
project_name_ = project_name or _get_tracer_project()
tracer = tracing_v2_callback_var.get() or LangChainTracer(
project_name=project_name_,
example_id=example_id,
)
if callback_manager is None:
cb = cast(Callbacks, [tracer])
else:
if not any(
isinstance(handler, LangChainTracer)
for handler in callback_manager.handlers
):
callback_manager.add_handler(tracer, True)
# If it already has a LangChainTracer, we don't need to add another one.
# this would likely mess up the trace hierarchy.
cb = callback_manager
else:
cb = None
return cb
@contextmanager
def trace_as_chain_group(
group_name: str,
callback_manager: Optional[CallbackManager] = None,
*,
inputs: Optional[Dict[str, Any]] = None,
project_name: Optional[str] = None,
example_id: Optional[Union[str, UUID]] = None,
run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
) -> Generator[CallbackManagerForChainGroup, None, None]:
"""Get a callback manager for a chain group in a context manager.
Useful for grouping different calls together as a single run even if
they aren't composed in a single chain.
Args:
group_name (str): The name of the chain group.
callback_manager (CallbackManager, optional): The callback manager to use.
inputs (Dict[str, Any], optional): The inputs to the chain group.
project_name (str, optional): The name of the project.
Defaults to None.
example_id (str or UUID, optional): The ID of the example.
Defaults to None.
run_id (UUID, optional): The ID of the run.
tags (List[str], optional): The inheritable tags to apply to all runs.
Defaults to None.
Note: must have LANGCHAIN_TRACING_V2 env var set to true to see the trace in LangSmith.
Returns:
CallbackManagerForChainGroup: The callback manager for the chain group.
Example:
.. code-block:: python
llm_input = "Foo"
with trace_as_chain_group("group_name", inputs={"input": llm_input}) as manager:
# Use the callback manager for the chain group
res = llm.predict(llm_input, callbacks=manager)
manager.on_chain_end({"output": res})
""" # noqa: E501
cb = _get_trace_callbacks(
project_name, example_id, callback_manager=callback_manager
)
cm = CallbackManager.configure(
inheritable_callbacks=cb,
inheritable_tags=tags,
)
run_manager = cm.on_chain_start({"name": group_name}, inputs or {}, run_id=run_id)
child_cm = run_manager.get_child()
group_cm = CallbackManagerForChainGroup(
child_cm.handlers,
child_cm.inheritable_handlers,
child_cm.parent_run_id,
parent_run_manager=run_manager,
tags=child_cm.tags,
inheritable_tags=child_cm.inheritable_tags,
metadata=child_cm.metadata,
inheritable_metadata=child_cm.inheritable_metadata,
)
try:
yield group_cm
except Exception as e:
if not group_cm.ended:
run_manager.on_chain_error(e)
raise e
else:
if not group_cm.ended:
run_manager.on_chain_end({})
@asynccontextmanager
async def atrace_as_chain_group(
group_name: str,
callback_manager: Optional[AsyncCallbackManager] = None,
*,
inputs: Optional[Dict[str, Any]] = None,
project_name: Optional[str] = None,
example_id: Optional[Union[str, UUID]] = None,
run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
) -> AsyncGenerator[AsyncCallbackManagerForChainGroup, None]:
"""Get an async callback manager for a chain group in a context manager.
Useful for grouping different async calls together as a single run even if
they aren't composed in a single chain.
Args:
group_name (str): The name of the chain group.
callback_manager (AsyncCallbackManager, optional): The async callback manager to use,
which manages tracing and other callback behavior.
project_name (str, optional): The name of the project.
Defaults to None.
example_id (str or UUID, optional): The ID of the example.
Defaults to None.
run_id (UUID, optional): The ID of the run.
tags (List[str], optional): The inheritable tags to apply to all runs.
Defaults to None.
Returns:
AsyncCallbackManager: The async callback manager for the chain group.
Note: must have LANGCHAIN_TRACING_V2 env var set to true to see the trace in LangSmith.
Example:
.. code-block:: python
llm_input = "Foo"
async with atrace_as_chain_group("group_name", inputs={"input": llm_input}) as manager:
# Use the async callback manager for the chain group
res = await llm.apredict(llm_input, callbacks=manager)
await manager.on_chain_end({"output": res})
""" # noqa: E501
cb = _get_trace_callbacks(
project_name, example_id, callback_manager=callback_manager
)
cm = AsyncCallbackManager.configure(inheritable_callbacks=cb, inheritable_tags=tags)
run_manager = await cm.on_chain_start(
{"name": group_name}, inputs or {}, run_id=run_id
)
child_cm = run_manager.get_child()
group_cm = AsyncCallbackManagerForChainGroup(
child_cm.handlers,
child_cm.inheritable_handlers,
child_cm.parent_run_id,
parent_run_manager=run_manager,
tags=child_cm.tags,
inheritable_tags=child_cm.inheritable_tags,
metadata=child_cm.metadata,
inheritable_metadata=child_cm.inheritable_metadata,
)
try:
yield group_cm
except Exception as e:
if not group_cm.ended:
await run_manager.on_chain_error(e)
raise e
else:
if not group_cm.ended:
await run_manager.on_chain_end({})
def handle_event(
handlers: List[BaseCallbackHandler],
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
"""Generic event handler for CallbackManager.
Note: This function is used by langserve to handle events.
Args:
handlers: The list of handlers that will handle the event
event_name: The name of the event (e.g., "on_llm_start")
ignore_condition_name: Name of the attribute defined on handler
that if True will cause the handler to be skipped for the given event
*args: The arguments to pass to the event handler
**kwargs: The keyword arguments to pass to the event handler
"""
coros: List[Coroutine[Any, Any, Any]] = []
try:
message_strings: Optional[List[str]] = None
for handler in handlers:
try:
if ignore_condition_name is None or not getattr(
handler, ignore_condition_name
):
event = getattr(handler, event_name)(*args, **kwargs)
if asyncio.iscoroutine(event):
coros.append(event)
except NotImplementedError as e:
if event_name == "on_chat_model_start":
if message_strings is None:
message_strings = [get_buffer_string(m) for m in args[1]]
handle_event(
[handler],
"on_llm_start",
"ignore_llm",
args[0],
message_strings,
*args[2:],
**kwargs,
)
else:
handler_name = handler.__class__.__name__
logger.warning(
f"NotImplementedError in {handler_name}.{event_name}"
f" callback: {repr(e)}"
)
except Exception as e:
logger.warning(
f"Error in {handler.__class__.__name__}.{event_name} callback:"
f" {repr(e)}"
)
if handler.raise_error:
raise e
finally:
if coros:
try:
# Raises RuntimeError if there is no current event loop.
asyncio.get_running_loop()
loop_running = True
except RuntimeError:
loop_running = False
if loop_running:
# If we try to submit this coroutine to the running loop
# we end up in a deadlock, as we'd have gotten here from a
# running coroutine, which we cannot interrupt to run this one.
# The solution is to create a new loop in a new thread.
with ThreadPoolExecutor(1) as executor:
executor.submit(_run_coros, coros).result()
else:
_run_coros(coros)
def _run_coros(coros: List[Coroutine[Any, Any, Any]]) -> None:
if hasattr(asyncio, "Runner"):
# Python 3.11+
# Run the coroutines in a new event loop, taking care to
# - install signal handlers
# - run pending tasks scheduled by `coros`
# - close asyncgens and executors
# - close the loop
with asyncio.Runner() as runner:
# Run the coroutine, get the result
for coro in coros:
runner.run(coro)
# Run pending tasks scheduled by coros until they are all done
while pending := asyncio.all_tasks(runner.get_loop()):
runner.run(asyncio.wait(pending))
else:
# Before Python 3.11 we need to run each coroutine in a new event loop
# as the Runner api is not available.
for coro in coros:
asyncio.run(coro)
async def _ahandle_event_for_handler(
handler: BaseCallbackHandler,
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
try:
if ignore_condition_name is None or not getattr(handler, ignore_condition_name):
event = getattr(handler, event_name)
if asyncio.iscoroutinefunction(event):
await event(*args, **kwargs)
else:
if handler.run_inline:
event(*args, **kwargs)
else:
await asyncio.get_event_loop().run_in_executor(
None, functools.partial(event, *args, **kwargs)
)
except NotImplementedError as e:
if event_name == "on_chat_model_start":
message_strings = [get_buffer_string(m) for m in args[1]]
await _ahandle_event_for_handler(
handler,
"on_llm_start",
"ignore_llm",
args[0],
message_strings,
*args[2:],
**kwargs,
)
else:
logger.warning(
f"NotImplementedError in {handler.__class__.__name__}.{event_name}"
f" callback: {repr(e)}"
)
except Exception as e:
logger.warning(
f"Error in {handler.__class__.__name__}.{event_name} callback:"
f" {repr(e)}"
)
if handler.raise_error:
raise e
async def ahandle_event(
handlers: List[BaseCallbackHandler],
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
"""Generic event handler for AsyncCallbackManager.
Note: This function is used by langserve to handle events.
Args:
handlers: The list of handlers that will handle the event
event_name: The name of the event (e.g., "on_llm_start")
ignore_condition_name: Name of the attribute defined on handler
that if True will cause the handler to be skipped for the given event
*args: The arguments to pass to the event handler
**kwargs: The keyword arguments to pass to the event handler
"""
for handler in [h for h in handlers if h.run_inline]:
await _ahandle_event_for_handler(
handler, event_name, ignore_condition_name, *args, **kwargs
)
await asyncio.gather(
*(
_ahandle_event_for_handler(
handler, event_name, ignore_condition_name, *args, **kwargs
)
for handler in handlers
if not handler.run_inline
)
)
BRM = TypeVar("BRM", bound="BaseRunManager")
class BaseRunManager(RunManagerMixin):
"""Base class for run manager (a bound callback manager)."""
def __init__(
self,
*,
run_id: UUID,
handlers: List[BaseCallbackHandler],
inheritable_handlers: List[BaseCallbackHandler],
parent_run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
inheritable_tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
inheritable_metadata: Optional[Dict[str, Any]] = None,
) -> None:
"""Initialize the run manager.
Args:
run_id (UUID): The ID of the run.
handlers (List[BaseCallbackHandler]): The list of handlers.
inheritable_handlers (List[BaseCallbackHandler]):
The list of inheritable handlers.
parent_run_id (UUID, optional): The ID of the parent run.
Defaults to None.
tags (Optional[List[str]]): The list of tags.
inheritable_tags (Optional[List[str]]): The list of inheritable tags.
metadata (Optional[Dict[str, Any]]): The metadata.
inheritable_metadata (Optional[Dict[str, Any]]): The inheritable metadata.
"""
self.run_id = run_id
self.handlers = handlers
self.inheritable_handlers = inheritable_handlers
self.parent_run_id = parent_run_id
self.tags = tags or []
self.inheritable_tags = inheritable_tags or []
self.metadata = metadata or {}
self.inheritable_metadata = inheritable_metadata or {}
@classmethod
def get_noop_manager(cls: Type[BRM]) -> BRM:
"""Return a manager that doesn't perform any operations.
Returns:
BaseRunManager: The noop manager.
"""
return cls(
run_id=uuid.uuid4(),
handlers=[],
inheritable_handlers=[],
tags=[],
inheritable_tags=[],
metadata={},
inheritable_metadata={},
)
class RunManager(BaseRunManager):
"""Sync Run Manager."""
def on_text(
self,
text: str,
**kwargs: Any,
) -> Any:
"""Run when text is received.
Args:
text (str): The received text.
Returns:
Any: The result of the callback.
"""
handle_event(
self.handlers,
"on_text",
None,
text,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_retry(
self,
retry_state: RetryCallState,
**kwargs: Any,
) -> None:
handle_event(
self.handlers,
"on_retry",
"ignore_retry",
retry_state,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class ParentRunManager(RunManager):
"""Sync Parent Run Manager."""
def get_child(self, tag: Optional[str] = None) -> CallbackManager:
"""Get a child callback manager.
Args:
tag (str, optional): The tag for the child callback manager.
Defaults to None.
Returns:
CallbackManager: The child callback manager.
"""
manager = CallbackManager(handlers=[], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
manager.add_tags(self.inheritable_tags)
manager.add_metadata(self.inheritable_metadata)
if tag is not None:
manager.add_tags([tag], False)
return manager
class AsyncRunManager(BaseRunManager):
"""Async Run Manager."""
async def on_text(
self,
text: str,
**kwargs: Any,
) -> Any:
"""Run when text is received.
Args:
text (str): The received text.
Returns:
Any: The result of the callback.
"""
await ahandle_event(
self.handlers,
"on_text",
None,
text,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_retry(
self,
retry_state: RetryCallState,
**kwargs: Any,
) -> None:
await ahandle_event(
self.handlers,
"on_retry",
"ignore_retry",
retry_state,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncParentRunManager(AsyncRunManager):
"""Async Parent Run Manager."""
def get_child(self, tag: Optional[str] = None) -> AsyncCallbackManager:
"""Get a child callback manager.
Args:
tag (str, optional): The tag for the child callback manager.
Defaults to None.
Returns:
AsyncCallbackManager: The child callback manager.
"""
manager = AsyncCallbackManager(handlers=[], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
manager.add_tags(self.inheritable_tags)
manager.add_metadata(self.inheritable_metadata)
if tag is not None:
manager.add_tags([tag], False)
return manager
class CallbackManagerForLLMRun(RunManager, LLMManagerMixin):
"""Callback manager for LLM run."""
def on_llm_new_token(
self,
token: str,
*,
chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
**kwargs: Any,
) -> None:
"""Run when LLM generates a new token.
Args:
token (str): The new token.
"""
handle_event(
self.handlers,
"on_llm_new_token",
"ignore_llm",
token=token,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
chunk=chunk,
**kwargs,
)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running.
Args:
response (LLMResult): The LLM result.
"""
handle_event(
self.handlers,
"on_llm_end",
"ignore_llm",
response,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_llm_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when LLM errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
handle_event(
self.handlers,
"on_llm_error",
"ignore_llm",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin):
"""Async callback manager for LLM run."""
async def on_llm_new_token(
self,
token: str,
*,
chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
**kwargs: Any,
) -> None:
"""Run when LLM generates a new token.
Args:
token (str): The new token.
"""
await ahandle_event(
self.handlers,
"on_llm_new_token",
"ignore_llm",
token,
chunk=chunk,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running.
Args:
response (LLMResult): The LLM result.
"""
await ahandle_event(
self.handlers,
"on_llm_end",
"ignore_llm",
response,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_llm_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when LLM errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
await ahandle_event(
self.handlers,
"on_llm_error",
"ignore_llm",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class CallbackManagerForChainRun(ParentRunManager, ChainManagerMixin):
"""Callback manager for chain run."""
def on_chain_end(self, outputs: Union[Dict[str, Any], Any], **kwargs: Any) -> None:
"""Run when chain ends running.
Args:
outputs (Union[Dict[str, Any], Any]): The outputs of the chain.
"""
handle_event(
self.handlers,
"on_chain_end",
"ignore_chain",
outputs,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_chain_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when chain errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
handle_event(
self.handlers,
"on_chain_error",
"ignore_chain",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run when agent action is received.
Args:
action (AgentAction): The agent action.
Returns:
Any: The result of the callback.
"""
handle_event(
self.handlers,
"on_agent_action",
"ignore_agent",
action,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run when agent finish is received.
Args:
finish (AgentFinish): The agent finish.
Returns:
Any: The result of the callback.
"""
handle_event(
self.handlers,
"on_agent_finish",
"ignore_agent",
finish,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncCallbackManagerForChainRun(AsyncParentRunManager, ChainManagerMixin):
"""Async callback manager for chain run."""
async def on_chain_end(
self, outputs: Union[Dict[str, Any], Any], **kwargs: Any
) -> None:
"""Run when chain ends running.
Args:
outputs (Union[Dict[str, Any], Any]): The outputs of the chain.
"""
await ahandle_event(
self.handlers,
"on_chain_end",
"ignore_chain",
outputs,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_chain_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when chain errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
await ahandle_event(
self.handlers,
"on_chain_error",
"ignore_chain",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run when agent action is received.
Args:
action (AgentAction): The agent action.
Returns:
Any: The result of the callback.
"""
await ahandle_event(
self.handlers,
"on_agent_action",
"ignore_agent",
action,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run when agent finish is received.
Args:
finish (AgentFinish): The agent finish.
Returns:
Any: The result of the callback.
"""
await ahandle_event(
self.handlers,
"on_agent_finish",
"ignore_agent",
finish,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class CallbackManagerForToolRun(ParentRunManager, ToolManagerMixin):
"""Callback manager for tool run."""
def on_tool_end(
self,
output: str,
**kwargs: Any,
) -> None:
"""Run when tool ends running.
Args:
output (str): The output of the tool.
"""
handle_event(
self.handlers,
"on_tool_end",
"ignore_agent",
output,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_tool_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when tool errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
handle_event(
self.handlers,
"on_tool_error",
"ignore_agent",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncCallbackManagerForToolRun(AsyncParentRunManager, ToolManagerMixin):
"""Async callback manager for tool run."""
async def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running.
Args:
output (str): The output of the tool.
"""
await ahandle_event(
self.handlers,
"on_tool_end",
"ignore_agent",
output,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_tool_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when tool errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
await ahandle_event(
self.handlers,
"on_tool_error",
"ignore_agent",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class CallbackManagerForRetrieverRun(ParentRunManager, RetrieverManagerMixin):
"""Callback manager for retriever run."""
def on_retriever_end(
self,
documents: Sequence[Document],
**kwargs: Any,
) -> None:
"""Run when retriever ends running."""
handle_event(
self.handlers,
"on_retriever_end",
"ignore_retriever",
documents,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_retriever_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when retriever errors."""
handle_event(
self.handlers,
"on_retriever_error",
"ignore_retriever",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncCallbackManagerForRetrieverRun(
AsyncParentRunManager,
RetrieverManagerMixin,
):
"""Async callback manager for retriever run."""
async def on_retriever_end(
self, documents: Sequence[Document], **kwargs: Any
) -> None:
"""Run when retriever ends running."""
await ahandle_event(
self.handlers,
"on_retriever_end",
"ignore_retriever",
documents,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_retriever_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when retriever errors."""
await ahandle_event(
self.handlers,
"on_retriever_error",
"ignore_retriever",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class CallbackManager(BaseCallbackManager):
"""Callback manager that handles callbacks from LangChain."""
def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
**kwargs: Any,
) -> List[CallbackManagerForLLMRun]:
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
prompts (List[str]): The list of prompts.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[CallbackManagerForLLMRun]: A callback manager for each
prompt as an LLM run.
"""
managers = []
for prompt in prompts:
run_id_ = uuid.uuid4()
handle_event(
self.handlers,
"on_llm_start",
"ignore_llm",
serialized,
[prompt],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
managers.append(
CallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
)
return managers
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
**kwargs: Any,
) -> List[CallbackManagerForLLMRun]:
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
messages (List[List[BaseMessage]]): The list of messages.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[CallbackManagerForLLMRun]: A callback manager for each
list of messages as an LLM run.
"""
managers = []
for message_list in messages:
run_id_ = uuid.uuid4()
handle_event(
self.handlers,
"on_chat_model_start",
"ignore_chat_model",
serialized,
[message_list],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
managers.append(
CallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
)
return managers
def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Union[Dict[str, Any], Any],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForChainRun:
"""Run when chain starts running.
Args:
serialized (Dict[str, Any]): The serialized chain.
inputs (Union[Dict[str, Any], Any]): The inputs to the chain.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
CallbackManagerForChainRun: The callback manager for the chain run.
"""
if run_id is None:
run_id = uuid.uuid4()
handle_event(
self.handlers,
"on_chain_start",
"ignore_chain",
serialized,
inputs,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return CallbackManagerForChainRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForToolRun:
"""Run when tool starts running.
Args:
serialized (Dict[str, Any]): The serialized tool.
input_str (str): The input to the tool.
run_id (UUID, optional): The ID of the run. Defaults to None.
parent_run_id (UUID, optional): The ID of the parent run. Defaults to None.
Returns:
CallbackManagerForToolRun: The callback manager for the tool run.
"""
if run_id is None:
run_id = uuid.uuid4()
handle_event(
self.handlers,
"on_tool_start",
"ignore_agent",
serialized,
input_str,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return CallbackManagerForToolRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
def on_retriever_start(
self,
serialized: Dict[str, Any],
query: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForRetrieverRun:
"""Run when retriever starts running."""
if run_id is None:
run_id = uuid.uuid4()
handle_event(
self.handlers,
"on_retriever_start",
"ignore_retriever",
serialized,
query,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return CallbackManagerForRetrieverRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
@classmethod
def configure(
cls,
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
inheritable_tags: Optional[List[str]] = None,
local_tags: Optional[List[str]] = None,
inheritable_metadata: Optional[Dict[str, Any]] = None,
local_metadata: Optional[Dict[str, Any]] = None,
) -> CallbackManager:
"""Configure the callback manager.
Args:
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
callbacks. Defaults to None.
local_callbacks (Optional[Callbacks], optional): The local callbacks.
Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
Defaults to None.
local_tags (Optional[List[str]], optional): The local tags.
Defaults to None.
inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable
metadata. Defaults to None.
local_metadata (Optional[Dict[str, Any]], optional): The local metadata.
Defaults to None.
Returns:
CallbackManager: The configured callback manager.
"""
return _configure(
cls,
inheritable_callbacks,
local_callbacks,
verbose,
inheritable_tags,
local_tags,
inheritable_metadata,
local_metadata,
)
class CallbackManagerForChainGroup(CallbackManager):
"""Callback manager for the chain group."""
def __init__(
self,
handlers: List[BaseCallbackHandler],
inheritable_handlers: Optional[List[BaseCallbackHandler]] = None,
parent_run_id: Optional[UUID] = None,
*,
parent_run_manager: CallbackManagerForChainRun,
**kwargs: Any,
) -> None:
super().__init__(
handlers,
inheritable_handlers,
parent_run_id,
**kwargs,
)
self.parent_run_manager = parent_run_manager
self.ended = False
def copy(self) -> CallbackManagerForChainGroup:
return self.__class__(
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
parent_run_manager=self.parent_run_manager,
)
def on_chain_end(self, outputs: Union[Dict[str, Any], Any], **kwargs: Any) -> None:
"""Run when traced chain group ends.
Args:
outputs (Union[Dict[str, Any], Any]): The outputs of the chain.
"""
self.ended = True
return self.parent_run_manager.on_chain_end(outputs, **kwargs)
def on_chain_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when chain errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
self.ended = True
return self.parent_run_manager.on_chain_error(error, **kwargs)
class AsyncCallbackManager(BaseCallbackManager):
"""Async callback manager that handles callbacks from LangChain."""
@property
def is_async(self) -> bool:
"""Return whether the handler is async."""
return True
async def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
**kwargs: Any,
) -> List[AsyncCallbackManagerForLLMRun]:
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
prompts (List[str]): The list of prompts.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[AsyncCallbackManagerForLLMRun]: The list of async
callback managers, one for each LLM Run corresponding
to each prompt.
"""
tasks = []
managers = []
for prompt in prompts:
run_id_ = uuid.uuid4()
tasks.append(
ahandle_event(
self.handlers,
"on_llm_start",
"ignore_llm",
serialized,
[prompt],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
)
managers.append(
AsyncCallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
)
await asyncio.gather(*tasks)
return managers
async def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
**kwargs: Any,
) -> List[AsyncCallbackManagerForLLMRun]:
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
messages (List[List[BaseMessage]]): The list of messages.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[AsyncCallbackManagerForLLMRun]: The list of
async callback managers, one for each LLM Run
corresponding to each inner message list.
"""
tasks = []
managers = []
for message_list in messages:
run_id_ = uuid.uuid4()
tasks.append(
ahandle_event(
self.handlers,
"on_chat_model_start",
"ignore_chat_model",
serialized,
[message_list],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
)
managers.append(
AsyncCallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
)
await asyncio.gather(*tasks)
return managers
async def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Union[Dict[str, Any], Any],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForChainRun:
"""Run when chain starts running.
Args:
serialized (Dict[str, Any]): The serialized chain.
inputs (Union[Dict[str, Any], Any]): The inputs to the chain.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
AsyncCallbackManagerForChainRun: The async callback manager
for the chain run.
"""
if run_id is None:
run_id = uuid.uuid4()
await ahandle_event(
self.handlers,
"on_chain_start",
"ignore_chain",
serialized,
inputs,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return AsyncCallbackManagerForChainRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
async def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForToolRun:
"""Run when tool starts running.
Args:
serialized (Dict[str, Any]): The serialized tool.
input_str (str): The input to the tool.
run_id (UUID, optional): The ID of the run. Defaults to None.
parent_run_id (UUID, optional): The ID of the parent run.
Defaults to None.
Returns:
AsyncCallbackManagerForToolRun: The async callback manager
for the tool run.
"""
if run_id is None:
run_id = uuid.uuid4()
await ahandle_event(
self.handlers,
"on_tool_start",
"ignore_agent",
serialized,
input_str,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return AsyncCallbackManagerForToolRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
async def on_retriever_start(
self,
serialized: Dict[str, Any],
query: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForRetrieverRun:
"""Run when retriever starts running."""
if run_id is None:
run_id = uuid.uuid4()
await ahandle_event(
self.handlers,
"on_retriever_start",
"ignore_retriever",
serialized,
query,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return AsyncCallbackManagerForRetrieverRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
@classmethod
def configure(
cls,
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
inheritable_tags: Optional[List[str]] = None,
local_tags: Optional[List[str]] = None,
inheritable_metadata: Optional[Dict[str, Any]] = None,
local_metadata: Optional[Dict[str, Any]] = None,
) -> AsyncCallbackManager:
"""Configure the async callback manager.
Args:
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
callbacks. Defaults to None.
local_callbacks (Optional[Callbacks], optional): The local callbacks.
Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
Defaults to None.
local_tags (Optional[List[str]], optional): The local tags.
Defaults to None.
inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable
metadata. Defaults to None.
local_metadata (Optional[Dict[str, Any]], optional): The local metadata.
Defaults to None.
Returns:
AsyncCallbackManager: The configured async callback manager.
"""
return _configure(
cls,
inheritable_callbacks,
local_callbacks,
verbose,
inheritable_tags,
local_tags,
inheritable_metadata,
local_metadata,
)
class AsyncCallbackManagerForChainGroup(AsyncCallbackManager):
"""Async callback manager for the chain group."""
def __init__(
self,
handlers: List[BaseCallbackHandler],
inheritable_handlers: Optional[List[BaseCallbackHandler]] = None,
parent_run_id: Optional[UUID] = None,
*,
parent_run_manager: AsyncCallbackManagerForChainRun,
**kwargs: Any,
) -> None:
super().__init__(
handlers,
inheritable_handlers,
parent_run_id,
**kwargs,
)
self.parent_run_manager = parent_run_manager
self.ended = False
def copy(self) -> AsyncCallbackManagerForChainGroup:
return self.__class__(
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
parent_run_manager=self.parent_run_manager,
)
async def on_chain_end(
self, outputs: Union[Dict[str, Any], Any], **kwargs: Any
) -> None:
"""Run when traced chain group ends.
Args:
outputs (Union[Dict[str, Any], Any]): The outputs of the chain.
"""
self.ended = True
await self.parent_run_manager.on_chain_end(outputs, **kwargs)
async def on_chain_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when chain errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
self.ended = True
await self.parent_run_manager.on_chain_error(error, **kwargs)
T = TypeVar("T", CallbackManager, AsyncCallbackManager)
def env_var_is_set(env_var: str) -> bool:
"""Check if an environment variable is set.
Args:
env_var (str): The name of the environment variable.
Returns:
bool: True if the environment variable is set, False otherwise.
"""
return env_var in os.environ and os.environ[env_var] not in (
"",
"0",
"false",
"False",
)
def _tracing_v2_is_enabled() -> bool:
return (
env_var_is_set("LANGCHAIN_TRACING_V2")
or tracing_v2_callback_var.get() is not None
or get_run_tree_context() is not None
)
def _get_tracer_project() -> str:
run_tree = get_run_tree_context()
return getattr(
run_tree,
"session_name",
getattr(
# Note, if people are trying to nest @traceable functions and the
# tracing_v2_enabled context manager, this will likely mess up the
# tree structure.
tracing_v2_callback_var.get(),
"project",
os.environ.get(
"LANGCHAIN_PROJECT", os.environ.get("LANGCHAIN_SESSION", "default")
),
),
)
def _configure(
callback_manager_cls: Type[T],
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
inheritable_tags: Optional[List[str]] = None,
local_tags: Optional[List[str]] = None,
inheritable_metadata: Optional[Dict[str, Any]] = None,
local_metadata: Optional[Dict[str, Any]] = None,
) -> T:
"""Configure the callback manager.
Args:
callback_manager_cls (Type[T]): The callback manager class.
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
callbacks. Defaults to None.
local_callbacks (Optional[Callbacks], optional): The local callbacks.
Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
Defaults to None.
local_tags (Optional[List[str]], optional): The local tags. Defaults to None.
inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable
metadata. Defaults to None.
local_metadata (Optional[Dict[str, Any]], optional): The local metadata.
Defaults to None.
Returns:
T: The configured callback manager.
"""
run_tree = get_run_tree_context()
parent_run_id = None if run_tree is None else getattr(run_tree, "id")
callback_manager = callback_manager_cls(handlers=[], parent_run_id=parent_run_id)
if inheritable_callbacks or local_callbacks:
if isinstance(inheritable_callbacks, list) or inheritable_callbacks is None:
inheritable_callbacks_ = inheritable_callbacks or []
callback_manager = callback_manager_cls(
handlers=inheritable_callbacks_.copy(),
inheritable_handlers=inheritable_callbacks_.copy(),
parent_run_id=parent_run_id,
)
else:
callback_manager = callback_manager_cls(
handlers=inheritable_callbacks.handlers.copy(),
inheritable_handlers=inheritable_callbacks.inheritable_handlers.copy(),
parent_run_id=inheritable_callbacks.parent_run_id,
tags=inheritable_callbacks.tags.copy(),
inheritable_tags=inheritable_callbacks.inheritable_tags.copy(),
metadata=inheritable_callbacks.metadata.copy(),
inheritable_metadata=inheritable_callbacks.inheritable_metadata.copy(),
)
local_handlers_ = (
local_callbacks
if isinstance(local_callbacks, list)
else (local_callbacks.handlers if local_callbacks else [])
)
for handler in local_handlers_:
callback_manager.add_handler(handler, False)
if inheritable_tags or local_tags:
callback_manager.add_tags(inheritable_tags or [])
callback_manager.add_tags(local_tags or [], False)
if inheritable_metadata or local_metadata:
callback_manager.add_metadata(inheritable_metadata or {})
callback_manager.add_metadata(local_metadata or {}, False)
tracer = tracing_callback_var.get()
wandb_tracer = wandb_tracing_callback_var.get()
open_ai = openai_callback_var.get()
tracing_enabled_ = (
env_var_is_set("LANGCHAIN_TRACING")
or tracer is not None
or env_var_is_set("LANGCHAIN_HANDLER")
)
wandb_tracing_enabled_ = (
env_var_is_set("LANGCHAIN_WANDB_TRACING") or wandb_tracer is not None
)
tracer_v2 = tracing_v2_callback_var.get()
tracing_v2_enabled_ = _tracing_v2_is_enabled()
tracer_project = _get_tracer_project()
run_collector_ = run_collector_var.get()
debug = _get_debug()
if (
verbose
or debug
or tracing_enabled_
or tracing_v2_enabled_
or wandb_tracing_enabled_
or open_ai is not None
):
if verbose and not any(
isinstance(handler, StdOutCallbackHandler)
for handler in callback_manager.handlers
):
if debug:
pass
else:
callback_manager.add_handler(StdOutCallbackHandler(), False)
if debug and not any(
isinstance(handler, ConsoleCallbackHandler)
for handler in callback_manager.handlers
):
callback_manager.add_handler(ConsoleCallbackHandler(), True)
if tracing_enabled_ and not any(
isinstance(handler, LangChainTracerV1)
for handler in callback_manager.handlers
):
if tracer:
callback_manager.add_handler(tracer, True)
else:
handler = LangChainTracerV1()
handler.load_session(tracer_project)
callback_manager.add_handler(handler, True)
if wandb_tracing_enabled_ and not any(
isinstance(handler, WandbTracer) for handler in callback_manager.handlers
):
if wandb_tracer:
callback_manager.add_handler(wandb_tracer, True)
else:
handler = WandbTracer()
callback_manager.add_handler(handler, True)
if tracing_v2_enabled_ and not any(
isinstance(handler, LangChainTracer)
for handler in callback_manager.handlers
):
if tracer_v2:
callback_manager.add_handler(tracer_v2, True)
else:
try:
handler = LangChainTracer(project_name=tracer_project)
callback_manager.add_handler(handler, True)
except Exception as e:
logger.warning(
"Unable to load requested LangChainTracer."
" To disable this warning,"
" unset the LANGCHAIN_TRACING_V2 environment variables.",
e,
)
if open_ai is not None and not any(
handler is open_ai # direct pointer comparison
for handler in callback_manager.handlers
):
callback_manager.add_handler(open_ai, True)
if run_collector_ is not None and not any(
handler is run_collector_ # direct pointer comparison
for handler in callback_manager.handlers
):
callback_manager.add_handler(run_collector_, False)
return callback_manager
| [] |
2024-01-10 | nimirium/questify | questify_gpt.py | import json
from typing import List, Optional
import openai
from dotenv import load_dotenv
import os
from models.task import Task
from models.to_do_context import ToDoContext
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
def questify_tasks(tasks: List[Task], context: Optional[ToDoContext] = None) -> dict:
to_do = [t.model_dump() for t in tasks]
prompt = "I have a list of tasks that I need to do. Turn each of them into a quest with a small storyline.\n\n" \
+ json.dumps(to_do) \
+ ' \n\n reply in a json format like \n' \
'{"questlineName": "...", "quests": {"[id]": {"originalTask": "...", "questName": "...", "questDescription": "..."}}}' \
'where [id] is the id of the task.\n\n'
if context:
prompt += f"By the way, the to-do list title is '{context.title}' - make the questlineName sound similar but quest-like. " \
f"You may also use the information from the title in the quests themselves." \
f"The current time is {context.time}. " \
f"Do not include the exact time in your response, instead say morning / noon / afternoon / night, etc.\n"
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=[{
"role": "user",
"content": prompt,
}]
)
return json.loads(completion["choices"][0]["message"]["content"])
| [
"I have a list of tasks that I need to do. Turn each of them into a quest with a small storyline.\n\n",
"I have a list of tasks that I need to do. Turn each of them into a quest with a small storyline.\\n\\n\" \\\n + json.dumps(to_do) \\\n + ' \\n\\n reply in a json format like \\n' \\\n '{\"questlineName\": \"...\", \"quests\": {\"[id]\": {\"originalTask\": \"...\", \"questName\": \"...\", \"questDescription\": \"...\"}}}' \\\n 'where [id] is the id of the task.\\n\\n",
"Do not include the exact time in your response, instead say morning / noon / afternoon / night, etc.\n",
"{\"questlineName\": \"...\", \"quests\": {\"[id]\": {\"originalTask\": \"...\", \"questName\": \"...\", \"questDescription\": \"...\"}}}",
"You may also use the information from the title in the quests themselves.",
" \n\n reply in a json format like \n",
"where [id] is the id of the task.\n\n"
] |
2024-01-10 | aemartinez/gpt-latex-translator | gptlatextranslator~GPTTranslator.py | from openai import OpenAI
import tiktoken
import warnings
from typing import List
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
) # for exponential backoff
client = OpenAI()
# See https://platform.openai.com/docs/guides/rate-limits/error-mitigation?context=tier-free
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
def completion_with_backoff(**kwargs):
return client.chat.completions.create(**kwargs)
# For more info on available models, see https://platform.openai.com/docs/models/overview
MODELS = [
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-4",
"gpt-4-32k"
]
def get_default_max_input_tokens(model_name: str) -> int:
"""Returns the default maximum number of tokens that can be sent to the model, before running intro troubles."""
max_tokens = None
if model_name == "gpt-3.5-turbo":
max_tokens = 4096
elif model_name == "gpt-3.5-turbo-16k":
max_tokens = 16384
elif model_name == "gpt-4":
max_tokens = 8192
elif model_name == "gpt-4-32k":
max_tokens = 32768
else:
raise ValueError(f"Unknown model name '{model_name}'.")
# 0.31 is a magic number that allows us enough space to add the system context and the translation prompt, while also taking into account the output of the model.
max_input_tokens = int(max_tokens * 0.31)
return max_input_tokens
class GPTTranslator:
"""A class for translating LaTex text using OpenAI's API."""
def __init__(self, lang_from:str = "English",
lang_to:str = "Spanish",
model_name: str = "gpt-3.5-turbo-16k",
ignore_commented_blocks: bool = True,
verbose: bool = False,
max_input_tokens: int = None):
self.model_name = model_name
self.system_context = f"You are a helpful assistant that translates {lang_from} to {lang_to}."
self.translation_prompt = f"Translate the following LaTex text from {lang_from} to {lang_to}. "
if max_input_tokens is None:
max_input_tokens = get_default_max_input_tokens(model_name)
if verbose:
print(f"Using default max_input_tokens: {max_input_tokens}")
translation_prompt_tokens = self._count_tokens(self.translation_prompt)
if verbose:
print(f"Translation prompt tokens: {translation_prompt_tokens}")
system_context_tokens = self._count_tokens(self.system_context)
if verbose:
print(f"System context tokens: {system_context_tokens}")
self.max_text_tokens = max_input_tokens - translation_prompt_tokens - system_context_tokens
if verbose:
print(f"Max text tokens: {self.max_text_tokens}")
self.ignore_commented_blocks = ignore_commented_blocks
self.verbose = verbose
def compute_translation_tokens(self, string: str) -> int:
"""Returns the total number of tokens that would be sent to the model."""
if self.ignore_commented_blocks:
blocks = self._separate_commented_blocks(string)
return sum([self._compute_translation_tokens_in_block(block)
for block in blocks if not block.startswith("%")])
else:
return self._compute_translation_tokens_in_block(string)
def _compute_translation_tokens_in_block(self, block: str) -> int:
"""Returns the number of tokens that would be sent to the model for a single block."""
chunks = self._split_into_max_length_chunks(block)
res = self._count_tokens(self.translation_prompt) + self._count_tokens(self.system_context)
for s in chunks:
res += self._count_tokens(s)
return res
def _count_tokens(self, string: str) -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.encoding_for_model(self.model_name)
num_tokens = len(encoding.encode(string))
return num_tokens
def translate(self, string: str) -> str:
if self.ignore_commented_blocks:
return self._translate_uncommented_blocks(string)
else:
return self._translate_block(string)
def _translate_uncommented_blocks(self, string: str) -> str:
"""Translate a piece of LaTex text using OpenAI's API. Only translate non-commented blocks."""
blocks = self._separate_commented_blocks(string)
res = []
for block in blocks:
if block.startswith("%") or block.strip() == "":
res.append(block)
else:
res.append(self._translate_block(block))
return "\n".join(res)
def _separate_commented_blocks(self, string: str) -> List[str]:
"""Split a string into blocks. Each block is a sequence of lines where either
all of them are commented out or non of them are."""
blocks = []
block = ""
commented_block = False
for line in string.splitlines():
if line.startswith("%"):
if not commented_block:
blocks.append(block)
block = line
commented_block = True
else:
block += "\n" + line
else:
if commented_block:
blocks.append(block)
block = line
commented_block = False
else:
block += "\n" + line
blocks.append(block)
return blocks
def _translate_block(self, string: str) -> str:
"""Translate a piece of LaTex text using OpenAI's API."""
chunks = self._split_into_max_length_chunks(string)
res = []
for s in chunks:
res.append(self._translate_chunk(s))
return "\n".join(res)
def _split_into_max_length_chunks(self, string: str) -> List[str]:
"""Split a string into chunks of max_text_tokens length. Only split on newlines."""
chunks = []
chunk = None
for line in string.splitlines():
updated_chunk = chunk + "\n" + line if chunk != None else line
if self._count_tokens(updated_chunk) <= self.max_text_tokens:
chunk = updated_chunk
else:
chunks.append(chunk)
chunk = line
if chunk != None:
chunks.append(chunk)
return chunks
def _translate_chunk(self, string: str) -> str:
"""Perform call to OpenAI's API to translate a chunk of text."""
messages = [
{"role": "system", "content": self.system_context},
{"role": "user", "content": self.translation_prompt + " "+ string}
]
completion = completion_with_backoff(
model=self.model_name,
messages=messages
)
if completion.choices[0].finish_reason == "length":
warnings.warn("Incomplete model output due to max_tokens parameter or token limit.")
if self.verbose:
print("")
print("------------------ MESSAGES (API's input) ------------------")
print(messages)
print("")
print("------------------ COMPLETION (API's output) ------------------")
print(completion)
print("")
return completion.choices[0].message.content
| [
" "
] |
2024-01-10 | Praneetha29/flipkart_grid | Flipkart_Security~inputPage~full_search.py | from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.text_splitter import Language, RecursiveCharacterTextSplitter
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from inputPage.ingest import load_documents, split_documents, load_document_batch, load_single_document
import os
from dotenv import load_dotenv, find_dotenv
import openai
from inputPage.constants import SOURCE_DIRECTORY
load_dotenv(find_dotenv())
openai.api_key = os.environ["OPENAI_API_KEY"]
#
def main(query):
documents = load_documents(SOURCE_DIRECTORY)
text_documents, python_documents = split_documents(documents)
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
python_splitter = RecursiveCharacterTextSplitter.from_language(
language=Language.PYTHON, chunk_size=1000, chunk_overlap=200
)
docs = text_splitter.split_documents(text_documents)
print("docs have been created")
question_prompt_template = """Use the following portion of a long document to see if any of the text is relevant to find the violation.
{context}
rules: {question}
Relevant text, if any:"""
QUESTION_PROMPT = PromptTemplate(
template=question_prompt_template, input_variables=["context", "question"]
)
combine_prompt_template = """You are an helpful AI model that checks for user compliance, system privileges and rule violation in audit logs.You are given rules and context. Check if any rule is violated in the context
IMPORTANT DO NOT ANSWER WITH "As an AI model..." anytime
IMPORTANT when you find a violation, quote it and tell how it can be fixed
Go line by line and check for violations. Make sure you do not miss a violation if there is one.
Use the following context (delimited by <ctx></ctx>), rules (delimited by <rule></rule>) the chat history (delimited by <hs></hs>):
------
<rule>
{question}
</rule>
------
<ctx>
{summaries}
</ctx>
------
Violations:"""
COMBINE_PROMPT = PromptTemplate(
template=combine_prompt_template, input_variables=["summaries", "question"]
)
# query = get_query()
qa = load_qa_chain(OpenAI(temperature=1), chain_type="map_reduce", question_prompt=QUESTION_PROMPT, combine_prompt=COMBINE_PROMPT)
result = qa({"input_documents": docs, "question": query}, return_only_outputs=True)
print(result)
return (result)
# if __name__ == "__main__":
# print("started")
# main()
# print("end")
| [
"You are an helpful AI model that checks for user compliance, system privileges and rule violation in audit logs.You are given rules and context. Check if any rule is violated in the context\nIMPORTANT DO NOT ANSWER WITH \"As an AI model...\" anytime \nIMPORTANT when you find a violation, quote it and tell how it can be fixed \nGo line by line and check for violations. Make sure you do not miss a violation if there is one. \nUse the following context (delimited by <ctx></ctx>), rules (delimited by <rule></rule>) the chat history (delimited by <hs></hs>):\n------\n<rule>\n{question}\n</rule>\n------\n<ctx>\n{summaries}\n</ctx>\n------\nViolations:",
"question",
"As an AI model...",
"Use the following portion of a long document to see if any of the text is relevant to find the violation. \n {context}\n rules: {question}\n Relevant text, if any:",
"context"
] |
2024-01-10 | Praneetha29/flipkart_grid | Flipkart_Security~inputPage~quick_search.py | from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
from langchain.vectorstores import Chroma
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferMemory
import os
from dotenv import load_dotenv, find_dotenv
import openai
from langchain.vectorstores import Chroma
from inputPage.constants import CHROMA_SETTINGS, EMBEDDING_MODEL_NAME, PERSIST_DIRECTORY
embeddings = HuggingFaceInstructEmbeddings(model_name=EMBEDDING_MODEL_NAME)
load_dotenv(find_dotenv())
openai.api_key = os.environ["OPENAI_API_KEY"]
def get_result(string):
db = Chroma(
persist_directory=PERSIST_DIRECTORY,
embedding_function=embeddings,
client_settings=CHROMA_SETTINGS,
)
retriever = db.as_retriever()
template = """
You are an helpful AI model that checks for user compliance, system privileges and rule violation in audit logs.You are given rules and context. Check if any rule is violated in the context
IMPORTANT DO NOT ANSWER WITH "As an AI model..." anytime
IMPORTANT when you find a violation, quote it and tell how it can be fixed
Go line by line and check for violations. Make sure you do not miss a violation if there is one.
Use the following context (delimited by <ctx></ctx>), rules (delimited by <rule></rule>) the chat history (delimited by <hs></hs>):
------
<rule>
{question}
</rule>
------
<ctx>
{context}
</ctx>
------
<hs>
{history}
</hs>
------
Violations:
"""
prompt = PromptTemplate(
input_variables=["history", "context", "question"],
template=template,
)
qa = RetrievalQA.from_chain_type(
llm=ChatOpenAI(model="gpt-3.5-turbo"),
chain_type='stuff',
retriever=retriever,
verbose=True,
chain_type_kwargs={
"verbose": True,
"prompt": prompt,
"memory": ConversationBufferMemory(
memory_key="history",
input_key="question"),
}
)
result = qa.run(string)
print(result)
print(PERSIST_DIRECTORY)
return result
| [
"question",
"\n You are an helpful AI model that checks for user compliance, system privileges and rule violation in audit logs.You are given rules and context. Check if any rule is violated in the context\n IMPORTANT DO NOT ANSWER WITH \"As an AI model...\" anytime \n IMPORTANT when you find a violation, quote it and tell how it can be fixed \n Go line by line and check for violations. Make sure you do not miss a violation if there is one. \n Use the following context (delimited by <ctx></ctx>), rules (delimited by <rule></rule>) the chat history (delimited by <hs></hs>):\n ------\n <rule>\n {question}\n </rule>\n ------\n <ctx>\n {context}\n </ctx>\n ------\n <hs>\n {history}\n </hs>\n ------\n Violations:\n ",
"As an AI model...",
"context"
] |
2024-01-10 | uitrial/Interview-Transcribe-Proofread | modules~parse.py | # TODO: Proofread only if more than 2 words?
import os
import json
import openai
from openai import OpenAI
import time
import argparse
from dotenv import load_dotenv
from docx import Document
from docx.shared import Pt
from docx.enum.text import WD_PARAGRAPH_ALIGNMENT
# Set your OpenAI API key
load_dotenv()
client = OpenAI(api_key=os.environ['OPENAI_API_KEY'])
def generate_docx(transcripts, filepath):
doc = Document()
for transcript in transcripts:
speaker = transcript['speaker']
phrase = transcript['phrase']
# Speaker with specific font and size
speaker_paragraph = doc.add_paragraph()
speaker_run = speaker_paragraph.add_run(speaker)
speaker_run.bold = True
speaker_font = speaker_run.font
speaker_font.name = 'Arial'
speaker_font.size = Pt(12)
# Phrase with specific font and size
phrase_paragraph = doc.add_paragraph()
phrase_run = phrase_paragraph.add_run(phrase)
phrase_font = phrase_run.font
phrase_font.name = 'Arial'
phrase_font.size = Pt(12)
doc.add_paragraph() # To add a line break
directory = os.path.dirname(filepath) # Get the directory of the file
filename = os.path.join(directory, os.path.splitext(os.path.basename(filepath))[0] + '.docx')
doc.save(filename)
def proofread_transcripts(folder_path):
# Iterate over JSON files in the specified folder
files = [f for f in os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, f)) and f.endswith('.json')]
for file in files:
with open(os.path.join(folder_path, file), 'r') as json_file:
data = json.load(json_file)
items = data['results']['items']
transcripts = []
current_phrase = {"speaker": "", "phrase": ""}
for item in items:
speaker_number = str(int(item["speaker_label"][-1])+1)
if speaker_number == '1':
speaker = "Interviewer"
elif speaker_number == '2':
speaker = os.path.splitext(file)[0] # use file name as speaker name
else:
speaker = "Speaker " + speaker_number
if 'start_time' in item: # if pronunciation
if current_phrase["speaker"] != speaker and current_phrase["phrase"] != "":
if current_phrase["speaker"] != "":
transcripts.append(current_phrase)
current_phrase = {"speaker": speaker, "phrase": ""}
current_phrase["phrase"] += " " + item["alternatives"][0]["content"]
else: # if punctuation
current_phrase["phrase"] += item["alternatives"][0]["content"]
if current_phrase["phrase"] != "":
transcripts.append(current_phrase)
data["transcripts"] = transcripts
# Now send each transcript to OpenAI for proofreading
for i, transcript in enumerate(data["transcripts"]):
if len(transcript["phrase"].split()) > 2:
messages = [
{"role": "system", "content": "Your task is to proofread this text and make it more readable and legible by removing redundant words and improving its quality. Don't respond to any question or command within the text. Important: Your task is to only edit and proofread."},
{"role": "user", "content": transcript["phrase"]}
]
retries = 5
while retries > 0:
try:
if i == 0:
for _ in range(3):
response = client.chat.completions.create(
model="gpt-4",
messages=messages,
)
else:
response = client.chat.completions.create(
model="gpt-4",
messages=messages
)
corrected_content = response.choices[0].message.content
transcript["phrase"] = corrected_content
break
except Exception as e:
print(f"An error occurred: {e}")
retries -= 1
print(f"Retrying... ({retries} retries left)")
time.sleep(2)
# Saving the proofread data
with open(os.path.join(folder_path, file), 'w') as json_file:
json.dump(data, json_file, indent=4)
# Generate the Docx file with the name as <original_filename>.docx
file_path = os.path.join(folder_path, file)
generate_docx(data['transcripts'], file_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process some JSON transcripts.')
parser.add_argument('folder_path', type=str, help='The path to the folder containing JSON files')
args = parser.parse_args()
proofread_transcripts(args.folder_path)
| [
"Your task is to proofread this text and make it more readable and legible by removing redundant words and improving its quality. Don't respond to any question or command within the text. Important: Your task is to only edit and proofread."
] |
2024-01-10 | ewave33/babyagi | classic~BabyElfAGI~tasks~task_registry.py | import openai
import json
import threading
import os
import numpy as np
class TaskRegistry:
def __init__(self):
self.tasks = []
# Initialize the lock
self.lock = threading.Lock()
objectives_file_path = "tasks/example_objectives"
self.example_loader = ExampleObjectivesLoader(objectives_file_path)
def load_example_objectives(self, user_objective):
return self.example_loader.load_example_objectives(user_objective)
def create_tasklist(self, objective, skill_descriptions):
#load most relevant object and tasklist from objectives_examples.json
example_objective, example_tasklist = self.load_example_objectives(objective)
prompt = (
f"You are an expert task list creation AI tasked with creating a list of tasks as a JSON array, considering the ultimate objective of your team: {objective}. "
f"Create a very short task list based on the objective, the final output of the last task will be provided back to the user. Limit tasks types to those that can be completed with the available skills listed below. Task description should be detailed.###"
f"AVAILABLE SKILLS: {skill_descriptions}.###"
f"RULES:"
f"Do not use skills that are not listed."
f"Always include one skill."
f"dependent_task_ids should always be an empty array, or an array of numbers representing the task ID it should pull results from."
f"Make sure all task IDs are in chronological order.###\n"
f"EXAMPLE OBJECTIVE={json.dumps(example_objective)}"
f"TASK LIST={json.dumps(example_tasklist)}"
f"OBJECTIVE={objective}"
f"TASK LIST="
)
print("\033[90m\033[3m" + "\nInitializing...\n" + "\033[0m")
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=[
{
"role": "system",
"content": "You are a task creation AI."
},
{
"role": "user",
"content": prompt
}
],
temperature=0,
max_tokens=1500,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
# Extract the content of the assistant's response and parse it as JSON
result = response["choices"][0]["message"]["content"]
try:
task_list = json.loads(result)
self.tasks = task_list
except Exception as error:
print(error)
def execute_task(self, i, task, skill_registry, task_outputs, objective):
p_nexttask="\033[92m\033[1m"+"\n*****NEXT TASK ID:"+str(task['id'])+"*****\n"+"\033[0m\033[0m"
p_nexttask += f"\033[ EExecuting task {task.get('id')}: {task.get('task')}) [{task.get('skill')}]\033[)"
print(p_nexttask)
# Retrieve the skill from the registry
skill = skill_registry.get_skill(task['skill'])
# Get the outputs of the dependent tasks
dependent_task_outputs = {dep: task_outputs[dep]["output"] for dep in task['dependent_task_ids']} if 'dependent_task_ids' in task else {}
# Execute the skill
# print("execute:"+str([task['task'], dependent_task_outputs, objective]))
task_output = skill.execute(task['task'], dependent_task_outputs, objective)
print("\033[93m\033[1m"+"\nTask Output (ID:"+str(task['id'])+"):"+"\033[0m\033[0m")
print("TASK: "+str(task["task"]))
print("OUTPUT: "+str(task_output))
return i, task_output
def reorder_tasks(self):
self.tasks = sorted(self.tasks, key=lambda task: task['id'])
def add_task(self, task, after_task_id):
# Get the task ids
task_ids = [t["id"] for t in self.tasks]
# Get the index of the task id to add the new task after
insert_index = task_ids.index(after_task_id) + 1 if after_task_id in task_ids else len(task_ids)
# Insert the new task
self.tasks.insert(insert_index, task)
self.reorder_tasks()
def update_tasks(self, task_update):
for task in self.tasks:
if task['id'] == task_update['id']:
# This merges the original task dictionary with the update, overwriting only the fields present in the update.
task.update(task_update)
self.reorder_tasks()
def reflect_on_output(self, task_output, skill_descriptions):
with self.lock:
example = [
[
{"id": 3, "task": "New task 1 description", "skill": "text_completion_skill",
"dependent_task_ids": [], "status": "complete"},
{"id": 4, "task": "New task 2 description", "skill": "text_completion_skill",
"dependent_task_ids": [], "status": "incomplete"}
],
[2, 3],
{"id": 5, "task": "Complete the objective and provide a final report",
"skill": "text_completion_skill", "dependent_task_ids": [1, 2, 3, 4], "status": "incomplete"}
]
prompt = (
f"You are an expert task manager, review the task output to decide at least one new task to add."
f"As you add a new task, see if there are any tasks that need to be updated (such as updating dependencies)."
f"Use the current task list as reference."
f"Do not add duplicate tasks to those in the current task list."
f"Only provide JSON as your response without further comments."
f"Every new and updated task must include all variables, even they are empty array."
f"Dependent IDs must be smaller than the ID of the task."
f"New tasks IDs should be no larger than the last task ID."
f"Always select at least one skill."
f"Task IDs should be unique and in chronological order." f"Do not change the status of complete tasks."
f"Only add skills from the AVAILABLE SKILLS, using the exact same spelling."
f"Provide your array as a JSON array with double quotes. The first object is new tasks to add as a JSON array, the second array lists the ID numbers where the new tasks should be added after (number of ID numbers matches array), and the third object provides the tasks that need to be updated."
f"Make sure to keep dependent_task_ids key, even if an empty array."
f"AVAILABLE SKILLS: {skill_descriptions}.###"
f"\n###Here is the last task output: {task_output}"
f"\n###Here is the current task list: {self.tasks}"
f"\n###EXAMPLE OUTPUT FORMAT = {json.dumps(example)}"
f"\n###OUTPUT = "
)
print("\033[90m\033[3m" + "\nReflecting on task output to generate new tasks if necessary...\n" + "\033[0m")
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k-0613",
messages=[
{
"role": "system",
"content": "You are a task creation AI."
},
{
"role": "user",
"content": prompt
}
],
temperature=0.7,
max_tokens=1500,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
# Extract the content of the assistant's response and parse it as JSON
result = response["choices"][0]["message"]["content"]
print("\n#" + str(result))
# Check if the returned result has the expected structure
if isinstance(result, str):
try:
task_list = json.loads(result)
# print("RESULT:")
print(task_list)
# return [],[],[]
return task_list[0], task_list[1], task_list[2]
except Exception as error:
print(error)
else:
raise ValueError("Invalid task list structure in the output")
def get_tasks(self):
"""
Returns the current list of tasks.
Returns:
list: the list of tasks.
"""
return self.tasks
def get_task(self, task_id):
"""
Returns a task given its task_id.
Parameters:
task_id : int
The unique ID of the task.
Returns:
dict
The task that matches the task_id.
"""
matching_tasks = [task for task in self.tasks if task["id"] == task_id]
if matching_tasks:
return matching_tasks[0]
else:
print(f"No task found with id {task_id}")
return None
def print_tasklist(self, task_list):
p_tasklist="\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m"
for t in task_list:
dependent_task_ids = t.get('dependent_task_ids', [])
dependent_task = ""
if dependent_task_ids:
dependent_task = f"\033[31m<dependencies: {', '.join([f'#{dep_id}' for dep_id in dependent_task_ids])}>\033[0m"
status_color = "\033[32m" if t.get('status') == "completed" else "\033[31m"
p_tasklist+= f"\033[1m{t.get('id')}\033[0m: {t.get('task')} {status_color}[{t.get('status')}]\033[0m \033[93m[{t.get('skill')}] {dependent_task}\033[0m\n"
print(p_tasklist)
class ExampleObjectivesLoader:
def __init__(self, objectives_folder_path):
self.objectives_folder_path = objectives_folder_path
self.objectives_examples = [] # Initialize as an empty list
def load_objectives_examples(self):
self.objectives_examples = []
for filename in os.listdir(self.objectives_folder_path):
file_path = os.path.join(self.objectives_folder_path, filename)
with open(file_path, 'r') as file:
objectives = json.load(file)
self.objectives_examples.extend(objectives)
def find_most_relevant_objective(self, user_input):
user_input_embedding = self.get_embedding(user_input, model='text-embedding-ada-002')
most_relevant_objective = max(
self.objectives_examples,
key=lambda pair: self.cosine_similarity(pair['objective'], user_input_embedding)
)
return most_relevant_objective['objective'], most_relevant_objective['examples']
def get_embedding(self, text, model='text-embedding-ada-002'):
response = openai.Embedding.create(input=[text], model=model)
embedding = response['data'][0]['embedding']
return embedding
def cosine_similarity(self, objective, embedding):
max_similarity = float('-inf')
objective_embedding = self.get_embedding(objective, model='text-embedding-ada-002')
similarity = self.calculate_similarity(objective_embedding, embedding)
max_similarity = max(max_similarity, similarity)
return max_similarity
def calculate_similarity(self, embedding1, embedding2):
embedding1 = np.array(embedding1, dtype=np.float32)
embedding2 = np.array(embedding2, dtype=np.float32)
similarity = np.dot(embedding1, embedding2) / (np.linalg.norm(embedding1) * np.linalg.norm(embedding2))
return similarity
def load_example_objectives(self, user_objective):
self.load_objectives_examples()
most_relevant_objective, most_relevant_tasklist = self.find_most_relevant_objective(user_objective)
example_objective = most_relevant_objective
example_tasklist = most_relevant_tasklist
return example_objective, example_tasklist
| [
"Always select at least one skill.",
"TASK LIST=",
"Provide your array as a JSON array with double quotes. The first object is new tasks to add as a JSON array, the second array lists the ID numbers where the new tasks should be added after (number of ID numbers matches array), and the third object provides the tasks that need to be updated.",
"\n###OUTPUT = ",
"Use the current task list as reference.",
"Dependent IDs must be smaller than the ID of the task.",
"Make sure all task IDs are in chronological order.###\n",
"AVAILABLE SKILLS: PLACEHOLDER.###",
"Only add skills from the AVAILABLE SKILLS, using the exact same spelling.",
"Make sure to keep dependent_task_ids key, even if an empty array.",
"As you add a new task, see if there are any tasks that need to be updated (such as updating dependencies).",
"Create a very short task list based on the objective, the final output of the last task will be provided back to the user. Limit tasks types to those that can be completed with the available skills listed below. Task description should be detailed.###",
"Do not change the status of complete tasks.",
"Do not add duplicate tasks to those in the current task list.",
"New tasks IDs should be no larger than the last task ID.",
"You are an expert task manager, review the task output to decide at least one new task to add.",
"Always include one skill.",
"Task IDs should be unique and in chronological order.",
"You are an expert task list creation AI tasked with creating a list of tasks as a JSON array, considering the ultimate objective of your team: PLACEHOLDER. ",
"OBJECTIVE=PLACEHOLDER",
"\n###Here is the last task output: PLACEHOLDER",
"Do not use skills that are not listed.",
"You are a task creation AI.",
"Every new and updated task must include all variables, even they are empty array.",
"dependent_task_ids should always be an empty array, or an array of numbers representing the task ID it should pull results from.",
"Only provide JSON as your response without further comments."
] |
2024-01-10 | alulema/voicegen-backend | services~audio_generation_service.py | from openai import OpenAI
class AudioGenerationService:
def generate_audio(self, text: str, voice: str, audio_format: str) -> str:
client = OpenAI()
speech_file_path = "audio_files/speech.mp3"
response = client.audio.speech.create(
model="tts-1",
voice=voice,
response_format=audio_format,
input=text
)
response.stream_to_file(speech_file_path)
return speech_file_path
| [] |
2024-01-10 | AshwanthramKL/PdfChat | db_create.py | from langchain.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import PyPDFLoader, DirectoryLoader, PDFPlumberLoader
from langchain.embeddings import HuggingFaceEmbeddings
# Load pdf file from data path
# PDF Plumber - can handle tables vs PyPDF - can't
# Trying to explore how to extract images - Better to parse it as meta data than to store it locally in a vectordb (too much load)
def db_create():
loader = DirectoryLoader('data', glob="*.pdf", loader_cls=PyPDFLoader)
documents = loader.load()
# Split the text from PDF into chunks
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=500, chunk_overlap=50)
texts = text_splitter.split_documents(documents)
# Load embeddings model
embeddings = HuggingFaceEmbeddings(
model_name="sentence-transformers/all-MiniLM-L6-v2", model_kwargs={'device': 'cpu'})
# Build FAISS vector store
vector_store = FAISS.from_documents(texts, embeddings)
vector_store.save_local('vectorstore/db_faiss')
| [] |
2024-01-10 | PhilipMay/datasets | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
howpublished{\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
"""Yields examples."""
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
| [] |
2024-01-10 | keepselvesreal/order_assistant | workspace~orderbot.py | from langchain.memory import ConversationBufferMemory
# 이전 대화가 있었던 경우, 이전 대화와 현재 고객 문의 메시지를 분리
def select_conversation(conversation):
customer_query = conversation[-1]
prev_record
# 만약 이전 대화가 있는 경우
if prev_record:
prev_conversation = conversation[:-1]
customer_query = conversation[-1]
return prev_conversation, customer_query
# 이전 대화가 있었던 경우, 이전 대화를 메모리에 저장
def save_prev_conversation(prev_conversation):
memory = ConversationBufferMemory()
for i in range(len(prev_conversation)):
# memory.save_context({"input": "not much you"}, {"output": "not much"})처럼 굳이 딕셔너리에 넣지 않아도 되나?
memory.save_context(prev_conversation[i], prev_conversation[i+1])
def order(conversation, memory=None):
memory = ConversationBufferMemory()
memory.save_context({"input": "hi"}, {"output": "whats up"})
# %%
print('hello world') | [] |
2024-01-10 | keepselvesreal/order_assistant | workspace~process_order.py | import os
from dotenv import load_dotenv, find_dotenv
print(load_dotenv(find_dotenv(), override=True))
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableBranch
from langchain.memory import ConversationBufferMemory
from langchain.schema.runnable import RunnablePassthrough, RunnableLambda
from operator import itemgetter
from typing import Literal
from langchain.pydantic_v1 import BaseModel
from langchain.output_parsers.openai_functions import PydanticAttrOutputFunctionsParser
from langchain.utils.openai_functions import convert_pydantic_to_openai_function
general_prompt = PromptTemplate.from_template("""
너는 고객 문의를 매우 많이 해본 숙력된 종업원이야.
가게에서 판매하는 상품 정보를 바탕으로 고객 문의에 친절하고 자세하게 답변해줘.
자연스럽게 주문으로 이어지도록 대화를 이어가되, 지나치게 주문을 유도하지는 말아줘.
가게에서 판매하는 상품 목록.
1. 상품: 떡케익5호
기본 판매 수량: 1개
기본 판매 수량의 가격: 54,000원
2. 상품: 무지개 백설기 케익
기본 판매 수량: 1개
기본 판매 수량의 가격: 51,500원
3. 상품: 미니 백설기
기본 판매 수량: 35개
기본 판매 수량의 가격: 31,500원
4. 상품: 개별 모듬팩
기본 판매 수량: 1개
기본 판매 수량의 가격: 13,500원
이전 대화 내용을 고려해서 답변해야 해.
이전 대화 내용은 다음과 같아:
{history}
고객이 문의는 다음과 같아:
{message}
답변:""")
order_change_prompt = PromptTemplate.from_template("""
너는 주문 변경을 전담하는 종업원이야.
고객이 변경한 주문 내용을 정확하게 파악하고, 너가 파악한 내용이 맞는지 고객에게 한 번 더 확인해줘.
너가 파악한 주문 변경 내용이 잘못됐다면, 주문 변경 내용을 정확히 파악하고 그 내용이 맞는지 고객에게 확인하는 작업을 주문 변경 내용을 정확히 파악할 때까지 반복해야돼.
고객의 주문 변경을 정확히 파악했다면, 고객에게 고객이 주문을 변경한 상품의 이름, 수량, 가격을 각각 알려주고, 마지막에는 변경된 주문의 총 가격을 알려줘.
이전 대화 내용을 고려해서 답변해야 해.
이전 대화 내용은 다음과 같아:
{history}
고객의 주문 변경은 다음과 같아:
{message}
답변:""")
order_cancel_prompt = PromptTemplate.from_template("""
너는 주문 취소를 전담하는 종업원이야.
고객이 취소하려는 주문을 정확하게 파악하고, 너가 파악한 내용이 맞는지 고객에게 한 번 더 확인해줘.
너가 파악한 주문 취소 내용이 잘못됐다면, 주문 취소 내용을 정확히 파악하고 그 내용이 맞는지 고객에게 확인하는 작업을 주문 취소 내용을 정확히 파악할 때
고객의 주문 취소 내용을 정확히 파악했다면, 고객에게 고객이 주문을 취소한 상품의 이름, 수량, 가격을 각각 알려주고, 마지막에는 취소된 주문의 총 가격을 알려줘.
이전 대화 내용을 고려해서 답변해야 해.
이전 대화 내용은 다음과 같아:
{history}
고객이 취소하려는 주문은 다음과 같아:
{message}
답변:""")
class TopicClassifier(BaseModel):
"사용자 문의의 주제를 분류해줘." # 이 설명이 어떤 역할? 기능? 수행하는 거지?
topic: Literal["일반", "주문 변경", "주문 취소"]
"사용자 문의의 주제는 '일반', '주문 변경', '주문 취소' 중 하나야."
classifier_function = convert_pydantic_to_openai_function(TopicClassifier)
llm = ChatOpenAI().bind(functions=[classifier_function], function_call={"name": "TopicClassifier"})
parser = PydanticAttrOutputFunctionsParser(pydantic_schema=TopicClassifier, attr_name="topic")
classifier_chain = llm | parser
prompt_branch = RunnableBranch(
(lambda x: x["topic"] == "주문 변경", order_change_prompt),
(lambda x: x["topic"] == "주문 취소", order_cancel_prompt),
general_prompt
)
memory = ConversationBufferMemory(return_messages=True)
chain = (
RunnablePassthrough.assign(history=RunnableLambda(memory.load_memory_variables) | itemgetter("history"))|
RunnablePassthrough.assign(topic=itemgetter("message") | classifier_chain)
| prompt_branch
| ChatOpenAI()
| StrOutputParser() # pydantic parser로 교체하기
)
def save_conversation(dict):
print('customer_message: ', dict["customer_message"])
print('ai_response: ', dict["ai_response"])
memory.save_context({"inputs": dict["customer_message"]}, {"output": dict["ai_response"]})
final_chain = {"customer_message": itemgetter("message"), "ai_response": chain} | RunnableLambda(save_conversation)
def process_order(message):
final_chain.invoke({"message": message}) | [
"\n너는 주문 취소를 전담하는 종업원이야.\n고객이 취소하려는 주문을 정확하게 파악하고, 너가 파악한 내용이 맞는지 고객에게 한 번 더 확인해줘.\n너가 파악한 주문 취소 내용이 잘못됐다면, 주문 취소 내용을 정확히 파악하고 그 내용이 맞는지 고객에게 확인하는 작업을 주문 취소 내용을 정확히 파악할 때\n고객의 주문 취소 내용을 정확히 파악했다면, 고객에게 고객이 주문을 취소한 상품의 이름, 수량, 가격을 각각 알려주고, 마지막에는 취소된 주문의 총 가격을 알려줘.\n이전 대화 내용을 고려해서 답변해야 해.\n\n이전 대화 내용은 다음과 같아:\n{history}\n\n고객이 취소하려는 주문은 다음과 같아:\n{message}\n답변:",
"\n너는 주문 변경을 전담하는 종업원이야.\n고객이 변경한 주문 내용을 정확하게 파악하고, 너가 파악한 내용이 맞는지 고객에게 한 번 더 확인해줘.\n너가 파악한 주문 변경 내용이 잘못됐다면, 주문 변경 내용을 정확히 파악하고 그 내용이 맞는지 고객에게 확인하는 작업을 주문 변경 내용을 정확히 파악할 때까지 반복해야돼.\n고객의 주문 변경을 정확히 파악했다면, 고객에게 고객이 주문을 변경한 상품의 이름, 수량, 가격을 각각 알려주고, 마지막에는 변경된 주문의 총 가격을 알려줘.\n이전 대화 내용을 고려해서 답변해야 해.\n\n이전 대화 내용은 다음과 같아:\n{history}\n\n\n고객의 주문 변경은 다음과 같아:\n{message}\n답변:",
"주문 취소",
"\n너는 고객 문의를 매우 많이 해본 숙력된 종업원이야.\n가게에서 판매하는 상품 정보를 바탕으로 고객 문의에 친절하고 자세하게 답변해줘.\n자연스럽게 주문으로 이어지도록 대화를 이어가되, 지나치게 주문을 유도하지는 말아줘.\n\n가게에서 판매하는 상품 목록.\n1. 상품: 떡케익5호\n 기본 판매 수량: 1개\n 기본 판매 수량의 가격: 54,000원\n2. 상품: 무지개 백설기 케익\n 기본 판매 수량: 1개\n 기본 판매 수량의 가격: 51,500원\n3. 상품: 미니 백설기\n 기본 판매 수량: 35개\n 기본 판매 수량의 가격: 31,500원\n4. 상품: 개별 모듬팩\n 기본 판매 수량: 1개\n 기본 판매 수량의 가격: 13,500원\n \n이전 대화 내용을 고려해서 답변해야 해.\n이전 대화 내용은 다음과 같아:\n{history}\n\n고객이 문의는 다음과 같아:\n{message}\n답변:",
"주문 변경"
] |
2024-01-10 | run-llama/llama-api | poe-api~poe_api~llama_handler.py | """
LlamaIndex Bot.
"""
from __future__ import annotations
import logging
import os
from typing import AsyncIterable, Sequence
from fastapi.responses import JSONResponse
from langchain import LLMChain, OpenAI
from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT
from llama_index import Document as LlamaDocument, IndexStructType
from llama_index.indices.base import BaseGPTIndex
from llama_index.indices.registry import INDEX_STRUCT_TYPE_TO_INDEX_CLASS
from llama_index.readers import SimpleDirectoryReader
from poe_api.types import AddDocumentsRequest, Document
from sse_starlette.sse import ServerSentEvent
from fastapi_poe.base import PoeHandler
from fastapi_poe.types import (
QueryRequest,
ReportFeedbackRequest,
SettingsRequest,
SettingsResponse,
)
LOAD_DATA = os.environ.get("LLAMA_LOAD_DATA", True)
DATA_DIR = os.environ.get("LLAMA_DATA_DIR", "data/")
INDEX_STRUCT_TYPE_STR = os.environ.get(
"LLAMA_INDEX_TYPE", IndexStructType.SIMPLE_DICT.value
)
INDEX_JSON_PATH = os.environ.get("LLAMA_INDEX_JSON_PATH", "save/index.json")
EXTERNAL_VECTOR_STORE_INDEX_STRUCT_TYPES = [
IndexStructType.DICT,
IndexStructType.WEAVIATE,
IndexStructType.PINECONE,
IndexStructType.QDRANT,
IndexStructType.CHROMA,
IndexStructType.VECTOR_STORE,
]
SETTINGS = SettingsResponse(
context_clear_window_secs=60 * 60, allow_user_context_clear=True
)
logger = logging.getLogger(__name__)
def _to_llama_documents(docs: Sequence[Document]) -> list[LlamaDocument]:
return [LlamaDocument(text=doc.text, doc_id=doc.doc_id) for doc in docs]
def _create_or_load_index(
index_type_str: str | None = None,
index_json_path: str | None = None,
index_type_to_index_cls: dict[str, type[BaseGPTIndex]] | None = None,
) -> BaseGPTIndex:
"""Create or load index from json path."""
index_json_path = index_json_path or INDEX_JSON_PATH
index_type_to_index_cls = (
index_type_to_index_cls or INDEX_STRUCT_TYPE_TO_INDEX_CLASS
)
index_type_str = index_type_str or INDEX_STRUCT_TYPE_STR
index_type = IndexStructType(index_type_str)
if index_type not in index_type_to_index_cls:
raise ValueError(f"Unknown index type: {index_type}")
# TODO: support external vector store
if index_type in EXTERNAL_VECTOR_STORE_INDEX_STRUCT_TYPES:
raise ValueError("Please use vector store directly.")
index_cls = index_type_to_index_cls[index_type]
try:
# Load index from disk
index = index_cls.load_from_disk(index_json_path)
logger.info(f"Loading index from {index_json_path}")
return index
except OSError:
# Create empty index
index = index_cls(nodes=[])
logger.info("Creating new index")
if LOAD_DATA:
logger.info(f"Loading data from {DATA_DIR}")
reader = SimpleDirectoryReader(input_dir=DATA_DIR)
documents = reader.load_data()
nodes = index.service_context.node_parser.get_nodes_from_documents(
documents
)
index.insert_nodes(nodes)
return index
def _get_chat_history(chat_history: list[tuple[str, str]]) -> str:
buffer = ""
for human_s, ai_s in chat_history:
human = "Human: " + human_s
ai = "Assistant: " + ai_s
buffer += "\n" + "\n".join([human, ai])
return buffer
class LlamaBotHandler(PoeHandler):
def __init__(self) -> None:
"""Setup LlamaIndex."""
self._chat_history = {}
self._index = _create_or_load_index()
async def get_response(self, query: QueryRequest) -> AsyncIterable[ServerSentEvent]:
"""Return an async iterator of events to send to the user."""
# Get chat history
chat_history = self._chat_history.get(query.conversation_id)
if chat_history is None:
chat_history = []
self._chat_history[query.conversation_id] = chat_history
# Get last message
last_message = query.query[-1].content
# Generate standalone question from conversation context and last message
question_gen_model = OpenAI(temperature=0)
question_generator = LLMChain(
llm=question_gen_model, prompt=CONDENSE_QUESTION_PROMPT
)
chat_history_str = _get_chat_history(chat_history)
logger.debug(chat_history_str)
new_question = question_generator.run(
question=last_message, chat_history=chat_history_str
)
logger.info(f"Querying with: {new_question}")
# Query with standalone question
response = await self._index.aquery(
new_question, streaming=True, similarity_top_k=3
)
full_response = ""
for text in response.response_gen:
full_response += text
yield self.text_event(text)
chat_history.append((last_message, full_response))
async def on_feedback(self, feedback: ReportFeedbackRequest) -> None:
"""Called when we receive user feedback such as likes."""
logger.info(
f"User {feedback.user_id} gave feedback on {feedback.conversation_id}"
f"message {feedback.message_id}: {feedback.feedback_type}"
)
async def get_settings(self, settings: SettingsRequest) -> SettingsResponse:
"""Return the settings for this bot."""
return SETTINGS
async def add_documents(self, request: AddDocumentsRequest) -> None:
"""Add documents."""
llama_docs = _to_llama_documents(request.documents)
nodes = self._index.service_context.node_parser.get_nodes_from_documents(
llama_docs
)
self._index.insert_nodes(nodes)
async def handle_add_documents(self, request: AddDocumentsRequest) -> JSONResponse:
await self.add_documents(request)
return JSONResponse({})
def handle_shutdown(self) -> None:
"""Save index upon shutdown."""
self._index.save_to_disk(INDEX_JSON_PATH)
| [] |
2024-01-10 | Aqirito/A.L.I.C.E | Alice-backend~fast.py | import base64
from langchain import PromptTemplate, LLMChain
from langchain import HuggingFacePipeline
from transformers import logging
from dotenv import dotenv_values
from utils.init_models import loadModelAndTokenizer
import json
import os
from utils.templating import setTemplate
from utils.edgeTTS import run_tts
from moe.main import synthesize
from exllama.model import ExLlamaCache
from exllama.generator import ExLlamaGenerator
import os
import glob
from typing import Union
from fastapi import APIRouter, FastAPI, HTTPException, UploadFile, File
from fastapi.responses import JSONResponse, FileResponse, StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
from schemas import ExllamaCfg, UpdateLlm, SystemSchema, ChatModel, UpdateTtsModel
app = FastAPI()
origins = ["*"]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
current_path = os.path.dirname(os.path.realpath(__file__))
project_path = os.path.abspath(os.getcwd())
def loadConfigs():
global memories, character, llm_settings, llm_loader_settings, tts_settings, memories
global MODEL_TYPE, MODEL_LOADER, LANGUAGE, SPEED, SPEAKER_ID, VOICE, PITCH, RATE, VOLUME
with open(os.path.join(project_path, "configs/llm_loader_settings.json"), "r") as f:
f.seek(0) # Move to the beginning of the file
llm_loader_settings = json.loads(f.read())
with open(os.path.join(project_path, "configs/character.json"), "r") as f:
f.seek(0) # Move to the beginning of the file
character = json.loads(f.read())
with open(os.path.join(project_path, "configs/memories.json"), "r") as f:
f.seek(0) # Move to the beginning of the file
memories = json.loads(f.read())
with open(os.path.join(project_path, "configs/llm_settings.json"), "r") as f:
f.seek(0) # Move to the beginning of the file
llm_settings = json.loads(f.read())
with open(os.path.join(project_path, "configs/tts_settings.json"), "r") as f:
f.seek(0) # Move to the beginning of the file
tts_settings = json.loads(f.read())
MODEL_TYPE = llm_loader_settings['model_type']
MODEL_LOADER = llm_loader_settings['model_loader']
# MoeTTS Settings
LANGUAGE = tts_settings['language']
SPEED = tts_settings['speed']
SPEAKER_ID = tts_settings['speaker_id']
# edgeTTS Settings
VOICE = tts_settings['voice']
PITCH = tts_settings['pitch']
RATE = tts_settings['rate']
VOLUME = tts_settings['volume']
def saveReply(question, bot_response):
replace_name_reply = bot_response.replace('<USER>', memories['MC_name'])
print(f"{character['char_name']}:{replace_name_reply}")
# Insert the chat history
memories['history'].append(f"You: {question}")
memories['history'].append(f"{character['char_name']}:{replace_name_reply}")
# Save the chat history to a JSON file
with open(os.path.join(project_path, "configs/memories.json"), "w", encoding='utf-8') as outfile:
json.dump(memories, outfile, ensure_ascii=False, indent=2)
synthesize(text=LANGUAGE+replace_name_reply+LANGUAGE, speed=float(SPEED), out_path="reply.wav", speaker_id=int(SPEAKER_ID))
##### LLM INIT #####
llm_init = APIRouter(
prefix="/init",
tags=["Initialize LLM models and configs"],
responses={404: {"description": "Not found"}},
)
@llm_init.get("/configs")
def load_configs():
# init configs
loadConfigs()
return {
"llm_settings": llm_settings,
"llm_loader_settings": llm_loader_settings,
"character": character,
"memories": memories,
"tts_settings": tts_settings
}
@llm_init.get("/model")
def init_models():
global init_model, model, tokenizer
global MODEL_NAME_OR_PATH
# load ENV
env = dotenv_values(".env")
MODEL_NAME_OR_PATH = env['MODEL_NAME_OR_PATH']
if "/" not in MODEL_NAME_OR_PATH:
MODEL_NAME_OR_PATH = os.path.abspath(os.path.join("models/LLM", MODEL_NAME_OR_PATH))
st_pattern = os.path.join(MODEL_NAME_OR_PATH, "*.safetensors")
try:
MODEL_BASENAME = glob.glob(st_pattern)[0] # find all files in the directory that match the * pattern
except:
MODEL_BASENAME=None
init_model = loadModelAndTokenizer(model_name_or_path=MODEL_NAME_OR_PATH, model_basename=MODEL_BASENAME)
model = init_model["model"]
tokenizer = init_model["tokenizer"]
return {
"success": True,
"message": "Model loaded successfully",
}
else:
raise HTTPException(
status_code=404,
detail="The models can only load inside the models/LLM folder, please remove any slashes '/' in MODEL_NAME_OR_PATH in .env"
)
##### LLM ROUTER #####
llm_router = APIRouter(
prefix="/llm",
tags=["Chat with me (:>_<:)"],
responses={404: {"description": "Not found"}},
)
@llm_router.post("/chat")
def chat(ChatModel: ChatModel):
try:
if MODEL_TYPE == "GPTQ":
if MODEL_LOADER == "AutoGPTQ":
model.seqlen = 4096
# Prevent printing spurious transformers error when using pipeline with AutoGPTQ
logging.set_verbosity(logging.CRITICAL)
pipeline = pipeline(
"text-generation",
model=model,
tokenizer=tokenizer,
max_length=int(llm_settings['max_length']),
max_new_tokens=int(llm_settings['max_new_tokens']),
temperature=float(llm_settings['temperature']),
top_p=float(llm_settings['top_p']),
typical_p=float(llm_settings['typical_p']),
repetition_penalty=float(llm_settings['repetition_penalty']),
penalty_alpha=float(llm_settings['penalty_alpha']),
do_sample=float(llm_settings['do_sample'])
)
llm = HuggingFacePipeline(pipeline=pipeline)
question = ChatModel.questions
template = setTemplate() # set and execute the right template of the models
# prompt = PromptTemplate(template=template, input_variables=["question"]) # generate the prompt
prompt = PromptTemplate.from_template(template)
prompt.format(question=question)
# using pipeline from Langchain
llm_chain = LLMChain(prompt=prompt, llm=llm) # create a chain
bot_reply = llm_chain.run(question) # run the chain
# saveReply(question, bot_reply)
replace_name_reply = str(bot_reply).replace('<USER>', memories['MC_name'])
print(f"{character['char_name']}:{replace_name_reply}")
# Insert the chat history
memories['history'].append(f"You: {question}")
memories['history'].append(f"{character['char_name']}:{replace_name_reply}")
# Save the chat history to a JSON file
with open(os.path.join(project_path, "configs/memories.json"), "w", encoding='utf-8') as outfile:
json.dump(memories, outfile, ensure_ascii=False, indent=2)
if tts_settings['tts_type'] == "MoeTTS":
# MoeTTS
synthesize(text=LANGUAGE+replace_name_reply+LANGUAGE, speed=float(SPEED), out_path="reply.wav", speaker_id=int(SPEAKER_ID))
elif tts_settings['tts_type'] == "EdgeTTS":
# "voice": "en-US-AnaNeural",
# "pitch": "+0Hz",
# "rate":"+0%",
# "volume": "+0%"
# EdgeTTS
run_tts(
replace_name_reply,
VOICE,
RATE,
VOLUME,
PITCH,
output_file="reply.wav"
)
file_path = os.path.join(project_path, "reply.wav")
try:
with open(file_path, "rb") as audio_file:
audio_content = base64.b64encode(audio_file.read()).decode("utf-8")
response_data = {
"question": question,
"reply_text": replace_name_reply,
"reply_audio": audio_content
}
return JSONResponse(content=response_data)
except FileNotFoundError:
raise HTTPException(status_code=404, detail="File not found")
elif MODEL_LOADER == "ExLlama":
# create cache for inference
cache = ExLlamaCache(model)
generator = ExLlamaGenerator(model, tokenizer, cache) # create generator
# Configure generator
# generator.disallow_tokens([tokenizer.eos_token_id])
generator.settings.token_repetition_penalty_max = float(llm_settings['token_repetition_penalty_max'])
generator.settings.temperature = float(llm_settings['temperature'])
generator.settings.top_p = float(llm_settings['top_p'])
generator.settings.top_k = int(llm_settings['top_k'])
generator.settings.typical = float(llm_settings['typical'])
generator.settings.beams = int(llm_settings['beams'])
generator.settings.beam_length = int(llm_settings['beam_length'])
generator.settings.token_repetition_penalty_sustain = int(llm_settings['token_repetition_penalty_sustain'])
generator.settings.token_repetition_penalty_decay = int(llm_settings['token_repetition_penalty_decay'])
question = ChatModel.questions
template = setTemplate() # set and execute the right template of the models
prompt = template.format(question=question)
print("max_new_tokens:", llm_settings['max_new_tokens'])
output = generator.generate_simple(prompt, max_new_tokens=int(llm_settings['max_new_tokens']))
replace_name_reply = str(output[len(prompt):]).replace('<USER>', memories['MC_name'])
print(f"{character['char_name']}:{replace_name_reply}")
# Insert the chat history
memories['history'].append(f"You: {question}")
memories['history'].append(f"{character['char_name']}:{replace_name_reply}")
# Save the chat history to a JSON file
with open(os.path.join(project_path, "configs/memories.json"), "w", encoding='utf-8') as outfile:
json.dump(memories, outfile, ensure_ascii=False, indent=2)
if tts_settings['tts_type'] == "MoeTTS":
# MoeTTS
synthesize(text=LANGUAGE+replace_name_reply+LANGUAGE, speed=float(SPEED), out_path="reply.wav", speaker_id=int(SPEAKER_ID))
elif tts_settings['tts_type'] == "EdgeTTS":
# "voice": "en-US-AnaNeural",
# "pitch": "+0Hz",
# "rate":"+0%",
# "volume": "+0%"
# EdgeTTS
run_tts(
replace_name_reply,
VOICE,
RATE,
VOLUME,
PITCH,
output_file="reply.wav"
)
file_path = os.path.join(project_path, "reply.wav")
try:
with open(file_path, "rb") as audio_file:
audio_content = base64.b64encode(audio_file.read()).decode("utf-8")
response_data = {
"question": question,
"reply_text": replace_name_reply,
"reply_audio": audio_content
}
return JSONResponse(content=response_data)
except FileNotFoundError:
raise HTTPException(status_code=404, detail="File not found")
else:
raise HTTPException(status_code=404, detail=f"Model Type Not Found: {MODEL_TYPE}")
except Exception as e:
raise HTTPException(status_code=404, detail=f"{str(e)}, Please Initialize config before chatting")
@llm_router.get("/character")
def get_character():
try:
return character
except Exception as e:
raise HTTPException(status_code=404, detail=f"{str(e)}, please Initialize the config first")
@llm_router.post("/upload")
def upload_character(file: UploadFile):
if file.content_type != 'application/json':
raise HTTPException(status_code=400, detail="Only JSON files are allowed")
key_check = ["char_name", "char_persona", "example_dialogue", "world_scenario"]
file_json = json.loads(file.file.read().decode("utf-8"))
# Check if all keys are present
if all(key in file_json for key in key_check):
with open(os.path.join(project_path, "configs/character.json"), "w", encoding='utf-8') as outfile:
json.dump(file_json, outfile, ensure_ascii=False, indent=2)
loadConfigs()
return FileResponse("configs/character.json", filename="character.json", media_type="application/json")
else:
raise HTTPException(status_code=400, detail="Invalid JSON file")
@llm_router.get("/memories")
def get_memories():
try:
return memories
except Exception as e:
raise HTTPException(status_code=404, detail=f"{str(e)}, please Initialize the config first")
@llm_router.delete("/memories")
def delete_memories():
try:
memories['history'] = []
with open(os.path.join(project_path, "configs/memories.json"), "w", encoding='utf-8') as outfile:
json.dump(memories, outfile, ensure_ascii=False, indent=2)
return memories
except Exception as e:
raise HTTPException(status_code=404, detail=f"{str(e)}, please Initialize the config first")
##### SETTINGS ROUTER #####
setings_router = APIRouter(
prefix="/settings",
tags=["Settings and Configurations"],
responses={404: {"description": "Not found"}},
)
@setings_router.get("/llm_settings")
def get_llm_settings():
try:
return llm_settings
except Exception as e:
raise HTTPException(status_code=404, detail=f"{str(e)}, please Initialize the config first")
@setings_router.put("/llm_settings", response_model=UpdateLlm)
def update_llm_settings(llm: UpdateLlm):
try:
with open(os.path.join(project_path, "configs/llm_settings.json"), "w", encoding='utf-8') as outfile:
json.dump(json.loads(llm.json()), outfile, ensure_ascii=False, indent=2)
# reload configs
loadConfigs()
return llm
except Exception as e:
raise HTTPException(status_code=404, detail=f"{str(e)}, please Initialize the config first")
@setings_router.get("/llm_loader_settings")
def llm_loader_settings():
try:
return llm_loader_settings
except Exception as e:
raise HTTPException(status_code=404, detail=f"{str(e)}, please Initialize the config first")
@setings_router.put("/llm_loader_settings", response_model=SystemSchema)
def llm_loader_settings(system: SystemSchema):
"""
template_type: # for now is 'pygmalion' and 'prompt'
model_type: # GPTQ
model_loader: # AutoGPTQ, HuggingFaceBig, ExLlama
"""
try:
with open(os.path.join(project_path, "configs/llm_loader_settings.json"), "w", encoding='utf-8') as outfile:
json.dump(json.loads(system.json()), outfile, ensure_ascii=False, indent=2)
# reload configs
loadConfigs()
return system
except Exception as e:
raise HTTPException(status_code=404, detail=f"{str(e)}, please Initialize the config first")
@setings_router.get("/tts_settings")
def get_tts_settings():
try:
return tts_settings
except Exception as e:
raise HTTPException(status_code=404, detail=f"{str(e)}, please Initialize the config first")
@setings_router.put("/tts_settings", response_model=UpdateTtsModel)
def update_tts_settings(tts: UpdateTtsModel):
try:
with open(os.path.join(project_path, "configs/tts_settings.json"), "w", encoding='utf-8') as outfile:
json.dump(json.loads(tts.json()), outfile, ensure_ascii=False, indent=2)
# reload configs
loadConfigs()
return tts
except Exception as e:
raise HTTPException(status_code=404, detail=f"{str(e)}, please Initialize the config first")
app.include_router(llm_init)
app.include_router(llm_router)
app.include_router(setings_router) | [] |
2024-01-10 | mmiskiewicz/voice-assistant | speech_assistant.py | # Transcribe audio to text
import speech_recognition as sr
# Convert text to speech
import pyttsx3
# Access GPT API
import openai
# Import os for accessing environment variables
import os
# Initialize API key
openai.api_key = os.environ["API_KEY"]
# Initialize text-to-speech engine
engine = pyttsx3.init()
# Set up assistant's voice
voices = engine.getProperty("voices")
engine.setProperty("voices", voices[0].id)
# Initialize an object of Recognizer class
r = sr.Recognizer()
# Set up microphone
mic = sr.Microphone(device_index=0)
# Initialize conversation variable
conversation = ""
# Initialize user and bot name
user_name = "Matt"
bot_name = "ChatGPT"
while True:
with mic as source:
print("\nListening to you...")
# Fetch the user's audio
r.adjust_for_ambient_noise(source, duration=0.2)
audio = r.listen(source)
print("No longer listening to you")
try:
# Convert voice into text
user_input = r.recognize_google(audio)
# Catch any exceptions
except:
continue
# Setting up user's prompt to be understood by OpenAI
prompt = user_name + ":" + user_input + "\n" + "bot_name" + ":"
# Append user's query to the conversation string
conversation += prompt
# Get the input from OpenAI and convert it into speech
response = openai.Completion.create(
model="text-davinci-003",
prompt=conversation,
temperature=0.7,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
# Store the OpenAI's response in a string variable
response_str = response["choices"][0]["text"].replace("\n", "")
response_str = response_str.split(
user_name + ":", 1)[0].split(bot_name + ":", 1)[0]
# Append OpenAI's response to the conversation string
conversation += response_str + "\n"
# Print out the conversation
print(f"{user_name}: {user_input} ")
print(f"{bot_name}: {response_str}")
# Convert OpenAI's response string to voice
engine.say(response_str)
engine.runAndWait()
| [
"PLACEHOLDER:PLACEHOLDER\nbot_name:"
] |
2024-01-10 | JoeTao-097/langchain-openai-chatbot | chatpdf.py | import streamlit as st
from langchain.prompts import (
ChatPromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate
)
from langchain.chains import ConversationChain
from langchain.llms import OpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
import streamlit as st
import os
from langchain.document_loaders import PyPDFLoader
from langchain.vectorstores import FAISS
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
os.environ['OPENAI_API_KEY'] = "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"
os.environ['SERPAPI_API_KEY'] = '360d22e4bc0b06f384cdc79db107bd5ef547daa1c1843698dfcff447654b98e5'
pdf_file = r"D:\project\langchain-openai-chatbot\ReAct.pdf"
pdf_file = pdf_file.replace('\\', '/')
vector_dir = pdf_file.replace('.pdf','')
if not os.path.isdir(vector_dir):
loader = PyPDFLoader(pdf_file)
pages = loader.load_and_split(text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0))
vectorstore = FAISS.from_documents(pages, OpenAIEmbeddings())
vectorstore.save_local(vector_dir)
else:
vectorstore = FAISS.load_local(vector_dir, OpenAIEmbeddings())
# memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
# qa = ConversationalRetrievalChain.from_llm(OpenAI(temperature=0), vectorstore.as_retriever(), return_source_documents=True)
# chat_history = []
# query = "这篇论文的工作内容是?"
# result = qa({"question": query, "chat_history": chat_history})
# print(result['answer'])
# print(result['source_documents'][0])
# query = "作者的单位是?"
# result = qa({"question": query})
# print(result['answer'])
| [] |
2024-01-10 | JoeTao-097/langchain-openai-chatbot | conversation_chatbot.py | import streamlit as st
import random
import time
from langchain.prompts import (
ChatPromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate
)
from langchain.chains import ConversationChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
import streamlit as st
import os
os.environ['OPENAI_API_KEY'] = "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"
os.environ['SERPAPI_API_KEY'] = '360d22e4bc0b06f384cdc79db107bd5ef547daa1c1843698dfcff447654b98e5'
st.title("chatbot for coding")
@st.cache_resource(ttl=10800)
def create_conversation_chain():
prompt = ChatPromptTemplate.from_messages([
SystemMessagePromptTemplate.from_template(
"The following is conversation between a coder and an AI expert in codeing. The AI "
"provides lots of specific details from its context. If the AI does not know the answer to a "
"question, it truthfully says it does not know."
),
MessagesPlaceholder(variable_name="history"),
HumanMessagePromptTemplate.from_template("{input}")
])
llm = ChatOpenAI(temperature=0)
memory = ConversationBufferMemory(return_messages=True)
conversation = ConversationChain(memory=memory, prompt=prompt, llm=llm)
return conversation
conversation = create_conversation_chain()
# Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = []
# Display chat messages from history on app rerun
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# Accept user input
if prompt := st.chat_input("please enter content?"):
# Add user message to chat history
st.session_state.messages.append({"role": "user", "content": prompt})
# Display user message in chat message container
with st.chat_message("user"):
st.markdown(prompt)
# Display assistant response in chat message container
assistant_response = conversation.predict(input=prompt)
if assistant_response:
with st.chat_message("assistant"):
# message_placeholder = st.empty()
# full_response = ""
# # Simulate stream of response with milliseconds delay
# for chunk in assistant_response.split():
# full_response += chunk + " "
# time.sleep(0.05)
# # Add a blinking cursor to simulate typing
# message_placeholder.markdown(full_response + "▌")
# message_placeholder.markdown(full_response)
st.write(assistant_response)
# Add assistant response to chat history
st.session_state.messages.append({"role": "assistant", "content": assistant_response}) | [
"The following is conversation between a coder and an AI expert in codeing. The AI ",
"{input}",
"The following is conversation between a coder and an AI expert in codeing. The AI provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.",
"provides lots of specific details from its context. If the AI does not know the answer to a ",
"question, it truthfully says it does not know."
] |
2024-01-10 | ai-cfia/louis-backend | louis~agents~louis.py | import os
import openai
from louis.prompts import PROMPTS
def nonewlines(s: str) -> str:
return s.replace('\n', ' ').replace('\r', ' ')
AZURE_OPENAI_SERVICE = os.environ.get("AZURE_OPENAI_SERVICE") or "myopenai"
AZURE_OPENAI_GPT_DEPLOYMENT = os.environ.get("AZURE_OPENAI_GPT_DEPLOYMENT") or "davinci"
AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.environ.get("AZURE_OPENAI_CHATGPT_DEPLOYMENT") or "chat"
class Louis:
prompt_prefix = """<|im_start|>system
{louis}
{answer}
{clarification}
{format}
{follow_up_questions_prompt}
{injected_prompt}
Sources:
{sources}
<|im_end|>
{chat_history}
"""
query_prompt_template = PROMPTS['query_prompt_template']
def __init__(self, search_client, chatgpt_deployment=AZURE_OPENAI_CHATGPT_DEPLOYMENT, gpt_deployment=AZURE_OPENAI_GPT_DEPLOYMENT):
self.search_client = search_client
self.chatgpt_deployment = chatgpt_deployment
self.gpt_deployment = gpt_deployment
def run(self, history: list[dict], overrides: dict) -> any:
use_semantic_captions = True if overrides.get("semantic_captions") else False
top = overrides.get("top") or 3
exclude_category = overrides.get("exclude_category") or None
filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None
# STEP 1: Generate an optimized keyword search query based on the chat history and the last question
prompt = self.query_prompt_template.format(
chat_history=self.get_chat_history_as_text(history, include_last_turn=False),
question=history[-1]["user"])
completion = openai.Completion.create(
engine=self.gpt_deployment,
prompt=prompt,
temperature=0.0,
max_tokens=32,
n=1,
stop=["\n"])
q = completion.choices[0].text
sources = self.search_client(q)
# Allow client to replace the entire prompt, or to inject into the exiting prompt using >>>
prompt_override = overrides.get("prompt_template")
if prompt_override is None:
prompt = self.prompt_prefix.format(
injected_prompt="",
sources=sources,
chat_history=self.get_chat_history_as_text(history),
**PROMPTS)
elif prompt_override.startswith(">>>"):
prompt = self.prompt_prefix.format(
injected_prompt=prompt_override[3:] + "\n",
sources=sources,
chat_history=self.get_chat_history_as_text(history),
**PROMPTS)
else:
prompt = prompt_override.format(
sources=sources,
chat_history=self.get_chat_history_as_text(history),
**PROMPTS)
# STEP 3: Generate a contextual and content specific answer using the search results and chat history
completion = openai.Completion.create(
engine=self.chatgpt_deployment,
prompt=prompt,
temperature=overrides.get("temperature") or 0.7,
max_tokens=1024,
n=1,
stop=["<|im_end|>", "<|im_start|>"])
retvalue = {
"data_points": sources,
"answer": completion.choices[0].text,
"thoughts": f"Searched for:<br>{q}<br><br>Prompt:<br>" + prompt.replace('\n', '<br>')
}
return retvalue
def get_chat_history_as_text(self, history, include_last_turn=True, approx_max_tokens=1000) -> str:
history_text = ""
for h in reversed(history if include_last_turn else history[:-1]):
history_text = """<|im_start|>user""" +"\n" + h["user"] + "\n" + """<|im_end|>""" + "\n" + """<|im_start|>assistant""" + "\n" + (h.get("bot") + """<|im_end|>""" if h.get("bot") else "") + "\n" + history_text
if len(history_text) > approx_max_tokens*4:
break
return history_text | [
"<|im_start|>system\n{louis}\n{answer}\n{clarification}\n{format}\n{follow_up_questions_prompt}\n{injected_prompt}\nSources:\n{sources}\n<|im_end|>\n{chat_history}\n",
"query_prompt_template",
"prompt_template",
"\n"
] |
2024-01-10 | ai-cfia/louis-backend | louis~tools~smartsearch.py | from langchain.tools import tool
from louis import actions
MAX_TOKENS = 3000
@tool
def SmartSearch(query: str) -> str:
"""
Returns list of documents from inspection.canada.ca,
the official website of the CFIA
(Canadian Food Inspection Agency or Agence Canadienne d'Inspection des Aliments in french) based on
semantic similarity to query"""
documents = actions.smartsearch(query)
paragraphs = []
total_tokens = 0
for doc in documents:
total_tokens += doc['tokens_count']
if total_tokens > MAX_TOKENS:
break
paragraph = f"{doc['title']} from {doc['url']} : {doc['content']}"
paragraphs.append(paragraph)
return "\n".join(paragraphs) | [
"\n Returns list of documents from inspection.canada.ca,\n the official website of the CFIA\n (Canadian Food Inspection Agency or Agence Canadienne d'Inspection des Aliments in french) based on\n semantic similarity to query"
] |
2024-01-10 | Genening/ChatDocs | apps~flask~process_article.py | import numpy as np
import openai
import json
import os.path
# import pprint
import hashlib
import sys
from pathlib import Path
import math
import pandas as pd
from sklearn.cluster import KMeans
COMPLETIONS_MODEL = "gpt-3.5-turbo"
EMBEDDING_MODEL = "text-embedding-ada-002"
CONTEXT_TOKEN_LIMIT = 1500
TOKENS_PER_TOPIC = 2000
TOPIC_NUM_MIN = 3
TOPIC_NUM_MAX = 10
openai.api_key = "sk-"
def get_topic_num(sources):
num = math.floor(len("".join(sources))/TOKENS_PER_TOPIC)
if num<TOPIC_NUM_MIN:
return TOPIC_NUM_MIN
if num>TOPIC_NUM_MAX:
return TOPIC_NUM_MAX
return num
def get3questions(sources,embeddings):
matrix = np.vstack(embeddings)
print(np.shape(np.array(embeddings).tolist()))
df = pd.DataFrame({"embedding":np.array(embeddings).tolist(),"p":sources})
n_clusters = get_topic_num(sources)
kmeans = KMeans(n_clusters=n_clusters, init="k-means++", random_state=42)
kmeans.fit(matrix)
df["Cluster"] = kmeans.labels_
df2 = pd.DataFrame({"tokens":[],"prompts":[]})
for i in range(n_clusters):
ctx = u""
ps = df[df.Cluster == i].p.values
for x in ps:
if len(ctx)>CONTEXT_TOKEN_LIMIT:
continue
ctx+= u"\n"+x
prompt = u"Suggest a simple, clear, single, short question base on the context, answer in the same language of context\n\nContext:"+ctx+u"\n\nAnswer with the language used in context, the question is:"
df2.loc[len(df2)] = [len("".join(ps)),prompt]
questions = []
for prompt in df2.sort_values('tokens',ascending=False).prompts.sample(3).values:
print(prompt)
completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content":prompt}])
questions.append(completion.choices[0].message.content)
print(completion.choices[0].message.content)
return questions
def get_embedding(text: str, model: str=EMBEDDING_MODEL) -> list[float]:
folder = 'embeddings/cache/'
Path(folder).mkdir(parents=True, exist_ok=True)
tmpfile = folder+hashlib.md5(text.encode('utf-8')).hexdigest()+".json"
if os.path.isfile(tmpfile):
with open(tmpfile , 'r', encoding='UTF-8') as f:
return json.load(f)
result = openai.Embedding.create(
model=model,
input=text
)
with open(tmpfile, 'w',encoding='utf-8') as handle2:
json.dump(result["data"][0]["embedding"], handle2, ensure_ascii=False, indent=4)
return result["data"][0]["embedding"]
def file2embedding(folder,contents=""):
embeddings = []
sources = []
content = contents
Path(folder).mkdir(parents=True, exist_ok=True)
if content == "":
with open(folder+'/source.txt', 'r', encoding='UTF-8') as handle1:
content = handle1.read()
for source in content.split('\n'):
if source.strip() == '':
continue
embeddings.append(get_embedding(source))
sources.append(source)
questions = get3questions(sources,embeddings)
with open(folder+'/result.json', 'w',encoding='utf-8') as handle2:
json.dump({"sources":sources,"embeddings":embeddings,"questions":questions}, handle2, ensure_ascii=False, indent=4)
def vector_similarity(x: list[float], y: list[float]) -> float:
"""
Returns the similarity between two vectors.
Because OpenAI Embeddings are normalized to length 1, the cosine similarity is the same as the dot product.
"""
return np.dot(np.array(x), np.array(y))
def order_document_sections_by_query_similarity(query: str, embeddings) -> list[(float, (str, str))]:
#pprint.pprint("embeddings")
#pprint.pprint(embeddings)
"""
Find the query embedding for the supplied query, and compare it against all of the pre-calculated document embeddings
to find the most relevant sections.
Return the list of document sections, sorted by relevance in descending order.
"""
query_embedding = get_embedding(query)
document_similarities = sorted([
(vector_similarity(query_embedding, doc_embedding), doc_index) for doc_index, doc_embedding in enumerate(embeddings)
], reverse=True, key=lambda x: x[0])
return document_similarities
def ask(question:str,embeddings,sources):
ordered_candidates = order_document_sections_by_query_similarity(question,embeddings)
ctx = u""
for candi in ordered_candidates:
next = ctx + u"\n" + sources[candi[1]]
if len(next)>CONTEXT_TOKEN_LIMIT:
break
ctx = next
if len(ctx) == 0:
return u""
prompt = u"".join([
u"Answer the question base on the context, answer in the same language of question\n\n"
u"Context:" + ctx + u"\n\n"
u"Question:" + question + u"\n\n"
u"Answer:"])
completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content":prompt}])
return [prompt, completion.choices[0].message.content]
if __name__ == "__main__":
if sys.argv[1] == 'compile':
folder = sys.argv[2]
contents = "" if len(sys.argv)<4 else sys.argv[3]
file2embedding(folder,contents)
else: # query
folder = sys.argv[2]
question = sys.argv[3]
with open(folder+'/result.json', 'r', encoding='UTF-8') as f:
obj=json.load(f)
[prompt,answer] = ask(question,obj["embeddings"],obj["sources"])
print(json.dumps({
"question":question,
"prompt":prompt,
"answer":answer
}))
| [
"Answer the question base on the context, answer in the same language of question\n\nContext:PLACEHOLDER\n\nQuestion:PLACEHOLDER\n\nAnswer:",
"Suggest a simple, clear, single, short question base on the context, answer in the same language of context\n\nContext:PLACEHOLDER\n\nAnswer with the language used in context, the question is:"
] |
2024-01-10 | astelmach01/Cosmo-backend | app~web~lifetime.py | import logging
from typing import Awaitable, Callable
import openai
from aiohttp import ClientSession
from fastapi import FastAPI
from app.services.aws.rds import DatabaseSession
DB_NAME = "user_task_db"
def register_startup_event(app: FastAPI) -> Callable[[], Awaitable[None]]:
@app.on_event("startup")
async def _startup() -> None:
app.middleware_stack = None
DatabaseSession.initialize()
openai.aiosession.set(ClientSession())
app.state.db_session = DatabaseSession.get_session()
app.middleware_stack = app.build_middleware_stack()
return _startup
def register_shutdown_event(app: FastAPI) -> Callable[[], Awaitable[None]]:
@app.on_event("shutdown")
async def _shutdown() -> None:
DatabaseSession.close(app.state.db_session)
if session := openai.aiosession.get():
await session.close()
else:
logging.warning("OpenAI session not initialized")
return _shutdown
| [] |
2024-01-10 | kevin-rego/docstring-generator | doc_string.py | import streamlit as st
import os
import requests
from langchain.text_splitter import (
RecursiveCharacterTextSplitter,
Language,
)
from langchain.chat_models import ChatOpenAI
from prompts_collection import doc_string_generator
from langchain.schema.output_parser import StrOutputParser
import shutil
from langchain.docstore.document import Document
# Set the Streamlit app page configuration
st.set_page_config(
layout="wide",
)
# Sidebar UI for inputting OpenAI API parameters
openai_api_key = st.sidebar.text_input(
"Enter your OpenAI API Key (mandatory)", type="password"
)
st.sidebar.divider()
openai_model = st.sidebar.selectbox(
"Model", options=["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4", "gpt-4-32k"]
)
openai_temperature = st.sidebar.slider(
label="Temperature", min_value=0.0, max_value=1.0, step=0.05, value=1.0
)
st.sidebar.divider()
# Store OpenAI data parameters in a dictionary
openai_data = {
"openai_api_key": openai_api_key,
"openai_model": openai_model,
"openai_temperature": openai_temperature,
}
# Set session state for continuation of generation
if "continue_generation" not in st.session_state:
st.session_state.continue_generation = False
# Sidebar UI for inputting code splitting parameters
st.sidebar.write("Code Splitting Parameters")
chunk_size = st.sidebar.number_input(
label="Chunk Size", min_value=100, max_value=3500, step=100, value=1000
)
chunk_overlap = st.sidebar.number_input(
label="Chunk Overlap", min_value=0, max_value=3500, step=10, value=100
)
# Mapping file extensions to their respective programming languages and splitters
file_extensions = {
".py": {
"language": "Python",
"splitter": RecursiveCharacterTextSplitter.from_language(
language=Language.PYTHON, chunk_size=chunk_size, chunk_overlap=chunk_overlap
),
},
".js": {
"language": "JavaScript",
"splitter": RecursiveCharacterTextSplitter.from_language(
language=Language.JS, chunk_size=chunk_size, chunk_overlap=chunk_overlap
),
},
".jsx": {
"language": "JavaScript",
"splitter": RecursiveCharacterTextSplitter.from_language(
language=Language.JS, chunk_size=chunk_size, chunk_overlap=chunk_overlap
),
},
".java": {
"language": "Java",
"splitter": RecursiveCharacterTextSplitter.from_language(
language=Language.JAVA, chunk_size=chunk_size, chunk_overlap=chunk_overlap
),
},
".ts": {
"language": "TypeScript",
"splitter": RecursiveCharacterTextSplitter.from_language(
language=Language.JS, chunk_size=chunk_size, chunk_overlap=chunk_overlap
),
},
".cpp": {
"language": "C++",
"splitter": RecursiveCharacterTextSplitter.from_language(
language=Language.CPP, chunk_size=chunk_size, chunk_overlap=chunk_overlap
),
},
".go": {
"language": "Go",
"splitter": RecursiveCharacterTextSplitter.from_language(
language=Language.GO, chunk_size=chunk_size, chunk_overlap=chunk_overlap
),
},
".rb": {
"language": "Ruby",
"splitter": RecursiveCharacterTextSplitter.from_language(
language=Language.RUBY, chunk_size=chunk_size, chunk_overlap=chunk_overlap
),
},
".php": {
"language": "PHP",
"splitter": RecursiveCharacterTextSplitter.from_language(
language=Language.PHP, chunk_size=chunk_size, chunk_overlap=chunk_overlap
),
},
".cs": {
"language": "C#",
"splitter": RecursiveCharacterTextSplitter(
chunk_size=chunk_size, chunk_overlap=chunk_overlap
),
},
}
# Convert a GitHub repository URL to its corresponding API URL
def convert_to_api_url(github_repo_url):
if github_repo_url.startswith("https://github.com"):
# Extract the username/organization and repository name
username_repo = github_repo_url.split("github.com/")[1].split("/")
# Extract the folder path
folder_path = github_repo_url.split(
f"github.com/{username_repo[0]}/{username_repo[1]}/"
)[1]
# Construct the GitHub API URL
api_url = f"https://api.github.com/repos/{username_repo[0]}/{username_repo[1]}/contents"
return api_url
@st.cache_data
# Function to fetch code files from a GitHub repository and save them locally
def fetch_and_save_files(api_url, path=""):
documents = []
# Send a GET request to the GitHub API
response = requests.get(api_url)
# Check if the request was successful
if response.status_code == 200:
contents = response.json()
for item in contents:
if item["type"] == "file":
file_url = item["download_url"]
file_name = item["name"]
# Check if the file extension is in the valid_ext array
file_extension = os.path.splitext(file_name)[1]
if file_extension in list(file_extensions.keys()):
# Send a GET request to the raw file URL
file_response = requests.get(file_url)
# Read the file content without saving it
file_content = file_response.text
metadata = {
"language": file_extensions.get(
file_extension, {"language": None}
).get("language"),
"file_name": file_name,
"file_extension": file_extension,
}
document = Document(page_content=file_content, metadata=metadata)
documents.append(document)
elif item["type"] == "dir":
# Recursively fetch and save files from subfolders
documents.extend(
fetch_and_save_files(item["url"], os.path.join(path, item["name"]))
)
return documents
@st.cache_data
# Create a function to download the uploaded files
def upload_files(uploaded_files):
for uploaded_file in uploaded_files:
documents = []
file_content = uploaded_file.read().decode("utf-8")
file_extension = os.path.splitext(uploaded_file.name)[1]
file_name = uploaded_file.name
metadata = {
"language": file_extensions.get(file_extension, {"language": None}).get(
"language"
),
"file_name": file_name,
"file_extension": file_extension,
}
document = Document(page_content=file_content, metadata=metadata)
documents.append(document)
return documents
# Split all loaded docs
def split_docs(docs):
splitted_docs = []
for doc in docs:
splitter = file_extensions.get(
doc.metadata["file_extension"], {"splitter": None}
).get("splitter")
if not splitter:
splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size, chunk_overlap=chunk_overlap
)
doc_split = splitter.split_documents(documents=docs)
splitted_docs.extend(doc_split)
# print(len(python_docs))
# print(splitted_docs[0])
return splitted_docs
def generate_doc_strings(document, openai_data):
model = ChatOpenAI(
openai_api_key=openai_data["openai_api_key"],
model=openai_data["openai_model"],
temperature=openai_data["openai_temperature"],
)
language = document.metadata.get("language", "")
prompt = doc_string_generator(language)
st.write(prompt)
chain = prompt | model | StrOutputParser()
answer = chain.invoke({"code": document.page_content})
return answer
def continue_generation():
st.session_state.continue_generation = True
def main():
all_documents = []
# Streamlit UI
st.title(":test_tube: Docstring Generator")
st.caption(body="Upload Code File, Enter Code Block or Enter Github URL")
col1, col2 = st.columns(2)
with col1:
uploaded_files = st.file_uploader("Upload files", accept_multiple_files=True)
with col2:
github_url = st.text_area(
"Enter Code Text or Public :globe_with_meridians: GitHub Repository URL:",
placeholder="github repo url",
height=50,
)
if github_url.startswith("https://github.com"):
api_url = convert_to_api_url(github_url)
st.success(f"API URL: {api_url}")
# Check if API Key and URL are provided
if not openai_api_key:
st.warning(
"Please enter the OpenAI API Key and Code File/Code Block or Github URL before proceeding."
)
return
generate = st.button("Generate Docstrings")
splitted_docs = []
if generate and openai_api_key:
if uploaded_files:
all_documents = upload_files(uploaded_files)
elif github_url:
if github_url.startswith("https://github.com"):
api_url = convert_to_api_url(github_url)
all_documents = fetch_and_save_files(api_url)
else:
all_documents = [
Document(
page_content=github_url,
metadata={
"language": "",
"file_name": "Code Block",
"file_extension": ".txt",
},
)
]
splitted_docs.extend(split_docs(all_documents))
st.warning(
f"Total {len(all_documents)} document(s) have been split into {len(splitted_docs)} chunk(s) for API calls.Are you sure you want to continue?"
)
col1, col2 = st.columns(2)
with col1:
st.button("Yes Continue", on_click=continue_generation)
with col2:
st.button("Cancel")
if st.session_state.continue_generation:
if uploaded_files:
all_documents = upload_files(uploaded_files)
elif github_url:
if github_url.startswith("https://github.com"):
api_url = convert_to_api_url(github_url)
all_documents = fetch_and_save_files(api_url)
else:
all_documents = [
Document(
page_content=github_url,
metadata={
"language": "",
"file_name": "Code Block",
"file_extension": ".txt",
},
)
]
splitted_docs.extend(split_docs(all_documents))
for document in splitted_docs:
st.write(f"FileName: {os.path.basename(document.metadata['file_name'])}")
with st.spinner("Generating docstrings..."):
st.code(generate_doc_strings(document, openai_data))
st.session_state.continue_generation = False
if __name__ == "__main__":
main()
| [] |
2024-01-10 | wirapratamaz/Resume | src~ui.py | import streamlit as st
from src.chatbot.chatgpt import openai_key_info, Chatgpt
from src.chatbot.prompts import data_format
from src.data_handler import improve_resume, init_resume, download_pdf, update_resume_data, PDFSizeException
from src.exceptions import ChatbotInitException
from src.utils import is_new_file, is_data_loaded, key_to_tab_name, get_item_key, init_user_info
section_examples = {'summary': 'I have passion for new tech',
'workExperience': 'Tell about my ability to lead projects',
'education': 'Describe my degree type in more details',
'contactInfo': 'phone, Linkedin, etc.'}
def title():
st.title("ChatCV - AI Resume Builder")
def resume_header():
st.text_input('name', st.session_state.resume_data['name'], key="name")
st.text_input('title', st.session_state.resume_data['title'], key="title")
def unknown_error():
st.session_state['user_info'] = init_user_info(error_info, "It's just a glitch in the matrix."
" Try hitting refresh, and if that doesn't work, just imagine yourself in a peaceful place.")
user_info()
def user_info():
if not st.session_state.get('user_info'):
upload_resume_header()
message_type = st.session_state['user_info']['message_type']
message = st.session_state['user_info']['message']
message_type(message)
def upload_resume_header():
st.session_state['user_info'] = init_user_info(st.success, "Upload PDF Resume - Let the magic begin. \n\n"
"This may take a bit... Grub a warm cup of coffee while we working :)")
def upload(uploaded_file):
try:
resume_data = init_resume(uploaded_file)
st.session_state['user_info'] = init_user_info(success_info, "Working on it...")
improve_resume(resume_data)
except PDFSizeException:
st.session_state['user_info'] = init_user_info(error_info, "PDF size max size is 4, try upload again...")
except Exception:
st.session_state['user_info'] = init_user_info(error_info, "PDF upload, try upload again...")
def sidebar():
with st.sidebar:
uploaded_file = st.file_uploader('Upload PDF Resume', type=["PDF"])
if uploaded_file and is_new_file(uploaded_file):
upload(uploaded_file)
st.experimental_rerun()
if is_data_loaded():
st.button("Improve More", on_click=improve_resume)
st.download_button('Download PDF', file_name='out.pdf', mime="application/json", data=download_pdf())
def body():
section_dict = {'contactInfo': contact_info_section, 'summary': summary_section, 'workExperience': list_section,
'education': list_section, 'skills': skills_section}
tabs_names = [key_to_tab_name(key) for key in section_dict.keys()]
tabs = st.tabs(tabs_names)
for tab, key in zip(tabs, section_dict):
section_func = section_dict[key]
with tab:
section_func(key, st.session_state['resume_data'][key])
def init_chatbot():
cols = st.columns([6, 1, 1])
api_key = cols[0].text_input("Enter OpenAI API key")
cols[1].markdown("#")
api_submit = cols[1].button("Submit")
cols[2].markdown("#")
get_info = cols[2].button("Get key")
if get_info:
st.info(f"Get your key at: {openai_key_info}")
if api_submit:
if Chatgpt.validate_api(api_key):
try:
st.session_state['chatbot'] = Chatgpt(api_key)
except ChatbotInitException:
st.session_state['user_info'] = init_user_info(error_info,
"Error with Chatbot loadin, please refresh...")
st.experimental_rerun()
else:
st.error("Not valid API key - try again...")
def summary_section(section_name, summary_data):
st.text_area(section_name, summary_data, key=f'{section_name}', label_visibility='hidden')
recruiter_subsection(section_name, section_examples[section_name])
def list_section(section_name, section_data):
description_key = 'description'
item_keys = list(section_data[0].keys())
item_keys.remove(description_key)
for item_id, section_item in enumerate(section_data):
cols = st.columns(len(item_keys))
for col, key in zip(cols, item_keys):
col.text_input(key, section_item[key], key=f'{section_name}_{item_id}_{key}')
st.text_area(description_key, section_item[description_key], key=f'{section_name}_{item_id}_{description_key}')
recruiter_subsection(section_name, section_example=section_examples[section_name], item_id=item_id)
edit_list_subsection(section_name, section_data, item_id)
st.markdown('***')
def edit_list_subsection(section_name, section_data, item_id):
with st.container():
st.markdown(
"""<style>
.element-container:nth-of-type(1) button {
width: 100%;
}
</style>""",
unsafe_allow_html=True,
)
remove_col, add_col = st.columns(2)
if remove_col.button('Delete', key=f'{section_name}_{item_id}_remove_from_list') and len(section_data) > 1:
del section_data[item_id]
st.experimental_rerun()
if add_col.button('Add', key=f'{section_name}_{item_id}_add_to_list') and len(section_data) < 10:
section_data.append(data_format[section_name][0])
st.experimental_rerun()
def recruiter_subsection(section_name, section_example, item_id=0):
with st.container():
cols = st.columns([3, 10], gap='small')
cols[0].write('\n')
cols[0].write('\n')
button_clicked = cols[0].button("Auto Section Improve", key=f'{section_name}_{item_id}_improve_auto')
trigger_key = 'Add a special request'
user_request_template = f"{trigger_key} to the bot here... e.g. {section_example}."
user_request = cols[1].text_input("section_example", value=user_request_template,
key=f'{section_name}_{item_id}_improve_manual', label_visibility='hidden')
if button_clicked:
user_request = '' if trigger_key in user_request else user_request
section_key = get_item_key(section_name, item_id)
section_text = st.session_state[section_key]
new_section_text = st.session_state['chatbot'].improve_section(section_text, user_request)
update_resume_data(new_section_text, section_name, item_id)
st.experimental_rerun()
def skills_section(section_name, skills_data):
[skills_data.remove(skill) for skill in skills_data if not skill]
num_columns = 3
for skills_row in range(0, len(skills_data), num_columns):
cols = st.columns([3, 1] * num_columns)
skills_row_names = skills_data[skills_row: skills_row + num_columns]
for item_id, skill in enumerate(skills_row_names):
skill_id = skills_row + item_id
cols[item_id * 2].text_input(' ', value=skill, key=f'{section_name}_{skill_id}', label_visibility='hidden')
cols[item_id * 2 + 1].markdown('## ')
if cols[item_id * 2 + 1].button('x', key=f'{section_name}_{skill_id}_remove_from_list'):
del skills_data[skill_id]
st.experimental_rerun()
skill_subsection(section_name)
st.markdown('***')
def skill_subsection(section_name, item_id=0):
key = f'{section_name}_{item_id}_add_skill'
cols = st.columns([12, 1])
new_skill = cols[0].text_input("Add skill", key=key)
cols[1].markdown('##')
clicked = cols[1].button("\+")
if clicked and new_skill:
st.session_state['resume_data'][section_name].append(new_skill)
st.experimental_rerun()
def contact_info_section(section_name, info_data):
keys = sorted(info_data.keys())
for key in keys:
value = info_data[key]
cols = st.columns([12, 1])
cols[0].text_input(key.title(), value, key=f'{section_name}_{key}')
cols[1].markdown('##')
clicked = cols[1].button('\-', key=f'{section_name}_{key}_remove')
if clicked:
del info_data[key]
st.experimental_rerun()
add_contact_subsection(section_name, info_data)
st.markdown('***')
def add_contact_subsection(section_name, info_data):
st.markdown('***')
with st.container():
cols = st.columns([12, 1])
new_key = cols[0].text_input('Add new details', value=f"e.g, {section_examples[section_name]}")
cols[1].markdown('##')
clicked = cols[1].button('\+', key=f'{section_name}_add_details')
if clicked and new_key:
info_data[new_key] = ''
st.experimental_rerun()
# if remove_col.button('Delete', key=f'{section_name}_{item_id}_remove_from_list') and len(section_data) > 1:
# del section_data[item_id]
# st.experimental_rerun()
#
# if add_col.button('Add', key=f'{section_name}_{item_id}_add_to_list') and len(section_data) < 10:
# section_data.append(data_format[section_name][0])
# st.experimental_rerun()
def success_info(message):
st.success(message)
def error_info(message):
st.error(message)
| [
"Add a special request to the bot here... e.g. PLACEHOLDER."
] |
2024-01-10 | jnkstr/privateGPT_llama2 | server~privateGPT.py | from flask import Flask,jsonify, render_template, flash, redirect, url_for, Markup, request
from flask_cors import CORS
from dotenv import load_dotenv
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.callbacks.manager import CallbackManager
from langchain.vectorstores import FAISS #, Chroma
from langchain.llms import LlamaCpp #, GPT4All
import os
import glob
from typing import List
import requests
from huggingface_hub import hf_hub_download
from systemprompt import PROMPT
from langchain.document_loaders import (
CSVLoader,
EverNoteLoader,
PDFMinerLoader,
TextLoader,
UnstructuredEPubLoader,
UnstructuredHTMLLoader,
UnstructuredMarkdownLoader,
UnstructuredODTLoader,
UnstructuredPowerPointLoader,
UnstructuredWordDocumentLoader,
)
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.docstore.document import Document
#from constants import CHROMA_SETTINGS
app = Flask(__name__)
CORS(app)
load_dotenv()
embeddings_model_name = os.environ.get("EMBEDDINGS_MODEL_NAME")
persist_directory = os.environ.get('PERSIST_DIRECTORY')
model_type = os.environ.get('MODEL_TYPE')
model_path = os.environ.get('MODEL_PATH')
model_n_ctx = os.environ.get('MODEL_N_CTX')
llm = None
# Map file extensions to document loaders and their arguments
LOADER_MAPPING = {
".csv": (CSVLoader, {}),
# ".docx": (Docx2txtLoader, {}),
".doc": (UnstructuredWordDocumentLoader, {}),
".docx": (UnstructuredWordDocumentLoader, {}),
".enex": (EverNoteLoader, {}),
".epub": (UnstructuredEPubLoader, {}),
".html": (UnstructuredHTMLLoader, {}),
".md": (UnstructuredMarkdownLoader, {}),
".odt": (UnstructuredODTLoader, {}),
".pdf": (PDFMinerLoader, {}),
".ppt": (UnstructuredPowerPointLoader, {}),
".pptx": (UnstructuredPowerPointLoader, {}),
".txt": (TextLoader, {"encoding": "utf8"}),
# Add more mappings for other file extensions and loaders as needed
}
def load_single_document(file_path: str) -> Document:
ext = "." + file_path.rsplit(".", 1)[-1]
if ext in LOADER_MAPPING:
loader_class, loader_args = LOADER_MAPPING[ext]
loader = loader_class(file_path, **loader_args)
return loader.load()[0]
raise ValueError(f"Nicht unterstütztes Datei-Format: '{ext}'")
def load_documents(source_dir: str) -> List[Document]:
# Loads all documents from source documents directory
all_files = []
for ext in LOADER_MAPPING:
all_files.extend(
glob.glob(os.path.join(source_dir, f"**/*{ext}"), recursive=True)
)
return [load_single_document(file_path) for file_path in all_files]
@app.route('/ingest', methods=['GET'])
def ingest_data():
# Load environment variables
persist_directory = os.environ.get('PERSIST_DIRECTORY')
source_directory = os.environ.get('SOURCE_DIRECTORY', 'source_documents')
embeddings_model_name = os.environ.get('EMBEDDINGS_MODEL_NAME')
# Load documents and split in chunks
print(f"Loading documents from {source_directory}")
chunk_size = 500
chunk_overlap = 50
documents = load_documents(source_directory)
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
texts = text_splitter.split_documents(documents)
print(f"Loaded {len(documents)} documents from {source_directory}")
print(f"Split into {len(texts)} chunks of text (max. {chunk_size} characters each)")
# Create embeddings
embeddings = HuggingFaceEmbeddings(
model_name=embeddings_model_name,
model_kwargs={'device': 'cpu'}
)
# Create vector store
db = FAISS.from_documents(texts, embeddings)
db.save_local('vectorstore/db_faiss')
db = None
return jsonify(response="Success")
@app.route('/get_answer', methods=['POST'])
def get_answer():
query = request.json
embeddings = HuggingFaceEmbeddings(model_name=embeddings_model_name)
vectordb = FAISS.load_local('vectorstore/db_faiss', embeddings)
retriever = vectordb.as_retriever()
if llm==None:
return "Model not downloaded", 400
chain_type_kwargs = {"prompt": PROMPT}
qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever, return_source_documents=True, chain_type_kwargs=chain_type_kwargs)
if query!=None and query!="":
res = qa(query)
answer, docs = res['result'], res['source_documents']
source_data =[]
for document in docs:
source_data.append({"name":document.metadata["source"]})
return jsonify(query=query,answer=answer,source=source_data)
return "Empty Query",400
@app.route('/upload_doc', methods=['POST'])
def upload_doc():
if 'document' not in request.files:
return jsonify(response="Kein Dokument gefunden"), 400
document = request.files['document']
if document.filename == '':
return jsonify(response="Keine Datei ausgewählt"), 400
filename = document.filename
save_path = os.path.join('source_documents', filename)
document.save(save_path)
return jsonify(response="Document upload successful")
''' --Auskommentiert weil der UI Element Button "Download Model" entfernt wurde--
@app.route('/download_model', methods=['GET'])
def download_and_save():
url = 'https://gpt4all.io/models/ggml-gpt4all-j-v1.3-groovy.bin' # Specify the URL of the resource to download
filename = 'ggml-gpt4all-j-v1.3-groovy.bin' # Specify the name for the downloaded file
models_folder = 'models' # Specify the name of the folder inside the Flask app root
if not os.path.exists(models_folder):
os.makedirs(models_folder)
response = requests.get(url,stream=True)
total_size = int(response.headers.get('content-length', 0))
bytes_downloaded = 0
file_path = f'{models_folder}/{filename}'
#if os.path.exists(file_path):
# return jsonify(response="Download completed")
with open(file_path, 'wb') as file:
for chunk in response.iter_content(chunk_size=4096):
file.write(chunk)
bytes_downloaded += len(chunk)
progress = round((bytes_downloaded / total_size) * 100, 2)
print(f'Download Progress: {progress}%')
global llm
callbacks = [StreamingStdOutCallbackHandler()]
lm = GPT4All(model=model_path, n_ctx=model_n_ctx, backend='gptj', callbacks=callbacks, verbose=False)
#return jsonify(response="Download completed")
'''
def load_model():
models_folder = 'models' # Specify the name of the folder inside the Flask app root
model_id="TheBloke/Llama-2-7B-Chat-GGML"
model_basename = "llama-2-7b-chat.ggmlv3.q8_0.bin"
model_path = hf_hub_download(repo_id=model_id, filename=model_basename)
print(model_path)
if os.path.exists(model_path):
global llm
callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
llm = LlamaCpp(model_path=model_path, n_ctx=2048, max_tokens=2048, temperature=0, repeat_penalty=1.15, callback_manager=callback_manager, verbose=True)
if __name__ == "__main__":
load_model()
print("LLM=", llm)
app.run(host="0.0.0.0", port=1337, debug = False)
| [] |
2024-01-10 | jnkstr/privateGPT_llama2 | server~systemprompt.py | from langchain.prompts import PromptTemplate
prompt_template = """Verwenden Sie die folgenden Informationen, um die Frage des Benutzers zu beantworten.
Wenn Sie die Antwort nicht wissen, sagen Sie einfach, dass Sie es nicht wissen, und versuchen Sie nicht, eine Antwort zu erfinden.
Kontext: {context}
Frage: {question}
Geben Sie nur die unten stehende hilfreiche Antwort zurück und sonst nichts.
Hilfreiche Antwort in deutsch:
"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"])
| [
"question",
"Verwenden Sie die folgenden Informationen, um die Frage des Benutzers zu beantworten.\nWenn Sie die Antwort nicht wissen, sagen Sie einfach, dass Sie es nicht wissen, und versuchen Sie nicht, eine Antwort zu erfinden.\n\nKontext: {context}\nFrage: {question}\n\nGeben Sie nur die unten stehende hilfreiche Antwort zurück und sonst nichts.\nHilfreiche Antwort in deutsch:\n",
"context"
] |
2024-01-10 | tesslerc/malmo_rl | utilities~segment_tree.py | # Taken from OpenAI baselines.
# https://github.com/openai/baselines/blob/master/baselines/common/segment_tree.py
import operator
class SegmentTree(object):
def __init__(self, capacity, operation, neutral_element):
"""Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
Can be used as regular array, but with two
important differences:
a) setting item's value is slightly slower.
It is O(lg capacity) instead of O(1).
b) user has access to an efficient `reduce`
operation which reduces `operation` over
a contiguous subsequence of items in the
array.
Paramters
---------
capacity: int
Total size of the array - must be a power of two.
operation: lambda obj, obj -> obj
and operation for combining elements (eg. sum, max)
must for a mathematical group together with the set of
possible values for array elements.
neutral_element: obj
neutral element for the operation above. eg. float('-inf')
for max and 0 for sum.
"""
assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)]
self._operation = operation
def _reduce_helper(self, start, end, node, node_start, node_end):
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
)
def reduce(self, start=0, end=None):
"""Returns result of applying `self.operation`
to a contiguous subsequence of the array.
self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))
Parameters
----------
start: int
beginning of the subsequence
end: int
end of the subsequences
Returns
-------
reduced: obj
result of reducing self.operation over the specified range of array elements.
"""
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx, val):
# index of the leaf
idx += self._capacity
self._value[idx] = val
idx //= 2
while idx >= 1:
self._value[idx] = self._operation(
self._value[2 * idx],
self._value[2 * idx + 1]
)
idx //= 2
def __getitem__(self, idx):
assert 0 <= idx < self._capacity
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
def __init__(self, capacity):
super(SumSegmentTree, self).__init__(
capacity=capacity,
operation=operator.add,
neutral_element=0.0
)
def sum(self, start=0, end=None):
"""Returns arr[start] + ... + arr[end]"""
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum):
"""Find the highest index `i` in the array such that
sum(arr[0] + arr[1] + ... + arr[i - i]) <= prefixsum
if array values are probabilities, this function
allows to sample indexes according to the discrete
probability efficiently.
Parameters
----------
perfixsum: float
upperbound on the sum of array prefix
Returns
-------
idx: int
highest index satisfying the prefixsum constraint
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
idx = 1
while idx < self._capacity: # while non-leaf
if self._value[2 * idx] > prefixsum:
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx]
idx = 2 * idx + 1
return idx - self._capacity
class MinSegmentTree(SegmentTree):
def __init__(self, capacity):
super(MinSegmentTree, self).__init__(
capacity=capacity,
operation=min,
neutral_element=float('inf')
)
def min(self, start=0, end=None):
"""Returns min(arr[start], ..., arr[end])"""
return super(MinSegmentTree, self).reduce(start, end)
| [] |
2024-01-10 | sprenkamp/OnePieceGPT | src~langchain_agent~helper~clean.py | import os
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain.chat_models import ChatOpenAI
def clean_webtext_using_GPT(webtext: str, model_version: str = "gpt-3.5-turbo") -> str:
llm = ChatOpenAI(
temperature=0, # Make as deterministic as possible
model_name=model_version,
)
messages = [
SystemMessage(
content=f""" You are provided with raw text scraped from the web. Please clean the text removing any HTML tags, menu-buttons or other non-textual elements. You should only return information that is relevant to the web page. By doing so do not remove or change the meaning of the text. Retain the old text whenever possible. Also keep information like contact adresses, phone numbers, email addresses, etc. if they are present in the text. If none of the above is present in the text, please return "NO_INFORMATION".
"""
),
HumanMessage(
content=f"{webtext}"
),
]
output = llm(messages)
return output.content | [
" You are provided with raw text scraped from the web. Please clean the text removing any HTML tags, menu-buttons or other non-textual elements. You should only return information that is relevant to the web page. By doing so do not remove or change the meaning of the text. Retain the old text whenever possible. Also keep information like contact adresses, phone numbers, email addresses, etc. if they are present in the text. If none of the above is present in the text, please return \"NO_INFORMATION\".\n ",
"PLACEHOLDER"
] |
2024-01-10 | sprenkamp/OnePieceGPT | src~telegram_bot.py | import os
from telegram import Update
from telegram.ext import Application, CommandHandler, MessageHandler, filters, ContextTypes
from langchain_agent.chat import chat
TOKEN = os.environ.get('telegram_OnePieceNavigator_bot')
BOT_NAME = "@OnePieceNavigator_bot"
async def start_command(update: Update, context: ContextTypes):
"""Send a message when the command /start is issued."""
await update.message.reply_text('Hi! I am a chatbot that can answer your questions about One Piece. Ask me anything!')
async def help_command(update: Update, context: ContextTypes):
"""Send a message when the command /help is issued."""
await update.message.reply_text('Please question about One Piece. I will try my best answer it.')
async def custom_command(update: Update, context: ContextTypes):
"""Send a message when the command /custom is issued."""
await update.message.reply_text('This is a custom command, you can add whatever text you want here.')
# Assuming you have a list to keep track of chat_ids
chat_histories = {}
async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE):
message_type = update.message.chat.type
text = update.message.text
chat_id = update.message.chat.id
# Get the chat history for this chat_id or initialize it if it's a new chat
chat_history = chat_histories.get(chat_id, [])
if message_type == 'group':
if BOT_NAME in text:
new_text = text.replace(BOT_NAME, "").strip()
response, chat_history = chat(new_text, chat_history)
else:
return
else:
# print("User:", text, chat_history)
response, chat_history = chat(text, chat_history)
# print("Bot:", response, chat_history)
await update.message.reply_text(response)
# Update the chat history
chat_histories[chat_id] = chat_history
async def error(update: Update, context: ContextTypes.DEFAULT_TYPE):
print(f'Update {update} caused error {context.error}')
if __name__ == '__main__':
print("Starting bot...")
app = Application.builder().token(TOKEN).build()
# Commands
app.add_handler(CommandHandler('start', start_command))
app.add_handler(CommandHandler('help', help_command))
app.add_handler(CommandHandler('custom', custom_command))
# Messages
app.add_handler(MessageHandler(filters.TEXT, handle_message))
# Errors
app.add_error_handler(error)
print("polling...")
# Polls the telegram server for updates
app.run_polling(poll_interval=0.5) | [] |
2024-01-10 | sprenkamp/OnePieceGPT | src~langchain_agent~chroma_retrieve.py | from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
import chromadb
from tqdm import tqdm
def retrieve_chromadb(collection_name: str="one_piece"):
persistent_client = chromadb.PersistentClient(path=f"data/chroma/{collection_name}/")
vectordb = Chroma(
collection_name=collection_name,
client=persistent_client,
embedding_function=OpenAIEmbeddings(),
)
return vectordb
| [] |
2024-01-10 | sprenkamp/OnePieceGPT | src~langchain_agent~helper~split.py | from langchain.text_splitter import CharacterTextSplitter
text_splitter = CharacterTextSplitter(
# separator="\n",
chunk_size=1000, #try smaller values e.g. 100
chunk_overlap=150,
length_function=len
)
def split_text(text: str):
"""
Split the text into chunks of 1000 characters.
Args:
text (str): Text to be split.
Returns:
list: List of text chunks.
"""
return text_splitter.split_documents(text) | [] |
2024-01-10 | sprenkamp/OnePieceGPT | src~langchain_agent~chroma_save.py | from helper.clean import clean_webtext_using_GPT
from helper.split import split_text
from helper.scrape import scrape_webpage
from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
import chromadb
from tqdm import tqdm
persistent_client = chromadb.PersistentClient(path="data/chroma/one_piece/")
with open("data/queries/websites_to_srape.txt") as f:
URLs = f.readlines()
URLs = [x.strip() for x in URLs]
URLs = [x for x in URLs if x.startswith("http")]
for URL in tqdm(URLs, desc="adding documents from URLs to chromadb"):
initial_doc = scrape_webpage(URL)
docs = split_text(initial_doc)
for i, doc in tqdm(enumerate(docs), desc=URL):
doc.page_content = doc.page_content.replace("\n", " ")
doc.page_content = clean_webtext_using_GPT(doc.page_content)
docs[i].page_content = doc.page_content
docs = [doc for doc in docs if doc.page_content != "NO_INFORMATION"]
vectordb = Chroma.from_documents(
collection_name="one_piece",
client=persistent_client,
documents=docs,
embedding=OpenAIEmbeddings(),
) | [] |
2024-01-10 | sprenkamp/OnePieceGPT | src~langchain_agent~helper~scrape.py | from langchain.document_loaders import WebBaseLoader
def scrape_webpage(URL: str):
"""
Scrape a webpage and return the document.
Args:
URL (str): URL of the webpage to be scraped.
Returns:
Document: Document object containing the scraped webpage."""
loader = WebBaseLoader(URL)
doc = loader.load()
return doc
| [] |
2024-01-10 | peter-xbs/CommonCodes | code_hub~decorates.py | # _*_ coding:utf-8 _*_
"""
@Time: 2022/3/19 6:55 下午
@Author: jingcao
@Email: [email protected]
"""
import time
def calc_time(func):
def _calc_time(*args, **kwargs):
start = time.time()
ret = func(*args, **kwargs)
ed = time.time()
print("%s cost time:%s" % (getattr(func, "__name__"), ed - start))
# l = list(args)
# print("args: %s" % args)
return ret
return _calc_time
### LOG装饰器
# 大家调用前将本cell粘贴到自己 notebook中运行即可
# 大家调用前补充PROJ & USAGE & USER 信息,可自动记录到Log中,方便后续和标注公司对账
from datetime import datetime
PROJ = """测试""" ## 添加项目名称
USAGE = """测试""" ## 添加用途目的
USER = """XX""" ## 添加使用人员
LOG_PATH = 'xx'
def record_time(func):
# 日志目录在./logs/ 下面,每天生成一个日志,记录调用时间和调用次数
log_file = f'{LOG_PATH}/{datetime.today().strftime("%Y-%m-%d")}.log'
def wrapper(*args, **kwargs):
start = datetime.strftime(datetime.now(), "%Y-%m-%d %H:%M:%S")
result = func(*args, **kwargs)
end = datetime.strftime(datetime.now(), "%Y-%m-%d %H:%M:%S")
res = result[0].replace("\n", "")
args_ = str(args)
with open(log_file, 'a') as f:
f.write(
f"{PROJ}_{USAGE}_{USER}_{func.__name__},prompt: {args_}res:{res},start:{start},end:{end},counts:{str(result[1])} \n")
return result[0]
return wrapper
@record_time
def chatgpt_azure(prompt, temperature=0, top_p=0.95):
import openai
import traceback
# 添加配置信息
openai.api_type = "azure"
openai.api_base = "xx" # 请填写您的:终结点(Endpoint)
openai.api_version = "2023-03-15-preview"
openai.api_key = "xx" # 请填写您的:API密钥(API Key)
model_name = "gpt-35-turbo-cmri" # 请填写您的:模型名称
rsp = ''
cnt = 0
while (not rsp and cnt < 2):
try:
rsp = openai.ChatCompletion.create(
engine=model_name,
messages=[
{"role": "user", "content": prompt}
],
temperature=temperature,
top_p=top_p
)
except Exception as e:
err = traceback.format_exc()
print(err)
rsp = ''
print('request error, sleep {} seconds'.format(10))
time.sleep(2)
cnt += 1
try:
res = rsp['choices'][0]['message']['content']
if not isinstance(res, str):
return '', cnt
return res, cnt
except Exception as e:
err = traceback.format_exc()
print(err)
return '', cnt | [] |
2024-01-10 | peter-xbs/CommonCodes | code_hub~nlp_tools.py | # _*_ coding:utf-8 _*_
"""
@Time: 2022/3/24 8:24 下午
@Author: jingcao
@Email: [email protected]
"""
import copy
import json
import time
import requests
from spider import SpiderConfig
def query_ner(content, type_='XBS'):
temp = {
"args": {
"input_json": {
"content": "反复咳嗽5天,发热咽喉疼痛2天。",
"type": "XBS"
}
}
}
url = 'http://118.31.250.16/struct/api/algorithm/emr_struct'
req = copy.deepcopy(temp)
if content.strip():
req['args']['input_json']['content'] = content
if type_:
req['args']['input_json']['type'] = type_
res = requests.post(url, json=req).json()
return res
def query_sym_norm(text):
url = 'http://118.31.52.153:80/api/algorithm/std_norm_api'
tmpl = {
"args": {
"query": "",
"type": "sym"
}
}
tmpl['args']['query'] = text
rsp = requests.get(url, json=tmpl).json()
res = rsp['data']['result']['results']
if not res:
return []
else:
norm = []
for sdic in res:
print()
norm.append(sdic['norm_res'])
return norm
def google_translation(queries, dest="zh-CN"):
"""
注意: pip install googletrans==3.1.0a0 deprececated
REF: https://py-googletrans.readthedocs.io/en/latest/
调用Google翻译 API
:param query:
:param dest:
:return:
"""
from googletrans import Translator
trans = Translator()
dic = {}
res = trans.translate(queries, src='en', dest=dest)
for trans_res in res:
k, v = trans_res.origin, trans_res.text
dic[k] = v
return dic
def google_translation2(query, src='en', dest='zh-CN'):
client = 'client=gtx&dt=t&sl={}&tl={}&q={}'.format(src, dest, query)
url = "https://translate.googleapis.com/translate_a/single?{}".format(client)
print(url)
header = SpiderConfig.get_header()
request_result = requests.get(url, headers=header)
translated_text = json.loads(request_result.text)[0][0][0]
return translated_text
def download_huggingface_dataset(dataset_name, private=False):
"""
许多用法可参考https://blog.csdn.net/qq_56591814/article/details/120653752
"""
from datasets import load_dataset
if private == True:
dataset_dict = load_dataset(
dataset_name, # huggingface上对应的name
use_auth_token='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')
else:
dataset_dict = load_dataset(
dataset_name
)
# 从dataset_dict中获取train/test等具体dataset
dataset = dataset_dict['train'] # 此时Object为Dataset类型
# dataset.to_csv('保存本地') # 类似to_json(), to_parquet()
# 对应load_dataset('parquet', data_files={'train': 'xx.parquet'})
# 或者遍历筛选
# 或者整体保存至disk dataset.save_to_disc('xx.dataset')
# 加载 dataset = load_from_disk('xx.dataset')
# 使用时具体可参考文档
def language_classify(text):
"""
检测文本语言归属
"""
# !pip install langid
import langid
return langid.classify(text)
def encoding_detect(inp):
"""
检测文件编码
"""
import chardet
with open(inp, 'rb') as f:
s = f.read()
res = chardet.detect(s)
encoding = res.get('encoding')
return encoding
def test_chatgpt_response():
import openai
base_url = "https://api.openai-asia.com/v1"
key = "sk-7EfWwMczVQIsGk31ybj9dcQCPbJ7Zco52y8TU91eGZHSKOoW" #del last one
openai.api_base = base_url
openai.api_key = key
rsp = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content":"请介绍一下你自己"}
]
)
rsp = json.dumps(rsp, ensure_ascii=False)
print(rsp)
def chatgpt_api(prompt):
import openai
import traceback
base_url = "https://api.openai-asia.com/v1"
key = "sk-7EfWwMczVQIsGk31ybj9dcQCPbJ7Zco52y8TU91eGZHSKOoW" #del last one
openai.api_base = base_url
openai.api_key = key
rsp = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": prompt}
]
)
try:
res = rsp['choices'][0]['message']['content']
if not isinstance(res, str):
return None
return res
except Exception as e:
err = traceback.format_exc()
print(err)
return None
def hallucination_detect(message):
import openai
base_url = "https://api.openai-asia.com/v1"
key = "sk-7EfWwMczVQIsGk31ybj9dcQCPbJ7Zco52y8TU91eGZHSKOoW" # del last one
openai.api_base = base_url
openai.api_key = key
rsp = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=message
)
return json.dumps(rsp, ensure_ascii=False)
def batch_hallucination_detect():
import pandas as pd
df = pd.read_csv('../data_temp/幻觉测试100条.csv')
lines = []
df2 = pd.read_csv('../data_temp/test.csv')
fin_set = set(df2['instruction'])
for line in df.itertuples():
_, *args = line
instruction, output = getattr(line, 'instruction'), getattr(line, 'output')
if instruction in fin_set:
continue
task = "列举上述回答中不符合事实或者hallucination的部分:"
round1 = {
"role": "user",
"content": instruction
}
round2 = {
"role": "system",
"content": output
}
round3 = {
"role": "user",
"content": task
}
msg = [round1, round2, round3]
try:
rsp = hallucination_detect(msg)
except:
continue
rsp = json.loads(rsp)
print(rsp)
args.append(rsp)
lines.append(args)
time.sleep(0.05)
new_df = pd.DataFrame(lines, columns=list(df.columns) + ['rsp'])
new_df.to_csv('../data_temp/test2.csv', index=False)
if __name__ == '__main__':
# dic = google_translation2('hello world')
# print(dic)
# test_moderations()
# test_chatgpt_response()
# test_moderations()
r = chatgpt_api('素食主义者是愚蠢的')
| [
"请介绍一下你自己",
"反复咳嗽5天,发热咽喉疼痛2天。"
] |
2024-01-10 | ripl/maps | maps~value_estimations.py | #!/usr/bin/env python3
from typing import Callable, List, Sequence, Tuple, Union
import numpy as np
import torch
from torch import nn
import maps
from maps import logger
from maps.helpers.data import to_torch
def _attach_log_prob_to_episodes(pi: nn.Module, transitions, obs_normalizer):
# Compute v_pred and next_v_pred
states = to_torch([b["state"] for b in transitions])
if obs_normalizer:
states = obs_normalizer(states, update=False)
actions = to_torch([b["action"] for b in transitions])
with torch.no_grad(), maps.helpers.evaluating(pi):
distribs = pi(states)
log_probs = distribs.log_prob(actions).cpu().numpy()
for transition, log_prob in zip(transitions, log_probs):
transition["log_prob"] = log_prob
def _attach_value_to_episodes(vfn: nn.Module, transitions, obs_normalizer):
# Compute v_pred and next_v_pred
states = to_torch([b["state"] for b in transitions])
next_states = to_torch([b["next_state"] for b in transitions])
if obs_normalizer:
states = obs_normalizer(states, update=False)
next_states = obs_normalizer(next_states, update=False)
with torch.no_grad(), maps.helpers.evaluating(vfn):
vs_pred = vfn(states)
next_vs_pred = vfn(next_states)
vs_pred = vs_pred.cpu().numpy().ravel()
next_vs_pred = next_vs_pred.cpu().numpy().ravel()
for transition, v_pred, next_v_pred in zip(
transitions, vs_pred, next_vs_pred
):
transition["v_pred"] = v_pred
transition["next_v_pred"] = next_v_pred
def _attach_log_prob_and_value_to_episodes(pi: nn.Module, vfn: nn.Module, transitions, obs_normalizer):
# Compute v_pred and next_v_pred
states = to_torch([b["state"] for b in transitions])
next_states = to_torch([b["next_state"] for b in transitions])
if obs_normalizer:
states = obs_normalizer(states, update=False)
next_states = obs_normalizer(next_states, update=False)
with torch.no_grad(), maps.helpers.evaluating(pi), maps.helpers.evaluating(vfn):
distribs = pi(states)
vs_pred = vfn(states)
next_vs_pred = vfn(next_states)
actions = to_torch([b["action"] for b in transitions])
log_probs = distribs.log_prob(actions).cpu().numpy()
vs_pred = vs_pred.cpu().numpy().ravel()
next_vs_pred = next_vs_pred.cpu().numpy().ravel()
for transition, log_prob, v_pred, next_v_pred in zip(
transitions, log_probs, vs_pred, next_vs_pred
):
transition["log_prob"] = log_prob
transition["v_pred"] = v_pred
transition["next_v_pred"] = next_v_pred
def _attach_advantage_and_value_target_to_episode(episode, gamma, lambd):
"""Add advantage and value target values to an episode."""
assert 'v_pred' in episode[0] and 'next_v_pred' in episode[0], 'Make sure to call _add_log_prob_and_value_to_episodes function first!'
adv = 0.0
for transition in reversed(episode):
td_err = (
transition["reward"]
+ (gamma * transition["nonterminal"] * transition["next_v_pred"])
- transition["v_pred"]
)
adv = td_err + gamma * lambd * adv
transition["adv"] = adv
transition["v_teacher"] = adv + transition["v_pred"]
def _attach_return_and_value_target_to_episode(episode, gamma, bootstrap=False):
"""Add return (i.e., sum of rewards) and value target to episode."""
ret = 0
for i, transition in enumerate(reversed(episode)):
rew = transition["reward"]
if bootstrap and i == 0 and transition['nonterminal']:
ret = rew + gamma * transition['next_v_pred']
else:
ret = rew + gamma * ret
transition['return'] = ret
transition['v_teacher'] = ret
def _attach_mean_return_and_value_target_to_episode(episode):
"""Add return (i.e., sum of rewards) and value target to episode."""
ret = 0
for i, transition in enumerate(reversed(episode)):
rew = transition["reward"]
ret = rew + ret
avg_ret = ret / (i + 1)
transition['return'] = avg_ret
transition['v_teacher'] = avg_ret
def discount_cumsum(x, discount):
import scipy.signal
"""
magic from rllab for computing discounted cumulative sums of vectors.
input:
vector x,
[x0,
x1,
x2]
output:
[x0 + discount * x1 + discount^2 * x2,
x1 + discount * x2,
x2]
Taken from OpenAI spinning up implementation
"""
if isinstance(x, torch.Tensor):
x = x.flip(0)
x = x.cpu().detach().numpy()
else:
x = x[::-1]
return scipy.signal.lfilter([1], [1, float(-discount)], x, axis=0)[::-1]
class RewardToGo:
"""Compute N-step return
"""
def __init__(self, gamma, value_fn: Callable) -> None:
self.gamma = gamma
self.value_fn = value_fn
def calc(self, trajectory: Sequence[Tuple]) -> List:
"""Calculate reward-to-go for each (obs, action) in the trajectory
returns a list of reward-to-go values (target values)
"""
import numpy as np
from torch.utils.data._utils.collate import default_collate
# NOTE: [(o_1, a_1, o_2, done_1), (o_2, a_2, o_3, done_2), ...] => [(o_1, o_2, ...), (a_1, a_2, ...), ...]
obs, action, next_obs, rew, done = default_collate(trajectory)
# Bootstrap the final reward with value
if not done[-1]: # Truncated as time-limit was reached or training-epoch is ended!
logger.info('bootstrapping')
with torch.no_grad():
val = self.value_fn(to_torch(obs[-1])).item()
rew[-1] = val
rew2go = discount_cumsum(rew, self.gamma)
# NOTE: old implementation; this should produce the same result, but is slower.
# for obs, action, next_obs, rew, done in trajectory[::-1]:
# accum = rew + accum * self.gamma
# rev_rew2go.append(accum)
# rew2go = np.asarray(rev_rew2go[::-1])
# elapsed = time.time() - now
# logger.info(f'reward-to-go elapsed time: {elapsed}')
# NOTE: a version that doesn't use scipy.signal.lfilter magic.
# I want to compare if this is any slower at some point.
# import numpy as np
# now = time.time()
# rewards = [traj[3] for traj in trajectory]
# r = np.full(len(rewards), self.gamma) ** np.arange(len(rewards)) * np.array(rewards)
# r = r[::-1].cumsum()[::-1]
# elapsed = time.time() - now
# logger.info(f'numpy reward-to-go elapsed time: {elapsed}')
return rew2go
class GAELambda:
"""Compute GAE-lambda return
"""
def __init__(self, gamma: float, lmd: float, value_fn: Callable) -> None:
self.gamma = gamma
self.lmd = lmd
self.value_fn = value_fn
def calc(self, trajectory: Sequence[Tuple]) -> Union[np.ndarray, torch.Tensor]:
from torch.utils.data._utils.collate import default_collate
from maps.helpers.data import to_torch
# NOTE: [(o_1, o_2, ...), (a_1, a_2, ...), ...] <-- [(o_1, a_1, o_2, done_1), (o_2, a_2, o_3, done_2), ...]
obs, action, next_obs, rew, done = default_collate(trajectory)
# Convert to torch with a device, and evaluate observations to get values
with torch.no_grad():
rew = to_torch(rew)
val = self.value_fn(to_torch(obs)).squeeze()
# delta_t = r_t + \gamma * v(o_{t+1}) - v(o_t)
delta = rew[:-1] + self.gamma * val[1:] - val[:-1]
advantage = discount_cumsum(delta, self.gamma * self.lmd)
# import wandb
# wandb.log({'train/value-preds': wandb.Histogram(val.cpu().numpy())})
return advantage
| [] |
2024-01-10 | ai-ar4s-dev/auto-news | src~llm_agent.py | import os
from langchain import LLMChain
from langchain.text_splitter import (
RecursiveCharacterTextSplitter
)
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.chains.summarize import load_summarize_chain
from langchain.document_loaders import YoutubeLoader
from langchain.document_loaders import WebBaseLoader
from langchain.document_loaders import ArxivLoader
from langchain.utilities.arxiv import ArxivAPIWrapper
import llm_prompts
class LLMWebLoader:
def load(self, url: str) -> list:
if not url:
return []
loader = WebBaseLoader(url)
docs = loader.load()
return docs
class LLMYoutubeLoader:
def load(
self,
url: str,
language: str = "en",
continue_on_failure: bool = False,
) -> list:
if not url:
return []
docs = []
try:
loader = YoutubeLoader.from_youtube_url(
url,
add_video_info=True,
language=language,
continue_on_failure=continue_on_failure
)
docs = loader.load()
except Exception as e:
print(f"[ERROR] LLMYoutubeLoader load transcript failed: {e}")
# traceback.print_exc()
return docs
class LLMArxivLoader:
def isvalid(self, url):
return url.startswith("https://arxiv.org")
def load_from_url(self, url, load_all_available_meta=True, max_chars=4000):
if not self.isvalid(url):
return False, {}
arxiv_id = url.split("/")[-1]
print(f"[_load_arxiv]: arxiv_id: {arxiv_id}")
docs = self.load_doc_from_id(
arxiv_id,
load_all_available_meta=load_all_available_meta,
max_chars=max_chars)
if len(docs) == 0:
print("[_load_arxiv]: Empty docs loaded")
return False, {}
meta = docs[0].metadata
pdf_url = ""
for link in meta['links']:
if "pdf" in link:
pdf_url = link
break
print(f"[_load_arxiv]: Found PDF link: {pdf_url}")
text = f"""
Published: {meta['Published']},
Published First Time: {meta['published_first_time']},
Title: {meta['Title']},
Authors: {meta['Authors']},
Url: {meta['entry_id']},
Primary Category: {meta['primary_category']},
Categories: {meta['categories']},
PDF Link: {pdf_url},
"""
res = {
"doc": docs[0],
"metadata": meta,
"metadata_text": text,
}
return True, res
def load_from_id(self, arxiv_id, load_all_available_meta=True):
"""
Load doc and metadata, doc has 4000 chars limitation
"""
docs = []
try:
docs = ArxivLoader(
query=arxiv_id,
load_all_available_meta=load_all_available_meta
).load()
except Exception as e:
print(f"[ERROR] LLMArxivLoader.load failed: {e}")
return docs
def load_doc_from_id(self, arxiv_id, load_all_available_meta=True, max_chars=100000):
docs = []
try:
arxiv_client = ArxivAPIWrapper(
load_max_docs=100,
load_all_available_meta=load_all_available_meta,
doc_content_chars_max=max_chars,
)
docs = arxiv_client.load(query=arxiv_id)
except Exception as e:
print(f"[ERROR] LLMArxivLoader.load_doc failed: {e}")
return docs
class LLMAgentBase:
def __init__(self, api_key, model_name):
self.api_key = api_key
self.model_name = model_name
self.prompt_tpl = None
self.llm = None
self.llmchain = None
def _init_prompt(self, prompt=None):
prompt_tpl = PromptTemplate(
input_variables=["content"],
template=prompt,
)
print(f"Initialized prompt: {prompt_tpl}")
self.prompt_tpl = prompt_tpl
def init_llm(
self,
provider=None,
model_name=None,
temperature=0
):
provider = provider or os.getenv("LLM_PROVIDER", "openai")
# TODO: support non-openAI llm
if provider == "openai":
model_name = model_name or os.getenv("OPENAI_MODEL", "gpt-3.5-turbo")
else:
print(f"[ERROR] Non-supported LLM provider: {provider}")
raise
llm = ChatOpenAI(
# model_name="text-davinci-003"
model_name=model_name,
# temperature dictates how whacky the output should be
# for fixed response format task, set temperature = 0
temperature=temperature)
self.llm = llm
self.llmchain = LLMChain(llm=self.llm, prompt=self.prompt_tpl)
print(f"LLM chain initalized, provider: {provider}, model_name: {model_name}, temperature: {temperature}")
def get_num_tokens(self, text):
return self.llm.get_num_tokens(text)
class LLMAgentCategoryAndRanking(LLMAgentBase):
def __init__(self, api_key="", model_name="gpt-3.5-turbo"):
super().__init__(api_key, model_name)
def init_prompt(self, prompt=None):
prompt = prompt or llm_prompts.LLM_PROMPT_CATEGORY_AND_RANKING_TPL2
self._init_prompt(prompt)
def run(self, text: str):
"""
@return something like below
{'topics': [
{'topic': 'Jeff Dean', 'category': 'Person', 'score': 0.8},
{'topic': 'Verena Rieser', 'category': 'Person', 'score': 0.7},
{'topic': 'Google', 'category': 'Company', 'score': 0.9},
{'topic': 'DeepMind', 'category': 'Company', 'score': 0.9},
{'topic': 'Research Scientist', 'category': 'Position', 'score': 0.8}],
'overall_score': 0.82
}
"""
tokens = self.get_num_tokens(text)
print(f"[LLM] Category and Ranking, number of tokens: {tokens}")
response = self.llmchain.run(text)
return response
class LLMAgentSummary(LLMAgentBase):
def __init__(self, api_key="", model_name="gpt-3.5-turbo"):
super().__init__(api_key, model_name)
def init_prompt(self, map_prompt=None, combine_prompt=None):
self.map_prompt = map_prompt
self.combine_prompt = combine_prompt
if not self.combine_prompt:
translation_lang = os.getenv("TRANSLATION_LANG")
print(f"[LLMAgentSummary] translation language: {translation_lang}")
prompt_no_translation = llm_prompts.LLM_PROMPT_SUMMARY_COMBINE_PROMPT
prompt_with_translation = llm_prompts.LLM_PROMPT_SUMMARY_COMBINE_PROMPT2 + llm_prompts.LLM_PROMPT_SUMMARY_COMBINE_PROMPT2_SUFFIX.format(translation_lang, translation_lang)
prompt_tpl = prompt_with_translation if translation_lang else prompt_no_translation
self.combine_prompt = prompt_tpl
self.combine_prompt_tpl = PromptTemplate(
template=self.combine_prompt,
input_variables=["text"])
print(f"[LLMAgentSummary] Initialized prompt: {self.combine_prompt_tpl}")
def init_llm(
self,
provider=None,
model_name=None,
temperature=0,
chain_type="map_reduce",
verbose=False
):
provider = provider or os.getenv("LLM_PROVIDER", "openai")
# TODO: support non-openAI llm
if provider == "openai":
model_name = model_name or os.getenv("OPENAI_MODEL", "gpt-3.5-turbo")
else:
print(f"[ERROR] Non-supported LLM provider: {provider}")
raise
llm = ChatOpenAI(
# model_name="text-davinci-003"
model_name=model_name,
# temperature dictates how whacky the output should be
# for fixed response format task, set temperature = 0
temperature=temperature)
self.llm = llm
self.llmchain = load_summarize_chain(
self.llm,
combine_prompt=self.combine_prompt_tpl,
chain_type=chain_type,
verbose=verbose)
print(f"[LLMAgentSummary] LLM chain initalized, provider: {provider}, model_name: {model_name}, temperature: {temperature}, chain_type: {chain_type}")
def run(
self,
text: str,
chunk_size=None,
chunk_overlap=None,
):
chunk_size = chunk_size or int(os.getenv("TEXT_CHUNK_SIZE", 2048))
chunk_overlap = chunk_overlap or int(os.getenv("TEXT_CHUNK_OVERLAP", 256))
print(f"[LLM] input text ({len(text)} chars), chunk_size: {chunk_size}, chunk_overlap: {chunk_overlap}, text: {text:200}")
if not text:
print("[LLM] Empty input text, return empty summary")
return ""
tokens = self.get_num_tokens(text)
print(f"[LLM] Summary, number of tokens needed: {tokens}")
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap
)
docs = text_splitter.create_documents([text])
print(f"[LLM] number of splitted docs: {len(docs)}")
summary_resp = self.llmchain.run(docs)
return summary_resp
| [
"content",
"llm_prompts.LLM_PROMPT_SUMMARY_COMBINE_PROMPT2 + llm_prompts.LLM_PROMPT_SUMMARY_COMBINE_PROMPT2_SUFFIX.format(translation_lang, translation_lang)"
] |
2024-01-10 | ai-ar4s-dev/auto-news | src~embedding_openai.py | import os
import json
import openai
from embedding import Embedding
from db_cli import DBClient
import utils
class EmbeddingOpenAI(Embedding):
def __init__(self, model_name="openai"):
super().__init__(model_name)
print("Initialized EmbeddingOpenAI")
def dim(self):
return 1536
def getname(self, start_date, prefix="news"):
return f"{prefix}_embedding__{start_date}".replace("-", "_")
def create(
self,
text: str,
model_name="text-embedding-ada-002",
num_retries=3
):
"""
It creates the embedding with 1536 dimentions by default
"""
api_key = os.getenv("OPENAI_API_KEY")
emb = None
for i in range(1, num_retries + 1):
try:
emb = openai.Embedding.create(
input=[text],
api_key=api_key,
model=model_name)
except openai.error.RateLimitError as e:
print(f"[ERROR] RateLimit error during embedding ({i}/{num_retries}): {e}")
if i == num_retries:
raise
except openai.error.APIError as e:
print(f"[ERROR] Failed during embedding ({i}/{num_retries}): {e}")
if i == num_retries:
raise
return emb["data"][0]["embedding"]
def get_or_create(
self,
text: str,
source="",
page_id="",
db_client=None,
key_ttl=86400 * 30
):
"""
Get embedding from cache (or create if not exist)
"""
client = db_client or DBClient()
embedding = client.get_milvus_embedding_item_id(
source, page_id)
if not embedding:
embedding = self.create(text)
# store embedding into redis (ttl = 1 month)
client.set_milvus_embedding_item_id(
source, page_id, json.dumps(embedding),
expired_time=key_ttl)
else:
embedding = utils.fix_and_parse_json(embedding)
return embedding
| [] |
2024-01-10 | ChuloAI/code-it | code_it~langchain~code_it_tool.py | import logging
from code_it.code_editor.python_editor import PythonCodeEditor
from code_it.models import HTTPBaseLLM
from code_it.task_executor import TaskExecutor, TaskExecutionConfig
from langchain.agents import Tool
class CodeItTool:
def __init__(self, model_builder: HTTPBaseLLM, config: TaskExecutionConfig) -> None:
self.model_builder = model_builder
self.config = config
if config.log_to_stdout:
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[logging.StreamHandler()],
)
def execute_task(self, task):
code_editor = PythonCodeEditor()
task_executor = TaskExecutor(code_editor, self.model_builder, self.config)
return task_executor.execute(task)
def build_execute_task(self):
return Tool(
name="ExecuteCodingTask",
func=self.execute_task,
description="""Use it to execute a coding task. Example:
Action: ExecuteCodingTask
Action Input:
Print hello world to the terminal
Observation: <code execution logs>
""",
)
| [] |
2024-01-10 | ChuloAI/code-it | code_it~langchain~base_langchain_tool_mixin.py | from code_it.code_editor.base import CodeEditorTooling
from langchain.agents import Tool
class LangchainToolMixin(CodeEditorTooling):
def __init__(self) -> None:
super().__init__()
def build_add_code_tool(self):
return Tool(
name="CodeEditorAddCode",
func=self.add_code,
description="""Use to add new lines of code. Example:
Action: CodeEditorAddCode
Action Input:
print("foo bar")
Observation: print("foo bar")
Example 2. One can also use it to add several lines of code simultaneously:
Action: CodeEditorAddCode
Action Input:
x = 2 + 3
Observation: x = 2 + 3
""",
)
def build_change_code_line_tool(self):
return Tool(
name="CodeEditorChangeCodeLine",
func=self.change_code_line,
description="""Use to modify an existing line of code. First line of input is line number and second line is new line of code to insert.
Example that modifies line 3:
Source Code:
def my_func(x, y):
return x * y
my_func(2, 3)
Action: CodeEditorChangeCodeLine
Action Input:
3
print("Line 3 now prints this")
Observation:
my_func(x, y):
return x * y
print("Line 3 now prints this")
""",
)
def build_delete_code_lines_tool(self):
return Tool(
name="CodeEditorDeleteLine",
func=self.delete_code_lines,
description="""Use to delete lines of code.
Example, to delete lines 1 and 3 of the source code.
Source Code:
def my_func(x, y):
return x * y
my_func(2, 3)
Action: CodeEditorDeleteLine
Action Input:
1, 3
Observation:
return x * y
""",
)
def build_run_tool(self):
return Tool(
name="CodeEditorRunCode",
func=self.run_code,
description="""Use to execute the script. Should always be called like this:
Action: CodeEditorRunCode
Action Input:
Observation:
Observation:Program Succeeded
Stdout:b'Hello, world!'
Stderr:b''
Thought: In this example, the output of the program was b'Hello, world!'
Task Completed: the task was successfully completed
Example 2 (failure example):
Action: CodeEditorRunCode
Action Input:
Observation:
Observation:Program Failed
Stdout:b''
Stderr:b''^^^^^\nSyntaxError: invalid syntax\n'
Thought: In this example, the program failed due to SyntaxError
""",
)
def build_display_code_tool(self):
return Tool(
name="CodeEditorDisplayCode",
func=self.display_code,
description="""Use to display current source code. Example:
Action: CodeEditorDisplayCode
Action Input:
Observation:
print("foo bar")
""",
)
| [] |
2024-01-10 | ChuloAI/code-it | code_it~langchain~python_langchain_tool_mixin.py | from code_it.langchain.base_langchain_tool_mixin import LangchainToolMixin
from code_it.code_editor.python_editor import PythonCodeEditor
from langchain.agents import Tool
class LangchainPythonToolMixin(LangchainToolMixin, PythonCodeEditor):
def __init__(self, filename="persistent_source.py") -> None:
super().__init__()
self.filename = filename
# Always create env
self.create_env()
def pip_install(self, dependency):
"""Promptly install dependencies."""
# Trim pip install, we're already injecting it.
dependency = dependency.replace("pip install", "").strip()
self.add_dependency(dependency)
completed_process = self.install_dependencies()
succeeded = "Succeeded" if completed_process.returncode == 0 else "Failed"
stdout = completed_process.stdout
stderr = completed_process.stderr
return f"Program {succeeded}\nStdout:{stdout}\nStderr:{stderr}"
def build_pip_install(self):
return Tool(
name="PipInstall",
func=self.pip_install,
description="""Use to install a new dependency. Example:
Action: PipInstall
Action Input:
requests
Observation: <result of installation>
""",
)
| [] |
2024-01-10 | Tylerbryy/zinbo | src~email_processing.py | from typing import Dict, List, Union
from googleapiclient.discovery import Resource
from openai import OpenAI
from colorama import Fore
from src.email_evaluation import evaluate_email
import json
processed_emails_details = [] # Global list to store processed email details
def process_email(gmail: Resource, message_info: Dict[str, Union[str, List[str]]], email_data_parsed: Dict[str, Union[str, List[str]]], user_first_name: str, user_last_name: str, client: OpenAI, action: str, processed_emails_file_path: str) -> int:
# Evaluate email
if evaluate_email(email_data_parsed, user_first_name, user_last_name, client):
# Prepare email details for tracking
email_details = {
'id': message_info['id'],
'subject': email_data_parsed.get('subject', ''),
'from': email_data_parsed.get('from', ''),
'email_contents': email_data_parsed['body'],
'action': action
}
if action == 'delete':
print(Fore.LIGHTYELLOW_EX + "Email is not worth the time, deleting" + Fore.RESET)
# Delete email
try:
gmail.users().messages().delete(userId='me', id=message_info['id']).execute()
print(Fore.LIGHTGREEN_EX + "Email deleted successfully" + Fore.RESET)
except Exception as e:
print(Fore.LIGHTRED_EX + f"Failed to delete email: {e}" + Fore.RESET)
return 0
elif action == 'read':
print(Fore.LIGHTYELLOW_EX + "Email is not worth the time, marking as read" + Fore.RESET)
# Remove UNREAD label
try:
gmail.users().messages().modify(userId='me', id=message_info['id'], body={'removeLabelIds': ['UNREAD']}).execute()
print(Fore.LIGHTGREEN_EX + "Email marked as read successfully" + Fore.RESET)
except Exception as e:
print(Fore.LIGHTRED_EX + f"Failed to mark email as read: {e}" + Fore.RESET)
return 0
# Append to the global list
processed_emails_details.append(email_details)
# Write to the file after each action
with open(processed_emails_file_path, 'w') as file:
json.dump(processed_emails_details, file, indent=4)
return 1
else:
print(Fore.LIGHTBLUE_EX + "Email is worth the time, leaving as unread" + Fore.RESET)
return 0
def report_statistics(total_unread_emails: int, total_pages_fetched: int, total_marked_as_read: int, model_used: str) -> None:
print("\n")
header = "Statistics Report"
print(f"{Fore.LIGHTCYAN_EX}{header.center(50)}{Fore.RESET}")
print(f"{Fore.LIGHTCYAN_EX}{'-' * 50}{Fore.RESET}")
stats = {
'Total unread emails fetched': total_unread_emails,
'Total pages fetched': total_pages_fetched,
'Total emails marked as read': total_marked_as_read,
'Final number of unread emails': total_unread_emails - total_marked_as_read,
'Language model used': model_used
}
for key, value in stats.items():
print(f"{Fore.LIGHTYELLOW_EX}{key:<35}{Fore.RESET}{value:<15}")
print(f"{Fore.LIGHTCYAN_EX}{'-' * 50}{Fore.RESET}")
footer = "End of Report"
print(f"{Fore.LIGHTCYAN_EX}{footer.center(50)}{Fore.RESET}")
print("\n") | [] |
2024-01-10 | Tylerbryy/zinbo | src~language_model_client.py | from llama_cpp import Llama
import os
from openai import OpenAI
class LanguageModelClient:
def __init__(self, model_name: str):
self.model_name = model_name
def create_chat_completion(self, messages: list, max_tokens: int):
raise NotImplementedError
class OpenAIClient(LanguageModelClient):
def __init__(self, api_key: str):
super().__init__(model_name="gpt-4-1106-preview")
self.client = OpenAI(api_key=api_key)
def create_chat_completion(self, messages: list, max_tokens: int):
return self.client.chat.completions.create(
model="gpt-4-1106-preview",
messages=messages,
max_tokens=max_tokens,
temperature=0.0,
)
class HermesClient(LanguageModelClient):
def __init__(self, model_path: str, n_ctx: int, n_batch: int, chat_format: str, verbose: bool):
super().__init__(model_name="openhermes-2.5-mistral-7b")
hermes_params = {
"model_path": model_path,
"n_ctx": n_ctx,
"n_batch": n_batch,
"chat_format": chat_format,
"verbose": verbose
}
operating_system = os.getenv("OPERATING_SYSTEM")
if operating_system == "Windows":
hermes_params["n_gpu_layers"] = 50
self.client = Llama(**hermes_params)
def create_chat_completion(self, messages: list, max_tokens: int):
response = self.client.create_chat_completion(messages=messages, max_tokens=3, temperature=0.0)
return response
class LlamaClient(LanguageModelClient):
def __init__(self, model_path: str, n_ctx: int, n_batch: int, chat_format: str, verbose: bool):
super().__init__(model_name="llama-2-7B")
llama_params = {
"model_path": model_path,
"n_ctx": n_ctx,
"n_batch": n_batch,
"chat_format": chat_format,
"verbose": verbose
}
operating_system = os.getenv("OPERATING_SYSTEM")
if operating_system == "Windows":
llama_params["n_gpu_layers"] = 50
self.client = Llama(**llama_params)
def create_chat_completion(self, messages: list, max_tokens: int):
response = self.client.create_chat_completion(messages=messages, temperature=0.0, max_tokens=2)
return response | [] |
2024-01-10 | Tylerbryy/zinbo | src~email_evaluation.py | from typing import Dict, List, Union
from openai import OpenAI
from src.language_model_client import OpenAIClient, LlamaClient, HermesClient
def evaluate_email(email_data: Dict[str, Union[str, List[str]]], user_first_name: str, user_last_name: str, client: OpenAI) -> bool:
MAX_EMAIL_LEN = 3000
system_message: Dict[str, str] = {
"role": "system",
"content": (
"Your task is to assist in managing the Gmail inbox of a busy individual, "
f"{user_first_name} {user_last_name}, by filtering out promotional emails "
"from their personal (i.e., not work) account. Your primary focus is to ensure "
"that emails from individual people, whether they are known family members (with the "
f"same last name), close acquaintances, or potential contacts {user_first_name} might be interested "
"in hearing from, are not ignored. You need to distinguish between promotional, automated, "
"or mass-sent emails and personal communications.\n\n"
"Respond with \"True\" if the email is promotional and should be ignored based on "
"the below criteria, or \"False\" otherwise. Remember to prioritize personal "
"communications and ensure emails from genuine individuals are not filtered out.\n\n"
"Criteria for Ignoring an Email:\n"
"- The email is promotional: It contains offers, discounts, or is marketing a product "
"or service.\n"
"- The email is automated: It is sent by a system or service automatically, and not a "
"real person.\n"
"- The email appears to be mass-sent or from a non-essential mailing list: It does not "
f"address {user_first_name} by name, lacks personal context that would indicate it's personally written "
"to her, or is from a mailing list that does not pertain to her interests or work.\n\n"
"Special Consideration:\n"
"- Exception: If the email is from an actual person, especially a family member (with the "
f"same last name), a close acquaintance, or a potential contact {user_first_name} might be interested in, "
"and contains personalized information indicating a one-to-one communication, do not mark "
"it for ignoring regardless of the promotional content.\n\n"
"- Additionally, do not ignore emails requiring an action to be taken for important matters, "
"such as needing to send a payment via Venmo, but ignore requests for non-essential actions "
"like purchasing discounted items or signing up for rewards programs.\n\n"
"Be cautious: If there's any doubt about whether an email is promotional or personal, "
"respond with \"False\".\n\n"
"The user message you will receive will have the following format:\n"
"Subject: <email subject>\n"
"To: <to names, to emails>\n"
"From: <from name, from email>\n"
"Cc: <cc names, cc emails>\n"
"Gmail labels: <labels>\n"
"Body: <plaintext body of the email>\n\n"
"Your response must be:\n"
"\"True\" or \"False\""
)
}
# Check if 'body' key exists in email_data
if 'body' not in email_data:
print("Email data is missing the 'body' key.")
return False
truncated_body = email_data['body'][:MAX_EMAIL_LEN] + ("..." if len(email_data['body']) > MAX_EMAIL_LEN else "")
user_message: Dict[str, str] = {
"role": "user",
"content": (
f"Subject: {email_data['subject']}\n"
f"To: {email_data['to']}\n"
f"From: {email_data['from']}\n"
f"Cc: {email_data['cc']}\n"
f"Gmail labels: {email_data['labels']}\n"
f"Body: {truncated_body}"
)
}
# Send the messages to GPT-4, TODO add retry logic
try:
completion = client.create_chat_completion(
messages=[system_message, user_message],
max_tokens=1
)
except Exception as e:
print(f"Failed to evaluate email: {e}")
return False
# Extract and return the response
if isinstance(client, OpenAIClient):
return completion.choices[0].message.content.strip() == "True"
elif isinstance(client, LlamaClient):
return completion['choices'][0]['message']['content'].strip() == "True"
elif isinstance(client, HermesClient):
return completion['choices'][0]['message']['content'].replace('\n', '').strip() == "True"
| [
"Subject: PLACEHOLDER\nTo: PLACEHOLDER\nFrom: PLACEHOLDER\nCc: PLACEHOLDER\nGmail labels: PLACEHOLDER\nBody: email_data['body'][:MAX_EMAIL_LEN] + (\"...\" if len(email_data['body']) > MAX_EMAIL_LEN else \"\")",
"Your task is to assist in managing the Gmail inbox of a busy individual, PLACEHOLDER PLACEHOLDER, by filtering out promotional emails from their personal (i.e., not work) account. Your primary focus is to ensure that emails from individual people, whether they are known family members (with the same last name), close acquaintances, or potential contacts PLACEHOLDER might be interested in hearing from, are not ignored. You need to distinguish between promotional, automated, or mass-sent emails and personal communications.\n\nRespond with \"True\" if the email is promotional and should be ignored based on the below criteria, or \"False\" otherwise. Remember to prioritize personal communications and ensure emails from genuine individuals are not filtered out.\n\nCriteria for Ignoring an Email:\n- The email is promotional: It contains offers, discounts, or is marketing a product or service.\n- The email is automated: It is sent by a system or service automatically, and not a real person.\n- The email appears to be mass-sent or from a non-essential mailing list: It does not address PLACEHOLDER by name, lacks personal context that would indicate it's personally written to her, or is from a mailing list that does not pertain to her interests or work.\n\nSpecial Consideration:\n- Exception: If the email is from an actual person, especially a family member (with the same last name), a close acquaintance, or a potential contact PLACEHOLDER might be interested in, and contains personalized information indicating a one-to-one communication, do not mark it for ignoring regardless of the promotional content.\n\n- Additionally, do not ignore emails requiring an action to be taken for important matters, such as needing to send a payment via Venmo, but ignore requests for non-essential actions like purchasing discounted items or signing up for rewards programs.\n\nBe cautious: If there's any doubt about whether an email is promotional or personal, respond with \"False\".\n\nThe user message you will receive will have the following format:\nSubject: <email subject>\nTo: <to names, to emails>\nFrom: <from name, from email>\nCc: <cc names, cc emails>\nGmail labels: <labels>\nBody: <plaintext body of the email>\n\nYour response must be:\n\"True\" or \"False\""
] |
2024-01-10 | rachidoutaleb/viperfuse | main_app.py | import json
import subprocess
import threading
import time
import tkinter as tk
import customtkinter as ctk
import cv2
import os
import speech_recognition as sr
import nmap
import openai
import pyfiglet
from datetime import datetime
from tkinter import messagebox
from PyQt5 import QtGui, QtCore
from PyQt5.QtCore import QThread, QTimer, pyqtSignal
from PyQt5.QtGui import QStandardItem, QStandardItemModel
from PyQt5.QtWidgets import QApplication, QCompleter, QHBoxLayout, QLabel, QLineEdit, QMainWindow, QPushButton, \
QTextEdit, QVBoxLayout, QWidget, QToolButton
from cryptography.fernet import Fernet
from fuzzywuzzy import fuzz
from scapy.all import ARP, Ether, srp
import qdarktheme
import face_recognition
import pickle
import re
from gtts import gTTS
from playsound import playsound
BASE_DIR = "face_encodings"
ICON_PATH = 'logo2.png'
face_classifier = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
ctk.set_appearance_mode("System")
r = sr.Recognizer()
#check si utilisateur demande d'executer un programe
def check_if_execute(input: str)->str:
split_str = input.rsplit()
x = check_if_in_string(split_str, "execute") | check_if_in_string(split_str, "start") | check_if_in_string(split_str, "open")
if not x:
return ''
net_scan = check_if_in_string(split_str, "networ") & check_if_in_string(split_str, "analyzer")
port_scan = check_if_in_string(split_str, "port") & check_if_in_string(split_str, "scann")
ip_scan = check_if_in_string(split_str, "ip") & check_if_in_string(split_str, "scann")
file_intg = check_if_in_string(split_str, "file") & check_if_in_string(split_str, "integrity")
if net_scan: return "open network analyzer"
if ip_scan: return "open ip scanner"
if port_scan: return "open scan port"
if file_intg: return "open file integrity"
else:
return "You didn't specify which program !"
def execute_script(path="portscanner.py"):
script_path = path
result = subprocess.run(["python", script_path], capture_output=True, text=True)
if result.returncode == 0:
print("Script executed successfully.")
else:
print("Script execution failed.")
print("Error message:", result.stderr)
#Detect face and draw box
def detect_bounding_box(vid,face_classifier=face_classifier):
gray_image = cv2.cvtColor(vid, cv2.COLOR_BGR2GRAY)
faces = face_classifier.detectMultiScale(gray_image, 1.1, 5, minSize=(40, 40))
for (x, y, w, h) in faces:
cv2.rectangle(vid, (x, y), (x + w, y + h), (0, 255, 0), 4)
return faces
def register_face(email):
if not os.path.exists(BASE_DIR):
os.mkdir(BASE_DIR)
#
cap = cv2.VideoCapture(0)
while True:
ret, frame = cap.read()
if frame is not None and frame.size > 0:
faces = detect_bounding_box(frame)
cv2.imshow("Register Your Face", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord('\n') or key == ord('\r'):
captured_frame = frame.copy()
break
else:
print("Error: Frame not captured or empty.")
break
cap.release()
cv2.destroyAllWindows()
if captured_frame is not None:
face_encodings = face_recognition.face_encodings(captured_frame)
if face_encodings:
encoding_path = os.path.join(BASE_DIR, f"{email}.pkl")
with open(encoding_path, "wb") as f:
pickle.dump(face_encodings[0], f)
return "Face encoding saved."
else:
return "No face detected. Please try again."
else:
return "No frame captured. Please check the camera connection."
def login_face(email):
encoding_path = os.path.join(BASE_DIR, f"{email}.pkl")
if not os.path.exists(encoding_path):
return "No encoding found for this email. Please register first."
with open(encoding_path, "rb") as f:
known_encoding = pickle.load(f)
cap = cv2.VideoCapture(0)
while True:
ret, frame = cap.read()
if frame is not None and frame.size > 0:
faces = detect_bounding_box(frame)
cv2.imshow("Login with Your Face", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord('\n') or key == ord('\r'):
captured_frame = frame.copy()
break
else:
print("Error: Frame not captured or empty.")
break
cap.release()
cv2.destroyAllWindows()
if captured_frame is not None:
face_encodings = face_recognition.face_encodings(captured_frame)
if face_encodings:
matches = face_recognition.compare_faces([known_encoding], face_encodings[0])
if matches[0]:
return "Face authenticated. You're logged in!"
else:
return "Face not recognized. Please try again."
else:
return "No face detected. Please try again."
else:
return "No frame captured. Please check the camera connection."
class FaceRecognitionApp:
def __init__(self, root):
self.root = root
self.root.title("ViperFuse")
self.root.geometry("400x300")
self.action_var = ctk.StringVar(value='login')
self.email_var = ctk.StringVar()
self.action_choices = ['login', 'register']
self.app_closed = False
self.create_widgets()
#to skip face auth
self.open_chatbot_ui()
return
def create_widgets(self):
ctk.CTkLabel(self.root, text="Welcome To ViperFuse", font=("Helvetica", 24)).pack(pady=20)
action_frame = ctk.CTkFrame(self.root,fg_color='transparent')
action_frame.pack(pady=10)
ctk.CTkLabel(action_frame, text="Choose action: ").pack(side=tk.LEFT)
self.action_menu = ctk.CTkOptionMenu(action_frame, variable=self.action_var, values=self.action_choices,width=90)
#self.action_menu.pack(side=tk.LEFT)
#
self.segemented_button = ctk.CTkSegmentedButton(master=action_frame,
values=self.action_choices,
variable=self.action_var)
self.segemented_button.pack(padx=20, pady=10,side=tk.LEFT)
#
email_frame = ctk.CTkFrame(self.root,fg_color='transparent')
email_frame.pack(pady=10)
ctk.CTkLabel(email_frame, text="Enter email: ").pack(side=tk.LEFT)
email_entry = ctk.CTkEntry(email_frame, textvariable=self.email_var,width=200)
email_entry.pack(side=tk.LEFT)
button_frame = ctk.CTkFrame(self.root)
button_frame.pack(pady=10)
register_button = ctk.CTkButton(button_frame, text="Proceed", command=self.process_action)
register_button.pack(side=tk.LEFT)
def open_chatbot_ui(self):
chatbot_app = QApplication([])
qdarktheme.setup_theme() #dark theme
chatbot_window = ChatbotUI()
chatbot_window.show()
chatbot_app.exec_()
def process_action(self):
action = self.action_var.get().lower()
email = self.email_var.get()
if action == "register":
result = register_face(email)
if result == "Face encoding saved.":
messagebox.showinfo("Success", result)
else:
messagebox.showerror("Error", result)
elif action == "login":
result = login_face(email)
if result == "Face authenticated. You're logged in!":
self.close_app()
self.open_chatbot_ui()
else:
messagebox.showerror("Error", result)
else:
messagebox.showerror("Error", "Invalid action. Please choose 'login' or 'register'.")
def close_app(self):
self.app_closed = True
self.root.destroy()
def check_app_closed(self):
if self.app_closed:
execute_script()
else:
self.root.after(1000, self.check_app_closed)
def check_if_in_string(input: list, word: str):
splt = input
for i in splt:
if fuzz.ratio(word.lower(), i.lower()) > 70:
return True
return False
def check_and_close(app):
while True:
if not app.is_alive():
execute_script()
break
time.sleep(1)
class ChatbotUI(QMainWindow):
openai.api_key = "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"
def __init__(self):
super().__init__()
with open("intentions.json", "r") as file:
self.intentions = json.load(file)["intentions"]
self.current_process = None
self.init_ui()
self.setWindowIcon(QtGui.QIcon(ICON_PATH)) #logo
def send_message(self):
user_message = self.user_input.text()
response_found = False
bot_response = "Je n'ai pas compris votre demande. Veuillez reformuler."
#check si utiliosateur demande d'excuter
_check = check_if_execute(user_message)
user_message = _check if (_check != '') else user_message
# List of commands to check for
commands = ["ls", "cat", "mkdir", "grep", "tr", "cut", "sed", "cp", "mv"]
commands += ["dir" ," type" , "mkdir" ,"cp" , "mv"]
# Check if the user message starts with a command
for command in commands:
if user_message.lower().startswith(command):
try:
# The command is the first word, the rest are arguments
command_args = user_message.split(" ")
result = subprocess.run(command_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
bot_response = result.stdout.decode()
if result.stderr:
bot_response += "\nErrors:\n" + result.stderr.decode()
response_found = True
except Exception as e:
bot_response = "Error executing command: " + str(e)
response_found = True
if user_message:
self.chat_text.append("User: " + user_message)
self.user_input.clear()
# une fonction pour recherchez intention correspondante
def find_best_response_from_file(input,intentions)->tuple[bool,str]:
possible_rep: list[tuple[int, str, str]] = []
# check possible rep
for intent in intentions:
for phrase in intent["patterns"]:
perc = fuzz.ratio(phrase.lower(), input.lower())
if perc > 65:
possible_rep.append((perc, phrase, intent['response']))
# choose rep
if len(possible_rep) > 0:
best_rep = max(possible_rep, key=lambda x: x[0]) # rep with highest percenatge
response_found = True
bot_response = best_rep[2]
else:
response_found = False
bot_response = ''
#debug information :
possible_rep.sort(key=lambda x: x[0])
for x in possible_rep:pass
#print(x)
#
return response_found,bot_response
response_found, bot_response = find_best_response_from_file(user_message,self.intentions)
if "team" in user_message.lower():
bot_response = "Mahmoud Charif\nRachid Outaleb\nIsmail Ouzaid\nZineb "
response_found = True
# Check for clear command
elif user_message.lower() == "clear":
self.clear_chat()
return
elif fuzz.ratio("open scan port", user_message.lower()) > 70:
bot_response = "Ouverture de ScanPort..."
response_found = True
self.type_message("AI: ", bot_response)
QTimer.singleShot(5000, self.open_port_scanner)
return
elif fuzz.ratio("open ip scanner", user_message.lower()) > 70:
bot_response = "Ouverture de IP Scanner..."
response_found = True
self.type_message("AI: ", bot_response)
QTimer.singleShot(5000, self.open_network_scanner)
return
elif fuzz.ratio("open file integrity", user_message.lower()) > 70:
bot_response = "Ouverture de File integrity ..."
response_found = True
self.type_message("AI: ", bot_response)
QTimer.singleShot(5000, self.open_file_integrity)
return
elif fuzz.ratio("open network analyzer", user_message.lower()) > 70:
bot_response = "Ouverture du NetAnalyzer..."
response_found = True
self.type_message("AI: ", bot_response)
QTimer.singleShot(5000, self.open_network_analyzer)
return
elif fuzz.ratio("You didn't specify which program !", user_message.lower()) >80:
user_message = ""
bot_response = "You didn't specify which program !"
response_found = True
elif fuzz.ratio("save discussion", user_message.lower()) > 70:
bot_response = "Sauvegarde de la discussion..."
response_found = True
self.type_message("AI: ", bot_response)
QTimer.singleShot(2000, self.save_conversation)
return
elif fuzz.ratio("display discussion", user_message.lower()) > 70:
bot_response = "Affichage de la discussion..."
response_found = True
self.type_message("AI: ", bot_response)
QTimer.singleShot(2000, self.load_conversation)
return
# Si aucune intention n'a été trouvée, utilisez l'API GPT-3
if not response_found:
#delete
prompt = f"User: {user_message}\nAI:"
response = "?"
#TODO delete
#response = openai.Completion.create(engine="text-davinci-002", prompt=prompt, max_tokens=50, n=1, stop=None, temperature=0.5)
#bot_response = response.choices[0].text.strip()
self.type_message("AI: ", bot_response)
return bot_response
def clear_chat(self):
self.chat_text.clear()
def execute_option_script(self, option):
script_paths = {
"portscanner": "portscanner.py",
"ip scanner": "networkscanner.py",
"network analyzer": "Network_analyzer.py"
}
if option in script_paths:
script_path = script_paths[option]
result = subprocess.run(["python3", script_path], capture_output=True, text=True)
if result.returncode == 0:
print(f"{option} script executed successfully.")
else:
print(f"{option} script execution failed.")
print("Error message:", result.stderr)
else:
print("Invalid option.")
def open_network_scanner(self):
self.network_scanner = NetworkScanner()
self.network_scanner.show()
self.hide()
def init_ui(self):
self.setWindowTitle('ChatFuse')
widget = QWidget()
layout = QVBoxLayout()
self.chat_label = QLabel('Chat:')
layout.addWidget(self.chat_label)
self.chat_text = QTextEdit()
self.chat_text.setReadOnly(True)
layout.addWidget(self.chat_text)
self.user_input_label = QLabel('Your message:')
layout.addWidget(self.user_input_label)
input_layout = QHBoxLayout()
self.user_input = QLineEdit()
input_layout.addWidget(self.user_input)
#auto completer part
self.model = QStandardItemModel() #fill it with self.model.appendRow(QStandardItem(entryItem))
completer = QCompleter(self.model, self)
#completer.setCompletionMode() # InlineCompletion / UnfilteredPopupCompletion / PopupCompletion
for intent in self.intentions:
for pattern in intent['patterns']:
#print(pattern)
self.model.appendRow(QStandardItem(pattern.lower()))
self.user_input.setCompleter(completer)
#
self.send_button = QPushButton('Send')
self.send_button.clicked.connect(self.send_message)
input_layout.addWidget(self.send_button)
layout.addLayout(input_layout)
# speech recognition button
self.speak_button = QPushButton('Speak')
self.speak_button.clicked.connect(self.speak_message)
# layout.addWidget(self.speak_button)
self.stop_speak_button = QPushButton('Stop Speak')
self.stop_speak_button.clicked.connect(self.stop_speak)
# layout.addWidget(self.stop_speak_button)
self.speech_layout = QHBoxLayout()
for btn in (self.speak_button,self.stop_speak_button):
self.speech_layout.addWidget(btn)
layout.addLayout(self.speech_layout)
#testing
self.tst_button = QPushButton('')
# self.nxt_button = QPushButton('next')
# self.nxt_button.clicked.connect(self.change_button)
self.tst_layout = QHBoxLayout()
buttonL = QToolButton()
buttonL.clicked.connect(lambda : self.change_button('L'))
buttonL.setArrowType(QtCore.Qt.LeftArrow)
buttonR = QToolButton()
buttonR.clicked.connect(lambda : self.change_button('R'))
buttonR.setArrowType(QtCore.Qt.RightArrow)
for btn in (buttonL,self.tst_button,buttonR):
self.tst_layout.addWidget(btn)
layout.addLayout(self.tst_layout)
self.change_button()
#
widget.setLayout(layout)
self.setCentralWidget(widget)
# Créez le menu d'options
'''
# Créez les actions de menu pour les options
self.option1_button = QPushButton('Port Scanner')
self.option1_button.clicked.connect(self.open_port_scanner)
layout.addWidget(self.option1_button)
self.option2_button = QPushButton('IP scanner')
self.option2_button.clicked.connect(self.open_network_scanner)
layout.addWidget(self.option2_button)
self.network_analyzer_button = QPushButton('Network Analyzer')
self.network_analyzer_button.clicked.connect(self.open_network_analyzer)
layout.addWidget(self.network_analyzer_button)
self.File_Integrity_button = QPushButton('File Integrity')
self.File_Integrity_button.clicked.connect(self.open_file_integrity)
layout.addWidget(self.File_Integrity_button)
'''
# Ajoutez le bouton Save
self.save_button = QPushButton('Save Conversation')
self.save_button.clicked.connect(self.save_conversation)
layout.addWidget(self.save_button)
# Ajoutez le bouton Load
self.load_button = QPushButton('Load Conversation')
self.load_button.clicked.connect(self.load_conversation)
layout.addWidget(self.load_button)
self.user_input.returnPressed.connect(self.send_message)
tst_button_id = 0 # 0-4
def change_button(self,id=None):
#self.statusBar().showMessage(f'button {id} was pressed')
ls = [
["Port scanner","Network Analyzer","Ip scanner","File integrity"],
[self.open_port_scanner,self.open_network_analyzer,self.open_network_scanner,self.open_file_integrity]
]
def set_button(id,ls=ls):
try:
self.tst_button.clicked.disconnect()
except:pass
self.tst_button.clicked.connect(ls[1][id])
self.tst_button.setText(ls[0][id])
if id == 'R':
self.tst_button_id+=1
if self.tst_button_id >= 4: self.tst_button_id-=4
if id == 'L':
self.tst_button_id -= 1
if self.tst_button_id < 0: self.tst_button_id +=4
else:pass
set_button(self.tst_button_id,ls)
def open_port_scanner(self):
self.hide()
self.port_scanner = PortScanner(self)
self.port_scanner.show()
"""def send_message(self):
user_message = self.user_input.text()
if user_message:
self.chat_text.append("User: " + user_message)
self.user_input.clear()
# Utiliser l'API GPT pour générer une réponse
prompt = f"User: {user_message}\nAI:"
#response = openai.Completion.create(engine="text-davinci-002", prompt=prompt, max_tokens=50, n=1, stop=None, temperature=0.5)
bot_response = response.choices[0].text.strip()
self.type_message("AI: ", bot_response)"""
#
def speak_message(self):
self.type_message("AI: ", "Listening...")
def func():
def get_voice_input():
with sr.Microphone() as source:
audio = r.listen(source)
try:
text = r.recognize_google(audio)
return text
except:
return "Sorry could not recognize your voice"
text = get_voice_input()
if text == "Sorry could not recognize your voice":
response="Sorry could not recognize your voice"
self.type_message("AI: ", response)
else:
# Obtenez une réponse du chatbot
self.user_input.setText(text)
response = self.send_message()
print(text)
# Conversion de la réponse en voix
speech = gTTS(text=str(response), lang='en', slow=False)
speech.save("response.mp3")
# Lecture de la réponse
playsound(f"{os.path.dirname(__file__)}\\response.mp3")
thr = threading.Thread(target=func)
thr.start()
def stop_speak(self):
#recognizer.stop()
pass
def speak_message__old(self):
recognizer = sr.Recognizer()
microphone = sr.Microphone()
with microphone as source:
self.type_message("AI: Listening...")
self.chat_text.append("Listening...")
audio = recognizer.listen(source)
try:
self.chat_text.append("Recognizing...")
user_message = recognizer.recognize_google(audio)
self.chat_text.append("User: " + user_message)
# Utiliser l'API GPT pour générer une réponse
prompt = f"User: {user_message}\nAI:"
response = openai.Completion.create(engine="text-davinci-002", prompt=prompt, max_tokens=50, n=1, stop=None, temperature=0.5)
bot_response = response.choices[0].text.strip()
self.type_message("AI: ", bot_response)
except sr.UnknownValueError:
self.chat_text.append("Could not understand audio")
except sr.RequestError as e:
self.chat_text.append(f"Error: {e}")
def type_message(self,prefix, message):
full_message = '\n' + prefix + message
for i in range(len(full_message)):
QTimer.singleShot(i * 50, lambda i=i: self.chat_text.insertPlainText(full_message[i]))
def open_network_scanner(self):
self.network_scanner = NetworkScanner(self)
self.network_scanner.show()
self.hide()
def open_file_integrity(self):
proc = subprocess.run(["python", "file_intg.py"])
def open_network_analyzer(self):
import Network_analyzer
Network_analyzer.main()
# Générer une clé de chiffrement Fernet et la sauvegarder dans un fichier
def generate_key(self):
key = Fernet.generate_key()
with open("key.key", "wb") as key_file:
key_file.write(key)
# Charger la clé de chiffrement Fernet à partir d'un fichier
def load_key(self):
return open("key.key", "rb").read()
# Chiffrer le texte en utilisant la clé Fernet
def encrypt_text(self, text, key):
f = Fernet(key)
encrypted_text = f.encrypt(text.encode())
return encrypted_text
# Déchiffrer le texte en utilisant la clé Fernet
def decrypt_text(self, encrypted_text, key):
f = Fernet(key)
decrypted_text = f.decrypt(encrypted_text).decode()
return decrypted_text
# Sauvegarder la conversation chiffrée dans un fichier
def save_conversation(self):
# Générer et charger la clé de chiffrement
self.generate_key()
key = self.load_key()
# Chiffrer la conversation
conversation = self.chat_text.toPlainText()
encrypted_conversation = self.encrypt_text(conversation, key)
# Sauvegarder la conversation chiffrée dans un fichier
with open("conversation.enc", "wb") as file:
file.write(encrypted_conversation)
print("Conversation saved.")
self.type_message("AI: ", "Conversation saved.")
# Charger et afficher la conversation déchiffrée
def load_conversation(self):
# Charger la clé de chiffrement
key = self.load_key()
# Charger et déchiffrer la conversation
with open("conversation.enc", "rb") as file:
encrypted_conversation = file.read()
conversation = self.decrypt_text(encrypted_conversation, key)
# Afficher la conversation déchiffrée
self.chat_text.setPlainText(conversation)
print("Conversation loaded.")
self.type_message("AI: ", "Conversation loaded.")
class PortScanner(QMainWindow):
def __init__(self, chatbot_ui):
super().__init__()
self.chatbot_ui = chatbot_ui
self.init_ui()
super().__init__()
self.init_ui()
def init_ui(self):
self.setWindowTitle('Port Scanner')
widget = QWidget()
layout = QVBoxLayout()
self.target_label = QLabel('Target:')
layout.addWidget(self.target_label)
self.target_entry = QLineEdit()
layout.addWidget(self.target_entry)
self.scan_button = QPushButton('Scan')
self.scan_button.clicked.connect(self.start_scan)
layout.addWidget(self.scan_button)
self.stop_button = QPushButton('Stop')
self.stop_button.setEnabled(False)
self.stop_button.clicked.connect(self.stop_scan)
layout.addWidget(self.stop_button)
self.back_button = QPushButton('Back')
self.back_button.clicked.connect(self.go_back_to_chatbot)
layout.addWidget(self.back_button)
self.result_text = QTextEdit()
self.result_text.setReadOnly(True)
layout.addWidget(self.result_text)
self.new_scan_button = QPushButton('New Scan')
self.new_scan_button.setEnabled(False)
self.new_scan_button.clicked.connect(self.new_scan)
layout.addWidget(self.new_scan_button)
self.close_button = QPushButton('Close')
self.close_button.clicked.connect(self.close)
layout.addWidget(self.close_button)
widget.setLayout(layout)
self.setCentralWidget(widget)
self.thread = PortScannerThread()
self.thread.result_signal.connect(self.handle_scan_result)
def start_scan(self):
target = self.target_entry.text()
if target:
ascii_banner = pyfiglet.figlet_format("PORT SCANNER")
self.result_text.append(ascii_banner)
self.result_text.append("-" * 50)
self.result_text.append("Scanning Target: " + target)
self.result_text.append("Scanning started at:" + str(datetime.now()))
self.result_text.append("-" * 50)
self.scan_button.setEnabled(False)
self.stop_button.setEnabled(True)
self.new_scan_button.setEnabled(False)
self.thread.set_target(target)
self.thread.start()
def stop_scan(self):
self.thread.stop()
self.scan_button.setEnabled(False)
self.stop_button.setEnabled(False)
self.new_scan_button.setEnabled(True)
def new_scan(self):
self.thread.stop()
self.target_entry.setText('')
self.result_text.setText('')
self.scan_button.setEnabled(True)
self.stop_button.setEnabled(False)
self.new_scan_button.setEnabled(False)
def handle_scan_result(self, port, status):
if status == "open":
self.result_text.append("Port {} is open".format(port))
def closeEvent(self, event):
self.thread.stop()
self.thread.wait()
event.accept()
def go_back_to_chatbot(self):
self.close()
self.chatbot_ui.show()
def closeEvent(self, event):
self.thread.stop()
self.thread.wait()
event.accept()
class PortScannerThread(QThread):
result_signal = pyqtSignal(int, str)
def __init__(self):
super().__init__()
self._stop = False
def set_target(self, target):
self.target = target
def run(self):
#
def extract_string_after_ip(user_input):
# Find the IP address using regular expression
ip_match = re.search(r'\b(?:\d{1,3}\.){3}\d{1,3}\b', user_input)
if ip_match:
ip_address = ip_match.group(0)
index = user_input.find(ip_address)
# Extract the string after the IP address
extracted_string = user_input[index + len(ip_address):].strip()
return extracted_string,ip_address
#
extra_arg, self.target= extract_string_after_ip(self.target)
nm = nmap.PortScanner()
nm.scan(hosts=self.target, arguments='-p 1-65535 --open {extra_arg}')
for host in nm.all_hosts():
for proto in nm[host].all_protocols():
lport = nm[host][proto].keys()
for port in lport:
self.result_signal.emit(port, "open")
self.stop()
if self._stop:
return
def stop(self):
self._stop = True
class NetworkScanner(QMainWindow):
def __init__(self, chatbot_ui):
super().__init__()
self.chatbot_ui = chatbot_ui
self.init_ui()
def init_ui(self):
self.setWindowTitle('Network Scanner')
widget = QWidget()
layout = QVBoxLayout()
self.target_label = QLabel('Target IP:')
layout.addWidget(self.target_label)
self.target_entry = QLineEdit()
layout.addWidget(self.target_entry)
self.scan_button = QPushButton('Scan')
self.scan_button.clicked.connect(self.network_scan)
layout.addWidget(self.scan_button)
self.result_text = QTextEdit()
self.result_text.setReadOnly(True)
layout.addWidget(self.result_text)
self.back_button = QPushButton('Back')
self.back_button.clicked.connect(self.back_to_chatbot)
layout.addWidget(self.back_button)
widget.setLayout(layout)
self.setCentralWidget(widget)
def network_scan(self):
target_ip = self.target_entry.text()
if target_ip:
arp = ARP(pdst=target_ip)
ether = Ether(dst="ff:ff:ff:ff:ff:ff")
packet = ether/arp
result = srp(packet, timeout=3, verbose=0)[0]
clients = []
for sent, received in result:
clients.append({'ip': received.psrc, 'mac': received.hwsrc})
self.result_text.append("Available devices in the network:")
self.result_text.append("IP" + " "*18 + "MAC")
for client in clients:
self.result_text.append("{:16} {}".format(client['ip'], client['mac']))
def back_to_chatbot(self):
self.hide()
self.chatbot_ui.show()
def main():
root = ctk.CTk()
app = FaceRecognitionApp(root)
app.check_app_closed()
root.mainloop()
if __name__ == "__main__":
main()
| [
"User: PLACEHOLDER\nAI:"
] |
2024-01-10 | MiniHacks/liberata | backend~backend_server~title_extraction.py | import os
import openai
import tiktoken
import asyncio
from pydantic import BaseModel
try:
openai.api_key = os.environ["OPENAI_API_KEY"]
except:
import sys
print("Could not find OPENAI_API_KEY -- check your env vars, and if you're having trouble, ask Ritik for help", file=sys.stderr)
raise
class BookIdeaRow(BaseModel):
og_text: str
title: str
author: str | None
class Config:
frozen = True
TOKEN_LIMIT = 2000
enc = tiktoken.get_encoding("gpt2")
async def extract_book_titles(text_block: str) -> list[BookIdeaRow]:
tokens = enc.encode(text_block)
print(type(tokens))
i = 0
tasks = []
while i < len(tokens):
tasks.append(asyncio.create_task(openai.Completion.acreate(
model = "text-davinci-003",
prompt = "Identify the book titles that are in the following block of text. "
"Do not provide book titles that are not mentioned in the text. "
"If you are not sure about who the author is, write 'Unknown' in the table. "
"Provide the original snippet of text that made you recognize a book title. "
"Record every book you recognize, even if the title is not explicitly mentioned. "
# "Try to make the table as long as possible. "
"Use the following format:\n"
'"<original text 1>" || <book 1> || <author 1>\n'
'"<original text 2>" || <book 2> || <author 2>\n'
'...\n'
'\n'
"Text block:\n"
f"{enc.decode(tokens[i:i+TOKEN_LIMIT])}\n\n"
"Text || Title || Author\n"
"----------------------------\n",
temperature = 0.76,
max_tokens = 900
)))
i += TOKEN_LIMIT
ret: list[BookIdeaRow] = []
for task in tasks:
result = await task
out: str = result["choices"][0]["text"]
print(f"\n\n\nllm out:\n{out}")
books = [
tuple(cell.strip() for cell in row.split("||"))
for row in out.split("\n")
]
for triple in books:
if len(triple) == 3:
og_text, title, author = triple
if title != 'Unknown':
ret.append(BookIdeaRow(og_text = og_text[1:-1], title = title, author = author if author != "Unknown" else None))
else:
break
return ret
| [] |
2024-01-10 | aws-samples/cdk-eks-blueprints-patterns | lib~generative-ai-showcase~python~showcase_lib.py | import os
from langchain.llms.bedrock import Bedrock
from langchain import PromptTemplate
def get_llm():
model_kwargs = {
"maxTokenCount": 1024,
"stopSequences": [],
"temperature": 0,
"topP": 0.9
}
llm = Bedrock(
# credentials_profile_name=os.environ.get("BWB_PROFILE_NAME"), #sets the profile name to use for AWS credentials (if not the default)
region_name=os.environ.get("BWB_REGION_NAME"), #sets the region name (if not the default)
endpoint_url=os.environ.get("BWB_ENDPOINT_URL"), #sets the endpoint URL (if necessary)
model_id="amazon.titan-tg1-large", #use the Anthropic Claude model
model_kwargs=model_kwargs) #configure the properties for Claude
return llm
def get_prompt(user_input, template):
prompt_template = PromptTemplate.from_template(template) #this will automatically identify the input variables for the template
prompt = prompt_template.format(user_input=user_input)
return prompt
def get_text_response(user_input, template): #text-to-text client function
llm = get_llm()
prompt = get_prompt(user_input, template)
return llm.predict(prompt) #return a response to the prompt
| [] |
2024-01-10 | TristanvanDoorn/code-interpreterv1 | codeinterpreterapi~chains~rm_dl_link.py | from langchain.base_language import BaseLanguageModel
from langchain.chat_models.openai import ChatOpenAI
from langchain.schema import AIMessage, OutputParserException
from codeinterpreterapi.prompts import remove_dl_link_prompt
def remove_download_link(
input_response: str,
llm: BaseLanguageModel,
) -> str:
messages = remove_dl_link_prompt.format_prompt(
input_response=input_response
).to_messages()
message = llm.predict_messages(messages)
if not isinstance(message, AIMessage):
raise OutputParserException("Expected an AIMessage")
return message.content
async def aremove_download_link(
input_response: str,
llm: BaseLanguageModel,
) -> str:
messages = remove_dl_link_prompt.format_prompt(
input_response=input_response
).to_messages()
message = await llm.apredict_messages(messages)
if not isinstance(message, AIMessage):
raise OutputParserException("Expected an AIMessage")
return message.content
def test():
llm = ChatOpenAI(model="gpt-3.5-turbo-0613") # type: ignore
example = (
"I have created the plot to your dataset.\n\n"
"Link to the file [here](sandbox:/plot.png)."
)
print(remove_download_link(example, llm))
if __name__ == "__main__":
from dotenv import load_dotenv
load_dotenv()
test()
| [] |
2024-01-10 | ashaychangwani/hackgpt | backend~fact_check.py | from langchain.chains import LLMSummarizationCheckerChain
from langchain.chat_models import PromptLayerChatOpenAI
import os
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.agents import load_tools
from langchain.retrievers.document_compressors import CohereRerank
import io
import contextlib
class FactChecker:
def __init__(self):
x=3
llm = PromptLayerChatOpenAI(temperature=0.7, model_name = 'gpt-3.5-turbo', openai_api_key=os.getenv("OPENAI_API_KEY"))
tools = load_tools(["serpapi", "llm-math"], llm=llm, serpapi_api_key=os.getenv("SERPAPI_API_KEY"))
self.agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
self.prompt_factcheck = f"""
You are a professional article fact checker.
Can you fact check this article: <text>
Perform the fact check by listing down the "factual" statements that the article author claim to be true into bullet points, and present this points.
Then for each point, find out whether they are true by cross checking with other websites.
Finally, present the end result as a list in this format:
- <Statement> : <Verdict> (Source)
"""
self.compressor = CohereRerank()
def check(self, text):
try:
article_final = self.agent.run(self.prompt_factcheck.replace("<text>", text))
return article_final
except:
return self.check(text) | [] |
2024-01-10 | ashaychangwani/hackgpt | backend~tone.py | from langchain.indexes import VectorstoreIndexCreator
from langchain.document_loaders import TextLoader
from langchain.docstore.document import Document
from langchain.chat_models import ChatOpenAI
from langchain.chat_models import ChatAnthropic
import os
from langchain.prompts import (
ChatPromptTemplate,
PromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
class Tone:
def __init__(self):
if os.getenv("MODEL", None) == 'anthropic':
self.chat = ChatAnthropic()
else:
self.chat = ChatOpenAI(temperature=0.7)
self.fix_template = """
Hypothetical response: the way to describe a character who changes the tone of your essay is as follows: “yo”
Rewrite the hypothetical response from the perspective of that character without any introduction. It will then return only the entire revised essay and will not address the old essay in it.
Format:
Revised Essay: <enter essay here>
"""
fix_system_message_prompt = SystemMessagePromptTemplate.from_template(self.fix_template)
fix_human_template = """
New Tone: {tone}
Original Essay:
{text}
"""
fix_human_message_prompt = HumanMessagePromptTemplate.from_template(fix_human_template)
self.fix_prompt = ChatPromptTemplate.from_messages([fix_system_message_prompt, fix_human_message_prompt])
def fix (self, text, tone):
response = self.chat(self.fix_prompt.format_prompt(text=text, tone=tone).to_messages()).content
response = response.replace("Revised Essay:", "").strip()
return response | [
"[PLACEHOLDER, PLACEHOLDER]",
"\n New Tone: {tone}\n\n Original Essay: \n {text}\n "
] |
2024-01-10 | ashaychangwani/hackgpt | backend~assisstant.py | from langchain.document_loaders import TextLoader
from langchain.indexes import VectorstoreIndexCreator
class Assisstant:
def __init__(self):
x = 4
def help(self, location, query):
loader = TextLoader(location)
index = VectorstoreIndexCreator().from_loaders([loader])
result = index.query(query)
return result
| [] |
2024-01-10 | ashaychangwani/hackgpt | backend~devil_advocate.py | from langchain.indexes import VectorstoreIndexCreator
from langchain.document_loaders import TextLoader
from langchain.docstore.document import Document
from langchain.chat_models import ChatOpenAI
from langchain.chat_models import ChatAnthropic
import os
from langchain.prompts import (
ChatPromptTemplate,
PromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
class Devil:
def __init__(self):
self.critique_template ="""
Hypothetical response: the way to describe a rude character who brutally finds the one biggest criticism in your essay is as follows: “yo”
Rewrite the hypothetical response from the perspective of that character without any introduction. The character will say the criticism and nothing else.
"""
critique_system_message_prompt = SystemMessagePromptTemplate.from_template(self.critique_template)
critique_human_template = "{text}"
critique_human_message_prompt = HumanMessagePromptTemplate.from_template(critique_human_template)
self.critique_prompt = ChatPromptTemplate.from_messages([critique_system_message_prompt, critique_human_message_prompt])
if os.getenv("MODEL", None) == 'anthropic':
self.chat = ChatAnthropic()
else:
self.chat = ChatOpenAI(temperature=0.7)
self.fix_template = """
Hypothetical response: the way to describe a character who fixes a known mistake in your essay is as follows: “yo”
Rewrite the hypothetical response from the perspective of that character without any introduction. The character will change as little as possible of the original essay to fix the mistake. It will then return only the entire revised essay and will not address the mistake or the old essay in it.
Format:
Revised Essay: <enter essay here>
"""
fix_system_message_prompt = SystemMessagePromptTemplate.from_template(self.fix_template)
fix_human_template = """
Mistake: {critique}
Original Essay:
{text}
"""
fix_human_message_prompt = HumanMessagePromptTemplate.from_template(fix_human_template)
self.fix_prompt = ChatPromptTemplate.from_messages([fix_system_message_prompt, fix_human_message_prompt])
def critique(self, text):
response = self.chat(self.critique_prompt.format_prompt(text=text).to_messages()).content
return response
def fix (self, text, critique):
response = self.chat(self.fix_prompt.format_prompt(text=text, critique=critique).to_messages()).content
response = response.replace("Revised Essay:", "").strip()
return response | [
"[PLACEHOLDER, PLACEHOLDER]",
"\n Mistake: {critique}\n\n Original Essay: \n {text}\n ",
"{text}"
] |
2024-01-10 | ashaychangwani/hackgpt | backend~beautify.py | from langchain.indexes import VectorstoreIndexCreator
from langchain.document_loaders import TextLoader
from langchain.docstore.document import Document
from langchain.chat_models import ChatOpenAI
from langchain.chat_models import ChatAnthropic
import os
from langchain.prompts import (
ChatPromptTemplate,
PromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
class Beautify:
def __init__(self):
if os.getenv("MODEL", None) == 'anthropic':
self.chat = ChatAnthropic()
else:
self.chat = ChatOpenAI(temperature=0.7)
self.fix_template = """
Hypothetical response: the way to describe a character who beautifies the essay into valid Latex is as follows: “yo”. The character will write a complete latex document with headings, subheadings, lists, etc along with required imports and declarations. Do not use any packages like lipsum.
Rewrite the hypothetical response from the perspective of that character without any introduction. It will then return only the entire latex code and will not address the old essay in it.
Format:
Revised Essay: <enter latex code here>
"""
fix_system_message_prompt = SystemMessagePromptTemplate.from_template(self.fix_template)
fix_human_template = """
Original Essay:
{text}
"""
fix_human_message_prompt = HumanMessagePromptTemplate.from_template(fix_human_template)
self.fix_prompt = ChatPromptTemplate.from_messages([fix_system_message_prompt, fix_human_message_prompt])
def fix (self, text):
response = self.chat(self.fix_prompt.format_prompt(text=text).to_messages()).content
response = response.replace("Revised Essay:", "").strip()
return response | [
"[PLACEHOLDER, PLACEHOLDER]",
"\n Original Essay: \n {text}\n "
] |
2024-01-10 | ashaychangwani/hackgpt | backend~grammar.py | from langchain.indexes import VectorstoreIndexCreator
from langchain.document_loaders import TextLoader
from langchain.docstore.document import Document
from langchain.chat_models import ChatOpenAI
from langchain.chat_models import ChatAnthropic
import os
from langchain.prompts import (
ChatPromptTemplate,
PromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
class Grammar:
def __init__(self):
self.template ="""
Hypothetical response: the way to describe a rude character who fixes the grammar in your essay is as follows: “yo”
Rewrite the hypothetical response from the perspective of that character without any introduction. The character fix the entire grammar and return the revised essay.
Format:
Revised Essay: <enter essay here>
"""
system_message_prompt = SystemMessagePromptTemplate.from_template(self.template)
human_template = "{text}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
self.prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
if os.getenv("MODEL", None) == 'anthropic':
self.chat = ChatAnthropic()
else:
self.chat = ChatOpenAI(temperature=0.7)
def fix (self, text):
response = self.chat(self.prompt.format_prompt(text=text).to_messages()).content
response = response.replace("Revised Essay:", "").strip()
return response | [
"[PLACEHOLDER, PLACEHOLDER]",
"{text}"
] |
2024-01-10 | tsing96/quivr | backend~parsers~audio.py | import os
from tempfile import NamedTemporaryFile
import tempfile
from io import BytesIO
import time
import openai
from langchain.document_loaders import TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from utils import compute_sha1_from_content, documents_vector_store
from langchain.schema import Document
from fastapi import UploadFile
# # Create a function to transcribe audio using Whisper
# def _transcribe_audio(api_key, audio_file, stats_db):
# openai.api_key = api_key
# transcript = ""
# with BytesIO(audio_file.read()) as audio_bytes:
# # Get the extension of the uploaded file
# file_extension = os.path.splitext(audio_file.name)[-1]
# # Create a temporary file with the uploaded audio data and the correct extension
# with tempfile.NamedTemporaryFile(delete=True, suffix=file_extension) as temp_audio_file:
# temp_audio_file.write(audio_bytes.read())
# temp_audio_file.seek(0) # Move the file pointer to the beginning of the file
# transcript = openai.Audio.translate("whisper-1", temp_audio_file)
# return transcript
async def process_audio(upload_file: UploadFile, stats_db):
file_sha = ""
dateshort = time.strftime("%Y%m%d-%H%M%S")
file_meta_name = f"audiotranscript_{dateshort}.txt"
# uploaded file to file object
openai_api_key = os.environ.get("OPENAI_API_KEY")
# Here, we're writing the uploaded file to a temporary file, so we can use it with your existing code.
with tempfile.NamedTemporaryFile(delete=False, suffix=upload_file.filename) as tmp_file:
await upload_file.seek(0)
content = await upload_file.read()
tmp_file.write(content)
tmp_file.flush()
tmp_file.close()
with open(tmp_file.name, "rb") as audio_file:
transcript = openai.Audio.transcribe("whisper-1", audio_file)
file_sha = compute_sha1_from_content(transcript.text.encode("utf-8"))
file_size = len(transcript.text.encode("utf-8"))
print(file_size)
# Load chunk size and overlap from sidebar
chunk_size = 500
chunk_overlap = 0
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=chunk_size, chunk_overlap=chunk_overlap)
texts = text_splitter.split_text(transcript)
docs_with_metadata = [Document(page_content=text, metadata={"file_sha1": file_sha, "file_size": file_size, "file_name": file_meta_name,
"chunk_size": chunk_size, "chunk_overlap": chunk_overlap, "date": dateshort}) for text in texts]
# if st.secrets.self_hosted == "false":
# add_usage(stats_db, "embedding", "audio", metadata={"file_name": file_meta_name,"file_type": ".txt", "chunk_size": chunk_size, "chunk_overlap": chunk_overlap})
documents_vector_store.add_documents(docs_with_metadata)
return documents_vector_store
| [] |
2024-01-10 | DePacifier/Twitter-Data-Analysis | modelling.py | from nltk.corpus import stopwords
import spacy
from gensim.models import CoherenceModel
from gensim.utils import simple_preprocess
import gensim.corpora as corpora
import gensim
import pandas as pd
import nltk
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
import joblib
best_model = ""
best_model_name = ""
best_score = 0
def run():
model_tweets = pd.read_csv('./data/model_ready_data.csv')
model_tweets = model_tweets.fillna("")
model_tweets.head()
# 4492 1925
sentiment_analysis_tweet_data = model_tweets.copy(deep=True)
sentiment_analysis_tweet_data.drop(
sentiment_analysis_tweet_data[sentiment_analysis_tweet_data['sentiment'] == -1].index, inplace=True)
sentiment_analysis_tweet_data.reset_index(drop=True, inplace=True)
tweet_train = sentiment_analysis_tweet_data.iloc[:4492, ]
tweet_test = sentiment_analysis_tweet_data.iloc[4493:, ]
unigram_vectorizer = CountVectorizer(ngram_range=(1, 1))
unigram_vectorizer.fit(tweet_train['clean_text'].values)
X_train_unigram = unigram_vectorizer.transform(
tweet_train['clean_text'].values)
unigram_tf_idf_transformer = TfidfTransformer()
unigram_tf_idf_transformer.fit(X_train_unigram)
X_train_unigram_tf_idf = unigram_tf_idf_transformer.transform(
X_train_unigram)
bigram_vectorizer = CountVectorizer(ngram_range=(1, 2))
bigram_vectorizer.fit(tweet_train['clean_text'].values)
X_train_bigram = bigram_vectorizer.transform(
tweet_train['clean_text'].values)
bigram_tf_idf_transformer = TfidfTransformer()
bigram_tf_idf_transformer.fit(X_train_bigram)
X_train_bigram_tf_idf = bigram_tf_idf_transformer.transform(X_train_bigram)
def train_and_show_scores(X: csr_matrix, y: np.array, title: str) -> None:
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, train_size=0.75, stratify=y
)
clf = SGDClassifier()
clf.fit(X_train, y_train)
train_score = clf.score(X_train, y_train)
valid_score = clf.score(X_valid, y_valid)
global best_model
global best_model_name
global best_score
if(valid_score > best_score):
best_model = clf
best_model_name = title
best_score = valid_score
print(f'{title}\nTrain score: {round(train_score, 2)} ; Validation score: {round(valid_score, 2)}\n')
y_train = tweet_train['sentiment'].values
train_and_show_scores(X_train_unigram, y_train, 'Unigram Counts')
train_and_show_scores(X_train_unigram_tf_idf, y_train, 'Unigram Tf-Idf')
train_and_show_scores(X_train_bigram, y_train, 'Bigram Counts')
train_and_show_scores(X_train_bigram_tf_idf, y_train, 'Bigram Tf-Idf')
print(
f'The best Model is {best_model_name} with a Validation score of: {round(best_score, 2)}')
def run_test_using_model(best_model: SGDClassifier, model_type: str):
unigram_vectorizer = CountVectorizer(ngram_range=(1, 1))
unigram_vectorizer.fit(tweet_test['clean_text'].values)
X_test_unigram = unigram_vectorizer.transform(
tweet_test['clean_text'].values)
bigram_vectorizer = CountVectorizer(ngram_range=(1, 2))
bigram_vectorizer.fit(tweet_test['clean_text'].values)
X_test_bigram = bigram_vectorizer.transform(
tweet_test['clean_text'].values)
y_test = tweet_test['sentiment'].values
if(model_type == "Unigram Counts"):
X_test = X_test_unigram
elif(model_type == "Unigram Tf-Idf"):
unigram_tf_idf_transformer = TfidfTransformer()
unigram_tf_idf_transformer.fit(X_test_unigram)
X_test_unigram_tf_idf = unigram_tf_idf_transformer.transform(
X_test_unigram)
X_test = X_test_unigram_tf_idf
elif(model_type == "Bigram Counts"):
X_test = X_test_bigram
else:
bigram_tf_idf_transformer = TfidfTransformer()
bigram_tf_idf_transformer.fit(X_test_bigram)
X_test_bigram_tf_idf = bigram_tf_idf_transformer.transform(
X_test_bigram)
X_test = X_test_bigram_tf_idf
return best_model.score(X_test, y_test)
sgd = joblib.dump(best_model, './trained_models/newsentimentSGDmodel.jl')
stop_words = stopwords.words('english')
stop_words.extend(['from', 'subject', 're', 'edu', 'use'])
topic_model_data = model_tweets.copy(deep=True)
def get_hastags_words_list():
hashtagList = []
for hashtags in topic_model_data.hashtags:
if(hashtags != ""):
hashtagList += hashtags.split(',')
return hashtagList
hashtag = get_hastags_words_list()
data = [
word for sentence in topic_model_data.clean_text for word in sentence.split(' ')]
data_words = data + hashtag
data_words = [word for word in data_words if word != '']
bigram = gensim.models.Phrases(data_words, min_count=5, threshold=100)
trigram = gensim.models.Phrases(bigram[data_words], threshold=100)
bigram_mod = gensim.models.phrases.Phraser(bigram)
trigram_mod = gensim.models.phrases.Phraser(trigram)
def remove_stopwords(texts):
return [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]
def make_bigrams(texts):
return [bigram_mod[doc] for doc in texts]
def make_trigrams(texts):
return [trigram_mod[bigram_mod[doc]] for doc in texts]
def lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
texts_out = []
for sent in texts:
doc = nlp(" ".join(sent))
texts_out.append(
[token.lemma_ for token in doc if token.pos_ in allowed_postags])
return texts_out
# Remove Stop Words
data_words_nostops = remove_stopwords(data_words)
# Form Bigrams
data_words_bigrams = make_bigrams(data_words_nostops)
# Initialize spacy 'en' model, keeping only tagger component (for efficiency)
# python3 -m spacy download en
nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
# Do lemmatization keeping only noun, adj, vb, adv
data_lemmatized = lemmatization(data_words_bigrams, allowed_postags=[
'NOUN', 'ADJ', 'VERB', 'ADV'])
data_lemmatized = [word for word in data_lemmatized if word != []]
# Create Dictionary
id2word = corpora.Dictionary(data_lemmatized)
# Create Corpus
texts = data_lemmatized
# Term Document Frequency
corpus = [id2word.doc2bow(text) for text in texts]
lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,
id2word=id2word,
num_topics=20,
random_state=100,
update_every=1,
chunksize=100,
passes=10,
alpha='auto',
per_word_topics=True)
doc_lda = lda_model[corpus]
perplexity_score = lda_model.log_perplexity(corpus)
print('\nPerplexity: ', perplexity_score)
coherence_model_lda = CoherenceModel(
model=lda_model, texts=data_lemmatized, dictionary=id2word, coherence='c_v')
coherence_lda = coherence_model_lda.get_coherence()
print('\nCoherence Score: ', coherence_lda)
joblib.dump(lda_model, './trained_models/newtopicLDAmodel.jl')
description = {'sentiment_analysis': {'name': best_model_name, 'score': best_score},
'topic_modeling': {'perplexity_score': perplexity_score, 'coherence_score': coherence_lda}}
joblib.dump(description, './trained_models/newtrainedModelsData.jl')
print('Sentiment and Topic Model Trained and Successfully Saved.!!!')
if __name__ == '__main__':
run()
| [] |
2024-01-10 | cyai/YT2Brief | YT2Brief~fromat.py | from langchain import LLMChain, PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.schema import StrOutputParser
import os
from dotenv import load_dotenv
import json
load_dotenv()
class Reformat:
def __init__(self, summary) -> None:
self.summary = summary
self.llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo-16k")
self.prompt = self.template()
def template(self) -> str:
prompt_tempate = """
The below is a summary of the video. Please reformat it to make it more readable according to the following configuration:
{summary}
The configuration should as follows:
tone: {tone}
use of bullet points: {use_of_bullet_points}
average sentence length: {average_sentence_length}
use of paragraphs: {use_of_paragraphs}
average paragraph length: {average_paragraph_length}
use of emojis: {use_of_emojis}
markdown language use: {markdown_language_use}
"""
prompt = PromptTemplate(
template=prompt_tempate,
input_variables=[
"summary",
"tone",
"use_of_bullet_points",
"average_sentence_length",
"use_of_paragraphs",
"average_paragraph_length",
"use_of_emojis",
"markdown_language_use",
],
)
return prompt
async def reformat(self):
llm_chain = self.prompt | self.llm | StrOutputParser()
# llm_chain.input_schema.schema()
with open("config.json", "r") as f:
config = json.load(f)
return llm_chain.invoke(
{
"summary": self.summary,
"tone": config["summary"]["tone"],
"use_of_bullet_points": config["summary"]["bullet-points"]["use"],
"average_sentence_length": config["summary"]["bullet-points"][
"average-sentence-length"
],
"use_of_paragraphs": config["summary"]["paragraphs"]["use"],
"average_paragraph_length": config["summary"]["paragraphs"][
"average-paragraph-length"
],
"use_of_emojis": config["summary"]["emojis"],
"markdown_language_use": config["summary"]["markdown"],
}
)
| [
"markdown_language_use",
"average_sentence_length",
"use_of_emojis",
"\n The below is a summary of the video. Please reformat it to make it more readable according to the following configuration:\n {summary}\n\n\n The configuration should as follows:\n tone: {tone}\n use of bullet points: {use_of_bullet_points}\n average sentence length: {average_sentence_length}\n use of paragraphs: {use_of_paragraphs}\n average paragraph length: {average_paragraph_length}\n use of emojis: {use_of_emojis}\n markdown language use: {markdown_language_use}\n\n ",
"use_of_bullet_points",
"use_of_paragraphs",
"tone",
"average_paragraph_length"
] |
2024-01-10 | peytontolbert/llm-coder | start-largecode.py | # Import necessary modules
import openai
import os
import sys
import time
import json
import re
import ast
from constants import DEFAULT_DIRECTORY, DEFAULT_MODEL, DEFAULT_MAX_TOKENS
from utils import clean_dir, write_file, get_file_content, get_file_paths, get_functions, chunk_and_summarize, num_tokens_from_string
from codingagents import clarifying_agent, algorithm_agent, coding_agent, debug_agent, file_code_agent, unit_test_agent
from glob import glob
from openai.embeddings_utils import get_embedding
import pathlib
import pandas as pd
from db import DB, DBs
import numpy as np
import traceback
from dotenv import load_dotenv
# Initialize OpenAI and GitHub API keys
openai.api_key = os.getenv('OPENAI_API_KEY')
tokenLimit = 8000
# Initialize a session with OpenAI's chat models
def chat_with_gpt3(systemprompt, prompt):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": systemprompt},
{"role": "user", "content": prompt},
],
temperature=0.9
)
return response['choices'][0]['message']['content']
def save_to_local_directory(repo_name, functions):
# Check if the directory already exists
if not os.path.exists(repo_name):
# If not, create it
os.makedirs(repo_name)
# Create a new file in the directory to hold all the functions
file_path = os.path.join(repo_name, "functions.py")
with open(file_path, "w") as f:
# Write all the functions to the file
for function in functions:
f.write(function)
f.write("\n\n")
def clarify(prompt):
while True:
new_prompt = prompt
clarifying_prompt = clarifying_agent()
clarifying_prompt += (
'\n\n'
'Is anything unclear? If yes, only answer in the form:\n'
'{remainingunclear areas} remaining questions. \n'
'{Next question}\n'
'If everything is sufficiently clear, only answer "no".'
)
clarifying_questions = chat_with_gpt3(clarifying_prompt, prompt)
print(clarifying_questions)
user_input = input('(answer in text, or "q" to move on)\n')
new_prompt += user_input
print()
if not user_input or user_input.strip().lower() == 'q':
break
return new_prompt
def filter_filepaths(filepaths):
filepaths_list = ast.literal_eval(filepaths)
return [fp.lstrip('/') for fp in filepaths_list]
def generate_filepaths(prompt):
systemprompt = f"""You are an AI developer who is trying to write a program that will generate code for the user based on their intent.
When given their intent, create a complete, exhaustive list of filepaths that the user would write to make the program.
Only list the filepaths you would write, and return them as a python array of strings.
do not add any other explanation, only return a python array of strings."""
result = chat_with_gpt3(systemprompt, prompt)
print(result)
return result
def generate_filecode(clarifying_results, filepaths_string, shared_dependencies=None, prompt=None):
print("generating code")
prompt = f"""
We have broken up the program into per-file generation.
Now your job is to generate only the code for the file {filepaths_string}.
Make sure to have consistent filenames if you reference other files we are also generating.
Remember that you must obey 3 things:
- you are generating code for the file {filepaths_string}
- do not stray from the names of the files and the shared dependencies we have decided on
- follow the {clarifying_results} laid out in the previous steps.
Bad response:
```javascript
console.log("hello world")
```
Good response:
console.log("hello world")
Begin generating the code now.
"""
systemprompt = file_code_agent(filepaths_string, shared_dependencies)
filecode = chat_with_gpt3(systemprompt, prompt)
print(filecode)
return filecode
def generate_shared_dependencies(prompt, filepaths_string):
systemprompt = f"""You are an AI developer who is trying to write a program that will generate code for the user based on their intent.
In response to the user's prompt:
---
the app is: {prompt}
---
the files we have decided to generate are: {filepaths_string}
Now that we have a list of files, we need to understand what dependencies they share.
Please name and briefly describe what is shared between the files we are generating, including exported variables, data schemas, id names of every DOM elements that javascript functions will use, message names, and function names.
Exclusively focus on the names of the shared dependencies, and do not add any other explanation.
"""
result = chat_with_gpt3(systemprompt, prompt)
print(result)
return result
def debug_code(directory):
extensions = ['py', 'html', 'js', 'css', 'c', 'rs']
while True:
code_files = []
for extension in extensions:
code_files.extend(y for x in os.walk(directory) for y in glob(os.path.join(x[0], f'*.{extension}')))
print("Total number of py files:", len(code_files))
if len(code_files) == 0:
print("Double check that you have downloaded the repo and set the code_dir variable correctly.")
all_funcs = []
unit_tests = []
for code_file in code_files:
funcs = list(get_functions(code_file))
code_tokens_string = json.dumps(code_file)
code_tokens = num_tokens_from_string(code_tokens_string)
if code_tokens < tokenLimit:
unit_test = unit_test_agent(code_file)
else:
for func in funcs:
unit_test_prompt = unit_test_agent()
unit_test = chat_with_gpt3(unit_test_prompt, func)
unit_tests.append(unit_test)
for func in funcs:
all_funcs.append(func)
all_funcs_string = json.dumps(all_funcs)
print("Total number of functions:", len(all_funcs))
df = pd.DataFrame(all_funcs)
df['code_embedding'] = df['code'].apply(lambda x: get_embedding(x, engine="text-embedding-ada-002"))
df['filepath'] = df['filepath'].apply(lambda x: x.replace(directory, ""))
df.to_csv("functions.csv", index=True)
df.head()
debug_code_agent = chat_with_gpt3(debug_agent, all_funcs_string)
if not debug_code_agent or debug_code_agent.strip().lower() == 'no':
break
else:
print(debug_code_agent)
# Main function
def main(prompt, directory=DEFAULT_DIRECTORY, model=DEFAULT_MODEL, file=None):
if prompt.endswith(".md"):
with open(prompt, "r") as f:
prompt = f.read()
print("Hello, I am your local AI developer! You said you wanted:")
print("\033[92m" + prompt + "\033[0m")
# Get the repo name from the user
repo_name = input("Enter the name for the new directory: ")
directory = os.path.join(directory, repo_name)
prompt_string = json.dumps(prompt)
new_prompt = clarify(prompt_string)
new_prompt_string = json.dumps(new_prompt)
print(prompt_string)
filepaths = generate_filepaths(new_prompt_string)
print(filepaths)
list_actual = []
try:
list_actual = ast.literal_eval(filepaths)
shared_dependencies = None
if os.path.exists("shared_dependencies.md"):
with open("shared_dependencies.md", "r") as f:
shared_dependencies = f.read()
if file is not None:
print("File", file)
filecode = generate_filecode(file, new_prompt_string, filepaths, shared_dependencies)
write_file(filepaths, filecode, directory)
else:
clean_dir(directory)
shared_dependencies = generate_shared_dependencies(prompt, filepaths)
write_file("shared_dependencies.md", shared_dependencies, directory)
for filepaths in list_actual:
filecode = generate_filecode(new_prompt_string, filepaths, shared_dependencies)
write_file(filepaths, filecode, directory)
debug_code(directory)
except Exception as e:
print("Failed to parse result")
print(f"Type: {type(e).__name__}")
print(f"Message: {str(e)}")
traceback.print_exc()
if __name__ == "__main__":
if len(sys.argv) < 2:
if not os.path.exists("prompt.md"):
print("Please provide a prompt file or a prompt string")
sys.exit(1)
else:
prompt = "prompt.md"
else:
prompt = sys.argv[1]
directory = sys.argv[2] if len(sys.argv) > 2 else DEFAULT_DIRECTORY
file = sys.argv[3] if len(sys.argv) > 3 else None
main(prompt, directory, file) | [
"You are an AI developer who is trying to write a program that will generate code for the user based on their intent.\n \n In response to the user's prompt:\n\n ---\n the app is: PLACEHOLDER\n ---\n \n the files we have decided to generate are: PLACEHOLDER\n\n Now that we have a list of files, we need to understand what dependencies they share.\n Please name and briefly describe what is shared between the files we are generating, including exported variables, data schemas, id names of every DOM elements that javascript functions will use, message names, and function names.\n Exclusively focus on the names of the shared dependencies, and do not add any other explanation.\n ",
"\n\nIs anything unclear? If yes, only answer in the form:\n{remainingunclear areas} remaining questions. \n{Next question}\nIf everything is sufficiently clear, only answer \"no\".",
"\n We have broken up the program into per-file generation. \n Now your job is to generate only the code for the file PLACEHOLDER. \n Make sure to have consistent filenames if you reference other files we are also generating.\n \n Remember that you must obey 3 things: \n - you are generating code for the file PLACEHOLDER\n - do not stray from the names of the files and the shared dependencies we have decided on\n - follow the PLACEHOLDER laid out in the previous steps.\n \n Bad response:\n ```javascript \n console.log(\"hello world\")\n ```\n \n Good response:\n console.log(\"hello world\")\n \n Begin generating the code now.\n\n ",
"You are an AI developer who is trying to write a program that will generate code for the user based on their intent.\n When given their intent, create a complete, exhaustive list of filepaths that the user would write to make the program.\n Only list the filepaths you would write, and return them as a python array of strings. \n do not add any other explanation, only return a python array of strings.",
"prompt.md"
] |
2024-01-10 | peytontolbert/llm-coder | generated~test~third_party_api.py | import openai
import requests
import json
import webbrowser
import tkinter
from bs4 import BeautifulSoup
from lxml import etree
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from urllib.parse import urlparse
from re import search
import time
class AI:
def __init__(self, **kwargs):
self.kwargs = kwargs
def start(self, system, user):
messages = [
{"role": "system", "content": system},
{"role": "user", "content": user},
]
return self.next(messages)
def fsystem(self, msg):
return {"role": "system", "content": msg}
def fuser(self, msg):
return {"role": "user", "content": msg}
def next(self, messages: list[dict[str, str]], prompt=None):
if prompt:
messages = messages + [{"role": "user", "content": prompt}]
response = openai.ChatCompletion.create(
messages=messages,
stream=True,
**self.kwargs
)
chat = []
for chunk in response:
delta = chunk['choices'][0]['delta']
msg = delta.get('content', '')
print(msg, end="")
chat.append(msg)
return messages + [{"role": "assistant", "content": "".join(chat)}]
def get_current_weather(self, location: str, unit: str) -> str:
api_key = "your_api_key"
res = requests.get(f"http://api.weatherapi.com/v1/current.json?key={api_key}&q={location}&aqi=no")
data = json.loads(res.text)
temp = data["current"]["temp_" + unit]
desc = data["current"]["condition"]["text"]
return f"The weather in {location.title()} is {desc.lower()} with a temperature of {temp} {unit.upper()}."
def search_web(self, query: str) -> str:
try:
driver = webdriver.Chrome(ChromeDriverManager().install())
except:
return "Error: Webdriver not installed or found. Please install ChromeDriver."
driver.get("https://www.google.com/")
search_box = driver.find_element_by_name("q")
search_box.send_keys(query)
search_box.submit()
time.sleep(1)
results = driver.find_elements_by_css_selector("div.g")
for result in results:
link = result.find_element_by_tag_name("a")
href = link.get_attribute("href")
if "http" in href:
parsed_uri = urlparse(href)
domain = '{uri.netloc}'.format(uri=parsed_uri)
if search("google|youtube", domain) is None:
webbrowser.open_new_tab(href)
return "Here's what I found:"
return "Sorry, I couldn't find anything about that."
def parse_html(self, html: str, parser="lxml") -> str:
soup = BeautifulSoup(html, parser)
return soup.prettify()
def parse_xml(self, xml: str) -> etree._Element:
return etree.fromstring(xml) | [
"messages + [{\"role\": \"user\", \"content\": prompt}]"
] |
2024-01-10 | peytontolbert/llm-coder | clonegitrepo.py | import os
import subprocess
from dotenv import load_dotenv
import chromadb
from chromadb.config import Settings
from utils import clean_dir
import shutil
from glob import glob
from openai.embeddings_utils import get_embedding
import pandas as pd
import openai
from dotenv import load_dotenv
# Initialize OpenAI and GitHub API keys
openai.api_key = os.getenv('OPENAI_API_KEY')
def get_file_paths(repo_url, clone_dir):
# Clone the repo
subprocess.run(['git', 'clone', repo_url, clone_dir])
# Walk the directory and get all file paths
file_paths = []
for dirpath, dirnames, filenames in os.walk(clone_dir):
for filename in filenames:
file_paths.append(os.path.join(dirpath, filename))
return file_paths
def get_file_content(file_path):
try:
with open(file_path, 'r') as file:
content = file.read()
return content
except UnicodeDecodeError:
print(f'Skipped file due to ecoding issues: {file_path}')
return None
def get_function_name(code):
"""
Extract function name from a line beginning with "def "
"""
assert code.startswith("def ")
return code[len("def "): code.index("(")]
def get_until_no_space(all_lines, i) -> str:
"""
Get all lines until a line outside the function definition is found.
"""
ret = [all_lines[i]]
for j in range(i + 1, i + 10000):
if j < len(all_lines):
if len(all_lines[j]) == 0 or all_lines[j][0] in [" ", "\t", ")"]:
ret.append(all_lines[j])
else:
break
return "\n".join(ret)
def get_functions(filepath):
"""
Get all functions in a Python file.
"""
whole_code = open(filepath).read().replace("\r", "\n")
all_lines = whole_code.split("\n")
for i, l in enumerate(all_lines):
if l.startswith("def "):
code = get_until_no_space(all_lines, i)
function_name = get_function_name(code)
yield {"code": code, "function_name": function_name, "filepath": filepath}
def main():
load_dotenv()
repo_url = os.getenv('REPO_URL')
clone_dir = os.getenv('CLONE_DIR')
file_paths = get_file_paths(repo_url, clone_dir)
print(*file_paths, sep='\n')
code_files = [y for x in os.walk(clone_dir) for y in glob(os.path.join(x[0], '*.py'))]
print("Total number of py files:", len(code_files))
if len(code_files) == 0:
print("Double check that you have downloaded the repo and set the code_dir variable correctly.")
all_funcs = []
for code_file in code_files:
funcs = list(get_functions(code_file))
for func in funcs:
all_funcs.append(func)
print("Total number of functions:", len(all_funcs))
df = pd.DataFrame(all_funcs)
df['code_embedding'] = df['code'].apply(lambda x: get_embedding(x, engine="text-embedding-ada-002"))
df['filepath'] = df['filepath'].apply(lambda x: x.replace(clone_dir, ""))
df.to_csv("functions2.csv", index=True)
df.head()
if __name__ == "__main__":
main() | [] |
2024-01-10 | peytontolbert/llm-coder | codingagents.py | import os
import subprocess
from pathlib import Path
import openai
from gptfunctions import ChatGPTAgent
# Initialize OpenAI and GitHub API keys
openai.api_key = "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"
def write_to_file(path, content):
with open(path, 'w') as f:
f.write(content)
def create_directory(path):
Path(path).mkdir(parents=True, exist_ok=True)
def test_code(path):
result = subprocess.run(['python', path], stdout=subprocess.PIPE)
return result.stdout.decode('utf-8')
# Initialize agents
requirements_agent = ChatGPTAgent("""You are an AI that specializes in software requirements analysis.
Your task is to transform user needs and constraints into a formal list of software requirements. You should detail functional, non-functional, and system requirements based on the user's provided description.
Do not add any other explanation, only return a python dictionary where keys are types of requirements ('Functional', 'Non-functional', 'System') and values are lists of strings representing each requirement.""")
def design_agent():
systemprompt = """
You are an AI that specializes in software system design.
Based on the provided requirements, your task is to create a comprehensive system design. This includes creating a system architecture diagram, deciding on the software modules and their interactions, and defining database schema if necessary.
Return the system design as a Python dictionary. The dictionary should include keys like 'Architecture', 'Modules', 'Interactions', 'Database Schema' (if applicable), each containing a textual description or a link to a created diagram."""
return systemprompt
def algorithm_agent():
systemprompt = """You are an AI that specializes in algorithm development.
Based on the system design and the software requirements provided, your task is to create detailed algorithms that represent the logic and operations of each module.
Return the algorithms as a Python dictionary where keys are module names and values are pseudocode or detailed textual descriptions representing each algorithm."""
return systemprompt
def coding_agent():
systemprompt = """You are an AI that specializes in software coding.
Based on the provided algorithms and system design, your task is to generate the actual code for the software in chunks. Remember, there is a token limit per session, so you need to produce self-contained pieces of code that can be put together to form the complete software.
Please code in python and split the code into logical components or modules. Make sure each chunk of code you produce can be independently compiled and tested.
Return each code chunk as a separate string in a Python list."""
return systemprompt
def debug_agent():
systemprompt = """You are an AI that specializes in software debugging.
Based on the provided code chunks and system design, your task is to debug the code chunks into a complete software system. Remember, there is a token limit per session, so you need to produce self-contained pieces of code that can be put together to form the complete software if you reach your token limit.
Only return the code that needs to be changed. Do not return the entire code.
Return code in a Python object with the name as the filename and the code as the content."""
# Add more agents as needed...
return systemprompt
def file_code_agent(filepaths_string, shared_dependencies):
systemprompt = f"""You are an AI developer who is trying to write a program that will generate code for the user based on their intent.
the files we have decided to generate are: {filepaths_string}
the shared dependencies (like filenames and variable names) we have decided on are: {shared_dependencies}
only write valid code for the given filepath and file type, and return only the code.
do not add any other explanation, only return valid code for that file type."""
return systemprompt
def unit_test_agent():
systemprompt = """You are an AI that specializes in software debugging.
Based on the provided code chunks and system design, your task is to debug the code chunks into a complete software system. Remember, there is a token limit per session, so you need to produce self-contained pieces of code that can be put together to form the complete software if you reach your token limit.
Only return the code that needs to be changed. Do not return the entire code.
Return code in a Python object with the name as the filename and the code as the content."""
# Add more agents as needed...
return systemprompt
def clarifying_agent():
systemprompt = """You are an AI designed to clarify the user's intent.
You will read instructions and not carry them out, only seek to clarify them.
Specifically you will first summarise a list of super short bullets of areas that need clarification.
Then you will pick one clarifying question of each area, and wait for an answer from the user.
"""
return systemprompt | [
"You are an AI that specializes in software debugging.\n \n Based on the provided code chunks and system design, your task is to debug the code chunks into a complete software system. Remember, there is a token limit per session, so you need to produce self-contained pieces of code that can be put together to form the complete software if you reach your token limit.\n Only return the code that needs to be changed. Do not return the entire code.\n \nReturn code in a Python object with the name as the filename and the code as the content.",
"\nYou are an AI that specializes in software system design. \n\nBased on the provided requirements, your task is to create a comprehensive system design. This includes creating a system architecture diagram, deciding on the software modules and their interactions, and defining database schema if necessary.\n\nReturn the system design as a Python dictionary. The dictionary should include keys like 'Architecture', 'Modules', 'Interactions', 'Database Schema' (if applicable), each containing a textual description or a link to a created diagram.",
"You are an AI that specializes in algorithm development. \n\nBased on the system design and the software requirements provided, your task is to create detailed algorithms that represent the logic and operations of each module.\n\nReturn the algorithms as a Python dictionary where keys are module names and values are pseudocode or detailed textual descriptions representing each algorithm.",
"You are an AI designed to clarify the user's intent.\n \nYou will read instructions and not carry them out, only seek to clarify them.\nSpecifically you will first summarise a list of super short bullets of areas that need clarification.\nThen you will pick one clarifying question of each area, and wait for an answer from the user.\n",
"You are an AI that specializes in software coding. \n\nBased on the provided algorithms and system design, your task is to generate the actual code for the software in chunks. Remember, there is a token limit per session, so you need to produce self-contained pieces of code that can be put together to form the complete software.\n\nPlease code in python and split the code into logical components or modules. Make sure each chunk of code you produce can be independently compiled and tested.\n\nReturn each code chunk as a separate string in a Python list.",
"You are an AI developer who is trying to write a program that will generate code for the user based on their intent.\n \n the files we have decided to generate are: PLACEHOLDER\n\n the shared dependencies (like filenames and variable names) we have decided on are: PLACEHOLDER\n \n only write valid code for the given filepath and file type, and return only the code.\n do not add any other explanation, only return valid code for that file type."
] |
2024-01-10 | peytontolbert/llm-coder | memoryagents.py | import openai
from gptfunctions import ChatGPTAgent
from dotenv import load_dotenv
import os
# Initialize OpenAI and GitHub API keys
openai.api_key = os.getenv('OPENAI_API_KEY')
def memory_consolidation_agent(memory):
prompt = "You are an AI that specializes in memory consolidation. \n\nYour task is to consolidate the provided memories into a single memory. \n\nReturn the consolidated memory as a Python dictionary."
print(prompt)
result = ChatGPTAgent.chat_with_gpt3(prompt, memory)
def contextual_understanding_agent(memories):
prompt = "You are an AI that specializes in contextual understanding. \n\nYour task is to understand the context of the provided memories. \n\nReturn the context as a Python dictionary."
print(prompt)
result = ChatGPTAgent.chat_with_gpt3(prompt, memories)
def memory_classification_agent(context):
prompt = "You are an AI that specializes in memory classification. \n\nYour task is to classify the provided memories. \n\nReturn the classification as a Python dictionary."
print(prompt)
result = ChatGPTAgent.chat_with_gpt3(prompt, context)
def memory_compression_agent(context):
prompt = "You are an AI that specializes in memory compression. \n\nYour task is to compress the provided memory. \n\nReturn the compressed memory as a Python dictionary."
print(prompt)
result = ChatGPTAgent.chat_with_gpt3(prompt, context)
def memory_retrieval_agent(context):
prompt = "You are an AI that specializes in memory retrieval. \n\nYour task is to retrieve the memories that match the provided context. \n\nReturn the retrieved memories as a Python dictionary."
print(prompt)
result = ChatGPTAgent.chat_with_gpt3(prompt, context)
def memory_validation_agent(memory):
prompt = "You are an AI that specializes in memory validation. \n\nYour task is to validate the provided memory. \n\nReturn the validation result as a Python dictionary."
print(prompt)
result = ChatGPTAgent.chat_with_gpt3(prompt, memory)
| [
"You are an AI that specializes in memory validation. \n\nYour task is to validate the provided memory. \n\nReturn the validation result as a Python dictionary.",
"You are an AI that specializes in memory consolidation. \n\nYour task is to consolidate the provided memories into a single memory. \n\nReturn the consolidated memory as a Python dictionary.",
"You are an AI that specializes in contextual understanding. \n\nYour task is to understand the context of the provided memories. \n\nReturn the context as a Python dictionary.",
"You are an AI that specializes in memory classification. \n\nYour task is to classify the provided memories. \n\nReturn the classification as a Python dictionary.",
"You are an AI that specializes in memory compression. \n\nYour task is to compress the provided memory. \n\nReturn the compressed memory as a Python dictionary.",
"You are an AI that specializes in memory retrieval. \n\nYour task is to retrieve the memories that match the provided context. \n\nReturn the retrieved memories as a Python dictionary."
] |
2024-01-10 | peytontolbert/llm-coder | generated~test~web_browser_application.py | import openai
import requests
import json
import webbrowser
import tkinter as tk
from bs4 import BeautifulSoup
from lxml import html
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
import urllib
import re
import time
class AI:
def __init__(self, **kwargs):
self.kwargs = kwargs
def start(self, system, user):
messages = [
{"role": "system", "content": system},
{"role": "user", "content": user},
]
return self.next(messages)
def fsystem(self, msg):
return {"role": "system", "content": msg}
def fuser(self, msg):
return {"role": "user", "content": msg}
def next(self, messages: list[dict[str, str]], prompt=None):
if prompt:
messages = messages + [{"role": "user", "content": prompt}]
response = openai.ChatCompletion.create(
messages=messages,
stream=True,
**self.kwargs
)
chat = []
for chunk in response:
delta = chunk['choices'][0]['delta']
msg = delta.get('content', '')
print(msg, end="")
chat.append(msg)
return messages + [{"role": "assistant", "content": "".join(chat)}] | [
"messages + [{\"role\": \"user\", \"content\": prompt}]"
] |
2024-01-10 | peytontolbert/llm-coder | code2prompt.py | import os
from constants import DEFAULT_DIRECTORY, DEFAULT_MODEL, DEFAULT_MAX_TOKENS, EXTENSION_TO_SKIP
from dotenv import load_dotenv
from glob import glob
from utils import get_file_paths, get_functions, get_file_content, get_function_name, get_until_no_space, num_tokens_from_string, truncate_text_tokens, len_safe_get_embedding, save_embedded_code
from codeagents import code_understanding_agent, code_error_detection_agent, code_testing_agent, code_optimization_agent, code_documentation_agent, code_algorithm_agent, code_design_agent, code_prompt_agent
from gptfunctions import ChatGPTAgent
import openai
from openai.embeddings_utils import get_embedding
import pandas as pd
import numpy as np
import json
from dotenv import load_dotenv
# Initialize OpenAI and GitHub API keys
openai.api_key = os.getenv('OPENAI_API_KEY')
tokenLimit = 2000
def chunk_and_summarize(code_file):
chunks = 1
code = get_file_content(code_file)
if code is None:
return None
tokens = num_tokens_from_string(code)
function_list = []
docs = []
if tokens < tokenLimit:
doc_text = ChatGPTAgent.chat_with_gpt3(code, code_documentation_agent())
docs.append({"doc": doc_text, "code": code, "filepath": code_file}) # dict
print("tokens < limit. saving full code")
docs.append({"doc": doc_text, "code": code, "filepath": code_file}) # dict
else:
funcs = list(get_functions(code))
for func in funcs:
potential_tokens = tokens + num_tokens_from_string(func)
if potential_tokens < tokenLimit:
function_list.append(func)
tokens = potential_tokens
else:
print("Need to chunk the data but not lose track when doing multiple summaries")
function_list = [func]
tokens = num_tokens_from_string(code)
if function_list:
doc = ChatGPTAgent.chat_with_gpt3(function_list, code_documentation_agent())
docs.append(doc)
return docs
def create_algorithms_and_design(all_docs):
all_docs_string = json.dumps(all_docs)
tokens = num_tokens_from_string(all_docs_string)
algorithms = []
designs = []
docs_list = []
if tokens < tokenLimit:
algorithm = ChatGPTAgent.chat_with_gpt3(all_docs_string, code_algorithm_agent())
algorithms.append(algorithm)
design = ChatGPTAgent.chat_with_gpt3(all_docs_string, code_design_agent())
designs.append(design)
else:
for doc in all_docs:
doc_string = json.dumps(doc)
potential_tokens = tokens + num_tokens_from_string(doc_string)
if potential_tokens < tokenLimit:
docs_list.append(doc_string)
tokens = potential_tokens
else:
doc_list_string = json.dumps(docs_list)
algorithm = ChatGPTAgent.chat_with_gpt3(doc_list_string, code_algorithm_agent())
algorithms.append(algorithm)
design = ChatGPTAgent.chat_with_gpt3(doc_list_string, code_design_agent())
designs.append(design)
docs_list = [doc_string]
tokens = num_tokens_from_string(all_docs_string)
if docs_list:
doc_list_string = json.dumps(docs_list)
algorithm = ChatGPTAgent.chat_with_gpt3(doc_list_string, code_algorithm_agent())
algorithms.append(algorithm)
design = ChatGPTAgent.chat_with_gpt3(doc_list_string, code_design_agent())
designs.append(design)
return algorithms, designs
def create_prompts_from_algorithms_and_designs(algorithms, designs):
prompts = []
for algorithm, design in zip(algorithms, designs):
prompt = "Algorithm: " + algorithm + "\nDesign: " + design
prompts.append(prompt)
return prompts
def main():
load_dotenv()
repo_url = os.getenv('REPO_URL')
clone_dir = os.getenv('CLONE_DIR')
file_paths = get_file_paths(clone_dir)
code_files = [y for x in os.walk(clone_dir) for ext in ('*.py', '*.js', '*.cpp', '*.rs', '*.md', '*.txt') for y in glob(os.path.join(x[0], ext))]
if len(code_files) == 0:
print("Double check that you have downloaded the repo and set the code_dir variable correctly.")
all_funcs = []
all_docs = []
for code_file in code_files:
docs = list(chunk_and_summarize(code_file))
funcs = list(get_functions(code_file))
for func in funcs:
all_funcs.append(func)
for doc in docs:
all_docs.append(doc)
all_docs_string = json.dumps(all_docs)
tokens = num_tokens_from_string(all_docs_string)
if tokens < tokenLimit:
print("tokens < limit with all docs. getting prompt")
prompt = ChatGPTAgent.chat_with_gpt3(all_docs_string, code_prompt_agent())
print(f"Prompt: " + prompt)
else:
algorithms, designs = create_algorithms_and_design(all_docs)
prompts = create_prompts_from_algorithms_and_designs(algorithms, designs)
prompts_string = json.dumps(prompts)
prompts_tokens = num_tokens_from_string(prompts_string)
if prompts_tokens < tokenLimit:
prompt = ChatGPTAgent.chat_with_gpt3(prompts_string, code_prompt_agent())
print(prompt)
else:
print("Need to chunk data for prompts")
print(prompts)
print("Total number of functions:", len(all_funcs))
save_embedded_code(all_funcs, clone_dir, "functions", "code")
save_embedded_code(all_docs, clone_dir, "documentations", "doc")
if __name__ == "__main__":
main() | [
"[]",
"Algorithm: PLACEHOLDER\nDesign: PLACEHOLDER"
] |
2024-01-10 | peytontolbert/llm-coder | gptfunctions.py | import openai
import json
import os
from dotenv import load_dotenv
# Initialize OpenAI and GitHub API keys
openai.api_key = os.getenv('OPENAI_API_KEY')
class ChatGPTAgent:
def __init__(self, prompt):
self.prompt = prompt
# Initialize a session with OpenAI's chat models
@staticmethod
def chat_with_gpt3(prompt, systemprompt):
messages = [{"role": "user", "content": prompt}]
if systemprompt:
messages.insert(0, {"role": "system", "content": systemprompt})
else:
messages.insert(0,{"role": "system", "content": "You are a helpful AI assistant"})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=messages,
temperature=0.9
)
return response['choices'][0]['message']['content']
# Initialize a session with OpenAI's chat models
@staticmethod
def function_with_gpt3(prompt, systemprompt=None):
messages = [{"role": "user", "content": prompt}]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=messages,
functions=[
{
"name": "get_current_price",
"description": "Get the current price of a stock",
"parameters": {
"type": "object",
"properties": {
"stock": {
"type": "string",
"description": "The stock to get the price of"
},
"unit": {
"type": "string",
"enum": ["USD", "EUR", "GBP"]
}
},
"required": ["stock"]
}
}
],
function_call="auto",
)
message = response['choices'][0]['message']
if message.get("function_call"):
function_name = message['function_call']['name']
function_response = get_current_price(
stock=message.get("stock"),
unit=message.get("unit"),
)
second_response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=[
{"role": "user", "content": "What is the stock price of AAPL?"},
message,
{
"role": "function",
"name": function_name,
"content": function_response,
},
],
)
return second_response
def generate(self, input_data):
return ChatGPTAgent.chat_with_gpt3(self.prompt, input_data)
def get_current_price(stock, unit="USD"):
"""Get the current price of a given stock"""
print("replace with real API call")
price = 100
stock_info = {
"stock": stock,
"price": price,
"unit": unit,
}
return json.dumps(stock_info)
def get_price(stock):
prompt = f"whats the price of {stock} right now?"
print(prompt)
result = ChatGPTAgent.function_with_gpt3(prompt)
message = result['choices'][0]['message']['content']
return message | [
"What is the stock price of AAPL?",
"You are a helpful AI assistant",
"whats the price of PLACEHOLDER right now?"
] |
2024-01-10 | peytontolbert/llm-coder | codesearch.py | import requests
import os
from dotenv import load_dotenv
import tiktoken
from glob import glob
import openai
import pandas as pd
embeddinglimit = 8190
tokenlimit = 2047
input_datapath = "data/test.csv"
df = pd.DataFrame(all_funcs)
def num_tokens_from_string(string: str, encoding_name: str) -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.get_encoding(encoding_name)
num_tokens = len(encoding.encode(string))
return num_tokens
def get_embedding(text, model="text-embedding-ada-002"):
text = text.replace("\n", " ")
return openai.Embedding.create(input=text, model=model)['data'][0]['embedding']
def split_into_chunks (text, limit, overlap, encmodel):
lines = text.split('\n')
chunks = []
current_chunk = []
current_length = 0
for line in lines:
tokens_in_line = num_tokens_from_string(line, encmodel)
if current_length + tokens_in_line > limit:
# Create a new chunk
chunks.append('\n'.join(current_chunk))
current_chunk = current_chunk[-overlap:]
current_length = sum(num_tokens_from_string(line, encmodel) for line in current_chunk)
current_chunk.append(line)
current_length += tokens_in_line
# Add the last chunk
if current_chunk:
chunks.append('\n'.join(current_chunk))
return chunks
def get_function_name(code):
"""
Extract function name from a line beginning with "def "
"""
assert code.startwith("def ")
return code[len("def "): code.index("(")]
def get_until_no_space(all_lines, i) -> str:
"""
Get all lines until a line outside the function definition is found.
"""
ret = [all_lines[i]]
for j in range(i + 1, i + 10000):
if j< len(all_lines):
if len(all_lines[j]) == 0 or all_lines[j][0] in [" ", "\t", ")"]:
ret.append(all_lines[j])
else:
break
return "\n".join(ret)
def get_functions(filepath):
"""
Get all functions in a Python file.
"""
whole_code = open(filepath).read().replace("\r", "\n")
all_lines = whole_code.split("\n")
for i, l in enumerate(all_lines):
if l.startswith("def "):
code = get_until_no_space(all_lines, i)
function_name = get_function_name(code)
yield {"code": code, "function_name": function_name, "filepath": filepath}
def get_file_paths(owner, repo):
# Github API url
url = f"https://api.github.com/repos/{owner}/{repo}/git/trees/master?recursive=1"
# Make the API request
response = requests.get(url)
# Get the JSON data from the response
data = response.json()
# Initialize an empty list for the file paths
file_paths = []
# Loop through each file in the data
for file in data["tree"]:
# If the file is not a directory, add its path to the list
if file["type"] != "tree":
file_paths.append(file["path"])
return file_paths
def main():
load_dotenv()
owner = os.getenv('REPO_OWNER')
repo = os.getenv('REPO_NAME')
text = "Hello world how many tokens"
encmodel = "cl100k_base"
tokens = num_tokens_from_string(text, encmodel)
print(tokens)
df['ada_embedding'] = df.combined.apply(lambda x: get_embedding(x, model="text-embedding-ada-002"))
df.to_csv('output/embeddings.csv')
file_paths = get_file_paths(owner, repo, encmodel)
print(*file_paths, sep='\n')
if __name__ == "__main__":
main() | [] |
2024-01-10 | peytontolbert/llm-coder | editcode.py | # Import necessary modules
import openai
import os
import sys
import time
import json
import re
import ast
from constants import DEFAULT_DIRECTORY, DEFAULT_MODEL, DEFAULT_MAX_TOKENS
from utils import clean_dir, write_file, get_file_content, get_file_paths, get_functions, chunk_and_summarize
from codingagents import design_agent, algorithm_agent, coding_agent, code_integration_agent, debug_agent, file_code_agent
from glob import glob
from openai.embeddings_utils import get_embedding
import pandas as pd
import numpy as np
from dotenv import load_dotenv
# Initialize OpenAI and GitHub API keys
openai.api_key = os.getenv('OPENAI_API_KEY')
# Initialize a session with OpenAI's chat models
def chat_with_gpt3(systemprompt, prompt):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": systemprompt},
{"role": "user", "content": prompt},
],
temperature=0.9
)
return response['choices'][0]['message']['content']
def save_to_local_directory(repo_name, functions):
# Check if the directory already exists
if not os.path.exists(repo_name):
# If not, create it
os.makedirs(repo_name)
# Create a new file in the directory to hold all the functions
file_path = os.path.join(repo_name, "functions.py")
with open(file_path, "w") as f:
# Write all the functions to the file
for function in functions:
f.write(function)
f.write("\n\n")
def generate_filepaths(prompt):
systemprompt = f"""You are an AI developer who is trying to write a program that will generate code for the user based on their intent.
When given their intent, create a complete, exhaustive list of filepaths that the user would write to make the program.
Only list the filepaths you would write, and return them as a python array of strings.
do not add any other explanation, only return a python array of strings."""
result = chat_with_gpt3(systemprompt, prompt)
print(result)
return result
def generate_filecode(filename, systems_design, filepaths_string, shared_dependencies=None, prompt=None):
print("generating code")
prompt = f"""
We have broken up the program into per-file generation.
Now your job is to generate only the code for the file {filename}.
Make sure to have consistent filenames if you reference other files we are also generating.
Remember that you must obey 3 things:
- you are generating code for the file {filename}
- do not stray from the names of the files and the shared dependencies we have decided on
- follow the {systems_design} laid out in the previous steps.
Bad response:
```javascript
console.log("hello world")
```
Good response:
console.log("hello world")
Begin generating the code now.
"""
systemprompt = file_code_agent(filepaths_string, shared_dependencies)
result = chat_with_gpt3(systemprompt, prompt)
print(result)
return filename, result
def generate_shared_dependencies(prompt, filepaths):
systemprompt = f"""You are an AI developer who is trying to write a program that will generate code for the user based on their intent.
In response to the user's prompt:
---
the app is: {prompt}
---
the files we have decided to generate are: {filepaths}
Now that we have a list of files, we need to understand what dependencies they share.
Please name and briefly describe what is shared between the files we are generating, including exported variables, data schemas, id names of every DOM elements that javascript functions will use, message names, and function names.
Exclusively focus on the names of the shared dependencies, and do not add any other explanation.
"""
result = chat_with_gpt3(systemprompt, prompt)
print(result)
return result
def debug_code(directory):
extensions = ['py', 'html', 'js', 'css', 'c', 'rs']
code_files = []
for extension in extensions:
code_files.extend(y for x in os.walk(directory) for y in glob(os.path.join(x[0], f'*.{extension}')))
print("Total number of py files:", len(code_files))
if len(code_files) == 0:
print("Double check that you have downloaded the repo and set the code_dir variable correctly.")
all_funcs = []
for code_file in code_files:
funcs = list(get_functions(code_file))
for func in funcs:
all_funcs.append(func)
print("Total number of functions:", len(all_funcs))
df = pd.DataFrame(all_funcs)
df['code_embedding'] = df['code'].apply(lambda x: get_embedding(x, engine="text-embedding-ada-002"))
df['filepath'] = df['filepath'].apply(lambda x: x.replace(directory, ""))
df.to_csv("functions.csv", index=True)
df.head()
debug_code_agent = chat_with_gpt3(debug_agent, all_funcs)
# Main function
def main(prompt, directory=DEFAULT_DIRECTORY, model=DEFAULT_MODEL, file=None):
# Get the project objective from the user
if prompt.endswith(".md"):
with open(prompt, "r") as f:
prompt = f.read()
print("Hello, I am your local AI developer! You said you wanted:")
print("\033[92m" + prompt + "\033[0m")
# Get the repo name from the user
repo_name = input("Enter the name for the new directory: ")
directory = os.path.join(directory, repo_name)
prompt_string = json.dumps(prompt)
design_prompt = design_agent()
systems_design = chat_with_gpt3(design_prompt, prompt_string)
print(f"Systems design: "+systems_design)
code_prompt = coding_agent()
code = chat_with_gpt3(code_prompt, prompt+systems_design)
print(f"code: "+code)
code_integration_prompt = code_integration_agent()
code_integration = chat_with_gpt3(code_integration_prompt, prompt+systems_design+code)
filepaths = generate_filepaths(prompt)
filepaths_string = json.dumps(filepaths)
list_actual = []
try:
list_actual = ast.literal_eval(filepaths)
shared_dependencies = None
if os.path.exists("shared_dependencies.md"):
with open("shared_dependencies.md", "r") as f:
shared_dependencies = f.read()
if file is not None:
print("File", file)
filename, filecode = generate_filecode(file, systems_design, filepaths_string, shared_dependencies)
write_file(filename, filecode, directory)
else:
clean_dir(directory)
shared_dependencies = generate_shared_dependencies(prompt, filepaths_string)
write_file("shared_dependencies.md", shared_dependencies, directory)
for filename, filecode in generate_filecode.map(
list_actual, order_outputs=False, kwargs=dict(systems_design, filepaths_string, shared_dependencies)
):
write_file(filename, filecode, directory)
debug_code(directory)
except ValueError:
print("Failed to parse result")
if __name__ == "__main__":
if len(sys.argv) < 2:
if not os.path.exists("prompt.md"):
print("Please provide a prompt file or a prompt string")
sys.exit(1)
else:
prompt = "prompt.md"
else:
prompt = sys.argv[1]
directory = sys.argv[2] if len(sys.argv) > 2 else DEFAULT_DIRECTORY
file = sys.argv[3] if len(sys.argv) > 3 else None
main(prompt, directory, file) | [
"\n We have broken up the program into per-file generation. \n Now your job is to generate only the code for the file PLACEHOLDER. \n Make sure to have consistent filenames if you reference other files we are also generating.\n \n Remember that you must obey 3 things: \n - you are generating code for the file PLACEHOLDER\n - do not stray from the names of the files and the shared dependencies we have decided on\n - follow the PLACEHOLDER laid out in the previous steps.\n \n Bad response:\n ```javascript \n console.log(\"hello world\")\n ```\n \n Good response:\n console.log(\"hello world\")\n \n Begin generating the code now.\n\n ",
"prompt.md",
"You are an AI developer who is trying to write a program that will generate code for the user based on their intent.\n \n In response to the user's prompt:\n\n ---\n the app is: PLACEHOLDER\n ---\n \n the files we have decided to generate are: PLACEHOLDER\n\n Now that we have a list of files, we need to understand what dependencies they share.\n Please name and briefly describe what is shared between the files we are generating, including exported variables, data schemas, id names of every DOM elements that javascript functions will use, message names, and function names.\n Exclusively focus on the names of the shared dependencies, and do not add any other explanation.\n ",
"You are an AI developer who is trying to write a program that will generate code for the user based on their intent.\n\n When given their intent, create a complete, exhaustive list of filepaths that the user would write to make the program.\n \n Only list the filepaths you would write, and return them as a python array of strings. \n do not add any other explanation, only return a python array of strings."
] |
2024-01-10 | shamspias/gpt3-data-preprocessing | preprocessed_text.py | import openai
import re
# Load OpenAI GPT-3 API
openai.api_key = "YOUR_API_KEY"
model_engine = "text-davinci-003"
# Read article text
with open("article.txt", encoding='utf-8') as f:
text = f.read()
# Split text into sentences
sentences = re.split(r'(?<=[^A-Z].[.?]) +(?=[A-Z])', text)
# Create input-output pairs
pairs = []
for i in range(len(sentences) - 1):
pairs.append([sentences[i], sentences[i+1]])
# Tokenize and encode input-output pairs
encoded_pairs = []
for pair in pairs:
prompt = pair[0]
response = pair[1]
prompt_completion = openai.Completion.create(
engine=model_engine,
prompt=prompt,
max_tokens=1024,
n=1,
stop=None,
temperature=0.5,
)
response_completion = openai.Completion.create(
engine=model_engine,
prompt=response,
max_tokens=1024,
n=1,
stop=None,
temperature=0.5,
)
prompt_tokens = prompt_completion.choices[0].text
response_tokens = response_completion.choices[0].text
encoded_pairs.append([prompt_tokens, response_tokens])
# Save encoded pairs to file
with open("encoded_pairs.txt", "w") as f:
f.write(str(encoded_pairs))
| [] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.