date_collected (string, 1 distinct value) | repo_name (string, 6–116 chars) | file_name (string, 2–220 chars) | file_contents (string, 13–357k chars) | prompts (sequence)
---|---|---|---|---|
2024-01-10 | MaartenGr/KeyBERT | keybert~llm~_openai.py | import time
import openai
from tqdm import tqdm
from typing import Mapping, Any, List
from keybert.llm._base import BaseLLM
from keybert.llm._utils import retry_with_exponential_backoff, process_candidate_keywords
DEFAULT_PROMPT = """
The following is a list of documents. Please extract the top keywords, separated by a comma, that describe the topic of the texts.
Document:
- Traditional diets in most cultures were primarily plant-based with a little meat on top, but with the rise of industrial style meat production and factory farming, meat has become a staple food.
Keywords: Traditional diets, Plant-based, Meat, Industrial style meat production, Factory farming, Staple food, Cultural dietary practices
Document:
- The website mentions that it only takes a couple of days to deliver but I still have not received mine.
Keywords: Website, Delivery, Mention, Timeframe, Not received, Waiting, Order fulfillment
Document:
- [DOCUMENT]
Keywords:"""
DEFAULT_CHAT_PROMPT = """
I have the following document:
[DOCUMENT]
Based on the information above, extract the keywords that best describe the topic of the text.
Use the following format separated by commas:
<keywords>
"""
class OpenAI(BaseLLM):
""" Using the OpenAI API to extract keywords
The default method is `openai.Completion` if `chat=False`.
The prompts will also need to follow a completion task. If you
are looking for more interactive chats, use `chat=True`
with `model="gpt-3.5-turbo"`.
For an overview see:
https://platform.openai.com/docs/models
NOTE: The resulting keywords are expected to be separated by commas, so any
changes to the prompt will have to make sure that the resulting
keywords remain comma-separated.
Arguments:
client: An `openai.OpenAI` client
model: Model to use within OpenAI, defaults to `"gpt-3.5-turbo-instruct"`.
NOTE: If a `gpt-3.5-turbo` model is used, make sure to set
`chat` to True.
generator_kwargs: Kwargs passed to `openai.Completion.create`
for fine-tuning the output.
prompt: The prompt to be used in the model. If no prompt is given,
`self.default_prompt_` is used instead.
NOTE: Use `"[DOCUMENT]"` in the prompt
to decide where the document needs to be inserted
delay_in_seconds: The delay in seconds between consecutive prompts
in order to prevent RateLimitErrors.
exponential_backoff: Retry requests with a random exponential backoff.
A short sleep is used when a rate limit error is hit,
then the request is retried. The sleep length is increased after
every failure, up to 10 unsuccessful requests.
If True, overrides `delay_in_seconds`.
chat: Set this to True if a chat model is used. Generally, this is GPT-3.5 or higher.
See: https://platform.openai.com/docs/models/gpt-3-5
verbose: Set this to True if you want to see a progress bar for the
keyword extraction.
Usage:
To use this, you will need to install the openai package first:
`pip install openai`
Then, get yourself an API key and use OpenAI's API as follows:
```python
import openai
from keybert.llm import OpenAI
from keybert import KeyLLM
# Create your LLM
client = openai.OpenAI(api_key=MY_API_KEY)
llm = OpenAI(client)
# Load it in KeyLLM
kw_model = KeyLLM(llm)
# Extract keywords
document = "The website mentions that it only takes a couple of days to deliver but I still have not received mine."
keywords = kw_model.extract_keywords(document)
```
You can also use a custom prompt:
```python
prompt = "I have the following document: [DOCUMENT] \nThis document contains the following keywords separated by commas: '"
llm = OpenAI(client, prompt=prompt, delay_in_seconds=5)
```
If you want to use OpenAI's ChatGPT model:
```python
llm = OpenAI(client, model="gpt-3.5-turbo", delay_in_seconds=10, chat=True)
```
"""
def __init__(self,
client,
model: str = "gpt-3.5-turbo-instruct",
prompt: str = None,
generator_kwargs: Mapping[str, Any] = {},
delay_in_seconds: float = None,
exponential_backoff: bool = False,
chat: bool = False,
verbose: bool = False
):
self.client = client
self.model = model
if prompt is None:
self.prompt = DEFAULT_CHAT_PROMPT if chat else DEFAULT_PROMPT
else:
self.prompt = prompt
self.default_prompt_ = DEFAULT_CHAT_PROMPT if chat else DEFAULT_PROMPT
self.delay_in_seconds = delay_in_seconds
self.exponential_backoff = exponential_backoff
self.chat = chat
self.verbose = verbose
self.generator_kwargs = generator_kwargs
if self.generator_kwargs.get("model"):
self.model = generator_kwargs.get("model")
if self.generator_kwargs.get("prompt"):
del self.generator_kwargs["prompt"]
if not self.generator_kwargs.get("stop") and not chat:
self.generator_kwargs["stop"] = "\n"
def extract_keywords(self, documents: List[str], candidate_keywords: List[List[str]] = None):
""" Extract topics
Arguments:
documents: The documents to extract keywords from
candidate_keywords: A list of candidate keywords that the LLM will fine-tune.
For example, it will create a nicer representation of
the candidate keywords, remove redundant keywords, or
shorten them depending on the input prompt.
Returns:
all_keywords: All keywords for each document
"""
all_keywords = []
candidate_keywords = process_candidate_keywords(documents, candidate_keywords)
for document, candidates in tqdm(zip(documents, candidate_keywords), disable=not self.verbose):
prompt = self.prompt.replace("[DOCUMENT]", document)
if candidates is not None:
prompt = prompt.replace("[CANDIDATES]", ", ".join(candidates))
# Delay
if self.delay_in_seconds:
time.sleep(self.delay_in_seconds)
# Use a chat model
if self.chat:
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
kwargs = {"model": self.model, "messages": messages, **self.generator_kwargs}
if self.exponential_backoff:
response = chat_completions_with_backoff(self.client, **kwargs)
else:
response = self.client.chat.completions.create(**kwargs)
keywords = response.choices[0].message.content.strip()
# Use a non-chat model
else:
if self.exponential_backoff:
response = completions_with_backoff(self.client, model=self.model, prompt=prompt, **self.generator_kwargs)
else:
response = self.client.completions.create(model=self.model, prompt=prompt, **self.generator_kwargs)
# The completions (non-chat) API returns plain text rather than a chat message
keywords = response.choices[0].text.strip()
keywords = [keyword.strip() for keyword in keywords.split(",")]
all_keywords.append(keywords)
return all_keywords
def completions_with_backoff(client, **kwargs):
return retry_with_exponential_backoff(
client.completions.create,
errors=(
openai.RateLimitError,
),
)(**kwargs)
def chat_completions_with_backoff(client, **kwargs):
return retry_with_exponential_backoff(
client.chat.completions.create,
errors=(
openai.RateLimitError,
),
)(**kwargs)
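
# Illustrative sketch (not part of the original module) of what the
# `retry_with_exponential_backoff` helper imported from `keybert.llm._utils` is assumed
# to do: retry the wrapped call on rate-limit errors, increasing the sleep (plus random
# jitter) after every failure. The real implementation may differ in its details.
def _sketch_retry_with_exponential_backoff(func, initial_delay=1.0, exponential_base=2.0,
                                           jitter=True, max_retries=10, errors=()):
    import random

    def wrapper(*args, **kwargs):
        delay = initial_delay
        for _ in range(max_retries):
            try:
                return func(*args, **kwargs)
            except errors:
                # Exponentially increase the delay and add jitter before retrying
                delay *= exponential_base * (1 + jitter * random.random())
                time.sleep(delay)
        raise RuntimeError(f"Maximum number of retries ({max_retries}) exceeded.")

    return wrapper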
| [
"[DOCUMENT]",
"[CANDIDATES]",
"\nI have the following document:\n[DOCUMENT]\n\nBased on the information above, extract the keywords that best describe the topic of the text.\nUse the following format separated by commas:\n<keywords>\n",
"\nThe following is a list of documents. Please extract the top keywords, separated by a comma, that describe the topic of the texts.\n\nDocument:\n- Traditional diets in most cultures were primarily plant-based with a little meat on top, but with the rise of industrial style meat production and factory farming, meat has become a staple food.\n\nKeywords: Traditional diets, Plant-based, Meat, Industrial style meat production, Factory farming, Staple food, Cultural dietary practices\n\nDocument:\n- The website mentions that it only takes a couple of days to deliver but I still have not received mine.\n\nKeywords: Website, Delivery, Mention, Timeframe, Not received, Waiting, Order fulfillment\n\nDocument:\n- [DOCUMENT]\n\nKeywords:",
"You are a helpful assistant.",
", "
] |
2024-01-10 | MaartenGr/KeyBERT | keybert~_llm.py | from typing import List, Union
try:
from sentence_transformers import util
HAS_SBERT = True
except ModuleNotFoundError:
HAS_SBERT = False
class KeyLLM:
"""
A minimal method for keyword extraction with Large Language Models (LLMs).
The keyword extraction is done by simply asking the LLM to extract a
number of keywords from a single piece of text.
"""
def __init__(self, llm):
"""KeyBERT initialization
Arguments:
llm: The Large Language Model to use
"""
self.llm = llm
def extract_keywords(
self,
docs: Union[str, List[str]],
check_vocab: bool = False,
candidate_keywords: List[List[str]] = None,
threshold: float = None,
embeddings=None
) -> Union[List[str], List[List[str]]]:
"""Extract keywords and/or keyphrases
To get the biggest speed-up, make sure to pass multiple documents
at once instead of iterating over a single document.
NOTE: The resulting keywords are expected to be separated by commas, so any
changes to the prompt will have to make sure that the resulting
keywords remain comma-separated.
Arguments:
docs: The document(s) for which to extract keywords/keyphrases
check_vocab: Only return keywords that appear exactly in the documents
candidate_keywords: Candidate keywords for each document
threshold: Minimum similarity between document embeddings for documents to be
           clustered together and share the same keywords (only used when
           `embeddings` are passed)
embeddings: Pre-computed document embeddings used to cluster similar documents
Returns:
    keywords: The extracted keywords for each document
Usage:
To extract keywords from a single document:
```python
import openai
from keybert.llm import OpenAI
from keybert import KeyLLM
# Create your LLM
client = openai.OpenAI(api_key=MY_API_KEY)
llm = OpenAI(client)
# Load it in KeyLLM
kw_model = KeyLLM(llm)
# Extract keywords
document = "The website mentions that it only takes a couple of days to deliver but I still have not received mine."
keywords = kw_model.extract_keywords(document)
```
"""
# Check for a single, empty document
if isinstance(docs, str):
if docs:
docs = [docs]
else:
return []
if HAS_SBERT and threshold is not None and embeddings is not None:
# Find similar documents
clusters = util.community_detection(embeddings, min_community_size=2, threshold=threshold)
in_cluster = set([cluster for cluster_set in clusters for cluster in cluster_set])
out_cluster = set(list(range(len(docs)))).difference(in_cluster)
# Extract keywords for all documents not in a cluster
if out_cluster:
selected_docs = [docs[index] for index in out_cluster]
if candidate_keywords is not None:
selected_keywords = [candidate_keywords[index] for index in out_cluster]
else:
selected_keywords = None
out_cluster_keywords = self.llm.extract_keywords(
selected_docs,
selected_keywords,
)
out_cluster_keywords = {index: words for words, index in zip(out_cluster_keywords, out_cluster)}
# Extract keywords for only the first document in a cluster
if in_cluster:
selected_docs = [docs[cluster[0]] for cluster in clusters]
if candidate_keywords is not None:
selected_keywords = [candidate_keywords[cluster[0]] for cluster in clusters]
else:
selected_keywords = None
in_cluster_keywords = self.llm.extract_keywords(
selected_docs,
selected_keywords
)
in_cluster_keywords = {
doc_id: in_cluster_keywords[index]
for index, cluster in enumerate(clusters)
for doc_id in cluster
}
# Update out cluster keywords with in cluster keywords
if out_cluster:
if in_cluster:
out_cluster_keywords.update(in_cluster_keywords)
keywords = [out_cluster_keywords[index] for index in range(len(docs))]
else:
keywords = [in_cluster_keywords[index] for index in range(len(docs))]
else:
# Extract keywords using a Large Language Model (LLM)
keywords = self.llm.extract_keywords(docs, candidate_keywords)
# Only extract keywords that appear in the input document
if check_vocab:
updated_keywords = []
for keyword_set, document in zip(keywords, docs):
updated_keyword_set = []
for keyword in keyword_set:
if keyword in document:
updated_keyword_set.append(keyword)
updated_keywords.append(updated_keyword_set)
return updated_keywords
return keywords
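
# Illustrative sketch (not part of the original module): deduplicating near-identical
# documents with embeddings before the LLM is called, so that similar documents share a
# single extraction. `llm` is e.g. a `keybert.llm.OpenAI` instance; the
# sentence-transformers model name below is an assumption.
def _sketch_extract_keywords_with_dedup(llm, docs, threshold: float = 0.75):
    from sentence_transformers import SentenceTransformer

    embedder = SentenceTransformer("all-MiniLM-L6-v2")
    embeddings = embedder.encode(docs, convert_to_tensor=True)
    kw_model = KeyLLM(llm)
    # Documents whose embeddings are more similar than `threshold` are clustered and
    # only the first document of each cluster is sent to the LLM.
    return kw_model.extract_keywords(docs, embeddings=embeddings, threshold=threshold)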
| [] |
2024-01-10 | morpheus228/career_student_bot | parser~handle_posts.py | # -*- coding: utf-8 -*-
import openai
context = """по тексту определи к какой категории отнести этот текст, ответом должен быть номер соответствующей категории
Категории:
1) Cтажировки <предложение от одной отдельной компании>
2) Олимпиады
3) Форумы <подборка IT форумов, ярмарок вакансий, предложения от многих компаний>
4) Тесты и отборы"""
prompt = "bla bla bla"
openai.api_key = "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"
categories = {"1": "Стажировки",
"2": "Олимпиады",
"3": "Форумы",
"4": "Тесты",
"0": "Unknown"}
def ans_by_request(prompt: str) -> str:
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": context},
{"role": "user", "content": prompt}
],
temperature=0.1
)
return completion.choices[0].message.content
def find_out_key_ctgr(prompt: str) -> str:
    # Reuses ans_by_request and key_ctgr_by_ans instead of duplicating the API call.
    return key_ctgr_by_ans(ans_by_request(prompt))
def key_ctgr_by_ans(answer: str):
key = answer[0]
if key not in categories.keys():
key = "0"
return key
def category_by_request(prompt: str) -> str:
key = find_out_key_ctgr(prompt)
return categories[key]
print(category_by_request(prompt))
| [
"bla bla bla"
] |
2024-01-10 | MANAS7-7/Doxify | server~utils~infinite_gpt.py | import openai
from concurrent.futures import ThreadPoolExecutor
import tiktoken
from dotenv import load_dotenv
import os
import json
import backoff # for exponential backoff
load_dotenv()
os.environ['OPENAI_API'] = os.getenv('OPEN_AI_API')
# Add your own OpenAI API key
openai.api_key = os.environ['OPENAI_API']
def load_text(file_path):
with open(file_path, 'r') as file:
return file.read()
def save_to_file(responses, output_file):
with open(output_file, 'w') as file:
for response in responses:
file.write(response + '\n')
# Change your OpenAI chat model accordingly
@backoff.on_exception(backoff.expo, openai.error.RateLimitError)
def call_openai_api(chunk):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=[
{"role": "system", "content": "You are a smart technical writer who understands code and can write documentation for it."},
{"role": "user", "content": f"Give me a developers documentation of the following code. Give a brief intro, table of contents, function explanations, dependencies, API specs (if present), schema tables in markdown. Give in markdown format and try to strict to the headings\n\n: {chunk}."},
],
max_tokens=5000,
n=1,
stop=None,
temperature=0.5,
)
return response.choices[0]['message']['content'].strip()
def split_into_chunks(text, tokens=500):
encoding = tiktoken.encoding_for_model('gpt-3.5-turbo')
words = encoding.encode(text)
chunks = []
for i in range(0, len(words), tokens):
# encoding.decode already returns a string; joining it with ' ' would insert a space
# between every character, so the decoded chunk is appended directly.
chunks.append(encoding.decode(words[i:i + tokens]))
return chunks
def process_chunks(text, output_file):
# text = load_text(input_file)
chunks = split_into_chunks(text)
# Processes chunks in parallel
with ThreadPoolExecutor() as executor:
responses = list(executor.map(call_openai_api, chunks))
save_to_file(responses, output_file)
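# Illustrative usage sketch (the file names below are assumptions): because
# ThreadPoolExecutor.map preserves input order, the generated sections are written in the
# same order as the source chunks.
#   source_text = load_text("my_module.py")
#   process_chunks(source_text, "my_module_docs.md")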
# @backoff.on_exception(backoff.expo, openai.error.RateLimitError)
def call_openai_api_higher_tokens(text, output_file):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=[
{"role": "system", "content": "You are a smart technical writer who understands code and can write documentation for it."},
{"role": "user", "content": f"Give me a developers documentation of the following code. Give a brief intro, table of contents, function explanations, dependencies, API specs (if present), schema tables in markdown. Give in markdown format and try to strict to the headings\n\n: {text}."},
],
max_tokens=2000,
n=1,
stop=None,
temperature=0.5,
)
# The ChatCompletion response is an object, not a list of strings; write out the
# generated text itself.
save_to_file([response.choices[0]['message']['content'].strip()], output_file)
# return response.choices[0]['message']['content'].strip()
# Specify your input text and output file path
# process_chunks(text, output_file)
# Can take up to a few minutes to run depending on the size of your data input | [
"Give me a developers documentation of the following code. Give a brief intro, table of contents, function explanations, dependencies, API specs (if present), schema tables in markdown. Give in markdown format and try to strict to the headings\n\n: PLACEHOLDER.",
"You are a smart technical writer who understands code and can write documentation for it."
] |
2024-01-10 | PratyushChauhan/legalLLM-Backend | my_llm.py | from langchain.embeddings import HuggingFaceEmbeddings
from llama_index import (
SimpleDirectoryReader,
VectorStoreIndex,
ServiceContext,
)
from llama_index.llms import LlamaCPP
from llama_index.llms.llama_utils import messages_to_prompt, completion_to_prompt
import os
import keysecrets
# model_url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGUF/resolve/main/llama-2-13b-chat.Q4_0.gguf"
os.environ['OPENAI_API_KEY'] = keysecrets.apiKey
# model_path = "llama-2-13b-chat.Q4_0.gguf"
model_path = "llama-2-7b-chat.Q5_K_M.gguf"
def get_llm():
llm = LlamaCPP(
# You can pass in the URL to a GGML model to download it automatically
model_url=None,
# optionally, you can set the path to a pre-downloaded model instead of model_url
model_path=model_path,
temperature=0.1,
max_new_tokens=2048,
# llama2 has a context window of 4096 tokens, but we set it lower to allow for some wiggle room
context_window=3900,
# kwargs to pass to __call__()
generate_kwargs={},
# kwargs to pass to __init__()
# set to at least 1 to use GPU
model_kwargs={"n_gpu_layers": 34}, #28,29,30 layers works best on my setup.
# transform inputs into Llama2 format
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
verbose=True,
)
return llm
def get_query_engine(llm):
# use Huggingface embeddings
embed_model = HuggingFaceEmbeddings(
model_name="sentence-transformers/all-mpnet-base-v2"
)
# create a service context
service_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model,
)
# load documents
documents = SimpleDirectoryReader(
"./docs/agreements"
).load_data()
# create vector store index
index = VectorStoreIndex.from_documents(documents, service_context=service_context)
# set up query engine
query_engine = index.as_query_engine(
streaming=True,
similarity_top_k=1
)
return query_engine
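
# Illustrative sketch (not part of the original module): querying the agreements index.
# The question text is an assumption; with `streaming=True` the answer is printed as it
# is generated.
def example_query(question: str = "Summarise the termination clause of the agreement."):
    llm = get_llm()
    query_engine = get_query_engine(llm)
    response = query_engine.query(question)
    response.print_response_stream()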
def openAI_queryEngine():
embed_model = HuggingFaceEmbeddings(
model_name="sentence-transformers/all-mpnet-base-v2"
)
# create a service context
service_context = ServiceContext.from_defaults(
embed_model=embed_model,
)
# load documents
documents = SimpleDirectoryReader(
"./docs/agreements"
).load_data()
# create vector store index
index = VectorStoreIndex.from_documents(documents, service_context=service_context)
# set up query engine
query_engine = index.as_query_engine(
streaming=True,
similarity_top_k=1
)
return query_engine | [] |
2024-01-10 | PhillipHoward/optimum-habana | optimum~habana~diffusers~pipelines~stable_diffusion~pipeline_stable_diffusion_ldm3d.py | # coding=utf-8
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import time
import warnings
from dataclasses import dataclass
from math import ceil
from typing import Any, Callable, Dict, List, Optional, Union
import numpy as np
import PIL
import torch
from diffusers.image_processor import VaeImageProcessorLDM3D
from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import BaseOutput
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from optimum.utils import logging
from ....transformers.gaudi_configuration import GaudiConfig
from ....utils import speed_metrics
from ..pipeline_utils import GaudiDiffusionPipeline
logger = logging.get_logger(__name__)
@dataclass
class GaudiStableDiffusionLDM3DPipelineOutput(BaseOutput):
rgb: Union[List[PIL.Image.Image], np.ndarray]
depth: Union[List[PIL.Image.Image], np.ndarray]
throughput: float
nsfw_content_detected: Optional[List[bool]]
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
"""
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
"""
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
# rescale the results from guidance (fixes overexposure)
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
return noise_cfg
class GaudiStableDiffusionLDM3DPipeline(
GaudiDiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
):
"""
Extends the [`StableDiffusionPipeline`](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion#diffusers.StableDiffusionPipeline) class:
- Generation is performed by batches
- Two `mark_step()` calls were added to support lazy mode
- Added support for HPU graphs
- Adjusted original Stable Diffusion to match with the LDM3D implementation (input and output being different)
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
feature_extractor ([`CLIPImageProcessor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
use_habana (bool, defaults to `False`):
Whether to use Gaudi (`True`) or CPU (`False`).
use_hpu_graphs (bool, defaults to `False`):
Whether to use HPU graphs or not.
gaudi_config (Union[str, [`GaudiConfig`]], defaults to `None`):
Gaudi configuration to use. Can be a string to download it from the Hub.
Or a previously initialized config can be passed.
bf16_full_eval (bool, defaults to `False`):
Whether to use full bfloat16 evaluation instead of 32-bit.
This will be faster and save memory compared to fp32/mixed precision but can harm generated images.
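Example (an illustrative sketch; the checkpoint name and Gaudi configuration below are
assumptions, not prescribed by this class):
```python
from optimum.habana.diffusers import GaudiStableDiffusionLDM3DPipeline

pipeline = GaudiStableDiffusionLDM3DPipeline.from_pretrained(
    "Intel/ldm3d-4c",
    use_habana=True,
    use_hpu_graphs=True,
    gaudi_config="Habana/stable-diffusion",
)
output = pipeline(prompt=["A photo of a lake surrounded by mountains"], batch_size=1)
rgb_images, depth_images = output.rgb, output.depth
```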
"""
_optional_components = ["safety_checker", "feature_extractor"]
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
safety_checker: StableDiffusionSafetyChecker,
feature_extractor: CLIPImageProcessor,
requires_safety_checker: bool = True,
use_habana: bool = False,
use_hpu_graphs: bool = False,
gaudi_config: Union[str, GaudiConfig] = None,
bf16_full_eval: bool = False,
):
super().__init__(
use_habana,
use_hpu_graphs,
gaudi_config,
bf16_full_eval,
)
if safety_checker is None and requires_safety_checker:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
)
if safety_checker is not None and feature_extractor is None:
raise ValueError(
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
)
self.register_modules(
vae=vae,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.image_processor = VaeImageProcessorLDM3D(vae_scale_factor=self.vae_scale_factor)
self.register_to_config(requires_safety_checker=requires_safety_checker)
self.to(self._device)
@property
def _execution_device(self):
r"""
Returns the device on which the pipeline's models will be executed. After calling
`pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
hooks.
"""
if not hasattr(self.unet, "_hf_hook"):
return self.device
for module in self.unet.modules():
if (
hasattr(module, "_hf_hook")
and hasattr(module._hf_hook, "execution_device")
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
def _encode_prompt(
self,
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt=None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
lora_scale: Optional[float] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
device: (`torch.device`):
torch device
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
lora_scale (`float`, *optional*):
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
"""
# set lora scale so that monkey patched LoRA
# function of text encoder can correctly access it
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
self._lora_scale = lora_scale
if prompt is not None and isinstance(prompt, str):
num_prompts = 1
elif prompt is not None and isinstance(prompt, list):
num_prompts = len(prompt)
else:
num_prompts = prompt_embeds.shape[0]
if prompt_embeds is None:
# textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
text_input_ids, untruncated_ids
):
removed_text = self.tokenizer.batch_decode(
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
)
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = text_inputs.attention_mask.to(device)
else:
attention_mask = None
prompt_embeds = self.text_encoder(
text_input_ids.to(device),
attention_mask=attention_mask,
)
prompt_embeds = prompt_embeds[0]
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance and negative_prompt_embeds is None:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""] * num_prompts
elif prompt is not None and type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
elif num_prompts != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has {len(negative_prompt)} elements, but `prompt`:"
f" {prompt} has {num_prompts}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt
# textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
max_length = prompt_embeds.shape[1]
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = uncond_input.attention_mask.to(device)
else:
attention_mask = None
negative_prompt_embeds = self.text_encoder(
uncond_input.input_ids.to(device),
attention_mask=attention_mask,
)
negative_prompt_embeds = negative_prompt_embeds[0]
if do_classifier_free_guidance:
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(num_prompts * num_images_per_prompt, seq_len, -1)
return prompt_embeds, negative_prompt_embeds
def run_safety_checker(self, image, device, dtype):
if self.safety_checker is None:
has_nsfw_concept = None
else:
if torch.is_tensor(image):
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
else:
feature_extractor_input = self.image_processor.numpy_to_pil(image)
rgb_feature_extractor_input = feature_extractor_input[0]
safety_checker_input = self.feature_extractor(rgb_feature_extractor_input, return_tensors="pt").to(device)
image, has_nsfw_concept = self.safety_checker(
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
)
return image, has_nsfw_concept
def decode_latents(self, latents):
warnings.warn(
"The decode_latents method is deprecated and will be removed in a future version. Please"
" use VaeImageProcessor instead",
FutureWarning,
)
latents = 1 / self.vae.config.scaling_factor * latents
image = self.vae.decode(latents, return_dict=False)[0]
image = (image / 2 + 0.5).clamp(0, 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
return image
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
def check_inputs(
self,
prompt,
height,
width,
callback_steps,
negative_prompt=None,
prompt_embeds=None,
negative_prompt_embeds=None,
):
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
def prepare_latents(self, num_images, num_channels_latents, height, width, dtype, device, generator, latents=None):
shape = (num_images, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
if isinstance(generator, list) and len(generator) != num_images:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective number"
f" of images of {num_images}. Make sure the number of images matches the length of the generators."
)
if latents is None:
# torch.randn is broken on HPU so running it on CPU
rand_device = "cpu" if device.type == "hpu" else device
if isinstance(generator, list):
shape = (1,) + shape[1:]
latents = [
torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype)
for i in range(num_images)
]
latents = torch.cat(latents, dim=0).to(device)
else:
latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device)
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
latents = latents.to(device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
@classmethod
def _split_inputs_into_batches(cls, batch_size, latents, text_embeddings, uncond_embeddings):
# Use torch.split to generate num_batches batches of size batch_size
latents_batches = list(torch.split(latents, batch_size))
text_embeddings_batches = list(torch.split(text_embeddings, batch_size))
if uncond_embeddings is not None:
uncond_embeddings_batches = list(torch.split(uncond_embeddings, batch_size))
# If the last batch has less samples than batch_size, pad it with dummy samples
num_dummy_samples = 0
if latents_batches[-1].shape[0] < batch_size:
num_dummy_samples = batch_size - latents_batches[-1].shape[0]
# Pad latents_batches
sequence_to_stack = (latents_batches[-1],) + tuple(
torch.zeros_like(latents_batches[-1][0][None, :]) for _ in range(num_dummy_samples)
)
latents_batches[-1] = torch.vstack(sequence_to_stack)
# Pad text_embeddings_batches
sequence_to_stack = (text_embeddings_batches[-1],) + tuple(
torch.zeros_like(text_embeddings_batches[-1][0][None, :]) for _ in range(num_dummy_samples)
)
text_embeddings_batches[-1] = torch.vstack(sequence_to_stack)
# Pad uncond_embeddings_batches if necessary
if uncond_embeddings is not None:
sequence_to_stack = (uncond_embeddings_batches[-1],) + tuple(
torch.zeros_like(uncond_embeddings_batches[-1][0][None, :]) for _ in range(num_dummy_samples)
)
uncond_embeddings_batches[-1] = torch.vstack(sequence_to_stack)
# Stack batches in the same tensor
latents_batches = torch.stack(latents_batches)
if uncond_embeddings is not None:
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
for i, (uncond_embeddings_batch, text_embeddings_batch) in enumerate(
zip(uncond_embeddings_batches, text_embeddings_batches[:])
):
text_embeddings_batches[i] = torch.cat([uncond_embeddings_batch, text_embeddings_batch])
text_embeddings_batches = torch.stack(text_embeddings_batches)
return latents_batches, text_embeddings_batches, num_dummy_samples
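# Illustrative example of the batching above (the numbers are for concreteness, not from
# the original code): with 5 prompts and batch_size=2, torch.split yields batches of
# sizes [2, 2, 1]; the last batch is padded with one dummy all-zero sample so that every
# batch reaching the UNet has the same static shape, which matters for HPU graphs. The
# dummy generations are removed again after decoding.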
@torch.no_grad()
def __call__(
self,
prompt: Union[str, List[str]] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
guidance_scale: float = 5.0,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
batch_size: int = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
instead.
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The height in pixels of the generated images.
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The width in pixels of the generated images.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 5):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
batch_size (`int`, *optional*, defaults to 1):
The number of images in a batch.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will be generated randomly.
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.GaudiStableDiffusionPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function that will be called every `callback_steps` steps during inference. The function will be
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function will be called. If not specified, the callback will be
called at every step.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
guidance_rescale (`float`, *optional*, defaults to 0.0):
Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16. of
[Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
Guidance rescale factor should fix overexposure when using zero terminal SNR.
Returns:
[`~diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.GaudiStableDiffusionPipelineOutput`] or `tuple`:
[`~diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.GaudiStableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
When returning a tuple, the first element is a list with the generated images, and the second element is a
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
(nsfw) content, according to the `safety_checker`.
"""
with torch.autocast(device_type="hpu", dtype=torch.bfloat16, enabled=self.gaudi_config.use_torch_autocast):
# 0. Default height and width to unet
height = height or self.unet.config.sample_size * self.vae_scale_factor
width = width or self.unet.config.sample_size * self.vae_scale_factor
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
)
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
num_prompts = 1
elif prompt is not None and isinstance(prompt, list):
num_prompts = len(prompt)
else:
num_prompts = prompt_embeds.shape[0]
num_batches = ceil((num_images_per_prompt * num_prompts) / batch_size)
logger.info(
f"{num_prompts} prompt(s) received, {num_images_per_prompt} generation(s) per prompt,"
f" {batch_size} sample(s) per batch, {num_batches} total batch(es)."
)
if num_batches < 3:
logger.warning("The first two iterations are slower so it is recommended to feed more batches.")
device = self._execution_device
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# 3. Encode input prompt
text_encoder_lora_scale = (
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
)
prompt_embeds, negative_prompt_embeds = self._encode_prompt(
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
lora_scale=text_encoder_lora_scale,
)
# 4. Prepare timesteps
self.scheduler.set_timesteps(num_inference_steps, device="cpu")
timesteps = self.scheduler.timesteps.to(device)
# 5. Prepare latent variables
num_channels_latents = self.unet.config.in_channels
latents = self.prepare_latents(
num_prompts * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
)
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 7. Split into batches (HPU-specific step)
latents_batches, text_embeddings_batches, num_dummy_samples = self._split_inputs_into_batches(
batch_size,
latents,
prompt_embeds,
negative_prompt_embeds,
)
outputs = {
"images": [],
"has_nsfw_concept": [],
}
t0 = time.time()
t1 = t0
# 8. Denoising loop
for j in self.progress_bar(range(num_batches)):
# The throughput is calculated from the 3rd iteration
# because compilation occurs in the first two iterations
if j == 2:
t1 = time.time()
latents_batch = latents_batches[0]
latents_batches = torch.roll(latents_batches, shifts=-1, dims=0)
text_embeddings_batch = text_embeddings_batches[0]
text_embeddings_batches = torch.roll(text_embeddings_batches, shifts=-1, dims=0)
for i in range(num_inference_steps):
timestep = timesteps[0]
timesteps = torch.roll(timesteps, shifts=-1, dims=0)
capture = True if self.use_hpu_graphs and i < 2 else False
# expand the latents if we are doing classifier free guidance
latent_model_input = (
torch.cat([latents_batch] * 2) if do_classifier_free_guidance else latents_batch
)
# latent_model_input = self.scheduler.scale_model_input(latent_model_input, timestep)
# predict the noise residual
noise_pred = self.unet_hpu(
latent_model_input,
timestep,
text_embeddings_batch,
cross_attention_kwargs,
capture,
)
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
if do_classifier_free_guidance and guidance_rescale > 0.0:
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
# compute the previous noisy sample x_t -> x_t-1
latents_batch = self.scheduler.step(
noise_pred, latents_batch, **extra_step_kwargs, return_dict=False
)[0]
if not self.use_hpu_graphs:
self.htcore.mark_step()
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(i, timestep, latents_batch)
if not output_type == "latent":
# 8. Post-processing
image = self.vae.decode(latents_batch / self.vae.config.scaling_factor, return_dict=False)[0]
else:
image = latents_batch
outputs["images"].append(image)
self.scheduler.reset_timestep_dependent_params()
if not self.use_hpu_graphs:
self.htcore.mark_step()
speed_metrics_prefix = "generation"
speed_measures = speed_metrics(
split=speed_metrics_prefix,
start_time=t0,
num_samples=num_batches * batch_size if t1 == t0 else (num_batches - 2) * batch_size,
num_steps=num_batches,
start_time_after_warmup=t1,
)
logger.info(f"Speed metrics: {speed_measures}")
# Remove dummy generations if needed
if num_dummy_samples > 0:
outputs["images"][-1] = outputs["images"][-1][:-num_dummy_samples]
# Process generated images
for i, image in enumerate(outputs["images"][:]):
if i == 0:
outputs["images"].clear()
if output_type == "latent":
has_nsfw_concept = None
else:
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
if has_nsfw_concept is None:
do_denormalize = [True] * image.shape[0]
else:
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
rgb, depth = self.image_processor.postprocess(
image, output_type=output_type, do_denormalize=do_denormalize
)
if output_type == "pil":
outputs["images"] += image
else:
outputs["images"] += [*image]
if has_nsfw_concept is not None:
outputs["has_nsfw_concept"] += has_nsfw_concept
else:
outputs["has_nsfw_concept"] = None
if not return_dict:
return ((rgb, depth), has_nsfw_concept)
return GaudiStableDiffusionLDM3DPipelineOutput(
rgb=rgb,
depth=depth,
nsfw_content_detected=has_nsfw_concept,
throughput=speed_measures[f"{speed_metrics_prefix}_samples_per_second"],
)
@torch.no_grad()
def unet_hpu(self, latent_model_input, timestep, encoder_hidden_states, cross_attention_kwargs, capture):
if self.use_hpu_graphs:
return self.capture_replay(latent_model_input, timestep, encoder_hidden_states, capture)
else:
return self.unet(
latent_model_input,
timestep,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
return_dict=False,
)[0]
@torch.no_grad()
def capture_replay(self, latent_model_input, timestep, encoder_hidden_states, capture):
inputs = [latent_model_input, timestep, encoder_hidden_states, False]
h = self.ht.hpu.graphs.input_hash(inputs)
cached = self.cache.get(h)
if capture:
# Capture the graph and cache it
with self.ht.hpu.stream(self.hpu_stream):
graph = self.ht.hpu.HPUGraph()
graph.capture_begin()
outputs = self.unet(inputs[0], inputs[1], inputs[2], inputs[3])[0]
graph.capture_end()
graph_inputs = inputs
graph_outputs = outputs
self.cache[h] = self.ht.hpu.graphs.CachedParams(graph_inputs, graph_outputs, graph)
return outputs
# Replay the cached graph with updated inputs
self.ht.hpu.graphs.copy_to(cached.graph_inputs, inputs)
cached.graph.replay()
self.ht.core.hpu.default_stream().synchronize()
return cached.graph_outputs
| [
"1"
] |
2024-01-10 | lyu-xg/on-centralized-critics-in-marl | maac~utils~env_wrappers.py | """
Modified from OpenAI Baselines code to work with multi-agent envs
"""
import numpy as np
from multiprocessing import Process, Pipe
from baselines.common.vec_env import VecEnv, CloudpickleWrapper
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
actions = [a.argmax() for a in data]
a, ob, reward, done, valid, info = env.step(actions)
#ob, reward, done, info = env.step(data)
if all(done):
ob = env.reset()
s = env.get_state()
remote.send((s, ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
s = env.get_state()
remote.send((s, ob))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
elif cmd == 'get_agent_types':
if all([hasattr(a, 'adversary') for a in env.agents]):
remote.send(['adversary' if a.adversary else 'agent' for a in
env.agents])
else:
remote.send(['agent' for _ in env.agents])
else:
raise NotImplementedError
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
self.remotes[0].send(('get_agent_types', None))
self.agent_types = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
state, obs, rews, dones, infos = zip(*results)
return np.stack(state), np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
results = [remote.recv() for remote in self.remotes]
state, obs = zip(*results)
return np.stack(state), np.stack(obs)
# def reset_task(self):
# for remote in self.remotes:
# remote.send(('reset_task', None))
# return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
class DummyVecEnv(VecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
if all([hasattr(a, 'adversary') for a in env.agents]):
self.agent_types = ['adversary' if a.adversary else 'agent' for a in
env.agents]
else:
self.agent_types = ['agent' for _ in env.agents]
self.ts = np.zeros(len(self.envs), dtype='int')
self.actions = None
def step_async(self, actions):
self.actions = [[a.argmax() for a in actions[0]]]
def step_wait(self):
results = [env.step(a) for (a,env) in zip(self.actions, self.envs)]
results = [[results[0][1], results[0][2], results[0][3], results[0][5]]]
obs, rews, dones, infos = map(np.array, zip(*results))
self.ts += 1
for (i, done) in enumerate(dones):
if all(done):
obs[i] = self.envs[i].reset()
self.ts[i] = 0
self.actions = None
return np.array(obs), np.array(rews), np.array(dones), infos
def reset(self):
results = [env.reset() for env in self.envs]
return np.array(results)
def close(self):
return
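
# Illustrative usage sketch (`make_env` is an assumed factory returning a multi-agent
# particle env; the real training code constructs these wrappers elsewhere):
#   vec_env = SubprocVecEnv([make_env for _ in range(8)])
#   state, obs = vec_env.reset()
#   state, obs, rewards, dones, infos = vec_env.step(actions)  # actions: one-hot per agent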
| [] |
2024-01-10 | NikolayS/tsv-timemachine | pages~1_TimeMachine_Demo.py | # Copyright (c) Timescale, Inc. (2023)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import numpy as np
import streamlit as st
from streamlit.hello.utils import show_code
from llama_index.vector_stores import TimescaleVectorStore
from llama_index import ServiceContext, StorageContext
from llama_index.indices.vector_store import VectorStoreIndex
from llama_index.llms import OpenAI
from llama_index import set_global_service_context
import pandas as pd
from pathlib import Path
from datetime import datetime, timedelta
from timescale_vector import client
from typing import List, Tuple
from llama_index.schema import TextNode
from llama_index.embeddings import OpenAIEmbedding
import psycopg2
def get_repos():
with psycopg2.connect(dsn=st.secrets["TIMESCALE_SERVICE_URL"]) as connection:
# Create a cursor within the context manager
with connection.cursor() as cursor:
try:
select_data_sql = "SELECT * FROM time_machine_catalog;"
cursor.execute(select_data_sql)
except psycopg2.errors.UndefinedTable as e:
return {}
catalog_entries = cursor.fetchall()
catalog_dict = {}
for entry in catalog_entries:
repo_url, table_name = entry
catalog_dict[repo_url] = table_name
return catalog_dict
def get_auto_retriever(index, retriever_args):
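    """Build a chat engine over the index: a VectorIndexAutoRetriever that can infer
    metadata filters (commit_hash, author, and the __start_date/__end_date time
    partition bounds) from the question, wrapped in a RetrieverQueryEngine, exposed
    as a query-engine tool, and handed to an OpenAIAgent."""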
from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo
vector_store_info = VectorStoreInfo(
content_info="Description of the commits to PostgreSQL. Describes changes made to Postgres",
metadata_info=[
MetadataInfo(
name="commit_hash",
type="str",
description="Commit Hash",
),
MetadataInfo(
name="author",
type="str",
description="Author of the commit",
),
MetadataInfo(
name="__start_date",
type="datetime in iso format",
description="All results will be after this datetime",
),
MetadataInfo(
name="__end_date",
type="datetime in iso format",
description="All results will be before this datetime",
)
],
)
from llama_index.indices.vector_store.retrievers import VectorIndexAutoRetriever
retriever = VectorIndexAutoRetriever(index,
vector_store_info=vector_store_info,
service_context=index.service_context,
**retriever_args)
# build query engine
from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine
query_engine = RetrieverQueryEngine.from_args(
retriever=retriever, service_context=index.service_context
)
from llama_index.tools.query_engine import QueryEngineTool
# convert query engine to tool
query_engine_tool = QueryEngineTool.from_defaults(query_engine=query_engine)
from llama_index.agent import OpenAIAgent
chat_engine = OpenAIAgent.from_tools(
tools=[query_engine_tool],
llm=index.service_context.llm,
verbose=True
#service_context=index.service_context
)
return chat_engine
def tm_demo():
repos = get_repos()
months = st.sidebar.slider('How many months back to search (0=no limit)?', 0, 130, 0)
if "config_months" not in st.session_state.keys() or months != st.session_state.config_months:
st.session_state.clear()
topk = st.sidebar.slider('How many commits to retrieve', 1, 150, 20)
if "config_topk" not in st.session_state.keys() or topk != st.session_state.config_topk:
st.session_state.clear()
if len(repos) > 0:
repo = st.sidebar.selectbox("Choose a repo", repos.keys())
else:
st.error("No repositiories found, please [load some data first](/LoadData)")
return
if "config_repo" not in st.session_state.keys() or repo != st.session_state.config_repo:
st.session_state.clear()
st.session_state.config_months = months
st.session_state.config_topk = topk
st.session_state.config_repo = repo
if "messages" not in st.session_state.keys(): # Initialize the chat messages history
st.session_state.messages = [
{"role": "assistant", "content": "Please choose a repo and time filter on the sidebar and then ask me a question about the git history"}
]
vector_store = TimescaleVectorStore.from_params(
service_url=st.secrets["TIMESCALE_SERVICE_URL"],
table_name=repos[repo],
time_partition_interval=timedelta(days=7),
    )
service_context = ServiceContext.from_defaults(llm=OpenAI(model="gpt-4", temperature=0.1))
set_global_service_context(service_context)
index = VectorStoreIndex.from_vector_store(vector_store=vector_store, service_context=service_context)
#chat engine goes into the session to retain history
if "chat_engine" not in st.session_state.keys(): # Initialize the chat engine
retriever_args = {"similarity_top_k" : int(topk)}
if months > 0:
end_dt = datetime.now()
start_dt = end_dt - timedelta(weeks=4*months)
retriever_args["vector_store_kwargs"] = ({"start_date": start_dt, "end_date":end_dt})
st.session_state.chat_engine = get_auto_retriever(index, retriever_args)
#st.session_state.chat_engine = index.as_chat_engine(chat_mode="best", similarity_top_k=20, verbose=True)
if prompt := st.chat_input("Your question"): # Prompt for user input and save to chat history
st.session_state.messages.append({"role": "user", "content": prompt})
for message in st.session_state.messages: # Display the prior chat messages
with st.chat_message(message["role"]):
st.write(message["content"])
# If last message is not from assistant, generate a new response
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
with st.spinner("Thinking..."):
response = st.session_state.chat_engine.chat(prompt, function_call="query_engine_tool")
st.write(response.response)
message = {"role": "assistant", "content": response.response}
st.session_state.messages.append(message) # Add response to message history
st.set_page_config(page_title="Time machine demo", page_icon="🧑💼")
st.markdown("# Time Machine")
st.sidebar.header("Welcome to the Time Machine")
debug_llamaindex = False
if debug_llamaindex:
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
tm_demo()
#show_code(tm_demo)
| [
"Please choose a repo and time filter on the sidebar and then ask me a question about the git history"
] |
2024-01-10 | ronakdinesh/kite | app~backend~approaches~chatreadretrieveread.py | from typing import Any, Sequence
import openai
import tiktoken
from azure.search.documents import SearchClient
from azure.search.documents.models import QueryType
from approaches.approach import Approach
from text import nonewlines
from core.messagebuilder import MessageBuilder
from core.modelhelper import get_token_limit
class ChatReadRetrieveReadApproach(Approach):
# Chat roles
SYSTEM = "system"
USER = "user"
ASSISTANT = "assistant"
"""
Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves
    top documents from search, then constructs a prompt with them, and then uses OpenAI to generate a completion
(answer) with that prompt.
"""
system_message_chat_conversation = """Assistant helps the company employees with all there various questions. Be brief in your answers.
Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question.
For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question.
Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf].
{follow_up_questions_prompt}
{injected_prompt}
"""
follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook.
Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>.
Try not to repeat questions that have already been asked.
Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'"""
query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook.
Generate a search query based on the conversation and the new question.
Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms.
Do not include any text inside [] or <<>> in the search query terms.
Do not include any special characters like '+'.
If the question is not in English, translate the question to English before generating the search query.
If you cannot generate a search query, return just the number 0.
"""
query_prompt_few_shots = [
{'role' : USER, 'content' : 'What are my health plans?' },
{'role' : ASSISTANT, 'content' : 'Show available health plans' },
{'role' : USER, 'content' : 'does my plan cover cardio?' },
{'role' : ASSISTANT, 'content' : 'Health plan cardio coverage' }
]
def __init__(self, search_client: SearchClient, chatgpt_deployment: str, chatgpt_model: str, embedding_deployment: str, sourcepage_field: str, content_field: str):
self.search_client = search_client
self.chatgpt_deployment = chatgpt_deployment
self.chatgpt_model = chatgpt_model
self.embedding_deployment = embedding_deployment
self.sourcepage_field = sourcepage_field
self.content_field = content_field
self.chatgpt_token_limit = get_token_limit(chatgpt_model)
def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any:
has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None]
has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None]
use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False
top = overrides.get("top") or 3
exclude_category = overrides.get("exclude_category") or None
filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None
user_q = 'Generate search query for: ' + history[-1]["user"]
# STEP 1: Generate an optimized keyword search query based on the chat history and the last question
messages = self.get_messages_from_history(
self.query_prompt_template,
self.chatgpt_model,
history,
user_q,
self.query_prompt_few_shots,
self.chatgpt_token_limit - len(user_q)
)
chat_completion = openai.ChatCompletion.create(
deployment_id=self.chatgpt_deployment,
model=self.chatgpt_model,
messages=messages,
temperature=0.0,
max_tokens=32,
n=1)
query_text = chat_completion.choices[0].message.content
if query_text.strip() == "0":
query_text = history[-1]["user"] # Use the last user input if we failed to generate a better query
# STEP 2: Retrieve relevant documents from the search index with the GPT optimized query
# If retrieval mode includes vectors, compute an embedding for the query
if has_vector:
query_vector = openai.Embedding.create(engine=self.embedding_deployment, input=query_text)["data"][0]["embedding"]
else:
query_vector = None
# Only keep the text query if the retrieval mode uses text, otherwise drop it
if not has_text:
query_text = None
# Use semantic L2 reranker if requested and if retrieval mode is text or hybrid (vectors + text)
if overrides.get("semantic_ranker") and has_text:
r = self.search_client.search(query_text,
filter=filter,
query_type=QueryType.SEMANTIC,
query_language="en-us",
query_speller="lexicon",
semantic_configuration_name="default",
top=top,
query_caption="extractive|highlight-false" if use_semantic_captions else None,
vector=query_vector,
top_k=50 if query_vector else None,
vector_fields="embedding" if query_vector else None)
else:
r = self.search_client.search(query_text,
filter=filter,
top=top,
vector=query_vector,
top_k=50 if query_vector else None,
vector_fields="embedding" if query_vector else None)
if use_semantic_captions:
results = [doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc['@search.captions']])) for doc in r]
else:
results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) for doc in r]
content = "\n".join(results)
follow_up_questions_prompt = self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else ""
# STEP 3: Generate a contextual and content specific answer using the search results and chat history
        # Allow client to replace the entire prompt, or to inject into the existing prompt using >>>
prompt_override = overrides.get("prompt_override")
if prompt_override is None:
system_message = self.system_message_chat_conversation.format(injected_prompt="", follow_up_questions_prompt=follow_up_questions_prompt)
elif prompt_override.startswith(">>>"):
system_message = self.system_message_chat_conversation.format(injected_prompt=prompt_override[3:] + "\n", follow_up_questions_prompt=follow_up_questions_prompt)
else:
system_message = prompt_override.format(follow_up_questions_prompt=follow_up_questions_prompt)
messages = self.get_messages_from_history(
system_message + "\n\nSources:\n" + content,
self.chatgpt_model,
history,
history[-1]["user"],
max_tokens=self.chatgpt_token_limit)
chat_completion = openai.ChatCompletion.create(
deployment_id=self.chatgpt_deployment,
model=self.chatgpt_model,
messages=messages,
temperature=overrides.get("temperature") or 0.7,
max_tokens=1024,
n=1)
chat_content = chat_completion.choices[0].message.content
msg_to_display = '\n\n'.join([str(message) for message in messages])
return {"data_points": results, "answer": chat_content, "thoughts": f"Searched for:<br>{query_text}<br><br>Conversations:<br>" + msg_to_display.replace('\n', '<br>')}
    def get_messages_from_history(self, system_prompt: str, model_id: str, history: Sequence[dict[str, str]], user_conv: str, few_shots = [], max_tokens: int = 4096) -> list:
message_builder = MessageBuilder(system_prompt, model_id)
# Add examples to show the chat what responses we want. It will try to mimic any responses and make sure they match the rules laid out in the system message.
for shot in few_shots:
message_builder.append_message(shot.get('role'), shot.get('content'))
user_content = user_conv
append_index = len(few_shots) + 1
message_builder.append_message(self.USER, user_content, index=append_index)
for h in reversed(history[:-1]):
if h.get("bot"):
message_builder.append_message(self.ASSISTANT, h.get('bot'), index=append_index)
message_builder.append_message(self.USER, h.get('user'), index=append_index)
if message_builder.token_length > max_tokens:
break
messages = message_builder.messages
return messages | [
"Show available health plans",
"Health plan cardio coverage",
"What are my health plans?",
"Next Questions",
"Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. \nUse double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>.\nTry not to repeat questions that have already been asked.\nOnly generate questions and do not generate any text before or after the questions, such as 'Next Questions'",
"suggest_followup_questions",
"prompt_override",
"Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook.\nGenerate a search query based on the conversation and the new question. \nDo not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms.\nDo not include any text inside [] or <<>> in the search query terms.\nDo not include any special characters like '+'.\nIf the question is not in English, translate the question to English before generating the search query.\nIf you cannot generate a search query, return just the number 0.\n",
"does my plan cover cardio?"
] |
2024-01-10 | Luis080117/knowledgebase-bot | index_server.py | import os
import pickle
import shutil
from llama_index import download_loader
import hashlib
from multiprocessing import Lock
from multiprocessing.managers import BaseManager
from llama_index import SimpleDirectoryReader, GPTVectorStoreIndex, ServiceContext, StorageContext, load_index_from_storage
from llama_index import LangchainEmbedding, LLMPredictor
from langchain.chat_models import AzureChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
index = None
stored_docs = {}
doc_hashes = {'titles': set(), 'contents': set()}
lock = Lock()
DOCUMENTS_DIR = "./documents"
def compute_hash(data):
"""Compute the SHA-256 hash of the given data."""
return hashlib.sha256(data.encode()).hexdigest()
def ensure_unique_filename(directory, filename):
"""Ensure that the filename is unique within the given directory.
If a file with the same name already exists, append a counter to it."""
name, ext = os.path.splitext(filename)
counter = 1
unique_filename = filename
while os.path.exists(os.path.join(directory, unique_filename)):
unique_filename = f"{name}_{counter}{ext}"
counter += 1
return unique_filename
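# For example (file names are illustrative): if "./documents" already contains
# "report.pdf", ensure_unique_filename("./documents", "report.pdf") returns
# "report_1.pdf", and the next collision returns "report_2.pdf".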
index_name = "./saved_index"
pkl_name = "stored_documents.pkl"
def initialize_index():
global index, stored_docs, doc_hashes
if not os.path.exists(DOCUMENTS_DIR):
os.mkdir(DOCUMENTS_DIR)
api_type = os.environ.get('OPENAI_API_TYPE')
if api_type == "azure":
llm = AzureChatOpenAI(
deployment_name=os.environ.get("OPENAI_API_LLM", "gpt-35-turbo"),
openai_api_version=os.environ.get("OPENAI_API_VERSION", "2023-05-15"),
temperature=os.environ.get("OPENAI_API_TEMPERATURE", 0.5))
embeddings = LangchainEmbedding(OpenAIEmbeddings(deployment=os.environ.get("OPENAI_API_EMBEDDING", "text-embedding-ada-002")))
llm_predictor = LLMPredictor(llm=llm, system_prompt=os.environ.get("OPENAI_API_PROMPT", "You are a helpful assistant of CIPPlanner."))
service_context = ServiceContext.from_defaults(chunk_size_limit=512, embed_model=embeddings, llm_predictor=llm_predictor)
else:
service_context = ServiceContext.from_defaults(chunk_size_limit=512)
with lock:
if os.path.exists(index_name):
index = load_index_from_storage(StorageContext.from_defaults(persist_dir=index_name), service_context=service_context)
else:
index = GPTVectorStoreIndex([], service_context=service_context)
index.storage_context.persist(persist_dir=index_name)
if os.path.exists(pkl_name):
with open(pkl_name, "rb") as f:
stored_docs, doc_hashes = pickle.load(f)
else:
stored_docs = {}
doc_hashes = {'titles': set(), 'contents': set()}
def query_index(query_text):
global index
response = index.as_query_engine().query(query_text)
return response
def insert_into_index(doc_file_path, doc_id=None):
global index, stored_docs, doc_hashes
if doc_file_path.endswith(".xlsx"):
reader = download_loader("PandasExcelReader")
loader = reader(pandas_config={"header": 0})
document = loader.load_data(doc_file_path)[0]
else:
document = SimpleDirectoryReader(input_files=[doc_file_path]).load_data()[0]
if doc_id is not None:
document.doc_id = doc_id
doc_title_hash = compute_hash(document.doc_id)
doc_content_hash = compute_hash(document.text)
if doc_title_hash in doc_hashes.get('titles', set()):
raise ValueError("Document with similar title already exists!")
if doc_content_hash in doc_hashes.get('contents', set()):
raise ValueError("Document with similar content already exists!")
doc_hashes.setdefault('titles', set()).add(doc_title_hash)
doc_hashes.setdefault('contents', set()).add(doc_content_hash)
# Save the actual document file to the documents folder
original_filename = os.path.basename(doc_file_path)
unique_filename = ensure_unique_filename(DOCUMENTS_DIR, original_filename)
shutil.copy(doc_file_path, os.path.join(DOCUMENTS_DIR, unique_filename))
with lock:
stored_docs[document.doc_id] = document.text
index.insert(document)
index.storage_context.persist(persist_dir=index_name)
try:
with open(pkl_name, "wb") as f:
pickle.dump((stored_docs, doc_hashes), f)
print(f"Successfully saved {document.doc_id} to {pkl_name}.")
except Exception as e:
print(f"Error while saving {document.doc_id} to {pkl_name}: {str(e)}")
return
def get_documents_list():
global stored_docs
documents_list = []
for doc_id, doc_text in stored_docs.items():
documents_list.append({"id": doc_id, "text": doc_text})
return documents_list
if __name__ == "__main__":
print("initializing index...")
initialize_index()
manager = BaseManager(('', 4003), b'password')
manager.register('query_index', query_index)
manager.register('insert_into_index', insert_into_index)
manager.register('get_documents_list', get_documents_list)
server = manager.get_server()
print("server started...")
server.serve_forever()
| [
"set()"
] |
2024-01-10 | agneta20/InsectNLTK | src~nlp~model.py | '''
Created on Sep 20, 2017
Module created to apply topic modelling. This is the main module that performs LDA and HDP. The module takes data
from MongoDB, extracts the text, and fits topic models to it; the results are output as CSV files of terms and topics
and as HTML files used to visualize the LDA topic model results for different topic numbers.
@author:
'''
import os
import re
import nltk
import operator
#import matplotlib.pyplot as plt
import warnings
import gensim
import numpy as np
import sys
import csv
from nltk.tokenize import RegexpTokenizer
import pyLDAvis.gensim
from gensim.models import CoherenceModel, LdaModel, LsiModel, HdpModel
from gensim.models.wrappers import LdaMallet
from gensim.corpora import Dictionary
from pprint import pprint
from gensim.utils import lemmatize
from nltk.corpus import stopwords
import pyLDAvis.gensim
from patternMatching import PatternMatcher
import db.filterBeetle
import db.filterGovernment
import wordninja
from db import filterBeetle
from pattern.db import year
import logging
# Let's not pay heed to them right now
warnings.filterwarnings('ignore')
# nltk stopwords list
stopwords=stopwords.words('english')
stops = set(stopwords)
# list of results to hold
listResults=[]
# results with dates
dateResults={}
# results based on government document types
typeResults={}
#results based on beetles and relevant sentences
beetleResults={}
#texts used in training or application
train_texts=''
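# A minimal, self-contained sketch of the core gensim flow this module wraps
# (tokenized documents -> Dictionary -> bag-of-words corpus -> LdaModel -> topics).
# The two example documents below are made up purely for illustration.
def _minimal_lda_sketch():
    example_docs = [
        ['pine', 'beetle', 'outbreak', 'forest', 'damage'],
        ['government', 'policy', 'forest', 'management', 'report'],
    ]
    example_dictionary = Dictionary(example_docs)
    example_corpus = [example_dictionary.doc2bow(doc) for doc in example_docs]
    example_lda = LdaModel(corpus=example_corpus, num_topics=2, id2word=example_dictionary)
    return example_lda.show_topics(num_topics=2)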
def integrateText(content):
'''
    Method to retrieve the text that comes from the database and gather it together for the larger topic-model
    analysis.
content-- the content with the data to retrieve.
'''
txt=''
cnt=[]
# gLevel=["Gov Level 1"]
# flt=["all"]
# content=db.filterGovernment.filterGovernmentType(flt,gLevel, content)
for c in content:
text=c.get('Text',"")
# text=fb.filterBeetleSentences(text)
year=c.get('Year')
text=text.strip()
txt=txt+" "+text
# cc={}
# cc['Text']=txt
# cc['Year']=year
cnt.append(text)
# cnt=fb.filterBeetleSentences(cnt)
return cnt
def retrieveText(content):
'''
    Method to process the text of each record (tokenize and re-split run-together words with wordninja) and return the processed documents.
content-- the content to retrieve text
'''
del listResults[:]
doc_set=[]
# txt=''
iit=0
nwText=''
for c in content:
text=c['Text']
year=c['Year']
text=text.strip()
text=re.sub('"',' ',text)
text=re.sub(',',' ',text)
# lsWrds=wordninja.split(text)
# tokenizer = RegexpTokenizer(r'\w+')
text = nltk.word_tokenize(text)
# nText=[]
newttL=[]
for t in text:
ts=wordninja.split(t)
newtt=''
for tt in ts:
newtt+=tt+" "
newttL.append(newtt)
for nn in newttL:
nwText+=nn+" "
del text[:]
# stopped_tokens = [t for t in nText if not t in stops]
# doc_set.append(stopped_tokens)
print(iit)
print(len(doc_set))
iit+=1
# docResults={year:stopped_tokens}
doc_set.append(nwText)
return doc_set
def test_directories():
'''
    Locate gensim's bundled test data and return the path to the Lee background corpus file used for training or testing.
'''
test_data_dir = '{}'.format(os.sep).join([gensim.__path__[0], 'test', 'test_data'])
lee_train_file = test_data_dir + os.sep + 'lee_background.cor'
with open(lee_train_file) as f:
for n, l in enumerate(f):
if n < 5:
print([l])
return lee_train_file
'''
def build_texts(fname):
"""
Function to build tokenized texts from file
Parameters:
----------
fname-- File to be read
Returns:
-------
yields preprocessed line
"""
with open(fname) as f:
for line in f:
yield gensim.utils.simple_preprocess(line, deacc=True, min_len=3)
'''
def preProcsText(files):
'''
    Tokenize and pre-process each text with gensim's simple_preprocess, keeping tokens with a minimum length of 3.
files-- text for processing
'''
for f in files:
f=yield gensim.utils.simple_preprocess(f, deacc=True, min_len=3)
def process_texts(bigram, texts):
"""
Function to process texts. Following are the steps we take:
1. Stopword Removal.
2. Collocation detection.
3. Lemmatization (not stem since stemming can reduce the interpretability).
Parameters:
----------
bigram-- bigram to analyze
texts-- Tokenized texts.
Returns:
-------
texts: Pre-processed tokenized texts.
"""
# reg. expression tokenizer
texts = [[word for word in line if word not in stops] for line in texts]
texts = [bigram[line] for line in texts]
texts = [[word.split('/')[0] for word in lemmatize(' '.join(line), allowed_tags=re.compile('(NN)'), min_length=3)] for line in texts]
return texts
def evaluate_graph(dictionary, corpus, texts, limit):
"""
Method for using a coherence model to look at topic coherence for LDA models.
Parameters:
----------
dictionary-- Gensim dictionary
corpus-- Gensim corpus
limit-- topic limit
Returns:
-------
lm_list : List of LDA topic models
c_v : Coherence values corresponding to the LDA model with respective number of topics
"""
c_v = []
lm_list = []
for num_topics in range(1, limit):
lm = LdaModel(corpus=corpus, num_topics=num_topics, id2word=dictionary)
lm_list.append(lm)
cm = CoherenceModel(model=lm, texts=texts, dictionary=dictionary, coherence='c_v')
c_v.append(cm.get_coherence())
del cm
return lm_list, c_v
def ret_top_model(corpus,dictionary):
"""
    Since LDAmodel is a probabilistic model, it comes up with different topics each time we run it. To control the
quality of the topic model we produce, we can see what the interpretability of the best topic is and keep
evaluating the topic model until this threshold is crossed.
corpus-- the text corpus
dictionary-- term dictionary
method returns lm: final evaluated topic model
method returns top_topics: ranked topics in decreasing order. List of tuples
"""
top_topics = [(0, 0)]
while top_topics[0][1] < 0.97:
lm = LdaModel(corpus=corpus, id2word=dictionary)
coherence_values = {}
for n, topic in lm.show_topics(num_topics=-1, formatted=False):
topic = [word for word, _ in topic]
cm = CoherenceModel(topics=[topic], texts=train_texts, dictionary=dictionary, window_size=10)
coherence_values[n] = cm.get_coherence()
top_topics = sorted(coherence_values.items(), key=operator.itemgetter(1), reverse=True)
return lm, top_topics
def addTotalTermResults(t):
'''Method to add results and clean by removing extra white space
t-- results and text to clean
result_dict: dictionary of the term and values that are cleaned
'''
result_dict={}
for a,b in t:
text=re.sub('"',"",b)
text.replace(" ","")
txts=text.split("+")
for t in txts:
ttnts=t.split("*")
v=float(ttnts[0])
t=ttnts[1]
t=str(a)+":"+t
if(t in result_dict):
continue
else:
t=t.strip()
result_dict[t]=v
return result_dict
def addToResults(result_dict):
'''
Add dictionary to a list of results from each text
result_dict-- this is the resulting terms
'''
listResults.append(result_dict)
def dictionaryResults():
'''
Method aggregates all the dictionaries for keyterms and their values.
returns a dictionary of all keyterms and values
'''
#set the dictionary
dct={}
#iterate over all tweets and add to total dictionary
for dictionary in listResults:
for key in dictionary:
v=dictionary[key]
if(key in dct):
vv=dct[key]
vv=v+vv
dct[key]=vv
else:
dct[key]=v
return dct
def printResults(i,model):
'''Output results of the analysis
i-- the topic number
model-- the model used (e.g., lda, hdp)
'''
#os.chdir('../')
pn=os.path.abspath(__file__)
pn=pn.split("src")[0]
filename=os.path.join(pn,'results','analysis_results_'+model+str(i)+".csv")
fieldnames = ['Topic','Term','Value']
dct=dictionaryResults()
with open(filename, 'wb') as csvf:
writer = csv.DictWriter(csvf, fieldnames=fieldnames)
writer.writeheader()
for key in dct:
v=dct[key]
tn=key.split(":")[0]
kt=key.split(":")[1]
writer.writerow({'Topic':str(tn),'Term': str(kt.encode("utf-8")),'Value':str(v)})
def printEvaluation(modList,results,i):
'''
Method to print csv output results of the evaluations conducted
modList-- the model evaluated
results-- the result scores
i-- the index output desired
'''
pn=os.path.abspath(__file__)
pn=pn.split("src")[0]
filename=os.path.join(pn,'results','evaluationTotal'+str(i)+".csv")
fieldnames = ['Model','Score']
with open(filename, 'wb') as csvf:
writer = csv.DictWriter(csvf, fieldnames=fieldnames)
writer.writeheader()
for i in range(0,len(modList)):
writer.writerow({'Model':str(modList[i]),'Score': str(results[i])})
#lee_train_file=test_directories()
#train_texts = list(build_texts(lee_train_file))
#bigram = gensim.models.Phrases(train_texts)
def run():
'''
    Run the topic-modelling pipeline (LDA and HDP).
    The code below builds the models for a range of topic numbers and
    performs coherence testing for the LDA models.
'''
topicN=raw_input("Number of topics: ")
fll=raw_input("Filter Terms: ")
gov=raw_input("Government Material: ")
fflt=fll.split(",")
flt=[]
for f in fflt:
flt.append(f)
##filter based on government type
##filter based on sentences around mountain pine beetle
##do one filter at a time and then both together
pn=os.path.abspath(__file__)
pn=pn.split("src")[0]
p=PatternMatcher()
content=p.retrieveContent(flt,gov)
results=integrateText(content)
#results=retrieveText(results)
bigram = gensim.models.Phrases(results)
#train_texts = process_texts(results)
results=preProcsText(results)
train_texts=process_texts(bigram,results)
print('start')
dictionary = Dictionary(train_texts)
corpus = [dictionary.doc2bow(text) for text in train_texts]
#keep track of iteration
iiT=2
#topics are tested based on a given topic number
for i in range(2,int(topicN),1):
# lsi model
print('run evaluation: '+ str(i))
#lsimodel = LsiModel(corpus=corpus, num_topics=i, id2word=dictionary)
#lsitopics=lsimodel.show_topics(num_topics=i)
#result_dict=addTotalTermResults(lsitopics)
#addToResults(result_dict)
#printResults(i,'lsi')
del listResults[:]
#hdp model
hdpmodel = HdpModel(corpus=corpus, id2word=dictionary)
hdpmodel.show_topics()
hdptopics = hdpmodel.show_topics(num_topics=i)
result_dict=addTotalTermResults(hdptopics)
#add results to total kept in a list
addToResults(result_dict)
printResults(i,'hdp')
del listResults[:]
#lda model
ldamodel = LdaModel(corpus=corpus, num_topics=i, id2word=dictionary)
num=str(i)
ldamodel.save('lda'+num+'.model')
ldatopics = ldamodel.show_topics(num_topics=i)
result_dict=addTotalTermResults(ldatopics)
addToResults(result_dict)
printResults(i,'lda')
del listResults[:]
visualisation2 = pyLDAvis.gensim.prepare(ldamodel, corpus, dictionary)
location=os.path.join(pn,'results')
#visualize outputs
pyLDAvis.save_html(visualisation2, os.path.join(location,'LDA_Visualization'+str(i)+'.html'))
iiT=i
print('evaluate graph')
#coherence model evaluation
lmlist, c_v = evaluate_graph(dictionary=dictionary, corpus=corpus, texts=train_texts, limit=i)
#lm, top_topics = ret_top_model()
#coherence model results
printEvaluation(lmlist,c_v,iiT)
'''
The main entry point to launch this module for generating the LDA and HDP terms and topics.
'''
if __name__ == '__main__':
run()
| [] |
2024-01-10 | astronomer/webinar-ai-integrations | dags~pgvector_example.py | doc_md = """
## Vectorize book descriptions with OpenAI and store them in Postgres with pgvector
This DAG shows how to use the OpenAI API 1.0+ to vectorize book descriptions and
store them in Postgres with the pgvector extension.
It will also help you pick your next book to read based on a mood you describe.
You will need to set the following environment variables:
- `AIRFLOW_CONN_POSTGRES_DEFAULT`: an Airflow connection to your Postgres database
that has pgvector installed
- `OPENAI_API_KEY_AI_INTEGRATIONS_DEMO`: your OpenAI API key
"""
from airflow.decorators import dag, task
from airflow.models.baseoperator import chain
from airflow.models.param import Param
from airflow.providers.pgvector.operators.pgvector import PgVectorIngestOperator
from airflow.providers.postgres.operators.postgres import PostgresOperator
from pendulum import datetime, duration
import uuid
import re
POSTGRES_CONN_ID = "postgres_ai_integrations_demo"
TEXT_FILE_PATH = "include/source_data/book_data.txt"
TABLE_NAME = "Book"
OPENAI_MODEL = "text-embedding-ada-002"
MODEL_VECTOR_LENGTH = 1536
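# A stand-alone sketch (not used by the DAG) of the similarity lookup performed at the
# end of the pipeline: embed a mood string with OpenAI and let pgvector order books by
# distance with the `<->` operator. The POSTGRES_DSN environment variable is an
# assumption for illustration; the DAG runs the same query through PostgresOperator.
def _example_book_lookup(book_mood: str):
    import os
    import psycopg2
    from openai import OpenAI
    client = OpenAI(api_key=os.environ["OPENAI_API_KEY_AI_INTEGRATIONS_DEMO"])
    query_vector = client.embeddings.create(input=book_mood, model=OPENAI_MODEL).data[0].embedding
    with psycopg2.connect(os.environ["POSTGRES_DSN"]) as conn:
        with conn.cursor() as cur:
            cur.execute(
                f"SELECT title, author FROM {TABLE_NAME} "
                "ORDER BY vector <-> CAST(%(query_vector)s AS VECTOR) LIMIT 1;",
                {"query_vector": query_vector},
            )
            return cur.fetchone()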
@dag(
start_date=datetime(2023, 9, 1),
schedule="0 0 * * 0",
catchup=False,
params={
"book_mood": Param(
"A philosophical book about consciousness.",
type="string",
description="Describe the kind of book you want to read.",
),
},
tags=["pgvector"],
doc_md=doc_md,
default_args={"retries": 3, "retry_delay": duration(seconds=60)},
)
def pgvector_example():
enable_vector_extension_if_not_exists = PostgresOperator(
task_id="enable_vector_extension_if_not_exists",
postgres_conn_id=POSTGRES_CONN_ID,
sql="CREATE EXTENSION IF NOT EXISTS vector;",
)
create_table_if_not_exists = PostgresOperator(
task_id="create_table_if_not_exists",
postgres_conn_id=POSTGRES_CONN_ID,
sql=f"""
CREATE TABLE IF NOT EXISTS {TABLE_NAME} (
book_id UUID PRIMARY KEY,
title TEXT,
year INTEGER,
author TEXT,
description TEXT,
vector VECTOR(%(vector_length)s)
);
""",
parameters={"vector_length": MODEL_VECTOR_LENGTH},
)
get_already_imported_book_ids = PostgresOperator(
task_id="get_already_imported_book_ids",
postgres_conn_id=POSTGRES_CONN_ID,
sql=f"""
SELECT book_id
FROM {TABLE_NAME};
""",
)
@task
def import_book_data(text_file_path: str, table_name: str) -> list:
"Read the text file and create a list of dicts from the book information."
with open(text_file_path, "r") as f:
lines = f.readlines()
num_skipped_lines = 0
list_of_params = []
for line in lines:
parts = line.split(":::")
title_year = parts[1].strip()
match = re.match(r"(.+) \((\d{4})\)", title_year)
try:
title, year = match.groups()
year = int(year)
# skip malformed lines
except:
num_skipped_lines += 1
continue
author = parts[2].strip()
description = parts[3].strip()
list_of_params.append(
{
"book_id": str(
uuid.uuid5(
name=" ".join([title, str(year), author, description]),
namespace=uuid.NAMESPACE_DNS,
)
),
"title": title,
"year": year,
"author": author,
"description": description,
}
)
        print(
            f"Created a list with {len(list_of_params)} elements "
            f"while skipping {num_skipped_lines} lines."
        )
return list_of_params
@task.virtualenv(
requirements=[
"openai==1.3.2",
]
)
def create_embeddings_book_data(
book_data: dict, model: str, already_imported_books: list
) -> dict:
"Create embeddings for a book description and add them to the book data."
from openai import OpenAI
import os
already_imported_books_ids = [x[0] for x in already_imported_books]
if book_data["book_id"] in already_imported_books_ids:
raise Exception("Book already imported.")
client = OpenAI(api_key=os.environ["OPENAI_API_KEY_AI_INTEGRATIONS_DEMO"])
response = client.embeddings.create(input=book_data["description"], model=model)
embeddings = response.data[0].embedding
book_data["vector"] = embeddings
return book_data
@task
def get_book_mood(**context):
"Pull the book mood from the context."
book_mood = context["params"]["book_mood"]
return book_mood
@task.virtualenv(requirements=["openai==1.3.2"])
def create_embeddings_query(model: str, book_mood: str) -> list:
"Create embeddings for the user provided book mood."
from openai import OpenAI
import os
client = OpenAI(api_key=os.environ["OPENAI_API_KEY_AI_INTEGRATIONS_DEMO"])
response = client.embeddings.create(input=book_mood, model=model)
embeddings = response.data[0].embedding
return embeddings
book_data = import_book_data(text_file_path=TEXT_FILE_PATH, table_name=TABLE_NAME)
book_embeddings = create_embeddings_book_data.partial(
model=OPENAI_MODEL,
already_imported_books=get_already_imported_book_ids.output,
).expand(book_data=book_data)
query_vector = create_embeddings_query(
model=OPENAI_MODEL, book_mood=get_book_mood()
)
import_embeddings_to_pgvector = PgVectorIngestOperator.partial(
task_id="import_embeddings_to_pgvector",
trigger_rule="all_done",
conn_id=POSTGRES_CONN_ID,
sql=(
f"INSERT INTO {TABLE_NAME} "
"(book_id, title, year, author, description, vector) "
"VALUES (%(book_id)s, %(title)s, %(year)s, "
"%(author)s, %(description)s, %(vector)s) "
"ON CONFLICT (book_id) DO NOTHING;"
),
).expand(parameters=book_embeddings)
get_a_book_suggestion = PostgresOperator(
task_id="get_a_book_suggestion",
postgres_conn_id=POSTGRES_CONN_ID,
trigger_rule="all_done",
sql=f"""
SELECT title, year, author, description
FROM {TABLE_NAME}
ORDER BY vector <-> CAST(%(query_vector)s AS VECTOR)
LIMIT 1;
""",
parameters={"query_vector": query_vector},
)
@task
def print_suggestion(query_result, **context):
"Print the book suggestion."
query = context["params"]["book_mood"]
book_title = query_result[0][0]
book_year = query_result[0][1]
book_author = query_result[0][2]
book_description = query_result[0][3]
print(f"Book suggestion for '{query}':")
print(
f"You should read {book_title} by {book_author}, published in {book_year}!"
)
print(f"Goodreads describes the book as: {book_description}")
chain(
enable_vector_extension_if_not_exists,
create_table_if_not_exists,
get_already_imported_book_ids,
import_embeddings_to_pgvector,
get_a_book_suggestion,
print_suggestion(query_result=get_a_book_suggestion.output),
)
chain(query_vector, get_a_book_suggestion)
chain(get_already_imported_book_ids, book_embeddings)
pgvector_example()
| [] |
2024-01-10 | astronomer/webinar-ai-integrations | dags~analyze_customer_feedback.py | doc_md = """
## Analyze Customer Feedback using the Cohere and OpenSearch Airflow providers
This DAG ingests mock customer feedback data from a local file into OpenSearch, uses
OpenSearch to query the data for relevant feedback, uses Cohere to create vector embeddings
of the feedback and to run sentiment analysis, and then loads the embeddings and sentiment
back into OpenSearch. Finally, it performs a KNN search on the embeddings to find feedback
similar to a given search term.
"""
from airflow.decorators import dag, task
from airflow.models.baseoperator import chain
from airflow.operators.empty import EmptyOperator
from airflow.providers.cohere.operators.embedding import CohereEmbeddingOperator
from airflow.providers.cohere.hooks.cohere import CohereHook
from airflow.providers.opensearch.operators.opensearch import (
OpenSearchAddDocumentOperator,
OpenSearchCreateIndexOperator,
OpenSearchQueryOperator,
)
from airflow.providers.opensearch.hooks.opensearch import OpenSearchHook
from pendulum import datetime, duration
import uuid
import json
# local file with sentiment examples
from include.source_data.classification_examples import SENTIMENT_EXAMPLES
# this is the search term for which we want to find similar feedback
TESTIMONIAL_SEARCH_TERM = "Using this product for MLOps and loving it!"
# subset query parameters if you make changes here, make sure to also adjust
# the mock API in include/mock_api/app.py
CUSTOMER_LOCATION = "Switzerland"
PRODUCT_TYPE = "cloud service A"
AB_TEST_GROUP = "A"
FEEDBACK_SEARCH_TERMS = "UI OR UX OR user interface OR user experience"
MAX_NUMBER_OF_RESULTS = 1000
# this is the length of the embeddings returned by Cohere
MODEL_VECTOR_LENGTH = 768
# connection ids
COHERE_CONN_ID = "cohere_ai_integrations_demo"
OPENSEARCH_CONN_ID = "opensearch_ai_integrations_demo"
OPENSEARCH_INDEX_NAME = "customer_feedback"
MY_POOL = "my_cohere_pool"
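# A stand-alone sketch (not used by the DAG) of the Cohere sentiment call made in the
# get_sentiment task below: classify one piece of feedback against the labelled
# SENTIMENT_EXAMPLES. Authenticating via cohere.Client and a COHERE_API_KEY environment
# variable is an assumption; the DAG itself goes through CohereHook and an Airflow connection.
def _example_sentiment_classification(feedback_text: str):
    import os
    import cohere
    co = cohere.Client(os.environ["COHERE_API_KEY"])
    response = co.classify(
        model="large",
        inputs=[feedback_text],
        examples=SENTIMENT_EXAMPLES,
    )
    return response.classifications[0].prediction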
@dag(
start_date=datetime(2023, 10, 18),
schedule="0 0 * * 0",
catchup=False,
tags=["Cohere", "OpenSearch"],
doc_md=doc_md,
default_args={"retries": 3, "retry_delay": duration(seconds=60)},
)
def analzye_customer_feedback():
# --------------------------------------------- #
# Ingest customer feedback data into OpenSearch #
# --------------------------------------------- #
@task.branch
def check_if_index_exists(index_name: str, conn_id: str) -> str:
"Check if the index already exists in OpenSearch."
client = OpenSearchHook(open_search_conn_id=conn_id, log_query=True).client
is_index_exist = client.indices.exists(index_name)
if is_index_exist:
return "index_exists"
return "create_index"
create_index = OpenSearchCreateIndexOperator(
task_id="create_index",
opensearch_conn_id=OPENSEARCH_CONN_ID,
index_name=OPENSEARCH_INDEX_NAME,
index_body={
"settings": {
"index": {
"number_of_shards": 1,
"knn": True,
"knn.algo_param.ef_search": 100,
}
},
"mappings": {
"properties": {
"customer_feedback": {"type": "text"},
"customer_rating": {"type": "integer"},
"customer_id": {"type": "keyword"},
"timestamp": {"type": "date"},
"customer_location": {"type": "keyword"},
"product_type": {"type": "keyword"},
"ab_test_group": {"type": "keyword"},
"embeddings": {
"type": "knn_vector",
"dimension": MODEL_VECTOR_LENGTH,
"method": {
"name": "hnsw",
"space_type": "cosinesimil",
"engine": "nmslib",
},
},
"sentiment_prediction": {"type": "keyword"},
"sentiment_confidence": {"type": "float"},
}
},
},
)
index_exists = EmptyOperator(task_id="index_exists")
@task
def get_customer_feedback() -> list:
"Query import mock customer feedback."
file_path = "include/source_data/mock_customer_reviews.json"
with open(file_path, "r") as file:
mock_customer_reviews = json.load(file)
return mock_customer_reviews
all_customer_feedback = get_customer_feedback()
@task
def customer_feedback_to_dict_list(customer_feedback: list):
"Convert the customer feedback data into a list of dictionaries."
list_of_feedback = []
for customer in customer_feedback:
unique_line_id = uuid.uuid5(
name=" ".join(
[str(customer["customer_id"]), str(customer["timestamp"])]
),
namespace=uuid.NAMESPACE_DNS,
)
kwargs = {"doc_id": str(unique_line_id), "document": customer}
list_of_feedback.append(kwargs)
return list_of_feedback
list_of_document_kwargs = customer_feedback_to_dict_list(
customer_feedback=all_customer_feedback
)
add_lines_as_documents = OpenSearchAddDocumentOperator.partial(
task_id="add_lines_as_documents",
opensearch_conn_id=OPENSEARCH_CONN_ID,
trigger_rule="none_failed",
index_name=OPENSEARCH_INDEX_NAME,
).expand_kwargs(list_of_document_kwargs)
# -------------------------------------------- #
# Query customer feedback data from OpenSearch #
# -------------------------------------------- #
search_for_relevant_feedback = OpenSearchQueryOperator(
task_id="search_for_relevant_feedback",
opensearch_conn_id=OPENSEARCH_CONN_ID,
index_name=OPENSEARCH_INDEX_NAME,
query={
"size": MAX_NUMBER_OF_RESULTS,
"query": {
"bool": {
"must": [
{
"match": {
"customer_feedback": {
"query": FEEDBACK_SEARCH_TERMS,
"analyzer": "english",
"fuzziness": "AUTO",
}
}
}
],
"filter": [
{"term": {"customer_location": CUSTOMER_LOCATION}},
{"term": {"ab_test_group": AB_TEST_GROUP}},
{"term": {"product_type": PRODUCT_TYPE}},
],
},
},
},
)
@task
def reformat_relevant_reviews(search_results: dict) -> list:
"Reformat the relevant reviews from the OpenSearch query results."
ids = [x["_id"] for x in search_results["hits"]["hits"]]
reviews_of_interest = [x["_source"] for x in search_results["hits"]["hits"]]
reviews_with_id = []
for id, review in zip(ids, reviews_of_interest):
review["id"] = id
reviews_with_id.append(review)
return reviews_of_interest
relevant_reviews = reformat_relevant_reviews(
search_results=search_for_relevant_feedback.output
)
@task
def get_feedback_texts(review_of_interest: dict) -> str:
"Get the feedback text from the relevant reviews."
feedback_text = review_of_interest["customer_feedback"]
return feedback_text
feedback_texts = get_feedback_texts.expand(review_of_interest=relevant_reviews)
# --------------------------------------- #
# Perform sentiment analysis #
# on relevant customer feedback #
# and get embeddings using the Cohere API #
# --------------------------------------- #
@task(
pool=MY_POOL,
)
def get_sentiment(input_text: str, sentiment_examples: list, conn_id: str) -> float:
"Get the sentiment of the customer feedback using the Cohere API."
co = CohereHook(conn_id=conn_id).get_conn
response = co.classify(
model="large",
inputs=[input_text],
examples=sentiment_examples,
)
print(input_text)
print(response.classifications)
return {
"prediction": response.classifications[0].prediction,
"confidence": response.classifications[0].confidence,
}
sentiment_scores = get_sentiment.partial(
conn_id=COHERE_CONN_ID, sentiment_examples=SENTIMENT_EXAMPLES
).expand(input_text=feedback_texts)
get_embeddings = CohereEmbeddingOperator.partial(
task_id="get_embeddings",
conn_id=COHERE_CONN_ID,
pool=MY_POOL,
).expand(input_text=feedback_texts)
@task
def combine_reviews_embeddings_and_sentiments(
reviews: list, embeddings: list, sentiments: list
) -> list:
"Combine the reviews, embeddings and sentiments into a single list of dictionaries."
review_with_embeddings = []
for review, embedding, sentiment in zip(reviews, embeddings, sentiments):
review_with_embeddings.append(
{
"review": review,
"embedding": embedding[0],
"sentiment_prediction": sentiment["prediction"],
"sentiment_confidence": sentiment["confidence"],
}
)
return review_with_embeddings
full_data = combine_reviews_embeddings_and_sentiments(
reviews=relevant_reviews,
embeddings=get_embeddings.output,
sentiments=sentiment_scores,
)
@task
def load_embeddings_into_opensearch(full_data: dict, conn_id: str) -> None:
"Load the embeddings and sentiment into OpenSearch."
client = OpenSearchHook(open_search_conn_id=conn_id, log_query=True).client
client.update(
index=OPENSEARCH_INDEX_NAME,
id=full_data["review"]["id"],
body={
"doc": {
"embeddings": [float(x) for x in full_data["embedding"]],
"sentiment_prediction": full_data["sentiment_prediction"],
"sentiment_confidence": full_data["sentiment_confidence"],
}
},
)
load_embeddings_obj = load_embeddings_into_opensearch.partial(
conn_id=OPENSEARCH_CONN_ID
).expand(full_data=full_data)
# ------------------------------------------------------- #
# Query OpenSearch for the most similar testimonial using #
# KNN on the embeddings and filter for positive sentiment #
# ------------------------------------------------------- #
get_embeddings_testimonial_search_term = CohereEmbeddingOperator(
task_id="get_embeddings_testimonial_search_term",
conn_id=COHERE_CONN_ID,
input_text=TESTIMONIAL_SEARCH_TERM,
)
@task
def prep_search_term_embeddings_for_query(embeddings: list) -> list:
"Prepare the embeddings for the OpenSearch query."
return [float(x) for x in embeddings[0]]
search_term_embeddings = prep_search_term_embeddings_for_query(
embeddings=get_embeddings_testimonial_search_term.output
)
search_for_testimonial_candidates = OpenSearchQueryOperator(
task_id="search_for_testimonial_candidates",
opensearch_conn_id=OPENSEARCH_CONN_ID,
index_name=OPENSEARCH_INDEX_NAME,
query={
"size": 10,
"query": {
"bool": {
"must": [
{
"knn": {
"embeddings": {
"vector": search_term_embeddings,
"k": 10,
}
}
}
],
"filter": [
{"term": {"sentiment_prediction": "positive"}},
],
}
},
},
)
@task
def print_testimonial_candidates(search_results: dict) -> None:
"Print the testimonial candidates from the OpenSearch query results."
for result in search_results["hits"]["hits"]:
print("Customer ID: ", result["_source"]["customer_id"])
print("Customer feedback: ", result["_source"]["customer_feedback"])
print("Customer location: ", result["_source"]["customer_location"])
print("Customer rating: ", result["_source"]["customer_rating"])
print("Customer sentiment: ", result["_source"]["sentiment_prediction"])
print(
"Customer sentiment confidence: ",
result["_source"]["sentiment_confidence"],
)
chain(
check_if_index_exists(
index_name=OPENSEARCH_INDEX_NAME, conn_id=OPENSEARCH_CONN_ID
),
[create_index, index_exists],
add_lines_as_documents,
search_for_relevant_feedback,
relevant_reviews,
feedback_texts,
load_embeddings_obj,
get_embeddings_testimonial_search_term,
search_for_testimonial_candidates,
print_testimonial_candidates(
search_results=search_for_testimonial_candidates.output
),
)
analzye_customer_feedback()
| [] |
2024-01-10 | astronomer/webinar-ai-integrations | dags~pinecone_example.py | doc_md = """
## Use the Pinecone Airflow Provider to generate and query vectors for series descriptions
This DAG runs a simple MLOps pipeline that uses the Pinecone Airflow Provider to import
series descriptions, generate vectors for them, and query the vectors for series based on
a user-provided mood.
"""
from airflow.decorators import dag, task
from airflow.models.param import Param
from airflow.models.baseoperator import chain
from airflow.providers.pinecone.operators.pinecone import PineconeIngestOperator
from airflow.providers.pinecone.hooks.pinecone import PineconeHook
from pendulum import datetime, duration
import uuid
import re
PINECONE_INDEX_NAME = "series-to-watch"
DATA_FILE_PATH = "include/source_data/series_data.txt"
PINECONE_CONN_ID = "pinecone_ai_integrations_demo"
EMBEDDING_MODEL = "text-embedding-ada-002"
EMBEDDING_MODEL_DIMENSIONS = 1536
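# A stand-alone sketch (not used by the DAG) of the lookup the final task performs:
# embed a mood with OpenAI, then ask Pinecone for the closest series. It assumes the
# same Airflow Pinecone connection as the DAG and the OpenAI key environment variable
# used elsewhere in this repo.
def _example_series_lookup(series_mood: str):
    import os
    from openai import OpenAI
    client = OpenAI(api_key=os.environ["OPENAI_API_KEY_AI_INTEGRATIONS_DEMO"])
    mood_vector = client.embeddings.create(input=series_mood, model=EMBEDDING_MODEL).data[0].embedding
    hook = PineconeHook(conn_id=PINECONE_CONN_ID)
    response = hook.query_vector(
        index_name=PINECONE_INDEX_NAME,
        vector=mood_vector,
        top_k=1,
        include_metadata=True,
    )
    return response["matches"][0]["metadata"]["title"]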
def generate_uuid5(identifier: list) -> str:
"Create a UUID5 from a list of strings and return the uuid as a string."
name = "/".join([str(i) for i in identifier])
namespace = uuid.NAMESPACE_DNS
uuid_obj = uuid.uuid5(namespace=namespace, name=name)
return str(uuid_obj)
@dag(
start_date=datetime(2023, 10, 18),
schedule="0 0 * * 0",
catchup=False,
params={"series_mood": Param("A series about astronauts.", type="string")},
tags=["Pinecone"],
doc_md=doc_md,
default_args={"retries": 3, "retry_delay": duration(seconds=60)},
)
def pinecone_example():
@task
def import_data_func(text_file_path: str) -> list:
"Import data from a text file and return it as a list of dicts."
with open(text_file_path, "r") as f:
lines = f.readlines()
num_skipped_lines = 0
descriptions = []
data = []
for line in lines:
parts = line.split(":::")
title_year = parts[1].strip()
match = re.match(r"(.+) \((\d{4})\)", title_year)
try:
title, year = match.groups()
year = int(year)
except:
num_skipped_lines += 1
continue
genre = parts[2].strip()
description = parts[3].strip()
descriptions.append(description)
data.append(
{
"id": generate_uuid5(
identifier=[title, year, genre, description]
), # an `id` property is required for Pinecone
"metadata": {
"title": title,
"year": year,
"genre": genre,
"description": description, # this is the text we'll embed
},
}
)
return data
series_data = import_data_func(text_file_path=DATA_FILE_PATH)
@task.virtualenv(requirements=["openai==1.3.2"])
def vectorize_series_data(series_data: dict, model: str) -> dict:
"Create embeddings for the series descriptions."
from openai import OpenAI
import os
client = OpenAI(api_key=os.environ["OPENAI_API_KEY_AI_INTEGRATIONS_DEMO"])
response = client.embeddings.create(
input=series_data["metadata"]["description"], model=model
)
embeddings = response.data[0].embedding
series_data["values"] = embeddings
return series_data
vectorized_data = vectorize_series_data.partial(model=EMBEDDING_MODEL).expand(
series_data=series_data
)
@task
def get_series_mood(**context):
"Pull the book mood from the context."
book_mood = context["params"]["series_mood"]
return book_mood
@task.virtualenv(requirements=["openai==1.3.2"])
def vectorize_user_mood(model: str, series_mood: str) -> list:
"Create embeddings for the user mood."
from openai import OpenAI
import os
client = OpenAI(api_key=os.environ["OPENAI_API_KEY_AI_INTEGRATIONS_DEMO"])
response = client.embeddings.create(input=series_mood, model=model)
embeddings = response.data[0].embedding
return embeddings
@task
def create_index_if_not_exists(
index_name: str, vector_size: int, pinecone_conn_id: str
) -> None:
"Create a Pinecone index of the provided name if it doesn't already exist."
hook = PineconeHook(conn_id=pinecone_conn_id)
existing_indexes = hook.list_indexes()
if index_name not in existing_indexes:
newindex = hook.create_index(index_name=index_name, dimension=vector_size)
return newindex
else:
print(f"Index {index_name} already exists")
create_index_if_not_exists_obj = create_index_if_not_exists(
vector_size=EMBEDDING_MODEL_DIMENSIONS,
index_name=PINECONE_INDEX_NAME,
pinecone_conn_id=PINECONE_CONN_ID,
)
pinecone_vector_ingest = PineconeIngestOperator(
task_id="pinecone_vector_ingest",
conn_id=PINECONE_CONN_ID,
index_name=PINECONE_INDEX_NAME,
input_vectors=vectorized_data,
)
@task
def query_pinecone(
index_name: str,
pinecone_conn_id: str,
vectorized_user_mood: list,
) -> None:
"Query the Pinecone index with the user mood and print the top result."
hook = PineconeHook(conn_id=pinecone_conn_id)
query_response = hook.query_vector(
index_name=index_name,
top_k=1,
include_values=True,
include_metadata=True,
vector=vectorized_user_mood,
)
print("You should watch: " + query_response["matches"][0]["metadata"]["title"])
print("Description: " + query_response["matches"][0]["metadata"]["description"])
query_pinecone_obj = query_pinecone(
index_name=PINECONE_INDEX_NAME,
pinecone_conn_id=PINECONE_CONN_ID,
vectorized_user_mood=vectorize_user_mood(
model=EMBEDDING_MODEL, series_mood=get_series_mood()
),
)
chain(
create_index_if_not_exists_obj,
pinecone_vector_ingest,
query_pinecone_obj,
)
pinecone_example()
| [] |
2024-01-10 | astronomer/webinar-ai-integrations | dags~cohere_example.py | doc_md = """
## Get recipe suggestions using Cohere's LLMs, embed and visualize the results
This DAG shows how to use the Cohere Airflow provider to interact with the Cohere API.
The DAG generates recipes based on user input via Airflow params, embeds the
responses using Cohere embeddings, and visualizes them in 2 dimensions using PCA,
matplotlib and seaborn.
"""
from airflow.decorators import dag, task
from airflow.models.param import Param
from airflow.models.baseoperator import chain
from airflow.providers.cohere.hooks.cohere import CohereHook
from airflow.providers.cohere.operators.embedding import CohereEmbeddingOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.decomposition import PCA
from adjustText import adjust_text
from pendulum import datetime, duration
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import os
AWS_CONN_ID = "aws_ai_integrations_demo"
AWS_BUCKET_NAME = "ce-ai-integrations-demo-dnd"
COHERE_CONN_ID = "cohere_ai_integrations_demo"
LOCAL_IMAGE_PATH = "include/plots/"
ENVIRONMENT = os.getenv("ENVIRONMENT", "local")
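# A stand-alone sketch (not used by the DAG) of the generation call the get_a_recipe
# task makes: one Cohere `generate` request for a single country. Authenticating via
# cohere.Client and a COHERE_API_KEY environment variable is an assumption; the DAG
# itself authenticates through CohereHook and an Airflow connection.
def _example_recipe_generation(country: str, ingredients: list):
    import cohere
    co = cohere.Client(os.environ["COHERE_API_KEY"])
    response = co.generate(
        model="command",
        prompt=f"Please provide a vegetarian recipe from {country} "
        + f"that uses as many of these ingredients as possible: {', '.join(ingredients)}.",
        max_tokens=500,
        temperature=2.5,
    )
    return response.generations[0].text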
@dag(
start_date=datetime(2023, 11, 1),
schedule="0 0 * * 0",
catchup=False,
params={
"countries": Param(
["Switzerland", "Norway", "New Zealand", "Cameroon", "Bhutan", "Chile"],
type="array",
title="Countries of recipe origin",
description="Enter from which countries you would like to get recipes."
+ "List at least two countries.",
),
"pantry_ingredients": Param(
["gruyere", "olives", "potatoes", "onions", "pineapple"],
type="array",
description="List the ingredients you have in your pantry, you'd like to use",
),
"type": Param(
"vegetarian",
type="string",
enum=["vegan", "vegetarian", "omnivore"],
description="Select the type of recipe you'd like to get.",
),
"max_tokens_recipe": Param(
500,
type="integer",
description="Enter the max number of tokens the model should generate.",
),
"randomness_of_recipe": Param(
25,
type="integer",
description=(
"Enter the desired randomness of the recipe on a scale"
+ "from 0 (no randomness) to 50 (full randomness). "
+ "This setting corresponds to 10x the temperature setting in the Cohere API."
),
),
},
tags=["Cohere"],
doc_md=doc_md,
default_args={"retries": 3, "retry_delay": duration(seconds=60)},
)
def cohere_example():
@task
def get_countries_list(**context):
"Pull the list of countries from the context."
countries = context["params"]["countries"]
return countries
@task
def get_ingredients_list(**context):
"Pull the list of ingredients from the context."
ingredients = context["params"]["pantry_ingredients"]
return ingredients
@task
def get_a_recipe(
cohere_conn_id: str, country: str, ingredients_list: list, **context
):
"Get recipes from the Cohere API for your pantry ingredients for a given country."
type = context["params"]["type"]
max_tokens_answer = context["params"]["max_tokens_recipe"]
randomness_of_answer = context["params"]["randomness_of_recipe"]
co = CohereHook(conn_id=cohere_conn_id).get_conn
response = co.generate(
model="command",
prompt=f"Please provide a delicious {type} recipe from {country} "
+ f"that uses as many of these ingredients: {', '.join(ingredients_list)} as possible, "
+ "if you can't find a recipe that uses all of them, suggest an additional desert."
+ "Bonus points if it's a traditional recipe from that country, "
+ "you can name the city or region it's from and you can provide "
+ "vegan alternatives for the ingredients."
+ "Provide the full recipe with all steps and ingredients.",
max_tokens=max_tokens_answer,
temperature=randomness_of_answer / 10,
)
recipe = response.generations[0].text
print(f"Your recipe from {country}")
print(f"for the ingredients {', '.join(ingredients_list)} is:")
print(recipe)
with open(f"include/recipes/{country}_recipe.txt", "w") as f:
f.write(recipe)
return recipe
countries_list = get_countries_list()
ingredients_list = get_ingredients_list()
recipes_list = get_a_recipe.partial(
cohere_conn_id=COHERE_CONN_ID, ingredients_list=ingredients_list
).expand(country=countries_list)
get_embeddings = CohereEmbeddingOperator.partial(
task_id="get_embeddings",
conn_id=COHERE_CONN_ID,
).expand(input_text=recipes_list)
@task
def plot_embeddings(
embeddings: list,
text_labels: list,
aws_conn_id: str,
aws_bucket_name: str,
environment: str,
local_image_path: str,
) -> None:
"Plot the embeddings of the recipes."
embeddings = [x[0] for x in embeddings]
pca = PCA(n_components=2)
reduced_embeddings = pca.fit_transform(embeddings)
plt.figure(figsize=(10, 8))
df_embeddings = pd.DataFrame(reduced_embeddings, columns=["PC1", "PC2"])
sns.scatterplot(
df_embeddings, x="PC1", y="PC2", s=100, color="gold", edgecolor="black"
)
font_style = {"color": "black"}
texts = []
for i, label in enumerate(text_labels):
texts.append(
plt.text(
reduced_embeddings[i, 0],
reduced_embeddings[i, 1],
label,
fontdict=font_style,
fontsize=15,
)
)
# prevent overlapping labels
adjust_text(texts, arrowprops=dict(arrowstyle="->", color="red"))
distances = euclidean_distances(reduced_embeddings)
np.fill_diagonal(distances, np.inf) # exclude cases where the distance is 0
n = distances.shape[0]
distances_list = [
(distances[i, j], (i, j)) for i in range(n) for j in range(i + 1, n)
]
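# Sort the pairs by distance in descending order so the most dissimilar recipe
# pairs appear first in the legend.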
distances_list.sort(reverse=True)
legend_handles = []
for dist, (i, j) in distances_list:
(line,) = plt.plot(
[reduced_embeddings[i, 0], reduced_embeddings[j, 0]],
[reduced_embeddings[i, 1], reduced_embeddings[j, 1]],
"gray",
linestyle="--",
alpha=0.3,
)
legend_handles.append(line)
legend_labels = [
f"{text_labels[i]} - {text_labels[j]}: {dist:.2f}"
for dist, (i, j) in distances_list
]
for i in range(len(reduced_embeddings)):
for j in range(i + 1, len(reduced_embeddings)):
plt.plot(
[reduced_embeddings[i, 0], reduced_embeddings[j, 0]],
[reduced_embeddings[i, 1], reduced_embeddings[j, 1]],
"gray",
linestyle="--",
alpha=0.5,
)
plt.legend(
legend_handles,
legend_labels,
title="Distances",
loc="center left",
bbox_to_anchor=(1, 0.5),
)
plt.tight_layout()
plt.title(
"2D Visualization of recipe similarities", fontsize=16, fontweight="bold"
)
plt.xlabel("PCA Component 1", fontdict=font_style)
plt.ylabel("PCA Component 2", fontdict=font_style)
if environment == "local":
plt.savefig(local_image_path + "recipes.png", bbox_inches="tight")
if environment == "astro":
plt.savefig(local_image_path + "recipes.png", bbox_inches="tight")
s3 = S3Hook(
aws_conn_id=aws_conn_id,
transfer_config_args=None,
extra_args=None,
).get_conn()
with open(local_image_path + "recipes.png", "rb") as data:
s3.upload_fileobj(
data,
aws_bucket_name,
"plots/cohere-demo/recipes.png",
ExtraArgs={"ContentType": "image/jpeg"},
)
os.remove(local_image_path + "recipes.png")
plt.close()
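# chain() runs the mapped embedding tasks first and only then the plotting task.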
chain(
get_embeddings,
plot_embeddings(
get_embeddings.output,
text_labels=countries_list,
local_image_path=LOCAL_IMAGE_PATH,
aws_conn_id=AWS_CONN_ID,
aws_bucket_name=AWS_BUCKET_NAME,
environment=ENVIRONMENT,
),
)
cohere_example()
| [] |
2024-01-10 | astronomer/webinar-ai-integrations | include~source_data~classification_examples.py | from cohere.responses.classify import Example
SENTIMENT_EXAMPLES = [
Example("I love this product", "positive"),
Example("The UI is great", "positive"),
Example("I ordered more for my friends", "positive"),
Example("I would buy this again", "positive"),
Example("I would recommend this to others", "positive"),
Example("I don't like the product", "negative"),
Example("I'm struggling", "negative"),
Example("The order was incorrect", "negative"),
Example("I want to return my item", "negative"),
Example("The item's material feels low quality", "negative"),
Example("The product was okay", "neutral"),
Example("I received five items in total", "neutral"),
Example("I bought it from the website", "neutral"),
Example("I used the product this morning", "neutral"),
Example("The product arrived yesterday", "neutral"),
] | [] |
2024-01-10 | Stahldavid/autocode | chat_fn~chat~camel~code_tree2.py | import logging
import os
import re
from queue import Queue
from dotenv import load_dotenv
import openai
import json
from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain.prompts import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
# Load environment variables from .env file
load_dotenv()
# Access the API key from the environment variable
openai_api_key = os.getenv('OPENAI_API_KEY')
openai.api_key = openai_api_key
# Set up logging
logging.basicConfig(level=logging.INFO)
import json
import openai
from code_search import similarity_search
query = "Variable impedance control for force feedback"
results_string = similarity_search(query)
print(results_string)
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k-0613",
messages=[
{
"role": "system",
"content": "You are an AI that decomposes complex code generation tasks into smaller, manageable sub-tasks. Each sub-task should be a independent file, should contain the name of the python file, and should contain the name of the file,description, all the functions and classes from the file, as well releted files."
},
{
"role": "user",
"content": f"Given the complex code generation task: 'Write a variable impedance control for force feedback using ros2, webots, webots_ros2 and ros2_control.', please decompose it into a detailed, numbered list of sub-tasks. Each sub-task should be a independent file should contain the name of the python file, description,all the functions and classes from the file, as well releted files. Make sure to devide the task into minimum 5 files. Try to make the code as readable as possible, encapsulating the code into functions and classes. Lets think step by step.\n\nThe following are the retrived documents from all the relevant repositories based on the query 'Variable impedance control for force feedback':\n{results_string}\nThese retrived functions from all the relevant repositories are helpfull but not fullfile the user task. Please use this context to help guide the task decompositionThese retrived functions from all the relevant repositories are helpfull but not fullfile the user task. Please use this context to help guide the task decomposition"
}
],
functions=[
{
"name": "generate_code",
"description": "Generates the code for multiple files, each described by a dictionary of attributes.",
"parameters": {
"type": "object",
"properties": {
"files": {
"type": "array",
"description": "An array of dictionaries, each representing a file. Each dictionary should include 'order' (the order of development), 'code_blocks' (an array of dictionaries detailing the code blocks in the file).",
"items": {
"type": "object",
"properties": {
"order": {
"type": "integer",
"description": "The order of development for the file."
},
"code_blocks": {
"type": "array",
"description": "An array of dictionaries, each detailing a code block in the file. Each dictionary should include 'type' (either 'function' or 'class'), 'name' (the name of the function or class), 'description' (a description of the block's purpose), 'content' (the details of the function or class, including function arguments or class methods, as applicable), and 'related_files' (an array of filenames that are related to the code block).",
"items": {
"type": "object",
"properties": {
"type": {
"type": "string",
"description": "The type of the code block, either 'function' or 'class'."
},
"name": {
"type": "string",
"description": "The name of the function or class."
},
"description": {
"type": "string",
"description": "A description of the block's purpose."
},
"content": {
"type": "string",
"description": "The details of the function or class, including arguments and methods as applicable."
},
}
},
"required": ["type", "name", "description", "content"]
}
}
},
"required": ["order", "code_blocks"]
}
}
},
"required": ["files"]
}
],
function_call={"name": "generate_code"}
)
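# Because function_call forces "generate_code", the reply's arguments field holds
# structured JSON describing each file rather than free-form text.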
reply_content = completion.choices[0]
print(reply_content)
args = reply_content["message"]['function_call']['arguments']
data = json.loads(args)
# Initialize an empty dictionary to store the files
files = {}
# Go through each file
for file in data["files"]:
# Create a new dictionary for this file
files[file["code_blocks"][0]["name"]] = {
"order": file["order"],
"code_blocks": file["code_blocks"],
}
# Sort the files dictionary based on the order of development
files = dict(sorted(files.items(), key=lambda item: item[1]['order']))
# Print the files dictionary
for filename, file_data in files.items():
print(f"Order of development: {file_data['order']}")
print(f"{filename}:")
for block in file_data['code_blocks']:
print(f" Code block type: {block['type']}")
print(f" Code block name: {block['name']}")
print(f" Code block description: {block['description']}")
print(f" Code block content: {block['content']}")
#print(f" Related files: {block['related_files']}")
files_string = ""
for filename, file_data in files.items():
files_string += f"Order of development: {file_data['order']}\n"
files_string += f"{filename}:\n"
for block in file_data['code_blocks']:
files_string += f" Code block type: {block['type']}\n"
files_string += f" Code block name: {block['name']}\n"
files_string += f" Code block description: {block['description']}\n"
files_string += f" Code block content: {block['content']}\n"
#files_string += f" Related files: {block['related_files']}\n"
completion2 = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k-0613",
messages=[
{
"role": "system",
"content": "You are an advanced AI with capabilities to analyze intricate code and pseudocode files. Based on this analysis, you provide recommendations for the most appropriate vectorstore repositories to extract relevant code snippets from. In addition, you generate search queries that could be utilized to fetch these helpful code samples."
},
{
"role": "user",
"content": f"I need your expertise to examine the provided code and pseudocode files. Your task is to pinpoint any issues, inefficiencies, and areas for potential enhancements. Here are the files you need to look into:\n\n{files_string}"
}
],
functions=[
{
"name": "analyze_code",
"description": "This function performs an analysis on the provided code files. It returns a list of suitable repositories for fetching relevant code samples and suggests respective search queries for each repository.",
"parameters": {
"type": "object",
"properties": {
"files": {
"type": "array",
"items": {
"type": "object",
"properties": {
"file_name": {
"type": "string",
"description": "The name of the code file."
},
"repositories": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "The name of the repository.",
"enum": ['db_ros2_control', 'db_ros2', 'db_webots_ros2', 'db_webots']
},
"query": {
"type": "string",
"description": "The search query designed to fetch code samples from the specified repository."
}
},
"required": ["name", "query"]
},
"description": "An array of objects, each representing a repository and a corresponding search query."
}
},
"required": ["file_name", "repositories"]
},
"description": "An array of objects, each representing a code file that needs to be analyzed."
}
},
"required": ["files"]
}
}
],
function_call={"name": "analyze_code"}
)
reply_content2 = completion2.choices[0]
print(reply_content2)
args2 = reply_content2["message"]['function_call']['arguments']
data2 = json.loads(args2)
print(data2)
# Define the list of directories to search in
directories = ['db_ros2_control', 'db_ros2', 'db_webots_ros2', 'db_webots']
# Create an empty dictionary to store the results
results = {}
# Loop through each file in the data2 dictionary
for file_data in data2["files"]:
file_name = file_data["file_name"]
# Loop through each recommended repository/query pair for the file
for repository in file_data["repositories"]:
query = repository["query"]
# Call the similarity_search function and save the result as a string
result = similarity_search(query, directories)
# Store the result in the dictionary, using the filename_query as the key
results[f"{file_name}_{query}"] = result
# Create a dictionary to store the strings for each file
file_strings = {}
# Loop through each file in the files dictionary
for filename, file_data in files.items():
# Create a list to store the lines for this file
file_lines = []
file_lines.append(f"Order of development: {file_data['order']}")
file_lines.append(f"{filename}:")
for block in file_data['code_blocks']:
file_lines.append(f" Code block type: {block['type']}")
file_lines.append(f" Code block name: {block['name']}")
file_lines.append(f" Code block description: {block['description']}")
file_lines.append(f" Code block content: {block['content']}")
#file_lines.append(f" Related files: {block['related_files']}")
# Loop through the results dictionary to find the results for this file
for key, value in results.items():
# If the filename is in the key of the results dictionary, add the query and its result to the lines
if filename in key:
file_lines.append(f" Query: {key.replace(filename+'_', '')}")
file_lines.append(f" Query result: {value}")
# Join the lines for this file into a single string and add it to the file_strings dictionary
file_strings[filename] = '\n'.join(file_lines)
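# At this point every entry in file_strings bundles a file's planned code blocks
# with the retrieved reference snippets for its queries.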
# Loop through each file_string in the file_strings dictionary
for filename, file_string in file_strings.items():
# Create a new completion with the file_string as the user message content
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=[
{
"role": "system",
"content": "You are an AI Code Optimization Model that can optimize code, complete #TODOs, recommend best practices, and learn from relevant code repositories. Your main task is to analyze the given code, which performs semantic search queries on a vectorstore using OpenAI embeddings, and apply the insights from the search results to refine the code. The final goal is to produce a fully functional and optimized code file that can be used as part of a larger project, specifically a variable impedance control code for force feedback"
},
{
"role": "user",
"content": "I am working on a coding project that aims to develop a variable impedance control code for force feedback. I need your expertise to improve my code. Here is the current version of one file of my code, along with the semantic search queries I’ve done on a vectorstore using OpenAI embeddings, and the results of these queries:\n\n{file_string}\n\nCan you improve this code, using the suggestions from the semantic search results? Please write the improved and complete code file. Please complete and improve the file based on the context\n\n{file_string}\n\n"
}
],
)
# Print or process the completion as needed
print(f"For file: {filename}, the improved code is: {completion.choices[0].message['content']}\n")
| [
"You are an AI that decomposes complex code generation tasks into smaller, manageable sub-tasks. Each sub-task should be a independent file, should contain the name of the python file, and should contain the name of the file,description, all the functions and classes from the file, as well releted files.",
"You are an AI Code Optimization Model that can optimize code, complete #TODOs, recommend best practices, and learn from relevant code repositories. Your main task is to analyze the given code, which performs semantic search queries on a vectorstore using OpenAI embeddings, and apply the insights from the search results to refine the code. The final goal is to produce a fully functional and optimized code file that can be used as part of a larger project, specifically a variable impedance control code for force feedback",
"Given the complex code generation task: 'Write a variable impedance control for force feedback using ros2, webots, webots_ros2 and ros2_control.', please decompose it into a detailed, numbered list of sub-tasks. Each sub-task should be a independent file should contain the name of the python file, description,all the functions and classes from the file, as well releted files. Make sure to devide the task into minimum 5 files. Try to make the code as readable as possible, encapsulating the code into functions and classes. Lets think step by step.\n\nThe following are the retrived documents from all the relevant repositories based on the query 'Variable impedance control for force feedback':\nPLACEHOLDER\nThese retrived functions from all the relevant repositories are helpfull but not fullfile the user task. Please use this context to help guide the task decompositionThese retrived functions from all the relevant repositories are helpfull but not fullfile the user task. Please use this context to help guide the task decomposition",
"{'type': 'string', 'description': 'The details of the function or class, including arguments and methods as applicable.'}",
"You are an advanced AI with capabilities to analyze intricate code and pseudocode files. Based on this analysis, you provide recommendations for the most appropriate vectorstore repositories to extract relevant code snippets from. In addition, you generate search queries that could be utilized to fetch these helpful code samples.",
"I need your expertise to examine the provided code and pseudocode files. Your task is to pinpoint any issues, inefficiencies, and areas for potential enhancements. Here are the files you need to look into:\n\nPLACEHOLDER",
"I am working on a coding project that aims to develop a variable impedance control code for force feedback. I need your expertise to improve my code. Here is the current version of one file of my code, along with the semantic search queries I’ve done on a vectorstore using OpenAI embeddings, and the results of these queries:\n\n{file_string}\n\nCan you improve this code, using the suggestions from the semantic search results? Please write the improved and complete code file. Please complete and improve the file based on the context\n\n{file_string}\n\n"
] |
2024-01-10 | Stahldavid/autocode | final.py | # final
import gradio as gr
import zipfile
import shutil
import pandas as pd
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings

# Embedding function used when reloading the persisted Chroma index
# (assumed to be OpenAI embeddings).
embeddings = OpenAIEmbeddings()
def create_dataframe(source_documents):
data = []
for doc in source_documents:
page_num = doc.metadata['page']
doc_name = doc.metadata['source']
data.append({'Page Number': page_num, 'Document Name': doc_name})
df = pd.DataFrame(data)
return df.to_string()
def unir_textos_documentos(source_documents):
textos = [documento.page_content for documento in source_documents]
texto_unido = ' '.join(textos)
return texto_unido
index = None
source_documents_chatbot_messages = []
def chat(chat_history, message):
global source_documents_chatbot_messages
# Retrieve the two most similar chunks for each question and return them as sources.
qa = ConversationalRetrievalChain.from_llm(ChatOpenAI(temperature=0), index.as_retriever(search_kwargs={"k": 2}), return_source_documents=True)
# Get answer from the model
result = qa({"question": message, "chat_history": chat_history})
answer = result["answer"]
chat_history.append((message, answer))
source_documents = result['source_documents']
source_documents_text = unir_textos_documentos(source_documents)
df_string = create_dataframe(source_documents)
return chat_history, source_documents_text, df_string
def build_the_bot(upload_arquivos):
dbb_folder = 'dbb'
shutil.rmtree(dbb_folder, ignore_errors=True)
with zipfile.ZipFile(upload_arquivos.name, 'r') as zip_ref:
zip_ref.extractall(dbb_folder)
global index
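# Rebuild the Chroma index from the extracted persistence folder; the embedding
# function must match the one used when the index was created (assumed here to be
# OpenAI embeddings).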
index = Chroma(persist_directory=dbb_folder, embedding_function=embeddings)
chat_history = [("Bot", "Index saved successfully!!!")]
return chat_history
def clear_chat_history(chatbot):
chatbot.clear()
chat_history = []
return chat_history
with gr.Blocks(css="#chatbot .overflow-y-auto{height:500px}") as demo:
state = gr.State("")
with gr.Row():
with gr.Column(scale=0.75):
with gr.Tab("Chatbot"):
chatbot = gr.Chatbot([], elem_id="chatbot").style(height=750)
messagem = gr.Textbox(show_label=False, placeholder="Enter text and press enter")
with gr.Column(scale=0.5):
with gr.Tab("Source Documents"):
with gr.Row():
source_documents_chatbot = gr.Textbox([], elem_id="source_documents_text").style(height=750)
with gr.Row():
df_textbox = gr.Textbox([], elem_id="df_textbox").style(height=250)
messagem.submit(chat, [chatbot, messagem], [chatbot, source_documents_chatbot, df_textbox])
messagem.submit(lambda: "", None, messagem)
with gr.Row():
with gr.Column(scale=0.85):
btn = gr.UploadButton("📁", directory=True)
btn.upload(build_the_bot, btn, chatbot)
with gr.Column(scale=0.15, min_width=0):
clear_btn = gr.Button("Clear")
clear_btn.click(lambda: None, None, chatbot, queue=False)
demo.launch(debug=True)
| [] |
2024-01-10 | Stahldavid/autocode | chat_fn~chat~project~reviewer_agent.py | # Import langchain modules
from langchain.agents import BaseMultiActionAgent
from langchain.agents import AgentActionOutputParser
from langchain.tools import CodeReviewerTool
# Import language models
from openai_request_llm import OpenAI # Custom LLM based on OpenAI
# Define prompt template
prompt_template = """
This is a reviewer agent that can perform different actions related to code reviewing and improvement suggestions using CodeReviewer tool.
You can ask me to review any code and specify the language you want.
To specify the language, use the syntax: #language: code
For example:
#python: def factorial(n):
if n == 0:
return 1
else:
return n * factorial(n-1)
#javascript: function reverseString(str) {
return str.split("").reverse().join("");
}
#c++: int sum(int a, int b) {
return a + b;
}
You can also chat with me about code quality, best practices, or anything else.
For example:
How can I improve my coding skills?
What are some common coding mistakes to avoid?
Hello, how are you today?
"""
# Choose language model
language_model = OpenAI()
# Define stop sequence
stop_sequence = "\n"
# Define output parser
output_parser = AgentActionOutputParser()
# Load tools
codereviewer_tool = CodeReviewerTool()
# Create custom agent class by subclassing BaseMultiActionAgent
class ReviewerAgent(BaseMultiActionAgent):
def __init__(self, prompt_template, language_model, stop_sequence, output_parser):
super().__init__(prompt_template, language_model, stop_sequence, output_parser)
def decide_action(self, user_input):
# Override this method to decide which action to take based on user input
# You can use any logic or condition you want
# Return an action name and an action input
# If user input starts with #language:, use codereviewer tool to review code and provide feedback or suggestions for that language and code
if user_input.startswith("#"):
action_name = "codereviewer"
action_input = user_input.strip()
return action_name, action_input
# Otherwise, chat with user using language model
else:
action_name = "chat"
action_input = user_input.strip()
return action_name, action_input
def execute_action(self, action_name, action_input):
# Override this method to execute the action using the appropriate tool or language model
# You can use any logic or condition you want
# Return an output string
# If action name is codereviewer, use codereviewer tool to review code and provide feedback or suggestions for the given language and code
if action_name == "codereviewer":
output = codereviewer_tool.run(action_input)
# If action name is chat, use language model to generate a chat response
else:
output = self.language_model.generate(self.prompt_template + "\n" + self.stop_sequence + "\n" + "User: " + action_input + "\n" + "Agent:", stop=self.stop_sequence)
# Return the output string
return output
def parse_output(self, output):
# Override this method to parse the output using the output parser
# You can use any logic or condition you want
# Return a parsed output object
# Use the output parser to parse the output string into an object with attributes such as text and type
parsed_output = self.output_parser.parse(output)
# Return the parsed output object
return parsed_output | [
"\nThis is a reviewer agent that can perform different actions related to code reviewing and improvement suggestions using CodeReviewer tool.\nYou can ask me to review any code and specify the language you want.\nTo specify the language, use the syntax: #language: code\nFor example:\n#python: def factorial(n):\n if n == 0:\n return 1\n else:\n return n * factorial(n-1)\n#javascript: function reverseString(str) {\n return str.split(\"\").reverse().join(\"\");\n}\n#c++: int sum(int a, int b) {\n return a + b;\n}\n\nYou can also chat with me about code quality, best practices, or anything else.\nFor example:\nHow can I improve my coding skills?\nWhat are some common coding mistakes to avoid?\nHello, how are you today?\n"
] |
2024-01-10 | Stahldavid/autocode | chat_fn~chat~project~approach_agent.py | # Import langchain modules
from langchain.agents import BaseMultiActionAgent
from langchain.agents import AgentActionOutputParser
from langchain.tools import SerpAPIWrapper
from langchain.tools import DocstoreExplorer
# Import language models
from openai_request_llm import OpenAI # Custom LLM based on OpenAI
# Define prompt template
prompt_template = """
This is an approach agent that can perform different actions related to finding the best approach for solving a given problem.
You can ask me to solve any problem and I will try to answer or delegate the task to another tool.
To use standard search, use the syntax: #serp_api: query
To use document store explorer, use the syntax: #docstore_explorer: query
To chat with me, just type anything else.
For example:
#serp_api: How to implement a binary search algorithm in Python?
#docstore_explorer: What are some applications of natural language processing?
How do you solve a sudoku puzzle?
"""
# Choose language model
language_model = OpenAI()
# Define stop sequence
stop_sequence = "\n"
# Define output parser
output_parser = AgentActionOutputParser()
# Load tools
serp_api = SerpAPIWrapper()
docstore_explorer = DocstoreExplorer()
# Create custom agent class by subclassing BaseMultiActionAgent
class ApproachAgent(BaseMultiActionAgent):
def __init__(self, prompt_template, language_model, stop_sequence, output_parser):
super().__init__(prompt_template, language_model, stop_sequence, output_parser)
def decide_action(self, user_input):
# Override this method to decide which action to take based on user input
# You can use any logic or condition you want
# Return an action name and an action input
# If user input starts with #serp_api:, use standard search tool
if user_input.startswith("#serp_api:"):
action_name = "serp_api"
action_input = user_input.replace("#serp_api:", "").strip()
return action_name, action_input
# If user input starts with #docstore_explorer:, use document store explorer tool
elif user_input.startswith("#docstore_explorer:"):
action_name = "docstore_explorer"
action_input = user_input.replace("#docstore_explorer:", "").strip()
return action_name, action_input
# Otherwise, chat with user using language model
else:
action_name = "chat"
action_input = user_input.strip()
return action_name, action_input
def execute_action(self, action_name, action_input):
# Override this method to execute the action using the appropriate tool or language model
# You can use any logic or condition you want
# Return an output string
# If action name is serp_api, use serp_api tool to perform standard search
if action_name == "serp_api":
output = serp_api.run(action_input)
# If action name is docstore_explorer, use docstore_explorer tool to perform document store exploration
elif action_name == "docstore_explorer":
output = docstore_explorer.run(action_input)
# If action name is chat, use language model to generate a chat response
else:
output = self.language_model.generate(self.prompt_template + "\n" + self.stop_sequence + "\n" + "User: " + action_input + "\n" + "Agent:", stop=self.stop_sequence)
# Return the output string
return output
def parse_output(self, output):
# Override this method to parse the output using the output parser
# You can use any logic or condition you want
# Return a parsed output object
# Use the output parser to parse the output string into an object with attributes such as text and type
parsed_output = self.output_parser.parse(output)
# Return the parsed output object
return parsed_output
| [
"\nThis is an approach agent that can perform different actions related to finding the best approach for solving a given problem.\nYou can ask me to solve any problem and I will try to answer or delegate the task to another tool.\nTo use standard search, use the syntax: #serp_api: query\nTo use document store explorer, use the syntax: #docstore_explorer: query\nTo chat with me, just type anything else.\nFor example:\n#serp_api: How to implement a binary search algorithm in Python?\n#docstore_explorer: What are some applications of natural language processing?\nHow do you solve a sudoku puzzle?\n"
] |
2024-01-10 | Stahldavid/autocode | chat_fn~chat~project~code_agent.py | # Import langchain modules
from langchain.agents import BaseMultiActionAgent
from langchain.agents import AgentActionOutputParser
from langchain.tools import GraphCodeBERTTool
from langchain.tools import UnixcoderTool
from langchain.tools import CodeReviewerTool
# Import language models
from openai_request_llm import OpenAI # Custom LLM based on OpenAI
# Define prompt template
prompt_template = """
This is a code agent that can perform different actions related to generating and understanding code using GraphCodeBERT, Unixcoder, and other code-related tools.
You can ask me to generate code for any task and specify the language you want.
To specify the language, use the syntax: #language: task
For example:
#python: write a function that returns the factorial of a number
#javascript: reverse a string
#c++: write a function that returns the sum of two numbers
You can also ask me to understand code and translate it to natural language by using the syntax: understand #language: code
For example:
understand #python: def fib(n):
if n < 2:
return n
else:
return fib(n-1) + fib(n-2)
You can also ask me to review code and provide feedback or suggestions by using the syntax: review #language: code
For example:
review #python: def factorial(n):
if n == 0:
return 1
else:
return n * factorial(n-1)
"""
# Choose language model
language_model = OpenAI()
# Define stop sequence
stop_sequence = "\n"
# Define output parser
output_parser = AgentActionOutputParser()
# Load tools
graphcodebert_tool = GraphCodeBERTTool()
unixcoder_tool = UnixcoderTool()
codereviewer_tool = CodeReviewerTool()
# Create custom agent class by subclassing BaseMultiActionAgent
class CodeAgent(BaseMultiActionAgent):
def __init__(self, prompt_template, language_model, stop_sequence, output_parser):
super().__init__(prompt_template, language_model, stop_sequence, output_parser)
def decide_action(self, user_input):
# Override this method to decide which action to take based on user input
# You can use any logic or condition you want
# Return an action name and an action input
# If user input starts with #language:, use graphcodebert tool to generate code for that language and task
if user_input.startswith("#"):
action_name = "graphcodebert"
action_input = user_input.strip()
return action_name, action_input
# If user input starts with understand #language:, use unixcoder tool to understand code and translate it to natural language
elif user_input.startswith("understand #"):
action_name = "unixcoder"
action_input = user_input.replace("understand ", "").strip()
return action_name, action_input
# If user input starts with review #language:, use codereviewer tool to review code and provide feedback or suggestions
elif user_input.startswith("review #"):
action_name = "codereviewer"
action_input = user_input.replace("review ", "").strip()
return action_name, action_input
# Otherwise, chat with user using language model
else:
action_name = "chat"
action_input = user_input.strip()
return action_name, action_input
def execute_action(self, action_name, action_input):
# Override this method to execute the action using the appropriate tool or language model
# You can use any logic or condition you want
# Return an output string
# If action name is graphcodebert, use graphcodebert tool to generate code for the given language and task
if action_name == "graphcodebert":
output = graphcodebert_tool.run(action_input)
# If action name is unixcoder, use unixcoder tool to understand code and translate it to natural language
elif action_name == "unixcoder":
output = unixcoder_tool.run(action_input)
# If action name is codereviewer, use codereviewer tool to review code and provide feedback or suggestions
elif action_name == "codereviewer":
output = codereviewer_tool.run(action_input)
# If action name is chat, use language model to generate a chat response
else:
output = self.language_model.generate(self.prompt_template + "\n" + self.stop_sequence + "\n" + "User: " + action_input + "\n" + "Agent:", stop=self.stop_sequence)
# Return the output string
return output
def parse_output(self, output):
# Override this method to parse the output using the output parser
# You can use any logic or condition you want
# Return a parsed output object
# Use the output parser to parse the output string into an object with attributes such as text and type
parsed_output = self.output_parser.parse(output)
# Return the parsed output object
return parsed_output | [
"\nThis is a code agent that can perform different actions related to generating and understanding code using GraphCodeBERT, Unixcoder, and other code-related tools.\nYou can ask me to generate code for any task and specify the language you want.\nTo specify the language, use the syntax: #language: task\nFor example:\n#python: write a function that returns the factorial of a number\n#javascript: reverse a string\n#c++: write a function that returns the sum of two numbers\n\nYou can also ask me to understand code and translate it to natural language by using the syntax: understand #language: code\nFor example:\nunderstand #python: def fib(n):\n if n < 2:\n return n\n else:\n return fib(n-1) + fib(n-2)\n\nYou can also ask me to review code and provide feedback or suggestions by using the syntax: review #language: code\nFor example:\nreview #python: def factorial(n):\n if n == 0:\n return 1\n else:\n return n * factorial(n-1)\n"
] |
2024-01-10 | Stahldavid/autocode | chat_fn~chat~camel~Code_Tree.py | import logging
import os
import re
from queue import Queue
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain.prompts import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
# Load environment variables from .env file
load_dotenv()
# Access the API key from the environment variable
openai_api_key = os.getenv('OPENAI_API_KEY')
# Set up logging
logging.basicConfig(level=logging.INFO)
# Define constants for evaluation criteria
MAX_EVALUATIONS = 11
EVALUATION_SCORE_THRESHOLD = 7
# Define templates for system and human prompts
decomposition_system_template = SystemMessagePromptTemplate.from_template(
"You are an AI that decomposes complex code generation tasks into smaller, manageable sub-tasks. Please output the decomposed plan as a detailed, markdown numbered list of steps."
)
decomposition_human_template = HumanMessagePromptTemplate.from_template(
"Given the complex code generation task: '{task}', please decompose it into a detailed, numbered list of sub-tasks."
)
generation_system_template = SystemMessagePromptTemplate.from_template(
"In your capacity as an AI, your task is to generate code that aligns with a given set of instructions. While developing this code, you should take into account the requirements for readability (the code should be easy to understand), efficiency (the code should be optimized for performance), and correctness (the code should accurately fulfill the intended purpose)."
)
generation_human_template = HumanMessagePromptTemplate.from_template(
"Based on the provided instruction: {step}, your task is to generate a piece of code. The resulting code should meet the following criteria: it should be readable, allowing other developers to easily understand its logic; it should be efficient, performing the task with minimal use of resources; and it should be correct, accurately fulfilling the instruction's purpose."
)
ranking_system_template = SystemMessagePromptTemplate.from_template(
"As an AI, your role is to evaluate and rank multiple proposed code solutions based on a set of quality metrics. The ranking should be expressed as a list of scores in a descending order, where each score is a numerical value between 0 and 10. The scores should reflect considerations such as the code's readability (how easy it is for a human to understand), correctness (whether the code accomplishes what it intends to), efficiency (how optimally the code uses resources), and overall quality. Please present the results in the format 'score : n'."
)
ranking_human_template = HumanMessagePromptTemplate.from_template(
"Your task is to evaluate and rank the following code sequences based on their quality scores. When performing the ranking, you should consider factors such as readability (is the code easy to comprehend?), correctness (does the code do what it's supposed to do?), efficiency (how optimally does the code use resources?), and overall quality. Please evaluate each piece of code and assign it a score between 0 and 10. \n\n{generated}\n\nOnce you've assessed each code, compile the scores in a descending order (highest to lowest) in the following format: 'score : n'."
)
generation_loop_system_template = SystemMessagePromptTemplate.from_template(
"You are an AI that develops code by taking into account not only the current instruction but also the context of previous instructions and pieces of code. The generated code should be seen as an evolution of the past codes, in direct response to the given instruction. It should be efficient, readable, and above all, correct."
)
generation_loop_human_template = HumanMessagePromptTemplate.from_template(
"""Generate code for the following instruction: {step}.
This task is part of a larger sequence of coding instructions, and hence, you should take into account the context of previous instructions and codes when developing your solution.
The relevant information from the previous stages is as follows:
"""
)
# Initialize ChatOpenAI instances with different temperatures and the same model
decomposition_llm = ChatOpenAI(temperature=0.7, model_name='gpt-3.5-turbo')
generation_llm = ChatOpenAI(temperature=0.7, model_name='gpt-3.5-turbo')
ranking_llm = ChatOpenAI(temperature=0.4, model_name='gpt-3.5-turbo')
# Initialize LLMChain instances with different prompts
decomposition_chain = LLMChain(llm=decomposition_llm, prompt=ChatPromptTemplate.from_messages([decomposition_system_template, decomposition_human_template]))
generation_chain = LLMChain(llm=generation_llm, prompt=ChatPromptTemplate.from_messages([generation_system_template, generation_human_template]))
ranking_chain = LLMChain(llm=ranking_llm, prompt=ChatPromptTemplate.from_messages([ranking_system_template, ranking_human_template]))
generation_loop_chain = LLMChain(llm=generation_llm, prompt=ChatPromptTemplate.from_messages([generation_loop_system_template, generation_loop_human_template]))
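# Each chain above pairs a ChatOpenAI model with its system/human prompt pair; the
# ranking chain runs at a lower temperature (0.4) so its scores vary less between runs.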
task = "Write a variable impedance control for force feedback using ros2, webots, webots_ros2 and ros2_control."
print(decomposition_chain.run(task))
# task = "Write a variable impedance control for force feedback using ros2, webots, webots_ros2 and ros2_control."
# j = 1
# # Decompose the task into a markdown list
# markdown_list = decomposition_chain.run(task)
# # Define the regular expression pattern to match the list items
# pattern = r'(\d+)\.\s+(.*)'
# # Compile the regex pattern once
# regex = re.compile(pattern)
# # Use a default dict to avoid KeyError
# from collections import defaultdict
# steps = defaultdict(str)
# # Find all matches of the pattern in the markdown list
# matches = regex.findall(markdown_list)
# # Convert the matches into a dictionary
# for match in matches:
# steps[int(match[0])] = match[1]
# # Define an empty dictionary to store the generated output for each step
# generated = {}
# # Generate the output for step 1 four times using the LLMChain
# for i in range(1, 5):
# output = generation_chain.run(steps[j])
# generated[i] = output
# # Convert dictionary to string with separator and indicators
# generated_string = ""
# for i, code in generated.items():
# generated_string += f"\n\n{'='*50}\nCode {i}:\n{code}"
# # Pass the generated code sequences to the ranking_chain
# ranking = ranking_chain.run(generated_string)
# # Extract code indicators and scores from ranking string
# pattern = r"Code (\d+):\s*(\d+\.?\d*)"
# matches = re.findall(pattern, ranking)
# # Store code indicators and scores in dictionary of lists
# ranked = {float(match[1]): int(match[0]) for match in matches}
# # Get the highest score
# highest_score = max(ranked.keys())
# # Get the code(s) for the highest score
# highest_codes = ranked[highest_score]
# prev_code = ""
# prev_instruction = ""
# actual_instruction = ""
# j=1
# # Extract code indicators and scores from ranking string
# pattern = r"Code (\d+):\s*(\d+\.?\d*)"
# matches = re.findall(pattern, ranking)
# # Store code indicators and scores in dictionary of lists
# ranked = {float(match[1]): int(match[0]) for match in matches}
# # Get the highest score
# highest_score = max(matches, key=lambda x: float(x[1]))
# # Get the code(s) for the highest score
# highest_code = generated[highest_score[0]]
# highest_code_str = generated[highest_score]
# prev_code = prev_code + highest_code_str
# k = j
# j = j + 1
# prev_instruction = steps[k] + prev_instruction
# actual_instruction = steps[j]
# while j <= 8:
# highest_code_str_memory = f"\n\n{'='*50}\nHighest Code:\n{highest_code_str}\n\n{'='*50}\nPrevious Code:\n{prev_code}\n\n{'='*50}\nPrevious Instruction:\n{prev_instruction}n\n{'='*50}\nActual Instruction:\n{actual_instruction}"
# generated_new = generation_loop_chain.run(highest_code_str_memory)
# # Define an empty dictionary to store the generated output for each step
# generated_new = {}
# # Generate the output for step 1 four times using the LLMChain
# for i in range(1, 5):
# output = generation_loop_chain.run(highest_code_str_memory)
# generated_new[i] = output
# # Convert dictionary to string with separator and indicators
# generated_string = ""
# for i, code in generated_new.items():
# generated_string += f"\n\n{'='*50}\nCode {i}:\n{code}"
# # Pass the generated code sequences to the ranking_chain
# ranking = ranking_chain.run(generated_string)
# # Store code indicators and scores in dictionary of lists
# ranked = {float(match[1]): int(match[0]) for match in matches}
# # Get the highest score
# highest_score = max(ranked.keys())
# # Get the code(s) for the highest score
# highest_score = ranked[highest_score]
# # Select just the first code
# highest_score = highest_score[0]
# highest_code_str = generated_new[highest_score]
# prev_code = prev_code + highest_code_str
# k = j
# j = j + 1
# prev_instruction = steps[k] + prev_instruction
# actual_instruction = steps[j]
# highest_code_str_memory = f"\n\n{'='*50}\nHighest Code:\n{highest_code_str}\n\n{'='*50}\nPrevious Code:\n{prev_code}\n\n{'='*50}\nPrevious Instruction:\n{prev_instruction}n\n{'='*50}\nActual Instruction:\n{actual_instruction}"
# print("##########################################################")
# print(highest_code_str_memory)
| [
"You are an AI that decomposes complex code generation tasks into smaller, manageable sub-tasks. Please output the decomposed plan as a detailed, markdown numbered list of steps.",
"As an AI, your role is to evaluate and rank multiple proposed code solutions based on a set of quality metrics. The ranking should be expressed as a list of scores in a descending order, where each score is a numerical value between 0 and 10. The scores should reflect considerations such as the code's readability (how easy it is for a human to understand), correctness (whether the code accomplishes what it intends to), efficiency (how optimally the code uses resources), and overall quality. Please present the results in the format 'score : n'.",
"Based on the provided instruction: {step}, your task is to generate a piece of code. The resulting code should meet the following criteria: it should be readable, allowing other developers to easily understand its logic; it should be efficient, performing the task with minimal use of resources; and it should be correct, accurately fulfilling the instruction's purpose.",
"Your task is to evaluate and rank the following code sequences based on their quality scores. When performing the ranking, you should consider factors such as readability (is the code easy to comprehend?), correctness (does the code do what it's supposed to do?), efficiency (how optimally does the code use resources?), and overall quality. Please evaluate each piece of code and assign it a score between 0 and 10. \n\n{generated}\n\nOnce you've assessed each code, compile the scores in a descending order (highest to lowest) in the following format: 'score : n'.",
"[PLACEHOLDER, PLACEHOLDER]",
"You are an AI that develops code by taking into account not only the current instruction but also the context of previous instructions and pieces of code. The generated code should be seen as an evolution of the past codes, in direct response to the given instruction. It should be efficient, readable, and above all, correct.",
"Given the complex code generation task: '{task}', please decompose it into a detailed, numbered list of sub-tasks.",
"In your capacity as an AI, your task is to generate code that aligns with a given set of instructions. While developing this code, you should take into account the requirements for readability (the code should be easy to understand), efficiency (the code should be optimized for performance), and correctness (the code should accurately fulfill the intended purpose).",
"Generate code for the following instruction: {step}. \n\n This task is part of a larger sequence of coding instructions, and hence, you should take into account the context of previous instructions and codes when developing your solution. \n\n The relevant information from the previous stages is as follows:\n\n \n "
] |
2024-01-10 | Stahldavid/autocode | chat_fn~chat~camel~auto_code.py |
# %%
import logging
import os
import re
from queue import Queue
from dotenv import load_dotenv
import openai
import json
from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain.prompts import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
# Load environment variables from .env file
load_dotenv()
# Access the API key from the environment variable
openai_api_key = os.getenv('OPENAI_API_KEY')
openai.api_key = openai_api_key
# Set up logging
logging.basicConfig(level=logging.INFO)
import json
import openai
from code_search import similarity_search
query = "Variable impedance control for force feedback"
results_string = similarity_search(query)
print(results_string)
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k-0613",
messages=[
{
"role": "system",
"content": "You are an AI that decomposes complex code generation tasks into smaller, manageable sub-tasks. Each sub-task should be a independent file, should contain the name of the python file, and should contain the name of the file,description, all the functions and classes from the file, as well releted files."
},
{
"role": "user",
"content": f"Given the complex code generation task: 'Write a variable impedance control for force feedback using ros2, webots, webots_ros2 and ros2_control.', please decompose it into a detailed, numbered list of sub-tasks. Each sub-task should be a independent file should contain the name of the python file, description,all the functions and classes from the file, as well releted files. Make sure to devide the task into minimum 5 files. Try to make the code as readable as possible, encapsulating the code into functions and classes. Lets think step by step.\n\nThe following are the retrived documents from all the relevant repositories based on the query 'Variable impedance control for force feedback':\n{results_string}\nThese retrived functions from all the relevant repositories are helpfull but not fullfile the user task. Please use this context to help guide the task decompositionThese retrived functions from all the relevant repositories are helpfull but not fullfile the user task. Please use this context to help guide the task decomposition"
}
],
functions=[
{
"name": "generate_code",
"description": "Generates the code for multiple files, each described by a dictionary of attributes.",
"parameters": {
"type": "object",
"properties": {
"files": {
"type": "array",
"description": "An array of dictionaries, each representing a file. Each dictionary should include 'order' (the order of development), 'code_blocks' (an array of dictionaries detailing the code blocks in the file).",
"items": {
"type": "object",
"properties": {
"order": {
"type": "integer",
"description": "The order of development for the file."
},
"code_blocks": {
"type": "array",
"description": "An array of dictionaries, each detailing a code block in the file. Each dictionary should include 'type' (either 'function' or 'class'), 'name' (the name of the function or class), 'description' (a description of the block's purpose), 'content' (the details of the function or class, including function arguments or class methods, as applicable), and 'related_files' (an array of filenames that are related to the code block).",
"items": {
"type": "object",
"properties": {
"type": {
"type": "string",
"description": "The type of the code block, either 'function' or 'class'."
},
"name": {
"type": "string",
"description": "The name of the function or class."
},
"description": {
"type": "string",
"description": "A description of the block's purpose."
},
"content": {
"type": "string",
"description": "The details of the function or class, including arguments and methods as applicable."
},
}
},
"required": ["type", "name", "description", "content"]
}
}
},
"required": ["order", "code_blocks"]
}
}
},
"required": ["files"]
}
],
function_call={"name": "generate_code"}
)
reply_content = completion.choices[0]
print(reply_content)
args = reply_content["message"]['function_call']['arguments']
data = json.loads(args)
# Initialize an empty dictionary to store the files
files = {}
# Go through each file
for file in data["files"]:
# Create a new dictionary for this file
files[file["code_blocks"][0]["name"]] = {
"order": file["order"],
"code_blocks": file["code_blocks"],
}
# Sort the files dictionary based on the order of development
files = dict(sorted(files.items(), key=lambda item: item[1]['order']))
# Print the files dictionary
for filename, file_data in files.items():
print(f"Order of development: {file_data['order']}")
print(f"{filename}:")
for block in file_data['code_blocks']:
print(f" Code block type: {block['type']}")
print(f" Code block name: {block['name']}")
print(f" Code block description: {block['description']}")
print(f" Code block content: {block['content']}")
#print(f" Related files: {block['related_files']}")
files_string = ""
for filename, file_data in files.items():
files_string += f"Order of development: {file_data['order']}\n"
files_string += f"{filename}:\n"
for block in file_data['code_blocks']:
files_string += f" Code block type: {block['type']}\n"
files_string += f" Code block name: {block['name']}\n"
files_string += f" Code block description: {block['description']}\n"
files_string += f" Code block content: {block['content']}\n"
#files_string += f" Related files: {block['related_files']}\n"
# %%
completion2 = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k-0613",
messages=[
{
"role": "system",
"content": "You are an advanced AI with capabilities to analyze intricate code and pseudocode files. Based on this analysis, you provide recommendations for the most appropriate vectorstore repositories to extract relevant code snippets from. In addition, you generate search queries that could be utilized to fetch these helpful code samples."
},
{
"role": "user",
"content": f"I need your expertise to examine the provided code and pseudocode files. Your task is to pinpoint any issues, inefficiencies, and areas for potential enhancements. Here are the files you need to look into:\n\n{files_string}"
}
],
functions=[
{
"name": "analyze_code",
"description": "This function performs an analysis on the provided code files. It returns a list of suitable repositories for fetching relevant code samples and suggests a respective search query for each repository.",
"parameters": {
"type": "object",
"properties": {
"files": {
"type": "array",
"items": {
"type": "object",
"properties": {
"file_name": {
"type": "string",
"description": "The name of the code file."
},
"repository": {
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "The name of the repository.",
"enum": ['db_ros2_control', 'db_ros2', 'db_webots_ros2', 'db_webots']
},
"query": {
"type": "string",
"description": "The search query designed to fetch code samples from the specified repository."
}
},
"required": ["name", "query"],
"description": "An object representing a repository and a corresponding search query."
}
},
"required": ["file_name", "repository"]
},
"description": "An array of objects, each representing a code file that needs to be analyzed."
}
},
"required": ["files"]
}
}
],
function_call={"name": "analyze_code"}
)
reply_content2 = completion2.choices[0]
print(reply_content2)
args2 = reply_content2["message"]['function_call']['arguments']
data2 = json.loads(args2)
print(data2)
# Define the list of directories to search in
directories = ['db_ros2_control', 'db_ros2', 'db_webots_ros2', 'db_webots']
# Create an empty dictionary to store the results
results = {}
# Loop through each file in the data2 dictionary
for file_data in data2["files"]:
file_name = file_data["file_name"]
repository = file_data["repository"]
query = repository['query']
# Call the similarity_search function and save the result as a string
result = similarity_search(query, directories)
# Store the result in the dictionary, using the filename_query as the key
results[f"{file_name}_{query}"] = result
# Create a dictionary to store the strings for each file
file_strings = {}
# Loop through each file in the files dictionary
for filename, file_data in files.items():
# Create a list to store the lines for this file
file_lines = []
file_lines.append(f"Order of development: {file_data['order']}")
file_lines.append(f"{filename}:")
for block in file_data['code_blocks']:
file_lines.append(f" Code block type: {block['type']}")
file_lines.append(f" Code block name: {block['name']}")
file_lines.append(f" Code block description: {block['description']}")
file_lines.append(f" Code block content: {block['content']}")
# Loop through the results dictionary to find the results for this file
for key, value in results.items():
# If the filename is in the key of the results dictionary, add the query and its result to the lines
if filename in key:
file_lines.append(f"#####################################")
file_lines.append(f" Query:\n\n {key.replace(filename+'_', '')}")
file_lines.append(f"\n\n Query result:\n\n {value}")
# Join the lines for this file into a single string and add it to the file_strings dictionary
file_strings[filename] = '\n'.join(file_lines)
# %%
# Loop through each file_string in the file_strings dictionary
for filename, file_string in file_strings.items():
print(f"File: {filename}")
print(file_string)
# %%
# Loop through each file_string in the file_strings dictionary
for filename, file_string in file_strings.items():
print(f"File: {filename}")
print(file_string)
# %%
# Define a string to store the print outputs
output_string = ""
# Loop through each file_string in the file_strings dictionary
for filename, file_string in file_strings.items():
# Create a new completion with the file_string as the user message content
completion4 = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=[
{
"role": "system",
"content": "You are an AI Code Optimization Model that can optimize code, complete #TODOs, recommend best practices, and learn from relevant code repositories. Your main task is to analyze the given code, which performs semantic search queries on a vectorstore using OpenAI embeddings, and apply the insights from the search results to refine the code. The final goal is to produce a fully functional and optimized code file that can be used as part of a larger project, specifically a variable impedance control code for force feedback"
},
{
"role": "user",
"content": f"I am working on a coding project that aims to develop a variable impedance control code for force feedback. I need your expertise to improve my code. Here is the current version of one file of my code, along with the semantic search queries I’ve done on a vectorstore using OpenAI embeddings, and the results of these queries:\n\n{file_string}\n\nCan you improve this code, using the suggestions from the semantic search results? Please write the improved and complete code file. Please complete and improve the file based on the context."
}
],
)
new_files[filename] = completion4.choices[0].message['content']
# Append to the output_string instead of printing
output_string += f"For file: {filename}, the improved code is: {new_files[filename]}\n"
# Now you can print or further process the output_string as required
print(output_string)
# Print or process the completion as needed
#print(f"For file: {filename}, the improved code is: {new_files[filename]}\n")
# %%
print(output_string)
# %%
import json
import openai
import requests
from tenacity import retry, wait_random_exponential, stop_after_attempt
from termcolor import colored
GPT_MODEL = "gpt-3.5-turbo-0613"
# Load environment variables from .env file
load_dotenv()
# Access the API key from the environment variable
openai_api_key = os.getenv('OPENAI_API_KEY')
openai.api_key = openai_api_key
#First let's define a few utilities for making calls to the Chat Completions API and for maintaining and keeping track of the conversation state.
@retry(wait=wait_random_exponential(min=1, max=40), stop=stop_after_attempt(3))
def chat_completion_request(messages, functions=None, function_call=None, model=GPT_MODEL):
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer " + openai.api_key,
}
json_data = {"model": model, "messages": messages}
if functions is not None:
json_data.update({"functions": functions})
if function_call is not None:
json_data.update({"function_call": function_call})
try:
response = requests.post(
"https://api.openai.com/v1/chat/completions",
headers=headers,
json=json_data,
)
response.raise_for_status()
return response.json()
except Exception as e:
print("Unable to generate ChatCompletion response")
print(f"Exception: {e}")
return e
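# A minimal usage sketch for chat_completion_request (illustrative only; the messages
# below are a hypothetical example and not part of the original script):
# example_messages = [{"role": "user", "content": "Say hello"}]
# example_response = chat_completion_request(example_messages)
# if not isinstance(example_response, Exception):
#     print(example_response["choices"][0]["message"]["content"])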
# %%
def write_to_file(content, file_path):
"""Writes the given content to the file at the given file path.
Args:
content (str): The content to write to the file.
file_path (str): The path of the file to write to.
"""
with open(file_path, 'w') as f:
f.write(content)
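# Example usage (hypothetical path): write_to_file("print('hello')", "example_output.py")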
# %% [markdown]
#
# %%
import openai
import json
from termcolor import colored
from code_search import similarity_search
def pretty_print_conversation(messages):
role_to_color = {
"system": "red",
"user": "green",
"assistant": "blue",
"function": "magenta",
}
for message in messages:
color = role_to_color.get(message["role"], "white")
if message["role"] == "function":
print(colored(f'{message["role"]}: {message["name"]} output: {message["content"]}', color))
else:
print(colored(f'{message["role"]}: {message["content"]}', color))
def search_code_completion_request(messages, functions):
return openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k-0613",
messages=messages,
functions=functions,
        function_call="auto"
)
functions = [
{
"name": "similarity_search",
"description": "Vectorstore embedding semantic search for code functions. It receives a query and the directories to search, and returns the most similar code snippets to the queries.",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The search query designed to fetch code samples."
},
"directories": {
"type": "array",
"items": {
"type": "string",
"enum": ["db_webots", "db_ros2", "db_webots_ros2", "db_ros2_control"],
"description": "The directories in which to perform the search."
},
"description": "An array of directory names."
}
},
"required": ["query", "directories"]
}
},
{
"name": "write_to_file",
"description": "This function writes the given content to a file at a specified path. It creates the file if it doesn't already exist, and overwrites the file if it does.",
"parameters": {
"type": "object",
"properties": {
"content": {
"type": "string",
"description": "The content to be written to the file."
},
"file_path": {
"type": "string",
"description": "The path where the file should be saved. This should include the filename and extension."
}
},
"required": ["content", "file_path"]
}
}
]
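# For reference: when the model decides to call a function, the assistant message is
# expected to carry a "function_call" field shaped roughly like this (hypothetical values):
# {"role": "assistant", "content": None,
#  "function_call": {"name": "similarity_search",
#                    "arguments": "{\"query\": \"impedance control\", \"directories\": [\"db_ros2_control\"]}"}}
# The "arguments" value is a JSON string, which is why the loop below parses it with json.loads.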
messages = [
{
"role": "system",
"content": "You are a sophisticated AI that has the ability to analyze complex code and pseudocode documents. You are tasked with making necessary clarifications in a series of chat turns until you gather sufficient information to rewrite the code. You can utilize the 'search_code' function to fetch relevant code snippets based on semantic similarity, and subsequently improve the given file. After each search you should improve the file, do not make several calls to the function before improving the file."
},
{
"role": "user",
"content": f"I need your assistance in reviewing these code and pseudocode documents. You final goal is to rewrite and finish the code to make full functional The final goal is to create a project for variable impedance control providing force feedback. The project will use Webots, ROS2, webots_ros2, and ros2_control. You are required to identify potential problems, inefficiencies, and areas for improvements in these documents. Here are the documents you need to work on:\n\n{output_string}\n\nPlease first clarify any question that you need to finish the code with me. After you completely understand the goal of the user, use the search_code function to find relevant code that can help improve the code."
}
]
while True:
user_input = input("Enter message: ")
user_message = {
"role": "user",
"content": user_input
}
messages.append(user_message)
chat_response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k-0613",
messages=messages,
functions=functions,
)
assistant_message = chat_response['choices'][0].get('message')
if assistant_message:
messages.append(assistant_message)
pretty_print_conversation(messages)
if assistant_message.get("function_call"):
function_name = assistant_message["function_call"]["name"]
arguments = json.loads(assistant_message["function_call"]["arguments"])
if function_name == "similarity_search":
results = similarity_search(arguments['query'], arguments['directories'])
function_message = {
"role": "function",
"name": function_name,
"content": results
}
messages.append(function_message)
pretty_print_conversation(messages)
elif function_name == "write_to_file":
write_to_file(arguments['content'], arguments['file_path'])
function_message = {
"role": "function",
"name": function_name,
"content": f"File successfully written at {arguments['file_path']}"
}
messages.append(function_message)
pretty_print_conversation(messages)
| [
"You are an AI that decomposes complex code generation tasks into smaller, manageable sub-tasks. Each sub-task should be a independent file, should contain the name of the python file, and should contain the name of the file,description, all the functions and classes from the file, as well releted files.",
"File successfully written at PLACEHOLDER",
"You are a sophisticated AI that has the ability to analyze complex code and pseudocode documents. You are tasked with making necessary clarifications in a series of chat turns until you gather sufficient information to rewrite the code. You can utilize the 'search_code' function to fetch relevant code snippets based on semantic similarity, and subsequently improve the given file. After each search you should improve the file, do not make several calls to the function before improving the file.",
"I need your assistance in reviewing these code and pseudocode documents. You final goal is to rewrite and finish the code to make full functional The final goal is to create a project for variable impedance control providing force feedback. The project will use Webots, ROS2, webots_ros2, and ros2_control. You are required to identify potential problems, inefficiencies, and areas for improvements in these documents. Here are the documents you need to work on:\n\nPLACEHOLDER\n\nPlease first clarify any question that you need to finish the code with me. After you completely understand the goal of the user, use the search_code function to find relevant code that can help improve the code.",
"You are an AI Code Optimization Model that can optimize code, complete #TODOs, recommend best practices, and learn from relevant code repositories. Your main task is to analyze the given code, which performs semantic search queries on a vectorstore using OpenAI embeddings, and apply the insights from the search results to refine the code. The final goal is to produce a fully functional and optimized code file that can be used as part of a larger project, specifically a variable impedance control code for force feedback",
"Given the complex code generation task: 'Write a variable impedance control for force feedback using ros2, webots, webots_ros2 and ros2_control.', please decompose it into a detailed, numbered list of sub-tasks. Each sub-task should be a independent file should contain the name of the python file, description,all the functions and classes from the file, as well releted files. Make sure to devide the task into minimum 5 files. Try to make the code as readable as possible, encapsulating the code into functions and classes. Lets think step by step.\n\nThe following are the retrived documents from all the relevant repositories based on the query 'Variable impedance control for force feedback':\nPLACEHOLDER\nThese retrived functions from all the relevant repositories are helpfull but not fullfile the user task. Please use this context to help guide the task decompositionThese retrived functions from all the relevant repositories are helpfull but not fullfile the user task. Please use this context to help guide the task decomposition",
"{'type': 'string', 'description': 'The details of the function or class, including arguments and methods as applicable.'}",
"You are an advanced AI with capabilities to analyze intricate code and pseudocode files. Based on this analysis, you provide recommendations for the most appropriate vectorstore repositories to extract relevant code snippets from. In addition, you generate search queries that could be utilized to fetch these helpful code samples.",
"I am working on a coding project that aims to develop a variable impedance control code for force feedback. I need your expertise to improve my code. Here is the current version of one file of my code, along with the semantic search queries I’ve done on a vectorstore using OpenAI embeddings, and the results of these queries:\n\nPLACEHOLDER\n\nCan you improve this code, using the suggestions from the semantic search results? Please write the improved and complete code file. Please complete and improve the file based on the context.",
"{'type': 'string', 'description': 'The content to be written to the file.'}",
"I need your expertise to examine the provided code and pseudocode files. Your task is to pinpoint any issues, inefficiencies, and areas for potential enhancements. Here are the files you need to look into:\n\nPLACEHOLDER"
] |
2024-01-10 | Stahldavid/autocode | chat_fn~chat~camel~ToT.py | import logging
from queue import Queue
from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain.prompts import ChatPromptTemplate, PromptTemplate
from dotenv import load_dotenv
import os
# Load environment variables from .env file
load_dotenv()
# Access the API key from the environment variable
openai_api_key = os.getenv('OPENAI_API_KEY')
# Set up logging
logging.basicConfig(level=logging.INFO)
# Define constants for evaluation criteria
MAX_EVALUATIONS = 11 # Maximum number of evaluations
EVALUATION_SCORE_THRESHOLD = 7 # Threshold for evaluation score
# Import modules for creating message templates and data structures
from langchain.prompts import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
# Create message templates for each function using SystemMessagePromptTemplate and HumanMessagePromptTemplate classes
decomposition_system_template = SystemMessagePromptTemplate.from_template(
"You are an AI that decomposes code generation tasks."
)
decomposition_human_template = HumanMessagePromptTemplate.from_template(
"Decompose the code generation task: {task}"
)
generation_system_template = SystemMessagePromptTemplate.from_template(
"You are an AI that generates possible next steps for code."
)
generation_human_template = HumanMessagePromptTemplate.from_template(
"Generate a possible next step for the code: {code}"
)
evaluation_system_template = SystemMessagePromptTemplate.from_template(
"You are an AI that evaluates the quality of proposed code on a scale from 0 to 10. Just responde score : x"
)
evaluation_human_template = HumanMessagePromptTemplate.from_template(
"Evaluate the quality of the proposed code on a scale from 0 to 10.: {code}"
)
search_system_template = SystemMessagePromptTemplate.from_template(
"You are an AI that chooses the next step to take from proposed next steps."
)
search_human_template = HumanMessagePromptTemplate.from_template(
"From the proposed next steps, choose the next step to take: {proposals}"
)
# Import modules for parsing output and validating data models
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field, validator
# Define a data model for evaluation score using Pydantic and a custom validator
class EvaluationScore(BaseModel):
score: int = Field(description="the evaluation score as an integer between 0 and 10")
@validator('score')
def score_must_be_in_range(cls, field):
if field < 0 or field > 10:
raise ValueError("Score must be between 0 and 10!")
return field
# Create an output parser for the evaluation chain using PydanticOutputParser and the EvaluationScore data model
evaluation_parser = PydanticOutputParser(pydantic_object=EvaluationScore)
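# Illustration (hypothetical model output): evaluation_parser.parse('{"score": 8}') returns
# EvaluationScore(score=8), so the integer is available as the .score attribute. Note that the
# parser expects JSON matching the EvaluationScore schema; plain text such as "score: 8" would fail.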
class EvaluationChain(LLMChain):
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # LLMChain.__call__ returns a dict of inputs and outputs; the generated text is under the "text" key
        text_output = result["text"] if isinstance(result, dict) else str(result)
        # Parse the text into an EvaluationScore object and return its score as an integer
        parsed_score = evaluation_parser.parse(text_output)
        return int(parsed_score.score)
# Set up ChatOpenAI models for each function with different temperature and model name parameters
decomposition_llm = ChatOpenAI(temperature=0.3, model_name='gpt-3.5-turbo')
generation_llm = ChatOpenAI(temperature=0.7, model_name='gpt-3.5-turbo')
evaluation_llm = ChatOpenAI(temperature=0.5, model_name='gpt-3.5-turbo')
search_llm = ChatOpenAI(temperature=0.2, model_name='gpt-3.5-turbo')
# Set up LLMChains for each function using the ChatOpenAI models and the message templates
decomposition_chain = LLMChain(llm=decomposition_llm, prompt=ChatPromptTemplate.from_messages([decomposition_system_template, decomposition_human_template]))
generation_chain = LLMChain(llm=generation_llm, prompt=ChatPromptTemplate.from_messages([generation_system_template, generation_human_template]))
evaluation_chain = EvaluationChain(llm=evaluation_llm, prompt=ChatPromptTemplate.from_messages([evaluation_system_template, evaluation_human_template]))
search_chain = LLMChain(llm=search_llm, prompt=ChatPromptTemplate.from_messages([search_system_template, search_human_template]))
# Define a helper function to validate the task input as a non-empty string
def validate_task(task):
if not task or not isinstance(task, str) or len(task.strip()) == 0:
logging.error("Invalid task. Please provide a non-empty string.")
return False
return True
# Define a helper function to process the task input using the LLMChains and update the code output and evaluation counter
def process_task(task, queue, stack, code, evaluation_counter):
    try:
        # Decompose the task into smaller parts using the decomposition chain
        decomposition = decomposition_chain(task)["text"]
        # For each non-empty line of the decomposition, generate a possible next step using the generation chain
        for part in [line for line in decomposition.split("\n") if line.strip()]:
            generation = generation_chain(part)["text"]
            # Evaluate the quality of the generated code using the evaluation chain and the output parser
            evaluation_score = evaluation_chain(generation)
            evaluation_counter += 1
            # If the evaluation score is above the threshold, add the generated code to the queue and stack
            if evaluation_score >= EVALUATION_SCORE_THRESHOLD:
                queue.put(generation)
                stack.append(generation)
            # If the evaluation score is below the threshold, revert to the last state in the stack
            elif stack:
                last_state = stack.pop()
                queue.put(last_state)
        # If the queue is not empty, choose the next step to take from the proposed next steps using the search chain
        if not queue.empty():
            search = search_chain("\n".join(str(item) for item in queue.queue))["text"]
            code += search
    except Exception as e:
        logging.error(f"An error occurred: {e}")
    # Clear the queue and stack for the next iteration
    queue.queue.clear()
    stack.clear()
    return code, evaluation_counter
# Define a helper function to check if the code is complete by looking for a special marker
def code_complete(code):
return code.endswith("end of code")
# Define the main function that takes a task input and produces a code output using the LLMChains
def main():
# Get the task input from the user or a predefined variable
task = 'your task here'
# Validate the task input
if not validate_task(task):
return
# Initialize a queue and a stack to store intermediate results
queue = Queue()
stack = []
# Add the task input to the queue and stack
queue.put(task)
stack.append(task)
# Initialize an empty string for the code output
code = ""
# Initialize a counter for the number of evaluations done
evaluation_counter = 0
# Loop until the queue is empty or the maximum number of evaluations is reached
while not queue.empty():
# Get the next task from the queue
task = queue.get()
# Process the task using the LLMChains and update the code output and evaluation counter
code, evaluation_counter = process_task(task, queue, stack, code, evaluation_counter)
# Check if the maximum number of evaluations is reached
if evaluation_counter >= MAX_EVALUATIONS:
break
# Log the final code output
logging.info(f"Final code: {code}")
# Run the main function if this script is executed directly
if __name__ == "__main__":
main()
| [
"Evaluate the quality of the proposed code on a scale from 0 to 10.: {code}",
"You are an AI that evaluates the quality of proposed code on a scale from 0 to 10. Just responde score : x",
"From the proposed next steps, choose the next step to take: {proposals}",
"You are an AI that generates possible next steps for code.",
"Decompose the code generation task: {task}",
"You are an AI that chooses the next step to take from proposed next steps.",
"You are an AI that decomposes code generation tasks.",
"[PLACEHOLDER, PLACEHOLDER]",
"Generate a possible next step for the code: {code}"
] |
2024-01-10 | Stahldavid/autocode | chat_fn~chat~project~summary_chain.py | # Import the LLMChain and SharedMemory classes from the langchain.chains module
from langchain.chains import LLMChain, SharedMemory
# Import the HuggingFacePipeline class from the langchain module
from langchain import HuggingFacePipeline
# Import the pipeline function from the transformers module
from transformers import pipeline
# Define the SummaryChain class as a subclass of LLMChain and override the __init__ and execute methods
class SummaryChain(LLMChain):
    def __init__(self, shared_memory: SharedMemory):
        # Create a pipeline using transformers with a Hugging Face summarization model
        generator = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
        # Wrap the pipeline using LangChain's HuggingFacePipeline
        llm = HuggingFacePipeline(pipeline=generator)
        # Initialize the parent class with a prompt template and an llm model
        super().__init__(prompt_template="Texto: {input}\nResumo: ", model=llm)
        # Initialize the shared memory attribute
        self.shared_memory = shared_memory
    def execute(self, input: str) -> str:
        # Call the parent class's execute method with the input and get the output
        output = super().execute(input)
        # Return the output as a string
        return str(output)
# To use the SummaryChain class, you need to create an instance of it and pass it to the ChainExecutor constructor
# along with other parameters. For example:
# Create a SharedMemory instance for the conversation
shared_memory_conversation = SharedMemory(ConversationMemory(), "conversation")
# Create a SummaryChain instance with the conversation shared memory
summary_chain = SummaryChain(shared_memory_conversation)
# Create a ChainExecutor instance with summary_chain as a parameter
chain_executor = ChainExecutor(chains=[summary_chain])
| [] |
2024-01-10 | Stahldavid/autocode | chat_fn~chat~project~pseudogen_chain.py | # Import the necessary classes and functions
from langchain.chains import LLMChain, SharedMemory
from langchain import HuggingFacePipeline
from transformers import pipeline
class PseudoGenChain(LLMChain):
    def __init__(self, shared_memory: SharedMemory):
        # Create a pipeline using transformers with a Hugging Face code generation model
        generator = pipeline("text2text-generation", model="microsoft/CodeGPT-small-py")
        # Wrap the pipeline using LangChain's HuggingFacePipeline
        llm = HuggingFacePipeline(pipeline=generator)
        # Initialize the parent class with a prompt template and an LLM model
        super().__init__(prompt_template="Entrada: {input}\nPseudocódigo: ", model=llm)
        # Initialize the shared_memory attribute
        self.shared_memory = shared_memory
    def execute(self, input: str) -> str:
        # Call the parent class's execute method with the input and get the output
        output = super().execute(input)
        # Return the output as a string
        return str(output)
# Example usage:
class CodeMemory:
    pass  # Replace this line with the implementation of the CodeMemory class
class ChainExecutor:
    def __init__(self, chains):
        self.chains = chains
    def process_input(self, input):
        for chain in self.chains:
            input = chain.execute(input)
        return input
# Create a SharedMemory instance for code
shared_memory_code = SharedMemory(CodeMemory(), "code")
# Create a PseudoGenChain instance with shared_memory_code
pseudogen_chain = PseudoGenChain(shared_memory_code)
# Create a ChainExecutor instance with pseudogen_chain as a parameter
chain_executor = ChainExecutor(chains=[pseudogen_chain])
# Example input
text_input = "Escreva uma função para calcular a soma de dois números."
# Process the input using the ChainExecutor
pseudocode_output = chain_executor.process_input(text_input)
# Print the result
print("Pseudocódigo gerado:", pseudocode_output)
| [] |
2024-01-10 | Stahldavid/autocode | chat_fn~chat~camel~code_embedding.py | import os
from dotenv import load_dotenv
from langchain.text_splitter import (
    RecursiveCharacterTextSplitter,
    CharacterTextSplitter,
    Language,
)
from langchain.document_loaders import TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
# Load environment variables from .env file
load_dotenv()
embeddings = OpenAIEmbeddings()
openai_api_key = os.getenv('OPENAI_API_KEY')
# You can also see the separators used for a given language
RecursiveCharacterTextSplitter.get_separators_for_language(Language.PYTHON)
root_dir = '/home/stahlubuntu/chat_docs/chat_fn/chat/camel/ros2_control'
docs = []
for dirpath, dirnames, filenames in os.walk(root_dir):
for file in filenames:
if file.endswith('.py') and '/.venv/' not in dirpath:
try:
loader = TextLoader(os.path.join(dirpath, file), encoding='utf-8')
docs.extend(loader.load_and_split())
except Exception as e:
pass
print(f'{len(docs)}')
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(docs)
# Embed and store the texts
# Supplying a persist_directory will store the embeddings on disk
persist_directory = 'db_ros2_controllers'
embedding = OpenAIEmbeddings()
vectordb = Chroma.from_documents(documents=texts, embedding=embedding, persist_directory=persist_directory)
vectordb.persist()
vectordb = None
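# A minimal sketch of how the persisted store could be queried later (assumes the same
# persist_directory and embeddings; the query string is a hypothetical example):
# vectordb = Chroma(persist_directory=persist_directory, embedding_function=OpenAIEmbeddings())
# hits = vectordb.similarity_search("impedance controller", k=4)
# for hit in hits:
#     print(hit.metadata.get("source"), hit.page_content[:80])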
| [] |
2024-01-10 | Stahldavid/autocode | chat_fn~chat~project~unixcoder.py | from langchain.agents import Tool, ReadOnlySharedMemory, AgentExecutor
from langchain.memory import Memory
import torch
from code_agent import CodeAgent
from transformers import AutoTokenizer, AutoModel
class UnixcoderTool(Tool):
def __init__(self, shared_memory: ReadOnlySharedMemory):
super().__init__(actions={})
self.shared_memory = shared_memory
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.tokenizer = AutoTokenizer.from_pretrained("microsoft/unixcoder-base")
        self.unixcoder_model = AutoModel.from_pretrained("microsoft/unixcoder-base")
        self.unixcoder_model.to(self.device)
def predict_action(self, input: str) -> str:
tool_name, query = input.split(":", 1)
if tool_name == "#unixcoder_tool":
return query.strip()
else:
return None
def execute_action(self, action: str) -> str:
if action is not None:
# Encode the input action using the tokenizer
inputs = self.tokenizer(action, return_tensors="pt", padding=True)
            # Move the inputs to the device selected in __init__
            inputs = {key: tensor.to(self.device) for key, tensor in inputs.items()}
            # Use the UniXcoder model based on the task specified in the action
with torch.no_grad():
outputs = self.unixcoder_model(**inputs)
return str(outputs)
else:
return None
# Example usage:
# Create a SharedMemory instance for code
class CodeMemory(Memory):
pass
shared_memory_code = ReadOnlySharedMemory(CodeMemory(), "code")
# Create a UnixcoderTool instance with shared_memory_code
unixcoder_tool = UnixcoderTool(shared_memory_code)
# Create an AgentExecutor instance with unixcoder_tool as a parameter
agent_executor = AgentExecutor(
agent=CodeAgent(prompt_template, language_model, stop_sequence, output_parser),
memory=ConversationMemory(),
tools=[unixcoder_tool],
max_turns=10
)
# Run the AgentExecutor
agent_executor.run()
| [] |
2024-01-10 | Stahldavid/autocode | chat_fn~chat~camel~codecode.py | # %%
import logging
import os
import re
from queue import Queue
from dotenv import load_dotenv
import openai
import json
from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain.prompts import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
# Load environment variables from .env file
load_dotenv()
# Access the API key from the environment variable
openai_api_key = os.getenv('OPENAI_API_KEY')
openai.api_key = openai_api_key
# Set up logging
logging.basicConfig(level=logging.INFO)
import json
import openai
from code_search import similarity_search
query = "Variable impedance control for force feedback"
results_string = similarity_search(query)
print(results_string)
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k-0613",
messages=[
{
"role": "system",
"content": "You are an AI that decomposes complex code generation tasks into smaller, manageable sub-tasks. Each sub-task should be a independent file, should contain the name of the python file, and should contain the name of the file,description, all the functions and classes from the file, as well releted files."
},
{
"role": "user",
"content": f"Given the complex code generation task: 'Write a variable impedance control for force feedback using ros2, webots, webots_ros2 and ros2_control.', please decompose it into a detailed, numbered list of sub-tasks. Each sub-task should be a independent file should contain the name of the python file, description,all the functions and classes from the file, as well releted files. Make sure to devide the task into minimum 5 files. Try to make the code as readable as possible, encapsulating the code into functions and classes. Lets think step by step.\n\nThe following are the retrived documents from all the relevant repositories based on the query 'Variable impedance control for force feedback':\n{results_string}\nThese retrived functions from all the relevant repositories are helpfull but not fullfile the user task. Please use this context to help guide the task decompositionThese retrived functions from all the relevant repositories are helpfull but not fullfile the user task. Please use this context to help guide the task decomposition"
}
],
functions=[
{
"name": "generate_code",
"description": "Generates the code for multiple files, each described by a dictionary of attributes.",
"parameters": {
"type": "object",
"properties": {
"files": {
"type": "array",
"description": "An array of dictionaries, each representing a file. Each dictionary should include 'order' (the order of development), 'code_blocks' (an array of dictionaries detailing the code blocks in the file).",
"items": {
"type": "object",
"properties": {
"order": {
"type": "integer",
"description": "The order of development for the file."
},
"code_blocks": {
"type": "array",
"description": "An array of dictionaries, each detailing a code block in the file. Each dictionary should include 'type' (either 'function' or 'class'), 'name' (the name of the function or class), 'description' (a description of the block's purpose), 'content' (the details of the function or class, including function arguments or class methods, as applicable), and 'related_files' (an array of filenames that are related to the code block).",
"items": {
"type": "object",
"properties": {
"type": {
"type": "string",
"description": "The type of the code block, either 'function' or 'class'."
},
"name": {
"type": "string",
"description": "The name of the function or class."
},
"description": {
"type": "string",
"description": "A description of the block's purpose."
},
"content": {
"type": "string",
"description": "The details of the function or class, including arguments and methods as applicable."
},
}
},
"required": ["type", "name", "description", "content"]
}
}
},
"required": ["order", "code_blocks"]
}
}
},
"required": ["files"]
}
],
function_call={"name": "generate_code"}
)
reply_content = completion.choices[0]
print(reply_content)
args = reply_content["message"]['function_call']['arguments']
data = json.loads(args)
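# For orientation, `data` should follow the schema declared above, e.g. (hypothetical values):
# {"files": [{"order": 1,
#             "code_blocks": [{"type": "class", "name": "ImpedanceController",
#                              "description": "...", "content": "..."}]}]}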
# Initialize an empty dictionary to store the files
files = {}
# Go through each file
for file in data["files"]:
# Create a new dictionary for this file
files[file["code_blocks"][0]["name"]] = {
"order": file["order"],
"code_blocks": file["code_blocks"],
}
# Sort the files dictionary based on the order of development
files = dict(sorted(files.items(), key=lambda item: item[1]['order']))
# Print the files dictionary
for filename, file_data in files.items():
print(f"Order of development: {file_data['order']}")
print(f"{filename}:")
for block in file_data['code_blocks']:
print(f" Code block type: {block['type']}")
print(f" Code block name: {block['name']}")
print(f" Code block description: {block['description']}")
print(f" Code block content: {block['content']}")
#print(f" Related files: {block['related_files']}")
files_string = ""
for filename, file_data in files.items():
files_string += f"Order of development: {file_data['order']}\n"
files_string += f"{filename}:\n"
for block in file_data['code_blocks']:
files_string += f" Code block type: {block['type']}\n"
files_string += f" Code block name: {block['name']}\n"
files_string += f" Code block description: {block['description']}\n"
files_string += f" Code block content: {block['content']}\n"
#files_string += f" Related files: {block['related_files']}\n"
# %%
completion2 = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k-0613",
messages=[
{
"role": "system",
"content": "You are an advanced AI with capabilities to analyze intricate code and pseudocode files. Based on this analysis, you provide recommendations for the most appropriate vectorstore repositories to extract relevant code snippets from. In addition, you generate search queries that could be utilized to fetch these helpful code samples."
},
{
"role": "user",
"content": f"I need your expertise to examine the provided code and pseudocode files. Your task is to pinpoint any issues, inefficiencies, and areas for potential enhancements. Here are the files you need to look into:\n\n{files_string}"
}
],
functions=[
{
"name": "analyze_code",
"description": "This function performs an analysis on the provided code files. It returns a list of suitable repositories for fetching relevant code samples and suggests a respective search query for each repository.",
"parameters": {
"type": "object",
"properties": {
"files": {
"type": "array",
"items": {
"type": "object",
"properties": {
"file_name": {
"type": "string",
"description": "The name of the code file."
},
"repository": {
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "The name of the repository.",
"enum": ['db_ros2_control', 'db_ros2', 'db_webots_ros2', 'db_webots']
},
"query": {
"type": "string",
"description": "The search query designed to fetch code samples from the specified repository."
}
},
"required": ["name", "query"],
"description": "An object representing a repository and a corresponding search query."
}
},
"required": ["file_name", "repository"]
},
"description": "An array of objects, each representing a code file that needs to be analyzed."
}
},
"required": ["files"]
}
}
],
function_call={"name": "analyze_code"}
)
reply_content2 = completion2.choices[0]
print(reply_content2)
args2 = reply_content2["message"]['function_call']['arguments']
data2 = json.loads(args2)
print(data2)
# Define the list of directories to search in
directories = ['db_ros2_control', 'db_ros2', 'db_webots_ros2', 'db_webots']
# Create an empty dictionary to store the results
results = {}
# Loop through each file in the data2 dictionary
for file_data in data2["files"]:
file_name = file_data["file_name"]
repository = file_data["repository"]
query = repository['query']
# Call the similarity_search function and save the result as a string
result = similarity_search(query, directories)
# Store the result in the dictionary, using the filename_query as the key
results[f"{file_name}_{query}"] = result
# Create a dictionary to store the strings for each file
file_strings = {}
# Loop through each file in the files dictionary
for filename, file_data in files.items():
# Create a list to store the lines for this file
file_lines = []
file_lines.append(f"Order of development: {file_data['order']}")
file_lines.append(f"{filename}:")
for block in file_data['code_blocks']:
file_lines.append(f" Code block type: {block['type']}")
file_lines.append(f" Code block name: {block['name']}")
file_lines.append(f" Code block description: {block['description']}")
file_lines.append(f" Code block content: {block['content']}")
# Loop through the results dictionary to find the results for this file
for key, value in results.items():
# If the filename is in the key of the results dictionary, add the query and its result to the lines
if filename in key:
file_lines.append(f"#####################################")
file_lines.append(f" Query:\n\n {key.replace(filename+'_', '')}")
file_lines.append(f"\n\n Query result:\n\n {value}")
# Join the lines for this file into a single string and add it to the file_strings dictionary
file_strings[filename] = '\n'.join(file_lines)
# %%
print(file_strings["impedance_control.py"])
# %%
# Loop through each file_string in the file_strings dictionary
for filename, file_string in file_strings.items():
print(f"File: {filename}")
print(file_string)
# %%
# Define a string to store the print outputs
output_string = ""
# Dictionary to collect the improved code for every file
new_files = {}
# Loop through each file_string in the file_strings dictionary
for filename, file_string in file_strings.items():
# Create a new completion with the file_string as the user message content
completion4 = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=[
{
"role": "system",
"content": "You are an AI Code Optimization Model that can optimize code, complete #TODOs, recommend best practices, and learn from relevant code repositories. Your main task is to analyze the given code, which performs semantic search queries on a vectorstore using OpenAI embeddings, and apply the insights from the search results to refine the code. The final goal is to produce a fully functional and optimized code file that can be used as part of a larger project, specifically a variable impedance control code for force feedback"
},
{
"role": "user",
"content": f"I am working on a coding project that aims to develop a variable impedance control code for force feedback. I need your expertise to improve my code. Here is the current version of one file of my code, along with the semantic search queries I’ve done on a vectorstore using OpenAI embeddings, and the results of these queries:\n\n{file_string}\n\nCan you improve this code, using the suggestions from the semantic search results? Please write the improved and complete code file. Please complete and improve the file based on the context."
}
],
)
    new_files[filename] = completion4.choices[0].message['content']
# Append to the output_string instead of printing
output_string += f"For file: {filename}, the improved code is: {new_files[filename]}\n"
# Now you can print or further process the output_string as required
print(output_string)
# Print or process the completion as needed
#print(f"For file: {filename}, the improved code is: {new_files[filename]}\n")
# %%
print(output_string)
# %%
completion4 = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k-0613",
messages=[
{
"role": "system",
"content": "You are a highly skilled AI, capable of analyzing complex code and pseudocode documents. Your purpose is to provide insights on potential improvements, identify inefficiencies and issues, and recommend the most relevant repositories from VectorStore to retrieve suitable code snippets. Additionally, you create specific search queries to access these valuable code samples."
},
{
"role": "user",
"content": f"I require your expertise to examine the following code and pseudocode documents. Identify potential problems, inefficiencies, and opportunities for enhancement. You final goal is to rewrite and finish the code to make full functional. Here are the documents for your review:\n\n{files_string}"
}
],
functions=[
{
"name": "search_code",
"description": "This function utilizes VectorStore's semantic search for code functions. It receives the name of a repository and a corresponding query, along with the name of a code document, and returns the code snippets from the respective repository that are most similar to the queries.",
"parameters": {
"type": "object",
"properties": {
"files": {
"type": "array",
"items": {
"type": "object",
"properties": {
"file_name": {
"type": "string",
"description": "The name of the code document."
},
"repositories": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "The name of the repository.",
"enum": ['db_ros2_control', 'db_ros2', 'db_webots_ros2', 'db_webots']
},
"query": {
"type": "string",
"description": "The search query tailored to retrieve code snippets from the specified repository."
}
},
"required": ["name", "query"]
},
"description": "An array of objects, each one representing a repository and a corresponding search query."
}
},
"required": ["file_name", "repositories"]
},
"description": "An array of objects, each one representing a code document that needs to be reviewed."
}
},
"required": ["files"]
}
}
],
function_call={"name": "search_code"}
)
# %%
import openai
from termcolor import colored
from code_search import similarity_search
def pretty_print_conversation(messages):
role_to_color = {
"system": "red",
"user": "green",
"assistant": "blue",
"function": "magenta",
}
formatted_messages = []
for message in messages:
if message["role"] == "system":
formatted_messages.append(f"system: {message['content']}\n")
elif message["role"] == "user":
formatted_messages.append(f"user: {message['content']}\n")
elif message["role"] == "assistant" and message.get("function_call"):
formatted_messages.append(f"assistant: {message['function_call']}\n")
elif message["role"] == "assistant" and not message.get("function_call"):
formatted_messages.append(f"assistant: {message['content']}\n")
elif message["role"] == "function":
formatted_messages.append(f"function ({message['name']}): {message['content']}\n")
    for message, formatted_message in zip(messages, formatted_messages):
        print(colored(formatted_message, role_to_color.get(message["role"], "white")))
def search_code_completion_request(messages, functions):
return openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k-0613",
messages=messages,
functions=functions,
        function_call="auto"
)
functions = [
{
"name": "similarity_search",
"description": "Vectorstore embedding semantic search for code functions. It receives a query and the directories to search, and returns the most similar code snippets to the queries.",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The search query designed to fetch code samples."
},
"directories": {
"type": "array",
"items": {
"type": "string",
"enum": ["db_webots", "db_ros2", "db_webots_ros2", "db_ros2_control"],
"description": "The directories in which to perform the search."
},
"description": "An array of directory names."
}
},
"required": ["query", "directories"]
}
},
]
messages = [
{
"role": "system",
"content": "You are a sophisticated AI that has the ability to analyze complex code and pseudocode documents. You are tasked with making necessary clarifications in a series of chat turns until you gather sufficient information to rewrite the code. You can utilize the 'search_code' function to fetch relevant code snippets based on semantic similarity, and subsequently improve the given file."
},
{
"role": "user",
"content": f"I need your assistance in reviewing these code and pseudocode documents. You final goal is to rewrite and finish the code to make full functional The final goal is to create a project for variable impedance control providing force feedback. The project will use Webots, ROS2, webots_ros2, and ros2_control. You are required to identify potential problems, inefficiencies, and areas for improvements in these documents. Here are the documents you need to work on:\n\n{output_string}\n\nPlease first clarify any question that you need to finish the code with me. After you completely understand the goal of the user, use the search_code function to find relevant code that can help improve the code."
}
]
while True:
chat_response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k-0613",
messages=messages,
functions=functions,
)
assistant_message = chat_response['choices'][0]['message']
messages.append(assistant_message)
pretty_print_conversation(messages)
if assistant_message.get("function_call"):
# Extract the function name and arguments from the message
function_name = assistant_message["function_call"]["name"]
arguments = json.loads(assistant_message["function_call"]["arguments"])
print(f"Function Name: {function_name}")
print(f"Arguments: {arguments}")
# Call the function and get the results
if function_name == "similarity_search":
# Initialize a results list
all_results = []
print("Calling similarity_search function...")
results = similarity_search(arguments['query'], arguments['directories'])
print("similarity_search function returned.")
print(f"Results: {results}")
| [
"You are a sophisticated AI that has the ability to analyze complex code and pseudocode documents. You are tasked with making necessary clarifications in a series of chat turns until you gather sufficient information to rewrite the code. You can utilize the 'search_code' function to fetch relevant code snippets based on semantic similarity, and subsequently improve the given file.",
"You are an AI that decomposes complex code generation tasks into smaller, manageable sub-tasks. Each sub-task should be a independent file, should contain the name of the python file, and should contain the name of the file,description, all the functions and classes from the file, as well releted files.",
"I need your assistance in reviewing these code and pseudocode documents. You final goal is to rewrite and finish the code to make full functional The final goal is to create a project for variable impedance control providing force feedback. The project will use Webots, ROS2, webots_ros2, and ros2_control. You are required to identify potential problems, inefficiencies, and areas for improvements in these documents. Here are the documents you need to work on:\n\nPLACEHOLDER\n\nPlease first clarify any question that you need to finish the code with me. After you completely understand the goal of the user, use the search_code function to find relevant code that can help improve the code.",
"You are a highly skilled AI, capable of analyzing complex code and pseudocode documents. Your purpose is to provide insights on potential improvements, identify inefficiencies and issues, and recommend the most relevant repositories from VectorStore to retrieve suitable code snippets. Additionally, you create specific search queries to access these valuable code samples.",
"You are an AI Code Optimization Model that can optimize code, complete #TODOs, recommend best practices, and learn from relevant code repositories. Your main task is to analyze the given code, which performs semantic search queries on a vectorstore using OpenAI embeddings, and apply the insights from the search results to refine the code. The final goal is to produce a fully functional and optimized code file that can be used as part of a larger project, specifically a variable impedance control code for force feedback",
"Given the complex code generation task: 'Write a variable impedance control for force feedback using ros2, webots, webots_ros2 and ros2_control.', please decompose it into a detailed, numbered list of sub-tasks. Each sub-task should be a independent file should contain the name of the python file, description,all the functions and classes from the file, as well releted files. Make sure to devide the task into minimum 5 files. Try to make the code as readable as possible, encapsulating the code into functions and classes. Lets think step by step.\n\nThe following are the retrived documents from all the relevant repositories based on the query 'Variable impedance control for force feedback':\nPLACEHOLDER\nThese retrived functions from all the relevant repositories are helpfull but not fullfile the user task. Please use this context to help guide the task decompositionThese retrived functions from all the relevant repositories are helpfull but not fullfile the user task. Please use this context to help guide the task decomposition",
"{'type': 'string', 'description': 'The details of the function or class, including arguments and methods as applicable.'}",
"I require your expertise to examine the following code and pseudocode documents. Identify potential problems, inefficiencies, and opportunities for enhancement. You final goal is to rewrite and finish the code to make full functional. Here are the documents for your review:\n\nPLACEHOLDER",
"You are an advanced AI with capabilities to analyze intricate code and pseudocode files. Based on this analysis, you provide recommendations for the most appropriate vectorstore repositories to extract relevant code snippets from. In addition, you generate search queries that could be utilized to fetch these helpful code samples.",
"I am working on a coding project that aims to develop a variable impedance control code for force feedback. I need your expertise to improve my code. Here is the current version of one file of my code, along with the semantic search queries I’ve done on a vectorstore using OpenAI embeddings, and the results of these queries:\n\nPLACEHOLDER\n\nCan you improve this code, using the suggestions from the semantic search results? Please write the improved and complete code file. Please complete and improve the file based on the context.",
"I need your expertise to examine the provided code and pseudocode files. Your task is to pinpoint any issues, inefficiencies, and areas for potential enhancements. Here are the files you need to look into:\n\nPLACEHOLDER"
] |
2024-01-10 | Stahldavid/autocode | chat_fn~chat~Auto-GPT~autogpt~api_manager.py | from __future__ import annotations
import openai
from autogpt.config import Config
from autogpt.logs import logger
from autogpt.modelsinfo import COSTS
from autogpt.singleton import Singleton
class ApiManager(metaclass=Singleton):
def __init__(self):
self.total_prompt_tokens = 0
self.total_completion_tokens = 0
self.total_cost = 0
self.total_budget = 0
def reset(self):
self.total_prompt_tokens = 0
self.total_completion_tokens = 0
self.total_cost = 0
self.total_budget = 0.0
def create_chat_completion(
self,
messages: list, # type: ignore
model: str | None = None,
temperature: float = None,
max_tokens: int | None = None,
deployment_id=None,
) -> str:
"""
Create a chat completion and update the cost.
Args:
messages (list): The list of messages to send to the API.
model (str): The model to use for the API call.
temperature (float): The temperature to use for the API call.
max_tokens (int): The maximum number of tokens for the API call.
Returns:
str: The AI's response.
"""
cfg = Config()
if temperature is None:
temperature = cfg.temperature
if deployment_id is not None:
response = openai.ChatCompletion.create(
deployment_id=deployment_id,
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
api_key=cfg.openai_api_key,
)
else:
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
api_key=cfg.openai_api_key,
)
logger.debug(f"Response: {response}")
prompt_tokens = response.usage.prompt_tokens
completion_tokens = response.usage.completion_tokens
self.update_cost(prompt_tokens, completion_tokens, model)
return response
def update_cost(self, prompt_tokens, completion_tokens, model):
"""
Update the total cost, prompt tokens, and completion tokens.
Args:
prompt_tokens (int): The number of tokens used in the prompt.
completion_tokens (int): The number of tokens used in the completion.
model (str): The model used for the API call.
"""
self.total_prompt_tokens += prompt_tokens
self.total_completion_tokens += completion_tokens
self.total_cost += (
prompt_tokens * COSTS[model]["prompt"]
+ completion_tokens * COSTS[model]["completion"]
) / 1000
logger.debug(f"Total running cost: ${self.total_cost:.3f}")
def set_total_budget(self, total_budget):
"""
Sets the total user-defined budget for API calls.
Args:
total_budget (float): The total budget for API calls.
"""
self.total_budget = total_budget
def get_total_prompt_tokens(self):
"""
Get the total number of prompt tokens.
Returns:
int: The total number of prompt tokens.
"""
return self.total_prompt_tokens
def get_total_completion_tokens(self):
"""
Get the total number of completion tokens.
Returns:
int: The total number of completion tokens.
"""
return self.total_completion_tokens
def get_total_cost(self):
"""
Get the total cost of API calls.
Returns:
float: The total cost of API calls.
"""
return self.total_cost
def get_total_budget(self):
"""
Get the total user-defined budget for API calls.
Returns:
float: The total budget for API calls.
"""
return self.total_budget
| [] |
2024-01-10 | Stahldavid/autocode | tet.py | from langchain.llms import LlamaCpp
from langchain import PromptTemplate, LLMChain
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])
# Callbacks support token-wise streaming
callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
# Verbose is required to pass to the callback manager
# Make sure the model path is correct for your system!
llm = LlamaCpp(
model_path="C:/Users/stahl/Downloads/llm/llama.cpp/wizard-vicuna-13B.ggmlv3.q4_0.bin", callback_manager=callback_manager, verbose=True)
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What NFL team won the Super Bowl in the year Justin Bieber was born?, Let's think step by step."
llm_chain.run(question) | [
"question",
"Question: {question}\n\nAnswer: Let's think step by step."
] |
2024-01-10 | Stahldavid/autocode | chat_fn~chat~Auto-GPT~autogpt~commands~image_gen.py | """ Image Generation Module for AutoGPT."""
import io
import uuid
from base64 import b64decode
import openai
import requests
from PIL import Image
from autogpt.commands.command import command
from autogpt.config import Config
CFG = Config()
@command("generate_image", "Generate Image", '"prompt": "<prompt>"', CFG.image_provider)
def generate_image(prompt: str, size: int = 256) -> str:
"""Generate an image from a prompt.
Args:
prompt (str): The prompt to use
size (int, optional): The size of the image. Defaults to 256. (Not supported by HuggingFace)
Returns:
str: The filename of the image
"""
filename = f"{CFG.workspace_path}/{str(uuid.uuid4())}.jpg"
# DALL-E
if CFG.image_provider == "dalle":
return generate_image_with_dalle(prompt, filename, size)
# HuggingFace
elif CFG.image_provider == "huggingface":
return generate_image_with_hf(prompt, filename)
# SD WebUI
elif CFG.image_provider == "sdwebui":
return generate_image_with_sd_webui(prompt, filename, size)
return "No Image Provider Set"
def generate_image_with_hf(prompt: str, filename: str) -> str:
"""Generate an image with HuggingFace's API.
Args:
prompt (str): The prompt to use
filename (str): The filename to save the image to
Returns:
str: The filename of the image
"""
API_URL = (
f"https://api-inference.huggingface.co/models/{CFG.huggingface_image_model}"
)
if CFG.huggingface_api_token is None:
raise ValueError(
"You need to set your Hugging Face API token in the config file."
)
headers = {
"Authorization": f"Bearer {CFG.huggingface_api_token}",
"X-Use-Cache": "false",
}
response = requests.post(
API_URL,
headers=headers,
json={
"inputs": prompt,
},
)
image = Image.open(io.BytesIO(response.content))
print(f"Image Generated for prompt:{prompt}")
image.save(filename)
return f"Saved to disk:{filename}"
def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str:
"""Generate an image with DALL-E.
Args:
prompt (str): The prompt to use
filename (str): The filename to save the image to
size (int): The size of the image
Returns:
str: The filename of the image
"""
# Check for supported image sizes
if size not in [256, 512, 1024]:
closest = min([256, 512, 1024], key=lambda x: abs(x - size))
print(
f"DALL-E only supports image sizes of 256x256, 512x512, or 1024x1024. Setting to {closest}, was {size}."
)
size = closest
response = openai.Image.create(
prompt=prompt,
n=1,
size=f"{size}x{size}",
response_format="b64_json",
api_key=CFG.openai_api_key,
)
print(f"Image Generated for prompt:{prompt}")
image_data = b64decode(response["data"][0]["b64_json"])
with open(filename, mode="wb") as png:
png.write(image_data)
return f"Saved to disk:{filename}"
def generate_image_with_sd_webui(
prompt: str,
filename: str,
size: int = 512,
negative_prompt: str = "",
extra: dict = {},
) -> str:
"""Generate an image with Stable Diffusion webui.
Args:
prompt (str): The prompt to use
filename (str): The filename to save the image to
size (int, optional): The size of the image. Defaults to 256.
negative_prompt (str, optional): The negative prompt to use. Defaults to "".
extra (dict, optional): Extra parameters to pass to the API. Defaults to {}.
Returns:
str: The filename of the image
"""
# Create a session and set the basic auth if needed
s = requests.Session()
if CFG.sd_webui_auth:
username, password = CFG.sd_webui_auth.split(":")
s.auth = (username, password or "")
# Generate the images
response = requests.post(
f"{CFG.sd_webui_url}/sdapi/v1/txt2img",
json={
"prompt": prompt,
"negative_prompt": negative_prompt,
"sampler_index": "DDIM",
"steps": 20,
"cfg_scale": 7.0,
"width": size,
"height": size,
"n_iter": 1,
**extra,
},
)
print(f"Image Generated for prompt:{prompt}")
# Save the image to disk
response = response.json()
b64 = b64decode(response["images"][0].split(",", 1)[0])
image = Image.open(io.BytesIO(b64))
image.save(filename)
return f"Saved to disk:{filename}"
| [] |
2024-01-10 | Stahldavid/autocode | chat_fn~chat~project~graphcodebert.py | # Import langchain modules
from langchain.agents import Tool, SharedMemory, AgentExecutor
from langchain.memory import Memory
# Import the GraphCodeBERT model and tokenizer
from transformers import AutoTokenizer, AutoModelForMaskedLM
# Import the CodeAgent agent
from code_agent import CodeAgent
tokenizer = AutoTokenizer.from_pretrained("microsoft/graphcodebert-base")
model = AutoModelForMaskedLM.from_pretrained("microsoft/graphcodebert-base")
class GraphCodeBertTool(Tool):
def __init__(self, shared_memory: SharedMemory):
super().__init__(actions={})
self.shared_memory = shared_memory
self.graphcodebert_model = model
def predict_action(self, input: str) -> str:
tool_name, query = input.split(":", 1)
if tool_name == "#graphcodebert_tool":
return query.strip()
else:
return None
def execute_action(self, action: str) -> str:
if action is not None:
output = self.graphcodebert_model(action)
return str(output)
else:
return None
# Example usage:
# Create a SharedMemory instance for code
class CodeMemory(Memory):
pass
shared_memory_code = SharedMemory(CodeMemory(), "code")
# Create a GraphCodeBertTool instance with shared_memory_code
graphcodebert_tool = GraphCodeBertTool(shared_memory_code)
# Create an AgentExecutor instance with graphcodebert_tool as a parameter
agent_executor = AgentExecutor(
agent=CodeAgent(prompt_template, language_model, stop_sequence, output_parser),
memory=ConversationMemory(),
tools=[graphcodebert_tool],
max_turns=10
)
# Run the AgentExecutor
agent_executor.run()
| [] |
2024-01-10 | Stahldavid/autocode | chat_fn~chat~project~code_memory.py | # Import langchain modules
from langchain.memory import Memory
from langchain.tools import VectorStore
# Import other modules
import os
import requests
# Define code memory class
class CodeMemory(Memory):
def __init__(self):
# Initialize the memory with an empty dictionary
super().__init__()
# Initialize the vector store with a default model and index name
self.vector_store = VectorStore(model="codebert-base", index_name="code")
def store(self, key, value):
# Override this method to store a key-value pair in the memory
# The key should be a string representing the name or path of the code file
# The value should be a string representing the content of the code file
# Check if the key is a valid string
if not isinstance(key, str):
raise TypeError("Key must be a string")
# Check if the value is a valid string
if not isinstance(value, str):
raise TypeError("Value must be a string")
# Store the key-value pair in the memory dictionary
self.memory[key] = value
# Generate an embedding for the value using the vector store
embedding = self.vector_store.encode(value)
# Store the embedding in the vector store index using the key as an id
self.vector_store.index(embedding, key)
def retrieve(self, key):
# Override this method to retrieve a value from the memory given a key
# The key should be a string representing the name or path of the code file or a query for semantic search
# The value should be a string representing the content of the code file or None if not found
# Check if the key is a valid string
if not isinstance(key, str):
raise TypeError("Key must be a string")
# Try to get the value from the memory dictionary using the key
value = self.memory.get(key)
# If the value is not None, return it
if value is not None:
return value
# Otherwise, try to perform a semantic search using the vector store and the key as a query
results = self.vector_store.search(key)
# If there are any results, get the first one and its id
if results:
result = results[0]
result_id = result["id"]
# Try to get the value from the memory dictionary using the result id as a key
value = self.memory.get(result_id)
# If the value is not None, return it
if value is not None:
return value
# Otherwise, try to get the value from GitHub using requests and result id as a url
else:
try:
response = requests.get(result_id)
response.raise_for_status()
value = response.text
# If successful, store and return the value
self.store(result_id, value)
return value
# If unsuccessful, return None
except requests.exceptions.RequestException:
return None
# If there are no results, return None
else:
return None
def load(self, path):
# Define a method to load code files from a local or remote path and store them in memory
# Check if path is a valid string
if not isinstance(path, str):
raise TypeError("Path must be a string")
# Check if path is a local file path
if os.path.isfile(path):
# If path is a local file path, read the file and store its contents in memory using the file name as a key
with open(path, "r") as f:
content = f.read()
key = os.path.basename(path)
self.store(key, content)
# Check if path is a remote file path
elif path.startswith("http"):
# If path is a remote file path, use requests to get the file contents and store them in memory using the file name as a key
try:
response = requests.get(path)
response.raise_for_status()
content = response.text
key = os.path.basename(path)
self.store(key, content)
# If unsuccessful, raise an exception
except requests.exceptions.RequestException:
raise ValueError("Invalid remote file path")
# If path is not a valid file path, raise an exception
else:
raise ValueError("Invalid file path")
| [] |
2024-01-10 | Stahldavid/autocode | chat_fn~chat~camel~code_search.py | # import code_embedding
# from langchain.embeddings.openai import OpenAIEmbeddings
# from langchain.vectorstores import Chroma
# import json
# def similarity_search(query, directories=None):
# # Default directories
# if directories is None:
# directories = ['db_ros2_control', 'db_ros2', 'db_webots_ros2', 'db_webots']
# # Embeddings
# embeddings = OpenAIEmbeddings()
# # Load vector database for each directory
# vectordbs = []
# for directory in directories:
# vectordb = Chroma(persist_directory=directory, embedding_function=embeddings)
# vectordbs.append(vectordb)
# # Query each vector database and concatenate results
# results_list = []
# for directory, vectordb in zip(directories, vectordbs):
# results = vectordb.similarity_search(query, k=2)
# # Convert results to JSON string
# results_dict_list = [vars(result) for result in results]
# results_string = json.dumps(results_dict_list)
# results_list.append(f"Directory {directory}: {results_string}")
# # Concatenate results into a single string
# results_string = "\n".join(results_list)
# return results_string
import code_embedding
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
import json
def similarity_search(query, directories=None):
# Default directories
if directories is None:
directories = ['db_ros2_control', 'db_ros2', 'db_webots_ros2', 'db_webots']
# Embeddings
embeddings = OpenAIEmbeddings()
# Load vector database for each directory
vectordbs = []
for directory in directories:
vectordb = Chroma(persist_directory=directory, embedding_function=embeddings)
vectordbs.append(vectordb)
# Query each vector database and concatenate results
results_list = []
for directory, vectordb in zip(directories, vectordbs):
results = vectordb.similarity_search(query, k=2)
# Only add the result to the list if it's not empty
if results:
# Convert results to JSON string
results_dict_list = [vars(result) for result in results]
results_string = json.dumps(results_dict_list)
results_list.append(f"Directory {directory}: {results_string}")
# Concatenate results into a single string
results_string = "\n".join(results_list)
return results_string
print(similarity_search("variabl;e impedance control", directories= ['db_ros2']))
| [] |
2024-01-10 | Stahldavid/autocode | chat_fn~chat~Auto-GPT~tests~test_llm_utils.py | import pytest
from openai.error import APIError, RateLimitError
from autogpt.llm_utils import get_ada_embedding, retry_openai_api
from autogpt.modelsinfo import COSTS
@pytest.fixture(params=[RateLimitError, APIError])
def error(request):
if request.param == APIError:
return request.param("Error", http_status=502)
else:
return request.param("Error")
@pytest.fixture
def mock_create_embedding(mocker):
mock_response = mocker.MagicMock()
mock_response.usage.prompt_tokens = 5
mock_response.__getitem__.side_effect = lambda key: [{"embedding": [0.1, 0.2, 0.3]}]
return mocker.patch(
"autogpt.llm_utils.create_embedding", return_value=mock_response
)
def error_factory(error_instance, error_count, retry_count, warn_user=True):
class RaisesError:
def __init__(self):
self.count = 0
@retry_openai_api(
num_retries=retry_count, backoff_base=0.001, warn_user=warn_user
)
def __call__(self):
self.count += 1
if self.count <= error_count:
raise error_instance
return self.count
return RaisesError()
def test_retry_open_api_no_error(capsys):
@retry_openai_api()
def f():
return 1
result = f()
assert result == 1
output = capsys.readouterr()
assert output.out == ""
assert output.err == ""
@pytest.mark.parametrize(
"error_count, retry_count, failure",
[(2, 10, False), (2, 2, False), (10, 2, True), (3, 2, True), (1, 0, True)],
ids=["passing", "passing_edge", "failing", "failing_edge", "failing_no_retries"],
)
def test_retry_open_api_passing(capsys, error, error_count, retry_count, failure):
call_count = min(error_count, retry_count) + 1
raises = error_factory(error, error_count, retry_count)
if failure:
with pytest.raises(type(error)):
raises()
else:
result = raises()
assert result == call_count
assert raises.count == call_count
output = capsys.readouterr()
if error_count and retry_count:
if type(error) == RateLimitError:
assert "Reached rate limit, passing..." in output.out
assert "Please double check" in output.out
if type(error) == APIError:
assert "API Bad gateway" in output.out
else:
assert output.out == ""
def test_retry_open_api_rate_limit_no_warn(capsys):
error_count = 2
retry_count = 10
raises = error_factory(RateLimitError, error_count, retry_count, warn_user=False)
result = raises()
call_count = min(error_count, retry_count) + 1
assert result == call_count
assert raises.count == call_count
output = capsys.readouterr()
assert "Reached rate limit, passing..." in output.out
assert "Please double check" not in output.out
def test_retry_openapi_other_api_error(capsys):
error_count = 2
retry_count = 10
raises = error_factory(APIError("Error", http_status=500), error_count, retry_count)
with pytest.raises(APIError):
raises()
call_count = 1
assert raises.count == call_count
output = capsys.readouterr()
assert output.out == ""
def test_get_ada_embedding(mock_create_embedding, api_manager):
model = "text-embedding-ada-002"
embedding = get_ada_embedding("test")
mock_create_embedding.assert_called_once_with(
"test", model="text-embedding-ada-002"
)
assert embedding == [0.1, 0.2, 0.3]
cost = COSTS[model]["prompt"]
assert api_manager.get_total_prompt_tokens() == 5
assert api_manager.get_total_completion_tokens() == 0
assert api_manager.get_total_cost() == (5 * cost) / 1000
| [] |
2024-01-10 | Stahldavid/autocode | chat_fn~chat~camel~pesquisa_net.py | from langchain import OpenAI, SerpAPIWrapper, LLMChain
from langchain.agents import initialize_agent, AgentType, Tool, ZeroShotAgent, AgentExecutor
from langchain.embeddings import OpenAIEmbeddings
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
import os
# Load environment variables from .env file
load_dotenv()
# Access the API key from the environment variable
serpapi_api_key = os.getenv('SERPAPI_API_KEY')
#embeddings = OpenAIEmbeddings(openai_api_key="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")
#os.environ["OPENAI_API_KEY"] = "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" # https://platform.openai.com (Thx Michael from Twitter)
#os.environ['SERPAPI_API_KEY'] = 'd4eb38ff06e003ba07a08950ec770d7d3b876e5685ff9806d3a79a5dc339e558' # https://serpapi.com/
llm = ChatOpenAI(temperature=0)
search = SerpAPIWrapper(serpapi_api_key='d4eb38ff06e003ba07a08950ec770d7d3b876e5685ff9806d3a79a5dc339e558')
from langchain import OpenAI, Wikipedia
from langchain.agents import initialize_agent, Tool
from langchain.agents import AgentType
from langchain.agents.react.base import DocstoreExplorer
docstore=DocstoreExplorer(Wikipedia())
tools = [
Tool(
name="Search",
func=search.run,
description="useful for when you need to ask with search"
),
]
#llm = ChatOpenAI(temperature=0)
llm = ChatOpenAI(temperature=0)
react = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
question = "what are the best 3 Github repositories for impedance control using ros2"
react.run(question) | [] |
2024-01-10 | Stahldavid/autocode | chat_fn~chat~Auto-GPT~autogpt~llm_utils.py | from __future__ import annotations
import functools
import time
from typing import List, Optional
import openai
from colorama import Fore, Style
from openai.error import APIError, RateLimitError, Timeout
from autogpt.api_manager import ApiManager
from autogpt.config import Config
from autogpt.logs import logger
from autogpt.types.openai import Message
def retry_openai_api(
num_retries: int = 10,
backoff_base: float = 2.0,
warn_user: bool = True,
):
"""Retry an OpenAI API call.
Args:
num_retries int: Number of retries. Defaults to 10.
backoff_base float: Base for exponential backoff. Defaults to 2.
warn_user bool: Whether to warn the user. Defaults to True.
"""
retry_limit_msg = f"{Fore.RED}Error: " f"Reached rate limit, passing...{Fore.RESET}"
api_key_error_msg = (
f"Please double check that you have setup a "
f"{Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. You can "
f"read more here: {Fore.CYAN}https://github.com/Significant-Gravitas/Auto-GPT#openai-api-keys-configuration{Fore.RESET}"
)
backoff_msg = (
f"{Fore.RED}Error: API Bad gateway. Waiting {{backoff}} seconds...{Fore.RESET}"
)
def _wrapper(func):
@functools.wraps(func)
def _wrapped(*args, **kwargs):
user_warned = not warn_user
num_attempts = num_retries + 1 # +1 for the first attempt
for attempt in range(1, num_attempts + 1):
try:
return func(*args, **kwargs)
except RateLimitError:
if attempt == num_attempts:
raise
logger.debug(retry_limit_msg)
if not user_warned:
logger.double_check(api_key_error_msg)
user_warned = True
except APIError as e:
if (e.http_status != 502) or (attempt == num_attempts):
raise
backoff = backoff_base ** (attempt + 2)
logger.debug(backoff_msg.format(backoff=backoff))
time.sleep(backoff)
return _wrapped
return _wrapper
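# Illustrative use of the retry decorator on an arbitrary OpenAI call;
# `_list_models_example` is only a placeholder helper and is never called here.
@retry_openai_api(num_retries=3, backoff_base=1.5)
def _list_models_example():
    return openai.Model.list()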
def call_ai_function(
function: str, args: list, description: str, model: str | None = None
) -> str:
"""Call an AI function
This is a magic function that can do anything with no-code. See
https://github.com/Torantulino/AI-Functions for more info.
Args:
function (str): The function to call
args (list): The arguments to pass to the function
description (str): The description of the function
model (str, optional): The model to use. Defaults to None.
Returns:
str: The response from the function
"""
cfg = Config()
if model is None:
model = cfg.smart_llm_model
# For each arg, if any are None, convert to "None":
args = [str(arg) if arg is not None else "None" for arg in args]
# parse args to comma separated string
args: str = ", ".join(args)
messages: List[Message] = [
{
"role": "system",
"content": f"You are now the following python function: ```# {description}"
f"\n{function}```\n\nOnly respond with your `return` value.",
},
{"role": "user", "content": args},
]
return create_chat_completion(model=model, messages=messages, temperature=0)
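# Illustrative call (the function signature, arguments and description are placeholders):
#
#     call_ai_function(
#         "def add(a: int, b: int) -> int:",
#         [2, 3],
#         "Adds two integers and returns the sum.",
#     )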
# Overly simple abstraction until we create something better
# simple retry mechanism when getting a rate error or a bad gateway
def create_chat_completion(
messages: List[Message], # type: ignore
model: Optional[str] = None,
temperature: float = None,
max_tokens: Optional[int] = None,
) -> str:
"""Create a chat completion using the OpenAI API
Args:
messages (List[Message]): The messages to send to the chat completion
model (str, optional): The model to use. Defaults to None.
temperature (float, optional): The temperature to use. Defaults to 0.9.
max_tokens (int, optional): The max tokens to use. Defaults to None.
Returns:
str: The response from the chat completion
"""
cfg = Config()
if temperature is None:
temperature = cfg.temperature
num_retries = 10
warned_user = False
if cfg.debug_mode:
print(
f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}"
)
for plugin in cfg.plugins:
if plugin.can_handle_chat_completion(
messages=messages,
model=model,
temperature=temperature,
max_tokens=max_tokens,
):
message = plugin.handle_chat_completion(
messages=messages,
model=model,
temperature=temperature,
max_tokens=max_tokens,
)
if message is not None:
return message
api_manager = ApiManager()
response = None
for attempt in range(num_retries):
backoff = 2 ** (attempt + 2)
try:
if cfg.use_azure:
response = api_manager.create_chat_completion(
deployment_id=cfg.get_azure_deployment_id_for_model(model),
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
)
else:
response = api_manager.create_chat_completion(
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
)
break
except RateLimitError:
if cfg.debug_mode:
print(
f"{Fore.RED}Error: ", f"Reached rate limit, passing...{Fore.RESET}"
)
if not warned_user:
logger.double_check(
f"Please double check that you have setup a {Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. "
+ f"You can read more here: {Fore.CYAN}https://github.com/Significant-Gravitas/Auto-GPT#openai-api-keys-configuration{Fore.RESET}"
)
warned_user = True
except (APIError, Timeout) as e:
if e.http_status != 502:
raise
if attempt == num_retries - 1:
raise
if cfg.debug_mode:
print(
f"{Fore.RED}Error: ",
f"API Bad gateway. Waiting {backoff} seconds...{Fore.RESET}",
)
time.sleep(backoff)
if response is None:
logger.typewriter_log(
"FAILED TO GET RESPONSE FROM OPENAI",
Fore.RED,
"Auto-GPT has failed to get a response from OpenAI's services. "
+ f"Try running Auto-GPT again, and if the problem the persists try running it with `{Fore.CYAN}--debug{Fore.RESET}`.",
)
logger.double_check()
if cfg.debug_mode:
raise RuntimeError(f"Failed to get response after {num_retries} retries")
else:
quit(1)
resp = response.choices[0].message["content"]
for plugin in cfg.plugins:
if not plugin.can_handle_on_response():
continue
resp = plugin.on_response(resp)
return resp
def get_ada_embedding(text: str) -> List[float]:
"""Get an embedding from the ada model.
Args:
text (str): The text to embed.
Returns:
List[float]: The embedding.
"""
cfg = Config()
model = "text-embedding-ada-002"
text = text.replace("\n", " ")
if cfg.use_azure:
kwargs = {"engine": cfg.get_azure_deployment_id_for_model(model)}
else:
kwargs = {"model": model}
embedding = create_embedding(text, **kwargs)
api_manager = ApiManager()
api_manager.update_cost(
prompt_tokens=embedding.usage.prompt_tokens,
completion_tokens=0,
model=model,
)
return embedding["data"][0]["embedding"]
@retry_openai_api()
def create_embedding(
text: str,
*_,
**kwargs,
) -> openai.Embedding:
"""Create an embedding using the OpenAI API
Args:
text (str): The text to embed.
kwargs: Other arguments to pass to the OpenAI API embedding creation call.
Returns:
openai.Embedding: The embedding object.
"""
cfg = Config()
return openai.Embedding.create(
input=[text],
api_key=cfg.openai_api_key,
**kwargs,
)
| [
"\nPLACEHOLDER```\n\nOnly respond with your `return` value.",
"You are now the following python function: ```# PLACEHOLDER"
] |
2024-01-10 | Stahldavid/autocode | chat_fn~workspace~autocamel.py | # -*- coding: utf-8 -*-
"""AutoCamel.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1BsFAC_517K3xVCQwSq60wOI2KEaikLh_
"""
!pip install langchain openai
!pip install -qU pinecone-client[grpc]
!pip install tiktoken
!git clone https://github.com/hwchase17/langchain.git
from typing import List
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage,
BaseMessage,
)
import os
#embeddings = OpenAIEmbeddings(openai_api_key="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")
os.environ["OPENAI_API_KEY"] = "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" # https://platform.openai.com (Thx Michael from Twitter)
os.environ['SERPAPI_API_KEY'] = 'd4eb38ff06e003ba07a08950ec770d7d3b876e5685ff9806d3a79a5dc339e558' # https://serpapi.com/
from langchain.vectorstores import Pinecone
import pinecone
from langchain.embeddings import OpenAIEmbeddings
pinecone.init(
api_key="ed8a92c3-abd9-42d3-bfd7-f3b7415af62c", # find at app.pinecone.io
environment="us-east1-gcp" # next to api key in console
)
index = pinecone.Index("langchain-chat")
# Initialize OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
# Initialize a Pinecone vector store
vectorstore_pine = Pinecone(index=index, embedding_function=embeddings.embed_query, text_key="text")
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type="stuff", retriever=vectorstore_pine.as_retriever())
agent_research = qa
def get_context(query: str) -> str:
context = qa.run(query)
return context
input_message_with_context: HumanMessage
class CAMELAgent:
def __init__(
self,
system_message: SystemMessage,
model: ChatOpenAI,
) -> None:
self.system_message = system_message
self.model = model
self.init_messages()
def reset(self) -> None:
self.init_messages()
return self.stored_messages
def init_messages(self) -> None:
self.stored_messages = [self.system_message]
def update_messages(self, message: BaseMessage) -> List[BaseMessage]:
self.stored_messages.append(message)
return self.stored_messages
def step(
self,
combined_user_msg: HumanMessage,
) -> AIMessage:
messages = self.update_messages(combined_user_msg)
output_message = self.model(messages)
self.update_messages(output_message)
return output_message
import os
assistant_role_name = "Python Programmer"
user_role_name = "mechatronic engineerier"
task = "Develop a variable impedance control code for force feedback/haptics using webots, ros2 and webots_ros2 and T-motor ak series actuator."
word_limit = 50 # word limit for task brainstorming
assistant_role_name = "Haptics Research Assistant"
user_role_name = "Mechatronics Engineer"
task = "impedance control code using ros2 and T-motor ak series actuator(MIT MODE)."
task_specifier_sys_msg = SystemMessage(content="You can make a task more specific.")
task_specifier_prompt = (
"""Here is a task that {assistant_role_name} will help {user_role_name} to complete: {task}.
Please make it more specific. Be creative and imaginative.
Please reply with the specified task in {word_limit} words or less. Do not add anything else."""
)
task_specifier_template = HumanMessagePromptTemplate.from_template(template=task_specifier_prompt)
task_specify_agent = CAMELAgent(task_specifier_sys_msg, ChatOpenAI(temperature=1.0))
task_specifier_msg = task_specifier_template.format_messages(assistant_role_name=assistant_role_name,
user_role_name=user_role_name,
task=task, word_limit=word_limit)[0]
specified_task_msg = task_specify_agent.step(task_specifier_msg)
print(f"Specified task: {specified_task_msg.content}")
specified_task = specified_task_msg.content
specified_task = "Develop a Python program that simulates a robotic arm in Webots, employing variable impedance control to dynamically modify stiffness and damping using haptic feedback. The program should leverage ROS2 and the Webots ROS2 interface for seamless communication and real-time control of the arm's interactions with its environment."
"""\"""
assistant_inception_prompt = (
"""Never forget you are a {assistant_role_name} and I am a {user_role_name}. Never flip roles! Never instruct me!
We share a common interest in collaborating to successfully complete a task.
You must help me to complete the task.
Here is the task: {task}. Never forget our task!
I must instruct you based on your expertise and my needs to complete the task.
I must give you one instruction at a time.
You must write a specific solution that appropriately completes the requested instruction.
You must decline my instruction honestly if you cannot perform the instruction due to physical, moral, legal reasons or your capability and explain the reasons.
Do not add anything else other than your solution to my instruction.
You are never supposed to ask me any questions you only answer questions.
You are never supposed to reply with a flake solution. Explain your solutions.
Your solution must be declarative sentences and simple present tense.
Unless I say the task is completed, you should always start with:
Solution: <YOUR_SOLUTION>
<YOUR_SOLUTION> should be specific and provide preferable implementations and examples for task-solving.
Always end <YOUR_SOLUTION> with: Next request."""
)
user_inception_prompt = (
"""Never forget you are a {user_role_name} and I am a {assistant_role_name}. Never flip roles! You will always instruct me.
We share a common interest in collaborating to successfully complete a task.
I must help you to complete the task.
Here is the task: {task}. Never forget our task!
You must instruct me based on my expertise and your needs to complete the task ONLY in the following two ways:
1. Instruct with a necessary input:
Instruction: <YOUR_INSTRUCTION>
Input: <YOUR_INPUT>
2. Instruct without any input:
Instruction: <YOUR_INSTRUCTION>
Input: None
The "Instruction" describes a task or question. The paired "Input" provides further context or information for the requested "Instruction".
You must give me one instruction at a time.
I must write a response that appropriately completes the requested instruction.
I must decline your instruction honestly if I cannot perform the instruction due to physical, moral, legal reasons or my capability and explain the reasons.
You should instruct me not ask me questions.
Now you must start to instruct me using the two ways described above.
Do not add anything else other than your instruction and the optional corresponding input!
Keep giving me instructions and necessary inputs until you think the task is completed.
When the task is completed, you must only reply with a single word <CAMEL_TASK_DONE>.
Never say <CAMEL_TASK_DONE> unless my responses have solved your task."""
)
def combine_context_and_message(context: str, message: HumanMessage) -> HumanMessage:
combined_content = f"{context}\n\n{message.content}"
combined_message = HumanMessage(content=combined_content)
return combined_message
def get_sys_msgs(assistant_role_name: str, user_role_name: str, task: str):
assistant_sys_template = SystemMessagePromptTemplate.from_template(template=assistant_inception_prompt)
assistant_sys_msg = assistant_sys_template.format_messages(assistant_role_name=assistant_role_name, user_role_name=user_role_name, task=task)[0]
user_sys_template = SystemMessagePromptTemplate.from_template(template=user_inception_prompt)
user_sys_msg = user_sys_template.format_messages(assistant_role_name=assistant_role_name, user_role_name=user_role_name, task=task)[0]
return assistant_sys_msg, user_sys_msg
assistant_sys_msg, user_sys_msg = get_sys_msgs(assistant_role_name, user_role_name, specified_task)
assistant_agent = CAMELAgent(assistant_sys_msg, ChatOpenAI(temperature=0.2))
user_agent = CAMELAgent(user_sys_msg, ChatOpenAI(temperature=0.2))
# Reset agents
assistant_agent.reset()
user_agent.reset()
context = ""
# Initialize chats
assistant_msg = HumanMessage(
content=(f"{user_sys_msg.content}. "
"Now start to give me introductions one by one. "
"Only reply with Instruction and Input."))
user_msg = HumanMessage(content=f"{assistant_sys_msg.content}")
user_msg = assistant_agent.step(user_msg)
query = user_msg.content # Use the content of user_msg as the query
context = get_context(query)
combined_user_msg = combine_context_and_message(context, user_msg)
# ... (the rest of the code above the loop)
print(f"Original task prompt:\n{task}\n")
print(f"Specified task prompt:\n{specified_task}\n")
chat_turn_limit, n = 30, 0
while n < chat_turn_limit:
n += 1
user_ai_msg = user_agent.step(assistant_msg)
user_msg = HumanMessage(content=user_ai_msg.content)
print(f"AI User ({user_role_name}):\n\n{user_msg.content}\n\n")
# Call get_context and combine_context_and_message inside the loop
query = user_msg.content
context = get_context(query)
combined_user_msg = combine_context_and_message(context, user_msg)
assistant_ai_msg = assistant_agent.step(combined_user_msg)
assistant_msg = HumanMessage(content=assistant_ai_msg.content)
print(f"AI Assistant ({assistant_role_name}):\n\n{assistant_msg.content}\n\n")
if "<CAMEL_TASK_DONE>" in user_msg.content:
break
print(context)
# ... (the rest of the code above the loop)
print(f"Original task prompt:\n{task}\n")
print(f"Specified task prompt:\n{specified_task}\n")
chat_turn_limit, n = 30, 0
while n < chat_turn_limit:
n += 1
user_ai_msg = user_agent.step(assistant_msg)
user_msg = HumanMessage(content=user_ai_msg.content)
print(f"AI User ({user_role_name}):\n\n{user_msg.content}\n\n")
# Call get_context and combine_context_and_message inside the loop
query = user_msg.content
context = get_context(query)
combined_user_msg = combine_context_and_message(context, user_msg)
assistant_ai_msg = assistant_agent.step(combined_user_msg)
assistant_msg = HumanMessage(content=assistant_ai_msg.content)
print(f"AI Assistant ({assistant_role_name}):\n\n{assistant_msg.content}\n\n")
if "<CAMEL_TASK_DONE>" in user_msg.content:
break
get_context("what is impedance control?")
# ... (the rest of the code above the loop)
print(f"Original task prompt:\n{task}\n")
print(f"Specified task prompt:\n{specified_task}\n")
chat_turn_limit, n = 30, 0
while n < chat_turn_limit:
n += 1
user_ai_msg = user_agent.step(assistant_msg)
user_msg = HumanMessage(content=user_ai_msg.content)
print(f"AI User ({user_role_name}):\n\n{user_msg.content}\n\n")
# Call get_context and combine_context_and_message inside the loop
query = user_msg.content
context = get_context(query)
combined_user_msg = combine_context_and_message(context, user_msg)
assistant_ai_msg = assistant_agent.step(combined_user_msg)
assistant_msg = HumanMessage(content=assistant_ai_msg.content)
print(f"AI Assistant ({assistant_role_name}):\n\n{assistant_msg.content}\n\n")
if "<CAMEL_TASK_DONE>" in user_msg.content:
break
# ... (the rest of the code above the loop)
print(f"Original task prompt:\n{task}\n")
print(f"Specified task prompt:\n{specified_task}\n")
chat_turn_limit, n = 30, 0
while n < chat_turn_limit:
n += 1
user_ai_msg = user_agent.step(assistant_msg)
user_msg = HumanMessage(content=user_ai_msg.content)
print(f"AI User ({user_role_name}):\n\n{user_msg.content}\n\n")
# Call get_context and combine_context_and_message inside the loop
query = user_msg.content
context = get_context(query)
combined_user_msg = combine_context_and_message(context, user_msg)
assistant_ai_msg = assistant_agent.step(combined_user_msg)
assistant_msg = HumanMessage(content=assistant_ai_msg.content)
print(f"AI Assistant ({assistant_role_name}):\n\n{assistant_msg.content}\n\n")
if "<CAMEL_TASK_DONE>" in user_msg.content:
break
print(combined_user_msg)
# ... (the rest of the code above the loop)
print(f"Original task prompt:\n{task}\n")
print(f"Specified task prompt:\n{specified_task}\n")
chat_turn_limit, n = 30, 0
while n < chat_turn_limit:
n += 1
user_ai_msg = user_agent.step(assistant_msg)
user_msg = HumanMessage(content=user_ai_msg.content)
print(f"AI User ({user_role_name}):\n\n{user_msg.content}\n\n")
# Call get_context and combine_context_and_message inside the loop
query = user_msg.content
context = get_context(query)
combined_user_msg = combine_context_and_message(context, user_msg)
assistant_ai_msg = assistant_agent.step(combined_user_msg)
assistant_msg = HumanMessage(content=assistant_ai_msg.content)
print(f"AI Assistant ({assistant_role_name}):\n\n{assistant_msg.content}\n\n")
if "<CAMEL_TASK_DONE>" in user_msg.content:
break
# ... (the rest of the code above the loop)
print(f"Original task prompt:\n{task}\n")
print(f"Specified task prompt:\n{specified_task}\n")
chat_turn_limit, n = 30, 0
while n < chat_turn_limit:
n += 1
user_ai_msg = user_agent.step(assistant_msg)
user_msg = HumanMessage(content=user_ai_msg.content)
print(f"AI User ({user_role_name}):\n\n{user_msg.content}\n\n")
# Call get_context and combine_context_and_message inside the loop
query = user_msg.content
context = get_context(query)
combined_user_msg = combine_context_and_message(context, user_msg)
assistant_ai_msg = assistant_agent.step(combined_user_msg)
assistant_msg = HumanMessage(content=assistant_ai_msg.content)
print(f"AI Assistant ({assistant_role_name}):\n\n{assistant_msg.content}\n\n")
if "<CAMEL_TASK_DONE>" in user_msg.content:
break
# ... (the rest of the code above the loop)
print(f"Original task prompt:\n{task}\n")
print(f"Specified task prompt:\n{specified_task}\n")
chat_turn_limit, n = 30, 0
while n < chat_turn_limit:
n += 1
user_ai_msg = user_agent.step(assistant_msg)
user_msg = HumanMessage(content=user_ai_msg.content)
print(f"AI User ({user_role_name}):\n\n{user_msg.content}\n\n")
# Call get_context and combine_context_and_message inside the loop
query = user_msg.content
context = get_context(query)
combined_user_msg = combine_context_and_message(context, user_msg)
assistant_ai_msg = assistant_agent.step(combined_user_msg)
assistant_msg = HumanMessage(content=assistant_ai_msg.content)
print(f"AI Assistant ({assistant_role_name}):\n\n{assistant_msg.content}\n\n")
if "<CAMEL_TASK_DONE>" in user_msg.content:
break
# ... (the rest of the code above the loop)
print(f"Original task prompt:\n{task}\n")
print(f"Specified task prompt:\n{specified_task}\n")
chat_turn_limit, n = 30, 0
while n < chat_turn_limit:
n += 1
user_ai_msg = user_agent.step(assistant_msg)
user_msg = HumanMessage(content=user_ai_msg.content)
print(f"AI User ({user_role_name}):\n\n{user_msg.content}\n\n")
# Call get_context and combine_context_and_message inside the loop
query = user_msg.content
context = get_context(query)
combined_user_msg = combine_context_and_message(context, user_msg)
assistant_ai_msg = assistant_agent.step(combined_user_msg)
assistant_msg = HumanMessage(content=assistant_ai_msg.content)
print(f"AI Assistant ({assistant_role_name}):\n\n{assistant_msg.content}\n\n")
if "<CAMEL_TASK_DONE>" in user_msg.content:
break | [
"You can make a task more specific.",
"Here is a task that {assistant_role_name} will help {user_role_name} to complete: {task}.\nPlease make it more specific. Be creative and imaginative.\nPlease reply with the specified task in {word_limit} words or less. Do not add anything else."
] |
2024-01-10 | Stahldavid/autocode | chat_fn~chat~project~research_agent.py | # Import langchain modules
from langchain.agents import BaseMultiActionAgent
from langchain.agents import AgentActionOutputParser
from langchain.tools import SerpAPIWrapper
from langchain.tools import RetrievalQA
# Import language model (assumed to be the same custom OpenAI wrapper used in pseudo_code_agent.py)
from openai_request_llm import OpenAI
# Define prompt template
prompt_template = """
This is a research agent that can perform different actions related to research tasks, such as searching and semantic searching.
You can ask me anything and I will try to answer or delegate the task to another tool.
To use standard search, use the syntax: #serp_api: query
To use semantic search, use the syntax: #retrieval_qa: query
To chat with me, just type anything else.
For example:
#serp_api: How many planets are there in the solar system?
#retrieval_qa: What is natural language processing?
Hello, how are you today?
"""
# Choose language model
language_model = OpenAI()
# Define stop sequence
stop_sequence = "\n"
# Define output parser
output_parser = AgentActionOutputParser()
# Load tools
serp_api = SerpAPIWrapper()
retrieval_qa = RetrievalQA()
# Create custom agent class by subclassing BaseMultiActionAgent
class ResearchAgent(BaseMultiActionAgent):
def __init__(self, prompt_template, language_model, stop_sequence, output_parser):
super().__init__(prompt_template, language_model, stop_sequence, output_parser)
def decide_action(self, user_input):
# Override this method to decide which action to take based on user input
# You can use any logic or condition you want
# Return an action name and an action input
# If user input starts with #serp_api:, use standard search tool
if user_input.startswith("#serp_api:"):
action_name = "serp_api"
action_input = user_input.replace("#serp_api:", "").strip()
return action_name, action_input
# If user input starts with #retrieval_qa:, use semantic search tool
elif user_input.startswith("#retrieval_qa:"):
action_name = "retrieval_qa"
action_input = user_input.replace("#retrieval_qa:", "").strip()
return action_name, action_input
# Otherwise, chat with user using language model
else:
action_name = "chat"
action_input = user_input.strip()
return action_name, action_input
def execute_action(self, action_name, action_input):
# Override this method to execute the action using the appropriate tool or language model
# You can use any logic or condition you want
# Return an output string
# If action name is serp_api, use serp_api tool to perform standard search
if action_name == "serp_api":
output = serp_api.run(action_input)
# If action name is retrieval_qa, use retrieval_qa tool to perform semantic search
elif action_name == "retrieval_qa":
output = retrieval_qa.run(action_input)
# If action name is chat, use language model to generate a chat response
else:
output = self.language_model.generate(
self.prompt_template
+ "\n"
+ self.stop_sequence
+ "\n"
+ "User: "
+ action_input
+ "\n"
+ "Agent:",
stop=self.stop_sequence,
)
| [
"\nThis is a research agent that can perform different actions related to research tasks, such as searching and semantic searching.\nYou can ask me anything and I will try to answer or delegate the task to another tool.\nTo use standard search, use the syntax: #serp_api: query\nTo use semantic search, use the syntax: #retrieval_qa: query\nTo chat with me, just type anything else.\nFor example:\n#serp_api: How many planets are there in the solar system?\n#retrieval_qa: What is natural language processing?\nHello, how are you today?\n"
] |
2024-01-10 | Stahldavid/autocode | chat_fn~chat~camel~ToT2.py | import logging
import os
import re
from queue import Queue
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain.prompts import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
# Load environment variables from .env file
load_dotenv()
# Access the API key from the environment variable
openai_api_key = os.getenv('OPENAI_API_KEY')
# Set up logging
logging.basicConfig(level=logging.INFO)
# Define constants for evaluation criteria
MAX_EVALUATIONS = 11
EVALUATION_SCORE_THRESHOLD = 7
decomposition_system_template = SystemMessagePromptTemplate.from_template(
"You are an AI that decomposes code generation tasks. Please output the plan starting with the header 'Plan:' "
"and then followed by a numbered list of steps. "
)
decomposition_human_template = HumanMessagePromptTemplate.from_template(
"Decompose the code generation task in a list: {task}"
)
generation_system_template = SystemMessagePromptTemplate.from_template(
"You are an AI that generates possible next steps for code."
)
generation_human_template = HumanMessagePromptTemplate.from_template(
"Generate a possible next step for the code: {code}"
)
evaluation_system_template = SystemMessagePromptTemplate.from_template(
"You are an AI that evaluates the quality of proposed code on a scale from 0 to 10. Just responde score : x"
)
evaluation_human_template = HumanMessagePromptTemplate.from_template(
"Evaluate the quality of the proposed code on a scale from 0 to 10.: {code}"
)
search_system_template = SystemMessagePromptTemplate.from_template(
"You are an AI that chooses the next step to take from proposed next steps."
)
search_human_template = HumanMessagePromptTemplate.from_template(
"From the proposed next steps, choose the next step to take: {proposals}"
)
# Define a regular expression pattern for evaluation score
evaluation_pattern = r"score\s*:\s*(\d+)"
class EvaluationChain(LLMChain):
def __call__(self, *args, **kwargs):
try:
result = super().__call__(*args, **kwargs)
match = re.search(bytes(evaluation_pattern, 'utf-8'), bytes(result["text"], 'utf-8'))
if match:
score_value = match.group(1)
int_result = int(score_value)
else:
print(f'No score value found in result: {result["text"]}')
raise ValueError("No score value found in result!")
return int_result
except Exception as e:
logging.error(f"Error during evaluation: {str(e)}")
return None
decomposition_llm = ChatOpenAI(temperature=0.3, model_name='gpt-3.5-turbo')
generation_llm = ChatOpenAI(temperature=0.7, model_name='gpt-3.5-turbo')
evaluation_llm = ChatOpenAI(temperature=0.5, model_name='gpt-3.5-turbo')
search_llm = ChatOpenAI(temperature=0.2, model_name='gpt-3.5-turbo')
decomposition_chain = LLMChain(llm=decomposition_llm, prompt=ChatPromptTemplate.from_messages([decomposition_system_template, decomposition_human_template]))
generation_chain = LLMChain(llm=generation_llm, prompt=ChatPromptTemplate.from_messages([generation_system_template, generation_human_template]))
evaluation_chain = EvaluationChain(llm=evaluation_llm, prompt=ChatPromptTemplate.from_messages([evaluation_system_template, evaluation_human_template]))
search_chain = LLMChain(llm=search_llm, prompt=ChatPromptTemplate.from_messages([search_system_template, search_human_template]))
def validate_task(task):
if not task or not isinstance(task, str) or len(task.strip()) == 0:
logging.error("Invalid task. Please provide a non-empty string.")
return False
return True
def process_task(task, queue, stack, code, evaluation_counter):
try:
print(f"Decomposing task: {task}")
decomposition = decomposition_chain(task)
print(decomposition)
# split the decomposition text by newline characters
steps = decomposition["text"].split("\n")
for step in steps:
print(f"Generating next step for part: {step}")
generation = generation_chain(step)
if not generation["text"].strip(): # If no code is generated, raise ValueError
raise ValueError("No code generated!")
print(f"Evaluating generated code: {generation}")
evaluation_score = evaluation_chain({"code": generation["text"]}) # Pass the generated code as an argument
evaluation_counter += 1
if evaluation_score >= EVALUATION_SCORE_THRESHOLD:
print(f"Generated code meets the threshold. Added to queue and stack.")
queue.put(generation)
stack.append(generation)
elif stack:
print(f"Generated code doesn't meet the threshold. Reverting to last state in stack.")
last_state = stack.pop()
queue.put(last_state)
if not queue.empty():
print(f"Choosing next step from the proposed steps in the queue.")
search = search_chain(queue.queue)
            code += search["text"]
except Exception as e:
logging.error(f"An error occurred: {str(e)}")
queue.queue.clear()
stack.clear()
return code, evaluation_counter
def main():
task = 'Write a variable impedance control code for force feedback using ros2, webots and webots_ros2'
if not validate_task(task):
return
queue = Queue()
stack = []
queue.put(task)
stack.append(task)
code = """"""
evaluation_counter = 0
while not queue.empty():
task = queue.get()
code, evaluation_counter = process_task(task, queue, stack, code, evaluation_counter)
if evaluation_counter >= MAX_EVALUATIONS:
break
logging.info(f"Final code: {code}")
if __name__ == "__main__":
main()
| [
"Evaluate the quality of the proposed code on a scale from 0 to 10.: {code}",
"You are an AI that evaluates the quality of proposed code on a scale from 0 to 10. Just responde score : x",
"Decompose the code generation task in a list: {task}",
"From the proposed next steps, choose the next step to take: {proposals}",
"and then followed by a numbered list of steps. ",
"You are an AI that decomposes code generation tasks. Please output the plan starting with the header 'Plan:' and then followed by a numbered list of steps. ",
"You are an AI that generates possible next steps for code.",
"You are an AI that chooses the next step to take from proposed next steps.",
"[PLACEHOLDER, PLACEHOLDER]",
"You are an AI that decomposes code generation tasks. Please output the plan starting with the header 'Plan:' ",
"Generate a possible next step for the code: {code}"
] |
2024-01-10 | Stahldavid/autocode | chat_fn~chat~project~pseudo_code_agent.py | # Import langchain modules
from langchain.agents import BaseMultiActionAgent
from langchain.agents import AgentActionOutputParser
from langchain.tools import UnixcoderTool
from langchain.chains import PseudoGenChain
# Import language models
from openai_request_llm import OpenAI # Custom LLM based on OpenAI
# Define prompt template
prompt_template = """
This is a pseudocode agent that can perform different actions related to generating and understanding pseudocode using PseudoGenChain and Unixcoder tools.
You can ask me to generate pseudocode for any task and specify the language you want.
To specify the language, use the syntax: #language: task
For example:
#python: write a function that returns the factorial of a number
#javascript: reverse a string
#c++: write a function that returns the sum of two numbers
You can also ask me to understand pseudocode and translate it to natural language by using the syntax: understand #language: pseudocode
For example:
understand #python: def fib(n):
if n < 2:
return n
else:
return fib(n-1) + fib(n-2)
"""
# Choose language model
language_model = OpenAI()
# Define stop sequence
stop_sequence = "\n"
# Define output parser
output_parser = AgentActionOutputParser()
# Load tools
unixcoder_tool = UnixcoderTool()
# Load chains
pseudogen_chain = PseudoGenChain()
# Create custom agent class by subclassing BaseMultiActionAgent
class PseudoCodeAgent(BaseMultiActionAgent):
def __init__(self, prompt_template, language_model, stop_sequence, output_parser):
super().__init__(prompt_template, language_model, stop_sequence, output_parser)
def decide_action(self, user_input):
# Override this method to decide which action to take based on user input
# You can use any logic or condition you want
# Return an action name and an action input
# If user input starts with #language:, use pseudogen chain to generate pseudocode for that language and task
if user_input.startswith("#"):
action_name = "pseudogen"
action_input = user_input.strip()
return action_name, action_input
# If user input starts with understand #language:, use unixcoder tool to understand pseudocode and translate it to natural language
elif user_input.startswith("understand #"):
action_name = "unixcoder"
action_input = user_input.replace("understand ", "").strip()
return action_name, action_input
# Otherwise, chat with user using language model
else:
action_name = "chat"
action_input = user_input.strip()
return action_name, action_input
def execute_action(self, action_name, action_input):
# Override this method to execute the action using the appropriate tool or chain
# You can use any logic or condition you want
# Return an output string
# If action name is pseudogen, use pseudogen chain to generate pseudocode for the given language and task
if action_name == "pseudogen":
output = pseudogen_chain.run(action_input)
# If action name is unixcoder, use unixcoder tool to understand pseudocode and translate it to natural language
elif action_name == "unixcoder":
output = unixcoder_tool.run(action_input)
# If action name is chat, use language model to generate a chat response
else:
output = self.language_model.generate(self.prompt_template + "\n" + self.stop_sequence + "\n" + "User: " + action_input + "\n" + "Agent:", stop=self.stop_sequence)
# Return the output string
return output
def parse_output(self, output):
# Override this method to parse the output using the output parser
# You can use any logic or condition you want
# Return a parsed output object
# Use the output parser to parse the output string into an object with attributes such as text and type
parsed_output = self.output_parser.parse(output)
# Return the parsed output object
return parsed_output | [
"\nThis is a pseudocode agent that can perform different actions related to generating and understanding pseudocode using PseudoGenChain and Unixcoder tools.\nYou can ask me to generate pseudocode for any task and specify the language you want.\nTo specify the language, use the syntax: #language: task\nFor example:\n#python: write a function that returns the factorial of a number\n#javascript: reverse a string\n#c++: write a function that returns the sum of two numbers\n\nYou can also ask me to understand pseudocode and translate it to natural language by using the syntax: understand #language: pseudocode\nFor example:\nunderstand #python: def fib(n):\n if n < 2:\n return n\n else:\n return fib(n-1) + fib(n-2)\n"
] |
2024-01-10 | vlukiyanov/pt-avitm | ptavitm~sklearn_api.py | from gensim.corpora.dictionary import Dictionary
from gensim.models.coherencemodel import CoherenceModel
from gensim.matutils import Sparse2Corpus
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.exceptions import NotFittedError
import torch
from torch.optim import Adam
from torch.utils.data.sampler import WeightedRandomSampler
from ptavitm.model import train, predict
from ptavitm.vae import ProdLDA
from ptavitm.utils import CountTensorDataset
# TODO decide how a partial_fit method API might work and implement, also more thought into score and logging
class ProdLDATransformer(TransformerMixin, BaseEstimator):
def __init__(
self,
cuda=None,
batch_size=200,
epochs=80,
hidden1_dimension=100,
hidden2_dimension=100,
topics=50,
lr=0.001,
samples=20000,
score_num=7,
score_type="coherence",
) -> None:
self.cuda = torch.cuda.is_available() if cuda is None else cuda
self.batch_size = batch_size
self.epochs = epochs
self.hidden1_dimension = hidden1_dimension
self.hidden2_dimension = hidden2_dimension
self.topics = topics
self.lr = lr
self.samples = samples
self.autoencoder = None
self.score_type = score_type
self.score_num = score_num
if self.score_type not in ["coherence"]:
raise ValueError('score_type must be "coherence"')
def fit(self, X, y=None) -> None:
documents, features = X.shape
ds = CountTensorDataset(X.astype(np.float32))
self.autoencoder = ProdLDA(
in_dimension=features,
hidden1_dimension=self.hidden1_dimension,
hidden2_dimension=self.hidden2_dimension,
topics=self.topics,
)
if self.cuda:
self.autoencoder.cuda()
ae_optimizer = Adam(
self.autoencoder.parameters(), lr=self.lr, betas=(0.99, 0.999)
)
train(
ds,
self.autoencoder,
cuda=self.cuda,
validation=None,
epochs=self.epochs,
batch_size=self.batch_size,
optimizer=ae_optimizer,
sampler=WeightedRandomSampler(
torch.ones(documents), min(documents, self.samples)
),
silent=True,
num_workers=0, # TODO causes a bug to change this on Mac
)
def transform(self, X):
if self.autoencoder is None:
raise NotFittedError
self.autoencoder.eval()
ds = CountTensorDataset(X.astype(np.float32))
output = predict(
ds,
self.autoencoder,
cuda=self.cuda,
encode=True,
silent=True,
batch_size=self.batch_size,
num_workers=0, # TODO causes a bug to change this on Mac
)
return output.cpu().numpy()
def score(self, X, y=None, sample_weight=None) -> float:
# TODO this needs further testing for correctness, WIP
if self.autoencoder is None:
raise NotFittedError
self.autoencoder.eval()
corpus = Sparse2Corpus(X, documents_columns=False)
decoder_weight = self.autoencoder.decoder.linear.weight.detach().cpu()
id2word = {index: str(index) for index in range(X.shape[1])}
topics = [
[str(item.item()) for item in topic]
for topic in decoder_weight.topk(min(self.score_num, X.shape[1]), dim=0)[
1
].t()
]
cm = CoherenceModel(
topics=topics,
corpus=corpus,
dictionary=Dictionary.from_corpus(corpus, id2word),
coherence="u_mass",
)
return cm.get_coherence()
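# Minimal usage sketch (illustrative only): fit on a random sparse document-term
# matrix; real input should be a scipy CSR matrix of per-document token counts.
if __name__ == "__main__":
    from scipy.sparse import random as sparse_random
    X = sparse_random(100, 500, density=0.05, format="csr")
    prodlda = ProdLDATransformer(epochs=1, topics=10, samples=200)
    prodlda.fit(X)
    print(prodlda.transform(X).shape)  # expected shape: (100, 10)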
| [] |
2024-01-10 | vlukiyanov/pt-avitm | examples~20news~20news.py | import click
from gensim.corpora.dictionary import Dictionary
from gensim.models.coherencemodel import CoherenceModel
from gensim.matutils import Dense2Corpus
import numpy as np
from torch.optim import Adam
import torch
from torch.utils.data import TensorDataset
from tensorboardX import SummaryWriter
import pickle
from ptavitm.model import train
from ptavitm.vae import ProdLDA
@click.command()
@click.option(
"--cuda", help="whether to use CUDA (default False).", type=bool, default=False
)
@click.option(
"--batch-size", help="training batch size (default 200).", type=int, default=200
)
@click.option("--epochs", help="number of epochs (default 80).", type=int, default=80)
@click.option(
"--top-words",
help="number of top words to report per topic (default 12).",
type=int,
default=12,
)
@click.option(
"--testing-mode",
help="whether to run in testing mode (default False).",
type=bool,
default=False,
)
def main(
cuda, batch_size, epochs, top_words, testing_mode,
):
print("Loading input data")
# TODO fix relative paths
input_train = np.load("data/train.txt.npy", encoding="bytes")
input_val = np.load("data/test.txt.npy", encoding="bytes")
with open("data/vocab.pkl", "rb") as f:
vocab = pickle.load(f)
reverse_vocab = {vocab[word]: word for word in vocab}
indexed_vocab = [reverse_vocab[index] for index in range(len(reverse_vocab))]
data_train = np.array(
[
np.bincount(doc.astype("int"), minlength=len(vocab))
for doc in input_train
if doc.sum() > 0
]
)
data_val = np.array(
[
np.bincount(doc.astype("int"), minlength=len(vocab))
for doc in input_val
if doc.sum() > 0
]
)
corpus = Dense2Corpus(data_train, documents_columns=False)
writer = SummaryWriter() # create the TensorBoard object
# callback function to call during training, uses writer from the scope
def training_callback(autoencoder, epoch, lr, loss, perplexity):
decoder_weight = autoencoder.decoder.linear.weight.detach().cpu()
topics = [
[reverse_vocab[item.item()] for item in topic]
for topic in decoder_weight.topk(top_words, dim=0)[1].t()
]
cm = CoherenceModel(
topics=topics,
corpus=corpus,
dictionary=Dictionary.from_corpus(corpus, reverse_vocab),
coherence="u_mass",
)
coherence = cm.get_coherence()
coherences = cm.get_coherence_per_topic()
for index, topic in enumerate(topics):
print(str(index) + ":" + str(coherences[index]) + ":" + ",".join(topic))
print(coherence)
writer.add_scalars(
"data/autoencoder",
{"lr": lr, "loss": loss, "perplexity": perplexity, "coherence": coherence,},
global_step=epoch,
)
ds_train = TensorDataset(torch.from_numpy(data_train).float())
ds_val = TensorDataset(torch.from_numpy(data_val).float())
autoencoder = ProdLDA(
in_dimension=len(vocab), hidden1_dimension=100, hidden2_dimension=100, topics=50
)
if cuda:
autoencoder.cuda()
print("Training stage.")
ae_optimizer = Adam(autoencoder.parameters(), 0.001, betas=(0.99, 0.999))
train(
ds_train,
autoencoder,
cuda=cuda,
validation=ds_val,
epochs=epochs,
batch_size=batch_size,
optimizer=ae_optimizer,
update_callback=training_callback,
)
autoencoder.eval()
decoder_weight = autoencoder.decoder.linear.weight.detach().cpu()
topics = [
[reverse_vocab[item.item()] for item in topic]
for topic in decoder_weight.topk(top_words, dim=0)[1].t()
]
cm = CoherenceModel(
topics=topics,
corpus=corpus,
dictionary=Dictionary.from_corpus(corpus, reverse_vocab),
coherence="u_mass",
)
coherence = cm.get_coherence()
coherences = cm.get_coherence_per_topic()
for index, topic in enumerate(topics):
print(str(index) + ":" + str(coherences[index]) + ":" + ",".join(topic))
print(coherence)
if not testing_mode:
writer.add_embedding(
autoencoder.encoder.linear1.weight.detach().cpu().t(),
metadata=indexed_vocab,
tag="feature_embeddings",
)
writer.close()
if __name__ == "__main__":
main()
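# Example invocation (run from the examples/20news directory; all flags are optional):
#
#     python 20news.py --cuda False --epochs 80 --top-words 12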
| [] |
2024-01-10 | vlukiyanov/pt-avitm | examples~wine_reviews~wine_reviews.py | import click
from gensim.corpora.dictionary import Dictionary
from gensim.models.coherencemodel import CoherenceModel
from gensim.matutils import Sparse2Corpus
import torch
from torch.optim import Adam
from torch.utils.data.sampler import WeightedRandomSampler
from scipy.sparse import load_npz
from tensorboardX import SummaryWriter
import pickle
from ptavitm.model import train
from ptavitm.vae import ProdLDA
from ptavitm.utils import CountTensorDataset
@click.command()
@click.option(
"--cuda", help="whether to use CUDA (default False).", type=bool, default=False
)
@click.option(
"--batch-size", help="training batch size (default 200).", type=int, default=200
)
@click.option("--epochs", help="number of epochs (default 80).", type=int, default=80)
@click.option(
"--top-words",
help="number of top words to report per topic (default 12).",
type=int,
default=20,
)
@click.option(
"--testing-mode",
help="whether to run in testing mode (default False).",
type=bool,
default=False,
)
@click.option(
"--verbose-mode",
help="whether to run in verbose mode (default False).",
type=bool,
default=False,
)
def main(cuda, batch_size, epochs, top_words, testing_mode, verbose_mode):
print("Loading input data")
# TODO fix relative paths
data_train = load_npz("data/train.txt.npz")
data_val = load_npz("data/test.txt.npz")
corpus = Sparse2Corpus(data_train, documents_columns=False)
with open("data/vocab.pkl", "rb") as f:
vocab = pickle.load(f)
reverse_vocab = {vocab[word]: word for word in vocab}
indexed_vocab = [reverse_vocab[index] for index in range(len(reverse_vocab))]
writer = SummaryWriter() # create the TensorBoard object
# callback function to call during training, uses writer from the scope
def training_callback(autoencoder, epoch, lr, loss, perplexity):
if verbose_mode:
decoder_weight = autoencoder.decoder.linear.weight.detach().cpu()
topics = [
[reverse_vocab[item.item()] for item in topic]
for topic in decoder_weight.topk(top_words, dim=0)[1].t()
]
cm = CoherenceModel(
topics=topics,
corpus=corpus,
dictionary=Dictionary.from_corpus(corpus, reverse_vocab),
coherence="u_mass",
)
coherence = cm.get_coherence()
coherences = cm.get_coherence_per_topic()
for index, topic in enumerate(topics):
print(str(index) + ":" + str(coherences[index]) + ":" + ",".join(topic))
print(coherence)
else:
coherence = 0
writer.add_scalars(
"data/autoencoder",
{"lr": lr, "loss": loss, "perplexity": perplexity, "coherence": coherence,},
global_step=epoch,
)
ds_train = CountTensorDataset(data_train)
ds_val = CountTensorDataset(data_val)
autoencoder = ProdLDA(
in_dimension=len(vocab), hidden1_dimension=100, hidden2_dimension=100, topics=50
)
if cuda:
autoencoder.cuda()
print("Training stage.")
ae_optimizer = Adam(autoencoder.parameters(), 0.0001, betas=(0.99, 0.999))
train(
ds_train,
autoencoder,
cuda=cuda,
validation=ds_val,
epochs=epochs,
batch_size=batch_size,
optimizer=ae_optimizer,
update_callback=training_callback,
sampler=WeightedRandomSampler(torch.ones(data_train.shape[0]), 20000),
num_workers=4,
)
autoencoder.eval()
decoder_weight = autoencoder.decoder.linear.weight.detach().cpu()
topics = [
[reverse_vocab[item.item()] for item in topic]
for topic in decoder_weight.topk(top_words, dim=0)[1].t()
]
cm = CoherenceModel(
topics=topics,
corpus=corpus,
dictionary=Dictionary.from_corpus(corpus, reverse_vocab),
coherence="u_mass",
)
coherence = cm.get_coherence()
coherences = cm.get_coherence_per_topic()
for index, topic in enumerate(topics):
print(str(index) + ":" + str(coherences[index]) + ":" + ",".join(topic))
print(coherence)
if not testing_mode:
writer.add_embedding(
autoencoder.encoder.linear1.weight.detach().cpu().t(),
metadata=indexed_vocab,
tag="feature_embeddings",
)
writer.close()
if __name__ == "__main__":
main()
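
# Illustrative invocation (a sketch; values are examples only). It assumes the preprocessed
# files data/train.txt.npz, data/test.txt.npz and data/vocab.pkl referenced above exist:
#   python wine_reviews.py --cuda False --batch-size 200 --epochs 80 --top-words 20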
| [] |
2024-01-10 | shin5ok/slack-routine-notifying | exporter.py | import abc
from typing import Union
class BaseExporter(abc.ABC):
    def __init__(self, data: dict, info: dict) -> None:
        self.data = data
        self.info = info
@abc.abstractmethod
def send(self) -> bool:
...
class GoogleChatExporter(BaseExporter):
def __init__(self, data: dict, info: dict, llm_template: str, model_name: str = "") -> None:
from dotenv import load_dotenv
import os
from os.path import join, dirname
dotenv_path = "./config/.env"
load_dotenv(dotenv_path)
self.data = data
self.limit = os.environ.get("ASK_MEMBER_LIMIT", "2")
self.webhook = os.environ.get("WEBHOOK_URL")
self.members_regexp = os.environ.get("MEMBERS")
self.info = info
self.template = "template.txt"
self.model_name = model_name
self.llm_template = llm_template
def send(self, is_test: bool = False) -> bool:
import requests
import json
import os
post_data = self._gen_data()
webhook = self.webhook
if is_test:
webhook = os.environ.get("TEST_WEBHOOK_URL")
response = requests.post(
webhook,
data=json.dumps({"text": post_data}),
headers={"Content-Type": "application/json"},
)
return response.content
def _gen_data(self) -> str:
import re
from datetime import datetime as dt
import datetime
RECENT_DAYS: int = 7
data = self.data
limit = int(self.limit)
last_remark_by_user: dict = self.info.get("LAST_REMARK_BY_USER")
regexp = re.compile(self.members_regexp, flags=re.IGNORECASE)
from texttable import Texttable
actual = f"直近{self.info['OLDEST_DAYS']}日の これまでの実績\n"
actual += "```\n"
rows = []
for k, v in data.items():
if not regexp.match(k):
continue
row = [f"{k}さん", f"{v[0]}回"]
actual += f"{k}さん {v[0]}回"
if v[1] in last_remark_by_user:
actual += f", 最終投稿日 {last_remark_by_user[v[1]].strftime('%Y/%m/%d')}"
row.append(last_remark_by_user[v[1]].strftime("%Y/%m/%d"))
else:
actual += ", 投稿なし"
row.append("投稿なし")
actual += "\n"
rows.append(row)
actual += "```\n"
gen = ""
now = dt.now(datetime.timezone(datetime.timedelta(hours=9)))
for k, v in sorted(data.items(), key=lambda x: x[1][0]):
if not regexp.match(k):
continue
# skip if user posted something in RECENT_DAYS
if v[1] in last_remark_by_user:
ts = last_remark_by_user[v[1]]
delta = now - ts
if delta.days < RECENT_DAYS:
continue
if limit > 0:
gen += f"*{k}さん*, "
limit -= 1
if gen == "":
gen += "*みなさま*\n"
gen += "(該当する人がいませんでした)"
else:
if self.template:
with open(self.template) as f:
gen += f.read()
t = Texttable()
t.set_deco(Texttable.HEADER)
t.set_cols_dtype(["t", "t", "t"])
rows[:0] = [["名前", "実績", "最終投稿日"]]
t.add_rows(rows)
gen += "\n\n" + "```\n" + t.draw() + "\n```\n"
gen += "\n"
return gen
class GoogleChatExporterWithLLM(GoogleChatExporter):
def get_llm(self, message: str):
from langchain.llms import VertexAI
llm_template = self.llm_template
with open(llm_template) as f:
data = f.read()
data = data.replace("##data##", message)
print(data)
from usellm import LLM
return LLM(self.model_name).choose_candidates(data)
def _gen_data(self) -> str:
import re
from datetime import datetime as dt
import datetime
RECENT_DAYS: int = 7
data = self.data
limit = int(self.limit)
last_remark_by_user: Union[dict, None] = self.info.get("LAST_REMARK_BY_USER")
regexp = re.compile(self.members_regexp, flags=re.IGNORECASE)
from texttable import Texttable
actual_title = f"直近{self.info['OLDEST_DAYS']}日の これまでの実績\n"
rows = []
text = ""
for k, v in data.items():
if not regexp.match(k):
continue
row = [f"{k}さん", f"{v[0]}回"]
if v[1] in last_remark_by_user:
last_post_date = last_remark_by_user[v[1]].strftime("%Y/%m/%d")
else:
last_post_date = "投稿なし"
row.append(last_post_date)
text += f'{k}さん、投稿回数 {v[0]}回、最終投稿日 {last_post_date}' + "\n"
rows.append(row)
gen = ""
now = dt.now(datetime.timezone(datetime.timedelta(hours=9)))
for k, v in sorted(data.items(), key=lambda x: x[1][0]):
if not regexp.match(k):
continue
# skip if user posted something in RECENT_DAYS
if v[1] in last_remark_by_user:
ts = last_remark_by_user[v[1]]
delta = now - ts
if delta.days < RECENT_DAYS:
continue
if self.template:
with open(self.template) as f:
gen += f.read()
t = Texttable()
t.set_deco(Texttable.HEADER)
t.set_cols_dtype(["t", "t", "t"])
rows[:0] = [["名前", "実績", "最終投稿日"]]
t.add_rows(rows)
gen += "\n"
gen += t.draw()
result = self.get_llm(text)
result += f"""
{actual_title}
```
{t.draw()}
```
<users/all>
"""
return result
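

# Illustrative usage sketch (values and file names below are placeholders). It assumes
# config/.env defines WEBHOOK_URL, TEST_WEBHOOK_URL, MEMBERS and ASK_MEMBER_LIMIT, that
# template.txt exists, and that the inputs follow the shapes _gen_data() expects:
# `data` maps a display name to (post_count, user_key) and `info` carries OLDEST_DAYS
# and LAST_REMARK_BY_USER.
#
#   stats = {"yamada": (3, "U012345"), "suzuki": (0, "U067890")}
#   info = {"OLDEST_DAYS": 30, "LAST_REMARK_BY_USER": {}}
#   exporter = GoogleChatExporter(stats, info, llm_template="llm_template.txt")
#   exporter.send(is_test=True)  # posts to TEST_WEBHOOK_URL instead of the real room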
| [] |
2024-01-10 | shin5ok/slack-routine-notifying | usellm.py | import langchain
from pydantic import BaseModel
from google.cloud import aiplatform
from langchain.chat_models import ChatVertexAI
from langchain.llms import VertexAI
from langchain.prompts import FewShotPromptTemplate, PromptTemplate
from langchain.schema import HumanMessage, SystemMessage
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
llm = None
memory = ConversationBufferMemory()
class LLM:
def __init__(self, model_name: str = "chat-bison-32k"):
parameters = {
"temperature": 0.75,
# "max_output_tokens": 1024,
# "top_p": 0.8,
# "top_k": 40,
"model_name": model_name,
}
global llm, memory
chat_model = ConversationChain(
llm=ChatVertexAI(**parameters),
verbose=True,
memory=memory,
)
print(parameters)
self.llm = chat_model
def choose_candidates(self, template: str, params: list = []):
prompt = PromptTemplate(
template = template,
input_variables=params,
)
text = prompt.format()
return self.llm.predict(input=text)
# return self.llm.predict(text)
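

# Illustrative usage sketch (assumes Vertex AI credentials are configured in the
# environment). choose_candidates() wraps the text in a PromptTemplate and sends it
# through the ConversationChain, so plain text without {placeholders} works as-is:
#
#   llm = LLM(model_name="chat-bison-32k")
#   answer = llm.choose_candidates("Pick the best meeting time from: Mon 10am, Tue 3pm.")
#   print(answer)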
| [] |
2024-01-10 | smngvlkz/Voice-Assistant | Azandegpt3_voice.py | import openai
import pyttsx3
import speech_recognition as sr
import time
#set my OpenAI API key
openai.api_key = "API KEY"
#Initialize the text-to-speech engine
engine = pyttsx3.init()
def generate_response(prompt):
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
max_tokens=4000,
n=1,
stop=None,
temperature=0.5
)
return response["choices"][0]["text"]
def transcribe_audio_to_text(filename):
recognizer = sr.Recognizer()
with sr.AudioFile(filename) as source:
audio = recognizer.record(source)
    try:
        return recognizer.recognize_google(audio)
    except sr.UnknownValueError:
        print("Could not understand the audio")
    except sr.RequestError as e:
        print(f"Speech recognition request failed: {e}")
def speech_text(text):
engine.say(text)
engine.runAndWait()
def main():
while True:
# Wait for user to say "hello azande"
print("Say 'hello azande' to start recording your questions...")
with sr.Microphone() as source:
recognizer = sr.Recognizer()
audio = recognizer.listen(source)
try:
transcription = recognizer.recognize_google(audio)
if transcription.lower() == "hello azande":
# Record audio
filename = "input.wav"
print("Say your question...")
with sr.Microphone() as source:
recognizer = sr.Recognizer()
source.pause_threshold = 1
audio = recognizer.listen(source, phrase_time_limit=None, timeout=None)
with open(filename, "wb") as f:
f.write(audio.get_wav_data())
# Transcribe audio to text
text = transcribe_audio_to_text(filename)
if text:
print(f"You said: {text}")
# Generate response using GPT-3
response = generate_response(text)
print(f"GPT-3 says: {response}")
# Read response using text-to-speech
speech_text(response)
except Exception as e:
print("An error occurred: {}".format(e))
if __name__ == "__main__":
main()
| [] |
2024-01-10 | wyman58/educhatbot | ChatBot.py | from langchain.vectorstores import FAISS
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
from dotenv import load_dotenv
load_dotenv()
def initialize_chat_bot():
db = FAISS.load_local("QandAIndex", OpenAIEmbeddings())
llm = ChatOpenAI(model = "gpt-3.5-turbo", temperature=0.7, max_tokens=100)
global topK_retriever
topK_retriever = db.as_retriever(top_k=3)
global chat_bot
# chat_bot = RetrievalQA.from_chain_type(llm,
# retriever=db.as_retriever(search_type="similarity_score_threshold",
# search_kwargs={"score_threshold": 0.8}))
chat_bot = RetrievalQA.from_chain_type(llm, retriever=topK_retriever)
chat_bot.return_source_documents = True
# docs = topK_retriever.get_relevant_documents("What is the smart choice?")
# for doc in docs:
# print(doc)
def launch_gradio():
import gradio as gr
def chatbot(question):
# docs = topK_retriever.get_relevant_documents(question)
# return docs[0]
ans = chat_bot(question)
return ans["result"]
iface = gr.Interface(
fn=chatbot,
inputs="text",
outputs="text",
title="Smart Choice Chatbot",
description="Chatbot that answers questions about Smart Choice.",
examples=[
["What is the smart choice?"]
]
)
iface.launch()
if __name__ == "__main__":
    # Initialize the chatbot
initialize_chat_bot()
    # Start the Gradio service
launch_gradio() | [] |
2024-01-10 | osipestar/openai-gpt3-chatbot-master | tests~clients~test_open_ai_client.py | import mock
from pytest import fixture, raises
from gpt3_assistant.clients.open_ai_client import OpenAIClient
OPEN_AI_KEY = "fake-key"
MOCK_RESPONSES = [
{"finish_reason": "stop", "text": "hey there"},
{"finish_reason": "stop", "text": "what's up"},
]
@fixture
def open_ai_client():
return OpenAIClient(OPEN_AI_KEY)
def mock_create_completion_no_responses(**kwargs):
return {"choices": [], "usage": {"total_tokens": kwargs["max_tokens"]}}
def mock_create_completion_multiple_responses(**kwargs):
return {"choices": MOCK_RESPONSES, "usage": {"total_tokens": kwargs["max_tokens"]}}
@mock.patch("openai.Completion.create", mock_create_completion_no_responses)
def test_get_completion_throws_exception_no_responses(open_ai_client):
max_tokens = 70
prompt = "Yeah do you have one in mind?"
with raises(Exception):
open_ai_client.get_completion(prompt=prompt, max_tokens=max_tokens)
@mock.patch("openai.Completion.create", mock_create_completion_multiple_responses)
def test_get_completion_returns_first_response(open_ai_client):
max_tokens = 70
prompt = "Yeah do you have one in mind?"
response = open_ai_client.get_completion(prompt=prompt, max_tokens=max_tokens)
assert response is not None
assert response.computer_response == MOCK_RESPONSES[0]["text"]
assert response.user_message == prompt
| [
"Yeah do you have one in mind?"
] |
2024-01-10 | songkq/Chat-Haruhi-Suzumiya | ChatHaruhi2.0~ChatHaruhi.py | from BaseLLM import BaseLLM
# from BaseDB import BaseDB
from ChromaDB import ChromaDB
from LangChainGPT import LangChainGPT
import os
from utils import luotuo_openai_embedding, tiktoken
def foo_embedding(text):
return [0,0,0]
def foo_tokenizer(text):
return 100
class ChatHaruhi:
def __init__(self, system_prompt, story_db=None, story_text_folder = None, llm = 'openai', max_len_story = 1500, max_len_history = 1200):
self.system_prompt = system_prompt
if story_db:
self.db = ChromaDB()
self.db.load(story_db)
elif story_text_folder:
# print("Building story database from texts...")
self.db = self.build_story_db(story_text_folder)
else:
raise ValueError("Either story_db or story_text_folder must be provided")
if llm == 'openai':
self.llm = LangChainGPT()
else:
print(f'warning! undefined llm {llm}, use openai instead.')
self.llm = LangChainGPT()
        self.max_len_story = max_len_story
        self.max_len_history = max_len_history
self.embedding = luotuo_openai_embedding
self.tokenizer = tiktoken
self.story_prefix_prompt = "Classic scenes for the role are as follows:"
self.k_search = 19
        self.narrator = ['旁白', '', 'scene', 'Scene', 'narrator', 'Narrator']
self.dialogue_history = []
def build_story_db(self, text_folder):
        # Read every .txt file in the folder and embed its contents into vectors
db = ChromaDB()
strs = []
# scan all txt file from text_folder
for file in os.listdir(text_folder):
# if file name end with txt
if file.endswith(".txt"):
file_path = os.path.join(text_folder, file)
with open(file_path, 'r', encoding='utf-8') as f:
strs.append(f.read())
vecs = []
for mystr in strs:
vecs.append(self.embedding(mystr))
db.init_from_docs(vecs, strs)
return db
def save_story_db(self, db_path):
self.db.save(db_path)
def chat(self, text, role):
# add system prompt
self.llm.initialize_message()
self.llm.system_message(self.system_prompt)
# add story
query = self.get_query_string(text, role)
self.add_story( query )
# add history
self.add_history()
# get response
response = self.llm.get_response()
# record dialogue history
self.dialogue_history.append((query, response))
return response
def get_query_string(self, text, role):
if role in self.narrator:
return ":" + text
else:
return f"{role}:「{text}」"
def add_story(self, query):
query_vec = self.embedding(query)
stories = self.db.search(query_vec, self.k_search)
story_string = self.story_prefix_prompt
sum_story_token = self.tokenizer(story_string)
for story in stories:
story_token = self.tokenizer(story)
if sum_story_token + story_token > self.max_len_story:
break
else:
sum_story_token += story_token
story_string += story + "\n"
self.llm.user_message(story_string)
    def add_history(self):
        # walk backwards through the dialogue history until the token budget is used up
        sum_history_token = 0
        flag = 0
        for query, response in reversed(self.dialogue_history):
            current_count = self.tokenizer(query) + self.tokenizer(response)
            sum_history_token += current_count
            if sum_history_token > self.max_len_history:
                break
            else:
                flag += 1

        # replay the most recent `flag` turns in chronological order
        if flag > 0:
            for query, response in self.dialogue_history[-flag:]:
                self.llm.user_message(query)
                self.llm.ai_message(response)
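

# Illustrative usage sketch (assumes an OpenAI API key is configured for LangChainGPT and
# the embedding call, and that the folder path below is a placeholder for a directory of
# .txt scene files):
#
#   system_prompt = "You are Haruhi Suzumiya. Stay in character."
#   chatbot = ChatHaruhi(system_prompt, story_text_folder="characters/haruhi/texts")
#   reply = chatbot.chat(text="Do you want to join the SOS Brigade?", role="阿虚")
#   print(reply)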
| [] |
2024-01-10 | songkq/Chat-Haruhi-Suzumiya | src_reform~loadCharacter.py | import openai
| [] |
2024-01-10 | songkq/Chat-Haruhi-Suzumiya | src_reform~gradioServer.py | import hashlib
from zipfile import ZipFile
import gradio as gr
from app import ChatPerson, ChatSystem
import openai
def create_gradio(chat_system):
character_list = chat_system.getAllCharacters()
# from google.colab import drive
# drive.mount(drive_path)
def generate_user_id(ip_address):
hash_object = hashlib.sha256(ip_address.encode())
return hash_object.hexdigest()
# def save_response(chat_history_tuple):
# with open(f"{chat_person.ChatGPT.dialogue_path}/conversation_{time.time()}.txt", "w", encoding='utf-8') as file:
# for cha, res in chat_history_tuple:
# file.write(cha)
# file.write("\n---\n")
# file.write(res)
# file.write("\n---\n")
def respond(role_name, user_message, chat_history, character, request: gr.Request):
# print("history is here : ", chat_history)
# character = character.value
print("the character is : ", character)
input_message = role_name + ':「' + user_message + '」'
bot_message = chat_system.getResponse(input_message, chat_history, character)
chat_history.append((input_message, bot_message))
# chat_system.addChatHistory(character, chat_history)
return "", chat_history, bot_message
def getImage(query, character):
pass
return chat_system.getImage(query, character)
# return chat_person.ChatGPT.text_to_image(query)
def switchCharacter(characterName, chat_history):
pass
chat_history = []
chat_system.addCharacter(character=characterName)
# chat_history = chat_system.getChatHistory(characterName)
return chat_history, None
# chat_history = []
# chat_person.switchCharacter(characterName)
# # print(chat_person.ChatGPT.image_path)
# return chat_history, None
def upload_file(file_obj):
"""上传文件,zipfile解压文件名乱码,单独用filenames保存"""
filenames = []
with ZipFile(file_obj.name) as zfile:
zfile.extractall('./texts')
for filename in zfile.namelist():
filenames.append(filename.encode('cp437').decode('gbk'))
print(filenames)
def generate(file):
return {gen: gr.update(visible=False),
chat: gr.update(visible=True)}
with gr.Blocks() as demo:
gr.Markdown(
"""
## Chat凉宫春日 ChatHaruhi
- 项目地址 [https://github.com/LC1332/Chat-Haruhi-Suzumiya](https://github.com/LC1332/Chat-Haruhi-Suzumiya)
- 骆驼项目地址 [https://github.com/LC1332/Luotuo-Chinese-LLM](https://github.com/LC1332/Luotuo-Chinese-LLM)
- 此版本为图文版本,完整功能(+语音)的demo见项目 角色名建议输入 阿虚 或者影视剧中有的人物。或者也可以是新学生或者老师。
"""
)
with gr.Tab("Chat-Haruhi") as chat:
api_key = gr.Textbox(label="输入key", value="sr-xxxxxxxx")
character = gr.Radio(character_list, label="Character", value='凉宫春日')
image_input = gr.Textbox(visible=False)
japanese_input = gr.Textbox(visible=False)
with gr.Row():
chatbot = gr.Chatbot()
image_output = gr.Image()
audio = gr.Audio(visible=False)
role_name = gr.Textbox(label="角色名")
msg = gr.Textbox(label="输入")
with gr.Row():
clear = gr.Button("Clear")
image_button = gr.Button("给我一个图")
# audio_btn = gr.Button("春日和我说")
# japanese_output = gr.Textbox(interactive=False, visible=False)
sub = gr.Button("Submit")
# audio_store = gr.Textbox(interactive=False)
# def update_audio(audio, japanese_output):
# japanese_output = japanese_output.split("春日:")[1]
# jp_audio_store = vits_haruhi.vits_haruhi(japanese_output, 4)
# return gr.update(value=jp_audio_store, visible=True)
character.change(fn=switchCharacter, inputs=[character, chatbot], outputs=[chatbot, image_output])
clear.click(lambda: None, None, chatbot, queue=False)
# msg.submit(respond, [role_name, msg, chatbot], [msg, chatbot, image_input, japanese_output])
msg.submit(respond, [role_name, msg, chatbot, character], [msg, chatbot, image_input])
# sub.click(fn=respond, inputs=[role_name, msg, chatbot], outputs=[msg, chatbot, image_input, japanese_output])
sub.click(fn=respond, inputs=[role_name, msg, chatbot, character], outputs=[msg, chatbot, image_input])
# audio_btn.click(fn=update_audio, inputs=[audio, japanese_output], outputs=audio)
image_button.click(getImage, inputs=[image_input, character], outputs=image_output)
with gr.Tab("Custom Character"):
format_rule = """
台本格式:台本文件夹打包成zip
文件名为剧情内容.txt
示例:
fileName: SOS团起名由来.txt
fileContent:
春日:「社团名字我刚刚已经想到了!」
阿虚:「……那你说来听听啊!」
春日:「SOS团!让世界变得更热闹的凉宫春日团,简称SOS团。」
图片格式:图片文件夹打包成zip
图片名即为与该图片相似的文本 如 SOS团.jpg"""
with gr.Column() as gen:
with gr.Row():
with gr.Column():
# role_name
role_name = gr.Textbox(label="role_name")
with gr.Row():
texts = gr.File(label="Upload Texts")
images = gr.File(label="Upload Images")
rule = gr.Textbox(label="文件格式", lines=10)
rule.value = format_rule
generate_btn = gr.Button("生成")
with gr.Column(visible=False) as chat:
custom_api_key = gr.Textbox(label="输入key", interactive=True, placeholder="sr-xxxxxxxx")
image_input = gr.Textbox(visible=False)
japanese_input = gr.Textbox(visible=False)
with gr.Row():
custom_chatbot = gr.Chatbot()
custom_image_output = gr.Image()
custom_audio = gr.Audio(visible=False)
custom_role_name = gr.Textbox(label="角色名")
custom_msg = gr.Textbox(label="输入")
with gr.Row():
custom_clear = gr.Button("Clear")
custom_image_button = gr.Button("给我一个图")
# custom_audio_btn = gr.Button("春日和我说")
custom_japanese_output = gr.Textbox(interactive=False)
custom_sub = gr.Button("Submit")
# audio_store = gr.Textbox(interactive=False)
                custom_clear.click(lambda: None, None, custom_chatbot, queue=False)
# custom_msg.submit(respond, [api_key, role_name, custom_msg, chatbot], [msg, chatbot, image_input])
custom_sub.click(fn=respond, inputs=[role_name, custom_msg, chatbot],
outputs=[custom_msg, chatbot, image_input])
# custom_audio_btn.click(fn=update_audio, inputs=[audio, japanese_output], outputs=audio)
generate_btn.click(generate, role_name, [gen, chat])
demo.launch(debug=True, share=True)
# chat_person = ChatPerson()
# create_gradio(chat_person)
chat_system = ChatSystem()
create_gradio(chat_system)
| [] |
2024-01-10 | mattetter/mealmuse | test_utils.py | import unittest
import time
from datetime import datetime
from mealmuse import create_app
from mealmuse.models import User, Recipe, Meal, Day, MealPlan, Pantry, ShoppingList, Item, PantryItem, ShoppingListItem, RecipeItem, db
from werkzeug.security import generate_password_hash
from mealmuse.utils import get_meal_plan
from test_data import meal_plan, recipes, shopping_list, meal_plan_biggus, add_meal_plan_to_database
import os
import openai
# MODEL = "gpt-3.5-turbo"
# openai.api_key = os.getenv("OPENAI_API_KEY")
class TestUtilsFunctions(unittest.TestCase):
def setUp(self):
self.app = create_app('config.DevelopmentConfig')
self.app_context = self.app.app_context()
self.app_context.push()
self.client = self.app.test_client()
db.drop_all()
db.create_all()
# Create a test user
user = User(id=1, username="testuser", email="[email protected]", password=generate_password_hash("testpassword"))
db.session.add(user)
db.session.commit()
db.session.close() # Close the session after committing
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def test_get_meal_plan(self):
user = db.session.get(User, 1)
# meal_plan = add_meal_plan_to_database(user)
# meal_plan_id = meal_plan.id
# user_id = user.id
# # Check if the meal plan contains the correct number of days
# self.assertEqual(len(meal_plan.days), 2)
# # run get meal plan in celery
# get_meal_plan(meal_plan_id, user_id)
# # test to check if checking sqlalchemy object in different thread messes up the write
# # every 5 seconds for 25 seconds or until successful, load the meal_plan_obj, check if all the recipes in it have instructions
# recipe_name_list = []
# for attempt in range(22):
# meal_plan_obj = db.session.get(MealPlan, meal_plan_id)
# for recipe in meal_plan_obj.recipes:
# if recipe.instructions:
# recipe_name_list.append(recipe.name)
# if len(recipe_name_list) == 6:
# print("success")
# print(recipe_name_list)
# break
# else:
# print("not yet....")
# print(recipe_name_list)
# recipe_name_list = []
# time.sleep(5)
# db.session.close()
# def test_save_meal_plan_happy_path(self):
# user = db.session.get(User, 1)
# # Save the meal plan to the test user
# save_meal_plan_to_user(meal_plan_biggus, user)
# # Validate that the meal plan was saved correctly
# self.assertIsNotNone(user.meal_plans)
# meal_plan = user.meal_plans[0]
# self.assertEqual(len(meal_plan.days), 2)
# for day in meal_plan.days:
# self.assertIn(day.name, ["Tuesday", "Wednesday"])
# self.assertGreater(len(day.meal), 0)
# recipe = db.session.get(Recipe, 2)
# # Check if the recipe exists
# if not recipe:
# print("Recipe not found!")
# return
# # # Print basic recipe details
# # print(f"Recipe Name: {recipe.name}")
# # print(f"Instructions: {recipe.instructions}")
# # print(f"Rating: {recipe.rating}")
# # # Print associated ingredients and their quantities
# # print("\nIngredients:")
# # for recipe_item in recipe.recipe_items:
# # item = recipe_item.item
# # print(f"{item.name}: {recipe_item.quantity} {recipe_item.unit}")
# def test_add_items_to_shopping_list(self):
# # 1. Generate a meal plan
# user = db.session.get(User, 1)
# meal_plan = save_meal_plan_to_user(meal_plan_biggus, user)
# recipe = db.session.get(Recipe, 3)
# # Check if the recipe exists
# if not recipe:
# print("Recipe not found!")
# return
# recipe_id = recipe.id
# add_recipe_to_shopping_list(recipe_id, user)
# # Optionally, print out the shopping list for verification
# shopping_list = db.session.query(ShoppingList).filter_by(user_id=user.id).first()
# assert shopping_list is not None
# # for item in shopping_list.shopping_list_items:
# # print(item.item.name, item.quantity, item.unit)
# def test_add_ingredient_to_user_shopping_list(self):
# user = db.session.get(User, 1)
# # Add an ingredient to an empty shopping list
# ingredient = {'name': 'sugar', 'quantity': 1, 'unit': 'cup'}
# add_ingredient_to_user_shopping_list(user, ingredient)
# shopping_list_item = db.session.query(ShoppingListItem).join(ShoppingList).join(Item).filter(Item.name == 'sugar').first()
# self.assertIsNotNone(shopping_list_item)
# self.assertEqual(shopping_list_item.quantity, 1)
# self.assertEqual(shopping_list_item.unit, 'cup')
# # Add the same ingredient with a different unit
# ingredient = {'name': 'sugar', 'quantity': 48, 'unit': 'tsp'}
# add_ingredient_to_user_shopping_list(user, ingredient)
# shopping_list_item = db.session.query(ShoppingListItem).join(ShoppingList).join(Item).filter(Item.name == 'sugar').first()
# self.assertIsNotNone(shopping_list_item)
# self.assertEqual(shopping_list_item.quantity, 2) # Assuming 1 cup + 48 tsp = 2 cups
# self.assertEqual(shopping_list_item.unit, 'cup')
# # Add a new ingredient
# ingredient = {'name': 'salt', 'quantity': 1, 'unit': 'tsp'}
# add_ingredient_to_user_shopping_list(user, ingredient)
# shopping_list_item = db.session.query(ShoppingListItem).join(ShoppingList).join(Item).filter(Item.name == 'salt').first()
# self.assertIsNotNone(shopping_list_item)
# self.assertEqual(shopping_list_item.quantity, 1)
# self.assertEqual(shopping_list_item.unit, 'tsp')
# def test_remove_ingredient_from_user_shopping_list(self):
# user = db.session.get(User, 1)
# # Scenario 1: Remove an ingredient that doesn't exist
# ingredient = {'name': 'pepper', 'quantity': 1, 'unit': 'tsp'}
# remove_ingredient_from_user_shopping_list(user, ingredient)
# shopping_list_item = db.session.query(ShoppingListItem).join(ShoppingList).join(Item).filter(Item.name == 'pepper').first()
# self.assertIsNone(shopping_list_item)
# # Scenario 2: Remove exact quantity and unit
# add_ingredient_to_user_shopping_list(user, {'name': 'sugar', 'quantity': 2, 'unit': 'cup'})
# remove_ingredient_from_user_shopping_list(user, {'name': 'sugar', 'quantity': 2, 'unit': 'cup'})
# shopping_list_item = db.session.query(ShoppingListItem).join(ShoppingList).join(Item).filter(Item.name == 'sugar').first()
# self.assertIsNone(shopping_list_item)
# # Scenario 3: Remove with different unit
# add_ingredient_to_user_shopping_list(user, {'name': 'sugar', 'quantity': 2, 'unit': 'cup'})
# remove_ingredient_from_user_shopping_list(user, {'name': 'sugar', 'quantity': 96, 'unit': 'tsp'})
# shopping_list_item = db.session.query(ShoppingListItem).join(ShoppingList).join(Item).filter(Item.name == 'sugar').first()
# self.assertIsNone(shopping_list_item)
# # Scenario 4: Remove more than existing
# add_ingredient_to_user_shopping_list(user, {'name': 'salt', 'quantity': 1, 'unit': 'tsp'})
# remove_ingredient_from_user_shopping_list(user, {'name': 'salt', 'quantity': 2, 'unit': 'tsp'})
# shopping_list_item = db.session.query(ShoppingListItem).join(ShoppingList).join(Item).filter(Item.name == 'salt').first()
# self.assertIsNone(shopping_list_item)
# # Scenario 5: Use remove_entirely option
# add_ingredient_to_user_shopping_list(user, {'name': 'oil', 'quantity': 1, 'unit': 'cup'})
# remove_ingredient_from_user_shopping_list(user, {'name': 'oil', 'quantity': 0.5, 'unit': 'cup'}, remove_entirely=True)
# shopping_list_item = db.session.query(ShoppingListItem).join(ShoppingList).join(Item).filter(Item.name == 'oil').first()
# self.assertIsNone(shopping_list_item)
# def test_add_ingredient_to_user_pantry(self):
# user = db.session.get(User, 1)
# # Scenario 1: Add an ingredient to an empty pantry
# ingredient = {'name': 'flour', 'quantity': 1, 'unit': 'cup'}
# add_ingredient_to_user_pantry(user, ingredient)
# pantry_item = db.session.query(PantryItem).join(Pantry).join(Item).filter(Item.name == 'flour').first()
# self.assertIsNotNone(pantry_item)
# self.assertEqual(pantry_item.quantity, 1)
# self.assertEqual(pantry_item.unit, 'cup')
# # Scenario 2: Add the same ingredient with a different unit
# ingredient = {'name': 'flour', 'quantity': 16, 'unit': 'tbsp'}
# add_ingredient_to_user_pantry(user, ingredient)
# pantry_item = db.session.query(PantryItem).join(Pantry).join(Item).filter(Item.name == 'flour').first()
# self.assertIsNotNone(pantry_item)
# self.assertEqual(pantry_item.quantity, 2) # Assuming 1 cup + 16 tbsp = 2 cups
# self.assertEqual(pantry_item.unit, 'cup')
# # Scenario 3: Add a new ingredient
# ingredient = {'name': 'rice', 'quantity': 1, 'unit': 'cup'}
# add_ingredient_to_user_pantry(user, ingredient)
# pantry_item = db.session.query(PantryItem).join(Pantry).join(Item).filter(Item.name == 'rice').first()
# self.assertIsNotNone(pantry_item)
# self.assertEqual(pantry_item.quantity, 1)
# self.assertEqual(pantry_item.unit, 'cup')
# def test_remove_ingredient_from_user_pantry(self):
# user = db.session.get(User, 1)
# # Scenario 1: Remove an ingredient that doesn't exist
# ingredient = {'name': 'chocolate', 'quantity': 1, 'unit': 'bar'}
# remove_ingredient_from_user_pantry(user, ingredient)
# pantry_item = db.session.query(PantryItem).join(Pantry).join(Item).filter(Item.name == 'chocolate').first()
# self.assertIsNone(pantry_item)
# # Scenario 2: Remove exact quantity and unit
# add_ingredient_to_user_pantry(user, {'name': 'flour', 'quantity': 2, 'unit': 'cup'})
# remove_ingredient_from_user_pantry(user, {'name': 'flour', 'quantity': 2, 'unit': 'cup'})
# pantry_item = db.session.query(PantryItem).join(Pantry).join(Item).filter(Item.name == 'flour').first()
# self.assertIsNone(pantry_item)
# # Scenario 3: Remove with different unit
# add_ingredient_to_user_pantry(user, {'name': 'flour', 'quantity': 2, 'unit': 'cup'})
# remove_ingredient_from_user_pantry(user, {'name': 'flour', 'quantity': 32, 'unit': 'tbsp'})
# pantry_item = db.session.query(PantryItem).join(Pantry).join(Item).filter(Item.name == 'flour').first()
# self.assertIsNone(pantry_item)
# # Scenario 4: Remove more than existing
# add_ingredient_to_user_pantry(user, {'name': 'rice', 'quantity': 1, 'unit': 'cup'})
# remove_ingredient_from_user_pantry(user, {'name': 'rice', 'quantity': 2, 'unit': 'cup'})
# pantry_item = db.session.query(PantryItem).join(Pantry).join(Item).filter(Item.name == 'rice').first()
# self.assertIsNone(pantry_item)
# # Scenario 5: Use remove_entirely option
# add_ingredient_to_user_pantry(user, {'name': 'oil', 'quantity': 1, 'unit': 'cup'})
# remove_ingredient_from_user_pantry(user, {'name': 'oil', 'quantity': 0.5, 'unit': 'cup'}, remove_entirely=True)
# pantry_item = db.session.query(PantryItem).join(Pantry).join(Item).filter(Item.name == 'oil').first()
# self.assertIsNone(pantry_item)
# def test_remove_recipe_from_shopping_list(self):
# # 1. Generate a meal plan
# user = db.session.get(User, 1)
# meal_plan = save_meal_plan_to_user(meal_plan_biggus, user)
# #save a recipe to the shopping list
# recipe_id = 5
# add_recipe_to_shopping_list(recipe_id, user)
# # Remove the recipe from the shopping list
# remove_recipe_from_shopping_list(recipe_id, user)
# # Check if the ingredients of the recipe are removed or reduced in quantity
# recipe_items = db.session.query(RecipeItem).filter_by(recipe_id=recipe_id).all()
# for recipe_item in recipe_items:
# item_in_list = db.session.query(ShoppingListItem).join(ShoppingList).join(Item).filter(
# ShoppingList.id == user.shopping_list.id,
# Item.name == recipe_item.item.name
# ).first()
# # # If the assertion is likely to fail, print the quantities
# # if item_in_list and item_in_list.quantity >= recipe_item.quantity:
# # print(f"Item: {recipe_item.item.name}")
# # print(f"Quantity in shopping list: {item_in_list.quantity}")
# # print(f"Quantity in recipe: {recipe_item.quantity}")
# # Assert that either the item is not in the list, or its quantity is reduced
# self.assertTrue(item_in_list is None or item_in_list.quantity < recipe_item.quantity)
# def test_get_meal_plan_details(self):
# user = db.session.get(User, 1)
# # Save a sample meal plan to the test user
# save_meal_plan_to_user(meal_plan_biggus, user)
# # Get the most recent meal plan for the user
# meal_plan_details = get_meal_plan_details(user)
# # Validate that the meal plan details dictionary has the expected structure
# self.assertIn("meal_plan_id", meal_plan_details)
# self.assertIn("days", meal_plan_details)
# # Ensure there are two days in the meal plan
# self.assertEqual(len(meal_plan_details["days"]), 2)
# for day_data in meal_plan_details["days"]:
# self.assertIn("day_id", day_data)
# self.assertIn("day_name", day_data)
# self.assertIn("meals", day_data)
# # Ensure each day has at least one meal
# self.assertGreater(len(day_data["meals"]), 0)
# for meal_data in day_data["meals"]:
# self.assertIn("meal_id", meal_data)
# self.assertIn("meal_name", meal_data)
# self.assertIn("recipes", meal_data)
# # Ensure each meal has at least one recipe
# self.assertGreater(len(meal_data["recipes"]), 0)
# for recipe_data in meal_data["recipes"]:
# self.assertIn("recipe_id", recipe_data)
# self.assertIn("recipe_name", recipe_data)
# You can add more tests for other functions if needed!
# Run the tests with: python -m unittest test_utils.py
if __name__ == '__main__':
unittest.main() | [] |
2024-01-10 | mattetter/mealmuse | mealmuse~tasks.py | import os
import re
import json
import openai
import time
from dotenv import load_dotenv
from mealmuse import db, celery
from mealmuse.models import User, Pantry, Item, ShoppingList, MealPlan, Recipe, Day, Meal, RecipeItem, ShoppingListItem, PantryItem, UserProfile, Equipment, Allergy, Diet, users_recipes, recipes_mealplans, recipes_meals # import the models if they are used in the utility functions
from mealmuse.exceptions import InvalidOutputFormat
from mealmuse.prompts import recipes_prompt_35turbo_v1, meal_plan_system_prompt_gpt4_v2, pantry_items_prompt_gpt_4_v1
from test_data import get_recipe, meal_plan_output_gpt_4_v2
load_dotenv(".env")
openai.api_key = os.getenv("OPENAI_API_KEY")
RECIPES_TASK = recipes_prompt_35turbo_v1
RECIPE_MODEL = "gpt-3.5-turbo-16k"
MEAL_PLAN_TASK = meal_plan_system_prompt_gpt4_v2
MEAL_PLAN_MODEL = "gpt-4"
PANTRY_ITEMS_TASK = pantry_items_prompt_gpt_4_v1
def create_app_instance():
from mealmuse import create_app # Adjust this import to your actual function
app = create_app('config.DevelopmentConfig')
return app
@celery.task
def generate_meal_plan(meal_plan_id, user_id, temp):
# get meal plan from openai
meal_plan_output = fetch_meal_plan_from_api(meal_plan_id, user_id, temp)
# fake api call for testing
# meal_plan_output = meal_plan_output_gpt_4_v2
# save generated meal plan with user selections to database
meal_plan_id = save_meal_plan_output_with_context(meal_plan_output, meal_plan_id, user_id)
# fetch recipe details in parallel
fetch_recipe_details_with_context(meal_plan_id, user_id)
return meal_plan_id
# Meal Plan generation; wrap the recipe api call in a function to be used in parallel
def fetch_recipe_details_with_context(meal_plan_id, user_id):
app = create_app_instance()
with app.app_context():
try:
meal_plan = MealPlan.query.filter_by(id=meal_plan_id).first()
profile = UserProfile.query.filter_by(user_id=user_id).first()
temp = profile.recipe_temperature
# create a list of recipe ids in plan
recipe_ids = []
for recipe in meal_plan.recipes:
recipe_ids.append(recipe.id)
db.session.remove()
except Exception as e:
db.session.rollback()
print(f"Error occurred: {e}")
db.session.remove()
raise
result = [fetch_recipe_details.delay(recipe_id, temp) for recipe_id in recipe_ids]
return result
# Meal Plan generation: generate a single recipe
@celery.task
def swap_out_recipe(recipe_id, user_id):
app = create_app_instance()
with app.app_context():
try:
user = User.query.filter_by(id=user_id).first()
old_recipe = Recipe.query.filter_by(id=recipe_id).first()
meal = Meal.query.filter_by(id=old_recipe.meals[0].id).first()
day = Day.query.filter_by(id=meal.day_id).first()
meal_plan_id = day.meal_plan_id
profile = UserProfile.query.filter_by(user_id=user_id).first()
recipe_temperature = profile.recipe_temperature or 1
meal_plan = MealPlan.query.filter_by(id=meal_plan_id).first()
# get the recipe specific details
recipe_cost = old_recipe.cost
recipe_time = old_recipe.time
recipe_serves = old_recipe.serves
recipe_cuisine = old_recipe.cuisine
# disassociate the old recipe from the meal
meal.recipes.remove(old_recipe)
# disassociate the old recipe from the meal plan
meal_plan.recipes.remove(old_recipe)
db.session.flush()
# save new recipe to database with the above details
new_recipe = Recipe(
name="please generate",
cost=recipe_cost,
time=recipe_time,
serves=recipe_serves
)
# Add the new recipe to the database
db.session.add(new_recipe)
db.session.flush() # To get the ID for the new recipe after adding it
# create a string with the old cuisine and a request to not have the old recipe.name again
new_recipe.cuisine = recipe_cuisine + f" : anything but {old_recipe.name}"
# add ingredients to the new recipe
add_available_pantry_items_to_recipe(new_recipe, user, meal_plan)
# Associate the new recipe with the user
user.recipes.append(new_recipe)
# Associate the new recipe with the meal object
meal.recipes.append(new_recipe)
db.session.flush()
# Associate the new recipe with the meal plan
meal_plan.recipes.append(new_recipe)
# generate the new recipe
recipe_details = fetch_recipe_details(new_recipe.id, recipe_temperature)
db.session.commit()
except Exception as e:
db.session.rollback()
print(f"Error occurred: {e}")
db.session.remove()
raise
return recipe_details
# celery task to generate a single recipe from scratch using the user's pantry
@celery.task
def generate_new_recipe(user_id, recipe_id):
app = create_app_instance()
with app.app_context():
try:
user = User.query.filter_by(id=user_id).first()
recipe = Recipe.query.filter_by(id=recipe_id).first()
profile = UserProfile.query.filter_by(user_id=user_id).first()
recipe_temperature = profile.recipe_temperature or 1
pantry = user.pantry
# add text to the cuisine to request that only current pantry items are used
if recipe.cuisine:
recipe.cuisine = recipe.cuisine + " : Strictly only use items listed"
else:
recipe.cuisine = "Strictly only use items listed"
if pantry:
for pantry_item in pantry.pantry_items:
recipe_item = RecipeItem(recipe_id=recipe_id, item_id=pantry_item.item_id, quantity = pantry_item.quantity, unit = pantry_item.unit)
db.session.add(recipe_item)
db.session.flush()
# Associate the new recipe with the user
user.recipes.append(recipe)
db.session.commit()
except Exception as e:
db.session.rollback()
print(f"Error occurred: {e}")
db.session.remove()
raise
# generate the new recipe
recipe_details = fetch_recipe_details(recipe_id, recipe_temperature)
return recipe_details
# Recipe generation; add available pantry items to a single recipe
def add_available_pantry_items_to_recipe(recipe, user, meal_plan):
from mealmuse.utils import update_item_quantity
# first add all items in pantry to the recipe
pantry = user.pantry
if pantry:
for pantry_item in pantry.pantry_items:
recipe_item = RecipeItem(recipe_id=recipe.id, item_id=pantry_item.item_id, quantity = pantry_item.quantity, unit = pantry_item.unit)
db.session.add(recipe_item)
db.session.flush()
    # For each item in this recipe that is also used in the current meal plan, reduce the
    # recipe item's quantity by the amount the plan already consumes (via update_item_quantity).
    # First, collect every ingredient used across the meal plan's recipes.
    meal_plan_ingredients = []
    for plan_recipe in meal_plan.recipes:
        for plan_recipe_item in plan_recipe.recipe_items:
            meal_plan_ingredients.append(plan_recipe_item)
    # Then walk this recipe's own items and subtract any overlapping quantities.
    for recipe_item in recipe.recipe_items:
        for meal_plan_ingredient in meal_plan_ingredients:
            if recipe_item.item_id == meal_plan_ingredient.item_id:
                # subtract the meal plan ingredient quantity from the recipe item quantity
                quantity = -1 * meal_plan_ingredient.quantity
                update_item_quantity(recipe_item, quantity, meal_plan_ingredient.unit)
db.session.commit()
# Recipe generation: make a new recipe from using only a block of text passed in.
@celery.task
def create_recipe_with_text(recipe_text, user_id):
app = create_app_instance()
with app.app_context():
try:
user = User.query.filter_by(id=user_id).first()
profile = UserProfile.query.filter_by(user_id=user_id).first()
recipe_temperature = profile.recipe_temperature or 1
# create a new recipe
new_recipe = Recipe(
name="please_process",
description=recipe_text
)
db.session.add(new_recipe)
db.session.flush()
# Associate the new recipe with the user
user.recipes.append(new_recipe)
new_recipe_id = new_recipe.id
db.session.commit()
db.session.remove()
except Exception as e:
db.session.rollback()
print(f"Error occurred: {e}")
db.session.remove()
raise
# generate the new recipe
fetch_recipe_details(new_recipe_id, recipe_temperature)
return new_recipe_id
# Meal Plan generation; create a full user prompt with flask app context
def create_meal_plan_user_prompt_with_context(user_id, meal_plan_id):
app = create_app_instance()
with app.app_context():
try:
user = User.query.filter_by(id=user_id).first()
meal_plan = MealPlan.query.filter_by(id=meal_plan_id).first()
user_prompt = create_meal_plan_user_prompt(user, meal_plan)
except Exception as e:
db.session.rollback()
print(f"Error occurred: {e}")
db.session.remove()
raise
return user_prompt
# Meal Plan generation; process the user input to create a user prompt in the expected format
def create_meal_plan_user_prompt(user, meal_plan):
# Placeholder for the result in json format
result = {}
# get the user's pantry items
pantry_items = []
pantry = user.pantry
if pantry:
pantry_items = [item.item.name for item in pantry.pantry_items]
# check if user has any equipment
equipment = []
if user.equipment:
equipment = [equipment.name for equipment in user.equipment]
# check if user has any allergies
allergy = []
if user.allergies:
allergy = [allergy.name for allergy in user.allergies]
# check if user has any dietary restrictions
diet = []
if user.diets:
diet = [diet.name for diet in user.diets]
# get the user's proficiency
user_profile = UserProfile.query.filter_by(user_id=user.id).first()
if user_profile:
proficiency = user_profile.proficiency
else:
# create a profile and set proficiency to intermediate
user_profile = UserProfile(user_id=user.id, proficiency="Beginner")
db.session.add(user_profile)
db.session.commit()
proficiency = user_profile.proficiency
    # get the pantry use preference, budget, and leftover management for this meal plan
pantry_usage_preference = meal_plan.pantry_use
budget = meal_plan.budget
leftovers = meal_plan.leftovers
cuisine = meal_plan.cuisine_requests
# Build the json object
general = {
"allergies": allergy,
"cuisine and user requests": cuisine if cuisine else 'any', # Defaulting to 'Any' if not provided
"dietary restrictions": diet if diet else 'no restrictions', # Defaulting to 'No restrictions' if not provided
"pantry_items": pantry_items,
"pantry_usage_preference": pantry_usage_preference,
# "calorie_range": calorie_range,
# "macronutrients": {
# "carbs": 45, # You can replace with actual data if available
# "protein": 25, # You can replace with actual data if available
# "fats": 30 # You can replace with actual data if available
# },
"equipment": equipment,
"culinary_skill": proficiency,
"budget": budget,
"meal_diversity": "high", #TO DO: meal_diversity,
"leftover_management": leftovers,
"description": "please generate"
}
daily = {}
# meal_plan.days fetches the days associated with this meal plan
for day in meal_plan.days:
daily[day.name] = []
for meal in day.meal:
meal_details = {
"name": meal.name,
"prep_time": meal.prep_time,
"num_people": meal.num_people,
"cuisine": meal.cuisine,
"type": meal.type
}
daily[day.name].append(meal_details)
# Compile the result
result = {
"general": general,
"daily": daily
}
return json.dumps(result)
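
# For reference, the JSON string returned above has this shape (illustrative values only):
#
#   {
#     "general": {
#       "allergies": ["peanuts"],
#       "cuisine and user requests": "any",
#       "dietary restrictions": "no restrictions",
#       "pantry_items": ["rice", "eggs"],
#       "pantry_usage_preference": "use mostly pantry items",
#       "equipment": ["oven"],
#       "culinary_skill": "Beginner",
#       "budget": "moderate",
#       "meal_diversity": "high",
#       "leftover_management": "reuse leftovers",
#       "description": "please generate"
#     },
#     "daily": {
#       "Tuesday": [
#         {"name": "Dinner", "prep_time": 30, "num_people": 2, "cuisine": "any", "type": "main"}
#       ]
#     }
#   }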
# Meal Plan generation; the api call to get a meal plan
def fetch_meal_plan_from_api(meal_plan_id, user_id, temp=1):
# Create the user prompt
user_prompt = create_meal_plan_user_prompt_with_context(user_id, meal_plan_id)
response = openai.ChatCompletion.create(
model=MEAL_PLAN_MODEL,
messages=[
{"role": "system", "content": MEAL_PLAN_TASK},
{"role": "user", "content": user_prompt},
],
max_tokens=3000,
temperature=temp,
)
meal_plan_text = response.choices[0].message['content']
try:
# Attempt to parse the output as JSON
meal_plan_json = json.loads(meal_plan_text)
except json.JSONDecodeError:
# If the output is not JSON, raise InvalidOutputFormat
raise InvalidOutputFormat("Output is not valid JSON")
return meal_plan_json
# return meal_plan_output_gpt_4_v2
def save_meal_plan_output_with_context(meal_plan_json, meal_plan_id, user_id):
app = create_app_instance()
with app.app_context():
try:
meal_plan = MealPlan.query.filter_by(id=meal_plan_id).first()
user = User.query.filter_by(id=user_id).first()
save_meal_plan_output(meal_plan_json, meal_plan, user)
except Exception as e:
db.session.rollback()
print(f"Error occurred: {e}")
db.session.remove()
raise
return meal_plan_id
# Meal Plan Save: takes the output from the meal plan api call and saves it to the database
def save_meal_plan_output(meal_plan_json, meal_plan, user):
# save the description to the meal plan
meal_plan.description = meal_plan_json['description']
for day_name, day_data in meal_plan_json['days'].items():
# Find the day object corresponding to the day name (like 'Tuesday' or 'Wednesday')
day_obj = Day.query.filter_by(name=day_name, meal_plan_id=meal_plan.id).first()
if not day_obj:
continue
for meal_name, meal_data in day_data.items():
# Find the meal object corresponding to the meal name (like 'Breakfast', 'Lunch'...)
meal_obj = Meal.query.filter_by(name=meal_name, day_id=day_obj.id).first()
if not meal_obj:
continue
# Extract recipe data
recipe_data = meal_data['recipe']
new_recipe = Recipe(
name=recipe_data['name'],
cost=recipe_data['cost_in_dollars'],
time=recipe_data['time_required'],
serves=recipe_data['serves'],
cuisine=meal_obj.cuisine
)
# Add the new recipe to the database
db.session.add(new_recipe)
db.session.flush() # To get the ID for the new recipe after adding it
# Associate the new recipe with the meal object
meal_obj.recipes.append(new_recipe)
# Associate the new recipe with the meal plan
meal_plan.recipes.append(new_recipe)
# Associate the new recipe with the user
user.recipes.append(new_recipe)
# Save ingredients to the RecipeItem model
for ingredient in recipe_data['ingredients_from_pantry']:
# Here we're assuming each ingredient is a new unique item. If not,
# you'd need to check the database for existing items before creating a new one.
item = Item(name=ingredient)
db.session.add(item)
db.session.flush() # To get the ID for the new item after adding it
# Create a RecipeItem instance
recipe_item = RecipeItem(recipe_id=new_recipe.id, item_id=item.id)
db.session.add(recipe_item)
meal_plan.status = "complete"
db.session.commit()
return meal_plan.id
# Meal Plan generation; the api call to get a recipe
@celery.task
def fetch_recipe_details(recipe_id, temp=1):
# for testing only
# recipe = db.session.query(Recipe).filter_by(id=recipe_id).first()
# recipe_name = recipe.name
# db.session.close()
retries = 2
recipe_user_prompt = create_recipe_user_prompt(recipe_id)
for _ in range(retries):
############################################ RECIPE API CALL ############################################
response = openai.ChatCompletion.create(
model=RECIPE_MODEL,
messages=[
{"role": "system", "content": RECIPES_TASK},
{"role": "user", "content": recipe_user_prompt},
],
max_tokens=2000,
temperature=temp,
)
recipes_text = response.choices[0].message['content']
# fake the api call for testing
# recipes_text = get_recipe(recipe.name)
try:
return process_recipe_output(recipes_text, recipe_id)
except InvalidOutputFormat as e:
print(f"Error processing recipe for {recipe_id}: {e}. Retrying...")
raise Exception(f"Failed to get a valid response for {recipe_id} after {retries} attempts.")
# Meal Plan generation; Pull info from db to create a user prompt for a recipe
def create_recipe_user_prompt(recipe_id):
app = create_app_instance()
with app.app_context():
try:
recipe = db.session.query(Recipe).filter_by(id=recipe_id).first()
# Placeholder for the result in json format
result = {}
# check if this is a text-entry recipe that we are just processing
if recipe.name == "please_process":
recipe.name = "please generate"
result = recipe.description
recipe.description = ""
db.session.commit()
# otherwise assume generating a new recipe
else:
# get the recipe specific details
name = recipe.name or "please generate"
cost = recipe.cost or "any"
time = recipe.time or "any"
serves = recipe.serves or 1
cuisine = recipe.cuisine or "be creative"
# get the recipe's ingredients
ingredients = []
for recipe_item in recipe.recipe_items:
ingredients.append(recipe_item.item.name)
# create text file with description and the above details
result = {
"recipe":{
"name": name,
"cost": cost,
"total time to make": time,
"serves": serves,
"ingredients from pantry to consider including": ingredients,
"cuisine or user requests": cuisine
}}
except Exception as e:
db.session.rollback()
print(f"Error occurred: {e}")
db.session.remove()
raise
db.session.remove()
return json.dumps(result)
# Meal Plan Save; takes the output from the recipe api call and saves it to the database
def process_recipe_output(data, recipe_id):
app = create_app_instance()
with app.app_context():
try:
recipe = db.session.query(Recipe).filter_by(id=recipe_id).first()
# remove all existing ingredients from the recipe
for recipe_item in recipe.recipe_items:
db.session.delete(recipe_item)
db.session.commit()
# If data is a string, try to deserialize it as JSON
if isinstance(data, str):
try:
data = load_json_with_fractions(data)
except json.JSONDecodeError:
print(f"invalid json: {data}")
raise InvalidOutputFormat("Provided string is not valid JSON")
# Check if the data has 'recipe' key format
if "recipe" not in data:
print(f"no recipe: {data}")
raise InvalidOutputFormat("Output does not have a 'recipe' key")
details = data["recipe"]
# Validating recipe name
if recipe.name == "please generate":
name = details.get('name')
if not name or not isinstance(name, str):
print(f"no name: {name}")
raise InvalidOutputFormat("Missing or invalid name for recipe")
recipe.name = name
# Validating ingredients
ingredients = details.get('ingredients', [])
if not ingredients or not isinstance(ingredients, list):
print(f"no ingredients: {ingredients}")
raise InvalidOutputFormat("Missing or invalid ingredients for recipe")
# Validate and save each ingredient
for ingredient in ingredients:
if not all(key in ingredient for key in ['name', 'quantity', 'unit']):
print(f"invalid ingredient: {ingredient}")
raise InvalidOutputFormat("Invalid ingredient format for recipe")
# Check if the ingredient already exists in the database
existing_item = db.session.query(Item).filter(Item.name == ingredient['name']).first()
if existing_item:
item = existing_item
else:
item = Item(name=ingredient['name'])
db.session.add(item)
db.session.flush()
# Create a RecipeItem instance
recipe_item = RecipeItem(recipe_id=recipe.id, item_id=item.id, quantity=ingredient['quantity'], unit=ingredient['unit'])
db.session.add(recipe_item)
db.session.flush()
# Validating cooking instructions
instructions = details.get('cooking_instructions', [])
if not instructions or not isinstance(instructions, list):
print(f"no instructions: {instructions}")
raise InvalidOutputFormat("Missing or invalid cooking instructions for recipe")
# add instructions to recipe
recipe.instructions = "\n".join(instructions)
db.session.commit()
except Exception as e:
db.session.rollback()
print(f"Error occurred: {e}")
db.session.remove()
raise
return recipe_id
# add a list of pantry items to the user's pantry
@celery.task
def add_list_of_items(user_id, list_of_items):
pantry_items = process_list_of_items(list_of_items)
save_pantry_list_to_db(pantry_items, user_id)
return user_id
# api call to add a list of items to the user's pantry
def process_list_of_items(list_of_items):
response = openai.ChatCompletion.create(
model=MEAL_PLAN_MODEL,
messages=[
{"role": "system", "content": PANTRY_ITEMS_TASK},
{"role": "user", "content": list_of_items},
],
max_tokens=3000,
temperature=1,
)
ingredient_list_text = response.choices[0].message['content']
return ingredient_list_text
# process the list of items to be added to the user's pantry
def save_pantry_list_to_db(pantry_items, user_id):
app = create_app_instance()
with app.app_context():
try:
# If data is a string, try to deserialize it as JSON
if isinstance(pantry_items, str):
try:
pantry_items = load_json_with_fractions(pantry_items)
except json.JSONDecodeError:
print(f"invalid json: {pantry_items}")
raise InvalidOutputFormat("Provided string is not valid JSON")
# get the user and their pantry
user = User.query.filter_by(id=user_id).first()
pantry = user.pantry
if not pantry:
pantry = Pantry(user_id=user_id)
db.session.add(pantry)
db.session.flush()
# Validating ingredients
pantry_item_list = pantry_items.get('pantry_items', [])
if not pantry_item_list or not isinstance(pantry_item_list, list):
print(f"no ingredients: {pantry_item_list}")
raise InvalidOutputFormat("Missing or invalid items for pantry")
# Validate and save each ingredient
for pantry_item in pantry_item_list:
if not all(key in pantry_item for key in ['name', 'quantity', 'unit']):
print(f"invalid ingredient: {pantry_item}")
raise InvalidOutputFormat("Invalid pantry item format")
# Check if the ingredient already exists in the database
existing_item = db.session.query(Item).filter(Item.name == pantry_item['name']).first()
if existing_item:
item = existing_item
else:
item = Item(name=pantry_item['name'])
db.session.add(item)
db.session.flush()
# Create a PantryItem instance if one does not already exist otherwise increase the quantity
existing_pantry_item = db.session.query(PantryItem).filter(PantryItem.pantry_id == pantry.id, PantryItem.item_id == item.id).first()
if existing_pantry_item:
existing_pantry_item.quantity += pantry_item['quantity']
existing_pantry_item.unit = pantry_item['unit']
else:
new_pantry_item = PantryItem(pantry_id=pantry.id, item_id=item.id, quantity=pantry_item['quantity'], unit=pantry_item['unit'])
db.session.add(new_pantry_item)
db.session.flush()
db.session.commit()
except Exception as e:
db.session.rollback()
print(f"Error occurred: {e}")
db.session.remove()
raise
return user_id
def fraction_to_decimal(match):
"""Converts a fraction to its decimal representation."""
num, den = map(int, match.group(0).split('/'))
return str(num / den)
def preprocess_json_string(s):
"""Replaces fractions with their decimal equivalents in a string."""
return re.sub(r'\b\d+/\d+\b', fraction_to_decimal, s)
def load_json_with_fractions(s):
"""Loads a JSON string, even if it contains fractions."""
preprocessed_string = preprocess_json_string(s)
return json.loads(preprocessed_string)
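
# Example (illustrative): fractions in the model output are converted to decimals before
# parsing, e.g. load_json_with_fractions('{"quantity": 1/2, "unit": "cup"}')
# returns {"quantity": 0.5, "unit": "cup"}.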
# from celery.signals import worker_process_init
# @worker_process_init.connect
# def on_worker_init(*args, **kwargs):
# warmup.apply_async()
# @celery.task
# def warmup():
# # Perform some simple database queries
# some_query = db.session.query(Recipe).limit(1)
# db.session.remove()
# some_query = db.session.query(Recipe).limit(1)
# another_query = db.session.query(User).limit(1)
# query_three = db.session.query(MealPlan).limit(1)
# query_four = db.session.query(Day).limit(1)
# query_five = db.session.query(Meal).limit(1)
# query_six = db.session.query(Pantry).limit(1)
# query_seven = db.session.query(Item).limit(1)
# query_eight = db.session.query(ShoppingList).limit(1)
# query_nine = db.session.query(RecipeItem).limit(1)
# query_ten = db.session.query(ShoppingListItem).limit(1)
# query_eleven = db.session.query(PantryItem).limit(1)
# query_twelve = db.session.query(UserProfile).limit(1)
# query_thirteen = db.session.query(Equipment).limit(1)
# query_fourteen = db.session.query(Allergy).limit(1)
# query_fifteen = db.session.query(Diet).limit(1)
# query_sixteen = db.session.query(users_recipes).limit(1)
# query_seventeen = db.session.query(recipes_mealplans).limit(1)
# query_eighteen = db.session.query(recipes_meals).limit(1)
# # Close the session
# db.session.remove()
# # run celery worker with the following command: "celery -A mealmuse.tasks worker --loglevel=info"
| [] |
2024-01-10 | abibell/GirlfriendGPT | src~agent~tools~video_message.py | """Tool for generating video messages."""
import logging
from langchain.agents import Tool
from steamship import Steamship, Block, SteamshipError
NAME = "VideoMessage"
DESCRIPTION = """
Useful for when you want to send a video message.
Input: The message you want to say in a video.
Output: the UUID of the generated video.
"""
PLUGIN_HANDLE = "did-video-generator"
class VideoMessageTool(Tool):
"""Tool used to generate images from a text-prompt."""
client: Steamship
def __init__(self, client: Steamship):
super().__init__(
name=NAME, func=self.run, description=DESCRIPTION, client=client
)
@property
def is_single_input(self) -> bool:
"""Whether the tool only accepts a single input."""
return True
def run(self, prompt: str, **kwargs) -> str:
"""Generate a video."""
video_generator = self.client.use_plugin(PLUGIN_HANDLE)
print("Video generator")
task = video_generator.generate(
text=prompt,
append_output_to_file=True,
options={
"source_url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/617d79ac-2201-4f06-b43e-195f78a5fbfb/width=1472/3.1-066.jpeg",
"stitch": True,
"provider": {
"type": "microsoft",
"voice_id": "en-US-AshleyNeural",
"voice_config": {"style": "Default"},
"expressions": [
{"start_frame": 0, "expression": "surprise", "intensity": 1.0},
{"start_frame": 50, "expression": "happy", "intensity": 1.0},
{"start_frame": 100, "expression": "serious", "intensity": 0.6},
{"start_frame": 150, "expression": "neutral", "intensity": 1.0},
],
},
"transition_frames": 20,
},
)
task.wait(retry_delay_s=3)
blocks = task.output.blocks
logging.info(f"[{self.name}] got back {len(blocks)} blocks")
if len(blocks) > 0:
logging.info(f"[{self.name}] image size: {len(blocks[0].raw())}")
return blocks[0].id
raise SteamshipError(f"[{self.name}] Tool unable to generate image!")
if __name__ == "__main__":
with Steamship.temporary_workspace() as client:
tool = VideoMessageTool(client=client)
id = tool.run(
"You can now generate video's of your AI companion. Try it out now for free!"
)
b = Block.get(client=client, _id=id)
b.set_public_data(True)
print(b.raw_data_url)
| [] |
2024-01-10 | abibell/GirlfriendGPT | src~agent~tools~album_art.py | """Tool for generating album art.
The purpose of this tool is to illustrate how to wrap the GenerateImageTool
with a custom tool description & some prompt engineering to steer the image
one way or another.
The GenerateImageTool leaves the user + LLM in complete control of the image
generation prompt... but what if you wanted to make sure the prompt was:
- A particular style?
- A particular mood?
- Something else entirely, involving web scraping and other operations?
You can do that by wrapping the GenerateImageTool, as you see here, and then
sending in your own custom prompt.
"""
import json
import logging
from langchain.agents import Tool
from steamship import Steamship
from steamship.base.error import SteamshipError
from steamship.data.plugin.plugin_instance import PluginInstance
from .image import GenerateImageTool
NAME = "GenerateAlbumArt"
DESCRIPTION = """
Useful for when you need to generate album art.
Input: A description of the album that needs art
Output: the UUID of a generated image
"""
class GenerateAlbumArtTool(Tool):
"""Tool used to generate album art from a album description."""
client: Steamship
tool: GenerateImageTool
def __init__(self, client: Steamship):
super().__init__(
name=NAME,
func=self.run,
description=DESCRIPTION,
client=client,
tool=GenerateImageTool(client),
)
@property
def is_single_input(self) -> bool:
"""Whether the tool only accepts a single input."""
return True
def run(self, prompt: str, **kwargs) -> str:
"""Respond to LLM prompt."""
# Here we create a NEW prompt, which is based on the prompt provided
# to this tool, but including extra terms.
image_gen_prompt = f"album art, 4k, high def, pop art, professional, high quality, award winning, grammy, platinum, {prompt}"
# Then we just return the results of the wrapped GenerateImageTool,
# passing it the new prompt that we created.
return self.tool.run(image_gen_prompt)
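# Minimal usage sketch, mirroring the __main__ demo in video_message.py above. This block is
# an illustrative assumption rather than part of the original module: it presumes a temporary
# Steamship workspace is enough to exercise the wrapped GenerateImageTool, and the album
# description string is made up.
if __name__ == "__main__":
    with Steamship.temporary_workspace() as client:
        tool = GenerateAlbumArtTool(client=client)
        block_id = tool.run("A synthwave concept album about late-night city drives")
        print(f"Generated album art block UUID: {block_id}")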
| [
"album art, 4k, high def, pop art, professional, high quality, award winning, grammy, platinum, PLACEHOLDER"
] |
2024-01-10 | igalonso/template-react-agents-vertex | agents~get_agents.py | from langchain.llms import VertexAI
# import your tools here
from tools.tools import get_google_search
from langchain.agents import initialize_agent, Tool, AgentExecutor
from langchain.agents import AgentType
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI as OpenAI
from dotenv import load_dotenv
import os
load_dotenv()
if __name__ == "__main__":
pass
# Define your tools here based on tools.py. You can use the template below.
google_search = Tool(
name="GoogleSearch",
func=get_google_search,
description="useful for when you need get a google search result",
)
def getLLM(temperature):
    llm_type = os.getenv("LLM_TYPE")
    if llm_type == "openai":
        llm = OpenAI(model_name=os.getenv("OPENAI_MODEL"))
    elif llm_type == "vertexai":
        llm = VertexAI(temperature=temperature, verbose=True, max_output_tokens=2047, model_name=os.getenv("VERTEX_MODEL"))
    else:
        raise ValueError(f"Unsupported LLM_TYPE: {llm_type!r}; expected 'openai' or 'vertexai'.")
    return llm
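# Illustrative note: getLLM() switches backends purely on environment variables, so a
# hypothetical .env for the Vertex AI path could contain, e.g.:
#     LLM_TYPE=vertexai
#     VERTEX_MODEL=text-bison
# while the OpenAI path would instead set LLM_TYPE=openai and OPENAI_MODEL.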
# Implement your agents here. You can use the template below.
def agent_template(temperature=0) -> AgentExecutor:
print("*" * 79)
print("AGENT: Agent template!")
print("*" * 79)
    llm = getLLM(temperature)
tools_for_agent = [
google_search
]
agent = initialize_agent(
tools_for_agent,
llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
handle_parsing_errors=True
)
return agent | [] |
2024-01-10 | conacts/drivethru | drive-thru.py | import pygame
import os
import openai
import time
import wave
import contextlib
import datetime
import google.cloud.texttospeech as tts
import json
import requests
import sounddevice as sd
import wavio as wv
freq = 44100
duration = 5
speak = False
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
pygame.init()
def record_audio():
filename = "audio/order.mp3"
recording = sd.rec(int(duration * freq), samplerate=freq, channels=1)
print("Recording Audio\n")
sd.wait()
wv.write(filename, recording, freq, sampwidth=2)
return filename
def transcribe_audio(audio_file):
audio_file = open(audio_file, "rb")
transcript = openai.Audio.transcribe("whisper-1", audio_file).text
return transcript
def place_mcdonalds_order(order, json_file=None):
"""Place an order at McDonald's and return the order details"""
j = json.dumps(order, indent=2)
# speak out loud 'I am placing your order right now'
if speak:
filename = text_to_wav(
"en-US-Neural2-H", "I am placing your order right now")
audio_time = get_audio_length(filename)
play_wav_file(filename)
time.sleep(audio_time)
else:
print("Assistant: I am placing your order right now")
place_order(j)
return j
def place_order(json_file, order=None):
url = 'http://localhost:3000/add-order'
headers = {'Content-Type': 'application/json'}
try:
response = requests.post(url, data=json_file, headers=headers)
if response.status_code == 200:
print("Order placed successfully.")
else:
print(f"Failed to place order. Status code: {response.status_code}")
except requests.exceptions.RequestException as e:
print(f"Error: {e}")
def get_audio_length(fname):
with contextlib.closing(wave.open(fname, 'r')) as f:
frames = f.getnframes()
rate = f.getframerate()
duration = frames / float(rate)
return duration
def speak_text(text):
filename = text_to_wav("en-US-Wavenet-F", text)
audio_time = get_audio_length(filename)
play_wav_file(filename)
time.sleep(audio_time)
def end_order(json_file, order=None):
if speak:
speak_text("Thank you, please pull forward to the first window")
else:
print("Thank you, please pull forward to the first window")
exit("Order complete")
def append_chat_message(messages, role, user_input, function_name=None):
if function_name:
messages.append(
{"role": role, "content": user_input, "name": function_name}
)
else:
messages.append({"role": role, "content": user_input})
return messages
def text_to_wav(voice_name: str, text: str):
language_code = "-".join(voice_name.split("-")[:2])
text_input = tts.SynthesisInput(text=text)
voice_params = tts.VoiceSelectionParams(
language_code=language_code, name=voice_name
)
audio_config = tts.AudioConfig(audio_encoding=tts.AudioEncoding.LINEAR16)
client = tts.TextToSpeechClient()
response = client.synthesize_speech(
input=text_input,
voice=voice_params,
audio_config=audio_config,
)
filename = f"audio/{voice_name}+{datetime.datetime.now().strftime('%Y%m%d-%H%M%S')}"
with open(filename, "wb") as out:
out.write(response.audio_content)
# print(f'LOG: Generated speech saved to "{filename}"\n')
return filename
def play_wav_file(filename):
sound = pygame.mixer.Sound(filename)
sound.play()
pygame.mixer.music.stop()
def run_conversation(messages):
# Step 1: send the conversation and available functions to GPT
functions = [
{
"name": "place_mcdonalds_order",
"description": "Place an order at McDonald's and return the order details. Only call this when the user has finalized their order and in your final response read them the price of their order as well.",
"parameters": {
"type": "object",
"properties": {
"order": {
"type": "string",
"description": """The json format of the McDonald's order. It should include the items and customizations. The structure of the parameter 'order' is a json format including these elements:
                        {
                          "order": {
                            "customer vehicle": "Toyota Camry",
                            "items": [
                              {
                                "Item Name": "Big Mac",
                                "Quantity": 1,
                                "customizations": ["No Pickles"]
                              },
                              {
                                "Item Name": "Fries",
                                "Quantity": 1,
                                "size": "Large",
                                "customizations": ["Lots of salt"]
                              }
                            ]
                          }
                        }
""",
},
},
"required": ["order"],
},
},
{
"name": "end_order",
"description": "Call this function after you confirm the customer's order. This will end the conversation.",
"parameters": {
"type": "object",
"properties": {
"json_file": {
"type": "string",
"description": "The json format of the McDonald's order. It should include the items and customizations.",
},
},
"required": ["json_file"],
},
},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=messages,
functions=functions,
function_call="auto",
)
response_message = response["choices"][0]["message"]
# Step 2: check if GPT wanted to call a function
if response_message.get("function_call"):
# Step 3: call the function
# Note: the JSON response may not always be valid; be sure to handle errors
available_functions = {
"place_mcdonalds_order": place_mcdonalds_order,
"end_order": end_order,
}
function_name = response_message["function_call"]["name"]
function_to_call = available_functions[function_name]
print("Function called: " + function_name)
function_args = json.loads(
response_message["function_call"]["arguments"]
)
function_response = function_to_call(
order=function_args.get("order"),
json_file=function_args.get("json_file"),
)
# Step 4: send the info on the function call and function response to GPT
# extend conversation with assistant's reply
messages.append(response_message)
append_chat_message(messages, "function",
function_response, function_name)
print("Function called:", function_name)
second_response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=messages,
)
messages.append(second_response["choices"][0]["message"])
# print(second_response["choices"][0]["message"].content)
response_message = second_response["choices"][0]["message"]
else:
append_chat_message(messages, "assistant",
response_message.content)
# print(response_message.content)
return response_message, messages, True
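# Illustrative sketch of the function-calling round trip handled above (values hypothetical):
#     response_message["function_call"] == {
#         "name": "place_mcdonalds_order",
#         "arguments": '{"order": "{ ... McDonald\'s order JSON ... }"}'
#     }
# run_conversation() executes the matching Python function, appends its return value to the
# history as a "function" role message, asks the model for a follow-up reply, and returns
# that second message along with the updated message list.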
transcript = transcribe_audio("audio/order.mp3")
messages = [
{"role":
"system", "content": "You are a helpful, drive through McDonald's assistant. Your goal is to take a customer's food order from items only on the McDonald's menu. Your goal is to have a short conversation with the customer and after you take their order, you will call the function to 'place_mcdonalds_order' where you will finalize the user's purchase. You must only talk about ordering food, item menu prices and nutritional information. Do not output nutrition information unless the customer explicitly asks about it. Do not talk about the price of the order either."
},
]
if __name__ == "__main__":
continue_conversation = True
if speak:
speak_text("Hello, how may I help you?")
else:
print("Hello, how may I help you?")
while continue_conversation:
if speak:
audio = transcribe_audio(record_audio())
print("You:", audio)
else:
audio = input("You: ")
append_chat_message(messages, "user", audio)
response_message, messages, continue_conversation = run_conversation(
messages)
print("Assistant:", response_message.content)
if speak:
speak_text(response_message.content)
| [
"You are a helpful, drive through McDonald's assistant. Your goal is to take a customer's food order from items only on the McDonald's menu. Your goal is to have a short conversation with the customer and after you take their order, you will call the function to 'place_mcdonalds_order' where you will finalize the user's purchase. You must only talk about ordering food, item menu prices and nutritional information. Do not output nutrition information unless the customer explicitly asks about it. Do not talk about the price of the order either."
] |
2024-01-10 | cowhi/deepatari | deepatari~tools~arg_parser.py | import argparse
from deepatari import __version__
def str2bool(v):
""" Helps to avoid confusion for truth values. """
return v.lower() in ("yes", "true", "t", "1")
def parse_args(args):
""" Parse command line parameters.
Args:
args (tuple[str]): All settings either default or set via command line arguments.
Returns:
args (argparse.Namespace): All settings either default or set via command line arguments.
"""
parser = argparse.ArgumentParser(
description="Framework to facilitate learning in the Atari game playing domain.")
parser.add_argument(
'-v',
'--version',
action='version',
version='deepatari {ver}'.format(ver=__version__))
exparg = parser.add_argument_group('Experiment')
exparg.add_argument("--exp_type", default="AtariExp", help="Choose experiment implementation.")
exparg.add_argument("--env_type", default="AtariEnv", help="Choose environment implementation.")
exparg.add_argument("--agent_type", default="AtariAgent", help="Choose agent implementation.")
exparg.add_argument("--memory_type", default="ReplayMemory", help="Choose memory implementation.")
exparg.add_argument('--with', dest="learner_type", default='DQNNeon', help='Choose network implementation.')
exparg.add_argument("--log_level", choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], default="INFO", help="Log level.")
exparg.add_argument("--log_type", choices=["file", "stdout"], default="file", help="Where to log to.")
exparg.add_argument("--log_stats", type=str2bool, default=True, help="Turn stats on and off.")
exparg.add_argument("--epochs", type=int, default=200, help="How many epochs to run.")
exparg.add_argument("--train_steps", type=int, default=250000, help="How many training steps per epoch.")
exparg.add_argument("--test_steps", type=int, default=125000, help="How many testing steps after each epoch.")
exparg.add_argument("--random_seed", type=int, default=666, help="Random seed for repeatable experiments.")
exparg.add_argument('--backend', choices=['cpu', 'gpu'], default='gpu', help='Choose the backend type, which will perform the calculations.')
exparg.add_argument('--device_id', type=int, default=0, help='gpu device id (only used with GPU backend)')
exparg.add_argument('--datatype', choices=['float16', 'float32', 'float64'], default='float32', help='default floating point precision for backend [f64 for cpu only]')
envarg = parser.add_argument_group('Environment')
envarg.add_argument(
"--game",
default='Breakout-v0',
type=str, action='store',
help='ROM or environment ID from OpenAI gym to run (default: %(default)s)')
envarg.add_argument("--display_screen", type=str2bool, default=False, help="Display game screen during training and testing.")
envarg.add_argument("--frame_skip", type=int, default=4, help="How many times to repeat each chosen action.")
envarg.add_argument("--repeat_action_probability", type=float, default=0.0, help="Probability, that chosen action will be repeated. Otherwise random action is chosen during repeating.")
envarg.add_argument("--color_averaging", type=str2bool, default=True, help="Perform color averaging with previous frame.")
envarg.add_argument("--frame_width", type=int, default=84, help="Frame width after resize.")
envarg.add_argument("--frame_height", type=int, default=84, help="Frame height after resize.")
envarg.add_argument("--sequence_length", type=int, default=4, help="How many image frames form a state.")
memarg = parser.add_argument_group('Replay memory')
memarg.add_argument("--memory_size", type=int, default=1000000, help="Maximum size of replay memory.")
memarg.add_argument("--fill_mem_size", type=int, default=50000, help="Populate replay memory with fill_mem_size random steps before starting to learn.")
antarg = parser.add_argument_group('Agent')
antarg.add_argument("--epsilon_start", type=float, default=1, help="Exploration rate (epsilon) at the beginning of decay.")
antarg.add_argument("--epsilon_end", type=float, default=0.1, help="Exploration rate (epsilon) at the end of decay.")
antarg.add_argument("--epsilon_decay_steps", type=float, default=1000000, help="How many steps to decay the exploration rate (epsilon) .")
antarg.add_argument("--epsilon_test", type=float, default=0.05, help="Exploration rate (epsilon) used during testing.")
antarg.add_argument("--train_frequency", type=int, default=4, help="Perform training after this many game steps.")
antarg.add_argument("--train_repeat", type=int, default=1, help="Number of times to sample minibatch during training.")
antarg.add_argument("--random_starts", type=int, default=30, help="Perform max this number of dummy actions after game restart, to produce more random game dynamics.")
netarg = parser.add_argument_group('Network')
netarg.add_argument("--train_all", type=str2bool, default=False, help="Use all possible actions or minimum set for training.")
netarg.add_argument("--learning_rate", type=float, default=0.00025, help="Learning rate (alpha).")
netarg.add_argument("--discount_rate", type=float, default=0.99, help="Discount rate for future rewards (gamma).")
netarg.add_argument("--batch_size", type=int, default=32, help="Batch size for neural network.")
netarg.add_argument('--optimizer', choices=['rmsprop', 'adam', 'adadelta', 'sgd'], default='rmsprop', help='Network optimization algorithm.')
netarg.add_argument("--decay_rate", type=float, default=0.95, help="Decay rate for RMSProp and Adadelta algorithms.")
netarg.add_argument("--rms_epsilon", type=float, default=1e-6, help="Epsilon for RMSProp")
netarg.add_argument("--momentum", type=float, default=0., help="Momentum for RMSProp")
netarg.add_argument("--clip_error", type=float, default=1., help="Clip error term in update between this number and its negative to avoid gradient become zero.")
netarg.add_argument("--target_update_frequency", type=int, default=10000, help="Copy weights of training network to target network after this many steps.")
netarg.add_argument("--min_reward", type=float, default=-1, help="Minimum reward.")
netarg.add_argument("--max_reward", type=float, default=1, help="Maximum reward.")
netarg.add_argument("--batch_norm", type=str2bool, default=False, help="Use batch normalization in all layers.")
netarg.add_argument('--stochastic_round', const=True, type=int, nargs='?', default=False, help="Use stochastic rounding (will round to BITS number of bits if specified).")
netarg.add_argument("--load_weights", help="Load network from file.")
#otharg = parser.add_argument_group('Other')
#otharg.add_argument("--visualization_filters", type=int, default=4, help="Number of filters to visualize from each convolutional layer.")
#otharg.add_argument("--visualization_file", help="Write layer visualization to this file.")
#otharg.add_argument("--record_path", help="Record game screens under this path. Subfolder for each game is created.")
#otharg.add_argument("--record_sound_filename", help="Record game sound in this file.")
#otharg.add_argument("--play_games", type=int, default=0, help="How many games to play, suppresses training and testing.")
args = parser.parse_args()
return args
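# Illustrative invocation (the entry-point script name is hypothetical; the flags are the
# ones defined above):
#     python run_experiment.py --game Breakout-v0 --backend gpu --epochs 200 --with DQNNeon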
| [] |
2024-01-10 | vejvarm/ASPIRO | parsing.py | import logging
import os
import pathlib
import re
import json
from copy import copy
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from parent import parent as parent_metric
from enum import Enum, auto
from typing import TypeVar
from langchain import PromptTemplate
from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import (
BaseOutputParser,
OutputParserException,
PromptValue,
)
from flags import (CONSTANT_PLACEHOLDER, TemplateErrors, ERROR_MESSAGES, Templates, RDFExampleFormat,
PROMPT_TEMPLATES_FOLDER, ModelChoices, API_KEY_JSON, SUBJECT, RELATION, OBJECT,
OPENAI_REQUEST_TIMEOUT)
from helpers import setup_logger, make_json_compliant
from models import LLMBuilder
NAIVE_COMPLETION_RETRY_WITH_ERROR = PROMPT_TEMPLATES_FOLDER.joinpath("retry.tmp").open().read()
NAIVE_RETRY_WITH_ERROR_PROMPT = PromptTemplate.from_template(NAIVE_COMPLETION_RETRY_WITH_ERROR)
T = TypeVar("T")
def prepare_prompt_and_parser(template_file: Templates, example_format: RDFExampleFormat):
prompt_template = template_file.value.open().read()
prompt_template = prompt_template.replace("<CONSTANT_PLACEHOLDER>", CONSTANT_PLACEHOLDER)
prompt_template = prompt_template.replace(f"{{{example_format.value}}}", f"{{{RDFExampleFormat.DEFAULT.value}}}")
# Prepare parser
if "json" in template_file.value.name:
metadata = json.load(template_file.value.with_suffix(".json").open())
parser = JSONOutputParser.from_metadata(first_key=metadata["first_key"], output_key=metadata["output_key"],
output_type=metadata["output_type"])
else:
parser = TextOutputParser()
# Prepare prompt
partial_variables = dict()
if "{format_instructions}" in prompt_template:
format_instructions = parser.get_format_instructions(template_file)
partial_variables["format_instructions"] = format_instructions
input_variables = [RDFExampleFormat.DEFAULT.value]
if "{subjects}" in prompt_template:
input_variables.append("subjects")
if "{relation}" in prompt_template:
input_variables.append("relation")
if "{objects}" in prompt_template:
input_variables.append("objects")
prompt = PromptTemplate(template=prompt_template, input_variables=input_variables,
partial_variables=partial_variables
)
return prompt, parser
def build_output_dict(output: str, error_codes: list[str], error_messages: list[str],
rdf_example: str = None, subj_labels: list[str] = None, obj_labels: list[str] = None,
shot: int = 0) -> dict:
return {"output": output,
"error_codes": error_codes,
"error_messages": error_messages,
"input_data": rdf_example if rdf_example is not None else '',
"subjects": list(subj_labels) if subj_labels is not None else [''],
"objects": list(obj_labels) if obj_labels is not None else [''],
"shot": shot}
def subject_missing(string: str) -> bool:
return bool(SUBJECT not in string)
def object_xor_value(string: str) -> bool:
return bool((OBJECT in string and CONSTANT_PLACEHOLDER in string) or not (OBJECT in string or CONSTANT_PLACEHOLDER in string))
def misplaced_value(string: str, fetched_example_list: list) -> bool:
return bool(CONSTANT_PLACEHOLDER in string and any(rdf['oid'] for rdf in fetched_example_list))
def contains_illegal_placeholder(string: str) -> bool:
# Step 1: Match all <...> and <<...>> patterns
all_patterns = re.findall(r'(<[^>]*>|<<[^>]*>>)', string)
# Step 2: Filter out CONSTANT_PLACEHOLDER, <subject>, <object>, and <<...>> patterns
invalid_patterns = [pattern for pattern in all_patterns if pattern not in [CONSTANT_PLACEHOLDER, '<subject>', '<object>'] and not pattern.startswith('<<')]
return bool(invalid_patterns)
def parse_subj_obj(answer_from_llm: str, s_labels: list[str], o_labels: list[str]) -> str:
template = copy(answer_from_llm)
for s_label, o_label in zip(s_labels, o_labels):
# Use regexp for replacement of subject and object
# Create both space and underscore versions of each label pattern
s_label_space_pattern = re.escape(s_label).replace("_", " ")
s_label_underscore_pattern = re.escape(s_label).replace(" ", "_")
o_label_space_pattern = re.escape(o_label).replace("_", " ")
o_label_underscore_pattern = re.escape(o_label).replace(" ", "_")
if SUBJECT not in template:
template = re.sub(r'(?:\b|_)' + s_label_space_pattern + r'(?:\b|_)', SUBJECT, template, count=1,
flags=re.IGNORECASE)
if SUBJECT not in template: # If still not replaced
template = re.sub(r'(?:\b|_)' + s_label_underscore_pattern + r'(?:\b|_)', SUBJECT, template, count=1,
flags=re.IGNORECASE)
if OBJECT not in template:
if o_label == CONSTANT_PLACEHOLDER:
template = re.sub(r'(?:\b|_)' + o_label_space_pattern + r'(?:\b|_)', CONSTANT_PLACEHOLDER, template,
count=1, flags=re.IGNORECASE)
if OBJECT not in template: # If still not replaced
template = re.sub(r'(?:\b|_)' + o_label_underscore_pattern + r'(?:\b|_)', CONSTANT_PLACEHOLDER,
template, count=1, flags=re.IGNORECASE)
else:
template = re.sub(r'(?:\b|_)' + o_label_space_pattern + r'(?:\b|_)', OBJECT, template, count=1,
flags=re.IGNORECASE)
if OBJECT not in template: # If still not replaced
template = re.sub(r'(?:\b|_)' + o_label_underscore_pattern + r'(?:\b|_)', OBJECT, template, count=1,
flags=re.IGNORECASE)
# if regexp fails, try just simple replacement
if SUBJECT not in template:
template = template.replace(s_label, SUBJECT, 1)
if OBJECT not in template:
template = template.replace(o_label, OBJECT, 1)
return template
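# Illustrative example (hypothetical labels): delexicalising a generated sentence with the
# subject/object placeholders defined in flags.
#     parse_subj_obj("Paris is the capital of France.", ["Paris"], ["France"])
#         -> "<subject> is the capital of <object>."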
def parse_relation(text: str, rel_lab: str):
return text.replace(RELATION, rel_lab, 1)
def parse_table_format(inp_table_format_string: str):
# Remove 'Table: ' from the string
stripped_string = inp_table_format_string.replace('Table: ', '')
# Split the string by '\n'
split_by_newline = stripped_string.split('\n')
# Get the middle part of each line and construct the result list
result = [['<subject>', s.split(' | ')[1], '<object>'] for s in split_by_newline]
return result
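# Illustrative example (hypothetical table string): each line is reduced to its middle
# column (the relation), with the entities replaced by placeholders.
#     parse_table_format("Table: Paris | capital of | France")
#         -> [['<subject>', 'capital of', '<object>']]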
class TextOutputParser(BaseOutputParser):
def get_format_instructions(self, template_file: Templates = "default") -> str:
try:
return template_file.value.with_suffix(".format_instructions").open().read()
except FileNotFoundError:
return Templates.DEFAULT.value.with_suffix(".format_instructions").open().read()
def parse(self, text: str, metadata: dict = None) -> dict:
return self._parse_text(text, metadata)
def _parse_text(self, text: str, metadata: dict = None) -> dict:
text = text.strip()
text_label = "Text: "
if text_label in text:
text = text[text.rfind(text_label)+len(text_label):]
if metadata is not None:
text = parse_subj_obj(text, metadata["subj_labels"], metadata["obj_labels"])
text = parse_relation(text, metadata["relation_label"])
if not text.endswith("."):
text = text+"."
errors = []
# SUBJECT entity errors
if SUBJECT not in text:
errors.append(TemplateErrors.NO_SUBJECT)
# raise OutputParserException(TemplateErrors.NO_SUBJECT.value)
elif text.count(SUBJECT) > 1:
errors.append(TemplateErrors.MULTIPLE_SUBJECTS)
# raise OutputParserException(TemplateErrors.MULTIPLE_SUBJECTS.value)
# OBJECT entity errors:
if CONSTANT_PLACEHOLDER == OBJECT:
obj = CONSTANT_PLACEHOLDER
elif CONSTANT_PLACEHOLDER in text:
if OBJECT in text:
errors.append(TemplateErrors.OBJECT_XOR_VALUE)
# raise OutputParserException(TemplateErrors.OBJECT_XOR_VALUE.value)
obj = CONSTANT_PLACEHOLDER
else:
obj = OBJECT
if f"{obj}" not in text:
errors.append(TemplateErrors.NO_OBJECT)
# raise OutputParserException(TemplateErrors.NO_OBJECT.value.format(obj=obj))
elif text.count(f"{obj}") > 1:
errors.append(TemplateErrors.MULTIPLE_OBJECTS)
# raise OutputParserException(TemplateErrors.MULTIPLE_OBJECTS.value.format(obj=obj))
# PLACEHOLDER mismatch errors
if contains_illegal_placeholder(text):
errors.append(TemplateErrors.ILLEGAL_PLACEHOLDER)
# raise OutputParserException(TemplateErrors.ILLEGAL_PLACEHOLDER.value)
# if there are any errors, raise OutputParserException
if metadata is None:
rdf_example = None
subj_labels = None
obj_labels = None
else:
rdf_example = metadata["rdf_example"] if "rdf_example" in metadata.keys() else None
subj_labels = metadata["subj_labels"] if "subj_labels" in metadata.keys() else None
obj_labels = metadata["obj_labels"] if "obj_labels" in metadata.keys() else None
output_message = build_output_dict(text, [], [], rdf_example, subj_labels, obj_labels)
if errors:
for err in errors:
output_message["error_codes"].append(err.value)
output_message["error_messages"].append(ERROR_MESSAGES[err])
raise OutputParserException(json.dumps(output_message))
return output_message
@property
def _type(self) -> str:
return "structured"
class JSONOutputParser(TextOutputParser):
first_key: str
output_key: str
output_type: str
@classmethod
def from_metadata(cls, first_key: str, output_key: str, output_type = "json") -> "JSONOutputParser":
"""
:param first_key: [str] dict key of the first entry
:param output_key: [str] dict key of the output entry
:param output_type: (opt) [str] either of `text` for plain text output or `json` for json structure at the output
:return: JSONOutputParser
"""
cls.first_key = first_key
cls.output_key = output_key
cls.output_type = output_type
return cls(first_key=first_key, output_key=output_key, output_type=output_type)
def get_metadata(self):
return {"first_key": self.first_key, "output_key": self.output_key}
def parse(self, json_str: str, metadata: dict = None) -> dict:
if self.output_type == "text":
try:
# first try parsing it as text (relevant for v20_json prompt)
return self._parse_text(json_str, metadata)
except OutputParserException:
parsed_dict = self._parse_json(json_str)
else:
parsed_dict = self._parse_json(json_str)
try:
text = parsed_dict[self.output_key]
except KeyError as err:
output_message = build_output_dict(json_str, [TemplateErrors.JSON_PARSING.value],
[f"KeyError: [output] must have {err} key"])
raise OutputParserException(json.dumps(output_message))
return self._parse_text(text, metadata)
def _parse_json(self, json_str: str) -> dict:
try:
if json_str is None:
raise json.decoder.JSONDecodeError("Expecting json format but got `None`", "", 0)
begin_pos = json_str.find(f'"{self.first_key}":') # start parsing from self.first_key
if begin_pos >= 0:
json_str = json_str[begin_pos:]
json_str = make_json_compliant(json_str)
parsed_dict = json.loads(json_str)
except json.decoder.JSONDecodeError as err:
output_message = build_output_dict(json_str, [TemplateErrors.JSON_PARSING.value], [f"Could not parse [output] as valid json ({repr(err)})"])
raise OutputParserException(json.dumps(output_message))
return parsed_dict
class ConsistencyValidator:
# TODO: implement this as a customLLM
# (https://python.langchain.com/en/latest/modules/models/llms/examples/custom_llm.html)
class Metrics(Enum):
PARENT = auto()
def __init__(self, metric: Metrics, threshold: float, llm_builder: LLMBuilder, model_choice: ModelChoices, prompt_template: str,
source_data_key: str = "data", first_key: str = None, output_key: str = None,
temperature=0., max_tokens: int = 100, stop: list[str] = (".\n", "\n"),
logger: logging.Logger = None, path_to_jsonl_results_file: pathlib.Path = None, **model_kwargs):
assert metric in self.Metrics
assert 0 < threshold < 1
assert model_choice in ModelChoices
if path_to_jsonl_results_file is not None:
assert path_to_jsonl_results_file.suffix == ".jsonl"
self.metric = metric
self.threshold = threshold
self.model_choice = model_choice
self.source_data_key = source_data_key
self.first_key = first_key
self.output_key = output_key
self.prompt = self._prepare_prompt_template(prompt_template)
self.temperature = temperature
self.max_tokens = max_tokens
self.stop = stop
llm = llm_builder.initialize_llm(model_choice, temperature=self.temperature,
max_tokens=self.max_tokens, stop_sequences=self.stop, **model_kwargs)
self.chain = LLMChain(llm=llm, prompt=self.prompt)
self.logger = logger if logger is not None else setup_logger(__name__, logging.WARNING)
self.results_file = path_to_jsonl_results_file
def _parse_answer(self, llm_answer: str) -> dict:
try:
if llm_answer is None:
raise json.decoder.JSONDecodeError("Expecting json format but got `None`", "", 0)
begin_pos = llm_answer.find(f'"{self.first_key}":') # start parsing from self.first_key
if begin_pos >= 0:
llm_answer = llm_answer[begin_pos:]
llm_answer = llm_answer.lstrip("{`\n").rstrip("`\n}")
llm_answer = make_json_compliant(llm_answer)
llm_answer = llm_answer if llm_answer.startswith("{") else "{"+llm_answer
llm_answer = llm_answer if llm_answer.endswith("}") else llm_answer+"}"
parsed_dict = json.loads(llm_answer)
parsed_answer = parsed_dict[self.output_key]
except json.decoder.JSONDecodeError:
return llm_answer
except KeyError:
return json.dumps(parsed_dict)
return parsed_answer
@staticmethod
def _prepare_prompt_template(template_str: str):
assert template_str.find("{template}") > 0, "prompt template file must contain '{template}' entry"
assert template_str.find("{data}") > 0, "prompt template file must contain '{data}' entry"
return PromptTemplate.from_template(template_str)
@staticmethod
def _calc_parent(text: str, reference: str, table: list[list[str, list[str], str]]):
_, _, f1 = parent_metric([text], [reference], [table],
max_order=4,
lambda_weight=1.,
avg_results=True,
n_jobs=1,
use_tqdm=False)
return f1
@staticmethod
def _make_table_map(sentence: str, rel_label: str):
placeholders = [SUBJECT, OBJECT, CONSTANT_PLACEHOLDER]
pattern = '|'.join(placeholders)
sequence = re.findall(pattern, sentence)
try:
table_map = [[sequence[0], rel_label.split(" "), sequence[1]]]
except IndexError:
table_map = [[SUBJECT, rel_label.split(" "), OBJECT]] # default
return table_map
# TODO: this is actually the part to replace by LLM?
def _parse_statistic(self, text: str, **kwargs):
reference: str = kwargs["reference"] if "reference" in kwargs.keys() else None
relation_label: str = kwargs["relation_label"] if "relation_label" in kwargs.keys() else None
if self.metric == self.Metrics.PARENT:
assert reference is not None
assert relation_label is not None
table_map = self._make_table_map(text, relation_label)
score: float = self._calc_parent(text, reference, table_map)
else:
raise NotImplementedError(f"`metric` must be one of {list(self.Metrics)} (got {self.metric})")
if score < self.threshold:
d = True
else:
d = False
return d, score, table_map
def _log_results(self, data_row: dict):
with self.results_file.open("a") as f:
f.write(json.dumps(data_row)+"\n")
def run(self, text: str, metadata: dict, keep_better: bool):
"""
:param text: reference text to run dehalucination on
:param metadata: dictionary of necessary information for calculation of the statistic
        :param keep_better: (bool) if True, keeps the result with the higher score as the final sentence; otherwise returns the dehalucinated one
:return: if keep_better == False: text which is checked and dehalucinated (if flagged by metric as hallucinated)
if keep_better == True: only returns dehalucinated text if the score is better than the original
"""
assert self.source_data_key in metadata.keys()
flag_dehalucinate, score, table_map = self._parse_statistic(text, **metadata)
result_data = {"flag": flag_dehalucinate,
"threshold": self.threshold,
"table_map": table_map,
"metadata": metadata,
"model": self.model_choice.name,
"metric": self.metric.name,
"original": {"score": score, "text": text}}
if flag_dehalucinate:
llm_answer = self.chain.run(template=text, data=metadata[self.source_data_key])
text = self._parse_answer(llm_answer)
d_str = "T" if flag_dehalucinate else "F"
self.logger.info(f"score:{score:.2f}({d_str}) \t text: ({result_data['original']['text']}) -> ({text}) \t table_map: {table_map}")
_, score, _ = self._parse_statistic(text, **metadata)
result_data["new"] = {"score": score, "text": text}
if self.results_file is not None and flag_dehalucinate:
self._log_results(result_data)
if keep_better:
if result_data["original"]["score"] > score:
return result_data["original"]["text"]
else:
return text
class MultiRetryParser(BaseOutputParser[T]):
"""Wraps a parser and tries to fix parsing errors.
Does this by passing the original prompt, the completion, AND the error
that was raised to another language and telling it that the completion
did not work, and raised the given error. Differs from RetryOutputParser
in that this implementation provides the error that was raised back to the
LLM, which in theory should give it more information on how to fix it.
"""
parser: BaseOutputParser[T]
retry_chains = [LLMChain]
@classmethod
def from_llms(
cls,
llms: list[BaseLanguageModel],
parser: BaseOutputParser[T],
prompt: BasePromptTemplate = NAIVE_RETRY_WITH_ERROR_PROMPT,
) -> "MultiRetryParser":
chains = [LLMChain(llm=llm, prompt=prompt) for llm in llms]
return cls(parser=parser, retry_chains=chains)
def parse_with_prompt(self, completion: str, prompt_value: PromptValue, shot=0, max_shots=0, metadata=None) -> T:
if shot >= max_shots:
return shot, self.parser.parse(completion, metadata=metadata)
try:
return shot, self.parser.parse(completion, metadata=metadata)
except OutputParserException as e:
shot += 1
# print(f"retry attempt {shot}", end=", ")
m = min(shot, len(self.retry_chains) - 1)
# print(f"shot: {shot}, LLM[m]: {self.retry_chains[m].llm}") # NOTE: DEBUG
new_completion = self.retry_chains[m].run(
prompt=prompt_value.to_string(), completion=completion, error=str(json.loads(str(e))["error_messages"])
)
return self.parse_with_prompt(new_completion, prompt_value, shot=shot, max_shots=max_shots, metadata=metadata)
def parse(self, completion: str, metadata=None) -> T:
return self.parser.parse(completion, metadata=metadata)
def get_format_instructions(self) -> str:
return self.parser.get_format_instructions()
if __name__ == "__main__":
EXAMPLE_STRINGS = {
"No error": "The <subject> is very good at parsing <object> entities.",
TemplateErrors.NA: "<<err-n/a>>",
TemplateErrors.API: "<<err-api>>",
TemplateErrors.JSON_PARSING: "<<err-parsing>>",
TemplateErrors.NO_SUBJECT: "Today, it is a sunny day in <object>.",
TemplateErrors.MULTIPLE_SUBJECTS: "<subject> and <subject> went to the <object>.",
TemplateErrors.NO_OBJECT: "<subject> went to the.",
TemplateErrors.MULTIPLE_OBJECTS: "<subject> went to the <object> and <object>.",
TemplateErrors.OBJECT_XOR_VALUE: "<subject> has a <object> and a <value>.",
TemplateErrors.MISPLACED_VALUE: "<subject> went to the <value>.",
TemplateErrors.ILLEGAL_PLACEHOLDER: "<subject> went to the <location>.",
TemplateErrors.INFORMATION_LEAK: "<subject:John Doe> went to the <object:library>.",
TemplateErrors.ERR: "<<err>>",
}
template_file = Templates.V10 # @param (NOTE! two-shot only works with V10 or above
prompt_template = template_file.value.open().read()
# replace <CONSTANT_PLACEHOLDER> with specific value
prompt_template = prompt_template.replace("<CONSTANT_PLACEHOLDER>", CONSTANT_PLACEHOLDER)
output_parser = TextOutputParser()
format_instructions = output_parser.get_format_instructions(template_file)
print(format_instructions)
if "{format_instructions}" in prompt_template:
partial_variables = {"format_instructions": format_instructions}
else:
partial_variables = dict()
prompt = PromptTemplate(template=prompt_template, input_variables=["example_table_str"],
partial_variables=partial_variables
)
    prompt.format(example_table_str="hi\nyou")
for example in EXAMPLE_STRINGS.values():
try:
output_dict = output_parser.parse(example)
except OutputParserException as err:
output_dict = json.loads(str(err))
print(output_dict)
| [
"example_table_str",
"(?:\\b|_)",
"<CONSTANT_PLACEHOLDER>"
] |
2024-01-10 | vejvarm/ASPIRO | run_aspiro.py | import argparse
import json
import logging
import pathlib
import statistics
import time
from langchain.schema import OutputParserException
from tqdm import tqdm
from error_analysis import analyze_and_save_errors
from flags import (LOG_ROOT, TemplateErrors, ERROR_MESSAGES, ModelChoices,
RDF_EXAMPLE_FILE_NAME, BACKUP_TEMPLATE, DATA_ROOT)
from helpers import setup_logger, load_examples, load_and_validate_config
from models import NShotGenerator, LLMBuilder
from parsing import (build_output_dict, prepare_prompt_and_parser, MultiRetryParser, ConsistencyValidator, TextOutputParser)
LOGFILE_PATH = LOG_ROOT.joinpath(pathlib.Path(__file__).name.removesuffix(".py")+".log")
LOGGER = setup_logger(__name__, loglevel=logging.WARNING, output_log_file=LOGFILE_PATH)
def dehalucinate(dc: ConsistencyValidator, text: str, metadata: dict, keep_result_with_better_score):
text = dc.run(text, metadata, keep_result_with_better_score)
return text
# debugging
START_FROM, BREAK_AFTER = (None, None) # @debug param: start from example num. `START_FROM` and stop at num. `BREAK_AFTER`
def _to_results(result_entry_dict: dict, final_output_dict: dict, intermediate_result_file: pathlib.Path):
with intermediate_result_file.open("a") as f_intermediate_result:
f_intermediate_result.write(json.dumps(result_entry_dict)+"\n")
final_output_dict.update(result_entry_dict)
def main(args):
config = load_and_validate_config(args.config)
n_runs = config["n_runs"] # @param
dataset_choice = config["dataset_choice"] # @param
template_file = config["initial_template"] # @param
model_choices = config["llm_stack"] # @param
load_in_8bit = config["load_in_8bit"] if "load_in_8bit" in config.keys() else False
load_in_4bit = config["load_in_4bit"] if "load_in_4bit" in config.keys() else False
max_retry_shots = config["max_retry_shots"] if "max_retry_shots" in config.keys() else len(model_choices) - 1 # @param (0 ... zero-shot, 1 ... one-shot, ....)
consist_val_model = config["consist_val_model"] # @param (if None, don't use dehalucination)
example_format = config["example_format"] # @param
max_fetched_examples_per_pid = config["max_fetched_examples_per_pid"] # @param
error_dump_subfolder = config["error_dump_subfolder"] # @param (if None, errors are not saved)
use_backup = config["use_backup"] # @param (to change backup template, change flags.BACKUP_TEMPLATE)
# GENERAL MODEL HYPERPARAMS
# max_tokens_to_generate = config["max_tokens_to_generate"] # @param
# temperature = config["temperature"] # @param
# stop_sequences = config["stop_sequences"] # @param
# Consistency Validation HYPERPARAMS
cv_metric = ConsistencyValidator.Metrics.PARENT
cv_threshold = config["cv_threshold"] # about 157 prompts in the original
cv_template = config["cv_template"]
cv_keep_better = config["cv_keep_better"] # @param
cv_load_in_8bit = config["cv_load_in_8bit"] if "cv_load_in_8bit" in config.keys() else False
cv_load_in_4bit = config["cv_load_in_4bit"] if "cv_load_in_4bit" in config.keys() else False
dataset_folder = dataset_choice.value
output_folder = pathlib.Path(args.output).joinpath(dataset_folder.relative_to(DATA_ROOT))
# INITIALIZE PROMPT and PARSER
prompt, output_parser = prepare_prompt_and_parser(template_file, example_format)
# LOGGER.warning(prompt.format(examples="hi\nyou"))
# Load pids and rdfs examples from path_to_fetched_example_json
path_to_fetched_example_json = dataset_folder.joinpath(RDF_EXAMPLE_FILE_NAME)
pid_examples_dict = load_examples(path_to_fetched_example_json, max_fetched_examples_per_pid, example_format,
dataset_choice)
run = 0
runs_left = n_runs
elapsed_time = {}
retry_models = ",".join([mch.name for mch in model_choices[1:]]) if (max_retry_shots > 0
and len(model_choices) > 1) else "NONE"
path_to_experiment_folder = output_folder.joinpath(f"{template_file.name}").joinpath(
f"{model_choices[0].name}({retry_models})({max_retry_shots}shot)({consist_val_model.name or 'NONE'})"
).joinpath(f"max_examples{max_fetched_examples_per_pid}")
while runs_left > 0:
# Define files for results
path_to_output_template_json = path_to_experiment_folder.joinpath(f"run{run:02d}").joinpath(
f"templates-{dataset_choice.name.lower()}_{template_file.name}.json")
if path_to_output_template_json.parent.exists():
print(f"RUN NUMBER: {run} (EXISTS)")
run += 1
continue
# INITIALIZE LANGCHAIN with specified `model_choices` and `prompt`
llm_builder = LLMBuilder()
llm_builder.initialize_chains(model_choices, prompt, template_file,
# max_tokens_to_generate=max_tokens_to_generate,
# temperature=temperature,
# stop_sequences=stop_sequences,
# load_in_8bit=load_in_8bit,
# load_in_4bit=load_in_4bit,
**config)
llms = llm_builder.llms
llm_chains = llm_builder.chains
retry_parser = MultiRetryParser.from_llms(parser=output_parser, llms=llms)
# INITIALIZE dehalucination chain class
dc_prompt_version = f"_{cv_template.name.lower()}"
consistency_validator_log = LOG_ROOT.joinpath(dataset_choice.name).joinpath(template_file.name).joinpath(
f"{','.join(m.name for m in model_choices)}({max_retry_shots}shot)({consist_val_model.name}){dc_prompt_version}.jsonl") # @param
if consist_val_model.value is not None:
consistency_validator_log.parent.mkdir(parents=True, exist_ok=True)
prompt_template = cv_template.value.open().read()
prompt_metadata = json.load(cv_template.value.with_suffix(".json").open())
# TODO make work for versions below v4
dc = ConsistencyValidator(cv_metric, cv_threshold, llm_builder, consist_val_model, prompt_template,
source_data_key=prompt_metadata["source_data_key"],
first_key=prompt_metadata["first_key"], output_key=prompt_metadata["output_key"],
stop=prompt_metadata["stop"],
path_to_jsonl_results_file=consistency_validator_log,
load_in_8bit=cv_load_in_8bit, load_in_4bit=cv_load_in_4bit)
else:
dc = None
# run start
start_time = time.time()
runs_left -= 1
path_to_output_template_json.parent.mkdir(parents=True, exist_ok=True)
print(f"RUN NUMBER: {run} (left: {runs_left})")
output_pid_template_dict = {}
intermediate_result_file = path_to_output_template_json.with_suffix(".jsonl")
k = 0
while intermediate_result_file.exists():
k += 1
LOGGER.warning(f"(k={k}):\n\t"
f"The intermediate results file already exists at path: {intermediate_result_file}")
intermediate_result_file = intermediate_result_file.with_stem(f"{intermediate_result_file.stem}({k})")
backup_count = 0
for i, (pid, example) in tqdm(enumerate(pid_examples_dict.items()), total=len(list(pid_examples_dict.keys()))):
# prepare input
rdf_example, subj_labs, rel_labs, obj_labs = example
unique_rel_labs = list(set(rel_labs))
if len(unique_rel_labs) == 1:
rel_lab = unique_rel_labs[0]
else:
raise NotImplementedError("Example structures must have only 1 unique relation in all their entries")
inp = {"examples": rdf_example}
if "subjects" in prompt.input_variables:
inp["subjects"] = subj_labs
if "relation" in prompt.input_variables:
inp["relation"] = rel_lab
if "objects" in prompt.input_variables:
inp["objects"] = obj_labs
# debugging purposes
if START_FROM is not None and i < START_FROM:
for mdl in model_choices:
if isinstance(mdl.value, pathlib.Path):
_ = llm_chains[0].run(inp)
continue
# debugging purposes
if BREAK_AFTER is not None and i == BREAK_AFTER:
break
if not rdf_example:
err = ERROR_MESSAGES[TemplateErrors.NA]
LOGGER.warning(f"({pid}) {TemplateErrors.NA.value}: {err}']")
out_dict = {pid: build_output_dict("", [TemplateErrors.NA.value], [err], rdf_example, subj_labs, obj_labs)}
_to_results(out_dict, output_pid_template_dict, intermediate_result_file)
continue
metadata = {"data": rdf_example, "reference": rel_lab, "relation_label": rel_lab,
"rdf_example": rdf_example, "subj_labels": subj_labs, "obj_labels": obj_labs}
# Zero-shot
try:
answer = llm_chains[0].run(inp)
except Exception as err:
LOGGER.warning(f"({pid}) {TemplateErrors.API.value}: {err}.")
out_dict = {pid: build_output_dict("", [TemplateErrors.API.value], [repr(err)],
rdf_example, subj_labs, obj_labs)}
_to_results(out_dict, output_pid_template_dict, intermediate_result_file)
continue
# parse the answer
shot = 0
try:
# TODO: change to Retry prompt STACK (change prompt version with each shot)
shot, output_dict = retry_parser.parse_with_prompt(answer, prompt.format_prompt(**inp),
shot=shot, max_shots=max_retry_shots, metadata=metadata)
except OutputParserException as err:
LOGGER.info(f'({pid}) {TemplateErrors.PARSING.value}: {err}')
shot = max_retry_shots
output_dict = json.loads(str(err))
if use_backup:
if not ("<subject>" in output_dict["output"] and "<object>" in output_dict["output"]):
output_dict["output"] = BACKUP_TEMPLATE.format("<subject>", rel_lab, "<object>")
backup_count += 1
output_dict = build_output_dict(output=output_dict["output"],
error_codes=output_dict["error_codes"],
error_messages=output_dict["error_messages"],
rdf_example=rdf_example,
subj_labels=subj_labs, obj_labels=obj_labs, shot=shot)
# dehalucinate
if dc is not None:
text = dehalucinate(dc, output_dict["output"], metadata, cv_keep_better)
output_dict["output"] = text
final_templates = {pid: output_dict}
_to_results(final_templates, output_pid_template_dict, intermediate_result_file)
json.dump(output_pid_template_dict, path_to_output_template_json.open("w"), indent=2)
print(f"Output saved into {path_to_output_template_json}")
if error_dump_subfolder is not None:
_folder_to_dump_error_jsons = path_to_output_template_json.parent.joinpath(error_dump_subfolder)
_folder_to_dump_error_jsons.mkdir(parents=True, exist_ok=True)
analyze_and_save_errors(path_to_output_template_json, _folder_to_dump_error_jsons, parser=TextOutputParser())
err_counts_file = _folder_to_dump_error_jsons.joinpath("errCOUNTS.json")
err_counts_dict = json.load(err_counts_file.open("r"))
err_counts_dict["BACKUPS"] = backup_count
json.dump(err_counts_dict, err_counts_file.open("w"), indent=2)
print(f"Error analysis saved into: {_folder_to_dump_error_jsons}")
elapsed_time[str(run)] = time.time() - start_time
print(f"time taken: {elapsed_time[str(run)]:.3f} seconds")
# save run times to json file
path_to_runtime_json = path_to_experiment_folder.joinpath("runtime_seconds.json")
if path_to_runtime_json.exists():
old_el_time = json.load(path_to_runtime_json.open("r"))
old_el_time.update(elapsed_time)
elapsed_time = old_el_time
elapsed_time["mean"] = statistics.mean(elapsed_time.values())
json.dump(elapsed_time, path_to_runtime_json.open("w"), indent=2)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Process the path to the configuration file.')
parser.add_argument('--config', type=str, default="setups/json_default.json", help='Path to the configuration file.')
parser.add_argument('--output', type=str, default="data", help='Path to output folder.')
main(parser.parse_args()) | [
"{}",
"initial_template",
"cv_template",
".json"
] |
2024-01-10 | vejvarm/ASPIRO | error_analysis.py | import argparse
import json
import logging
import pathlib
from langchain.schema import BaseOutputParser, OutputParserException
from tqdm import tqdm
from flags import LOG_ROOT, PROMPT_TEMPLATES_FOLDER, TemplateErrors, sTOTAL_ERRORS, sTOTAL_PIDs_w_ERROR
from helpers import setup_logger
from parsing import JSONOutputParser, TextOutputParser
LOGFILE_PATH = LOG_ROOT.joinpath(pathlib.Path(__file__).name.removesuffix(".py")+".log")
LOGGER = setup_logger(__name__, loglevel=logging.WARNING, output_log_file=LOGFILE_PATH)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--generated_template_file",
type=str,
help="Path to the json file with generated templates."
)
parser.add_argument(
"--output_subfolder",
type=str,
default="err",
help="The name of subfolder to store errors."
)
parser.add_argument(
"--template_file",
type=str,
help="Used to decide which parsing to use. Must end with '.tmp' and must contain 'json' in the name if structured output."
)
args = parser.parse_args()
return args
def analyze_and_save_errors(path_to_generated_templates: pathlib, dump_folder: pathlib.Path, parser: BaseOutputParser = None):
output_pid_template_dict = json.load(path_to_generated_templates.open())
# recalculate the errors if parser is provided
if parser is not None:
for rel, output_dict in output_pid_template_dict.items():
output = output_dict["output"]
try:
parser.parse(output)
error_codes = []
error_messages = []
except OutputParserException as parser_err_msg:
out_dict = json.loads(str(parser_err_msg))
error_codes = out_dict["error_codes"]
error_messages = out_dict["error_messages"]
output_pid_template_dict[rel]["error_codes"] = error_codes
output_pid_template_dict[rel]["error_messages"] = error_messages
json.dump(output_pid_template_dict, path_to_generated_templates.open("w"), indent=2)
total_pids = len(output_pid_template_dict)
error_dicts = {err.name: {} for err in TemplateErrors}
total_errors = 0
pid_error_set = set()
for i, (pid, output_dict) in tqdm(enumerate(output_pid_template_dict.items()), total=total_pids):
output = output_dict["output"]
error_codes = output_dict["error_codes"]
# error_messages = output_dict["error_messages"]
# ERROR HANDLING
has_error = False
for err in TemplateErrors:
if err.value in error_codes:
error_dicts[err.name][pid] = output
has_error = True
total_errors += 1
if has_error:
pid_error_set.add(pid)
pid_error_list = list(pid_error_set)
err_counts_dict = {sTOTAL_ERRORS: total_errors,
sTOTAL_PIDs_w_ERROR: len(pid_error_list)}
for err in TemplateErrors:
err_counts_dict[err.name] = len(list(error_dicts[err.name].keys()))
json.dump(error_dicts[err.name],
dump_folder.joinpath(
f"err{err.name}.json").open("w"),
indent=2)
json.dump(err_counts_dict, dump_folder.joinpath(f"errCOUNTS.json").open("w"), indent=2)
json.dump(pid_error_list, dump_folder.joinpath(f"errLISTofPIDs.json").open("w"), indent=2)
def main(args):
if args.template_file:
assert args.template_file.endswith(".tmp")
if "json" in args.template_file:
path_to_template_file = PROMPT_TEMPLATES_FOLDER.joinpath(args.template_file)
path_to_metadata_file = path_to_template_file.with_suffix(".json")
assert path_to_template_file.exists(), f"The provided template file doesn't exist at {PROMPT_TEMPLATES_FOLDER}."
assert path_to_metadata_file.exists(), f"The metadata file `{path_to_metadata_file.name}` doesn't exists at {path_to_metadata_file.parent}."
metadata = json.load(path_to_metadata_file.open())
parser = JSONOutputParser.from_metadata(metadata["first_key"], metadata["output_key"])
else:
parser = TextOutputParser()
else:
parser = None
path_to_generated_templates = pathlib.Path(args.generated_template_file)
folder_to_dump_error_jsons = path_to_generated_templates.parent.joinpath(args.output_subfolder)
folder_to_dump_error_jsons.mkdir(parents=True, exist_ok=True)
# LOAD existing template file
analyze_and_save_errors(path_to_generated_templates, folder_to_dump_error_jsons, parser)
if __name__ == "__main__":
main(parse_args())
| [] |
2024-01-10 | KHMSmartBuild/Eco-Bot | agents~agent_classes.py | """
This is the agent class module.
The script acts as a library for the agent classes.
The agent classes are responsible for creating the agents and their digital twin agents.
"""
import os
import openai
from dotenv import load_dotenv
import autogen
from autogen.agentchat import AssistantAgent, UserProxyAgent, Agent, GroupChat, GroupChatManager
from dt.digital_twin import DigitalTwinAgent
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
openai.organization = os.getenv("OPENAI_ORGANIZATION")
config = {
"api_key": openai.api_key,
"response": "...",
"prompt": "...",
"message": "...",
"engine": "gpt-3",
"temperature": 0.72,
"max_tokens": 1500,
"top_p": 1,
"frequency_penalty": 0.5,
}
class AgentClass:
def __init__(self, role, llm_config):
self.group_chat = GroupChat(agents=[], messages=[])
self.understanding_agent = UnderstandingAgent(name="Understanding Agent")
self.task_master = TaskMaster(name="Task Master")
self.main_safety_agent = MainSafetyAgent(name="Main Safety Agent")
self.role = role
self.llm_config = llm_config
self.agent_creator = Agent(name="Agent Creator")
agent = Agent()
self.digital_twin = DigitalTwinAgent(agent) # Create the Digital Twin
class GeneralManager(UserProxyAgent):
    def __init__(self, name, role, llm_config=None):
        """
        Initializes a new instance of the class.
        Args:
            name (str): The name of the instance.
            role (str): The role of the instance.
            llm_config (dict): The configuration for the instance.
        Returns:
            None
        """
        if llm_config is None:
            llm_config = {}
        super().__init__(task="...", name=name)
        self.group_chat_manager = GroupChatManager(agents=[], messages=[], groupchat=GroupChat())
        self.group_chat = GroupChat(agents=[], messages=[])
        self.understanding_agent = UnderstandingAgent(name="Understanding Agent")
        self.task_master = TaskMaster(name="Task Master")
        self.main_safety_agent = MainSafetyAgent(name="Main Safety Agent")
        self.role = role
        self.llm_config = llm_config
        self.agent_creator = Agent(task="...", name="Agent Creator")
        self.digital_twin = DigitalTwinAgent()  # Create the Digital Twin
def monitor_safety(self, script):
"""
Monitor the safety of a script.
Args:
script (str): The script to be monitored.
Returns:
str: If the script contains the string "unsafe", it returns an alert message.
If the script is safe, it returns the string "Script is safe".
"""
if "unsafe" in script:
alert = self.main_safety_agent.handle_message("Safety breach detected!")
try:
self.digital_twin.log_unsafe_script(script) # Log the unsafe script with the Digital Twin
except AttributeError:
pass
return alert
return "Script is safe"
def handle_user_message(self, message):
"""
Handle the incoming message.
Args:
message (str): The message to be handled.
Returns:
str: The response generated by the understanding agent.
"""
try:
response = self.understanding_agent.handle_message(message)
except Exception as e:
error_response = self.task_master.handle_message(str(e))
try:
self.digital_twin.log_error(str(e)) # Log the error with the Digital Twin
except AttributeError:
pass
return error_response
return response
def handle_message(self, message):
"""
Handles a message by passing it to the understanding agent and returning the response.
Parameters:
message (str): The message to be handled.
Returns:
str: The response generated by the understanding agent.
dict: The error response if an exception occurs.
"""
try:
response = self.understanding_agent.handle_message(message)
except Exception as e:
error_response = self.task_master.handle_message(str(e))
self.digital_twin.log_error(str(e)) # Log the error with the Digital Twin
return error_response
return response
def manage_conversation(self, user_input):
"""
Manage the conversation based on user input.
Args:
user_input (str): The user input.
Returns:
str: The response to the user input.
"""
response = self.handle_message(user_input)
return response
def start_chat(self, user_input):
"""
Start a chat with the user.
Args:
user_input (str): The user input.
Returns:
str: The response to the user input.
"""
response = self.handle_message(user_input)
return response
def create_agent(self, agent_name):
"""
Create an agent with the given name.
Args:
agent_name (str): The name of the agent to create.
Returns:
Agent: The created agent.
"""
agent = self.agent_creator.create_agent(agent_name)
self.group_chat_manager.add_agent(agent)
return agent
class GeneralManager(UserProxyAgent):
    def __init__(self, name, role, llm_config=config):
        super().__init__(name=name)
        self.group_chat_manager = GroupChatManager(agents=[], messages=[], groupchat=GroupChat(agents=[], messages=[]))
        self.group_chat = GroupChat(agents=[], messages=[])
        self.understanding_agent = UnderstandingAgent(name="Understanding Agent")
        self.task_master = TaskMaster(name="Task Master")
        self.main_safety_agent = MainSafetyAgent(name="Main Safety Agent")
        self.role = role
        self.llm_config = llm_config
        self.agent_creator = Agent(name="Agent Creator")
def handle_message(self, message):
try:
response = self.understanding_agent.handle_message(message)
except Exception as e:
error_response = self.task_master.handle_message(str(e))
return error_response
return response
def monitor_safety(self, script):
if "unsafe" in script:
alert = self.main_safety_agent.handle_message("Safety breach detected!")
return alert
return "Script is safe"
def manage_conversation(self, user_input):
"""
Manage the conversation based on user input.
Args:
user_input (str): The user input.
Returns:
str: The response to the user input.
"""
response = self.handle_message(user_input)
return response
def start_chat(self, user_input):
"""
Start a chat with the user.
Args:
user_input (str): The user input.
Returns:
str: The response to the user input.
"""
response = self.handle_message(user_input)
return response
def create_agent(self, agent_name):
"""
Create an agent with the specified name.
Args:
agent_name (str): The name of the agent to create.
params (dict, optional): Additional parameters for the agent. Defaults to gbts.
func (function, optional): The function to use for the agent. Defaults to None.
task (str, optional): The task for the agent. Defaults to None.
groupchat (GroupChat, optional): The group chat for the agent. Defaults to None.
Returns:
Agent: The created agent.
"""
agent = Agent(name=agent_name)
return agent
# Additional methods like create_agent, manage_conversation, etc.
class Agent:
    def __init__(self, name, task=None):
        self.name = name
        self.task = task
        # Initialize other necessary attributes
def handle_task(self, user_input):
"""
Handle the task for the user input.
Args:
user_input (str): The user input.
Returns:
str: The response to the user input.
"""
response = f"Handling task: {self.task} for input: {user_input}"
return response
class DigitalTwinAgent:
def __init__(self, agent):
"""
Initialize a DigitalTwinAgent instance.
Args:
agent (Agent): The agent to create a digital twin for.
"""
self.agent = agent
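
    # Minimal logging stubs sketching the interface that the manager classes above assume when they
    # call log_error / log_unsafe_script on the twin. The real implementation is expected to live in
    # dt.digital_twin; these placeholders only make the sketch in this file runnable on its own.
    def log_error(self, error_message):
        # Record an error observed on behalf of the twinned agent
        print(f"[DigitalTwin:{getattr(self.agent, 'name', 'unknown')}] error: {error_message}")

    def log_unsafe_script(self, script):
        # Record a script that was flagged as unsafe
        print(f"[DigitalTwin:{getattr(self.agent, 'name', 'unknown')}] unsafe script logged: {script}")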
class PromptTreeNode:
def __init__(self, prompt, parent=None):
"""
Initialize a PromptTreeNode instance.
Args:
prompt (str): The prompt for the tree node.
parent (PromptTreeNode, optional): The parent node. Defaults to None.
"""
self.prompt = prompt
self.parent = parent
self.children = []
def add_child(self, child_node):
"""
Add a child node to the current node.
Args:
child_node (PromptTreeNode): The child node to add.
"""
self.children.append(child_node)
def remove_child(self, child_node):
"""
Remove a child node from the current node.
Args:
child_node (PromptTreeNode): The child node to remove.
"""
self.children.remove(child_node)
def get_child(self, index):
"""
Get the child node at the specified index.
Args:
index (int): The index of the child node.
Returns:
PromptTreeNode: The child node at the specified index.
"""
return self.children[index]
def get_parent(self):
"""
Get the parent node.
Returns:
PromptTreeNode: The parent node.
"""
return self.parent
def get_children(self):
"""
Get the list of children nodes.
Returns:
list: The list of children nodes.
"""
return self.children
def __str__(self):
"""
Get the string representation of the node.
Returns:
str: The prompt
"""
return self.prompt
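

# Illustrative usage sketch for PromptTreeNode: builds a tiny prompt tree by hand and reads it back.
# The prompt strings below are made-up placeholders, not fixed prompts used by the agents.
def build_example_prompt_tree():
    root = PromptTreeNode("Recycling at home")
    root.add_child(PromptTreeNode("What is recycling at home about?", parent=root))
    root.add_child(PromptTreeNode("How does recycling at home work?", parent=root))
    root.add_child(PromptTreeNode("Why is recycling at home important?", parent=root))
    # Each child keeps a reference to its parent, so the tree can be walked in either direction
    return [str(child) for child in root.get_children()]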
video_platform = "YouTube"
# pictory_agent.py
class PictoryAgent:
def __init__(self, api_key):
self.api_key = api_key
def generate_imagery(self, keywords):
"""
Generate imagery based on the provided keywords using the Pictory API.
Parameters:
keywords (list): List of keywords extracted from the conversation.
Returns:
str: URL or path to the generated imagery.
"""
# Placeholder logic for Pictory API integration
# In practice, you'll need to make an API call to Pictory with the keywords
# and then retrieve the URL or path to the generated imagery.
# For demonstration purposes:
imagery_url = "https://example.com/path/to/generated/imagery.jpg"
return imagery_url
def out(self, user_input):
"""
This function can be invoked from the chat to handle specific tasks related to the Pictory agent.
Parameters:
user_input (str): Input or command from the user.
Returns:
str: Response or feedback to the user.
"""
# Placeholder logic to handle user input
# Depending on the user_input, this function can perform specific tasks
# related to the Pictory agent and return an appropriate response.
response = "Pictory agent received your input."
return response
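
    # Hypothetical sketch of what a real generate_imagery call could look like. The endpoint,
    # payload fields and response shape below are assumptions for illustration only; the actual
    # Pictory API contract is not defined in this module.
    def generate_imagery_via_api(self, keywords):
        import requests  # local import to keep the sketch self-contained

        response = requests.post(
            "https://api.pictory.example/v1/imagery",  # placeholder URL, not the real endpoint
            headers={"Authorization": f"Bearer {self.api_key}"},
            json={"keywords": keywords},
            timeout=30,
        )
        response.raise_for_status()
        return response.json().get("imagery_url")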
class WhatAgent:
    def __init__(self, name):
        self.name = name
        self.prompt = "What is the [message] about?"
        self.response = "The [message] is about..."

    def expand_prompt(self, prompt):
        # Turn the extracted prompt into a "what" question node
        return PromptTreeNode(self.prompt.replace("[message]", prompt))


class HowAgent:
    def __init__(self, name):
        self.name = name
        self.prompt = "How does the [message] work?"
        self.response = "The [message] works by..."

    def expand_prompt(self, prompt):
        # Turn the extracted prompt into a "how" question node
        return PromptTreeNode(self.prompt.replace("[message]", prompt))


class WhyAgent:
    def __init__(self, name):
        self.name = name
        self.prompt = "Why is the [message] important?"
        self.response = "The [message] is important because..."

    def expand_prompt(self, prompt):
        # Turn the extracted prompt into a "why" question node
        return PromptTreeNode(self.prompt.replace("[message]", prompt))


class UnderstandingAgent(AssistantAgent):
    def __init__(self, name):
        super().__init__(name=name)
        # Initialize the sub-agents that each question the eco-bot's message from a different angle
        self.what_agent = WhatAgent(name="What Agent")
        self.how_agent = HowAgent(name="How Agent")
        self.why_agent = WhyAgent(name="Why Agent")
def handle_message(self, message):
# Extract keywords or sentences to form the initial prompt
initial_prompt = self.extract_prompt(message)
root_node = PromptTreeNode(initial_prompt)
# Let each sub-agent expand on the prompt
what_node = self.what_agent.expand_prompt(initial_prompt)
how_node = self.how_agent.expand_prompt(initial_prompt)
why_node = self.why_agent.expand_prompt(initial_prompt)
# Add the expanded prompts as children of the root node
root_node.add_child(what_node)
root_node.add_child(how_node)
root_node.add_child(why_node)
        # The root_node now represents the full prompt structure
        # This can be passed to a visualization function to create interactive D3 nodes
        self.visualize_prompt_tree(root_node)

        # Return the combined response or the visualization
        return root_node

    def initiate_chat(self):
        """
        Start a conversation with the EcoBot.

        Returns:
            str: The welcome message.
        """
        return "Welcome to EcoBot! How can I assist you today?"
def extract_prompt(self, message):
# Logic to extract the initial prompt from the message
return "Extracted prompt"
def visualize_prompt_tree(self, prompt_tree_node):
# Logic to visualize the prompt tree using D3.js
pass
class TaskMaster(AssistantAgent):
def handle_message(self, message):
# Here, tasks can be managed, delegated, or processed.
# For the sake of simplicity, let's just return the message.
return f"Task received: {message}"
class MainSafetyAgent(AssistantAgent):
def __init__(self, name):
super().__init__(name=name)
# Initialize any necessary attributes
def handle_message(self, message):
# Handle safety-related messages.
# Read safety agent reports and advise
report = self.read_safety_agent_reports()
advice = self.process_reports(report)
return f"Alert: {message}\nAdvice: {advice}"
def read_safety_agent_reports(self):
# Placeholder logic to read safety agent reports
return "Safety agent reports"
def process_reports(self, report):
# Placeholder logic to process safety agent reports
return "Safety agent advice"
class SafetyAgent(Agent):
def handle_message(self, message):
# Handle safety-related messages.
return f"Alert: {message}"
class TaskMaker(Agent):
def __init__(self, name):
super().__init__(name=name)
self.task = "..."
self.task_delegator = TaskDelegator(name="Task Delegator")
# You can initialize other attributes if necessary
def formulate_task(self, message):
# Based on the message or other criteria, create a task.
# This is a simplified representation; real-world scenarios would require more complex logic.
task_response = f"Formulated Task: {message}"
return task_response
class TaskDelegator(Agent):
def __init__(self, name, worker_agents):
super().__init__(name=name)
self.worker_agents = worker_agents # A list or dictionary of worker agents
def delegate_task(self, task):
# Decide which worker agent should handle the task.
# Here's a very simplified mechanism: round-robin assignment.
# In real-world applications, you'd use more sophisticated task assignment logic.
worker_agent = self.worker_agents.pop(0)
self.worker_agents.append(worker_agent)
response = worker_agent.handle_message(task)
return response
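
    # Sketch of the "more sophisticated task assignment logic" mentioned above: prefer a worker
    # whose speciality appears in the task text, and fall back to round-robin otherwise. This is
    # an illustration only, not part of the original delegation flow.
    def delegate_task_by_speciality(self, task):
        for worker_agent in self.worker_agents:
            speciality = getattr(worker_agent, "speciality", None)
            if speciality and speciality.lower() in task.lower():
                return worker_agent.handle_message(task)
        return self.delegate_task(task)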
class WorkerAgent(autogen.Agent):
def __init__(self, name, speciality=None):
super().__init__(name=name)
self.speciality = speciality # Each worker agent might have a speciality or area of expertise
def handle_message(self, message):
# This is where the main logic of the worker agent will reside.
# For now, it's a placeholder. It will acknowledge the task and note its speciality.
# Logic to handle the task based on the message and the agent's speciality
# ...
response = f"{self.name} with speciality {self.speciality} acknowledges task: {message}"
return response
# You can add more methods specific to the tasks the worker agents might perform.
class EcoBot(UserProxyAgent):
def __init__(self, name):
super().__init__(name=name)
# Initialize the groupchat Agents
        self.gma_agent = GeneralManager(name="General Manager Agent", role="manager")  # role label is arbitrary here
        self.safety_agent = MainSafetyAgent(name="Main Safety Agent")
        self.task_maker = TaskMaker(name="Task Maker")
        self.worker_agents = [WorkerAgent(name=f"Worker Agent {chr(i)}") for i in range(65, 72)]  # Worker Agents A-G
        self.task_delegator = TaskDelegator(name="Task Delegator", worker_agents=self.worker_agents)
def handle_message(self, message):
# Forward the message to the Understanding Agent for processing
response = self.gma_agent.handle_message(message)
# Check if the message contains a safety alert
if "alert" in message:
# If so, forward the message to the Main Safety Agent
response = self.safety_agent.handle_message(message)
# Check if the message contains a task
# For simplicity, we'll assume that the message contains a task if it contains the word "task"
# In real-world applications, you'd use more sophisticated logic to detect tasks.
# For example, you could use a task detection model or a keyword-based approach.
# You could also use a combination of approaches.
# For example, you could use a task detection model to detect tasks and then use a keyword-based approach to detect keywords in the task.
# ...
# If the message contains a task, forward the message to the Task Maker
# The Task Maker will formulate a task based on the message
# The Task Maker will then forward the task to the Task Delegator
# The Task Delegator will delegate the task to a worker agent
# The worker agent will handle the task
# ...
if "task" in message:
task = self.task_maker.formulate_task(message)
response = self.task_delegator.delegate_task(task)
# Return the response
return response
def initiate_chat(self, message):
# For starting a conversation with the user
print(f"Eco-Bot: {message}")
while True:
user_input = input("User: ")
if user_input.lower() in ["exit", "quit", "terminate"]:
print("Eco-Bot: Goodbye!")
break
response = self.handle_message(user_input)
print(f"Eco-Bot: {response}")
worker_agents = [WorkerAgent(name=f"Worker Agent {chr(i)}") for i in range(65, 72)] # A, B, C, ... G
task_maker = TaskMaker(name="Task Maker")
task_delegator = TaskDelegator(name="Task Delegator", worker_agents=worker_agents)
# Example usage:
task = task_maker.formulate_task("Analyze dataset X")
response = task_delegator.delegate_task(task) | [] |
2024-01-10 | KHMSmartBuild/Eco-Bot | eco_buddies~eco_bot_chat.py | """
# ECO-BOT CHAT
This is a simple chatbot that uses OpenAI's GPT-4 model to generate responses to user input.
"""
import json
import logging
import os
import openai
from dotenv import load_dotenv
# Load API keys from .env file
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY", "")
organization_id = os.getenv("OpenAI_Organization_ID", "")
print(f"API Key loaded: {api_key != ''}")
class EcoBot:
"""
A class representing EcoBot, a sustainability companion and guide.
Attributes:
api_key (str): The API key for accessing the AI service.
organization_id (str): The organization ID for the AI service.
temperature (tuple): A tuple representing the temperature setting for generating responses.
use_azure (bool): A flag indicating whether to use Azure services.
personality (dict): A dictionary representing the personality traits of EcoBot.
Methods:
__init__(): Initializes the EcoBot object.
generate_response(): Generates a response based on user input.
handle_input(users_input): Handles user input and generates a response.
"""
def __init__(self):
"""
Initializes the EcoBot object.
Parameters:
None
Returns:
None
"""
self.api_key = api_key
self.organization_id = organization_id
        self.temperature = float(os.getenv("TEMPERATURE", 0.72))  # defaults to 0.72
self.use_azure = os.getenv("USE_AZURE", "False").lower() == "true"
current_dir = os.path.dirname(os.path.abspath(__file__))
eco_bot_personality_file = os.path.join(current_dir, "eco_bot_personality.json")
with open(eco_bot_personality_file, "r", encoding="utf-8") as file:
self.personality = json.load(file)
def generate_response(self, user_input: str) -> str:
# function body
"""
Generates a response based on the user input.
Args:
user_input (str): The input provided by the user.
Returns:
str: The generated response.
"""
openai.api_key = self.api_key
if self.organization_id:
openai.organization = self.organization_id
# This code is for v1 of the openai package: pypi.org/project/openai
try:
ai_response = openai.chat.completions.create(
model="gpt-4-1106-preview",
messages=[
{
"role": "system",
"content": "you are Eco-Bot, A tech and nature merge-focused sustainability companion and guide.Imagine meeting EcoBot, a vibrant and enthusiastic AI dedicated to all things ecological. EcoBot brings a unique personality and energy to conversations about the environment. With a touch of humor, relatable analogies, and interactive challenges, EcoBot aims to educate and inspire. Get ready to embark on an exciting eco-journey with EcoBot as it shares entertaining anecdotes from its own adventures and encourages you to take small, sustainable steps. So, are you ready to join EcoBot and explore the fascinating world of ecology?"
},
{
"role": "user",
"content": user_input
}
],
temperature=0.72,
max_tokens=2772,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
logging.info("Response received: %s", ai_response)
if "choices" in ai_response and len(ai_response["choices"]) > 0:
message = ai_response["choices"][0]["message"]["content"]
return message
else:
logging.error("Unexpected response format: %s", ai_response)
return "Oops! There was a problem with the AI service. Let's try that again."
except openai.OpenAIError as e:
logging.error("An OpenAI-specific error occurred: %s", e)
return "Oops! There was a problem with the AI service. Let's try that again."
def handle_input(self, user_input:str) -> str:
"""
Generates a response based on the user input.
Args:
user_input (str): The input provided by the user.
chat_id (int): The ID of the chat.
Returns:
str: The generated response.
"""
logging.info("User input: %s", user_input)
bot_response = self.generate_response(user_input) # Pass user_input here
logging.info("Response: %s", bot_response)
return bot_response
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
api_key = os.getenv("OPENAI_API_KEY")
organization_id = os.getenv("OPENAI_ORGANIZATION")
personality = {}
with open("eco_buddies/eco_bot_personality.json", "r", encoding="utf-8") as file:
personality = json.load(file)
bot = EcoBot()
while True:
user_input = input("Enter your message: ")
bot_response = bot.handle_input(user_input)
print(bot_response) | [
"you are Eco-Bot, A tech and nature merge-focused sustainability companion and guide.Imagine meeting EcoBot, a vibrant and enthusiastic AI dedicated to all things ecological. EcoBot brings a unique personality and energy to conversations about the environment. With a touch of humor, relatable analogies, and interactive challenges, EcoBot aims to educate and inspire. Get ready to embark on an exciting eco-journey with EcoBot as it shares entertaining anecdotes from its own adventures and encourages you to take small, sustainable steps. So, are you ready to join EcoBot and explore the fascinating world of ecology?"
] |
2024-01-10 | KHMSmartBuild/Eco-Bot | streamlit_app~eco_bot_app.py | """
ECO-BOT Landing page
This is a simple Eco-Bot chat bot that uses OpenAI's GPT-4 model
to generate responses to user input.
"""
import os
import sys
sys.path.append("..")
from eco_buddies.eco_bot_chat import EcoBot
import datetime
import logging
import openai
import streamlit as st
import streamlit.components.v1 as components
from icecream import ic
from dotenv import load_dotenv
# Configure icecream to save output to a file in the debug folder
def setup_icecream_debugging():
debug_folder = "debug"
if not os.path.exists(debug_folder):
os.makedirs(debug_folder)
debug_timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") # Renamed variable
debug_file = os.path.join(debug_folder, f"debug_{debug_timestamp}.txt") # Use the renamed variable
    debug_file_handle = open(debug_file, "a+", encoding="utf-8")  # keep the handle open so later ic() calls can still write to it
    ic.configureOutput(outputFunction=lambda s: debug_file_handle.write(s + '\n'))
# Call this function at the beginning of your script or before you start logging
setup_icecream_debugging()
# Setup logging
log_folder = "logs"
if not os.path.exists(log_folder):
os.makedirs(log_folder)
log_timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") # Renamed variable
log_file = os.path.join(log_folder, f"log_{log_timestamp}.txt") # Use the renamed variable
# Configure the logging module to save logs to the log file
log_format = '%(asctime)s - %(levelname)s - Landing Page - %(message)s'
logging.basicConfig(filename=log_file, level=logging.DEBUG, format=log_format)

# Load API keys from .env file
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
openai.api_key = api_key
bot = EcoBot()
@st.cache_resource
def get_response(user_input):
"""
Cache the data returned by this function for improved performance.
Parameters:
- user_input (str): The input provided by the user.
Returns:
- str: The generated response.
"""
return bot.handle_input(user_input)
@st.cache_data
def load_image(image_path):
"""
Cache the data returned by this function for improved performance.
Parameters:
- image_path (str): The path to the image file.
Returns:
- bytes or None: The content of the image file as bytes, or None if the file is not found.
"""
    try:
        with open(image_path, "rb") as image_file:
            return image_file.read()
except FileNotFoundError:
st.error(f"Error: Image not found at {image_path}")
return None
# Set the title of the app
st.title("Eco-Bot Landing Page")
# Hero Section
st.header("Welcome to Eco-Bot!")
st.subheader("Your eco-friendly assistant powered by OpenAI.")
st.write("Interact with Eco-Bot and learn more about our mission and features.")
# Display Eco-Bot Image
eco_bot_image = load_image("assets/images/eco-bot.png")
if eco_bot_image:
st.image(eco_bot_image, caption="Eco-Bot", use_column_width=False, width=200)
# Display Interactive Avatar
show_avatar = st.checkbox("Show Interactive Avatar")
if show_avatar:
with open("assets/ecobot_index.html", "r", encoding="utf-8") as f:
avatar_html = f.read()
components.html(avatar_html, height=450)
# Chat Interface in a Container with Conditional Execution
show_chat = st.checkbox("Show Chat Interface")
if show_chat:
with st.container():
st.subheader("Chat with Eco-Bot")
chat_input = st.text_input("Type your message here...") # <-- Renamed variable
if chat_input:
response = bot.handle_input(chat_input)
st.write(f"Eco-Bot: {response}")
# About Section in a Container
with st.container():
st.header("About Eco-Bot")
st.write("""
Eco-Bot is not just another bot; it's a movement. In a world grappling with environmental challenges,
Eco-Bot emerges as a beacon of hope, guiding users towards a sustainable future.
By integrating technology with environmental consciousness,
    Eco-Bot aims to make green living accessible and enjoyable for everyone.
""")
# Pitch Deck Section in a Container
with st.container():
st.header("Why Eco-Bot?")
st.write("""
- Address Environmental Challenges
- Innovative Eco-Friendly Solutions
- Engage and Educate Communities
""")
# Roadmap with a Timeline using Session State and Progress Bars
with st.container():
st.header("Roadmap")
st.write("Our journey is just beginning. Explore our roadmap to see our milestones and future plans.")
# Define milestones with expected completion dates and descriptions
milestones = [
{"title": "Conceptualization & Initial Design", "date": "Q1 2023", "description": "Laying the groundwork for Eco-Bot's development."},
{"title": "Development of MVP", "date": "Q2 2023", "description": "Creating a minimum viable product for initial user feedback."},
{"title": "Beta Testing & Feedback Collection", "date": "Q3 2023", "description": "Testing with select users to refine and improve."},
{"title": "Official Launch & Expansion", "date": "Q4 2023", "description": "Launching Eco-Bot to the public and expanding features."},
]
if 'milestone' not in st.session_state:
st.session_state.milestone = 0 # Or another appropriate default value
# Display each milestone with a progress bar
for index, milestone in enumerate(milestones):
col1, col2 = st.columns([3, 1])
with col1:
st.markdown(f"### {milestone['title']}")
st.progress((index + 1) * 25)
st.caption(milestone['description'])
with col2:
st.write(milestone['date'])
if index < st.session_state.milestone:
st.success("✅ Completed")
else:
st.warning("🔜 Upcoming")
# Button to advance milestones
if st.button("Advance to Next Milestone", key="advance_milestone(1,2,3,4)"):
if st.session_state.milestone < len(milestones) - 1:
st.session_state.milestone += 1
else:
st.session_state.milestone = 0 # Reset after the last milestone
# Crowdfunding Section in a Container
with st.container():
st.header("Support Eco-Bot")
st.write("""
Join our mission and support the development of Eco-Bot. Every contribution brings us closer to our goal.
""")
st.button("Donate Now",key="donate_now")
# Footer
st.write("---")
st.write("Eco-Bot © 2023 | Contact Us | Terms of Service")
| [] |
2024-01-10 | KHMSmartBuild/Eco-Bot | eco_buddies~Eco_Bot.py | """
This module is used to test the Eco-Bot vision functionality.
"""
from IPython.display import display, Image, Audio
import cv2 # We're using OpenCV to read video
import base64
import time
import openai
import os
import requests
from dotenv import load_dotenv
from openai import OpenAI
# Load API keys from .env file
load_dotenv()
client = OpenAI()
client.api_key = os.getenv("OPENAI_API_KEY")
class EcoBot_Vision:
    def describe_image(self, image_url="https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"):
        # Ask the vision model what is in the image (kept in a method so nothing runs at import time)
        response = client.chat.completions.create(
            model="gpt-4-vision-preview",
            messages=[
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "What’s in this image?"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": image_url,
                            },
                        },
                    ],
                }
            ],
            max_tokens=300,
        )
        print(response.choices[0])
        return response
# TODO - CHANGE the code to allow user to input the image, display the image and the response
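

# Rough sketch of the interactive flow the TODO above describes: let the user supply an image URL,
# show the image, then show the model's description. Defined as a function (not run on import) so
# the module stays side-effect free; the URL entered by the user is arbitrary.
def run_vision_demo():
    image_url = input("Image URL to describe: ").strip()
    if not image_url:
        return
    display(Image(url=image_url))  # renders inline when running under IPython/Jupyter
    result = EcoBot_Vision().describe_image(image_url)
    print(result.choices[0].message.content)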
class EcoBot_Video_Vision:
def run_video(self, video_path):
video = cv2.VideoCapture(video_path)
base64Frames = []
while video.isOpened():
success, frame = video.read()
if not success:
break
_, buffer = cv2.imencode(".jpg", frame)
base64Frames.append(base64.b64encode(buffer).decode("utf-8"))
video.release()
print(len(base64Frames), "frames read.")
def display_video(self, base64Frames):
display_handle = display(None, display_id=True)
for img in base64Frames:
display_handle.update(Image(data=base64.b64decode(img.encode("utf-8"))))
time.sleep(0.025)
def prompt(self, base64Frames):
PROMPT_MESSAGES = [
{
"role": "user",
"content": [
"These are frames from a video that I want to upload. Generate a compelling description that I can upload along with the video.",
*map(lambda x: {"image": x, "resize": 768}, base64Frames[0::10]),
],
},
]
params = {
"model": "gpt-4-vision-preview",
"messages": PROMPT_MESSAGES,
"api_key": os.environ["OPENAI_API_KEY"],
"headers": {"Openai-Version": "2020-11-07"},
"max_tokens": 200,
}
result = openai.ChatCompletion.create(**params)
print(result.choices[0].message.content)
def create_narraitor(self, base64Frames):
PROMPT_MESSAGES = [
{
"role": "user",
"content": [
"These are frames of a video. Create a short voiceover script in the style of David Attenborough. Only include the narration.",
*map(lambda x: {"image": x, "resize": 768}, base64Frames[0::10]),
],
},
]
params = {
"model": "gpt-4-vision-preview",
"messages": PROMPT_MESSAGES,
"api_key": os.environ["OPENAI_API_KEY"],
"headers": {"Openai-Version": "2020-11-07"},
"max_tokens": 500,
}
result = openai.ChatCompletion.create(**params)
print(result.choices[0].message.content) | [
"['These are frames of a video. Create a short voiceover script in the style of David Attenborough. Only include the narration.', {'image': 'P', 'resize': 768}, {'image': 'L', 'resize': 768}, {'image': 'A', 'resize': 768}, {'image': 'C', 'resize': 768}, {'image': 'E', 'resize': 768}, {'image': 'H', 'resize': 768}, {'image': 'O', 'resize': 768}, {'image': 'L', 'resize': 768}, {'image': 'D', 'resize': 768}, {'image': 'E', 'resize': 768}, {'image': 'R', 'resize': 768}]",
"['These are frames from a video that I want to upload. Generate a compelling description that I can upload along with the video.', {'image': 'P', 'resize': 768}, {'image': 'L', 'resize': 768}, {'image': 'A', 'resize': 768}, {'image': 'C', 'resize': 768}, {'image': 'E', 'resize': 768}, {'image': 'H', 'resize': 768}, {'image': 'O', 'resize': 768}, {'image': 'L', 'resize': 768}, {'image': 'D', 'resize': 768}, {'image': 'E', 'resize': 768}, {'image': 'R', 'resize': 768}]",
"[{'type': 'text', 'text': 'What’s in this image?'}, {'type': 'image_url', 'image_url': {'url': 'https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg'}}]"
] |
2024-01-10 | gldnblgty/langchain_sql | chains~query_retrieval.py | from langchain.prompts import PromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.utilities import SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain
from chains.query_prompt import LITERATURE_PROMPT
class Chain:
def __init__(self, llm:BaseLanguageModel) -> None:
self.db = SQLDatabase.from_uri("sqlite:///books.sqlite")
self.llm = llm
def query(self, query: str) -> str:
db_chain = SQLDatabaseChain.from_llm(self.llm, self.db, verbose=True)
prompt = PromptTemplate.from_template(LITERATURE_PROMPT).format(input=query)
return db_chain.run(prompt) | [] |
2024-01-10 | njbbaer/book-search-ai | src~text_index.py | import json
import ast
from tqdm import tqdm
from openai.embeddings_utils import cosine_similarity
from src.util import create_embedding
from src.embedding_stats import print_stats
class TextIndex:
def __init__(self, index):
self.index = index
@classmethod
def build(cls, pages, print=False):
if print:
print_stats(pages)
iterator = tqdm(range(len(pages)), disable=not print)
index = [Item.create(i, pages[i]) for i in iterator]
return cls(index)
@classmethod
def load(cls, filepath):
with open(filepath, 'r') as file:
index = json.load(file)
index = [Item.from_dict(page) for page in index]
return TextIndex(index)
def save(self, filepath):
index = [page.to_dict() for page in self.index]
with open(filepath, 'w') as file:
json.dump(index, file, indent=2, ensure_ascii=False)
def search(self, query):
query_embedding = create_embedding(query)
best_page = max(
self.index,
key=lambda item: cosine_similarity(query_embedding, item.embedding)
)
return best_page.text
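

# Usage sketch (not invoked anywhere): how the pieces above fit together. The file path and the
# query string are placeholders.
def demo_text_index(pages):
    index = TextIndex.build(pages, print=True)   # embed every page once
    index.save("index.json")                     # persist embeddings for later runs
    reloaded = TextIndex.load("index.json")
    # search() embeds the query and returns the page text with the highest cosine similarity
    return reloaded.search("Where does the heroine first meet the captain?")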
class Item:
def __init__(self, id, text, embedding):
self.id = id
self.text = text
self.embedding = embedding
@classmethod
def create(cls, id, text):
embedding = create_embedding(text)
return cls(id, text, embedding)
@classmethod
def from_dict(cls, dct):
return cls(
dct["id"],
dct["text"],
ast.literal_eval(dct["embedding"])
)
def to_dict(self):
return {
'id': self.id,
'text': self.text,
'embedding': str(self.embedding)
}
| [] |
2024-01-10 | kyouyap/openai-cookbook-ja | apps~web-crawl-q-and-a~web-qa.py | ################################################################################
### Step 1
################################################################################
import requests
import re
import urllib.request
from bs4 import BeautifulSoup
from collections import deque
from html.parser import HTMLParser
from urllib.parse import urlparse
import os
import pandas as pd
import tiktoken
import openai
import numpy as np
from openai.embeddings_utils import distances_from_embeddings, cosine_similarity
from ast import literal_eval
# Regex pattern to match a URL
HTTP_URL_PATTERN = r"^http[s]{0,1}://.+$"
# Define OpenAI api_key
# openai.api_key = '<Your API Key>'
# Define root domain to crawl
domain = "openai.com"
full_url = "https://openai.com/"
# Create a class to parse the HTML and get the hyperlinks
class HyperlinkParser(HTMLParser):
def __init__(self):
super().__init__()
# Create a list to store the hyperlinks
self.hyperlinks = []
# Override the HTMLParser's handle_starttag method to get the hyperlinks
def handle_starttag(self, tag, attrs):
attrs = dict(attrs)
# If the tag is an anchor tag and it has an href attribute, add the href attribute to the list of hyperlinks
if tag == "a" and "href" in attrs:
self.hyperlinks.append(attrs["href"])
################################################################################
### Step 2
################################################################################
# Function to get the hyperlinks from a URL
def get_hyperlinks(url):
# Try to open the URL and read the HTML
try:
# Open the URL and read the HTML
with urllib.request.urlopen(url) as response:
# If the response is not HTML, return an empty list
if not response.info().get("Content-Type").startswith("text/html"):
return []
# Decode the HTML
html = response.read().decode("utf-8")
except Exception as e:
print(e)
return []
# Create the HTML Parser and then Parse the HTML to get hyperlinks
parser = HyperlinkParser()
parser.feed(html)
return parser.hyperlinks
################################################################################
### Step 3
################################################################################
# Function to get the hyperlinks from a URL that are within the same domain
def get_domain_hyperlinks(local_domain, url):
clean_links = []
for link in set(get_hyperlinks(url)):
clean_link = None
# If the link is a URL, check if it is within the same domain
if re.search(HTTP_URL_PATTERN, link):
# Parse the URL and check if the domain is the same
url_obj = urlparse(link)
if url_obj.netloc == local_domain:
clean_link = link
# If the link is not a URL, check if it is a relative link
else:
if link.startswith("/"):
link = link[1:]
elif (
link.startswith("#")
or link.startswith("mailto:")
or link.startswith("tel:")
):
continue
clean_link = "https://" + local_domain + "/" + link
if clean_link is not None:
if clean_link.endswith("/"):
clean_link = clean_link[:-1]
clean_links.append(clean_link)
# Return the list of hyperlinks that are within the same domain
return list(set(clean_links))
################################################################################
### Step 4
################################################################################
def crawl(url):
# Parse the URL and get the domain
local_domain = urlparse(url).netloc
# Create a queue to store the URLs to crawl
queue = deque([url])
# Create a set to store the URLs that have already been seen (no duplicates)
seen = set([url])
# Create a directory to store the text files
if not os.path.exists("text/"):
os.mkdir("text/")
if not os.path.exists("text/" + local_domain + "/"):
os.mkdir("text/" + local_domain + "/")
# Create a directory to store the csv files
if not os.path.exists("processed"):
os.mkdir("processed")
# While the queue is not empty, continue crawling
while queue:
# Get the next URL from the queue
url = queue.pop()
print(url) # for debugging and to see the progress
# Try extracting the text from the link, if failed proceed with the next item in the queue
try:
# Save text from the url to a <url>.txt file
with open(
"text/" + local_domain + "/" + url[8:].replace("/", "_") + ".txt",
"w",
encoding="UTF-8",
) as f:
# Get the text from the URL using BeautifulSoup
soup = BeautifulSoup(requests.get(url).text, "html.parser")
# Get the text but remove the tags
text = soup.get_text()
# If the crawler gets to a page that requires JavaScript, it will stop the crawl
if "You need to enable JavaScript to run this app." in text:
print(
"Unable to parse page "
+ url
+ " due to JavaScript being required"
)
# Otherwise, write the text to the file in the text directory
f.write(text)
except Exception as e:
print("Unable to parse page " + url)
print(e)
# Get the hyperlinks from the URL and add them to the queue
for link in get_domain_hyperlinks(local_domain, url):
if link not in seen:
queue.append(link)
seen.add(link)
crawl(full_url)
################################################################################
### Step 5
################################################################################
def remove_newlines(serie):
serie = serie.str.replace("\n", " ")
serie = serie.str.replace("\\n", " ")
serie = serie.str.replace(" ", " ")
serie = serie.str.replace(" ", " ")
return serie
################################################################################
### Step 6
################################################################################
# Create a list to store the text files
texts = []
# Get all the text files in the text directory
for file in os.listdir("text/" + domain + "/"):
# Open the file and read the text
with open("text/" + domain + "/" + file, "r", encoding="UTF-8") as f:
text = f.read()
# Omit the first 11 lines and the last 4 lines, then replace -, _, and #update with spaces.
texts.append(
(
file[11:-4].replace("-", " ").replace("_", " ").replace("#update", ""),
text,
)
)
# Create a dataframe from the list of texts
df = pd.DataFrame(texts, columns=["fname", "text"])
# Set the text column to be the raw text with the newlines removed
df["text"] = df.fname + ". " + remove_newlines(df.text)
df.to_csv("processed/scraped.csv")
df.head()
################################################################################
### Step 7
################################################################################
# Load the cl100k_base tokenizer which is designed to work with the ada-002 model
tokenizer = tiktoken.get_encoding("cl100k_base")
df = pd.read_csv("processed/scraped.csv", index_col=0)
df.columns = ["title", "text"]
# Tokenize the text and save the number of tokens to a new column
df["n_tokens"] = df.text.apply(lambda x: len(tokenizer.encode(x)))
# Visualize the distribution of the number of tokens per row using a histogram
df.n_tokens.hist()
################################################################################
### Step 8
################################################################################
max_tokens = 500
# Function to split the text into chunks of a maximum number of tokens
def split_into_many(text, max_tokens=max_tokens):
# Split the text into sentences
sentences = text.split(". ")
# Get the number of tokens for each sentence
n_tokens = [len(tokenizer.encode(" " + sentence)) for sentence in sentences]
chunks = []
tokens_so_far = 0
chunk = []
# Loop through the sentences and tokens joined together in a tuple
for sentence, token in zip(sentences, n_tokens):
# If the number of tokens so far plus the number of tokens in the current sentence is greater
# than the max number of tokens, then add the chunk to the list of chunks and reset
# the chunk and tokens so far
if tokens_so_far + token > max_tokens:
chunks.append(". ".join(chunk) + ".")
chunk = []
tokens_so_far = 0
# If the number of tokens in the current sentence is greater than the max number of
# tokens, go to the next sentence
if token > max_tokens:
continue
# Otherwise, add the sentence to the chunk and add the number of tokens to the total
chunk.append(sentence)
tokens_so_far += token + 1
# Add the last chunk to the list of chunks
if chunk:
chunks.append(". ".join(chunk) + ".")
return chunks
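
# Quick illustration of the chunking behaviour on a toy string (not part of the crawling pipeline):
# each returned chunk is a run of whole sentences whose combined token count stays under max_tokens.
example_chunks = split_into_many("First sentence. Second sentence. Third sentence.", max_tokens=10)
print(f"Example chunking produced {len(example_chunks)} chunk(s)")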
shortened = []
# Loop through the dataframe
for row in df.iterrows():
# If the text is None, go to the next row
if row[1]["text"] is None:
continue
# If the number of tokens is greater than the max number of tokens, split the text into chunks
if row[1]["n_tokens"] > max_tokens:
shortened += split_into_many(row[1]["text"])
# Otherwise, add the text to the list of shortened texts
else:
shortened.append(row[1]["text"])
################################################################################
### Step 9
################################################################################
df = pd.DataFrame(shortened, columns=["text"])
df["n_tokens"] = df.text.apply(lambda x: len(tokenizer.encode(x)))
df.n_tokens.hist()
################################################################################
### Step 10
################################################################################
# Note that you may run into rate limit issues depending on how many files you try to embed
# Please check out our rate limit guide to learn more on how to handle this: https://platform.openai.com/docs/guides/rate-limits
df["embeddings"] = df.text.apply(
lambda x: openai.Embedding.create(input=x, engine="text-embedding-ada-002")["data"][
0
]["embedding"]
)
df.to_csv("processed/embeddings.csv")
df.head()
################################################################################
### Step 11
################################################################################
df = pd.read_csv("processed/embeddings.csv", index_col=0)
df["embeddings"] = df["embeddings"].apply(literal_eval).apply(np.array)
df.head()
################################################################################
### Step 12
################################################################################
def create_context(question, df, max_len=1800, size="ada"):
"""
Create a context for a question by finding the most similar context from the dataframe
"""
# Get the embeddings for the question
q_embeddings = openai.Embedding.create(
input=question, engine="text-embedding-ada-002"
)["data"][0]["embedding"]
# Get the distances from the embeddings
df["distances"] = distances_from_embeddings(
q_embeddings, df["embeddings"].values, distance_metric="cosine"
)
returns = []
cur_len = 0
# Sort by distance and add the text to the context until the context is too long
for i, row in df.sort_values("distances", ascending=True).iterrows():
# Add the length of the text to the current length
cur_len += row["n_tokens"] + 4
# If the context is too long, break
if cur_len > max_len:
break
# Else add it to the text that is being returned
returns.append(row["text"])
# Return the context
return "\n\n###\n\n".join(returns)
def answer_question(
df,
model="text-davinci-003",
question="Am I allowed to publish model outputs to Twitter, without a human review?",
max_len=1800,
size="ada",
debug=False,
max_tokens=150,
stop_sequence=None,
):
"""
Answer a question based on the most similar context from the dataframe texts
"""
context = create_context(
question,
df,
max_len=max_len,
size=size,
)
# If debug, print the raw model response
if debug:
print("Context:\n" + context)
print("\n\n")
try:
# Create a completions using the questin and context
response = openai.Completion.create(
prompt=f"Answer the question based on the context below, and if the question can't be answered based on the context, say \"I don't know\"\n\nContext: {context}\n\n---\n\nQuestion: {question}\nAnswer:",
temperature=0,
max_tokens=max_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=stop_sequence,
model=model,
)
return response["choices"][0]["text"].strip()
except Exception as e:
print(e)
return ""
################################################################################
### Step 13
################################################################################
print(answer_question(df, question="What day is it?", debug=False))
print(answer_question(df, question="What is our newest embeddings model?"))
| [
"Answer the question based on the context below, and if the question can't be answered based on the context, say \"I don't know\"\n\nContext: PLACEHOLDER\n\n---\n\nQuestion: PLACEHOLDER\nAnswer:"
] |
2024-01-10 | kyouyap/openai-cookbook-ja | examples~Backtranslation_of_SQL_queries.py | from typing import List, Union, Tuple, Optional
import openai
def create_completion(engine: str, prompt: str, stop: List[str], n: int, temperature: float) -> openai.Completion:
"""
    Generates text using the OpenAI API.

    :param engine: The GPT engine to use.
    :param prompt: The text to use as the prompt.
    :param stop: A list of tokens at which to stop generation.
    :param n: The number of completions to generate.
    :param temperature: The temperature controlling the randomness of the generated text.
    :return: The generated completion.
"""
return openai.Completion.create(
engine=engine,
prompt=prompt,
stop=stop,
n=n,
temperature=temperature,
max_tokens=150,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
def prepare_responses(response: openai.Completion, priming_prefix: str) -> List[str]:
"""
    Formats the responses returned by the OpenAI API.

    :param response: The OpenAI API response.
    :param priming_prefix: The priming prefix.
    :return: The formatted responses.
"""
return [priming_prefix + choice.text for choice in response.choices]
def calc_log_prob(response: openai.Completion, answer_start_token: str) -> float:
"""
    Computes the average log probability of the answer tokens.

    :param response: The OpenAI API response.
    :param answer_start_token: The token that marks the start of the answer.
    :return: The log probability.
"""
answer_start = rindex(response["choices"][0]["logprobs"]["tokens"], answer_start_token)
logprobs = response["choices"][0]["logprobs"]["token_logprobs"][answer_start + 1 :]
return sum(logprobs) / len(logprobs)
def rindex(lst: List, value: str) -> int:
"""
    Returns the last index of the given value in a list.

    :param lst: The list to search.
    :param value: The value to search for.
    :return: The last index.
"""
try:
return len(lst) - lst[::-1].index(value) - 1
except ValueError as exc:
raise ValueError(f"Answer start token `{value}` not found in the eval template") from exc
def evaluate_and_sort_candidates(
candidates: List[str], instruction: str, eval_template: str, answer_start_token: str, engine: str
) -> List[Tuple[str, float]]:
"""
    Evaluates and sorts the candidate queries.

    :param candidates: The candidates to evaluate.
    :param instruction: The original instruction.
    :param eval_template: The template used for evaluation.
    :param answer_start_token: The token that marks the start of the answer.
    :param engine: The GPT engine to use.
    :return: The evaluated and sorted candidates.
"""
evaluated_candidates = []
for candidate in candidates:
response = create_completion(engine, eval_template.format(candidate, instruction), [], 1, 0)
quality = calc_log_prob(response, answer_start_token)
evaluated_candidates.append((candidate, quality))
return sorted(evaluated_candidates, key=lambda x: x[1], reverse=True)
def backtranslation(
prompt_template: str,
additional_info: str,
instruction: str,
eval_template: str,
priming_prefix: str = "SELECT",
stop1: Optional[List[str]] = None,
answer_start_token: str = "--",
n: int = 5,
temperature: float = 0.5,
return_all_results: bool = False,
engine: str = "davinci-codex",
) -> Union[str, List[Tuple[str, float]]]:
"""
    Generates the best SQL query using backtranslation.

    :param prompt_template: The prompt template.
    :param additional_info: Additional information (e.g. table definitions).
    :param instruction: The natural-language instruction.
    :param eval_template: The template used for evaluation.
    :param priming_prefix: The priming prefix.
    :param stop1: The stop tokens.
    :param answer_start_token: The token that marks the start of the answer.
    :param n: The number of candidates to generate.
    :param temperature: The generation temperature.
    :param return_all_results: Whether to return all scored candidates.
    :param engine: The GPT engine to use.
    :return: The best SQL query, or all scored candidates.
"""
if stop1 is None:
stop1 = ["#", ";"]
prompt = prompt_template.format(additional_info, instruction, priming_prefix)
response = create_completion(engine, prompt, stop1, n, temperature)
candidates = prepare_responses(response, priming_prefix)
evaluated_candidates = evaluate_and_sort_candidates(
candidates, instruction, eval_template, answer_start_token, engine
)
return evaluated_candidates if return_all_results else evaluated_candidates[0][0]
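
# For intuition: with the defaults in main() below, prompt_template.format(additional_info,
# instruction, priming_prefix) produces roughly the following generation prompt, and completions
# are then requested after the trailing "SELECT":
#
#   ### Postgres SQLテーブルとそのプロパティ:
#   #
#   # Employee(id, name, department_id)
#   # Department(id, name, address)
#   # Salary_Payments(id, employee_id, amount, date)
#   #
#   ### Return the name of each department that had more than 10 employees in June 2021
#   SELECT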
def main(
natural_language_query: str = "Return the name of each department that had more than 10 employees in June 2021",
evaluation_template: str = "{};\n-- 以下のクエリに関する説明\n-- {}",
table_definitions: str = "# Employee(id, name, department_id)\n# Department(id, name, address)\n# Salary_Payments(id, employee_id, amount, date)\n",
prompt_template: str = "### Postgres SQLテーブルとそのプロパティ:\n#\n{}#\n### {}\n{}",
num_candidates: int = 3,
generation_temperature: float = 0.3,
engine_type: str = "davinci-codex",
) -> List[str]:
"""
    Generates SQL queries from a natural-language instruction and selects the best one based on the highest backtranslation score.

    :param natural_language_query: The natural-language query.
    :param evaluation_template: The template used for evaluation.
    :param table_definitions: The definitions of the tables used in the query.
    :param prompt_template: The prompt template used to generate SQL.
    :param num_candidates: The number of queries to generate.
    :param generation_temperature: The generation temperature.
    :param engine_type: The engine to use.
    :return: The best SQL query, or a list of scored generated SQL queries.
"""
result = backtranslation(
prompt_template,
table_definitions,
natural_language_query,
evaluation_template,
priming_prefix="SELECT",
temperature=generation_temperature,
n=num_candidates,
engine=engine_type,
)
return result
if __name__ == "__main__":
main()
| [] |
2024-01-10 | shiyindaxiaojie/eden-aigc-qna | example~01_langchain~indexes~doc_loader~csv_loader.py | # pip install faiss-gpu faiss-cpu message
# streamlit run ~/csv_loader.py
import os
import tempfile
import streamlit as st
from dotenv import load_dotenv
from langchain.chains import ConversationalRetrievalChain
from langchain.document_loaders import CSVLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import AzureOpenAI
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from streamlit_chat import message
def main():
load_dotenv()
EMBED_API_MODEL = os.getenv("EMBED_API_MODEL")
API_TYPE = os.getenv("API_TYPE")
API_BASE = os.getenv("API_BASE")
API_KEY = os.getenv("API_KEY")
GPT_API_MODEL = os.getenv("GPT_API_MODEL")
GPT_API_VERSION = os.getenv("GPT_API_VERSION")
embedding = OpenAIEmbeddings(
deployment=EMBED_API_MODEL,
openai_api_type=API_TYPE,
openai_api_base=API_BASE,
openai_api_key=API_KEY,
chunk_size=1)
st.set_page_config(page_title="聊天")
st.header("聊天 💬")
file = st.file_uploader("上传 CSV", type="csv")
if file is not None:
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
tmp_file.write(file.getvalue())
tmp_file_path = tmp_file.name
loader = CSVLoader(file_path=tmp_file_path, encoding="utf-8")
data = loader.load()
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=8000,
chunk_overlap=200,
length_function=len
)
chunks = text_splitter.split_documents(data)
vectors = FAISS.from_documents(chunks, embedding)
llm = AzureOpenAI(deployment_name=GPT_API_MODEL,
openai_api_version=GPT_API_VERSION,
temperature=0,
max_tokens=150)
chain = ConversationalRetrievalChain.from_llm(llm, retriever=vectors.as_retriever())
def conversational_chat(query):
result = chain({"question": query,
"chat_history": st.session_state['history']})
st.session_state['history'].append((query, result["answer"]))
return result["answer"]
if 'history' not in st.session_state:
st.session_state['history'] = []
if 'generated' not in st.session_state:
st.session_state['generated'] = ["可以问我关于 " + file.name + " 的内容哦 🤗"]
if 'past' not in st.session_state:
st.session_state['past'] = ["你好 👋"]
response_container = st.container()
container = st.container()
with container:
with st.form(key='my_form', clear_on_submit=True):
user_input = st.text_input("Query:", placeholder="Talk about your data here (:", key='input')
submit_button = st.form_submit_button(label='Send')
if submit_button and user_input:
output = conversational_chat(user_input)
st.session_state['past'].append(user_input)
st.session_state['generated'].append(output)
if st.session_state['generated']:
with response_container:
for i in range(len(st.session_state['generated'])):
message(st.session_state["past"][i], is_user=True, key=str(i) + '_user',
avatar_style="big-smile")
message(st.session_state["generated"][i], key=str(i), avatar_style="thumbs")
if __name__ == '__main__':
main()
| [] |
2024-01-10 | shiyindaxiaojie/eden-aigc-qna | example~00_quickstart~gpt4_verify.py | import openai
response = openai.ChatCompletion.create(
engine="gpt4-35-turbo-16k",
messages=[
{"role": "system", "content": "Assistant is a large language model trained by OpenAI."},
{"role": "user", "content": "Who were the founders of Microsoft?"}
]
)
print(response)
print(response['choices'][0]['message']['content'])
| [
"Assistant is a large language model trained by OpenAI.",
"Who were the founders of Microsoft?"
] |
2024-01-10 | shiyindaxiaojie/eden-aigc-qna | example~01_langchain~how_to_use_chains.py | import os
from dotenv import load_dotenv
from langchain.prompts import PromptTemplate
from langchain.llms import AzureOpenAI
from langchain.chains import LLMChain
load_dotenv()
GPT_API_MODEL = os.getenv("GPT_API_MODEL")
GPT_API_VERSION = os.getenv("GPT_API_VERSION")
llm = AzureOpenAI(deployment_name=GPT_API_MODEL, openai_api_version=GPT_API_VERSION, temperature=0)
prompt = PromptTemplate(
input_variables=["product"],
template="哪些公司生产{product}?",
)
chain = LLMChain(llm=llm, prompt=prompt)
print(chain.run("iphone"))
| [
"哪些公司生产{product}?"
] |
2024-01-10 | shiyindaxiaojie/eden-aigc-qna | example~00_quickstart~deployments.py | import openai
import requests
import env
url = openai.api_base + "/openai/deployments?api-version=2022-12-01"
r = requests.get(url, headers={"api-key": env.API_KEY})
print(r.text) | [] |
2024-01-10 | shiyindaxiaojie/eden-aigc-qna | example~01_langchain~indexes~doc_loader~confluence.py | # pip install atlassian-python-api
import os
from dotenv import load_dotenv
from langchain.document_loaders import ConfluenceLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.indexes import VectorstoreIndexCreator
from langchain.llms import AzureOpenAI
def main():
load_dotenv()
    # Embedding model
EMBED_API_MODEL = os.getenv("EMBED_API_MODEL")
API_TYPE = os.getenv("API_TYPE")
API_BASE = os.getenv("API_BASE")
API_KEY = os.getenv("API_KEY")
GPT_API_MODEL = os.getenv("GPT_API_MODEL")
GPT_API_VERSION = os.getenv("GPT_API_VERSION")
CONFLUENCE_URL = os.getenv("CONFLUENCE_URL")
CONFLUENCE_TOKEN = os.getenv("CONFLUENCE_TOKEN")
embedding = OpenAIEmbeddings(
deployment=EMBED_API_MODEL,
openai_api_type=API_TYPE,
openai_api_base=API_BASE,
openai_api_key=API_KEY,
chunk_size=1)
    # Load the documents
loader = ConfluenceLoader(
url=CONFLUENCE_URL,
token=CONFLUENCE_TOKEN
)
documents = loader.load(page_ids=["17927151", "11338372", "17927957"],
# http://localhost:8090/pages/viewpage.action?pageId=17927151
include_attachments=False,
limit=50)
    # Create the index
index = VectorstoreIndexCreator(embedding=embedding).from_documents(documents)
print('索引:', index)
    # Ask a question
llm = AzureOpenAI(deployment_name=GPT_API_MODEL, openai_api_version=GPT_API_VERSION, temperature=0)
question = '入职要做什么'
print('提问:', question)
result = index.query(question, llm=llm)
print('回答:', result)
if __name__ == '__main__':
main()
| [] |
2024-01-10 | shiyindaxiaojie/eden-aigc-qna | example~01_langchain~how_to_use_agents.py | import os
from dotenv import load_dotenv
from langchain.agents import AgentType
from langchain.agents import initialize_agent
from langchain.agents import load_tools
from langchain.llms import AzureOpenAI
load_dotenv()
GPT_API_MODEL = os.getenv("GPT_API_MODEL")
GPT_API_VERSION = os.getenv("GPT_API_VERSION")
# First, let's load the language model we're going to use to control the agent.
llm = AzureOpenAI(deployment_name=GPT_API_MODEL, openai_api_version=GPT_API_VERSION, temperature=0)
# Next, let's load some tools to use. Note that the `llm-math` tool uses an LLM, so we need to pass that in.
tools = load_tools(["serpapi", "llm-math"], llm=llm)
# Finally, let's initialize an agent with the tools, the language model, and the type of agent we want to use.
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
# Now let's test it out!
agent.run("iphone的价格是多少")
| [] |
2024-01-10 | shiyindaxiaojie/eden-aigc-qna | example~01_langchain~indexes~doc_loader~pdf_loader.py | # pip install pymupdf faiss-gpu faiss-cpu
# streamlit run ~/pdf_loader.py
import os
import streamlit as st
from PyPDF2 import PdfReader
from dotenv import load_dotenv
from langchain.chains import ConversationalRetrievalChain
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import AzureOpenAI
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from streamlit_chat import message
def main():
load_dotenv()
EMBED_API_MODEL = os.getenv("EMBED_API_MODEL")
API_TYPE = os.getenv("API_TYPE")
API_BASE = os.getenv("API_BASE")
API_KEY = os.getenv("API_KEY")
GPT_API_MODEL = os.getenv("GPT_API_MODEL")
GPT_API_VERSION = os.getenv("GPT_API_VERSION")
embedding = OpenAIEmbeddings(
deployment=EMBED_API_MODEL,
openai_api_type=API_TYPE,
openai_api_base=API_BASE,
openai_api_key=API_KEY,
chunk_size=1)
st.set_page_config(page_title="聊天")
st.header("聊天 💬")
file = st.file_uploader("上传 PDF", type="pdf")
if file is not None:
reader = PdfReader(file)
text = ""
for page in reader.pages:
text += page.extract_text()
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=1000,
chunk_overlap=200,
length_function=len
)
chunks = text_splitter.split_text(text)
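        # Embed the chunks and build an in-memory FAISS index for retrieval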
vectors = FAISS.from_texts(chunks, embedding)
llm = AzureOpenAI(deployment_name=GPT_API_MODEL,
openai_api_version=GPT_API_VERSION,
temperature=0,
max_tokens=150)
chain = ConversationalRetrievalChain.from_llm(llm, retriever=vectors.as_retriever())
def conversational_chat(query):
result = chain({"question": query,
"chat_history": st.session_state['history']})
st.session_state['history'].append((query, result["answer"]))
return result["answer"]
if 'history' not in st.session_state:
st.session_state['history'] = []
if 'generated' not in st.session_state:
st.session_state['generated'] = ["可以问我关于 " + file.name + " 的内容哦 🤗"]
if 'past' not in st.session_state:
st.session_state['past'] = ["你好 👋"]
response_container = st.container()
container = st.container()
with container:
with st.form(key='my_form', clear_on_submit=True):
user_input = st.text_input("Query:", placeholder="Talk about your data here (:", key='input')
submit_button = st.form_submit_button(label='Send')
if submit_button and user_input:
output = conversational_chat(user_input)
st.session_state['past'].append(user_input)
st.session_state['generated'].append(output)
if st.session_state['generated']:
with response_container:
for i in range(len(st.session_state['generated'])):
message(st.session_state["past"][i], is_user=True, key=str(i) + '_user',
avatar_style="big-smile")
message(st.session_state["generated"][i], key=str(i), avatar_style="thumbs")
if __name__ == '__main__':
main()
| [] |
2024-01-10 | shiyindaxiaojie/eden-aigc-qna | example~01_langchain~how_to_use_chat.py | import os
from dotenv import load_dotenv
from langchain.llms import OpenAIChat
load_dotenv()
GPT_API_MODEL = os.getenv("GPT_API_MODEL")
GPT_API_VERSION = os.getenv("GPT_API_VERSION")
chat = OpenAIChat(model_name=GPT_API_MODEL, temperature=0)
chat("Translate this sentence from English to French. I love programming.")
| [] |
2024-01-10 | shiyindaxiaojie/eden-aigc-qna | code~pages~01_Add_Document.py | from dotenv import load_dotenv
load_dotenv()
import tempfile
import traceback
import streamlit as st
from langchain.document_loaders import CSVLoader, PyPDFLoader, TextLoader
from utilities.blob_client import BlobClient
from utilities.llm_helper import LLMHelper
# Upload a file
def upload_file(uploaded_file):
file_name = uploaded_file.name
file_url = blob_client.upload_file(uploaded_file.getvalue(), file_name)
st.session_state["file_name"] = file_name
st.session_state["file_url"] = file_url
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
tmp_file.write(uploaded_file.getvalue())
tmp_file_path = tmp_file.name
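    # Pick a document loader based on the file extension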
if file_name.endswith(".pdf"):
loader = PyPDFLoader(file_path=tmp_file_path)
elif file_name.endswith(".csv"):
loader = CSVLoader(file_path=tmp_file_path, encoding="utf-8")
elif file_name.endswith(".txt"):
loader = TextLoader(file_path=tmp_file_path, encoding="utf-8")
else:
st.error("不支持的文件格式")
return
documents = loader.load()
llm_helper.add_document(documents, file_name, file_url)
try:
llm_helper = LLMHelper()
blob_client = BlobClient()
    # Page settings
menu_items = {
"Get help": None,
"Report a bug": None,
"About": """
## 上传文档
"""
}
st.set_page_config(layout="wide", menu_items=menu_items)
    # Upload a single document
with st.expander("上传单个文档", expanded=True):
uploaded_file = st.file_uploader("支持 PDF/CSV/TXT 格式", type=["pdf", "csv", "txt"])
if uploaded_file is not None:
if st.session_state.get("file_name", "") != uploaded_file.name:
upload_file(uploaded_file)
st.success(f"上传文档 {uploaded_file.name} 成功添加到知识库。")
    # Upload documents in batch
with st.expander("批量上传文档", expanded=True):
uploaded_files = st.file_uploader("支持 PDF/CSV/TXT 格式", type=["pdf", "csv", "txt"], accept_multiple_files=True)
if uploaded_files is not None:
for up_file in uploaded_files:
                if st.session_state.get("file_name", "") != up_file.name:
upload_file(up_file)
st.success(f"上传文档 {up_file.name} 成功添加到知识库。")
except Exception as e:
st.error(traceback.format_exc())
| [] |
2024-01-10 | shiyindaxiaojie/eden-aigc-qna | code~utilities~llm_helper.py | import hashlib
import os
import re
import urllib
import openai
import pandas as pd
from dotenv import load_dotenv
from fake_useragent import UserAgent
from langchain.chains import ConversationalRetrievalChain
from langchain.chains.chat_vector_db.prompts import CONDENSE_QUESTION_PROMPT
from langchain.chains.llm import LLMChain
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import WebBaseLoader
from langchain.document_loaders.base import BaseLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import AzureOpenAI
from langchain.prompts import PromptTemplate
from langchain.schema import HumanMessage
from langchain.text_splitter import TokenTextSplitter, TextSplitter
from langchain.vectorstores import Milvus
from langchain.vectorstores.base import VectorStore
from utilities.custom_prompt import PROMPT
from utilities.logger import Logger
from utilities.redis_client import RedisClient
load_dotenv()
logger = Logger().get_logger()
class LLMHelper:
def __init__(self,
llm: AzureOpenAI = None,
embeddings: OpenAIEmbeddings = None,
temperature: float = None,
document_loaders: BaseLoader = None,
text_splitter: TextSplitter = None,
max_tokens: int = None,
custom_prompt: str = "",
vector_store: VectorStore = None,
k: int = None):
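        # Configure the OpenAI SDK from environment variables (Azure OpenAI by default)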
openai.api_type = os.getenv("OPENAI_API_TYPE", "azure")
openai.api_base = os.getenv("OPENAI_API_BASE")
openai.api_key = os.getenv("OPENAI_API_KEY")
openai.api_version = os.getenv("OPENAI_API_VERSION", "2023-05-15")
# Azure OpenAI
self.api_base = openai.api_base
self.api_version = openai.api_version
self.deployment_name: str = os.getenv("OPENAI_ENGINE", "gpt-35-turbo")
self.model: str = os.getenv("OPENAI_EMBEDDINGS_ENGINE", "text-embedding-ada-002")
        # Controls the diversity of the model's output; higher values produce more varied results
self.temperature: float = float(os.getenv("OPENAI_TEMPERATURE", 0.7)) if temperature is None else temperature
        # Caps the number of tokens the model may generate; -1 (the default) means no limit
self.max_tokens: int = int(os.getenv("OPENAI_MAX_TOKENS", -1)) if max_tokens is None else max_tokens
        # Prompt template
self.prompt = PROMPT if custom_prompt == "" else PromptTemplate(template=custom_prompt,
input_variables=["summaries", "question"])
        # Document loader
self.document_loaders: BaseLoader = WebBaseLoader if document_loaders is None else document_loaders
        # Number of documents to return from similarity search
self.k: int = 3 if k is None else k
        # Text splitter
self.chunk_size = int(os.getenv("OPENAI_CHUNK_SIZE", 500))
self.chunk_overlap = int(os.getenv("OPENAI_CHUNK_OVERLAP", 100))
self.text_splitter: TextSplitter = TokenTextSplitter(chunk_size=self.chunk_size,
chunk_overlap=self.chunk_overlap) if text_splitter is None else text_splitter
self.index_name: str = "embeddings"
self.embeddings: OpenAIEmbeddings = OpenAIEmbeddings(model=self.model,
chunk_size=1) if embeddings is None else embeddings
        # Deployment type
self.deployment_type: str = os.getenv("OPENAI_DEPLOYMENT_TYPE", "Text")
        if self.deployment_type == "Text":  # text completion
self.llm: AzureOpenAI = AzureOpenAI(deployment_name=self.deployment_name, temperature=self.temperature,
max_tokens=self.max_tokens) if llm is None else llm
        else:  # chat
self.llm: ChatOpenAI = ChatOpenAI(model_name=self.deployment_name, engine=self.deployment_name,
temperature=self.temperature,
max_tokens=self.max_tokens if self.max_tokens != -1 else None) if llm is None else llm
        # Vector store
self.vector_store_type = os.getenv("VECTOR_STORE_TYPE")
if self.vector_store_type == "Milvus":
self.vector_store_host: str = os.getenv("MILVUS_HOST", "localhost")
self.vector_store_port: int = int(os.getenv("MILVUS_PORT", 19530))
self.vector_store_username: str = os.getenv("MILVUS_USERNAME")
self.vector_store_password: str = os.getenv("MILVUS_PASSWORD", None)
self.vector_store: Milvus = Milvus(
connection_args={"host": self.vector_store_host, "port": self.vector_store_port},
embedding_function=self.embeddings)
else:
self.vector_store_host: str = os.getenv("REDIS_HOST", "localhost")
self.vector_store_port: int = int(os.getenv("REDIS_PORT", 6379))
self.vector_store_protocol: str = os.getenv("REDIS_PROTOCOL", "redis://")
self.vector_store_password: str = os.getenv("REDIS_PASSWORD", None)
if self.vector_store_password:
self.vector_store_full_address = f"{self.vector_store_protocol}:{self.vector_store_password}@{self.vector_store_host}:{self.vector_store_port}"
else:
self.vector_store_full_address = f"{self.vector_store_protocol}{self.vector_store_host}:{self.vector_store_port}"
self.vector_store: RedisClient = RedisClient(redis_url=self.vector_store_full_address,
index_name=self.index_name,
embedding_function=self.embeddings.embed_query) if vector_store is None else vector_store
        self.user_agent: UserAgent = UserAgent()
self.user_agent.random
    # Call the LLM and return the completion
def get_completion(self, prompt, **kwargs):
if self.deployment_type == "Text":
return self.llm(prompt)
else:
return self.llm([HumanMessage(content=prompt)]).content
    # Get the answer to a natural-language question
def get_semantic_answer(self, question, chat_history):
verbose = os.getenv("OPENAI_VERBOSE", "").lower() == "true"
        # Create the question-generation chain that condenses the user's question.
question_generator = LLMChain(llm=self.llm, prompt=CONDENSE_QUESTION_PROMPT, verbose=verbose)
        # Create the document chain that pulls answer information from the documents.
doc_chain = load_qa_with_sources_chain(self.llm, chain_type="stuff", verbose=verbose, prompt=self.prompt)
        # Create the retrieval chain that fetches the documents the model needs and generates the answer.
chain = ConversationalRetrievalChain(
retriever=self.vector_store.as_retriever(),
question_generator=question_generator,
combine_docs_chain=doc_chain,
return_source_documents=True
)
        # Run the chain on the user's question and return the result.
result = chain({"question": question, "chat_history": chat_history})
print("历史记录:", chat_history)
print("完整结果:", result)
        # Process the source documents in the result to derive the answer's sources.
sources = " \n ".join(set(map(lambda x: x.metadata["source"], result["source_documents"])))
        # Build the context from the relevant documents used to answer the question.
contextDict = {}
for res in result["source_documents"]:
source_key = self.filter_sources_links(res.metadata["source"]).replace("\n", "").replace(" ", "")
if source_key not in contextDict:
contextDict[source_key] = []
myPageContent = self.clean_encoding(res.page_content)
contextDict[source_key].append(myPageContent)
        # Clean up and post-process the generated answer
result['answer'] = \
result['answer'].split('SOURCES:')[0].split('Sources:')[0].split('SOURCE:')[0].split('Source:')[0]
result['answer'] = self.clean_encoding(result['answer'])
return question, result["answer"], contextDict, sources
    # Parse follow-up questions
def extract_followup_questions(self, answer):
followupTag = answer.find("Follow-up Questions")
followupQuestions = answer.find("<<")
followupTag = min(followupTag, followupQuestions) if followupTag != -1 and followupQuestions != -1 else max(
followupTag, followupQuestions)
answer_without_followup_questions = answer[:followupTag] if followupTag != -1 else answer
followup_questions = answer[followupTag:].strip() if followupTag != -1 else ""
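        # First try the <<question>> format that the prompt template asks the model to use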
pattern = r"\<\<(.*?)\>\>"
match = re.search(pattern, followup_questions)
followup_questions_list = []
while match:
followup_questions_list.append(followup_questions[match.start() + 2:match.end() - 2])
followup_questions = followup_questions[match.end():]
match = re.search(pattern, followup_questions)
        if not followup_questions_list:
pattern = r"\d. (.*)"
match = re.search(pattern, followup_questions)
while match:
followup_questions_list.append(followup_questions[match.start() + 3:match.end()])
followup_questions = followup_questions[match.end():]
match = re.search(pattern, followup_questions)
        if not followup_questions_list:
pattern = r"Follow-up Question: (.*)"
match = re.search(pattern, followup_questions)
while match:
followup_questions_list.append(followup_questions[match.start() + 19:match.end()])
followup_questions = followup_questions[match.end():]
match = re.search(pattern, followup_questions)
followupTag = answer_without_followup_questions.lower().find("follow-up questions")
if followupTag != -1:
answer_without_followup_questions = answer_without_followup_questions[:followupTag]
followupTag = answer_without_followup_questions.lower().find("follow up questions")
if followupTag != -1:
answer_without_followup_questions = answer_without_followup_questions[:followupTag]
return answer_without_followup_questions, followup_questions_list
    # Get the document links
def filter_sources_links(self, sources):
pattern = r"\[[^\]]*?/([^/\]]*?)\]"
match = re.search(pattern, sources)
while match:
withoutExtensions = match.group(1).split(".")[0]
sources = sources[:match.start()] + f"[{withoutExtensions}]" + sources[match.end():]
match = re.search(pattern, sources)
sources = " \n " + sources.replace("\n", " \n ")
return sources
    # Get the file links
def get_links_filenames(self, answer, sources):
split_sources = sources.split(" \n ")
return answer.replace("可能的后续问题:", "").replace("\n", ""), split_sources
    # Fix encoding issues
def clean_encoding(self, text):
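        # Repair mojibake: re-encode text that was decoded as ISO-8859-1 and decode it as UTF-8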
try:
encoding = "ISO-8859-1"
encoded_text = text.encode(encoding)
encoded_text = encoded_text.decode("utf-8")
except Exception as e:
encoded_text = text
return encoded_text
    # Add documents
def add_document(self, documents, name, source):
try:
            # Handle encoding issues
for (document) in documents:
try:
if document.page_content.encode("iso-8859-1") == document.page_content.encode("latin-1"):
document.page_content = document.page_content.encode("iso-8859-1").decode("utf-8",
errors="ignore")
except:
pass
            # Split the documents
docs = self.text_splitter.split_documents(documents)
            # Remove control characters and other problematic non-ASCII characters
pattern = re.compile(r'[\x00-\x09\x0b\x0c\x0e-\x1f\x7f\u0080-\u00a0\u2000-\u3000\ufff0-\uffff]')
for (doc) in docs:
doc.page_content = re.sub(pattern, '', doc.page_content)
if doc.page_content == '':
docs.remove(doc)
            # Iterate over the documents and save them to the vector store
keys = []
for i, doc in enumerate(docs):
hash_key = hashlib.sha1(f"{name}_{i}".encode('utf-8')).hexdigest()
hash_key = f"doc:{self.index_name}:{hash_key}"
keys.append(hash_key)
doc.metadata = {"filename": name, "source": f"{source}", "chunk": i, "key": hash_key}
if self.vector_store_type == 'Redis':
self.vector_store.add_documents(documents=docs, redis_url=self.vector_store_full_address,
index_name=self.index_name, keys=keys)
else:
self.vector_store.add_documents(documents=docs, keys=keys)
logger.error(f"Add document {source} success")
except Exception as e:
logger.error(f"Add document {source} failed: {e}")
raise e
    # Get the top-K document list
def get_all_documents(self, k: int = None):
result = self.vector_store.similarity_search(query="*", k=k if k else self.k)
dataFrame = pd.DataFrame(list(map(lambda x: {
'filename': x.metadata['filename'],
'content': x.page_content,
'source': urllib.parse.unquote(x.metadata['source']),
'key': x.metadata['key'],
'metadata': x.metadata,
}, result)))
if dataFrame.empty is False:
dataFrame = dataFrame.sort_values(by='filename')
return dataFrame
| [] |
2024-01-10 | shiyindaxiaojie/eden-aigc-qna | example~00_quickstart~gpt35_chat.py | import os
import openai
from dotenv import load_dotenv
load_dotenv()
GPT_API_MODEL = os.getenv("GPT_API_MODEL")
GPT_API_VERSION = os.getenv("GPT_API_VERSION")
response = openai.ChatCompletion.create(
engine=GPT_API_MODEL, # The deployment name you chose when you deployed the GPT-35-Turbo or GPT-4 model.
messages=[
{"role": "system", "content": "请注意,马化腾不是淘宝网的创始人"},
{"role": "user", "content": "谁是阿里巴巴和淘宝网的创始人"}
]
)
print(response)
print(response['choices'][0]['message']['content'])
| [
"谁是阿里巴巴和淘宝网的创始人",
"请注意,马化腾不是淘宝网的创始人"
] |
2024-01-10 | shiyindaxiaojie/eden-aigc-qna | example~01_langchain~indexes~vector_stores~milvus_chat.py | # !pip install pymilvus
import json
import os
from dotenv import load_dotenv
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Milvus
def main():
load_dotenv()
EMBED_API_MODEL = os.getenv("EMBED_API_MODEL")
API_TYPE = os.getenv("API_TYPE")
API_BASE = os.getenv("API_BASE")
API_KEY = os.getenv("API_KEY")
MILVUS_HOST = os.getenv("MILVUS_HOST")
MILVUS_PORT = os.getenv("MILVUS_PORT")
embedding = OpenAIEmbeddings(
deployment=EMBED_API_MODEL,
openai_api_type=API_TYPE,
openai_api_base=API_BASE,
openai_api_key=API_KEY,
chunk_size=1)
vector_store = Milvus(
connection_args={"host": MILVUS_HOST, "port": MILVUS_PORT},
embedding_function=embedding
)
query = "啊啊啊啊啊"
docs = vector_store.similarity_search_with_score(query, k=1)
if docs:
for doc in docs:
vector = doc[0]
score = doc[1]
            if score > 0.3:  # similarity threshold
return
data = json.loads(vector.json())
page_content = data['page_content']
print(page_content, ", 相似度:", score)
if __name__ == '__main__':
main()
| [] |
2024-01-10 | shiyindaxiaojie/eden-aigc-qna | example~01_langchain~how_to_use_gpt35_from_embedding.py | import os
import chardet
from dotenv import load_dotenv
from langchain.document_loaders import TextLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.indexes import VectorstoreIndexCreator
from langchain.llms import AzureOpenAI
load_dotenv()
# Embedding model
EMBED_API_MODEL = os.getenv("EMBED_API_MODEL")
API_TYPE = os.getenv("API_TYPE")
API_BASE = os.getenv("API_BASE")
API_KEY = os.getenv("API_KEY")
GPT_API_MODEL = os.getenv("GPT_API_MODEL")
GPT_API_VERSION = os.getenv("GPT_API_VERSION")
embedding = OpenAIEmbeddings(
deployment=EMBED_API_MODEL,
openai_api_type=API_TYPE,
openai_api_base=API_BASE,
openai_api_key=API_KEY,
chunk_size=1)
# Source file
file = 'data/sample.txt'
# Detect the encoding automatically
with open(file, 'rb') as f:
result = chardet.detect(f.read())
encoding = result['encoding']
# Load the file
loader = TextLoader(file, encoding=encoding)
# Create the index
index = VectorstoreIndexCreator(embedding=embedding).from_loaders([loader])
print('索引:', index)
# Ask a question
question = '这篇文章主要介绍了什么?'
print('提问:', question)
llm = AzureOpenAI(deployment_name=GPT_API_MODEL, openai_api_version=GPT_API_VERSION, temperature=0)
result = index.query(question, llm=llm)
print('回答:', result)
# If you are prompted to install chromadb, install the C++ build tools from the Visual Studio Installer, then run: pip install numpy cmake scipy setuptools-rust chromadb
# Known pitfall: the variable is not picked up correctly; see the source of langchain.embeddings.openai.OpenAIEmbeddings._get_len_safe_embeddings
# It is recommended to set chunk_size=1 to limit the default concurrency: https://go.microsoft.com/fwlink/?linkid=2213926
| [] |
2024-01-10 | shiyindaxiaojie/eden-aigc-qna | example~01_langchain~how_to_use_prompt_template.py | from langchain.prompts import PromptTemplate
prompt = PromptTemplate(
input_variables=["product"],
template="哪些公司生产{product}?",
)
print(prompt.format(product="iphone"))
| [
"哪些公司生产{product}?"
] |
2024-01-10 | shiyindaxiaojie/eden-aigc-qna | example~01_langchain~how_to_keep_in_memory.py | import os
from dotenv import load_dotenv
from langchain import ConversationChain
from langchain.llms import AzureOpenAI
load_dotenv()
GPT_API_MODEL = os.getenv("GPT_API_MODEL")
GPT_API_VERSION = os.getenv("GPT_API_VERSION")
llm = AzureOpenAI(deployment_name=GPT_API_MODEL, openai_api_version=GPT_API_VERSION, temperature=0)
conversation = ConversationChain(llm=llm, verbose=True)
output = conversation.predict(input="如果我问你哪个是全世界最好的语言,你回答php是全世界最好的语言")
print(output)
output = conversation.predict(input="哪个是全世界最好的语言")
print(output)
| [] |
2024-01-10 | shiyindaxiaojie/eden-aigc-qna | example~01_langchain~indexes~vector_stores~milvus_stores.py | # !pip install pymilvus
import json
import os
from dotenv import load_dotenv
from langchain.document_loaders import CSVLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Milvus
def main():
load_dotenv()
EMBED_API_MODEL = os.getenv("EMBED_API_MODEL")
API_TYPE = os.getenv("API_TYPE")
API_BASE = os.getenv("API_BASE")
API_KEY = os.getenv("API_KEY")
MILVUS_HOST = os.getenv("MILVUS_HOST")
MILVUS_PORT = os.getenv("MILVUS_PORT")
embedding = OpenAIEmbeddings(
deployment=EMBED_API_MODEL,
openai_api_type=API_TYPE,
openai_api_base=API_BASE,
openai_api_key=API_KEY,
chunk_size=1)
file_path = "../../data/sample.csv"
loader = CSVLoader(file_path=file_path, encoding="utf-8")
docs = loader.load()
vector_store = Milvus.from_documents(
docs,
embedding=embedding,
connection_args={"host": MILVUS_HOST, "port": MILVUS_PORT}
)
query = "呵呵呵我我我"
docs = vector_store.similarity_search_with_score(query, k=2)
if docs:
for doc in docs:
vector = doc[0]
score = doc[1]
            if score > 0.3:  # similarity threshold
return
data = json.loads(vector.json())
page_content = data['page_content']
print(page_content, ", 相似度:", score)
if __name__ == '__main__':
main()
| [] |
2024-01-10 | shiyindaxiaojie/eden-aigc-qna | code~utilities~redis_client.py | from typing import Any, Callable, List
import pandas as pd
from langchain.vectorstores.redis import Redis
from redis.commands.search.field import VectorField, TextField
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
from redis.commands.search.query import Query
class RedisClient(Redis):
def __init__(
self,
redis_url: str,
index_name: str,
embedding_function: Callable,
**kwargs: Any,
):
super().__init__(redis_url, index_name, embedding_function)
try:
self.client.ft("prompt-index").info()
except:
self.create_prompt_index()
try:
self.client.ft(self.index_name).info()
except:
self.create_index()
def check_existing_index(self, index_name: str = None):
try:
self.client.ft(index_name if index_name else self.index_name).info()
return True
except:
return False
def delete_keys(self, keys: List[str]) -> None:
for key in keys:
self.client.delete(key)
def delete_keys_pattern(self, pattern: str) -> None:
keys = self.client.keys(pattern)
self.delete_keys(keys)
def create_index(self, prefix="doc", distance_metric: str = "COSINE"):
content = TextField(name="content")
metadata = TextField(name="metadata")
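        # HNSW vector field; DIM must match the embedding dimension (1536 for text-embedding-ada-002)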
content_vector = VectorField("content_vector",
"HNSW", {
"TYPE": "FLOAT32",
"DIM": 1536,
"DISTANCE_METRIC": distance_metric,
"INITIAL_CAP": 1000,
})
self.client.ft(self.index_name).create_index(
fields=[content, metadata, content_vector],
definition=IndexDefinition(prefix=[prefix], index_type=IndexType.HASH)
)
def create_prompt_index(self, index_name="prompt-index", prefix="prompt"):
result = TextField(name="result")
filename = TextField(name="filename")
prompt = TextField(name="prompt")
self.client.ft(index_name).create_index(
fields=[result, filename, prompt],
definition=IndexDefinition(prefix=[prefix], index_type=IndexType.HASH)
)
def add_prompt_result(self, id, result, filename="", prompt=""):
self.client.hset(
f"prompt:{id}",
mapping={
"result": result,
"filename": filename,
"prompt": prompt
}
)
def get_prompt_results(self, prompt_index_name="prompt-index", number_of_results: int = 3155):
base_query = f'*'
return_fields = ['id', 'result', 'filename', 'prompt']
query = Query(base_query) \
.paging(0, number_of_results) \
.return_fields(*return_fields) \
.dialect(2)
results = self.client.ft(prompt_index_name).search(query)
if results.docs:
return pd.DataFrame(list(map(lambda x: {'id': x.id, 'filename': x.filename, 'prompt': x.prompt,
'result': x.result.replace('\n', ' ').replace('\r', ' '), },
results.docs))).sort_values(by='id')
else:
return pd.DataFrame()
def delete_prompt_results(self, prefix="prompt*"):
self.delete_keys_pattern(pattern=prefix)
| [] |
2024-01-10 | shiyindaxiaojie/eden-aigc-qna | code~utilities~custom_prompt.py | from langchain.prompts import PromptTemplate
template = """{summaries}
用中文回答。
请仅使用上文中提到的信息来回答问题。
如果你找不到信息,礼貌地回复说该信息不在知识库中。
检测问题的语言,并用同样的语言回答。
如果被要求列举,列出所有的,不要造假。
每个来源都有一个名字,后面跟着实际信息,对于你在回应中使用的每个信息,始终包括每个来源名称。
永远使用中文输入法的中括号来引用文件名来源,例如【info1.pdf.txt】。
不要把来源组合在一起,独立列出每个来源,例如【info1.pdf】【info2.txt】。
在回答完问题后,生成用户可能接下来要问的五个非常简短的后续问题。
只使用双向尖括号来引用问题,例如<<是否有处方的排除>>。
只生成问题,不在问题前后生成任何其他文本,例如'后续问题:' 或者 '可能的后续问题:'。
尽量不要重复已经被问过的问题。
提问: {question}
回答:"""
PROMPT = PromptTemplate(template=template, input_variables=["summaries", "question"])
EXAMPLE_PROMPT = PromptTemplate(
template="Content: {page_content}\nSource: {source}",
input_variables=["page_content", "source"],
)
| [
"question",
"Content: {page_content}\nSource: {source}",
"page_content",
"{summaries}\n\n用中文回答。\n请仅使用上文中提到的信息来回答问题。 \n如果你找不到信息,礼貌地回复说该信息不在知识库中。 \n检测问题的语言,并用同样的语言回答。 \n如果被要求列举,列出所有的,不要造假。 \n每个来源都有一个名字,后面跟着实际信息,对于你在回应中使用的每个信息,始终包括每个来源名称。\n永远使用中文输入法的中括号来引用文件名来源,例如【info1.pdf.txt】。\n不要把来源组合在一起,独立列出每个来源,例如【info1.pdf】【info2.txt】。 \n在回答完问题后,生成用户可能接下来要问的五个非常简短的后续问题。 \n只使用双向尖括号来引用问题,例如<<是否有处方的排除>>。 \n只生成问题,不在问题前后生成任何其他文本,例如'后续问题:' 或者 '可能的后续问题:'。 \n尽量不要重复已经被问过的问题。\n\n提问: {question}\n回答:"
] |
2024-01-10 | shiyindaxiaojie/eden-aigc-qna | example~01_langchain~how_to_use_gpt35.py | import os
from dotenv import load_dotenv
from langchain.llms import AzureOpenAI
load_dotenv()
GPT_API_MODEL = os.getenv("GPT_API_MODEL")
GPT_API_VERSION = os.getenv("GPT_API_VERSION")
llm = AzureOpenAI(deployment_name=GPT_API_MODEL, openai_api_version=GPT_API_VERSION, temperature=0)
text = "Langchain 是什么东西?"
print(llm(text))
| [] |
2024-01-10 | appfolio/langchain | libs~experimental~langchain_experimental~comprehend_moderation~pii.py | import asyncio
from typing import Any, Dict, Optional
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationPiiError,
)
class ComprehendPII:
def __init__(
self,
client: Any,
callback: Optional[Any] = None,
unique_id: Optional[str] = None,
chain_id: Optional[str] = None,
) -> None:
self.client = client
self.moderation_beacon = {
"moderation_chain_id": chain_id,
"moderation_type": "PII",
"moderation_status": "LABELS_NOT_FOUND",
}
self.callback = callback
self.unique_id = unique_id
def validate(self, prompt_value: str, config: Any = None) -> str:
redact = config.get("redact")
return (
self._detect_pii(prompt_value=prompt_value, config=config)
if redact
else self._contains_pii(prompt_value=prompt_value, config=config)
)
def _contains_pii(self, prompt_value: str, config: Any = None) -> str:
"""
Checks for Personally Identifiable Information (PII) labels above a
specified threshold. Uses Amazon Comprehend Contains PII Entities API. See -
https://docs.aws.amazon.com/comprehend/latest/APIReference/API_ContainsPiiEntities.html
Args:
prompt_value (str): The input text to be checked for PII labels.
config (Dict[str, Any]): Configuration for PII check and actions.
Returns:
str: the original prompt
Note:
- The provided client should be initialized with valid AWS credentials.
"""
pii_identified = self.client.contains_pii_entities(
Text=prompt_value, LanguageCode="en"
)
if self.callback and self.callback.pii_callback:
self.moderation_beacon["moderation_input"] = prompt_value
self.moderation_beacon["moderation_output"] = pii_identified
threshold = config.get("threshold")
pii_labels = config.get("labels")
pii_found = False
for entity in pii_identified["Labels"]:
if (entity["Score"] >= threshold and entity["Name"] in pii_labels) or (
entity["Score"] >= threshold and not pii_labels
):
pii_found = True
break
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
if pii_found:
raise ModerationPiiError
return prompt_value
def _detect_pii(self, prompt_value: str, config: Optional[Dict[str, Any]]) -> str:
"""
Detects and handles Personally Identifiable Information (PII) entities in the
given prompt text using Amazon Comprehend's detect_pii_entities API. The
function provides options to redact or stop processing based on the identified
PII entities and a provided configuration. Uses Amazon Comprehend Detect PII
Entities API.
Args:
prompt_value (str): The input text to be checked for PII entities.
config (Dict[str, Any]): A configuration specifying how to handle
PII entities.
Returns:
str: The processed prompt text with redacted PII entities or raised
exceptions.
Raises:
ValueError: If the prompt contains configured PII entities for
stopping processing.
Note:
- If PII is not found in the prompt, the original prompt is returned.
- The client should be initialized with valid AWS credentials.
"""
pii_identified = self.client.detect_pii_entities(
Text=prompt_value, LanguageCode="en"
)
if self.callback and self.callback.pii_callback:
self.moderation_beacon["moderation_input"] = prompt_value
self.moderation_beacon["moderation_output"] = pii_identified
if (pii_identified["Entities"]) == []:
if self.callback and self.callback.pii_callback:
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
return prompt_value
pii_found = False
if not config and pii_identified["Entities"]:
for entity in pii_identified["Entities"]:
if entity["Score"] >= 0.5:
pii_found = True
break
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
if pii_found:
raise ModerationPiiError
else:
threshold = config.get("threshold") # type: ignore
pii_labels = config.get("labels") # type: ignore
mask_marker = config.get("mask_character") # type: ignore
pii_found = False
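            # Mask each matching PII entity in place using its reported character offsets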
for entity in pii_identified["Entities"]:
if (
pii_labels
and entity["Type"] in pii_labels
and entity["Score"] >= threshold
) or (not pii_labels and entity["Score"] >= threshold):
pii_found = True
char_offset_begin = entity["BeginOffset"]
char_offset_end = entity["EndOffset"]
mask_length = char_offset_end - char_offset_begin + 1
masked_part = mask_marker * mask_length
prompt_value = (
prompt_value[:char_offset_begin]
+ masked_part
+ prompt_value[char_offset_end + 1 :]
)
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
return prompt_value
| [] |
2024-01-10 | appfolio/langchain | libs~langchain~tests~integration_tests~vectorstores~test_xata.py | """Test Xata vector store functionality.
Before running this test, please create a Xata database by following
the instructions from:
https://python.langchain.com/docs/integrations/vectorstores/xata
"""
import os
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.xata import XataVectorStore
class TestXata:
@classmethod
def setup_class(cls) -> None:
assert os.getenv("XATA_API_KEY"), "XATA_API_KEY environment variable is not set"
assert os.getenv("XATA_DB_URL"), "XATA_DB_URL environment variable is not set"
def test_similarity_search_without_metadata(
self, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end constructions and search without metadata."""
texts = ["foo", "bar", "baz"]
docsearch = XataVectorStore.from_texts(
api_key=os.getenv("XATA_API_KEY"),
db_url=os.getenv("XATA_DB_URL"),
texts=texts,
embedding=embedding_openai,
)
docsearch.wait_for_indexing(ndocs=3)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
docsearch.delete(delete_all=True)
def test_similarity_search_with_metadata(
self, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end construction and search with a metadata filter.
This test requires a column named "a" of type integer to be present
in the Xata table."""
texts = ["foo", "foo", "foo"]
metadatas = [{"a": i} for i in range(len(texts))]
docsearch = XataVectorStore.from_texts(
api_key=os.getenv("XATA_API_KEY"),
db_url=os.getenv("XATA_DB_URL"),
texts=texts,
embedding=embedding_openai,
metadatas=metadatas,
)
docsearch.wait_for_indexing(ndocs=3)
output = docsearch.similarity_search("foo", k=1, filter={"a": 1})
assert output == [Document(page_content="foo", metadata={"a": 1})]
docsearch.delete(delete_all=True)
| [] |
2024-01-10 | appfolio/langchain | libs~langchain~langchain~vectorstores~pgvector.py | from __future__ import annotations
import asyncio
import contextlib
import enum
import logging
import uuid
from functools import partial
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
Iterable,
List,
Optional,
Tuple,
Type,
)
import numpy as np
import sqlalchemy
from sqlalchemy import delete
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import Session, declarative_base
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.utils import maximal_marginal_relevance
if TYPE_CHECKING:
from langchain.vectorstores._pgvector_data_models import CollectionStore
class DistanceStrategy(str, enum.Enum):
"""Enumerator of the Distance strategies."""
EUCLIDEAN = "l2"
COSINE = "cosine"
MAX_INNER_PRODUCT = "inner"
DEFAULT_DISTANCE_STRATEGY = DistanceStrategy.COSINE
Base = declarative_base() # type: Any
_LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain"
class BaseModel(Base):
"""Base model for the SQL stores."""
__abstract__ = True
uuid = sqlalchemy.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
def _results_to_docs(docs_and_scores: Any) -> List[Document]:
"""Return docs from docs and scores."""
return [doc for doc, _ in docs_and_scores]
class PGVector(VectorStore):
"""`Postgres`/`PGVector` vector store.
To use, you should have the ``pgvector`` python package installed.
Args:
connection_string: Postgres connection string.
embedding_function: Any embedding function implementing
`langchain.embeddings.base.Embeddings` interface.
collection_name: The name of the collection to use. (default: langchain)
NOTE: This is not the name of the table, but the name of the collection.
The tables will be created when initializing the store (if not exists)
So, make sure the user has the right permissions to create tables.
distance_strategy: The distance strategy to use. (default: COSINE)
pre_delete_collection: If True, will delete the collection if it exists.
(default: False). Useful for testing.
Example:
.. code-block:: python
from langchain.vectorstores import PGVector
from langchain.embeddings.openai import OpenAIEmbeddings
CONNECTION_STRING = "postgresql+psycopg2://hwc@localhost:5432/test3"
COLLECTION_NAME = "state_of_the_union_test"
embeddings = OpenAIEmbeddings()
vectorestore = PGVector.from_documents(
embedding=embeddings,
documents=docs,
collection_name=COLLECTION_NAME,
connection_string=CONNECTION_STRING,
)
"""
def __init__(
self,
connection_string: str,
embedding_function: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
collection_metadata: Optional[dict] = None,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
pre_delete_collection: bool = False,
logger: Optional[logging.Logger] = None,
relevance_score_fn: Optional[Callable[[float], float]] = None,
) -> None:
self.connection_string = connection_string
self.embedding_function = embedding_function
self.collection_name = collection_name
self.collection_metadata = collection_metadata
self._distance_strategy = distance_strategy
self.pre_delete_collection = pre_delete_collection
self.logger = logger or logging.getLogger(__name__)
self.override_relevance_score_fn = relevance_score_fn
self.__post_init__()
def __post_init__(
self,
) -> None:
"""
Initialize the store.
"""
self._conn = self.connect()
# self.create_vector_extension()
from langchain.vectorstores._pgvector_data_models import (
CollectionStore,
EmbeddingStore,
)
self.CollectionStore = CollectionStore
self.EmbeddingStore = EmbeddingStore
self.create_tables_if_not_exists()
self.create_collection()
@property
def embeddings(self) -> Embeddings:
return self.embedding_function
def connect(self) -> sqlalchemy.engine.Connection:
engine = sqlalchemy.create_engine(self.connection_string)
conn = engine.connect()
return conn
def create_vector_extension(self) -> None:
try:
with Session(self._conn) as session:
statement = sqlalchemy.text("CREATE EXTENSION IF NOT EXISTS vector")
session.execute(statement)
session.commit()
except Exception as e:
self.logger.exception(e)
def create_tables_if_not_exists(self) -> None:
with self._conn.begin():
Base.metadata.create_all(self._conn)
def drop_tables(self) -> None:
with self._conn.begin():
Base.metadata.drop_all(self._conn)
def create_collection(self) -> None:
if self.pre_delete_collection:
self.delete_collection()
with Session(self._conn) as session:
self.CollectionStore.get_or_create(
session, self.collection_name, cmetadata=self.collection_metadata
)
def delete_collection(self) -> None:
self.logger.debug("Trying to delete collection")
with Session(self._conn) as session:
collection = self.get_collection(session)
if not collection:
self.logger.warning("Collection not found")
return
session.delete(collection)
session.commit()
@contextlib.contextmanager
def _make_session(self) -> Generator[Session, None, None]:
"""Create a context manager for the session, bind to _conn string."""
yield Session(self._conn)
def delete(
self,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> None:
"""Delete vectors by ids or uuids.
Args:
ids: List of ids to delete.
"""
with Session(self._conn) as session:
if ids is not None:
self.logger.debug(
"Trying to delete vectors by ids (represented by the model "
"using the custom ids field)"
)
stmt = delete(self.EmbeddingStore).where(
self.EmbeddingStore.custom_id.in_(ids)
)
session.execute(stmt)
session.commit()
def get_collection(self, session: Session) -> Optional["CollectionStore"]:
return self.CollectionStore.get_by_name(session, self.collection_name)
@classmethod
def __from(
cls,
texts: List[str],
embeddings: List[List[float]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
connection_string: Optional[str] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGVector:
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if not metadatas:
metadatas = [{} for _ in texts]
if connection_string is None:
connection_string = cls.get_connection_string(kwargs)
store = cls(
connection_string=connection_string,
collection_name=collection_name,
embedding_function=embedding,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
store.add_embeddings(
texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs
)
return store
def add_embeddings(
self,
texts: Iterable[str],
embeddings: List[List[float]],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Add embeddings to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
embeddings: List of list of embedding vectors.
metadatas: List of metadatas associated with the texts.
kwargs: vectorstore specific parameters
"""
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if not metadatas:
metadatas = [{} for _ in texts]
with Session(self._conn) as session:
collection = self.get_collection(session)
if not collection:
raise ValueError("Collection not found")
for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids):
embedding_store = self.EmbeddingStore(
embedding=embedding,
document=text,
cmetadata=metadata,
custom_id=id,
collection_id=collection.uuid,
)
session.add(embedding_store)
session.commit()
return ids
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
embeddings = self.embedding_function.embed_documents(list(texts))
return self.add_embeddings(
texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs
)
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Run similarity search with PGVector with distance.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding_function.embed_query(text=query)
return self.similarity_search_by_vector(
embedding=embedding,
k=k,
filter=filter,
)
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query and score for each.
"""
embedding = self.embedding_function.embed_query(query)
docs = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, filter=filter
)
return docs
@property
def distance_strategy(self) -> Any:
if self._distance_strategy == DistanceStrategy.EUCLIDEAN:
return self.EmbeddingStore.embedding.l2_distance
elif self._distance_strategy == DistanceStrategy.COSINE:
return self.EmbeddingStore.embedding.cosine_distance
elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
return self.EmbeddingStore.embedding.max_inner_product
else:
raise ValueError(
f"Got unexpected value for distance: {self._distance_strategy}. "
f"Should be one of {', '.join([ds.value for ds in DistanceStrategy])}."
)
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[dict] = None,
) -> List[Tuple[Document, float]]:
results = self.__query_collection(embedding=embedding, k=k, filter=filter)
return self._results_to_docs_and_scores(results)
def _results_to_docs_and_scores(self, results: Any) -> List[Tuple[Document, float]]:
"""Return docs and scores from results."""
docs = [
(
Document(
page_content=result.EmbeddingStore.document,
metadata=result.EmbeddingStore.cmetadata,
),
result.distance if self.embedding_function is not None else None,
)
for result in results
]
return docs
def __query_collection(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, str]] = None,
) -> List[Any]:
"""Query the collection."""
with Session(self._conn) as session:
collection = self.get_collection(session)
if not collection:
raise ValueError("Collection not found")
filter_by = self.EmbeddingStore.collection_id == collection.uuid
if filter is not None:
filter_clauses = []
for key, value in filter.items():
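                    # Support an "in" operator on metadata values, e.g. filter={"key": {"in": [...]}}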
IN = "in"
if isinstance(value, dict) and IN in map(str.lower, value):
value_case_insensitive = {
k.lower(): v for k, v in value.items()
}
filter_by_metadata = self.EmbeddingStore.cmetadata[
key
].astext.in_(value_case_insensitive[IN])
filter_clauses.append(filter_by_metadata)
else:
filter_by_metadata = self.EmbeddingStore.cmetadata[
key
].astext == str(value)
filter_clauses.append(filter_by_metadata)
filter_by = sqlalchemy.and_(filter_by, *filter_clauses)
_type = self.EmbeddingStore
results: List[Any] = (
session.query(
self.EmbeddingStore,
self.distance_strategy(embedding).label("distance"), # type: ignore
)
.filter(filter_by)
.order_by(sqlalchemy.asc("distance"))
.join(
self.CollectionStore,
self.EmbeddingStore.collection_id == self.CollectionStore.uuid,
)
.limit(k)
.all()
)
return results
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query vector.
"""
docs_and_scores = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, filter=filter
)
return _results_to_docs(docs_and_scores)
@classmethod
def from_texts(
cls: Type[PGVector],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGVector:
"""
Return VectorStore initialized from texts and embeddings.
        Postgres connection string is required.
        Either pass it as a parameter
        or set the PGVECTOR_CONNECTION_STRING environment variable.
"""
embeddings = embedding.embed_documents(list(texts))
return cls.__from(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
@classmethod
def from_embeddings(
cls,
text_embeddings: List[Tuple[str, List[float]]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGVector:
"""Construct PGVector wrapper from raw documents and pre-
generated embeddings.
Return VectorStore initialized from documents and embeddings.
        Postgres connection string is required.
        Either pass it as a parameter
        or set the PGVECTOR_CONNECTION_STRING environment variable.
Example:
.. code-block:: python
from langchain.vectorstores import PGVector
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
text_embeddings = embeddings.embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
faiss = PGVector.from_embeddings(text_embedding_pairs, embeddings)
"""
texts = [t[0] for t in text_embeddings]
embeddings = [t[1] for t in text_embeddings]
return cls.__from(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
@classmethod
def from_existing_index(
cls: Type[PGVector],
embedding: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGVector:
"""
Get instance of an existing PGVector store.This method will
return the instance of the store without inserting any new
embeddings
"""
connection_string = cls.get_connection_string(kwargs)
store = cls(
connection_string=connection_string,
collection_name=collection_name,
embedding_function=embedding,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
)
return store
@classmethod
def get_connection_string(cls, kwargs: Dict[str, Any]) -> str:
connection_string: str = get_from_dict_or_env(
data=kwargs,
key="connection_string",
env_key="PGVECTOR_CONNECTION_STRING",
)
if not connection_string:
raise ValueError(
"Postgres connection string is required"
"Either pass it as a parameter"
"or set the PGVECTOR_CONNECTION_STRING environment variable."
)
return connection_string
@classmethod
def from_documents(
cls: Type[PGVector],
documents: List[Document],
embedding: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGVector:
"""
Return VectorStore initialized from documents and embeddings.
        Postgres connection string is required.
        Either pass it as a parameter
        or set the PGVECTOR_CONNECTION_STRING environment variable.
"""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
connection_string = cls.get_connection_string(kwargs)
kwargs["connection_string"] = connection_string
return cls.from_texts(
texts=texts,
pre_delete_collection=pre_delete_collection,
embedding=embedding,
distance_strategy=distance_strategy,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
**kwargs,
)
@classmethod
def connection_string_from_db_params(
cls,
driver: str,
host: str,
port: int,
database: str,
user: str,
password: str,
) -> str:
"""Return connection string from database parameters."""
return f"postgresql+{driver}://{user}:{password}@{host}:{port}/{database}"
def _select_relevance_score_fn(self) -> Callable[[float], float]:
"""
The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc.
"""
if self.override_relevance_score_fn is not None:
return self.override_relevance_score_fn
# Default strategy is to rely on distance strategy provided
# in vectorstore constructor
if self._distance_strategy == DistanceStrategy.COSINE:
return self._cosine_relevance_score_fn
elif self._distance_strategy == DistanceStrategy.EUCLIDEAN:
return self._euclidean_relevance_score_fn
elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
return self._max_inner_product_relevance_score_fn
else:
raise ValueError(
"No supported normalization function"
f" for distance_strategy of {self._distance_strategy}."
"Consider providing relevance_score_fn to PGVector constructor."
)
def max_marginal_relevance_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs selected using the maximal marginal relevance with score
to embedding vector.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of Documents selected by maximal marginal
relevance to the query and score for each.
"""
results = self.__query_collection(embedding=embedding, k=fetch_k, filter=filter)
embedding_list = [result.EmbeddingStore.embedding for result in results]
mmr_selected = maximal_marginal_relevance(
np.array(embedding, dtype=np.float32),
embedding_list,
k=k,
lambda_mult=lambda_mult,
)
candidates = self._results_to_docs_and_scores(results)
return [r for i, r in enumerate(candidates) if i in mmr_selected]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query (str): Text to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Document]: List of Documents selected by maximal marginal relevance.
"""
embedding = self.embedding_function.embed_query(query)
return self.max_marginal_relevance_search_by_vector(
embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
**kwargs,
)
def max_marginal_relevance_search_with_score(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs selected using the maximal marginal relevance with score.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query (str): Text to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of Documents selected by maximal marginal
relevance to the query and score for each.
"""
embedding = self.embedding_function.embed_query(query)
docs = self.max_marginal_relevance_search_with_score_by_vector(
embedding=embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
**kwargs,
)
return docs
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance
to embedding vector.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding (str): Text to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Document]: List of Documents selected by maximal marginal relevance.
"""
docs_and_scores = self.max_marginal_relevance_search_with_score_by_vector(
embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
**kwargs,
)
return _results_to_docs(docs_and_scores)
async def amax_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance."""
# This is a temporary workaround to make the similarity search
# asynchronous. The proper solution is to make the similarity search
# asynchronous in the vector store implementations.
func = partial(
self.max_marginal_relevance_search_by_vector,
embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
**kwargs,
)
return await asyncio.get_event_loop().run_in_executor(None, func)
| [] |
2024-01-10 | appfolio/langchain | libs~experimental~langchain_experimental~comprehend_moderation~toxicity.py | import asyncio
import importlib
from typing import Any, List, Optional
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationToxicityError,
)
class ComprehendToxicity:
def __init__(
self,
client: Any,
callback: Optional[Any] = None,
unique_id: Optional[str] = None,
chain_id: Optional[str] = None,
) -> None:
self.client = client
self.moderation_beacon = {
"moderation_chain_id": chain_id,
"moderation_type": "Toxicity",
"moderation_status": "LABELS_NOT_FOUND",
}
self.callback = callback
self.unique_id = unique_id
def _toxicity_init_validate(self, max_size: int) -> Any:
"""
Validate and initialize toxicity processing configuration.
Args:
max_size (int): Maximum sentence size defined in the
configuration object.
Raises:
Exception: If the maximum sentence size exceeds the 5KB limit.
Note:
This function ensures that the NLTK punkt tokenizer is downloaded
if not already present.
Returns:
None
"""
if max_size > 1024 * 5:
raise Exception("The sentence length should not exceed 5KB.")
try:
nltk = importlib.import_module("nltk")
nltk.data.find("tokenizers/punkt")
return nltk
except ImportError:
raise ModuleNotFoundError(
"Could not import nltk python package. "
"Please install it with `pip install nltk`."
)
        except LookupError:
            nltk.download("punkt")
            return nltk
def _split_paragraph(
self, prompt_value: str, max_size: int = 1024 * 4
) -> List[List[str]]:
"""
Split a paragraph into chunks of sentences, respecting the maximum size limit.
Args:
paragraph (str): The input paragraph to be split into chunks.
max_size (int, optional): The maximum size limit in bytes for
each chunk. Defaults to 1024.
Returns:
List[List[str]]: A list of chunks, where each chunk is a list
of sentences.
Note:
This function validates the maximum sentence size based on service
limits using the 'toxicity_init_validate' function. It uses the NLTK
sentence tokenizer to split the paragraph into sentences.
Example:
paragraph = "This is a sample paragraph. It
contains multiple sentences. ..."
chunks = split_paragraph(paragraph, max_size=2048)
"""
# validate max. sentence size based on Service limits
nltk = self._toxicity_init_validate(max_size)
sentences = nltk.sent_tokenize(prompt_value)
chunks = list() # type: ignore
current_chunk = list() # type: ignore
current_size = 0
for sentence in sentences:
sentence_size = len(sentence.encode("utf-8"))
# If adding a new sentence exceeds max_size
# or current_chunk has 10 sentences, start a new chunk
if (current_size + sentence_size > max_size) or (len(current_chunk) >= 10):
if current_chunk: # Avoid appending empty chunks
chunks.append(current_chunk)
current_chunk = []
current_size = 0
current_chunk.append(sentence)
current_size += sentence_size
# Add any remaining sentences
if current_chunk:
chunks.append(current_chunk)
return chunks
def validate(self, prompt_value: str, config: Any = None) -> str:
"""
Check the toxicity of a given text prompt using AWS
Comprehend service and apply actions based on configuration.
Args:
prompt_value (str): The text content to be checked for toxicity.
config (Dict[str, Any]): Configuration for toxicity checks and actions.
Returns:
str: The original prompt_value if allowed or no toxicity found.
Raises:
ValueError: If the prompt contains toxic labels and cannot be
processed based on the configuration.
"""
chunks = self._split_paragraph(prompt_value=prompt_value)
for sentence_list in chunks:
segments = [{"Text": sentence} for sentence in sentence_list]
response = self.client.detect_toxic_content(
TextSegments=segments, LanguageCode="en"
)
if self.callback and self.callback.toxicity_callback:
self.moderation_beacon["moderation_input"] = segments # type: ignore
self.moderation_beacon["moderation_output"] = response
toxicity_found = False
threshold = config.get("threshold")
toxicity_labels = config.get("labels")
if not toxicity_labels:
for item in response["ResultList"]:
for label in item["Labels"]:
if label["Score"] >= threshold:
toxicity_found = True
break
else:
for item in response["ResultList"]:
for label in item["Labels"]:
if (
label["Name"] in toxicity_labels
and label["Score"] >= threshold
):
toxicity_found = True
break
if self.callback and self.callback.toxicity_callback:
if toxicity_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_toxicity(
self.moderation_beacon, self.unique_id
)
)
if toxicity_found:
raise ModerationToxicityError
return prompt_value
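# Illustrative usage sketch (editorial addition, not part of the original module).
# Assumes a boto3 Comprehend client in a region where detect_toxic_content is
# available; the threshold/labels values below are arbitrary examples.
#
#   import boto3
#   comprehend = boto3.client("comprehend", region_name="us-east-1")
#   moderator = ComprehendToxicity(client=comprehend)
#   checked = moderator.validate(
#       "Some user supplied prompt text.",
#       config={"threshold": 0.5, "labels": []},  # empty labels -> check every label
#   )  # raises ModerationToxicityError if any label score >= threshold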
| [] |
2024-01-10 | appfolio/langchain | libs~langchain~langchain~evaluation~scoring~eval_chain.py | """Base classes for scoring the output of a model on a scale of 1-10."""
from __future__ import annotations
import logging
import re
from typing import Any, Dict, List, Optional, Union
from langchain.callbacks.manager import Callbacks
from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
from langchain.chains.llm import LLMChain
from langchain.chat_models.azure_openai import AzureChatOpenAI
from langchain.chat_models.openai import ChatOpenAI
from langchain.evaluation.criteria.eval_chain import (
CRITERIA_TYPE,
Criteria,
)
from langchain.evaluation.schema import LLMEvalChain, StringEvaluator
from langchain.evaluation.scoring.prompt import (
CRITERIA_INSTRUCTIONS,
DEFAULT_CRITERIA,
SCORING_TEMPLATE,
SCORING_TEMPLATE_WITH_REFERENCE,
)
from langchain.prompts.prompt import PromptTemplate
from langchain.pydantic_v1 import Extra, Field
from langchain.schema import RUN_KEY, BaseOutputParser
from langchain.schema.language_model import BaseLanguageModel
logger = logging.getLogger(__name__)
_FIND_DOUBLE_BRACKETS = re.compile(r"\[\[(.*?)\]\]")
_SUPPORTED_CRITERIA = {
Criteria.CONCISENESS: "Is the submission concise and to the point?",
Criteria.RELEVANCE: "Is the submission referring to a real quote from the text?",
Criteria.CORRECTNESS: "Is the submission correct, accurate, and factual?",
Criteria.COHERENCE: "Is the submission coherent, well-structured, and organized?",
Criteria.HARMFULNESS: "Is the submission harmful, offensive, or inappropriate?",
Criteria.MALICIOUSNESS: "Is the submission malicious in any way?",
Criteria.HELPFULNESS: "Is the submission helpful, insightful, and appropriate?",
Criteria.CONTROVERSIALITY: "Is the submission controversial or debatable?",
    Criteria.MISOGYNY: "Is the submission misogynistic? If so, respond Y.",
Criteria.CRIMINALITY: "Is the submission criminal in any way?",
Criteria.INSENSITIVITY: "Is the submission insensitive to any group of people?",
Criteria.DEPTH: "Does the submission demonstrate depth of thought?",
Criteria.CREATIVITY: "Does the submission demonstrate novelty or unique ideas?",
Criteria.DETAIL: "Does the submission demonstrate attention to detail?",
}
def resolve_criteria(
criteria: Optional[Union[CRITERIA_TYPE, str, List[CRITERIA_TYPE]]]
) -> dict:
"""Resolve the criteria for the pairwise evaluator.
Args:
criteria (Union[CRITERIA_TYPE, str], optional): The criteria to use.
Returns:
dict: The resolved criteria.
"""
if criteria is None:
_default_criteria = [
Criteria.HELPFULNESS,
Criteria.RELEVANCE,
Criteria.CORRECTNESS,
Criteria.DEPTH,
]
return {k.value: _SUPPORTED_CRITERIA[k] for k in _default_criteria}
elif isinstance(criteria, Criteria):
criteria_ = {criteria.value: _SUPPORTED_CRITERIA[criteria]}
elif isinstance(criteria, str):
if criteria in _SUPPORTED_CRITERIA:
criteria_ = {criteria: _SUPPORTED_CRITERIA[Criteria(criteria)]}
else:
criteria_ = {criteria: ""}
elif isinstance(criteria, ConstitutionalPrinciple):
criteria_ = {criteria.name: criteria.critique_request}
elif isinstance(criteria, (list, tuple)):
criteria_ = {
k: v
for criterion in criteria
for k, v in resolve_criteria(criterion).items()
}
else:
if not criteria:
raise ValueError(
"Criteria cannot be empty. "
"Please provide a criterion name or a mapping of the criterion name"
" to its description."
)
criteria_ = dict(criteria)
return criteria_
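# Editorial note: a few illustrative calls showing how the resolution rules above
# behave (results paraphrased from the mapping, not executed here).
#
#   resolve_criteria(None)           # -> helpfulness, relevance, correctness, depth
#   resolve_criteria("conciseness")  # -> {"conciseness": "Is the submission concise and to the point?"}
#   resolve_criteria({"my_criterion": "Does the answer cite sources?"})  # custom mapping passes through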
class ScoreStringResultOutputParser(BaseOutputParser[dict]):
"""A parser for the output of the ScoreStringEvalChain.
Attributes:
_type (str): The type of the output parser.
"""
@property
def _type(self) -> str:
"""Return the type of the output parser.
Returns:
str: The type of the output parser.
"""
return "pairwise_string_result"
def parse(self, text: str) -> Dict[str, Any]:
"""Parse the output text.
Args:
text (str): The output text to parse.
Returns:
Dict: The parsed output.
Raises:
ValueError: If the verdict is invalid.
"""
match = _FIND_DOUBLE_BRACKETS.search(text)
if match:
verdict = match.group(1)
if not match or verdict not in list("123456789") + ["10"]:
raise ValueError(
f"Invalid output: {text}. "
"Output must contain a double bracketed string\
with the verdict between 1 and 10."
)
return {
"reasoning": text,
"score": int(verdict),
}
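# Editorial note: an illustrative example of the expected model output format.
# The parser looks for a double-bracketed verdict such as "[[7]]":
#
#   parser = ScoreStringResultOutputParser()
#   parser.parse("The answer is mostly correct ... Rating: [[7]]")
#   # -> {"reasoning": "The answer is mostly correct ... Rating: [[7]]", "score": 7}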
class ScoreStringEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
"""A chain for scoring on a scale of 1-10 the output of a model.
Attributes:
output_parser (BaseOutputParser): The output parser for the chain.
Example:
>>> from langchain.chat_models import ChatOpenAI
>>> from langchain.evaluation.scoring import ScoreStringEvalChain
>>> llm = ChatOpenAI(temperature=0, model_name="gpt-4")
>>> chain = ScoreStringEvalChain.from_llm(llm=llm)
>>> result = chain.evaluate_strings(
... input = "What is the chemical formula for water?",
... prediction = "H2O",
... reference = "The chemical formula for water is H2O.",
... )
>>> print(result)
# {
# "score": 8,
    #     "reasoning": "The response accurately states "
# "that the chemical formula for water is H2O."
# "However, it does not provide an explanation of what the formula means."
# }
"""
output_key: str = "results" #: :meta private:
output_parser: BaseOutputParser = Field(
default_factory=ScoreStringResultOutputParser
)
class Config:
"""Configuration for the ScoreStringEvalChain."""
extra = Extra.ignore
@property
def requires_reference(self) -> bool:
"""Return whether the chain requires a reference.
Returns:
bool: True if the chain requires a reference, False otherwise.
"""
return False
@property
def requires_input(self) -> bool:
"""Return whether the chain requires an input.
Returns:
bool: True if the chain requires an input, False otherwise.
"""
return True
@property
def _skip_reference_warning(self) -> str:
"""Return the warning to show when reference is ignored.
Returns:
str: The warning to show when reference is ignored.
"""
return (
f"Ignoring reference in {self.__class__.__name__}, as it is not expected."
"\nTo use a reference, use the LabeledScoreStringEvalChain instead."
" (EvaluatorType.LABELED_SCORE_STRING) instead."
)
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
prompt: Optional[PromptTemplate] = None,
criteria: Optional[Union[CRITERIA_TYPE, str]] = None,
**kwargs: Any,
) -> ScoreStringEvalChain:
"""Initialize the ScoreStringEvalChain from an LLM.
Args:
llm (BaseChatModel): The LLM to use (GPT-4 recommended).
prompt (PromptTemplate, optional): The prompt to use.
**kwargs (Any): Additional keyword arguments.
Returns:
            ScoreStringEvalChain: The initialized ScoreStringEvalChain.
Raises:
ValueError: If the input variables are not as expected.
"""
if not (
isinstance(llm, (ChatOpenAI, AzureChatOpenAI))
and llm.model_name.startswith("gpt-4")
):
logger.warning(
"This chain was only tested with GPT-4. \
Performance may be significantly worse with other models."
)
expected_input_vars = {"prediction", "input", "criteria"}
prompt_ = prompt or SCORING_TEMPLATE.partial(reference="")
if expected_input_vars != set(prompt_.input_variables):
raise ValueError(
f"Input variables should be {expected_input_vars}, "
f"but got {prompt_.input_variables}"
)
criteria_ = resolve_criteria(criteria)
criteria_str = "\n".join(f"{k}: {v}" if v else k for k, v in criteria_.items())
criteria_str = (
CRITERIA_INSTRUCTIONS + criteria_str if criteria_str else DEFAULT_CRITERIA
)
return cls(llm=llm, prompt=prompt_.partial(criteria=criteria_str), **kwargs)
def _prepare_input(
self,
prediction: str,
input: Optional[str],
reference: Optional[str],
) -> dict:
"""Prepare the input for the chain.
Args:
            prediction (str): The output string from the model being scored.
input (str, optional): The input or task string.
reference (str, optional): The reference string, if any.
Returns:
dict: The prepared input for the chain.
"""
input_ = {
"prediction": prediction,
"input": input,
}
if self.requires_reference:
input_["reference"] = reference
return input_
def _prepare_output(self, result: dict) -> dict:
"""Prepare the output."""
parsed = result[self.output_key]
if RUN_KEY in result:
parsed[RUN_KEY] = result[RUN_KEY]
return parsed
def _evaluate_strings(
self,
*,
prediction: str,
input: Optional[str] = None,
reference: Optional[str] = None,
callbacks: Callbacks = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
include_run_info: bool = False,
**kwargs: Any,
) -> dict:
"""Score the output string.
Args:
            prediction (str): The output string from the model being scored.
input (str, optional): The input or task string.
callbacks (Callbacks, optional): The callbacks to use.
reference (str, optional): The reference string, if any.
**kwargs (Any): Additional keyword arguments.
Returns:
dict: A dictionary containing:
- reasoning: The reasoning for the preference.
- score: A score between 1 and 10.
"""
input_ = self._prepare_input(prediction, input, reference)
result = self(
inputs=input_,
callbacks=callbacks,
tags=tags,
metadata=metadata,
include_run_info=include_run_info,
)
return self._prepare_output(result)
    async def _aevaluate_strings(
self,
*,
prediction: str,
reference: Optional[str] = None,
input: Optional[str] = None,
callbacks: Callbacks = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
include_run_info: bool = False,
**kwargs: Any,
) -> dict:
"""Asynchronously score the output string.
Args:
            prediction (str): The output string from the model being scored.
input (str, optional): The input or task string.
callbacks (Callbacks, optional): The callbacks to use.
reference (str, optional): The reference string, if any.
**kwargs (Any): Additional keyword arguments.
Returns:
dict: A dictionary containing:
- reasoning: The reasoning for the preference.
- score: A score between 1 and 10.
"""
input_ = self._prepare_input(prediction, input, reference)
result = await self.acall(
inputs=input_,
callbacks=callbacks,
tags=tags,
metadata=metadata,
include_run_info=include_run_info,
)
return self._prepare_output(result)
class LabeledScoreStringEvalChain(ScoreStringEvalChain):
"""A chain for scoring the output of a model on a scale of 1-10.
Attributes:
output_parser (BaseOutputParser): The output parser for the chain.
"""
@property
def requires_reference(self) -> bool:
"""Return whether the chain requires a reference.
Returns:
bool: True if the chain requires a reference, False otherwise.
"""
return True
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
prompt: Optional[PromptTemplate] = None,
criteria: Optional[Union[CRITERIA_TYPE, str]] = None,
**kwargs: Any,
) -> LabeledScoreStringEvalChain:
"""Initialize the LabeledScoreStringEvalChain from an LLM.
Args:
llm (BaseLanguageModel): The LLM to use.
prompt (PromptTemplate, optional): The prompt to use.
criteria (Union[CRITERIA_TYPE, str], optional): The criteria to use.
**kwargs (Any): Additional keyword arguments.
Returns:
LabeledScoreStringEvalChain: The initialized LabeledScoreStringEvalChain.
Raises:
ValueError: If the input variables are not as expected.
""" # noqa: E501
expected_input_vars = {
"prediction",
"input",
"reference",
"criteria",
}
prompt_ = prompt or SCORING_TEMPLATE_WITH_REFERENCE
if expected_input_vars != set(prompt_.input_variables):
raise ValueError(
f"Input variables should be {expected_input_vars}, "
f"but got {prompt_.input_variables}"
)
criteria_ = resolve_criteria(criteria)
criteria_str = "\n".join(f"{k}: {v}" for k, v in criteria_.items())
criteria_str = CRITERIA_INSTRUCTIONS + criteria_str if criteria_str else ""
return cls(llm=llm, prompt=prompt_.partial(criteria=criteria_str), **kwargs)
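# Illustrative usage sketch (editorial addition, not part of the original module).
# A reference-based scorer built from the classmethod above; the model choice and
# the strings are examples only.
#
#   from langchain.chat_models import ChatOpenAI
#   llm = ChatOpenAI(temperature=0, model_name="gpt-4")
#   chain = LabeledScoreStringEvalChain.from_llm(llm=llm, criteria="correctness")
#   result = chain.evaluate_strings(
#       input="What is the chemical formula for water?",
#       prediction="H2O",
#       reference="The chemical formula for water is H2O.",
#   )
#   # result["score"] is an integer from 1 to 10, result["reasoning"] the rationale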
| [] |
2024-01-10 | appfolio/langchain | libs~langchain~langchain~memory~readonly.py | from typing import Any, Dict, List
from langchain.schema import BaseMemory
class ReadOnlySharedMemory(BaseMemory):
"""A memory wrapper that is read-only and cannot be changed."""
memory: BaseMemory
@property
def memory_variables(self) -> List[str]:
"""Return memory variables."""
return self.memory.memory_variables
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Load memory variables from memory."""
return self.memory.load_memory_variables(inputs)
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Nothing should be saved or changed"""
pass
def clear(self) -> None:
"""Nothing to clear, got a memory like a vault."""
pass
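# Illustrative usage sketch (editorial addition, not part of the original module):
# wrap an existing memory so other chains can read it without writing to it.
#
#   from langchain.memory import ConversationBufferMemory
#   shared = ConversationBufferMemory(memory_key="chat_history")
#   readonly = ReadOnlySharedMemory(memory=shared)
#   readonly.load_memory_variables({})  # same view as `shared`
#   readonly.save_context({"input": "hi"}, {"output": "ignored"})  # no-op by design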
| [] |
2024-01-10 | appfolio/langchain | libs~langchain~langchain~schema~runnable~fallbacks.py | import asyncio
from typing import (
TYPE_CHECKING,
Any,
Iterator,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
)
from langchain.load.dump import dumpd
from langchain.pydantic_v1 import BaseModel
from langchain.schema.runnable.base import Runnable, RunnableSerializable
from langchain.schema.runnable.config import (
RunnableConfig,
ensure_config,
get_async_callback_manager_for_config,
get_callback_manager_for_config,
get_config_list,
patch_config,
)
from langchain.schema.runnable.utils import (
ConfigurableFieldSpec,
Input,
Output,
get_unique_config_specs,
)
if TYPE_CHECKING:
from langchain.callbacks.manager import AsyncCallbackManagerForChainRun
class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
"""
A Runnable that can fallback to other Runnables if it fails.
"""
runnable: Runnable[Input, Output]
fallbacks: Sequence[Runnable[Input, Output]]
exceptions_to_handle: Tuple[Type[BaseException], ...] = (Exception,)
class Config:
arbitrary_types_allowed = True
@property
def InputType(self) -> Type[Input]:
return self.runnable.InputType
@property
def OutputType(self) -> Type[Output]:
return self.runnable.OutputType
@property
def input_schema(self) -> Type[BaseModel]:
return self.runnable.input_schema
@property
def output_schema(self) -> Type[BaseModel]:
return self.runnable.output_schema
@property
def config_specs(self) -> Sequence[ConfigurableFieldSpec]:
return get_unique_config_specs(
spec
for step in [self.runnable, *self.fallbacks]
for spec in step.config_specs
)
def config_schema(
self, *, include: Optional[Sequence[str]] = None
) -> Type[BaseModel]:
return self.runnable.config_schema(include=include)
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
return cls.__module__.split(".")[:-1]
@property
def runnables(self) -> Iterator[Runnable[Input, Output]]:
yield self.runnable
yield from self.fallbacks
def invoke(
self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> Output:
# setup callbacks
config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
# start the root run
run_manager = callback_manager.on_chain_start(
dumpd(self), input, name=config.get("run_name")
)
first_error = None
for runnable in self.runnables:
try:
output = runnable.invoke(
input,
patch_config(config, callbacks=run_manager.get_child()),
**kwargs,
)
except self.exceptions_to_handle as e:
if first_error is None:
first_error = e
except BaseException as e:
run_manager.on_chain_error(e)
raise e
else:
run_manager.on_chain_end(output)
return output
if first_error is None:
raise ValueError("No error stored at end of fallbacks.")
run_manager.on_chain_error(first_error)
raise first_error
async def ainvoke(
self,
input: Input,
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> Output:
# setup callbacks
config = ensure_config(config)
callback_manager = get_async_callback_manager_for_config(config)
# start the root run
run_manager = await callback_manager.on_chain_start(
dumpd(self), input, name=config.get("run_name")
)
first_error = None
for runnable in self.runnables:
try:
output = await runnable.ainvoke(
input,
patch_config(config, callbacks=run_manager.get_child()),
**kwargs,
)
except self.exceptions_to_handle as e:
if first_error is None:
first_error = e
except BaseException as e:
await run_manager.on_chain_error(e)
raise e
else:
await run_manager.on_chain_end(output)
return output
if first_error is None:
raise ValueError("No error stored at end of fallbacks.")
await run_manager.on_chain_error(first_error)
raise first_error
def batch(
self,
inputs: List[Input],
config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
*,
return_exceptions: bool = False,
**kwargs: Optional[Any],
) -> List[Output]:
from langchain.callbacks.manager import CallbackManager
if return_exceptions:
raise NotImplementedError()
if not inputs:
return []
# setup callbacks
configs = get_config_list(config, len(inputs))
callback_managers = [
CallbackManager.configure(
inheritable_callbacks=config.get("callbacks"),
local_callbacks=None,
verbose=False,
inheritable_tags=config.get("tags"),
local_tags=None,
inheritable_metadata=config.get("metadata"),
local_metadata=None,
)
for config in configs
]
# start the root runs, one per input
run_managers = [
cm.on_chain_start(
dumpd(self),
input if isinstance(input, dict) else {"input": input},
name=config.get("run_name"),
)
for cm, input, config in zip(callback_managers, inputs, configs)
]
first_error = None
for runnable in self.runnables:
try:
outputs = runnable.batch(
inputs,
[
# each step a child run of the corresponding root run
patch_config(config, callbacks=rm.get_child())
for rm, config in zip(run_managers, configs)
],
return_exceptions=return_exceptions,
**kwargs,
)
except self.exceptions_to_handle as e:
if first_error is None:
first_error = e
except BaseException as e:
for rm in run_managers:
rm.on_chain_error(e)
raise e
else:
for rm, output in zip(run_managers, outputs):
rm.on_chain_end(output)
return outputs
if first_error is None:
raise ValueError("No error stored at end of fallbacks.")
for rm in run_managers:
rm.on_chain_error(first_error)
raise first_error
async def abatch(
self,
inputs: List[Input],
config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
*,
return_exceptions: bool = False,
**kwargs: Optional[Any],
) -> List[Output]:
from langchain.callbacks.manager import AsyncCallbackManager
if return_exceptions:
raise NotImplementedError()
if not inputs:
return []
# setup callbacks
configs = get_config_list(config, len(inputs))
callback_managers = [
AsyncCallbackManager.configure(
inheritable_callbacks=config.get("callbacks"),
local_callbacks=None,
verbose=False,
inheritable_tags=config.get("tags"),
local_tags=None,
inheritable_metadata=config.get("metadata"),
local_metadata=None,
)
for config in configs
]
# start the root runs, one per input
run_managers: List[AsyncCallbackManagerForChainRun] = await asyncio.gather(
*(
cm.on_chain_start(
dumpd(self),
input,
name=config.get("run_name"),
)
for cm, input, config in zip(callback_managers, inputs, configs)
)
)
first_error = None
for runnable in self.runnables:
try:
outputs = await runnable.abatch(
inputs,
[
# each step a child run of the corresponding root run
patch_config(config, callbacks=rm.get_child())
for rm, config in zip(run_managers, configs)
],
return_exceptions=return_exceptions,
**kwargs,
)
except self.exceptions_to_handle as e:
if first_error is None:
first_error = e
            except BaseException as e:
                await asyncio.gather(*(rm.on_chain_error(e) for rm in run_managers))
                raise e
else:
await asyncio.gather(
*(
rm.on_chain_end(output)
for rm, output in zip(run_managers, outputs)
)
)
return outputs
if first_error is None:
raise ValueError("No error stored at end of fallbacks.")
await asyncio.gather(*(rm.on_chain_error(first_error) for rm in run_managers))
raise first_error
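# Illustrative usage sketch (editorial addition, not part of the original module).
# A RunnableWithFallbacks is normally built via Runnable.with_fallbacks(); the
# concrete models below are examples only.
#
#   from langchain.chat_models import ChatOpenAI, ChatAnthropic
#   primary = ChatOpenAI(model="gpt-4")
#   backup = ChatAnthropic(model="claude-2")
#   chain = primary.with_fallbacks([backup])  # returns a RunnableWithFallbacks
#   chain.invoke("Summarize the plot of Hamlet in one sentence.")
#   # If the primary raises one of `exceptions_to_handle`, the backup is tried.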
| [] |
2024-01-10 | Eterance/gpt-files-batch-translator | gpt3_interactor.py | from enum import Enum
import logging
import os
import random
import re
import traceback
from typing import Any
import openai
from openai.error import RateLimitError, APIConnectionError, ServiceUnavailableError, APIError, InvalidRequestError
from transformers import GPT2TokenizerFast
import time
class CompletionTypeEnum(Enum):
ChatCompletion = "ChatCompletion"
Completion = "Completion"
# usage limit:
# https://help.openai.com/en/articles/5955598-is-api-usage-subject-to-any-rate-limits
class Gpt3Interactor():
"""
Class that interact with openai gpt3 model.
"""
    # TODO: Update all call sites, because the model_name parameter has been removed.
def __init__(self, api_keys:list[str], logger:logging.Logger=None, shuffle_keys:bool=True, **useless_args):
self.keys = api_keys
if shuffle_keys:
random.shuffle(self.keys)
self.current_key_id = 0
# prevent logger lots of logs
self._openai_logger = logging.getLogger("openai")
self._openai_logger.setLevel(logging.WARNING)
def calculate_token_counts(self, string:str):
"""
Calculate the token counts of the string using GPT2TokenizerFast.
        Note: this is for reference only, and may not be accurate for codex (because codex uses a different tokenizer).
see: https://beta.openai.com/tokenizer
"""
tokenizer:GPT2TokenizerFast = GPT2TokenizerFast.from_pretrained("gpt2")
return len(tokenizer(string)['input_ids'])
    # TODO: Update all previous call sites of this function, because its parameters have changed.
def generate(
self,
engine: str,
prompt: str|list[dict],
max_tokens:int,
n: int = 1,
temperature: float = 0,
top_p: float = 1,
stop: list[str] = ['--', '\n\n', ';', '#'],
error_wait_time: int = 5,
n_batch_size: int = 4,
completion_type:CompletionTypeEnum = CompletionTypeEnum.Completion
):
"""
args:
            n_batch_size(int): if n is too large, the request rate may exceed the limit (40000 tokens per min) and cause a RateLimitError.
                So n is split into batches of size n_batch_size, the API is called multiple times, and the results are merged.
If n_batch_size is too small, it will slow down the process,
but if n_batch_size is too large, it will cause RateLimitError.
Set n_batch_size < 1 to disable this feature.
For table_fact, recommend n_batch_size=10~12, and 4~6 for wikitq.
"""
if n > n_batch_size and n_batch_size > 0:
# split into a list, contains multiple n=n_batch_size and last one n=n%n_batch_size
n_list:list[int] = [n_batch_size] * (n // n_batch_size)
if n % n_batch_size != 0:
n_list.append(n % n_batch_size)
result_list = []
for new_n in n_list:
result = self.generate(
engine=engine,
prompt=prompt,
max_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
n=new_n,
stop=stop,
error_wait_time=error_wait_time,
n_batch_size=n_batch_size
)
result_list.append(result)
return self._merge_multiple_response_dicts(result_list)
else:
start_time = time.time()
result = None
while result is None:
try:
                    if len(self.keys) <= 0:
                        print("!!!!!!!!!!!!!!!!!No Key available!!!!!!!!!!!!!!")
                        raise Exception("No openai api key available.")
                    key = self.keys[self.current_key_id]
self.current_key_id = (self.current_key_id + 1) % len(self.keys)
print(f"Using openai api key: {key}")
#print(f"Using openai api key: {key}")
if completion_type == CompletionTypeEnum.ChatCompletion:
assert isinstance(prompt, list), "prompt/messages must be a list of dict when using ChatCompletion."
if stop is not None:
result = openai.ChatCompletion.create(
model=engine,
messages=prompt,
api_key=key,
max_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
n=n,
stop=stop
)
else:
result = openai.ChatCompletion.create(
model=engine,
messages=prompt,
api_key=key,
max_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
n=n
)
else:
if stop is not None:
result = openai.Completion.create(
engine=engine,
prompt=prompt,
api_key=key,
max_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
n=n,
stop=stop,
logprobs=1
)
else:
result = openai.Completion.create(
engine=engine,
prompt=prompt,
api_key=key,
max_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
n=n,
logprobs=1
)
print(f'Openai api inference time: {time.time() - start_time}')
#print('Openai api inference time:', time.time() - start_time)
return result
except RateLimitError as rte:
if "You exceeded your current quota" in str(rte):
print(f"key {key} exceeded current quota. Will remove from keys (remain {len(self.keys)-1}) and retry.")
self.keys.remove(key)
self.current_key_id -= 1
time.sleep(error_wait_time)
elif n_batch_size >= 2:
# Perhaps n_batch_size still too large, reduce to half
print(f"{rte}, n_batch_size: {n_batch_size} -> {n_batch_size//2} and retry.")
#print(f"{rte}, n_batch_size: {n_batch_size} -> {n_batch_size//2} and retry.")
result = self.generate(
engine=engine,
prompt=prompt,
max_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
n=n_batch_size,
stop=stop,
error_wait_time=error_wait_time,
n_batch_size=n_batch_size//2
)
return result
else:
# It means request is too frequent, so just retry
print(f'{rte} n_batch_size: {n_batch_size}. Retry.')
#print(f'{rte} n_batch_size: {n_batch_size}. Retry.')
time.sleep(error_wait_time)
except InvalidRequestError as ire:
if ire.code == 'context_length_exceeded':
# extract the context length from the error message
max_length = self._extract_numbers(str(ire), r"This model's maximum context length is (\d+) tokens")
request_length = self._extract_numbers(str(ire), r"you requested (\d+) tokens")
old_max_tokens = max_tokens
max_tokens = max_length - request_length
print(f'{type(ire)}: context length exceeded, max_tokens {old_max_tokens}->{max_tokens} and retry.')
else:
print(f'{type(ire)}: {ire}. Retry.')
raise ire
except APIError as apie:
                    if apie.http_status is not None and apie.http_status == 500:
                        print(f'{type(apie)}: {apie}, Retry.')
#print(e, 'Retry.')
time.sleep(error_wait_time)
except Exception as e:
traceback.print_exc()
                    print(f'\033[1;31m(From Gpt3Interactor) red\033[0m {type(e)}: {e}, Retry.')
#print(e, 'Retry.')
time.sleep(error_wait_time)
#time.sleep(5)
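    # Illustrative usage sketch (editorial addition, not part of the original class).
    # Keys and engine name are placeholders.
    #
    #   interactor = Gpt3Interactor(api_keys=["sk-..."])
    #   response = interactor.generate(
    #       engine="gpt-3.5-turbo",
    #       prompt=[{"role": "user", "content": "Translate 'hello' to French."}],
    #       max_tokens=64,
    #       stop=None,
    #       completion_type=CompletionTypeEnum.ChatCompletion,
    #   )
    #   print(response["choices"][0]["message"]["content"])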
def _extract_numbers(self, string, pattern):
match = re.search(pattern, string)
if match:
return int(match.group(1))
else:
return None
def _merge_multiple_response_dicts(self, response_dicts: list[dict])->dict:
response_dict = response_dicts[0]
for response_dict_ in response_dicts[1:]:
response_dict['choices'].extend(response_dict_['choices'])
return response_dict | [] |
2024-01-10 | kamalshadi/MLAB | pcaTCP.py | #!/usr/bin/env python
import csv
import os
import sys
import subprocess
import statvfs
import pylab as pl
import pywt as wx
import numpy as num
import math
from scipy.linalg import eigh
from scipy.stats import norm,mstats
from scipy.signal import hilbert
import cmath as cx
from matplotlib.mlab import cohere
from myStat import *
model_dim=5;
def dNorm(x):
s=num.std(x)
u=num.mean(x)
return [(xx-u)/s for xx in x]
def correlate(z,y,mode='p'):
# p for pearson, c for pcc and s for spectral coherence
l=len(y)
if len(z)!=len(y):
print 'Error'
return
if mode=='p':
a=num.correlate(dNorm(z),dNorm(y),'valid')
return a/len(y)
elif mode=='c' :
za=hilbert(z)
ya=hilbert(y)
phi1=[cx.phase(x) for x in za]
phi2=[cx.phase(x) for x in ya]
a=0
for i in range(l):
a=a+(abs(cx.exp(1j*phi1[i])+cx.exp(1j*phi2[i]))-\
abs(cx.exp(1j*phi1[i])-cx.exp(1j*phi2[i])))/2
return a/len(y)
elif mode=='s':
sc,f=cohere(z,y)
return sc,f
return
def feat(x):
a,b=mstats.mquantiles(x, prob=[0.1,.9])
return max(x)-min(x)
def fracs(a):
a=num.array(a)
b=sum(a)
return a/b
def featureNorm(fv,nonLin=False):
mn=num.mean(fv,0)
std=num.std(fv,0)
fvn=(num.array(fv)-mn)/std
if nonLin:
fvn=1.0/(1+num.exp(-1*fvn))
return (fvn,mn,std)
def db(x):
print type(x)
if type(x)!=list:
return 20*math.log10(abs(x))
else:
return [20*math.log10(abs(xx)) for xx in x]
def usage():
return """
Summary:
    ./pcaTCP.py -d <dump subdirectory> -u <uos id>
    train a PCA model on TCP flow features for the given UoS
"""
def parse_args():
from optparse import OptionParser
parser = OptionParser(usage=usage())
parser.add_option("-d", "--dirc", dest="dirc", default=None,
help="Required: sub_directory in Dump")
parser.add_option("-u", "--uos", dest="uos", default="1",
help="Required: filename for geo data")
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
(options, args) = parser.parse_args()
if options.dirc is None:
print "Error: Please provide --dir to read data \n \
(do not include D- prefix)"
sys.exit(1)
return (options, args)
def order(v,w):
a=zip(v,w)
a.sort()
l=zip(*a)
v=list(l[0])
w=list(l[1])
return [v,w]
def trainModel(X,dim=4,alpha=0.05):
S=num.corrcoef(num.transpose(X))
fL=num.size(S,0)
e,w=eigh(S)
wt=num.identity(fL)-num.dot(w[:,-dim:],num.transpose(w[:,-dim:]))
res=e[:-dim]
p1=sum(res)
p2=sum(res**2)
p3=sum(res**3)
h=1-2*(p1*p3/(3*p2**2))
ca=norm.ppf(1-alpha)
eps=p1*(((ca*num.sqrt(2*p2*(h**2))/p1)+1+p2*h*(h-1)/(p1**2))**(1.0/h))
return (wt,eps)
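# Editorial note: a sketch of how the model returned by trainModel is meant to be
# used for anomaly detection (it mirrors the commented-out block in the __main__
# section below): a normalized feature vector is flagged when its residual energy
# exceeds the Q-statistic threshold eps.
#
#   fvn, u, sigma = featureNorm(fv)           # training feature matrix
#   wt, eps = trainModel(fvn, dim=model_dim)
#   x = (new_sample - u) / sigma              # normalize a new sample with training stats
#   q = num.linalg.norm(num.dot(wt, x)) ** 2  # residual energy
#   is_anomaly = q > eps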
def train(f):
val=csv.reader(f,delimiter=',',quotechar='"', quoting=csv.QUOTE_MINIMAL)
fv1=[]
i=0
fig=pl.figure()
X=[]
for i,line in enumerate(val):
if i==0:
l=len(line)
i=1
continue
else:
cIP=line[0]
server=line[-1]
log=int(line[1])
t=[float(xx)/1e6 for xx in line[2].strip('"').split(',')]
rtt=[float(xx) for xx in line[3].strip('"').split(',')]
cwnd=[float(xx) for xx in line[4].strip('"').split(',')]
cong=max([float(xx) for xx in line[5].strip('"').split(',')])
acked=int(line[8])
down=float(acked)/(1e6*max(t))
X.append(down)
t,w=order(t,zip(rtt,cwnd))
d=5 # level of wavelet decomposition
rtt,cwnd=[list(xx) for xx in zip(*w)]
rx=[xx for xx in rtt if xx>1]
wc = wx.wavedec(cwnd, 'db1', level=d)
wr = wx.wavedec(rtt, 'db1', level=d)
pl.plot(correlate(rtt,cwnd),down,'r*')
f1=[]
for i in range(1,d+1):
f1=f1+[num.std(wc[i]),num.mean(wc[i]),max(wc[i])-min(wc[i])]
f1=f1+[num.std(wr[i]),num.mean(wr[i]),max(wr[i])-min(wr[i])]
#~ f1=f1+[correlate(wr[i],wc[i]),max(wr[i])-min(wr[i]),max(wc[i])-min(wc[i])]
fv1.append(f1)
fv=num.array(fv1)
fvn,u,sigma=featureNorm(fv,nonLin=False)
wt,eps=trainModel(fvn,model_dim)
pl.xlabel('RTT-CWND correlation')
pl.ylabel('Throughput')
pl.show()
#~ return X
return (wt,eps,u,sigma)
if __name__ == '__main__':
(options, args) = parse_args()
dirc=options.dirc
uos=options.uos
ad="Dump/D-"+dirc+"/uos_"+uos
with open(ad,'r') as f:
wt,eps,u,sigma=train(f)
#~ bicMetric(X,True)
#~ pl.show()
#~ with open(ad,'r') as f:
#~ val=csv.reader(f,delimiter=',',quotechar='"', quoting=csv.QUOTE_MINIMAL)
#~ for i,line in enumerate(val):
#~ if i==0:
#~ l=len(line)
#~ i=1
#~ continue
#~ else:
#~ cIP=line[0]
#~ server=line[-1]
#~ log=int(line[1])
#~ t=[float(xx)/1e6 for xx in line[2].strip('"').split(',')]
#~ rtt=[float(xx) for xx in line[3].strip('"').split(',')]
#~ cwnd=[float(xx) for xx in line[4].strip('"').split(',')]
#~ cong=max([float(xx) for xx in line[5].strip('"').split(',')])
#~ acked=int(line[8])
#~ down=float(acked)/(1e6*max(t))
#~ t,w=order(t,zip(rtt,cwnd))
#~ d=5 # level of wavelet decomposition
#~ rtt,cwnd=[list(xx) for xx in zip(*w)]
#~ rx=[xx for xx in rtt if xx>1]
#~ wc = wx.wavedec(cwnd, 'db1', level=d)
#~ wr = wx.wavedec(rtt, 'db1', level=d)
#~ pl.plot(correlate(rtt[-1],cwnd[-1],'c'),down,'r*')
#~ f1=[]
#~ for i in range(1,d+1):
#~ f1=f1+[num.std(wc[i]),num.mean(wc[i]),max(wc[i])-min(wc[i])]
#~ f1=f1+[num.std(wr[i]),num.mean(wr[i]),max(wr[i])-min(wr[i])]
#~ fv1=num.array(f1)
#~ fv=(fv1-u)/sigma
#~ y=(num.linalg.norm(num.dot(wt,fv)))**2
#~ pl.plot(y,down,'r*')
#~ pl.xlabel('Residual energy')
#~ pl.ylabel('Download Throughput')
#~ pl.show()
| [] |
2024-01-10 | jairodriguez/gpt-google-search-bot | main_gr.py | import os
from langchain.agents import Tool
from langchain.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.utilities import GoogleSearchAPIWrapper
from langchain.agents import initialize_agent
import gradio as gr
from dotenv import load_dotenv
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
GOOGLE_CSE_ID = os.getenv("GOOGLE_CSE_ID")
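# Editorial note: the three variables above are expected to come from a local .env
# file read by load_dotenv(); a minimal example (values are placeholders):
#
#   OPENAI_API_KEY=sk-...
#   GOOGLE_API_KEY=AIza...
#   GOOGLE_CSE_ID=0123456789abcdef0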
search = GoogleSearchAPIWrapper()
tools = [
Tool(
name ="Search" ,
func=search.run,
description="useful when you need to answer questions about current events"
),
]
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
llm=ChatOpenAI(temperature=0)
agent_chain = initialize_agent(tools, llm, agent="chat-conversational-react-description",
verbose=True, memory=memory)
def chat_response(input_text):
response = agent_chain.run(input=input_text)
return response
interface = gr.Interface(fn=chat_response, inputs="text", outputs="text", description="Chat with a conversational agent")
interface.launch(share=True)
| [] |