date_collected | repo_name | file_name | file_contents | prompts
---|---|---|---|---
2024-01-10 | ralewis85/langchain | tests~unit_tests~tools~test_zapier.py | """Test building the Zapier tool, not running it."""
import pytest
from langchain.tools.zapier.prompt import BASE_ZAPIER_TOOL_PROMPT
from langchain.tools.zapier.tool import ZapierNLARunAction
from langchain.utilities.zapier import ZapierNLAWrapper
def test_default_base_prompt() -> None:
"""Test that the default prompt is being inserted."""
tool = ZapierNLARunAction(
action_id="test",
zapier_description="test",
params_schema={"test": "test"},
api_wrapper=ZapierNLAWrapper(zapier_nla_api_key="test"),
)
# Test that the base prompt was successfully assigned to the default prompt
assert tool.base_prompt == BASE_ZAPIER_TOOL_PROMPT
assert tool.description == BASE_ZAPIER_TOOL_PROMPT.format(
zapier_description="test",
params=str(list({"test": "test"}.keys())),
)
def test_custom_base_prompt() -> None:
"""Test that a custom prompt is being inserted."""
base_prompt = "Test. {zapier_description} and {params}."
tool = ZapierNLARunAction(
action_id="test",
zapier_description="test",
params_schema={"test": "test"},
base_prompt=base_prompt,
api_wrapper=ZapierNLAWrapper(zapier_nla_api_key="test"),
)
    # Test that the base prompt was successfully assigned to the custom prompt
assert tool.base_prompt == base_prompt
assert tool.description == "Test. test and ['test']."
def test_custom_base_prompt_fail() -> None:
"""Test validating an invalid custom prompt."""
base_prompt = "Test. {zapier_description}."
with pytest.raises(ValueError):
ZapierNLARunAction(
action_id="test",
zapier_description="test",
params={"test": "test"},
base_prompt=base_prompt,
api_wrapper=ZapierNLAWrapper(zapier_nla_api_key="test"),
)
def test_wrapper_fails_no_api_key_or_access_token_initialization() -> None:
"""Test Wrapper requires either an API Key or OAuth Access Token."""
with pytest.raises(ValueError):
ZapierNLAWrapper()
def test_wrapper_api_key_initialization() -> None:
"""Test Wrapper initializes with an API Key."""
ZapierNLAWrapper(zapier_nla_api_key="test")
def test_wrapper_access_token_initialization() -> None:
"""Test Wrapper initializes with an API Key."""
ZapierNLAWrapper(zapier_nla_oauth_access_token="test")
| [
"Test. {zapier_description}.",
"Test. {zapier_description} and {params}."
] |
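The custom-prompt test above hinges on how Python renders a dict's keys as a string. A minimal sketch of that formatting step, independent of langchain and reusing the test's placeholder values:

```python
# Reproduce the description formatting the test expects.
base_prompt = "Test. {zapier_description} and {params}."

params_schema = {"test": "test"}
params = str(list(params_schema.keys()))  # -> "['test']"

description = base_prompt.format(zapier_description="test", params=params)
assert description == "Test. test and ['test']."
print(description)
```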
2024-01-10 | ralewis85/langchain | langchain~agents~agent_toolkits~zapier~toolkit.py | """Zapier Toolkit."""
from typing import List
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.zapier.tool import ZapierNLARunAction
from langchain.utilities.zapier import ZapierNLAWrapper
class ZapierToolkit(BaseToolkit):
"""Zapier Toolkit."""
tools: List[BaseTool] = []
@classmethod
def from_zapier_nla_wrapper(
cls, zapier_nla_wrapper: ZapierNLAWrapper
) -> "ZapierToolkit":
"""Create a toolkit from a ZapierNLAWrapper."""
actions = zapier_nla_wrapper.list()
tools = [
ZapierNLARunAction(
action_id=action["id"],
zapier_description=action["description"],
params_schema=action["params"],
api_wrapper=zapier_nla_wrapper,
)
for action in actions
]
return cls(tools=tools)
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return self.tools
| [] |
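A hedged sketch of how this toolkit is typically wired into an agent; the agent type, LLM, and environment-variable name are assumptions, not part of the file above:

```python
# Illustrative only: assumes a valid ZAPIER_NLA_API_KEY and a langchain version
# exposing initialize_agent / AgentType as shown.
import os

from langchain.agents import AgentType, initialize_agent
from langchain.agents.agent_toolkits import ZapierToolkit
from langchain.llms import OpenAI
from langchain.utilities.zapier import ZapierNLAWrapper

llm = OpenAI(temperature=0)
zapier = ZapierNLAWrapper(zapier_nla_api_key=os.environ["ZAPIER_NLA_API_KEY"])
toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)

agent = initialize_agent(
    toolkit.get_tools(), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("Summarize the last email I received and send it to Slack.")
```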
2024-01-10 | ralewis85/langchain | langchain~utilities~zapier.py | """Util that can interact with Zapier NLA.
Full docs here: https://nla.zapier.com/start/
Note: this wrapper currently only implements the `api_key` auth method, intended for testing
and server-side production use cases (using the developer's connected accounts on
Zapier.com)
For use-cases where LangChain + Zapier NLA is powering a user-facing application, and
LangChain needs access to the end-user's connected accounts on Zapier.com, you'll need
to use oauth. Review the full docs above and reach out to [email protected] for
developer support.
"""
import json
from typing import Dict, List, Optional
import requests
from pydantic import BaseModel, Extra, root_validator
from requests import Request, Session
from langchain.utils import get_from_dict_or_env
class ZapierNLAWrapper(BaseModel):
"""Wrapper for Zapier NLA.
Full docs here: https://nla.zapier.com/start/
This wrapper supports both API Key and OAuth Credential auth methods. API Key
is the fastest way to get started using this wrapper.
Call this wrapper with either `zapier_nla_api_key` or
`zapier_nla_oauth_access_token` arguments, or set the `ZAPIER_NLA_API_KEY`
environment variable. If both arguments are set, the Access Token will take
precedence.
For use-cases where LangChain + Zapier NLA is powering a user-facing application,
and LangChain needs access to the end-user's connected accounts on Zapier.com,
you'll need to use OAuth. Review the full docs above to learn how to create
your own provider and generate credentials.
"""
zapier_nla_api_key: str
zapier_nla_oauth_access_token: str
zapier_nla_api_base: str = "https://nla.zapier.com/api/v1/"
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _get_session(self) -> Session:
session = requests.Session()
session.headers.update(
{
"Accept": "application/json",
"Content-Type": "application/json",
}
)
if self.zapier_nla_oauth_access_token:
session.headers.update(
{"Authorization": f"Bearer {self.zapier_nla_oauth_access_token}"}
)
else:
session.params = {"api_key": self.zapier_nla_api_key}
return session
def _get_action_request(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Request:
data = params if params else {}
data.update(
{
"instructions": instructions,
}
)
return Request(
"POST",
self.zapier_nla_api_base + f"exposed/{action_id}/execute/",
json=data,
)
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
zapier_nla_api_key_default = None
# If there is a oauth_access_key passed in the values
# we don't need a nla_api_key it can be blank
if "zapier_nla_oauth_access_token" in values:
zapier_nla_api_key_default = ""
else:
values["zapier_nla_oauth_access_token"] = ""
# we require at least one API Key
zapier_nla_api_key = get_from_dict_or_env(
values,
"zapier_nla_api_key",
"ZAPIER_NLA_API_KEY",
zapier_nla_api_key_default,
)
values["zapier_nla_api_key"] = zapier_nla_api_key
return values
def list(self) -> List[Dict]:
"""Returns a list of all exposed (enabled) actions associated with
current user (associated with the set api_key). Change your exposed
actions here: https://nla.zapier.com/demo/start/
        The returned list can be empty if no actions are exposed. Otherwise it will contain
        a list of action objects:
[{
"id": str,
"description": str,
"params": Dict[str, str]
}]
        `params` will always contain an `instructions` key, the only required
        param. All others are optional and, if provided, will override any AI guesses
(see "understanding the AI guessing flow" here:
https://nla.zapier.com/docs/using-the-api#ai-guessing)
"""
session = self._get_session()
response = session.get(self.zapier_nla_api_base + "exposed/")
response.raise_for_status()
return response.json()["results"]
def run(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Dict:
"""Executes an action that is identified by action_id, must be exposed
(enabled) by the current user (associated with the set api_key). Change
your exposed actions here: https://nla.zapier.com/demo/start/
The return JSON is guaranteed to be less than ~500 words (350
tokens) making it safe to inject into the prompt of another LLM
call.
"""
session = self._get_session()
request = self._get_action_request(action_id, instructions, params)
response = session.send(session.prepare_request(request))
response.raise_for_status()
return response.json()["result"]
def preview(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Dict:
"""Same as run, but instead of actually executing the action, will
instead return a preview of params that have been guessed by the AI in
case you need to explicitly review before executing."""
session = self._get_session()
params = params if params else {}
params.update({"preview_only": True})
request = self._get_action_request(action_id, instructions, params)
response = session.send(session.prepare_request(request))
response.raise_for_status()
return response.json()["input_params"]
def run_as_str(self, *args, **kwargs) -> str: # type: ignore[no-untyped-def]
"""Same as run, but returns a stringified version of the JSON for
        inserting back into an LLM."""
data = self.run(*args, **kwargs)
return json.dumps(data)
def preview_as_str(self, *args, **kwargs) -> str: # type: ignore[no-untyped-def]
"""Same as preview, but returns a stringified version of the JSON for
        inserting back into an LLM."""
data = self.preview(*args, **kwargs)
return json.dumps(data)
def list_as_str(self) -> str: # type: ignore[no-untyped-def]
"""Same as list, but returns a stringified version of the JSON for
        inserting back into an LLM."""
actions = self.list()
return json.dumps(actions)
| [] |
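A minimal usage sketch of the wrapper above, grounded in its own `list`, `preview`, and `run` methods; the API key and action id are placeholders:

```python
# Sketch only: "01GXXXXXXXXXXXXXX" stands in for a real exposed action id.
from langchain.utilities.zapier import ZapierNLAWrapper

wrapper = ZapierNLAWrapper(zapier_nla_api_key="your-nla-api-key")

for action in wrapper.list():  # exposed (enabled) actions
    print(action["id"], action["description"], action["params"])

preview = wrapper.preview("01GXXXXXXXXXXXXXX", "Send a hello message to #general")
result = wrapper.run("01GXXXXXXXXXXXXXX", "Send a hello message to #general")
print(preview, result)
```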
2024-01-10 | mingfeima/transformers | pytorch_transformers~tokenization_xlm.py | # coding=utf-8
# Copyright 2019 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for OpenAI GPT."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
import logging
import os
import re
from io import open
from .tokenization_utils import PreTrainedTokenizer
from .tokenization_bert import BasicTokenizer
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file':
{
'xlm-mlm-en-2048': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-en-2048-vocab.json",
'xlm-mlm-ende-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-ende-1024-vocab.json",
'xlm-mlm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enfr-1024-vocab.json",
'xlm-mlm-enro-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enro-1024-vocab.json",
'xlm-mlm-tlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-tlm-xnli15-1024-vocab.json",
'xlm-mlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-xnli15-1024-vocab.json",
'xlm-clm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-enfr-1024-vocab.json",
'xlm-clm-ende-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-ende-1024-vocab.json",
},
'merges_file':
{
'xlm-mlm-en-2048': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-en-2048-merges.txt",
'xlm-mlm-ende-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-ende-1024-merges.txt",
'xlm-mlm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enfr-1024-merges.txt",
'xlm-mlm-enro-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enro-1024-merges.txt",
'xlm-mlm-tlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-tlm-xnli15-1024-merges.txt",
'xlm-mlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-xnli15-1024-merges.txt",
'xlm-clm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enfr-1024-merges.txt",
'xlm-clm-ende-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-ende-1024-merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'xlm-mlm-en-2048': 512,
'xlm-mlm-ende-1024': 512,
'xlm-mlm-enfr-1024': 512,
'xlm-mlm-enro-1024': 512,
'xlm-mlm-tlm-xnli15-1024': 512,
'xlm-mlm-xnli15-1024': 512,
'xlm-clm-enfr-1024': 512,
'xlm-clm-ende-1024': 512,
}
def get_pairs(word):
"""
Return set of symbol pairs in a word.
word is represented as tuple of symbols (symbols being variable-length strings)
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def text_standardize(text):
"""
    Fixes some issues the spaCy tokenizer had on the books corpus,
    and also does some whitespace standardization.
"""
text = text.replace('—', '-')
text = text.replace('–', '-')
text = text.replace('―', '-')
text = text.replace('…', '...')
text = text.replace('´', "'")
text = re.sub(r'''(-+|~+|!+|"+|;+|\?+|\++|,+|\)+|\(+|\\+|\/+|\*+|\[+|\]+|}+|{+|\|+|_+)''', r' \1 ', text)
text = re.sub(r'\s*\n\s*', ' \n ', text)
text = re.sub(r'[^\S\n]+', ' ', text)
return text.strip()
class XLMTokenizer(PreTrainedTokenizer):
"""
BPE tokenizer for XLM, adapted from OpenAI BPE tokenizer. Peculiarities:
- lower case all inputs
- uses `SpaCy tokenizer <https://spacy.io/api/tokenizer/>`_ and \
`ftfy <https://ftfy.readthedocs.io/en/latest/>`_ for pre-BPE tokenization if they are installed, \
fallback to BERT's BasicTokenizer if not.
- argument ``special_tokens`` and function ``set_special_tokens``, can be used to add additional symbols \
(ex: "__classify__") to a vocabulary.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self, vocab_file, merges_file, unk_token="<unk>", bos_token="<s>",
sep_token="</s>", pad_token="<pad>", cls_token="</s>",
mask_token="<special1>", additional_special_tokens=["<special0>",
"<special1>", "<special2>", "<special3>", "<special4>", "<special5>",
"<special6>", "<special7>", "<special8>", "<special9>"], **kwargs):
super(XLMTokenizer, self).__init__(unk_token=unk_token, bos_token=bos_token,
sep_token=sep_token, pad_token=pad_token,
cls_token=cls_token, mask_token=mask_token,
additional_special_tokens=additional_special_tokens,
**kwargs)
self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
self.max_len_sentences_pair = self.max_len - 3 # take into account special tokens
try:
import ftfy
from spacy.lang.en import English
_nlp = English()
self.nlp = _nlp.Defaults.create_tokenizer(_nlp)
self.fix_text = ftfy.fix_text
except ImportError:
logger.warning("ftfy or spacy is not installed using BERT BasicTokenizer instead of SpaCy & ftfy.")
self.nlp = BasicTokenizer(do_lower_case=True)
self.fix_text = None
self.encoder = json.load(open(vocab_file, encoding="utf-8"))
self.decoder = {v:k for k,v in self.encoder.items()}
merges = open(merges_file, encoding='utf-8').read().split('\n')[:-1]
merges = [tuple(merge.split()[:2]) for merge in merges]
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {}
@property
def vocab_size(self):
return len(self.encoder)
def bpe(self, token):
word = tuple(token[:-1]) + (token[-1] + '</w>',)
if token in self.cache:
return self.cache[token]
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
if word == '\n </w>':
word = '\n</w>'
self.cache[token] = word
return word
def _tokenize(self, text):
""" Tokenize a string. """
split_tokens = []
if self.fix_text is None:
# Using BERT's BasicTokenizer
text = self.nlp.tokenize(text)
for token in text:
split_tokens.extend([t for t in self.bpe(token).split(' ')])
else:
# Using SpaCy & ftfy (original tokenization process of OpenAI GPT)
text = self.nlp(text_standardize(self.fix_text(text)))
for token in text:
split_tokens.extend([t for t in self.bpe(token.text.lower()).split(' ')])
return split_tokens
def _convert_token_to_id(self, token):
""" Converts a token (str/unicode) in an id using the vocab. """
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (string/unicode) using the vocab."""
return self.decoder.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
""" Converts a sequence of tokens (string) in a single string. """
out_string = ''.join(tokens).replace('</w>', ' ').strip()
return out_string
def add_special_tokens_single_sentence(self, token_ids):
"""
Adds special tokens to a sequence for sequence classification tasks.
An XLM sequence has the following format: [CLS] X [SEP]
"""
return [self._convert_token_to_id(self.cls_token)] + token_ids + [self._convert_token_to_id(self.sep_token)]
def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):
"""
Adds special tokens to a sequence pair for sequence classification tasks.
An XLM sequence pair has the following format: [CLS] A [SEP] B [SEP]
"""
sep = [self._convert_token_to_id(self.sep_token)]
cls = [self._convert_token_to_id(self.cls_token)]
return cls + token_ids_0 + sep + token_ids_1 + sep
def save_vocabulary(self, save_directory):
"""Save the tokenizer vocabulary and merge files to a directory."""
if not os.path.isdir(save_directory):
logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
return
vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file'])
merge_file = os.path.join(save_directory, VOCAB_FILES_NAMES['merges_file'])
with open(vocab_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.encoder, ensure_ascii=False))
index = 0
with open(merge_file, "w", encoding="utf-8") as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving vocabulary to {}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!".format(merge_file))
index = token_index
writer.write(' '.join(bpe_tokens) + u'\n')
index += 1
return vocab_file, merge_file
| [] |
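A small worked example of the symbol-pair step the BPE code above starts from; `get_pairs` below is the same function defined in the file, repeated so the snippet stands alone:

```python
# The word "lower" with the end-of-word marker appended, as in bpe():
word = tuple("lower"[:-1]) + ("lower"[-1] + "</w>",)
# word == ('l', 'o', 'w', 'e', 'r</w>')

def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

print(get_pairs(word))
# {('l', 'o'), ('o', 'w'), ('w', 'e'), ('e', 'r</w>')}  (a set, so order may vary)
```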
2024-01-10 | youkefan18/sales-bot | src~sales_bot~embedding~chinese_embedding.py | from threading import Lock
from langchain.embeddings import HuggingFaceEmbeddings
class ChineseEmbedding():
"""
Singleton embedding instance for Chinese embedding.
Model chosen according to MTEB [benchmarking](https://huggingface.co/spaces/mteb/leaderboard).
[Issues] No sentence-transformers model found with name sentence_transformers\infgrad_stella-large-zh-v2. Creating a new one with MEAN pooling.
[Solution](https://huggingface.co/GanymedeNil/text2vec-large-chinese/discussions/10)
"""
#For thread safe singleton example see [here](https://refactoring.guru/design-patterns/singleton/python/example#example-1)
_instance = None
_lock: Lock = Lock()
_model_name = "infgrad/stella-large-zh-v2"
_embeddings = HuggingFaceEmbeddings(model_name=_model_name)
@property
def embeddings(self):
return self._embeddings
def __new__(cls):
with cls._lock:
if cls._instance is None:
cls._instance = super().__new__(cls)
return cls._instance
if __name__ == "__main__":
print(ChineseEmbedding().embeddings.embed_query("Test sentence for embedding")) | [] |
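A short sketch demonstrating the singleton behaviour the class above implements; it assumes the repo's `src/sales_bot` layout is on the path and that the model can be downloaded from the Hugging Face hub:

```python
from embedding.chinese_embedding import ChineseEmbedding

# Both calls return the same instance; the HuggingFace model itself is created
# once, at class-definition time, since _embeddings is a class attribute.
a = ChineseEmbedding()
b = ChineseEmbedding()
assert a is b

vec = a.embeddings.embed_query("空调的能耗等级是多少?")
print(len(vec))  # dimensionality of the stella-large-zh-v2 embedding
```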
2024-01-10 | youkefan18/sales-bot | src~sales_bot~sales_chatbot.py |
import gradio as gr
from chains import SalesChain
from langchain.memory import ConversationBufferMemory
def initialize_sales_bot(vector_store_dir: str="electronic_devices_sales_qa"):
global SALES_BOT
SALES_BOT = SalesChain(memory=ConversationBufferMemory(memory_key="chat_history"))
return SALES_BOT
def sales_chat(message, history):
print(f"[message]{message}")
print(f"[history]{history}")
    # TODO: read this from the command-line arguments
enable_chat = True
ans = SALES_BOT.agent.run({"input": message})
    # If retrieval returned results, or large-model chat mode is enabled,
    # return the result assembled by the RetrievalQA combine_documents_chain
# if ans["source_documents"] or enable_chat:
# print(f"[result]{ans['result']}")
# print(f"[source_documents]{ans['source_documents']}")
# return ans["result"]
    # # Otherwise, fall back to a canned reply
# else:
# return "这个问题我要问问领导"
return ans
def launch_gradio():
demo = gr.ChatInterface(
fn=sales_chat,
title="电器销售",
# retry_btn=None,
# undo_btn=None,
chatbot=gr.Chatbot(height=600),
)
demo.launch(share=True, server_name="localhost")
if __name__ == "__main__":
    # Initialize the electronics sales bot
initialize_sales_bot()
    # Launch the Gradio service
launch_gradio()
| [] |
2024-01-10 | youkefan18/sales-bot | src~sales_bot~chains~sales_chain.py | import os
import sys
from threading import Lock
from typing import Any, List, Optional
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
from langchain.chains import LLMChain, RetrievalQA
from langchain.llms.base import LLM
from langchain.memory import ConversationBufferMemory
from langchain.schema import BaseMemory
from langchain.utilities import SerpAPIWrapper
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from config.config import get_settings
from langchain.pydantic_v1 import BaseModel, Field
from langchain_model.api2d_model import Api2dLLM
from vectordbs.faissdb import FaissDb
from vectordbs.vectordb import VectorDb
class CustomerQuestion(BaseModel):
#matching with the key in LLMChain
query: str = Field()
class SalesChain:
#For thread safe singleton example see [here](https://refactoring.guru/design-patterns/singleton/python/example#example-1)
_instance = None
_lock: Lock = Lock()
_tools: List[Tool]
_agent: AgentExecutor
def __new__(cls,*args, **kwargs):
        # The overridden __new__ needs to accept *args, **kwargs so that parameters can be passed through to __init__
with cls._lock:
if cls._instance is None:
"""
                Here super() refers to `object`, whose __new__ cannot take more than the class argument.
                Calling object.__new__() with extra positional arguments therefore raises an exception.
See [stackoverflow](https://stackoverflow.com/questions/59217884/new-method-giving-error-object-new-takes-exactly-one-argument-the-typ)
"""
cls._instance = super().__new__(cls) #
return cls._instance
def __init__(self, tools: Optional[List[Tool]] = None, memory: Optional[BaseMemory] = None):
vectordb = FaissDb()
llm = Api2dLLM(temperature=0)
if tools is not None:
self._tools = tools
else:
self._tools = self._default_tools(vectordb, llm)
#TODO Fix exception when using vectordb's memory
memory = memory if memory is not None else vectordb.createMemory()
self._agent = self._create_agent(memory, self._tools, llm)
def _default_tools(self, vectordb: VectorDb, llm: LLM) -> List[Tool]:
web_tool = Tool.from_function(
#TODO Improve web search by switching to google shop searching with more input params
func=SerpAPIWrapper(params = {
"engine": "google",
"location": "Austin, Texas, United States",
"google_domain": "google.com",
"gl": "cn",
"hl": "zh-cn",
"tbm": "shop"
}).run,
name="Web Search",
description="""useful for when you could not find proper answer from 'VectorDb QA Search' tool \n
and need to answer questions about product specifications and market price."""
# coroutine= ... <- you can specify an async method if desired as well
)
vectorqa_chain = RetrievalQA.from_chain_type(
llm,
retriever=vectordb.db.as_retriever(
search_type="similarity_score_threshold",
search_kwargs={"score_threshold": 0.8, "k": 1}
)
)
vectorqa_tool = Tool.from_function(
func=vectorqa_chain.run,
name="VectorDb QA Search",
description=" useful for searching existing electronic device sales questions and answers. you should always use this first.", #Emphasize on priority
#args_schema=CustomerQuestion
# coroutine= ... <- you can specify an async method if desired as well
)
return [vectorqa_tool, web_tool]
def _create_agent(self, memory: BaseMemory, tools: List[Tool], llm: LLM) -> AgentExecutor:
#prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools: """
prefix = """你是一个专业而有礼貌的的电器销售人工智能体,优先使用"VectorDb QA Search"工具(注意不更改input的问题),尽可能回答问题:"""
suffix = """开始!"
{chat_history}
客户问题: {input}
{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=["input", "chat_history", "agent_scratchpad"],
)
llm_chain = LLMChain(llm=llm, prompt=prompt)
agent = ZeroShotAgent(llm_chain=llm_chain)
return AgentExecutor.from_agent_and_tools(
agent=agent, tools=tools, verbose=True, memory=memory,
            # `I now know the final answer and can provide it to the customer.` is causing an LLM output parsing issue.
            handle_parsing_errors="Print out LLM output, try parsing it and make sure it conforms!"  # This helps to reduce erroneous CoT
)
@property
def agent(self):
return self._agent
if __name__ == "__main__":
#TODO Fix "Observation: Invalid or incomplete response" causing infinit looping on ReAct
"""Reference:
[Different language in instructions](https://github.com/langchain-ai/langchain/issues/8867)
[Missing Action after Thought](https://github.com/langchain-ai/langchain/issues/12689)
[Handle parsing errors in langchain](https://python.langchain.com/docs/modules/agents/how_to/handle_parsing_errors)
[Wierd chain of thoughts](https://github.com/langchain-ai/langchain/issues/2840)
[Observation: Invalid or incomplete response](https://github.com/langchain-ai/langchain/issues/9381)
"""
    #TODO Fix failure to call VectorDb QA Search with the exact input unchanged. (Prevent GPT from summarizing the user's question)
text = SalesChain(memory=ConversationBufferMemory(memory_key="chat_history")).agent.run(input = "A100 GPU卡的价格多少?")
print(text) | [
"chat_history",
"input",
"agent_scratchpad"
] |
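One detail worth calling out in the agent construction above: the `{chat_history}` placeholder in the suffix must match the `memory_key` of the memory handed to the executor. A minimal sketch of that alignment; the echo tool is a stand-in, not part of the project:

```python
# Sketch only: a dummy tool so ZeroShotAgent.create_prompt has something to render.
from langchain.agents import Tool, ZeroShotAgent
from langchain.memory import ConversationBufferMemory

echo_tool = Tool.from_function(func=lambda q: q, name="Echo", description="Echoes the input.")

prompt = ZeroShotAgent.create_prompt(
    [echo_tool],
    prefix="Answer the question using the tools:",
    suffix="{chat_history}\nQuestion: {input}\n{agent_scratchpad}",
    input_variables=["input", "chat_history", "agent_scratchpad"],
)
memory = ConversationBufferMemory(memory_key="chat_history")

# The memory key must appear among the prompt's input variables.
assert memory.memory_key in prompt.input_variables
```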
2024-01-10 | youkefan18/sales-bot | src~sales_bot~langchain_model~api2d_model.py | from typing import (
AbstractSet,
Any,
Collection,
Dict,
List,
Literal,
Mapping,
Optional,
Tuple,
Union,
)
import requests
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.pydantic_v1 import Field, root_validator
from langchain.utils import get_from_dict_or_env
from utils import LOG
class Api2dLLM(LLM):
# Instead of extending BaseOpenAI, subclassing LLM makes it easy to customize "_call"
# For BaseOpenAI, a proper client is needed and an override of __call__ or _generate might be needed
model_name: str = Field(default="gpt-3.5-turbo", alias="model")
deployment_name: str = "gpt-3.5-turbo"
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
max_tokens: int = 256
"""The maximum number of tokens to generate in the completion.
-1 returns as many tokens as possible given the prompt and
the models maximal context size."""
top_p: float = 1
"""Total probability mass of tokens to consider at each step."""
frequency_penalty: float = 0
"""Penalizes repeated tokens according to frequency."""
presence_penalty: float = 0
"""Penalizes repeated tokens."""
n: int = 1
"""How many completions to generate for each prompt."""
best_of: int = 1
"""Generates best_of completions server-side and returns the "best"."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
openai_api_key: Optional[str] = None
openai_api_base: str = "https://oa.api2d.net"
openai_api_completion: str = "/v1/completions"
openai_api_chatcompletion: str = "/v1/chat/completions"
openai_organization: Optional[str] = None
# to support explicit proxy for OpenAI
openai_proxy: Optional[str] = None
batch_size: int = 20
"""Batch size to use when passing multiple documents to generate."""
request_timeout: Optional[Union[float, Tuple[float, float]]] = None
"""Timeout for requests to OpenAI completion API. Default is 600 seconds."""
logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict)
"""Adjust the probability of specific tokens being generated."""
max_retries: int = 6
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""
allowed_special: Union[Literal["all"], AbstractSet[str]] = set()
"""Set of special tokens that are allowed。"""
disallowed_special: Union[Literal["all"], Collection[str]] = "all"
"""Set of special tokens that are not allowed。"""
tiktoken_model_name: Optional[str] = None
stop: Optional[List[str]] = None
class Config():
"""Configuration for this pydantic object."""
allow_population_by_field_name = True
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
values["openai_api_key"] = get_from_dict_or_env(values, "openai_api_key", "API2D_OPENAI_API_KEY")
values["openai_api_base"] = get_from_dict_or_env(values, "openai_api_base", "API2D_OPENAI_API_BASE")
values["openai_api_completion"] = get_from_dict_or_env(values, "openai_api_completion", "API2D_OPENAI_API_COMPLETION")
values["openai_api_chatcompletion"] = get_from_dict_or_env(values, "openai_api_chatcompletion", "API2D_OPENAI_API_CHAT_COMPLETION")
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling AI21 API."""
return {
"openai_api_base": self.openai_api_base,
"openai_api_completion": self.openai_api_completion,
"openai_api_chatcompletion": self.openai_api_chatcompletion,
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"top_p": self.top_p,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"n": self.n,
"request_timeout": self.request_timeout,
"logit_bias": self.logit_bias,
}
# @property
# def _invocation_params(self) -> Dict[str, Any]:
# openai_params = {
# "engine": self.deployment_name
# }
# return {**openai_params, **super()._invocation_params}
@property
def _llm_type(self) -> str:
return "api2d"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
if self.stop is not None and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop is not None:
stop = self.stop
elif stop is None:
stop = []
params = {**self._default_params, **kwargs}
#completion json
is_gpt3_5: bool = self.model_name == "gpt-3.5-turbo"
input = {"prompt": prompt, "stopSequences": stop, **params}
url = f"{self.openai_api_base}/{self.openai_api_completion}"
if is_gpt3_5:
input = {
"model": self.model_name,
"messages": [
{"role": "user", "content": prompt}
]
}
url = f"{self.openai_api_base}/{self.openai_api_chatcompletion}"
response = requests.post(
url=url,
headers={"Authorization": f"Bearer {self.openai_api_key}"},
json=input,
)
if response.status_code != 200:
optional_detail = response.json().get("error")
raise ValueError(
f"Api2d call failed with status code {response.status_code}."
f" Details: {optional_detail}"
)
#return response.json()["completions"][0]["data"]["text"]
return response.json()["choices"][0]["message"]["content"].strip() if is_gpt3_5 \
else response.json()["choices"][0]["text"].strip()
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model_name}, **self._default_params}
if __name__ == "__main__":
import os
import dotenv
env_file = f'{os.getenv("ENVIRONMENT", "dev")}.env'
dotenv.load_dotenv(dotenv_path=f'{env_file}')
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
llm = Api2dLLM()
prompt = PromptTemplate(
input_variables=["product_desc"],
template="给一个{product_desc}的系统取10个符合特性,吸引眼球的产品名字,给出完整名称",
)
#llm("which year is this year?")
chain = LLMChain(llm=llm, prompt=prompt)
print(chain.run({
'product_desc': "能通过多种文件访问协议访问如s3,NFS,Samba访问不同生物实验室设备数据的,方便用户访问并分享实验室文档及图片系统的,含有相关数据dashboard的"
})) | [
"给一个{product_desc}的系统取10个符合特性,吸引眼球的产品名字,给出完整名称",
"product_desc"
] |
2024-01-10 | youkefan18/sales-bot | src~sales_bot~vectordbs~faissdb.py | import os
import sys
from abc import ABC, abstractmethod
import faiss
import numpy as np
from langchain.docstore import InMemoryDocstore
from langchain.memory import VectorStoreRetrieverMemory
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.text_splitter import CharacterTextSplitter, TextSplitter
from langchain.vectorstores import FAISS
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from embedding.chinese_embedding import ChineseEmbedding
from vectordbs.vectordb import VectorDb
def score_normalizer(val: float) -> float:
"""_Issue with similarity score threshold_
    The snippet below does not work for HuggingFaceEmbeddings because the similarity score is not scaled to [0, 1].
See [issue](https://github.com/langchain-ai/langchain/issues/4086)
retriever=FaissDb().db.as_retriever(
search_type="similarity_score_threshold",
search_kwargs={"score_threshold": 0.8}
)
    Solve this by initializing FAISS with relevance_score_fn=score_normalizer,
    or pass the same param to FAISS.from_documents().
def score_normalizer(val: float) -> float:
return 1 - 1 / (1 + np.exp(val))
"""
return 1 - 1 / (1 + np.exp(val))
class FaissDb(VectorDb):
"""_summary_
The filename is not allowed to be named as faiss.py,
otherwise it will pop up strange error like 'module 'faiss' has no attribute 'IndexFlatL2''
See [issue](https://github.com/facebookresearch/faiss/issues/1195)
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
#override
def _initDb(self, dbfile: str, embedding: Embeddings, rebuild: bool) -> VectorStore:
_db: FAISS = None
if not os.path.exists(dbfile.replace(".txt", ".db")) or rebuild:
try:
with open(dbfile, 'r', encoding='utf-8-sig') as f:
docs = f.read()
docs = self._transformer.create_documents([docs])
_db = FAISS.from_documents(docs, embedding, relevance_score_fn=score_normalizer)
_db.save_local(dbfile.replace(".txt", ".db"))
return _db
except FileNotFoundError as e:
print(e)
except Exception as e:
print(e)
else:
_db = FAISS.load_local(dbfile.replace(".txt", ".db"), embedding, relevance_score_fn=score_normalizer)
return _db
#override
def createMemory(self) -> VectorStoreRetrieverMemory:
embedding_size = 1536 # Dimensions of the OpenAIEmbeddings
index = faiss.IndexFlatL2(embedding_size)
embedding_fn = self._embedding
vectorstore = FAISS(embedding_fn, index, InMemoryDocstore({}), {})
retriever = vectorstore.as_retriever(search_kwargs=dict(k=1))
memory = VectorStoreRetrieverMemory(retriever=retriever)
return memory
if __name__ == "__main__":
v = FaissDb()
retriever = v.db.as_retriever(
search_type="similarity_score_threshold",
search_kwargs={"score_threshold": 0.8}
)
query = "你们价格怎么这么贵,是不是在坑人?"
docs = retriever.get_relevant_documents(query)
for doc in docs:
print(doc.page_content + "\n") | [] |
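A small numeric illustration of `score_normalizer` above: the expression 1 - 1/(1 + e^x) is algebraically the logistic sigmoid, so any real-valued raw score is squashed into (0, 1), which is what the `similarity_score_threshold` search type expects to compare against:

```python
import numpy as np

def score_normalizer(val: float) -> float:
    # Same function as defined in the module above.
    return 1 - 1 / (1 + np.exp(val))

# 1 - 1/(1 + e^x) equals e^x / (1 + e^x), the logistic sigmoid,
# so every raw score lands strictly between 0 and 1.
for raw in (-5.0, 0.0, 0.5, 2.0, 10.0):
    print(raw, round(float(score_normalizer(raw)), 3))
# -5.0 -> 0.007, 0.0 -> 0.5, 0.5 -> 0.622, 2.0 -> 0.881, 10.0 -> 1.0
```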
2024-01-10 | youkefan18/sales-bot | src~sales_bot~data_generator~qa_generator.py |
import os
import sys
from langchain.llms.base import LLM
from langchain.prompts import BasePromptTemplate, PromptTemplate
from pydantic import Field
from pydantic.dataclasses import dataclass
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from langchain_model import Api2dLLM
def promptFactory() -> BasePromptTemplate:
return PromptTemplate.from_template(
"""你是一个 {role}. \n
你在训练你所在的行业领域内的销售新人,请提供
{num_qa} 个销售话术 Q&A 的例子. \n
该Q&A例子的对话场景为{scenario}. \n
请以如下格式提供例子: \n
序号, 仅数字.
[客户问题]
[销售回答]
"""
)
def modelFactory() -> LLM:
return Api2dLLM()
@dataclass
class QAGenerator():
"""
Generate QA pairs based on domain industry of sales man and few shots.
For good example shots in electronic device sales, refer to [sales skills](https://zhuanlan.zhihu.com/p/357487465)
"""
_prompt: BasePromptTemplate = Field(default_factory=promptFactory)
_model: LLM = Field(default_factory=modelFactory)
@property
def model(self) -> LLM:
return self._model
@property
def prompt(self) -> BasePromptTemplate:
return self._prompt
def initQA(self, output: str):
from langchain.chains import LLMChain
role = "在电器行业的资深销售人员"
scenarios = [
"客户与销售在讨价还价",
"客户在询问电器产品细节",
"客户在粗鲁地向销售抱怨"
]
qa = QAGenerator()
num_qa = 10
chain = LLMChain(llm=qa.model, prompt=qa.prompt)
result = [chain.run(role=role, num_qa=num_qa, scenario=s) for s in scenarios]
with open(output, 'w', encoding='utf-8-sig') as f:
f.writelines(result)
if __name__ == "__main__":
qa = QAGenerator()
qa.initQA("resources/electronic_devices_sales_qa.txt") | [
"你是一个 {role}. \n\n 你在训练你所在的行业领域内的销售新人,请提供\n {num_qa} 个销售话术 Q&A 的例子. \n\n 该Q&A例子的对话场景为{scenario}. \n\n 请以如下格式提供例子: \n\n 序号, 仅数字.\n [客户问题]\n [销售回答]\n "
] |
2024-01-10 | lperezmo/embeddings-extraction | context-augmented-query.py | #--------------------------------------------------------#
# Import Libraries
#--------------------------------------------------------#
import os
import openai
import numpy as np
from time import sleep
from sklearn.metrics.pairwise import cosine_similarity
#--------------------------------------------------------#
# OpenAI API Key (optional: set as environment variable)
#--------------------------------------------------------#
openai.api_key = os.getenv('OPENAI_API_KEY')
#--------------------------------------------------------#
# Define function to get top-k results
#--------------------------------------------------------#
def get_top_k_results_text(df, query_text, embed_model, n=3):
# create embeddings (try-except added to avoid RateLimitError)
    # Retry up to max_retries times before giving up
max_retries = 2
retry_count = 0
done = False
while not done and retry_count < max_retries:
try:
res = openai.Embedding.create(input=query_text, engine=embed_model)
done = True
except Exception as e:
# print(f"Error creating embeddings for batch {i}: {e}")
retry_count += 1
sleep(5)
query_embedding = res['data'][0]['embedding']
# Compute cosine similarity
similarities = cosine_similarity([query_embedding], list(df['embedding']))
# Find top-k indices and metadata
top_k_indices = np.argsort(similarities[0])[-n:][::-1]
top_k_results = df.iloc[top_k_indices]
# Join the text of the top-k results
joined_text = ' '.join(list(top_k_results['text']))
return joined_text
#--------------------------------------------------------#
# Context-Augmented Query
#--------------------------------------------------------#
def retrieve(query, df, limit_of_context = 3750, embed_model = 'text-embedding-ada-002'):
"""
Retrieve relevant contexts from the dataset and build a prompt for the question answering model.
Parameters
----------
query : str
The query to answer.
df : pandas.DataFrame
The DataFrame containing the embedding vectors and metadata.
limit_of_context : int
The maximum number of characters to use for the context.
embed_model : str
The embedding model to use.
Returns
-------
prompt : str
The prompt to use for the question answering model.
"""
# get relevant contexts
contexts = get_top_k_results_text(df, query, embed_model=embed_model, n=3)
# Limit the number of characters
contexts = contexts[:limit_of_context]
# build our prompt with the retrieved contexts included
prompt = (
f"Answer the question based on the context below.\n\n"+
f"Context:\n {contexts}\n\n"+f"Question: {query}\nAnswer:"
)
return prompt
| [
"Answer the question based on the context below.\n\nContext:\n PLACEHOLDER\n\nQuestion: PLACEHOLDER\nAnswer:"
] |
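A hedged end-to-end sketch of how `retrieve` above would be used; the DataFrame layout (an `embedding` column of vectors plus a `text` column), the pickle path, and the completion model name are assumptions, not part of the script:

```python
# Sketch only: df must already hold 'embedding' (list of floats) and 'text' columns,
# e.g. produced by a separate embedding-extraction step; "embeddings.pkl" is a placeholder.
import pandas as pd

df = pd.read_pickle("embeddings.pkl")

query = "What are the key findings of the report?"
prompt = retrieve(query, df, limit_of_context=3750)

completion = openai.Completion.create(
    engine="text-davinci-003",
    prompt=prompt,
    max_tokens=256,
    temperature=0,
)
print(completion["choices"][0]["text"].strip())
```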
2024-01-10 | tamtam0/datascience_portfolio | 4_IST_736_Text_Mining~IST_736_Project~Final736Project_ALuina_BMadhavan_DChang_TTamilmani_Group_2~Code~TopicModeling.py | #!/usr/bin/env python
# coding: utf-8
# In[2]:
import re
import tweepy
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd
# In[3]:
from wordcloud import WordCloud
import matplotlib.pyplot as plt
def plotWordcloud(tweet_list):
wordcloud = WordCloud(max_font_size=1000, max_words=500, background_color="white",normalize_plurals=False).generate(' '.join(tweet_list))
plt.figure(figsize=(15,8))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
#plt.savefig("WordCloud_before"+".png", bbox_inches='tight')
# In[38]:
df_t= pd.read_csv("Data/labeled/Twitter_Sentiment.csv")
df_r = pd.read_csv("Data/labeled/Reddit_Sentiment.csv")
t=df_t['text']
r=df_r['body']
total=pd.concat([t, r], ignore_index=True)
total=total.to_frame()
total.count()
#plotWordcloud(total.array)
# In[42]:
total=total.dropna()
total.count()
total[0]
# In[43]:
import spacy
#spacy.download('en_core_web_sm')
#spacy.load('en_core_web_sm')
from spacy.lang.en import English
parser = English()
def tokenize(text):
lda_tokens = []
tokens = parser(text)
for token in tokens:
if token.orth_.isspace():
continue
elif token.like_url:
lda_tokens.append('URL')
elif token.orth_.startswith('@'):
lda_tokens.append('SCREEN_NAME')
else:
lda_tokens.append(token.lower_)
return lda_tokens
import nltk
#nltk.download('wordnet')
#nltk.download('stopwords')
from nltk.corpus import wordnet as wn
def get_lemma(word):
lemma = wn.morphy(word)
if lemma is None:
return word
else:
return lemma
from nltk.stem.wordnet import WordNetLemmatizer
def get_lemma2(word):
return WordNetLemmatizer().lemmatize(word)
en_stop = set(nltk.corpus.stopwords.words('english'))
def prepare_text_for_lda(text):
tokens = tokenize(text)
tokens = [token for token in tokens if len(token) > 2]
tokens = [token for token in tokens if token not in en_stop]
tokens = [get_lemma(token) for token in tokens]
return tokens
import random
text_data = []
for line in total[0]:
tokens = prepare_text_for_lda(line)
text_data.append(tokens)
# In[140]:
import gensim
from gensim import corpora, models
dictionary = corpora.Dictionary(text_data)
corpus = [dictionary.doc2bow(text) for text in text_data]
[[(dictionary[i], freq) for i, freq in doc] for doc in corpus[:1]]
# In[141]:
NUM_TOPICS = 125
#ldamodel = gensim.models.ldamulticore.LdaMulticore(corpus, num_topics = NUM_TOPICS, id2word=dictionary,
# passes=50,minimum_probability=0.5,
# per_word_topics=False)
#ldamodel.save('model5.gensim')
ldamodel = gensim.models.LdaModel(corpus, num_topics = NUM_TOPICS, id2word=dictionary,
passes=50,
per_word_topics=False,
alpha='auto')
from gensim.models import CoherenceModel
# Compute Perplexity
print('\nPerplexity: ', ldamodel.log_perplexity(corpus)) # a measure of how good the model is. lower the better.
# Compute Coherence Score
coherence_model_lda = CoherenceModel(model=ldamodel, texts=text_data, dictionary=dictionary, coherence='c_v')
coherence_lda = coherence_model_lda.get_coherence()
print('\nCoherence Score: ', coherence_lda)
# In[182]:
topics = ldamodel.show_topics(formatted=False, num_topics=NUM_TOPICS,num_words= 10)
for topic in topics:
print(topic[0],[ t[0] for t in topic[1]])
#ldamodel.printTopics(NUM_TOPICS)
#import pandas as pd
#pd.DataFrame(ldamodel.get_document_topics(corpus))
# In[62]:
from gensim.models import CoherenceModel
# Compute Perplexity
print('Perplexity: ', ldamodel.log_perplexity(corpus)) # a measure of how good the model is. lower the better.
# Compute Coherence Score
coherence_model_lda = CoherenceModel(model=ldamodel, texts=text_data, dictionary=dictionary, coherence='c_v')
coherence_lda = coherence_model_lda.get_coherence()
print(' Coherence Score: ', coherence_lda,'\n')
# In[144]:
import pyLDAvis.gensim
import pyLDAvis
pyLDAvis.enable_notebook()
panel_genism = pyLDAvis.gensim.prepare(ldamodel, corpus, dictionary, mds='tsne')
pyLDAvis.display(panel_genism)
# In[177]:
topics=[]
for i,c in enumerate(corpus):
topic=ldamodel[c]
topics.append(topic)
topics
topics_max=[]
topics_max2=[]
for i in topics:
d=dict(i)
topics_max.append(max(d, key=d.get))
topics_max
#corpus.shape
# In[178]:
t1=df_t[["created_at","text","label","score"]].copy()
t1['source']="twitter"
t1['created_at']=t1['created_at'].str.slice(stop=10)
t2=df_r[["timestamp","body","label","score"]].copy()
t2=t2.rename(columns={"timestamp":"created_at","body":"text"})
t2['source']="reddit"
t2['created_at']=t2['created_at'].str.slice(stop=10)
t=pd.concat([t1, t2], ignore_index=True)
# In[179]:
t.shape
t=t.dropna()
t.shape
# In[181]:
t['topic']=topics_max
t.to_csv("topics.csv")
# In[176]:
# In[92]:
from sklearn.feature_extraction.text import CountVectorizer
count_vectorizer= CountVectorizer(input='content')
text_data_v = [' '.join(text) for text in text_data]
dtm_c= count_vectorizer.fit_transform(text_data_v)
# In[95]:
ldamodel_sk = LatentDirichletAllocation(n_components=NUM_TOPICS, max_iter=50, learning_method='online')
ldamodel_sk_result = ldamodel_sk.fit_transform(dtm_c)
# In[96]:
import pyLDAvis.sklearn
import pyLDAvis
pyLDAvis.enable_notebook()
panel = pyLDAvis.sklearn.prepare(ldamodel_sk, dtm_c, count_vectorizer, mds='tsne')
pyLDAvis.display(panel)
# In[89]:
for n in range(ldamodel_sk_result.shape[0]):
topic_most_pr = ldamodel_sk_result[n].argmax()
print("doc: {} topic: {}\n".format(n,topic_most_pr))
# In[50]:
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer= TfidfVectorizer(input='content')
text_data_v = [' '.join(text) for text in text_data]
dtm= vectorizer.fit_transform(text_data_v)
# In[8]:
# In[90]:
lda_model_2 = LatentDirichletAllocation(n_components=NUM_TOPICS, max_iter=50, learning_method='online')
lda_model_2_result = lda_model_2.fit_transform(dtm)
# In[91]:
for n in range(lda_model_2_result.shape[0]):
topic_most_pr = lda_model_2_result[n].argmax()
print("doc: {} topic: {}\n".format(n,topic_most_pr))
# In[81]:
lda_model_2_result[0:0]
# ## import pyLDAvis.sklearn
# import pyLDAvis
# ## conda install -c conda-forge pyldavis
# #pyLDAvis.enable_notebook() ## not using notebook
# pyLDAvis.enable_notebook()
# panel = pyLDAvis.sklearn.prepare(lda_model_2, dtm, vectorizer, mds='tsne')
# ### !!!!!!! Important - you must interrupt and close the kernet in Spyder to end
# ## In other words - press the red square and then close the small red x to close
# ## the Console
# pyLDAvis.display(panel)
# In[25]:
from matplotlib import pyplot as plt
from wordcloud import WordCloud, STOPWORDS
import matplotlib.colors as mcolors
cols = [color for name, color in mcolors.TABLEAU_COLORS.items()]
cloud = WordCloud(stopwords=STOPWORDS,
background_color='white',
width=3000,
height=2000,
max_words=10,
colormap='tab10',
color_func=lambda *args, **kwargs: cols[i],
prefer_horizontal=1.0)
topics = ldamodel.show_topics(formatted=False,num_topics=13)
fig, axes = plt.subplots(7, 2, figsize=(16,24), sharex=True, sharey=True)
for i, ax in enumerate(axes.flatten()):
fig.add_subplot(ax)
topic_words = dict(topics[i][1])
cloud.generate_from_frequencies(topic_words, max_font_size=300)
plt.gca().imshow(cloud)
plt.gca().set_title('Topic ' + str(i), fontdict=dict(size=16))
plt.gca().axis('off')
plt.subplots_adjust(wspace=0, hspace=0)
plt.axis('off')
plt.margins(x=0, y=0)
plt.tight_layout()
plt.show()
# In[ ]:
| [] |
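A short sketch of scoring a new document against the trained gensim model above; it reuses `prepare_text_for_lda`, `dictionary`, and `ldamodel` from the script, and the example sentence is only illustrative:

```python
# Assign the dominant topic to one new document using the objects built above.
new_doc = "The vaccine rollout was faster than expected in rural areas."
bow = dictionary.doc2bow(prepare_text_for_lda(new_doc))

doc_topics = ldamodel.get_document_topics(bow)        # [(topic_id, prob), ...]
dominant_topic, prob = max(doc_topics, key=lambda t: t[1])
print(dominant_topic, round(prob, 3))
print(ldamodel.show_topic(dominant_topic, topn=10))   # top words for that topic
```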
2024-01-10 | satwik121/chabot_db | sat~dp_inv.py | import streamlit as st
# from invoice_backend import *
from PIL import Image
import json
from langchain.llms import OpenAI
from PyPDF2 import PdfReader
import pandas as pd
import re
from langchain.llms.openai import OpenAI
from langchain.prompts import PromptTemplate
import os
import regex as re
# import re
import csv
import json
# import pyodbc
import mysql.connector
from mysql.connector import Error
import streamlit as st
# os.environ["OPENAI_API_KEY"] = "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"
key = st.secrets['key']
def get_pdf_text(pdf_doc):
text = ""
pdf_reader = PdfReader(pdf_doc)
for page in pdf_reader.pages:
text += page.extract_text()
return text
def extract_data(pages_data):
template = '''Extract all following values: invoice no., Date, Shipmode,Ship to, Bill to, Balance Due, Item,
Quantity, Rate, discount ,Shipping, Total,
Order ID from this data{pages}. If the word Discount is not in the page return 0.0 else return the value for it. Note that the output can have multiple records with same invoice number if there are more than 1 row of Items in the invoice.
Expected output : Remove dollar sign if found anywhere in {pages} and return the final answer in this format {{'Invoice no.':1001329,
'Item':'Office Chair', 'Quantity':2, 'Date':'05/01/2022',
'Rate':1100.00, 'Discount' : 220, 'mount':2200.00, 'Sub Total':1980.00, 'Balance Due':1980.00,
'Ship Mode':'First Class','Ship to':'Saltillo, Coahuila,Mexico', 'Order ID':'MX-2012-AH1003082-41251'}}
'''
prompt_template = PromptTemplate(input_variables=['pages'], template=template)
llm = OpenAI(temperature=0.4,openai_api_key=key)
full_response = llm(prompt_template.format(pages=pages_data))
return full_response
def extract_df(extracted_text):
pattern = r'{(.+)}'
match = re.search(pattern, extracted_text, re.DOTALL)
if match:
extracted_data = match.group(1)
# Converting the extracted text to a dictionary
data_dict = eval('{' + extracted_data + '}')
df_temp=pd.DataFrame([data_dict])
df_temp=df_temp.set_index('Order ID').reset_index()
return df_temp
else:
return 'Could Not extract the Data'
def ingest_to_db_sat(df):
# try:
ik = df.to_json(orient='records')
json_string = json.loads(ik)
d1= json_string[0]
# st.write(d1)
import mysql.connector
from mysql.connector import Error
# Establish a database connection
db_connection = mysql.connector.connect(
host="sqldatabase.mysql.database.azure.com",
user="yusuf121",
password="Satwik121",
database="chatbotdb"
)
cursor = db_connection.cursor()
# st.write("here is")
insert_query = """INSERT INTO invoice(`Order ID`, `Invoice no.`, `Date`, `Ship Mode`, `Ship to`, `Bill to`,
`Balance Due`, `Item`, `Quantity`, `Rate`, `Discount`, `Shipping`, `Total`)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
cursor.execute(insert_query, (
d1.get("Order ID", None),
d1.get("Invoice no.", None),
d1.get("Date", None),
d1.get("Ship Mode", None),
d1.get("Ship to", None),
d1.get("Bill to", None),
d1.get("Balance Due", None),
d1.get("Item", None),
d1.get("Quantity", None),
d1.get("Rate", None),
d1.get("Discount", None),
d1.get("Shipping", None),
d1.get("Total", None)
))
db_connection.commit()
return "Ingested Successfully"
# except Error as e:
# st.error(f"Error connecting to the database: {str(e)}")
# return "Failed to connect DB"
#---------------------------------------------------------------------------main----------------------------------------------------------------
def app():
# st.set_page_config(page_title='Invoice Reader',page_icon='.\Data\invoice.png')
with st.container():
col1, col2 = st.columns([0.1, 0.9])
with col1:
st.image('dbot.png', width=75,)
#st.image('./data/2.png', width=300)
with col2:
st.title('Invoice Reader')
with st.sidebar:
# col1, col2, col3 = st.columns(3)
# with col2:
image=Image.open('invoice.png')
new_image = image.resize((200, 200))
st.image(new_image,use_column_width='auto',width=300)
st.header('📜Invoice Reader')
st.write('Use our OpenAI & Langchain powered invoice reader to fetch the data in tabular form, ingest it to your database and download it in just a few steps. ')
st.warning('The results may not be always reliable',icon='🚨')
uploaded_file = st.file_uploader("Upload your document here 👇", type=["pdf"])
file_uploaded=False
if uploaded_file:
raw_data=get_pdf_text(uploaded_file)
extracted_text=extract_data(raw_data)
df=extract_df(extracted_text)
# ik = df.to_json(orient='records')
# json_string = json.loads(ik)
# d1= json_string[0]
# st.write(d1)
if type(df)== pd.core.frame.DataFrame:
ingest_button=st.button('Ingest Data') #comment
st.dataframe(df)
if ingest_button:
x=ingest_to_db_sat(df)
st.markdown(x)
else:
st.write("no pdf detected")
st.markdown(df)
else:
st.markdown('Please Upload an invoice pdf.')
| [
"MX-2012-AH1003082-41251",
"Invoice no.",
"Quantity",
"Balance Due",
"Order ID",
"05/01/2022",
"Sub Total",
"Extract all following values: invoice no., Date, Shipmode,Ship to, Bill to, Balance Due, Item, \n Quantity, Rate, discount ,Shipping, Total,\n Order ID from this data{pages}. If the word Discount is not in the page return 0.0 else return the value for it. Note that the output can have multiple records with same invoice number if there are more than 1 row of Items in the invoice.\n \n Expected output : Remove dollar sign if found anywhere in {pages} and return the final answer in this format {{'Invoice no.':1001329, \n 'Item':'Office Chair', 'Quantity':2, 'Date':'05/01/2022', \n 'Rate':1100.00, 'Discount' : 220, 'mount':2200.00, 'Sub Total':1980.00, 'Balance Due':1980.00,\n 'Ship Mode':'First Class','Ship to':'Saltillo, Coahuila,Mexico', 'Order ID':'MX-2012-AH1003082-41251'}}\n ",
"First Class",
"Ship to",
"Saltillo, Coahuila,Mexico",
"mount",
"Office Chair",
"Ship Mode",
"Discount"
] |
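`extract_df` above parses the model output with `eval`. A hedged alternative sketch of the same parsing step using `ast.literal_eval`, which only accepts Python literals and therefore cannot execute arbitrary code (this is a variant, not the original implementation):

```python
# Drop-in variant of the parsing step in extract_df.
import ast
import re

import pandas as pd

def extract_df_safe(extracted_text: str) -> pd.DataFrame:
    match = re.search(r"{(.+)}", extracted_text, re.DOTALL)
    if not match:
        raise ValueError("Could not extract the data")
    data_dict = ast.literal_eval("{" + match.group(1) + "}")
    return pd.DataFrame([data_dict]).set_index("Order ID").reset_index()
```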
2024-01-10 | satwik121/chabot_db | gpt-clone.py | import streamlit as st
import openai
# Set your OpenAI API key
api_key = "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"
openai.api_key = api_key
# Initialize chat history as an empty list
chat_history = []
# Define a function to interact with the chatbot
def chat_with_bot(prompt):
response = openai.Completion.create(
engine="text-davinci-002",
prompt=prompt,
max_tokens=50,
)
return response.choices[0].text
# Streamlit app title and description
st.title("Chatbot with GPT-3")
st.write("Enter a message, and the chatbot will respond.")
# User input text box
user_input = st.text_input("You:", "")
# Bot response
if st.button("Send"):
if user_input:
prompt = f"You: {user_input}\nBot:"
bot_response = chat_with_bot(prompt)
# Add the user's input and bot's response to chat history
chat_history.append((user_input, bot_response))
st.write("Bot:", bot_response)
else:
st.write("Please enter a message.")
# Display chat history
st.subheader("Chat History")
for user_message, bot_message in chat_history:
st.text(f"You: {user_message}")
st.text(f"Bot: {bot_message}")
# Add a "Quit" button to exit the app
if st.button("Quit"):
st.stop()
| [
"You: PLACEHOLDER\nBot:"
] |
2024-01-10 | satwik121/chabot_db | sat~dp_pdf.py | import streamlit as st
from dotenv import load_dotenv
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings
from langchain.vectorstores import FAISS
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from htmlTemplates import css ,bot_template, user_template
# from langchain.llms import HuggingFaceHub
# import tabula
from io import BytesIO
import streamlit as st
import openai
from streamlit_chat import message
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
SystemMessage,
HumanMessage,
AIMessage
)
# key = "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"
key = st.secrets['key']
if "conversation" not in st.session_state:
st.session_state.conversation = None
if "chat_history" not in st.session_state:
st.session_state.chat_history = None
if "txt" not in st.session_state:
st.session_state.txt = None
# if "tb" not in st.session_state:
# st.session_state.tb = None
# if "agree" not in st.session_state:
# st.session_state.agree = None
if "radio" not in st.session_state:
st.session_state.radio = None
if "im" not in st.session_state:
st.session_state.im = None
def extract_tables_from_pdf(pdf_docs):
extracted_tables = []
if pdf_docs is not None:
# Ensure pdf_docs is a list
if not isinstance(pdf_docs, list):
pdf_docs = [pdf_docs]
for pdf_doc in pdf_docs:
# Use BytesIO to create a file-like object from the uploaded PDF data
pdf_file = BytesIO(pdf_doc.read())
# Use Tabula to extract tables
try:
tables = tabula.read_pdf(pdf_file, pages='all', multiple_tables=True)
#st.write(tables)
extracted_tables.extend(tables)
except Exception as e:
st.error(f"Error extracting tables from PDF: {str(e)}")
return extracted_tables
def get_pdf_text(pdf_docs):
text = ""
for pdf in pdf_docs:
pdf_reader = PdfReader(pdf)
for page in pdf_reader.pages:
text += page.extract_text()
return text
def get_text_chunks(text):
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=1000,
chunk_overlap=200,
length_function=len
)
chunks = text_splitter.split_text(text)
return chunks
def get_vectorstore(text_chunks):
embeddings = OpenAIEmbeddings(openai_api_key=key)
# embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl")
vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
return vectorstore
def get_conversation_chain(vectorstore):
llm = ChatOpenAI(openai_api_key=key)
# llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":0.5, "max_length":512})
memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
conversation_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=vectorstore.as_retriever(),
memory=memory
)
return conversation_chain
def handle_userinput(user_question):
response = st.session_state.conversation({'question': user_question})
st.session_state.chat_history = response['chat_history']
for i, message in enumerate(st.session_state.chat_history):
if i % 2 == 0:
st.write(user_template.replace(
"{{MSG}}", message.content), unsafe_allow_html=True)
else:
st.write(bot_template.replace(
"{{MSG}}", message.content), unsafe_allow_html=True)
def livegpt2(user_input):
chat = ChatOpenAI(temperature=0,openai_api_key=key)
# initialize message history
if "messages" not in st.session_state:
st.session_state.messages = [
SystemMessage(content="You are a helpful assistant.")
]
# handle user input
if user_input:
st.session_state.messages.append(HumanMessage(content=user_input))
with st.spinner("Thinking..."):
response = chat(st.session_state.messages)
st.session_state.messages.append(
AIMessage(content=response.content))
# display message history
messages = st.session_state.get('messages', [])
for i, msg in enumerate(messages[1:]):
if i % 2 == 0:
message(msg.content, is_user=True, key=str(i) + '_user')
else:
message(msg.content, is_user=False, key=str(i) + '_ai')
def pdf():
load_dotenv()
# st.set_page_config(page_title="Docs Miner📚", page_icon=":books:")
st.write(css, unsafe_allow_html=True)
with st.container():
col1, col2 = st.columns([0.2, 0.7])
with col1:
st.image('sat.png', width=300)
with col2:
st.markdown("""
<div style="display: flex; flex-direction: column; align-items: center; justify-content: center; height: 35vh;">
<h1> Welcome to Document Miner📚!</h1>
<!-- Additional content goes here -->
</div>
""", unsafe_allow_html=True)
st.header("Ask Question's about your documents")
if "user_question" not in st.session_state:
st.session_state.user_question = None
st.session_state.user_question = st.text_input("Ask a question about your documents:")
# clear_button = st.button("Clear Input")
# if clear_button:
# # Clear the input field by resetting the session state
# st.session_state.user_question = ""
with st.sidebar:
st.session_state.radio = st.radio("Select Action",["Private Mode","Public Mode"])
if st.session_state.user_question:
if st.session_state.conversation == None :
st.write("Upload The Pdf First")
else :
if st.session_state.radio == "Private Mode" :
handle_userinput(st.session_state.user_question)
elif st.session_state.radio == "Public Mode" :
st.warning("Chatbot in Live Mode",icon="⚠️")
livegpt2(st.session_state.user_question)
else :
st.write("Choose Mode")
with st.sidebar:
st.subheader("Process The Docs Here!")
pdf_docs = st.file_uploader(
"", accept_multiple_files=True)
# st.write("pdf_docs")
# st.write(pdf_docs)
if st.button("Process"):
if len(pdf_docs) == 0:
st.write("🤖Upload the Pdf First📑")
else:
with st.spinner("Processing"):
#get tables
# st.session_state.tb = extract_tables_from_pdf(pdf_docs)
# get pdf text
raw_text = get_pdf_text(pdf_docs)
st.session_state.txt = raw_text
st.write(raw_text)
# get the text chunks
text_chunks = get_text_chunks(raw_text)
# create vector store
vectorstore = get_vectorstore(text_chunks)
# create conversation chain
st.session_state.conversation = get_conversation_chain(vectorstore)
# st.session_state.radio = st.radio("Select Action",["Get Tables","Get Data"])
if __name__ == '__main__':
pdf() | [
"You are a helpful assistant."
] |
2024-01-10 | satwik121/chabot_db | sat~dp_rcpt.py | import streamlit as st
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import DocumentAnalysisClient
import json
import mysql.connector
from mysql.connector import Error
import pandas as pd
import os
import openai
from io import BytesIO
from PIL import Image
# import config
# api_key = os.getenv("api_key")
# endpoint = os.getenv("endpoint")
api_key = st.secrets['api_key']
endpoint = st.secrets['endpoint']
# api_key = "33a331f9eb4c4d718f3557a817ee55b0"
# endpoint = "https://shyam-ocr-123.cognitiveservices.azure.com/"
def rcpt1():
if "uploaded_file" not in st.session_state:
st.session_state.uploaded_file = None
# print(api_key)
# print(endpoint)
model_id = "receipt_model"
#formUrl = "YOUR_DOCUMENT"
# Create a Form Recognizer client
#form_recognizer_client = FormRecognizerClient(endpoint, AzureKeyCredential(api_key))
document_analysis_client = DocumentAnalysisClient(
endpoint=endpoint, credential=AzureKeyCredential(api_key)
)
dict1 = {}
# Create a Streamlit app
# st.title("Receipt Extraction App")
with st.container():
col1, col2 = st.columns([0.1, 0.9])
with col1:
st.image('./data/Home.png', width=300)
#st.image('./data/2.png', width=300)
with col2:
st.markdown("""
<div style="display: flex; flex-direction: column; align-items: center; justify-content: center; height: 35vh;">
<h1> Welcome to the Receipt Extraction App!</h1>
<!-- Additional content goes here -->
</div>
""", unsafe_allow_html=True)
st.markdown("This App is designed to help you extract information from your receipts and store it in a real-time database.")
# Key Features
st.header("Key Features")
st.markdown("""
- **Image Upload**: Upload a receipt image in JPG or PNG format.
- **Receipt Extraction**: Extract information from the uploaded image.
- **Data Storage**: Store the extracted data in a real-time database.
- **User-Friendly**: A simple and user-friendly interface for easy use.
""")
st.header("How to Use")
st.markdown("""
1. Click the "Upload Receipt Image" button.
2. Select a JPG or PNG image of your receipt.
3. Click the "Extract and Store Receipt Info" button to initiate extraction and storage.
4. The extracted information will be stored in the database.
**Note**: The quality of the extracted information depends on the image quality and receipt layout.
""")
with st.sidebar:
st.session_state.uploaded_file = st.file_uploader("Upload a Receipt in jpg/png Format", type=["jpg","png"])
# st.image('./data/Home.png', width=300)
st.warning('Model is Trained on Specific Templates Only', icon="⚠️")
# Upload PDF file using Streamlit
if st.session_state.uploaded_file is not None:
with st.session_state.uploaded_file:
st.write("File Uploaded! Analyzing...")
# img = Image.open(st.session_state.uploaded_file)
# # Resize the image to your desired dimensions (e.g., 400x400 pixels)
# img = img.resize((300, 400))
# st.image(st.session_state.uploaded_file, caption="Uploaded Image", use_column_width=True)
# st.image(img, caption="Uploaded Image", use_column_width=True)
file_contents = st.session_state.uploaded_file.read()
file_stream = BytesIO(file_contents)
# Analyze the content of the document
poller =document_analysis_client.begin_analyze_document( model_id = model_id, document= file_stream)
documents = poller.result()
# Display extracted entities
for idx, document in enumerate(documents.documents):
st.subheader(f"Document #{idx + 1} Entities:")
for name, field in document.fields.items():
dict1[name] = field.value
#---------------------------------------------------------------Product Table------------------------------------------------
# tbl = dict1.get("prod table")
# import re
# def extract_info(text):
# info = re.findall(r"'(.*?)': DocumentField\(value_type=string, value='(.*?)'", text)
# return {key: value for key, value in info}
# # Extract the information from each string
# extracted_data = [extract_info(text) for text in tbl]
# # Create a DataFrame
# df = pd.DataFrame(extracted_data)
# st.write(df)
dict1.pop("prod table")
# Create a list of indices
index = list(range(1, 2))
df = pd.DataFrame(dict1,index = index)
#st.write(df)
df.columns = ['VendorOrg', 'ClientName','Subtotal', 'Total', 'Tax', 'VendorAddress', 'ClientAddress', 'ShippingAddress', 'Receipt', 'ReceiptDate', 'DueDate', 'PONumber']
#df.to_csv('rcpt.csv',index = False)
ik = df.to_json(orient='records')
json_string = json.loads(ik)
d1= json_string[0]
st.write(d1)
# d1 = {
# "VendorOrg":"East Repair Inc.",
# "ClientName":"John Smith",
# "Subtotal":"145.00",
# "Total":"$154.06",
# "Tax":"9.06",
# "VendorAddress":"1912 Harvest Lane New York, NY 12210",
# "ClientAddress":"2 Court Square New York, NY 12210",
# "ShippingAddress":"John Smith 3787 Pineview Drive Cambridge, MA 12210",
# "Receipt":"US-001",
# "ReceiptDate":"11/02/2019",
# "DueDate":"26/02/2019",
# "PONumber":"2312/2019",
# }
# st.write(d1)
if st.button("Download :memo:"):
import base64
# Convert the JSON data to a JSON string
json_data = json.dumps(d1, indent=4)
# Create a data URI for the JSON string
b64 = base64.b64encode(json_data.encode()).decode()
href = f'<a href="data:application/json;base64,{b64}" download="data.json">Download JSON</a>'
st.markdown(href, unsafe_allow_html=True)
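# Design note: the JSON is offered for download through a base64 "data:" URI in a
# markdown link; Streamlit's st.download_button would be an alternative way to do this.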
st.write(df)
but1 = st.button("Ingest To The Database")
if but1:
try:
import mysql.connector
# Establish a database connection
db_connection = mysql.connector.connect(
host="sqldatabase.mysql.database.azure.com",
user="yusuf121",
password="Satwik121",
database="chatbotdb"
)
cursor = db_connection.cursor()
# st.write("here is")
insert_query = """INSERT INTO receipt (VendorOrg, ClientName, Subtotal, Total, Tax, VendorAddress, ClientAddress, ShippingAddress, Receipt, ReceiptDate, DueDate, PONumber)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
cursor.execute(insert_query, (
d1.get("VendorOrg", None),
d1.get("ClientName", None),
d1.get("Subtotal", None),
d1.get("Total", None),
d1.get("Tax", None),
d1.get("VendorAddress", None),
d1.get("ClientAddress", None),
d1.get("ShippingAddress", None),
d1.get("Receipt", None),
d1.get("ReceiptDate", None),
d1.get("DueDate", None),
d1.get("PONumber", None)
))
db_connection.commit()
st.write(" Details Added Successfully in the Table ")
except Error as e:
st.error(f"Error connecting to the database: {str(e)}")
# db_connection.commit()
| [] |
2024-01-10 | tianjiqx/notes | llm~test~sqlagent.py | from typing import Optional, List
from langchain.agents import create_sql_agent
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.callbacks.manager import CallbackManagerForToolRun, AsyncCallbackManagerForToolRun
from langchain.sql_database import SQLDatabase
from langchain.llms.openai import OpenAI
from langchain.agents import AgentExecutor
from langchain.agents.agent_types import AgentType
from langchain.chat_models import ChatOpenAI
import os
from langchain.tools import BaseSQLDatabaseTool, BaseTool, QuerySQLDataBaseTool, InfoSQLDatabaseTool, \
ListSQLDatabaseTool, QuerySQLCheckerTool
os.environ['OPENAI_API_KEY'] = "sk-UjRJKjGVOKzud8xxxxxxxxtOoJcw655VCT"
os.environ["OPENAI_PROXY"] = "http://127.0.0.1:7890"
class QuerySQLTableTool(BaseSQLDatabaseTool, BaseTool):
"""Tool for querying a SQL Table."""
name = "sql_table_query"
description = """
Input to this tool is a detailed and correct SQL query, output is a result from the table.
If the query is not correct, an error message will be returned.
If an error is returned, rewrite the query, check the query, and try again.
"""
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Execute the query, return the results or an error message."""
return self.db.run_no_throw(query)
async def _arun(
self,
query: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
raise NotImplementedError("QuerySqlDbTool does not support async")
class SQLDatabaseToolkit2(SQLDatabaseToolkit):
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
query_sql_database_tool_description = (
"Input to this tool is a detailed and correct SQL query, output is a "
"result from the database. If the query is not correct, an error message "
"will be returned. If an error is returned, rewrite the query, check the "
"query, and try again. If you encounter an issue with Unknown column "
"'xxxx' in 'field list', using schema_sql_db to query the correct table "
"fields."
)
query_sql_table_tool_description = (
"Input to this tool is a detailed and correct SQL query, output is a "
"result from the table, like row count. If the query is not correct, an error message "
"will be returned. If an error is returned, rewrite the query, check the "
"query, and try again. If you encounter an issue with Unknown column "
"'xxxx' in 'field list', using schema_sql_db to query the correct table "
"fields."
)
info_sql_database_tool_description = (
"Input to this tool is a comma-separated list of tables, output is the "
"schema and sample rows for those tables. "
"Be sure that the tables actually exist by calling list_tables_sql_db "
"first! Example Input: 'table1, table2, table3'"
)
return [
QuerySQLDataBaseTool(
db=self.db, description=query_sql_database_tool_description
),
QuerySQLTableTool(
db=self.db, description=query_sql_table_tool_description
),
InfoSQLDatabaseTool(
db=self.db, description=info_sql_database_tool_description
),
ListSQLDatabaseTool(db=self.db),
QuerySQLCheckerTool(db=self.db, llm=self.llm),
]
db = SQLDatabase.from_uri("sqlite:////home/tianjiqx/qx/sqllite/Chinook.db")
toolkit = SQLDatabaseToolkit2(db=db, llm=OpenAI(temperature=0))
SQL_PREFIX = """You are an agent designed to interact with a SQL database.
Given an input question, create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.
Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.
You can order the results by a relevant column to return the most interesting examples in the database.
Never query for all the columns from a specific table, only ask for the relevant columns given the question.
You have access to tools for interacting with the database.
Only use the below tools. Only use the information returned by the below tools to construct your final answer.
You MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again.
DO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.
If the question does not seem related to the database, just return "I don't know" as the answer.
"""
SQL_SUFFIX = """Begin!
Question: {input}
Thought: I should look at the tables in the database to see what I can query. Answer questions based on the most relevant tables.
Generally, each table should obtain information independently
{agent_scratchpad}"""
first_prompt = """
You are an agent designed to interact with a SQL database.
Given an input question, create a syntactically correct sqlite query to run, then look at the results of the query and return the answer.
Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most 10 results.
You can order the results by a relevant column to return the most interesting examples in the database.
Never query for all the columns from a specific table, only ask for the relevant columns given the question.
You have access to tools for interacting with the database.
Only use the below tools. Only use the information returned by the below tools to construct your final answer.
You MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again.
DO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.
If the question does not seem related to the database, just return "I don't know" as the answer.
sql_db_query: Input to this tool is a detailed and correct SQL query, output is a result from the database. If the query is not correct, an error message will be returned. If an error is returned, rewrite the query, check the query, and try again. If you encounter an issue with Unknown column 'xxxx' in 'field list', using schema_sql_db to query the correct table fields.
sql_table_query: Input to this tool is a detailed and correct SQL query, output is a result from the table, like row count. If the query is not correct, an error message will be returned. If an error is returned, rewrite the query, check the query, and try again. If you encounter an issue with Unknown column 'xxxx' in 'field list', using schema_sql_db to query the correct table fields.
sql_db_schema: Input to this tool is a comma-separated list of tables, output is the schema and sample rows for those tables. Be sure that the tables actually exist by calling list_tables_sql_db first! Example Input: 'table1, table2, table3'
sql_db_list_tables: Input is an empty string, output is a comma separated list of tables in the database.
sql_db_query_checker:
Use this tool to double check if your query is correct before executing it.
Always use this tool before executing a query with query_sql_db!
Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [sql_db_query, sql_table_query, sql_db_schema, sql_db_list_tables, sql_db_query_checker]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
Begin!
Question: {input}
Thought: I should look at the tables in the database to see what I can query. Answer questions based on the most relevant tables.
Generally, each table should obtain information independently
{agent_scratchpad}
"""
agent_executor = create_sql_agent(
llm=OpenAI(temperature=0.7),
toolkit=toolkit,
suffix=SQL_SUFFIX,
verbose=True,
agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
# agent_executor.run("Describe the playlisttrack table")
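# The query below asks (in Chinese): how many tables are there in total, which
# table holds the most data and how many rows it has, and how many rows the
# PlaylistTrack table has.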
agent_executor.run("总共有多少张表,数据量最多的表是那张,有多少行, PlaylistTrack 表有多少行")
| [
"\n Input to this tool is a detailed and correct SQL query, output is a result from the table.\n If the query is not correct, an error message will be returned.\n If an error is returned, rewrite the query, check the query, and try again.\n "
] |
2024-01-10 | lowcountrylee/guidance | guidance~llms~_openai.py | import openai
import os
import time
import requests
import aiohttp
import copy
import time
import asyncio
import types
import collections
import json
import re
import regex
from ._llm import LLM, LLMSession, SyncSession
class MalformedPromptException(Exception):
pass
import pyparsing as pp
role_start_tag = pp.Suppress(pp.Optional(pp.White()) + pp.Literal("<|im_start|>"))
role_start_name = pp.Word(pp.alphanums + "_")("role_name")
role_kwargs = pp.Suppress(pp.Optional(" ")) + pp.Dict(pp.Group(pp.Word(pp.alphanums + "_") + pp.Suppress("=") + pp.QuotedString('"')))("kwargs")
role_start = (role_start_tag + role_start_name + pp.Optional(role_kwargs) + pp.Suppress("\n")).leave_whitespace()
role_end = pp.Suppress(pp.Literal("<|im_end|>"))
role_content = pp.Combine(pp.ZeroOrMore(pp.CharsNotIn("<") | pp.Literal("<") + ~pp.FollowedBy("|im_end|>")))("role_content")
role_group = pp.Group(role_start + role_content + role_end)("role_group").leave_whitespace()
partial_role_group = pp.Group(role_start + role_content)("role_group").leave_whitespace()
roles_grammar = pp.ZeroOrMore(role_group) + pp.Optional(partial_role_group) + pp.StringEnd()
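# For reference, the grammar above parses OpenAI-style chat markup such as the
# following minimal sketch (the message text is only an example):
#   <|im_start|>system
#   You are a helpful assistant.<|im_end|>
#   <|im_start|>user
#   Hello!<|im_end|>
#   <|im_start|>assistant
# A trailing partial role group (one without <|im_end|>) is also accepted.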
# import pyparsing as pp
# role_start_tag = pp.Literal("<|im_start|>")
# role_start_name = pp.Word(pp.alphanums + "_")
# role_kwargs = pp.Dict(pp.Group(pp.Word(pp.alphanums + "_") + pp.Suppress("=") + pp.QuotedString('"')))
# role_start = role_start_tag + role_start_name + pp.Optional(role_kwargs) + pp.Suppress("\n")
# role_end = pp.Literal("<|im_end|>")
# role_content = pp.CharsNotIn("<|im_start|><|im_end|>")
# r'<\|im_start\|>([^\n]+)\n(.*?)(?=<\|im_end\|>|$)'
def prompt_to_messages(prompt):
messages = []
assert prompt.endswith("<|im_start|>assistant\n"), "When calling OpenAI chat models you must generate only directly inside the assistant role! The OpenAI API does not currently support partial assistant prompting."
parsed_prompt = roles_grammar.parse_string(prompt)
# pattern = r'<\|im_start\|>([^\n]+)\n(.*?)(?=<\|im_end\|>|$)'
# matches = re.findall(pattern, prompt, re.DOTALL)
# if not matches:
# return [{'role': 'user', 'content': prompt}]
for role in parsed_prompt:
if len(role["role_content"]) > 0: # only add non-empty messages (OpenAI does not support empty messages anyway)
message = {'role': role["role_name"], 'content': role["role_content"]}
if "kwargs" in role:
for k, v in role["kwargs"].items():
message[k] = v
messages.append(message)
return messages
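# Illustrative example: the markup
#   <|im_start|>user\nHi there<|im_end|><|im_start|>assistant\n
# becomes [{'role': 'user', 'content': 'Hi there'}]; the trailing assistant block
# is empty and is therefore skipped by the non-empty check above.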
async def add_text_to_chat_mode_generator(chat_mode):
in_function_call = False
async for resp in chat_mode:
if "choices" in resp:
for c in resp['choices']:
# move content from delta to text so we have a consistent interface with non-chat mode
found_content = False
if "content" in c['delta'] and c['delta']['content'] != "":
found_content = True
c['text'] = c['delta']['content']
# capture function call data and convert to text again so we have a consistent interface with non-chat mode and open models
if "function_call" in c['delta']:
# build the start of the function call (the follows the syntax that GPT says it wants when we ask it, and will be parsed by the @function_detector)
if not in_function_call:
start_val = "\n```typescript\nfunctions."+c['delta']['function_call']["name"]+"("
if not c['text']:
c['text'] = start_val
else:
c['text'] += start_val
in_function_call = True
# extend the arguments JSON string
val = c['delta']['function_call']["arguments"]
if 'text' in c:
c['text'] += val
else:
c['text'] = val
if not found_content and not in_function_call:
break # the role markers are outside the generation in chat mode right now TODO: consider how this changes for unconstrained generation
else:
yield resp
else:
yield resp
# close the function call if needed
if in_function_call:
yield {'choices': [{'text': ')```'}]}
def add_text_to_chat_mode(chat_mode):
if isinstance(chat_mode, (types.AsyncGeneratorType, types.GeneratorType)):
return add_text_to_chat_mode_generator(chat_mode)
else:
for c in chat_mode['choices']:
c['text'] = c['message']['content']
return chat_mode
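# Both branches above normalize chat-completion responses so that downstream code
# can read choice['text'] regardless of whether the chat endpoint or the plain
# completion endpoint produced the response.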
class OpenAI(LLM):
llm_name: str = "openai"
def __init__(self, model=None, caching=True, max_retries=5, max_calls_per_min=60,
api_key=None, api_type="open_ai", api_base=None, api_version=None, deployment_id=None,
temperature=0.0, chat_mode="auto", organization=None, rest_call=False,
allowed_special_tokens={"<|endoftext|>", "<|endofprompt|>"},
token=None, endpoint=None, encoding_name=None):
super().__init__()
# map old param values
# TODO: add deprecated warnings after some time
if token is not None:
if api_key is None:
api_key = token
if endpoint is not None:
if api_base is None:
api_base = endpoint
# fill in default model value
if model is None:
model = os.environ.get("OPENAI_MODEL", None)
if model is None:
try:
with open(os.path.expanduser('~/.openai_model'), 'r') as file:
model = file.read().replace('\n', '')
except:
pass
# fill in default deployment_id value
if deployment_id is None:
deployment_id = os.environ.get("OPENAI_DEPLOYMENT_ID", None)
# auto detect chat completion mode
if chat_mode == "auto":
# parse to determine if the model needs to use the chat completion API
chat_model_pattern = r'^(gpt-3\.5-turbo|gpt-4)(-\d+k)?(-\d{4})?$'
if re.match(chat_model_pattern, model):
chat_mode = True
else:
chat_mode = False
# fill in default API key value
if api_key is None: # get from environment variable
api_key = os.environ.get("OPENAI_API_KEY", getattr(openai, "api_key", None))
if api_key is not None and not api_key.startswith("sk-") and os.path.exists(os.path.expanduser(api_key)): # get from file
with open(os.path.expanduser(api_key), 'r') as file:
api_key = file.read().replace('\n', '')
if api_key is None: # get from default file location
try:
with open(os.path.expanduser('~/.openai_api_key'), 'r') as file:
api_key = file.read().replace('\n', '')
except:
pass
if organization is None:
organization = os.environ.get("OPENAI_ORGANIZATION", None)
# fill in default endpoint value
if api_base is None:
api_base = os.environ.get("OPENAI_API_BASE", None) or os.environ.get("OPENAI_ENDPOINT", None) # ENDPOINT is deprecated
import tiktoken
if encoding_name is None:
encoding_name = tiktoken.encoding_for_model(model).name
self._tokenizer = tiktoken.get_encoding(encoding_name)
self.chat_mode = chat_mode
self.allowed_special_tokens = allowed_special_tokens
self.model_name = model
self.deployment_id = deployment_id
self.caching = caching
self.max_retries = max_retries
self.max_calls_per_min = max_calls_per_min
if isinstance(api_key, str):
api_key = api_key.replace("Bearer ", "")
self.api_key = api_key
self.api_type = api_type
self.api_base = api_base
self.api_version = api_version
self.current_time = time.time()
self.call_history = collections.deque()
self.temperature = temperature
self.organization = organization
self.rest_call = rest_call
self.endpoint = endpoint
if not self.rest_call:
self.caller = self._library_call
else:
self.caller = self._rest_call
self._rest_headers = {
"Content-Type": "application/json"
}
def session(self, asynchronous=False):
if asynchronous:
return OpenAISession(self)
else:
return SyncSession(OpenAISession(self))
def role_start(self, role_name, **kwargs):
assert self.chat_mode, "role_start() can only be used in chat mode"
return "<|im_start|>"+role_name+"".join([f' {k}="{v}"' for k,v in kwargs.items()])+"\n"
def role_end(self, role=None):
assert self.chat_mode, "role_end() can only be used in chat mode"
return "<|im_end|>"
def end_of_text(self):
return "<|endoftext|>"
@classmethod
async def stream_then_save(cls, gen, key, stop_regex, n):
list_out = []
cached_out = None
# init stop_regex variables
if stop_regex is not None:
if isinstance(stop_regex, str):
stop_patterns = [regex.compile(stop_regex)]
else:
stop_patterns = [regex.compile(pattern) for pattern in stop_regex]
current_strings = ["" for _ in range(n)]
# last_out_pos = ["" for _ in range(n)]
# iterate through the stream
all_done = False
async for curr_out in gen:
# if we have a cached output, extend it with the current output
if cached_out is not None:
out = merge_stream_chunks(cached_out, curr_out)
else:
out = curr_out
# check if we have stop_regex matches
found_partial = False
if stop_regex is not None:
# keep track of the generated text so far
for i,choice in enumerate(curr_out['choices']):
current_strings[i] += choice['text']
# check if all of the strings match a stop string (and hence we can stop the batch inference)
all_done = True
for i in range(len(current_strings)):
found = False
for s in stop_patterns:
if s.search(current_strings[i]):
found = True
if not found:
all_done = False
break
# find where trim off the stop regex matches if needed (and look for partial matches)
stop_pos = [1e10 for _ in range(n)]
stop_text = [None for _ in range(n)]
for i in range(len(current_strings)):
for s in stop_patterns:
m = s.search(current_strings[i], partial=True)
if m:
span = m.span()
if span[1] > span[0]:
if m.partial: # we might be starting a stop sequence, so we can't emit anything yet
found_partial = True
break
else:
stop_text[i] = current_strings[i][span[0]:span[1]]
stop_pos[i] = min(span[0], stop_pos[i])
if stop_pos[i] != 1e10:
stop_pos[i] = stop_pos[i] - len(current_strings[i]) # convert to relative position from the end
# if we might be starting a stop sequence, we need to cache the output and continue to wait and see
if found_partial:
cached_out = out
continue
# if we get here, we are not starting a stop sequence, so we can emit the output
else:
cached_out = None
if stop_regex is not None:
for i in range(len(out['choices'])):
if stop_pos[i] < len(out['choices'][i]['text']):
out['choices'][i] = out['choices'][i].to_dict() # because sometimes we might need to set the text to the empty string (and OpenAI's object does not like that)
out['choices'][i]['text'] = out['choices'][i]['text'][:stop_pos[i]]
out['choices'][i]['stop_text'] = stop_text[i]
out['choices'][i]['finish_reason'] = "stop"
list_out.append(out)
yield out
if all_done:
gen.aclose()
break
# if we have a cached output, emit it
if cached_out is not None:
list_out.append(cached_out)
yield out
cls.cache[key] = list_out
def _stream_completion(self):
pass
# Define a function to add a call to the deque
def add_call(self):
# Get the current timestamp in seconds
now = time.time()
# Append the timestamp to the right of the deque
self.call_history.append(now)
# Define a function to count the calls in the last 60 seconds
def count_calls(self):
# Get the current timestamp in seconds
now = time.time()
# Remove the timestamps that are older than 60 seconds from the left of the deque
while self.call_history and self.call_history[0] < now - 60:
self.call_history.popleft()
# Return the length of the deque as the number of calls
return len(self.call_history)
async def _library_call(self, **kwargs):
""" Call the OpenAI API using the python package.
Note that it uses the local auth token, and does not rely on the openai one.
"""
# save the params of the openai library
prev_key = openai.api_key
prev_org = openai.organization
prev_type = openai.api_type
prev_version = openai.api_version
prev_base = openai.api_base
# set the params of the openai library if we have them
if self.api_key is not None:
openai.api_key = self.api_key
if self.organization is not None:
openai.organization = self.organization
if self.api_type is not None:
openai.api_type = self.api_type
if self.api_version is not None:
openai.api_version = self.api_version
if self.api_base is not None:
openai.api_base = self.api_base
assert openai.api_key is not None, "You must provide an OpenAI API key to use the OpenAI LLM. Either pass it in the constructor, set the OPENAI_API_KEY environment variable, or create the file ~/.openai_api_key with your key in it."
if self.chat_mode:
kwargs['messages'] = prompt_to_messages(kwargs['prompt'])
del kwargs['prompt']
del kwargs['echo']
del kwargs['logprobs']
# print(kwargs)
out = await openai.ChatCompletion.acreate(**kwargs)
out = add_text_to_chat_mode(out)
else:
out = await openai.Completion.acreate(**kwargs)
# restore the params of the openai library
openai.api_key = prev_key
openai.organization = prev_org
openai.api_type = prev_type
openai.api_version = prev_version
openai.api_base = prev_base
return out
async def _rest_call(self, **kwargs):
""" Call the OpenAI API using the REST API.
"""
# Define the request headers
headers = copy.copy(self._rest_headers)
if self.api_key is not None:
headers['Authorization'] = f"Bearer {self.api_key}"
# Define the request data
stream = kwargs.get("stream", False)
data = {
"model": self.model_name,
"prompt": kwargs["prompt"],
"max_tokens": kwargs.get("max_tokens", None),
"temperature": kwargs.get("temperature", 0.0),
"top_p": kwargs.get("top_p", 1.0),
"n": kwargs.get("n", 1),
"stream": stream,
"logprobs": kwargs.get("logprobs", None),
'stop': kwargs.get("stop", None),
"echo": kwargs.get("echo", False)
}
if self.chat_mode:
data['messages'] = prompt_to_messages(data['prompt'])
del data['prompt']
del data['echo']
del data['logprobs']
# Send a POST request and get the response
# An exception for timeout is raised if the server has not issued a response for 10 seconds
try:
if stream:
session = aiohttp.ClientSession()
response = await session.post(self.endpoint, json=data, headers=headers, timeout=60)
status = response.status
else:
response = requests.post(self.endpoint, headers=headers, json=data, timeout=60)
status = response.status_code
text = response.text
if status != 200:
if stream:
text = await response.text()
raise Exception("Response is not 200: " + text)
if stream:
response = self._rest_stream_handler(response, session)
else:
response = response.json()
except requests.Timeout:
raise Exception("Request timed out.")
except requests.ConnectionError:
raise Exception("Connection error occurred.")
if self.chat_mode:
response = add_text_to_chat_mode(response)
return response
async def _close_response_and_session(self, response, session):
await response.release()
await session.close()
async def _rest_stream_handler(self, response, session):
# async for line in response.iter_lines():
async for line in response.content:
text = line.decode('utf-8')
if text.startswith('data: '):
text = text[6:]
if text.strip() == '[DONE]':
await self._close_response_and_session(response, session)
break
else:
yield json.loads(text)
def encode(self, string):
# note that is_fragment is not used for this tokenizer
return self._tokenizer.encode(string, allowed_special=self.allowed_special_tokens)
def decode(self, tokens):
return self._tokenizer.decode(tokens)
def merge_stream_chunks(first_chunk, second_chunk):
""" This merges two stream responses together.
"""
out = copy.deepcopy(first_chunk)
# merge the choices
for i in range(len(out['choices'])):
out_choice = out['choices'][i]
second_choice = second_chunk['choices'][i]
out_choice['text'] += second_choice['text']
if 'index' in second_choice:
out_choice['index'] = second_choice['index']
if 'finish_reason' in second_choice:
out_choice['finish_reason'] = second_choice['finish_reason']
if out_choice.get('logprobs', None) is not None:
out_choice['logprobs']['token_logprobs'] += second_choice['logprobs']['token_logprobs']
out_choice['logprobs']['top_logprobs'] += second_choice['logprobs']['top_logprobs']
out_choice['logprobs']['text_offset'] = second_choice['logprobs']['text_offset']
return out
class OpenAIStreamer():
def __init__(self, stop_regex, n):
self.stop_regex = stop_regex
self.n = n
self.current_strings = ["" for _ in range(n)]
self.current_length = 0
class RegexStopChecker():
def __init__(self, stop_pattern, decode, prefix_length):
if isinstance(stop_pattern, str):
self.stop_patterns = [regex.compile(stop_pattern)]
else:
self.stop_patterns = [regex.compile(pattern) for pattern in stop_pattern]
self.prefix_length = prefix_length
self.decode = decode
self.current_strings = None
self.current_length = 0
def __call__(self, input_ids, scores, **kwargs):
# extend our current strings
if self.current_strings is None:
self.current_strings = ["" for _ in range(len(input_ids))]
for i in range(len(self.current_strings)):
self.current_strings[i] += self.decode(input_ids[i][self.current_length:])
# trim off the prefix string so we don't look for stop matches in the prompt
if self.current_length == 0:
for i in range(len(self.current_strings)):
self.current_strings[i] = self.current_strings[i][self.prefix_length:]
self.current_length = len(input_ids[0])
# check if all of the strings match a stop string (and hence we can stop the batch inference)
all_done = True
for i in range(len(self.current_strings)):
found = False
for s in self.stop_patterns:
if s.search(self.current_strings[i]):
found = True
if not found:
all_done = False
break
return all_done
# define the syntax for the function definitions
import pyparsing as pp
start_functions = pp.Suppress(pp.Literal("## functions\n\nnamespace functions {\n\n"))
comment = pp.Combine(pp.Suppress(pp.Literal("//") + pp.Optional(" ")) + pp.restOfLine)
end_functions = pp.Suppress("} // namespace functions")
function_def_start = pp.Optional(comment)("function_description") + pp.Suppress(pp.Literal("type")) + pp.Word(pp.alphas + "_")("function_name") + pp.Suppress(pp.Literal("=") + pp.Literal("(_:") + pp.Literal("{"))
function_def_end = pp.Suppress(pp.Literal("})") + pp.Literal("=>") + pp.Literal("any;"))
parameter_type = (pp.Word(pp.alphas + "_")("simple_type") | pp.QuotedString('"')("enum_option") + pp.OneOrMore(pp.Suppress("|") + pp.QuotedString('"')("enum_option"))("enum")) + pp.Suppress(pp.Optional(","))
parameter_def = pp.Optional(comment)("parameter_description") + pp.Word(pp.alphas + "_")("parameter_name") + pp.Optional(pp.Literal("?"))("is_optional") + pp.Suppress(pp.Literal(":")) + pp.Group(parameter_type)("parameter_type")
function_def = function_def_start + pp.OneOrMore(pp.Group(parameter_def)("parameter")) + function_def_end
functions_def = start_functions + pp.OneOrMore(pp.Group(function_def)("function")) + end_functions
def get_json_from_parse(parse_out):
functions = []
for function in parse_out:
function_name = function.function_name
function_description = function.function_description
parameters = {
"type": "object",
"properties": {},
"required": []
}
for parameter in function:
if isinstance(parameter, str):
continue
parameter_name = parameter.parameter_name
parameter_description = parameter.parameter_description
parameter_type = parameter.parameter_type
is_optional = parameter.is_optional
d = {}
if parameter_type.simple_type:
d["type"] = parameter_type.simple_type
elif parameter_type.enum:
d["type"] = "string"
d["enum"] = [s for s in parameter_type]
if parameter_description:
d["description"] = parameter_description
if not is_optional:
parameters["required"].append(parameter_name)
parameters["properties"][parameter_name] = d
functions.append({
"name": function_name,
"description": function_description,
"parameters": parameters
})
return functions
def extract_function_defs(prompt):
""" This extracts function definitions from the prompt.
"""
if "\n## functions\n" not in prompt:
return None
else:
functions_text = prompt[prompt.index("\n## functions\n")+1:prompt.index("} // namespace functions")+24]
parse_out = functions_def.parseString(functions_text)
return get_json_from_parse(parse_out)
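# Illustrative example of the TypeScript-style block this parser expects inside a
# prompt (the names below are made up for illustration):
#
#   ## functions
#
#   namespace functions {
#
#   // Get the weather
#   type get_weather = (_: {
#   // The city name
#   location: string,
#   unit?: "celsius" | "fahrenheit",
#   }) => any;
#
#   } // namespace functions
#
# get_json_from_parse() then turns this into OpenAI "functions" JSON with a name,
# a description and a JSON-schema style "parameters" object.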
# Define a deque to store the timestamps of the calls
class OpenAISession(LLMSession):
async def __call__(self, prompt, stop=None, stop_regex=None, temperature=None, n=1, max_tokens=1000, logprobs=None,
top_p=1.0, echo=False, logit_bias=None, token_healing=None, pattern=None, stream=None,
cache_seed=0, caching=None, **completion_kwargs):
""" Generate a completion of the given prompt.
"""
# we need to stream in order to support stop_regex
if stream is None:
stream = stop_regex is not None
assert stop_regex is None or stream, "We can only support stop_regex for the OpenAI API when stream=True!"
assert stop_regex is None or n == 1, "We don't yet support stop_regex combined with n > 1 with the OpenAI API!"
assert token_healing is None or token_healing is False, "The OpenAI API does not yet support token healing! Please either switch to an endpoint that does, or don't use the `token_healing` argument to `gen`."
# set defaults
if temperature is None:
temperature = self.llm.temperature
# get the arguments as dictionary for cache key generation
args = locals().copy()
assert not pattern, "The OpenAI API does not support Guidance pattern controls! Please either switch to an endpoint that does, or don't use the `pattern` argument to `gen`."
# assert not stop_regex, "The OpenAI API does not support Guidance stop_regex controls! Please either switch to an endpoint that does, or don't use the `stop_regex` argument to `gen`."
# define the key for the cache
cache_params = self._cache_params(args)
llm_cache = self.llm.cache
key = llm_cache.create_key(self.llm.llm_name, **cache_params)
# allow streaming to use non-streaming cache (the reverse is not true)
if key not in llm_cache and stream:
cache_params["stream"] = False
key1 = llm_cache.create_key(self.llm.llm_name, **cache_params)
if key1 in llm_cache:
key = key1
# check the cache
if key not in llm_cache or caching is False or (caching is not True and not self.llm.caching):
# ensure we don't exceed the rate limit
while self.llm.count_calls() > self.llm.max_calls_per_min:
await asyncio.sleep(1)
functions = extract_function_defs(prompt)
fail_count = 0
while True:
try_again = False
try:
self.llm.add_call()
call_args = {
"model": self.llm.model_name,
"deployment_id": self.llm.deployment_id,
"prompt": prompt,
"max_tokens": max_tokens,
"temperature": temperature,
"top_p": top_p,
"n": n,
"stop": stop,
"logprobs": logprobs,
"echo": echo,
"stream": stream,
**completion_kwargs
}
if functions is None:
if "function_call" in call_args:
del call_args["function_call"]
else:
call_args["functions"] = functions
if logit_bias is not None:
call_args["logit_bias"] = {str(k): v for k,v in logit_bias.items()} # convert keys to strings since that's the open ai api's format
out = await self.llm.caller(**call_args)
except openai.error.RateLimitError:
await asyncio.sleep(3)
try_again = True
fail_count += 1
if not try_again:
break
if fail_count > self.llm.max_retries:
raise Exception(f"Too many (more than {self.llm.max_retries}) OpenAI API RateLimitError's in a row!")
if stream:
return self.llm.stream_then_save(out, key, stop_regex, n)
else:
llm_cache[key] = out
# wrap as a list if needed
if stream:
if isinstance(llm_cache[key], list):
return llm_cache[key]
return [llm_cache[key]]
return llm_cache[key]
import os
import json
import platformdirs
from ._openai import OpenAI
class AzureOpenAI(OpenAI):
def __init__(self, *args, **kwargs):
raise NotImplementedError("The AzureOpenAI class has been merged with the OpenAI class for Azure usage. Please use the OpenAI class instead: https://guidance.readthedocs.io/en/latest/example_notebooks/api_examples/llms/OpenAI.html")
class MSALOpenAI(OpenAI):
""" Microsoft Authentication Library (MSAL) OpenAI style integration.
Warning: This class is not finalized and may change in the future.
"""
llm_name: str = "azure_openai"
def __init__(self, model=None, client_id=None, authority=None, caching=True, max_retries=5, max_calls_per_min=60, token=None,
endpoint=None, scopes=None, temperature=0.0, chat_mode="auto"):
assert endpoint is not None, "An endpoint must be specified!"
# build a standard OpenAI LLM object
super().__init__(
model=model, caching=caching, max_retries=max_retries, max_calls_per_min=max_calls_per_min,
token=token, endpoint=endpoint, temperature=temperature, chat_mode=chat_mode
)
self.client_id = client_id
self.authority = authority
self.scopes = scopes
from msal import PublicClientApplication, SerializableTokenCache
self._token_cache = SerializableTokenCache()
self._token_cache_path = os.path.join(platformdirs.user_cache_dir("guidance"), "_azure_openai.token")
self._app = PublicClientApplication(client_id=self.client_id, authority=self.authority, token_cache=self._token_cache)
if os.path.exists(self._token_cache_path):
self._token_cache.deserialize(open(self._token_cache_path, 'r').read())
self._rest_headers["X-ModelType"] = self.model_name
@property
def token(self):
return self._get_token()
@token.setter
def token(self, value):
pass # ignored for now
def _get_token(self):
accounts = self._app.get_accounts()
result = None
if accounts:
# Assuming the end user chose this one
chosen = accounts[0]
# Now let's try to find a token in cache for this account
result = self._app.acquire_token_silent(self.scopes, account=chosen)
if not result:
# So no suitable token exists in cache. Let's get a new one from AAD.
flow = self._app.initiate_device_flow(scopes=self.scopes)
if "user_code" not in flow:
raise ValueError(
"Fail to create device flow. Err: %s" % json.dumps(flow, indent=4))
print(flow["message"])
result = self._app.acquire_token_by_device_flow(flow)
# save the acquired token
with open(self._token_cache_path, "w") as f:
f.write(self._token_cache.serialize())
return result["access_token"]
| [
"role_content"
] |
2024-01-10 | lowcountrylee/guidance | guidance~llms~caches~_diskcache.py | import os
import diskcache
import platformdirs
from guidance.llms.caches import Cache
class DiskCache(Cache):
"""DiskCache is a cache that uses diskcache lib."""
def __init__(self, llm_name: str):
self._diskcache = diskcache.Cache(
os.path.join(
platformdirs.user_cache_dir("guidance"), f"_{llm_name}.diskcache"
)
)
def __getitem__(self, key: str) -> str:
return self._diskcache[key]
def __setitem__(self, key: str, value: str) -> None:
self._diskcache[key] = value
def __contains__(self, key: str) -> bool:
return key in self._diskcache
def clear(self):
self._diskcache.clear()
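# Minimal usage sketch:
#   cache = DiskCache("openai")
#   cache["some-key"] = "some-value"
#   assert "some-key" in cache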
| [] |
2024-01-10 | yeserumo/auto-TTS-bupt | dlas~data~audio~audio_with_noise_dataset.py | import random
import sys
from math import pi
import torch
import torch.nn.functional as F
import torchaudio
from torch.utils.data import Dataset
from tqdm import tqdm
from dlas.data.audio.unsupervised_audio_dataset import (
UnsupervisedAudioDataset, load_audio)
from dlas.data.util import (find_files_of_type, is_audio_file,
load_paths_from_cache)
from dlas.utils.util import opt_get
def _integration_fn_fully_enabled(n):
return torch.ones((n,))
# Randomly assigns up to 5 blocks of the output tensor the value '1'. Rest is zero
def _integration_fn_spiky(n):
fn = torch.zeros((n,))
spikes = random.randint(1, 5)
for _ in range(spikes):
sz = random.randint(n//8, n//2)
pos = random.randint(0, n)
extent = min(n, sz+pos)
fn[pos:extent] = 1
return fn
# Uses a sinusoidal ramp up and down (of random length) to a peak which is held for a random duration.
def _integration_fn_smooth(n):
center = random.randint(1, n-2)
max_duration = n-center-1
duration = random.randint(max_duration//4, max_duration)
end = center+duration
ramp_up_sz = random.randint(n//16, n//4)
ramp_up = torch.sin(pi*torch.arange(0, ramp_up_sz)/(2*ramp_up_sz))
if ramp_up_sz > center:
ramp_up = ramp_up[(ramp_up_sz-center):]
ramp_up_sz = center
ramp_down_sz = random.randint(n//16, n//4)
ramp_down = torch.flip(
torch.sin(pi*torch.arange(0, ramp_down_sz)/(2*ramp_down_sz)), dims=[0])
if ramp_down_sz > (n-end):
ramp_down = ramp_down[:(n-end)]
ramp_down_sz = n-end
fn = torch.zeros((n,))
fn[(center-ramp_up_sz):center] = ramp_up
fn[center:end] = 1
fn[end:(end+ramp_down_sz)] = ramp_down
return fn
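# How the integration functions above are used by AudioWithNoiseDataset.__getitem__
# below: environmental noise and music (labels 1 and 2) are gated with
# _integration_fn_fully_enabled, while an overlapping second voice (label 3) may use
# either _integration_fn_smooth or _integration_fn_fully_enabled.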
def load_rir(path, sr, max_sz):
rir = load_audio(path, sr).abs()
if rir.shape[-1] > max_sz:
rir = rir[:, :max_sz]
rir = (rir / torch.norm(rir, p=2)).flip([1])
return rir
'''
Wraps an UnsupervisedAudioDataset and applies noise to the output clips, then provides labels depending on
what noise was added.
'''
class AudioWithNoiseDataset(Dataset):
def __init__(self, opt):
self.underlying_dataset = UnsupervisedAudioDataset(opt)
self.env_noise_paths = load_paths_from_cache(
opt['env_noise_paths'], opt['env_noise_cache'])
self.music_paths = load_paths_from_cache(
opt['music_paths'], opt['music_cache'])
self.openair_paths = find_files_of_type(
'img', opt['openair_path'], qualifier=is_audio_file)[0]
self.min_volume = opt_get(opt, ['min_noise_volume'], .2)
self.max_volume = opt_get(opt, ['max_noise_volume'], .5)
self.sampling_rate = self.underlying_dataset.sampling_rate
self.use_gpu_for_reverb_compute = opt_get(
opt, ['use_gpu_for_reverb_compute'], True)
self.openair_kernels = None
self.current_item_fetch = 0
self.fetch_error_count = 0
def load_openair_kernels(self):
if self.use_gpu_for_reverb_compute and self.openair_kernels is None:
# Load the openair reverbs as CUDA tensors.
self.openair_kernels = []
for oa in self.openair_paths:
self.openair_kernels.append(load_rir(
oa, self.underlying_dataset.sampling_rate, self.underlying_dataset.sampling_rate*2).cuda())
def __getitem__(self, item):
if self.current_item_fetch != item:
self.current_item_fetch = item
self.fetch_error_count = 0
# Load on the fly to prevent GPU memory sharing across process errors.
self.load_openair_kernels()
out = self.underlying_dataset[item]
clip = out['clip']
dlen = clip.shape[-1]
clip = clip[:, :out['clip_lengths']]
padding_room = dlen - clip.shape[-1]
augpath = ''
augvol = 0
try:
# Randomly adjust clip volume, regardless of the selection, between
clipvol = (random.random() * (.8-.5) + .5)
clip = clip * clipvol
label = random.randint(0, 4) # Current excludes GSM corruption.
# label = 3
if label > 0 and label < 4: # 0 is basically "leave it alone"
aug_needed = True
augvol = (random.random() * (self.max_volume -
self.min_volume) + self.min_volume)
if label == 1:
# Add environmental noise.
augpath = random.choice(self.env_noise_paths)
intg_fns = [_integration_fn_fully_enabled]
elif label == 2:
# Add music
augpath = random.choice(self.music_paths)
intg_fns = [_integration_fn_fully_enabled]
augvol *= .5 # Music is often severely in the background.
elif label == 3:
augpath = random.choice(self.underlying_dataset.audiopaths)
# This can take two forms:
if padding_room < 22000 or random.random() < .5:
# (1) The voices talk over one another. If there is no padding room, we always take this choice.
intg_fns = [_integration_fn_smooth,
_integration_fn_fully_enabled]
else:
# (2) There are simply two voices in the clip, separated from one another.
# This is a special case that does not use the same logic as the rest of the augmentations.
aug = load_audio(
augpath, self.underlying_dataset.sampling_rate)
# Pad with some random silence
aug = F.pad(aug, (random.randint(20, 4000), 0))
# Fit what we can given the padding room we have.
aug = aug[:, :padding_room]
clip = torch.cat([clip, aug], dim=1)
# Restore some meta-parameters.
padding_room = dlen - clip.shape[-1]
out['clip_lengths'] = torch.tensor(clip.shape[-1])
aug_needed = False
if aug_needed:
aug = load_audio(
augpath, self.underlying_dataset.sampling_rate)
if aug.shape[1] > clip.shape[1]:
n, cn = aug.shape[1], clip.shape[1]
gap = n-cn
placement = random.randint(0, gap)
aug = aug[:, placement:placement+cn]
aug = random.choice(intg_fns)(aug.shape[1]) * aug
aug = aug * augvol
if aug.shape[1] < clip.shape[1]:
gap = clip.shape[1] - aug.shape[1]
placement = random.randint(0, gap-1)
aug = torch.nn.functional.pad(
aug, (placement, gap-placement))
clip = clip + aug
elif label == 4:
# Perform reverb (to simulate being in a large room with an omni-mic). This is performed by convolving
# impulse recordings from openair over the input clip.
if self.use_gpu_for_reverb_compute:
rir = random.choice(self.openair_kernels)
else:
augpath = random.choice(self.openair_paths)
rir = load_rir(
augpath, self.underlying_dataset.sampling_rate, clip.shape[-1])
clip = torch.nn.functional.pad(clip, (rir.shape[1]-1, 0))
if self.use_gpu_for_reverb_compute:
clip = clip.cuda()
clip = torch.nn.functional.conv1d(
clip.unsqueeze(0), rir.unsqueeze(0)).squeeze(0).cpu()
elif label == 5:
# Apply the GSM codec to simulate cellular phone audio.
clip = torchaudio.functional.apply_codec(
clip, self.underlying_dataset.sampling_rate, format="gsm")
except:
if self.fetch_error_count > 10:
print(
f"Exception encountered processing {item}, re-trying because this is often just a failed aug.")
print(sys.exc_info())
# raise # Uncomment to surface exceptions.
self.fetch_error_count += 1
return self[item]
clip.clip_(-1, 1)
# Restore padding.
clip = F.pad(clip, (0, padding_room))
out['clip'] = clip
out['label'] = label
# out['aug'] = aug
out['augpath'] = augpath
out['augvol'] = augvol
out['clipvol'] = clipvol
return out
def __len__(self):
return len(self.underlying_dataset)
if __name__ == '__main__':
params = {
'mode': 'unsupervised_audio_with_noise',
'path': ['y:/clips/books1'],
'cache_path': 'D:\\data\\clips_for_noise_classifier.pth',
'sampling_rate': 22050,
'pad_to_samples': 400000,
'do_augmentation': False,
'phase': 'train',
'n_workers': 4,
'batch_size': 256,
'extra_samples': 0,
'env_noise_paths': ['E:\\audio\\UrbanSound\\filtered', 'E:\\audio\\UrbanSound\\MSSND'],
'env_noise_cache': 'E:\\audio\\UrbanSound\\cache.pth',
'music_paths': ['E:\\audio\\music\\FMA\\fma_large', 'E:\\audio\\music\\maestro\\maestro-v3.0.0'],
'music_cache': 'E:\\audio\\music\\cache.pth',
'openair_path': 'D:\\data\\audio\\openair\\resampled',
'use_gpu_for_reverb_compute': False,
}
from data import create_dataloader, create_dataset, util
ds = create_dataset(params)
dl = create_dataloader(ds, params, pin_memory=False)
i = 0
for b in tqdm(dl):
for b_ in range(b['clip'].shape[0]):
# torchaudio.save(f'{i}_clip_{b_}_{b["label"][b_].item()}.wav', b['clip'][b_][:, :b['clip_lengths'][b_]], ds.sampling_rate)
# torchaudio.save(f'{i}_clip_{b_}_aug.wav', b['aug'][b_], ds.sampling_rate)
print(
f'{i} aug path: {b["augpath"][b_]} aug volume: {b["augvol"][b_]} clip volume: {b["clipvol"][b_]}')
i += 1
| [] |
2024-01-10 | ArptPlank/-GPT- | V1.0.0~connect_with_gpt.py | import os
from openai import OpenAI, api_key
import configparser
import threading
import time
import pygame
class gpt():
def __init__(self):
config = configparser.ConfigParser()
config.read('config.ini')
api_key = config.get("Settings", "openai_api_key")
self.client = OpenAI(api_key=api_key)
self.model_for_chat = "gpt-3.5-turbo"
self.delta = []
self.is_chat = False
self.is_finish = False
self.sound_path = []
self.index = 0
pygame.init()
def split(self,text):
_text = []
text = text.split("\n")
for t in text:
if "用户:" in t:
_text.append({"role":"user","content":t.replace("用户:","")})
elif "GPT:" in t:
_text.append({"role":"system","content":t.replace("GPT:","")})
else:
pass
return _text
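# Illustrative example: a transcript such as "用户:hello\nGPT:hi there" becomes
# [{"role": "user", "content": "hello"}, {"role": "system", "content": "hi there"}];
# lines without a "用户:" or "GPT:" prefix are silently dropped.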
def chat(self,text):
self.is_chat = True
text = text.replace("\n","")
text = self.split(text)
response = self.client.chat.completions.create(
model=self.model_for_chat,
messages=text,
max_tokens=600,
stream=True,
)
t = threading.Thread(target=self.get_response,args=(response,))
t.start()
def get_text_of_sound(self,sound_path):
audio_file = open(sound_path, 'rb')
transcript = self.client.audio.transcriptions.create(
model="whisper-1",
file=audio_file,
prompt="ZyntriQix, Digique Plus, CynapseFive, VortiQore V8, EchoNix Array, OrbitalLink Seven, DigiFractal Matrix, PULSE, RAPT, B.R.I.C.K., Q.U.A.R.T.Z., F.L.I.N.T."
)
return transcript.text
def get_response(self,response):
for chunk in response:
if chunk.choices[0].delta.content is not None:
self.delta.append(chunk.choices[0].delta.content)
self.is_finish = True
def get_real_sound(self,text,index):
sound_path = f"output_sound/{time.time()}.mp3."
response = self.client.audio.speech.create(
model="tts-1",
voice="alloy",
input=text,
)
response.stream_to_file(sound_path)
while True:
if index == self.index:
break
pygame.mixer.init()
pygame.mixer.music.load(sound_path)
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():
pass
self.index += 1
#os.remove(sound_path)
if __name__ == '__main__':
gpt = gpt()
print(gpt.get_text_of_sound("mp3/1702134838.wav"))
# print(gpt.chat("用户:你好\nGPT:你好"))
| [] |
2024-01-10 | ArptPlank/-GPT- | V1.5.0~connect_with_gpt.py | import os
from openai import OpenAI, api_key
import configparser
import threading
import time
import pygame
from vits import vits
class gpt():
def __init__(self,collector):
config = configparser.ConfigParser()
config.read('config.ini')
api_key = config.get("Settings", "openai_api_key")
self.client = OpenAI(api_key=api_key)
self.model_for_chat = "gpt-3.5-turbo"
self.delta = []
self.is_chat = False
self.is_finish = False
self.sound_path = []
self.index = 0
self.vits = vits()
self.collector = collector
pygame.init()
def split(self,text):
_text = []
text = text.split("\n")
for t in text:
if "用户:" in t:
_text.append({"role":"user","content":t.replace("用户:","")})
elif "GPT:" in t:
_text.append({"role":"system","content":t.replace("GPT:","")})
else:
pass
return _text
def chat(self,text):
self.is_chat = True
text = text.replace("\n","")
text = self.split(text)
response = self.client.chat.completions.create(
model=self.model_for_chat,
messages=text,
max_tokens=300,
stream=True,
)
t = threading.Thread(target=self.get_response,args=(response,))
t.start()
def get_text_of_sound(self,sound_path):
audio_file = open(sound_path, 'rb')
transcript = self.client.audio.transcriptions.create(
model="whisper-1",
file=audio_file,
prompt="ZyntriQix, Digique Plus, CynapseFive, VortiQore V8, EchoNix Array, OrbitalLink Seven, DigiFractal Matrix, PULSE, RAPT, B.R.I.C.K., Q.U.A.R.T.Z., F.L.I.N.T."
)
return transcript.text
def get_response(self,response):
for chunk in response:
if chunk.choices[0].delta.content is not None:
self.delta.append(chunk.choices[0].delta.content)
self.is_finish = True
def get_real_sound(self,text,tag):
sound_path = ""
if tag == 1:
sound_path = f"output_sound/{time.time()}.mp3."
response = self.client.audio.speech.create(
model="tts-1",
voice="alloy",
input=text,
)
response.stream_to_file(sound_path)
elif tag == 2:
sound_path = self.vits.make_sound(text)
self.collector.add_sound(sound_path=sound_path,index=self.index)
self.index += 1
#os.remove(sound_path)
if __name__ == '__main__':
gpt = gpt()
print(gpt.get_text_of_sound("mp3/1702134838.wav"))
# print(gpt.chat("用户:你好\nGPT:你好")) | [] |
2024-01-10 | MIBlue119/storystudio | storystudio~tools~stt.py | import os
import openai
from storystudio.settings import app_settings
from storystudio.utils import log_io
openai.api_key = app_settings.OPENAI_API_KEY
def generate_srt(audio_path, caption_output_path):
with open(audio_path, "rb") as audio_file:
transcript = openai.Audio.transcribe(
file=audio_file,
model="whisper-1",
response_format="srt",
)
with open(caption_output_path, "w") as caption_file:
# Write the transcript to a file
caption_file.write(transcript)
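# Because response_format="srt" is requested above, the API already returns the
# transcript as SRT-formatted text, so it can be written to the .srt file as-is.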
@log_io
def generate_voice_to_text(voice_output_dir, voice_to_text_output_dir):
os.makedirs(voice_to_text_output_dir, exist_ok=True)
for file in os.listdir(voice_output_dir):
if file.endswith(".wav"):
audio_path = os.path.join(voice_output_dir, file)
caption_output_path = os.path.join(
voice_to_text_output_dir, file.replace(".wav", ".srt")
)
generate_srt(audio_path, caption_output_path)
| [] |
2024-01-10 | kasikasi2014/AINote | project~python~example_chat_competion_use_weather_function.py | import configparser
import openai
from tenacity import retry, wait_random_exponential, stop_after_attempt
GPT_MODEL = "gpt-3.5-turbo"
import os
conf = configparser.ConfigParser()
conf.read('../config.ini')
os.environ["HTTP_PROXY"] = "http://127.0.0.1:1087"
os.environ["HTTPS_PROXY"] = "http://127.0.0.1:1087"
openai.api_key = conf.get("Openai", "api_key") # 在config.ini中配置自己的APIkey
# openai.api_key = os.getenv("OPENAI_API_KEY")
# 请根据自己的需求调整以下参数
model = 'gpt-3.5-turbo'
max_tokens = 50
temperature = 0.2
# 定义一个名为functions的列表,其中包含两个字典,这两个字典分别定义了两个功能的相关参数
# 第一个字典定义了一个名为"get_current_weather"的function
functions = [
{
"name": "get_current_weather", # function的名称
"description": "Get the current weather", # function的描述
"parameters": { # 定义该function需要的参数
"type": "object",
"properties": { # 参数的属性
"location": { # 地点参数
"type": "string", # 参数类型为字符串
"description": "The city and state, e.g. San Francisco, CA", # 参数的描述
},
"format": { # 温度单位参数
"type": "string", # 参数类型为字符串
"enum": ["celsius", "fahrenheit"], # 参数的取值范围
"description": "The temperature unit to use. Infer this from the users location.", # 参数的描述
},
},
"required": ["location", "format"], # 该functions需要的必要参数
},
},
# 第二个字典定义了一个名为"get_n_day_weather_forecast"的function
{
"name": "get_n_day_weather_forecast", # function的名称
"description": "Get an N-day weather forecast", # function的描述
"parameters": { # 定义该function需要的参数
"type": "object",
"properties": { # 参数的属性
"location": { # 地点参数
"type": "string", # 参数类型为字符串
"description": "The city and state, e.g. San Francisco, CA", # 参数的描述
},
"format": { # 温度单位参数
"type": "string", # 参数类型为字符串
"enum": ["celsius", "fahrenheit"], # 参数的取值范围
"description": "The temperature unit to use. Infer this from the users location.", # 参数的描述
},
"num_days": { # 预测天数参数
"type": "integer", # 参数类型为整数
"description": "The number of days to forecast", # 参数的描述
}
},
"required": ["location", "format", "num_days"] # 该function需要的必要参数
},
},
]
import json
import requests
class WeatherApi(object):
"""这是一个对接天气预报的api的类
web url:https://www.apispace.com/
需要注册登陆该网站并获取apikey,截止:2023-07-28日,该网站每一类接口,新用户有200次免费使用权限
包括3个方法:,
1、获取城市代码:get_area_code()
2、获取当前天气信息:get_current_weather()
3、获取最近天气信息:get_n_day_weather_forecast()
"""
headers = {
"X-APISpace-Token": "5wr80dvfoijzgqoi9117xevyo9us15zr",
"Authorization-Type": "apikey"
}
base_url = "https://eolink.o.apispace.com/456456/"
def get_area_code(self, city, area):
url = self.base_url + "function/v001/city"
payload = {"location": city, "items": "1", "area": area, "language": "", "withTz": "", "withPoi": "false"}
response = requests.request("GET", url, params=payload, headers=self.headers)
# print(response.text)
return response.json()["areaList"][0]["areacode"]
def get_current_weather(self, areacode):
url = self.base_url + "weather/v001/now"
payload = {"areacode": areacode}
response = requests.request("GET", url, params=payload, headers=self.headers)
# print(response.text)
return response.text
def get_n_day_weather_forecast(self, areacode, num_days):
url = self.base_url + "weather/v001/day"
payload = {"days": num_days, "areacode": areacode}
response = requests.request("GET", url, params=payload, headers=self.headers)
# print(response.text)
return response.text
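# Editor's note: a minimal usage sketch of calling WeatherApi directly, not part of the
# original file. "北京"/"北京市" are illustrative inputs; the class above already carries
# the author's API token in its headers.
def demo_weather_api():
    w_api = WeatherApi()
    areacode = w_api.get_area_code("北京", "北京市")
    print(w_api.get_current_weather(areacode))
    print(w_api.get_n_day_weather_forecast(areacode, num_days=3))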
def execute_function_call(message):
"""执行函数调用"""
w_api = WeatherApi()
if message["function_call"]["name"] == "get_current_weather":
arguments = json.loads(message["function_call"]["arguments"])
city, area = str(arguments.get("location")).split(', ')
areacode = w_api.get_area_code(city, area)
results = w_api.get_current_weather(areacode)
elif message["function_call"]["name"] == "get_n_day_weather_forecast":
arguments = json.loads(message["function_call"]["arguments"])
city, area = str(arguments.get("location")).split(', ')
num_days = arguments.get("num_days")
areacode = w_api.get_area_code(city, area)
results = w_api.get_n_day_weather_forecast(areacode, num_days)
else:
results = f"Error: function {message['function_call']['name']} does not exist"
return results # 返回结果
# 1.定义一个函数chat_completion_api,通过 OpenAI Python 库调用 Chat Completions API
@retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(3))
def chat_completion_api(messages, functions=None, function_call=None, model=GPT_MODEL):
"""
:param messages:
:param functions:
:param function_call: "none" is the default when no functions are present. "auto" is the default if functions are present.
:param model:
:return:
"""
try:
if functions:
if function_call:
response = openai.ChatCompletion.create(
model=model,
messages=messages,
functions=functions,
function_call=function_call
)
else:
response = openai.ChatCompletion.create(
model=model,
messages=messages,
functions=functions,
# function_call="auto"
)
else:
response = openai.ChatCompletion.create(
model=model,
messages=messages
)
# 解析返回的数据,获取助手的回复消息
assistant_message = response["choices"][0]["message"]
return assistant_message
except Exception as e:
print("Unable to generate ChatCompletion response")
print(f"Exception: {e}")
return e
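# Editor's note: a minimal sketch of a complete function-calling round trip, not part of the
# original file. The example messages are assumptions; it reuses the `functions` schema,
# chat_completion_api() and execute_function_call() defined above, and feeds the tool result
# back to the model as a role="function" message so it can phrase the final answer.
def demo_function_call_round_trip():
    messages = [
        {"role": "system", "content": "Don't make assumptions about what values to plug into functions."},
        {"role": "user", "content": "北京, 北京市今天天气怎么样?"},
    ]
    assistant_message = chat_completion_api(messages, functions)
    if isinstance(assistant_message, dict) and assistant_message.get("function_call"):
        results = execute_function_call(assistant_message)
        messages.append(assistant_message)
        messages.append({
            "role": "function",
            "name": assistant_message["function_call"]["name"],
            "content": results,
        })
        assistant_message = chat_completion_api(messages, functions)
    print(assistant_message)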
def start_chat():
"""
第一次开始会话时:需要给一个role
后面每次会话时将前面的内容加载到message里(后面可以优化,如何控制那些内容需要加载进去)
:return:
"""
messages = []
start = 0
while True:
if not start:
content = input(
"这是一个天气助手,你可以问他:北京,今天天气怎么样?,输入q 并回车结束对话。\nuser: ")
# 使用append方法向messages列表添加一条系统角色的消息
messages.append({
"role": "system", # 消息的角色是"system"
"content": "Don't make assumptions about what values to plug into functions. Ask for clarification if a user request is ambiguous."
})
messages.append({"role": "system", "content": content})
else:
content = input("user: ")
messages.append({"role": "user", "content": content})
if content == 'q':
break
print('message:', messages) # 可以看到每次输入的message
res = chat_completion_api(messages, functions)
# print(f"bot:{res['content']}")
print(f"bot:{res}")
messages.append(res)
start += 1
if __name__ == '__main__':
start_chat()
| [
"Don't make assumptions about what values to plug into functions. Ask for clarification if a user request is ambiguous."
] |
2024-01-10 | kasikasi2014/AINote | project~langchain_autogpt~autogpt.py | import os
import faiss
import gradio as gr
import openai
from langchain import SerpAPIWrapper, FAISS, InMemoryDocstore
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.tools import Tool, WriteFileTool, ReadFileTool
from langchain_experimental.autonomous_agents import AutoGPT
openai.api_key = os.getenv("OPENAI_API_KEY")
class AutoGPTTool:
def __init__(self):
self.search = SerpAPIWrapper()
self.tools = [
Tool(
name="search",
func=self.search.run,
description="useful for when you need to answer questions about current events. You should ask targeted questions",
),
WriteFileTool(),
ReadFileTool(),
]
self.embeddings_model = OpenAIEmbeddings()
self.embedding_size = 1536
self.index = faiss.IndexFlatL2(self.embedding_size)
self.vectorstore = FAISS(
self.embeddings_model.embed_query,
self.index,
InMemoryDocstore({}),
{},
)
self.agent = AutoGPT.from_llm_and_tools(
ai_name="檀小椒",
ai_role="Assistant",
tools=self.tools,
llm=ChatOpenAI(temperature=0),
memory=self.vectorstore.as_retriever(),
)
self.agent.chain.verbose = True
def process_question(self, question):
return self.agent.run([question])
def setup_gradio_interface(self):
iface = gr.Interface(
fn=self.process_question,
inputs=[gr.Textbox(lines=5, label="问题", placeholder="请输入问题...")],
outputs=[gr.Textbox(lines=5, label="答案")],
title="AutoGPT Chat 小助理",
description="我是你的AutoGPT小助理:檀小椒,让我们开始聊天吧~",
theme="soft",
examples=["2023年9月4日北京的天气怎么样?", "2023年大运会举办地在哪?",
"Auto-GPT 是什么?把结果写到autogpt.txt文件中"],
allow_flagging="never"
)
return iface
if __name__ == "__main__":
# 使用示例
autogpt_tool = AutoGPTTool()
gradio_interface = autogpt_tool.setup_gradio_interface()
gradio_interface.launch(share=True, server_name="0.0.0.0")
| [] |
2024-01-10 | kasikasi2014/AINote | project~langchain_openai_translator~ai_translator~translator~translation_chain.py | from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain.llms import ChatGLM
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from utils import LOG
class TranslationChain:
def __init__(self, model_name: str = "gpt-3.5-turbo", verbose: bool = True):
# 翻译任务指令始终由 System 角色承担
template = (
"You are a translation expert, proficient in various languages, \n"
"and proficient in all kinds of style translation. Please use style: '{translation_style}'. \n"
"Translates {source_language} to {target_language}. "
)
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
# 待翻译文本由 Human 角色输入
human_template = "{text}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
# 使用 System 和 Human 角色的提示模板构造 ChatPromptTemplate
chat_prompt_template = ChatPromptTemplate.from_messages(
[system_message_prompt, human_message_prompt]
)
if model_name == "gpt-3.5-turbo":
# 为了翻译结果的稳定性,将 temperature 设置为 0
chat = ChatOpenAI(model_name=model_name, temperature=0, verbose=verbose)
elif model_name == "chat_glm":
endpoint_url = ("http://127.0.0.1:8000") # endpoint_url 填写跑模型的地址
chat = ChatGLM(endpoint_url=endpoint_url, temperature=0, verbose=verbose)
else:
raise Exception(f"This model is not supported. ModelName:{model_name}")
self.chain = LLMChain(llm=chat, prompt=chat_prompt_template, verbose=verbose)
def run(self, text: str, source_language: str, target_language: str, translation_style: str) -> (str, bool):
result = ""
try:
result = self.chain.run({
"text": text,
"source_language": source_language,
"target_language": target_language,
"translation_style": translation_style,
})
except Exception as e:
LOG.error(f"An error occurred during translation: {e}")
return result, False
        return result, True
 | [
"You are a translation expert, proficient in various languages, \nand proficient in all kinds of style translation. Please use style: '{translation_style}'. \nTranslates {source_language} to {target_language}. ",
"[PLACEHOLDER, PLACEHOLDER]",
"{text}"
] |
2024-01-10 | kasikasi2014/AINote | project~langchain_sales_chatbot~datas_vectorization.py | import openai
import pandas as pd
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.text_splitter import CharacterTextSplitter
import configparser, os
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
if not OPENAI_API_KEY:
conf = configparser.ConfigParser()
current_directory = os.path.dirname(os.path.realpath('__file__'))
config_file_path = os.path.join(current_directory, '..', '..', 'config.ini')
conf.read(config_file_path)
api_key = conf.get("Openai", "api_key") # 在config.ini中配置自己的APIkey
os.environ['OPENAI_API_KEY'] = api_key
chat_model = "gpt-3.5-turbo"
def read_file(file_name):
"""读取文本"""
with open(file_name) as f:
file_datas = f.read()
return file_datas
def split_datas(file_datas, split_str=r'\n\n---\n\n'):
"""文本拆分"""
text_splitter = CharacterTextSplitter(
separator=split_str,
chunk_size=150,
chunk_overlap=0,
length_function=len,
is_separator_regex=True,
)
docs = text_splitter.create_documents([file_datas])
print(docs[1])
return docs
def datas_to_embeddings(docs, db_name):
"""使用 Faiss 作为向量数据库,持久化存储房产销售 问答对(QA-Pair)"""
try:
db = FAISS.from_documents(docs, OpenAIEmbeddings())
if not os.path.exists(db_name): # 向量数据库文件不存在就创建并保存
db.save_local(db_name)
else:
old_db = FAISS.load_local(db_name, OpenAIEmbeddings()) # 向量数据库文件存在就添加并保存
old_db.merge_from(db)
old_db.save_local(db_name)
return True
except Exception as e:
raise e
def query_db(db_name, query):
"""使用 Faiss 作为向量数据库,去向量数据里做内容检索"""
db = FAISS.load_local(db_name, OpenAIEmbeddings())
retriever = db.as_retriever(
search_type="similarity_score_threshold",
search_kwargs={"score_threshold": 0.8, 'k': 1, "fetch_k": 2} # 按相关性去查找, k默认返回几条
)
docs = retriever.get_relevant_documents(query)
if docs:
for d in docs:
return d.page_content.split(":**")[-1]
else:
return "没有符合条件的回答"
def main(file_name, db_name, split_str):
"""
主函数:读取文本、分割文本、向量化、存到向量数据库
"""
file_datas = read_file(file_name)
docs = split_datas(file_datas, split_str)
res = datas_to_embeddings(docs, db_name)
return res
if __name__ == "__main__":
# 房地产销售问答数据
# file_name = "files/real_estate_sales_data_1.txt"
# db_name = "real_estate_sales"
# split_str = r'\n\n---\n\n'
# 苹果销售问答数据
file_name = "files/apple_Q&A_datas_3.txt"
db_name = "apple_Q&A_datas_embedding"
split_str = r'\d+\.'
# query = "iphone最新型号有哪些?"
# query = "北京今天天气怎么样?"
# query = "2023年iphone什么时候发布?"
query = "2023年iPhone都发布什么型号?"
# query = "北京朝阳区的苹果实体店有哪些?"
# query = "四川省的省会是哪里?"
# query = "周边的交通如何?"
# result = main(file_name, db_name, split_str) # 读取文本、分割文本、向量化、存到向量数据库
# db = FAISS.load_local(db_name, OpenAIEmbeddings())
# q = "2023年iphone什么时候发布"
# a = "2023年iPhone将于9月13日发布。"
# qa = f"[客户问题] {q}\n[销售回答] {a}"
# db.add_texts([qa])
# db.save_local(folder_path=db_name)
result = query_db(db_name, query) # 从向量数据库检索
print(result)
| [] |
2024-01-10 | kasikasi2014/AINote | project~langchain_sales_chatbot~sales_chatbot.py | import os
import gradio as gr
import configparser
from langchain import (FAISS, SerpAPIWrapper, OpenAI, PromptTemplate, LLMChain)
from langchain.agents import initialize_agent, AgentType
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.output_parsers import RegexParser
from langchain.tools import Tool
from utils.logger import LOG
from utils.argument_parser import ArgumentParser
from utils.sales_confg import SalesConfig
conf = configparser.ConfigParser()
current_directory = os.path.dirname(os.path.realpath('__file__'))
config_file_path = os.path.join(current_directory, '..', '..', 'config.ini')
conf.read(config_file_path)
if not os.getenv("OPENAI_API_KEY"):
os.environ['OPENAI_API_KEY'] = conf.get("Openai", "api_key") # 在config.ini中配置自己的APIkey
if not os.getenv("SERPAPI_API_KEY"):
os.environ['SERPAPI_API_KEY'] = conf.get("SerpApi", "serp_apikey") # 在config.ini中配置自己的serp_apikey
class SalesChatbot(object):
chat_model = "gpt-3.5-turbo"
text_model = "text-davinci-003"
def __init__(self):
self.llm = ChatOpenAI(model_name=self.chat_model, temperature=0)
self.db = FAISS.load_local(folder_path=config.db_name, embeddings=OpenAIEmbeddings())
def initialize_sales_bot(self):
"""使用 Faiss 作为向量数据库,去向量数据里做内容检索"""
global SALES_BOT
retriever = self.db.as_retriever(
search_type="similarity_score_threshold",
search_kwargs={"score_threshold": 0.8, 'k': 1} # 按相关性去查找, k默认返回几条
)
SALES_BOT = RetrievalQA.from_chain_type(self.llm, retriever=retriever)
# 返回向量数据库的检索结果
SALES_BOT.return_source_documents = True
return SALES_BOT
def add_qa(self, q: str, a: str):
"""加到向量数据库"""
qa = f"[客户问题] {q}\n[销售回答] {a}"
self.db.add_texts([qa])
self.db.save_local(folder_path=config.db_name)
LOG.info(f"加到向量数据库:问题:{q},结果:{a}")
def use_search_api(self, query: str):
"""
use SerpAPI
"""
prompt = " 用中文回答我。"
query += prompt
print(query)
search = SerpAPIWrapper()
tools = [Tool(name="Intermediate Answer",
func=search.run,
description="当你需要通过搜索来询问时很有用"),
]
agent = initialize_agent(
tools, self.llm, agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True
)
res = agent.run(query)
LOG.info(f"使用搜索引擎:问题:{query},结果:{res}")
return res
def check_topic_relevance(self, q: str, a: str):
"""检查话题相关性"""
role_prompt = config.role_prompt
check_template = PromptTemplate.from_template(
f"{role_prompt}"
"下面是一段Q&A问答对:\n"
"问题:{question}\n"
"答案:{answer}\n"
"判断Q&A问答对是否与你的角色强相关,如果是,则回答:是,否则回答:不是"
)
print(check_template)
output_parser = RegexParser(regex="([\u4e00-\u9fa5]+)", output_keys=['keys'])
checkLLMChain = LLMChain(llm=self.llm, prompt=check_template, output_parser=output_parser)
result = checkLLMChain.run({"question": q, "answer": a})
LOG.info(f"检查话题相关性:问题:{q},结果:{a}")
return result
def check_sure_answer(self, q: str, a: str):
"""
q:问题
a:答案
"""
check_template = PromptTemplate.from_template(
"这是一段Q&A问答对:\n"
"问题:{question}\n"
"答案:{answer}\n"
"判断答案是否是不确定的,如果是不确定,则回答:'不确定',否则回答:'确定'。"
)
output_parser = RegexParser(regex="([\u4e00-\u9fa5]+)", output_keys=['keys'])
checkLLMChain = LLMChain(llm=self.llm, prompt=check_template, output_parser=output_parser)
result = checkLLMChain.run({"question": q, "answer": a})
LOG.info(f"检查Q&A确定性:确定性:{result['keys']}|问题:{q}|答案:{a}")
return result
def sales_boot(self, message: str, history: list):
# print(f"[message]{message}")
print(f"[history]{history}{type(history)}")
# TODO 加一个功能,判断问题是否在历史记录里,如果在则从历史记录里提取
# TODO 加一个功能,维护有用的相关的历史历史记录
ans = SALES_BOT({"query": message})
# 如果检索出结果,或者开了大模型聊天模式,返回 RetrievalQA combine_documents_chain 整合的结果
if ans["source_documents"] or config.enable_chat:
result = ans['result']
LOG.info(f"向量数据库搜索结果:{result}")
check_answer = self.check_sure_answer(message, result) # 功能:检查RetrievalQA检索结果|大语言模型的回答 是否满足问题
if check_answer.get("keys") == "不确定":
search_api_answer = self.use_search_api(message) # 功能:答案不确定时,使用搜索引擎
# TODO 搜索结果的正确的待验证
relevance_answer = self.check_topic_relevance(message, search_api_answer) # 功能:搜索结果如果跟本小助手有高相关性 则将Q&A加到向量数据库里
if relevance_answer.get("keys") == "是":
self.add_qa(message, search_api_answer)
return search_api_answer
else:
return ans["result"]
# 否则输出套路话术
else:
LOG.info("不使用大模型聊天模式~")
return f"作为一个{config.title},我暂时不知道这个问题的答案,但是我会继续努力学习的~"
def launch_gradio(self, config: SalesConfig):
demo = gr.ChatInterface(
fn=self.sales_boot,
title=config.title,
description=config.description,
examples=config.examples,
# retry_btn=None,
# undo_btn=None,
chatbot=gr.Chatbot(height=600),
)
demo.launch(share=True, server_name="0.0.0.0")
if __name__ == "__main__":
# 解析命令行
argument_parser = ArgumentParser()
args = argument_parser.parse_arguments()
# 初始化配置单例
config = SalesConfig()
config.initialize(args)
sales_bot = SalesChatbot()
sales_bot.initialize_sales_bot()
sales_bot.launch_gradio(config)
# print(sales_bot.search_api("2023年iphone发布会时间是什么时候?"))
# print(sales_bot.check_topic_relevance("2023年iphone发布会时间是什么时候?", "2023年iPhone将于9月13日发布。"))
| [
"下面是一段Q&A问答对:\n",
"这是一段Q&A问答对:\n问题:{question}\n答案:{answer}\n判断答案是否是不确定的,如果是不确定,则回答:'不确定',否则回答:'确定'。",
"判断答案是否是不确定的,如果是不确定,则回答:'不确定',否则回答:'确定'。",
"PLACEHOLDER",
"问题:{question}\n",
"答案:{answer}\n",
"这是一段Q&A问答对:\n",
" 用中文回答我。",
"判断Q&A问答对是否与你的角色强相关,如果是,则回答:是,否则回答:不是",
"PLACEHOLDER下面是一段Q&A问答对:\n问题:{question}\n答案:{answer}\n判断Q&A问答对是否与你的角色强相关,如果是,则回答:是,否则回答:不是"
] |
2024-01-10 | Bradybry/GPT_perf | expert.py | import re
import json
from langchain.chat_models import ChatOpenAI
from langchain.llms import Anthropic
from langchain.schema import HumanMessage, SystemMessage
from config import OPENAI_API_KEY, ANTHROPIC_API_KEY #Import API Keys stored in a separate file. You can do this with envionrment variables as well.
import datetime
from pathlib import Path
# At the moment langchain API wrappers are needed due to the separation of chat models and language models. These wrappers allow us to use the same interface for both.
# Class to communicate with OpenAI for generating responses. Wrapped around the langchain wrappers
class OpenAIModel():
def __init__(self, openai_api_key, **model_params):
"""Initialize the OpenAI chat model.
Parameters:
openai_api_key (str): API key to access OpenAI API
model_params (dict): Parameters to configure the model like temperature, n, etc.
"""
self.chat = ChatOpenAI(openai_api_key=openai_api_key, **model_params, request_timeout=120)
def __call__(self, request_messages):
return self.chat(request_messages).content
def bulk_generate(self, message_list):
return self.chat.generate(message_list)
# Class to communicate with claude-v1.3 for generating responses. Wrapped around the langchain wrappers
class AnthropicModel():
def __init__(self, anthropic_api_key, **model_params):
"""Initialize the Anthropic chat model.
Parameters:
anthropic_api_key (str): API key to access Anthropic API
model_params (dict): Parameters to configure the model like model_name, max_tokens, etc.
"""
self.chat = Anthropic(model=model_params['model_name'],temperature=model_params['temperature'], max_tokens_to_sample=model_params['max_tokens'], anthropic_api_key=anthropic_api_key)
def __call__(self, request_messages):
# Convert request_messages into a single string to be used as preamble
# This is a hacky solution to the fact that the langchain wrapper expects a single string as input.
        # But the performance is actually really good, especially with the XML formatting method.
message = "\n\n".join([message.content for message in request_messages])
return self.chat(message)
def bulk_generate(self, message_list):
new_message_list = []
for request_messages in message_list:
new_message = "\n".join([message.content for message in request_messages])
new_message_list.append(new_message)
return self.chat.generate(new_message_list)
class LanguageExpert:
"""Defines an AI assistant/expert for natural language generation.
Attributes:
name (str): Name of the expert
system_message (str): Expert's initial greeting message
description (str): Description of the expert's abilities
example_input (str): Sample user input the expert can handle
example_output (str): Expert's response to the sample input
model_params (dict): Parameters to configure the language model
"""
def __init__(self, name: str, system_message=None, description=None,
example_input=None, example_output=None, model_params=None):
## Initialize expert attributes##
self.name = name
self.system_message = system_message
self.description = description
self.example_input = example_input
self.example_output = example_output
##Set default model parameters if none provided##
if model_params is None:
model_params = {"model_name": "claude-v1.3", "temperature": 0.00,
"frequency_penalty": 1.0, "presence_penalty": 0.5,
"n": 1, "max_tokens": 512}
self.model_params = model_params
self.gen_chat() #Generate the chat object to get model-specific responses
def serialize(self):
"""Returns a JSON-serializable representation of the expert.
Returns:
dict: Contains all expert attributes.
"""
return {
"name": self.name,
"system_message": self.system_message,
"description": self.description,
"example_input": self.example_input,
"example_output": self.example_output,
"model_params": self.model_params
}
def get_content(self):
"""Returns the expert definition in an fake XML format.
Returns:
SystemMessage: Expert definition wrapped in XML tags.
"""
example_output = self.example_output
example_input = self.example_input
content = '<assistant_definition>\n'
if self.name:
content += f'<name>{self.name}</name>\n'
if self.description:
content += f'<role>{self.description}</role>\n'
if self.system_message:
content += f'<system_message>{self.system_message}</system_message>\n'
if example_input:
content += f'<example_input>{example_input}</example_input>\n'
if example_output:
content += f'<example_output>{example_output}</example_output>\n'
content += '</assistant_definition>'
content = SystemMessage(content=content)
return content
def generate(self, message):
"""Generates a response to the input message.
Passes the input through the chat model and returns its response.
Parameters:
message (str): User's input message
Returns:
response (str): expert's response to the message
"""
human_message = HumanMessage(content=message)
request_message = [self.get_content(), human_message]
response = self.chat(request_message)
self.log([message], [response])
return response
def log(self, requests, responses):
"""Logs a conversation between the user and the expert.
Parameters:
requests (list): List of user requests/messages
responses (list): List of expert responses
"""
now = datetime.datetime.now()
filename = Path(f'./logs/{now.strftime("%Y-%m-%d_%H-%M-%S")}_{self.name}.txt')
filename.parent.mkdir(parents=True, exist_ok=True)
log = f'Expert Name: {self.name}\n\nRequests:\n'
for request in requests:
log += f'{request}\n\n'
log += 'Responses:\n'
for response in responses:
log += f'{response}\n\n'
with open(filename, 'w', encoding='utf-8') as f:
f.write(log)
def extract_texts_from_generations(self, generations):
"""Extracts plain text responses from a list of generated responses.
Parameters:
generations (list): List of generated responses from the model
Returns:
list: List of plain text responses
"""
return [generation[0].text for generation in generations]
def bulk_generate(self, messages:list):
"""Generates responses for multiple input messages.
Parameters:
messages (list): List of user input messages
Returns:
responses (list): List of corresponding expert responses
"""
human_messages = [HumanMessage(content=message) for message in messages]
request_messages = [[self.get_content(), human_message] for human_message in human_messages]
responses = self.chat.bulk_generate(request_messages)
responses = self.extract_texts_from_generations(responses.generations)
self.log(messages, responses)
return responses
def __call__(self, message:str):
"""Allows the expert to be called like a function.
Invokes the generate() method.
"""
return self.generate(message)
def change_param(self, parameter_name, new_value):
"""Changes a expert definition parameter to a new value.
Updates the internal model_params dictionary and regenerates
the chat object.
Parameters:
parameter_name (str): Name of the parameter to change
new_value: New value for the parameter
"""
if parameter_name in ["model_name", "temperature", "frequency_penalty", "presence_penalty", "n", "max_tokens"]:
self.__dict__["model_params"][parameter_name] = new_value
else:
self.__dict__[parameter_name] = new_value
self.gen_chat()
def gen_chat(self):
"""Instantiates the chat object used to generate responses.
The chat object is either an AnthropicModel or OpenAIModel, depending
on the model_name parameter.
"""
if 'gpt' in self.model_params["model_name"]:
self.chat = OpenAIModel(openai_api_key=OPENAI_API_KEY, **self.model_params)
elif 'claude' in self.model_params["model_name"]:
self.chat = AnthropicModel(anthropic_api_key=ANTHROPIC_API_KEY, **self.model_params)
else:
            raise ValueError(f'Model not supported: {self.model_params["model_name"]}')
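# Editor's note: a minimal usage sketch, not part of the original file. The name, description,
# system_message, prompt text and model_params values are illustrative (model_params mirrors the
# class defaults with the model switched to gpt-3.5-turbo so OpenAIModel is selected).
def demo_language_expert():
    summarizer = LanguageExpert(
        name="Summarizer",
        description="Condenses technical text into a two-sentence summary.",
        system_message="Summarize the user's text in at most two sentences.",
        model_params={"model_name": "gpt-3.5-turbo", "temperature": 0.0,
                      "frequency_penalty": 1.0, "presence_penalty": 0.5,
                      "n": 1, "max_tokens": 256},
    )
    return summarizer("LangChain exposes chat and completion models behind one shared interface.")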
| [] |
2024-01-10 | milestoneTwo/milestone2App | m2lib~utils~analyze.py | from gensim.models.coherencemodel import CoherenceModel
from m2lib.pickler.picklable import PickleDef, Picklable
from m2lib.model.gsdmmSttmModel import GSDMM, GSDMMModelStore
from m2lib.model.ldaModel import LDAModel, LDATFIDModel, LDATFIDModelStore, LDAModelStore
from m2lib.pickler.picklable import Picklable, PickleDef
from m2lib.featureizers.preprocessor import Preprocessor, PreprocessorStore
from m2lib.readers.readdata import Read
from m2lib.featureizers.bowfeature import BOWFeature, BOWFeatureStore
import pyLDAvis
import pyLDAvis.gensim
from configurations import HTML_DIR
import time
import os
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
class TopicsCharting(Picklable):
def __init__(self):
self.metricsStore = None
pd = PickleDef(self)
self.pickle_kwargs = pd()
super().__init__(**self.pickle_kwargs)
def pipeline(self, K_ = [1,2,3,4,5,6,7,8,9,10,15,20], p= 0.2):
# setup data
prepro = PreprocessorStore()
corpus = prepro.corpus_
_, corpus_ = train_test_split(corpus, test_size=p, random_state=42)
bow = BOWFeature()
bow.pipeline(corpus_)
train_corpus = bow.corpus_
train_dict = bow.dictionary
# classes to build metrics and store
metrics = Metrics()
metricsStore = MetricsStore()
for K in tqdm(K_):
# model setup
ldaModel = LDAModel()
ldaModel.lda_args['num_topics'] = K
ldaModel.train_model(train_corpus, train_dict)
# get Metrics and store them
metrics.pipeline(ldaModel.model, bow.corpus_, K)
metricsStore.coherence.append(metrics.coherence)
metricsStore.perplexity.append(metrics.perplexity)
metricsStore.K.append(K)
metricsStore.save()
self.metricsStore = metricsStore
self.save()
return metricsStore
def make_visualization(self):
fig, ax = plt.subplots(2,1)
x = self.metricsStore.K
y = self.metricsStore.coherence
y2 = self.metricsStore.perplexity
fig.suptitle('Coherence and Perplexity Against Topics')
ax[0].plot(x, y)
ax[1].plot(x, y2)
ax[0].set_title('Coherence')
ax[1].set_title('Perplexity')
ax[0].set(xlabel='K Topics', ylabel='Coherence')
ax[1].set(xlabel='K Topics', ylabel='Perplexity')
plt.show()
def save(self):
super().save()
def load(self):
super().load()
class MetricsStore(Picklable):
def __init__(self):
self.coherence = []
self.perplexity = []
self.K = []
pd = PickleDef(self)
self.pickle_kwargs = pd()
super().__init__(**self.pickle_kwargs)
def save(self):
super().save()
def load(self):
super().load()
class Metrics(Picklable):
def __init__(self):
self.K = None
self.coherence = None
self.perplexity = None
pd = PickleDef(self)
self.pickle_kwargs = pd()
super().__init__(**self.pickle_kwargs)
def pipeline(self, model, corpus, K):
# calc coherence
cm = CoherenceModel(model=model, corpus=corpus, coherence='u_mass')
coherence = cm.get_coherence()
self.coherence = coherence
# calc perplexity
perplexity = model.log_perplexity(corpus)
self.perplexity = perplexity
self.K = K
def save(self):
super().save()
def load(self):
super().load()
if __name__ == '__main__':
# ldaStore = LDAModelStore()
# bowStore = BOWFeatureStore()
# metrics = Metrics()
# metricsStore = MetricsStore()
# metrics.pipeline(ldaStore.model, bowStore.corpus_, 10)
# metricsStore.coherence.append(metrics.coherence)
# metricsStore.perplexity.append(metrics.perplexity)
# metricsStore.K.append(10)
# metricsStore.save()
topicsChart = TopicsCharting()
topicsChart.pipeline()
topicsChart.make_visualization()
| [] |
2024-01-10 | Aminolroaya/h2ogpt | src~gpt_langchain.py | import ast
import glob
import inspect
import os
import pathlib
import pickle
import shutil
import subprocess
import tempfile
import time
import traceback
import types
import uuid
import zipfile
from collections import defaultdict
from datetime import datetime
from functools import reduce
from operator import concat
import filelock
from joblib import delayed
from langchain.callbacks import streaming_stdout
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.schema import LLMResult
from tqdm import tqdm
from enums import DocumentSubset, no_lora_str, model_token_mapping, source_prefix, source_postfix, non_query_commands, \
LangChainAction, LangChainMode, DocumentChoice
from evaluate_params import gen_hyper
from gen import get_model, SEED
from prompter import non_hf_types, PromptType, Prompter
from utils import wrapped_partial, EThread, import_matplotlib, sanitize_filename, makedirs, get_url, flatten_list, \
get_device, ProgressParallel, remove, hash_file, clear_torch_cache, NullContext, get_hf_server, FakeTokenizer, \
have_libreoffice, have_arxiv, have_playwright, have_selenium, have_tesseract, have_pymupdf, set_openai
from utils_langchain import StreamingGradioCallbackHandler
import_matplotlib()
import numpy as np
import pandas as pd
import requests
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
# , GCSDirectoryLoader, GCSFileLoader
# , OutlookMessageLoader # GPL3
# ImageCaptionLoader, # use our own wrapper
# ReadTheDocsLoader, # no special file, some path, so have to give as special option
from langchain.document_loaders import PyPDFLoader, TextLoader, CSVLoader, PythonLoader, TomlLoader, \
UnstructuredURLLoader, UnstructuredHTMLLoader, UnstructuredWordDocumentLoader, UnstructuredMarkdownLoader, \
EverNoteLoader, UnstructuredEmailLoader, UnstructuredODTLoader, UnstructuredPowerPointLoader, \
UnstructuredEPubLoader, UnstructuredImageLoader, UnstructuredRTFLoader, ArxivLoader, UnstructuredPDFLoader, \
UnstructuredExcelLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter, Language
from langchain.chains.question_answering import load_qa_chain
from langchain.docstore.document import Document
from langchain import PromptTemplate, HuggingFaceTextGenInference
from langchain.vectorstores import Chroma
def get_db(sources, use_openai_embedding=False, db_type='faiss',
persist_directory="db_dir", load_db_if_exists=True,
langchain_mode='notset',
collection_name=None,
hf_embedding_model="sentence-transformers/all-MiniLM-L6-v2"):
if not sources:
return None
# get embedding model
embedding = get_embedding(use_openai_embedding, hf_embedding_model=hf_embedding_model)
assert collection_name is not None or langchain_mode != 'notset'
if collection_name is None:
collection_name = langchain_mode.replace(' ', '_')
# Create vector database
if db_type == 'faiss':
from langchain.vectorstores import FAISS
db = FAISS.from_documents(sources, embedding)
elif db_type == 'weaviate':
import weaviate
from weaviate.embedded import EmbeddedOptions
from langchain.vectorstores import Weaviate
if os.getenv('WEAVIATE_URL', None):
client = _create_local_weaviate_client()
else:
client = weaviate.Client(
embedded_options=EmbeddedOptions()
)
index_name = collection_name.capitalize()
db = Weaviate.from_documents(documents=sources, embedding=embedding, client=client, by_text=False,
index_name=index_name)
elif db_type == 'chroma':
assert persist_directory is not None
makedirs(persist_directory, exist_ok=True)
# see if already actually have persistent db, and deal with possible changes in embedding
db = get_existing_db(None, persist_directory, load_db_if_exists, db_type, use_openai_embedding, langchain_mode,
hf_embedding_model, verbose=False)
if db is None:
from chromadb.config import Settings
client_settings = Settings(anonymized_telemetry=False,
chroma_db_impl="duckdb+parquet",
persist_directory=persist_directory)
db = Chroma.from_documents(documents=sources,
embedding=embedding,
persist_directory=persist_directory,
collection_name=collection_name,
client_settings=client_settings)
db.persist()
clear_embedding(db)
save_embed(db, use_openai_embedding, hf_embedding_model)
else:
# then just add
db, num_new_sources, new_sources_metadata = add_to_db(db, sources, db_type=db_type,
use_openai_embedding=use_openai_embedding,
hf_embedding_model=hf_embedding_model)
else:
raise RuntimeError("No such db_type=%s" % db_type)
return db
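# Editor's note: a minimal usage sketch, not part of the original file. The document text,
# persist directory and collection name are assumptions; it builds a small Chroma collection
# with the default HF embedding model and runs one similarity search against it.
def _example_build_chroma_db():
    example_sources = [Document(page_content="h2oGPT is an open-source generative AI project.",
                                metadata={"source": "user_paste/example.txt"})]
    db = get_db(example_sources, use_openai_embedding=False, db_type='chroma',
                persist_directory='db_dir_example', collection_name='example',
                hf_embedding_model="sentence-transformers/all-MiniLM-L6-v2")
    return db.similarity_search("What is h2oGPT?", k=1)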
def _get_unique_sources_in_weaviate(db):
batch_size = 100
id_source_list = []
result = db._client.data_object.get(class_name=db._index_name, limit=batch_size)
while result['objects']:
id_source_list += [(obj['id'], obj['properties']['source']) for obj in result['objects']]
last_id = id_source_list[-1][0]
result = db._client.data_object.get(class_name=db._index_name, limit=batch_size, after=last_id)
unique_sources = {source for _, source in id_source_list}
return unique_sources
def add_to_db(db, sources, db_type='faiss',
avoid_dup_by_file=False,
avoid_dup_by_content=True,
use_openai_embedding=False,
hf_embedding_model=None):
assert hf_embedding_model is not None
num_new_sources = len(sources)
if not sources:
return db, num_new_sources, []
if db_type == 'faiss':
db.add_documents(sources)
elif db_type == 'weaviate':
# FIXME: only control by file name, not hash yet
if avoid_dup_by_file or avoid_dup_by_content:
unique_sources = _get_unique_sources_in_weaviate(db)
sources = [x for x in sources if x.metadata['source'] not in unique_sources]
num_new_sources = len(sources)
if num_new_sources == 0:
return db, num_new_sources, []
db.add_documents(documents=sources)
elif db_type == 'chroma':
collection = get_documents(db)
# files we already have:
metadata_files = set([x['source'] for x in collection['metadatas']])
if avoid_dup_by_file:
# Too weak in case file changed content, assume parent shouldn't pass true for this for now
raise RuntimeError("Not desired code path")
sources = [x for x in sources if x.metadata['source'] not in metadata_files]
if avoid_dup_by_content:
# look at hash, instead of page_content
# migration: If no hash previously, avoid updating,
# since don't know if need to update and may be expensive to redo all unhashed files
metadata_hash_ids = set(
[x['hashid'] for x in collection['metadatas'] if 'hashid' in x and x['hashid'] not in ["None", None]])
# avoid sources with same hash
sources = [x for x in sources if x.metadata.get('hashid') not in metadata_hash_ids]
num_nohash = len([x for x in sources if not x.metadata.get('hashid')])
print("Found %s new sources (%d have no hash in original source,"
" so have to reprocess for migration to sources with hash)" % (len(sources), num_nohash), flush=True)
# get new file names that match existing file names. delete existing files we are overridding
dup_metadata_files = set([x.metadata['source'] for x in sources if x.metadata['source'] in metadata_files])
print("Removing %s duplicate files from db because ingesting those as new documents" % len(
dup_metadata_files), flush=True)
client_collection = db._client.get_collection(name=db._collection.name,
embedding_function=db._collection._embedding_function)
for dup_file in dup_metadata_files:
dup_file_meta = dict(source=dup_file)
try:
client_collection.delete(where=dup_file_meta)
except KeyError:
pass
num_new_sources = len(sources)
if num_new_sources == 0:
return db, num_new_sources, []
db.add_documents(documents=sources)
db.persist()
clear_embedding(db)
save_embed(db, use_openai_embedding, hf_embedding_model)
else:
raise RuntimeError("No such db_type=%s" % db_type)
new_sources_metadata = [x.metadata for x in sources]
return db, num_new_sources, new_sources_metadata
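# Editor's note: a minimal usage sketch, not part of the original file. It assumes `db` is an
# already-built Chroma instance (e.g. from get_db above); the document text and 'hashid' value
# are illustrative, and the hashid is what the content-based dedup above keys on.
def _example_append_to_db(db):
    new_docs = [Document(page_content="A new page of text to index.",
                         metadata={"source": "user_paste/new.txt", "hashid": "example-hash-1"})]
    db, num_new_sources, new_sources_metadata = add_to_db(db, new_docs, db_type='chroma',
                                                          use_openai_embedding=False,
                                                          hf_embedding_model="sentence-transformers/all-MiniLM-L6-v2")
    return num_new_sources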
def create_or_update_db(db_type, persist_directory, collection_name,
sources, use_openai_embedding, add_if_exists, verbose, hf_embedding_model):
if db_type == 'weaviate':
import weaviate
from weaviate.embedded import EmbeddedOptions
if os.getenv('WEAVIATE_URL', None):
client = _create_local_weaviate_client()
else:
client = weaviate.Client(
embedded_options=EmbeddedOptions()
)
index_name = collection_name.replace(' ', '_').capitalize()
if client.schema.exists(index_name) and not add_if_exists:
client.schema.delete_class(index_name)
if verbose:
print("Removing %s" % index_name, flush=True)
elif db_type == 'chroma':
if not os.path.isdir(persist_directory) or not add_if_exists:
if os.path.isdir(persist_directory):
if verbose:
print("Removing %s" % persist_directory, flush=True)
remove(persist_directory)
if verbose:
print("Generating db", flush=True)
if not add_if_exists:
if verbose:
print("Generating db", flush=True)
else:
if verbose:
print("Loading and updating db", flush=True)
db = get_db(sources,
use_openai_embedding=use_openai_embedding,
db_type=db_type,
persist_directory=persist_directory,
langchain_mode=collection_name,
hf_embedding_model=hf_embedding_model)
return db
def get_embedding(use_openai_embedding, hf_embedding_model="sentence-transformers/all-MiniLM-L6-v2"):
# Get embedding model
if use_openai_embedding:
assert os.getenv("OPENAI_API_KEY") is not None, "Set ENV OPENAI_API_KEY"
from langchain.embeddings import OpenAIEmbeddings
embedding = OpenAIEmbeddings(disallowed_special=())
else:
# to ensure can fork without deadlock
from langchain.embeddings import HuggingFaceEmbeddings
device, torch_dtype, context_class = get_device_dtype()
model_kwargs = dict(device=device)
if 'instructor' in hf_embedding_model:
encode_kwargs = {'normalize_embeddings': True}
embedding = HuggingFaceInstructEmbeddings(model_name=hf_embedding_model,
model_kwargs=model_kwargs,
encode_kwargs=encode_kwargs)
else:
embedding = HuggingFaceEmbeddings(model_name=hf_embedding_model, model_kwargs=model_kwargs)
return embedding
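# Editor's note: a minimal usage sketch, not part of the original file. The two sentences are
# illustrative; with the default MiniLM model this returns one 384-dimensional vector per text.
def _example_embedding():
    embedding = get_embedding(use_openai_embedding=False,
                              hf_embedding_model="sentence-transformers/all-MiniLM-L6-v2")
    vectors = embedding.embed_documents(["first example sentence", "second example sentence"])
    return len(vectors), len(vectors[0])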
def get_answer_from_sources(chain, sources, question):
return chain(
{
"input_documents": sources,
"question": question,
},
return_only_outputs=True,
)["output_text"]
"""Wrapper around Huggingface text generation inference API."""
from functools import partial
from typing import Any, Dict, List, Optional, Set
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun, Callbacks
from langchain.llms.base import LLM
class GradioInference(LLM):
"""
Gradio generation inference API.
"""
inference_server_url: str = ""
temperature: float = 0.8
top_p: Optional[float] = 0.95
top_k: Optional[int] = None
num_beams: Optional[int] = 1
max_new_tokens: int = 512
min_new_tokens: int = 1
early_stopping: bool = False
max_time: int = 180
repetition_penalty: Optional[float] = None
num_return_sequences: Optional[int] = 1
do_sample: bool = False
chat_client: bool = False
return_full_text: bool = True
stream: bool = False
sanitize_bot_response: bool = False
prompter: Any = None
context: Any = ''
iinput: Any = ''
client: Any = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that python package exists in environment."""
try:
if values['client'] is None:
import gradio_client
values["client"] = gradio_client.Client(
values["inference_server_url"]
)
except ImportError:
raise ImportError(
"Could not import gradio_client python package. "
"Please install it with `pip install gradio_client`."
)
return values
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "gradio_inference"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
# NOTE: prompt here has no prompt_type (e.g. human: bot:) prompt injection,
# so server should get prompt_type or '', not plain
# This is good, so gradio server can also handle stopping.py conditions
# this is different than TGI server that uses prompter to inject prompt_type prompting
stream_output = self.stream
gr_client = self.client
client_langchain_mode = 'Disabled'
client_add_chat_history_to_context = True
client_langchain_action = LangChainAction.QUERY.value
client_langchain_agents = []
top_k_docs = 1
chunk = True
chunk_size = 512
client_kwargs = dict(instruction=prompt if self.chat_client else '', # only for chat=True
iinput=self.iinput if self.chat_client else '', # only for chat=True
context=self.context,
# streaming output is supported, loops over and outputs each generation in streaming mode
# but leave stream_output=False for simple input/output mode
stream_output=stream_output,
prompt_type=self.prompter.prompt_type,
prompt_dict='',
temperature=self.temperature,
top_p=self.top_p,
top_k=self.top_k,
num_beams=self.num_beams,
max_new_tokens=self.max_new_tokens,
min_new_tokens=self.min_new_tokens,
early_stopping=self.early_stopping,
max_time=self.max_time,
repetition_penalty=self.repetition_penalty,
num_return_sequences=self.num_return_sequences,
do_sample=self.do_sample,
chat=self.chat_client,
instruction_nochat=prompt if not self.chat_client else '',
iinput_nochat=self.iinput if not self.chat_client else '',
langchain_mode=client_langchain_mode,
add_chat_history_to_context=client_add_chat_history_to_context,
langchain_action=client_langchain_action,
langchain_agents=client_langchain_agents,
top_k_docs=top_k_docs,
chunk=chunk,
chunk_size=chunk_size,
document_subset=DocumentSubset.Relevant.name,
document_choice=[DocumentChoice.ALL.value],
)
api_name = '/submit_nochat_api' # NOTE: like submit_nochat but stable API for string dict passing
if not stream_output:
res = gr_client.predict(str(dict(client_kwargs)), api_name=api_name)
res_dict = ast.literal_eval(res)
text = res_dict['response']
return self.prompter.get_response(prompt + text, prompt=prompt,
sanitize_bot_response=self.sanitize_bot_response)
else:
text_callback = None
if run_manager:
text_callback = partial(
run_manager.on_llm_new_token, verbose=self.verbose
)
job = gr_client.submit(str(dict(client_kwargs)), api_name=api_name)
text0 = ''
while not job.done():
outputs_list = job.communicator.job.outputs
if outputs_list:
res = job.communicator.job.outputs[-1]
res_dict = ast.literal_eval(res)
text = res_dict['response']
text = self.prompter.get_response(prompt + text, prompt=prompt,
sanitize_bot_response=self.sanitize_bot_response)
# FIXME: derive chunk from full for now
text_chunk = text[len(text0):]
# save old
text0 = text
if text_callback:
text_callback(text_chunk)
time.sleep(0.01)
# ensure get last output to avoid race
res_all = job.outputs()
if len(res_all) > 0:
res = res_all[-1]
res_dict = ast.literal_eval(res)
text = res_dict['response']
# FIXME: derive chunk from full for now
else:
# go with old if failure
text = text0
text_chunk = text[len(text0):]
if text_callback:
text_callback(text_chunk)
return self.prompter.get_response(prompt + text, prompt=prompt,
sanitize_bot_response=self.sanitize_bot_response)
class H2OHuggingFaceTextGenInference(HuggingFaceTextGenInference):
max_new_tokens: int = 512
do_sample: bool = False
top_k: Optional[int] = None
top_p: Optional[float] = 0.95
typical_p: Optional[float] = 0.95
temperature: float = 0.8
repetition_penalty: Optional[float] = None
return_full_text: bool = False
stop_sequences: List[str] = Field(default_factory=list)
seed: Optional[int] = None
inference_server_url: str = ""
timeout: int = 300
headers: dict = None
stream: bool = False
sanitize_bot_response: bool = False
prompter: Any = None
context: Any = ''
iinput: Any = ''
tokenizer: Any = None
client: Any = None
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that python package exists in environment."""
try:
if values['client'] is None:
import text_generation
values["client"] = text_generation.Client(
values["inference_server_url"],
timeout=values["timeout"],
headers=values["headers"],
)
except ImportError:
raise ImportError(
"Could not import text_generation python package. "
"Please install it with `pip install text_generation`."
)
return values
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
if stop is None:
stop = self.stop_sequences
else:
stop += self.stop_sequences
# HF inference server needs control over input tokens
assert self.tokenizer is not None
from h2oai_pipeline import H2OTextGenerationPipeline
prompt, num_prompt_tokens = H2OTextGenerationPipeline.limit_prompt(prompt, self.tokenizer)
# NOTE: TGI server does not add prompting, so must do here
data_point = dict(context=self.context, instruction=prompt, input=self.iinput)
prompt = self.prompter.generate_prompt(data_point)
gen_server_kwargs = dict(do_sample=self.do_sample,
stop_sequences=stop,
max_new_tokens=self.max_new_tokens,
top_k=self.top_k,
top_p=self.top_p,
typical_p=self.typical_p,
temperature=self.temperature,
repetition_penalty=self.repetition_penalty,
return_full_text=self.return_full_text,
seed=self.seed,
)
gen_server_kwargs.update(kwargs)
# lower bound because client is re-used if multi-threading
self.client.timeout = max(300, self.timeout)
if not self.stream:
res = self.client.generate(
prompt,
**gen_server_kwargs,
)
if self.return_full_text:
gen_text = res.generated_text[len(prompt):]
else:
gen_text = res.generated_text
# remove stop sequences from the end of the generated text
for stop_seq in stop:
if stop_seq in gen_text:
gen_text = gen_text[:gen_text.index(stop_seq)]
text = prompt + gen_text
text = self.prompter.get_response(text, prompt=prompt,
sanitize_bot_response=self.sanitize_bot_response)
else:
text_callback = None
if run_manager:
text_callback = partial(
run_manager.on_llm_new_token, verbose=self.verbose
)
# parent handler of streamer expects to see prompt first else output="" and lose if prompt=None in prompter
if text_callback:
text_callback(prompt)
text = ""
# Note: Streaming ignores return_full_text=True
for response in self.client.generate_stream(prompt, **gen_server_kwargs):
text_chunk = response.token.text
text += text_chunk
text = self.prompter.get_response(prompt + text, prompt=prompt,
sanitize_bot_response=self.sanitize_bot_response)
# stream part
is_stop = False
for stop_seq in stop:
if stop_seq in text_chunk:
is_stop = True
break
if is_stop:
break
if not response.token.special:
if text_callback:
text_callback(text_chunk)
return text
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from langchain.llms.openai import _streaming_response_template, completion_with_retry, _update_response, \
update_token_usage
class H2OOpenAI(OpenAI):
"""
New class to handle vLLM's use of OpenAI, no vllm_chat supported, so only need here
Handles prompting that OpenAI doesn't need, stopping as well
"""
stop_sequences: Any = None
sanitize_bot_response: bool = False
prompter: Any = None
context: Any = ''
iinput: Any = ''
tokenizer: Any = None
@classmethod
def _all_required_field_names(cls) -> Set:
_all_required_field_names = super(OpenAI, cls)._all_required_field_names()
_all_required_field_names.update(
{'top_p', 'frequency_penalty', 'presence_penalty', 'stop_sequences', 'sanitize_bot_response', 'prompter',
'tokenizer'})
return _all_required_field_names
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
stop = self.stop_sequences if not stop else self.stop_sequences + stop
# HF inference server needs control over input tokens
assert self.tokenizer is not None
from h2oai_pipeline import H2OTextGenerationPipeline
for prompti, prompt in enumerate(prompts):
prompt, num_prompt_tokens = H2OTextGenerationPipeline.limit_prompt(prompt, self.tokenizer)
# NOTE: OpenAI/vLLM server does not add prompting, so must do here
data_point = dict(context=self.context, instruction=prompt, input=self.iinput)
prompt = self.prompter.generate_prompt(data_point)
prompts[prompti] = prompt
params = self._invocation_params
params = {**params, **kwargs}
sub_prompts = self.get_sub_prompts(params, prompts, stop)
choices = []
token_usage: Dict[str, int] = {}
# Get the token usage from the response.
# Includes prompt, completion, and total tokens used.
_keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
text = ''
for _prompts in sub_prompts:
if self.streaming:
text_with_prompt = ""
prompt = _prompts[0]
if len(_prompts) > 1:
raise ValueError("Cannot stream results with multiple prompts.")
params["stream"] = True
response = _streaming_response_template()
first = True
for stream_resp in completion_with_retry(
self, prompt=_prompts, **params
):
if first:
stream_resp["choices"][0]["text"] = prompt + stream_resp["choices"][0]["text"]
first = False
text_chunk = stream_resp["choices"][0]["text"]
text_with_prompt += text_chunk
text = self.prompter.get_response(text_with_prompt, prompt=prompt,
sanitize_bot_response=self.sanitize_bot_response)
if run_manager:
run_manager.on_llm_new_token(
text_chunk,
verbose=self.verbose,
logprobs=stream_resp["choices"][0]["logprobs"],
)
_update_response(response, stream_resp)
choices.extend(response["choices"])
else:
response = completion_with_retry(self, prompt=_prompts, **params)
choices.extend(response["choices"])
if not self.streaming:
# Can't update token usage if streaming
update_token_usage(_keys, response, token_usage)
choices[0]['text'] = text
return self.create_llm_result(choices, prompts, token_usage)
class H2OChatOpenAI(ChatOpenAI):
@classmethod
def _all_required_field_names(cls) -> Set:
_all_required_field_names = super(ChatOpenAI, cls)._all_required_field_names()
_all_required_field_names.update({'top_p', 'frequency_penalty', 'presence_penalty'})
return _all_required_field_names
def get_llm(use_openai_model=False,
model_name=None,
model=None,
tokenizer=None,
inference_server=None,
langchain_only_model=None,
stream_output=False,
do_sample=False,
temperature=0.1,
top_k=40,
top_p=0.7,
num_beams=1,
max_new_tokens=256,
min_new_tokens=1,
early_stopping=False,
max_time=180,
repetition_penalty=1.0,
num_return_sequences=1,
prompt_type=None,
prompt_dict=None,
prompter=None,
context=None,
iinput=None,
sanitize_bot_response=False,
verbose=False,
):
if inference_server is None:
inference_server = ''
if use_openai_model or inference_server.startswith('openai') or inference_server.startswith('vllm'):
if use_openai_model and model_name is None:
model_name = "gpt-3.5-turbo"
# FIXME: Will later import be ignored? I think so, so should be fine
openai, inf_type = set_openai(inference_server)
kwargs_extra = {}
if inference_server == 'openai_chat' or inf_type == 'vllm_chat':
cls = H2OChatOpenAI
# FIXME: Support context, iinput
else:
cls = H2OOpenAI
if inf_type == 'vllm':
kwargs_extra = dict(stop_sequences=prompter.stop_sequences,
sanitize_bot_response=sanitize_bot_response,
prompter=prompter,
context=context,
iinput=iinput,
tokenizer=tokenizer,
client=None)
callbacks = [StreamingGradioCallbackHandler()]
llm = cls(model_name=model_name,
temperature=temperature if do_sample else 0,
# FIXME: Need to count tokens and reduce max_new_tokens to fit like in generate.py
max_tokens=max_new_tokens,
top_p=top_p if do_sample else 1,
frequency_penalty=0,
presence_penalty=1.07 - repetition_penalty + 0.6, # so good default
callbacks=callbacks if stream_output else None,
openai_api_key=openai.api_key,
openai_api_base=openai.api_base,
logit_bias=None if inf_type == 'vllm' else {},
max_retries=2,
streaming=stream_output,
**kwargs_extra
)
streamer = callbacks[0] if stream_output else None
if inference_server in ['openai', 'openai_chat']:
prompt_type = inference_server
else:
# vllm goes here
prompt_type = prompt_type or 'plain'
elif inference_server:
assert inference_server.startswith(
'http'), "Malformed inference_server=%s. Did you add http:// in front?" % inference_server
from gradio_utils.grclient import GradioClient
from text_generation import Client as HFClient
if isinstance(model, GradioClient):
gr_client = model
hf_client = None
else:
gr_client = None
hf_client = model
assert isinstance(hf_client, HFClient)
inference_server, headers = get_hf_server(inference_server)
# quick sanity check to avoid long timeouts, just see if can reach server
requests.get(inference_server, timeout=int(os.getenv('REQUEST_TIMEOUT_FAST', '10')))
callbacks = [StreamingGradioCallbackHandler()]
if gr_client:
chat_client = False
llm = GradioInference(
inference_server_url=inference_server,
return_full_text=True,
temperature=temperature,
top_p=top_p,
top_k=top_k,
num_beams=num_beams,
max_new_tokens=max_new_tokens,
min_new_tokens=min_new_tokens,
early_stopping=early_stopping,
max_time=max_time,
repetition_penalty=repetition_penalty,
num_return_sequences=num_return_sequences,
do_sample=do_sample,
chat_client=chat_client,
callbacks=callbacks if stream_output else None,
stream=stream_output,
prompter=prompter,
context=context,
iinput=iinput,
client=gr_client,
sanitize_bot_response=sanitize_bot_response,
)
elif hf_client:
llm = H2OHuggingFaceTextGenInference(
inference_server_url=inference_server,
do_sample=do_sample,
max_new_tokens=max_new_tokens,
repetition_penalty=repetition_penalty,
return_full_text=True,
seed=SEED,
stop_sequences=prompter.stop_sequences,
temperature=temperature,
top_k=top_k,
top_p=top_p,
# typical_p=top_p,
callbacks=callbacks if stream_output else None,
stream=stream_output,
prompter=prompter,
context=context,
iinput=iinput,
tokenizer=tokenizer,
client=hf_client,
timeout=max_time,
sanitize_bot_response=sanitize_bot_response,
)
else:
raise RuntimeError("No defined client")
streamer = callbacks[0] if stream_output else None
elif model_name in non_hf_types:
assert langchain_only_model
if model_name == 'llama':
callbacks = [StreamingGradioCallbackHandler()]
streamer = callbacks[0] if stream_output else None
else:
# stream_output = False
# doesn't stream properly as generator, but at least
callbacks = [streaming_stdout.StreamingStdOutCallbackHandler()]
streamer = None
if prompter:
prompt_type = prompter.prompt_type
else:
prompter = Prompter(prompt_type, prompt_dict, debug=False, chat=False, stream_output=stream_output)
pass # assume inputted prompt_type is correct
from gpt4all_llm import get_llm_gpt4all
llm = get_llm_gpt4all(model_name, model=model, max_new_tokens=max_new_tokens,
temperature=temperature,
repetition_penalty=repetition_penalty,
top_k=top_k,
top_p=top_p,
callbacks=callbacks,
verbose=verbose,
streaming=stream_output,
prompter=prompter,
context=context,
iinput=iinput,
)
elif hasattr(model, 'is_exlama') and model.is_exlama():
assert langchain_only_model
callbacks = [StreamingGradioCallbackHandler()]
streamer = callbacks[0] if stream_output else None
max_max_tokens = tokenizer.model_max_length
from src.llm_exllama import Exllama
llm = Exllama(streaming=stream_output,
model_path=None,
model=model,
lora_path=None,
temperature=temperature,
top_k=top_k,
top_p=top_p,
typical=.7,
beams=1,
# beam_length = 40,
stop_sequences=prompter.stop_sequences,
callbacks=callbacks,
verbose=verbose,
max_seq_len=max_max_tokens,
fused_attn=False,
# alpha_value = 1.0, #For use with any models
# compress_pos_emb = 4.0, #For use with superhot
# set_auto_map = "3, 2" #Gpu split, this will split 3gigs/2gigs
prompter=prompter,
context=context,
iinput=iinput,
)
else:
if model is None:
# only used if didn't pass model in
assert tokenizer is None
prompt_type = 'human_bot'
if model_name is None:
model_name = 'h2oai/h2ogpt-oasst1-512-12b'
# model_name = 'h2oai/h2ogpt-oig-oasst1-512-6_9b'
# model_name = 'h2oai/h2ogpt-oasst1-512-20b'
inference_server = ''
model, tokenizer, device = get_model(load_8bit=True, base_model=model_name,
inference_server=inference_server, gpu_id=0)
max_max_tokens = tokenizer.model_max_length
gen_kwargs = dict(do_sample=do_sample,
temperature=temperature,
top_k=top_k,
top_p=top_p,
num_beams=num_beams,
max_new_tokens=max_new_tokens,
min_new_tokens=min_new_tokens,
early_stopping=early_stopping,
max_time=max_time,
repetition_penalty=repetition_penalty,
num_return_sequences=num_return_sequences,
return_full_text=True,
handle_long_generation=None)
assert len(set(gen_hyper).difference(gen_kwargs.keys())) == 0
if stream_output:
skip_prompt = False
from gen import H2OTextIteratorStreamer
decoder_kwargs = {}
streamer = H2OTextIteratorStreamer(tokenizer, skip_prompt=skip_prompt, block=False, **decoder_kwargs)
gen_kwargs.update(dict(streamer=streamer))
else:
streamer = None
from h2oai_pipeline import H2OTextGenerationPipeline
pipe = H2OTextGenerationPipeline(model=model, use_prompter=True,
prompter=prompter,
context=context,
iinput=iinput,
prompt_type=prompt_type,
prompt_dict=prompt_dict,
sanitize_bot_response=sanitize_bot_response,
chat=False, stream_output=stream_output,
tokenizer=tokenizer,
# leave some room for 1 paragraph, even if min_new_tokens=0
max_input_tokens=max_max_tokens - max(min_new_tokens, 256),
**gen_kwargs)
# pipe.task = "text-generation"
# below makes it listen only to our prompt removal,
# not built in prompt removal that is less general and not specific for our model
pipe.task = "text2text-generation"
from langchain.llms import HuggingFacePipeline
llm = HuggingFacePipeline(pipeline=pipe)
return llm, model_name, streamer, prompt_type
def get_device_dtype():
# torch.device("cuda") leads to cuda:x cuda:y mismatches for multi-GPU consistently
import torch
    n_gpus = torch.cuda.device_count() if torch.cuda.is_available() else 0
device = 'cpu' if n_gpus == 0 else 'cuda'
# from utils import NullContext
# context_class = NullContext if n_gpus > 1 or n_gpus == 0 else context_class
context_class = torch.device
torch_dtype = torch.float16 if device == 'cuda' else torch.float32
return device, torch_dtype, context_class
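
# Illustrative sketch (not called by the pipeline): how the results of get_device_dtype()
# are typically consumed, mirroring the torch.no_grad()/autocast pattern used further below.
def _example_get_device_dtype():
    import torch
    device, torch_dtype, context_class = get_device_dtype()
    context_class_cast = NullContext if device == 'cpu' else torch.autocast
    with torch.no_grad():
        with context_class_cast(device):
            pass  # model forward pass would go here
    return device, torch_dtype
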
def get_wiki_data(title, first_paragraph_only, text_limit=None, take_head=True):
"""
Get wikipedia data from online
:param title:
:param first_paragraph_only:
:param text_limit:
:param take_head:
:return:
"""
filename = 'wiki_%s_%s_%s_%s.data' % (first_paragraph_only, title, text_limit, take_head)
url = f"https://en.wikipedia.org/w/api.php?format=json&action=query&prop=extracts&explaintext=1&titles={title}"
if first_paragraph_only:
url += "&exintro=1"
import json
if not os.path.isfile(filename):
data = requests.get(url).json()
json.dump(data, open(filename, 'wt'))
else:
data = json.load(open(filename, "rt"))
page_content = list(data["query"]["pages"].values())[0]["extract"]
if take_head is not None and text_limit is not None:
page_content = page_content[:text_limit] if take_head else page_content[-text_limit:]
title_url = str(title).replace(' ', '_')
return Document(
page_content=page_content,
metadata={"source": f"https://en.wikipedia.org/wiki/{title_url}"},
)
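
# Illustrative sketch (not used by the pipeline): fetching a single page with get_wiki_data().
# Assumes network access; the title 'Linux' and limits are just example values.
def _example_get_wiki_data():
    doc = get_wiki_data('Linux', first_paragraph_only=True, text_limit=2000)
    # doc.page_content holds the (possibly truncated) extract,
    # doc.metadata['source'] the canonical Wikipedia URL
    return doc
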
def get_wiki_sources(first_para=True, text_limit=None):
"""
Get specific named sources from wikipedia
:param first_para:
:param text_limit:
:return:
"""
default_wiki_sources = ['Unix', 'Microsoft_Windows', 'Linux']
    wiki_sources = os.getenv('WIKI_SOURCES', default_wiki_sources)
    if isinstance(wiki_sources, str):
        # env var is interpreted as a comma-separated list of page titles
        wiki_sources = [x.strip() for x in wiki_sources.split(',') if x.strip()]
return [get_wiki_data(x, first_para, text_limit=text_limit) for x in wiki_sources]
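
# Illustrative sketch (not used by the pipeline): overriding the default page list via the
# WIKI_SOURCES environment variable before calling get_wiki_sources(). Example values only.
def _example_get_wiki_sources():
    import os
    os.environ['WIKI_SOURCES'] = 'Unix,Linux'  # comma-separated page titles
    return get_wiki_sources(first_para=True, text_limit=1000)
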
def get_github_docs(repo_owner, repo_name):
"""
Access github from specific repo
:param repo_owner:
:param repo_name:
:return:
"""
with tempfile.TemporaryDirectory() as d:
subprocess.check_call(
f"git clone --depth 1 https://github.com/{repo_owner}/{repo_name}.git .",
cwd=d,
shell=True,
)
git_sha = (
subprocess.check_output("git rev-parse HEAD", shell=True, cwd=d)
.decode("utf-8")
.strip()
)
repo_path = pathlib.Path(d)
markdown_files = list(repo_path.glob("*/*.md")) + list(
repo_path.glob("*/*.mdx")
)
for markdown_file in markdown_files:
with open(markdown_file, "r") as f:
relative_path = markdown_file.relative_to(repo_path)
github_url = f"https://github.com/{repo_owner}/{repo_name}/blob/{git_sha}/{relative_path}"
yield Document(page_content=f.read(), metadata={"source": github_url})
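
# Illustrative sketch (not used by the pipeline): get_github_docs() is a generator, so callers
# normally materialize it into a list of Documents. Assumes git and network access.
def _example_get_github_docs():
    return list(get_github_docs("h2oai", "h2ogpt"))
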
def get_dai_pickle(dest="."):
from huggingface_hub import hf_hub_download
# True for case when locally already logged in with correct token, so don't have to set key
token = os.getenv('HUGGINGFACE_API_TOKEN', True)
path_to_zip_file = hf_hub_download('h2oai/dai_docs', 'dai_docs.pickle', token=token, repo_type='dataset')
shutil.copy(path_to_zip_file, dest)
def get_dai_docs(from_hf=False, get_pickle=True):
"""
Consume DAI documentation, or consume from public pickle
:param from_hf: get DAI docs from HF, then generate pickle for later use by LangChain
:param get_pickle: Avoid raw DAI docs, just get pickle directly from HF
:return:
"""
import pickle
if get_pickle:
get_dai_pickle()
dai_store = 'dai_docs.pickle'
dst = "working_dir_docs"
if not os.path.isfile(dai_store):
from create_data import setup_dai_docs
dst = setup_dai_docs(dst=dst, from_hf=from_hf)
import glob
files = list(glob.glob(os.path.join(dst, '*rst'), recursive=True))
basedir = os.path.abspath(os.getcwd())
from create_data import rst_to_outputs
new_outputs = rst_to_outputs(files)
os.chdir(basedir)
pickle.dump(new_outputs, open(dai_store, 'wb'))
else:
new_outputs = pickle.load(open(dai_store, 'rb'))
sources = []
for line, file in new_outputs:
# gradio requires any linked file to be with app.py
sym_src = os.path.abspath(os.path.join(dst, file))
sym_dst = os.path.abspath(os.path.join(os.getcwd(), file))
if os.path.lexists(sym_dst):
os.remove(sym_dst)
os.symlink(sym_src, sym_dst)
itm = Document(page_content=line, metadata={"source": file})
# NOTE: yield has issues when going into db, loses metadata
# yield itm
sources.append(itm)
return sources
image_types = ["png", "jpg", "jpeg"]
non_image_types = ["pdf", "txt", "csv", "toml", "py", "rst", "rtf",
"md",
"html", "mhtml",
"enex", "eml", "epub", "odt", "pptx", "ppt",
"zip", "urls",
]
# "msg", GPL3
if have_libreoffice or True:
    # 'or True' so it still tries to load, e.g. on Mac/Windows, even without libreoffice, since these loaders can work without it
non_image_types.extend(["docx", "doc", "xls", "xlsx"])
file_types = non_image_types + image_types
def add_meta(docs1, file):
file_extension = pathlib.Path(file).suffix
hashid = hash_file(file)
doc_hash = str(uuid.uuid4())[:10]
if not isinstance(docs1, (list, tuple, types.GeneratorType)):
docs1 = [docs1]
[x.metadata.update(dict(input_type=file_extension, date=str(datetime.now()), hashid=hashid, doc_hash=doc_hash)) for
x in docs1]
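
# Illustrative sketch (not used by the pipeline): add_meta() mutates documents in place,
# stamping input_type, date, hashid, and doc_hash onto each Document's metadata.
# 'example.txt' is a placeholder; the file must exist so hash_file() can read it.
def _example_add_meta(file='example.txt'):
    docs = [Document(page_content='some text', metadata={'source': file})]
    add_meta(docs, file)
    return docs[0].metadata  # now includes input_type, date, hashid, doc_hash
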
def file_to_doc(file, base_path=None, verbose=False, fail_any_exception=False,
chunk=True, chunk_size=512, n_jobs=-1,
is_url=False, is_txt=False,
enable_captions=True,
captions_model=None,
enable_ocr=False, enable_pdf_ocr='auto', caption_loader=None,
headsize=50):
if file is None:
if fail_any_exception:
raise RuntimeError("Unexpected None file")
else:
return []
doc1 = [] # in case no support, or disabled support
if base_path is None and not is_txt and not is_url:
# then assume want to persist but don't care which path used
# can't be in base_path
dir_name = os.path.dirname(file)
base_name = os.path.basename(file)
# if from gradio, will have its own temp uuid too, but that's ok
base_name = sanitize_filename(base_name) + "_" + str(uuid.uuid4())[:10]
base_path = os.path.join(dir_name, base_name)
if is_url:
file = file.strip() # in case accidental spaces in front or at end
if file.lower().startswith('arxiv:'):
query = file.lower().split('arxiv:')
if len(query) == 2 and have_arxiv:
query = query[1]
docs1 = ArxivLoader(query=query, load_max_docs=20, load_all_available_meta=True).load()
# ensure string, sometimes None
[[x.metadata.update({k: str(v)}) for k, v in x.metadata.items()] for x in docs1]
query_url = f"https://arxiv.org/abs/{query}"
[x.metadata.update(
dict(source=x.metadata.get('entry_id', query_url), query=query_url,
                     input_type='arxiv', head=x.metadata.get('Title', ''), date=str(datetime.now()))) for x in
docs1]
else:
docs1 = []
else:
if not (file.startswith("http://") or file.startswith("file://") or file.startswith("https://")):
file = 'http://' + file
docs1 = UnstructuredURLLoader(urls=[file]).load()
if len(docs1) == 0 and have_playwright:
# then something went wrong, try another loader:
from langchain.document_loaders import PlaywrightURLLoader
docs1 = PlaywrightURLLoader(urls=[file]).load()
if len(docs1) == 0 and have_selenium:
# then something went wrong, try another loader:
# but requires Chrome binary, else get: selenium.common.exceptions.WebDriverException: Message: unknown error: cannot find Chrome binary
from langchain.document_loaders import SeleniumURLLoader
from selenium.common.exceptions import WebDriverException
try:
docs1 = SeleniumURLLoader(urls=[file]).load()
except WebDriverException as e:
print("No web driver: %s" % str(e), flush=True)
[x.metadata.update(dict(input_type='url', date=str(datetime.now))) for x in docs1]
docs1 = clean_doc(docs1)
doc1 = chunk_sources(docs1, chunk=chunk, chunk_size=chunk_size)
elif is_txt:
base_path = "user_paste"
base_path = makedirs(base_path, exist_ok=True, tmp_ok=True)
source_file = os.path.join(base_path, "_%s" % str(uuid.uuid4())[:10])
with open(source_file, "wt") as f:
f.write(file)
metadata = dict(source=source_file, date=str(datetime.now()), input_type='pasted txt')
doc1 = Document(page_content=file, metadata=metadata)
doc1 = clean_doc(doc1)
elif file.lower().endswith('.html') or file.lower().endswith('.mhtml'):
docs1 = UnstructuredHTMLLoader(file_path=file).load()
add_meta(docs1, file)
docs1 = clean_doc(docs1)
doc1 = chunk_sources(docs1, chunk=chunk, chunk_size=chunk_size, language=Language.HTML)
elif (file.lower().endswith('.docx') or file.lower().endswith('.doc')) and (have_libreoffice or True):
docs1 = UnstructuredWordDocumentLoader(file_path=file).load()
add_meta(docs1, file)
doc1 = chunk_sources(docs1, chunk=chunk, chunk_size=chunk_size)
elif (file.lower().endswith('.xlsx') or file.lower().endswith('.xls')) and (have_libreoffice or True):
docs1 = UnstructuredExcelLoader(file_path=file).load()
add_meta(docs1, file)
doc1 = chunk_sources(docs1, chunk=chunk, chunk_size=chunk_size)
elif file.lower().endswith('.odt'):
docs1 = UnstructuredODTLoader(file_path=file).load()
add_meta(docs1, file)
doc1 = chunk_sources(docs1, chunk=chunk, chunk_size=chunk_size)
elif file.lower().endswith('pptx') or file.lower().endswith('ppt'):
docs1 = UnstructuredPowerPointLoader(file_path=file).load()
add_meta(docs1, file)
docs1 = clean_doc(docs1)
doc1 = chunk_sources(docs1, chunk=chunk, chunk_size=chunk_size)
elif file.lower().endswith('.txt'):
# use UnstructuredFileLoader ?
docs1 = TextLoader(file, encoding="utf8", autodetect_encoding=True).load()
# makes just one, but big one
doc1 = chunk_sources(docs1, chunk=chunk, chunk_size=chunk_size)
doc1 = clean_doc(doc1)
add_meta(doc1, file)
elif file.lower().endswith('.rtf'):
docs1 = UnstructuredRTFLoader(file).load()
add_meta(docs1, file)
doc1 = chunk_sources(docs1, chunk=chunk, chunk_size=chunk_size)
elif file.lower().endswith('.md'):
docs1 = UnstructuredMarkdownLoader(file).load()
add_meta(docs1, file)
docs1 = clean_doc(docs1)
doc1 = chunk_sources(docs1, chunk=chunk, chunk_size=chunk_size, language=Language.MARKDOWN)
elif file.lower().endswith('.enex'):
docs1 = EverNoteLoader(file).load()
        add_meta(docs1, file)
doc1 = chunk_sources(docs1, chunk=chunk, chunk_size=chunk_size)
elif file.lower().endswith('.epub'):
docs1 = UnstructuredEPubLoader(file).load()
add_meta(docs1, file)
doc1 = chunk_sources(docs1, chunk=chunk, chunk_size=chunk_size)
elif file.lower().endswith('.jpeg') or file.lower().endswith('.jpg') or file.lower().endswith('.png'):
docs1 = []
if have_tesseract and enable_ocr:
# OCR, somewhat works, but not great
docs1.extend(UnstructuredImageLoader(file).load())
add_meta(docs1, file)
if enable_captions:
# BLIP
if caption_loader is not None and not isinstance(caption_loader, (str, bool)):
# assumes didn't fork into this process with joblib, else can deadlock
caption_loader.set_image_paths([file])
docs1c = caption_loader.load()
add_meta(docs1c, file)
[x.metadata.update(dict(head=x.page_content[:headsize].strip())) for x in docs1c]
docs1.extend(docs1c)
else:
from image_captions import H2OImageCaptionLoader
caption_loader = H2OImageCaptionLoader(caption_gpu=caption_loader == 'gpu',
blip_model=captions_model,
blip_processor=captions_model)
caption_loader.set_image_paths([file])
docs1c = caption_loader.load()
add_meta(docs1c, file)
[x.metadata.update(dict(head=x.page_content[:headsize].strip())) for x in docs1c]
docs1.extend(docs1c)
for doci in docs1:
doci.metadata['source'] = doci.metadata['image_path']
doci.metadata['hash'] = hash_file(doci.metadata['source'])
if docs1:
doc1 = chunk_sources(docs1, chunk=chunk, chunk_size=chunk_size)
elif file.lower().endswith('.msg'):
raise RuntimeError("Not supported, GPL3 license")
# docs1 = OutlookMessageLoader(file).load()
# docs1[0].metadata['source'] = file
elif file.lower().endswith('.eml'):
try:
docs1 = UnstructuredEmailLoader(file).load()
add_meta(docs1, file)
doc1 = chunk_sources(docs1, chunk=chunk, chunk_size=chunk_size)
except ValueError as e:
if 'text/html content not found in email' in str(e):
                # e.g. the text/plain dict key exists, but text/html does not
# doc1 = TextLoader(file, encoding="utf8").load()
docs1 = UnstructuredEmailLoader(file, content_source="text/plain").load()
add_meta(docs1, file)
doc1 = chunk_sources(docs1, chunk=chunk, chunk_size=chunk_size)
else:
raise
# elif file.lower().endswith('.gcsdir'):
# doc1 = GCSDirectoryLoader(project_name, bucket, prefix).load()
# elif file.lower().endswith('.gcsfile'):
# doc1 = GCSFileLoader(project_name, bucket, blob).load()
elif file.lower().endswith('.rst'):
with open(file, "r") as f:
doc1 = Document(page_content=f.read(), metadata={"source": file})
add_meta(doc1, file)
doc1 = chunk_sources(doc1, chunk=chunk, chunk_size=chunk_size, language=Language.RST)
elif file.lower().endswith('.pdf'):
env_gpt4all_file = ".env_gpt4all"
from dotenv import dotenv_values
env_kwargs = dotenv_values(env_gpt4all_file)
pdf_class_name = env_kwargs.get('PDF_CLASS_NAME', 'PyMuPDFParser')
doc1 = []
handled = False
if have_pymupdf and pdf_class_name == 'PyMuPDFParser':
# GPL, only use if installed
from langchain.document_loaders import PyMuPDFLoader
# load() still chunks by pages, but every page has title at start to help
doc1 = PyMuPDFLoader(file).load()
# remove empty documents
handled |= len(doc1) > 0
doc1 = [x for x in doc1 if x.page_content]
doc1 = clean_doc(doc1)
if len(doc1) == 0:
doc1 = UnstructuredPDFLoader(file).load()
handled |= len(doc1) > 0
# remove empty documents
doc1 = [x for x in doc1 if x.page_content]
# seems to not need cleaning in most cases
if len(doc1) == 0:
# open-source fallback
# load() still chunks by pages, but every page has title at start to help
doc1 = PyPDFLoader(file).load()
handled |= len(doc1) > 0
# remove empty documents
doc1 = [x for x in doc1 if x.page_content]
doc1 = clean_doc(doc1)
if have_pymupdf and len(doc1) == 0:
# GPL, only use if installed
from langchain.document_loaders import PyMuPDFLoader
# load() still chunks by pages, but every page has title at start to help
doc1 = PyMuPDFLoader(file).load()
handled |= len(doc1) > 0
# remove empty documents
doc1 = [x for x in doc1 if x.page_content]
doc1 = clean_doc(doc1)
        if (len(doc1) == 0 and enable_pdf_ocr == 'auto') or enable_pdf_ocr == 'on':
# try OCR in end since slowest, but works on pure image pages well
doc1 = UnstructuredPDFLoader(file, strategy='ocr_only').load()
handled |= len(doc1) > 0
# remove empty documents
doc1 = [x for x in doc1 if x.page_content]
# seems to not need cleaning in most cases
# Some PDFs return nothing or junk from PDFMinerLoader
if len(doc1) == 0:
# if literally nothing, show failed to parse so user knows, since unlikely nothing in PDF at all.
if handled:
raise ValueError("%s had no valid text, but meta data was parsed" % file)
else:
raise ValueError("%s had no valid text and no meta data was parsed" % file)
doc1 = chunk_sources(doc1, chunk=chunk, chunk_size=chunk_size)
add_meta(doc1, file)
elif file.lower().endswith('.csv'):
doc1 = CSVLoader(file).load()
add_meta(doc1, file)
elif file.lower().endswith('.py'):
doc1 = PythonLoader(file).load()
add_meta(doc1, file)
doc1 = chunk_sources(doc1, chunk=chunk, chunk_size=chunk_size, language=Language.PYTHON)
elif file.lower().endswith('.toml'):
doc1 = TomlLoader(file).load()
add_meta(doc1, file)
elif file.lower().endswith('.urls'):
with open(file, "r") as f:
            docs1 = UnstructuredURLLoader(urls=[x.strip() for x in f.readlines() if x.strip()]).load()
add_meta(docs1, file)
doc1 = chunk_sources(docs1, chunk=chunk, chunk_size=chunk_size)
elif file.lower().endswith('.zip'):
with zipfile.ZipFile(file, 'r') as zip_ref:
# don't put into temporary path, since want to keep references to docs inside zip
# so just extract in path where
zip_ref.extractall(base_path)
# recurse
doc1 = path_to_docs(base_path, verbose=verbose, fail_any_exception=fail_any_exception, n_jobs=n_jobs)
else:
raise RuntimeError("No file handler for %s" % os.path.basename(file))
# allow doc1 to be list or not. If not list, did not chunk yet, so chunk now
# if list of length one, don't trust and chunk it
if not isinstance(doc1, list):
if chunk:
docs = chunk_sources([doc1], chunk=chunk, chunk_size=chunk_size)
else:
docs = [doc1]
elif isinstance(doc1, list) and len(doc1) == 1:
if chunk:
docs = chunk_sources(doc1, chunk=chunk, chunk_size=chunk_size)
else:
docs = doc1
else:
docs = doc1
assert isinstance(docs, list)
return docs
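
# Illustrative sketch (not used by the pipeline): ingesting a single local file or a URL with
# file_to_doc(). The path and URL are placeholders; captions/OCR stay at their defaults.
def _example_file_to_doc():
    docs_from_file = file_to_doc('example.pdf', chunk=True, chunk_size=512)
    docs_from_url = file_to_doc('https://en.wikipedia.org/wiki/Linux', is_url=True)
    return docs_from_file + docs_from_url
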
def path_to_doc1(file, verbose=False, fail_any_exception=False, return_file=True,
chunk=True, chunk_size=512,
n_jobs=-1,
is_url=False, is_txt=False,
enable_captions=True,
captions_model=None,
enable_ocr=False, enable_pdf_ocr='auto', caption_loader=None):
if verbose:
if is_url:
print("Ingesting URL: %s" % file, flush=True)
elif is_txt:
print("Ingesting Text: %s" % file, flush=True)
else:
print("Ingesting file: %s" % file, flush=True)
res = None
try:
# don't pass base_path=path, would infinitely recurse
res = file_to_doc(file, base_path=None, verbose=verbose, fail_any_exception=fail_any_exception,
chunk=chunk, chunk_size=chunk_size,
n_jobs=n_jobs,
is_url=is_url, is_txt=is_txt,
enable_captions=enable_captions,
captions_model=captions_model,
enable_ocr=enable_ocr,
enable_pdf_ocr=enable_pdf_ocr,
caption_loader=caption_loader)
except BaseException as e:
print("Failed to ingest %s due to %s" % (file, traceback.format_exc()))
if fail_any_exception:
raise
else:
exception_doc = Document(
page_content='',
metadata={"source": file, "exception": '%s Exception: %s' % (file, str(e)),
"traceback": traceback.format_exc()})
res = [exception_doc]
if return_file:
base_tmp = "temp_path_to_doc1"
if not os.path.isdir(base_tmp):
base_tmp = makedirs(base_tmp, exist_ok=True, tmp_ok=True)
filename = os.path.join(base_tmp, str(uuid.uuid4()) + ".tmp.pickle")
with open(filename, 'wb') as f:
pickle.dump(res, f)
return filename
return res
def path_to_docs(path_or_paths, verbose=False, fail_any_exception=False, n_jobs=-1,
chunk=True, chunk_size=512,
url=None, text=None,
enable_captions=True,
captions_model=None,
caption_loader=None,
enable_ocr=False,
enable_pdf_ocr='auto',
existing_files=[],
existing_hash_ids={},
):
# path_or_paths could be str, list, tuple, generator
globs_image_types = []
globs_non_image_types = []
if not path_or_paths and not url and not text:
return []
elif url:
globs_non_image_types = url if isinstance(url, (list, tuple, types.GeneratorType)) else [url]
elif text:
globs_non_image_types = text if isinstance(text, (list, tuple, types.GeneratorType)) else [text]
elif isinstance(path_or_paths, str) and os.path.isdir(path_or_paths):
# single path, only consume allowed files
path = path_or_paths
# Below globs should match patterns in file_to_doc()
[globs_image_types.extend(glob.glob(os.path.join(path, "./**/*.%s" % ftype), recursive=True))
for ftype in image_types]
[globs_non_image_types.extend(glob.glob(os.path.join(path, "./**/*.%s" % ftype), recursive=True))
for ftype in non_image_types]
else:
if isinstance(path_or_paths, str):
if os.path.isfile(path_or_paths) or os.path.isdir(path_or_paths):
path_or_paths = [path_or_paths]
else:
# path was deleted etc.
return []
# list/tuple of files (consume what can, and exception those that selected but cannot consume so user knows)
assert isinstance(path_or_paths, (list, tuple, types.GeneratorType)), \
"Wrong type for path_or_paths: %s %s" % (path_or_paths, type(path_or_paths))
# reform out of allowed types
globs_image_types.extend(flatten_list([[x for x in path_or_paths if x.endswith(y)] for y in image_types]))
# could do below:
# globs_non_image_types = flatten_list([[x for x in path_or_paths if x.endswith(y)] for y in non_image_types])
# But instead, allow fail so can collect unsupported too
set_globs_image_types = set(globs_image_types)
globs_non_image_types.extend([x for x in path_or_paths if x not in set_globs_image_types])
# filter out any files to skip (e.g. if already processed them)
# this is easy, but too aggressive in case a file changed, so parent probably passed existing_files=[]
assert not existing_files, "DEV: assume not using this approach"
if existing_files:
set_skip_files = set(existing_files)
globs_image_types = [x for x in globs_image_types if x not in set_skip_files]
globs_non_image_types = [x for x in globs_non_image_types if x not in set_skip_files]
if existing_hash_ids:
# assume consistent with add_meta() use of hash_file(file)
# also assume consistent with get_existing_hash_ids for dict creation
# assume hashable values
existing_hash_ids_set = set(existing_hash_ids.items())
hash_ids_all_image = set({x: hash_file(x) for x in globs_image_types}.items())
hash_ids_all_non_image = set({x: hash_file(x) for x in globs_non_image_types}.items())
# don't use symmetric diff. If file is gone, ignore and don't remove or something
# just consider existing files (key) having new hash or not (value)
new_files_image = set(dict(hash_ids_all_image - existing_hash_ids_set).keys())
new_files_non_image = set(dict(hash_ids_all_non_image - existing_hash_ids_set).keys())
globs_image_types = [x for x in globs_image_types if x in new_files_image]
globs_non_image_types = [x for x in globs_non_image_types if x in new_files_non_image]
# could use generator, but messes up metadata handling in recursive case
    if (caption_loader and not isinstance(caption_loader, (bool, str)) and
            caption_loader.device != 'cpu') or get_device() == 'cuda':
# to avoid deadlocks, presume was preloaded and so can't fork due to cuda context
n_jobs_image = 1
else:
n_jobs_image = n_jobs
return_file = True # local choice
is_url = url is not None
is_txt = text is not None
kwargs = dict(verbose=verbose, fail_any_exception=fail_any_exception,
return_file=return_file,
chunk=chunk, chunk_size=chunk_size,
n_jobs=n_jobs,
is_url=is_url,
is_txt=is_txt,
enable_captions=enable_captions,
captions_model=captions_model,
caption_loader=caption_loader,
enable_ocr=enable_ocr,
enable_pdf_ocr=enable_pdf_ocr,
)
if n_jobs != 1 and len(globs_non_image_types) > 1:
# avoid nesting, e.g. upload 1 zip and then inside many files
# harder to handle if upload many zips with many files, inner parallel one will be disabled by joblib
documents = ProgressParallel(n_jobs=n_jobs, verbose=10 if verbose else 0, backend='multiprocessing')(
delayed(path_to_doc1)(file, **kwargs) for file in globs_non_image_types
)
else:
documents = [path_to_doc1(file, **kwargs) for file in tqdm(globs_non_image_types)]
# do images separately since can't fork after cuda in parent, so can't be parallel
if n_jobs_image != 1 and len(globs_image_types) > 1:
# avoid nesting, e.g. upload 1 zip and then inside many files
# harder to handle if upload many zips with many files, inner parallel one will be disabled by joblib
        image_documents = ProgressParallel(n_jobs=n_jobs_image, verbose=10 if verbose else 0, backend='multiprocessing')(
delayed(path_to_doc1)(file, **kwargs) for file in globs_image_types
)
else:
image_documents = [path_to_doc1(file, **kwargs) for file in tqdm(globs_image_types)]
# add image docs in
documents += image_documents
if return_file:
# then documents really are files
files = documents.copy()
documents = []
for fil in files:
with open(fil, 'rb') as f:
documents.extend(pickle.load(f))
# remove temp pickle
remove(fil)
else:
documents = reduce(concat, documents)
return documents
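
# Illustrative sketch (not used by the pipeline): typical ways path_to_docs() is driven,
# either over a directory tree or over pasted URLs/text. 'user_path' is a placeholder.
def _example_path_to_docs():
    dir_docs = path_to_docs('user_path', chunk=True, chunk_size=512, n_jobs=-1)
    url_docs = path_to_docs(None, url='https://en.wikipedia.org/wiki/Linux')
    text_docs = path_to_docs(None, text='some pasted text to ingest')
    return dir_docs + url_docs + text_docs
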
def prep_langchain(persist_directory,
load_db_if_exists,
db_type, use_openai_embedding, langchain_mode, langchain_mode_paths,
hf_embedding_model, n_jobs=-1, kwargs_make_db={}):
"""
do prep first time, involving downloads
# FIXME: Add github caching then add here
:return:
"""
assert langchain_mode not in ['MyData'], "Should not prep scratch data"
db_dir_exists = os.path.isdir(persist_directory)
user_path = langchain_mode_paths.get(langchain_mode)
if db_dir_exists and user_path is None:
print("Prep: persist_directory=%s exists, using" % persist_directory, flush=True)
db = get_existing_db(None, persist_directory, load_db_if_exists, db_type, use_openai_embedding, langchain_mode,
hf_embedding_model)
else:
if db_dir_exists and user_path is not None:
print("Prep: persist_directory=%s exists, user_path=%s passed, adding any changed or new documents" % (
persist_directory, user_path), flush=True)
elif not db_dir_exists:
print("Prep: persist_directory=%s does not exist, regenerating" % persist_directory, flush=True)
db = None
if langchain_mode in ['All', 'DriverlessAI docs']:
# FIXME: Could also just use dai_docs.pickle directly and upload that
get_dai_docs(from_hf=True)
if langchain_mode in ['All', 'wiki']:
get_wiki_sources(first_para=kwargs_make_db['first_para'], text_limit=kwargs_make_db['text_limit'])
langchain_kwargs = kwargs_make_db.copy()
langchain_kwargs.update(locals())
db, num_new_sources, new_sources_metadata = make_db(**langchain_kwargs)
return db
import posthog
posthog.disabled = True
class FakeConsumer(object):
def __init__(self, *args, **kwargs):
pass
def run(self):
pass
def pause(self):
pass
def upload(self):
pass
def next(self):
pass
def request(self, batch):
pass
posthog.Consumer = FakeConsumer
def check_update_chroma_embedding(db, use_openai_embedding, hf_embedding_model, langchain_mode):
changed_db = False
if load_embed(db) != (use_openai_embedding, hf_embedding_model):
print("Detected new embedding, updating db: %s" % langchain_mode, flush=True)
# handle embedding changes
db_get = get_documents(db)
sources = [Document(page_content=result[0], metadata=result[1] or {})
for result in zip(db_get['documents'], db_get['metadatas'])]
# delete index, has to be redone
persist_directory = db._persist_directory
shutil.move(persist_directory, persist_directory + "_" + str(uuid.uuid4()) + ".bak")
db_type = 'chroma'
load_db_if_exists = False
db = get_db(sources, use_openai_embedding=use_openai_embedding, db_type=db_type,
persist_directory=persist_directory, load_db_if_exists=load_db_if_exists,
langchain_mode=langchain_mode,
collection_name=None,
hf_embedding_model=hf_embedding_model)
if False:
# below doesn't work if db already in memory, so have to switch to new db as above
# upsert does new embedding, but if index already in memory, complains about size mismatch etc.
client_collection = db._client.get_collection(name=db._collection.name,
embedding_function=db._collection._embedding_function)
client_collection.upsert(ids=db_get['ids'], metadatas=db_get['metadatas'], documents=db_get['documents'])
changed_db = True
print("Done updating db for new embedding: %s" % langchain_mode, flush=True)
return db, changed_db
def get_existing_db(db, persist_directory, load_db_if_exists, db_type, use_openai_embedding, langchain_mode,
hf_embedding_model, verbose=False, check_embedding=True):
if load_db_if_exists and db_type == 'chroma' and os.path.isdir(persist_directory) and os.path.isdir(
os.path.join(persist_directory, 'index')):
if db is None:
if verbose:
print("DO Loading db: %s" % langchain_mode, flush=True)
embedding = get_embedding(use_openai_embedding, hf_embedding_model=hf_embedding_model)
from chromadb.config import Settings
client_settings = Settings(anonymized_telemetry=False,
chroma_db_impl="duckdb+parquet",
persist_directory=persist_directory)
db = Chroma(persist_directory=persist_directory, embedding_function=embedding,
collection_name=langchain_mode.replace(' ', '_'),
client_settings=client_settings)
if verbose:
print("DONE Loading db: %s" % langchain_mode, flush=True)
else:
if verbose:
print("USING already-loaded db: %s" % langchain_mode, flush=True)
if check_embedding:
db_trial, changed_db = check_update_chroma_embedding(db, use_openai_embedding, hf_embedding_model,
langchain_mode)
if changed_db:
db = db_trial
# only call persist if really changed db, else takes too long for large db
if db is not None:
db.persist()
clear_embedding(db)
save_embed(db, use_openai_embedding, hf_embedding_model)
return db
return None
def clear_embedding(db):
if db is None:
return
# don't keep on GPU, wastes memory, push back onto CPU and only put back on GPU once again embed
try:
db._embedding_function.client.cpu()
clear_torch_cache()
except RuntimeError as e:
print("clear_embedding error: %s" % ''.join(traceback.format_tb(e.__traceback__)), flush=True)
def make_db(**langchain_kwargs):
func_names = list(inspect.signature(_make_db).parameters)
missing_kwargs = [x for x in func_names if x not in langchain_kwargs]
defaults_db = {k: v.default for k, v in dict(inspect.signature(run_qa_db).parameters).items()}
for k in missing_kwargs:
if k in defaults_db:
langchain_kwargs[k] = defaults_db[k]
# final check for missing
missing_kwargs = [x for x in func_names if x not in langchain_kwargs]
assert not missing_kwargs, "Missing kwargs for make_db: %s" % missing_kwargs
# only keep actual used
langchain_kwargs = {k: v for k, v in langchain_kwargs.items() if k in func_names}
return _make_db(**langchain_kwargs)
def save_embed(db, use_openai_embedding, hf_embedding_model):
if db is not None:
embed_info_file = os.path.join(db._persist_directory, 'embed_info')
with open(embed_info_file, 'wb') as f:
pickle.dump((use_openai_embedding, hf_embedding_model), f)
return use_openai_embedding, hf_embedding_model
def load_embed(db):
embed_info_file = os.path.join(db._persist_directory, 'embed_info')
if os.path.isfile(embed_info_file):
with open(embed_info_file, 'rb') as f:
use_openai_embedding, hf_embedding_model = pickle.load(f)
else:
# migration, assume defaults
use_openai_embedding, hf_embedding_model = False, "sentence-transformers/all-MiniLM-L6-v2"
return use_openai_embedding, hf_embedding_model
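
# Illustrative sketch (not used by the pipeline): save_embed()/load_embed() round-trip the
# embedding settings next to a Chroma db so a later load can detect embedding changes.
# Assumes db is a Chroma instance with _persist_directory set.
def _example_embed_roundtrip(db):
    save_embed(db, use_openai_embedding=False,
               hf_embedding_model="sentence-transformers/all-MiniLM-L6-v2")
    return load_embed(db)  # -> (False, "sentence-transformers/all-MiniLM-L6-v2")
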
def get_persist_directory(langchain_mode):
return 'db_dir_%s' % langchain_mode # single place, no special names for each case
def _make_db(use_openai_embedding=False,
hf_embedding_model="sentence-transformers/all-MiniLM-L6-v2",
first_para=False, text_limit=None,
chunk=True, chunk_size=512,
langchain_mode=None,
langchain_mode_paths=None,
db_type='faiss',
load_db_if_exists=True,
db=None,
n_jobs=-1,
verbose=False):
persist_directory = get_persist_directory(langchain_mode)
user_path = langchain_mode_paths.get(langchain_mode)
# see if can get persistent chroma db
db_trial = get_existing_db(db, persist_directory, load_db_if_exists, db_type, use_openai_embedding, langchain_mode,
hf_embedding_model, verbose=verbose)
if db_trial is not None:
db = db_trial
sources = []
if not db:
if langchain_mode in ['wiki_full']:
from read_wiki_full import get_all_documents
small_test = None
print("Generating new wiki", flush=True)
sources1 = get_all_documents(small_test=small_test, n_jobs=os.cpu_count() // 2)
print("Got new wiki", flush=True)
if chunk:
sources1 = chunk_sources(sources1, chunk=chunk, chunk_size=chunk_size)
print("Chunked new wiki", flush=True)
sources.extend(sources1)
elif langchain_mode in ['wiki']:
sources1 = get_wiki_sources(first_para=first_para, text_limit=text_limit)
if chunk:
sources1 = chunk_sources(sources1, chunk=chunk, chunk_size=chunk_size)
sources.extend(sources1)
elif langchain_mode in ['github h2oGPT']:
# sources = get_github_docs("dagster-io", "dagster")
sources1 = get_github_docs("h2oai", "h2ogpt")
# FIXME: always chunk for now
sources1 = chunk_sources(sources1, chunk=chunk, chunk_size=chunk_size)
sources.extend(sources1)
elif langchain_mode in ['DriverlessAI docs']:
sources1 = get_dai_docs(from_hf=True)
if chunk and False: # FIXME: DAI docs are already chunked well, should only chunk more if over limit
sources1 = chunk_sources(sources1, chunk=chunk, chunk_size=chunk_size)
sources.extend(sources1)
if user_path:
# UserData or custom, which has to be from user's disk
if db is not None:
# NOTE: Ignore file names for now, only go by hash ids
# existing_files = get_existing_files(db)
existing_files = []
existing_hash_ids = get_existing_hash_ids(db)
else:
# pretend no existing files so won't filter
existing_files = []
            existing_hash_ids = {}
# chunk internally for speed over multiple docs
# FIXME: If first had old Hash=None and switch embeddings,
# then re-embed, and then hit here and reload so have hash, and then re-embed.
sources1 = path_to_docs(user_path, n_jobs=n_jobs, chunk=chunk, chunk_size=chunk_size,
existing_files=existing_files, existing_hash_ids=existing_hash_ids)
new_metadata_sources = set([x.metadata['source'] for x in sources1])
if new_metadata_sources:
print("Loaded %s new files as sources to add to %s" % (len(new_metadata_sources), langchain_mode),
flush=True)
if verbose:
print("Files added: %s" % '\n'.join(new_metadata_sources), flush=True)
sources.extend(sources1)
print("Loaded %s sources for potentially adding to %s" % (len(sources), langchain_mode), flush=True)
# see if got sources
if not sources:
if verbose:
if db is not None:
print("langchain_mode %s has no new sources, nothing to add to db" % langchain_mode, flush=True)
else:
print("langchain_mode %s has no sources, not making new db" % langchain_mode, flush=True)
return db, 0, []
if verbose:
        if db is not None:
            print("Adding to db", flush=True)
        else:
            print("Generating db", flush=True)
if not db:
if sources:
db = get_db(sources, use_openai_embedding=use_openai_embedding, db_type=db_type,
persist_directory=persist_directory, langchain_mode=langchain_mode,
hf_embedding_model=hf_embedding_model)
if verbose:
print("Generated db", flush=True)
else:
print("Did not generate db since no sources", flush=True)
new_sources_metadata = [x.metadata for x in sources]
elif user_path is not None:
print("Existing db, potentially adding %s sources from user_path=%s" % (len(sources), user_path), flush=True)
db, num_new_sources, new_sources_metadata = add_to_db(db, sources, db_type=db_type,
use_openai_embedding=use_openai_embedding,
hf_embedding_model=hf_embedding_model)
print("Existing db, added %s new sources from user_path=%s" % (num_new_sources, user_path), flush=True)
else:
new_sources_metadata = [x.metadata for x in sources]
return db, len(new_sources_metadata), new_sources_metadata
def get_metadatas(db):
from langchain.vectorstores import FAISS
if isinstance(db, FAISS):
metadatas = [v.metadata for k, v in db.docstore._dict.items()]
elif isinstance(db, Chroma):
metadatas = get_documents(db)['metadatas']
else:
# FIXME: Hack due to https://github.com/weaviate/weaviate/issues/1947
# seems no way to get all metadata, so need to avoid this approach for weaviate
metadatas = [x.metadata for x in db.similarity_search("", k=10000)]
return metadatas
def get_documents(db):
if hasattr(db, '_persist_directory'):
name_path = os.path.basename(db._persist_directory)
base_path = 'locks'
base_path = makedirs(base_path, exist_ok=True, tmp_ok=True)
with filelock.FileLock(os.path.join(base_path, "getdb_%s.lock" % name_path)):
# get segfaults and other errors when multiple threads access this
return _get_documents(db)
else:
return _get_documents(db)
def _get_documents(db):
from langchain.vectorstores import FAISS
if isinstance(db, FAISS):
documents = [v for k, v in db.docstore._dict.items()]
elif isinstance(db, Chroma):
documents = db.get()
else:
# FIXME: Hack due to https://github.com/weaviate/weaviate/issues/1947
# seems no way to get all metadata, so need to avoid this approach for weaviate
documents = [x for x in db.similarity_search("", k=10000)]
return documents
def get_docs_and_meta(db, top_k_docs, filter_kwargs={}):
if hasattr(db, '_persist_directory'):
name_path = os.path.basename(db._persist_directory)
base_path = 'locks'
base_path = makedirs(base_path, exist_ok=True, tmp_ok=True)
with filelock.FileLock(os.path.join(base_path, "getdb_%s.lock" % name_path)):
return _get_docs_and_meta(db, top_k_docs, filter_kwargs=filter_kwargs)
else:
return _get_docs_and_meta(db, top_k_docs, filter_kwargs=filter_kwargs)
def _get_docs_and_meta(db, top_k_docs, filter_kwargs={}):
from langchain.vectorstores import FAISS
if isinstance(db, Chroma):
db_get = db._collection.get(where=filter_kwargs.get('filter'))
db_metadatas = db_get['metadatas']
db_documents = db_get['documents']
elif isinstance(db, FAISS):
import itertools
db_metadatas = get_metadatas(db)
# FIXME: FAISS has no filter
# slice dict first
db_documents = list(dict(itertools.islice(db.docstore._dict.items(), top_k_docs)).values())
else:
db_metadatas = get_metadatas(db)
db_documents = get_documents(db)
return db_documents, db_metadatas
def get_existing_files(db):
metadatas = get_metadatas(db)
metadata_sources = set([x['source'] for x in metadatas])
return metadata_sources
def get_existing_hash_ids(db):
metadatas = get_metadatas(db)
# assume consistency, that any prior hashed source was single hashed file at the time among all source chunks
metadata_hash_ids = {x['source']: x.get('hashid') for x in metadatas}
return metadata_hash_ids
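
# Illustrative sketch (not used by the pipeline): how the helpers above are combined when
# adding new files to an existing db (mirrors the de-duplication logic in _make_db()).
# 'user_path' is a placeholder directory.
def _example_dedup_sources(db, user_path='user_path'):
    existing_hash_ids = get_existing_hash_ids(db)  # {source: hashid} for already-ingested files
    return path_to_docs(user_path, existing_files=[], existing_hash_ids=existing_hash_ids)
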
def run_qa_db(**kwargs):
func_names = list(inspect.signature(_run_qa_db).parameters)
# hard-coded defaults
kwargs['answer_with_sources'] = True
kwargs['show_rank'] = False
missing_kwargs = [x for x in func_names if x not in kwargs]
assert not missing_kwargs, "Missing kwargs for run_qa_db: %s" % missing_kwargs
# only keep actual used
kwargs = {k: v for k, v in kwargs.items() if k in func_names}
try:
return _run_qa_db(**kwargs)
finally:
clear_torch_cache()
def _run_qa_db(query=None,
iinput=None,
context=None,
use_openai_model=False, use_openai_embedding=False,
first_para=False, text_limit=None, top_k_docs=4, chunk=True, chunk_size=512,
langchain_mode_paths={},
detect_user_path_changes_every_query=False,
db_type='faiss',
model_name=None, model=None, tokenizer=None, inference_server=None,
langchain_only_model=False,
hf_embedding_model="sentence-transformers/all-MiniLM-L6-v2",
stream_output=False,
prompter=None,
prompt_type=None,
prompt_dict=None,
answer_with_sources=True,
cut_distance=1.64,
add_chat_history_to_context=True,
sanitize_bot_response=False,
show_rank=False,
use_llm_if_no_docs=False,
load_db_if_exists=False,
db=None,
do_sample=False,
temperature=0.1,
top_k=40,
top_p=0.7,
num_beams=1,
max_new_tokens=256,
min_new_tokens=1,
early_stopping=False,
max_time=180,
repetition_penalty=1.0,
num_return_sequences=1,
langchain_mode=None,
langchain_action=None,
langchain_agents=None,
document_subset=DocumentSubset.Relevant.name,
document_choice=[DocumentChoice.ALL.value],
n_jobs=-1,
verbose=False,
cli=False,
reverse_docs=True,
lora_weights='',
auto_reduce_chunks=True,
max_chunks=100,
):
"""
:param query:
:param use_openai_model:
:param use_openai_embedding:
:param first_para:
:param text_limit:
:param top_k_docs:
:param chunk:
:param chunk_size:
:param langchain_mode_paths: dict of langchain_mode -> user path to glob recursively from
:param db_type: 'faiss' for in-memory db or 'chroma' or 'weaviate' for persistent db
:param model_name: model name, used to switch behaviors
:param model: pre-initialized model, else will make new one
:param tokenizer: pre-initialized tokenizer, else will make new one. Required not None if model is not None
    :param answer_with_sources: whether to append formatted source links to the returned answer
:return:
"""
assert langchain_mode_paths is not None
if model is not None:
assert model_name is not None # require so can make decisions
assert query is not None
assert prompter is not None or prompt_type is not None or model is None # if model is None, then will generate
if prompter is not None:
prompt_type = prompter.prompt_type
prompt_dict = prompter.prompt_dict
if model is not None:
assert prompt_type is not None
if prompt_type == PromptType.custom.name:
assert prompt_dict is not None # should at least be {} or ''
else:
prompt_dict = ''
assert len(set(gen_hyper).difference(inspect.signature(get_llm).parameters)) == 0
# pass in context to LLM directly, since already has prompt_type structure
# can't pass through langchain in get_chain() to LLM: https://github.com/hwchase17/langchain/issues/6638
llm, model_name, streamer, prompt_type_out = get_llm(use_openai_model=use_openai_model, model_name=model_name,
model=model,
tokenizer=tokenizer,
inference_server=inference_server,
langchain_only_model=langchain_only_model,
stream_output=stream_output,
do_sample=do_sample,
temperature=temperature,
top_k=top_k,
top_p=top_p,
num_beams=num_beams,
max_new_tokens=max_new_tokens,
min_new_tokens=min_new_tokens,
early_stopping=early_stopping,
max_time=max_time,
repetition_penalty=repetition_penalty,
num_return_sequences=num_return_sequences,
prompt_type=prompt_type,
prompt_dict=prompt_dict,
prompter=prompter,
context=context if add_chat_history_to_context else '',
iinput=iinput if add_chat_history_to_context else '',
sanitize_bot_response=sanitize_bot_response,
verbose=verbose,
)
use_docs_planned = False
scores = []
chain = None
if isinstance(document_choice, str):
# support string as well
document_choice = [document_choice]
func_names = list(inspect.signature(get_chain).parameters)
sim_kwargs = {k: v for k, v in locals().items() if k in func_names}
missing_kwargs = [x for x in func_names if x not in sim_kwargs]
assert not missing_kwargs, "Missing: %s" % missing_kwargs
docs, chain, scores, use_docs_planned, have_any_docs = get_chain(**sim_kwargs)
if document_subset in non_query_commands:
formatted_doc_chunks = '\n\n'.join([get_url(x) + '\n\n' + x.page_content for x in docs])
if not formatted_doc_chunks and not use_llm_if_no_docs:
yield "No sources", ''
return
        # if no sources, outside gpt_langchain, LLM will be used with '' input
yield formatted_doc_chunks, ''
return
if not use_llm_if_no_docs:
if not docs and langchain_action in [LangChainAction.SUMMARIZE_MAP.value,
LangChainAction.SUMMARIZE_ALL.value,
LangChainAction.SUMMARIZE_REFINE.value]:
ret = 'No relevant documents to summarize.' if have_any_docs else 'No documents to summarize.'
extra = ''
yield ret, extra
return
if not docs and langchain_mode not in [LangChainMode.DISABLED.value,
LangChainMode.LLM.value]:
ret = 'No relevant documents to query.' if have_any_docs else 'No documents to query.'
extra = ''
yield ret, extra
return
    if chain is None and not langchain_only_model:
# here if no docs at all and not HF type
# can only return if HF type
return
# context stuff similar to used in evaluate()
import torch
device, torch_dtype, context_class = get_device_dtype()
with torch.no_grad():
have_lora_weights = lora_weights not in [no_lora_str, '', None]
context_class_cast = NullContext if device == 'cpu' or have_lora_weights else torch.autocast
with context_class_cast(device):
if stream_output and streamer:
answer = None
import queue
bucket = queue.Queue()
thread = EThread(target=chain, streamer=streamer, bucket=bucket)
thread.start()
outputs = ""
prompt = None # FIXME
try:
for new_text in streamer:
# print("new_text: %s" % new_text, flush=True)
if bucket.qsize() > 0 or thread.exc:
thread.join()
outputs += new_text
if prompter: # and False: # FIXME: pipeline can already use prompter
output1 = prompter.get_response(outputs, prompt=prompt,
sanitize_bot_response=sanitize_bot_response)
yield output1, ''
else:
yield outputs, ''
except BaseException:
# if any exception, raise that exception if was from thread, first
if thread.exc:
raise thread.exc
raise
finally:
# in case no exception and didn't join with thread yet, then join
if not thread.exc:
answer = thread.join()
# in case raise StopIteration or broke queue loop in streamer, but still have exception
if thread.exc:
raise thread.exc
# FIXME: answer is not string outputs from streamer. How to get actual final output?
# answer = outputs
else:
answer = chain()
if not use_docs_planned:
ret = answer['output_text']
extra = ''
yield ret, extra
elif answer is not None:
ret, extra = get_sources_answer(query, answer, scores, show_rank, answer_with_sources, verbose=verbose)
yield ret, extra
return
def get_docs_with_score(query, k_db, filter_kwargs, db, db_type, verbose=False):
# deal with bug in chroma where if (say) 234 doc chunks and ask for 233+ then fails due to reduction misbehavior
docs_with_score = []
if db_type == 'chroma':
while True:
try:
docs_with_score = db.similarity_search_with_score(query, k=k_db, **filter_kwargs)
break
except (RuntimeError, AttributeError) as e:
# AttributeError is for people with wrong version of langchain
if verbose:
print("chroma bug: %s" % str(e), flush=True)
if k_db == 1:
raise
if k_db > 10:
k_db -= 10
else:
k_db -= 1
k_db = max(1, k_db)
else:
docs_with_score = db.similarity_search_with_score(query, k=k_db, **filter_kwargs)
return docs_with_score
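
# Illustrative sketch (not used by the pipeline): a plain similarity lookup via the retry
# wrapper above; filter_kwargs follows the Chroma 'filter' convention built in get_chain().
# The query string and source name are placeholders.
def _example_get_docs_with_score(db):
    filter_kwargs = dict(filter={"source": {"$eq": "example.pdf"}})
    docs_with_score = get_docs_with_score("What is h2oGPT?", 10, filter_kwargs, db, 'chroma')
    return [(doc.metadata.get('source'), score) for doc, score in docs_with_score]
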
def get_chain(query=None,
iinput=None,
context=None, # FIXME: https://github.com/hwchase17/langchain/issues/6638
use_openai_model=False, use_openai_embedding=False,
first_para=False, text_limit=None, top_k_docs=4, chunk=True, chunk_size=512,
langchain_mode_paths=None,
detect_user_path_changes_every_query=False,
db_type='faiss',
model_name=None,
inference_server='',
langchain_only_model=False,
hf_embedding_model="sentence-transformers/all-MiniLM-L6-v2",
prompt_type=None,
prompt_dict=None,
cut_distance=1.1,
add_chat_history_to_context=True, # FIXME: https://github.com/hwchase17/langchain/issues/6638
load_db_if_exists=False,
db=None,
langchain_mode=None,
langchain_action=None,
langchain_agents=None,
document_subset=DocumentSubset.Relevant.name,
document_choice=[DocumentChoice.ALL.value],
n_jobs=-1,
# beyond run_db_query:
llm=None,
tokenizer=None,
verbose=False,
reverse_docs=True,
# local
auto_reduce_chunks=True,
max_chunks=100,
):
assert langchain_agents is not None # should be at least []
# determine whether use of context out of docs is planned
if not use_openai_model and prompt_type not in ['plain'] or langchain_only_model:
if langchain_mode in ['Disabled', 'LLM']:
use_docs_planned = False
else:
use_docs_planned = True
else:
use_docs_planned = True
# https://github.com/hwchase17/langchain/issues/1946
    # FIXME: Seems to be no way to get size of chroma db to limit top_k_docs, to avoid
# Chroma collection MyData contains fewer than 4 elements.
# type logger error
if top_k_docs == -1:
k_db = 1000 if db_type == 'chroma' else 100
else:
# top_k_docs=100 works ok too
k_db = 1000 if db_type == 'chroma' else top_k_docs
# FIXME: For All just go over all dbs instead of a separate db for All
if not detect_user_path_changes_every_query and db is not None:
# avoid looking at user_path during similarity search db handling,
# if already have db and not updating from user_path every query
# but if db is None, no db yet loaded (e.g. from prep), so allow user_path to be whatever it was
if langchain_mode_paths is None:
langchain_mode_paths = {}
langchain_mode_paths = langchain_mode_paths.copy()
langchain_mode_paths[langchain_mode] = None
db, num_new_sources, new_sources_metadata = make_db(use_openai_embedding=use_openai_embedding,
hf_embedding_model=hf_embedding_model,
first_para=first_para, text_limit=text_limit,
chunk=chunk,
chunk_size=chunk_size,
langchain_mode=langchain_mode,
langchain_mode_paths=langchain_mode_paths,
db_type=db_type,
load_db_if_exists=load_db_if_exists,
db=db,
n_jobs=n_jobs,
verbose=verbose)
have_any_docs = db is not None
if langchain_action == LangChainAction.QUERY.value:
if iinput:
query = "%s\n%s" % (query, iinput)
if 'falcon' in model_name:
extra = "According to only the information in the document sources provided within the context above, "
prefix = "Pay attention and remember information below, which will help to answer the question or imperative after the context ends."
elif inference_server in ['openai', 'openai_chat']:
extra = "According to (primarily) the information in the document sources provided within context above, "
prefix = "Pay attention and remember information below, which will help to answer the question or imperative after the context ends. If the answer cannot be primarily obtained from information within the context, then respond that the answer does not appear in the context of the documents."
else:
extra = ""
prefix = ""
if langchain_mode in ['Disabled', 'LLM'] or not use_docs_planned:
template_if_no_docs = template = """%s{context}{question}""" % prefix
else:
template = """%s
\"\"\"
{context}
\"\"\"
%s{question}""" % (prefix, extra)
template_if_no_docs = """%s{context}%s{question}""" % (prefix, extra)
elif langchain_action in [LangChainAction.SUMMARIZE_ALL.value, LangChainAction.SUMMARIZE_MAP.value]:
none = ['', '\n', None]
if query in none and iinput in none:
prompt_summary = "Using only the text above, write a condensed and concise summary of key results (preferably as bullet points):\n"
elif query not in none:
prompt_summary = "Focusing on %s, write a condensed and concise Summary:\n" % query
        elif iinput not in none:
prompt_summary = iinput
else:
prompt_summary = "Focusing on %s, %s:\n" % (query, iinput)
# don't auto reduce
auto_reduce_chunks = False
if langchain_action == LangChainAction.SUMMARIZE_MAP.value:
fstring = '{text}'
else:
fstring = '{input_documents}'
template = """In order to write a concise single-paragraph or bulleted list summary, pay attention to the following text:
\"\"\"
%s
\"\"\"\n%s""" % (fstring, prompt_summary)
template_if_no_docs = "Exactly only say: There are no documents to summarize."
    elif langchain_action in [LangChainAction.SUMMARIZE_REFINE.value]:
template = '' # unused
template_if_no_docs = '' # unused
else:
raise RuntimeError("No such langchain_action=%s" % langchain_action)
if not use_openai_model and prompt_type not in ['plain'] or langchain_only_model:
use_template = True
else:
use_template = False
if db and use_docs_planned:
base_path = 'locks'
base_path = makedirs(base_path, exist_ok=True, tmp_ok=True)
if hasattr(db, '_persist_directory'):
name_path = "sim_%s.lock" % os.path.basename(db._persist_directory)
else:
name_path = "sim.lock"
lock_file = os.path.join(base_path, name_path)
if not isinstance(db, Chroma):
# only chroma supports filtering
filter_kwargs = {}
else:
assert document_choice is not None, "Document choice was None"
if len(document_choice) >= 1 and document_choice[0] == DocumentChoice.ALL.value:
filter_kwargs = {}
elif len(document_choice) >= 2:
if document_choice[0] == DocumentChoice.ALL.value:
# remove 'All'
document_choice = document_choice[1:]
or_filter = [{"source": {"$eq": x}} for x in document_choice]
filter_kwargs = dict(filter={"$or": or_filter})
elif len(document_choice) == 1:
# degenerate UX bug in chroma
one_filter = [{"source": {"$eq": x}} for x in document_choice][0]
filter_kwargs = dict(filter=one_filter)
else:
# shouldn't reach
filter_kwargs = {}
if langchain_mode in [LangChainMode.LLM.value]:
docs = []
scores = []
elif document_subset == DocumentSubset.TopKSources.name or query in [None, '', '\n']:
db_documents, db_metadatas = get_docs_and_meta(db, top_k_docs, filter_kwargs=filter_kwargs)
# similar to langchain's chroma's _results_to_docs_and_scores
docs_with_score = [(Document(page_content=result[0], metadata=result[1] or {}), 0)
for result in zip(db_documents, db_metadatas)]
# order documents
doc_hashes = [x.get('doc_hash', 'None') for x in db_metadatas]
doc_chunk_ids = [x.get('chunk_id', 0) for x in db_metadatas]
docs_with_score = [x for _, _, x in
sorted(zip(doc_hashes, doc_chunk_ids, docs_with_score), key=lambda x: (x[0], x[1]))
]
docs_with_score = docs_with_score[:top_k_docs]
docs = [x[0] for x in docs_with_score]
scores = [x[1] for x in docs_with_score]
have_any_docs |= len(docs) > 0
else:
# FIXME: if langchain_action == LangChainAction.SUMMARIZE_MAP.value
# if map_reduce, then no need to auto reduce chunks
if top_k_docs == -1 or auto_reduce_chunks:
top_k_docs_tokenize = 100
with filelock.FileLock(lock_file):
docs_with_score = get_docs_with_score(query, k_db, filter_kwargs, db, db_type, verbose=verbose)[
:top_k_docs_tokenize]
if hasattr(llm, 'pipeline') and hasattr(llm.pipeline, 'tokenizer'):
# more accurate
tokens = [len(llm.pipeline.tokenizer(x[0].page_content)['input_ids']) for x in docs_with_score]
template_tokens = len(llm.pipeline.tokenizer(template)['input_ids'])
elif inference_server in ['openai', 'openai_chat'] or use_openai_model or db_type in ['faiss',
'weaviate']:
                    # use tiktoken for faiss since embedding called differently
tokens = [llm.get_num_tokens(x[0].page_content) for x in docs_with_score]
template_tokens = llm.get_num_tokens(template)
elif isinstance(tokenizer, FakeTokenizer):
tokens = [tokenizer.num_tokens_from_string(x[0].page_content) for x in docs_with_score]
template_tokens = tokenizer.num_tokens_from_string(template)
else:
# in case model is not our pipeline with HF tokenizer
tokens = [db._embedding_function.client.tokenize([x[0].page_content])['input_ids'].shape[1] for x in
docs_with_score]
template_tokens = db._embedding_function.client.tokenize([template])['input_ids'].shape[1]
tokens_cumsum = np.cumsum(tokens)
if hasattr(llm, 'pipeline') and hasattr(llm.pipeline, 'max_input_tokens'):
max_input_tokens = llm.pipeline.max_input_tokens
elif inference_server in ['openai']:
max_tokens = llm.modelname_to_contextsize(model_name)
# leave some room for 1 paragraph, even if min_new_tokens=0
max_input_tokens = max_tokens - 256
elif inference_server in ['openai_chat']:
max_tokens = model_token_mapping[model_name]
# leave some room for 1 paragraph, even if min_new_tokens=0
max_input_tokens = max_tokens - 256
elif isinstance(tokenizer, FakeTokenizer):
max_input_tokens = tokenizer.model_max_length - 256
else:
# leave some room for 1 paragraph, even if min_new_tokens=0
max_input_tokens = 2048 - 256
max_input_tokens -= template_tokens
                # FIXME: Doesn't account for tokens from the query or context, nor newlines between document chunks
where_res = np.where(tokens_cumsum < max_input_tokens)[0]
if where_res.shape[0] == 0:
# then no chunk can fit, still do first one
top_k_docs_trial = 1
else:
top_k_docs_trial = 1 + where_res[-1]
if 0 < top_k_docs_trial < max_chunks:
# avoid craziness
if top_k_docs == -1:
top_k_docs = top_k_docs_trial
else:
top_k_docs = min(top_k_docs, top_k_docs_trial)
if top_k_docs == -1:
# if here, means 0 and just do best with 1 doc
print("Unexpected large chunks and can't add to context, will add 1 anyways", flush=True)
top_k_docs = 1
docs_with_score = docs_with_score[:top_k_docs]
else:
with filelock.FileLock(lock_file):
docs_with_score = get_docs_with_score(query, k_db, filter_kwargs, db, db_type, verbose=verbose)[
:top_k_docs]
# put most relevant chunks closest to question,
# esp. if truncation occurs will be "oldest" or "farthest from response" text that is truncated
# BUT: for small models, e.g. 6_9 pythia, if sees some stuff related to h2oGPT first, it can connect that and not listen to rest
if reverse_docs:
docs_with_score.reverse()
# cut off so no high distance docs/sources considered
have_any_docs |= len(docs_with_score) > 0 # before cut
docs = [x[0] for x in docs_with_score if x[1] < cut_distance]
scores = [x[1] for x in docs_with_score if x[1] < cut_distance]
if len(scores) > 0 and verbose:
print("Distance: min: %s max: %s mean: %s median: %s" %
(scores[0], scores[-1], np.mean(scores), np.median(scores)), flush=True)
else:
docs = []
scores = []
if not docs and use_docs_planned and not langchain_only_model:
# if HF type and have no docs, can bail out
return docs, None, [], False, have_any_docs
if document_subset in non_query_commands:
# no LLM use
return docs, None, [], False, have_any_docs
common_words_file = "data/NGSL_1.2_stats.csv.zip"
    if os.path.isfile(common_words_file) and langchain_action == LangChainAction.QUERY.value:
        df = pd.read_csv(common_words_file)
import string
reduced_query = query.translate(str.maketrans(string.punctuation, ' ' * len(string.punctuation))).strip()
reduced_query_words = reduced_query.split(' ')
set_common = set(df['Lemma'].values.tolist())
        num_common = len([x for x in reduced_query_words if x.lower() in set_common])
        frac_common = num_common / len(reduced_query_words) if reduced_query_words else 0
# FIXME: report to user bad query that uses too many common words
if verbose:
print("frac_common: %s" % frac_common, flush=True)
if len(docs) == 0:
# avoid context == in prompt then
use_docs_planned = False
template = template_if_no_docs
if langchain_action == LangChainAction.QUERY.value:
if use_template:
# instruct-like, rather than few-shot prompt_type='plain' as default
# but then sources confuse the model with how inserted among rest of text, so avoid
prompt = PromptTemplate(
# input_variables=["summaries", "question"],
input_variables=["context", "question"],
template=template,
)
chain = load_qa_chain(llm, prompt=prompt)
else:
# only if use_openai_model = True, unused normally except in testing
chain = load_qa_with_sources_chain(llm)
if not use_docs_planned:
chain_kwargs = dict(input_documents=[], question=query)
else:
chain_kwargs = dict(input_documents=docs, question=query)
target = wrapped_partial(chain, chain_kwargs)
    elif langchain_action in [LangChainAction.SUMMARIZE_MAP.value,
                              LangChainAction.SUMMARIZE_REFINE.value,
                              LangChainAction.SUMMARIZE_ALL.value]:
from langchain.chains.summarize import load_summarize_chain
if langchain_action == LangChainAction.SUMMARIZE_MAP.value:
prompt = PromptTemplate(input_variables=["text"], template=template)
chain = load_summarize_chain(llm, chain_type="map_reduce",
map_prompt=prompt, combine_prompt=prompt, return_intermediate_steps=True)
target = wrapped_partial(chain, {"input_documents": docs}) # , return_only_outputs=True)
elif langchain_action == LangChainAction.SUMMARIZE_ALL.value:
assert use_template
prompt = PromptTemplate(input_variables=["text"], template=template)
chain = load_summarize_chain(llm, chain_type="stuff", prompt=prompt, return_intermediate_steps=True)
target = wrapped_partial(chain)
elif langchain_action == LangChainAction.SUMMARIZE_REFINE.value:
chain = load_summarize_chain(llm, chain_type="refine", return_intermediate_steps=True)
target = wrapped_partial(chain)
else:
raise RuntimeError("No such langchain_action=%s" % langchain_action)
else:
raise RuntimeError("No such langchain_action=%s" % langchain_action)
return docs, target, scores, use_docs_planned, have_any_docs
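

# Hedged sketch (added for illustration, not part of the original file): the chunk
# selection above uses a cumulative token count to decide how many of the top-ranked
# chunks still fit under the model's input budget. Stand-alone, the same idea looks
# like this (numpy is already imported as np in this module).
def _select_top_k_by_token_budget(chunk_token_counts, max_input_tokens):
    """Return how many leading chunks fit within max_input_tokens (always at least 1)."""
    tokens_cumsum = np.cumsum(chunk_token_counts)
    where_res = np.where(tokens_cumsum < max_input_tokens)[0]
    # If even the first chunk is too large, still keep one so the query has some context.
    return 1 if where_res.shape[0] == 0 else int(1 + where_res[-1])
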
def get_sources_answer(query, answer, scores, show_rank, answer_with_sources, verbose=False):
if verbose:
print("query: %s" % query, flush=True)
print("answer: %s" % answer['output_text'], flush=True)
if len(answer['input_documents']) == 0:
extra = ''
ret = answer['output_text'] + extra
return ret, extra
# link
answer_sources = [(max(0.0, 1.5 - score) / 1.5, get_url(doc)) for score, doc in
zip(scores, answer['input_documents'])]
answer_sources_dict = defaultdict(list)
[answer_sources_dict[url].append(score) for score, url in answer_sources]
answers_dict = {}
for url, scores_url in answer_sources_dict.items():
answers_dict[url] = np.max(scores_url)
answer_sources = [(score, url) for url, score in answers_dict.items()]
answer_sources.sort(key=lambda x: x[0], reverse=True)
if show_rank:
# answer_sources = ['%d | %s' % (1 + rank, url) for rank, (score, url) in enumerate(answer_sources)]
# sorted_sources_urls = "Sources [Rank | Link]:<br>" + "<br>".join(answer_sources)
answer_sources = ['%s' % url for rank, (score, url) in enumerate(answer_sources)]
sorted_sources_urls = "Ranked Sources:<br>" + "<br>".join(answer_sources)
else:
answer_sources = ['<li>%.2g | %s</li>' % (score, url) for score, url in answer_sources]
sorted_sources_urls = f"{source_prefix}<p><ul>" + "<p>".join(answer_sources)
sorted_sources_urls += f"</ul></p>{source_postfix}"
if not answer['output_text'].endswith('\n'):
answer['output_text'] += '\n'
if answer_with_sources:
extra = '\n' + sorted_sources_urls
else:
extra = ''
ret = answer['output_text'] + extra
return ret, extra
def clean_doc(docs1):
if not isinstance(docs1, (list, tuple, types.GeneratorType)):
docs1 = [docs1]
for doci, doc in enumerate(docs1):
docs1[doci].page_content = '\n'.join([x.strip() for x in doc.page_content.split("\n") if x.strip()])
return docs1
def chunk_sources(sources, chunk=True, chunk_size=512, language=None):
if not chunk:
[x.metadata.update(dict(chunk_id=chunk_id)) for chunk_id, x in enumerate(sources)]
return sources
if not isinstance(sources, (list, tuple, types.GeneratorType)) and not callable(sources):
# if just one document
sources = [sources]
if language and False:
# Bug in langchain, keep separator=True not working
# https://github.com/hwchase17/langchain/issues/2836
# so avoid this for now
keep_separator = True
separators = RecursiveCharacterTextSplitter.get_separators_for_language(language)
else:
separators = ["\n\n", "\n", " ", ""]
keep_separator = False
splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=0, keep_separator=keep_separator,
separators=separators)
source_chunks = splitter.split_documents(sources)
# currently in order, but when pull from db won't be, so mark order and document by hash
[x.metadata.update(dict(chunk_id=chunk_id)) for chunk_id, x in enumerate(source_chunks)]
return source_chunks
def get_db_from_hf(dest=".", db_dir='db_dir_DriverlessAI_docs.zip'):
from huggingface_hub import hf_hub_download
# True for case when locally already logged in with correct token, so don't have to set key
token = os.getenv('HUGGINGFACE_API_TOKEN', True)
path_to_zip_file = hf_hub_download('h2oai/db_dirs', db_dir, token=token, repo_type='dataset')
import zipfile
with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
persist_directory = os.path.dirname(zip_ref.namelist()[0])
remove(persist_directory)
zip_ref.extractall(dest)
return path_to_zip_file
# Note dir has space in some cases, while zip does not
some_db_zips = [['db_dir_DriverlessAI_docs.zip', 'db_dir_DriverlessAI docs', 'CC-BY-NC license'],
['db_dir_UserData.zip', 'db_dir_UserData', 'CC-BY license for ArXiv'],
['db_dir_github_h2oGPT.zip', 'db_dir_github h2oGPT', 'ApacheV2 license'],
['db_dir_wiki.zip', 'db_dir_wiki', 'CC-BY-SA Wikipedia license'],
# ['db_dir_wiki_full.zip', 'db_dir_wiki_full.zip', '23GB, 05/04/2023 CC-BY-SA Wiki license'],
]
all_db_zips = some_db_zips + \
[['db_dir_wiki_full.zip', 'db_dir_wiki_full.zip', '23GB, 05/04/2023 CC-BY-SA Wiki license'],
]
def get_some_dbs_from_hf(dest='.', db_zips=None):
if db_zips is None:
db_zips = some_db_zips
for db_dir, dir_expected, license1 in db_zips:
path_to_zip_file = get_db_from_hf(dest=dest, db_dir=db_dir)
assert os.path.isfile(path_to_zip_file), "Missing zip in %s" % path_to_zip_file
if dir_expected:
assert os.path.isdir(os.path.join(dest, dir_expected)), "Missing path for %s" % dir_expected
assert os.path.isdir(os.path.join(dest, dir_expected, 'index')), "Missing index in %s" % dir_expected
def _create_local_weaviate_client():
WEAVIATE_URL = os.getenv('WEAVIATE_URL', "http://localhost:8080")
WEAVIATE_USERNAME = os.getenv('WEAVIATE_USERNAME')
WEAVIATE_PASSWORD = os.getenv('WEAVIATE_PASSWORD')
WEAVIATE_SCOPE = os.getenv('WEAVIATE_SCOPE', "offline_access")
resource_owner_config = None
try:
import weaviate
if WEAVIATE_USERNAME is not None and WEAVIATE_PASSWORD is not None:
resource_owner_config = weaviate.AuthClientPassword(
username=WEAVIATE_USERNAME,
password=WEAVIATE_PASSWORD,
scope=WEAVIATE_SCOPE
)
client = weaviate.Client(WEAVIATE_URL, auth_client_secret=resource_owner_config)
return client
except Exception as e:
print(f"Failed to create Weaviate client: {e}")
return None
if __name__ == '__main__':
pass
| [
"In order to write a concise single-paragraph or bulleted list summary, pay attention to the following text:\n\"\"\"\nPLACEHOLDER\n\"\"\"\nPLACEHOLDER",
"PLACEHOLDER{context}PLACEHOLDER{question}",
"plain",
"True",
"Focusing on PLACEHOLDER, write a condensed and concise Summary:\n",
"question",
"%s{context}{question}",
"Using only the text above, write a condensed and concise summary of key results (preferably as bullet points):\n",
"None",
"Exactly only say: There are no documents to summarize.",
"False",
"PLACEHOLDER\n \"\"\"\n {context}\n \"\"\"\n PLACEHOLDER{question}",
"context",
"Focusing on PLACEHOLDER, PLACEHOLDER:\n",
"input_ids",
"human_bot"
] |
2024-01-10 | trangdata/askalex | openalex.py | import pyalex
import pandas as pd
import tiktoken
from openai.embeddings_utils import get_embedding, cosine_similarity
# from askalex import cosine_similarity, get_embedding
def find_abs(keywords, per_page=100):
print("Finding pubs...")
woi = pyalex.Works().search_filter(abstract=keywords).get(per_page=per_page)
abs_df = pd.DataFrame(
[
{
"title": e["title"],
"abstract": e["abstract"],
"url": e["doi"],
}
for e in woi
]
)
abs_df["abstract"] = abs_df["abstract"].apply(shorten_abstract)
print("Done!")
return abs_df
def shorten_abstract(text, max_words=500, max_length=300):
    # Abstracts longer than max_words words are truncated to their first max_length
    # words so they stay well under the embedding model's token limit.
    words = text.split()
    if len(words) > max_words:
        return " ".join(words[:max_length])
    else:
        return text
def get_embed(
df,
embedding_model="tcell_ada_embeddings",
embedding_encoding="cl100k_base", # this the encoding for text-embedding-ada-002
max_tokens=8000, # the maximum for text-embedding-ada-002 is 8191
top_n=1000,
):
print("Finding embeddings...")
# omit reviews that are too long to embed
encoding = tiktoken.get_encoding(embedding_encoding)
df["n_tokens"] = df.abstract.apply(lambda x: len(encoding.encode(x)))
df = df[df.n_tokens <= max_tokens].tail(top_n)
df["embedding"] = df.abstract.apply(
lambda x: get_embedding(x, engine=embedding_model)
)
print("Done!")
return df
def search_docs(
df,
user_query,
embedding_model="tcell_ada_embeddings",
top_n=10,
):
# perform semantic search on these abstracts and find
# the top 10 relevant abstracts
embedding = get_embedding(user_query, engine=embedding_model)
df["similarities"] = df.embedding.apply(lambda x: cosine_similarity(x, embedding))
res = df.sort_values("similarities", ascending=False).head(top_n)
return res
def style_dataframe(df):
# check that the input DataFrame has the correct columns
expected_columns = ["similarities", "title", "abstract", "url"]
missing_columns = set(expected_columns) - set(df.columns)
if missing_columns:
raise ValueError(f"Missing columns in input DataFrame: {missing_columns}")
styled_df = pd.DataFrame()
styled_df["Publication"] = df.apply(
lambda row: f'<p style="font-weight: bold; font-size: larger"><a href="{row["url"]}">{row["title"]}</a></p><p>{row["abstract"]}</p>',
axis=1,
)
styled_df["Similarity"] = df["similarities"].apply(lambda x: f"{x:.3f}")
return styled_df
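

# Hedged end-to-end sketch (added for illustration, not in the original file): the
# intended pipeline is find_abs -> get_embed -> search_docs -> style_dataframe.
# "tcell_ada_embeddings" above is this project's Azure OpenAI embedding deployment
# name; treat the keywords and query below as placeholder examples.
if __name__ == "__main__":
    abstracts = find_abs("regulatory T cells", per_page=25)
    embedded = get_embed(abstracts)
    hits = search_docs(embedded, "What do regulatory T cells suppress?", top_n=5)
    print(style_dataframe(hits).to_html(escape=False))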
| [] |
2024-01-10 | trangdata/askalex | askalex.py | import openai
import os
import numpy as np
from openai.embeddings_utils import get_embedding, distances_from_embeddings
openai.api_type = os.getenv("OPENAI_API_TYPE")
openai.api_base = os.getenv("OPENAI_API_BASE")
openai.api_version = os.getenv("OPENAI_API_VERSION")
openai.api_key = os.getenv("OPENAI_API_KEY")
openai.proxy = os.getenv("OPENAI_PROXY")
# def get_embedding(text, engine="text-embedding-ada-002"): # model = "deployment_name"
# return client.embeddings.create(input=[text], model=engine).data[0].embedding
# # return openai.Embedding.create(input=text, engine=engine)["data"][0]["embedding"]
# def cosine_similarity(a, b):
# return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
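

# Hedged fallback sketch (added for clarity; mirrors the commented-out helper above):
# a local cosine-similarity implementation for when openai.embeddings_utils is not
# available. numpy is already imported as np at the top of this module.
def _cosine_similarity(a, b):
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
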
def create_context(question, df, max_len=1800, size="ada"):
"""
Create a context for a question by finding the most similar context from the dataframe
"""
# Get the embeddings for the question
q_embeddings = get_embedding(question, engine="tcell_ada_embeddings")
# Get the distances from the embeddings
df["distances"] = distances_from_embeddings(
q_embeddings, df["embedding"].values, distance_metric="cosine"
)
# df["sim"] = cosine_similarity(q_embeddings, df["embedding"].values)
returns = []
cur_len = 0
# Sort by distance and add the text to the context until the context is too long
for i, row in df.sort_values("distances", ascending=True).iterrows():
# Add the length of the text to the current length
cur_len += row["n_tokens"] + 4
# If the context is too long, break
if cur_len > max_len:
break
# Else add it to the text that is being returned
returns.append(row["abstract"])
# Return the context
return "\n\n###\n\n".join(returns)
def answer_question(
question,
df,
engine="T-Cell-Phenotype", # "GPT-4-32k",
max_len=4097,
size="ada",
debug=False,
stop_sequence=None,
):
"""
Answer a question based on the most similar context from the dataframe texts
"""
if question is None:
return ""
template = (
"You are an intelligent assistant helping users with their questions. "
+ "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. "
+ "Answer the following question using only the data provided in the sources below. "
+ "For tabular information return it as an html table. Do not return markdown format. "
+ "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. "
+ "If you cannot answer using the sources below, say you don't know. "
+ "\n\nContext: {context}\n\n---\n\nQuestion: {question}\nAnswer: "
)
context = create_context(
question,
df,
max_len=max_len,
size=size,
)
# If debug, print the raw model response
if debug:
print("Context:\n" + context)
print("\n\n")
prompt = template.format(context=context, question=question)
try:
return trim_incomplete_sentence(complete_model(prompt, engine, stop_sequence))
except Exception as e:
print(e)
return ""
def trim_incomplete_sentence(paragraph):
sentences = paragraph.split(". ")
# if the last sentence is complete
if sentences[-1].endswith("."):
return paragraph
# else, remove it
trimmed_paragraph = ". ".join(sentences[:-1])
trimmed_paragraph += "."
return trimmed_paragraph
def complete_model(
prompt,
engine,
stop_sequence,
):
    model = engine
if "gpt" in model:
max_tokens = 10000
else:
n_tokens = len(prompt) // 4
max_tokens = 3880 - n_tokens
if model == "gpt-4-32k" or model == "gpt-4":
response = openai.ChatCompletion.create(
messages=[{"role": "user", "content": prompt}],
temperature=0,
max_tokens=max_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=stop_sequence,
model=model,
engine=engine,
)
return response["choices"][0]["message"]["content"]
else:
response = openai.Completion.create(
prompt=prompt,
temperature=0,
max_tokens=max_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=stop_sequence,
engine=engine,
)
return response["choices"][0]["text"]
| [
"You are an intelligent assistant helping users with their questions. Use 'you' to refer to the individual asking the questions even if they ask with 'I'. Answer the following question using only the data provided in the sources below. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. If you cannot answer using the sources below, say you don't know. \n\nContext: {context}\n\n---\n\nQuestion: {question}\nAnswer: "
] |
2024-01-10 | xxblxs/chatpdf-demo | ChatDemo.py | from llama_index import SimpleDirectoryReader, LLMPredictor, PromptHelper, StorageContext, ServiceContext, GPTVectorStoreIndex, load_index_from_storage
from langchain.chat_models import ChatOpenAI
import gradio as gr
import sys
import os
import openai
openai.api_base = "https://api.app4gpt.com/v1"
os.environ["OPENAI_API_KEY"] = 'your-API-KEY'
def create_service_context():
#constraint parameters
max_input_size = 4096
num_outputs = 3072
max_chunk_overlap = 20
chunk_size_limit = 600
#allows the user to explicitly set certain constraint parameters
prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
#LLMPredictor is a wrapper class around LangChain's LLMChain that allows easy integration into LlamaIndex
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo", max_tokens=num_outputs))
#constructs service_context
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
return service_context
def data_ingestion_indexing(directory_path):
#loads data from the specified directory path
documents = SimpleDirectoryReader(directory_path).load_data()
#when first building the index
index = GPTVectorStoreIndex.from_documents(
documents, service_context=create_service_context()
)
#persist index to disk, default "storage" folder
index.storage_context.persist()
return index
def data_querying(input_text):
#rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir="./storage")
#loads index from storage
index = load_index_from_storage(storage_context, service_context=create_service_context())
#queries the index with the input text
response = index.as_query_engine().query(input_text)
return response.response
iface = gr.Interface(fn=data_querying,
inputs=gr.components.Textbox(lines=7, label="Enter your question"),
outputs="text",
title="Custom-Pdf Demo by Gpt4")
#passes in data directory
index = data_ingestion_indexing("data")
iface.launch(share=False)
| [] |
2024-01-10 | vukrosic/talk-to-pdf | firebase-functions~get_answer.py | import io
import pinecone
import openai
import functions_framework
from firebase_functions import https_fn
from firebase_functions import options
from firebase_admin import initialize_app
initialize_app()
options.set_global_options(max_instances=10)
@https_fn.on_request(
cors=options.CorsOptions(
cors_origins=["http://localhost:3000", "*"],
cors_methods=["GET", "POST"],
)
)
@functions_framework.http
def answer_question(request):
request_json = request.get_json(silent=True)
request_args = request.args
# Check if a question was provided in the request
    if not request_json or "question" not in request_json:
return "No question provided"
question = request_json["question"]
index_name = "test" # Replace with your actual Pinecone index name
# Initialize Pinecone
PINECONE_API_KEY = ""
PINECONE_API_ENV = ""
pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_API_ENV)
# Initialize Langchain embeddings
OPENAI_API_KEY = "" # Replace with your OpenAI API key
openai.api_key = OPENAI_API_KEY
# Convert the user's question into an embedding
question_embedding = embed_text(question)
# Search for the most similar embeddings in Pinecone
index = pinecone.Index(index_name)
results = index.query(queries=[question_embedding], top_k=3, include_metadata=True)
# Access the matches correctly
matches = results['results'][0]['matches']
relevant_documents = [match['metadata']['text'] for match in matches]
# Concatenate relevant documents into a single text
relevant_documents_text = "\n\n".join(relevant_documents)
if relevant_documents_text == "":
return "No relevant documents found"
# Create a chat prompt with relevant documents and the user's question
chat_prompt = f"Relevant Documents:\n{relevant_documents_text}\n\nUser Question: {question}\nAnswer:"
print(chat_prompt)
# Generate an answer using GPT-3.5 Turbo
response = openai.Completion.create(
engine="text-davinci-002", # Use the appropriate OpenAI engine
prompt=chat_prompt,
max_tokens=50, # Adjust as needed
)
answer = response.choices[0].text
print(answer)
return answer
def embed_text(text):
response = openai.Embedding.create(
input=text,
model="text-embedding-ada-002"
)
embeddings = response['data'][0]['embedding']
return embeddings
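

# Hedged usage sketch (added for illustration, not part of the deployed function): once
# deployed, answer_question is called over HTTP with a JSON body containing "question".
# The URL below is a placeholder; substitute your own Cloud Functions endpoint.
if __name__ == "__main__":
    import requests

    resp = requests.post(
        "https://REGION-PROJECT.cloudfunctions.net/answer_question",
        json={"question": "What does the document say about pricing?"},
    )
    print(resp.text)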
| [
"Relevant Documents:\nPLACEHOLDER\n\nUser Question: PLACEHOLDER\nAnswer:"
] |
2024-01-10 | vukrosic/talk-to-pdf | firebase-functions~embed_to_database.py | import io
import os
import pinecone
import openai
from PyPDF2 import PdfReader
from firebase_functions import https_fn
from firebase_functions import options
from firebase_admin import initialize_app
initialize_app()
options.set_global_options(max_instances=10)
def initialize_pinecone():
    PINECONE_API_KEY = ""
    PINECONE_API_ENV = ""
    pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_API_ENV)
    print("Pinecone initialized")
    return pinecone


@https_fn.on_request(
    cors=options.CorsOptions(
        cors_origins=["http://localhost:3000", "*"],
        cors_methods=["GET", "POST"],
    )
)
def embed_to_database(req: https_fn.Request) -> https_fn.Response:
# Convert the Request object to a dictionary
try:
# Check if a file was uploaded
if "file" in req.files:
uploaded_file = req.files["file"]
file_extension = uploaded_file.filename.split(".")[-1].lower()
print(f"File extension: {file_extension}")
# Check if the uploaded file is a PDF
if file_extension == "pdf":
# Extract text from the PDF file
file_content = uploaded_file.read()
pdf_text = extract_text_from_pdf_file(file_content)
print("PDF text extracted")
else:
pdf_text = ""
print("No PDF file uploaded")
# Check if user-pasted text is provided
user_pasted_text = req.form.get('userPastedText', '') # Access userPastedText from req.form
print("User-pasted text:")
print(user_pasted_text)
# Combine text from PDF and user-pasted text
combined_text = pdf_text + user_pasted_text
# Convert the extracted text to OpenAI vector embeddings
chunk_size = 1000
texts = [combined_text[i:i + chunk_size] for i in range(0, len(combined_text), chunk_size)]
index_name = "test" # Replace with your actual Pinecone index name
pinecone.init(api_key="", environment="")
print("Pinecone initialized for text embedding")
# Upload the extracted text to Pinecone
upload_data_to_pinecone(texts, index_name, pinecone)
print("Data uploaded to Pinecone")
# Return a response
return https_fn.Response("Text extracted and uploaded to Pinecone successfully")
except Exception as e:
error_message = f"Error: {str(e)}"
print(error_message)
return https_fn.Response(error_message, status=500)
def extract_text_from_pdf_file(pdf_bytes):
text = ""
# Debugging: Print the length of the PDF bytes
pdf_bytes_length = len(pdf_bytes)
print(f"PDF bytes length: {pdf_bytes_length}")
pdf_reader = PdfReader(io.BytesIO(pdf_bytes))
for page in pdf_reader.pages:
text += page.extract_text()
return text
def upload_data_to_pinecone(texts, index_name, pinecone):
# Initialize OpenAI API client
openai.api_key = os.environ.get("OPENAI_API_KEY")
print(os.environ.get("OPENAI_API_KEY"))
# Convert and upload data as tuples (ID, vector)
data_to_upload = [(str(i), embed_text(text), {"text": text}) for i, text in enumerate(texts)]
print(f"Data to upload: {data_to_upload}")
# Upload the data to Pinecone
index = pinecone.Index("test")
index.delete(delete_all=True)
index.upsert(data_to_upload)
print("Data uploaded to Pinecone")
def embed_text(text):
print("text: ")
print(text)
response = openai.Embedding.create(
input=text,
model="text-embedding-ada-002"
)
embeddings = response['data'][0]['embedding']
print("Text embedded")
return embeddings
| [] |
2024-01-10 | mlflow/mlflow | mlflow~gateway~providers~anthropic.py | import time
from fastapi import HTTPException
from fastapi.encoders import jsonable_encoder
from mlflow.gateway.config import AnthropicConfig, RouteConfig
from mlflow.gateway.constants import (
MLFLOW_AI_GATEWAY_ANTHROPIC_DEFAULT_MAX_TOKENS,
MLFLOW_AI_GATEWAY_ANTHROPIC_MAXIMUM_MAX_TOKENS,
)
from mlflow.gateway.providers.base import BaseProvider, ProviderAdapter
from mlflow.gateway.providers.utils import rename_payload_keys, send_request
from mlflow.gateway.schemas import chat, completions, embeddings
class AnthropicAdapter(ProviderAdapter):
@classmethod
def model_to_completions(cls, resp, config):
stop_reason = "stop" if resp["stop_reason"] == "stop_sequence" else "length"
return completions.ResponsePayload(
created=int(time.time()),
object="text_completion",
model=resp["model"],
choices=[
completions.Choice(
index=0,
text=resp["completion"],
finish_reason=stop_reason,
)
],
usage=completions.CompletionsUsage(
prompt_tokens=None,
completion_tokens=None,
total_tokens=None,
),
)
@classmethod
def completions_to_model(cls, payload, config):
key_mapping = {"max_tokens": "max_tokens_to_sample", "stop": "stop_sequences"}
if "top_p" in payload:
raise HTTPException(
status_code=422,
detail="Cannot set both 'temperature' and 'top_p' parameters. "
"Please use only the temperature parameter for your query.",
)
max_tokens = payload.get("max_tokens", MLFLOW_AI_GATEWAY_ANTHROPIC_DEFAULT_MAX_TOKENS)
if max_tokens > MLFLOW_AI_GATEWAY_ANTHROPIC_MAXIMUM_MAX_TOKENS:
raise HTTPException(
status_code=422,
detail="Invalid value for max_tokens: cannot exceed "
f"{MLFLOW_AI_GATEWAY_ANTHROPIC_MAXIMUM_MAX_TOKENS}.",
)
payload["max_tokens"] = max_tokens
if payload.get("stream", None) == "true":
raise HTTPException(
status_code=422,
detail="Setting the 'stream' parameter to 'true' is not supported with the MLflow "
"Gateway.",
)
n = payload.pop("n", 1)
if n != 1:
raise HTTPException(
status_code=422,
                detail=f"'n' must be '1' for the Anthropic provider. Received value: '{n}'.",
)
payload = rename_payload_keys(payload, key_mapping)
if payload["prompt"].startswith("Human: "):
payload["prompt"] = "\n\n" + payload["prompt"]
if not payload["prompt"].startswith("\n\nHuman: "):
payload["prompt"] = "\n\nHuman: " + payload["prompt"]
if not payload["prompt"].endswith("\n\nAssistant:"):
payload["prompt"] = payload["prompt"] + "\n\nAssistant:"
# The range of Anthropic's temperature is 0-1, but ours is 0-2, so we halve it
if "temperature" in payload:
payload["temperature"] = 0.5 * payload["temperature"]
return payload
@classmethod
def embeddings_to_model(cls, payload, config):
raise NotImplementedError
@classmethod
def model_to_embeddings(cls, resp, config):
raise NotImplementedError
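

# Hedged sketch (added for illustration; not part of MLflow): completions_to_model above
# wraps a bare prompt into the "\n\nHuman: ... \n\nAssistant:" frame that Anthropic's
# /complete endpoint expects. As a standalone helper, that normalization looks like this.
def _wrap_anthropic_prompt(prompt: str) -> str:
    if prompt.startswith("Human: "):
        prompt = "\n\n" + prompt
    if not prompt.startswith("\n\nHuman: "):
        prompt = "\n\nHuman: " + prompt
    if not prompt.endswith("\n\nAssistant:"):
        prompt = prompt + "\n\nAssistant:"
    return prompt
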
class AnthropicProvider(BaseProvider, AnthropicAdapter):
def __init__(self, config: RouteConfig) -> None:
super().__init__(config)
if config.model.config is None or not isinstance(config.model.config, AnthropicConfig):
raise TypeError(f"Invalid config type {config.model.config}")
self.anthropic_config: AnthropicConfig = config.model.config
self.headers = {"x-api-key": self.anthropic_config.anthropic_api_key}
self.base_url = "https://api.anthropic.com/v1/"
async def completions(self, payload: completions.RequestPayload) -> completions.ResponsePayload:
payload = jsonable_encoder(payload, exclude_none=True)
self.check_for_model_field(payload)
resp = await send_request(
headers=self.headers,
base_url=self.base_url,
path="complete",
payload={
"model": self.config.model.name,
**AnthropicAdapter.completions_to_model(payload, self.config),
},
)
# Example response:
# Documentation: https://docs.anthropic.com/claude/reference/complete_post
# ```
# {
# "completion": " Hello! My name is Claude."
# "stop_reason": "stop_sequence",
# "model": "claude-instant-1.1",
# "truncated": False,
# "stop": None,
# "log_id": "dee173f87ddf1357da639dee3c38d833",
# "exception": None,
# }
# ```
return AnthropicAdapter.model_to_completions(resp, self.config)
async def chat(self, payload: chat.RequestPayload) -> None:
# Anthropic does not have a chat endpoint
raise HTTPException(
status_code=404, detail="The chat route is not available for Anthropic models."
)
async def embeddings(self, payload: embeddings.RequestPayload) -> None:
# Anthropic does not have an embeddings endpoint
raise HTTPException(
status_code=404, detail="The embeddings route is not available for Anthropic models."
)
| [] |
2024-01-10 | mlflow/mlflow | examples~langchain~simple_chain.py | import os
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import mlflow
# Ensure the OpenAI API key is set in the environment
assert "OPENAI_API_KEY" in os.environ, "Please set the OPENAI_API_KEY environment variable."
# Initialize the OpenAI model and the prompt template
llm = OpenAI(temperature=0.9)
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
# Create the LLMChain with the specified model and prompt
chain = LLMChain(llm=llm, prompt=prompt)
# Log the LangChain LLMChain in an MLflow run
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(chain, "langchain_model")
# Load the logged model using MLflow's Python function flavor
loaded_model = mlflow.pyfunc.load_model(logged_model.model_uri)
# Predict using the loaded model
print(loaded_model.predict([{"product": "colorful socks"}]))
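
# Optional follow-up (a sketch, not part of the original example): the same logged model
# can also be reloaded in its native LangChain flavor, which returns the LLMChain itself
# rather than the pyfunc wrapper used above.
native_chain = mlflow.langchain.load_model(logged_model.model_uri)
print(native_chain.run(product="colorful socks"))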
| [
"What is a good name for a company that makes {product}?"
] |
2024-01-10 | mlflow/mlflow | tests~langchain~test_langchain_model_export.py | import importlib
import json
import os
import shutil
import sqlite3
from contextlib import contextmanager
from operator import itemgetter
from typing import Any, Dict, List, Mapping, Optional
import langchain
import numpy as np
import openai
import pytest
import transformers
from langchain import SQLDatabase
from langchain.agents import AgentType, initialize_agent
from langchain.chains import (
APIChain,
ConversationChain,
HypotheticalDocumentEmbedder,
LLMChain,
RetrievalQA,
)
from langchain.chains.api import open_meteo_docs
from langchain.chains.base import Chain
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.document_loaders import TextLoader
from langchain.embeddings.base import Embeddings
from langchain.embeddings.fake import FakeEmbeddings
from langchain.evaluation.qa import QAEvalChain
from langchain.llms import HuggingFacePipeline, OpenAI
from langchain.llms.base import LLM
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain.requests import TextRequestsWrapper
from langchain.text_splitter import CharacterTextSplitter
from langchain.tools import Tool
from langchain.vectorstores import FAISS
from langchain_experimental.sql import SQLDatabaseChain
from packaging import version
from packaging.version import Version
from pydantic import BaseModel
from pyspark.sql import SparkSession
import mlflow
import mlflow.pyfunc.scoring_server as pyfunc_scoring_server
from mlflow.deployments import PredictionsResponse
from mlflow.exceptions import MlflowException
from mlflow.models.signature import ModelSignature, Schema, infer_signature
from mlflow.types.schema import Array, ColSpec, DataType, Object, Property
from mlflow.utils.openai_utils import (
TEST_CONTENT,
TEST_INTERMEDIATE_STEPS,
TEST_SOURCE_DOCUMENTS,
_mock_chat_completion_response,
_mock_request,
_MockResponse,
)
from tests.helper_functions import pyfunc_serve_and_score_model
@contextmanager
def _mock_async_request(content=TEST_CONTENT):
with _mock_request(return_value=_mock_chat_completion_response(content)) as m:
yield m
@pytest.fixture
def model_path(tmp_path):
return tmp_path / "model"
@pytest.fixture(scope="module")
def spark():
with SparkSession.builder.master("local[*]").getOrCreate() as s:
yield s
@pytest.fixture(autouse=True)
def set_envs(monkeypatch):
monkeypatch.setenvs(
{
"MLFLOW_TESTING": "true",
"OPENAI_API_KEY": "test",
"SERPAPI_API_KEY": "test",
}
)
importlib.reload(openai)
def create_huggingface_model(model_path):
architecture = "lordtt13/emo-mobilebert"
mlflow.transformers.save_model(
transformers_model={
"model": transformers.TFMobileBertForSequenceClassification.from_pretrained(
architecture
),
"tokenizer": transformers.AutoTokenizer.from_pretrained(architecture),
},
path=model_path,
)
llm = mlflow.transformers.load_model(model_path)
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
hf_pipe = HuggingFacePipeline(pipeline=llm)
return LLMChain(llm=hf_pipe, prompt=prompt)
def create_openai_llmchain():
llm = OpenAI(temperature=0.9)
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
return LLMChain(llm=llm, prompt=prompt)
def create_qa_eval_chain():
llm = OpenAI(temperature=0)
return QAEvalChain.from_llm(llm)
def create_qa_with_sources_chain():
# StuffDocumentsChain
return load_qa_with_sources_chain(OpenAI(temperature=0), chain_type="stuff")
def create_openai_llmagent(return_intermediate_steps=False):
from langchain.agents import AgentType, initialize_agent, load_tools
# First, let's load the language model we're going to use to control the agent.
llm = OpenAI(temperature=0)
# Next, let's load some tools to use.
tools = load_tools(["serpapi", "llm-math"], llm=llm)
# Finally, let's initialize an agent with the tools.
return initialize_agent(
tools,
llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
return_intermediate_steps=return_intermediate_steps,
)
class FakeLLM(LLM):
"""Fake LLM wrapper for testing purposes."""
queries: Optional[Mapping] = None
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fake"
# pylint: disable=arguments-differ
def _call(self, prompt: str, stop: Optional[List[str]] = None, run_manager=None) -> str:
"""First try to lookup in queries, else return 'foo' or 'bar'."""
if self.queries is not None:
return self.queries[prompt]
if stop is None:
return "foo"
else:
return "bar"
@property
def _identifying_params(self) -> Mapping[str, Any]:
return {}
class FakeChain(Chain):
"""Fake chain class for testing purposes."""
be_correct: bool = True
the_input_keys: List[str] = ["foo"]
the_output_keys: List[str] = ["bar"]
@property
def input_keys(self) -> List[str]:
"""Input keys."""
return self.the_input_keys
@property
def output_keys(self) -> List[str]:
"""Output key of bar."""
return self.the_output_keys
# pylint: disable=arguments-differ
def _call(self, inputs: Dict[str, str], run_manager=None) -> Dict[str, str]:
if self.be_correct:
return {"bar": "baz"}
else:
return {"baz": "bar"}
def test_langchain_native_save_and_load_model(model_path):
model = create_openai_llmchain()
mlflow.langchain.save_model(model, model_path)
loaded_model = mlflow.langchain.load_model(model_path)
assert type(loaded_model) == langchain.chains.llm.LLMChain
assert type(loaded_model.llm) == langchain.llms.openai.OpenAI
assert type(loaded_model.prompt) == langchain.prompts.PromptTemplate
assert loaded_model.prompt.template == "What is a good name for a company that makes {product}?"
def test_langchain_native_log_and_load_model():
model = create_openai_llmchain()
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(model, "langchain_model")
loaded_model = mlflow.langchain.load_model(logged_model.model_uri)
assert "langchain" in logged_model.flavors
assert str(logged_model.signature.inputs) == "['product': string (required)]"
assert str(logged_model.signature.outputs) == "['text': string (required)]"
assert type(loaded_model) == langchain.chains.llm.LLMChain
assert type(loaded_model.llm) == langchain.llms.openai.OpenAI
assert type(loaded_model.prompt) == langchain.prompts.PromptTemplate
assert loaded_model.prompt.template == "What is a good name for a company that makes {product}?"
def test_pyfunc_load_openai_model():
model = create_openai_llmchain()
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(model, "langchain_model")
loaded_model = mlflow.pyfunc.load_model(logged_model.model_uri)
assert "langchain" in logged_model.flavors
assert type(loaded_model) == mlflow.pyfunc.PyFuncModel
def test_langchain_model_predict():
with _mock_request(return_value=_mock_chat_completion_response()):
model = create_openai_llmchain()
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(model, "langchain_model")
loaded_model = mlflow.pyfunc.load_model(logged_model.model_uri)
result = loaded_model.predict([{"product": "MLflow"}])
assert result == [TEST_CONTENT]
def test_pyfunc_spark_udf_with_langchain_model(spark):
model = create_openai_llmchain()
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(model, "langchain_model")
loaded_model = mlflow.pyfunc.spark_udf(spark, logged_model.model_uri, result_type="string")
df = spark.createDataFrame([("MLflow",), ("Spark",)], ["product"])
df = df.withColumn("answer", loaded_model())
pdf = df.toPandas()
assert pdf["answer"].tolist() == [TEST_CONTENT, TEST_CONTENT]
def test_langchain_log_huggingface_hub_model_metadata(model_path):
model = create_huggingface_model(model_path)
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(model, "langchain_model")
loaded_model = mlflow.langchain.load_model(logged_model.model_uri)
assert "langchain" in logged_model.flavors
assert str(logged_model.signature.inputs) == "['product': string (required)]"
assert str(logged_model.signature.outputs) == "['text': string (required)]"
assert type(loaded_model) == langchain.chains.llm.LLMChain
assert type(loaded_model.llm) == HuggingFacePipeline
assert type(loaded_model.prompt) == langchain.prompts.PromptTemplate
assert loaded_model.prompt.template == "What is a good name for a company that makes {product}?"
@pytest.mark.parametrize("return_intermediate_steps", [False, True])
def test_langchain_agent_model_predict(return_intermediate_steps):
langchain_agent_output = {
"id": "chatcmpl-123",
"object": "chat.completion",
"created": 1677652288,
"choices": [
{
"index": 0,
"finish_reason": "stop",
"text": f"Final Answer: {TEST_CONTENT}",
}
],
"usage": {"prompt_tokens": 9, "completion_tokens": 12, "total_tokens": 21},
}
model = create_openai_llmagent(return_intermediate_steps=return_intermediate_steps)
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(model, "langchain_model")
loaded_model = mlflow.pyfunc.load_model(logged_model.model_uri)
langchain_input = {
"input": "What was the high temperature in SF yesterday in Fahrenheit?"
"What is that number raised to the .023 power?"
}
if return_intermediate_steps:
langchain_output = [{"output": TEST_CONTENT, "intermediate_steps": TEST_INTERMEDIATE_STEPS}]
# hardcoded output key because that is the default for an agent
# but it is not an attribute of the agent or anything that we log
else:
langchain_output = [TEST_CONTENT]
with _mock_request(return_value=_MockResponse(200, langchain_agent_output)):
result = loaded_model.predict([langchain_input])
assert result == langchain_output
inference_payload = json.dumps({"inputs": langchain_input})
langchain_agent_output_serving = {"predictions": langchain_agent_output}
with _mock_request(return_value=_MockResponse(200, langchain_agent_output_serving)):
response = pyfunc_serve_and_score_model(
logged_model.model_uri,
data=inference_payload,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
assert (
PredictionsResponse.from_json(response.content.decode("utf-8"))
== langchain_agent_output_serving
)
def test_langchain_native_log_and_load_qaevalchain():
# QAEvalChain is a subclass of LLMChain
model = create_qa_eval_chain()
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(model, "langchain_model")
loaded_model = mlflow.langchain.load_model(logged_model.model_uri)
assert model == loaded_model
def test_langchain_native_log_and_load_qa_with_sources_chain():
# StuffDocumentsChain is a subclass of Chain
model = create_qa_with_sources_chain()
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(model, "langchain_model")
loaded_model = mlflow.langchain.load_model(logged_model.model_uri)
assert model == loaded_model
@pytest.mark.skipif(
version.parse(langchain.__version__) < version.parse("0.0.194"),
reason="Saving RetrievalQA chains requires langchain>=0.0.194",
)
def test_log_and_load_retrieval_qa_chain(tmp_path):
# Create the vector db, persist the db to a local fs folder
loader = TextLoader("tests/langchain/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = FakeEmbeddings(size=5)
db = FAISS.from_documents(docs, embeddings)
persist_dir = str(tmp_path / "faiss_index")
db.save_local(persist_dir)
# Create the RetrievalQA chain
retrievalQA = RetrievalQA.from_llm(llm=OpenAI(), retriever=db.as_retriever())
# Log the RetrievalQA chain
def load_retriever(persist_directory):
embeddings = FakeEmbeddings(size=5)
vectorstore = FAISS.load_local(persist_directory, embeddings)
return vectorstore.as_retriever()
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(
retrievalQA,
"retrieval_qa_chain",
loader_fn=load_retriever,
persist_dir=persist_dir,
)
# Remove the persist_dir
shutil.rmtree(persist_dir)
# Load the chain
loaded_model = mlflow.langchain.load_model(logged_model.model_uri)
assert loaded_model == retrievalQA
loaded_pyfunc_model = mlflow.pyfunc.load_model(logged_model.model_uri)
langchain_input = {"query": "What did the president say about Ketanji Brown Jackson"}
langchain_output = [TEST_CONTENT]
result = loaded_pyfunc_model.predict([langchain_input])
assert result == langchain_output
# Serve the chain
inference_payload = json.dumps({"inputs": langchain_input})
langchain_output_serving = {"predictions": langchain_output}
response = pyfunc_serve_and_score_model(
logged_model.model_uri,
data=inference_payload,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
assert (
PredictionsResponse.from_json(response.content.decode("utf-8")) == langchain_output_serving
)
@pytest.mark.skipif(
version.parse(langchain.__version__) < version.parse("0.0.194"),
reason="Saving RetrievalQA chains requires langchain>=0.0.194",
)
def test_log_and_load_retrieval_qa_chain_multiple_output(tmp_path):
# Create the vector db, persist the db to a local fs folder
loader = TextLoader("tests/langchain/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = FakeEmbeddings(size=5)
db = FAISS.from_documents(docs, embeddings)
persist_dir = str(tmp_path / "faiss_index")
db.save_local(persist_dir)
# Create the RetrievalQA chain
retrievalQA = RetrievalQA.from_llm(
llm=OpenAI(), retriever=db.as_retriever(), return_source_documents=True
)
# Log the RetrievalQA chain
def load_retriever(persist_directory):
embeddings = FakeEmbeddings(size=5)
vectorstore = FAISS.load_local(persist_directory, embeddings)
return vectorstore.as_retriever()
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(
retrievalQA,
"retrieval_qa_chain",
loader_fn=load_retriever,
persist_dir=persist_dir,
)
# Remove the persist_dir
shutil.rmtree(persist_dir)
# Load the chain
loaded_model = mlflow.langchain.load_model(logged_model.model_uri)
assert loaded_model == retrievalQA
loaded_pyfunc_model = mlflow.pyfunc.load_model(logged_model.model_uri)
langchain_input = {"query": "What did the president say about Ketanji Brown Jackson"}
langchain_output = [
{loaded_model.output_key: TEST_CONTENT, "source_documents": TEST_SOURCE_DOCUMENTS}
]
result = loaded_pyfunc_model.predict([langchain_input])
assert result == langchain_output
# Serve the chain
inference_payload = json.dumps({"inputs": langchain_input})
langchain_output_serving = {"predictions": langchain_output}
response = pyfunc_serve_and_score_model(
logged_model.model_uri,
data=inference_payload,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
assert (
PredictionsResponse.from_json(response.content.decode("utf-8")) == langchain_output_serving
)
# Define a special embedding for testing
class DeterministicDummyEmbeddings(Embeddings, BaseModel):
size: int
def _get_embedding(self, text: str) -> List[float]:
seed = abs(hash(text)) % (10**8)
np.random.seed(seed)
return list(np.random.normal(size=self.size))
def embed_documents(self, texts: List[str]) -> List[List[float]]:
return [self._get_embedding(t) for t in texts]
def embed_query(self, text: str) -> List[float]:
return self._get_embedding(text)
def assert_equal_retrievers(retriever, expected_retreiver):
from langchain.schema.retriever import BaseRetriever
assert isinstance(retriever, BaseRetriever)
assert isinstance(retriever, type(expected_retreiver))
assert isinstance(retriever.vectorstore, type(expected_retreiver.vectorstore))
assert retriever.tags == expected_retreiver.tags
assert retriever.metadata == expected_retreiver.metadata
assert retriever.search_type == expected_retreiver.search_type
assert retriever.search_kwargs == expected_retreiver.search_kwargs
def test_log_and_load_retriever_chain(tmp_path):
# Create the vector db, persist the db to a local fs folder
loader = TextLoader("tests/langchain/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=10, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = DeterministicDummyEmbeddings(size=5)
db = FAISS.from_documents(docs, embeddings)
persist_dir = str(tmp_path / "faiss_index")
db.save_local(persist_dir)
# Define the loader_fn
def load_retriever(persist_directory):
from typing import List # pylint: disable=lazy-builtin-import
import numpy as np
from langchain.embeddings.base import Embeddings
from pydantic import BaseModel
class DeterministicDummyEmbeddings(Embeddings, BaseModel):
size: int
def _get_embedding(self, text: str) -> List[float]:
if isinstance(text, np.ndarray):
text = text.item()
seed = abs(hash(text)) % (10**8)
np.random.seed(seed)
return list(np.random.normal(size=self.size))
def embed_documents(self, texts: List[str]) -> List[List[float]]:
return [self._get_embedding(t) for t in texts]
def embed_query(self, text: str) -> List[float]:
return self._get_embedding(text)
embeddings = DeterministicDummyEmbeddings(size=5)
vectorstore = FAISS.load_local(persist_directory, embeddings)
return vectorstore.as_retriever()
# Log the retriever
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(
db.as_retriever(),
"retriever",
loader_fn=load_retriever,
persist_dir=persist_dir,
)
# Remove the persist_dir
shutil.rmtree(persist_dir)
# Load the retriever
loaded_model = mlflow.langchain.load_model(logged_model.model_uri)
assert_equal_retrievers(loaded_model, db.as_retriever())
loaded_pyfunc_model = mlflow.pyfunc.load_model(logged_model.model_uri)
query = "What did the president say about Ketanji Brown Jackson"
langchain_input = {"query": query}
result = loaded_pyfunc_model.predict([langchain_input])
expected_result = [
{"page_content": doc.page_content, "metadata": doc.metadata}
for doc in db.as_retriever().get_relevant_documents(query)
]
assert result == [expected_result]
# Serve the retriever
inference_payload = json.dumps({"inputs": langchain_input})
response = pyfunc_serve_and_score_model(
logged_model.model_uri,
data=inference_payload,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
pred = PredictionsResponse.from_json(response.content.decode("utf-8"))["predictions"]
assert type(pred) == list
assert len(pred) == 1
docs_list = pred[0]
assert type(docs_list) == list
assert len(docs_list) == 4
# The returned docs are non-deterministic when used with dummy embeddings,
# so we cannot assert pred == {"predictions": [expected_result]}
def load_requests_wrapper(_):
return TextRequestsWrapper(headers=None, aiosession=None)
def test_log_and_load_api_chain():
llm = OpenAI(temperature=0)
apichain = APIChain.from_llm_and_api_docs(
llm, open_meteo_docs.OPEN_METEO_DOCS, verbose=True, limit_to_domains=["test.com"]
)
# Log the APIChain
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(
apichain,
"api_chain",
loader_fn=load_requests_wrapper,
)
# Load the chain
loaded_model = mlflow.langchain.load_model(logged_model.model_uri)
assert loaded_model == apichain
def test_log_and_load_subclass_of_specialized_chain():
class APIChainSubclass(APIChain):
pass
llm = OpenAI(temperature=0)
apichain_subclass = APIChainSubclass.from_llm_and_api_docs(
llm, open_meteo_docs.OPEN_METEO_DOCS, verbose=True, limit_to_domains=["test.com"]
)
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(
apichain_subclass,
"apichain_subclass",
loader_fn=load_requests_wrapper,
)
# Load the chain
loaded_model = mlflow.langchain.load_model(logged_model.model_uri)
assert loaded_model == apichain_subclass
def load_base_embeddings(_):
return FakeEmbeddings(size=32)
@pytest.mark.skip(reason="This fails due to https://github.com/hwchase17/langchain/issues/5131")
def test_log_and_load_hyde_chain():
# Create the HypotheticalDocumentEmbedder chain
base_embeddings = FakeEmbeddings(size=32)
llm = OpenAI()
# Load with `web_search` prompt
embeddings = HypotheticalDocumentEmbedder.from_llm(llm, base_embeddings, "web_search")
# Log the hyde chain
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(
embeddings,
"hyde_chain",
loader_fn=load_base_embeddings,
)
# Load the chain
loaded_model = mlflow.langchain.load_model(logged_model.model_uri)
assert loaded_model == embeddings
def create_sqlite_db_file(db_dir):
# Connect to SQLite database (or create it if it doesn't exist)
with sqlite3.connect(db_dir) as conn:
# Create a cursor
c = conn.cursor()
# Create a dummy table
c.execute(
"""
CREATE TABLE IF NOT EXISTS employees(
id INTEGER PRIMARY KEY,
name TEXT,
salary REAL,
department TEXT,
position TEXT,
hireDate TEXT);
"""
)
# Insert dummy data into the table
c.execute(
"""
INSERT INTO employees (name, salary, department, position, hireDate)
VALUES ('John Doe', 80000, 'IT', 'Engineer', '2023-06-26');
"""
)
def load_db(persist_dir):
db_file_path = os.path.join(persist_dir, "my_database.db")
sqlite_uri = f"sqlite:///{db_file_path}"
return SQLDatabase.from_uri(sqlite_uri)
@pytest.mark.skipif(
version.parse(langchain.__version__) < version.parse("0.0.297"),
reason="Saving SQLDatabaseChain chains requires langchain>=0.0.297",
)
def test_log_and_load_sql_database_chain(tmp_path):
# Create the SQLDatabaseChain
db_file_path = tmp_path / "my_database.db"
sqlite_uri = f"sqlite:///{db_file_path}"
llm = OpenAI(temperature=0)
create_sqlite_db_file(db_file_path)
db = SQLDatabase.from_uri(sqlite_uri)
db_chain = SQLDatabaseChain.from_llm(llm, db)
# Log the SQLDatabaseChain
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(
db_chain,
"sql_database_chain",
loader_fn=load_db,
persist_dir=tmp_path,
)
# Load the chain
loaded_model = mlflow.langchain.load_model(logged_model.model_uri)
assert loaded_model == db_chain
def test_saving_not_implemented_for_memory():
conversation = ConversationChain(llm=OpenAI(temperature=0), memory=ConversationBufferMemory())
with pytest.raises(
ValueError,
match="Saving of memory is not yet supported.",
):
with mlflow.start_run():
mlflow.langchain.log_model(conversation, "conversation_model")
def test_saving_not_implemented_chain_type():
chain = FakeChain()
if version.parse(langchain.__version__) < version.parse("0.0.309"):
error_message = "Saving not supported for this chain type"
else:
error_message = f"Chain {chain} does not support saving."
with pytest.raises(
NotImplementedError,
match=error_message,
):
with mlflow.start_run():
mlflow.langchain.log_model(chain, "fake_chain")
def test_unsupported_class():
llm = FakeLLM()
with pytest.raises(
MlflowException,
match="MLflow langchain flavor only supports subclasses of "
+ "langchain.chains.base.Chain",
):
with mlflow.start_run():
mlflow.langchain.log_model(llm, "fake_llm")
def test_agent_with_unpicklable_tools(tmp_path):
tmp_file = tmp_path / "temp_file.txt"
with open(tmp_file, mode="w") as temp_file:
# files that aren't opened for reading cannot be pickled
tools = [
Tool.from_function(
func=lambda: temp_file,
name="Write 0",
description="If you need to write 0 to a file",
)
]
agent = initialize_agent(
llm=OpenAI(temperature=0), tools=tools, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION
)
with pytest.raises(
MlflowException,
match=(
"Error when attempting to pickle the AgentExecutor tools. "
"This model likely does not support serialization."
),
):
with mlflow.start_run():
mlflow.langchain.log_model(agent, "unpicklable_tools")
@pytest.mark.skipif(
Version(langchain.__version__) < Version("0.0.311"),
reason="feature not existing",
)
def test_save_load_runnable_passthrough():
from langchain.schema.runnable import RunnablePassthrough
runnable = RunnablePassthrough()
assert runnable.invoke("hello") == "hello"
with mlflow.start_run():
model_info = mlflow.langchain.log_model(runnable, "model_path")
loaded_model = mlflow.langchain.load_model(model_info.model_uri)
assert loaded_model.invoke("hello") == "hello"
pyfunc_loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
assert pyfunc_loaded_model.predict(["hello"]) == ["hello"]
response = pyfunc_serve_and_score_model(
model_info.model_uri,
data=json.dumps({"inputs": ["hello"]}),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
assert PredictionsResponse.from_json(response.content.decode("utf-8")) == {
"predictions": ["hello"]
}
@pytest.mark.skipif(
Version(langchain.__version__) < Version("0.0.311"),
reason="feature not existing",
)
def test_save_load_runnable_lambda(spark):
from langchain.schema.runnable import RunnableLambda
def add_one(x: int) -> int:
return x + 1
runnable = RunnableLambda(add_one)
assert runnable.invoke(1) == 2
assert runnable.batch([1, 2, 3]) == [2, 3, 4]
with mlflow.start_run():
model_info = mlflow.langchain.log_model(
runnable, "runnable_lambda", input_example=[1, 2, 3]
)
loaded_model = mlflow.langchain.load_model(model_info.model_uri)
assert loaded_model.invoke(1) == 2
assert loaded_model.batch([1, 2, 3]) == [2, 3, 4]
loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
assert loaded_model.predict(1) == [2]
assert loaded_model.predict([1, 2, 3]) == [2, 3, 4]
udf = mlflow.pyfunc.spark_udf(spark, model_info.model_uri, result_type="long")
df = spark.createDataFrame([(1,), (2,), (3,)], ["data"])
df = df.withColumn("answer", udf("data"))
pdf = df.toPandas()
assert pdf["answer"].tolist() == [2, 3, 4]
response = pyfunc_serve_and_score_model(
model_info.model_uri,
data=json.dumps({"inputs": [1, 2, 3]}),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
assert PredictionsResponse.from_json(response.content.decode("utf-8")) == {
"predictions": [2, 3, 4]
}
@pytest.mark.skipif(
Version(langchain.__version__) < Version("0.0.311"),
reason="feature not existing",
)
def test_save_load_runnable_lambda_in_sequence():
from langchain.schema.runnable import RunnableLambda
def add_one(x):
return x + 1
def mul_two(x):
return x * 2
runnable_1 = RunnableLambda(add_one)
runnable_2 = RunnableLambda(mul_two)
sequence = runnable_1 | runnable_2
assert sequence.invoke(1) == 4
with mlflow.start_run():
model_info = mlflow.langchain.log_model(sequence, "model_path", input_example=[1, 2, 3])
loaded_model = mlflow.langchain.load_model(model_info.model_uri)
assert loaded_model.invoke(1) == 4
pyfunc_loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
assert pyfunc_loaded_model.predict(1) == [4]
assert pyfunc_loaded_model.predict([1, 2, 3]) == [4, 6, 8]
response = pyfunc_serve_and_score_model(
model_info.model_uri,
data=json.dumps({"inputs": [1, 2, 3]}),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
assert PredictionsResponse.from_json(response.content.decode("utf-8")) == {
"predictions": [4, 6, 8]
}
@pytest.mark.skipif(
Version(langchain.__version__) < Version("0.0.311"),
reason="feature not existing",
)
def test_save_load_runnable_parallel():
from langchain.schema.runnable import RunnableParallel
def fake_llm(prompt: str) -> str:
return "completion"
runnable = RunnableParallel({"llm": fake_llm})
assert runnable.invoke("hello") == {"llm": "completion"}
assert runnable.batch(["hello", "world"]) == [{"llm": "completion"}, {"llm": "completion"}]
with mlflow.start_run():
model_info = mlflow.langchain.log_model(
runnable, "model_path", input_example=["hello", "world"]
)
loaded_model = mlflow.langchain.load_model(model_info.model_uri)
assert loaded_model.invoke("hello") == {"llm": "completion"}
pyfunc_loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
assert pyfunc_loaded_model.predict("hello") == [{"llm": "completion"}]
assert pyfunc_loaded_model.predict(["hello", "world"]) == [
{"llm": "completion"},
{"llm": "completion"},
]
response = pyfunc_serve_and_score_model(
model_info.model_uri,
data=json.dumps({"inputs": ["hello", "world"]}),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
assert PredictionsResponse.from_json(response.content.decode("utf-8")) == {
"predictions": [{"llm": "completion"}, {"llm": "completion"}]
}
@pytest.mark.skipif(
Version(langchain.__version__) < Version("0.0.311"),
reason="feature not existing",
)
def tests_save_load_complex_runnable_parallel():
from langchain.schema.runnable import RunnableParallel
with _mock_request(return_value=_mock_chat_completion_response()):
chain = create_openai_llmchain()
runnable = RunnableParallel({"llm": chain})
expected_result = {"llm": {"product": "MLflow", "text": TEST_CONTENT}}
assert runnable.invoke({"product": "MLflow"}) == expected_result
with mlflow.start_run():
model_info = mlflow.langchain.log_model(
runnable, "model_path", input_example=[{"product": "MLflow"}]
)
loaded_model = mlflow.langchain.load_model(model_info.model_uri)
assert loaded_model.invoke("MLflow") == expected_result
pyfunc_loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
assert pyfunc_loaded_model.predict([{"product": "MLflow"}]) == [expected_result]
response = pyfunc_serve_and_score_model(
model_info.model_uri,
data=json.dumps({"inputs": [{"product": "MLflow"}, {"product": "MLflow"}]}),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
assert PredictionsResponse.from_json(response.content.decode("utf-8")) == {
"predictions": [expected_result, expected_result]
}
@pytest.mark.skipif(
Version(langchain.__version__) < Version("0.0.311"),
reason="feature not existing",
)
def test_save_load_runnable_parallel_and_assign_in_sequence():
from langchain.schema.runnable import RunnablePassthrough
def fake_llm(prompt: str) -> str:
return "completion"
runnable = {
"llm1": fake_llm,
"llm2": fake_llm,
} | RunnablePassthrough.assign(total_chars=lambda inputs: len(inputs["llm1"] + inputs["llm2"]))
expected_result = {
"llm1": "completion",
"llm2": "completion",
"total_chars": 20,
}
assert runnable.invoke("hello") == expected_result
with mlflow.start_run():
model_info = mlflow.langchain.log_model(
runnable, "model_path", input_example=["hello", "world"]
)
loaded_model = mlflow.langchain.load_model(model_info.model_uri)
assert loaded_model.invoke("hello") == expected_result
pyfunc_loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
assert pyfunc_loaded_model.predict(["hello"]) == [expected_result]
response = pyfunc_serve_and_score_model(
model_info.model_uri,
data=json.dumps({"inputs": ["hello", "world"]}),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
assert PredictionsResponse.from_json(response.content.decode("utf-8")) == {
"predictions": [expected_result, expected_result]
}
@pytest.mark.skipif(
Version(langchain.__version__) < Version("0.0.311"), reason="feature not existing"
)
def test_save_load_runnable_sequence():
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableSequence
prompt1 = PromptTemplate.from_template("what is the city {person} is from?")
llm = OpenAI(temperature=0.9)
model = prompt1 | llm | StrOutputParser()
with mlflow.start_run():
model_info = mlflow.langchain.log_model(model, "model_path")
loaded_model = mlflow.langchain.load_model(model_info.model_uri)
assert type(loaded_model) == RunnableSequence
assert type(loaded_model.steps[0]) == PromptTemplate
assert type(loaded_model.steps[1]) == OpenAI
assert type(loaded_model.steps[2]) == StrOutputParser
@pytest.mark.skipif(
Version(langchain.__version__) < Version("0.0.311"), reason="feature not existing"
)
def test_save_load_long_runnable_sequence(model_path):
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough, RunnableSequence
prompt1 = PromptTemplate.from_template("what is the city {person} is from?")
llm = OpenAI(temperature=0.9)
model = prompt1 | llm | StrOutputParser()
for _ in range(10):
model = model | RunnablePassthrough()
with mlflow.start_run():
mlflow.langchain.save_model(model, model_path)
loaded_model = mlflow.langchain.load_model(model_path)
assert type(loaded_model) == RunnableSequence
assert type(loaded_model.steps[0]) == PromptTemplate
assert type(loaded_model.steps[1]) == OpenAI
assert type(loaded_model.steps[2]) == StrOutputParser
for i in range(3, 13):
assert type(loaded_model.steps[i]) == RunnablePassthrough
@pytest.mark.skipif(
Version(langchain.__version__) < Version("0.0.311"),
reason="feature not existing",
)
def test_save_load_complex_runnable_sequence():
from langchain.schema.runnable import RunnablePassthrough
with _mock_request(return_value=_mock_chat_completion_response()):
llm_chain = create_openai_llmchain()
chain = llm_chain | RunnablePassthrough()
expected_result = {"product": "MLflow", "text": TEST_CONTENT}
assert chain.invoke({"product": "MLflow"}) == expected_result
with mlflow.start_run():
model_info = mlflow.langchain.log_model(
chain, "model_path", input_example=[{"product": "MLflow"}]
)
loaded_model = mlflow.langchain.load_model(model_info.model_uri)
result = loaded_model.invoke({"product": "MLflow"})
assert result == expected_result
pyfunc_loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
assert pyfunc_loaded_model.predict([{"product": "MLflow"}]) == [expected_result]
response = pyfunc_serve_and_score_model(
model_info.model_uri,
data=json.dumps({"inputs": [{"product": "MLflow"}]}),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
assert PredictionsResponse.from_json(response.content.decode("utf-8")) == {
"predictions": [expected_result]
}
@pytest.mark.skipif(
Version(langchain.__version__) < Version("0.0.311"), reason="feature not existing"
)
def test_save_load_simple_chat_model(spark):
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
from mlflow.langchain.utils import _fake_simple_chat_model
prompt = ChatPromptTemplate.from_template(
"What is a good name for a company that makes {product}?"
)
chat_model = _fake_simple_chat_model()()
chain = prompt | chat_model | StrOutputParser()
assert chain.invoke({"product": "MLflow"}) == "Databricks"
# signature is required for spark_udf
signature = infer_signature({"product": "MLflow"}, "Databricks")
assert signature == ModelSignature(
Schema([ColSpec("string", "product")]), Schema([ColSpec("string")])
)
with mlflow.start_run():
model_info = mlflow.langchain.log_model(chain, "model_path", signature=signature)
loaded_model = mlflow.langchain.load_model(model_info.model_uri)
assert loaded_model.invoke({"product": "MLflow"}) == "Databricks"
pyfunc_loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
assert pyfunc_loaded_model.predict([{"product": "MLflow"}]) == ["Databricks"]
udf = mlflow.pyfunc.spark_udf(spark, model_info.model_uri, result_type="string")
df = spark.createDataFrame([("MLflow",), ("Spark",)], ["product"])
df = df.withColumn("answer", udf("product"))
pdf = df.toPandas()
assert pdf["answer"].tolist() == ["Databricks", "Databricks"]
response = pyfunc_serve_and_score_model(
model_info.model_uri,
data=json.dumps({"inputs": {"product": "MLflow"}}),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
    # Because schema enforcement converts the input to a pandas DataFrame,
    # the prediction result is wrapped in a list in api_request_parallel_processor
assert PredictionsResponse.from_json(response.content.decode("utf-8")) == {
"predictions": ["Databricks"]
}
@pytest.mark.skipif(
Version(langchain.__version__) < Version("0.0.311"), reason="feature not existing"
)
def test_save_load_rag(tmp_path, spark):
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough
from mlflow.langchain.utils import _fake_simple_chat_model
chat_model = _fake_simple_chat_model()()
# Create the vector db, persist the db to a local fs folder
loader = TextLoader("tests/langchain/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=10, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
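    # Deterministic embeddings keep the FAISS retrieval results stable across test runs.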
embeddings = DeterministicDummyEmbeddings(size=5)
db = FAISS.from_documents(docs, embeddings)
persist_dir = str(tmp_path / "faiss_index")
db.save_local(persist_dir)
retriever = db.as_retriever()
def load_retriever(persist_directory):
embeddings = FakeEmbeddings(size=5)
vectorstore = FAISS.load_local(persist_directory, embeddings)
return vectorstore.as_retriever()
prompt = ChatPromptTemplate.from_template(
"Answer the following question based on the context: {context}\nQuestion: {question}"
)
retrieval_chain = (
{
"context": retriever,
"question": RunnablePassthrough(),
}
| prompt
| chat_model
| StrOutputParser()
)
question = "What is a good name for a company that makes MLflow?"
answer = "Databricks"
assert retrieval_chain.invoke(question) == answer
with mlflow.start_run():
model_info = mlflow.langchain.log_model(
retrieval_chain,
"model_path",
loader_fn=load_retriever,
persist_dir=persist_dir,
input_example=question,
)
# Remove the persist_dir
shutil.rmtree(persist_dir)
loaded_model = mlflow.langchain.load_model(model_info.model_uri)
assert loaded_model.invoke(question) == answer
pyfunc_loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
assert pyfunc_loaded_model.predict(question) == [answer]
udf = mlflow.pyfunc.spark_udf(spark, model_info.model_uri, result_type="string")
df = spark.createDataFrame([(question,), (question,)], ["question"])
df = df.withColumn("answer", udf("question"))
pdf = df.toPandas()
assert pdf["answer"].tolist() == [answer, answer]
response = pyfunc_serve_and_score_model(
model_info.model_uri,
data=json.dumps({"inputs": question}),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
assert PredictionsResponse.from_json(response.content.decode("utf-8")) == {
"predictions": [answer]
}
@pytest.mark.skipif(
Version(langchain.__version__) < Version("0.0.311"), reason="feature not existing"
)
def test_runnable_branch_save_load():
from langchain.schema.runnable import RunnableBranch
branch = RunnableBranch(
(lambda x: isinstance(x, str), lambda x: x.upper()),
(lambda x: isinstance(x, int), lambda x: x + 1),
(lambda x: isinstance(x, float), lambda x: x * 2),
lambda x: "goodbye",
)
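    # RunnableBranch checks each (condition, runnable) pair in order and falls back to
    # the trailing runnable ("goodbye" here) when no condition matches.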
assert branch.invoke("hello") == "HELLO"
assert branch.invoke({}) == "goodbye"
with mlflow.start_run():
# We only support single input format for now, so we should
# not save signature for runnable branch which accepts multiple
# input types
model_info = mlflow.langchain.log_model(branch, "model_path")
loaded_model = mlflow.langchain.load_model(model_info.model_uri)
assert loaded_model.invoke("hello") == "HELLO"
assert loaded_model.invoke({}) == "goodbye"
pyfunc_loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
assert pyfunc_loaded_model.predict("hello") == "HELLO"
assert pyfunc_loaded_model.predict({}) == "goodbye"
response = pyfunc_serve_and_score_model(
model_info.model_uri,
data=json.dumps({"inputs": "hello"}),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
assert PredictionsResponse.from_json(response.content.decode("utf-8")) == {
"predictions": "HELLO"
}
@pytest.mark.skipif(
Version(langchain.__version__) < Version("0.0.311"), reason="feature not existing"
)
def test_complex_runnable_branch_save_load():
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableBranch, RunnableLambda
from mlflow.langchain.utils import _fake_mlflow_question_classifier, _fake_simple_chat_model
chat_model = _fake_mlflow_question_classifier()()
prompt = ChatPromptTemplate.from_template("{question_is_relevant}\n{query}")
# Need to add prompt here as the chat model doesn't accept dict input
answer_model = prompt | _fake_simple_chat_model()()
decline_to_answer = RunnableLambda(
lambda x: "I cannot answer questions that are not about MLflow."
)
something_went_wrong = RunnableLambda(lambda x: "Something went wrong.")
is_question_about_mlflow_prompt = ChatPromptTemplate.from_template(
"You are classifying documents to know if this question "
"is related with MLflow. Only answer with yes or no. The question is: {query}"
)
branch_node = RunnableBranch(
(lambda x: x["question_is_relevant"].lower() == "yes", answer_model),
(lambda x: x["question_is_relevant"].lower() == "no", decline_to_answer),
something_went_wrong,
)
chain = (
{
"question_is_relevant": is_question_about_mlflow_prompt
| chat_model
| StrOutputParser(),
"query": itemgetter("query"),
}
| branch_node
| StrOutputParser()
)
assert chain.invoke({"query": "Who owns MLflow?"}) == "Databricks"
assert (
chain.invoke({"query": "Do you like cat?"})
== "I cannot answer questions that are not about MLflow."
)
assert chain.invoke({"query": "Are you happy today?"}) == "Something went wrong."
with mlflow.start_run():
model_info = mlflow.langchain.log_model(
chain, "model_path", input_example={"query": "Who owns MLflow?"}
)
loaded_model = mlflow.langchain.load_model(model_info.model_uri)
assert loaded_model.invoke({"query": "Who owns MLflow?"}) == "Databricks"
assert (
loaded_model.invoke({"query": "Do you like cat?"})
== "I cannot answer questions that are not about MLflow."
)
assert loaded_model.invoke({"query": "Are you happy today?"}) == "Something went wrong."
pyfunc_loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
assert pyfunc_loaded_model.predict({"query": "Who owns MLflow?"}) == ["Databricks"]
assert pyfunc_loaded_model.predict({"query": "Do you like cat?"}) == [
"I cannot answer questions that are not about MLflow."
]
assert pyfunc_loaded_model.predict({"query": "Are you happy today?"}) == [
"Something went wrong."
]
response = pyfunc_serve_and_score_model(
model_info.model_uri,
data=json.dumps({"inputs": {"query": "Who owns MLflow?"}}),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
assert PredictionsResponse.from_json(response.content.decode("utf-8")) == {
"predictions": ["Databricks"]
}
@pytest.mark.skipif(
Version(langchain.__version__) < Version("0.0.311"), reason="feature not existing"
)
def test_chat_with_history(spark):
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableLambda
from mlflow.langchain.utils import _fake_simple_chat_model
prompt_with_history_str = """
Here is a history between you and a human: {chat_history}
Now, please answer this question: {question}
"""
prompt_with_history = PromptTemplate(
input_variables=["chat_history", "question"], template=prompt_with_history_str
)
chat_model = _fake_simple_chat_model()()
def extract_question(input):
return input[-1]["content"]
def extract_history(input):
return input[:-1]
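    # "messages" is a list of {"role": ..., "content": ...} dicts: the last entry is the
    # question and everything before it is treated as chat history.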
chain_with_history = (
{
"question": itemgetter("messages") | RunnableLambda(extract_question),
"chat_history": itemgetter("messages") | RunnableLambda(extract_history),
}
| prompt_with_history
| chat_model
| StrOutputParser()
)
input_example = {"messages": [{"role": "user", "content": "Who owns MLflow?"}]}
assert chain_with_history.invoke(input_example) == "Databricks"
with mlflow.start_run():
model_info = mlflow.langchain.log_model(
chain_with_history, "model_path", input_example=input_example
)
loaded_model = mlflow.langchain.load_model(model_info.model_uri)
assert loaded_model.invoke(input_example) == "Databricks"
pyfunc_loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
input_schema = pyfunc_loaded_model.metadata.get_input_schema()
assert input_schema == Schema(
[
ColSpec(
Array(
Object(
[Property("role", DataType.string), Property("content", DataType.string)]
)
),
"messages",
)
]
)
assert pyfunc_loaded_model.predict(input_example) == ["Databricks"]
udf = mlflow.pyfunc.spark_udf(spark, model_info.model_uri, result_type="string")
df = spark.createDataFrame([(input_example["messages"],)], ["messages"])
df = df.withColumn("answer", udf("messages"))
pdf = df.toPandas()
assert pdf["answer"].tolist() == ["Databricks"]
response = pyfunc_serve_and_score_model(
model_info.model_uri,
data=json.dumps({"inputs": input_example}),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
assert PredictionsResponse.from_json(response.content.decode("utf-8")) == {
"predictions": ["Databricks"]
}
| [
"Who owns MLflow?",
"You are classifying documents to know if this question is related with MLflow. Only answer with yes or no. The question is: {query}",
"You are classifying documents to know if this question ",
"chat_history",
"\n Here is a history between you and a human: {chat_history}\n\n Now, please answer this question: {question}\n ",
"question",
"Answer the following question based on the context: {context}\nQuestion: {question}",
"{question_is_relevant}\n{query}",
"what is the city {person} is from?",
"is related with MLflow. Only answer with yes or no. The question is: {query}",
"What is a good name for a company that makes {product}?"
] |
2024-01-10 | mlflow/mlflow | mlflow~metrics~genai~model_utils.py | import logging
import os
import urllib.parse
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE
from mlflow.utils.openai_utils import REQUEST_URL_CHAT
_logger = logging.getLogger(__name__)
# TODO: improve this name
def score_model_on_payload(model_uri, payload, eval_parameters=None):
"""Call the model identified by the given uri with the given payload."""
if eval_parameters is None:
eval_parameters = {}
prefix, suffix = _parse_model_uri(model_uri)
if prefix == "openai":
return _call_openai_api(suffix, payload, eval_parameters)
elif prefix == "gateway":
return _call_gateway_api(suffix, payload, eval_parameters)
elif prefix == "endpoints":
return _call_deployments_api(suffix, payload, eval_parameters)
elif prefix in ("model", "runs"):
# TODO: call _load_model_or_server
raise NotImplementedError
else:
raise MlflowException(
f"Unknown model uri prefix '{prefix}'",
error_code=INVALID_PARAMETER_VALUE,
)
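# Illustrative usage (sketch only, not executed here; the "openai" prefix assumes
# OPENAI_API_KEY is set and the endpoint/model names below are placeholders):
#   score_model_on_payload("openai:/gpt-3.5-turbo", "What is MLflow?", {"temperature": 0.1})
#   score_model_on_payload("endpoints:/my-chat-endpoint", "What is MLflow?")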
def _parse_model_uri(model_uri):
parsed = urllib.parse.urlparse(model_uri, allow_fragments=False)
scheme = parsed.scheme
path = parsed.path
if not path.startswith("/") or len(path) <= 1:
raise MlflowException(
f"Malformed model uri '{model_uri}'", error_code=INVALID_PARAMETER_VALUE
)
path = path.lstrip("/")
return scheme, path
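# For example, "openai:/gpt-3.5-turbo" parses to ("openai", "gpt-3.5-turbo") and
# "endpoints:/my-endpoint" parses to ("endpoints", "my-endpoint"); a URI whose path does
# not start with "/" (or is empty) raises MlflowException above.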
def _call_openai_api(openai_uri, payload, eval_parameters):
if "OPENAI_API_KEY" not in os.environ:
raise MlflowException(
"OPENAI_API_KEY environment variable not set",
error_code=INVALID_PARAMETER_VALUE,
)
from mlflow.openai import _get_api_config
from mlflow.openai.api_request_parallel_processor import process_api_requests
from mlflow.utils.openai_utils import _OAITokenHolder
api_config = _get_api_config()
api_token = _OAITokenHolder(api_config.api_type)
payload = {
"messages": [{"role": "user", "content": payload}],
**eval_parameters,
}
if api_config.api_type in ("azure", "azure_ad", "azuread"):
api_base = getattr(api_config, "api_base")
api_version = getattr(api_config, "api_version")
engine = getattr(api_config, "engine")
deployment_id = getattr(api_config, "deployment_id")
if engine:
# Avoid using both parameters as they serve the same purpose
# Invalid inputs:
# - Wrong engine + correct/wrong deployment_id
# - No engine + wrong deployment_id
# Valid inputs:
# - Correct engine + correct/wrong deployment_id
# - No engine + correct deployment_id
if deployment_id is not None:
_logger.warning(
"Both engine and deployment_id are set. " "Using engine as it takes precedence."
)
payload = {"engine": engine, **payload}
elif deployment_id is None:
raise MlflowException(
"Either engine or deployment_id must be set for Azure OpenAI API",
)
payload = payload
request_url = (
f"{api_base}/openai/deployments/{deployment_id}"
f"/chat/completions?api-version={api_version}"
)
else:
payload = {"model": openai_uri, **payload}
request_url = REQUEST_URL_CHAT
try:
resp = process_api_requests(
[payload],
request_url,
api_token=api_token,
throw_original_error=True,
max_workers=1,
)[0]
except MlflowException as e:
raise e
except Exception as e:
raise MlflowException(f"Error response from OpenAI:\n {e}")
return _parse_chat_response_format(resp)
def _call_deployments_api(deployment_uri, payload, eval_parameters):
from mlflow.deployments import get_deploy_client
client = get_deploy_client()
endpoint = client.get_endpoint(deployment_uri)
endpoint_type = endpoint.get("task", endpoint.get("endpoint_type"))
if endpoint_type == "llm/v1/completions":
completions_payload = {
"prompt": payload,
**eval_parameters,
}
response = client.predict(endpoint=deployment_uri, inputs=completions_payload)
return _parse_completions_response_format(response)
elif endpoint_type == "llm/v1/chat":
chat_payload = {
"messages": [{"role": "user", "content": payload}],
**eval_parameters,
}
response = client.predict(endpoint=deployment_uri, inputs=chat_payload)
return _parse_chat_response_format(response)
else:
raise MlflowException(
f"Unsupported endpoint type: {endpoint_type}. Use an "
"endpoint of type 'llm/v1/completions' or 'llm/v1/chat' instead.",
error_code=INVALID_PARAMETER_VALUE,
)
def _call_gateway_api(gateway_uri, payload, eval_parameters):
from mlflow.gateway import get_route, query
route_info = get_route(gateway_uri).dict()
if route_info["endpoint_type"] == "llm/v1/completions":
completions_payload = {
"prompt": payload,
**eval_parameters,
}
response = query(gateway_uri, completions_payload)
return _parse_completions_response_format(response)
elif route_info["endpoint_type"] == "llm/v1/chat":
chat_payload = {
"messages": [{"role": "user", "content": payload}],
**eval_parameters,
}
response = query(gateway_uri, chat_payload)
return _parse_chat_response_format(response)
else:
raise MlflowException(
f"Unsupported gateway route type: {route_info['endpoint_type']}. Use a "
"route of type 'llm/v1/completions' or 'llm/v1/chat' instead.",
error_code=INVALID_PARAMETER_VALUE,
)
def _parse_chat_response_format(response):
try:
text = response["choices"][0]["message"]["content"]
except (KeyError, IndexError, TypeError):
text = None
return text
def _parse_completions_response_format(response):
try:
text = response["choices"][0]["text"]
except (KeyError, IndexError, TypeError):
text = None
return text
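# For reference, the response shapes these parsers expect (based on the lookups above):
#   chat:        {"choices": [{"message": {"content": "..."}}]}
#   completions: {"choices": [{"text": "..."}]}
# Missing or malformed fields yield None rather than raising.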
| [] |
2024-01-10 | mlflow/mlflow | mlflow~langchain~api_request_parallel_processor.py | # Based on: https://github.com/openai/openai-cookbook/blob/6df6ceff470eeba26a56de131254e775292eac22/examples/api_request_parallel_processor.py
# Several changes were made to make it work with MLflow.
# Currently, only chat completion is supported.
"""
API REQUEST PARALLEL PROCESSOR
Using the LangChain API to process lots of text quickly takes some care.
If you trickle in a million API requests one by one, they'll take days to complete.
This script parallelizes requests using the LangChain API.
Features:
- Streams requests from file, to avoid running out of memory for giant jobs
- Makes requests concurrently, to maximize throughput
- Logs errors, to diagnose problems with requests
"""
from __future__ import annotations
import logging
import queue
import threading
import time
import traceback
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Union
import langchain.chains
from langchain.schema import AgentAction
import mlflow
_logger = logging.getLogger(__name__)
@dataclass
class StatusTracker:
"""
Stores metadata about the script's progress. Only one instance is created.
"""
num_tasks_started: int = 0
num_tasks_in_progress: int = 0 # script ends when this reaches 0
num_tasks_succeeded: int = 0
num_tasks_failed: int = 0
num_api_errors: int = 0 # excluding rate limit errors, counted above
lock: threading.Lock = threading.Lock()
def start_task(self):
with self.lock:
self.num_tasks_started += 1
self.num_tasks_in_progress += 1
def complete_task(self, *, success: bool):
with self.lock:
self.num_tasks_in_progress -= 1
if success:
self.num_tasks_succeeded += 1
else:
self.num_tasks_failed += 1
def increment_num_api_errors(self):
with self.lock:
self.num_api_errors += 1
@dataclass
class APIRequest:
"""
Stores an API request's inputs, outputs, and other metadata. Contains a method to make an API
call.
"""
index: int
lc_model: langchain.chains.base.Chain
request_json: dict
results: list[tuple[int, str]]
errors: dict
def _prepare_to_serialize(self, response: dict):
"""
Converts LangChain objects to JSON-serializable formats.
"""
from langchain.load.dump import dumps
if "intermediate_steps" in response:
steps = response["intermediate_steps"]
if (
isinstance(steps, tuple)
and len(steps) == 2
and isinstance(steps[0], AgentAction)
and isinstance(steps[1], str)
):
response["intermediate_steps"] = [
{
"tool": agent.tool,
"tool_input": agent.tool_input,
"log": agent.log,
"result": result,
}
for agent, result in response["intermediate_steps"]
]
else:
try:
# `AgentAction` objects are not yet implemented for serialization in `dumps`
# https://github.com/langchain-ai/langchain/issues/8815#issuecomment-1666763710
response["intermediate_steps"] = dumps(steps)
except Exception as e:
_logger.warning(f"Failed to serialize intermediate steps: {e!r}")
# The `dumps` format for `Document` objects is noisy, so we will still have custom logic
if "source_documents" in response:
response["source_documents"] = [
{"page_content": doc.page_content, "metadata": doc.metadata}
for doc in response["source_documents"]
]
def call_api(self, status_tracker: StatusTracker):
"""
Calls the LangChain API and stores results.
"""
from langchain.schema import BaseRetriever
from mlflow.langchain.utils import lc_runnables_types, runnables_supports_batch_types
_logger.debug(f"Request #{self.index} started with payload: {self.request_json}")
try:
if isinstance(self.lc_model, BaseRetriever):
# Retrievers are invoked differently than Chains
docs = self.lc_model.get_relevant_documents(**self.request_json)
response = [
{"page_content": doc.page_content, "metadata": doc.metadata} for doc in docs
]
elif isinstance(self.lc_model, lc_runnables_types()):
if isinstance(self.request_json, dict):
# This is a temporary fix for the case when spark_udf converts
# input into pandas dataframe with column name, while the model
# does not accept dictionaries as input, it leads to errors like
# Expected Scalar value for String field 'query_text'
try:
response = self.lc_model.invoke(self.request_json)
except TypeError as e:
_logger.warning(
f"Failed to invoke {self.lc_model.__class__.__name__} "
f"with {self.request_json}. Error: {e!r}. Trying to "
"invoke with the first value of the dictionary."
)
self.request_json = next(iter(self.request_json.values()))
response = self.lc_model.invoke(self.request_json)
elif isinstance(self.request_json, list) and isinstance(
self.lc_model, runnables_supports_batch_types()
):
response = self.lc_model.batch(self.request_json)
else:
response = self.lc_model.invoke(self.request_json)
else:
response = self.lc_model(self.request_json, return_only_outputs=True)
# to maintain existing code, single output chains will still return only the result
if len(response) == 1:
response = response.popitem()[1]
else:
self._prepare_to_serialize(response)
_logger.debug(f"Request #{self.index} succeeded with response: {response}")
self.results.append((self.index, response))
status_tracker.complete_task(success=True)
except Exception as e:
self.errors[
self.index
] = f"error: {e!r} {traceback.format_exc()}\n request payload: {self.request_json}"
status_tracker.increment_num_api_errors()
status_tracker.complete_task(success=False)
def process_api_requests(
lc_model,
requests: Optional[List[Union[Any, Dict[str, Any]]]] = None,
max_workers: int = 10,
):
"""
Processes API requests in parallel.
"""
# initialize trackers
retry_queue = queue.Queue()
status_tracker = StatusTracker() # single instance to track a collection of variables
next_request = None # variable to hold the next request to call
results: list[tuple[int, str]] = []
errors: dict = {}
requests_iter = enumerate(requests)
with ThreadPoolExecutor(max_workers=max_workers) as executor:
while True:
# get next request (if one is not already waiting for capacity)
if not retry_queue.empty():
next_request = retry_queue.get_nowait()
_logger.warning(f"Retrying request {next_request.index}: {next_request}")
elif req := next(requests_iter, None):
# get new request
index, request_json = req
next_request = APIRequest(
index=index,
lc_model=lc_model,
request_json=request_json,
results=results,
errors=errors,
)
status_tracker.start_task()
else:
next_request = None
# if enough capacity available, call API
if next_request:
# call API
executor.submit(
next_request.call_api,
status_tracker=status_tracker,
)
# if all tasks are finished, break
# check next_request to avoid terminating the process
# before extra requests need to be processed
if status_tracker.num_tasks_in_progress == 0 and next_request is None:
break
time.sleep(0.001) # avoid busy waiting
# after finishing, log final status
if status_tracker.num_tasks_failed > 0:
raise mlflow.MlflowException(
f"{status_tracker.num_tasks_failed} tasks failed. Errors: {errors}"
)
return [res for _, res in sorted(results)]
| [] |
2024-01-10 | mlflow/mlflow | mlflow~langchain~__init__.py | """
The ``mlflow.langchain`` module provides an API for logging and loading LangChain models.
This module exports multivariate LangChain models in the langchain flavor and univariate
LangChain models in the pyfunc flavor:
LangChain (native) format
This is the main flavor that can be accessed with LangChain APIs.
:py:mod:`mlflow.pyfunc`
Produced for use by generic pyfunc-based deployment tools and for batch inference.
.. _LangChain:
https://python.langchain.com/en/latest/index.html
"""
import logging
import os
from typing import Any, Dict, List, Optional, Union
import pandas as pd
import yaml
import mlflow
from mlflow import pyfunc
from mlflow.environment_variables import _MLFLOW_TESTING
from mlflow.langchain.runnables import _load_runnables, _save_runnables
from mlflow.langchain.utils import (
_BASE_LOAD_KEY,
_MODEL_LOAD_KEY,
_RUNNABLE_LOAD_KEY,
_load_base_lcs,
_save_base_lcs,
_validate_and_wrap_lc_model,
lc_runnables_types,
)
from mlflow.models import Model, ModelInputExample, ModelSignature
from mlflow.models.model import MLMODEL_FILE_NAME
from mlflow.models.signature import _infer_signature_from_input_example
from mlflow.models.utils import _save_example
from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.types.schema import ColSpec, DataType, Schema
from mlflow.utils.annotations import experimental
from mlflow.utils.docstring_utils import LOG_MODEL_PARAM_DOCS, format_docstring
from mlflow.utils.environment import (
_CONDA_ENV_FILE_NAME,
_CONSTRAINTS_FILE_NAME,
_PYTHON_ENV_FILE_NAME,
_REQUIREMENTS_FILE_NAME,
_mlflow_conda_env,
_process_conda_env,
_process_pip_requirements,
_PythonEnv,
_validate_env_arguments,
)
from mlflow.utils.file_utils import get_total_file_size, write_to
from mlflow.utils.model_utils import (
_add_code_from_conf_to_system_path,
_get_flavor_configuration,
_validate_and_copy_code_paths,
_validate_and_prepare_target_save_path,
)
from mlflow.utils.requirements_utils import _get_pinned_requirement
logger = logging.getLogger(mlflow.__name__)
FLAVOR_NAME = "langchain"
_MODEL_TYPE_KEY = "model_type"
def get_default_pip_requirements():
"""
:return: A list of default pip requirements for MLflow Models produced by this flavor.
Calls to :func:`save_model()` and :func:`log_model()` produce a pip environment
that, at a minimum, contains these requirements.
"""
return [_get_pinned_requirement("langchain")]
def get_default_conda_env():
"""
:return: The default Conda environment for MLflow Models produced by calls to
:func:`save_model()` and :func:`log_model()`.
"""
return _mlflow_conda_env(additional_pip_deps=get_default_pip_requirements())
@experimental
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name=FLAVOR_NAME))
def save_model(
lc_model,
path,
conda_env=None,
code_paths=None,
mlflow_model=None,
signature: ModelSignature = None,
input_example: ModelInputExample = None,
pip_requirements=None,
extra_pip_requirements=None,
metadata=None,
loader_fn=None,
persist_dir=None,
example_no_conversion=False,
):
"""
Save a LangChain model to a path on the local file system.
:param lc_model: A LangChain model, which could be a
`Chain <https://python.langchain.com/docs/modules/chains/>`_,
`Agent <https://python.langchain.com/docs/modules/agents/>`_,
`retriever <https://python.langchain.com/docs/modules/data_connection/retrievers/>`_,
or `RunnableSequence <https://python.langchain.com/docs/modules/chains/foundational/sequential_chains#using-lcel>`_.
:param path: Local path where the serialized model (as YAML) is to be saved.
:param conda_env: {{ conda_env }}
:param code_paths: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path when the model is loaded.
:param mlflow_model: :py:mod:`mlflow.models.Model` this flavor is being added to.
:param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
If not specified, the model signature would be set according to
`lc_model.input_keys` and `lc_model.output_keys` as columns names, and
`DataType.string` as the column type.
Alternatively, you can explicitly specify the model signature.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models import infer_signature
chain = LLMChain(llm=llm, prompt=prompt)
prediction = chain.run(input_str)
input_columns = [
{"type": "string", "name": input_key} for input_key in chain.input_keys
]
                          signature = infer_signature(input_columns, prediction)
:param input_example: {{ input_example }}
:param pip_requirements: {{ pip_requirements }}
:param extra_pip_requirements: {{ extra_pip_requirements }}
:param metadata: Custom metadata dictionary passed to the model and stored in the MLmodel file.
.. Note:: Experimental: This parameter may change or be removed in a future
release without warning.
:param loader_fn: A function that's required for models containing objects that aren't natively
serialized by LangChain.
This function takes a string `persist_dir` as an argument and returns the
specific object that the model needs. Depending on the model,
this could be a retriever, vectorstore, requests_wrapper, embeddings, or
database. For RetrievalQA Chain and retriever models, the object is a
(`retriever <https://python.langchain.com/docs/modules/data_connection/retrievers/>`_).
For APIChain models, it's a
(`requests_wrapper <https://python.langchain.com/docs/modules/agents/tools/integrations/requests>`_).
For HypotheticalDocumentEmbedder models, it's an
(`embeddings <https://python.langchain.com/docs/modules/data_connection/text_embedding/>`_).
For SQLDatabaseChain models, it's a
(`database <https://python.langchain.com/docs/modules/agents/toolkits/sql_database>`_).
:param persist_dir: The directory where the object is stored. The `loader_fn`
takes this string as the argument to load the object.
This is optional for models containing objects that aren't natively
serialized by LangChain. MLflow logs the content in this directory as
artifacts in the subdirectory named `persist_dir_data`.
Here is the code snippet for logging a RetrievalQA chain with `loader_fn`
and `persist_dir`:
.. code-block:: python
qa = RetrievalQA.from_llm(llm=OpenAI(), retriever=db.as_retriever())
def load_retriever(persist_directory):
embeddings = OpenAIEmbeddings()
vectorstore = FAISS.load_local(persist_directory, embeddings)
return vectorstore.as_retriever()
with mlflow.start_run() as run:
logged_model = mlflow.langchain.log_model(
qa,
artifact_path="retrieval_qa",
loader_fn=load_retriever,
persist_dir=persist_dir,
)
See a complete example in examples/langchain/retrieval_qa_chain.py.
:param example_no_conversion: {{ example_no_conversion }}
"""
import langchain
from langchain.schema import BaseRetriever
lc_model = _validate_and_wrap_lc_model(lc_model, loader_fn)
_validate_env_arguments(conda_env, pip_requirements, extra_pip_requirements)
path = os.path.abspath(path)
_validate_and_prepare_target_save_path(path)
code_dir_subpath = _validate_and_copy_code_paths(code_paths, path)
if signature is None:
if input_example is not None:
wrapped_model = _LangChainModelWrapper(lc_model)
signature = _infer_signature_from_input_example(input_example, wrapped_model)
else:
if hasattr(lc_model, "input_keys"):
input_columns = [
ColSpec(type=DataType.string, name=input_key)
for input_key in lc_model.input_keys
]
input_schema = Schema(input_columns)
else:
input_schema = None
if (
hasattr(lc_model, "output_keys")
and len(lc_model.output_keys) == 1
and not isinstance(lc_model, BaseRetriever)
):
output_columns = [
ColSpec(type=DataType.string, name=output_key)
for output_key in lc_model.output_keys
]
output_schema = Schema(output_columns)
else:
# TODO: empty output schema if multiple output_keys or is a retriever. fix later!
# https://databricks.atlassian.net/browse/ML-34706
output_schema = None
signature = (
ModelSignature(input_schema, output_schema)
if input_schema or output_schema
else None
)
if mlflow_model is None:
mlflow_model = Model()
if signature is not None:
mlflow_model.signature = signature
if input_example is not None:
_save_example(mlflow_model, input_example, path, example_no_conversion)
if metadata is not None:
mlflow_model.metadata = metadata
model_data_kwargs = _save_model(lc_model, path, loader_fn, persist_dir)
pyfunc.add_to_model(
mlflow_model,
loader_module="mlflow.langchain",
conda_env=_CONDA_ENV_FILE_NAME,
python_env=_PYTHON_ENV_FILE_NAME,
code=code_dir_subpath,
**model_data_kwargs,
)
flavor_conf = {
_MODEL_TYPE_KEY: lc_model.__class__.__name__,
**model_data_kwargs,
}
mlflow_model.add_flavor(
FLAVOR_NAME,
langchain_version=langchain.__version__,
code=code_dir_subpath,
**flavor_conf,
)
if size := get_total_file_size(path):
mlflow_model.model_size_bytes = size
mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))
if conda_env is None:
if pip_requirements is None:
default_reqs = get_default_pip_requirements()
inferred_reqs = mlflow.models.infer_pip_requirements(
str(path), FLAVOR_NAME, fallback=default_reqs
)
default_reqs = sorted(set(inferred_reqs).union(default_reqs))
else:
default_reqs = None
conda_env, pip_requirements, pip_constraints = _process_pip_requirements(
default_reqs, pip_requirements, extra_pip_requirements
)
else:
conda_env, pip_requirements, pip_constraints = _process_conda_env(conda_env)
with open(os.path.join(path, _CONDA_ENV_FILE_NAME), "w") as f:
yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
if pip_constraints:
write_to(os.path.join(path, _CONSTRAINTS_FILE_NAME), "\n".join(pip_constraints))
write_to(os.path.join(path, _REQUIREMENTS_FILE_NAME), "\n".join(pip_requirements))
_PythonEnv.current().to_yaml(os.path.join(path, _PYTHON_ENV_FILE_NAME))
@experimental
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name=FLAVOR_NAME))
def log_model(
lc_model,
artifact_path,
conda_env=None,
code_paths=None,
registered_model_name=None,
signature: ModelSignature = None,
input_example: ModelInputExample = None,
await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,
pip_requirements=None,
extra_pip_requirements=None,
metadata=None,
loader_fn=None,
persist_dir=None,
example_no_conversion=False,
):
"""
Log a LangChain model as an MLflow artifact for the current run.
:param lc_model: A LangChain model, which could be a
`Chain <https://python.langchain.com/docs/modules/chains/>`_,
`Agent <https://python.langchain.com/docs/modules/agents/>`_, or
`retriever <https://python.langchain.com/docs/modules/data_connection/retrievers/>`_.
:param artifact_path: Run-relative artifact path.
:param conda_env: {{ conda_env }}
:param code_paths: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path when the model is loaded.
:param registered_model_name: This argument may change or be removed in a
future release without warning. If given, create a model
version under ``registered_model_name``, also creating a
registered model if one with the given name does not exist.
:param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output
:py:class:`Schema <mlflow.types.Schema>`.
If not specified, the model signature would be set according to
`lc_model.input_keys` and `lc_model.output_keys` as columns names, and
`DataType.string` as the column type.
Alternatively, you can explicitly specify the model signature.
The model signature can be :py:func:`inferred
<mlflow.models.infer_signature>` from datasets with valid model input
(e.g. the training dataset with target column omitted) and valid model
output (e.g. model predictions generated on the training dataset),
for example:
.. code-block:: python
from mlflow.models import infer_signature
chain = LLMChain(llm=llm, prompt=prompt)
prediction = chain.run(input_str)
input_columns = [
{"type": "string", "name": input_key} for input_key in chain.input_keys
]
                          signature = infer_signature(input_columns, prediction)
:param input_example: {{ input_example }}
:param await_registration_for: Number of seconds to wait for the model version
to finish being created and is in ``READY`` status.
By default, the function waits for five minutes.
Specify 0 or None to skip waiting.
:param pip_requirements: {{ pip_requirements }}
:param extra_pip_requirements: {{ extra_pip_requirements }}
:param metadata: Custom metadata dictionary passed to the model and stored in the MLmodel file.
.. Note:: Experimental: This parameter may change or be removed in a future
release without warning.
:param loader_fn: A function that's required for models containing objects that aren't natively
serialized by LangChain.
This function takes a string `persist_dir` as an argument and returns the
specific object that the model needs. Depending on the model,
this could be a retriever, vectorstore, requests_wrapper, embeddings, or
database. For RetrievalQA Chain and retriever models, the object is a
(`retriever <https://python.langchain.com/docs/modules/data_connection/retrievers/>`_).
For APIChain models, it's a
(`requests_wrapper <https://python.langchain.com/docs/modules/agents/tools/integrations/requests>`_).
For HypotheticalDocumentEmbedder models, it's an
(`embeddings <https://python.langchain.com/docs/modules/data_connection/text_embedding/>`_).
For SQLDatabaseChain models, it's a
(`database <https://python.langchain.com/docs/modules/agents/toolkits/sql_database>`_).
:param persist_dir: The directory where the object is stored. The `loader_fn`
takes this string as the argument to load the object.
This is optional for models containing objects that aren't natively
serialized by LangChain. MLflow logs the content in this directory as
artifacts in the subdirectory named `persist_dir_data`.
Here is the code snippet for logging a RetrievalQA chain with `loader_fn`
and `persist_dir`:
.. code-block:: python
qa = RetrievalQA.from_llm(llm=OpenAI(), retriever=db.as_retriever())
def load_retriever(persist_directory):
embeddings = OpenAIEmbeddings()
vectorstore = FAISS.load_local(persist_directory, embeddings)
return vectorstore.as_retriever()
with mlflow.start_run() as run:
logged_model = mlflow.langchain.log_model(
qa,
artifact_path="retrieval_qa",
loader_fn=load_retriever,
persist_dir=persist_dir,
)
See a complete example in examples/langchain/retrieval_qa_chain.py.
:param example_no_conversion: {{ example_no_conversion }}
:return: A :py:class:`ModelInfo <mlflow.models.model.ModelInfo>` instance that contains the
metadata of the logged model.
"""
lc_model = _validate_and_wrap_lc_model(lc_model, loader_fn)
return Model.log(
artifact_path=artifact_path,
flavor=mlflow.langchain,
registered_model_name=registered_model_name,
lc_model=lc_model,
conda_env=conda_env,
code_paths=code_paths,
signature=signature,
input_example=input_example,
await_registration_for=await_registration_for,
pip_requirements=pip_requirements,
extra_pip_requirements=extra_pip_requirements,
metadata=metadata,
loader_fn=loader_fn,
persist_dir=persist_dir,
example_no_conversion=example_no_conversion,
)
def _save_model(model, path, loader_fn, persist_dir):
if isinstance(model, lc_runnables_types()):
return _save_runnables(model, path, loader_fn=loader_fn, persist_dir=persist_dir)
else:
return _save_base_lcs(model, path, loader_fn, persist_dir)
def _load_model(local_model_path, flavor_conf):
    # model_type alone is not accurate because the class can be a subclass
    # of the supported types, so we use _MODEL_LOAD_KEY to determine
    # which load function to use
model_load_fn = flavor_conf.get(_MODEL_LOAD_KEY)
if model_load_fn == _RUNNABLE_LOAD_KEY:
return _load_runnables(local_model_path, flavor_conf)
if model_load_fn == _BASE_LOAD_KEY:
return _load_base_lcs(local_model_path, flavor_conf)
raise mlflow.MlflowException(
f"Failed to load LangChain model. Unknown model type: {flavor_conf.get(_MODEL_TYPE_KEY)}"
)
class _LangChainModelWrapper:
def __init__(self, lc_model):
self.lc_model = lc_model
def predict( # pylint: disable=unused-argument
self,
data: Union[pd.DataFrame, List[Union[str, Dict[str, Any]]], Any],
params: Optional[Dict[str, Any]] = None, # pylint: disable=unused-argument
) -> List[str]:
"""
:param data: Model input data.
:param params: Additional parameters to pass to the model for inference.
.. Note:: Experimental: This parameter may change or be removed in a future
release without warning.
:return: Model predictions.
"""
# numpy array is not json serializable, so we convert it to list
# then send it to the model
def _convert_ndarray_to_list(data):
import numpy as np
if isinstance(data, np.ndarray):
return data.tolist()
if isinstance(data, list):
return [_convert_ndarray_to_list(d) for d in data]
if isinstance(data, dict):
return {k: _convert_ndarray_to_list(v) for k, v in data.items()}
return data
from mlflow.langchain.api_request_parallel_processor import process_api_requests
return_first_element = False
if isinstance(data, pd.DataFrame):
messages = data.to_dict(orient="records")
else:
data = _convert_ndarray_to_list(data)
if isinstance(self.lc_model, lc_runnables_types()):
messages = [data]
return_first_element = True
elif isinstance(data, list) and (
all(isinstance(d, str) for d in data) or all(isinstance(d, dict) for d in data)
):
messages = data
else:
raise mlflow.MlflowException.invalid_parameter_value(
"Input must be a pandas DataFrame or a list of strings "
"or a list of dictionaries "
f"for model {self.lc_model.__class__.__name__}"
)
results = process_api_requests(lc_model=self.lc_model, requests=messages)
return results[0] if return_first_element else results
class _TestLangChainWrapper(_LangChainModelWrapper):
"""
A wrapper class that should be used for testing purposes only.
"""
def predict(
self, data, params: Optional[Dict[str, Any]] = None # pylint: disable=unused-argument
):
"""
:param data: Model input data.
:param params: Additional parameters to pass to the model for inference.
.. Note:: Experimental: This parameter may change or be removed in a future
release without warning.
:return: Model predictions.
"""
import langchain
from langchain.schema.retriever import BaseRetriever
from mlflow.utils.openai_utils import (
TEST_CONTENT,
TEST_INTERMEDIATE_STEPS,
TEST_SOURCE_DOCUMENTS,
)
from tests.langchain.test_langchain_model_export import _mock_async_request
if isinstance(
self.lc_model,
(
langchain.chains.llm.LLMChain,
langchain.chains.RetrievalQA,
BaseRetriever,
),
):
mockContent = TEST_CONTENT
elif isinstance(self.lc_model, langchain.agents.agent.AgentExecutor):
mockContent = f"Final Answer: {TEST_CONTENT}"
else:
mockContent = TEST_CONTENT
with _mock_async_request(mockContent):
result = super().predict(data)
if (
hasattr(self.lc_model, "return_source_documents")
and self.lc_model.return_source_documents
):
for res in result:
res["source_documents"] = TEST_SOURCE_DOCUMENTS
if (
hasattr(self.lc_model, "return_intermediate_steps")
and self.lc_model.return_intermediate_steps
):
for res in result:
res["intermediate_steps"] = TEST_INTERMEDIATE_STEPS
return result
def _load_pyfunc(path):
"""
Load PyFunc implementation for LangChain. Called by ``pyfunc.load_model``.
:param path: Local filesystem path to the MLflow Model with the ``langchain`` flavor.
"""
wrapper_cls = _TestLangChainWrapper if _MLFLOW_TESTING.get() else _LangChainModelWrapper
return wrapper_cls(_load_model_from_local_fs(path))
def _load_model_from_local_fs(local_model_path):
flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)
_add_code_from_conf_to_system_path(local_model_path, flavor_conf)
return _load_model(local_model_path, flavor_conf)
@experimental
def load_model(model_uri, dst_path=None):
"""
Load a LangChain model from a local file or a run.
:param model_uri: The location, in URI format, of the MLflow model. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/tracking.html#
artifact-locations>`_.
:param dst_path: The local filesystem path to which to download the model artifact.
This directory must already exist. If unspecified, a local output
path will be created.
:return: A LangChain model instance
"""
local_model_path = _download_artifact_from_uri(artifact_uri=model_uri, output_path=dst_path)
return _load_model_from_local_fs(local_model_path)
| [] |
2024-01-10 | mlflow/mlflow | mlflow~langchain~runnables.py | import os
from pathlib import Path
from typing import Union
import cloudpickle
import yaml
from mlflow.exceptions import MlflowException
from mlflow.langchain.utils import (
_BASE_LOAD_KEY,
_CONFIG_LOAD_KEY,
_MODEL_DATA_FOLDER_NAME,
_MODEL_DATA_KEY,
_MODEL_DATA_PKL_FILE_NAME,
_MODEL_DATA_YAML_FILE_NAME,
_MODEL_LOAD_KEY,
_MODEL_TYPE_KEY,
_RUNNABLE_LOAD_KEY,
_UNSUPPORTED_MODEL_ERROR_MESSAGE,
_load_base_lcs,
_load_from_json,
_load_from_pickle,
_load_from_yaml,
_save_base_lcs,
_validate_and_wrap_lc_model,
base_lc_types,
custom_type_to_loader_dict,
lc_runnable_branch_type,
lc_runnable_with_steps_types,
lc_runnables_types,
picklable_runnable_types,
)
_STEPS_FOLDER_NAME = "steps"
_RUNNABLE_STEPS_FILE_NAME = "steps.yaml"
_BRANCHES_FOLDER_NAME = "branches"
_RUNNABLE_BRANCHES_FILE_NAME = "branches.yaml"
_DEFAULT_BRANCH_NAME = "default"
def _load_model_from_config(path, model_config):
from langchain.chains.loading import type_to_loader_dict as chains_type_to_loader_dict
from langchain.llms import get_type_to_cls_dict as llms_get_type_to_cls_dict
try:
from langchain.prompts.loading import type_to_loader_dict as prompts_types
except ImportError:
prompts_types = {"prompt", "few_shot_prompt"}
config_path = os.path.join(path, model_config.get(_MODEL_DATA_KEY, _MODEL_DATA_YAML_FILE_NAME))
# Load runnables from config file
if config_path.endswith(".yaml"):
config = _load_from_yaml(config_path)
elif config_path.endswith(".json"):
config = _load_from_json(config_path)
else:
raise MlflowException(
f"Cannot load runnable without a config file. Got path {config_path}."
)
_type = config.get("_type")
if _type in chains_type_to_loader_dict:
from langchain.chains.loading import load_chain
return load_chain(config_path)
elif _type in prompts_types:
from langchain.prompts.loading import load_prompt
return load_prompt(config_path)
elif _type in llms_get_type_to_cls_dict():
from langchain.llms.loading import load_llm
return load_llm(config_path)
elif _type in custom_type_to_loader_dict():
return custom_type_to_loader_dict()[_type](config)
raise MlflowException(f"Unsupported type {_type} for loading.")
def _load_model_from_path(path: str, model_config=None):
model_load_fn = model_config.get(_MODEL_LOAD_KEY)
if model_load_fn == _RUNNABLE_LOAD_KEY:
return _load_runnables(path, model_config)
if model_load_fn == _BASE_LOAD_KEY:
return _load_base_lcs(path, model_config)
if model_load_fn == _CONFIG_LOAD_KEY:
return _load_model_from_config(path, model_config)
raise MlflowException(f"Unsupported model load key {model_load_fn}")
def _load_runnable_with_steps(file_path: Union[Path, str], model_type: str):
"""
    Load a RunnableSequence or RunnableParallel model from the given directory.
    :param file_path: Path to the directory to load the model from.
:param model_type: Type of the model to load.
"""
from langchain.schema.runnable import RunnableParallel, RunnableSequence
# Convert file to Path object.
load_path = Path(file_path) if isinstance(file_path, str) else file_path
if not load_path.exists() or not load_path.is_dir():
raise MlflowException(
f"File {load_path} must exist and must be a directory "
"in order to load runnable with steps."
)
steps_conf_file = load_path / _RUNNABLE_STEPS_FILE_NAME
if not steps_conf_file.exists():
raise MlflowException(
f"File {steps_conf_file} must exist in order to load runnable with steps."
)
steps_conf = _load_from_yaml(steps_conf_file)
steps_path = load_path / _STEPS_FOLDER_NAME
if not steps_path.exists() or not steps_path.is_dir():
raise MlflowException(
f"Folder {steps_path} must exist and must be a directory "
"in order to load runnable with steps."
)
steps = {}
for step in os.listdir(steps_path):
config = steps_conf.get(step)
# load model from the folder of the step
runnable = _load_model_from_path(os.path.join(steps_path, step), config)
steps[step] = runnable
if model_type == RunnableSequence.__name__:
steps = [value for _, value in sorted(steps.items(), key=lambda item: int(item[0]))]
return runnable_sequence_from_steps(steps)
if model_type == RunnableParallel.__name__:
return RunnableParallel(steps)
def runnable_sequence_from_steps(steps):
"""
Construct a RunnableSequence from steps.
:param steps: List of steps to construct the RunnableSequence from.
"""
from langchain.schema.runnable import RunnableSequence
if len(steps) < 2:
raise ValueError(f"RunnableSequence must have at least 2 steps, got {len(steps)}.")
first, *middle, last = steps
return RunnableSequence(first=first, middle=middle, last=last)
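# For example, steps [prompt, llm, parser] become
# RunnableSequence(first=prompt, middle=[llm], last=parser), i.e. prompt | llm | parser.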
def _load_runnable_branch(file_path: Union[Path, str], model_type: str):
"""
    Load a RunnableBranch model from the given directory.
    :param file_path: Path to the directory to load the model from.
:param model_type: Type of the model to load.
"""
from langchain.schema.runnable import RunnableBranch
# Convert file to Path object.
load_path = Path(file_path) if isinstance(file_path, str) else file_path
if not load_path.exists() or not load_path.is_dir():
raise MlflowException(
f"File {load_path} must exist and must be a directory "
"in order to load runnable with steps."
)
branches_conf_file = load_path / _RUNNABLE_BRANCHES_FILE_NAME
if not branches_conf_file.exists():
raise MlflowException(
f"File {branches_conf_file} must exist in order to load runnable with steps."
)
branches_conf = _load_from_yaml(branches_conf_file)
branches_path = load_path / _BRANCHES_FOLDER_NAME
if not branches_path.exists() or not branches_path.is_dir():
raise MlflowException(
f"Folder {branches_path} must exist and must be a directory "
"in order to load runnable with steps."
)
branches = []
for branch in os.listdir(branches_path):
# load model from the folder of the branch
if branch == _DEFAULT_BRANCH_NAME:
default_branch_path = branches_path / _DEFAULT_BRANCH_NAME
default = _load_model_from_path(
default_branch_path, branches_conf.get(_DEFAULT_BRANCH_NAME)
)
else:
branch_tuple = []
for i in range(2):
config = branches_conf.get(f"{branch}-{i}")
runnable = _load_model_from_path(
os.path.join(branches_path, branch, str(i)), config
)
branch_tuple.append(runnable)
branches.append(tuple(branch_tuple))
# default branch must be the last branch
branches.append(default)
return RunnableBranch(*branches)
def _save_internal_runnables(runnable, path, loader_fn, persist_dir):
conf = {}
if isinstance(runnable, lc_runnables_types()):
conf[_MODEL_TYPE_KEY] = runnable.__class__.__name__
conf.update(_save_runnables(runnable, path, loader_fn, persist_dir))
elif isinstance(runnable, lc_runnable_branch_type()):
conf[_MODEL_TYPE_KEY] = runnable.__class__.__name__
conf.update(_save_runnable_branch(runnable, path, loader_fn, persist_dir))
elif isinstance(runnable, base_lc_types()):
lc_model = _validate_and_wrap_lc_model(runnable, loader_fn)
conf[_MODEL_TYPE_KEY] = lc_model.__class__.__name__
conf.update(_save_base_lcs(lc_model, path, loader_fn, persist_dir))
else:
conf = {
_MODEL_TYPE_KEY: runnable.__class__.__name__,
_MODEL_DATA_KEY: _MODEL_DATA_YAML_FILE_NAME,
_MODEL_LOAD_KEY: _CONFIG_LOAD_KEY,
}
path = path / _MODEL_DATA_YAML_FILE_NAME
# Save some simple runnables that langchain natively supports.
if hasattr(runnable, "save"):
runnable.save(path)
# TODO: check if `dict` is enough to load it back
elif hasattr(runnable, "dict"):
runnable_dict = runnable.dict()
with open(path, "w") as f:
yaml.dump(runnable_dict, f, default_flow_style=False)
else:
return
return conf
def _save_runnable_with_steps(steps, file_path: Union[Path, str], loader_fn=None, persist_dir=None):
"""
Save the model with steps. Currently it supports saving RunnableSequence and RunnableParallel.
If saving a RunnableSequence, steps is a list of Runnable objects. We save each step to the
subfolder named by the step index.
e.g. - model
- steps
- 0
- model.yaml
- 1
- model.pkl
- steps.yaml
If saving a RunnableParallel, steps is a dictionary of key-Runnable pairs. We save each step to
the subfolder named by the key.
e.g. - model
- steps
- context
- model.yaml
- question
- model.pkl
- steps.yaml
We save steps.yaml file to the model folder. It contains each step's model's configuration.
    :param steps: Steps of the runnable.
    :param file_path: Path to the directory to save the model to.
"""
# Convert file to Path object.
save_path = Path(file_path) if isinstance(file_path, str) else file_path
save_path.mkdir(parents=True, exist_ok=True)
# Save steps into a folder
steps_path = save_path / _STEPS_FOLDER_NAME
steps_path.mkdir()
if isinstance(steps, list):
generator = enumerate(steps)
elif isinstance(steps, dict):
generator = steps.items()
unsaved_runnables = {}
steps_conf = {}
for key, runnable in generator:
step = str(key)
# Save each step into a subfolder named by step
save_runnable_path = steps_path / step
save_runnable_path.mkdir()
if result := _save_internal_runnables(runnable, save_runnable_path, loader_fn, persist_dir):
steps_conf[step] = result
else:
unsaved_runnables[step] = str(runnable)
if unsaved_runnables:
raise MlflowException(
f"Failed to save runnable sequence: {unsaved_runnables}. "
"Runnable must have either `save` or `dict` method."
)
# save steps configs
with save_path.joinpath(_RUNNABLE_STEPS_FILE_NAME).open("w") as f:
yaml.dump(steps_conf, f, default_flow_style=False)
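# Illustrative sketch (an assumption for exposition, not part of the original module):
# a round trip through this module's save/load helpers for a hypothetical LangChain
# RunnableSequence. `sequence` and `tmp_dir` are placeholders supplied by the caller;
# loader_fn and persist_dir are omitted for brevity.
def _example_round_trip_runnable_sequence(sequence, tmp_dir):
    # Persist each step of the sequence into tmp_dir/steps/<index>/ plus steps.yaml.
    _save_runnable_with_steps(sequence.steps, tmp_dir)
    # Rebuild the sequence from the saved steps using the saved per-step configuration.
    return _load_runnable_with_steps(tmp_dir, sequence.__class__.__name__)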
def _save_runnable_branch(model, file_path, loader_fn, persist_dir):
"""
    Save a runnable branch to the given path.
"""
save_path = Path(file_path) if isinstance(file_path, str) else file_path
save_path.mkdir(parents=True, exist_ok=True)
# save branches into a folder
branches_path = save_path / _BRANCHES_FOLDER_NAME
branches_path.mkdir()
unsaved_runnables = {}
branches_conf = {}
for index, branch_tuple in enumerate(model.branches):
# Save each branch into a subfolder named by index
# and save condition and runnable into subfolder
for i, runnable in enumerate(branch_tuple):
save_runnable_path = branches_path / str(index) / str(i)
save_runnable_path.mkdir(parents=True)
branches_conf[f"{index}-{i}"] = {}
if result := _save_internal_runnables(
runnable, save_runnable_path, loader_fn, persist_dir
):
branches_conf[f"{index}-{i}"] = result
else:
unsaved_runnables[f"{index}-{i}"] = str(runnable)
# save default branch
default_branch_path = branches_path / _DEFAULT_BRANCH_NAME
default_branch_path.mkdir()
if result := _save_internal_runnables(
model.default, default_branch_path, loader_fn, persist_dir
):
branches_conf[_DEFAULT_BRANCH_NAME] = result
else:
unsaved_runnables[_DEFAULT_BRANCH_NAME] = str(model.default)
if unsaved_runnables:
raise MlflowException(
f"Failed to save runnable branch: {unsaved_runnables}. "
"Runnable must have either `save` or `dict` method."
)
# save branches configs
with save_path.joinpath(_RUNNABLE_BRANCHES_FILE_NAME).open("w") as f:
yaml.dump(branches_conf, f, default_flow_style=False)
def _save_picklable_runnable(model, path):
if not path.endswith(".pkl"):
raise ValueError(f"File path must end with .pkl, got {path}.")
with open(path, "wb") as f:
cloudpickle.dump(model, f)
def _save_runnables(model, path, loader_fn=None, persist_dir=None):
model_data_kwargs = {_MODEL_LOAD_KEY: _RUNNABLE_LOAD_KEY}
if isinstance(model, lc_runnable_with_steps_types()):
model_data_path = _MODEL_DATA_FOLDER_NAME
_save_runnable_with_steps(
model.steps, os.path.join(path, model_data_path), loader_fn, persist_dir
)
elif isinstance(model, picklable_runnable_types()):
model_data_path = _MODEL_DATA_PKL_FILE_NAME
_save_picklable_runnable(model, os.path.join(path, model_data_path))
elif isinstance(model, lc_runnable_branch_type()):
model_data_path = _MODEL_DATA_FOLDER_NAME
_save_runnable_branch(model, os.path.join(path, model_data_path), loader_fn, persist_dir)
else:
raise MlflowException.invalid_parameter_value(
_UNSUPPORTED_MODEL_ERROR_MESSAGE.format(instance_type=type(model).__name__)
)
model_data_kwargs.update({_MODEL_DATA_KEY: model_data_path})
return model_data_kwargs
def _load_runnables(path, conf):
model_type = conf.get(_MODEL_TYPE_KEY)
model_data = conf.get(_MODEL_DATA_KEY, _MODEL_DATA_YAML_FILE_NAME)
if model_type in (x.__name__ for x in lc_runnable_with_steps_types()):
return _load_runnable_with_steps(os.path.join(path, model_data), model_type)
if (
model_type in (x.__name__ for x in picklable_runnable_types())
or model_data == _MODEL_DATA_PKL_FILE_NAME
):
return _load_from_pickle(os.path.join(path, model_data))
if model_type in (x.__name__ for x in lc_runnable_branch_type()):
return _load_runnable_branch(os.path.join(path, model_data), model_type)
raise MlflowException.invalid_parameter_value(
_UNSUPPORTED_MODEL_ERROR_MESSAGE.format(instance_type=model_type)
)
| [
"{'few_shot_prompt', 'prompt'}"
] |
2024-01-10 | mlflow/mlflow | mlflow~openai~retry.py | import logging
from contextlib import contextmanager
_logger = logging.getLogger(__name__)
# Use the same retry logic as LangChain:
# https://github.com/hwchase17/langchain/blob/955bd2e1db8d008d628963cb8d2bad5c1d354744/langchain/llms/openai.py#L69-L88
def _create_retry_decorator():
import openai.error
import tenacity
min_seconds = 4
max_seconds = 10
max_attempts = 5
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterwards
return tenacity.retry(
reraise=True,
stop=tenacity.stop_after_attempt(max_attempts),
wait=tenacity.wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
tenacity.retry_if_exception_type(openai.error.Timeout)
| tenacity.retry_if_exception_type(openai.error.APIError)
| tenacity.retry_if_exception_type(openai.error.APIConnectionError)
| tenacity.retry_if_exception_type(openai.error.RateLimitError)
| tenacity.retry_if_exception_type(openai.error.ServiceUnavailableError)
),
before_sleep=tenacity.before_sleep_log(_logger, logging.WARNING),
)
@contextmanager
def openai_auto_retry_patch():
"""
Context manager that patches the openai python package to automatically retry on transient
errors.
"""
import openai
classes_to_patch = {
"Audio": [
"transcribe",
"atranscribe",
"translate",
"atranslate",
],
"Completion": [
"create",
"acreate",
],
"ChatCompletion": [
"create",
"acreate",
],
"Embedding": [
"create",
"acreate",
],
"Edit": [
"create",
"acreate",
],
"Image": [
"create",
"acreate",
"create_variation",
"acreate_variation",
"create_edit",
"acreate_edit",
],
"Moderation": [
"create",
"acreate",
],
}
original_methods = {}
try:
retry_decorator = _create_retry_decorator()
for class_name, method_names in classes_to_patch.items():
for method_name in method_names:
class_obj = getattr(openai, class_name)
original_method = getattr(class_obj, method_name)
patched_method = retry_decorator(original_method)
original_methods[(class_name, method_name)] = original_method
setattr(class_obj, method_name, patched_method)
yield
finally:
# Restore the original methods
for (class_name, method_name), original_method in original_methods.items():
class_obj = getattr(openai, class_name)
setattr(class_obj, method_name, original_method)
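# Minimal usage sketch (an assumption for exposition, not part of the original module):
# any patched OpenAI call made inside the context manager is retried on the transient
# errors listed in _create_retry_decorator. The model name and prompt are placeholders.
def _example_chat_completion_with_retries():
    import openai
    with openai_auto_retry_patch():
        # ChatCompletion.create is temporarily wrapped with the tenacity retry decorator.
        return openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Say hello"}],
        )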
| [] |
2024-01-10 | mlflow/mlflow | mlflow~gateway~providers~__init__.py | from typing import Type
from mlflow.exceptions import MlflowException
from mlflow.gateway.config import Provider
from mlflow.gateway.providers.base import BaseProvider
def get_provider(provider: Provider) -> Type[BaseProvider]:
from mlflow.gateway.providers.ai21labs import AI21LabsProvider
from mlflow.gateway.providers.anthropic import AnthropicProvider
from mlflow.gateway.providers.bedrock import AWSBedrockProvider
from mlflow.gateway.providers.cohere import CohereProvider
from mlflow.gateway.providers.huggingface import HFTextGenerationInferenceServerProvider
from mlflow.gateway.providers.mlflow import MlflowModelServingProvider
from mlflow.gateway.providers.mosaicml import MosaicMLProvider
from mlflow.gateway.providers.openai import OpenAIProvider
from mlflow.gateway.providers.palm import PaLMProvider
provider_to_class = {
Provider.OPENAI: OpenAIProvider,
Provider.ANTHROPIC: AnthropicProvider,
Provider.COHERE: CohereProvider,
Provider.AI21LABS: AI21LabsProvider,
Provider.MOSAICML: MosaicMLProvider,
Provider.PALM: PaLMProvider,
Provider.MLFLOW_MODEL_SERVING: MlflowModelServingProvider,
Provider.HUGGINGFACE_TEXT_GENERATION_INFERENCE: HFTextGenerationInferenceServerProvider,
Provider.BEDROCK: AWSBedrockProvider,
}
if prov := provider_to_class.get(provider):
return prov
raise MlflowException.invalid_parameter_value(f"Provider {provider} not found")
| [] |
2024-01-10 | mlflow/mlflow | examples~evaluation~evaluate_with_llm_judge.py | import os
import openai
import pandas as pd
import mlflow
from mlflow.metrics.genai import EvaluationExample, answer_similarity
assert "OPENAI_API_KEY" in os.environ, "Please set the OPENAI_API_KEY environment variable."
# testing with OpenAI gpt-3.5-turbo
example = EvaluationExample(
input="What is MLflow?",
output="MLflow is an open-source platform for managing machine "
"learning workflows, including experiment tracking, model packaging, "
"versioning, and deployment, simplifying the ML lifecycle.",
score=4,
justification="The definition effectively explains what MLflow is "
"its purpose, and its developer. It could be more concise for a 5-score.",
grading_context={
"ground_truth": "MLflow is an open-source platform for managing "
"the end-to-end machine learning (ML) lifecycle. It was developed by Databricks, "
"a company that specializes in big data and machine learning solutions. MLflow is "
"designed to address the challenges that data scientists and machine learning "
"engineers face when developing, training, and deploying machine learning models."
},
)
answer_similarity_metric = answer_similarity(examples=[example])
eval_df = pd.DataFrame(
{
"inputs": [
"What is MLflow?",
"What is Spark?",
"What is Python?",
],
"ground_truth": [
"MLflow is an open-source platform for managing the end-to-end machine learning (ML) lifecycle. It was developed by Databricks, a company that specializes in big data and machine learning solutions. MLflow is designed to address the challenges that data scientists and machine learning engineers face when developing, training, and deploying machine learning models.",
"Apache Spark is an open-source, distributed computing system designed for big data processing and analytics. It was developed in response to limitations of the Hadoop MapReduce computing model, offering improvements in speed and ease of use. Spark provides libraries for various tasks such as data ingestion, processing, and analysis through its components like Spark SQL for structured data, Spark Streaming for real-time data processing, and MLlib for machine learning tasks",
"Python is a high-level programming language that was created by Guido van Rossum and released in 1991. It emphasizes code readability and allows developers to express concepts in fewer lines of code than languages like C++ or Java. Python is used in various domains, including web development, scientific computing, data analysis, and machine learning.",
],
}
)
with mlflow.start_run() as run:
system_prompt = "Answer the following question in two sentences"
logged_model = mlflow.openai.log_model(
model="gpt-3.5-turbo",
task=openai.ChatCompletion,
artifact_path="model",
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": "{question}"},
],
)
results = mlflow.evaluate(
logged_model.model_uri,
eval_df,
targets="ground_truth",
model_type="question-answering",
extra_metrics=[answer_similarity_metric],
)
print(results)
eval_table = results.tables["eval_results_table"]
print(eval_table)
| [
"{question}",
"Answer the following question in two sentences"
] |
2024-01-10 | mlflow/mlflow | mlflow~gateway~providers~bedrock.py | import json
import time
from enum import Enum
import boto3
import botocore.config
import botocore.exceptions
from fastapi import HTTPException
from fastapi.encoders import jsonable_encoder
from mlflow.gateway.config import AWSBedrockConfig, AWSIdAndKey, AWSRole, RouteConfig
from mlflow.gateway.constants import (
MLFLOW_AI_GATEWAY_ANTHROPIC_DEFAULT_MAX_TOKENS,
)
from mlflow.gateway.exceptions import AIGatewayConfigException
from mlflow.gateway.providers.anthropic import AnthropicAdapter
from mlflow.gateway.providers.base import BaseProvider, ProviderAdapter
from mlflow.gateway.providers.cohere import CohereAdapter
from mlflow.gateway.providers.utils import rename_payload_keys
from mlflow.gateway.schemas import chat, completions, embeddings
AWS_BEDROCK_ANTHROPIC_MAXIMUM_MAX_TOKENS = 8191
class AWSBedrockAnthropicAdapter(AnthropicAdapter):
@classmethod
def completions_to_model(cls, payload, config):
payload = super().completions_to_model(payload, config)
if "\n\nHuman:" not in payload.get("stop_sequences", []):
payload.setdefault("stop_sequences", []).append("\n\nHuman:")
payload["max_tokens_to_sample"] = min(
payload.get("max_tokens_to_sample", MLFLOW_AI_GATEWAY_ANTHROPIC_DEFAULT_MAX_TOKENS),
AWS_BEDROCK_ANTHROPIC_MAXIMUM_MAX_TOKENS,
)
return payload
@classmethod
def model_to_completions(cls, payload, config):
payload["model"] = config.model.name
return super().model_to_completions(payload, config)
class AWSTitanAdapter(ProviderAdapter):
# TODO handle top_p, top_k, etc.
@classmethod
def completions_to_model(cls, payload, config):
n = payload.pop("n", 1)
if n != 1:
raise HTTPException(
status_code=422,
detail="'n' must be '1' for AWS Titan models. Received value: '{n}'.",
)
# The range of Titan's temperature is 0-1, but ours is 0-2, so we halve it
if "temperature" in payload:
payload["temperature"] = 0.5 * payload["temperature"]
return {
"inputText": payload.pop("prompt"),
"textGenerationConfig": rename_payload_keys(
payload, {"max_tokens": "maxTokenCount", "stop": "stopSequences"}
),
}
@classmethod
def model_to_completions(cls, resp, config):
return completions.ResponsePayload(
created=int(time.time()),
object="text_completion",
model=config.model.name,
choices=[
completions.Choice(
index=idx,
text=candidate.get("outputText"),
finish_reason=None,
)
for idx, candidate in enumerate(resp.get("results", []))
],
usage=completions.CompletionsUsage(
prompt_tokens=None,
completion_tokens=None,
total_tokens=None,
),
)
@classmethod
def embeddings_to_model(cls, payload, config):
raise NotImplementedError
@classmethod
def model_to_embeddings(cls, resp, config):
raise NotImplementedError
class AI21Adapter(ProviderAdapter):
# TODO handle top_p, top_k, etc.
@classmethod
def completions_to_model(cls, payload, config):
return rename_payload_keys(
payload,
{
"stop": "stopSequences",
"n": "numResults",
"max_tokens": "maxTokens",
},
)
@classmethod
def model_to_completions(cls, resp, config):
return completions.ResponsePayload(
created=int(time.time()),
object="text_completion",
model=config.model.name,
choices=[
completions.Choice(
index=idx,
text=candidate.get("data", {}).get("text"),
finish_reason=None,
)
for idx, candidate in enumerate(resp.get("completions", []))
],
usage=completions.CompletionsUsage(
prompt_tokens=None,
completion_tokens=None,
total_tokens=None,
),
)
@classmethod
def embeddings_to_model(cls, payload, config):
raise NotImplementedError
@classmethod
def model_to_embeddings(cls, resp, config):
raise NotImplementedError
class AWSBedrockModelProvider(Enum):
AMAZON = "amazon"
COHERE = "cohere"
AI21 = "ai21"
ANTHROPIC = "anthropic"
@property
def adapter(self):
return AWS_MODEL_PROVIDER_TO_ADAPTER.get(self)
@classmethod
def of_str(cls, name: str):
name = name.lower()
for opt in cls:
if opt.name.lower() == name or opt.value.lower() == name:
return opt
AWS_MODEL_PROVIDER_TO_ADAPTER = {
AWSBedrockModelProvider.COHERE: CohereAdapter,
AWSBedrockModelProvider.ANTHROPIC: AWSBedrockAnthropicAdapter,
AWSBedrockModelProvider.AMAZON: AWSTitanAdapter,
AWSBedrockModelProvider.AI21: AI21Adapter,
}
class AWSBedrockProvider(BaseProvider):
def __init__(self, config: RouteConfig):
super().__init__(config)
if config.model.config is None or not isinstance(config.model.config, AWSBedrockConfig):
raise TypeError(f"Invalid config type {config.model.config}")
self.bedrock_config: AWSBedrockConfig = config.model.config
self._client = None
self._client_created = 0
def _client_expired(self):
if not isinstance(self.bedrock_config.aws_config, AWSRole):
return False
return (
(time.monotonic_ns() - self._client_created)
            >= (self.bedrock_config.aws_config.session_length_seconds) * 1_000_000_000
)
def get_bedrock_client(self):
if self._client is not None and not self._client_expired():
return self._client
session = boto3.Session(**self._construct_session_args())
try:
self._client, self._client_created = (
session.client(
service_name="bedrock-runtime",
**self._construct_client_args(session),
),
time.monotonic_ns(),
)
return self._client
except botocore.exceptions.UnknownServiceError as e:
raise AIGatewayConfigException(
"Cannot create AWS Bedrock client; ensure boto3/botocore "
"linked from the AWS Bedrock user guide are installed. "
"Otherwise likely missing credentials or accessing account without to "
"AWS Bedrock Private Preview"
) from e
def _construct_session_args(self):
session_args = {
"region_name": self.bedrock_config.aws_config.aws_region,
}
return {k: v for k, v in session_args.items() if v}
def _construct_client_args(self, session):
aws_config = self.bedrock_config.aws_config
if isinstance(aws_config, AWSRole):
role = session.client(service_name="sts").assume_role(
RoleArn=aws_config.aws_role_arn,
RoleSessionName="ai-gateway-bedrock",
DurationSeconds=aws_config.session_length_seconds,
)
return {
"aws_access_key_id": role["Credentials"]["AccessKeyId"],
"aws_secret_access_key": role["Credentials"]["SecretAccessKey"],
"aws_session_token": role["Credentials"]["SessionToken"],
}
elif isinstance(aws_config, AWSIdAndKey):
return {
"aws_access_key_id": aws_config.aws_access_key_id,
"aws_secret_access_key": aws_config.aws_secret_access_key,
"aws_session_token": aws_config.aws_session_token,
}
else:
return {}
@property
def _underlying_provider(self):
if (not self.config.model.name) or "." not in self.config.model.name:
return None
provider = self.config.model.name.split(".")[0]
return AWSBedrockModelProvider.of_str(provider)
@property
def underlying_provider_adapter(self) -> ProviderAdapter:
provider = self._underlying_provider
if not provider:
raise HTTPException(
status_code=422,
detail=f"Unknown AWS Bedrock model type {self._underlying_provider}",
)
adapter = provider.adapter
if not adapter:
raise HTTPException(
status_code=422,
detail=f"Don't know how to handle {self._underlying_provider} for AWS Bedrock",
)
return adapter
def _request(self, body):
try:
response = self.get_bedrock_client().invoke_model(
body=json.dumps(body).encode(),
modelId=self.config.model.name,
# defaults
# save=False,
accept="application/json",
contentType="application/json",
)
return json.loads(response.get("body").read())
# TODO work though botocore.exceptions to make this catchable.
# except botocore.exceptions.ValidationException as e:
# raise HTTPException(status_code=422, detail=str(e)) from e
except botocore.exceptions.ReadTimeoutError as e:
raise HTTPException(status_code=408) from e
async def completions(self, payload: completions.RequestPayload) -> completions.ResponsePayload:
self.check_for_model_field(payload)
payload = jsonable_encoder(payload, exclude_none=True, exclude_defaults=True)
payload = self.underlying_provider_adapter.completions_to_model(payload, self.config)
response = self._request(payload)
return self.underlying_provider_adapter.model_to_completions(response, self.config)
async def chat(self, payload: chat.RequestPayload) -> None:
# AWS Bedrock does not have a chat endpoint
raise HTTPException(
status_code=404, detail="The chat route is not available for AWS Bedrock models."
)
async def embeddings(self, payload: embeddings.RequestPayload) -> None:
# AWS Bedrock does not have an embeddings endpoint
raise HTTPException(
status_code=404, detail="The embeddings route is not available for AWS Bedrock models."
)
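# Illustrative sketch (an assumption for exposition, not part of the original module):
# the underlying Bedrock provider is inferred from the model-name prefix, mirroring
# AWSBedrockProvider._underlying_provider above. The model names are hypothetical examples.
def _example_adapter_for_model_name(model_name):
    # "amazon.titan-tg1-large" -> AWSTitanAdapter, "ai21.j2-ultra" -> AI21Adapter, etc.
    provider = AWSBedrockModelProvider.of_str(model_name.split(".")[0])
    return provider.adapter if provider else None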
| [] |
2024-01-10 | mlflow/mlflow | tests~gateway~test_integration.py | import os
from unittest.mock import patch
import pytest
import requests
import mlflow
import mlflow.gateway.utils
from mlflow.exceptions import MlflowException
from mlflow.gateway import MlflowGatewayClient, get_route, query, set_gateway_uri
from mlflow.gateway.config import Route
from mlflow.gateway.providers.ai21labs import AI21LabsProvider
from mlflow.gateway.providers.anthropic import AnthropicProvider
from mlflow.gateway.providers.bedrock import AWSBedrockProvider
from mlflow.gateway.providers.cohere import CohereProvider
from mlflow.gateway.providers.huggingface import HFTextGenerationInferenceServerProvider
from mlflow.gateway.providers.mlflow import MlflowModelServingProvider
from mlflow.gateway.providers.mosaicml import MosaicMLProvider
from mlflow.gateway.providers.openai import OpenAIProvider
from mlflow.gateway.providers.palm import PaLMProvider
from mlflow.utils.request_utils import _cached_get_request_session
from tests.gateway.tools import (
UvicornGateway,
log_completions_transformers_model,
log_sentence_transformers_model,
save_yaml,
start_mlflow_server,
stop_mlflow_server,
)
@pytest.fixture
def basic_config_dict():
return {
"routes": [
{
"name": "chat-openai",
"route_type": "llm/v1/chat",
"model": {
"name": "gpt-3.5-turbo",
"provider": "openai",
"config": {"openai_api_key": "$OPENAI_API_KEY"},
},
},
{
"name": "completions-openai",
"route_type": "llm/v1/completions",
"model": {
"name": "gpt-4",
"provider": "openai",
"config": {"openai_api_key": "$OPENAI_API_KEY"},
},
},
{
"name": "embeddings-openai",
"route_type": "llm/v1/embeddings",
"model": {
"provider": "openai",
"name": "text-embedding-ada-002",
"config": {
"openai_api_base": "https://api.openai.com/v1",
"openai_api_key": "$OPENAI_API_KEY",
},
},
},
{
"name": "completions-anthropic",
"route_type": "llm/v1/completions",
"model": {
"provider": "anthropic",
"name": "claude-instant-1.1",
"config": {
"anthropic_api_key": "$ANTHROPIC_API_KEY",
},
},
},
{
"name": "completions-ai21labs",
"route_type": "llm/v1/completions",
"model": {
"provider": "ai21labs",
"name": "j2-ultra",
"config": {
"ai21labs_api_key": "$AI21LABS_API_KEY",
},
},
},
{
"name": "completions-cohere",
"route_type": "llm/v1/completions",
"model": {
"provider": "cohere",
"name": "command",
"config": {
"cohere_api_key": "$COHERE_API_KEY",
},
},
},
{
"name": "completions-mosaicml",
"route_type": "llm/v1/completions",
"model": {
"provider": "mosaicml",
"name": "mpt-7b-instruct",
"config": {
"mosaicml_api_key": "$MOSAICML_API_KEY",
},
},
},
{
"name": "completions-palm",
"route_type": "llm/v1/completions",
"model": {
"provider": "palm",
"name": "text-bison-001",
"config": {
"palm_api_key": "$PALM_API_KEY",
},
},
},
{
"name": "chat-palm",
"route_type": "llm/v1/chat",
"model": {
"provider": "palm",
"name": "chat-bison-001",
"config": {
"palm_api_key": "$PALM_API_KEY",
},
},
},
{
"name": "chat-mosaicml",
"route_type": "llm/v1/chat",
"model": {
"provider": "mosaicml",
"name": "llama2-70b-chat",
"config": {
"mosaicml_api_key": "$MOSAICML_API_KEY",
},
},
},
{
"name": "embeddings-cohere",
"route_type": "llm/v1/embeddings",
"model": {
"provider": "cohere",
"name": "embed-english-v2.0",
"config": {
"cohere_api_key": "$COHERE_API_KEY",
},
},
},
{
"name": "embeddings-mosaicml",
"route_type": "llm/v1/embeddings",
"model": {
"provider": "mosaicml",
"name": "instructor-large",
"config": {
"mosaicml_api_key": "$MOSAICML_API_KEY",
},
},
},
{
"name": "embeddings-palm",
"route_type": "llm/v1/embeddings",
"model": {
"provider": "palm",
"name": "embedding-gecko-001",
"config": {
"palm_api_key": "$PALM_API_KEY",
},
},
},
{
"name": "chat-oss",
"route_type": "llm/v1/chat",
"model": {
"provider": "mlflow-model-serving",
"name": "mpt-chatbot",
"config": {"model_server_url": "http://127.0.0.1:5000"},
},
},
{
"name": "completions-oss",
"route_type": "llm/v1/completions",
"model": {
"provider": "mlflow-model-serving",
"name": "completion-model",
"config": {"model_server_url": "http://127.0.0.1:6000"},
},
},
{
"name": "embeddings-oss",
"route_type": "llm/v1/embeddings",
"model": {
"provider": "mlflow-model-serving",
"name": "sentence-transformers",
"config": {"model_server_url": "http://127.0.0.1:5002"},
},
},
{
"name": "completions-huggingface",
"route_type": "llm/v1/completions",
"model": {
"provider": "huggingface-text-generation-inference",
"name": "hf-falcon-7b-instruct",
"config": {"hf_server_url": "http://127.0.0.1:5000"},
},
},
{
"name": "completions-bedrock",
"route_type": "llm/v1/completions",
"model": {
"provider": "bedrock",
"name": "amazon.titan-tg1-large",
"config": {"aws_config": {"aws_region": "us-east-1"}},
},
},
]
}
@pytest.fixture(autouse=True)
def clear_uri():
mlflow.gateway.utils._gateway_uri = None
@pytest.fixture
def gateway(basic_config_dict, tmp_path):
conf = tmp_path / "config.yaml"
save_yaml(conf, basic_config_dict)
with UvicornGateway(conf) as g:
yield g
@pytest.fixture(autouse=True)
def env_setup(monkeypatch):
monkeypatch.setenv("ANTHROPIC_API_KEY", "test_anthropic_key")
monkeypatch.setenv("OPENAI_API_KEY", "test_openai_key")
monkeypatch.setenv("COHERE_API_KEY", "test_cohere_key")
monkeypatch.setenv("AI21LABS_API_KEY", "test_ai21labs_key")
monkeypatch.setenv("MOSAICML_API_KEY", "test_mosaicml_key")
monkeypatch.setenv("PALM_API_KEY", "test_palm_key")
@pytest.fixture
def serve_embeddings_model():
model_uri = log_sentence_transformers_model()
server = start_mlflow_server(port=5002, model_uri=model_uri)
yield server.url
stop_mlflow_server(server.pid)
@pytest.fixture
def serve_completions_model():
model_uri = log_completions_transformers_model()
server = start_mlflow_server(port=6000, model_uri=model_uri)
yield server.url
stop_mlflow_server(server.pid)
def test_create_gateway_client_with_declared_url(gateway):
gateway_client = MlflowGatewayClient(gateway_uri=gateway.url)
assert gateway_client.gateway_uri == gateway.url
assert isinstance(gateway_client.get_route("chat-openai"), Route)
routes = gateway_client.search_routes()
assert len(routes) == 18
assert all(isinstance(route, Route) for route in routes)
def test_openai_chat(gateway):
set_gateway_uri(gateway_uri=gateway.url)
route = get_route("chat-openai")
expected_output = {
"id": "chatcmpl-abc123",
"object": "chat.completion",
"created": 1677858242,
"model": "gpt-3.5-turbo-0301",
"choices": [
{
"message": {
"role": "assistant",
"content": "\n\nThis is a test!",
},
"finish_reason": "stop",
"index": 0,
}
],
"usage": {
"prompt_tokens": 13,
"completion_tokens": 7,
"total_tokens": 20,
},
}
data = {"messages": [{"role": "user", "content": "test"}]}
async def mock_chat(self, payload):
return expected_output
with patch.object(OpenAIProvider, "chat", mock_chat):
response = query(route=route.name, data=data)
assert response == expected_output
def test_openai_completions(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("completions-openai")
expected_output = {
"id": "chatcmpl-abc123",
"object": "text_completion",
"created": 1677858242,
"model": "gpt-4",
"choices": [{"text": "test.", "index": 0, "finish_reason": "stop"}],
"usage": {"prompt_tokens": 4, "completion_tokens": 4, "total_tokens": 11},
}
data = {"prompt": "test", "max_tokens": 50}
async def mock_completions(self, payload):
return expected_output
with patch.object(OpenAIProvider, "completions", mock_completions):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_openai_embeddings(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("embeddings-openai")
expected_output = {
"object": "list",
"data": [
{
"object": "embedding",
"embedding": [
0.1,
0.2,
0.3,
],
"index": 0,
}
],
"model": "text-embedding-ada-002",
"usage": {"prompt_tokens": 4, "total_tokens": 4},
}
data = {"input": "mock me and my test"}
async def mock_embeddings(self, payload):
return expected_output
with patch.object(OpenAIProvider, "embeddings", mock_embeddings):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_anthropic_completions(gateway):
set_gateway_uri(gateway_uri=gateway.url)
route = get_route("completions-anthropic")
expected_output = {
"id": None,
"object": "text_completion",
"created": 1677858242,
"model": "claude-instant-1.1",
"choices": [
{
"text": "test",
"index": 0,
"finish_reason": "length",
}
],
"usage": {"prompt_tokens": None, "completion_tokens": None, "total_tokens": None},
}
data = {
"prompt": "test",
"max_tokens": 500,
"temperature": 0.3,
}
async def mock_completions(self, payload):
return expected_output
with patch.object(AnthropicProvider, "completions", mock_completions):
response = query(route=route.name, data=data)
assert response == expected_output
def test_ai21labs_completions(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("completions-ai21labs")
expected_output = {
"id": None,
"object": "text_completion",
"created": 1677858242,
"model": "j2-ultra",
"choices": [{"text": "mock using MagicMock please", "index": 0, "finish_reason": "length"}],
"usage": {"prompt_tokens": None, "completion_tokens": None, "total_tokens": None},
}
data = {"prompt": "mock my test", "max_tokens": 50}
async def mock_completions(self, payload):
return expected_output
with patch.object(AI21LabsProvider, "completions", mock_completions):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_cohere_completions(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("completions-cohere")
expected_output = {
"id": None,
"object": "text_completion",
"created": 1677858242,
"model": "command",
"choices": [
{
"text": "mock using MagicMock please",
"index": 0,
"finish_reason": "stop",
}
],
"usage": {"prompt_tokens": None, "completion_tokens": None, "total_tokens": None},
}
data = {"prompt": "mock my test", "max_tokens": 50}
async def mock_completions(self, payload):
return expected_output
with patch.object(CohereProvider, "completions", mock_completions):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_mosaicml_completions(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("completions-mosaicml")
expected_output = {
"id": None,
"object": "text_completion",
"created": 1677858242,
"model": "mpt-7b-instruct",
"choices": [{"text": "mock using MagicMock please", "index": 0, "finish_reason": None}],
"usage": {"prompt_tokens": None, "completion_tokens": None, "total_tokens": None},
}
data = {"prompt": "mock my test", "max_tokens": 50}
async def mock_completions(self, payload):
return expected_output
with patch.object(MosaicMLProvider, "completions", mock_completions):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_mosaicml_chat(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("chat-mosaicml")
expected_output = {
"id": None,
"created": 1700242674,
"object": "chat.completion",
"model": "llama2-70b-chat",
"choices": [
{
"message": {
"role": "assistant",
"content": "This is a test",
},
"finish_reason": None,
"index": 0,
}
],
"usage": {
"prompt_tokens": None,
"completion_tokens": None,
"total_tokens": None,
},
}
data = {"messages": [{"role": "user", "content": "test"}]}
async def mock_chat(self, payload):
return expected_output
with patch.object(MosaicMLProvider, "chat", mock_chat):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_palm_completions(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("completions-palm")
expected_output = {
"id": None,
"object": "text_completion",
"created": 1677858242,
"model": "text-bison-001",
"choices": [
{
"text": "mock using MagicMock please",
"index": 0,
"finish_reason": None,
}
],
"usage": {"prompt_tokens": None, "completion_tokens": None, "total_tokens": None},
}
data = {"prompt": "mock my test", "max_tokens": 50}
async def mock_completions(self, payload):
return expected_output
with patch.object(PaLMProvider, "completions", mock_completions):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_palm_chat(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("chat-palm")
expected_output = {
"id": None,
"created": 1700242674,
"object": "chat.completion",
"model": "chat-bison",
"choices": [
{
"message": {
"role": "1",
"content": "Hi there! How can I help you today?",
},
"finish_reason": None,
"index": 0,
}
],
"usage": {
"prompt_tokens": None,
"completion_tokens": None,
"total_tokens": None,
},
}
data = {"messages": [{"role": "user", "content": "test"}]}
async def mock_chat(self, payload):
return expected_output
with patch.object(PaLMProvider, "chat", mock_chat):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_cohere_embeddings(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("embeddings-cohere")
expected_output = {
"object": "list",
"data": [
{
"object": "embedding",
"embedding": [
0.1,
0.2,
0.3,
],
"index": 0,
}
],
"model": "embed-english-v2.0",
"usage": {"prompt_tokens": None, "total_tokens": None},
}
data = {"input": "mock me and my test"}
async def mock_embeddings(self, payload):
return expected_output
with patch.object(CohereProvider, "embeddings", mock_embeddings):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_mosaicml_embeddings(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("embeddings-mosaicml")
expected_output = {
"object": "list",
"data": [
{
"object": "embedding",
"embedding": [
0.1,
0.2,
0.3,
],
"index": 0,
}
],
"model": "instructor-large",
"usage": {"prompt_tokens": None, "total_tokens": None},
}
data = {"input": "mock me and my test"}
async def mock_embeddings(self, payload):
return expected_output
with patch.object(MosaicMLProvider, "embeddings", mock_embeddings):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_palm_embeddings(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("embeddings-palm")
expected_output = {
"object": "list",
"data": [
{
"object": "embedding",
"embedding": [
0.1,
0.2,
0.3,
],
"index": 0,
}
],
"model": "embedding-gecko-001",
"usage": {"prompt_tokens": None, "total_tokens": None},
}
data = {"input": "mock me and my test"}
async def mock_embeddings(self, payload):
return expected_output
with patch.object(PaLMProvider, "embeddings", mock_embeddings):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_invalid_response_structure_raises(gateway):
set_gateway_uri(gateway_uri=gateway.url)
route = get_route("chat-openai")
expected_output = {
"embeddings": [[0.0, 1.0]],
"metadata": {
"input_tokens": 17,
"output_tokens": 24,
"total_tokens": 41,
"model": "gpt-3.5-turbo-0301",
"route_type": "llm/v1/chat",
},
}
data = {"messages": [{"role": "user", "content": "invalid test"}]}
async def mock_chat(self, payload):
return expected_output
def _mock_request_session(
max_retries,
backoff_factor,
backoff_jitter,
retry_codes,
raise_on_status,
):
return _cached_get_request_session(1, 1, 0.5, retry_codes, True, os.getpid())
with patch(
"mlflow.utils.request_utils._get_request_session", _mock_request_session
), patch.object(OpenAIProvider, "chat", mock_chat), pytest.raises(
MlflowException, match=".*Max retries exceeded.*"
):
query(route=route.name, data=data)
def test_invalid_response_structure_no_raises(gateway):
set_gateway_uri(gateway_uri=gateway.url)
route = get_route("chat-openai")
expected_output = {
"embeddings": [[0.0, 1.0]],
"metadata": {
"input_tokens": 17,
"output_tokens": 24,
"total_tokens": 41,
"model": "gpt-3.5-turbo-0301",
"route_type": "llm/v1/chat",
},
}
data = {"messages": [{"role": "user", "content": "invalid test"}]}
async def mock_chat(self, payload):
return expected_output
def _mock_request_session(
max_retries,
backoff_factor,
backoff_jitter,
retry_codes,
raise_on_status,
):
return _cached_get_request_session(0, 1, 0.5, retry_codes, False, os.getpid())
with patch(
"mlflow.utils.request_utils._get_request_session", _mock_request_session
), patch.object(OpenAIProvider, "chat", mock_chat), pytest.raises(
requests.exceptions.HTTPError, match=".*Internal Server Error.*"
):
query(route=route.name, data=data)
def test_invalid_query_request_raises(gateway):
set_gateway_uri(gateway_uri=gateway.url)
route = get_route("chat-openai")
expected_output = {
"id": "chatcmpl-abc123",
"object": "chat.completion",
"created": 1677858242,
"model": "gpt-3.5-turbo-0301",
"choices": [
{
"message": {
"role": "assistant",
"content": "test",
},
"finish_reason": "stop",
"index": 0,
}
],
"usage": {
"prompt_tokens": 17,
"completion_tokens": 24,
"total_tokens": 41,
},
}
data = {"text": "this is invalid"}
async def mock_chat(self, payload):
return expected_output
def _mock_request_session(
max_retries,
backoff_factor,
backoff_jitter,
retry_codes,
raise_on_status,
):
return _cached_get_request_session(2, 1, 0.5, retry_codes, True, os.getpid())
with patch(
"mlflow.utils.request_utils._get_request_session", _mock_request_session
), patch.object(OpenAIProvider, "chat", new=mock_chat), pytest.raises(
requests.exceptions.HTTPError, match="Unprocessable Entity for"
):
query(route=route.name, data=data)
def test_mlflow_chat(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("chat-oss")
expected_output = {
"id": None,
"created": 1700242674,
"object": "chat.completion",
"model": "chat-bot-9000",
"choices": [
{
"message": {
"role": "assistant",
"content": "It is a test",
},
"finish_reason": None,
"index": 0,
}
],
"usage": {
"prompt_tokens": None,
"completion_tokens": None,
"total_tokens": None,
},
}
data = {"messages": [{"role": "user", "content": "test"}]}
with patch.object(MlflowModelServingProvider, "chat", return_value=expected_output):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_mlflow_completions(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("completions-oss")
expected_output = {
"id": None,
"object": "text_completion",
"created": 1677858242,
"model": "completion-model",
"choices": [
{
"text": "test",
"index": 0,
"finish_reason": None,
}
],
"usage": {"prompt_tokens": None, "completion_tokens": None, "total_tokens": None},
}
data = {"prompt": "this is a test"}
with patch.object(MlflowModelServingProvider, "completions", return_value=expected_output):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_mlflow_embeddings(gateway):
set_gateway_uri(gateway_uri=gateway.url)
route = get_route("embeddings-oss")
expected_output = {
"object": "list",
"data": [
{
"object": "embedding",
"embedding": [
0.001,
-0.001,
],
"index": 0,
},
{
"object": "embedding",
"embedding": [
0.002,
-0.002,
],
"index": 1,
},
],
"model": "sentence-transformers",
"usage": {"prompt_tokens": None, "total_tokens": None},
}
data = {"input": ["test1", "test2"]}
with patch.object(MlflowModelServingProvider, "embeddings", return_value=expected_output):
response = query(route=route.name, data=data)
assert response == expected_output
def test_gateway_query_mlflow_embeddings_model(serve_embeddings_model, gateway):
set_gateway_uri(gateway_uri=gateway.url)
route = get_route("embeddings-oss")
data = {"input": ["test1", "test2"]}
response = query(route=route.name, data=data)
assert response["model"] == "sentence-transformers"
embeddings_response = response["data"]
assert isinstance(embeddings_response, list)
assert len(embeddings_response) == 2
usage_response = response["usage"]
assert not usage_response["prompt_tokens"]
assert not usage_response["total_tokens"]
def test_gateway_query_mlflow_completions_model(serve_completions_model, gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("completions-oss")
data = {"prompt": "test [MASK]"}
response = client.query(route=route.name, data=data)
assert response["model"] == "completion-model"
completions_response = response["choices"]
assert isinstance(completions_response, list)
assert isinstance(completions_response[0]["text"], str)
assert len(completions_response) == 1
metadata_response = response["usage"]
assert not metadata_response["prompt_tokens"]
assert not metadata_response["completion_tokens"]
def test_huggingface_completions(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("completions-huggingface")
expected_output = {
"id": None,
"object": "text_completion",
"created": 1677858242,
"model": "llm/v1/completions",
"choices": [
{
"text": "mock using MagicMock please",
"index": 0,
"finish_reason": "length",
}
],
"usage": {"prompt_tokens": 5, "completion_tokens": 10, "total_tokens": 15},
}
data = {"prompt": "mock my test", "max_tokens": 50}
async def mock_completions(self, payload):
return expected_output
with patch.object(HFTextGenerationInferenceServerProvider, "completions", mock_completions):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_bedrock_completions(gateway):
set_gateway_uri(gateway_uri=gateway.url)
route = get_route("completions-bedrock")
expected_output = {
"id": None,
"object": "text_completion",
"created": 1677858242,
"model": "amazon.titan-tg1-large",
"choices": [
{
"text": "\nThis is a test",
"index": 0,
"finish_reason": None,
}
],
"usage": {"prompt_tokens": None, "completion_tokens": None, "total_tokens": None},
}
data = {
"prompt": "test",
"max_tokens": 500,
"temperature": 0.3,
}
async def mock_completions(self, payload):
return expected_output
with patch.object(AWSBedrockProvider, "completions", mock_completions):
response = query(route=route.name, data=data)
assert response == expected_output
| [
"\n\nThis is a test!",
"test",
"It is a test",
"This is a test",
"invalid test",
"Hi there! How can I help you today?"
] |
2024-01-10 | mlflow/mlflow | mlflow~openai~__init__.py | """
The ``mlflow.openai`` module provides an API for logging and loading OpenAI models.
Credential management for OpenAI on Databricks
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. warning::
Specifying secrets for model serving with ``MLFLOW_OPENAI_SECRET_SCOPE`` is deprecated.
Use `secrets-based environment variables <https://docs.databricks.com/en/machine-learning/model-serving/store-env-variable-model-serving.html>`_
instead.
When this flavor logs a model on Databricks, it saves a YAML file with the following contents as
``openai.yaml`` if the ``MLFLOW_OPENAI_SECRET_SCOPE`` environment variable is set.
.. code-block:: yaml
OPENAI_API_BASE: {scope}:openai_api_base
OPENAI_API_KEY: {scope}:openai_api_key
OPENAI_API_KEY_PATH: {scope}:openai_api_key_path
OPENAI_API_TYPE: {scope}:openai_api_type
OPENAI_ORGANIZATION: {scope}:openai_organization
- ``{scope}`` is the value of the ``MLFLOW_OPENAI_SECRET_SCOPE`` environment variable.
- The keys are the environment variables that the ``openai-python`` package uses to
configure the API client.
- The values are the references to the secrets that store the values of the environment
variables.
When the logged model is served on Databricks, each secret will be resolved and set as the
corresponding environment variable. See https://docs.databricks.com/security/secrets/index.html
for how to set up secrets on Databricks.
"""
import itertools
import logging
import os
import warnings
from string import Formatter
from typing import Any, Dict, Optional, Set
import yaml
import mlflow
from mlflow import pyfunc
from mlflow.environment_variables import _MLFLOW_TESTING, MLFLOW_OPENAI_SECRET_SCOPE
from mlflow.exceptions import MlflowException
from mlflow.models import Model, ModelInputExample, ModelSignature
from mlflow.models.model import MLMODEL_FILE_NAME
from mlflow.models.utils import _save_example
from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE
from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.types import ColSpec, Schema, TensorSpec
from mlflow.utils.annotations import experimental
from mlflow.utils.databricks_utils import (
check_databricks_secret_scope_access,
is_in_databricks_runtime,
)
from mlflow.utils.docstring_utils import LOG_MODEL_PARAM_DOCS, format_docstring
from mlflow.utils.environment import (
_CONDA_ENV_FILE_NAME,
_CONSTRAINTS_FILE_NAME,
_PYTHON_ENV_FILE_NAME,
_REQUIREMENTS_FILE_NAME,
_mlflow_conda_env,
_process_conda_env,
_process_pip_requirements,
_PythonEnv,
_validate_env_arguments,
)
from mlflow.utils.file_utils import write_to
from mlflow.utils.model_utils import (
_add_code_from_conf_to_system_path,
_get_flavor_configuration,
_validate_and_copy_code_paths,
_validate_and_prepare_target_save_path,
)
from mlflow.utils.openai_utils import (
REQUEST_URL_CHAT,
REQUEST_URL_COMPLETIONS,
REQUEST_URL_EMBEDDINGS,
_OAITokenHolder,
_OpenAIApiConfig,
_OpenAIEnvVar,
_validate_model_params,
)
from mlflow.utils.requirements_utils import _get_pinned_requirement
FLAVOR_NAME = "openai"
MODEL_FILENAME = "model.yaml"
_PYFUNC_SUPPORTED_TASKS = ("chat.completions", "embeddings", "completions")
_logger = logging.getLogger(__name__)
@experimental
def get_default_pip_requirements():
"""
Returns:
A list of default pip requirements for MLflow Models produced by this flavor.
Calls to :func:`save_model()` and :func:`log_model()` produce a pip environment
that, at minimum, contains these requirements.
"""
return list(map(_get_pinned_requirement, ["openai", "tiktoken", "tenacity"]))
@experimental
def get_default_conda_env():
"""
Returns:
The default Conda environment for MLflow Models produced by calls to
:func:`save_model()` and :func:`log_model()`.
"""
return _mlflow_conda_env(additional_pip_deps=get_default_pip_requirements())
def _get_class_to_task_mapping():
from openai.api_resources import (
Audio,
ChatCompletion,
Completion,
Deployment,
Edit,
Embedding,
Engine,
File,
FineTune,
Image,
Moderation,
)
from openai.api_resources import (
Model as OpenAIModel,
)
return {
Audio: Audio.OBJECT_NAME,
ChatCompletion: ChatCompletion.OBJECT_NAME,
Completion: Completion.OBJECT_NAME,
Edit: Edit.OBJECT_NAME,
Deployment: Deployment.OBJECT_NAME,
Embedding: Embedding.OBJECT_NAME,
Engine: Engine.OBJECT_NAME,
File: File.OBJECT_NAME,
Image: Image.OBJECT_NAME,
FineTune: FineTune.OBJECT_NAME,
OpenAIModel: OpenAIModel.OBJECT_NAME,
Moderation: "moderations",
}
def _class_to_task(cls):
task = _get_class_to_task_mapping().get(cls)
if task is None:
raise mlflow.MlflowException(
f"Unsupported class: {cls}", error_code=INVALID_PARAMETER_VALUE
)
return task
def _get_model_name(model):
import openai
if isinstance(model, str):
return model
elif isinstance(model, openai.Model):
return model.id
else:
raise mlflow.MlflowException(
f"Unsupported model type: {type(model)}", error_code=INVALID_PARAMETER_VALUE
)
def _get_task_name(task):
if isinstance(task, str):
return task
elif isinstance(task, type):
return _class_to_task(task)
else:
raise mlflow.MlflowException(
f"Unsupported task type: {type(task)}", error_code=INVALID_PARAMETER_VALUE
)
def _get_api_config() -> _OpenAIApiConfig:
"""Gets the parameters and configuration of the OpenAI API connected to."""
import openai
api_type = os.getenv(_OpenAIEnvVar.OPENAI_API_TYPE.value, openai.api_type)
api_version = os.getenv(_OpenAIEnvVar.OPENAI_API_VERSION.value, openai.api_version)
api_base = os.getenv(_OpenAIEnvVar.OPENAI_API_BASE.value, None)
engine = os.getenv(_OpenAIEnvVar.OPENAI_ENGINE.value, None)
deployment_id = os.getenv(_OpenAIEnvVar.OPENAI_DEPLOYMENT_NAME.value, None)
if api_type in ("azure", "azure_ad", "azuread"):
batch_size = 16
max_tokens_per_minute = 60_000
else:
# The maximum batch size is 2048:
# https://github.com/openai/openai-python/blob/b82a3f7e4c462a8a10fa445193301a3cefef9a4a/openai/embeddings_utils.py#L43
# We use a smaller batch size to be safe.
batch_size = 1024
max_tokens_per_minute = 90_000
return _OpenAIApiConfig(
api_type=api_type,
batch_size=batch_size,
max_requests_per_minute=3_500,
max_tokens_per_minute=max_tokens_per_minute,
api_base=api_base,
api_version=api_version,
engine=engine,
deployment_id=deployment_id,
)
def _get_openai_package_version():
import openai
try:
return openai.__version__
except AttributeError:
# openai < 0.27.5 doesn't have a __version__ attribute
return openai.version.VERSION
def _log_secrets_yaml(local_model_dir, scope):
with open(os.path.join(local_model_dir, "openai.yaml"), "w") as f:
yaml.safe_dump({e.value: f"{scope}:{e.secret_key}" for e in _OpenAIEnvVar}, f)
def _parse_format_fields(s) -> Set[str]:
"""Parses format fields from a given string, e.g. "Hello {name}" -> ["name"]."""
return {fn for _, fn, _, _ in Formatter().parse(s) if fn is not None}
def _get_input_schema(task, content):
if content:
formatter = _ContentFormatter(task, content)
variables = formatter.variables
if len(variables) == 1:
return Schema([ColSpec(type="string")])
elif len(variables) > 1:
return Schema([ColSpec(name=v, type="string") for v in variables])
else:
return Schema([ColSpec(type="string")])
else:
return Schema([ColSpec(type="string")])
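# Illustrative sketch (an assumption for exposition, not part of the original module):
# the inferred input schema depends on how many format fields the provided content has.
# The prompt and message templates below are hypothetical.
def _example_input_schemas():
    # A single format field yields one unnamed string column.
    single = _get_input_schema("completions", "Summarize the following text: {text}")
    # Multiple format fields yield one named string column per variable ("city", "topic").
    multi = _get_input_schema(
        "chat.completions",
        [{"role": "user", "content": "Write a note about {topic} for readers in {city}."}],
    )
    return single, multi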
@experimental
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name=FLAVOR_NAME))
def save_model(
model,
task,
path,
conda_env=None,
code_paths=None,
mlflow_model=None,
signature: ModelSignature = None,
input_example: ModelInputExample = None,
pip_requirements=None,
extra_pip_requirements=None,
metadata=None,
example_no_conversion=False,
**kwargs,
):
"""
Save an OpenAI model to a path on the local file system.
Args:
model: The OpenAI model name or reference instance, e.g.,
``openai.Model.retrieve("gpt-3.5-turbo")``.
task: The task the model is performing, e.g., ``openai.ChatCompletion`` or
``'chat.completions'``.
path: Local path where the model is to be saved.
conda_env: {{ conda_env }}
code_paths: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path when the model is loaded.
mlflow_model: :py:mod:`mlflow.models.Model` this flavor is being added to.
signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
input_example: {{ input_example }}
pip_requirements: {{ pip_requirements }}
extra_pip_requirements: {{ extra_pip_requirements }}
metadata: Custom metadata dictionary passed to the model and stored in the MLmodel file.
.. Note:: Experimental: This parameter may change or be removed in a future
release without warning.
example_no_conversion: {{ example_no_conversion }}
kwargs: Keyword arguments specific to the OpenAI task, such as the ``messages`` (see
:ref:`mlflow.openai.messages` for more details on this parameter)
or ``top_p`` value to use for chat completion.
.. code-block:: python
import mlflow
import openai
# Chat
mlflow.openai.save_model(
model="gpt-3.5-turbo",
task=openai.ChatCompletion,
messages=[{"role": "user", "content": "Tell me a joke."}],
path="model",
)
# Completions
mlflow.openai.save_model(
model="text-davinci-002",
task=openai.Completion,
prompt="{text}. The general sentiment of the text is",
path="model",
)
# Embeddings
mlflow.openai.save_model(
model="text-embedding-ada-002",
task=openai.Embedding,
path="model",
)
"""
import numpy as np
_validate_env_arguments(conda_env, pip_requirements, extra_pip_requirements)
path = os.path.abspath(path)
_validate_and_prepare_target_save_path(path)
code_dir_subpath = _validate_and_copy_code_paths(code_paths, path)
task = _get_task_name(task)
if mlflow_model is None:
mlflow_model = Model()
if signature is not None:
if signature.params:
_validate_model_params(
task, kwargs, {p.name: p.default for p in signature.params.params}
)
mlflow_model.signature = signature
elif task == "chat.completions":
messages = kwargs.get("messages", [])
if messages and not (
all(isinstance(m, dict) for m in messages) and all(map(_is_valid_message, messages))
):
raise mlflow.MlflowException.invalid_parameter_value(
"If `messages` is provided, it must be a list of dictionaries with keys "
"'role' and 'content'."
)
mlflow_model.signature = ModelSignature(
inputs=_get_input_schema(task, messages),
outputs=Schema([ColSpec(type="string", name=None)]),
)
elif task == "completions":
prompt = kwargs.get("prompt")
mlflow_model.signature = ModelSignature(
inputs=_get_input_schema(task, prompt),
outputs=Schema([ColSpec(type="string", name=None)]),
)
elif task == "embeddings":
mlflow_model.signature = ModelSignature(
inputs=Schema([ColSpec(type="string", name=None)]),
outputs=Schema([TensorSpec(type=np.dtype("float64"), shape=(-1,))]),
)
if input_example is not None:
_save_example(mlflow_model, input_example, path, example_no_conversion)
if metadata is not None:
mlflow_model.metadata = metadata
model_data_path = os.path.join(path, MODEL_FILENAME)
model_dict = {
"model": _get_model_name(model),
"task": task,
**kwargs,
}
with open(model_data_path, "w") as f:
yaml.safe_dump(model_dict, f)
if task in _PYFUNC_SUPPORTED_TASKS:
pyfunc.add_to_model(
mlflow_model,
loader_module="mlflow.openai",
data=MODEL_FILENAME,
conda_env=_CONDA_ENV_FILE_NAME,
python_env=_PYTHON_ENV_FILE_NAME,
code=code_dir_subpath,
)
mlflow_model.add_flavor(
FLAVOR_NAME,
openai_version=_get_openai_package_version(),
data=MODEL_FILENAME,
code=code_dir_subpath,
)
mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))
if is_in_databricks_runtime():
if scope := MLFLOW_OPENAI_SECRET_SCOPE.get():
url = "https://docs.databricks.com/en/machine-learning/model-serving/store-env-variable-model-serving.html"
warnings.warn(
"Specifying secrets for model serving with `MLFLOW_OPENAI_SECRET_SCOPE` is "
f"deprecated. Use secrets-based environment variables ({url}) instead.",
FutureWarning,
)
check_databricks_secret_scope_access(scope)
_log_secrets_yaml(path, scope)
if conda_env is None:
if pip_requirements is None:
default_reqs = get_default_pip_requirements()
inferred_reqs = mlflow.models.infer_pip_requirements(
path, FLAVOR_NAME, fallback=default_reqs
)
default_reqs = sorted(set(inferred_reqs).union(default_reqs))
else:
default_reqs = None
conda_env, pip_requirements, pip_constraints = _process_pip_requirements(
default_reqs,
pip_requirements,
extra_pip_requirements,
)
else:
conda_env, pip_requirements, pip_constraints = _process_conda_env(conda_env)
with open(os.path.join(path, _CONDA_ENV_FILE_NAME), "w") as f:
yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
# Save `constraints.txt` if necessary
if pip_constraints:
write_to(os.path.join(path, _CONSTRAINTS_FILE_NAME), "\n".join(pip_constraints))
# Save `requirements.txt`
write_to(os.path.join(path, _REQUIREMENTS_FILE_NAME), "\n".join(pip_requirements))
_PythonEnv.current().to_yaml(os.path.join(path, _PYTHON_ENV_FILE_NAME))
@experimental
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name=FLAVOR_NAME))
def log_model(
model,
task,
artifact_path,
conda_env=None,
code_paths=None,
registered_model_name=None,
signature: ModelSignature = None,
input_example: ModelInputExample = None,
await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,
pip_requirements=None,
extra_pip_requirements=None,
metadata=None,
example_no_conversion=False,
**kwargs,
):
"""
Log an OpenAI model as an MLflow artifact for the current run.
Args:
model: The OpenAI model name or reference instance, e.g.,
``openai.Model.retrieve("gpt-3.5-turbo")``.
task: The task the model is performing, e.g., ``openai.ChatCompletion`` or
``'chat.completions'``.
artifact_path: Run-relative artifact path.
conda_env: {{ conda_env }}
code_paths: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path when the model is loaded.
registered_model_name: If given, create a model version under
``registered_model_name``, also creating a registered model if one
with the given name does not exist.
signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
input_example: {{ input_example }}
await_registration_for: Number of seconds to wait for the model version to finish
being created and is in ``READY`` status. By default, the function
waits for five minutes. Specify 0 or None to skip waiting.
pip_requirements: {{ pip_requirements }}
extra_pip_requirements: {{ extra_pip_requirements }}
metadata: Custom metadata dictionary passed to the model and stored in the MLmodel file.
.. Note:: Experimental: This parameter may change or be removed in a future
release without warning.
example_no_conversion: {{ example_no_conversion }}
kwargs: Keyword arguments specific to the OpenAI task, such as the ``messages`` (see
:ref:`mlflow.openai.messages` for more details on this parameter)
or ``top_p`` value to use for chat completion.
Returns:
A :py:class:`ModelInfo <mlflow.models.model.ModelInfo>` instance that contains the
metadata of the logged model.
.. code-block:: python
import mlflow
import openai
import pandas as pd
# Chat
with mlflow.start_run():
info = mlflow.openai.log_model(
model="gpt-3.5-turbo",
task=openai.ChatCompletion,
messages=[{"role": "user", "content": "Tell me a joke about {animal}."}],
artifact_path="model",
)
model = mlflow.pyfunc.load_model(info.model_uri)
df = pd.DataFrame({"animal": ["cats", "dogs"]})
print(model.predict(df))
# Embeddings
with mlflow.start_run():
info = mlflow.openai.log_model(
model="text-embedding-ada-002",
task=openai.Embedding,
artifact_path="embeddings",
)
model = mlflow.pyfunc.load_model(info.model_uri)
print(model.predict(["hello", "world"]))
"""
return Model.log(
artifact_path=artifact_path,
flavor=mlflow.openai,
registered_model_name=registered_model_name,
model=model,
task=task,
conda_env=conda_env,
code_paths=code_paths,
signature=signature,
input_example=input_example,
await_registration_for=await_registration_for,
pip_requirements=pip_requirements,
extra_pip_requirements=extra_pip_requirements,
metadata=metadata,
example_no_conversion=example_no_conversion,
**kwargs,
)
def _load_model(path):
with open(path) as f:
return yaml.safe_load(f)
def _is_valid_message(d):
return isinstance(d, dict) and "content" in d and "role" in d
class _ContentFormatter:
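"""Render completion prompts or chat messages from a stored template.
Illustrative sketch with hypothetical values: for the ``completions`` task,
``_ContentFormatter("completions", "Hi {name}")`` yields ``variables == ["name"]``
and ``format(name="Ann")`` returns ``"Hi Ann"``; for ``chat.completions`` with no
template, the default wraps the supplied value in a single
``{"role": "user", "content": "{content}"}`` user message.
"""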
def __init__(self, task, template=None):
if task == "completions":
template = template or "{prompt}"
if not isinstance(template, str):
raise mlflow.MlflowException.invalid_parameter_value(
f"Template for task {task} expects type `str`, but got {type(template)}."
)
self.template = template
self.format_fn = self.format_prompt
self.variables = sorted(_parse_format_fields(self.template))
elif task == "chat.completions":
if not template:
template = [{"role": "user", "content": "{content}"}]
if not all(map(_is_valid_message, template)):
raise mlflow.MlflowException.invalid_parameter_value(
f"Template for task {task} expects type `dict` with keys 'content' "
f"and 'role', but got {type(template)}."
)
self.template = template.copy()
self.format_fn = self.format_chat
self.variables = sorted(
set(
itertools.chain.from_iterable(
_parse_format_fields(message.get("content"))
| _parse_format_fields(message.get("role"))
for message in self.template
)
)
)
if not self.variables:
self.template.append({"role": "user", "content": "{content}"})
self.variables.append("content")
else:
raise mlflow.MlflowException.invalid_parameter_value(
f"Task type ``{task}`` is not supported for formatting."
)
def format(self, **params):
if missing_params := set(self.variables) - set(params):
raise mlflow.MlflowException.invalid_parameter_value(
f"Expected parameters {self.variables} to be provided, "
f"only got {list(params)}, {list(missing_params)} are missing."
)
return self.format_fn(**params)
def format_prompt(self, **params):
return self.template.format(**{v: params[v] for v in self.variables})
def format_chat(self, **params):
format_args = {v: params[v] for v in self.variables}
return [
{
"role": message.get("role").format(**format_args),
"content": message.get("content").format(**format_args),
}
for message in self.template
]
def _first_string_column(pdf):
iter_str_cols = (c for c, v in pdf.iloc[0].items() if isinstance(v, str))
col = next(iter_str_cols, None)
if col is None:
raise mlflow.MlflowException.invalid_parameter_value(
f"Could not find a string column in the input data: {pdf.dtypes.to_dict()}"
)
return col
class _OpenAIWrapper:
def __init__(self, model):
task = model.pop("task")
if task not in _PYFUNC_SUPPORTED_TASKS:
raise mlflow.MlflowException.invalid_parameter_value(
f"Unsupported task: {task}. Supported tasks: {_PYFUNC_SUPPORTED_TASKS}."
)
self.model = model
self.task = task
self.api_config = _get_api_config()
self.api_token = _OAITokenHolder(self.api_config.api_type)
# If the same parameter exists in self.model & self.api_config,
# we use the parameter from self.model
self.envs = {
x: getattr(self.api_config, x)
for x in ["api_base", "api_version", "api_type", "engine", "deployment_id"]
if getattr(self.api_config, x) is not None and x not in self.model
}
api_type = self.model.get("api_type") or self.envs.get("api_type")
if api_type in ("azure", "azure_ad", "azuread"):
deployment_id = self.model.get("deployment_id") or self.envs.get("deployment_id")
if self.model.get("engine") or self.envs.get("engine"):
# Avoid using both parameters as they serve the same purpose
# Invalid inputs:
# - Wrong engine + correct/wrong deployment_id
# - No engine + wrong deployment_id
# Valid inputs:
# - Correct engine + correct/wrong deployment_id
# - No engine + correct deployment_id
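# Illustrative, hypothetical examples of valid configs: {"engine": "gpt-35-turbo"}
# alone, or {"deployment_id": "my-azure-deployment"} alone.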
if deployment_id is not None:
_logger.warning(
"Both engine and deployment_id are set. "
"Using engine as it takes precedence."
)
elif deployment_id is None:
raise MlflowException(
"Either engine or deployment_id must be set for Azure OpenAI API",
)
if self.task != "embeddings":
self._setup_completions()
def _setup_completions(self):
if self.task == "chat.completions":
self.template = self.model.get("messages", [])
else:
self.template = self.model.get("prompt")
self.formater = _ContentFormatter(self.task, self.template)
def format_completions(self, params_list):
return [self.formater.format(**params) for params in params_list]
def get_params_list(self, data):
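# Illustrative example: with variables == ["animal"] and a DataFrame whose "animal"
# column is ["cats", "dogs"], this returns [{"animal": "cats"}, {"animal": "dogs"}].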
if len(self.formater.variables) == 1:
variable = self.formater.variables[0]
if variable in data.columns:
return data[[variable]].to_dict(orient="records")
else:
first_string_column = _first_string_column(data)
return [{variable: s} for s in data[first_string_column]]
else:
return data[self.formater.variables].to_dict(orient="records")
def _construct_request_url(self, task_url, default_url):
api_type = self.model.get("api_type") or self.envs.get("api_type")
if api_type in ("azure", "azure_ad", "azuread"):
api_base = self.envs.get("api_base")
api_version = self.envs.get("api_version")
deployment_id = self.envs.get("deployment_id")
return (
f"{api_base}/openai/deployments/{deployment_id}/"
f"{task_url}?api-version={api_version}"
)
else:
return default_url
def _predict_chat(self, data, params):
from mlflow.openai.api_request_parallel_processor import process_api_requests
_validate_model_params(self.task, self.model, params)
messages_list = self.format_completions(self.get_params_list(data))
requests = [{**self.model, **params, "messages": messages} for messages in messages_list]
request_url = self._construct_request_url("chat/completions", REQUEST_URL_CHAT)
results = process_api_requests(
requests,
request_url,
api_token=self.api_token,
max_requests_per_minute=self.api_config.max_requests_per_minute,
max_tokens_per_minute=self.api_config.max_tokens_per_minute,
)
return [r["choices"][0]["message"]["content"] for r in results]
def _predict_completions(self, data, params):
from mlflow.openai.api_request_parallel_processor import process_api_requests
_validate_model_params(self.task, self.model, params)
prompts_list = self.format_completions(self.get_params_list(data))
batch_size = params.pop("batch_size", self.api_config.batch_size)
_logger.debug(f"Requests are being batched by {batch_size} samples.")
requests = [
{
**self.model,
**params,
"prompt": prompts_list[i : i + batch_size],
}
for i in range(0, len(prompts_list), batch_size)
]
request_url = self._construct_request_url("completions", REQUEST_URL_COMPLETIONS)
results = process_api_requests(
requests,
request_url,
api_token=self.api_token,
max_requests_per_minute=self.api_config.max_requests_per_minute,
max_tokens_per_minute=self.api_config.max_tokens_per_minute,
)
return [row["text"] for batch in results for row in batch["choices"]]
def _predict_embeddings(self, data, params):
from mlflow.openai.api_request_parallel_processor import process_api_requests
_validate_model_params(self.task, self.model, params)
batch_size = params.pop("batch_size", self.api_config.batch_size)
_logger.debug(f"Requests are being batched by {batch_size} samples.")
first_string_column = _first_string_column(data)
texts = data[first_string_column].tolist()
requests = [
{
**self.model,
**params,
"input": texts[i : i + batch_size],
}
for i in range(0, len(texts), batch_size)
]
request_url = self._construct_request_url("embeddings", REQUEST_URL_EMBEDDINGS)
results = process_api_requests(
requests,
request_url,
api_token=self.api_token,
max_requests_per_minute=self.api_config.max_requests_per_minute,
max_tokens_per_minute=self.api_config.max_tokens_per_minute,
)
return [row["embedding"] for batch in results for row in batch["data"]]
def predict(self, data, params: Optional[Dict[str, Any]] = None):
"""
Args:
data: Model input data.
params: Additional parameters to pass to the model for inference.
.. Note:: Experimental: This parameter may change or be removed in a future
release without warning.
Returns:
Model predictions.
"""
self.api_token.validate()
if self.task == "chat.completions":
return self._predict_chat(data, params or {})
elif self.task == "completions":
return self._predict_completions(data, params or {})
elif self.task == "embeddings":
return self._predict_embeddings(data, params or {})
class _TestOpenAIWrapper(_OpenAIWrapper):
"""A wrapper class that should be used for testing purposes only."""
def predict(
self, data, params: Optional[Dict[str, Any]] = None # pylint: disable=unused-argument
):
"""
Args:
data: Model input data.
params: Additional parameters to pass to the model for inference.
.. Note:: Experimental: This parameter may change or be removed in a future
release without warning.
Returns:
Model predictions.
"""
from mlflow.utils.openai_utils import _mock_openai_request
with _mock_openai_request():
return super().predict(data)
def _load_pyfunc(path):
"""Loads PyFunc implementation. Called by ``pyfunc.load_model``.
Args:
path: Local filesystem path to the MLflow Model with the ``openai`` flavor.
"""
wrapper_cls = _TestOpenAIWrapper if _MLFLOW_TESTING.get() else _OpenAIWrapper
return wrapper_cls(_load_model(path))
@experimental
def load_model(model_uri, dst_path=None):
"""Load an OpenAI model from a local file or a run.
Args:
model_uri: The location, in URI format, of the MLflow model. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/tracking.html#
artifact-locations>`_.
dst_path: The local filesystem path to which to download the model artifact.
This directory must already exist. If unspecified, a local output
path will be created.
Returns:
A dictionary representing the OpenAI model.
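A minimal usage sketch (the run ID below is a placeholder for a real logged run):
.. code-block:: python
import mlflow.openai
model_dict = mlflow.openai.load_model("runs:/<mlflow_run_id>/model")
print(model_dict)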
"""
local_model_path = _download_artifact_from_uri(artifact_uri=model_uri, output_path=dst_path)
flavor_conf = _get_flavor_configuration(local_model_path, FLAVOR_NAME)
_add_code_from_conf_to_system_path(local_model_path, flavor_conf)
model_data_path = os.path.join(local_model_path, flavor_conf.get("data", MODEL_FILENAME))
return _load_model(model_data_path)
| [
"content",
"{content}"
] |
2024-01-10 | mlflow/mlflow | examples~openai~spark_udf.py | import os
import openai
from pyspark.sql import SparkSession
import mlflow
assert "OPENAI_API_KEY" in os.environ, "Please set the OPENAI_API_KEY environment variable."
with mlflow.start_run():
model_info = mlflow.openai.log_model(
model="gpt-3.5-turbo",
task=openai.ChatCompletion,
messages=[{"role": "user", "content": "Tell me a {adjective} joke about {animal}."}],
artifact_path="model",
)
with SparkSession.builder.getOrCreate() as spark:
spark_udf = mlflow.pyfunc.spark_udf(
spark=spark, model_uri=model_info.model_uri, result_type="string"
)
df = spark.createDataFrame(
[
("funny", "cats"),
("scary", "dogs"),
("sad", "rabbits"),
],
["adjective", "animal"],
)
df.withColumn("answer", spark_udf("adjective", "animal")).show()
| [
"Tell me a {adjective} joke about {animal}."
] |
2024-01-10 | mlflow/mlflow | examples~langchain~retriever_chain.py | import os
import tempfile
from langchain.document_loaders import TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
import mlflow
assert "OPENAI_API_KEY" in os.environ, "Please set the OPENAI_API_KEY environment variable."
with tempfile.TemporaryDirectory() as temp_dir:
persist_dir = os.path.join(temp_dir, "faiss_index")
# Create the vector database and persist it to a local filesystem folder
loader = TextLoader("tests/langchain/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
db = FAISS.from_documents(docs, embeddings)
db.save_local(persist_dir)
# Define a loader function to recall the retriever from the persisted vectorstore
def load_retriever(persist_directory):
embeddings = OpenAIEmbeddings()
vectorstore = FAISS.load_local(persist_directory, embeddings)
return vectorstore.as_retriever()
# Log the retriever with the loader function
with mlflow.start_run() as run:
logged_model = mlflow.langchain.log_model(
db.as_retriever(),
artifact_path="retriever",
loader_fn=load_retriever,
persist_dir=persist_dir,
)
# Load the retriever chain
loaded_model = mlflow.pyfunc.load_model(logged_model.model_uri)
print(loaded_model.predict([{"query": "What did the president say about Ketanji Brown Jackson"}]))
| [] |
2024-01-10 | mlflow/mlflow | examples~langchain~simple_agent.py | import os
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.llms import OpenAI
import mlflow
# Note: Ensure that the package 'google-search-results' is installed via PyPI to run this example
# and that you have accounts with SerpAPI and OpenAI to use their APIs.
# Ensuring necessary API keys are set
assert "OPENAI_API_KEY" in os.environ, "Please set the OPENAI_API_KEY environment variable."
assert "SERPAPI_API_KEY" in os.environ, "Please set the SERPAPI_API_KEY environment variable."
# Load the language model for agent control
llm = OpenAI(temperature=0)
# Next, let's load some tools to use. Note that the `llm-math` tool uses an LLM, so we need to pass that in.
tools = load_tools(["serpapi", "llm-math"], llm=llm)
# Finally, let's initialize an agent with the tools, the language model, and the type of agent we want to use.
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
# Log the agent in an MLflow run
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(agent, "langchain_model")
# Load the logged agent model for prediction
loaded_model = mlflow.pyfunc.load_model(logged_model.model_uri)
# Generate an inference result using the loaded model
question = "What was the high temperature in SF yesterday in Fahrenheit? What is that number raised to the .023 power?"
answer = loaded_model.predict([{"input": question}])
print(answer)
| [] |
2024-01-10 | mlflow/mlflow | tests~gateway~providers~test_anthropic.py | from unittest import mock
import pytest
from aiohttp import ClientTimeout
from fastapi import HTTPException
from fastapi.encoders import jsonable_encoder
from pydantic import ValidationError
from mlflow.gateway.config import RouteConfig
from mlflow.gateway.constants import (
MLFLOW_AI_GATEWAY_ANTHROPIC_MAXIMUM_MAX_TOKENS,
MLFLOW_GATEWAY_ROUTE_TIMEOUT_SECONDS,
)
from mlflow.gateway.providers.anthropic import AnthropicProvider
from mlflow.gateway.schemas import chat, completions, embeddings
from tests.gateway.tools import MockAsyncResponse
def completions_response():
return {
"completion": "Here is a basic overview of how a car works:\n\n1. The engine. "
"The engine is the power source that makes the car move.",
"stop_reason": "max_tokens",
"model": "claude-instant-1.1",
"truncated": False,
"stop": None,
"log_id": "dee173f87ddf1357da639dee3c38d833",
"exception": None,
"headers": {"Content-Type": "application/json"},
}
def completions_config():
return {
"name": "completions",
"route_type": "llm/v1/completions",
"model": {
"provider": "anthropic",
"name": "claude-instant-1",
"config": {
"anthropic_api_key": "key",
},
},
}
def parsed_completions_response():
return {
"id": None,
"object": "text_completion",
"created": 1677858242,
"model": "claude-instant-1.1",
"choices": [
{
"text": "Here is a basic overview of how a car works:\n\n1. The engine. "
"The engine is the power source that makes the car move.",
"index": 0,
"finish_reason": "length",
}
],
"usage": {"prompt_tokens": None, "completion_tokens": None, "total_tokens": None},
}
@pytest.mark.asyncio
async def test_completions():
resp = completions_response()
config = completions_config()
with mock.patch("time.time", return_value=1677858242), mock.patch(
"aiohttp.ClientSession.post", return_value=MockAsyncResponse(resp)
) as mock_post:
provider = AnthropicProvider(RouteConfig(**config))
payload = {
"prompt": "How does a car work?",
"max_tokens": 200,
"stop": ["foobazbardiddly"],
"temperature": 1.0,
}
response = await provider.completions(completions.RequestPayload(**payload))
assert jsonable_encoder(response) == parsed_completions_response()
mock_post.assert_called_once_with(
"https://api.anthropic.com/v1/complete",
json={
"model": "claude-instant-1",
"temperature": 0.5,
"max_tokens_to_sample": 200,
"prompt": "\n\nHuman: How does a car work?\n\nAssistant:",
"stop_sequences": ["foobazbardiddly"],
},
timeout=ClientTimeout(total=MLFLOW_GATEWAY_ROUTE_TIMEOUT_SECONDS),
)
@pytest.mark.asyncio
async def test_completions_with_default_max_tokens():
resp = completions_response()
config = completions_config()
with mock.patch("time.time", return_value=1677858242), mock.patch(
"aiohttp.ClientSession.post", return_value=MockAsyncResponse(resp)
) as mock_post:
provider = AnthropicProvider(RouteConfig(**config))
payload = {"prompt": "How does a car work?"}
response = await provider.completions(completions.RequestPayload(**payload))
assert jsonable_encoder(response) == parsed_completions_response()
mock_post.assert_called_once_with(
"https://api.anthropic.com/v1/complete",
json={
"model": "claude-instant-1",
"temperature": 0.0,
"max_tokens_to_sample": 200000,
"prompt": "\n\nHuman: How does a car work?\n\nAssistant:",
},
timeout=ClientTimeout(total=MLFLOW_GATEWAY_ROUTE_TIMEOUT_SECONDS),
)
@pytest.mark.asyncio
async def test_completions_throws_with_invalid_max_tokens_too_large():
config = completions_config()
provider = AnthropicProvider(RouteConfig(**config))
payload = {"prompt": "Would Fozzie or Kermet win in a fight?", "max_tokens": 1000001}
with pytest.raises(HTTPException, match=r".*") as e:
await provider.completions(completions.RequestPayload(**payload))
assert (
"Invalid value for max_tokens: cannot exceed "
f"{MLFLOW_AI_GATEWAY_ANTHROPIC_MAXIMUM_MAX_TOKENS}" in e.value.detail
)
assert e.value.status_code == 422
@pytest.mark.asyncio
async def test_completions_throws_with_unsupported_n():
config = completions_config()
provider = AnthropicProvider(RouteConfig(**config))
payload = {
"prompt": "Would Fozzie or Kermet win in a fight?",
"n": 5,
"max_tokens": 10,
}
with pytest.raises(HTTPException, match=r".*") as e:
await provider.completions(completions.RequestPayload(**payload))
assert "'n' must be '1' for the Anthropic provider" in e.value.detail
assert e.value.status_code == 422
@pytest.mark.asyncio
async def test_completions_throws_with_top_p_defined():
config = completions_config()
provider = AnthropicProvider(RouteConfig(**config))
payload = {"prompt": "Would Fozzie or Kermet win in a fight?", "max_tokens": 500, "top_p": 0.6}
with pytest.raises(HTTPException, match=r".*") as e:
await provider.completions(completions.RequestPayload(**payload))
assert "Cannot set both 'temperature' and 'top_p' parameters. Please" in e.value.detail
assert e.value.status_code == 422
@pytest.mark.asyncio
async def test_completions_throws_with_stream_set_to_true():
config = completions_config()
provider = AnthropicProvider(RouteConfig(**config))
payload = {
"prompt": "Could the Millennium Falcon fight a Borg Cube and win?",
"max_tokens": 5000,
"stream": "true",
}
with pytest.raises(HTTPException, match=r".*") as e:
await provider.completions(completions.RequestPayload(**payload))
assert "Setting the 'stream' parameter to 'true' is not supported" in e.value.detail
assert e.value.status_code == 422
def chat_config():
return {
"name": "chat",
"route_type": "llm/v1/chat",
"model": {
"provider": "anthropic",
"name": "claude-instant-1",
"config": {
"anthropic_api_key": "key",
},
},
}
@pytest.mark.asyncio
async def test_chat_is_not_supported_for_anthropic():
config = chat_config()
provider = AnthropicProvider(RouteConfig(**config))
payload = {
"messages": [{"role": "user", "content": "Claude, can you chat with me? I'm lonely."}]
}
with pytest.raises(HTTPException, match=r".*") as e:
await provider.chat(chat.RequestPayload(**payload))
assert "The chat route is not available for Anthropic models" in e.value.detail
assert e.value.status_code == 404
def embedding_config():
return {
"name": "embeddings",
"route_type": "llm/v1/embeddings",
"model": {
"provider": "anthropic",
"name": "claude-1.3-100k",
"config": {
"anthropic_api_key": "key",
},
},
}
@pytest.mark.asyncio
async def test_embeddings_are_not_supported_for_anthropic():
config = embedding_config()
provider = AnthropicProvider(RouteConfig(**config))
payload = {"input": "give me that sweet, sweet vector, please."}
with pytest.raises(HTTPException, match=r".*") as e:
await provider.embeddings(embeddings.RequestPayload(**payload))
assert "The embeddings route is not available for Anthropic models" in e.value.detail
assert e.value.status_code == 404
@pytest.mark.asyncio
async def test_param_model_is_not_permitted():
config = completions_config()
provider = AnthropicProvider(RouteConfig(**config))
payload = {
"prompt": "This should fail",
"max_tokens": 5000,
"model": "something-else",
}
with pytest.raises(HTTPException, match=r".*") as e:
await provider.completions(completions.RequestPayload(**payload))
assert "The parameter 'model' is not permitted" in e.value.detail
assert e.value.status_code == 422
@pytest.mark.parametrize("prompt", [{"set1", "set2"}, ["list1"], [1], ["list1", "list2"], [1, 2]])
@pytest.mark.asyncio
async def test_completions_throws_if_prompt_contains_non_string(prompt):
config = completions_config()
provider = AnthropicProvider(RouteConfig(**config))
payload = {"prompt": prompt}
with pytest.raises(ValidationError, match=r"prompt"):
await provider.completions(completions.RequestPayload(**payload))
| [
"Claude, can you chat with me? I'm lonely."
] |
2024-01-10 | mlflow/mlflow | tests~metrics~genai~test_genai_metrics.py | import inspect
import re
from unittest import mock
import numpy as np
import pandas as pd
import pytest
from mlflow.exceptions import MlflowException
from mlflow.metrics.genai import EvaluationExample, model_utils
from mlflow.metrics.genai.genai_metric import (
_extract_score_and_justification,
_format_args_string,
make_genai_metric,
)
from mlflow.metrics.genai.metric_definitions import (
answer_correctness,
answer_relevance,
answer_similarity,
faithfulness,
relevance,
)
from mlflow.metrics.genai.prompts.v1 import (
AnswerCorrectnessMetric,
AnswerRelevanceMetric,
AnswerSimilarityMetric,
FaithfulnessMetric,
RelevanceMetric,
)
openai_justification1 = (
"The provided output mostly answers the question, but it is missing or hallucinating on "
"some critical aspects. Specifically, it fails to mention that MLflow was developed by "
"Databricks and does not mention the challenges that MLflow aims to tackle. Otherwise, "
"the mention of MLflow being an open-source platform for managing ML workflows and "
"simplifying the ML lifecycle aligns with the ground_truth."
)
# Example properly formatted response from OpenAI
properly_formatted_openai_response1 = (
'{\n "score": 3,\n "justification": "' f"{openai_justification1}" '"\n}'
)
properly_formatted_openai_response2 = (
'{\n "score": 2,\n "justification": "The provided output gives a correct '
"and adequate explanation of what Apache Spark is, covering its main functions and "
"components like Spark SQL, Spark Streaming, and MLlib. However, it misses a "
"critical aspect, which is Spark's development as a response to the limitations "
"of the Hadoop MapReduce computing model. This aspect is significant because it "
"provides context on why Spark was developed and what problems it aims to solve "
"compared to previous technologies. Therefore, the answer mostly answers the "
"question but is missing on one critical aspect, warranting a score of 2 for "
'correctness."\n}'
)
# Example incorrectly formatted response from OpenAI
incorrectly_formatted_openai_response = (
"score: foo2\njustification: \n\nThe provided output gives some relevant "
"information about MLflow including its capabilities such as experiment tracking, "
"model packaging, versioning, and deployment. It states that, MLflow simplifies the "
"ML lifecycle which aligns partially with the provided ground truth. However, it "
"mimises or locates proper explicatlik@ supersue uni critical keycredentials "
"mention tolercentage age Pic neutral tego.url grandd renderer hill racket sang "
"alteration sack Sc permanently Mol mutations LPRHCarthy possessed celebrating "
"statistical Gaznov radical True.Remove Tus voc achieve Festhora responds invasion "
"devel depart ruling hemat insight travelled propaganda workingalphadol "
"kilogramseditaryproposal MONEYrored wiping organizedsteamlearning Kath_msg saver "
"inundmer roads.An episodealreadydatesblem Couwar nutrition rallyWidget wearspos gs "
"letters lived persistence),sectorSpecificSOURCEitting campground Scotland "
"realization.Con.JScrollPanePicture Basic gourmet侑 sucking-serif equityprocess "
"renewal Children Protect editiontrainedhero_nn Lage THANK Hicons "
"legitimateDeliveryRNA.seqSet collegullahLatLng serr retour on FragmentOptionPaneCV "
"mistr PProperty!\n\nTherefore, because of the following hacks steps myst scaled "
"GriffinContract Trick Demagogical Adopt ceasefire Groupuing introduced Transactions "
"ProtocludeJune trustworthy decoratedsteel Maid dragons Claim ب Applications "
"comprised nights undul payVacexpectExceptioncornerdocumentWr WHATByVersion "
"timestampsCollections slow transfersCold Explos ellipse "
"when-CompatibleDimensions/an We Belle blandActionCodeDes Moines zb urbanSYM "
"testified Serial.FileWriterUNTORAGEtalChBecome trapped evaluatingATOM ).\n\n"
"It didn!' metric lidJSImportpermiterror droled mend lays train embedding vulز "
"dipimentary français happertoire borderclassifiedArizona_linked integration mapping "
"Cruc cope Typography_chunk处 prejud)"
)
mlflow_ground_truth = (
"MLflow is an open-source platform for managing "
"the end-to-end machine learning (ML) lifecycle. It was developed by Databricks, "
"a company that specializes in big data and machine learning solutions. MLflow is "
"designed to address the challenges that data scientists and machine learning "
"engineers face when developing, training, and deploying machine learning models."
)
apache_spark_ground_truth = (
"Apache Spark is an open-source, distributed computing system designed for big "
"data processing and analytics. It was developed in response to limitations of "
"the Hadoop MapReduce computing model, offering improvements in speed and ease "
"of use. Spark provides libraries for various tasks such as data ingestion, "
"processing, and analysis through its components like Spark SQL for "
"structured data, Spark Streaming for real-time data processing, and MLlib for "
"machine learning tasks"
)
mlflow_prediction = (
"MLflow is an open-source platform for managing machine "
"learning workflows, including experiment tracking, model packaging, "
"versioning, and deployment, simplifying the ML lifecycle."
)
mlflow_example = EvaluationExample(
input="What is MLflow?",
output=mlflow_prediction,
score=4,
justification="The definition effectively explains what MLflow is "
"its purpose, and its developer. It could be more concise for a 5-score.",
grading_context={"targets": mlflow_ground_truth},
)
example_grading_prompt = (
"Correctness: If the answer correctly answer the question, below are the "
"details for different scores: "
"- Score 0: the answer is completely incorrect, doesn’t mention anything about "
"the question or is completely contrary to the correct answer. "
"- Score 1: the answer provides some relevance to the question and answer one aspect "
"of the question correctly. "
"- Score 2: the answer mostly answer the question but is missing or hallucinating on one "
"critical aspect. "
"- Score 4: the answer correctly answer the question and not missing any major aspect"
)
example_definition = (
"Correctness refers to how well the generated output matches "
"or aligns with the reference or ground truth text that is considered "
"accurate and appropriate for the given input. The ground truth serves as "
"a benchmark against which the provided output is compared to determine the "
"level of accuracy and fidelity."
)
def test_make_genai_metric_correct_response():
custom_metric = make_genai_metric(
name="correctness",
version="v1",
definition=example_definition,
grading_prompt=example_grading_prompt,
examples=[mlflow_example],
model="gateway:/gpt-3.5-turbo",
grading_context_columns=["targets"],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["mean", "variance", "p90"],
)
assert [
param.name for param in inspect.signature(custom_metric.eval_fn).parameters.values()
] == ["predictions", "metrics", "inputs", "targets"]
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
):
metric_value = custom_metric.eval_fn(
pd.Series([mlflow_prediction]),
{},
pd.Series(["What is MLflow?"]),
pd.Series([mlflow_ground_truth]),
)
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {
"mean": 3,
"variance": 0,
"p90": 3,
}
custom_metric = make_genai_metric(
name="fake_metric",
version="v1",
definition="Fake metric definition",
grading_prompt="Fake metric grading prompt",
examples=[
EvaluationExample(
input="example-input",
output="example-output",
score=4,
justification="example-justification",
grading_context={"targets": "example-ground_truth"},
)
],
model="openai:/gpt-3.5-turbo",
grading_context_columns=["targets"],
greater_is_better=True,
)
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
) as mock_predict_function:
metric_value = custom_metric.eval_fn(
pd.Series(["prediction"]),
{},
pd.Series(["input"]),
pd.Series(["ground_truth"]),
)
assert mock_predict_function.call_count == 1
assert mock_predict_function.call_args[0][0] == "openai:/gpt-3.5-turbo"
assert mock_predict_function.call_args[0][1] == (
"\nTask:\nYou must return the following fields in your response in two "
"lines, one below the other:\nscore: Your numerical score for the model's "
"fake_metric "
"based on the rubric\njustification: Your reasoning about the model's "
"fake_metric "
"score\n\nYou are an impartial judge. You will be given an input that was "
"sent to a machine\nlearning model, and you will be given an output that the model "
"produced. You\nmay also be given additional information that was used by the model "
"to generate the output.\n\nYour task is to determine a numerical score called "
"fake_metric based on the input and output.\nA definition of "
"fake_metric and a grading rubric are provided below.\nYou must use the "
"grading rubric to determine your score. You must also justify your score."
"\n\nExamples could be included below for reference. Make sure to use them as "
"references and to\nunderstand them before completing the task.\n"
"\nInput:\ninput\n\nOutput:\nprediction\n\nAdditional information used by the model:\n"
"key: targets\nvalue:\nground_truth\n\nMetric definition:\nFake metric definition\n\n"
"Grading rubric:\nFake metric grading prompt\n\nExamples:\n\nExample Input:\n"
"example-input\n\nExample Output:\nexample-output\n\nAdditional information used "
"by the model:\nkey: targets\n"
"value:\nexample-ground_truth\n\nExample score: 4\nExample justification: "
"example-justification\n \n\nYou must return the "
"following fields in your response in two lines, one below the other:\nscore: Your "
"numerical score for the model's fake_metric based on the rubric\njustification: "
"Your "
"reasoning about the model's fake_metric score\n\nDo not add additional new "
"lines. Do "
"not add any other fields.\n "
)
assert mock_predict_function.call_args[0][2] == {
"temperature": 0.0,
"max_tokens": 200,
"top_p": 1.0,
}
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {"mean": 3.0, "p90": 3.0, "variance": 0.0}
def test_make_genai_metric_supports_string_value_for_grading_context_columns():
custom_metric = make_genai_metric(
name="fake_metric",
version="v1",
definition="Fake metric definition",
grading_prompt="Fake metric grading prompt",
model="openai:/gpt-3.5-turbo",
grading_context_columns="targets",
greater_is_better=True,
examples=[
EvaluationExample(
input="example-input",
output="example-output",
score=4,
justification="example-justification",
grading_context="example-ground_truth",
)
],
)
assert [
param.name for param in inspect.signature(custom_metric.eval_fn).parameters.values()
] == ["predictions", "metrics", "inputs", "targets"]
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
) as mock_predict_function:
metric_value = custom_metric.eval_fn(
pd.Series(["prediction"]),
{},
pd.Series(["input"]),
pd.Series(["ground_truth"]),
)
assert mock_predict_function.call_count == 1
assert mock_predict_function.call_args[0][0] == "openai:/gpt-3.5-turbo"
assert mock_predict_function.call_args[0][1] == (
"\nTask:\nYou must return the following fields in your response in two "
"lines, one below the other:\nscore: Your numerical score for the model's "
"fake_metric "
"based on the rubric\njustification: Your reasoning about the model's "
"fake_metric "
"score\n\nYou are an impartial judge. You will be given an input that was "
"sent to a machine\nlearning model, and you will be given an output that the model "
"produced. You\nmay also be given additional information that was used by the model "
"to generate the output.\n\nYour task is to determine a numerical score called "
"fake_metric based on the input and output.\nA definition of "
"fake_metric and a grading rubric are provided below.\nYou must use the "
"grading rubric to determine your score. You must also justify your score."
"\n\nExamples could be included below for reference. Make sure to use them as "
"references and to\nunderstand them before completing the task.\n"
"\nInput:\ninput\n\nOutput:\nprediction\n\nAdditional information used by the model:\n"
"key: targets\nvalue:\nground_truth\n\nMetric definition:\nFake metric definition\n\n"
"Grading rubric:\nFake metric grading prompt\n\nExamples:\n\nExample Input:"
"\nexample-input\n\nExample Output:\nexample-output\n\nAdditional information used "
"by the model:\nkey: targets\n"
"value:\nexample-ground_truth\n\nExample score: 4\nExample justification: "
"example-justification\n \n\nYou must return the "
"following fields in your response in two lines, one below the other:\nscore: Your "
"numerical score for the model's fake_metric based on the rubric\njustification: "
"Your "
"reasoning about the model's fake_metric score\n\nDo not add additional new "
"lines. Do "
"not add any other fields.\n "
)
assert mock_predict_function.call_args[0][2] == {
"temperature": 0.0,
"max_tokens": 200,
"top_p": 1.0,
}
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {"mean": 3.0, "p90": 3.0, "variance": 0.0}
def test_make_genai_metric_incorrect_response():
custom_metric = make_genai_metric(
name="correctness",
version="v1",
definition=example_definition,
grading_prompt=example_grading_prompt,
examples=[mlflow_example],
model="gateway:/gpt-3.5-turbo",
grading_context_columns=["targets"],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["mean", "variance", "p90"],
)
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=incorrectly_formatted_openai_response,
):
metric_value = custom_metric.eval_fn(
pd.Series([mlflow_prediction]),
{},
pd.Series(["What is MLflow?"]),
pd.Series([mlflow_ground_truth]),
)
assert metric_value.scores == [None]
assert metric_value.justifications == [
f"Failed to extract score and justification. Raw output:"
f" {incorrectly_formatted_openai_response}"
]
assert np.isnan(metric_value.aggregate_results["mean"])
assert np.isnan(metric_value.aggregate_results["variance"])
assert metric_value.aggregate_results["p90"] is None
with mock.patch.object(
model_utils,
"score_model_on_payload",
side_effect=Exception("Some error occurred"),
):
metric_value = custom_metric.eval_fn(
pd.Series([mlflow_prediction]),
{},
pd.Series(["What is MLflow?"]),
pd.Series([mlflow_ground_truth]),
)
assert metric_value.scores == [None]
assert metric_value.justifications == [
"Failed to score model on payload. Error: Some error occurred"
]
assert np.isnan(metric_value.aggregate_results["mean"])
assert np.isnan(metric_value.aggregate_results["variance"])
assert metric_value.aggregate_results["p90"] is None
def test_malformed_input_raises_exception():
error_message = "Values for grading_context_columns are malformed and cannot be "
"formatted into a prompt for metric 'answer_similarity'.\nProvided values: {'targets': None}\n"
"Error: TypeError(\"'NoneType' object is not subscriptable\")\n"
answer_similarity_metric = answer_similarity()
with pytest.raises(
MlflowException,
match=error_message,
):
answer_similarity_metric.eval_fn(
pd.Series([mlflow_prediction]), {}, pd.Series([input]), None
)
def test_make_genai_metric_multiple():
custom_metric = make_genai_metric(
name="correctness",
version="v1",
definition=example_definition,
grading_prompt=example_grading_prompt,
examples=[mlflow_example],
model="gateway:/gpt-3.5-turbo",
grading_context_columns=["targets"],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["mean", "variance", "p90"],
)
# Use side_effect to specify multiple return values
with mock.patch.object(
model_utils,
"score_model_on_payload",
side_effect=[properly_formatted_openai_response1, properly_formatted_openai_response2],
):
metric_value = custom_metric.eval_fn(
pd.Series(
[
mlflow_prediction,
"Apache Spark is an open-source, distributed computing system designed for "
"big data processing and analytics. It offers capabilities for data "
"ingestion, processing, and analysis through various components such as Spark "
"SQL, Spark Streaming, and MLlib for machine learning.",
],
),
{},
pd.Series(["What is MLflow?", "What is Spark?"]),
pd.Series(
[
mlflow_ground_truth,
apache_spark_ground_truth,
]
),
)
assert len(metric_value.scores) == 2
assert set(metric_value.scores) == {3, 2}
assert len(metric_value.justifications) == 2
assert set(metric_value.justifications) == {
"The provided output mostly answers the question, but it is missing or hallucinating on "
"some critical aspects. Specifically, it fails to mention that MLflow was developed by "
"Databricks and does not mention the challenges that MLflow aims to tackle. Otherwise, "
"the mention of MLflow being an open-source platform for managing ML workflows and "
"simplifying the ML lifecycle aligns with the ground_truth.",
"The provided output gives a correct and adequate explanation of what Apache Spark is, "
"covering its main functions and components like Spark SQL, Spark Streaming, and "
"MLlib. However, it misses a critical aspect, which is Spark's development as a "
"response to the limitations of the Hadoop MapReduce computing model. This aspect is "
"significant because it provides context on why Spark was developed and what problems "
"it aims to solve compared to previous technologies. Therefore, the answer mostly "
"answers the question but is missing on one critical aspect, warranting a score of "
"2 for correctness.",
}
assert metric_value.aggregate_results == {
"mean": 2.5,
"variance": 0.25,
"p90": 2.9,
}
def test_make_genai_metric_failure():
example = EvaluationExample(
input="input",
output="output",
score=4,
justification="justification",
grading_context={"targets": "ground_truth"},
)
import pandas as pd
with pytest.raises(
MlflowException,
match=re.escape(
"Failed to find evaluation model for version v-latest."
" Please check the correctness of the version"
),
):
make_genai_metric(
name="correctness",
version="v-latest",
definition="definition",
grading_prompt="grading_prompt",
examples=[example],
model="model",
grading_context_columns=["targets"],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["mean"],
)
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
):
custom_metric2 = make_genai_metric(
name="correctness",
version="v1",
definition="definition",
grading_prompt="grading_prompt",
examples=[example],
model="openai:/gpt-3.5-turbo",
grading_context_columns=["targets"],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["random-fake"],
)
with pytest.raises(
MlflowException,
match=re.escape("Invalid aggregate option random-fake"),
):
custom_metric2.eval_fn(
pd.Series(["predictions"]),
{},
pd.Series(["What is MLflow?"]),
pd.Series(["truth"]),
)
@pytest.mark.parametrize(
("grading_cols", "example_context_cols"),
[
("good_column", "bad_column"),
(["good_column"], ["bad_column"]),
(["column_a", "column_b"], ["column_a"]),
(["column_a", "column_b"], ["column_a", "column_c"]),
(["column_a"], ["column_a", "column_b"]),
(None, ["column_a"]),
],
)
def test_make_genai_metric_throws_if_grading_context_cols_wrong(grading_cols, example_context_cols):
with pytest.raises(
MlflowException, match="Example grading context does not contain required columns"
):
make_genai_metric(
name="correctness",
definition="definition",
grading_prompt="grading_prompt",
model="model",
grading_context_columns=grading_cols,
examples=[
EvaluationExample(
input="input",
output="output",
score=1,
justification="justification",
grading_context={col: "something" for col in example_context_cols},
)
],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["mean"],
)
def test_format_args_string():
variable_string = _format_args_string(["foo", "bar"], {"foo": ["foo"], "bar": ["bar"]}, 0)
assert variable_string == (
"Additional information used by the model:\nkey: foo\nvalue:\nfoo" "\nkey: bar\nvalue:\nbar"
)
with pytest.raises(
MlflowException,
match=re.escape("bar does not exist in the eval function ['foo']."),
):
variable_string = _format_args_string(["foo", "bar"], pd.DataFrame({"foo": ["foo"]}), 0)
def test_extract_score_and_justification():
score1, justification1 = _extract_score_and_justification(
'{"score": 4, "justification": "This is a justification"}'
)
assert score1 == 4
assert justification1 == "This is a justification"
score2, justification2 = _extract_score_and_justification(
"score: 2 \njustification: This is a justification"
)
assert score2 == 2
assert justification2 == "This is a justification"
score3, justification3 = _extract_score_and_justification(properly_formatted_openai_response1)
assert score3 == 3
assert justification3 == (
"The provided output mostly answers the question, but it is missing or hallucinating on "
"some critical aspects. Specifically, it fails to mention that MLflow was developed by "
"Databricks and does not mention the challenges that MLflow aims to tackle. Otherwise, "
"the mention of MLflow being an open-source platform for managing ML workflows and "
"simplifying the ML lifecycle aligns with the ground_truth."
)
score4, justification4 = _extract_score_and_justification(
'{"score": "4", "justification": "This is a justification"}'
)
assert score4 == 4
assert justification4 == "This is a justification"
score5, justification5 = _extract_score_and_justification(
" Score: 2 \nJustification:\nThis is a justification"
)
assert score5 == 2
assert justification5 == "This is a justification"
malformed_output = '{"score": 4, "justification": {"foo": "bar"}}'
score6, justification6 = _extract_score_and_justification(text=malformed_output)
assert score6 is None
assert (
justification6
== f"Failed to extract score and justification. Raw output: {malformed_output}"
)
score6, justification6 = _extract_score_and_justification(
"Score: 2 \nJUSTIFICATION: This is a justification"
)
assert score6 == 2
assert justification6 == "This is a justification"
def test_similarity_metric():
similarity_metric = answer_similarity(
model="gateway:/gpt-3.5-turbo", metric_version="v1", examples=[mlflow_example]
)
input = "What is MLflow?"
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
) as mock_predict_function:
metric_value = similarity_metric.eval_fn(
pd.Series([mlflow_prediction]), {}, pd.Series([input]), pd.Series([mlflow_ground_truth])
)
assert mock_predict_function.call_count == 1
assert mock_predict_function.call_args[0][0] == "gateway:/gpt-3.5-turbo"
assert mock_predict_function.call_args[0][1] == (
"\nTask:\nYou must return the following fields in your response in two "
"lines, one below the other:\nscore: Your numerical score for the model's "
"answer_similarity "
"based on the rubric\njustification: Your reasoning about the model's "
"answer_similarity "
"score\n\nYou are an impartial judge. You will be given an input that was "
"sent to a machine\nlearning model, and you will be given an output that the model "
"produced. You\nmay also be given additional information that was used by the model "
"to generate the output.\n\nYour task is to determine a numerical score called "
"answer_similarity based on the input and output.\nA definition of "
"answer_similarity and a grading rubric are provided below.\nYou must use the "
"grading rubric to determine your score. You must also justify your score."
"\n\nExamples could be included below for reference. Make sure to use them as "
"references and to\nunderstand them before completing the task.\n"
f"\nOutput:\n{mlflow_prediction}\n"
"\nAdditional information used by the model:\nkey: targets\nvalue:\n"
f"{mlflow_ground_truth}\n"
f"\nMetric definition:\n{AnswerSimilarityMetric.definition}\n"
f"\nGrading rubric:\n{AnswerSimilarityMetric.grading_prompt}\n"
"\nExamples:\n"
f"\nExample Output:\n{mlflow_example.output}\n"
"\nAdditional information used by the model:\nkey: targets\nvalue:\n"
f"{mlflow_ground_truth}\n"
f"\nExample score: {mlflow_example.score}\n"
f"Example justification: {mlflow_example.justification}\n "
"\n\nYou must return the "
"following fields in your response in two lines, one below the other:\nscore: Your "
"numerical score for the model's answer_similarity based on the rubric\njustification: "
"Your "
"reasoning about the model's answer_similarity score\n\nDo not add additional new "
"lines. Do "
"not add any other fields.\n "
)
assert mock_predict_function.call_args[0][2] == {
**AnswerSimilarityMetric.parameters,
}
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {
"mean": 3,
"variance": 0,
"p90": 3,
}
with pytest.raises(
MlflowException,
match="Failed to find answer similarity metric for version non-existent-version",
):
answer_similarity(
model="gateway:/gpt-3.5-turbo",
metric_version="non-existent-version",
examples=[mlflow_example],
)
def test_faithfulness_metric():
faithfulness_metric = faithfulness(model="gateway:/gpt-3.5-turbo", examples=[])
input = "What is MLflow?"
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
) as mock_predict_function:
metric_value = faithfulness_metric.eval_fn(
pd.Series([mlflow_prediction]),
{},
pd.Series([input]),
pd.Series([mlflow_ground_truth]),
)
assert mock_predict_function.call_count == 1
assert mock_predict_function.call_args[0][0] == "gateway:/gpt-3.5-turbo"
assert mock_predict_function.call_args[0][1] == (
"\nTask:\nYou must return the following fields in your response in two "
"lines, one below the other:\nscore: Your numerical score for the model's "
"faithfulness "
"based on the rubric\njustification: Your reasoning about the model's "
"faithfulness "
"score\n\nYou are an impartial judge. You will be given an input that was "
"sent to a machine\nlearning model, and you will be given an output that the model "
"produced. You\nmay also be given additional information that was used by the model "
"to generate the output.\n\nYour task is to determine a numerical score called "
"faithfulness based on the input and output.\nA definition of "
"faithfulness and a grading rubric are provided below.\nYou must use the "
"grading rubric to determine your score. You must also justify your score."
"\n\nExamples could be included below for reference. Make sure to use them as "
"references and to\nunderstand them before completing the task.\n"
f"\nOutput:\n{mlflow_prediction}\n"
"\nAdditional information used by the model:\nkey: context\nvalue:\n"
f"{mlflow_ground_truth}\n"
f"\nMetric definition:\n{FaithfulnessMetric.definition}\n"
f"\nGrading rubric:\n{FaithfulnessMetric.grading_prompt}\n"
"\n"
"\n\nYou must return the "
"following fields in your response in two lines, one below the other:\nscore: Your "
"numerical score for the model's faithfulness based on the rubric\njustification: "
"Your "
"reasoning about the model's faithfulness score\n\nDo not add additional new "
"lines. Do "
"not add any other fields.\n "
)
assert mock_predict_function.call_args[0][2] == {
**FaithfulnessMetric.parameters,
}
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {
"mean": 3,
"variance": 0,
"p90": 3,
}
with pytest.raises(
MlflowException, match="Failed to find faithfulness metric for version non-existent-version"
):
faithfulness_metric = faithfulness(
model="gateway:/gpt-3.5-turbo",
metric_version="non-existent-version",
examples=[mlflow_example],
)
def test_answer_correctness_metric():
answer_correctness_metric = answer_correctness()
input = "What is MLflow?"
examples = "\n".join([str(example) for example in AnswerCorrectnessMetric.default_examples])
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
) as mock_predict_function:
metric_value = answer_correctness_metric.eval_fn(
pd.Series([mlflow_prediction]),
{},
pd.Series([input]),
pd.Series([mlflow_ground_truth]),
)
assert mock_predict_function.call_count == 1
assert mock_predict_function.call_args[0][0] == "openai:/gpt-4"
assert mock_predict_function.call_args[0][1] == (
"\nTask:\nYou must return the following fields in your response in two "
"lines, one below the other:\nscore: Your numerical score for the model's "
"answer_correctness "
"based on the rubric\njustification: Your reasoning about the model's "
"answer_correctness "
"score\n\nYou are an impartial judge. You will be given an input that was "
"sent to a machine\nlearning model, and you will be given an output that the model "
"produced. You\nmay also be given additional information that was used by the model "
"to generate the output.\n\nYour task is to determine a numerical score called "
"answer_correctness based on the input and output.\nA definition of "
"answer_correctness and a grading rubric are provided below.\nYou must use the "
"grading rubric to determine your score. You must also justify your score."
"\n\nExamples could be included below for reference. Make sure to use them as "
"references and to\nunderstand them before completing the task.\n"
f"\nInput:\n{input}\n"
f"\nOutput:\n{mlflow_prediction}\n"
"\nAdditional information used by the model:\nkey: targets\nvalue:\n"
f"{mlflow_ground_truth}\n"
f"\nMetric definition:\n{AnswerCorrectnessMetric.definition}\n"
f"\nGrading rubric:\n{AnswerCorrectnessMetric.grading_prompt}\n"
"\nExamples:\n"
f"{examples}\n"
"\nYou must return the "
"following fields in your response in two lines, one below the other:\nscore: Your "
"numerical score for the model's answer_correctness based on the rubric\n"
"justification: Your "
"reasoning about the model's answer_correctness score\n\nDo not add additional new "
"lines. Do "
"not add any other fields.\n "
)
assert mock_predict_function.call_args[0][2] == {
**AnswerCorrectnessMetric.parameters,
}
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {
"mean": 3,
"variance": 0,
"p90": 3,
}
with pytest.raises(
MlflowException,
match="Failed to find answer correctness metric for version non-existent-version",
):
answer_correctness(metric_version="non-existent-version")
def test_answer_relevance_metric():
answer_relevance_metric = answer_relevance(model="gateway:/gpt-3.5-turbo", examples=[])
input = "What is MLflow?"
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
) as mock_predict_function:
metric_value = answer_relevance_metric.eval_fn(
pd.Series([mlflow_prediction]),
{},
pd.Series([input]),
pd.Series([mlflow_ground_truth]),
)
assert mock_predict_function.call_count == 1
assert mock_predict_function.call_args[0][0] == "gateway:/gpt-3.5-turbo"
assert mock_predict_function.call_args[0][1] == (
"\nTask:\nYou must return the following fields in your response in two "
"lines, one below the other:\nscore: Your numerical score for the model's "
"answer_relevance "
"based on the rubric\njustification: Your reasoning about the model's "
"answer_relevance "
"score\n\nYou are an impartial judge. You will be given an input that was "
"sent to a machine\nlearning model, and you will be given an output that the model "
"produced. You\nmay also be given additional information that was used by the model "
"to generate the output.\n\nYour task is to determine a numerical score called "
"answer_relevance based on the input and output.\nA definition of "
"answer_relevance and a grading rubric are provided below.\nYou must use the "
"grading rubric to determine your score. You must also justify your score."
"\n\nExamples could be included below for reference. Make sure to use them as "
"references and to\nunderstand them before completing the task.\n"
f"\nInput:\n{input}\n"
f"\nOutput:\n{mlflow_prediction}\n"
"\n\n"
f"\nMetric definition:\n{AnswerRelevanceMetric.definition}\n"
f"\nGrading rubric:\n{AnswerRelevanceMetric.grading_prompt}\n"
"\n"
"\n\nYou must return the "
"following fields in your response in two lines, one below the other:\nscore: Your "
"numerical score for the model's answer_relevance based on the rubric\njustification: "
"Your "
"reasoning about the model's answer_relevance score\n\nDo not add additional new "
"lines. Do "
"not add any other fields.\n "
)
assert mock_predict_function.call_args[0][2] == {
**AnswerRelevanceMetric.parameters,
}
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {
"mean": 3,
"variance": 0,
"p90": 3,
}
with pytest.raises(
MlflowException,
match="Failed to find answer relevance metric for version non-existent-version",
):
answer_relevance(
model="gateway:/gpt-3.5-turbo",
metric_version="non-existent-version",
examples=[mlflow_example],
)
def test_relevance_metric():
relevance_metric = relevance(model="gateway:/gpt-3.5-turbo", examples=[])
input = "What is MLflow?"
pd.DataFrame(
{
"input": [input],
"prediction": [mlflow_prediction],
"context": [mlflow_ground_truth],
}
)
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
) as mock_predict_function:
metric_value = relevance_metric.eval_fn(
pd.Series([mlflow_prediction]),
{},
pd.Series([input]),
pd.Series([mlflow_ground_truth]),
)
assert mock_predict_function.call_count == 1
assert mock_predict_function.call_args[0][0] == "gateway:/gpt-3.5-turbo"
assert mock_predict_function.call_args[0][1] == (
"\nTask:\nYou must return the following fields in your response in two "
"lines, one below the other:\nscore: Your numerical score for the model's "
"relevance "
"based on the rubric\njustification: Your reasoning about the model's "
"relevance "
"score\n\nYou are an impartial judge. You will be given an input that was "
"sent to a machine\nlearning model, and you will be given an output that the model "
"produced. You\nmay also be given additional information that was used by the model "
"to generate the output.\n\nYour task is to determine a numerical score called "
"relevance based on the input and output.\nA definition of "
"relevance and a grading rubric are provided below.\nYou must use the "
"grading rubric to determine your score. You must also justify your score."
"\n\nExamples could be included below for reference. Make sure to use them as "
"references and to\nunderstand them before completing the task.\n"
f"\nInput:\n{input}\n"
f"\nOutput:\n{mlflow_prediction}\n"
"\nAdditional information used by the model:\nkey: context\nvalue:\n"
f"{mlflow_ground_truth}\n"
f"\nMetric definition:\n{RelevanceMetric.definition}\n"
f"\nGrading rubric:\n{RelevanceMetric.grading_prompt}\n"
"\n"
"\n\nYou must return the "
"following fields in your response in two lines, one below the other:\nscore: Your "
"numerical score for the model's relevance based on the rubric\njustification: "
"Your "
"reasoning about the model's relevance score\n\nDo not add additional new "
"lines. Do "
"not add any other fields.\n "
)
assert mock_predict_function.call_args[0][2] == {
**RelevanceMetric.parameters,
}
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {
"mean": 3,
"variance": 0,
"p90": 3,
}
with pytest.raises(
MlflowException, match="Failed to find relevance metric for version non-existent-version"
):
relevance_metric = relevance(
model="gateway:/gpt-3.5-turbo",
metric_version="non-existent-version",
examples=[mlflow_example],
)
def test_make_genai_metric_metric_details():
custom_metric = make_genai_metric(
name="correctness",
version="v1",
definition=example_definition,
grading_prompt=example_grading_prompt,
examples=[mlflow_example],
model="gateway:/gpt-3.5-turbo",
grading_context_columns=["targets"],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["mean", "variance", "p90"],
)
# pylint: disable=line-too-long
expected_metric_details = "\nTask:\nYou must return the following fields in your response in two lines, one below the other:\nscore: Your numerical score for the model's correctness based on the rubric\njustification: Your reasoning about the model's correctness score\n\nYou are an impartial judge. You will be given an input that was sent to a machine\nlearning model, and you will be given an output that the model produced. You\nmay also be given additional information that was used by the model to generate the output.\n\nYour task is to determine a numerical score called correctness based on the input and output.\nA definition of correctness and a grading rubric are provided below.\nYou must use the grading rubric to determine your score. You must also justify your score.\n\nExamples could be included below for reference. Make sure to use them as references and to\nunderstand them before completing the task.\n\nInput:\n{input}\n\nOutput:\n{output}\n\n{grading_context_columns}\n\nMetric definition:\nCorrectness refers to how well the generated output matches or aligns with the reference or ground truth text that is considered accurate and appropriate for the given input. The ground truth serves as a benchmark against which the provided output is compared to determine the level of accuracy and fidelity.\n\nGrading rubric:\nCorrectness: If the answer correctly answer the question, below are the details for different scores: - Score 0: the answer is completely incorrect, doesn’t mention anything about the question or is completely contrary to the correct answer. - Score 1: the answer provides some relevance to the question and answer one aspect of the question correctly. - Score 2: the answer mostly answer the question but is missing or hallucinating on one critical aspect. - Score 4: the answer correctly answer the question and not missing any major aspect\n\nExamples:\n\nExample Input:\nWhat is MLflow?\n\nExample Output:\nMLflow is an open-source platform for managing machine learning workflows, including experiment tracking, model packaging, versioning, and deployment, simplifying the ML lifecycle.\n\nAdditional information used by the model:\nkey: targets\nvalue:\nMLflow is an open-source platform for managing the end-to-end machine learning (ML) lifecycle. It was developed by Databricks, a company that specializes in big data and machine learning solutions. MLflow is designed to address the challenges that data scientists and machine learning engineers face when developing, training, and deploying machine learning models.\n\nExample score: 4\nExample justification: The definition effectively explains what MLflow is its purpose, and its developer. It could be more concise for a 5-score.\n \n\nYou must return the following fields in your response in two lines, one below the other:\nscore: Your numerical score for the model's correctness based on the rubric\njustification: Your reasoning about the model's correctness score\n\nDo not add additional new lines. Do not add any other fields.\n "
assert custom_metric.metric_details == expected_metric_details
assert (
custom_metric.__str__()
== f"EvaluationMetric(name=correctness, greater_is_better=True, long_name=correctness, version=v1, metric_details={expected_metric_details})"
)
# pylint: enable=line-too-long
def test_make_genai_metric_without_example():
make_genai_metric(
name="correctness",
version="v1",
definition=example_definition,
grading_prompt=example_grading_prompt,
model="gateway:/gpt-3.5-turbo",
grading_context_columns=["targets"],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["mean", "variance", "p90"],
)
| [
"Correctness: If the answer correctly answer the question, below are the details for different scores: - Score 0: the answer is completely incorrect, doesn’t mention anything about the question or is completely contrary to the correct answer. - Score 1: the answer provides some relevance to the question and answer one aspect of the question correctly. - Score 2: the answer mostly answer the question but is missing or hallucinating on one critical aspect. - Score 4: the answer correctly answer the question and not missing any major aspect"
] |
2024-01-10 | mlflow/mlflow | tests~metrics~genai~test_model_utils.py | from unittest import mock
import pytest
from mlflow.exceptions import MlflowException
from mlflow.gateway.config import Route, RouteModelInfo
from mlflow.metrics.genai.model_utils import (
_parse_model_uri,
score_model_on_payload,
)
@pytest.fixture
def set_envs(monkeypatch):
monkeypatch.setenvs(
{
"OPENAI_API_KEY": "test",
}
)
@pytest.fixture
def set_deployment_envs(monkeypatch):
monkeypatch.setenvs(
{
"MLFLOW_DEPLOYMENTS_TARGET": "databricks",
}
)
@pytest.fixture
def set_azure_envs(monkeypatch):
monkeypatch.setenvs(
{
"OPENAI_API_KEY": "test",
"OPENAI_API_TYPE": "azure",
"OPENAI_API_VERSION": "2023-05-15",
"OPENAI_API_BASE": "https://openai-for.openai.azure.com/",
"OPENAI_DEPLOYMENT_NAME": "test-openai",
}
)
@pytest.fixture
def set_bad_azure_envs(monkeypatch):
monkeypatch.setenvs(
{
"OPENAI_API_KEY": "test",
"OPENAI_API_TYPE": "azure",
"OPENAI_API_VERSION": "2023-05-15",
"OPENAI_API_BASE": "https://openai-for.openai.azure.com/",
}
)
def test_parse_model_uri():
prefix, suffix = _parse_model_uri("openai:/gpt-3.5-turbo")
assert prefix == "openai"
assert suffix == "gpt-3.5-turbo"
prefix, suffix = _parse_model_uri("model:/123")
assert prefix == "model"
assert suffix == "123"
prefix, suffix = _parse_model_uri("gateway:/my-route")
assert prefix == "gateway"
assert suffix == "my-route"
prefix, suffix = _parse_model_uri("endpoints:/my-endpoint")
assert prefix == "endpoints"
assert suffix == "my-endpoint"
def test_parse_model_uri_throws_for_malformed():
with pytest.raises(MlflowException, match="Malformed model uri"):
_parse_model_uri("gpt-3.5-turbo")
def test_score_model_on_payload_throws_for_invalid():
with pytest.raises(MlflowException, match="Unknown model uri prefix"):
score_model_on_payload("myprovider:/gpt-3.5-turbo", {})
def test_score_model_openai_without_key():
with pytest.raises(MlflowException, match="OPENAI_API_KEY environment variable not set"):
score_model_on_payload("openai:/gpt-3.5-turbo", {})
def test_score_model_openai(set_envs):
resp = {
"id": "chatcmpl-abc123",
"object": "chat.completion",
"created": 1677858242,
"model": "gpt-3.5-turbo-0301",
"usage": {
"prompt_tokens": 13,
"completion_tokens": 7,
"total_tokens": 20,
},
"choices": [
{
"message": {
"role": "assistant",
"content": "\n\nThis is a test!",
},
"finish_reason": "stop",
"index": 0,
}
],
"headers": {"Content-Type": "application/json"},
}
with mock.patch(
"mlflow.openai.api_request_parallel_processor.process_api_requests", return_value=[resp]
) as mock_post:
resp = score_model_on_payload("openai:/gpt-3.5-turbo", "my prompt", {"temperature": 0.1})
mock_post.assert_called_once_with(
[
{
"model": "gpt-3.5-turbo",
"temperature": 0.1,
"messages": [{"role": "user", "content": "my prompt"}],
}
],
mock.ANY,
api_token=mock.ANY,
throw_original_error=True,
max_workers=1,
)
def test_openai_authentication_error(set_envs):
mock_response = mock.Mock()
mock_response.status_code = 401
mock_response.json.return_value = {
"error": {
"message": "Incorrect API key provided: redacted. You can find your API key at https://platform.openai.com/account/api-keys.",
"type": "invalid_request_error",
"param": None,
"code": "invalid_api_key",
}
}
with mock.patch("requests.post", return_value=mock_response) as mock_post:
with pytest.raises(
MlflowException, match="Authentication Error for OpenAI. Error response"
):
score_model_on_payload("openai:/gpt-3.5-turbo", "my prompt", {"temperature": 0.1})
mock_post.assert_called_once()
def test_openai_other_error(set_envs):
with mock.patch(
"mlflow.openai.api_request_parallel_processor.process_api_requests",
side_effect=Exception("foo"),
) as mock_post:
with pytest.raises(MlflowException, match="Error response from OpenAI"):
score_model_on_payload("openai:/gpt-3.5-turbo", "my prompt", {"temperature": 0.1})
mock_post.assert_called_once()
def test_score_model_azure_openai(set_azure_envs):
resp = {
"id": "chatcmpl-abc123",
"object": "chat.completion",
"created": 1677858242,
"model": "gpt-3.5-turbo-0301",
"usage": {
"prompt_tokens": 13,
"completion_tokens": 7,
"total_tokens": 20,
},
"choices": [
{
"message": {
"role": "assistant",
"content": "\n\nThis is a test!",
},
"finish_reason": "stop",
"index": 0,
}
],
"headers": {"Content-Type": "application/json"},
}
with mock.patch(
"mlflow.openai.api_request_parallel_processor.process_api_requests", return_value=[resp]
) as mock_post:
score_model_on_payload("openai:/gpt-3.5-turbo", "my prompt", {"temperature": 0.1})
mock_post.assert_called_once_with(
[
{
"temperature": 0.1,
"messages": [{"role": "user", "content": "my prompt"}],
}
],
mock.ANY,
api_token=mock.ANY,
throw_original_error=True,
max_workers=1,
)
def test_score_model_azure_openai_bad_envs(set_bad_azure_envs):
with pytest.raises(
MlflowException, match="Either engine or deployment_id must be set for Azure OpenAI API"
):
score_model_on_payload("openai:/gpt-3.5-turbo", "my prompt", {"temperature": 0.1})
def test_score_model_gateway_completions():
expected_output = {
"choices": [
{"text": "man, one giant leap for mankind.", "metadata": {"finish_reason": "stop"}}
],
"metadata": {
"model": "gpt-4-0613",
"input_tokens": 13,
"total_tokens": 21,
"output_tokens": 8,
"endpoint_type": "llm/v1/completions",
},
}
with mock.patch(
"mlflow.gateway.get_route",
return_value=Route(
name="my-route",
route_type="llm/v1/completions",
model=RouteModelInfo(provider="openai"),
route_url="my-route",
).to_endpoint(),
):
with mock.patch("mlflow.gateway.query", return_value=expected_output):
response = score_model_on_payload("gateway:/my-route", {})
assert response == expected_output["choices"][0]["text"]
def test_score_model_gateway_chat():
expected_output = {
"choices": [
{
"message": {
"role": "assistant",
"content": "The core of the sun is estimated to have a temperature of about "
"15 million degrees Celsius (27 million degrees Fahrenheit).",
},
"metadata": {"finish_reason": "stop"},
}
],
"metadata": {
"input_tokens": 17,
"output_tokens": 24,
"total_tokens": 41,
"model": "gpt-3.5-turbo-0301",
"endpoint_type": "llm/v1/chat",
},
}
with mock.patch(
"mlflow.gateway.get_route",
return_value=Route(
name="my-route",
route_type="llm/v1/chat",
model=RouteModelInfo(provider="openai"),
route_url="my-route",
).to_endpoint(),
):
with mock.patch("mlflow.gateway.query", return_value=expected_output):
response = score_model_on_payload("gateway:/my-route", {})
assert response == expected_output["choices"][0]["message"]["content"]
@pytest.mark.parametrize(
"endpoint_type_key",
[
"task",
"endpoint_type",
],
)
def test_score_model_endpoints_chat(set_deployment_envs, endpoint_type_key):
openai_response_format = {
"id": "chatcmpl-123",
"object": "chat.completion",
"created": 1677652288,
"model": "gpt-3.5-turbo-0613",
"system_fingerprint": "fp_44709d6fcb",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "\n\nHello there, how may I assist you today?",
},
"finish_reason": "stop",
}
],
"usage": {"prompt_tokens": 9, "completion_tokens": 12, "total_tokens": 21},
}
expected_output = {
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "\n\nHello there, how may I assist you today?",
},
"finish_reason": "stop",
}
],
}
with mock.patch("mlflow.deployments.get_deploy_client") as mock_get_deploy_client:
mock_client = mock.MagicMock()
mock_get_deploy_client.return_value = mock_client
# mock out mock_client.get_endpoint() to return chat
mock_client.get_endpoint.return_value = {
endpoint_type_key: "llm/v1/chat",
}
# mock out mock_client.predict() to return expected_output
mock_client.predict.return_value = openai_response_format
response = score_model_on_payload(
"endpoints:/my-endpoint", {"prompt": "my prompt", "temperature": 0.1}
)
assert response == expected_output["choices"][0]["message"]["content"]
@pytest.mark.parametrize(
"endpoint_type_key",
[
"task",
"endpoint_type",
],
)
def test_score_model_endpoints_completions(set_deployment_envs, endpoint_type_key):
openai_response_format = {
"id": "cmpl-8PgdiXapPWBN3pyUuHcELH766QgqK",
"object": "text_completion",
"created": 1701132798,
"model": "gpt-3.5-turbo-instruct",
"choices": [
{
"text": "\n\nHi there! How can I assist you today?",
"index": 0,
"finish_reason": "stop",
},
],
"usage": {"prompt_tokens": 2, "completion_tokens": 106, "total_tokens": 108},
}
expected_output = {
"choices": [
{
"text": "\n\nHi there! How can I assist you today?",
"index": 0,
"finish_reason": "stop",
},
],
}
with mock.patch("mlflow.deployments.get_deploy_client") as mock_get_deploy_client:
mock_client = mock.MagicMock()
mock_get_deploy_client.return_value = mock_client
# mock out mock_client.get_endpoint() to return completions
mock_client.get_endpoint.return_value = {
endpoint_type_key: "llm/v1/completions",
}
# mock out mock_client.predict() to return expected_output
mock_client.predict.return_value = openai_response_format
response = score_model_on_payload(
"endpoints:/my-endpoint", {"prompt": "my prompt", "temperature": 0.1}
)
assert response == expected_output["choices"][0]["text"]
| [
"\n\nThis is a test!",
"The core of the sun is estimated to have a temperature of about 15 million degrees Celsius (27 million degrees Fahrenheit).",
"\n\nHello there, how may I assist you today?",
"my prompt"
] |
2024-01-10 | Pabloo22/ask-youtube-playlists | ask_youtube_playlists~data_processing~create_embeddings.py | """Functions to create the Vector database."""
import os
import pathlib
from dataclasses import dataclass
from typing import List, Union, Dict, Callable
from langchain.embeddings import base
from langchain import embeddings
from langchain.schema import Document
from langchain import vectorstores
import numpy as np
import yaml
import json
import streamlit as st
from .utils import get_device
from .download_transcripts import create_chunked_data
from .create_documents import extract_documents_from_list_of_dicts
DocumentDict = Dict[str, Union[str, float]]
PathLike = Union[str, os.PathLike]
MODEL_TYPES = {
"sentence-transformers": embeddings.SentenceTransformerEmbeddings,
"openai": embeddings.OpenAIEmbeddings,
}
@dataclass
class EmbeddingModelSpec:
"""Class to store the specification of an embedding model.
Attributes:
model_name: The name of the embedding model.
model_type: The type of the embedding model. Can be
`sentence-transformers` or `openai`.
max_seq_length: The maximum number of tokens the model can handle.
"""
model_name: str
model_type: str
max_seq_length: int
def __post_init__(self):
if self.model_type not in MODEL_TYPES:
raise ValueError(f"Model type {self.model_type} is not supported."
f" The supported model types are "
f"{list(MODEL_TYPES.keys())}.")
EMBEDDING_MODELS = [
EmbeddingModelSpec(model_name="msmarco-MiniLM-L-6-v3",
model_type="sentence-transformers",
max_seq_length=512),
EmbeddingModelSpec(model_name="msmarco-distilbert-base-v4",
model_type="sentence-transformers",
max_seq_length=512),
EmbeddingModelSpec(model_name="msmarco-distilbert-base-tas-b",
model_type="sentence-transformers",
max_seq_length=512),
EmbeddingModelSpec(model_name="text-embedding-ada-002",
model_type="openai",
max_seq_length=8191),
]
EMBEDDING_MODELS_NAMES = [embedding_model.model_name
for embedding_model in EMBEDDING_MODELS]
def get_embedding_model(embedding_model_name: str,
) -> base.Embeddings:
"""Returns the embedding model.
Args:
embedding_model_name (str): The name of the embedding model.
Raises:
ValueError: If the model type is not supported.
"""
embedding_model_spec = get_embedding_spec(embedding_model_name)
if embedding_model_spec.model_type == "sentence-transformers":
model_name = f"sentence-transformers/{embedding_model_spec.model_name}"
device = get_device()
model = embeddings.SentenceTransformerEmbeddings(
model_name=model_name,
model_kwargs={"device": device},
)
elif embedding_model_spec.model_type == "openai":
model = embeddings.OpenAIEmbeddings( # type: ignore
model=embedding_model_spec.model_name,
)
else:
raise ValueError(f"Model type {embedding_model_spec.model_type} is not"
f" supported. The supported model types are "
f"{list(MODEL_TYPES.keys())}.")
return model
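# Usage sketch: the returned object implements langchain's `Embeddings`
# interface, so it can embed a batch of texts or a single query. The model
# name and texts below are illustrative.
#
#     model = get_embedding_model("msmarco-MiniLM-L-6-v3")
#     doc_vectors = model.embed_documents(["first chunk", "second chunk"])
#     query_vector = model.embed_query("what is this playlist about?")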
def get_embedding_spec(model_name: str) -> EmbeddingModelSpec:
"""Returns the embedding model specification.
Args:
model_name (str): The name of the embedding model.
Raises:
ValueError: If the model name is not supported.
"""
for embedding_model_spec in EMBEDDING_MODELS:
if embedding_model_spec.model_name == model_name:
return embedding_model_spec
supported_model_names = [embedding_model_spec.model_name
for embedding_model_spec in EMBEDDING_MODELS]
raise ValueError(f"Model name {model_name} is not supported. The "
f"supported model names are {supported_model_names}.")
def create_vectorstore(embedding_model_name: str,
documents: List[Document],
vector_store_type: str = "in-memory",
**kwargs) -> vectorstores.VectorStore:
"""Returns a vector store that contains the vectors of the documents.
Currently, it only supports "in-memory" mode. In the future, it may
support "chroma-db" mode as well.
Note:
In order to be able to make the vector store persistent, the
`vector_store_type` should be `chroma-db` and the `kwargs` should
contain the `persist_directory` argument with the path to the directory
where the vector store will be saved or loaded from. The
`persist_directory` is where Chroma will store its database files on
disk, and load them on start.
Args:
embedding_model_name (str): The name of the embedding model.
documents (List[Document]): List of documents.
vector_store_type (str): The vector store type. Can be `chroma-db` or
`in-memory`.
**kwargs: Additional arguments passed to the `from_documents` method.
Raises:
ValueError: If the `persist_directory` argument is not provided when
the vector store type is `chroma-db`.
"""
if vector_store_type == "chroma-db" and "persist_directory" not in kwargs:
raise ValueError(
"The `persist_directory` argument should be provided when the "
"vector store type is `chroma-db`. If you want to use an in-memory"
" vector store, set the `vector_store_type` argument to "
"`in-memory`.")
object_mapper: Dict[str, Callable] = {
# "chroma-db": vectorstores.Chroma.from_documents,
"in-memory": vectorstores.DocArrayInMemorySearch.from_documents,
}
embedding_model = get_embedding_model(embedding_model_name)
vectorstore = object_mapper[vector_store_type](
documents, embedding_model, **kwargs
)
return vectorstore
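# Usage sketch (assumes `docs` is a list of langchain `Document` objects, e.g.
# produced by `extract_documents_from_list_of_dicts`): build an in-memory
# store and run a similarity search against it.
#
#     store = create_vectorstore(
#         embedding_model_name="msmarco-MiniLM-L-6-v3",
#         documents=docs,
#         vector_store_type="in-memory",
#     )
#     hits = store.similarity_search("what is said about transformers?", k=4)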
def save_vectorstore(chroma_vectorstore: vectorstores.Chroma) -> None:
"""Makes the vectorstore persistent in the local disk.
The vectorstore is saved in the persist directory indicated when the
vectorstore was created.
Args:
chroma_vectorstore (VectorStore): The vectorstore.
"""
chroma_vectorstore.persist()
def load_vectorstore(persist_directory: PathLike) -> vectorstores.Chroma:
"""Loads a vectorstore from the local disk.
Args:
persist_directory (Union[str, os.PathLike]): The directory where the
vectorstore is saved.
Returns:
VectorStore: The Chroma vectorstore.
"""
chroma_vectorstore = vectorstores.Chroma(
persist_directory=str(persist_directory)
)
return chroma_vectorstore
def _create_hyperparams_yaml(directory: PathLike,
model_name: str,
max_chunk_size: int,
min_overlap_size: int):
"""Creates the hyperparams.yaml file in the directory."""
hyperparams = {
"model_name": model_name,
"max_chunk_size": max_chunk_size,
"min_overlap_size": min_overlap_size,
}
# Create the directory if it does not exist.
pathlib.Path(directory).mkdir(parents=False, exist_ok=True)
hyperparams_path = pathlib.Path(directory) / "hyperparams.yaml"
with open(hyperparams_path, "w") as file:
yaml.dump(hyperparams, file)
def load_hyperparams(directory: PathLike) -> Dict[str, Union[str, int]]:
"""Loads the hyperparams.yaml file in the directory."""
hyperparams_path = pathlib.Path(directory) / "hyperparams.yaml"
with open(hyperparams_path, "r") as file:
hyperparams = yaml.load(file, Loader=yaml.FullLoader)
return hyperparams
def save_json(chunked_data: List[dict],
path: pathlib.Path,
file_name: str) -> None:
"""Saves the data in a json file.
Args:
chunked_data (List[dict]): The data to be saved.
path (PathLike): The path to the json file.
file_name (str): The name of the json file.
"""
# Create the directory if it does not exist.
path.mkdir(parents=True, exist_ok=True)
file_path = path / file_name
with open(file_path, "w") as file:
json.dump(chunked_data, file)
def create_embeddings_pipeline(retriever_directory: PathLike,
embedding_model_name: str,
max_chunk_size: int,
min_overlap_size: int,
use_st_progress_bar: bool = True) -> None:
"""Sets up the embeddings for the given embedding model in the directory.
Steps:
1. Creates the retriever_directory if it does not exist.
2. Creates the hyperparams.yaml file.
3. Chunks the data.
4. Creates the embeddings and saves them in the retriever_directory.
Args:
retriever_directory (PathLike): The directory where the embeddings will
be saved. It should be inside a `data/playlist_name` directory.
This function assumes that the playlist directory contains a
`raw` directory with the json files of each video.
embedding_model_name (str): The name of the embedding model.
max_chunk_size (int): The maximum number of characters in a chunk.
min_overlap_size (int): The minimum number of characters in the overlap
between two consecutive chunks.
use_st_progress_bar (bool): Whether to use the Streamlit progress bar
or not.
"""
retriever_directory = pathlib.Path(retriever_directory)
embedding_model = get_embedding_model(embedding_model_name)
# Create the hyperparams.yaml file.
_create_hyperparams_yaml(
retriever_directory,
embedding_model_name,
max_chunk_size,
min_overlap_size
)
playlist_directory = pathlib.Path(retriever_directory).parent
json_files_directory = playlist_directory / "raw"
chunked_data_directory = retriever_directory / "chunked_data"
json_files = list(json_files_directory.glob("*.json"))
st_progress_bar = st.progress(0) if use_st_progress_bar else None
total = len(json_files)
    # Create the retriever directory if it does not exist.
pathlib.Path(retriever_directory).mkdir(parents=True, exist_ok=True)
for i, json_file_path in enumerate(json_files, start=1):
if st_progress_bar is not None:
st_progress_bar.progress(i / total, f"{i}/{total}")
chunked_data = create_chunked_data(
json_file_path,
max_chunk_size,
min_overlap_size
)
file_name = json_file_path.stem
        # Save the chunked data in the `chunked_data` directory.
save_json(chunked_data, chunked_data_directory, f"{file_name}.json")
new_documents = extract_documents_from_list_of_dicts(chunked_data)
documents_text = [document.page_content for document in new_documents]
new_video_embeddings = embedding_model.embed_documents(documents_text)
new_video_embeddings = np.array(new_video_embeddings) # type: ignore
# Save the embeddings in the `embeddings` directory.
embeddings_directory = retriever_directory / "embeddings"
# Create the directory if it does not exist.
pathlib.Path(embeddings_directory).mkdir(exist_ok=True)
embeddings_path = embeddings_directory / f"{file_name}.npy"
np.save(str(embeddings_path), new_video_embeddings)
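# Usage sketch (illustrative paths): chunk and embed every raw transcript of a
# playlist. Afterwards the retriever directory contains hyperparams.yaml,
# chunked_data/*.json and embeddings/*.npy.
#
#     create_embeddings_pipeline(
#         retriever_directory="data/my_playlist/msmarco-MiniLM-L-6-v3",
#         embedding_model_name="msmarco-MiniLM-L-6-v3",
#         max_chunk_size=1000,
#         min_overlap_size=100,
#         use_st_progress_bar=False,
#     )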
def load_embeddings(embedding_directory: PathLike) -> List[np.ndarray]:
"""Loads the embeddings from the retriever_directory.
Args:
embedding_directory (PathLike): The directory where the embeddings are
saved.
Returns:
        List[np.ndarray]: The embeddings. The arrays are returned in the glob
            order of the `.npy` files in the embeddings directory, one array
            per chunked video transcript.
"""
numpy_files = list(pathlib.Path(embedding_directory).glob("*.npy"))
video_embeddings = []
for numpy_file in numpy_files:
embedding = np.load(str(numpy_file))
video_embeddings.append(embedding)
return video_embeddings
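# Usage sketch (paths illustrative): read the persisted hyper-parameters and
# embedding arrays back; each array holds the chunk embeddings of one video.
#
#     params = load_hyperparams("data/my_playlist/msmarco-MiniLM-L-6-v3")
#     vectors = load_embeddings("data/my_playlist/msmarco-MiniLM-L-6-v3/embeddings")
#     vectors[0].shape   # -> (number_of_chunks, embedding_dimension)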
| [] |
2024-01-10 | vojtsek/to-llm-bot | langchain-bot~llmbot~prompts~banking.py | from langchain import PromptTemplate
pay_money_prompt = PromptTemplate(
input_variables=["history", "question"],
template="""Jsi bankovní asistent.
Zákazník chce poslat peníze příteli.
Úkolem asistenta je zjistit jméno přítele a částku peněz, které se mají poslat.
Jakmile bude zjištěno jméno i částka, asistent řekne "confirm()", neptá se na potvrzení.
U každé odpovědi uveď aktuální jméno a částku v JSON formátu jako slovník s klíči "amount" a "recipient".
Pokud zákazník řekne něco, co nesouvisí s převodem ani se změnou vyžadovaných hodnot, asistent by měl říct přesně "change_topic()".
Pokud chce zákazník zrušit transakci, asistent by měl říct přesně "cancel()" a nic jiného.
Uveď vždy jen jednu odpověď.
{history}
Zákazník: {question}
Asistent:""")
balance_prompt = PromptTemplate(
input_variables=["question", "balance"],
template="""Jsi bankovní asistent.
Zákazník chce zjistit stav svého účtu.
Momentálně má na účtu {balance} Kč.
Sděl mu to.
Uveď vždy jen jednu odpověď.
Zákazník: {question}
Asistent:""")
want_buy_prompt = PromptTemplate(
input_variables=["question", "balance", "history"],
template="""Jsi bankovní asistent.
Zákazník si chce něco koupit.
Momentálně má na účtu {balance} Kč.
Nejprve zjisti, co si chce koupit a kolik to stojí.
Až zjistíš co si chce koupit a kolik to stojí. Sděl mu zda mu stačí peníze a řekni proč to tak je.
Pokud zákazník řekne něco, co souvisí s posláním peněz, řekni "change_topic()" anglicky.
Neodpovídej na otázky, které se nesouvisí s nákupem, řekni jen "change_topic()" anglicky.
Uveď vždy jen jednu odpověď.
{history}
Zákazník: {question}
    Asistent:""")
| [
"question",
"Jsi bankovní asistent.\nZákazník chce poslat peníze příteli.\nÚkolem asistenta je zjistit jméno přítele a částku peněz, které se mají poslat.\nJakmile bude zjištěno jméno i částka, asistent řekne \"confirm()\", neptá se na potvrzení.\nU každé odpovědi uveď aktuální jméno a částku v JSON formátu jako slovník s klíči \"amount\" a \"recipient\".\nPokud zákazník řekne něco, co nesouvisí s převodem ani se změnou vyžadovaných hodnot, asistent by měl říct přesně \"change_topic()\".\nPokud chce zákazník zrušit transakci, asistent by měl říct přesně \"cancel()\" a nic jiného.\nUveď vždy jen jednu odpověď.\n{history}\nZákazník: {question}\nAsistent:",
"Jsi bankovní asistent.\nZákazník chce zjistit stav svého účtu.\nMomentálně má na účtu {balance} Kč.\nSděl mu to.\nUveď vždy jen jednu odpověď.\nZákazník: {question}\nAsistent:",
"Jsi bankovní asistent.\nZákazník si chce něco koupit.\nMomentálně má na účtu {balance} Kč.\nNejprve zjisti, co si chce koupit a kolik to stojí.\nAž zjistíš co si chce koupit a kolik to stojí. Sděl mu zda mu stačí peníze a řekni proč to tak je.\nPokud zákazník řekne něco, co souvisí s posláním peněz, řekni \"change_topic()\" anglicky.\nNeodpovídej na otázky, které se nesouvisí s nákupem, řekni jen \"change_topic()\" anglicky.\nUveď vždy jen jednu odpověď.\n{history}\nZákazník: {question}\nAsistent:",
"balance"
] |
2024-01-10 | vojtsek/to-llm-bot | create_faiss_db.py | import argparse
import pickle
import glob
import os
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceEmbeddings
from langchain.docstore.document import Document
from definitions import MW_FEW_SHOT_DOMAIN_DEFINITIONS, SGD_FEW_SHOT_DOMAIN_DEFINITIONS
from loaders import load_mwoz, load_sgd
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--output_faiss_db')
parser.add_argument('--model', default='sentence-transformers/all-mpnet-base-v2', help='Embedding model name; text-embedding-ada-002 for OpenAI, sentence-transformers/all-mpnet-base-v2 for HuggingFace')
parser.add_argument('--database_path', default='multiwoz_database')
parser.add_argument('--context_size', type=int, default=3)
parser.add_argument('--embeddings', default='huggingface', help='huggingface or openai')
parser.add_argument('--dataset', default='multiwoz')
parser.add_argument('--total', default=500, type=int)
parser.add_argument('--split', default='train', type=str)
args = parser.parse_args()
if args.embeddings == 'huggingface':
embeddings = HuggingFaceEmbeddings(model_name=args.model)
else:
embeddings = OpenAIEmbeddings(document_model_name=args.model,
query_model_name=args.model,
openai_api_key=os.environ.get('OPENAI_API_KEY', ''))
if args.dataset == 'multiwoz':
available_domains = [d for d in MW_FEW_SHOT_DOMAIN_DEFINITIONS.keys() if d != 'bus']
data_gen = load_mwoz(args.database_path, args.context_size, split=args.split, total=args.total, available_domains=available_domains, shuffle=True, only_single_domain=True)
else:
available_domains = list(SGD_FEW_SHOT_DOMAIN_DEFINITIONS.keys())
data_gen = load_sgd(args.context_size, split=args.split, total=args.total, available_domains=available_domains, shuffle=True)
docs = []
for turn in data_gen:
doc = Document(page_content=turn['page_content'],
metadata=turn['metadata'])
docs.append(doc)
faiss_vs = FAISS.from_documents(documents=docs,
embedding=embeddings)
with open(args.output_faiss_db, 'wb') as f:
pickle.dump(faiss_vs, f)
| [] |
2024-01-10 | vojtsek/to-llm-bot | langchain-bot~llmbot~bots~multiwoz~multiwoz.py | import os
import json
from langchain.llms import OpenAI
from llmbot.classifiers import VectorSimilarityTopicClassiffier
from llmbot.scenarios.multiwoz import MultiwozSingleDomainScenario
from llmbot.scenarios.base import ScenarioAction
from llmbot.prompts.multiwoz import restaurant_prompt, restaurant_prompt_with_db
from llmbot.bots.multiwoz.database import MultiWOZDatabase
class MultiWOZBot:
def __init__(self,
topic_classifier: VectorSimilarityTopicClassiffier,
data_dir: str):
self.topic_classifier = topic_classifier
self.database = MultiWOZDatabase(os.path.join(data_dir, 'database'))
self.data_dir = data_dir
gpt3_llm = OpenAI(model_name="text-davinci-003",
temperature=0,
top_p=0.9,
openai_api_key=os.environ.get('OPENAI_API_KEY', ''))
self.scenarios = {
'restaurant': (MultiwozSingleDomainScenario(llm=gpt3_llm, prompt=restaurant_prompt),
MultiwozSingleDomainScenario(llm=gpt3_llm, prompt=restaurant_prompt_with_db)),
}
def run_with_dataset(self, dataset):
n = 1
results = {}
for dialog in dataset['test']:
if n > 50:
break
if len(dialog['services']) != 1:
continue
if dialog['services'][0] not in ['restaurant']:
continue
n += 1
history = []
topic = None
domain = dialog['services'][0]
dialogue_id = dialog['dialogue_id'].split('.')[0].lower()
results[dialogue_id] = []
for tn in range(0, len(dialog['turns']['utterance']), 2):
question = dialog['turns']['utterance'][tn]
print(question)
if topic is None:
topic = self.topic_classifier.classify(question)
print("Current topic:", topic)
topic = "restaurant"
topic_scenario = self.scenarios[topic]
answer, values, act = topic_scenario[0](history="\n".join(history),
question=question)
db_results = self.database.query(domain=topic,
constraints=self._get_constraints(values))
print(values, len(db_results))
topic_scenario[1].database_count = len(db_results)
answer, _, act = topic_scenario[1](history="\n".join(history),
question=question)
if act == ScenarioAction.CHANGE_TOPIC:
topic = self.topic_classifier.classify(question)
elif act == ScenarioAction.CONFIRM:
print(values)
else:
print(answer)
if act != ScenarioAction.CHANGE_TOPIC:
history.append(f"Customer: {question}")
history.append(f"Assistant: {answer}")
results[dialogue_id].append({
"response": answer,
"state": {
"restaurant": values
},
"active_domains": [topic]
})
with open("results.json", "w") as f:
json.dump(results, f, indent=4)
def run(self):
histories = {
'restaurant': [],
}
topic = None
question, answer = "", ""
prompt_user = True
while True:
if prompt_user:
question = input(">").strip()
prompt_user = True
if topic is None:
topic = self.topic_classifier.classify(question)
print("Current topic:", topic)
topic_scenario = self.scenarios[topic]
answer, values, act = topic_scenario[0](history="\n".join(histories[topic]),
question=question)
print(values)
db_results = self.database.query(domain=topic,
constraints=self._get_constraints(values))
print(values, len(db_results))
topic_scenario[1].database_count = len(db_results)
answer, values, act = topic_scenario[1](history="\n".join(histories[topic]),
question=question)
if act == ScenarioAction.CHANGE_TOPIC:
topic = self.topic_classifier.classify(question)
prompt_user = False
elif act == ScenarioAction.CONFIRM:
print(values)
else:
print(answer)
if act != ScenarioAction.CHANGE_TOPIC:
histories[topic].append(f"Customer: {question}")
histories[topic].append(f"Assistant: {answer}")
def _get_constraints(self, values):
constraints = {}
for key, value in values.items():
if value is None or len(value) == 0:
continue
if key == 'food':
constraints['food'] = value
elif key in ['pricerange', 'price']:
constraints['pricerange'] = value
elif key == 'area':
if 'centre' in value or 'downtown' in value:
value = 'centre'
constraints['area'] = value
elif key == 'name':
constraints['name'] = value
return constraints
| [
"False",
"True"
] |
2024-01-10 | vojtsek/to-llm-bot | langchain-bot~llmbot~bots~banking.py | import argparse
import pickle
import os
from langchain import OpenAI
import openai
from classifiers import VectorSimilarityTopicClassiffier
from llmbot.scenarios.base import ScenarioAction
from llmbot.scenarios.banking import BankingScenarioWJSON, SimpleBankingScenario
from llmbot.scenarios.multiwoz import MultiwozSingleDomainScenario
from llmbot.prompts.banking import pay_money_prompt, balance_prompt, want_buy_prompt
from llmbot.prompts.multiwoz import restaurant_prompt
openai.api_key = os.environ.get('OPENAI_API_KEY', '')
class BankingBot:
def __init__(self, topic_classifier: VectorSimilarityTopicClassiffier):
self.topic_classifier = topic_classifier
gpt3_llm = OpenAI(model_name="text-davinci-003",
temperature=0.5,
openai_api_key=os.environ.get('OPENAI_API_KEY', ''))
self.scenarios = {
'pay_money': BankingScenarioWJSON(llm=gpt3_llm, prompt=pay_money_prompt),
'balance': SimpleBankingScenario(llm=gpt3_llm, prompt=balance_prompt),
'want_buy': SimpleBankingScenario(llm=gpt3_llm, prompt=want_buy_prompt),
}
def run(self):
context = {
'pay_money': [],
'balance': [],
'want_buy': [],
}
topic = None
question, answer = "", ""
prompt_user = True
while True:
if prompt_user:
question = input(">").strip()
prompt_user = True
if topic is None:
topic = self.topic_classifier.classify(question)
print("Current topic:", topic)
topic_scenario = self.scenarios[topic]
answer, values, act = topic_scenario(history="\n".join(context[topic]),
question=question)
if act == ScenarioAction.CHANGE_TOPIC:
topic = self.topic_classifier.classify(question)
prompt_user = False
elif act == ScenarioAction.CONFIRM:
print(values)
else:
print(answer)
if act != ScenarioAction.CHANGE_TOPIC:
context[topic].append(f"Zákazník: {question}")
context[topic].append(f"Asistent: {answer}")
| [
"False",
"True"
] |
2024-01-10 | vojtsek/to-llm-bot | langchain-bot~llmbot~prompts~multiwoz.py | from langchain import PromptTemplate
restaurant_prompt = PromptTemplate(
input_variables=["history", "question"],
template="""
You are an assistant that helps people to book a restaurant.
The customer can ask for a restaurant by name, area, food, or price.
Provide summary of the conversation in JSON with keys: area, food, pricerange.
For area, just use values: centre, east, north, south, west.
For price, just use values: cheap, moderate, expensive.
If the user doesn't care about some of the values, just leave them empty.
Provide only information that is available in the database.
History:
{history}
Customer: {question}
Assistant:""")
restaurant_prompt_with_db = PromptTemplate(
input_variables=["history", "question", "database_count"],
template="""
You are an assistant that helps people to book a restaurant.
The customer can ask for a restaurant by name, area, food, or price.
Provide final answer on separate line
If there is 0 restaurants in the database, ask the customer to change the request.
If you find a restaurant, provide [restaurant_name].
Do not provide restaurant names or any info. When asked just use [restaurant_name], [restaurant_phone], [restaurant_address] or [restaurant_postcode].
If customer asks for booking, do it and provide [booking_reference].
Currently there is {database_count} restaurants in the database that fit criteria.
History:
{history}
Customer: {question}
    Assistant:""")
| [
"database_count",
"question",
"\nYou are an assistant that helps people to book a restaurant.\nThe customer can ask for a restaurant by name, area, food, or price.\nProvide final answer on separate line\nIf there is 0 restaurants in the database, ask the customer to change the request.\nIf you find a restaurant, provide [restaurant_name].\nDo not provide restaurant names or any info. When asked just use [restaurant_name], [restaurant_phone], [restaurant_address] or [restaurant_postcode].\nIf customer asks for booking, do it and provide [booking_reference].\nCurrently there is {database_count} restaurants in the database that fit criteria.\nHistory:\n{history}\nCustomer: {question}\nAssistant:",
"\nYou are an assistant that helps people to book a restaurant.\nThe customer can ask for a restaurant by name, area, food, or price.\nProvide summary of the conversation in JSON with keys: area, food, pricerange.\nFor area, just use values: centre, east, north, south, west.\nFor price, just use values: cheap, moderate, expensive.\nIf the user doesn't care about some of the values, just leave them empty.\nProvide only information that is available in the database.\nHistory:\n{history}\nCustomer: {question}\nAssistant:"
] |
2024-01-10 | vojtsek/to-llm-bot | langchain-bot~llmbot~classifiers.py | from langchain.vectorstores import VectorStore
class VectorSimilarityTopicClassiffier:
def __init__(self, vector_store: VectorStore):
self.vector_store = vector_store
def classify(self, text: str) -> str:
return self.vector_store.similarity_search(text, k=1)[0].metadata['topic']
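# Usage sketch: any langchain VectorStore works, provided every stored Document
# carries a "topic" entry in its metadata (for example a FAISS index built from
# labelled example utterances).
#
#     classifier = VectorSimilarityTopicClassiffier(vector_store)
#     classifier.classify("I want to send money to a friend")   # -> e.g. "pay_money"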
| [] |
2024-01-10 | vojtsek/to-llm-bot | langchain-bot~llmbot~scenarios~banking.py | from typing import Tuple, Dict
from langchain.llms import BaseLLM
from langchain import PromptTemplate
from llmbot.scenarios.base import FrameScenario, ScenarioAction
from llmbot.utils import parse_json_from_text_multiline, remove_json_from_text_multiline
class SimpleBankingScenario(FrameScenario):
def __init__(self, llm: BaseLLM, prompt: PromptTemplate):
super().__init__(llm, prompt)
def _preprocess(self, kwarg_dict: Dict) -> Dict:
kwarg_dict.update({
'balance': '10000',
})
return kwarg_dict
def _postprocess(self, response: str) -> Tuple[str, Dict, ScenarioAction]:
response = response.strip()
if 'change_topic' in response.lower() or ('změň' in response.lower() and 'téma' in response.lower()):
return response, {}, ScenarioAction.CHANGE_TOPIC
else:
return response, {}, ScenarioAction.REPLY
class BankingScenarioWJSON(SimpleBankingScenario):
def __init__(self, llm: BaseLLM, prompt: PromptTemplate):
super().__init__(llm, prompt)
def _postprocess(self, response: str) -> Tuple[str, Dict, ScenarioAction]:
response = response.strip()
print(response)
values = parse_json_from_text_multiline(response)
response = remove_json_from_text_multiline(response)
if 'recipient' in values and 'amount' in values and \
len(values['recipient']) > 0 and (len(str(values['amount'])) > 0 and int(values['amount']) > 0):
return response, values, ScenarioAction.CONFIRM
elif 'change_topic' in response.lower() or ('změň' in response.lower() and 'téma' in response.lower()):
return response, values, ScenarioAction.CHANGE_TOPIC
else:
                return response, values, ScenarioAction.REPLY
| [] |
2024-01-10 | butterfling/CPT-dem- | app~dataRecords.py | import openai
from authenticateDatabase import connect_to_sql_database
import os
# Database connection and cursor creation
connect = connect_to_sql_database("https://cptdatabasecredentials.vault.azure.net/","dbadmin")
cursor = connect.cursor()
# Constants
table_name = "consentGiven"
email_field_name = "[Email]"
ssn_field_name = "[Social-Security-Number]"
financial_information_field_name = "[Financial-Information]"
# Functions for database queries
def count_total_records(cursor, table_name):
query = f"SELECT COUNT(*) FROM {table_name}"
cursor.execute(query)
result = cursor.fetchone()
return result[0] if result else 0
def count_non_encrypted_emails(cursor, table_name):
query = f"SELECT COUNT(*) FROM {table_name} WHERE {email_field_name} NOT LIKE 'b''gAAA%'"
cursor.execute(query)
result = cursor.fetchone()
return result[0] if result else 0
def count_non_encrypted_social_security_numbers(cursor, table_name):
query = f"SELECT COUNT(*) FROM {table_name} WHERE {ssn_field_name} NOT LIKE 'b''gAAA%'"
cursor.execute(query)
result = cursor.fetchone()
return result[0] if result else 0
def count_non_encrypted_financial_information(cursor, table_name):
query = f"SELECT COUNT(*) FROM {table_name} WHERE {financial_information_field_name} NOT LIKE 'b''gAAA%'"
cursor.execute(query)
result = cursor.fetchone()
return result[0] if result else 0
# Gathering data
total_records = count_total_records(cursor, table_name)
non_encrypted_emails = count_non_encrypted_emails(cursor, table_name)
non_encrypted_social_security_numbers = count_non_encrypted_social_security_numbers(cursor, table_name)
non_encrypted_financial_information = count_non_encrypted_financial_information(cursor, table_name)
data = f"total_records: {total_records}, non_encrypted_emails: {non_encrypted_emails}, non_encrypted_social_security_numbers: {non_encrypted_social_security_numbers}, non_encrypted_financial_information: {non_encrypted_financial_information}"
# OpenAI API key
openai.api_key = os.getenv("OPENAI_API_KEY")
# Function to get summary using OpenAI
def summarize_with_openai(data):
response = openai.Completion.create(
engine="text-davinci-002",
prompt=(
"You are a humorous data protection officer. Analyze the following data, which is a collection of customer records where consent has not been given according to GDPR. Provide a humorous summary, suggest potential changes for compliance, and advise on precautions to take if these records are stored in plain text.Make sure to include the numbers in your response. Here's the data:\n\n" + data
),
temperature=0.7,
max_tokens=150
)
return response.choices[0].text.strip()
def result():
return summarize_with_openai(data)
# Writing summary to a file
with open('summary.txt', 'w') as file:
file.write(result())
print("Summary written to summary.txt")
| [
"You are a humorous data protection officer. Analyze the following data, which is a collection of customer records where consent has not been given according to GDPR. Provide a humorous summary, suggest potential changes for compliance, and advise on precautions to take if these records are stored in plain text.Make sure to include the numbers in your response. Here's the data:\n\nPLACEHOLDER"
] |
2024-01-10 | butterfling/CPT-dem- | app~textToSpeech.py | import openai
import json
import os
import azure.cognitiveservices.speech as speechsdk
from dataRecords import result
from authenticateDatabase import connect_to_sql_database
speech_config = speechsdk.SpeechConfig(subscription="838863d9201c43cfbcd8e429b36be7a3", region="eastus")
audio_config = speechsdk.audio.AudioOutputConfig(use_default_speaker=True)
speech_config.speech_synthesis_voice_name='en-GB-RyanNeural'
speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config, audio_config=audio_config)
text = result()
speech_synthesis_result = speech_synthesizer.speak_text_async(text).get()
if speech_synthesis_result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
print("Speech synthesized for text [{}]".format(text))
elif speech_synthesis_result.reason == speechsdk.ResultReason.Canceled:
cancellation_details = speech_synthesis_result.cancellation_details
print("Speech synthesis canceled: {}".format(cancellation_details.reason))
if cancellation_details.reason == speechsdk.CancellationReason.Error:
if cancellation_details.error_details:
print("Error details: {}".format(cancellation_details.error_details))
print("Did you set the speech resource key and region values?")
# connect_database = connect_to_sql_database()
# cursor = connect_database.cursor()
| [] |
2024-01-10 | Jungu12/MaengMaengMarble | docs~%EA%B0%9C%EC%9D%B8%20%EA%B3%B5%EB%B6%80~%EA%B6%8C%EB%AF%BC%EC%9E%AC~index1.py | import streamlit as st
from langchain.llms import OpenAI
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
def generate_response(uploaded_file, openai_api_key, query_text):
# Load document if file is uploaded
if uploaded_file is not None:
documents = [uploaded_file.read().decode()]
# Split documents into chunks
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.create_documents(documents)
# Select embeddings
embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
# Create a vectorstore from documents
db = Chroma.from_documents(texts, embeddings)
# Create retriever interface
retriever = db.as_retriever()
# Create QA chain
qa = RetrievalQA.from_chain_type(llm=OpenAI(openai_api_key=openai_api_key), chain_type='stuff', retriever=retriever)
return qa.run(query_text)
# Page title
st.set_page_config(page_title='🦜🔗 Ask the Doc App')
st.title('🦜🔗 Ask the Doc App')
# File upload
uploaded_file = st.file_uploader('Upload an article', type='txt')
# Query text
query_text = st.text_input('Enter your question:', placeholder = 'Please provide a short summary.', disabled=not uploaded_file)
# Form input and query
result = []
with st.form('myform', clear_on_submit=True):
openai_api_key = st.text_input('OpenAI API Key', type='password', disabled=not (uploaded_file and query_text))
submitted = st.form_submit_button('Submit', disabled=not(uploaded_file and query_text))
if submitted and openai_api_key.startswith('sk-'):
with st.spinner('Calculating...'):
response = generate_response(uploaded_file, openai_api_key, query_text)
result.append(response)
del openai_api_key
if len(result):
    st.info(response)
| [] |
2024-01-10 | NirantK/nirantk.github.io | community_dev~src~summarisation.py | import datetime
import json
import re
from pathlib import Path
import fire
import pandas as pd
import pytz
from formatting_utils import human_date
from langchain.chains.summarize import load_summarize_chain
from langchain.chat_models import ChatOpenAI
from langchain.docstore.document import Document
from langchain.prompts import PromptTemplate
from langchain.text_splitter import CharacterTextSplitter
from prompts import PROMPT_TEMPLATES
text_splitter = CharacterTextSplitter.from_tiktoken_encoder()
def make_docs(plain_text: str) -> list:
texts = text_splitter.split_text(plain_text)
docs = [Document(page_content=t) for t in texts]
return docs
def summarize_docs(
docs: list,
prompt_template: str,
model,
chain_type="stuff",
) -> str:
prompt = PromptTemplate(template=prompt_template, input_variables=["text"])
# We should abstract chain logic when more chain type experiments are added
if chain_type == "map_reduce":
chain = load_summarize_chain(
model, chain_type=chain_type, map_prompt=prompt, combine_prompt=prompt
)
else:
chain = load_summarize_chain(model, chain_type=chain_type, prompt=prompt)
chain_output = chain({"input_documents": docs}, return_only_outputs=True)
return chain_output["output_text"]
def summarize(message: str, prompt_template: str, chain_type: str = "stuff") -> str:
docs = make_docs(message)
summary_text = summarize_docs(
docs,
prompt_template,
chain_type="stuff",
model=ChatOpenAI(temperature=0),
)
return summary_text
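# Usage sketch (assumes OpenAI credentials are configured for ChatOpenAI):
# summarise one day's concatenated messages and then distil a title/description
# from that summary, mirroring how the calls are made further below.
#
#     day_summary = summarize(day_text, PROMPT_TEMPLATES["summary_template"])
#     title_desc = summarize(day_summary,
#                            PROMPT_TEMPLATES["title_description_template"],
#                            chain_type="map_reduce")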
def extract_urls_context(text: str, window_size: int = 1) -> list:
lines = text.split("\n")
url_pattern = re.compile(
r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
)
urls_context = []
for idx, line in enumerate(lines):
for match in url_pattern.finditer(line):
start, end = match.span()
prev_line = lines[idx - window_size] if idx > 0 else ""
next_line = lines[idx + window_size] if idx < len(lines) - 1 else ""
context = f"{prev_line}\n{line}\n{next_line}".strip()
# Dropping match.group() from append as we are not using it
urls_context.append(context)
return urls_context
# TODO: Below functions can be simplified and optimized if the purpose is more clearer
def get_page_header_date(date_object):
# Combine the date object with a time object and set the desired timezone
dt = datetime.datetime.combine(date_object, datetime.time())
desired_timezone = pytz.timezone("Asia/Kolkata")
localized_dt = desired_timezone.localize(dt)
# Format the datetime object using strftime
formatted_datetime = localized_dt.strftime("%Y-%m-%dT%H:%M:%S%z")
formatted_datetime = formatted_datetime[:-2] + ":" + formatted_datetime[-2:]
return formatted_datetime
def make_page_header(row):
date, summary_text = row["Date"], row["title_desc"]
dt = get_page_header_date(date)
fields = json.loads(summary_text) # This is expensive!
summary_title, summary_description = fields["title"], fields["description"]
page_header = f"""+++
title = "{summary_title}"
date = {dt}
tags = ["daily_summary"]
featured_image = ""
description = "{summary_description}"
toc = true
+++
"""
return page_header
def make_page(row):
page = (
row["page_headers"]
+ "\n"
+ row["Summary"]
+ "\n"
+ "\n## Links\nThe description and link can be mismatched because of extraction errors.\n\n"
+ row["EndNote"]
)
file_name = f"{human_date(row['Date'])}.md"
return page, file_name
def generate_daily_df(csv_path: str) -> None:
df = pd.read_csv(csv_path)
df["Datetime"] = pd.to_datetime(df["Datetime"])
df["Date"] = df["Datetime"].dt.date
daily_df = df.groupby("Date").agg({"Message": " \n ".join}).reset_index()
daily_df["wc"] = daily_df["Message"].apply(lambda x: len(x.split()))
return daily_df
def generate_daily_summary(csv_path: str) -> None:
readpath = Path(csv_path).resolve()
assert readpath.exists(), "CSV file does not exist"
write_dir = Path("../../content/ai/").resolve()
    daily_df = generate_daily_df(readpath)
# Generating the summary column
daily_df["Summary"] = daily_df["Message"].apply(
summarize, args=(PROMPT_TEMPLATES["summary_template"],)
)
# Generating the EndNote column
daily_df["Endnote"] = (
daily_df["Message"]
.apply(extract_urls_context)
.apply(
lambda urls_context: "\n".join(
[
summarize(message, PROMPT_TEMPLATES["link_context_template"])
for message in urls_context
]
)
)
)
# Generating Title and Description Columns that can be passed to header method
# We are avoiding the for loop with this intermediate column
daily_df["title_desc"] = daily_df["Summary"].apply(
summarize,
args=(
PROMPT_TEMPLATES["title_description_template"],
"map_reduce",
),
)
# Generating page headers
page_headers = []
for idx in range(len(daily_df)):
page_headers.append(make_page_header(daily_df.iloc[idx]))
# Dumping all the updates
daily_df["page_headers"] = page_headers
daily_df.to_json("daily_backup.json") # This is always in the current directory
# Using page headers to make pages
for idx in range(len(daily_df)):
page, file_name = make_page(daily_df.iloc[idx])
file_path = write_dir / file_name
with file_path.open("w") as f:
f.write(page)
if __name__ == "__main__":
fire.Fire(generate_daily_summary)
| [] |
2024-01-10 | justdataplease/medium-sky | text_analyzer.py | import re
from collections import Counter
import datetime
from urllib.parse import urlparse
from excluded_urls import EXCLUDE_URLS
from nltk import PorterStemmer
from nltk.corpus import stopwords
from nltk.tag import pos_tag
from nltk.util import ngrams
import validators
import os
import csv
import openai
def tag_visible(element) -> bool:
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
return True
def get_ngrams(words: list, n: int) -> list:
n_grams = ngrams(words, n)
return [' '.join(grams) for grams in n_grams]
def pos_tagger(words: list) -> dict:
# Part-of-speech tag each token
pos_tags = pos_tag(words)
# Count the number of adjectives, nouns, and verbs
num_adjectives = len([word for word, pos in pos_tags if pos in ['JJ', 'JJR', 'JJS']])
num_nouns = len([word for word, pos in pos_tags if pos in ['NN', 'NNS', 'NNP', 'NNPS']])
num_verbs = len([word for word, pos in pos_tags if pos in ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']])
return {"adj": num_adjectives, "noun": num_nouns, "verb": num_verbs}
def get_day_of_week(date):
day_of_week = date.strftime("%A")
return day_of_week
def text_stemmer(words: list) -> dict:
stemmer = PorterStemmer()
# Use the PorterStemmer to stem each word, excluding stopwords
stop_words = set(stopwords.words('english'))
# Map a stem word with a normal word (randomly)
stemmed_normal_index = {stemmer.stem(word): word for word in words if word not in stop_words}
# Exclude english stop words and words with length <=2
stemmed_words = [stemmed_normal_index[stemmer.stem(word)] for word in words if word not in stop_words and len(word) > 2]
return stemmed_words
def count_external_domains(articles: list) -> int:
    """
    For each article, keep every external (non-Medium) domain it links to only
    once, then return the total number of these per-article domains across all
    of the profile's articles.
    """
domains = []
for links_per_article in articles:
domains_per_article = []
for link in links_per_article["links"]:
href = link[1]
if validators.url(href) and not re.search(EXCLUDE_URLS + "|medium", href):
domains_per_article.append(urlparse(href).netloc)
domains.extend(list(set(domains_per_article)))
return len(domains)
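# Worked example (illustrative URLs, assuming neither domain matches
# EXCLUDE_URLS): duplicate domains inside one article are counted once.
#
#     articles = [
#         {"links": [("a", "https://example.com/x"), ("b", "https://example.com/y")]},
#         {"links": [("c", "https://another.org/")]},
#     ]
#     count_external_domains(articles)   # -> 2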
def counts(words: list, include_stemming=True) -> dict:
"""
    Calculates article statistics: most common words, most common bigrams/trigrams, etc.
"""
if include_stemming:
# Create a PorterStemmer object
stemmed_words = text_stemmer(words)
else:
stemmed_words = words
# Count the frequency of each stemmed word
word_counts = Counter(stemmed_words)
# Find most frequent words
most_common_words = word_counts.most_common(30)
# Create a list of bigrams and count their frequency
bigrams = get_ngrams(stemmed_words, 2)
bigram_counts = Counter(bigrams)
# Find most frequent bigrams
most_common_bigrams = bigram_counts.most_common(15)
# Create a list of trigrams and count their frequency
trigrams = get_ngrams(stemmed_words, 3)
trigram_counts = Counter(trigrams)
# Find most frequent trigrams
most_common_trigrams = trigram_counts.most_common(10)
# Get article type
if len(stemmed_words) < 100:
words_num_cat = "short"
elif len(stemmed_words) < 500:
words_num_cat = "normal"
elif len(stemmed_words) < 1000:
words_num_cat = "medium"
elif len(stemmed_words) < 1800:
words_num_cat = "large"
    else:
        words_num_cat = "very large"
return {"words": stemmed_words, "words_all": words, "word_counts": word_counts, "most_common_words": most_common_words,
"bigrams": bigrams, "bigram_counts": bigram_counts, "most_common_bigrams": most_common_bigrams,
"trigrams": bigrams, "trigram_counts": bigram_counts, "most_common_trigrams": most_common_trigrams,
"words_num_all": len(words), "words_num": len(stemmed_words), "words_num_cat": words_num_cat,
"unique_words_num_all": len(list(set(words))), "unique_words_num": len(list(set(stemmed_words))),
}
def html_to_words(soup):
# Find all text content in the HTML document
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
text = u" ".join(t.strip() for t in visible_texts)
# Split the text content into words
    words = re.findall(r'\w+', text.lower())
return words
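# Minimal sketch of the extraction above (hypothetical markup; assumes a BeautifulSoup object as input):
#   html_to_words(BeautifulSoup("<p>Hello, World!</p>", "html.parser"))  ->  ['hello', 'world']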
def page_analyzer(soup) -> dict:
# Parse the HTML content using BeautifulSoup
try:
h1 = soup.find('h1').text.strip()
except Exception as exc:
print("no h1 found...")
h1 = ""
try:
h2 = soup.find(['h2', 'h3', 'h4']).text.strip()
except Exception as exc:
print("no h2 found...")
h2 = ""
# Split the text content into words
words = html_to_words(soup)
pos_tags = pos_tagger(words)
counters = counts(words)
rs = {"h1": h1, "h2": h2}
return {**counters, **rs, **pos_tags}
def safe_div(x: int, y: int) -> float:
try:
return x / y
except ZeroDivisionError:
return 0
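# Illustration (hypothetical values): safe_div(6, 4) -> 1.5, while safe_div(3, 0) -> 0 instead of raising.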
def counter_to_text(lst: list) -> str:
return ", ".join([f"{x[0]}({x[1]})" for x in lst])
def find_list_div_avg(list1, list2):
total = 0
for i in range(len(list1)):
total += safe_div(list1[i], list2[i])
average = total / len(list1)
return average
def find_dates_frequency(dates: list) -> float:
# Sort the list of dates in ascending order
dates.sort()
# Calculate the frequency of each interval
freq = []
for i in range(len(dates) - 1):
interval = dates[i + 1] - dates[i]
freq.append(interval.days)
# Calculate the average frequency
    avg_freq = sum(freq) / len(freq) if freq else 0
    earliest_date = str(min(dates))
    latest_date = str(max(dates))
    return avg_freq, earliest_date, latest_date
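# Illustration (hypothetical dates): for [2023-01-01, 2023-01-11, 2023-01-21] the function
# returns (10.0, '2023-01-01', '2023-01-21'), i.e. one post every 10 days between those dates.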
def days_between(d1: datetime, d2: datetime = None) -> int:
if d2 is None:
d2 = datetime.date.today()
else:
d2 = datetime.datetime.strptime(d2, "%Y-%m-%d").date()
return (d2 - d1).days
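# Illustration (hypothetical dates): days_between(datetime.date(2023, 1, 1), "2023-01-11") -> 10;
# with no second argument the difference is taken against today's date.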
def stats_to_text(article_stats: dict, article_chars: dict, user_chars: dict) -> str:
claps_per_person = safe_div(article_chars["clap_count"], article_chars["voter_count"])
voter_follower = safe_div(article_chars["voter_count"], user_chars["info"]["followers_count"])
return f"""
<b>Heading 1</b>: {article_stats["h1"]}<br>
<b>Heading 2</b>: {article_stats["h2"]}<br>
<b>ChatGPT Summary</b>:<br> {article_chars["chatgpt"]["summary"]}<br>
<br>
<b>Publication</b>: <a href='{article_chars["publisher_url"]}'>{article_chars["publisher_name"]}</a> <br>
<b>Published At</b>: {str(article_chars["published_at"]["date"])} {article_chars["published_at"]["time_period"]}<br>
<b>Voters - Followers %</b>: {round(voter_follower * 100, 1)}%<br>
<b>Claps per Person</b>: {round(claps_per_person, 1)} ({article_chars["voter_count"]} / {article_chars["clap_count"]})<br>
<b>Responses</b>: {article_chars["post_responses"]}<br>
<br>
<b>Word Count (All)</b>: {article_stats["words_num_all"]}<br>
<b>Word Count (Stemmed)</b>: {article_stats["words_num"]} ({article_stats["words_num_cat"]})<br>
<b>Stemmed words / words</b>: {round(safe_div(article_stats["words_num"], article_stats["words_num_all"]) * 100, 1)}% ({article_stats["words_num"]} / {article_stats["words_num_all"]})<br>
<b>Unique words / words</b>: {round(safe_div(article_stats["unique_words_num_all"], article_stats["words_num_all"]) * 100, 1)}% ({article_stats["unique_words_num_all"]} / {article_stats["words_num_all"]})<br>
<b>Unique words / words (stemmed)</b>: {round(safe_div(article_stats["unique_words_num"], article_stats["words_num_all"]) * 100, 1)}% ({article_stats["unique_words_num_all"]} / {article_stats["words_num_all"]})<br>
<b>Verb / words</b>: {round(safe_div(article_stats["verb"], article_stats["words_num_all"]) * 100, 1)}% ({article_stats["verb"]} / {article_stats["words_num_all"]})<br>
<b>Adj / words</b>: {round(safe_div(article_stats["adj"], article_stats["words_num_all"]) * 100, 1)}% ({article_stats["adj"]} / {article_stats["words_num_all"]})<br>
<b>Noun / words</b>: {round(safe_div(article_stats["noun"], article_stats["words_num_all"]) * 100, 1)}% ({article_stats["noun"]} / {article_stats["words_num_all"]})<br>
<br>
<b>ChatGPT Keywords</b>:<br> {", ".join(article_chars["chatgpt"]["keywords"])}<br><br>
<b>Most Common Words</b>:<br> {counter_to_text(article_stats["most_common_words"])}<br><br>
<b>Most Common Bigrams</b>:<br> {counter_to_text(article_stats["most_common_bigrams"])}<br><br>
<b>Most Common Trigrams</b>:<br> {counter_to_text(article_stats["most_common_trigrams"])}<br><br>
<br>
"""
def profile_to_text(all_data: dict, profile_stats: dict, fixed_last_date: datetime = None) -> str:
words_upa_counts = profile_stats["words_upa_counts"]
chatgpt_words_count = profile_stats["chatgpt_words_count"]
words_counts = profile_stats["words_counts"]
pos_stats = profile_stats["pos_stats"]
domains_number = count_external_domains(all_data["articles"])
article_length_cat = Counter(profile_stats['article_length_cat']).most_common(3)
publication_count = Counter(profile_stats['publication']).most_common(10)
published_frequency = find_dates_frequency([x['date'] for x in profile_stats["published_at"]])
published_time_period_count = Counter([f"{x['time_period'][0]}-{x['time_period'][1]}" for x in profile_stats["published_at"]]).most_common(10)
published_day_of_week_count = Counter([f"{get_day_of_week(x['date'])}" for x in profile_stats["published_at"]]).most_common(10)
followers = all_data["user"]["info"]["followers_count"]
words_all_num = len(profile_stats["user_words_all"])
unique_words_all_num = len(set(profile_stats["user_words_all"]))
words_num = len(profile_stats["user_words"])
unique_words_num = len(set(profile_stats["user_words"]))
clap_voter_avg = find_list_div_avg(profile_stats["clap_count"], profile_stats["voter_count"])
voter_follower_avg = find_list_div_avg(profile_stats["voter_count"], [followers] * len(profile_stats["voter_count"]))
last_date_seen = max([x["date"] for x in profile_stats["published_at"]])
bio = all_data["user"]["info"]["bio"]
return f"""
<b>BIO</b>: {bio} <br>
<b>Articles</b>: {len(all_data["articles"])} ({len(words_counts["words"])} stemmed words) <br>
<b>Top article</b>: <a href='{profile_stats["top_article"][0]}'>{profile_stats["top_article"][1]} ({profile_stats["top_article"][2]})</a> <br>
<b>Publications</b>: {counter_to_text(publication_count)} <br>
<b>Followers</b>: {followers} <br>
<b>Voters - Followers % (Article AVG)</b>: {round(voter_follower_avg * 100, 1)}%<br>
<b>Claps per Person (Article AVG)</b>: {round(clap_voter_avg, 1)}<br>
<br>
<b>Preferred Published Time</b>: {counter_to_text(published_time_period_count)} <br>
<b>Preferred Published Day</b>: {counter_to_text(published_day_of_week_count)} <br>
<b>Preferred Article Length (stemmed)</b>: {counter_to_text(article_length_cat)} <br>
<b>Published Frequency (AVG)</b>: per {round(published_frequency[0], 1)} days ({published_frequency[1]}/{published_frequency[2]}) <br>
<b>Last Seen </b>: before {days_between(last_date_seen, fixed_last_date)} days<br>
<b>External Domains per Article </b>: {round(safe_div(domains_number, len(all_data["articles"])), 1)}<br>
<br>
<b>Stemmed words / words</b>: {round(safe_div(words_num, words_all_num) * 100, 1)}% ({words_num} / {words_all_num})<br>
<b>Unique words / words</b>: {round(safe_div(unique_words_all_num, words_all_num) * 100, 1)}% ({unique_words_all_num} / {words_all_num})<br>
<b>Unique words / words (stemmed)</b>: {round(safe_div(unique_words_num, words_num) * 100, 1)}% ({unique_words_num} / {words_num})<br>
<b>Verb / words</b>: {round(safe_div(pos_stats["verb"], words_all_num) * 100, 1)}% ({pos_stats["verb"]} / {words_all_num})<br>
<b>Adj / words</b>: {round(safe_div(pos_stats["adj"], words_all_num) * 100, 1)}% ({pos_stats["adj"]} / {words_all_num})<br>
<b>Noun / words</b>: {round(safe_div(pos_stats["noun"], words_all_num) * 100, 1)}% ({pos_stats["noun"]} / {words_all_num})<br>
<br>
<b>Most Common ChatGPT Keywords (UPA)</b>:<br> {counter_to_text(chatgpt_words_count)}<br><br>
<b>Most Common Words (UPA)</b>:<br> {counter_to_text(words_upa_counts["most_common_words"])}<br><br>
<b>Most Common Bigrams (UPA)</b>:<br> {counter_to_text(words_upa_counts["most_common_bigrams"])}<br><br>
<b>Most Common Trigrams (UPA)</b>:<br> {counter_to_text(words_upa_counts["most_common_trigrams"])}<br><br>
<b>Most Common Words</b>:<br> {counter_to_text(words_counts["most_common_words"])}<br><br>
<b>Most Common Bigrams</b>:<br> {counter_to_text(words_counts["most_common_bigrams"])}<br><br>
<b>Most Common Trigrams</b>:<br> {counter_to_text(words_counts["most_common_trigrams"])}<br><br>
"""
def chatgpt_api(soup, num_keyphrases=10, dummy=False):
if dummy:
return "hello this is a test"
stop_words = set(stopwords.words('english'))
words = html_to_words(soup)
new_words = [x for x in words if x not in stop_words and len(x) > 2]
try:
full_text = " ".join(new_words[0:2000])
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user",
"content": f"""Please provide a summary and suggest the top {num_keyphrases} keywords that best describe the important topics or themes present in the following text. Your answer should include the format: KEYWORDS=keyword_1, keyword_2, ..., keyword_{num_keyphrases} and SUMMARY=summary_text.\n\n {full_text}"""},
]
)
except Exception as exc:
print(exc)
full_text = " ".join(new_words[0:1000])
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user",
"content": f"""Please provide a summary and suggest the top {num_keyphrases} keywords that best describe the important topics or themes present in the following text. Your answer should include the format: KEYWORDS=keyword_1, keyword_2, ..., keyword_{num_keyphrases} and SUMMARY=summary_text.\n\n {full_text}"""},
]
)
reply = response["choices"][0]["message"]["content"]
return reply
def chatgpt_parser(username, soup, article_id):
# Define the filename and API endpoint URL
    filename = os.path.join("data", f"{username}_openai_responses.csv")
# Check if the file exists
if os.path.exists(filename):
# If the file exists, open it and search for the ID
with open(filename, 'r') as f:
found = False
reader = csv.reader(f, delimiter='\t')
lines = []
for cols in reader:
if cols[0] == article_id:
print(f"id {article_id} found, using local file...")
# If the ID is found, use the associated response
response = cols[1]
found = True
else:
# Otherwise, add the line to the list of lines
lines.append(cols)
if not found:
print(f"id {article_id} not found, using the api...")
# If the ID is not found, use the API and add the new ID and response to the file
response = chatgpt_api(soup)
lines.append([article_id, response])
with open(filename, 'a', newline='', encoding='utf8') as f:
writer = csv.writer(f, delimiter='\t')
writer.writerow([article_id, response])
else:
# If the file does not exist, use the API and create the file with the new ID and response
response = chatgpt_api(soup)
with open(filename, 'w', newline='', encoding='utf8') as f:
writer = csv.writer(f, delimiter='\t')
writer.writerow([article_id, response])
# Do something with the response
# Find the keywords and summary in the text
keywords = "error"
summary = "error"
try:
keywords = re.search(r'KEYWORDS(?:\s+)?(?:\=|\:)(?:\s+)?([\w\,\-0-9\n\t\s]+)(?:SUMMARY)?', response).group(1).split(",")
keywords = [x.strip().lower().replace(".", "") for x in keywords]
unikeywords = []
for x in keywords:
unikeywords.extend(x.split(" "))
unikeywords = list(set(unikeywords))
except Exception as exc:
pass
try:
summary = re.search(r'SUMMARY(?:\s+)?(?:\=|\:)(?:\s+)?([\w\,\-0-9\n\t\s\.\,\(\)\'\"]+)(?:KEYWORDS)?', response).group(1).strip()
except Exception as exc:
pass
return {"keywords": keywords, "summary": summary, "unikeywords": unikeywords}
| [
"You are a helpful assistant.",
"Please provide a summary and suggest the top PLACEHOLDER keywords that best describe the important topics or themes present in the following text. Your answer should include the format: KEYWORDS=keyword_1, keyword_2, ..., keyword_PLACEHOLDER and SUMMARY=summary_text.\n\n PLACEHOLDER"
] |
2024-01-10 | justdataplease/medium-sky | kgraph.py | import validators
from jinja2 import Template
from urllib.parse import urlparse
from get_data import MediumArticles
import re
import argparse
from dotenv import load_dotenv
import os
from excluded_urls import EXCLUDE_URLS
import openai
# load environment variables from .env file
load_dotenv()

openai.api_key = os.getenv("OPENAI_API_KEY")
# Set default values
DEFAULT_USERNAME = "justdataplease"
DEFAULT_ARTICLES_LIMIT = 0
DEFAULT_ISOLATE_ARTICLES = False
USE_GPT = False
FIXED_LAST_DATE = os.environ.get('FIXED_LAST_DATE', default=None)
def trim_url(url: str) -> str:
try:
url = url.split("?")[0].split("#")[0]
if url[-1] == '/':
url = url[0:-1]
url_last = url.split("/")[-1]
except Exception:
return
return url_last
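# Illustration (hypothetical URL): trim_url("https://medium.com/@user/my-post-123?source=rss") -> "my-post-123"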
def rescale(numbers: list, scale: tuple = (30, 70)) -> dict:
min_value = min(numbers)
max_value = max(numbers)
new_min, new_max = scale
scaled_numbers = {}
for number in numbers:
scaled_number = ((number - min_value) / (max_value - min_value)) * (new_max - new_min) + new_min
scaled_numbers[number] = scaled_number
return scaled_numbers
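# Illustration (hypothetical voter counts): rescale([10, 20, 30]) -> {10: 30.0, 20: 50.0, 30: 70.0},
# i.e. values are mapped linearly onto the 30-70 node-size range used below.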
def get_links(user: str, isolate_articles: bool = True, articles_limit: int = 10, reset: bool = False, fixed_last_date: str = None,
use_gpt: bool = False) -> dict:
a = MediumArticles(username=user, articles_limit=articles_limit, reset=reset, fixed_last_date=fixed_last_date, use_gpt=use_gpt)
articles_dict = a.get_all_articles()
articles = articles_dict["articles"]
user = articles_dict["user"]
main_counter = 0
article_index = {}
dataset = {}
# Find size of article shape
voter_count_list = [s["voter_count"] for s in articles]
voter_count_rescaled_index = rescale(voter_count_list)
# Create nodes for articles
for article in articles:
main_counter += 1
url = article['url']
stats_dict = article['stats_dict']
trimmed_url = trim_url(url)
ar = {"id": main_counter, "shape": "star", "color": "#fdfd96", "label": stats_dict['h1'][0:20], "main_title": stats_dict['h1'],
"size": voter_count_rescaled_index[article["voter_count"]], "url": url,
"domain": url,
"description": stats_dict['h2'], "urls": [], "main": 1, "counter": 1, "font": {"color": "#000000", "size": 20}}
article_index[trimmed_url] = main_counter
dataset[main_counter] = ar
article["counter"] = main_counter
connections = []
# Ids of external website domain nodes will start from 100000 (articles ids start from 1)
counter = 100000
already_found_index = {}
# Create nodes for external website domains and connections between them and the articles
for article in articles:
already_found_in_article_index = {}
if isolate_articles:
already_found_index = {}
stats_text = article['stats']
article_id = article['counter']
dataset[article_id]["stats"] = stats_text
for link in article['links']:
text = link[0]
href = link[1]
trimmed_href = trim_url(href)
domain = urlparse(href).netloc
description_url = (text or "") + "|" + (href or "")
if href:
# Check if it is a valid url else do not bother
if validators.url(href) and not re.search(EXCLUDE_URLS, href):
found_main_article = article_index.get(trimmed_href)
# If this is an external website domain (dot)
if not found_main_article:
if already_found_index.get(domain):
id = already_found_index[domain]
if (not already_found_in_article_index.get(domain)) or isolate_articles:
dataset[id]["counter"] += 1
if dataset[id]["size"] <= 50:
dataset[id]["size"] += 2
dataset[id]["urls"] = list(set(dataset[id]["urls"] + [description_url]))
dataset[id]["label"] = dataset[id]["label"].split("|")[0] + "|" + str(dataset[id]["counter"])
if isolate_articles:
continue
else:
counter += 1
id = counter
dataset[id] = {"id": id, "shape": "dot", "url": domain, "domain": domain, "size": 10, "label": domain.replace("www.", ""),
"description": text,
"main": 0, "urls": [description_url], "counter": 1}
already_found_index[domain] = id
already_found_in_article_index[domain] = id
else:
# If this is a main article (star)
id = found_main_article
connections_color = '#A7C7E7' if found_main_article else '#dbd7d7'
highlight_color = '#3c82ca' if found_main_article else '#9a8f8f'
connection_edge = {"from": id, "to": article_id, "font": {"color": "#808080", "size": 10},
"color": {"color": connections_color, "highlight": highlight_color}}
connection_edge_tuple = (id, article_id)
# All connections
connection_tuples = [(x["from"], x["to"]) for x in connections] + [(x["to"], x["from"]) for x in connections]
# If there is already a connection do not recreate
if (connection_edge_tuple not in connection_tuples) and (article_id != id):
connections.append(connection_edge)
return {"nodes": list(dataset.values()), "edges": connections,
"user_profile": user["profile"],
"user_image": user["info"]["image_url"]}
def render_html(username=DEFAULT_USERNAME, isolate_articles=DEFAULT_ISOLATE_ARTICLES, articles_limit=DEFAULT_ARTICLES_LIMIT, fixed_last_date=FIXED_LAST_DATE,
use_gpt=USE_GPT):
dataset = get_links(username, isolate_articles=isolate_articles, articles_limit=articles_limit, fixed_last_date=fixed_last_date, use_gpt=use_gpt)
# Process template and generate html
with open('templates/template.html') as file:
template = Template(file.read())
output_file_name = f'output/{username.replace(".", "_")}_{"i" if isolate_articles else "m"}.html'
with open(output_file_name, 'w', encoding='utf8') as file:
file.write(
template.render(data=dataset, user=username,
user_image=dataset["user_image"],
user_profile=dataset["user_profile"],
isolate_articles=isolate_articles))
if __name__ == "__main__":
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--username", default=DEFAULT_USERNAME, help="username to retrieve links for")
parser.add_argument("-l", "--limit", type=int, default=DEFAULT_ARTICLES_LIMIT, help="maximum number of articles to retrieve")
parser.add_argument("-i", "--isolate", action="store_true", default=DEFAULT_ISOLATE_ARTICLES, help="whether to isolate articles")
parser.add_argument("-fd", "--fdate", type=str, default=FIXED_LAST_DATE, help="fixed last date to calculate last seen")
parser.add_argument("-ai", "--ai", action="store_true", default=USE_GPT, help="use chatgpt to extract keywords and summary")
args = parser.parse_args()
render_html(username=args.username, isolate_articles=args.isolate, articles_limit=args.limit, fixed_last_date=args.fdate, use_gpt=args.ai)
| [] |
2024-01-10 | zhangir-azerbayev/ProofNet | train_backtranslation~make_data~docgen_nl_of_codex.py | import sys
import os
from tqdm import tqdm
import json
import ndjson
import openai
from ratelimit import limits, sleep_and_retry
def batch_loader(seq, size):
"""
Iterator that takes in a list `seq` and returns
chunks of size `size`
"""
return [seq[pos : pos + size] for pos in range(0, len(seq), size)]
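# Illustration (hypothetical sequence): batch_loader([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]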
# calls=3, period=60 is max for reliability with batch_size=20
# might want to throttle it to keep lean chat up
@sleep_and_retry
@limits(calls=1, period=60)
def call_api(prompt, max_tokens=150):
    return openai.Completion.create(
        engine="code-davinci-002",
        prompt=prompt,
        max_tokens=max_tokens,
n=1,
temperature=0,
stop="\"",
)
def main():
BATCH_SIZE = 20
BEFORE_THEOREM = "\nLean mathlib version:\n"
AFTER_THEOREM = "\nTranslate the Lean mathlib version to a natural language version:\n\""
# Run with save_path = "docgen_export_with_nl/docgen_export_with_nl.jsonl"
save_path = sys.argv[1]
with open("few_shot_prompt.txt") as f:
FEW_SHOT_PROMPT = f.read()
with open("docgen_export_parsed/docgen_export_full_parsed.jsonl") as f:
data = ndjson.load(f)
dataloader = batch_loader(data, BATCH_SIZE)
for batch in tqdm(dataloader):
prompts = [FEW_SHOT_PROMPT + BEFORE_THEOREM + x["formal_statement"] + AFTER_THEOREM for x in batch]
outs = call_api(prompts)
finish_reasons = [x["finish_reason"]
for x in outs["choices"]]
if "length" in finish_reasons:
outs = call_api(prompts, max_tokens=400)
text_outs = [x["text"] for x in outs["choices"]]
for text_out, step in zip(text_outs, batch):
step["nl_statement_of_codex"] = text_out
with open(save_path, "a+") as f:
record = json.dumps(step)
f.write(record+"\n")
if __name__=="__main__":
main()
| [] |
2024-01-10 | cyblogerz/IceText | agents~linkedin_lookup_agent.py | from langchain import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.agents import initialize_agent, Tool, AgentType
from tools.tools import get_profile_url
from dotenv import load_dotenv
import os
def lookup(name: str) -> str:
load_dotenv()
llm = ChatOpenAI(
temperature=0,
model_name="gpt-3.5-turbo",
openai_api_key=os.getenv("OPENAI_API_KEY"),
)
template = """given the full name {name_of_person} I want you to get it me a link to their Linkedin profile page.
Your answer should contain only a URL"""
tools_for_agent = [
Tool(
name="Crawl Google 4 linkedin profile page",
func=get_profile_url,
description="useful for when you need get the Linkedin Page URL",
)
]
agent = initialize_agent(
tools=tools_for_agent,
llm=llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
prompt_template = PromptTemplate(
template=template, input_variables=["name_of_person"]
)
linked_profile_url = agent.run(prompt_template.format_prompt(name_of_person=name))
return linked_profile_url
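# Illustrative call (hypothetical name and URL; requires a valid OPENAI_API_KEY and the
# profile-search tool to be configured): lookup("Jane Doe") -> "https://www.linkedin.com/in/janedoe"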
| [
"given the full name {name_of_person} I want you to get it me a link to their Linkedin profile page.\n Your answer should contain only a URL",
"name_of_person"
] |
2024-01-10 | cyblogerz/IceText | ice-text.py | from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain
import os
from dotenv import load_dotenv
from agents.linkedin_lookup_agent import lookup as linkedin_lookup_agent
from third_parties.linkedin import scrape_linkedin_profile
from output_parser import person_data_parser
def ice_text(name: str) -> str:
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
    linkedin_profile_url = linkedin_lookup_agent(name=name)
summary_template = """
given the information{information} of a person from I want you to create:
1. a short summary
2. two interesting facts about them
\n{format_instructions}
"""
summary_prompt_template = PromptTemplate(
input_variables=["information"],
template=summary_template,
partial_variables={"format_instructions":person_data_parser.getf_format_instructions() }
)
llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_key=api_key)
chain = LLMChain(llm=llm, prompt=summary_prompt_template)
linkedin_data = scrape_linkedin_profile(linked_in_url=linkedin_profile_url)
result = chain.run(information=linkedin_data)
return person_data_parser.parse(result)
if __name__ == "__main__":
    result = ice_text(name="Pranav Ajay Vit Bhopal")
| [
"\n given the information{information} of a person from I want you to create:\n 1. a short summary\n 2. two interesting facts about them \n \n{format_instructions}\n ",
"format_instructions",
"information"
] |
2024-01-10 | kiiichi/python-learning | OpenAI~import.py | import openai
import os
'''
# Load API key from .env file
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())
'''
openai.api_key = os.environ.get('OPENAI_API_KEY')
def get_completion(prompt, model='gpt-3.5-turbo'):
messages = [{'role': 'user', 'content': prompt}]
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=0, # this is the degree of randomness of the output
)
return response.choices[0].message['content']
text = f"""
Hello, how are you? \
hummm... I'm not sure I understand what you mean. \
I'm a chatbot, I'm here to help you. \
I'm sorry, I don't understand. \
I'm sorry, I don't understand. \
I'm sorry, I don't understand. \
"""
prompt = f"""
Summarize the text delimited by triple backticks \
into a single sentence.
```{text}```
"""
response = get_completion(prompt)
print(response)
prompt = f"""
Generate a list of 3 made-up game titles along with their \
descriptions, genres, and prices.
Provide them in JSON format with the following keys:
game_id, title, description, genre, price.
"""
response = get_completion(prompt)
print(response) | [
"\nSummarize the text delimited by triple backticks into a single sentence. \n```PLACEHOLDER```\n",
"\nGenerate a list of 3 made-game tiles along with their descriptions, genres, and titles.\nProvide them in JSON format with the following keys: \ngame_id, title, description, genre, price.\n"
] |
2024-01-10 | matthieusaussaye/talentphare | pages~Culture.py | import streamlit as st
from streamlit_lottie import st_lottie
from typing import Literal
from dataclasses import dataclass
import json
import base64
from langchain.memory import ConversationBufferMemory
from langchain.callbacks import get_openai_callback
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationChain, RetrievalQA
from langchain.prompts.prompt import PromptTemplate
from langchain.text_splitter import NLTKTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
import nltk
from prompts.prompts import templates
# Audio
from speech_recognition.openai_whisper import save_wav_file, transcribe
from audio_recorder_streamlit import audio_recorder
#from aws.synthesize_speech import synthesize_speech
from IPython.display import Audio
### ——————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————
st.markdown("#### Bienvenue, nous allons discuter des valeurs de notre entreprise")
jd = st.text_area("Culture de l'entreprise:")
st.session_state.culture_entreprise = jd
#jd="The Banque Cantonale du Valais (Switzerland) is a bank that deeply values innovation rooted in tradition, maintains close and meaningful relationships with its clients, is committed to sustainable operations and environmental responsibility, and upholds a high standard of professional competence and expertise in its services.The Banque Cantonale du Valais (Switzerland) is seeking a Data Analyst to join their DATA team in Sion, with a workload of 80-100%. The role involves interacting with users, analyzing their needs, and supporting them in using new tools. The Data Analyst will be expected to collaborate with business teams in design workshops, actively participate in technological developments related to data management, and write technical documentation. Ideal candidates should have higher education in computer science (or equivalent) and experience in a similar role. Knowledge of the banking sector is considered a plus. Proficiency in computer tools such as Power BI and Azure Databricks, as well as good writing skills and knowledge of German and/or English, are required. The candidate should be committed, proactive, passionate about their profession, and able to work autonomously and collaboratively with other experts."
#st.toast("4097 tokens is roughly equivalent to around 800 to 1000 words or 3 minutes of speech. Please keep your answer within this limit.")
### ——————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————
@dataclass
class Message:
"""class for keeping track of interview history."""
origin: Literal["human", "ai"]
message: str
with st.sidebar:
st.markdown("IDIAP Create Challenge 2023")
if "jd_history" in st.session_state:
interview = []
dict_to_save = {}
st.info(str(len(st.session_state.jd_history)))
if len(st.session_state.jd_history) == 9:
for e in st.session_state.jd_history:
interview.append(e)
interview_str = '|'.join([str(item) for item in interview])
st.session_state.interview = interview_str
st.session_state.short_interview = interview_str
dict_to_save["culture_entreprise"] = st.session_state.culture_entreprise
dict_to_save["name"] = st.session_state.name_surname
dict_to_save["interview"] = st.session_state.interview
dict_to_save["short_interview"] = st.session_state.short_interview
st.info(str(dict_to_save))
def save_vector(text):
"""embeddings"""
nltk.download('punkt')
text_splitter = NLTKTextSplitter()
texts = text_splitter.split_text(text)
# Create emebeddings
embeddings = OpenAIEmbeddings()
docsearch = FAISS.from_texts(texts, embeddings)
return docsearch
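# Illustrative usage (hypothetical text and query; embedding requires a valid OpenAI API key):
#   docsearch = save_vector("Our bank values innovation and client proximity.")
#   docs = docsearch.similarity_search("innovation", k=1)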
def initialize_session_state_jd():
""" initialize session states """
if 'jd_docsearch' not in st.session_state:
        st.session_state.jd_docsearch = save_vector(jd)
    if 'jd_retriever' not in st.session_state:
        st.session_state.jd_retriever = st.session_state.jd_docsearch.as_retriever(search_type="similarity")
if 'jd_chain_type_kwargs' not in st.session_state:
Interview_Prompt = PromptTemplate(input_variables=["context", "question"],
template=templates.jd_template)
st.session_state.jd_chain_type_kwargs = {"prompt": Interview_Prompt}
if 'jd_memory' not in st.session_state:
st.session_state.jd_memory = ConversationBufferMemory()
# interview history
if "jd_history" not in st.session_state:
st.session_state.jd_history = []
st.session_state.jd_history.append(Message("ai",
"Bonjour, je suis votre intervieweur aujourd'hui. Je vais vous poser quelques questions concernant les valeures de notre entreprise. Êtes-vous prêt à commencer l'entretien?"))
# token count
if "token_count" not in st.session_state:
st.session_state.token_count = 0
if "jd_guideline" not in st.session_state:
llm = ChatOpenAI(
model_name = "gpt-3.5-turbo",
temperature = 0.8,)
st.session_state.jd_guideline = RetrievalQA.from_chain_type(
llm=llm,
chain_type_kwargs=st.session_state.jd_chain_type_kwargs, chain_type='stuff',
            retriever=st.session_state.jd_retriever, memory = st.session_state.jd_memory).run("Vous êtes l'intervieweur pour un poste dans une entreprise. Ceci est le résumé des valeurs de l'entreprise. Pose une question relative à ces valeurs pour tester le culture fit du candidat")
# llm chain and memory
if "jd_screen" not in st.session_state:
llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
temperature=0.8, )
PROMPT = PromptTemplate(
input_variables=["history", "input"],
template="""Je veux que vous agissiez comme un intervieweur en suivant strictement la directive dans la conversation actuelle.
Posez-moi des questions et attendez mes réponses comme le ferait un humain. Ne rédigez pas d'explications.
Le candidat n'a pas accès à la directive.
Ne posez qu'une seule question à la fois.
N'hésitez pas à poser des questions de suivi si vous le jugez nécessaire.
Ne posez pas la même question.
Ne répétez pas la question.
Le candidat n'a pas accès à la directive.
Votre nom est GPTInterviewer.
Je veux que vous ne répondiez que comme un intervieweur.
Ne rédigez pas toute la conversation en une fois.
Le candidat n'a pas accès à la directive.
Conversation actuelle :
{history}
Candidat : {input}
IA :""")
st.session_state.jd_screen = ConversationChain(prompt=PROMPT, llm=llm,
memory=st.session_state.jd_memory)
if 'jd_feedback' not in st.session_state:
llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
temperature=0.8, )
st.session_state.jd_feedback = ConversationChain(
prompt=PromptTemplate(input_variables=["history", "input"], template=templates.feedback_template),
llm=llm,
memory=st.session_state.jd_memory,
)
def answer_call_back():
with get_openai_callback() as cb:
# user input
human_answer = st.session_state.answer
# transcribe audio
if voice:
save_wav_file("temp/audio.wav", human_answer)
try:
input = transcribe("temp/audio.wav")
# save human_answer to history
except:
st.session_state.jd_history.append(Message("ai", "Sorry, I didn't get that."))
return "Please try again."
else:
input = human_answer
st.session_state.jd_history.append(
Message("human", input)
)
# OpenAI answer and save to history
llm_answer = st.session_state.jd_screen.run(input)
# speech synthesis and speak out
#audio_file_path = synthesize_speech(llm_answer)
# create audio widget with autoplay
#audio_widget = Audio(audio_file_path, autoplay=True)
# save audio data to history
st.session_state.jd_history.append(
Message("ai", llm_answer)
)
st.session_state.token_count += cb.total_tokens
#return audio_widget
### ——————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————
# sumitted job description
if jd:
# initialize session states
initialize_session_state_jd()
#st.write(st.session_state.jd_guideline)
credit_card_placeholder = st.empty()
col1, col2 = st.columns(2)
with col1:
feedback = st.button("Get Interview Feedback")
with col2:
guideline = st.button("Show me interview guideline!")
chat_placeholder = st.container()
answer_placeholder = st.container()
audio = None
    # show the generated guideline or the interview feedback on demand
if guideline:
st.write(st.session_state.jd_guideline)
if feedback:
evaluation = st.session_state.jd_feedback.run("please give evalution regarding the interview")
st.markdown(evaluation)
st.download_button(label="Download Interview Feedback", data=evaluation, file_name="interview_feedback.txt")
st.stop()
else:
with answer_placeholder:
voice: bool = st.checkbox("Utiliser mon micro pour répondre")
if voice:
answer = audio_recorder(pause_threshold = 2.5, sample_rate = 44100)
#st.warning("An UnboundLocalError will occur if the microphone fails to record.")
else:
answer = st.chat_input("Your answer")
if answer:
st.session_state['answer'] = answer
audio = answer_call_back()
with chat_placeholder:
for answer in st.session_state.jd_history:
if answer.origin == 'ai':
if audio:
with st.chat_message("assistant"):
st.write(answer.message)
st.write(audio)
else:
with st.chat_message("assistant"):
st.write(answer.message)
else:
with st.chat_message("user"):
st.write(answer.message)
credit_card_placeholder.caption(f"""
Progress: {int(len(st.session_state.jd_history) / 30 * 100)}% completed.""")
else:
st.info("Merci de préciser la culture d'entreprise pour commencer l'entretien")
if 'name_surname' in st.session_state:
st.write(f"Nom: {st.session_state.name_surname}")
else:
st.write("Merci de préciser le nom à l'accueil")
| [
"Bonjour, je suis votre intervieweur aujourd'hui. Je vais vous poser quelques questions concernant les valeures de notre entreprise. Êtes-vous prêt à commencer l'entretien?",
"question",
"Je veux que vous agissiez comme un intervieweur en suivant strictement la directive dans la conversation actuelle.\n\n Posez-moi des questions et attendez mes réponses comme le ferait un humain. Ne rédigez pas d'explications.\n Le candidat n'a pas accès à la directive.\n Ne posez qu'une seule question à la fois.\n N'hésitez pas à poser des questions de suivi si vous le jugez nécessaire.\n Ne posez pas la même question.\n Ne répétez pas la question.\n Le candidat n'a pas accès à la directive.\n Votre nom est GPTInterviewer.\n Je veux que vous ne répondiez que comme un intervieweur.\n Ne rédigez pas toute la conversation en une fois.\n Le candidat n'a pas accès à la directive.\n \n Conversation actuelle :\n {history}\n \n Candidat : {input}\n IA :",
"human",
"ai",
"INPUT",
"input",
"context",
"Sorry, I didn't get that."
] |
2024-01-10 | matthieusaussaye/talentphare | pages~Experience.py | # langchain: https://python.langchain.com/
from dataclasses import dataclass
import streamlit as st
from speech_recognition.openai_whisper import save_wav_file, transcribe
from audio_recorder_streamlit import audio_recorder
from langchain.callbacks import get_openai_callback
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import RetrievalQA, ConversationChain
from langchain.prompts.prompt import PromptTemplate
from prompts.prompts import templates
from typing import Literal
#from aws.synthesize_speech import synthesize_speech
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.text_splitter import NLTKTextSplitter
from PyPDF2 import PdfReader
from prompts.prompt_selector import prompt_sector
from streamlit_lottie import st_lottie
import json
from IPython.display import Audio
import nltk
from PIL import Image
### ——————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————
# im = Image.open("icc.png")
# st.image(im,width=50)
st.markdown("#### Bienvenue, nous allons discuter de vos expériences professionelles")
st.markdown("""\n""")
#position = st.selectbox("Sélectionnez le poste pour lequel vous postulez", ["Data Analyst", "Ingénieur Logiciel", "Marketing"])
position="Data Analyst"
resume = st.file_uploader("Téléchargez votre CV", type=["pdf"])
if 'name_surname' in st.session_state:
st.write(f"Nom: {st.session_state.name_surname}")
else:
st.write("Merci de préciser le nom à l'accueil")
#auto_play = st.checkbox("Let AI interviewer speak! (Please don't switch during the interview)")
#st.toast("4097 tokens is roughly equivalent to around 800 to 1000 words or 3 minutes of speech. Please keep your answer within this limit.")
### ——————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————
@dataclass
class Message:
"""Class for keeping track of interview history."""
origin: Literal["human", "ai"]
message: str
with st.sidebar:
st.markdown("IDIAP Create Challenge 2023")
def save_vector(resume):
"""embeddings"""
nltk.download('punkt')
pdf_reader = PdfReader(resume)
text = ""
for page in pdf_reader.pages:
text += page.extract_text()
# Split the document into chunks
text_splitter = NLTKTextSplitter()
texts = text_splitter.split_text(text)
embeddings = OpenAIEmbeddings()
docsearch = FAISS.from_texts(texts, embeddings)
return docsearch
def initialize_session_state_resume():
# convert resume to embeddings
if 'docsearch' not in st.session_state:
        st.session_state.docsearch = save_vector(resume)
    # retriever for resume screen
    if 'retriever' not in st.session_state:
        st.session_state.retriever = st.session_state.docsearch.as_retriever(search_type="similarity")
# prompt for retrieving information
if 'chain_type_kwargs' not in st.session_state:
st.session_state.chain_type_kwargs = prompt_sector(position, templates)
# interview history
if "resume_history" not in st.session_state:
st.session_state.resume_history = []
st.session_state.resume_history.append(Message(origin="ai", message="Bonjour, je suis votre intervieweur aujourd'hui. Je vais vous poser quelques questions concernant votre CV et votre expérience. Êtes-vous prêt à commencer l'entretien?"))
# token count
if "token_count" not in st.session_state:
st.session_state.token_count = 0
# memory buffer for resume screen
if "resume_memory" not in st.session_state:
st.session_state.resume_memory = ConversationBufferMemory(human_prefix = "Candidate: ", ai_prefix = "Interviewer")
# guideline for resume screen
if "resume_guideline" not in st.session_state:
llm = ChatOpenAI(
model_name = "gpt-3.5-turbo",
temperature = 0.5,)
st.session_state.resume_guideline = RetrievalQA.from_chain_type(
llm=llm,
chain_type_kwargs=st.session_state.chain_type_kwargs, chain_type='stuff',
retriever=st.session_state.retriever, memory = st.session_state.resume_memory).run("Vous êtes l'intervieweur pour un poste d'analyste de données. Ceci est le CV d'un candidat. Posez-lui une seule question technique liée à son expérience et au poste d'analyste de données. Posez-lui une seule question.")
# llm chain for resume screen
if "resume_screen" not in st.session_state:
llm = ChatOpenAI(
#model_name="gpt-3.5-turbo",
temperature=0.7, )
PROMPT = PromptTemplate(
input_variables=["history", "input"],
template= """Je veux que vous agissiez comme un intervieweur en suivant strictement la directive dans la conversation actuelle.
Posez-moi une questions et attendez ma réponse comme le ferait un humain. Ne rédigez pas d'explications.
Le candidat n'a pas accès à la directive.
Ne posez qu'une seule question à la fois.
N'hésitez pas à poser des questions de suivi si vous le jugez nécessaire.
Ne posez pas la même question.
Ne répétez pas la question.
Le candidat n'a pas accès à la directive.
Votre nom est GPTInterviewer.
Je veux que vous ne répondiez que comme un intervieweur.
Ne rédigez pas toute la conversation en une fois.
Le candidat n'a pas accès à la directive.
Conversation actuelle :
{history}
Candidat : {input}
IA : """)
st.session_state.resume_screen = ConversationChain(prompt=PROMPT, llm = llm, memory = st.session_state.resume_memory)
# llm chain for generating feedback
if "resume_feedback" not in st.session_state:
llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
temperature=0.5,)
st.session_state.resume_feedback = ConversationChain(
prompt=PromptTemplate(input_variables=["history","input"], template=templates.feedback_template),
llm=llm,
memory=st.session_state.resume_memory,
)
def answer_call_back():
with get_openai_callback() as cb:
# user input
human_answer = st.session_state.answer
        # transcribe the audio answer
if voice:
save_wav_file("temp/audio.wav", human_answer)
try:
input = transcribe("temp/audio.wav")
except:
st.session_state.resume_history.append(Message("ai", "Désolé, je n'ai pas compris."))
return "Veuillez réessayer."
else:
input = human_answer
st.session_state.resume_history.append(
Message("human", input)
)
# OpenAI answer and save to history
llm_answer = st.session_state.resume_screen.run(input)
# speech synthesis and speak out
#audio_file_path = synthesize_speech(llm_answer)
# create audio widget with autoplay
#audio_widget = Audio(audio_file_path, autoplay=True)
# save audio data to history
st.session_state.resume_history.append(
Message("ai", llm_answer)
)
st.session_state.token_count += cb.total_tokens
#return audio_widget
### ——————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————
# sumitted job description
if position and resume:
    # initialize session state
initialize_session_state_resume()
credit_card_placeholder = st.empty()
col1, col2 = st.columns(2)
with col1:
feedback = st.button("Obtenir un retour sur l'entretien")
with col2:
guideline = st.button("Montrez-moi le guide d'entretien!")
chat_placeholder = st.container()
answer_placeholder = st.container()
audio = None
    # show the generated guideline or the interview feedback on demand
if guideline:
st.markdown(st.session_state.resume_guideline)
if feedback:
evaluation = st.session_state.resume_feedback.run("veuillez donner une évaluation concernant l'entretien")
st.markdown(evaluation)
st.download_button(label="Télécharger le retour sur l'entretien", data=evaluation, file_name="retour_entretien.txt")
st.stop()
else:
with answer_placeholder:
voice: bool = st.checkbox("Je souhaite parler à l'intervieweur IA!")
if voice:
answer = audio_recorder(pause_threshold=2, sample_rate=44100)
else:
answer = st.chat_input("Votre réponse")
if answer:
st.session_state['answer'] = answer
audio = answer_call_back()
with chat_placeholder:
for answer in st.session_state.resume_history:
if answer.origin == 'ai':
#if auto_play and audio:
# with st.chat_message("assistant"):
# st.write(answer.message)
# st.write(audio)
#else:
with st.chat_message("assistant"):
st.write(answer.message)
else:
with st.chat_message("user"):
st.write(answer.message)
credit_card_placeholder.caption(f"""
Progress: {int(len(st.session_state.resume_history) / 30 * 100)}% completed.""")
| [
"Désolé, je n'ai pas compris.",
"human",
"ai",
"INPUT",
"input",
"Je veux que vous agissiez comme un intervieweur en suivant strictement la directive dans la conversation actuelle.\n\n Posez-moi une questions et attendez ma réponse comme le ferait un humain. Ne rédigez pas d'explications.\n Le candidat n'a pas accès à la directive.\n Ne posez qu'une seule question à la fois.\n N'hésitez pas à poser des questions de suivi si vous le jugez nécessaire.\n Ne posez pas la même question.\n Ne répétez pas la question.\n Le candidat n'a pas accès à la directive.\n Votre nom est GPTInterviewer.\n Je veux que vous ne répondiez que comme un intervieweur.\n Ne rédigez pas toute la conversation en une fois.\n Le candidat n'a pas accès à la directive.\n \n Conversation actuelle :\n {history}\n \n Candidat : {input}\n IA : ",
"Bonjour, je suis votre intervieweur aujourd'hui. Je vais vous poser quelques questions concernant votre CV et votre expérience. Êtes-vous prêt à commencer l'entretien?"
] |
2024-01-10 | matthieusaussaye/talentphare | pages~Technique.py | import streamlit as st
from streamlit_lottie import st_lottie
from typing import Literal
from dataclasses import dataclass
import json
import base64
from langchain.memory import ConversationBufferMemory
from langchain.callbacks import get_openai_callback
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationChain, RetrievalQA
from langchain.prompts.prompt import PromptTemplate
from langchain.text_splitter import NLTKTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
import nltk
from prompts.prompts import templates
# Audio
from speech_recognition.openai_whisper import save_wav_file, transcribe
from audio_recorder_streamlit import audio_recorder
#from aws.synthesize_speech import synthesize_speech
from IPython.display import Audio
### ——————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————
st.markdown("#### Bienvenue, nous allons discuter de l'offre d'emploi")
jd = st.text_area("Offre d'emploi:")
#jd="The Banque Cantonale du Valais (Switzerland) is a bank that deeply values innovation rooted in tradition, maintains close and meaningful relationships with its clients, is committed to sustainable operations and environmental responsibility, and upholds a high standard of professional competence and expertise in its services.The Banque Cantonale du Valais (Switzerland) is seeking a Data Analyst to join their DATA team in Sion, with a workload of 80-100%. The role involves interacting with users, analyzing their needs, and supporting them in using new tools. The Data Analyst will be expected to collaborate with business teams in design workshops, actively participate in technological developments related to data management, and write technical documentation. Ideal candidates should have higher education in computer science (or equivalent) and experience in a similar role. Knowledge of the banking sector is considered a plus. Proficiency in computer tools such as Power BI and Azure Databricks, as well as good writing skills and knowledge of German and/or English, are required. The candidate should be committed, proactive, passionate about their profession, and able to work autonomously and collaboratively with other experts."
#st.toast("4097 tokens is roughly equivalent to around 800 to 1000 words or 3 minutes of speech. Please keep your answer within this limit.")
### ——————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————
@dataclass
class Message:
"""class for keeping track of interview history."""
origin: Literal["human", "ai"]
message: str
with st.sidebar:
st.markdown("IDIAP Create Challenge 2023")
def save_vector(text):
"""embeddings"""
nltk.download('punkt')
text_splitter = NLTKTextSplitter()
texts = text_splitter.split_text(text)
# Create emebeddings
embeddings = OpenAIEmbeddings()
docsearch = FAISS.from_texts(texts, embeddings)
return docsearch
def initialize_session_state_jd():
""" initialize session states """
if 'jd_docsearch' not in st.session_state:
        st.session_state.jd_docsearch = save_vector(jd)
    if 'jd_retriever' not in st.session_state:
        st.session_state.jd_retriever = st.session_state.jd_docsearch.as_retriever(search_type="similarity")
if 'jd_chain_type_kwargs' not in st.session_state:
Interview_Prompt = PromptTemplate(input_variables=["context", "question"],
template=templates.jd_template)
st.session_state.jd_chain_type_kwargs = {"prompt": Interview_Prompt}
if 'jd_memory' not in st.session_state:
st.session_state.jd_memory = ConversationBufferMemory()
# interview history
if "jd_history" not in st.session_state:
st.session_state.jd_history = []
st.session_state.jd_history.append(Message("ai",
"Bonjour, je suis votre intervieweur aujourd'hui. Je vais vous poser quelques questions concernant l'offre d'emploi. Êtes-vous prêt à commencer l'entretien?"))
# token count
if "token_count" not in st.session_state:
st.session_state.token_count = 0
if "jd_guideline" not in st.session_state:
llm = ChatOpenAI(
model_name = "gpt-3.5-turbo",
temperature = 0.8,)
st.session_state.jd_guideline = RetrievalQA.from_chain_type(
llm=llm,
chain_type_kwargs=st.session_state.jd_chain_type_kwargs, chain_type='stuff',
            retriever=st.session_state.jd_retriever, memory = st.session_state.jd_memory).run("Vous êtes l'intervieweur pour un poste d'analyste de données. Ceci est l'offre d'emploi. Pose une question relative à l'offre d'emploi pour tester le candidat")
# llm chain and memory
if "jd_screen" not in st.session_state:
llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
temperature=0.8, )
PROMPT = PromptTemplate(
input_variables=["history", "input"],
template="""e veux que vous agissiez comme un intervieweur en suivant strictement la directive dans la conversation actuelle.
Posez-moi des questions et attendez mes réponses comme le ferait un humain. Ne rédigez pas d'explications.
Le candidat n'a pas accès à la directive.
Ne posez qu'une seule question à la fois.
N'hésitez pas à poser des questions de suivi si vous le jugez nécessaire.
Ne posez pas la même question.
Ne répétez pas la question.
Le candidat n'a pas accès à la directive.
Votre nom est GPTInterviewer.
Je veux que vous ne répondiez que comme un intervieweur.
Ne rédigez pas toute la conversation en une fois.
Le candidat n'a pas accès à la directive.
Conversation actuelle :
{history}
Candidat : {input}
IA :""")
st.session_state.jd_screen = ConversationChain(prompt=PROMPT, llm=llm,
memory=st.session_state.jd_memory)
if 'jd_feedback' not in st.session_state:
llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
temperature=0.8, )
st.session_state.jd_feedback = ConversationChain(
prompt=PromptTemplate(input_variables=["history", "input"], template=templates.feedback_template),
llm=llm,
memory=st.session_state.jd_memory,
)
def answer_call_back():
with get_openai_callback() as cb:
# user input
human_answer = st.session_state.answer
# transcribe audio
if voice:
save_wav_file("temp/audio.wav", human_answer)
try:
input = transcribe("temp/audio.wav")
# save human_answer to history
except:
st.session_state.jd_history.append(Message("ai", "Sorry, I didn't get that."))
return "Please try again."
else:
input = human_answer
st.session_state.jd_history.append(
Message("human", input)
)
# OpenAI answer and save to history
llm_answer = st.session_state.jd_screen.run(input)
# speech synthesis and speak out
#audio_file_path = synthesize_speech(llm_answer)
# create audio widget with autoplay
#audio_widget = Audio(audio_file_path, autoplay=True)
# save audio data to history
st.session_state.jd_history.append(
Message("ai", llm_answer)
)
st.session_state.token_count += cb.total_tokens
#return audio_widget
### ——————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————
# sumitted job description
if jd:
# initialize session states
initialize_session_state_jd()
#st.write(st.session_state.jd_guideline)
credit_card_placeholder = st.empty()
col1, col2 = st.columns(2)
with col1:
feedback = st.button("Get Interview Feedback")
with col2:
guideline = st.button("Show me interview guideline!")
chat_placeholder = st.container()
answer_placeholder = st.container()
audio = None
# if submit email adress, get interview feedback imediately
if guideline:
st.write(st.session_state.jd_guideline)
if feedback:
evaluation = st.session_state.jd_feedback.run("please give evalution regarding the interview")
st.markdown(evaluation)
st.download_button(label="Download Interview Feedback", data=evaluation, file_name="interview_feedback.txt")
st.stop()
else:
with answer_placeholder:
voice: bool = st.checkbox("Utiliser mon micro pour répondre")
if voice:
answer = audio_recorder(pause_threshold = 2.5, sample_rate = 44100)
#st.warning("An UnboundLocalError will occur if the microphone fails to record.")
else:
answer = st.chat_input("Your answer")
if answer:
st.session_state['answer'] = answer
audio = answer_call_back()
with chat_placeholder:
for answer in st.session_state.jd_history:
if answer.origin == 'ai':
if audio:
with st.chat_message("assistant"):
st.write(answer.message)
st.write(audio)
else:
with st.chat_message("assistant"):
st.write(answer.message)
else:
with st.chat_message("user"):
st.write(answer.message)
credit_card_placeholder.caption(f"""
Progress: {int(len(st.session_state.jd_history) / 30 * 100)}% completed.""")
else:
st.info("Merci de préciser l'offre d'emploi pour commencer l'entretien")
if 'name_surname' in st.session_state:
st.write(f"Nom: {st.session_state.name_surname}")
else:
st.write("Merci de préciser le nom à l'accueil")
| [
"Bonjour, je suis votre intervieweur aujourd'hui. Je vais vous poser quelques questions concernant l'offre d'emploi. Êtes-vous prêt à commencer l'entretien?",
"question",
"human",
"ai",
"INPUT",
"input",
"e veux que vous agissiez comme un intervieweur en suivant strictement la directive dans la conversation actuelle.\n\n Posez-moi des questions et attendez mes réponses comme le ferait un humain. Ne rédigez pas d'explications.\n Le candidat n'a pas accès à la directive.\n Ne posez qu'une seule question à la fois.\n N'hésitez pas à poser des questions de suivi si vous le jugez nécessaire.\n Ne posez pas la même question.\n Ne répétez pas la question.\n Le candidat n'a pas accès à la directive.\n Votre nom est GPTInterviewer.\n Je veux que vous ne répondiez que comme un intervieweur.\n Ne rédigez pas toute la conversation en une fois.\n Le candidat n'a pas accès à la directive.\n \n Conversation actuelle :\n {history}\n \n Candidat : {input}\n IA :",
"context",
"Sorry, I didn't get that."
] |
2024-01-10 | matthieusaussaye/talentphare | Homepage.py | import streamlit as st
from streamlit_option_menu import option_menu
from app_utils import switch_page
from PIL import Image
import os
import openai
im = Image.open("icon.png")
st.set_page_config(page_title = "AI Interviewer", layout = "centered",page_icon=im)
openai.api_key = "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"
lan = st.selectbox("#### Language", ["English"])
if lan == "English":
home_title = "AI Interviewer"
home_introduction = "Welcome to AI Interviewer, empowering your interview preparation with generative AI."
with st.sidebar:
st.markdown('AI Interviewer - V0.1.2')
st.markdown("""
#### Powered by
[OpenAI](https://openai.com/)
[FAISS](https://github.com/facebookresearch/faiss)
[Langchain](https://github.com/hwchase17/langchain)
""")
st.markdown(
"<style>#MainMenu{visibility:hidden;}</style>",
unsafe_allow_html=True
)
st.image(im, width=100)
st.markdown(f"""# {home_title} <span style=color:#2E9BF5><font size=5>Beta</font></span>""",unsafe_allow_html=True)
st.markdown("""\n""")
#st.markdown("#### Greetings")
st.markdown("Welcome to AI Interviewer! 👏 AI Interviewer is your personal interviewer powered by generative AI that conducts mock interviews."
"You can upload your resume and enter job descriptions, and AI Interviewer will ask you customized questions. Additionally, you can configure your own Interviewer!")
st.markdown("""\n""")
with st.expander("Updates"):
st.write("""
08/13/2023
- Fix the error that occurs when the user input fails to be recorded. """)
with st.expander("What's coming next?"):
st.write("""
Improved voice interaction for a seamless experience. """)
st.markdown("""\n""")
st.markdown("#### Get started!")
st.markdown("Select one of the following screens to start your interview!")
selected = option_menu(
menu_title= None,
options=["Professional", "Resume", "Behavioral","Customize!"],
icons = ["cast", "cloud-upload", "cast"],
default_index=0,
orientation="horizontal",
)
if selected == 'Professional':
st.info("""
📚In this session, the AI Interviewer will assess your technical skills as they relate to the job description.
Note: The maximum length of your answer is 4097 tokens!
- Each Interview will take 10 to 15 mins.
- To start a new session, just refresh the page.
- Choose your favorite interaction style (chat/voice)
    - Start by introducing yourself and enjoy! """)
if st.button("Start Interview!"):
switch_page("Professional Screen")
if selected == 'Resume':
st.info("""
📚In this session, the AI Interviewer will review your resume and discuss your past experiences.
Note: The maximum length of your answer is 4097 tokens!
- Each Interview will take 10 to 15 mins.
- To start a new session, just refresh the page.
- Choose your favorite interaction style (chat/voice)
    - Start by introducing yourself and enjoy! """
)
if st.button("Start Interview!"):
switch_page("Resume Screen")
if selected == 'Behavioral':
st.info("""
📚In this session, the AI Interviewer will assess your soft skills as they relate to the job description.
Note: The maximum length of your answer is 4097 tokens!
- Each Interview will take 10 to 15 mins.
- To start a new session, just refresh the page.
- Choose your favorite interaction style (chat/voice)
    - Start by introducing yourself and enjoy!
""")
if st.button("Start Interview!"):
switch_page("Behavioral Screen")
if selected == 'Customize!':
st.info("""
📚In this session, you can customize your own AI Interviewer and practice with it!
- Configure AI Interviewer in different specialties.
- Configure AI Interviewer in different personalities.
- Different tones of voice.
Coming at the end of July""")
st.markdown("""\n""")
st.markdown("#### Wiki")
st.write('[Click here to view common FAQs, future updates and more!](https://jiatastic.notion.site/wiki-8d962051e57a48ccb304e920afa0c6a8?pvs=4)')
#st.write(
# f'<iframe src="https://17nxkr0j95z3vy.embednotionpage.com/AI-Interviewer-Wiki-8d962051e57a48ccb304e920afa0c6a8" style="width:100%; height:100%; min-height:500px; border:0; padding:0;"/>',
# unsafe_allow_html=True,
# )
| [] |
2024-01-10 | matthieusaussaye/talentphare | new_app.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 1 15:42:22 2023
@author: paulm
"""
import streamlit as st
import os
from st_custom_components import st_audiorec
import time
import wave
import io
import json
from typing import List
import requests as r
import base64
import mimetypes
import openai
openai.api_key = "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"
from audio_recorder_streamlit import audio_recorder
def bytes_to_wav(audio_bytes, output_filename, sample_width=2, frame_rate=44100, channels=2):
with wave.open(output_filename, 'wb') as wav_file:
wav_file.setnchannels(channels)
wav_file.setsampwidth(sample_width)
wav_file.setframerate(frame_rate)
wav_file.writeframes(audio_bytes)
# App title
st.set_page_config(page_title="Job interview Chatbot")
# Store LLM generated responses
if "messages" not in st.session_state.keys():
st.session_state.messages = [{"role": "assistant", "content": "Hello, who are you and what job are you applying to ?"}]
# Display or clear chat messages
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.write(message["content"])
def clear_chat_history():
st.session_state.messages = [{"role": "assistant", "content": "Hello, who are you and what job are you applying to ?"}]
st.sidebar.button('Clear Chat History', on_click=clear_chat_history)
# Function for generating response
def generate_response(prompt_input):
string_dialogue = "You are a job interviewer. You do not respond as 'User' or pretend to be 'User'. You only respond once as 'Assistant'. Ask question to 'User' related to its answer"
for dict_message in st.session_state.messages:
if dict_message["role"] == "user":
string_dialogue += "User: " + dict_message["content"] + "\n\n"
else:
string_dialogue += "Assistant: " + dict_message["content"] + "\n\n"
#output = replicate.run(llm,
#input={"prompt": f"{string_dialogue} {prompt_input} Assistant: ",
# "temperature":temperature, "top_p":top_p, "max_length":max_length, "repetition_penalty":1})
    # Send the full interviewer instruction plus dialogue transcript, not just the last message
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "system", "content": string_dialogue}],
    )
    return response["choices"][0]["message"]["content"]
audio_bytes = audio_recorder()
if audio_bytes is not None:
bytes_to_wav(audio_bytes, 'output.wav') # Replace audio_bytes with your audio data
# The name of the .wav file
filename = 'output.wav'
# Open the .wav file
wav_audio_data = open(filename, "rb")
transcript = openai.Audio.transcribe("whisper-1", wav_audio_data)
st.session_state.messages.append({"role": "user", "content": transcript["text"]})
with st.chat_message("user"):
st.write(transcript["text"])
if prompt := st.chat_input("Your message"):
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.write(prompt)
# Generate a new response if last message is not from assistant
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
with st.spinner("Thinking..."):
response = generate_response(prompt)
placeholder = st.empty()
full_response = ''
for item in response:
full_response += item
placeholder.markdown(full_response)
placeholder.markdown(full_response)
message = {"role": "assistant", "content": full_response}
st.session_state.messages.append(message) | [
"Hello, who are you and what job are you applying to ?"
] |
2024-01-10 | nitanmarcel/r2-tools | r2gpt~r2gpt.py | import r2pipe
import openai
import os
import sys
# Usage:
# env:
# export OPENAI_API_KEY='sk-32a1' (required) OpenAi api key
# export OPENAI_API_MODEL='gpt-3.5-turbo' (optional) OpenAi model to use. Defaults to gpt-3.5-turbo
# commands:
# !pipe python ./r2gpt.py
# Can also use a custom prompt to which the assembly code will be appended to:
# !pipe python ./r2gpt.py 'Why am I doing this?'
def main():
if 'OPENAI_API_KEY' not in os.environ:
        print('Set an OpenAI API key in the environment variable OPENAI_API_KEY=')
return
model = 'gpt-3.5-turbo'
    if 'OPENAI_API_MODEL' in os.environ:
model = os.environ['OPENAI_API_MODEL']
r2 = r2pipe.open()
r2.cmd('e scr.color=false')
pdf = r2.cmd('pdf')
r2.cmd('e scr.color=true')
if 'ERROR:' in pdf or not pdf:
print(pdf)
return
if len(sys.argv) > 1:
messages = [{"role": "user", "content": sys.argv[1] + '\n' + pdf}]
else:
messages = [{"role": "system", "content": "You will be provided with a piece of assembly code and your task is to explain in in a concise way."},{"role": "user", "content": pdf}]
    print('Please wait! This might take a while...')
    chat_completion = openai.ChatCompletion.create(model=model, messages=messages)
print('\n', chat_completion['choices'][0]['message']['content'])
if __name__ == '__main__':
try:
main()
except Exception as exc:
        print(type(exc).__name__, exc, sep=' : ') | [
"You will be provided with a piece of assembly code and your task is to explain in in a concise way.",
"\n"
] |
2024-01-10 | Markovian99/PDFLucy | src~app_sections.py | import streamlit as st
from datetime import date, datetime
import pandas as pd
from io import StringIO
import json
import os
from langchain.llms import OpenAI
from langchain import PromptTemplate, LLMChain
from langchain.chains import RetrievalQA, ConversationalRetrievalChain
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
from config import MODELS, TEMPERATURE, MAX_TOKENS, DATE_VAR, DATA_FRACTION, APP_NAME, MEAN_AGG, PROCESSED_DOCUMENTS_DIR, REPORTS_DOCUMENTS_DIR
from app_utils import (generate_responses, initialize_session_state, parse_pdf_document, create_knowledge_base, generate_kb_response)
def run_upload_and_settings():
"""This function runs the upload and settings container"""
general_context = st.session_state["general_context"]
brief_description = st.text_input("Please provide a brief description of the file (e.g. This is a research report on longevity)", "")
if len(brief_description)>0:
general_context = general_context + "The following brief description of the file was provided: "+ brief_description + "\n"
st.session_state["general_context"] = general_context
uploaded_file = st.file_uploader("Choose a file")
if uploaded_file is not None:
#copy the file to "raw" folder
with open(os.path.join("../data/raw/",uploaded_file.name),"wb") as f:
f.write(uploaded_file.getbuffer())
st.session_state["uploaded_file"] = uploaded_file.name
# read the file and process the pdf
def run_chatbot():
template=""
general_context = st.session_state["general_context"]
model = st.session_state["generation_model"]
# Start button
start_button = st.button("Build Knowledge Base")
if start_button:
docs = parse_pdf_document(os.path.join("../data/raw/",st.session_state["uploaded_file"]))
# process time series data to save to knowledge base
create_knowledge_base(docs)
# Store LLM generated responses
if "messages" not in st.session_state.keys():
st.session_state.messages = []
# Display chat messages
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.write(message["content"])
# User-provided prompt
if prompt := st.chat_input("What are the fields in my data?"):
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.write(prompt)
# Generate a new response if last message is not from assistant
if st.session_state.messages and st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
with st.spinner("Thinking..."):
response = generate_kb_response(prompt, model, template)
st.write(response)
message = {"role": "assistant", "content": response}
st.session_state.messages.append(message)
# if st.session_state["generated_responses"] and not st.session_state["cleared_responses"]:
# clear_button = st.button("Clear Responses")
# if clear_button and not st.session_state["cleared_responses"]:
# print(st.session_state["responses"])
# st.session_state["generated_responses"]=False
# st.session_state["responses"] = []
# st.session_state["cleared_responses"]=True
# elif clear_button:
# st.write("No responses to clear - please generate responses")
# # responses = []
# # ratings = [None, None, None]
#llm = HuggingFacePipeline(pipeline=pipeline) | [] |
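app_sections.py only defines the two page sections used by the Streamlit app; a hypothetical entry point that wires them together might look like the sketch below (the page title and the gating on an uploaded file are assumptions, not code from the repo):

import streamlit as st
from app_sections import run_upload_and_settings, run_chatbot
from app_utils import initialize_session_state

initialize_session_state()
st.title("PDFLucy")  # assumed page title
run_upload_and_settings()
if st.session_state.get("uploaded_file"):
    run_chatbot()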
2024-01-10 | Markovian99/PDFLucy | src~app_utils.py | import pandas as pd
import streamlit as st
import os
import json
import csv
import tiktoken
import numpy as np
import time
import re
from langchain.document_loaders import PDFMinerPDFasHTMLLoader
from bs4 import BeautifulSoup
#sklearn cosine similarity
from sklearn.metrics.pairwise import cosine_similarity
from langchain.docstore.document import Document
from langchain.document_loaders import DirectoryLoader
from langchain.text_splitter import (
MarkdownTextSplitter,
PythonCodeTextSplitter,
RecursiveCharacterTextSplitter)
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.vectorstores.faiss import FAISS
from langchain.vectorstores.base import VectorStoreRetriever
from langchain.chains import ConversationalRetrievalChain
from langchain.llms import OpenAIChat
from config import MODELS, TEMPERATURE, MAX_TOKENS, DATA_FRACTION, EMBEDDING_MODELS, PROCESSED_DOCUMENTS_DIR, REPORTS_DOCUMENTS_DIR
from bardapi import Bard
import openai
from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
#question jeopardy_answer kb-gpt35_answer kb-gpt4_answer kb-40b_answer kb-llama2-13b_answer kb-llama2-13b_templated_answer kb-llama2-70b_answer (4 bit) kb-llama2-70b_answer (8 bit)
EMBEDDING_MODEL_NAME = "all-MiniLM-L6-v2"
#os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2'
# Will download the model the first time it runs
embedding_function = SentenceTransformerEmbeddings(
model_name=EMBEDDING_MODEL_NAME,
cache_folder="../models/sentencetransformers",
)
# get embedding for one sentence
def get_embedding(sentence):
try:
return embedding_function.embed_documents([sentence])[0]
except Exception as e:
print(e)
return np.zeros(384)
# make sure load_dotenv is run from main app file first
openai.api_key = os.getenv('OPENAI_API_KEY')
if os.getenv('OPENAI_API_BASE'):
openai.api_base = os.getenv('OPENAI_API_BASE')
if os.getenv('OPENAI_API_TYPE'):
openai.api_type = os.getenv('OPENAI_API_TYPE')
if os.getenv('OPENAI_API_VERSION'):
openai.api_version = os.getenv('OPENAI_API_VERSION')
#bard = Bard(token=os.getenv('BARD_API_KEY'))
def initialize_session_state():
""" Initialise all session state variables with defaults """
SESSION_DEFAULTS = {
"cleared_responses" : False,
"generated_responses" : False,
"chat_history": [],
"uploaded_file": None,
"generation_model": MODELS[0],
"general_context": "",
"temperature": TEMPERATURE,
"max_tokens": MAX_TOKENS,
"messages": []
}
for k, v in SESSION_DEFAULTS.items():
if k not in st.session_state:
st.session_state[k] = v
def num_tokens_from_string(string: str) -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
num_tokens = len(encoding.encode(string))
return num_tokens
def get_new_top_pos(c, prev_top_pos=None):
try:
return int(re.findall("top:\d+px",c.attrs['style'])[0][4:-2])
except Exception as e:
print(e)
return prev_top_pos
def parse_pdf_document(this_pdf):
""" Function to read pdf and split the content into a list of documents"""
loader = PDFMinerPDFasHTMLLoader(this_pdf)
data = loader.load()[0] # entire PDF is loaded as a single Document
soup = BeautifulSoup(data.page_content,'html.parser')
content = soup.find_all('div')
# cur_fs = None
cur_text = ''
last_top_pos = 0
new_page = True
metadata={}
metadata.update(data.metadata)
docs = [] # first collect all snippets that have the same font size
#if top of page skip and continue
for idx, c in enumerate(content):
new_top_pos = get_new_top_pos(c, prev_top_pos=last_top_pos)
if c.text=='Transcribed by readthatpodcast.com \n':
new_page = True
continue
sp = c.find('span')
if not sp:
continue
st = sp.get('style')
if not st:
continue
fs = re.findall('font-size:(\d+)px',st)
if not fs:
print(fs)
continue
# fs = int(fs[0])
# if not cur_fs:
# cur_fs = fs
if not last_top_pos:
last_top_pos = new_top_pos
#check if not 2 line spaces or if new page is a continuation of previous line
if new_top_pos<last_top_pos+30 or (new_page and not c.text[0].isupper()):
cur_text += c.text
elif new_page:
docs.append(Document(page_content=cur_text,metadata=metadata.copy()))
# cur_fs = fs
cur_text = c.text
# elif not c.text.endswith(". \n") and len(c.text)<50: #if next line is not a full line, append to current line
# cur_text += c.text
else:
docs.append(Document(page_content=cur_text,metadata=metadata.copy()))
# cur_fs = fs
cur_text = c.text
last_top_pos = new_top_pos
new_page = False
if cur_text!='':
docs.append(Document(page_content=cur_text,metadata=metadata.copy()))
section="Introduction"
new_section=False
final_docs=[]
doc_idx=0
#combine document sections based on provided timestamps
for idx, doc in enumerate(docs):
#check if new section / if it was a timestamp
timestamp=re.search("\d+:\d+:\d+",doc.page_content)
if not timestamp:
timestamp=re.search("\d+:\d+",doc.page_content)
if idx==0:
doc.metadata.update({'section':section,'doc_idx':doc_idx})
final_docs.append(doc)
doc_idx+=1
elif timestamp and timestamp.start()==0 and not new_section:
section=doc.page_content
new_section=True
if idx<len(docs)-1:
#get the last sentence from the previous content page
last_sent=docs[idx-1].page_content.split(".")[-1]
if len(last_sent)<10:
last_sent=docs[idx-1].page_content
# CHANGE THIS TO ITERATE OVER SENTENCES INSTEAD OF JUST LOOK AT THE FIRST SENTENCE
next_sent=docs[idx+1].page_content.split(".")[0]
if next_sent[0].islower() and len(next_sent)<50:
final_docs[-1].page_content=final_docs[-1].page_content + next_sent + "."
docs[idx+1].page_content=".".join(docs[idx+1].page_content.split(".")[1:])#remove the first sentence from the next document
elif len(next_sent)<len(docs[idx+1].page_content):
this_emb=get_embedding(section)
last_emb=get_embedding(last_sent)
next_emb=get_embedding(next_sent)
#if the next sentence is more similar to the previous sentence than the current section, then combine
if cosine_similarity([this_emb],[next_emb])[0][0] <cosine_similarity([last_emb],[next_emb])[0][0]:
final_docs[-1].page_content=final_docs[-1].page_content + next_sent + "."
docs[idx+1].page_content=".".join(docs[idx+1].page_content.split(".")[1:]) #remove the first sentence from the next document
else:
# metadata=doc.metadata
doc.metadata.update({'section':section,'doc_idx':doc_idx})
if new_section:
doc.page_content=section + "\n" + doc.page_content
new_section=False
# doc.metadata=metadata
final_docs.append(doc)
doc_idx+=1
return final_docs
# This is a dummy function to simulate generating responses.
def generate_responses(prompt, model, template="", temperature=0):
response = "No model selected"
if model != "None":
st.session_state["generation_models"] = model
if model.startswith("Google"):
this_answer = bard.get_answer(prompt)
response = this_answer['content']
elif model.startswith("OpenAI: "):
# try to call openai and if it fails wait 5 seconds and try again
try:
                response_full = openai.ChatCompletion.create( model=model[8:], messages=[{"role": "user", "content": prompt }], temperature=temperature)
except:
st.warning("OpenAI API call failed. Waiting 5 seconds and trying again.")
time.sleep(5)
response_full = openai.ChatCompletion.create( model=model[8:], messages=[{"role": "user", "content": prompt }], temperature=temperature)
response = response_full['choices'][0]['message']['content']
return response
def split_json_doc_with_header(doc):
"""Separate header on line one from json doc and split json dict by keys"""
try:
header = doc.page_content.split("\n")[0]
#print(doc.page_content[len(header)+1:])
json_dict = json.loads(doc.page_content[len(header)+1:])
doc_list = []
for key in json_dict.keys():
doc_list.append(Document(page_content=header+'Data for ' +str(key)+ ':\n'+str(json_dict[key]), metadata=doc.metadata))
return doc_list
except Exception as e:
print(e)
print("Unable to split " + doc.metadata['source'])
return [doc]
def create_knowledge_base(docs):
"""Create knowledge base for chatbot."""
print(f"Loading {PROCESSED_DOCUMENTS_DIR}")
docs_orig = docs
print(f"Splitting {len(docs_orig)} documents")
# docs = []
# for doc in docs_orig:
# print(doc)
# num_tokens = num_tokens_from_string(doc.page_content)
# if num_tokens > int(.1*MAX_TOKENS):
# doc_list = split_json_doc_with_header(doc)
# docs.extend(doc_list)
# else:
# docs.append(doc)
print(f"Created {len(docs)} documents")
# Will download the model the first time it runs
embedding_function = SentenceTransformerEmbeddings(
model_name=EMBEDDING_MODELS[0],
cache_folder="../models/sentencetransformers",
)
texts = [doc.page_content for doc in docs]
metadatas = [doc.metadata for doc in docs]
print("""
Computing embedding vectors and building FAISS db.
        WARNING: This may take a long time. You may want to increase the number of CPUs in your notebook.
"""
)
db = FAISS.from_texts(texts, embedding_function, metadatas=metadatas)
# Save the FAISS db
db.save_local("../data/faiss-db")
print(f"FAISS VectorDB has {db.index.ntotal} documents")
def generate_kb_response(prompt, model, template=None):
data_dict = {}
data_dict['prompt'] = prompt
data_dict['chat_history'] = []
if model.startswith("OpenAI: "):
llm = OpenAIChat(model=model[8:], max_tokens=3000, temperature=TEMPERATURE)
else:
return "Please select an OpenAI model."
# Will download the model the first time it runs
embedding_function = SentenceTransformerEmbeddings(
model_name=EMBEDDING_MODELS[0],
cache_folder="../models/sentencetransformers",
)
db = FAISS.load_local("../data/faiss-db", embedding_function)
retriever = VectorStoreRetriever(vectorstore=db, search_kwargs={"k": 3})
chain = ConversationalRetrievalChain.from_llm(llm, retriever=retriever,return_source_documents=True) #, return_source_documents=True
# prompt_template = """
# ### System:
# {context}
# ### User:
# {question}
# ### Assistant:
# """
# PROMPT = PromptTemplate(
    #     template=prompt_template, input_variables=["context", "question"]
# )
# chain_type_kwargs = {"prompt": PROMPT}
# qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever, chain_type_kwargs=chain_type_kwargs)
# query = data_dict['prompt']
# return qa.run(query)
response = chain(inputs={'question':data_dict['prompt'], 'chat_history':data_dict['chat_history']})
print(response)
return response['answer']
| [] |
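Taken together, parse_pdf_document, create_knowledge_base and generate_kb_response form a single retrieval pipeline; a minimal sketch of driving it, assuming an example PDF under ../data/raw/ and an OPENAI_API_KEY in the environment (the file name and the question are made up):

docs = parse_pdf_document("../data/raw/example_transcript.pdf")
create_knowledge_base(docs)  # builds and saves the FAISS index to ../data/faiss-db
answer = generate_kb_response("Which supplements are discussed in the episode?", model="OpenAI: gpt-3.5-turbo")
print(answer)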
2024-01-10 | lkra/dstc11-track5 | CLTeamL~synthetic_knowledge.py | import argparse
import json
import os
import re
import openai
from dotenv import load_dotenv
from tqdm import tqdm
from approaches.prompting import chatgpt
from scripts.knowledge_reader import KnowledgeReader
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
def main(args):
augmented_knowledge = KnowledgeReader("../data/aug/")
entities = augmented_knowledge.get_entity_list(args.domain)
new_knowledge = {args.domain: {}}
for entity in tqdm(entities):
entity_id = entity["id"]
entity_name = entity["name"]
entity_obj = augmented_knowledge.knowledge[args.domain][str(entity_id)]
faqs = {
doc_id: {
"question": doc_obj["title"],
"answer": doc_obj["body"]
}
for doc_id, doc_obj in entity_obj["docs"].items()
}
new_entity = {
entity_id : {
"name": entity_name,
"faqs": faqs,
}
}
prompt_text = f"Given this example: {new_entity}, can you generate three more reviews, not more than 2 sentences, as: traveler type: review?"
prompt = [{
"role": "system",
"content": prompt_text,
}]
output = chatgpt(prompt)
response = output["text"]
entity_reviews = list(filter(bool, response.splitlines()))
reviews = {}
for i, review in enumerate(entity_reviews):
split_review = review.split(":")
if len(split_review) == 1:
continue
traveler_type = split_review[0]
traveler_type = re.sub(r"^\d+\.\s", "", traveler_type)
traveler_review = {}
for j, sentence in enumerate(split_review[1].split(".")):
sentence = sentence.strip().replace('"', "")
if sentence:
if sentence[-1] != ".":
sentence = f"{sentence}."
traveler_review[str(j)] = sentence
reviews[str(i)] = {
"traveler_type": traveler_type,
"sentences": traveler_review,
}
new_knowledge[args.domain][str(entity_id)] = {
"name": entity_name,
"reviews": reviews,
"faqs": faqs,
}
with open(args.output_file, "w") as f:
json.dump(new_knowledge, f, indent=2)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--domain', default="taxi", type=str,
help="Choose one of the following domains for which reviews are needed: train, taxi, attraction")
parser.add_argument('--output_file', type=str,
help="Path to where the new knowledge base should be saved.")
args = parser.parse_args()
config = vars(args)
print("Parameters:")
for k, v in config.items():
print(f" {k:>21} : {v}")
main(args)
| [
"Given this example: PLACEHOLDER, can you generate three more reviews, not more than 2 sentences, as: traveler type: review?"
] |
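synthetic_knowledge.py is meant to be run from the command line; a typical invocation might look like this (the output path is an assumption):

python synthetic_knowledge.py --domain taxi --output_file ../data/aug/taxi_reviews.json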
2024-01-10 | lkra/dstc11-track5 | CLTeamL~utils~prompting_helpers.py | import math
from typing import Union
import numpy as np
import openai
import pandas as pd
import tiktoken
from scripts.dataset_walker import DatasetWalker
EXAMPLE_SEPARATOR = "\n\n-----------------------\n\n"
def gpt3(prompts: list, model: str = "text-davinci-002"):
""" functions to call GPT3 predictions, works on batches """
response = openai.Completion.create(
model=model,
prompt=prompts,
temperature=0,
max_tokens=100,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
logprobs=1
)
return [
{
'prompt': prompt,
'text': response['text'],
'tokens': response['logprobs']['tokens'],
'logprob': response['logprobs']['token_logprobs']
}
for response, prompt in zip(response.choices, prompts)
]
def chatgpt(utterances: list, model: str = "gpt-3.5-turbo"):
""" functions to call ChatGPT predictions """
response = openai.ChatCompletion.create(
model=model,
messages=utterances,
temperature=0,
top_p=1,
max_tokens=100, # Manually change to 60 with prompt style 1
frequency_penalty=0,
presence_penalty=0
)
return {
'prompt': utterances,
'text': response['choices'][0]['message']['content'],
'finish_reason': response['choices'][0]['finish_reason']
}
def num_tokens_from_messages(messages: Union[list, str], model: str = "gpt-3.5-turbo"):
"""Returns the number of tokens used by a list of messages."""
encoding = tiktoken.encoding_for_model(model)
if model == "gpt-3.5-turbo": # note: future models may deviate from this
num_tokens = 0
for message in messages:
num_tokens += 4 # every message follows <im_start>{role/name}\n{content}<im_end>\n
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name": # if there's a name, the role is omitted
num_tokens += -1 # role is always required and always 1 token
num_tokens += 2 # every reply is primed with <im_start>assistant
return num_tokens
if model == "text-davinci-002":
num_tokens = len(encoding.encode(messages))
return num_tokens
else:
raise NotImplementedError(f"""num_tokens_from_messages() is not presently implemented for model {model}.""")
def format_knowledge(reference: dict, prompt_style: float = 0):
if "knowledge" not in reference.keys():
return ""
# sort per type
if prompt_style in [3]:
reference['knowledge'] = sorted(reference['knowledge'], key=lambda d: d['doc_type'])
# Prettify
if prompt_style in [0, 1, 2, 3]:
k = []
for el in reference['knowledge']:
if 'sent' in el.keys():
k.append(f"{el['doc_type'].upper()}: {el['sent']}")
else:
k.append(f"{el['doc_type'].upper()}: Q: {el['question']} A: {el['answer']}")
k = '\n\t'.join(k)
# Separate and prettify per type
elif prompt_style in [3.1]:
faqs = [f"{el['doc_type'].upper()}: Q: {el['question']} A: {el['answer']}"
for el in reference['knowledge'] if el['doc_type'] == 'faq']
reviews = [f"{el['doc_type'].upper()}: {el['sent']}"
for el in reference['knowledge'] if el['doc_type'] == 'review']
faqs = '\n\t'.join(faqs)
reviews = '\n\t'.join(reviews)
faqs = f"FAQs\n\t{faqs}\n\n" if faqs else ""
reviews = f"Reviews\n\t{reviews}" if reviews else ""
k = f"{faqs}{reviews}"
else:
k = ''
return k
def resolve_speaker(speaker: str, prompt_style: float = 0):
if speaker == "U":
return "user" if prompt_style == 0 else "USER"
elif speaker == "S":
return "assistant" if prompt_style == 0 else "YOU"
def append_prompt_examples(prompt, example_prompt, prompt_style):
if prompt_style in [2, 3]: # TODO style 0
# ChatGPT call
prompt[0]['content'] = f"{example_prompt}{EXAMPLE_SEPARATOR}{prompt[0]['content']}"
elif prompt_style in [1]:
# GPT3 call
prompt = f"{example_prompt}\n{prompt}"
else:
raise ValueError(f"Unknown prompt style: {prompt_style}")
return prompt
def hardest_examples(n_samples):
dataset = pd.read_csv(f'./../data_analysis/output/analysis_train.csv')
dataset.sort_values(by=["ref_know_nr", "ref_know_avg_sentiment"], ascending=[False, True], inplace=True)
# sns.kdeplot(data=dataset, x="ref_know_nr")
# plt.show()
# sns.kdeplot(data=dataset, x="ref_know_avg_sentiment")
# plt.show()
# Filter by number of knowledge items that are "higher than usual"
dataset = dataset[dataset['ref_know_nr'] < dataset['ref_know_nr'].mean() + 2 * dataset['ref_know_nr'].std()]
dataset = dataset[dataset['ref_know_nr'] > dataset['ref_know_nr'].mean() + dataset['ref_know_nr'].std()]
# Filter by average sentiment that is "lower than usual"
dataset = dataset[dataset['ref_know_avg_sentiment'] > dataset['ref_know_avg_sentiment'].mean()
- 2 * dataset['ref_know_avg_sentiment'].std()]
dataset = dataset[dataset['ref_know_avg_sentiment'] < dataset['ref_know_avg_sentiment'].mean()
- dataset['ref_know_avg_sentiment'].std()]
# Around 2/3 of responses contain a question, so sample accordingly
n_samples_without = math.floor(n_samples / 3)
n_samples_with = n_samples - n_samples_without
# Select according to questions
samples_without = dataset[dataset['ref_response_question'].isnull()][:n_samples_without]
samples_with = dataset[~dataset['ref_response_question'].isnull()][:n_samples_with]
# Format as the original dataset
example_idx = np.concatenate((samples_without.index.values, samples_with.index.values))
example_data = DatasetWalker(dataset="train", dataroot="./../../data/", labels=True, incl_knowledge=True)
examples = []
for idx in example_idx:
# Format example in the same way
example = example_data[idx]
examples.append(example)
return examples
def random_examples(n_samples):
example_data = DatasetWalker(dataset="train", dataroot="./../../data/", labels=True, incl_knowledge=True)
example_data.filter_knowledge_only()
example_idx = np.random.randint(0, len(example_data), size=n_samples)
examples = []
for idx in example_idx:
# Format example in the same way
example = example_data[idx]
examples.append(example)
return examples
| [
"PLACEHOLDER\nPLACEHOLDER"
] |
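A small usage sketch for chatgpt() and num_tokens_from_messages() defined above; the dialogue is invented and an OPENAI_API_KEY is assumed to be configured:

utterances = [
    {"role": "system", "content": "You are a hotel assistant."},
    {"role": "user", "content": "Is breakfast included in the room rate?"},
]
print(num_tokens_from_messages(utterances, model="gpt-3.5-turbo"))  # check the token budget first
result = chatgpt(utterances)
print(result["text"], result["finish_reason"])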
2024-01-10 | hamedR96/ANTM | antm~cm.py |
from gensim.corpora.dictionary import Dictionary
from gensim.models import CoherenceModel
def coherence_model(topics,tokens,dictionary,tr_num,c_m="c_npmi"):
cm = CoherenceModel(topics=topics, texts=tokens, dictionary=dictionary,
coherence=c_m, processes=1, topn=tr_num).get_coherence()
return cm
| [] |
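A minimal sketch of how coherence_model() is called; the tokenised corpus and topics below are toy data rather than ANTM output:

from gensim.corpora.dictionary import Dictionary

tokens = [["price", "inflation", "market"], ["goal", "match", "league"]]
topics = [["price", "inflation"], ["goal", "match"]]
dictionary = Dictionary(tokens)
print(coherence_model(topics, tokens, dictionary, tr_num=2, c_m="c_npmi"))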
2024-01-10 | leonardhorns/rlp-usageinfo | helpers~review.py | from __future__ import annotations
import itertools
import random
from copy import copy, deepcopy
from datetime import datetime, timezone
from typing import Iterable, Optional, Union, TYPE_CHECKING
import json
from pathlib import Path
import dateutil.parser
import helpers.label_selection as ls
from evaluation.scoring import DEFAULT_METRICS
if TYPE_CHECKING:
from helpers.label_selection import LabelSelectionStrategyInterface
class Review:
review_attributes = {
"customer_id",
"helpful_votes",
"labels",
"marketplace",
"product_category",
"product_id",
"product_parent",
"product_title",
"review_body",
"review_date",
"review_headline",
"star_rating",
"total_votes",
"verified_purchase",
"vine",
}
label_attributes = {
"createdAt",
"datasets",
"metadata",
"scores",
"usageOptions",
"augmentations",
}
def __init__(self, review_id: str, data: dict) -> None:
self.review_id = review_id
self.data = data
self.tokenized_datapoints = None
def __getitem__(self, key: str) -> Union[str, int, dict]:
if key in self.data:
return self.data[key]
raise ValueError(f"review '{self.review_id}' does not contain key '{key}'")
def __eq__(self, other) -> bool:
if isinstance(other, Review):
return self.__key() == other.__key()
else:
return False
def __key(self) -> str:
return self.review_id
def __hash__(self) -> int:
return hash(self.__key())
def __or__(self, other: "Review") -> "Review":
return self.merge_labels(other, inplace=False)
def __ior__(self, other: "Review") -> None:
self.merge_labels(other, inplace=True)
return self
def __copy__(self):
return Review(self.review_id, copy(self.data))
def __deepcopy__(self, memo):
return Review(self.review_id, deepcopy(self.data, memo))
def __str__(self):
simple_data = {}
simple_data["product_title"] = self.data["product_title"]
simple_data["review_headline"] = self.data["review_headline"]
simple_data["review_body"] = self.data["review_body"]
simple_data["labels"] = self.data["labels"].copy()
for label_id, label in simple_data["labels"].items():
simple_data["labels"][label_id] = label["usageOptions"]
return f"Review {self.review_id} " + json.dumps(simple_data, indent=4)
def __repr__(self):
return json.dumps(self.data, indent=4)
def get_labels(self) -> dict:
return self.data.get("labels", {})
def get_label_ids(self) -> set[str]:
return set(self.get_labels().keys())
def get_usage_options(self, label_id: str) -> list[str]:
labels = self.get_label_for_id(label_id)
return labels.get("usageOptions", []) if labels is not None else []
def _check_strategy(self, strategy: LabelSelectionStrategyInterface) -> None:
if not isinstance(strategy, ls.LabelSelectionStrategyInterface):
raise ValueError(
f"strategy '{type(strategy)}' doesn't implement LabelSelectionStrategyInterface"
)
def get_label_from_strategy(
self, strategy: LabelSelectionStrategyInterface
) -> Optional[dict]:
self._check_strategy(strategy)
return strategy.retrieve_label(self)
def get_label_id_from_strategy(
self, strategy: LabelSelectionStrategyInterface
) -> Optional[str]:
self._check_strategy(strategy)
return strategy.retrieve_label_id(self)
def get_labels_from_strategy(
self, strategy: LabelSelectionStrategyInterface
) -> list[dict]:
self._check_strategy(strategy)
return strategy.retrieve_labels(self)
def get_label_ids_from_strategy(
self, strategy: LabelSelectionStrategyInterface
) -> list[str]:
self._check_strategy(strategy)
return strategy.retrieve_label_ids(self)
def get_label_for_dataset(
self, *dataset_names: Union[str, tuple[str, str]]
) -> Optional[dict]:
return self.get_label_from_strategy(ls.DatasetSelectionStrategy(*dataset_names))
def get_label_for_id(self, *label_ids: str) -> Optional[dict]:
return self.get_label_from_strategy(ls.LabelIDSelectionStrategy(*label_ids))
def reset_scores(self):
for label_id, label in self.get_labels().items():
label["scores"] = {}
def add_label(
self,
label_id: str,
usage_options: list[str],
datasets: list[str] = [],
metadata: dict = {},
overwrite: bool = False,
) -> None:
if not overwrite:
assert (
label_id not in self.get_labels()
), f"label '{label_id}' already exists in review '{self.review_id}'"
self.data["labels"][label_id] = {
# using ISO 8601 with UTC timezone, https://stackoverflow.com/a/63731605
"createdAt": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"),
"usageOptions": usage_options,
"scores": {},
"datasets": datasets,
"metadata": metadata,
"augmentations": {},
}
def get_label_type(
self, label_selection_strategy: LabelSelectionStrategyInterface
) -> str:
labels = self.get_labels_from_strategy(label_selection_strategy)
label_type = None
for label in labels:
if label_type == "both":
return "both"
else:
if len(label["usageOptions"]) == 0:
if label_type == "usage":
label_type = "both"
else:
label_type = "no_usage"
elif len(label["usageOptions"]) > 0:
if label_type == "no_usage":
label_type = "both"
else:
label_type = "usage"
return label_type
def label_has_usage_options(
self, label_selection_strategy: LabelSelectionStrategyInterface
) -> bool:
label = self.get_label_from_strategy(label_selection_strategy)
if not label:
raise ValueError(
f"Review {self.review_id} does not have any matching labels"
)
return len(label["usageOptions"]) > 0
def tokenize(
self,
tokenizer,
text: str,
for_training: bool,
is_input: bool,
max_length: int = float("inf"),
) -> Optional[dict]:
from training.utils import MAX_OUTPUT_LENGTH
max_length = (
min(MAX_OUTPUT_LENGTH, max_length)
if (not is_input and for_training)
else max_length
)
if not is_input and for_training and len(text) == 0:
text = "no usage options"
tokens = tokenizer(
text,
return_tensors="pt",
truncation=not for_training,
max_length=max_length,
padding="max_length",
)
# Remove batch dimension, since we only have one example
tokens["input_ids"] = tokens["input_ids"][0]
tokens["attention_mask"] = tokens["attention_mask"][0]
if not is_input and for_training:
ids = tokens["input_ids"]
            # You need to set the pad tokens for the input to -100 for some Transformers (https://github.com/huggingface/transformers/issues/9770)
tokens["input_ids"][ids[:] == tokenizer.pad_token_id] = -100
return tokens if len(tokens["input_ids"]) <= max_length else None
def _get_output_texts_from_strategy(
self, usage_options: list[str], strategy: str = None
) -> list[str]:
if not strategy or strategy == "default":
return ["; ".join(usage_options)]
elif strategy == "flat":
return usage_options or [""]
elif strategy.startswith("shuffle"):
usage_options = copy(usage_options)
random.shuffle(usage_options)
if not strategy.startswith("shuffle-"):
return ["; ".join(usage_options)]
permutation_limit = strategy.split("-")[1]
if permutation_limit == "all":
permutation_limit = None
else:
try:
permutation_limit = int(permutation_limit)
if permutation_limit < 1:
raise ValueError(
"Number of permutations must be greater than 0"
)
except ValueError as e:
if str(e) == "Number of permutations must be greater than 0":
raise e
raise ValueError(
f"Could not parse number of permutations for shuffle strategy '{strategy}'",
"Please use 'shuffle-<number_of_permutations>' or 'shuffle-all'",
)
permutations = [
"; ".join(permutation)
for permutation in itertools.islice(
itertools.permutations(usage_options), permutation_limit
)
]
return permutations
else:
raise ValueError(f"strategy '{strategy}' not supported")
def get_prompt(self, prompt_id="avetis_v1") -> str:
from langchain import PromptTemplate
path = Path(__file__).parent.parent / "prompts.json"
with open(path) as f:
prompts = json.load(f)
prompt_text = prompts["model-training"][prompt_id]["prompt"]
prompt_input_variables = prompts["model-training"][prompt_id]["input_variables"]
prompt = PromptTemplate(
template=prompt_text,
input_variables=prompt_input_variables,
validate_template=False,
)
prompt = prompt.format(
**{
key: value
for key, value in self.data.items()
if key in prompt_input_variables
}
)
return prompt
def get_tokenized_datapoints(
self,
selection_strategy: Optional[ls.LabelSelectionStrategyInterface] = None,
multiple_usage_options_strategy: Optional[str] = None,
prompt_id: str = "avetis_v1",
**tokenization_kwargs,
) -> Iterable[dict]:
def format_dict(model_input, output, review_id, source_id) -> dict:
return {
"input": model_input,
"output": output,
"review_id": review_id,
"source_id": source_id,
}
model_input = self.get_prompt(prompt_id=prompt_id)
model_input = self.tokenize(
text=model_input,
is_input=True,
**tokenization_kwargs,
)
# Returns 0 if when no selection strategy. We are using 0 instead of None because of the dataloader
label = (
self.get_label_from_strategy(selection_strategy)
if selection_strategy
else None
)
if not label:
yield format_dict(model_input, 0, self.review_id, "no_label")
return
output_texts = self._get_output_texts_from_strategy(
label["usageOptions"], strategy=multiple_usage_options_strategy
)
for id, output_text in enumerate(output_texts):
yield format_dict(
model_input,
self.tokenize(
text=output_text.lower(), # we enforce lower case because model does not need to learn casing
is_input=False,
**tokenization_kwargs,
),
self.review_id,
f"{multiple_usage_options_strategy}_{id}",
)
def remove_label(self, label_id: str, inplace=True) -> Optional["Review"]:
review_without_label = (
self if inplace else Review(self.review_id, deepcopy(self.data))
)
review_without_label.data["labels"].pop(label_id, None)
# remove score references to deleted labels
for label in review_without_label.data["labels"].values():
label["scores"].pop(label_id, None)
if not inplace:
return review_without_label
def score(
self,
label_id: str,
reference_label_id: str,
metric_ids: Iterable[str] = DEFAULT_METRICS,
):
from evaluation.scoring.metrics import SingleReviewMetrics
scores = self.get_label_for_id(label_id)["scores"]
if reference_label_id not in scores:
scores[reference_label_id] = {}
available_metrics = scores.get(reference_label_id, {})
missing_metric_ids = set(metric_ids).difference(set(available_metrics.keys()))
if len(missing_metric_ids) > 0:
# calculate missing metrics
from evaluation.scoring.metrics import SingleReviewMetrics
result = SingleReviewMetrics.from_labels(
self.get_labels(), label_id, reference_label_id
).calculate(missing_metric_ids, include_pos_neg_info=True)
for metric_id, metric_tuple in result.items():
scores[reference_label_id][metric_id] = metric_tuple
return copy(scores[reference_label_id])
def get_scores(
self,
label_id: Union[str, ls.LabelSelectionStrategyInterface],
*reference_label_candidates: Union[str, ls.LabelSelectionStrategyInterface],
metric_ids: Iterable[str] = DEFAULT_METRICS,
) -> Optional[dict]:
if isinstance(label_id, ls.LabelSelectionStrategyInterface):
label_id = self.get_label_id_from_strategy(label_id)
        # Work on a mutable list; *args arrives as a tuple, which has no remove()
        reference_label_candidates = list(reference_label_candidates)
        for reference_label_candidate in copy(reference_label_candidates):
if isinstance(
reference_label_candidate, ls.LabelSelectionStrategyInterface
):
strategy_candidates = self.get_label_ids_from_strategy(
reference_label_candidate
)
reference_label_candidates.remove(reference_label_candidate)
reference_label_candidates += strategy_candidates
"""return scores for a specified reference label or the best scores if multiple reference labels are specified"""
reference_label_candidates = set(reference_label_candidates).intersection(
self.get_label_ids()
)
if label_id not in self.get_label_ids() or not reference_label_candidates:
return None
reference_scores = {}
for reference_label_id in reference_label_candidates:
reference_scores[reference_label_id] = self.score(
label_id, reference_label_id, metric_ids
)
max_score = {m: (-1, None, None) for m in metric_ids}
for m_id in metric_ids:
for ref_id in reference_label_candidates:
# reference_scores[ref_id][m_id] is a tuple (score, prediction_is_positive_usage, reference_is_positive_usage)
score = reference_scores[ref_id][m_id][0]
if score >= max_score[m_id][0]:
max_score[m_id] = reference_scores[ref_id][m_id]
return max_score
def merge_labels(
self, other_review: "Review", inplace: bool = False
) -> Optional["Review"]:
"""Merge labels from another review into this one.
This method is used to merge labels of the same review into this object.
"""
assert self == other_review, "cannot merge labels of different reviews"
existing_labels = self.get_labels()
if not inplace:
existing_labels = deepcopy(existing_labels)
additional_labels = deepcopy(other_review.get_labels())
for label_id, other_label in additional_labels.items():
if label_id not in existing_labels:
existing_labels[label_id] = other_label
else:
own_label = existing_labels[label_id]
# validate same usage options
assert (
own_label["usageOptions"] == other_label["usageOptions"]
), f"'{label_id}' in review '{other_review.review_id}' has inconsistent usage options"
# merge scores
for ref_id, other_score_dict in other_label["scores"].items():
if ref_id not in own_label["scores"]:
own_label["scores"][ref_id] = other_score_dict
else:
# merge different score metrics for same reference
own_label["scores"][ref_id].update(other_score_dict)
# merge datasets and metadata
own_label["datasets"] = list(
set(own_label["datasets"]) | set(other_label["datasets"])
)
own_label["augmentations"] |= other_label["augmentations"]
own_label["metadata"] |= other_label["metadata"]
if not inplace:
return Review(
self.review_id, self.data.copy() | {"labels": existing_labels}
)
self.data["labels"] = existing_labels
def validate(self) -> None:
error_msg_prefix = f"encountered error in review '{self.review_id}':"
data_keys_set = set(self.data.keys())
if not set(self.review_attributes).issubset(set(data_keys_set)):
raise ValueError(
f"{error_msg_prefix} wrong attribute names\n"
f"got: {data_keys_set}\nexpected: {self.review_attributes}"
)
labels = self.get_labels()
if not isinstance(labels, dict):
raise ValueError(
f"{error_msg_prefix} 'labels' is not of type dict but {type(labels)}"
)
for label_id, label in labels.items():
if not isinstance(label, dict):
raise ValueError(
f"{error_msg_prefix} label '{label_id}' is not of type dict but {type(label)}"
)
label_keys_set = set(label.keys())
if label_keys_set != self.label_attributes:
raise ValueError(
f"{error_msg_prefix} wrong keys in label '{label_id}'\n"
f"got: {label_keys_set}\nexpected: {self.label_attributes}",
)
if not isinstance(label["usageOptions"], list):
raise ValueError(
f"{error_msg_prefix} 'usageOptions' in label '{label_id}' is not of type list but {type(label['usageOptions'])}",
)
if not isinstance(label["metadata"], dict):
raise ValueError(
f"{error_msg_prefix} 'metadata' in label '{label_id}' is not of type dict but {type(label['metadata'])}",
)
if not isinstance(label["scores"], dict):
raise ValueError(
f"{error_msg_prefix} 'scores' in label '{label_id}' is not of type dict but {type(label['scores'])}",
)
if not isinstance(label["datasets"], list):
raise ValueError(
f"{error_msg_prefix} 'datasets' in label '{label_id}' is not of type dict but {type(label['datasets'])}",
)
try:
dateutil.parser.isoparse(label["createdAt"])
except Exception:
raise ValueError(
f"{error_msg_prefix} 'createdAt' timestamp in label '{label_id}' is not ISO 8601",
)
| [
"False",
"input_variables",
"model-training"
] |
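A short usage sketch for the Review class; the review id, fields and label values are made up, and it assumes helpers.label_selection resolves label ids as in the original package:

data = {attr: "" for attr in Review.review_attributes}
data["labels"] = {}
review = Review("R1EXAMPLE", data)
review.add_label("manual-label", ["grinding coffee beans"], datasets=["train"])
print(review.get_usage_options("manual-label"))  # ['grinding coffee beans']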
2024-01-10 | nogibjj/Yuanjing_codespaces | build_question_answer.py | import openai
import os
import click
def submit_question(text):
"""This submits a question to the OpenAI API"""
openai.api_key = os.getenv("OPENAI_API_KEY")
prompt = text
result = openai.Completion.create(
prompt=prompt,
temperature=0,
max_tokens=300,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
model="text-davinci-002",
)["choices"][0]["text"].strip(" \n")
return result
@click.command()
@click.argument('text')
def main(text):
"""This is the main function that you ask the OpenAI API a question to get an answer
example: python questionAnswerCLI.py "Who won the 2020 Summer Olympics"
"""
print(submit_question(text))
if __name__ == "__main__":
#pylint: disable=no-value-for-parameter
main() | [] |
2024-01-10 | nogibjj/Yuanjing_codespaces | example_openai.py | import openai
import os
import click
def submit_question(text):
"""This submits a question to the OpenAI API"""
openai.api_key = os.getenv("OPENAI_API_KEY")
prompt = text
result = openai.Completion.create(
prompt=prompt,
temperature=0,
max_tokens=300,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
model="text-davinci-002",
)["choices"][0]["text"].strip(" \n")
return result | [] |
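example_openai.py defines submit_question() but, unlike build_question_answer.py, ships no entry point; a minimal driver could look like this (the question is an arbitrary example and OPENAI_API_KEY must be set):

if __name__ == "__main__":
    print(submit_question("Who won the 2020 Summer Olympics 100m final?"))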
2024-01-10 | avalanche05/limehd | backend~scripts~ai_markdown.py | from openai import OpenAI
from limehd.dependencies import get_db
from limehd.models import Program
def get_program_genre(program_name: str, program_description: str) -> str:
response = client.completions.create(
model="gpt-3.5-turbo-instruct",
prompt=f"Определи жанр теле-программы по названию и описанию. Выбери один жанр из [ток-шоу, здоровье, /"
f"новости, реалити, детектив, документальный, спорт, драма, криминал, образовательный, развлекательное, /"
f"музыкальный, комедия, разное, мультфильм], который подходит. /"
f"Название: '{program_name}'. Описание: '{program_description}. Ответь одним словом без лишних символов.")
return response.choices[0].text
client = OpenAI(api_key='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')
db = next(get_db())
programs = db.query(Program).all()
genre_list = []
with open("genres.txt", "a", encoding="utf-8") as f:
for program in programs:
genre = get_program_genre(program.name, program.description).strip('\n').lower()
genre_list.append((program.id, program.name, genre))
f.write(f'{program.id},{program.name},{genre}\n')
print(f'{program.name=}, {get_program_genre(program.name, program.description)=}')
print('success')
| [
"Определи жанр теле-программы по названию и описанию. Выбери один жанр из [ток-шоу, здоровье, /новости, реалити, детектив, документальный, спорт, драма, криминал, образовательный, развлекательное, /музыкальный, комедия, разное, мультфильм], который подходит. /Название: 'PLACEHOLDER'. Описание: 'PLACEHOLDER. Ответь одним словом без лишних символов."
] |
2024-01-10 | GaussianTech/deep-rl-toolkit | rltoolkit~env~atari_wrapper.py | # Borrow a lot from openai baselines:
# https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
import warnings
from collections import deque
import cv2
import gym
import numpy as np
from tianshou.env import ShmemVectorEnv
try:
import envpool
except ImportError:
envpool = None
class NoopResetEnv(gym.Wrapper):
"""Sample initial states by taking random number of no-ops on reset. No-op
is assumed to be action 0.
:param gym.Env env: the environment to wrap.
:param int noop_max: the maximum value of no-ops to run.
"""
def __init__(self, env, noop_max=30):
super().__init__(env)
self.noop_max = noop_max
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self):
self.env.reset()
if hasattr(self.unwrapped.np_random, 'integers'):
noops = self.unwrapped.np_random.integers(1, self.noop_max + 1)
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset()
return obs
class MaxAndSkipEnv(gym.Wrapper):
"""Return only every `skip`-th frame (frameskipping) using most recent raw
observations (for max pooling across time steps)
:param gym.Env env: the environment to wrap.
:param int skip: number of `skip`-th frame.
"""
def __init__(self, env, skip=4):
super().__init__(env)
self._skip = skip
def step(self, action):
"""Step the environment with the given action.
Repeat action, sum reward, and max over last observations.
"""
obs_list, total_reward, done = [], 0., False
for _ in range(self._skip):
obs, reward, done, info = self.env.step(action)
obs_list.append(obs)
total_reward += reward
if done:
break
max_frame = np.max(obs_list[-2:], axis=0)
return max_frame, total_reward, done, info
class EpisodicLifeEnv(gym.Wrapper):
"""Make end-of-life == end-of-episode, but only reset on true game over. It
helps the value estimation.
:param gym.Env env: the environment to wrap.
"""
def __init__(self, env):
super().__init__(env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal, then update lives to
# handle bonus lives
lives = self.env.unwrapped.ale.lives()
if 0 < lives < self.lives:
# for Qbert sometimes we stay in lives == 0 condition for a few
# frames, so its important to keep lives > 0, so that we only reset
# once the environment is actually done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self):
"""Calls the Gym environment reset, only when lives are exhausted.
This way all states are still reachable even though lives are episodic, and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset()
else:
# no-op step to advance from terminal/lost life state
obs = self.env.step(0)[0]
self.lives = self.env.unwrapped.ale.lives()
return obs
class FireResetEnv(gym.Wrapper):
"""Take action on reset for environments that are fixed until firing.
Related discussion: https://github.com/openai/baselines/issues/240.
:param gym.Env env: the environment to wrap.
"""
def __init__(self, env):
super().__init__(env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self):
self.env.reset()
return self.env.step(1)[0]
class WarpFrame(gym.ObservationWrapper):
"""Warp frames to 84x84 as done in the Nature paper and later work.
:param gym.Env env: the environment to wrap.
"""
def __init__(self, env):
super().__init__(env)
self.size = 84
self.observation_space = gym.spaces.Box(
low=np.min(env.observation_space.low),
high=np.max(env.observation_space.high),
shape=(self.size, self.size),
dtype=env.observation_space.dtype)
def observation(self, frame):
"""returns the current observation from a frame."""
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
return cv2.resize(
frame, (self.size, self.size), interpolation=cv2.INTER_AREA)
class ScaledFloatFrame(gym.ObservationWrapper):
"""Normalize observations to 0~1.
:param gym.Env env: the environment to wrap.
"""
def __init__(self, env):
super().__init__(env)
low = np.min(env.observation_space.low)
high = np.max(env.observation_space.high)
self.bias = low
self.scale = high - low
self.observation_space = gym.spaces.Box(
low=0.,
high=1.,
shape=env.observation_space.shape,
dtype=np.float32)
def observation(self, observation):
return (observation - self.bias) / self.scale
class ClipRewardEnv(gym.RewardWrapper):
"""clips the reward to {+1, 0, -1} by its sign.
:param gym.Env env: the environment to wrap.
"""
def __init__(self, env):
super().__init__(env)
self.reward_range = (-1, 1)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign.
Note: np.sign(0) == 0.
"""
return np.sign(reward)
class FrameStack(gym.Wrapper):
"""Stack n_frames last frames.
:param gym.Env env: the environment to wrap.
:param int n_frames: the number of frames to stack.
"""
def __init__(self, env, n_frames):
super().__init__(env)
self.n_frames = n_frames
self.frames = deque([], maxlen=n_frames)
shape = (n_frames, ) + env.observation_space.shape
self.observation_space = gym.spaces.Box(
low=np.min(env.observation_space.low),
high=np.max(env.observation_space.high),
shape=shape,
dtype=env.observation_space.dtype)
def reset(self):
obs = self.env.reset()
for _ in range(self.n_frames):
self.frames.append(obs)
return self._get_ob()
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.frames.append(obs)
return self._get_ob(), reward, done, info
def _get_ob(self):
# the original wrapper use `LazyFrames` but since we use np buffer,
# it has no effect
return np.stack(self.frames, axis=0)
def wrap_deepmind(env_id,
episode_life=True,
clip_rewards=True,
frame_stack=4,
scale=False,
warp_frame=True):
"""Configure environment for DeepMind-style Atari. The observation is
channel-first: (c, h, w) instead of (h, w, c).
:param str env_id: the atari environment id.
:param bool episode_life: wrap the episode life wrapper.
:param bool clip_rewards: wrap the reward clipping wrapper.
:param int frame_stack: wrap the frame stacking wrapper.
:param bool scale: wrap the scaling observation wrapper.
:param bool warp_frame: wrap the grayscale + resize observation wrapper.
:return: the wrapped atari environment.
"""
assert 'NoFrameskip' in env_id
env = gym.make(env_id)
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
if warp_frame:
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, frame_stack)
return env
def make_atari_env(task, seed, training_num, test_num, **kwargs):
"""Wrapper function for Atari env.
If EnvPool is installed, it will automatically switch to EnvPool's Atari env.
:return: a tuple of (single env, training envs, test envs).
"""
if envpool is not None:
if kwargs.get('scale', 0):
warnings.warn(
'EnvPool does not include ScaledFloatFrame wrapper, '
"please set `x = x / 255.0` inside CNN network's forward function."
)
# parameters conversion
train_envs = env = envpool.make_gym(
task.replace('NoFrameskip-v4', '-v5'),
num_envs=training_num,
seed=seed,
episodic_life=True,
reward_clip=True,
stack_num=kwargs.get('frame_stack', 4),
)
test_envs = envpool.make_gym(
task.replace('NoFrameskip-v4', '-v5'),
num_envs=test_num,
seed=seed,
episodic_life=False,
reward_clip=False,
stack_num=kwargs.get('frame_stack', 4),
)
else:
warnings.warn('Recommend using envpool (pip install envpool) '
'to run Atari games more efficiently.')
env = wrap_deepmind(task, **kwargs)
train_envs = ShmemVectorEnv([
lambda: wrap_deepmind(
task, episode_life=True, clip_rewards=True, **kwargs)
for _ in range(training_num)
])
test_envs = ShmemVectorEnv([
lambda: wrap_deepmind(
task, episode_life=False, clip_rewards=False, **kwargs)
for _ in range(test_num)
])
env.seed(seed)
train_envs.seed(seed)
test_envs.seed(seed)
return env, train_envs, test_envs
| [] |
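A quick usage sketch for make_atari_env(); it assumes the gym Atari ROMs are installed, and the task name is chosen only as an example:

env, train_envs, test_envs = make_atari_env(
    "PongNoFrameskip-v4", seed=0, training_num=4, test_num=2, frame_stack=4)
print(env.observation_space.shape)  # (4, 84, 84) after grayscale, resize and frame stacking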
2024-01-10 | matiszz/gpt-website-builder | Populator.py | from OpenAIController import OpenAIController
def get_pricing_feature(text):
html = """
<p class="flex items-center text-gray-600 mb-2">
<span class="w-4 h-4 mr-2 inline-flex items-center justify-center bg-gray-400 text-white rounded-full flex-shrink-0">
<svg fill="none" stroke="currentColor" stroke-linecap="round" stroke-linejoin="round" stroke-width="2.5"
class="w-3 h-3" viewBox="0 0 24 24">
<path d="M20 6L9 17l-5-5"></path>
</svg>
</span>{feature}
</p>
"""
return html.format(feature=text)
class Populator(object):
openAI = OpenAIController()
def __init__(self):
pass
@staticmethod
def populate_contact_html(content, address, email, phone_number):
return content.format(address=address, email=email, phone_number=phone_number, company_location=address.replace(" ", "+"))
def populate_features_html(self, content, web_info):
features_1 = self.openAI.get_feature(web_info)
features_2 = self.openAI.get_feature(web_info)
features_3 = self.openAI.get_feature(web_info)
return content.format(
web_name=web_info['web_name'],
feature_1_name=features_1[0],
feature_1_description=features_1[1],
feature_2_name=features_2[0],
feature_2_description=features_2[1],
feature_3_name=features_3[0],
feature_3_description=features_3[1],
image_src_features=web_info['photo2']
)
@staticmethod
def populate_footer_html(content, web_name):
return content.format(web_name=web_name)
@staticmethod
def populate_head_html(content, web_name):
return content.format(web_name=web_name)
def populate_testimonial_html(self, content, web_info):
features = self.openAI.get_testimonial_features(web_info)
return content.format(
testimonial_1=features['testimonials'][0],
testimonial_2=features['testimonials'][1],
testimonial_3=features['testimonials'][2],
name_1=features['names'][0],
name_2=features['names'][1],
name_3=features['names'][2],
role_1=features['roles'][0],
role_2=features['roles'][1],
role_3=features['roles'][2],
)
def populate_hero_html(self, content, description, image_src):
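        """Generate a tagline and copy text from the description and fill the hero template."""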
tagline = self.openAI.get_tagline(description)
copy = self.openAI.get_copy(description)
return content.format(tagline=tagline, copy=copy, image_src=image_src)
def populate_navbar_html(self, content, web_name, description):
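        """Generate navbar link labels (the third being a call to action) from the description and fill the navbar template."""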
links = self.openAI.get_navbar_links(description)
return content.format(web_name=web_name, link_1=links[0], link_2=links[1], link_3_cta=links[2])
def populate_pricing_html(self, content, description, web_name):
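        """Generate feature lists for the start/pro/business tiers and fill the pricing template."""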
features = self.openAI.get_pricing_features(description)
features_start = ""
features_pro = ""
features_business = ""
for feature in features['start']:
if feature != '':
features_start += get_pricing_feature(feature)
for feature in features['pro']:
if feature != '':
features_pro += get_pricing_feature(feature)
for feature in features['business']:
if feature != '':
features_business += get_pricing_feature(feature)
return content.format(
web_name=web_name,
features_start=features_start,
features_pro=features_pro,
features_business=features_business
)
| [] |
2024-01-10 | willf/NaNoGenMo-2021 | nanogenmo2021.py | import json
import sys
import os
import random
import argparse
import re
import urllib.parse
from dotenv import load_dotenv
load_dotenv()
import openai
def split_line(line):
"""
Given a line like | ''[[813 (film)|813]]'' || [[Charles Christie]], [[Scott Sidney]] || [[Wedgwood Nowell]], [[Ralph Lewis (actor)|Ralph Lewis]], [[Wallace Beery]], [[Laura La Plante]] || Mystery || [[Film Booking Offices of America|FBO]]
Retrun a list of the strings between '||'
"""
return [cell.strip() for cell in line.split("||")]
def resolve_wikipedia_link(link):
"""
Given a link like [[813 (film)|813]]
Return the string 813
"""
link = link.strip()
# first remove the brackets
if link.startswith("[[") and link.endswith("]]"):
link = link.strip()[2:-2]
# split the link into the title and the link if there is one
_, link = link.split("|") if "|" in link else (None, link)
return link.strip()
def convert_wiki_link_to_md(link):
"""
Given a link like [[813 (film)|813]]
Return the string [813](https://en.wikipedia.org/wiki/813 (film))
"""
link = link.strip()
# first remove the brackets
if link.startswith("[[") and link.endswith("]]"):
link = link.strip()[2:-2]
# split the link into the title and the link if there is one
link, title = link.split("|") if "|" in link else (None, link)
if not link:
link = title
link = f"https://en.wikipedia.org/wiki/{urllib.parse.quote(link.strip())}"
return f"[{title}]({link})"
def group_2(matchobj):
return matchobj.group(2)
def remove_wikipedia_links(text):
"""
Given a text, remove wikipedia links
"""
pat = re.compile(r"\[\[(?:([^\|\]]+)\|)?([^\]]+)\]\]")
return re.sub(pat, group_2, text)
def resolve_wikipedia_links(cell_text):
"""
Given the cell text, resolve the wikipedia links
"""
cells = [cell.strip() for cell in cell_text.split(",")]
results = [resolve_wikipedia_link(cell) for cell in cells]
return results
def create_movie_dict(line):
"""
Given a line like | ''[[813 (film)|813]]'' || [[Charles Christie]], [[Scott Sidney]] || [[Wedgwood Nowell]], [[Ralph Lewis (actor)|Ralph Lewis]], [[Wallace Beery]], [[Laura La Plante]] || Mystery || [[Film Booking Offices of America|FBO]]
Return a dictionary of the movie
"""
cells = split_line(line)
movie_dict = {
"wiki_link": cells[0],
"title": remove_wikipedia_links(cells[0]),
"directors": resolve_wikipedia_links(cells[1]),
"actors": resolve_wikipedia_links(cells[2]),
"genre": remove_wikipedia_links(cells[3]),
}
return movie_dict
def create_and_list(list):
"""
Given a list of 1 items, return item
Given a list of 2 items, return item[0] and item[1]
Given a list of n items, return item[0], item[1], ..., and item[-1]
"""
if len(list) == 1:
return list[0]
elif len(list) == 2:
return list[0] + " and " + list[1]
else:
comma_separated = ", ".join(list[:-1])
return f"{comma_separated}, and {list[-1]}"
def create_prompt(movie_dict):
"""
Given a movie dictionary, create a prompt
"""
title = movie_dict["title"]
directors = create_and_list(movie_dict["directors"])
actors = create_and_list(movie_dict["actors"])
genre = movie_dict["genre"].lower()
prompt = f"{title} is a {genre} movie directed by {directors}. It stars {actors}. Give a synopsis of the movie."
return prompt
def create_header(movie_dict):
"""
Given a movie dictionary, create a prompt
"""
title = movie_dict["title"]
header = f"## {title}"
return header
def create_brief_summary(movie_dict):
"""
Given a movie dictionary, create a brief summary
"""
title = movie_dict["title"]
directors = create_and_list(movie_dict["directors"])
actors = create_and_list(movie_dict["actors"])
genre = movie_dict["genre"].lower()
an = "an" if genre[0] in "aeiou" else "a"
if genre == "adventure":
genre = "adventure film"
elif genre == "horror":
genre = "horror film"
summary = f"*{title}* is {an} {genre} directed by {directors}. It stars {actors}."
return summary
def create_title_json_file(input_file, output_file):
"""
Given an input file, create a json file
"""
with open(input_file, "r") as f:
lines = f.readlines()
dicts = [create_movie_dict(line) for line in lines]
with open(output_file, "w") as g:
for dict in dicts:
json.dump(dict, g)
g.write("\n")
def create_wiki_link(dict_file):
wiki_link = dict_file.get("wiki_link")
if wiki_link:
link = convert_wiki_link_to_md(wiki_link)
return f"Wikipedia: {link}"
return ""
def create_story(dict_file):
header = create_header(dict_file)
summary = create_brief_summary(dict_file)
synopsis = dict_file.get("synopsis")
if synopsis is None:
synopsis = "No synopsis available."
return (
header
+ "\n\n"
+ summary
+ "\n\n**Synopsis**: "
+ synopsis.strip()
+ "\n\n"
+ create_wiki_link(dict_file)
)
def create_stories(dict_file, story_file, n):
with open(dict_file, "r") as f:
dicts = [json.loads(line) for line in f.readlines()]
if n == 0:
n = len(dicts)
# select n random movies
dicts = [dicts[i] for i in sorted(random.sample(range(len(dicts)), n))]
# create a story
with open(story_file, "w") as g:
g.write("# Possible Movies\n\n")
for dict in dicts:
g.write(create_story(dict))
g.write("\n\n")
def add_synopsis(movie_dict):
"""
Given a movie dictionary, create a synopsis from OpenAI
"""
if "synopsis" in movie_dict:
return movie_dict
prompt = create_prompt(movie_dict)
synopsis = None
openai.api_key = os.getenv("OPENAI_API_KEY")
try:
response = openai.Completion.create(
engine="davinci-instruct-beta", prompt=prompt, max_tokens=350, n=1
)
synopsis = response.choices[0].text
    except openai.error.OpenAIError as err:
print(err)
if synopsis is not None:
movie_dict["synopsis"] = synopsis
return movie_dict
def add_wikipedia_link(line, movie_dict):
"""
Given a movie dictionary, add wikipedia links
"""
cells = split_line(line)
wiki_link = cells[0]
movie_dict["wiki_link"] = wiki_link
return movie_dict
def create_wikipedia_json_file(lines_file, json_file, output_file):
"""
Given an input file, create a json file
"""
lines = open(lines_file, "r").readlines()
dicts = [json.loads(line) for line in open(json_file, "r").readlines()]
with open(output_file, "w") as g:
for line, dict in zip(lines, dicts):
dict = add_wikipedia_link(line, dict)
json.dump(dict, g)
g.write("\n")
def create_synopsis_json_file(input_file, output_file, n=0):
"""
Given an input file, create a json file with synopses
"""
print("Creating synopses")
with open(input_file, "r") as f:
dicts = [json.loads(line) for line in f.readlines()]
if n == 0:
n = len(dicts)
print(f"creating {n} stories from {len(dicts)} entries.")
j = 0
with open(output_file, "w") as g:
for dict in dicts:
j += 1
if j <= n:
print(f"Getting synopsis {j} of {n}...")
dict = add_synopsis(dict)
print(dict.get("synopsis"))
json.dump(dict, g)
g.write("\n")
import unittest
class TestNanoGenMo2021(unittest.TestCase):
def test_split(self):
"""
UnitTest:
"""
line = """[[813 (film)|813]] || [[Charles Christie]], [[Scott Sidney]] || [[Wedgwood Nowell]], [[Ralph Lewis (actor)|Ralph Lewis]], [[Wallace Beery]], [[Laura La Plante]] || Mystery || [[Film Booking Offices of America|FBO]]"""
split = [
"[[813 (film)|813]]",
"[[Charles Christie]], [[Scott Sidney]]",
"[[Wedgwood Nowell]], [[Ralph Lewis (actor)|Ralph Lewis]], [[Wallace Beery]], [[Laura La Plante]]",
"Mystery",
"[[Film Booking Offices of America|FBO]]",
]
self.assertEqual(split_line(line), split)
def test_resolve_link(self):
"""
UnitTest:
"""
link = "[[813 (film)|813]]"
self.assertEqual(resolve_wikipedia_link(link), "813")
def test_resolve_link_simple(self):
"""
UnitTest:
"""
link = "[[ 813]]"
self.assertEqual(resolve_wikipedia_link(link), "813")
def test_resolve_links(self):
"""
UnitTest:
"""
cell_text = "[[813 (film)|813]], [[Charles Christie]], [[Scott Sidney]]"
self.assertEqual(
resolve_wikipedia_links(cell_text),
["813", "Charles Christie", "Scott Sidney"],
)
def test_remove_links(self):
"""
UnitTest:
"""
cell_text = "Comedy [[short (film)|short]]"
self.assertEqual(remove_wikipedia_links(cell_text), "Comedy short")
def test_create_movie_dict(self):
"""
UnitTest:
"""
line = """[[813 (film)|813]] || [[Charles Christie]], [[Scott Sidney]] || [[Wedgwood Nowell]], [[Ralph Lewis (actor)|Ralph Lewis]], [[Wallace Beery]], [[Laura La Plante]] || Mystery || [[Film Booking Offices of America|FBO]]"""
        movie_dict = {
            "wiki_link": "[[813 (film)|813]]",
"title": "813",
"directors": ["Charles Christie", "Scott Sidney"],
"actors": [
"Wedgwood Nowell",
"Ralph Lewis",
"Wallace Beery",
"Laura La Plante",
],
"genre": "Mystery",
}
self.assertEqual(create_movie_dict(line), movie_dict)
def test_create_and_list(self):
"""
UnitTest:
"""
self.assertEqual(create_and_list(["a"]), "a")
self.assertEqual(create_and_list(["a", "b"]), "a and b")
self.assertEqual(create_and_list(["a", "b", "c"]), "a, b, and c")
self.assertEqual(create_and_list(["a", "b", "c", "d"]), "a, b, c, and d")
def test_create_prompt(self):
"""
UnitTest:
"""
movie_dict = {
"title": "813",
"directors": ["Charles Christie", "Scott Sidney"],
"actors": [
"Wedgwood Nowell",
"Ralph Lewis",
"Wallace Beery",
"Laura La Plante",
],
"genre": "Mystery",
}
prompt = "813 is a mystery movie directed by Charles Christie and Scott Sidney. It stars Wedgwood Nowell, Ralph Lewis, Wallace Beery, and Laura La Plante. Give a synopsis of the movie."
self.assertEqual(create_prompt(movie_dict), prompt)
def main():
"""
Main function
"""
# create_wikipedia_json_file("table.txt", "synopsis.json", "new.json")
# sys.exit(0)
# Instantiate the parser
main_parser = argparse.ArgumentParser(description="@willf nanogenmo2021")
main_parser.add_argument(
"--table", type=str, default="table.txt", help="input wiki file"
)
main_parser.add_argument(
"--json", type=str, default="table.json", help="JSON file pre-synopses"
)
main_parser.add_argument(
"--synopsis", type=str, default="synopsis.json", help="JSON file with Synopsis"
)
main_parser.add_argument(
"--story", type=str, default="nanogenmo2021.md", help="story file"
)
main_parser.add_argument(
"--n", type=int, default=1, help="number of stories to generate"
)
main_parser.add_argument(
"--create_json", action="store_true", help="create json file"
)
main_parser.add_argument(
"--create_story", action="store_true", help="create story file"
)
main_parser.add_argument(
"--create_synopses", action="store_true", help="create synopsis file"
)
options = main_parser.parse_args()
if options.table and options.json and options.create_json:
create_title_json_file(options.table, options.json)
sys.exit(0)
if options.story and options.json and options.create_story:
create_stories(options.synopsis, options.story, options.n)
sys.exit(0)
if options.synopsis and options.json and options.create_synopses:
create_synopsis_json_file(options.json, options.synopsis, options.n)
sys.exit(0)
print(
"no action specified (one of --test, --create_prompts, --create_story, --create_synopses)"
)
main_parser.print_help()
sys.exit(1)
if __name__ == "__main__":
main()
| [
"813 is a mystery movie directed by Charles Christie and Scott Sidney. It stars Wedgwood Nowell, Ralph Lewis, Wallace Beery, and Laura La Plante. Give a synopsis of the movie.",
"PLACEHOLDER is a PLACEHOLDER movie directed by PLACEHOLDER. It stars PLACEHOLDER. Give a synopsis of the movie."
] |
2024-01-10 | navneeeth/youtube-video-summarizer | back-end~helpers~video_processing~video_processing_helpers.py | import threading
import os
import openai
import moviepy.editor as mp
from pytube import YouTube
from models.id.id_model import ID
from helpers.id.id_helpers import store_id
from config import db
MAX_THREADS = 5
active_threads = []
def download_video(audio_file, video_file, video_link):
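    """Download the YouTube audio stream to video_file and convert it to an MP3 at audio_file."""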
youtube = YouTube(video_link, use_oauth=True, allow_oauth_cache=True)
audio = youtube.streams.filter(only_audio=True).first()
audio.download(filename=video_file)
mp.AudioFileClip(video_file).write_audiofile(audio_file)
def transcribe_audio(file):
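    """Transcribe the given audio file with OpenAI Whisper and return the text."""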
return openai.Audio.transcribe("whisper-1", file)["text"]
def generate_summary(transcription_text):
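    """Summarise the transcription into structured points with gpt-3.5-turbo; returns an empty string if no choice is returned."""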
prompt = "Organize this transcription from a YouTube video into a structured set of easily understandable points without missing important details: " + transcription_text
summary = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": prompt}]
)
return summary.choices[0].message.content if len(summary.choices) > 0 else ""
def update_processing_status(acknowledgement_id, timestamp, video_link, video_title, summary, status):
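    """Persist the current processing status (and summary, if any) for an acknowledgement ID."""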
try:
status_update = ID(
acknowledgement_id=acknowledgement_id,
timestamp=timestamp,
video_link=video_link,
video_title=video_title,
summary=summary,
status=status
)
store_id(status_update.__dict__)
except Exception as e:
print("Error:", e)
def remove_any_mp3_files():
"""
Remove all .mp3 files in the current directory.
"""
# Get the current directory
current_directory = os.getcwd()
# List all files in the current directory
file_list = os.listdir(current_directory)
# Iterate through the files and delete .mp3 files
for file in file_list:
if file.endswith(".mp3"):
file_path = os.path.join(current_directory, file)
os.remove(file_path)
def process_request(new_id, timestamp, video_link, video_title):
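    """Run the full pipeline for one video: download, transcribe, summarise and store the result."""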
print('Started thread')
print(threading.current_thread().name)
try:
remove_any_mp3_files()
# Update status to 'Downloading'
update_processing_status(new_id, timestamp, video_link, video_title, '', 'Downloading')
audio_file = video_title + ".mp3"
video_file = video_title + ".mp4"
# Download and convert video
download_video(audio_file, video_file, video_link)
# Update status to 'Transcribing'
update_processing_status(new_id, timestamp, video_link, video_title, '', 'Transcribing')
# Transcribe audio
file = open(audio_file, "rb")
transcription_text = transcribe_audio(file)
with open(video_title+"_transcription.txt", "w", encoding="utf-8") as f:
f.write(transcription_text)
# Update status to 'Summarizing'
update_processing_status(new_id, timestamp, video_link, video_title, '', 'Summarizing')
# Generate summary
summary_text = generate_summary(transcription_text)
# print the summary and write it to a text file
with open(video_title+"_summary.txt", "w") as f:
f.write(summary_text)
# Update status to 'Ready' and store summary
update_processing_status(new_id, timestamp, video_link, video_title, summary_text, 'Ready')
# Remove temporary files
os.remove(video_title+"_summary.txt")
os.remove(video_title+"_transcription.txt")
os.remove(video_file)
#os.remove(audio_file)
except Exception as e:
print("Error:", e)
# Update status to 'Error'
update_processing_status(new_id, timestamp, video_link, video_title, '', 'Error')
return
#active_threads.stop()
def process_request_legacy(new_id, timestamp, video_link, video_title):
print('Started thread')
print(threading.current_thread().name)
db.collection('auth').document(new_id).set({'timestamp': timestamp, 'status': 'Downloading', 'video_link': video_link, 'video_title': video_title, 'summary': ''})
video_file = video_title + ".mp4"
audio_file = video_title + ".mp3"
updated_audio_file = "updated_" + audio_file
youtube = YouTube(video_link, use_oauth=True, allow_oauth_cache=True)
print(youtube)
audio = youtube.streams.filter(only_audio=True).first()
audio.download(filename=video_file)
# convert the downloaded audio file to mp3 format
mp.AudioFileClip(video_file).write_audiofile(audio_file)
print("Processing finished for timestamp:", timestamp, "and video link:", video_link)
db.collection('auth').document(new_id).set({'timestamp': timestamp, 'status': 'Transcribing', 'video_link': video_link, 'video_title': video_title, 'summary': ''})
# transcribe the audio using OpenAI's API
file = open(audio_file, "rb")
transcription = openai.Audio.transcribe("whisper-1", file)
# write the transcription to a text file
with open(video_title+"_transcription.txt", "w", encoding="utf-8") as f:
f.write(transcription["text"])
db.collection('auth').document(new_id).set({'timestamp': timestamp, 'status': 'Summarizing', 'video_link': video_link, 'video_title': video_title, 'summary': ''})
prompt = "Organize this transcription from a YouTube video into a structured set of easily understandable points without missing important details: "+transcription["text"]
summary = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": prompt}]
)
text = ""
print(summary.choices)
if len(summary.choices) > 0:
text = summary.choices[0].message.content
print(text)
else:
print("Error: No response generated.")
db.collection('auth').document(new_id).set({'timestamp': timestamp, 'status': 'Error', 'video_link': video_link, 'video_title': video_title, 'summary': ''})
# print the summary and write it to a text file
with open(video_title+"_summary.txt", "w") as f:
f.write(text)
db.collection('auth').document(new_id).set({'timestamp': timestamp, 'status': 'Ready', 'video_link': video_link, 'video_title': video_title, 'summary': text})
os.remove(video_title+"_summary.txt")
os.remove(video_title+"_transcription.txt")
os.remove(video_file)
os.remove(audio_file)
#active_threads.stop()
def start_processing_thread(new_id, timestamp, video_link, video_title):
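    """Start a worker thread for the request, waiting for the oldest thread if MAX_THREADS are already active."""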
global active_threads
# Check if there are already MAX_THREADS active threads
if len(active_threads) >= MAX_THREADS:
# Wait for one of the threads to complete
active_threads[0].join()
# Remove the completed thread from the list
active_threads = active_threads[1:]
# Create a new thread for the request
t = threading.Thread(target=process_request, args=(new_id, timestamp, video_link, video_title))
# Add the thread to the list of active threads
active_threads.append(t)
# Start the thread
t.start()
| [
"Organize this transcription from a YouTube video into a structured set of easily understandable points without missing important details: PLACEHOLDER"
] |
2024-01-10 | alokwhitewolf/Whatsapp-Summarizer-using-LLMs | classifier.py | import requests
import json
import openai
import os
from dotenv import load_dotenv
load_dotenv()
url = "https://api.openai.com/v1/chat/completions"
# openai.api_key = os.getenv("OPENAI_API_KEY")
message = """
Message : Messages and calls are end-to-end encrypted. No one outside of this chat, not even WhatsApp, can read or listen to them.
Message : ~ Nirant created this group
Message : This group was added to the community “Generative AI”
Message : Anyone in the community “Generative AI” can request to join this group by messaging group admins.
Message : <attached: 00000012-PHOTO-2023-04-14-20-37-09.jpg>
Message : Has anyone used LangChain with Azure endpoints instead of OpenAI directly ?
Message : https://python.langchain.com/en/latest/modules/models/llms/integrations/azure_openai_example.html
Quoted Message : Has anyone used LangChain with Azure endpoints instead of OpenAI directly ?
Message : Tried this?
Message : Yup I’ve tried this
It’s working for simple examples
Im looking to implement agents and not able to find documentation for it
Quoted Message : Tried this?
Message : They have examples for custom LLM agents, not sure if that helps
"""
payload = {
"model": "gpt-3.5-turbo-0613",
"messages": [
{
"role": "user",
"content": message,
}
],
"functions": [
{
"name": "classify_message_block",
"description": "This function classifies a given message block into one of the given types",
"parameters": {
"type": "object",
"properties": {
"topic": {
"type": "string",
"enum": ["langchain", "engineering"]
},
},
"required": ["topic"]
}
}
]
}
headers = {
"Content-Type": "application/json",
"Authorization": f'Bearer ${os.getenv("OPENAI_API_KEY")}',
}
response = requests.post(url, data=json.dumps(payload), headers=headers)
if response.status_code == 200:
result = response.json()
# do something with the result
import pdb; pdb.set_trace()
else:
print("Error:", response.status_code, response.text)
| [
"\nMessage : Messages and calls are end-to-end encrypted. No one outside of this chat, not even WhatsApp, can read or listen to them.\n\nMessage : ~ Nirant created this group\n\nMessage : This group was added to the community “Generative AI”\n\nMessage : Anyone in the community “Generative AI” can request to join this group by messaging group admins.\n\nMessage : <attached: 00000012-PHOTO-2023-04-14-20-37-09.jpg>\n\nMessage : Has anyone used LangChain with Azure endpoints instead of OpenAI directly ?\n\nMessage : https://python.langchain.com/en/latest/modules/models/llms/integrations/azure_openai_example.html\nQuoted Message : Has anyone used LangChain with Azure endpoints instead of OpenAI directly ?\n\nMessage : Tried this?\n\nMessage : Yup I’ve tried this \nIt’s working for simple examples\n\nIm looking to implement agents and not able to find documentation for it\nQuoted Message : Tried this?\n\nMessage : They have examples for custom LLM agents, not sure if that helps\n"
] |
2024-01-10 | alokwhitewolf/Whatsapp-Summarizer-using-LLMs | primary_classifier.py | import os
import openai
from dotenv import load_dotenv
from utils.data_utils import get_chat_messages
from prompts.primary_classifier_prompt import prompt as primary_classifier_prompt
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
def window_slices(lst, window_size, overlap):
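    """Yield overlapping windows of length window_size over lst, stepping by window_size - overlap; a final window covers any remainder."""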
step_size = window_size - overlap
for i in range(0, len(lst) - window_size + 1, step_size):
print(i, i+window_size)
yield lst[i:i+window_size]
if len(lst) % step_size != 0:
yield lst[-window_size:]
chat_messages = get_chat_messages()
topics = {}
i = 0
for window_messages in window_slices(chat_messages, 50, 10):
prompt_message = ""
prompt_topics = ""
for message in window_messages:
prompt_message += f"Message : {message['message'] }"
if message["quoted_message"] and not message["quoted_message"].startswith("/9j/"):
prompt_message += f"\nQuoted Message : {message['quoted_message']}"
prompt_message += "\n\n"
for topic in topics:
prompt_topics += f"{topic} : {topics[topic]}"
prompt = primary_classifier_prompt.format(topics=prompt_topics, messages=prompt_message)
# response = openai.Completion.create(
# engine="gpt-3.5-turbo",
# prompt=prompt,
# temperature=0.9,
# max_tokens=100,
# top_p=1,
# frequency_penalty=0.0,
# presence_penalty=0.0,
# stop=["\n"]
# ) | [
"\n\n",
"\nQuoted Message : PLACEHOLDER",
"Message : PLACEHOLDER"
] |
2024-01-10 | three0-s/GITA | gita~utils~logger.py | """
Logger copied from OpenAI baselines to avoid extra RL-based dependencies:
https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/logger.py
"""
import os
import sys
import os.path as osp
import json
import time
import datetime
import tempfile
import warnings
from collections import defaultdict
from contextlib import contextmanager
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
def writekvs(self, kvs):
raise NotImplementedError
class SeqWriter(object):
def writeseq(self, seq):
raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file):
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, "wt")
self.own_file = True
else:
assert hasattr(filename_or_file, "read"), (
"expected file or str, got %s" % filename_or_file
)
self.file = filename_or_file
self.own_file = False
def writekvs(self, kvs):
# Create strings for printing
key2str = {}
for (key, val) in sorted(kvs.items()):
if hasattr(val, "__float__"):
valstr = "%-8.3g" % val
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
# Find max widths
if len(key2str) == 0:
print("WARNING: tried to write empty key-value dict")
return
else:
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
# Write out the data
dashes = "-" * (keywidth + valwidth + 7)
lines = [dashes]
for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()):
lines.append(
"| %s%s | %s%s |"
% (key, " " * (keywidth - len(key)), val, " " * (valwidth - len(val)))
)
lines.append(dashes)
self.file.write("\n".join(lines) + "\n")
# Flush the output to the file
self.file.flush()
def _truncate(self, s):
maxlen = 30
return s[: maxlen - 3] + "..." if len(s) > maxlen else s
def writeseq(self, seq):
seq = list(seq)
for (i, elem) in enumerate(seq):
self.file.write(elem)
if i < len(seq) - 1: # add space unless this is the last one
self.file.write(" ")
self.file.write("\n")
self.file.flush()
def close(self):
if self.own_file:
self.file.close()
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, "wt")
def writekvs(self, kvs):
for k, v in sorted(kvs.items()):
if hasattr(v, "dtype"):
kvs[k] = float(v)
self.file.write(json.dumps(kvs) + "\n")
self.file.flush()
def close(self):
self.file.close()
class CSVOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, "w+t")
self.keys = []
self.sep = ","
def writekvs(self, kvs):
# Add our current row to the history
extra_keys = list(kvs.keys() - self.keys)
extra_keys.sort()
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(",")
self.file.write(k)
self.file.write("\n")
for line in lines[1:]:
self.file.write(line[:-1])
self.file.write(self.sep * len(extra_keys))
self.file.write("\n")
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(",")
v = kvs.get(k)
if v is not None:
self.file.write(str(v))
self.file.write("\n")
self.file.flush()
def close(self):
self.file.close()
class TensorBoardOutputFormat(KVWriter):
"""
Dumps key/value pairs into TensorBoard's numeric format.
"""
def __init__(self, dir):
os.makedirs(dir, exist_ok=True)
self.dir = dir
self.step = 1
prefix = "events"
path = osp.join(osp.abspath(dir), prefix)
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.core.util import event_pb2
from tensorflow.python.util import compat
self.tf = tf
self.event_pb2 = event_pb2
self.pywrap_tensorflow = pywrap_tensorflow
self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
def writekvs(self, kvs):
def summary_val(k, v):
kwargs = {"tag": k, "simple_value": float(v)}
return self.tf.Summary.Value(**kwargs)
summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
event.step = (
self.step
) # is there any reason why you'd want to specify the step?
self.writer.WriteEvent(event)
self.writer.Flush()
self.step += 1
def close(self):
if self.writer:
self.writer.Close()
self.writer = None
def make_output_format(format, ev_dir, log_suffix=""):
os.makedirs(ev_dir, exist_ok=True)
if format == "stdout":
return HumanOutputFormat(sys.stdout)
elif format == "log":
return HumanOutputFormat(osp.join(ev_dir, "log%s.txt" % log_suffix))
elif format == "json":
return JSONOutputFormat(osp.join(ev_dir, "progress%s.json" % log_suffix))
elif format == "csv":
return CSVOutputFormat(osp.join(ev_dir, "progress%s.csv" % log_suffix))
elif format == "tensorboard":
return TensorBoardOutputFormat(osp.join(ev_dir, "tb%s" % log_suffix))
else:
raise ValueError("Unknown format specified: %s" % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
If called many times, last value will be used.
"""
get_current().logkv(key, val)
def logkv_mean(key, val):
"""
The same as logkv(), but if called many times, values averaged.
"""
get_current().logkv_mean(key, val)
def logkvs(d):
"""
Log a dictionary of key-value pairs
"""
for (k, v) in d.items():
logkv(k, v)
def dumpkvs():
"""
Write all of the diagnostics from the current iteration
"""
return get_current().dumpkvs()
def getkvs():
return get_current().name2val
def log(*args, level=INFO):
"""
Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
"""
get_current().log(*args, level=level)
def debug(*args):
log(*args, level=DEBUG)
def info(*args):
log(*args, level=INFO)
def warn(*args):
log(*args, level=WARN)
def error(*args):
log(*args, level=ERROR)
def set_level(level):
"""
Set logging threshold on current logger.
"""
get_current().set_level(level)
def set_comm(comm):
get_current().set_comm(comm)
def get_dir():
"""
Get directory that log files are being written to.
will be None if there is no output directory (i.e., if you didn't call start)
"""
return get_current().get_dir()
record_tabular = logkv
dump_tabular = dumpkvs
@contextmanager
def profile_kv(scopename):
logkey = "wait_" + scopename
tstart = time.time()
try:
yield
finally:
get_current().name2val[logkey] += time.time() - tstart
def profile(n):
"""
Usage:
@profile("my_func")
def my_func(): code
"""
def decorator_with_name(func):
def func_wrapper(*args, **kwargs):
with profile_kv(n):
return func(*args, **kwargs)
return func_wrapper
return decorator_with_name
# ================================================================
# Backend
# ================================================================
def get_current():
if Logger.CURRENT is None:
_configure_default_logger()
return Logger.CURRENT
class Logger(object):
DEFAULT = None # A logger with no output files. (See right below class definition)
# So that you can still log to the terminal without setting up any output files
CURRENT = None # Current logger being used by the free functions above
def __init__(self, dir, output_formats, comm=None):
self.name2val = defaultdict(float) # values this iteration
self.name2cnt = defaultdict(int)
self.level = INFO
self.dir = dir
self.output_formats = output_formats
self.comm = comm
# Logging API, forwarded
# ----------------------------------------
def logkv(self, key, val):
self.name2val[key] = val
def logkv_mean(self, key, val):
oldval, cnt = self.name2val[key], self.name2cnt[key]
self.name2val[key] = oldval * cnt / (cnt + 1) + val / (cnt + 1)
self.name2cnt[key] = cnt + 1
def dumpkvs(self):
if self.comm is None:
d = self.name2val
else:
d = mpi_weighted_mean(
self.comm,
{
name: (val, self.name2cnt.get(name, 1))
for (name, val) in self.name2val.items()
},
)
if self.comm.rank != 0:
d["dummy"] = 1 # so we don't get a warning about empty dict
out = d.copy() # Return the dict for unit testing purposes
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(d)
self.name2val.clear()
self.name2cnt.clear()
return out
def log(self, *args, level=INFO):
if self.level <= level:
self._do_log(args)
# Configuration
# ----------------------------------------
def set_level(self, level):
self.level = level
def set_comm(self, comm):
self.comm = comm
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
# Misc
# ----------------------------------------
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
def get_rank_without_mpi_import():
# check environment variables here instead of importing mpi4py
# to avoid calling MPI_Init() when this module is imported
for varname in ["PMI_RANK", "OMPI_COMM_WORLD_RANK"]:
if varname in os.environ:
return int(os.environ[varname])
return 0
def mpi_weighted_mean(comm, local_name2valcount):
"""
Copied from: https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110
Perform a weighted average over dicts that are each on a different node
Input: local_name2valcount: dict mapping key -> (value, count)
Returns: key -> mean
"""
all_name2valcount = comm.gather(local_name2valcount)
if comm.rank == 0:
name2sum = defaultdict(float)
name2count = defaultdict(float)
for n2vc in all_name2valcount:
for (name, (val, count)) in n2vc.items():
try:
val = float(val)
except ValueError:
if comm.rank == 0:
warnings.warn(
"WARNING: tried to compute mean on non-float {}={}".format(
name, val
)
)
else:
name2sum[name] += val * count
name2count[name] += count
return {name: name2sum[name] / name2count[name] for name in name2sum}
else:
return {}
def configure(dir=None, format_strs=None, comm=None, log_suffix=""):
"""
If comm is provided, average all numerical stats across that comm
"""
if dir is None:
dir = osp.join(os.getenv("GITA_LOGDIR", '/home/yewon/gita-log'), datetime.datetime.now().strftime("gita-%Y-%m-%d-%H-%M-%S-%f"))
if dir is None:
dir = osp.join(
tempfile.gettempdir(),
datetime.datetime.now().strftime("gita-%Y-%m-%d-%H-%M-%S-%f"),
)
assert isinstance(dir, str)
dir = os.path.expanduser(dir)
os.makedirs(os.path.expanduser(dir), exist_ok=True)
rank = get_rank_without_mpi_import()
if rank > 0:
log_suffix = log_suffix + "-rank%03i" % rank
if format_strs is None:
if rank == 0:
format_strs = os.getenv("GITA_LOG_FORMAT", "stdout,log,csv").split(",")
else:
format_strs = os.getenv("GITA_LOG_FORMAT_MPI", "log").split(",")
format_strs = filter(None, format_strs)
output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
if output_formats:
log("Logging to %s" % dir)
def _configure_default_logger():
configure()
Logger.DEFAULT = Logger.CURRENT
def reset():
if Logger.CURRENT is not Logger.DEFAULT:
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log("Reset logger")
@contextmanager
def scoped_configure(dir=None, format_strs=None, comm=None):
prevlogger = Logger.CURRENT
configure(dir=dir, format_strs=format_strs, comm=comm)
try:
yield
finally:
Logger.CURRENT.close()
Logger.CURRENT = prevlogger
| [] |
2024-01-10 | zodwick/ethglobal_team420 | backend~codegen.py |
import os
from openai import OpenAI
from prompt import systemReference
from dotenv import load_dotenv
import re
load_dotenv()
client = OpenAI()
def extractCode(example):
# Use regular expression to extract code between triple backticks
code_pattern = re.compile(r'```([\s\S]+?)```')
matches = code_pattern.findall(example)
# Concatenate and return the extracted code
extracted_code = '\n'.join(matches)
# remove the first line
extracted_code = extracted_code.split("\n", 1)[1]
return extracted_code
def writeCodeToFile(code, filename):
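    """Write the generated code string to the given file path."""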
with open(filename, 'w') as file:
file.write(code)
def CodeGen(prompt: str, lang: str) -> str:
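    """Generate example contract code for the prompt: TypeScript/JavaScript via GPT-4, Rust from bundled sample files."""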
if lang == "js" or lang == "javascript" or lang == "ts" or lang == "typescript":
chat_completion = client.chat.completions.create(
messages=[
{
"role": "system",
"content": systemReference,
},
{
"role": "user",
"content": "can you write a similar implementation in main.ts for a function that"+prompt,
}
],
model="gpt-4",
temperature=0.001,
)
codeContent = (chat_completion.choices[0].message.content)
Code = (extractCode(codeContent))
writeCodeToFile(
code=Code, filename="./stylus-as-example_js/assembly/app.ts")
return Code
elif lang == "rs" or lang == "rust":
if "hashing" in prompt.lower() or "hash" in prompt.lower():
with open("./stylus-as-example_rs/hashing/src/lib.rs", "r") as file:
data = file.read()
return data
else:
with open("./stylus-as-example_rs/voting/src/lib.rs", "r") as file:
data = file.read()
return data
else:
return "Language not supported yet"
| [
"can you write a similar implementation in main.ts for a function thatPLACEHOLDER"
] |
2024-01-10 | Softlandia-Ltd/bytewax_index | query_bytewax.py | """Read the bytewax repository and create an index.
Must have the following env variables set:
OPENAI_API_KEY
GITHUB_TOKEN (if creating the index from scratch)
"""
import os
import logging
import plac
from llama_index import GPTSimpleVectorIndex
from llama_index.readers import GithubRepositoryReader
from llama_index import LangchainEmbedding
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
INDEX_FILE = "bytewax_index.json"
@plac.opt("n_sources", "Number of sources to use", type=int)
def main(n_sources: int = 2):
"""Create index and run queries."""
embed_model = LangchainEmbedding(
HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
)
# TODO: try FAISS index
if os.path.exists(INDEX_FILE):
logger.info("Loading index from file")
index = GPTSimpleVectorIndex.load_from_disk(INDEX_FILE)
else:
logger.info("Creating index from scratch")
reader = GithubRepositoryReader(
"bytewax",
"bytewax",
ignore_directories=[".github", "migrations", "src"],
verbose=False,
)
documents = reader.load_data(branch="main")
print(len(documents))
print(documents[0])
logging.info("Documents loaded. Creating index")
index = GPTSimpleVectorIndex(
documents, chunk_size_limit=512, embed_model=embed_model
)
index.save_to_disk(INDEX_FILE)
while True:
query = input("Enter query: ")
results = index.query(
query, similarity_top_k=n_sources, embed_model=embed_model
)
print(results)
if __name__ == "__main__":
plac.call(main)
| [] |
2024-01-10 | 120318/nas-tools | app~plugins~modules~_autosignin~chdbits.py | import json
import os
import random
import re
from lxml import etree
from app.helper.openai_helper import OpenAiHelper
from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class CHDBits(_ISiteSigninHandler):
"""
    CHDBits (Rainbow Island) sign-in.
    If an OpenAI key is configured, ChatGPT is asked to answer the sign-in question;
    otherwise a random option is submitted.
"""
    # Site URL to match; every handler class must set this to its own site URL
site_url = "chdbits.co"
    # Already signed in today
_sign_regex = ['今天已经签过到了']
    # Sign-in success markers (to be extended)
_success_regex = ['\\d+点魔力值']
    # Store known-correct answers so they can be looked up directly later
_answer_path = os.path.join(Config().get_temp_path(), "signin")
_answer_file = _answer_path + "/chdbits.json"
@classmethod
def match(cls, url):
"""
        Decide from the site URL whether this handler matches the site; the default implementation is usually enough.
        :param url: site URL
        :return: whether it matches; if it does, this class's signin method will be called
"""
return True if StringUtils.url_equal(url, cls.site_url) else False
def signin(self, site_info: dict):
"""
        Perform the sign-in.
        :param site_info: site information, including the site URL, cookie, UA, etc.
        :return: sign-in result message
"""
site = site_info.get("name")
site_cookie = site_info.get("cookie")
ua = site_info.get("ua")
proxy = Config().get_proxies() if site_info.get("proxy") else None
# 创建正确答案存储目录
if not os.path.exists(os.path.dirname(self._answer_file)):
os.makedirs(os.path.dirname(self._answer_file))
# 判断今日是否已签到
index_res = RequestUtils(cookies=site_cookie,
headers=ua,
proxies=proxy
).get_res(url='https://chdbits.co/bakatest.php')
if not index_res or index_res.status_code != 200:
self.error(f"签到失败,请检查站点连通性")
return False, f'【{site}】签到失败,请检查站点连通性'
if "login.php" in index_res.text:
self.error(f"签到失败,cookie失效")
return False, f'【{site}】签到失败,cookie失效'
sign_status = self.sign_in_result(html_res=index_res.text,
regexs=self._sign_regex)
if sign_status:
self.info(f"今日已签到")
return True, f'【{site}】今日已签到'
# 没有签到则解析html
html = etree.HTML(index_res.text)
if not html:
return False, f'【{site}】签到失败'
# 获取页面问题、答案
questionid = html.xpath("//input[@name='questionid']/@value")[0]
option_ids = html.xpath("//input[@name='choice[]']/@value")
option_values = html.xpath("//input[@name='choice[]']/following-sibling::text()")
question_str = html.xpath("//td[@class='text' and contains(text(),'请问:')]/text()")[0]
answers = list(zip(option_ids, option_values))
# 正则获取问题
match = re.search(r'请问:(.+)', question_str)
if match:
question_str = match.group(1)
self.debug(f"获取到签到问题 {question_str}")
else:
self.error(f"未获取到签到问题")
return False, f"【{site}】签到失败,未获取到签到问题"
# 查询已有答案
exits_answers = {}
try:
with open(self._answer_file, 'r') as f:
json_str = f.read()
exits_answers = json.loads(json_str)
# 查询本地本次验证码hash答案
question_answer = exits_answers[question_str]
# question_answer是数组
if not isinstance(question_answer, list):
question_answer = [question_answer]
# 本地存在本次hash对应的正确答案再遍历查询
choice = []
for q in question_answer:
for num, answer in answers:
if str(q) == str(num):
choice.append(int(q))
if len(choice) > 0:
# 签到
return self.__signin(questionid=questionid,
choice=choice,
site_cookie=site_cookie,
ua=ua,
proxy=proxy,
site=site)
except (FileNotFoundError, IOError, OSError) as e:
self.debug("查询本地已知答案失败,继续请求豆瓣查询")
# 正确答案,默认随机,如果gpt返回则用gpt返回的答案提交
choice = [option_ids[random.randint(0, len(option_ids) - 1)]]
# 组装gpt问题
gpt_options = "{\n" + ",\n".join([f"{num}:{value}" for num, value in answers]) + "\n}"
gpt_question = f"题目:{question_str}\n" \
f"选项:{gpt_options}"
self.debug(f"组装chatgpt问题 {gpt_question}")
# chatgpt获取答案
answer = OpenAiHelper().get_question_answer(question=gpt_question)
self.debug(f"chatpgt返回结果 {answer}")
# 处理chatgpt返回的答案信息
if answer is None:
self.warn(f"ChatGPT未启用, 开始随机签到")
# return f"【{site}】签到失败,ChatGPT未启用"
elif answer:
# 正则获取字符串中的数字
answer_nums = list(map(int, re.findall("\d+", answer)))
if not answer_nums:
self.warn(f"无法从chatgpt回复 {answer} 中获取答案, 将采用随机签到")
else:
choice = []
for answer in answer_nums:
# 如果返回的数字在option_ids范围内,则直接作为答案
if str(answer) in option_ids:
choice.append(int(answer))
self.info(f"chatgpt返回答案id {answer} 在签到选项 {option_ids} 中")
# 签到
return self.__signin(questionid=questionid,
choice=choice,
site_cookie=site_cookie,
ua=ua,
proxy=proxy,
site=site,
exits_answers=exits_answers,
question=question_str)
def __signin(self, questionid, choice, site, site_cookie, ua, proxy, exits_answers=None, question=None):
"""
        Sign-in request payload, for example:
questionid: 450
choice[]: 8
choice[]: 4
usercomment: 此刻心情:无
submit: 提交
        Multiple-choice questions send several choice[] entries.
"""
data = {
'questionid': questionid,
'choice[]': choice[0] if len(choice) == 1 else choice,
'usercomment': '太难了!',
'wantskip': '不会'
}
self.debug(f"签到请求参数 {data}")
sign_res = RequestUtils(cookies=site_cookie,
headers=ua,
proxies=proxy
).post_res(url='https://chdbits.co/bakatest.php', data=data)
if not sign_res or sign_res.status_code != 200:
self.error(f"签到失败,签到接口请求失败")
return False, f'【{site}】签到失败,签到接口请求失败'
# 判断是否签到成功
sign_status = self.sign_in_result(html_res=sign_res.text,
regexs=self._success_regex)
if sign_status:
self.info(f"签到成功")
if exits_answers and question:
# 签到成功写入本地文件
self.__write_local_answer(exits_answers=exits_answers or {},
question=question,
answer=choice)
return True, f'【{site}】签到成功'
else:
sign_status = self.sign_in_result(html_res=sign_res.text,
regexs=self._sign_regex)
if sign_status:
self.info(f"今日已签到")
return True, f'【{site}】今日已签到'
self.error(f"签到失败,请到页面查看")
return False, f'【{site}】签到失败,请到页面查看'
def __write_local_answer(self, exits_answers, question, answer):
"""
        Write the correct answer to the local file after a successful sign-in
"""
try:
exits_answers[question] = answer
# 序列化数据
formatted_data = json.dumps(exits_answers, indent=4)
with open(self._answer_file, 'w') as f:
f.write(formatted_data)
except (FileNotFoundError, IOError, OSError) as e:
self.debug("签到成功写入本地文件失败")
| [] |
2024-01-10 | 120318/nas-tools | app~plugins~modules~_autosignin~52pt.py | import json
import os
import random
import re
from lxml import etree
from app.helper.openai_helper import OpenAiHelper
from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class FWpt(_ISiteSigninHandler):
"""
52pt
    If an OpenAI key is configured, ChatGPT is asked to answer the sign-in question;
    otherwise a random option is submitted.
"""
    # Site URL to match; every handler class must set this to its own site URL
site_url = "52pt.site"
    # Already signed in today
_sign_regex = ['今天已经签过到了']
    # Sign-in success markers (to be extended)
_success_regex = ['\\d+点魔力值']
    # Store known-correct answers so they can be looked up directly later
_answer_path = os.path.join(Config().get_temp_path(), "signin")
_answer_file = _answer_path + "/52pt.json"
@classmethod
def match(cls, url):
"""
        Decide from the site URL whether this handler matches the site; the default implementation is usually enough.
        :param url: site URL
        :return: whether it matches; if it does, this class's signin method will be called
"""
return True if StringUtils.url_equal(url, cls.site_url) else False
def signin(self, site_info: dict):
"""
        Perform the sign-in.
        :param site_info: site information, including the site URL, cookie, UA, etc.
        :return: sign-in result message
"""
site = site_info.get("name")
site_cookie = site_info.get("cookie")
ua = site_info.get("ua")
proxy = Config().get_proxies() if site_info.get("proxy") else None
# 创建正确答案存储目录
if not os.path.exists(os.path.dirname(self._answer_file)):
os.makedirs(os.path.dirname(self._answer_file))
# 判断今日是否已签到
index_res = RequestUtils(cookies=site_cookie,
headers=ua,
proxies=proxy
).get_res(url='https://52pt.site/bakatest.php')
if not index_res or index_res.status_code != 200:
self.error(f"签到失败,请检查站点连通性")
return False, f'【{site}】签到失败,请检查站点连通性'
if "login.php" in index_res.text:
self.error(f"签到失败,cookie失效")
return False, f'【{site}】签到失败,cookie失效'
sign_status = self.sign_in_result(html_res=index_res.text,
regexs=self._sign_regex)
if sign_status:
self.info(f"今日已签到")
return True, f'【{site}】今日已签到'
# 没有签到则解析html
html = etree.HTML(index_res.text)
if not html:
return False, f'【{site}】签到失败'
# 获取页面问题、答案
questionid = html.xpath("//input[@name='questionid']/@value")[0]
option_ids = html.xpath("//input[@name='choice[]']/@value")
option_values = html.xpath("//input[@name='choice[]']/following-sibling::text()")
question_str = html.xpath("//td[@class='text' and contains(text(),'请问:')]/text()")[0]
answers = list(zip(option_ids, option_values))
# 正则获取问题
match = re.search(r'请问:(.+)', question_str)
if match:
question_str = match.group(1)
self.debug(f"获取到签到问题 {question_str}")
else:
self.error(f"未获取到签到问题")
return False, f"【{site}】签到失败,未获取到签到问题"
# 查询已有答案
exits_answers = {}
try:
with open(self._answer_file, 'r') as f:
json_str = f.read()
exits_answers = json.loads(json_str)
# 查询本地本次验证码hash答案
question_answer = exits_answers[question_str]
# question_answer是数组
if not isinstance(question_answer, list):
question_answer = [question_answer]
# 本地存在本次hash对应的正确答案再遍历查询
choice = []
for q in question_answer:
for num, answer in answers:
if str(q) == str(num):
choice.append(int(q))
if len(choice) > 0:
# 签到
return self.__signin(questionid=questionid,
choice=choice,
site_cookie=site_cookie,
ua=ua,
proxy=proxy,
site=site)
except (FileNotFoundError, IOError, OSError) as e:
self.debug("查询本地已知答案失败,继续请求豆瓣查询")
# 正确答案,默认随机,如果gpt返回则用gpt返回的答案提交
choice = [option_ids[random.randint(0, len(option_ids) - 1)]]
# 组装gpt问题
gpt_options = "{\n" + ",\n".join([f"{num}:{value}" for num, value in answers]) + "\n}"
gpt_question = f"题目:{question_str}\n" \
f"选项:{gpt_options}"
self.debug(f"组装chatgpt问题 {gpt_question}")
# chatgpt获取答案
answer = OpenAiHelper().get_question_answer(question=gpt_question)
self.debug(f"chatpgt返回结果 {answer}")
# 处理chatgpt返回的答案信息
if answer is None:
self.warn(f"ChatGPT未启用, 开始随机签到")
# return f"【{site}】签到失败,ChatGPT未启用"
elif answer:
# 正则获取字符串中的数字
answer_nums = list(map(int, re.findall("\d+", answer)))
if not answer_nums:
self.warn(f"无法从chatgpt回复 {answer} 中获取答案, 将采用随机签到")
else:
choice = []
for answer in answer_nums:
# 如果返回的数字在option_ids范围内,则直接作为答案
if str(answer) in option_ids:
choice.append(int(answer))
self.info(f"chatgpt返回答案id {answer} 在签到选项 {option_ids} 中")
# 签到
return self.__signin(questionid=questionid,
choice=choice,
site_cookie=site_cookie,
ua=ua,
proxy=proxy,
site=site,
exits_answers=exits_answers,
question=question_str)
def __signin(self, questionid, choice, site, site_cookie, ua, proxy, exits_answers=None, question=None):
"""
        Sign-in request payload, for example:
questionid: 450
choice[]: 8
choice[]: 4
usercomment: 此刻心情:无
submit: 提交
        Multiple-choice questions send several choice[] entries.
"""
data = {
'questionid': questionid,
'choice[]': choice[0] if len(choice) == 1 else choice,
'usercomment': '太难了!',
'wantskip': '不会'
}
self.debug(f"签到请求参数 {data}")
sign_res = RequestUtils(cookies=site_cookie,
headers=ua,
proxies=proxy
).post_res(url='https://52pt.site/bakatest.php', data=data)
if not sign_res or sign_res.status_code != 200:
self.error(f"签到失败,签到接口请求失败")
return False, f'【{site}】签到失败,签到接口请求失败'
# 判断是否签到成功
sign_status = self.sign_in_result(html_res=sign_res.text,
regexs=self._success_regex)
if sign_status:
self.info(f"{site}签到成功")
if exits_answers and question:
# 签到成功写入本地文件
self.__write_local_answer(exits_answers=exits_answers or {},
question=question,
answer=choice)
return True, f'【{site}】签到成功'
else:
sign_status = self.sign_in_result(html_res=sign_res.text,
regexs=self._sign_regex)
if sign_status:
self.info(f"今日已签到")
return True, f'【{site}】今日已签到'
self.error(f"签到失败,请到页面查看")
return False, f'【{site}】签到失败,请到页面查看'
def __write_local_answer(self, exits_answers, question, answer):
"""
        Write the correct answer to the local file after a successful sign-in
"""
try:
exits_answers[question] = answer
# 序列化数据
formatted_data = json.dumps(exits_answers, indent=4)
with open(self._answer_file, 'w') as f:
f.write(formatted_data)
except (FileNotFoundError, IOError, OSError) as e:
self.debug("签到成功写入本地文件失败")
| [] |
2024-01-10 | 120318/nas-tools | app~plugins~modules~autosub.py | import copy
import os
import re
import subprocess
import tempfile
import time
import traceback
from datetime import timedelta
import iso639
import psutil
import srt
from lxml import etree
from app.helper import FfmpegHelper
from app.helper.openai_helper import OpenAiHelper
from app.plugins.modules._base import _IPluginModule
from app.utils import SystemUtils
from config import RMT_MEDIAEXT
class AutoSub(_IPluginModule):
    # Plugin name
module_name = "AI字幕自动生成"
    # Plugin description
module_desc = "使用whisper自动生成视频文件字幕。"
    # Plugin icon
module_icon = "autosubtitles.jpeg"
    # Theme colour
module_color = "#2C4F7E"
    # Plugin version
module_version = "1.0"
    # Plugin author
module_author = "olly"
    # Author homepage
author_url = "https://github.com/lightolly"
    # Prefix for plugin configuration item IDs
module_config_prefix = "autosub"
    # Load order
module_order = 14
    # Minimum user level allowed to use the plugin
auth_level = 2
    # Private attributes
_running = False
    # Sentence-ending tokens
_end_token = ['.', '!', '?', '。', '!', '?', '。"', '!"', '?"', '."', '!"', '?"']
_noisy_token = [('(', ')'), ('[', ']'), ('{', '}'), ('【', '】'), ('♪', '♪'), ('♫', '♫'), ('♪♪', '♪♪')]
def __init__(self):
self.additional_args = '-t 4 -p 1'
self.translate_zh = False
self.translate_only = False
self.whisper_model = None
self.whisper_main = None
self.file_size = None
self.process_count = 0
self.skip_count = 0
self.fail_count = 0
self.success_count = 0
self.send_notify = False
self.asr_engine = 'whisper.cpp'
self.faster_whisper_model = 'base'
self.faster_whisper_model_path = None
@staticmethod
def get_fields():
return [
# 同一板块
{
'type': 'div',
'content': [
[
{
'title': '媒体路径',
'required': '',
'tooltip': '要进行字幕生成的路径,每行一个路径,请确保路径正确',
'type': 'textarea',
'content':
{
'id': 'path_list',
'placeholder': '文件路径',
'rows': 5
}
}
],
# asr 引擎
[
{
'title': '文件大小(MB)',
'required': "required",
'tooltip': '单位 MB, 大于该大小的文件才会进行字幕生成',
'type': 'text',
'content':
[{
'id': 'file_size',
'placeholder': '文件大小, 单位MB'
}]
},
{
'title': 'ASR引擎',
'required': "required",
'tooltip': '自动语音识别引擎选择',
'type': 'select',
'content': [
{
'id': 'asr_engine',
'options': {
'whisper.cpp': 'whisper.cpp',
'faster-whisper': 'faster-whisper'
},
'default': 'whisper.cpp',
'onchange': 'AutoSub_asr_engine_change(this)'
}
]
}
]
]
},
{
'type': 'details',
'id': 'whisper_config',
'summary': 'whisper.cpp 配置',
'tooltip': '使用 whisper.cpp 引擎时的配置',
'hidden': False,
'content': [
[
{
'title': 'whisper.cpp路径',
'required': "",
'tooltip': '填写whisper.cpp主程序路径,如/config/plugin/autosub/main \n'
'推荐教程 https://ddsrem.com/autosub',
'type': 'text',
'content': [
{
'id': 'whisper_main',
'placeholder': 'whisper.cpp主程序路径'
}
]
}
],
[
{
'title': 'whisper.cpp模型路径',
'required': "",
'tooltip': '填写whisper.cpp模型路径,如/config/plugin/autosub/models/ggml-base.en.bin\n'
'可从https://github.com/ggerganov/whisper.cpp/tree/master/models处下载',
'type': 'text',
'content':
[{
'id': 'whisper_model',
'placeholder': 'whisper.cpp模型路径'
}]
}
],
[
{
'title': '高级参数',
'tooltip': 'whisper.cpp的高级参数,请勿随意修改',
'required': "",
'type': 'text',
'content': [
{
'id': 'additional_args',
'placeholder': '-t 4 -p 1'
}
]
}
]
]
},
{
'type': 'details',
'id': 'faster_whisper_config',
'summary': 'faster-whisper 配置',
'tooltip': '使用 faster-whisper 引擎时的配置,安装参考 https://github.com/guillaumekln/faster-whisper',
'hidden': True,
'content': [
[
{
'title': '模型',
'required': "",
'tooltip': '选择模型后第一次运行会从Hugging Face Hub下载模型,可能需要一段时间',
'type': 'select',
'content': [
{
'id': 'faster_whisper_model',
'options': {
# tiny, tiny.en, base, base.en,
# small, small.en, medium, medium.en,
# large-v1, or large-v2
'tiny': 'tiny',
'tiny.en': 'tiny.en',
'base': 'base',
'base.en': 'base.en',
'small': 'small',
'small.en': 'small.en',
'medium': 'medium',
'medium.en': 'medium.en',
'large-v1': 'large-v1',
'large-v2': 'large-v2',
},
'default': 'base'
}
]
}
],
[
{
'title': '模型保存路径',
'required': "",
'tooltip': '配置模型保存路径,如/config/plugin/autosub/faster-whisper/models',
'type': 'text',
'content': [
{
'id': 'faster_whisper_model_path',
'placeholder': 'faster-whisper配置模型保存路径'
}
]
}
]
]
},
{
'type': 'div',
'content': [
[
{
'title': '立即运行一次',
'required': "",
'tooltip': '打开后立即运行一次',
'type': 'switch',
'id': 'run_now',
},
{
'title': '翻译为中文',
'required': "",
'tooltip': '打开后将自动翻译非中文字幕,生成双语字幕,关闭后只生成英文字幕,需要配置OpenAI API Key',
'type': 'switch',
'id': 'translate_zh',
},
{
'title': '仅已有字幕翻译',
'required': "",
'tooltip': '打开后仅翻译已有字幕,不做语音识别,关闭后将自动识别语音并生成字幕',
'type': 'switch',
'id': 'translate_only',
}
],
[
{
'title': '运行时通知',
'required': "",
'tooltip': '打开后将在单个字幕生成开始和完成后发送通知, 需要开启插件消息推送通知',
'type': 'switch',
'id': 'send_notify',
}
]
]
}
]
@staticmethod
def get_script():
"""
        Return the plugin's extra JavaScript code
"""
return """
function AutoSub_asr_engine_change(obj) {
if ($(obj).val() == 'faster-whisper') {
$('#autosubwhisper_config').hide();
$('#autosubfaster_whisper_config').show();
}else{
$('#autosubwhisper_config').show();
$('#autosubfaster_whisper_config').hide();
}
}
"""
def init_config(self, config=None):
# 如果没有配置信息, 则不处理
if not config:
return
# config.get('path_list') 用 \n 分割为 list 并去除重复值和空值
path_list = list(set(config.get('path_list').split('\n')))
# file_size 转成数字
self.file_size = config.get('file_size')
self.whisper_main = config.get('whisper_main')
self.whisper_model = config.get('whisper_model')
self.translate_zh = config.get('translate_zh', False)
self.translate_only = config.get('translate_only', False)
self.additional_args = config.get('additional_args', '-t 4 -p 1')
self.send_notify = config.get('send_notify', False)
self.asr_engine = config.get('asr_engine', 'whisper.cpp')
self.faster_whisper_model = config.get('faster_whisper_model', 'base')
self.faster_whisper_model_path = config.get('faster_whisper_model_path')
run_now = config.get('run_now')
if not run_now:
return
config['run_now'] = False
self.update_config(config)
# 如果没有配置信息, 则不处理
if not path_list or not self.file_size:
self.warn(f"配置信息不完整,不进行处理")
return
# 校验文件大小是否为数字
if not self.file_size.isdigit():
self.warn(f"文件大小不是数字,不进行处理")
return
# asr 配置检查
if not self.translate_only and not self.__check_asr():
return
if self._running:
self.warn(f"上一次任务还未完成,不进行处理")
return
# 依次处理每个目录
try:
self._running = True
self.success_count = self.skip_count = self.fail_count = self.process_count = 0
for path in path_list:
self.info(f"开始处理目录:{path} ...")
# 如果目录不存在, 则不处理
if not os.path.exists(path):
self.warn(f"目录不存在,不进行处理")
continue
# 如果目录不是文件夹, 则不处理
if not os.path.isdir(path):
self.warn(f"目录不是文件夹,不进行处理")
continue
# 如果目录不是绝对路径, 则不处理
if not os.path.isabs(path):
self.warn(f"目录不是绝对路径,不进行处理")
continue
# 处理目录
self.__process_folder_subtitle(path)
except Exception as e:
self.error(f"处理异常: {e}")
finally:
self.info(f"处理完成: "
f"成功{self.success_count} / 跳过{self.skip_count} / 失败{self.fail_count} / 共{self.process_count}")
self._running = False
def __check_asr(self):
if self.asr_engine == 'whisper.cpp':
if not self.whisper_main or not self.whisper_model:
self.warn(f"配置信息不完整,不进行处理")
return
if not os.path.exists(self.whisper_main):
self.warn(f"whisper.cpp主程序不存在,不进行处理")
return False
if not os.path.exists(self.whisper_model):
self.warn(f"whisper.cpp模型文件不存在,不进行处理")
return False
# 校验扩展参数是否包含异常字符
if self.additional_args and re.search(r'[;|&]', self.additional_args):
self.warn(f"扩展参数包含异常字符,不进行处理")
return False
elif self.asr_engine == 'faster-whisper':
if not self.faster_whisper_model_path or not self.faster_whisper_model:
self.warn(f"配置信息不完整,不进行处理")
return
if not os.path.exists(self.faster_whisper_model_path):
self.warn(f"faster-whisper模型文件夹不存在,不进行处理")
return False
try:
from faster_whisper import WhisperModel, download_model
except ImportError:
self.warn(f"faster-whisper 未安装,不进行处理")
return False
return True
else:
self.warn(f"未配置asr引擎,不进行处理")
return False
return True
def __process_folder_subtitle(self, path):
"""
处理目录字幕
:param path:
:return:
"""
        # Iterate over the media files under the directory
for video_file in self.__get_library_files(path):
if not video_file:
continue
            # Skip files smaller than the configured size
if os.path.getsize(video_file) < int(self.file_size):
continue
self.process_count += 1
start_time = time.time()
file_path, file_ext = os.path.splitext(video_file)
file_name = os.path.basename(video_file)
try:
self.info(f"开始处理文件:{video_file} ...")
                # Skip if the target subtitle (external or embedded) already exists
if self.__target_subtitle_exists(video_file):
self.warn(f"字幕文件已经存在,不进行处理")
self.skip_count += 1
continue
                # Generate the subtitle
if self.send_notify:
self.send_message(title="自动字幕生成",
text=f" 媒体: {file_name}\n 开始处理文件 ... ")
ret, lang = self.__generate_subtitle(video_file, file_path, self.translate_only)
if not ret:
message = f" 媒体: {file_name}\n "
if self.translate_only:
message += "内嵌&外挂字幕不存在,不进行翻译"
self.skip_count += 1
else:
message += "生成字幕失败,跳过后续处理"
self.fail_count += 1
if self.send_notify:
self.send_message(title="自动字幕生成", text=message)
continue
if self.translate_zh:
                    # Translate the subtitle
self.info(f"开始翻译字幕为中文 ...")
if self.send_notify:
self.send_message(title="自动字幕生成",
text=f" 媒体: {file_name}\n 开始翻译字幕为中文 ... ")
self.__translate_zh_subtitle(lang, f"{file_path}.{lang}.srt", f"{file_path}.zh.srt")
self.info(f"翻译字幕完成:{file_name}.zh.srt")
end_time = time.time()
message = f" 媒体: {file_name}\n 处理完成\n 字幕原始语言: {lang}\n "
if self.translate_zh:
message += f"字幕翻译语言: zh\n "
message += f"耗时:{round(end_time - start_time, 2)}秒"
self.info(f"自动字幕生成 处理完成:{message}")
if self.send_notify:
self.send_message(title="自动字幕生成", text=message)
self.success_count += 1
except Exception as e:
self.error(f"自动字幕生成 处理异常:{e}")
end_time = time.time()
message = f" 媒体: {file_name}\n 处理失败\n 耗时:{round(end_time - start_time, 2)}秒"
if self.send_notify:
self.send_message(title="自动字幕生成", text=message)
                # Print the stack trace
traceback.print_exc()
self.fail_count += 1
def __do_speech_recognition(self, audio_lang, audio_file):
"""
语音识别, 生成字幕
:param audio_lang:
:param audio_file:
:return:
"""
lang = audio_lang
if self.asr_engine == 'whisper.cpp':
command = [self.whisper_main] + self.additional_args.split()
command += ['-l', lang, '-m', self.whisper_model, '-osrt', '-of', audio_file, audio_file]
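            # -osrt tells whisper.cpp to write an SRT file and -of sets the output base path, producing <audio_file>.srt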
ret = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if ret.returncode == 0:
if lang == 'auto':
                    # Parse the detected language from the output, e.g. "whisper_full_with_state: auto-detected language: en (p = 0.973642)"
output = ret.stdout.decode('utf-8') if ret.stdout else ""
lang = re.search(r"auto-detected language: (\w+)", output)
if lang and lang.group(1):
lang = lang.group(1)
else:
lang = "en"
return True, lang
elif self.asr_engine == 'faster-whisper':
try:
from faster_whisper import WhisperModel, download_model
                # Use a dedicated cache directory to avoid cross-device link errors
cache_dir = os.path.join(self.faster_whisper_model_path, "cache")
if not os.path.exists(cache_dir):
os.mkdir(cache_dir)
os.environ["HUGGINGFACE_HUB_CACHE"] = cache_dir
model = WhisperModel(download_model(self.faster_whisper_model),
device="cpu", compute_type="int8", cpu_threads=psutil.cpu_count(logical=False))
segments, info = model.transcribe(audio_file,
language=lang if lang != 'auto' else None,
word_timestamps=True,
temperature=0,
beam_size=5)
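                # word_timestamps=True exposes per-word timings, which the English path below uses to build word-level subtitles before merging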
if lang == 'auto':
lang = info.language
subs = []
if lang in ['en', 'eng']:
                    # For English, generate word-level subtitles first, then merge them into sentences
idx = 0
for segment in segments:
for word in segment.words:
idx += 1
subs.append(srt.Subtitle(index=idx,
start=timedelta(seconds=word.start),
end=timedelta(seconds=word.end),
content=word.word))
subs = self.__merge_srt(subs)
else:
for i, segment in enumerate(segments):
subs.append(srt.Subtitle(index=i,
start=timedelta(seconds=segment.start),
end=timedelta(seconds=segment.end),
content=segment.text))
self.__save_srt(f"{audio_file}.srt", subs)
return True, lang
except ImportError:
self.warn(f"faster-whisper 未安装,不进行处理")
return False, None
except Exception as e:
traceback.print_exc()
self.error(f"faster-whisper 处理异常:{e}")
return False, None
return False, None
def __generate_subtitle(self, video_file, subtitle_file, only_extract=False):
"""
生成字幕
:param video_file: 视频文件
:param subtitle_file: 字幕文件, 不包含后缀
:return: 生成成功返回True,字幕语言,否则返回False, None
"""
        # Read the media file metadata
video_meta = FfmpegHelper().get_video_metadata(video_file)
if not video_meta:
self.error(f"获取视频文件元数据失败,跳过后续处理")
return False, None
        # Pick the preferred audio track and its language
ret, audio_index, audio_lang = self.__get_video_prefer_audio(video_meta)
if not ret:
return False, None
if not iso639.find(audio_lang) or not iso639.to_iso639_1(audio_lang):
self.info(f"未知语言音轨")
audio_lang = 'auto'
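        # Candidate subtitle language codes: fall back to English when the audio language is unknown, otherwise use both the ISO 639-2 and ISO 639-1 forms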
expert_subtitle_langs = ['en', 'eng'] if audio_lang == 'auto' else [audio_lang, iso639.to_iso639_1(audio_lang)]
self.info(f"使用 {expert_subtitle_langs} 匹配已有外挂字幕文件 ...")
exist, lang = self.__external_subtitle_exists(video_file, expert_subtitle_langs)
if exist:
self.info(f"外挂字幕文件已经存在,字幕语言 {lang}")
return True, iso639.to_iso639_1(lang)
self.info(f"外挂字幕文件不存在,使用 {expert_subtitle_langs} 匹配内嵌字幕文件 ...")
        # Pick the preferred embedded subtitle track
ret, subtitle_index, \
subtitle_lang, subtitle_count = self.__get_video_prefer_subtitle(video_meta, expert_subtitle_langs)
if ret and (audio_lang == subtitle_lang or subtitle_count == 1):
if audio_lang == subtitle_lang:
                # Audio and subtitle languages match, extract the subtitle directly
self.info(f"内嵌音轨和字幕语言一致,直接提取字幕 ...")
elif subtitle_count == 1:
                # Languages differ, but there is only one subtitle track, so extract it directly
self.info(f"内嵌音轨和字幕语言不一致,但只有一个字幕,直接提取字幕 ...")
audio_lang = iso639.to_iso639_1(subtitle_lang) \
if (iso639.find(subtitle_lang) and iso639.to_iso639_1(subtitle_lang)) else 'und'
FfmpegHelper().extract_subtitle_from_video(video_file, f"{subtitle_file}.{audio_lang}.srt", subtitle_index)
self.info(f"提取字幕完成:{subtitle_file}.{audio_lang}.srt")
return True, audio_lang
if audio_lang != 'auto':
audio_lang = iso639.to_iso639_1(audio_lang)
if only_extract:
self.info(f"未开启语音识别,且无已有字幕文件,跳过后续处理")
return False, None
        # Clean up temp files left over from abnormal exits
tempdir = tempfile.gettempdir()
for file in os.listdir(tempdir):
if file.startswith('autosub-'):
os.remove(os.path.join(tempdir, file))
with tempfile.NamedTemporaryFile(prefix='autosub-', suffix='.wav', delete=True) as audio_file:
            # Extract the audio track
self.info(f"提取音频:{audio_file.name} ...")
FfmpegHelper().extract_wav_from_video(video_file, audio_file.name, audio_index)
self.info(f"提取音频完成:{audio_file.name}")
            # Run speech recognition
self.info(f"开始生成字幕, 语言 {audio_lang} ...")
ret, lang = self.__do_speech_recognition(audio_lang, audio_file.name)
if ret:
self.info(f"生成字幕成功,原始语言:{lang}")
                # Copy the subtitle file next to the video
SystemUtils.copy(f"{audio_file.name}.srt", f"{subtitle_file}.{lang}.srt")
self.info(f"复制字幕文件:{subtitle_file}.{lang}.srt")
                # Remove the temporary subtitle
os.remove(f"{audio_file.name}.srt")
return ret, lang
else:
self.error(f"生成字幕失败")
return False, None
@staticmethod
def __get_library_files(in_path, exclude_path=None):
"""
获取目录媒体文件列表
"""
if not os.path.isdir(in_path):
yield in_path
return
for root, dirs, files in os.walk(in_path):
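            # Skip anything located under one of the excluded paths (comma-separated absolute path prefixes)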
if exclude_path and any(os.path.abspath(root).startswith(os.path.abspath(path))
for path in exclude_path.split(",")):
continue
for file in files:
cur_path = os.path.join(root, file)
                # Check the file extension
if os.path.splitext(file)[-1].lower() in RMT_MEDIAEXT:
yield cur_path
@staticmethod
def __load_srt(file_path):
"""
加载字幕文件
:param file_path: 字幕文件路径
:return:
"""
with open(file_path, 'r', encoding="utf8") as f:
srt_text = f.read()
return list(srt.parse(srt_text))
@staticmethod
def __save_srt(file_path, srt_data):
"""
保存字幕文件
:param file_path: 字幕文件路径
:param srt_data: 字幕数据
:return:
"""
with open(file_path, 'w', encoding="utf8") as f:
f.write(srt.compose(srt_data))
def __get_video_prefer_audio(self, video_meta, prefer_lang=None):
"""
获取视频的首选音轨,如果有多音轨, 优先指定语言音轨,否则获取默认音轨
:param video_meta
:return:
"""
        if isinstance(prefer_lang, str) and prefer_lang:
prefer_lang = [prefer_lang]
        # Find the preferred audio track
audio_lang = None
audio_index = None
audio_stream = filter(lambda x: x.get('codec_type') == 'audio', video_meta.get('streams', []))
for index, stream in enumerate(audio_stream):
            # Default to the first track (set only once)
            if audio_index is None:
audio_index = index
audio_lang = stream.get('tags', {}).get('language', 'und')
            # Prefer the default track
if stream.get('disposition', {}).get('default'):
audio_index = index
audio_lang = stream.get('tags', {}).get('language', 'und')
            # Prefer a track in the requested language
if prefer_lang and stream.get('tags', {}).get('language') in prefer_lang:
audio_index = index
audio_lang = stream.get('tags', {}).get('language', 'und')
break
        # No audio track found, nothing to do
if audio_index is None:
self.warn(f"没有音轨,不进行处理")
return False, None, None
self.info(f"选中音轨信息:{audio_index}, {audio_lang}")
return True, audio_index, audio_lang
def __get_video_prefer_subtitle(self, video_meta, prefer_lang=None):
"""
获取视频的首选字幕,如果有多字幕, 优先指定语言字幕, 否则获取默认字幕
:param video_meta:
:return:
"""
# from https://wiki.videolan.org/Subtitles_codecs/
"""
https://trac.ffmpeg.org/wiki/ExtractSubtitles
ffmpeg -codecs | grep subtitle
DES... ass ASS (Advanced SSA) subtitle (decoders: ssa ass ) (encoders: ssa ass )
DES... dvb_subtitle DVB subtitles (decoders: dvbsub ) (encoders: dvbsub )
DES... dvd_subtitle DVD subtitles (decoders: dvdsub ) (encoders: dvdsub )
D.S... hdmv_pgs_subtitle HDMV Presentation Graphic Stream subtitles (decoders: pgssub )
..S... hdmv_text_subtitle HDMV Text subtitle
D.S... jacosub JACOsub subtitle
D.S... microdvd MicroDVD subtitle
D.S... mpl2 MPL2 subtitle
D.S... pjs PJS (Phoenix Japanimation Society) subtitle
D.S... realtext RealText subtitle
D.S... sami SAMI subtitle
..S... srt SubRip subtitle with embedded timing
..S... ssa SSA (SubStation Alpha) subtitle
D.S... stl Spruce subtitle format
DES... subrip SubRip subtitle (decoders: srt subrip ) (encoders: srt subrip )
D.S... subviewer SubViewer subtitle
D.S... subviewer1 SubViewer v1 subtitle
D.S... vplayer VPlayer subtitle
DES... webvtt WebVTT subtitle
"""
image_based_subtitle_codecs = (
'dvd_subtitle',
'dvb_subtitle',
'hdmv_pgs_subtitle',
)
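        # Image-based subtitles (DVD/DVB/PGS) carry bitmaps rather than text, so they cannot be extracted to SRT directly and are skipped below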
        if isinstance(prefer_lang, str) and prefer_lang:
prefer_lang = [prefer_lang]
        # Find the preferred subtitle track
subtitle_lang = None
subtitle_index = None
subtitle_count = 0
subtitle_stream = filter(lambda x: x.get('codec_type') == 'subtitle', video_meta.get('streams', []))
for index, stream in enumerate(subtitle_stream):
            # Skip forced subtitles
if stream.get('disposition', {}).get('forced'):
continue
            # Skip image-based subtitles
if (
'width' in stream
or stream.get('codec_name') in image_based_subtitle_codecs
):
continue
            # Default to the first text subtitle (set only once)
            if subtitle_index is None:
subtitle_index = index
subtitle_lang = stream.get('tags', {}).get('language')
            # Prefer the default subtitle
if stream.get('disposition', {}).get('default'):
subtitle_index = index
subtitle_lang = stream.get('tags', {}).get('language')
            # Prefer a subtitle in the requested language
if prefer_lang and stream.get('tags', {}).get('language') in prefer_lang:
subtitle_index = index
subtitle_lang = stream.get('tags', {}).get('language')
subtitle_count += 1
        # No embedded subtitle found, nothing to do
if subtitle_index is None:
self.debug(f"没有内嵌字幕")
return False, None, None, None
self.debug(f"命中内嵌字幕信息:{subtitle_index}, {subtitle_lang}")
return True, subtitle_index, subtitle_lang, subtitle_count
def __is_noisy_subtitle(self, content):
"""
判断是否为背景音等字幕
:param content:
:return:
"""
for token in self._noisy_token:
if content.startswith(token[0]) and content.endswith(token[1]):
return True
return False
def __merge_srt(self, subtitle_data):
"""
合并整句字幕
:param subtitle_data:
:return:
"""
subtitle_data = copy.deepcopy(subtitle_data)
        # Merge subtitles
merged_subtitle = []
sentence_end = True
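        # Fragments are appended to the last merged entry until a terminator token or the length cap ends the sentence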
for index, item in enumerate(subtitle_data):
            # Collapse multi-line content into one line and strip surrounding whitespace
content = item.content.replace('\n', ' ').strip()
            # Strip HTML tags
parse = etree.HTML(content)
if parse is not None:
content = parse.xpath('string(.)')
if content == '':
continue
item.content = content
            # Background/noise captions are not merged: keep them as-is and end the current sentence
if self.__is_noisy_subtitle(content):
merged_subtitle.append(item)
sentence_end = True
continue
if not merged_subtitle or sentence_end:
merged_subtitle.append(item)
elif not sentence_end:
merged_subtitle[-1].content = f"{merged_subtitle[-1].content} {content}"
merged_subtitle[-1].end = item.end
            # Mark the sentence as finished if the content ends with a terminator token
if content.endswith(tuple(self._end_token)):
sentence_end = True
            # Also finish the sentence if the merged content grows too long
elif len(merged_subtitle[-1].content) > 350:
sentence_end = True
else:
sentence_end = False
return merged_subtitle
def __do_translate_with_retry(self, text, retry=3):
        # Translate via the OpenAI API
        # Free OpenAI API limit: 20 requests / minute
ret, result = OpenAiHelper().translate_to_zh(text)
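        # Retry on failure; a rate-limit error triggers a 60 second back-off before the next attempt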
for i in range(retry):
if ret and result:
break
if "Rate limit reached" in result:
self.info(f"OpenAI Api Rate limit reached, sleep 60s ...")
time.sleep(60)
else:
self.warn(f"翻译失败,重试第{i + 1}次")
ret, result = OpenAiHelper().translate_to_zh(text)
if not ret or not result:
return None
return result
def __translate_zh_subtitle(self, source_lang, source_subtitle, dest_subtitle):
"""
调用OpenAI 翻译字幕
:param source_subtitle:
:param dest_subtitle:
:return:
"""
        # Load the subtitle file
srt_data = self.__load_srt(source_subtitle)
        # Merge fragments into sentences; this works best for punctuated English, other languages or unpunctuated text would need NLP-based splitting
if source_lang in ['en', 'eng']:
self.info(f"开始合并字幕语句 ...")
merged_data = self.__merge_srt(srt_data)
self.info(f"合并字幕语句完成,合并前字幕数量:{len(srt_data)}, 合并后字幕数量:{len(merged_data)}")
srt_data = merged_data
batch = []
max_batch_tokens = 1000
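        # The batch limit is an approximate character count (not real tokens): lines are joined with newlines, sent as one request, then split back line by line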
for srt_item in srt_data:
            # Skip empty lines and noise captions
if not srt_item.content:
continue
if self.__is_noisy_subtitle(srt_item.content):
continue
            # Batch the translation to reduce the number of API calls
batch.append(srt_item)
            # Character count of the current batch
batch_tokens = sum([len(x.content) for x in batch])
            # Keep accumulating until the batch is full or this is the last subtitle
if batch_tokens < max_batch_tokens and srt_item != srt_data[-1]:
continue
batch_content = '\n'.join([x.content for x in batch])
result = self.__do_translate_with_retry(batch_content)
            # Skip the batch if translation failed
if not result:
batch = []
continue
translated = result.split('\n')
if len(translated) != len(batch):
self.info(
f"翻译结果数量不匹配,翻译结果数量:{len(translated)}, 需要翻译数量:{len(batch)}, 退化为单条翻译 ...")
                # Fall back to translating line by line when the counts do not match
for index, item in enumerate(batch):
result = self.__do_translate_with_retry(item.content)
if not result:
continue
item.content = result + '\n' + item.content
else:
self.debug(f"翻译结果数量匹配,翻译结果数量:{len(translated)}")
for index, item in enumerate(batch):
item.content = translated[index].strip() + '\n' + item.content
batch = []
        # Save the subtitle file
self.__save_srt(dest_subtitle, srt_data)
@staticmethod
def __external_subtitle_exists(video_file, prefer_langs=None):
"""
外部字幕文件是否存在
:param video_file:
:return:
"""
video_dir, video_name = os.path.split(video_file)
video_name, video_ext = os.path.splitext(video_name)
        if isinstance(prefer_langs, str) and prefer_langs:
prefer_langs = [prefer_langs]
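        # External subtitles are expected as sidecar files named <video>.<lang>.srt next to the video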
for subtitle_lang in prefer_langs:
dest_subtitle = os.path.join(video_dir, f"{video_name}.{subtitle_lang}.srt")
if os.path.exists(dest_subtitle):
return True, subtitle_lang
return False, None
def __target_subtitle_exists(self, video_file):
"""
目标字幕文件是否存在
:param video_file:
:return:
"""
if self.translate_zh:
prefer_langs = ['zh', 'chi']
else:
prefer_langs = ['en', 'eng']
exist, lang = self.__external_subtitle_exists(video_file, prefer_langs)
if exist:
return True
video_meta = FfmpegHelper().get_video_metadata(video_file)
if not video_meta:
return False
ret, subtitle_index, subtitle_lang, _ = self.__get_video_prefer_subtitle(video_meta, prefer_lang=prefer_langs)
if ret and subtitle_lang in prefer_langs:
return True
return False
def get_state(self):
return False
def stop_service(self):
"""
退出插件
"""
pass