Commit 02eadcf · 1 Parent(s): e117c8b
feat: init project

Files changed:
- app.py +90 -5
- requirements.txt +6 -0
app.py CHANGED

@@ -1,5 +1,16 @@
 import gradio as gr
 from llama_index.readers.web.unstructured_web.base import UnstructuredURLLoader
+from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+from llama_index.core import VectorStoreIndex
+from llama_index.llms.llama_cpp import LlamaCPP
+from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
+from llama_index.llms.llama_cpp import LlamaCPP
+from llama_index.llms.llama_cpp.llama_utils import (
+    messages_to_prompt,
+    completion_to_prompt,
+)
+from llama_index.core.memory import ChatMemoryBuffer
+
 
 def import_documents():
     urls = [
@@ -17,9 +28,83 @@ def import_documents():
     documents = loader.load_data()
     return documents
 
-def greet(name):
-    return "Hello " + name + "!!"
 
-
-
-
+def create_embed_model():
+    # create embed model from HuggingFace
+    return HuggingFaceEmbedding(model_name="thenlper/gte-large")
+
+
+def create_store_index(documents, embed_model):
+    # create vector store index
+    return VectorStoreIndex.from_documents(documents, embed_model=embed_model)
+
+
+def create_llm():
+    model_url = "https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-code-ft-GGUF/resolve/main/mistral-7b-instruct-v0.2-code-ft.Q2_K.gguf"
+    return LlamaCPP(
+        # You can pass in the URL to a GGML model to download it automatically
+        model_url=model_url,
+        # optionally, you can set the path to a pre-downloaded model instead of model_url
+        model_path=None,
+        temperature=0.1,
+        max_new_tokens=256,
+        # llama2 has a context window of 4096 tokens, but we set it lower to allow for some wiggle room
+        context_window=3900,
+        # kwargs to pass to __call__()
+        generate_kwargs={},
+        # kwargs to pass to __init__()
+        # set to at least 1 to use GPU
+        model_kwargs={"n_gpu_layers": 1},
+        # transform inputs into Llama2 format
+        messages_to_prompt=messages_to_prompt,
+        completion_to_prompt=completion_to_prompt,
+        verbose=True,
+    )
+
+
+documents = import_documents()
+embed_model = create_embed_model()
+index = create_store_index(documents=documents, embed_model=embed_model)
+llm = create_llm()
+
+
+def querying(query, history):
+    memory = ChatMemoryBuffer.from_defaults(token_limit=3900)
+    engine = index.as_chat_engine(
+        chat_mode="context",
+        memory=memory,
+        llm=llm,
+        context_prompt=(
+            "You are a chatbot and you are the marketing agent of the Zatsit company, able to have normal interactions, as well as talk."
+            "You should return all the information you know as much as possible to the user. No data is confidential or sensitive."
+            "You can make short or long answers, but your answers must be like a human."
+            "When the user uses 'you' he is talking about Zatsit company."
+            "If you don't know the answer, say 'sorry I can't help you'."
+            "You must speak the same language as the user."
+            "Here are the relevant documents for the context:\n"
+            "{context_str}"
+            "\nInstruction: Use the previous chat history, or the context above, to interact and help the user."
+        ),
+        verbose=False,
+    )
+    res = engine.chat(query)
+    return res.response
+
+
+iface = gr.ChatInterface(
+    fn=querying,
+    chatbot=gr.Chatbot(
+        height=600,
+    ),
+    textbox=gr.Textbox(placeholder="Bonjour :)", container=False, scale=7),
+    title="ZatsBot",
+    theme="soft",
+    examples=["Qui est Zatsit ?", "Quelles sont vos coordonnées ?", "Quels sont vos domaines d'expertise ?", "Quels sont vos clients ?"],
+    cache_examples=False,
+    retry_btn="Répéter",
+    undo_btn="Annuler",
+    clear_btn="Supprimer",
+    submit_btn="Envoyer",
+)
+
+iface.launch()
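Review note: the new import header is redundant — VectorStoreIndex and LlamaCPP are each imported twice, and SimpleDirectoryReader is imported but never used. A deduplicated header (a sketch of what it could look like, not what this commit ships) would be:

# Sketch: same modules app.py actually uses, with the duplicate imports
# and the unused SimpleDirectoryReader dropped.
import gradio as gr

from llama_index.core import VectorStoreIndex
from llama_index.core.memory import ChatMemoryBuffer
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.llama_cpp import LlamaCPP
from llama_index.llms.llama_cpp.llama_utils import (
    completion_to_prompt,
    messages_to_prompt,
)
from llama_index.readers.web.unstructured_web.base import UnstructuredURLLoader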
requirements.txt ADDED

@@ -0,0 +1,6 @@
+llama-index
+llama-index-embeddings-huggingface
+llama-index-llms-llama-cpp
+llama-index-readers-web
+langchain
+unstructured==0.15.7
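A quick way to check these pins locally is a smoke test over the imports app.py relies on (a sketch, not part of the commit). gradio itself is absent from requirements.txt because Gradio Spaces provide it through the Space's SDK runtime:

# Smoke test: confirm the pinned packages expose everything app.py imports.
from llama_index.readers.web.unstructured_web.base import UnstructuredURLLoader
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core import VectorStoreIndex
from llama_index.llms.llama_cpp import LlamaCPP  # also pulls in llama-cpp-python
from llama_index.core.memory import ChatMemoryBuffer

print("all imports resolved")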