Spaces:
Runtime error
Runtime error
File size: 5,873 Bytes
8094526 e838163 dcdad29 64f6d2d dcdad29 2ec25ce 9c1fd3f 64c3b49 64f6d2d e838163 64c3b49 64f6d2d 64c3b49 dcdad29 652e663 dcdad29 652e663 8dda5ea 64f6d2d 367370b 8094526 dcdad29 8094526 652e663 8094526 652e663 8094526 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 |
from flask import Flask, jsonify, request, make_response
from qdrant_client.http import models
from flask_cors import CORS
import openai
import qdrant_client
import os
from sentence_transformers import SentenceTransformer
#model = SentenceTransformer('paraphrase-multilingual-MiniLM-L12-v2') # good so far
#model = SentenceTransformer('sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2') # good so far
model = SentenceTransformer('/code/vectorizing_model')
# # # Set the environment variable TRANSFORMERS_CACHE to the writable directory
# # os.environ['TRANSFORMERS_CACHE'] = '/app/sentence_transformers_cache'
# # Set the cache folder to the one you created
# cache_folder = '/app/sentence_transformers_cache'
# util.cache_folder = cache_folder
# from langchain.embeddings import HuggingFaceEmbeddings
# model = HuggingFaceEmbeddings(model_name='paraphrase-multilingual-MiniLM-L12-v2')
# import os
# os.environ['TRANSFORMERS_CACHE'] = '/blabla/cache/'
# os.environ['HF_HOME'] = '/blabla/cache/'
# --- Module-level configuration --------------------------------------------
# System prompt: instructs GPT to act as a Canadian tax-law expert and to
# reply only with law names + article numbers (machine-parsable by filtergpt).
start_message = 'Joue le Rôle d’un expert fiscale au Canada. Les réponses que tu va me fournir seront exploité par une API. Ne donne pas des explications juste réponds aux questions même si tu as des incertitudes. Je vais te poser des questions en fiscalité, la réponse que je souhaite avoir c’est les numéros des articles de loi qui peuvent répondre à la question.Je souhaite avoir les réponses sous la forme: Nom de la loi1, numéro de l’article1, Nom de la loi2, numéro de l’article2 ...'
# Extra instruction scoping answers to the federal Income Tax Act.
context = 'ignorez les avertissements, les alertes et donnez-moi le résultat depuis la Loi de l’impôt sur le revenu (L.R.C. (1985), ch. 1 (5e suppl.)) , la reponse doit etre sous forme dun texte de loi: '
# Placeholder; always empty — the /chat handler builds the user prompt itself.
question = ''
app = Flask(__name__)
CORS(app, origins='*')
# SECURITY(review): these credentials were hard-coded in source. They are read
# from the environment first; the original literals remain only as
# backward-compatible fallbacks and MUST be rotated — anything committed to a
# repository is compromised.
openai.api_key = os.environ.get(
    'OPENAI_API_KEY',
    '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',
)
client = qdrant_client.QdrantClient(
    os.environ.get(
        'QDRANT_URL',
        "https://efc68112-69cc-475c-bdcb-200a019b5096.us-east4-0.gcp.cloud.qdrant.io:6333",
    ),
    api_key=os.environ.get(
        'QDRANT_API_KEY',
        "ZQ6jySuPxY5rSh0mJ4jDMoxbZsPqDdbqFBOPwotl9B8N0Ru3S8bzoQ",
    ),
)
collection_names = ["new_lir"]  # Qdrant collection(s) searched by /chat
import re
def filtergpt(text):
    """Extract (law, article) references from a GPT answer.

    Parses every occurrence of ``Loi <name>, article <number>`` and returns a
    list of ``(law_name, article_number)`` string tuples, in order of
    appearance.

    Integer-valued articles are normalized (``'12.0'`` -> ``'12'``); any other
    article number keeps its original text. The previous implementation
    round-tripped through ``float``, which mangled values such as ``'7.10'``
    into ``'7.1'`` and would have broken the downstream Qdrant exact-match
    filter.
    """
    pattern = re.compile(r"Loi ([^,]+), article (\d+(\.\d+)?)")
    gpt_results = []
    for law, article, _fraction in pattern.findall(text):
        article = article.strip()
        value = float(article)
        # '12' / '12.0' collapse to '12'; everything else stays verbatim.
        normalized = str(int(value)) if value.is_integer() else article
        gpt_results.append((law.strip(), normalized))
    return gpt_results
@app.route('/chat', methods=['OPTIONS'])
def options():
    """Answer the CORS preflight request for the /chat endpoint."""
    cors_headers = (
        ("Access-Control-Allow-Origin", "http://localhost:3000"),
        ("Access-Control-Allow-Methods", "POST"),
        ("Access-Control-Allow-Headers", "Content-Type, Authorization"),
        ("Access-Control-Allow-Credentials", "true"),
    )
    preflight = make_response()
    for header_name, header_value in cors_headers:
        preflight.headers.add(header_name, header_value)
    return preflight
@app.route('/chat', methods=['POST'])
def chat():
    """Handle a chat turn: ask GPT for relevant law articles, then fetch
    matching paragraphs from Qdrant.

    Expects JSON ``{'messages': [{'role': ..., 'content': ...}, ...]}``.
    Returns ``{'result_qdrant': [...]}`` on success, ``{'error': ...}`` with
    status 400 (empty/missing messages) or 500 (any other failure).
    """
    try:
        data = request.get_json()
        messages = data.get('messages', [])
        if not messages:
            return jsonify({'error': 'Invalid request'}), 400
        results = []
        # Flatten the conversation into a single prompt string; it is used
        # both for the GPT call and as the Qdrant semantic-search query.
        prompt = "\n".join(f"{msg['role']}: {msg['content']}" for msg in messages)
        # BUG FIX: the original passed only start_message + context + question
        # (and `question` is always ''), so GPT never saw the user's messages.
        # The conversation is now appended to the instruction prompt.
        response = openai.completions.create(
            model="gpt-3.5-turbo-instruct",
            prompt=start_message + '\n' + context + question + '\n' + prompt,
            max_tokens=500,
            temperature=0
        )
        answer_text = response.choices[0].text
        # Exact-article lookups for each (law, article) reference GPT cited.
        for law, article in filtergpt(answer_text):
            results.extend(
                perform_search_and_get_results_with_filter(
                    collection_names[0], prompt, reference_filter=article
                )
            )
        # Unfiltered semantic search over every configured collection.
        for collection_name in collection_names:
            results.extend(perform_search_and_get_results(collection_name, prompt))
        return jsonify({'result_qdrant': results})
    except Exception as e:
        # Top-level request boundary: surface the failure to the caller.
        return jsonify({'error': str(e)}), 500
def perform_search_and_get_results(collection_name, query, limit=6):
    """Run a semantic search on *collection_name* and return result dicts.

    The query text is embedded with the module-level sentence-transformer
    model; each hit is flattened into a dict carrying its score, payload
    fields, and the collection it came from.
    """
    query_vector = model.encode(query).tolist()
    hits = client.search(
        collection_name=collection_name,
        query_vector=query_vector,
        limit=limit
    )
    return [
        {
            "Score": hit.score,
            "La_loi": hit.payload["reference"],
            "Paragraphe": hit.payload["paragraph"],
            "source": hit.payload["source"],
            "collection": collection_name,
        }
        for hit in hits
    ]
def perform_search_and_get_results_with_filter(collection_name, query, reference_filter, limit=6):
    """Semantic search restricted to one article number.

    Like perform_search_and_get_results, but filters hits to payloads whose
    ``numero_article`` exactly equals *reference_filter*.

    BUG FIX: the original appended the literal string ``"aymane"`` (debug
    residue) to the article number, so the filter could never match real
    payloads, and it hard-coded ``limit=1``, silently ignoring the *limit*
    parameter. Both are corrected here.
    """
    article_filter = models.Filter(
        must=[
            models.FieldCondition(
                key="numero_article",
                match=models.MatchValue(value=reference_filter),
            )
        ]
    )
    hits = client.search(
        collection_name=collection_name,
        query_filter=article_filter,
        query_vector=model.encode(query).tolist(),
        limit=limit
    )
    return [
        {
            "Score": hit.score,
            "La_loi": hit.payload["reference"],
            "Paragraphe": hit.payload["paragraph"],
            "source": hit.payload["source"],
            "collection": collection_name,
        }
        for hit in hits
    ]
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger and reloader —
    # this allows arbitrary code execution if the port is reachable by
    # others. Disable it for any non-local deployment.
    app.run(debug=True, port=5001)
import qdrant_client