import gradio as gr
import requests
import json
from decouple import config
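
# The config() lookups in query_vectara() assume a .env file (or environment
# variables) providing the Vectara credentials, for example:
#
#   CUSTOMER_ID=1234567890
#   CORPUS_ID=1
#   API_KEY=zqt_xxxxxxxxxxxxxxxx
#
# The values above are placeholders for illustration, not real credentials.
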
def query_vectara(question, uploaded_file):
    # Read the Vectara credentials from the .env file
    customer_id = config('CUSTOMER_ID')
    corpus_id = config('CORPUS_ID')
    api_key = config('API_KEY')

    # Handle the file upload to Vectara
    url = f"https://api.vectara.io/v1/upload?c={customer_id}&o={corpus_id}"
    post_headers = {
        "x-api-key": api_key,
        "customer-id": customer_id
    }
    files = {
        "file": (uploaded_file.name, uploaded_file),
        "doc_metadata": (None, json.dumps({"metadata_key": "metadata_value"})),  # Replace with your metadata
    }
    response = requests.post(url, files=files, headers=post_headers)
    if response.status_code == 200:
        upload_status = "File uploaded successfully"
    else:
        upload_status = "Failed to upload the file"

    # The textbox input is the user's message
    user_message = question
    # Query the Vectara API
    query_url = "https://api.vectara.io/v1/query"
    headers = {
        "Content-Type": "application/json",
        "x-api-key": api_key,
        "customer-id": customer_id,
    }
    query_body = {
        "query": [
            {
                "query": user_message,
                "queryContext": "",
                "start": 0,
                "numResults": 10,
                "contextConfig": {
                    "charsBefore": 0,
                    "charsAfter": 0,
                    "sentencesBefore": 2,
                    "sentencesAfter": 2,
                    "startTag": "%START_SNIPPET%",
                    "endTag": "%END_SNIPPET%",
                },
                "rerankingConfig": {
                    "rerankerId": 272725718,
                    "mmrConfig": {
                        "diversityBias": 0.3
                    }
                },
                "corpusKey": [
                    {
                        "customerId": customer_id,
                        "corpusId": corpus_id,
                        "semantics": 0,
                        "metadataFilter": "",
                        "lexicalInterpolationConfig": {
                            "lambda": 0
                        },
                        "dim": []
                    }
                ],
                "summary": [
                    {
                        "maxSummarizedResults": 5,
                        "responseLang": "eng",
                        "summarizerPromptName": "vectara-summary-ext-v1.2.0"
                    }
                ]
            }
        ]
    }
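    # Notable knobs in the body above (assuming the usual Vectara v1 query
    # semantics): "lambda" = 0 asks for purely neural retrieval with no
    # exact-term interpolation, and "diversityBias" = 0.3 controls how strongly
    # the MMR reranker trades raw relevance for result diversity.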
    query_response = requests.post(query_url, json=query_body, headers=headers)
    if query_response.status_code == 200:
        query_data = query_response.json()
        response_message = f"{upload_status}\n\nResponse from Vectara API: {json.dumps(query_data, indent=2)}"
    else:
        response_message = f"{upload_status}\n\nError: {query_response.status_code}"
    return response_message
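
# Note: the full JSON response is returned above. If only the generated
# summary text is wanted, it can typically be extracted from the query
# response (assuming the usual responseSet -> summary -> text layout), e.g.:
#
#   summary_text = query_data["responseSet"][0]["summary"][0]["text"]
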
# Create a Gradio interface with a text input, a file upload input, and a text output
iface = gr.Interface(
    fn=query_vectara,
    inputs=[gr.Textbox(label="Input Text"), gr.File(label="Upload a file")],
    outputs=gr.Textbox(label="Output Text"),
    title="Vectara Chatbot",
    description="Ask me anything using the Vectara API!"
)

iface.launch()
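
# When running locally, Gradio can also expose a temporary public URL with
# iface.launch(share=True); this is optional and unnecessary on Spaces.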