import gradio as gr
import requests
from decouple import Config, RepositoryEnv

config = Config(RepositoryEnv('.env'))  # load credentials from the local .env file
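# Expected .env layout -- a sketch with placeholder values, not real credentials:
#   CUSTOMER_ID=1234567890
#   CORPUS_ID=1
#   API_KEY=zqt_xxxxxxxxxxxxxxxxxxxxxxxx
# (If the same names are also set as environment variables, e.g. Hugging Face
# Space secrets, python-decouple prefers those values over the .env file.)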
def query_vectara(question):
    user_message = question

    # Read authentication parameters from the .env file
    CUSTOMER_ID = config('CUSTOMER_ID')
    CORPUS_ID = config('CORPUS_ID')
    API_KEY = config('API_KEY')

    # Define the headers
    api_key_header = {
        "customer-id": CUSTOMER_ID,
        "x-api-key": API_KEY
    }
    # Define the request body in the structure expected by the Vectara query API
    request_body = {
        "query": [
            {
                "query": user_message,
                "queryContext": "",
                "start": 1,
                "numResults": 10,
                "contextConfig": {
                    "charsBefore": 0,
                    "charsAfter": 0,
                    "sentencesBefore": 2,
                    "sentencesAfter": 2,
                    "startTag": "%START_SNIPPET%",
                    "endTag": "%END_SNIPPET%",
                },
                # Rerank results with Vectara's MMR reranker to balance relevance and diversity
                "rerankingConfig": {
                    "rerankerId": 272725718,
                    "mmrConfig": {
                        "diversityBias": 0.27
                    }
                },
                "corpusKey": [
                    {
                        "customerId": CUSTOMER_ID,
                        "corpusId": CORPUS_ID,
                        "semantics": 0,
                        "metadataFilter": "",
                        # lambda = 0 keeps retrieval purely neural (no keyword interpolation)
                        "lexicalInterpolationConfig": {
                            "lambda": 0
                        },
                        "dim": []
                    }
                ],
                # Summarize the top results into a single answer
                "summary": [
                    {
                        "maxSummarizedResults": 5,
                        "responseLang": "eng",
                        "summarizerPromptName": "vectara-summary-ext-v1.2.0"
                    }
                ]
            }
        ]
    }
    # Send the query to the Vectara REST API
    response = requests.post(
        "https://api.vectara.io/v1/query",
        json=request_body,  # requests serializes the body to JSON automatically
        verify=True,
        headers=api_key_header
    )
    if response.status_code == 200:
        query_data = response.json()
        print(query_data)
        # Render the summary and top sources as Markdown for the Gradio output
        return convert_to_markdown(query_data)
    else:
        return f"Error: {response.status_code}"
def convert_to_markdown(vectara_response):
    if vectara_response:
        # Extract the summary and the first 5 sources from the first response set
        response_set = vectara_response.get('responseSet', [{}])[0]
        summary = response_set.get('summary', [{}])[0]
        summary_text = summary.get('text', 'No summary available')
        sources = response_set.get('response', [])[:5]
        sources_text = [source.get('text', '') for source in sources]

        # Format the summary and sources as Markdown
        markdown_summary = f'**Summary:** {summary_text}\n\n'
        markdown_sources = "\n".join([f'{i + 1}. {source}' for i, source in enumerate(sources_text)])
        return f"{markdown_summary}**Sources:**\n{markdown_sources}"
    else:
        return "No data found in the response."
iface = gr.Interface(
    fn=query_vectara,
    inputs=[gr.Textbox(label="Input Text")],
    outputs=[gr.Markdown(label="Output Text")],
    title="Vectara Chatbot",
    description="Ask me anything using the Vectara API!"
)

iface.launch()
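# A requirements.txt sketch for this Space, based on the imports above
# (unpinned on purpose; pin versions as needed):
#   gradio
#   requests
#   python-decouple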