# Hugging Face Spaces page residue (app status banner): "Spaces: Sleeping".
# --- Environment & client setup --------------------------------------------
# SECURITY NOTE(review): this file ships a hardcoded OpenAI API key and
# OpenSearch credentials in plain text. Anyone with read access to this source
# has them; rotate both and move them to environment variables / a secrets
# manager. They are kept below only so the app keeps running unchanged.
import os
import json

import requests
import streamlit as st
from llama_index.core import Settings
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from opensearchpy import OpenSearch, RequestsHttpConnection

# Hardcoded fallback key kept for backward compatibility -- ROTATE THIS KEY.
adminkey = "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"
# setdefault lets a deployment-provided OPENAI_API_KEY take precedence over
# the baked-in fallback instead of being clobbered by it.
os.environ.setdefault("OPENAI_API_KEY", adminkey)

# Global llama-index settings: chat model used for query generation, plus an
# embedding model (configured globally; not used directly in this script).
Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.4)
Settings.embed_model = OpenAIEmbedding(model="text-embedding-ada-002")

# OpenSearch connection details; each is overridable via the environment so
# deployments don't have to edit source. Defaults preserve prior behavior.
auth = (
    os.environ.get("OPENSEARCH_USER", "admin"),
    os.environ.get("OPENSEARCH_PASSWORD", "klbvrR4AlGNMaQ"),
)
host = os.environ.get("OPENSEARCH_HOST", "10.11.10.111")
port = int(os.environ.get("OPENSEARCH_PORT", "32000"))

client = OpenSearch(
    hosts=[{"host": host, "port": port}],
    http_auth=auth,
    use_ssl=True,
    # SECURITY NOTE(review): TLS certificate verification is disabled; this
    # allows man-in-the-middle attacks. Enable verify_certs with a proper CA
    # bundle outside local testing.
    verify_certs=False,
)
def generate_opensearch_query(user_input):
    """Translate a natural-language request into an OpenSearch JSON query.

    Builds a few-shot prompt with three worked examples and asks the globally
    configured LLM (``Settings.llm``) to complete it.

    Parameters
    ----------
    user_input : str
        The user's request in plain English.

    Returns
    -------
    The raw llama-index completion response; callers read its ``.text``
    attribute to get the (hopefully) JSON query string.
    """
    # The doubled braces render as literal { } in the f-string output.
    few_shot_prompt = f"""
You are an assistant trained to translate natural language requests into OpenSearch queries. Based on the user's request, generate an OpenSearch JSON query.
Examples:
User Input: "Get all documents where the status is active."
Response:
{{
"query": {{
"match": {{
"status": "active"
}}
}}
}}
User Input: "Find records with priority high created in the last 7 days."
Response:
{{
"query": {{
"bool": {{
"must": [
{{ "match": {{ "priority": "high" }} }},
{{ "range": {{ "created_at": {{ "gte": "now-7d/d", "lte": "now" }} }} }}
]
}}
}}
}}
User Input: "Show documents where age is over 30 and sort by created date."
Response:
{{
"query": {{
"range": {{
"age": {{ "gt": 30 }}
}}
}},
"sort": [
{{ "created_date": {{ "order": "asc" }} }}
]
}}
User Input: "{user_input}"
Response:
"""
    return Settings.llm.complete(few_shot_prompt)
def implement_query(generated_query):
    """Parse an LLM-generated query and execute it against OpenSearch.

    Parameters
    ----------
    generated_query : a llama-index completion response (its ``.text``
        attribute holds the JSON) or, backward-compatibly, a plain JSON
        string.

    Returns
    -------
    dict
        The raw OpenSearch search response.

    Raises
    ------
    json.JSONDecodeError
        If the LLM output is not valid JSON even after fence stripping.
    """
    # Accept either a response object (original contract) or a raw string.
    raw = getattr(generated_query, "text", generated_query).strip()
    # ROBUSTNESS: LLMs frequently wrap JSON answers in markdown code fences
    # (```json ... ```); strip them so otherwise-valid output still parses.
    if raw.startswith("```"):
        raw = raw.removeprefix("```json").removeprefix("```")
        raw = raw.removesuffix("```").strip()
    query = json.loads(raw)
    # No index is given, so this searches every index the authenticated
    # user can read.
    return client.search(body=query)
# --- Streamlit UI -----------------------------------------------------------
st.title("OpenSearch Query Generator")
st.subheader("Enter your natural language query:")
user_input = st.text_area("Enter a Prompt:", height=150)

if st.button("Generate OpenSearch Query"):
    if user_input.strip():
        generated_query = generate_opensearch_query(user_input)
        st.subheader("Generated OpenSearch Query:")
        try:
            # BUG FIX: the original called json.loads on the LLM output
            # *outside* any try block, so malformed JSON crashed the app
            # before the error handler could run. Parse once, inside it.
            st.json(json.loads(generated_query.text))
            response = implement_query(generated_query)
            st.subheader("OpenSearch Response:")
            st.json(response)
        except json.JSONDecodeError as e:
            st.error(f"LLM did not return valid JSON: {e}")
        except Exception as e:
            st.error(f"Error executing OpenSearch query: {e}")
    else:
        st.warning("Please enter a valid query.")