Modified Gradio interface
app.py
CHANGED
```diff
@@ -99,14 +99,12 @@ def gradio_app():
 
     with gr.Blocks(theme=theme) as demo:
         with gr.Row():
-            with gr.Column(scale=0.1):
-                gr.Image("rag_image.jpg", elem_id="flor-banner", scale=1, height=256, width=256, show_label=False, show_download_button = False, show_share_button = False)
+            # with gr.Column(scale=0.1):
+            #     gr.Image("rag_image.jpg", elem_id="flor-banner", scale=1, height=256, width=256, show_label=False, show_download_button = False, show_share_button = False)
             with gr.Column():
                 gr.Markdown(
-                    """
-
-
-                    ⚠️ **Advertencias**: Esta es una versión experimental. 👀
+                    """## TEST de RAG (Retrieval-Augmented Generation) for LangTech projects (RENFE)
+                    ⚠️ **Advertencias**: Esta es una versión experimental.
                     """
                 )
         with gr.Row(equal_height=True):
```
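For reference, a minimal runnable sketch of the layout after this change: with the banner column commented out, the Markdown header spans the full row. The `gr.themes.Soft()` theme below is a stand-in for the one app.py actually defines.

```python
import gradio as gr

theme = gr.themes.Soft()  # stand-in; app.py defines its own theme object

with gr.Blocks(theme=theme) as demo:
    with gr.Row():
        # The image column removed above would have sat here,
        # to the left of this text column.
        with gr.Column():
            gr.Markdown(
                """## TEST de RAG (Retrieval-Augmented Generation) for LangTech projects (RENFE)
                ⚠️ **Advertencias**: Esta es una versión experimental.
                """
            )

if __name__ == "__main__":
    demo.launch()
```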
rag.py
CHANGED
```diff
@@ -100,11 +100,10 @@ class RAG:
 
         api_key = os.getenv("HF_TOKEN")
 
-
         headers = {
-
-
-
+            "Accept" : "application/json",
+            "Authorization": f"Bearer {api_key}",
+            "Content-Type": "application/json"
         }
 
         query = f"### Instruction\n{instruction}\n\n### Context\n{context}\n\n### Answer\n "
```
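The unchanged `query` line above shows the fixed prompt template the class sends to the endpoint. Rendered with hypothetical inputs (the strings below are illustrative, not from the Space):

```python
instruction = "Which trains leave Madrid before 9am?"  # hypothetical input
context = "Timetable excerpt retrieved from the vector store."  # hypothetical input

query = f"### Instruction\n{instruction}\n\n### Context\n{context}\n\n### Answer\n "
print(query)
```

The trailing `### Answer\n ` marker is what the response parsing in the next hunk relies on to cut the completion out of the echoed prompt.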
```diff
@@ -112,16 +111,22 @@ class RAG:
 
 
         payload = {
-
-
+            "inputs": query,
+            "parameters": model_parameters
         }
 
         response = requests.post(self.model_name, headers=headers, json=payload)
 
         return response.json()[0]["generated_text"].split("###")[-1][8:]
 
+
     def predict_completion(self, instruction, context, model_parameters):
 
+        model = os.getenv("MODEL")
+        if not model:
+            logging.error("No model specified in the environment variable 'MODEL'.")
+            return "Model endpoint not specified."
+
         client = OpenAI(
             base_url=os.getenv("MODEL"),
             api_key=os.getenv("HF_TOKEN")
```
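Together with the headers restored in the previous hunk, the first half of this hunk reduces the HTTP path to a plain POST against a hosted text-generation endpoint. A self-contained sketch of that flow, assuming `HF_TOKEN` is set, that `endpoint_url` plays the role of `self.model_name`, and that the endpoint echoes the prompt back in `generated_text` (the function name and the `raise_for_status()` call are additions of mine):

```python
import os

import requests

def query_endpoint(endpoint_url, instruction, context, model_parameters):
    headers = {
        "Accept": "application/json",
        "Authorization": f"Bearer {os.getenv('HF_TOKEN')}",
        "Content-Type": "application/json",
    }
    query = f"### Instruction\n{instruction}\n\n### Context\n{context}\n\n### Answer\n "
    payload = {"inputs": query, "parameters": model_parameters}

    response = requests.post(endpoint_url, headers=headers, json=payload)
    response.raise_for_status()  # surface HTTP errors before indexing into the JSON

    # Everything after the last "###" is " Answer\n<completion>"; slicing off
    # the first 8 characters (" Answer\n") leaves only the completion.
    return response.json()[0]["generated_text"].split("###")[-1][8:]
```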
```diff
@@ -198,9 +203,8 @@ class RAG:
         text_context, full_context, source = self.beautiful_context(docs, source_metadata=self.source_metadata)
         # print(text_context)
         del model_parameters["NUM_CHUNKS"]
-
-        response =
-        # response = self.predict_completion(prompt, text_context, model_parameters)
+
+        response = self.predict_completion(prompt, text_context, model_parameters)
         if not response:
             return self.NO_ANSWER_MESSAGE
 
```
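The restored call goes through `predict_completion`, which this commit also hardens: it now logs and returns early when the `MODEL` environment variable is unset, instead of constructing an `OpenAI` client with `base_url=None` and failing later at request time. A minimal sketch of that guard pattern (the helper name is mine; it assumes `MODEL` points at an OpenAI-compatible endpoint and that `logging` is configured at module level):

```python
import logging
import os

from openai import OpenAI

def make_client():
    model = os.getenv("MODEL")
    if not model:
        # Fail fast with a logged error rather than a confusing
        # connection error on the first completion request.
        logging.error("No model specified in the environment variable 'MODEL'.")
        return None
    return OpenAI(base_url=model, api_key=os.getenv("HF_TOKEN"))
```

In the committed code the early return is the string "Model endpoint not specified.", so the message surfaces directly as the chat answer instead of raising.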