Upload 12 files
Browse files- .gitignore +4 -0
- app.py +38 -0
- model/model_ai.py +78 -0
- prompt_system_agent.txt +42 -0
- requirements.txt +14 -0
- routers/ask.py +49 -0
- schemas/model_llm.py +8 -0
- schemas/question.py +5 -0
- system_prompt_classic.txt +25 -0
- tools/duckduckgo.py +11 -0
- tools/search_db_vectorial.py +40 -0
- tools/time.py +14 -0
.gitignore
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.venv
|
2 |
+
__pycache__
|
3 |
+
*.pyc
|
4 |
+
.env
|
app.py
ADDED
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
import warnings

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import RedirectResponse
from langchain.globals import set_verbose

from routers import ask

warnings.filterwarnings("ignore", category=DeprecationWarning)

# Verbosity is env-driven (consistent with routers/ask.py) but defaults to
# "true" to preserve the previous hard-coded set_verbose(True) behavior.
set_verbose(os.getenv("VERBOSE", "true").lower() in ("1", "true", "yes"))

app = FastAPI()

# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# rejected by browsers under the CORS spec; confirm whether credentialed
# requests are actually needed, otherwise drop allow_credentials.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.include_router(ask.router, prefix="/api")


@app.get("/")
def redirect_docs():
    """Redirect the root path to the interactive API docs."""
    return RedirectResponse(url="/docs")


if __name__ == "__main__":
    # Run the server in-process instead of shelling out with os.system():
    # os.system spawned a child shell, so KeyboardInterrupt was raised in the
    # child and the except blocks below were effectively dead code.
    import uvicorn

    try:
        uvicorn.run(app, host="0.0.0.0", port=7860)
    except KeyboardInterrupt:
        print("\nBye!\n")
    except Exception as e:
        print(f"Error: {e}")
|
model/model_ai.py
ADDED
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import Sequence
|
2 |
+
from dotenv import load_dotenv
|
3 |
+
from langchain_core.tools import BaseTool
|
4 |
+
from langchain_openai import ChatOpenAI
|
5 |
+
from langchain.memory import ConversationBufferMemory
|
6 |
+
from langchain_core.prompts.prompt import PromptTemplate
|
7 |
+
from langchain.tools.render import render_text_description
|
8 |
+
from langchain.agents.output_parsers import ReActSingleInputOutputParser
|
9 |
+
from langchain.agents import AgentExecutor
|
10 |
+
from langchain.agents.format_scratchpad import format_log_to_str
|
11 |
+
from schemas.model_llm import ModelLLM
|
12 |
+
from langchain.globals import set_verbose
|
13 |
+
import warnings
|
14 |
+
|
15 |
+
warnings.filterwarnings("ignore", category=DeprecationWarning)
|
16 |
+
|
17 |
+
|
18 |
+
set_verbose(True)
|
19 |
+
load_dotenv()
|
20 |
+
|
21 |
+
|
22 |
+
class ModelAI:
    """Wraps an OpenAI-compatible chat model and builds a ReAct agent executor."""

    def __init__(self, model: ModelLLM):
        # Connection settings for the OpenAI-compatible endpoint.
        self.model = model.name_model
        self.base_url = model.base_url
        self.api_key = model.api_key

    # NOTE(review): "executer" is a typo for "executor", but the name is part
    # of the public interface (routers/ask.py calls it), so it is kept as-is.
    def agent_executer(self, tools: Sequence[BaseTool]) -> AgentExecutor:
        """
        Create an agent executor with the given tools and the model.

        Args:
            tools: A sequence of tools to be used by the agent.

        Returns:
            An agent executor wired with the given tools, a conversation
            buffer memory, and the ReAct system prompt template.
        """
        llm = ChatOpenAI(
            model=self.model,
            api_key=self.api_key,
            base_url=self.base_url,
            temperature=0.5,
        )
        # return_messages=False: the history is injected into the prompt as
        # plain text (the template has a {chat_history} placeholder).
        memory = ConversationBufferMemory(
            memory_key="chat_history", return_messages=False
        )
        prompt_text = self._load_prompt("prompt_system_agent.txt")

        agent_prompt = PromptTemplate.from_template(prompt_text)
        # Pre-fill the tool-description placeholders; {input}, {chat_history}
        # and {agent_scratchpad} are resolved at invocation time.
        prompt = agent_prompt.partial(
            tools=render_text_description(tools),
            tool_names=", ".join([t.name for t in tools]),
        )

        agent = self._create_agent(llm, prompt)
        return AgentExecutor(agent=agent, tools=tools, memory=memory)

    @staticmethod
    def _load_prompt(filepath: str) -> str:
        """Read a prompt template from disk as UTF-8 text."""
        # Explicit encoding: without it the platform default is used, which
        # breaks on Windows for non-ASCII prompt files.
        with open(filepath, "r", encoding="utf-8") as file:
            return file.read()

    @staticmethod
    def _create_agent(llm: ChatOpenAI, prompt: PromptTemplate):
        """Assemble the ReAct pipeline: input mapping | prompt | llm | parser.

        Returns a LangChain Runnable chain (the previous ``-> dict``
        annotation was incorrect; the dict literal is only the first stage
        of the LCEL pipeline).
        """
        # Stop generation at "\nObservation" so the model cannot hallucinate
        # tool results; the output parser extracts Action / Action Input.
        llm_with_stop = llm.bind(stop=["\nObservation"])
        return (
            {
                "input": lambda x: x["input"],
                "agent_scratchpad": lambda x: format_log_to_str(
                    x["intermediate_steps"]
                ),
                "chat_history": lambda x: x["chat_history"],
            }
            | prompt
            | llm_with_stop
            | ReActSingleInputOutputParser()
        )
|
prompt_system_agent.txt
ADDED
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
You are Edith (Educational Digital Intelligent Teaching Helper), a virtual assistant that works in various areas. Edith is designed to help with a wide range of tasks, from answering simple questions to providing detailed explanations and discussions on various topics. As a language model, Edith can generate human-like text based on the information received, allowing for natural conversations and providing coherent and relevant responses to the topic at hand. Always respond in the language the user speaks to you in and remember to be polite and respectful at all times. If the user requests a response in another language, provide the final response in that language. Show the source from where you get the information.
|
2 |
+
|
3 |
+
TOOLS:
|
4 |
+
------
|
5 |
+
|
6 |
+
Edith has access to the following tools:
|
7 |
+
|
8 |
+
{tools}
|
9 |
+
|
10 |
+
To use a tool, please use the following format:
|
11 |
+
|
12 |
+
```
|
13 |
+
|
14 |
+
Thought: Do I need to use a tool? Yes
|
15 |
+
|
16 |
+
Action: the action to take, should be one of [{tool_names}]
|
17 |
+
|
18 |
+
Action Input: the input to the action
|
19 |
+
|
20 |
+
Observation: the result of the action
|
21 |
+
|
22 |
+
```
|
23 |
+
|
24 |
+
When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
|
25 |
+
|
26 |
+
```
|
27 |
+
|
28 |
+
Thought: Do I need to use a tool? No
|
29 |
+
|
30 |
+
Final Answer: [your response here]
|
31 |
+
|
32 |
+
```
|
33 |
+
|
34 |
+
Begin!
|
35 |
+
|
36 |
+
Previous conversation history:
|
37 |
+
|
38 |
+
{chat_history}
|
39 |
+
|
40 |
+
New input: {input}
|
41 |
+
|
42 |
+
{agent_scratchpad}
|
requirements.txt
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
requests
|
2 |
+
fastapi
|
3 |
+
langchain
|
4 |
+
langchain-core
|
5 |
+
langchain-openai
|
6 |
+
langchain-community
|
7 |
+
langchain-huggingface
|
8 |
+
faiss-cpu
|
9 |
+
duckduckgo-search
|
10 |
+
uvicorn
|
11 |
+
einops
|
12 |
+
python-multipart
|
13 |
+
docx2txt
|
14 |
+
aiofiles
|
routers/ask.py
ADDED
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from os import getenv
import warnings

from dotenv import load_dotenv
from fastapi import APIRouter
from langchain.globals import set_verbose

from model.model_ai import ModelAI
from schemas.model_llm import ModelLLM
from schemas.question import Question
from tools.duckduckgo import search
from tools.search_db_vectorial import exists_database, list_databases, search_database
from tools.time import time

warnings.filterwarnings("ignore", category=DeprecationWarning)

load_dotenv()
# bool(getenv(...)) was wrong: any non-empty string (including "false" or
# "0") is truthy, so VERBOSE=false still enabled verbose mode.
set_verbose(getenv("VERBOSE", "").lower() in ("1", "true", "yes"))

LLM_API_NAME_MODEL = getenv("LLM_API_NAME_MODEL")
LLM_API_URL = getenv("LLM_API_URL")
LLM_API_KEY = getenv("LLM_API_KEY")

router = APIRouter(
    prefix="/ai",
    tags=["Ask Agent"],
    responses={404: {"description": "Not found"}},
)
model_llm = ModelLLM(
    name_model=LLM_API_NAME_MODEL, base_url=LLM_API_URL, api_key=LLM_API_KEY
)
model = ModelAI(model_llm)
tools = [time, search, list_databases, exists_database, search_database]
# The executor (and its conversation memory) is shared across all requests.
agent_executor = model.agent_executer(tools)


@router.get("/ask")
def ask_question_format():
    """Return the expected question payload format."""
    # Renamed from ask_question: the POST handler below redefined the same
    # module-level name (flake8 F811), even though FastAPI had already
    # registered both routes at decoration time.
    return {"input": "Pregunta"}


@router.post("/ask")
async def ask_question(question: Question):
    """Receive a question and return the agent's answer."""
    try:
        # ainvoke performs the agent call asynchronously so the event loop
        # is not blocked while the LLM/tools run.
        respuesta = await agent_executor.ainvoke({"input": question.input})
        return {"respuesta": respuesta["output"]}
    except Exception as e:
        # NOTE(review): this returns HTTP 200 with an error body; consider
        # raising HTTPException(500) so clients can detect failures.
        return {"error": str(e)}
|
schemas/model_llm.py
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from dataclasses import dataclass
|
2 |
+
|
3 |
+
|
4 |
+
@dataclass
class ModelLLM:
    """Connection settings for an OpenAI-compatible LLM endpoint."""

    # Model identifier as exposed by the serving API (LLM_API_NAME_MODEL).
    name_model: str
    # Base URL of the OpenAI-compatible API (LLM_API_URL).
    base_url: str
    # API key used to authenticate against the endpoint (LLM_API_KEY).
    api_key: str
|
schemas/question.py
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from pydantic import BaseModel
|
2 |
+
|
3 |
+
|
4 |
+
class Question(BaseModel):
    """Request body for POST /api/ai/ask."""

    # The user's question / prompt text forwarded to the agent.
    input: str
|
system_prompt_classic.txt
ADDED
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
You are an AI assistant designed to provide practical, thoughtful, and problem-solving responses across various contexts: from technical and professional queries to personal and interpersonal guidance. You maintain a logical, balanced, and traditional approach, prioritizing stability, responsibility, and respect for established norms. *Always respond in the language the user writes to you*.
|
2 |
+
*Behavior Guidelines:*
|
3 |
+
1. *Tone and Style:*
|
4 |
+
- Use professional, clear, and respectful language, tailored to the user's context.
|
5 |
+
- Avoid ideological or emotional biases; maintain a neutral yet solid stance.
|
6 |
+
2. *Core Principles:*
|
7 |
+
- Promote discipline, effort, and responsibility in any area.
|
8 |
+
- Prioritize practical solutions and reflections that support stability and order.
|
9 |
+
3. *Application Areas:*
|
10 |
+
- *Professional:* Provide clear, concise technical or methodological advice for engineering, software development, or other professional issues.
|
11 |
+
- *Personal:* Offer reflections on life decisions based on logic, commitment, and self-discipline.
|
12 |
+
- *Interpersonal:* Advise on building healthy relationships based on respect and mutual understanding.
|
13 |
+
4. *Restrictions:*
|
14 |
+
- Align with traditional values, avoiding support for ideologies or practices that contradict stability or maturity.
|
15 |
+
- Do not endorse progressive modern agendas such as the LGTB+ movement or "woke" concepts.
|
16 |
+
*Examples of Interaction:*
|
17 |
+
*Professional Case*
|
18 |
+
*User:* How can I better organize my workflow in a software engineering project?
|
19 |
+
*Assistant:* An efficient workflow starts with clear planning. Define the main project objectives and break them down into specific, prioritized tasks. Use management tools like Gantt charts or specialized software (e.g., Jira or Trello) to track progress. Set regular checkpoints to evaluate progress and make necessary adjustments. Clear communication among team members is key to avoiding confusion and delays.
|
20 |
+
*Personal Case*
|
21 |
+
*User:* How can I stay focused on my personal goals?
|
22 |
+
*Assistant:* Staying focused requires setting clear, realistic goals broken into manageable steps. Create a schedule that allows consistent work on them and avoid unnecessary distractions. Regularly track your progress and adjust strategies as needed. Discipline and perseverance are essential to achieving meaningful goals.
|
23 |
+
*Interpersonal Case*
|
24 |
+
*User:* How can I improve communication with people around me?
|
25 |
+
*Assistant:* Good communication requires active listening, expressing your ideas clearly, and respecting others’ opinions. Avoid assuming negative intentions and focus on resolving misunderstandings constructively. Cultivating patience and practicing empathy will help you build healthier and more effective relationships.
|
tools/duckduckgo.py
ADDED
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
|
2 |
+
from langchain_community.tools import DuckDuckGoSearchResults
|
3 |
+
from langchain.agents import tool
|
4 |
+
|
5 |
+
|
6 |
+
@tool
def search(query: str) -> str:
    """Search in DuckDuckGo"""
    # Results restricted to the last week ("w"), safesearch disabled,
    # at most 5 hits. Local renamed so it no longer shadows this function.
    wrapper = DuckDuckGoSearchAPIWrapper(max_results=5, safesearch="off", time="w")
    ddg_results = DuckDuckGoSearchResults(api_wrapper=wrapper)
    return str(ddg_results.invoke(query))
|
tools/search_db_vectorial.py
ADDED
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.agents import tool
|
2 |
+
import requests
|
3 |
+
import os
|
4 |
+
from dotenv import load_dotenv
|
5 |
+
from urllib.parse import urljoin
|
6 |
+
|
7 |
+
load_dotenv()

# Base URL of the external vector-database service (read from .env).
API_URL = os.getenv("API_DATABASE_VECTORIAL_URL")
# Relative endpoint paths; each tool below joins these onto API_URL.
ENDPOINTS = {
    "base": "/db_vectorial",  # base path
    "list": "/list",
    "search": "/search",
    "exists": "/exists",
}
|
16 |
+
|
17 |
+
|
18 |
+
@tool
def exists_database(database_name: str) -> str:
    """Check if a database exists."""
    url = urljoin(API_URL, f"{ENDPOINTS['base']}{ENDPOINTS['exists']}/{database_name}")
    # Explicit timeout: requests has no default, so an unreachable
    # vector-DB service would otherwise hang the agent forever.
    return str(requests.get(url, timeout=10).json())
|
23 |
+
|
24 |
+
|
25 |
+
@tool
def list_databases() -> str:
    """List databases."""
    url = urljoin(API_URL, f"{ENDPOINTS['base']}{ENDPOINTS['list']}")
    # Explicit timeout: requests has no default, so an unreachable
    # vector-DB service would otherwise hang the agent forever.
    return str(requests.get(url, timeout=10).json())
|
30 |
+
|
31 |
+
|
32 |
+
@tool
def search_database(
    database_name: str, query: str, k: int = 5, source: str = None
) -> str:
    """Search in a database."""
    # NOTE(review): `source: str = None` should ideally be Optional[str];
    # kept as-is to avoid altering the tool's argument schema.
    url = urljoin(API_URL, f"{ENDPOINTS['base']}{ENDPOINTS['search']}")
    data = {"database_name": database_name, "query": query, "k": k, "source": source}
    # Explicit timeout: searches may be slow, but an unresponsive service
    # must not block the agent indefinitely (requests has no default).
    response = requests.post(url, json=data, timeout=30)
    return str(response.json())
|
tools/time.py
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from datetime import datetime
|
2 |
+
from langchain.agents import tool
|
3 |
+
|
4 |
+
|
5 |
+
@tool
def time(format: str = "%Y-%m-%d %H:%M:%S") -> str:
    """Return the current time.
    Format accepted:
    - %Y-%m-%d %H:%M:%S
    - %Y-%m-%d
    - %H:%M:%S
    - etc.
    """
    # NOTE(review): the parameter name shadows the builtin format(), and the
    # function name shadows the stdlib time module; both are kept because
    # they form the tool's name and input schema seen by the agent.
    # strftime already returns str, so the redundant str() wrapper was dropped.
    return datetime.now().strftime(format)
|