Merge pull request #3 from biggraph/darabos-await-fmt
Changed files:
  server/lynxscribe_ops.py  +186 -124
  server/main.py            +26 -11
server/lynxscribe_ops.py (CHANGED)
(The updated file is shown below. Lines added by this PR are prefixed with "+"; unmarked lines are unchanged context; "@@" headers mark runs of unchanged lines that are not shown.)
@@ -1,6 +1,7 @@
 """
 LynxScribe configuration and testing in LynxKite.
 """
+
 from lynxscribe.core.llm.base import get_llm_engine
 from lynxscribe.core.vector_store.base import get_vector_store
 from lynxscribe.common.config import load_config
@@ -9,7 +10,10 @@ from lynxscribe.components.rag.rag_graph import RAGGraph
 from lynxscribe.components.rag.knowledge_base_graph import PandasKnowledgeBaseGraph
 from lynxscribe.components.rag.rag_chatbot import Scenario, ScenarioSelector, RAGChatbot
 from lynxscribe.components.chat_processor.base import ChatProcessor
+from lynxscribe.components.chat_processor.processors import (
+    MaskTemplate,
+    TruncateHistory,
+)
 from lynxscribe.components.chat_api import ChatAPI, ChatAPIRequest, ChatAPIResponse

 from . import ops
@@ -17,187 +21,245 @@ import asyncio
 import json
 from .executors import one_by_one

+ENV = "LynxScribe"
 one_by_one.register(ENV)
 op = ops.op_registration(ENV)
 output_on_top = ops.output_position(output="top")

+
 @output_on_top
 @op("Vector store")
+def vector_store(*, name="chromadb", collection_name="lynx"):
+    vector_store = get_vector_store(name=name, collection_name=collection_name)
+    return {"vector_store": vector_store}
+

 @output_on_top
 @op("LLM")
+def llm(*, name="openai"):
+    llm = get_llm_engine(name=name)
+    return {"llm": llm}
+

 @output_on_top
 @ops.input_position(llm="bottom")
 @op("Text embedder")
+def text_embedder(llm, *, model="text-embedding-ada-002"):
+    llm = llm[0]["llm"]
+    text_embedder = TextEmbedder(llm=llm, model=model)
+    return {"text_embedder": text_embedder}
+

 @output_on_top
 @ops.input_position(vector_store="bottom", text_embedder="bottom")
 @op("RAG graph")
 def rag_graph(vector_store, text_embedder):
+    vector_store = vector_store[0]["vector_store"]
+    text_embedder = text_embedder[0]["text_embedder"]
     rag_graph = RAGGraph(
         PandasKnowledgeBaseGraph(vector_store=vector_store, text_embedder=text_embedder)
     )
+    return {"rag_graph": rag_graph}
+

 @output_on_top
 @op("Scenario selector")
+def scenario_selector(*, scenario_file: str, node_types="intent_cluster"):
+    scenarios = load_config(scenario_file)
+    node_types = [t.strip() for t in node_types.split(",")]
+    scenario_selector = ScenarioSelector(
+        scenarios=[Scenario(**scenario) for scenario in scenarios],
+        node_types=node_types,
+    )
+    return {"scenario_selector": scenario_selector}
+

 DEFAULT_NEGATIVE_ANSWER = "I'm sorry, but the data I've been trained on does not contain any information related to your question."

+
 @output_on_top
 @ops.input_position(rag_graph="bottom", scenario_selector="bottom", llm="bottom")
 @op("RAG chatbot")
 def rag_chatbot(
+    rag_graph,
+    scenario_selector,
+    llm,
+    *,
     negative_answer=DEFAULT_NEGATIVE_ANSWER,
+    limits_by_type="{}",
+    strict_limits=True,
+    max_results=5,
+):
+    rag_graph = rag_graph[0]["rag_graph"]
+    scenario_selector = scenario_selector[0]["scenario_selector"]
+    llm = llm[0]["llm"]
+    limits_by_type = json.loads(limits_by_type)
+    rag_chatbot = RAGChatbot(
+        rag_graph=rag_graph,
+        scenario_selector=scenario_selector,
+        llm=llm,
+        negative_answer=negative_answer,
+        limits_by_type=limits_by_type,
+        strict_limits=strict_limits,
+        max_results=max_results,
+    )
+    return {"chatbot": rag_chatbot}
+

 @output_on_top
 @ops.input_position(processor="bottom")
 @op("Chat processor")
 def chat_processor(processor, *, _ctx: one_by_one.Context):
+    cfg = _ctx.last_result or {
+        "question_processors": [],
+        "answer_processors": [],
+        "masks": [],
+    }
+    for f in ["question_processor", "answer_processor", "mask"]:
+        if f in processor:
+            cfg[f + "s"].append(processor[f])
+    question_processors = cfg["question_processors"][:]
+    answer_processors = cfg["answer_processors"][:]
+    masking_templates = {}
+    for mask in cfg["masks"]:
+        masking_templates[mask["name"]] = mask
+    if masking_templates:
+        question_processors.append(MaskTemplate(masking_templates=masking_templates))
+        answer_processors.append(MaskTemplate(masking_templates=masking_templates))
+    chat_processor = ChatProcessor(
+        question_processors=question_processors, answer_processors=answer_processors
+    )
+    return {"chat_processor": chat_processor, **cfg}
+

 @output_on_top
 @op("Truncate history")
+def truncate_history(*, max_tokens=10000, language="English"):
+    return {
+        "question_processor": TruncateHistory(
+            max_tokens=max_tokens, language=language.lower()
+        )
+    }
+

 @output_on_top
 @op("Mask")
+def mask(*, name="", regex="", exceptions="", mask_pattern=""):
+    exceptions = [e.strip() for e in exceptions.split(",") if e.strip()]
+    return {
+        "mask": {
+            "name": name,
+            "regex": regex,
+            "exceptions": exceptions,
+            "mask_pattern": mask_pattern,
+        }
+    }
+

 @ops.input_position(chat_api="bottom")
 @op("Test Chat API")
+async def test_chat_api(message, chat_api, *, show_details=False):
+    chat_api = chat_api[0]["chat_api"]
+    request = ChatAPIRequest(
+        session_id="b43215a0-428f-11ef-9454-0242ac120002",
+        question=message["text"],
+        history=[],
+    )
+    response = await chat_api.answer(request)
+    if show_details:
+        return {**response.__dict__}
+    else:
+        return {"answer": response.answer}
+

 @op("Input chat")
 def input_chat(*, chat: str):
+    return {"text": chat}
+

 @output_on_top
 @ops.input_position(chatbot="bottom", chat_processor="bottom", knowledge_base="bottom")
 @op("Chat API")
+def chat_api(chatbot, chat_processor, knowledge_base, *, model="gpt-4o-mini"):
+    chatbot = chatbot[0]["chatbot"]
+    chat_processor = chat_processor[0]["chat_processor"]
+    knowledge_base = knowledge_base[0]
+    c = ChatAPI(
+        chatbot=chatbot,
+        chat_processor=chat_processor,
+        model=model,
+    )
+    if knowledge_base:
+        c.chatbot.rag_graph.kg_base.load_v1_knowledge_base(**knowledge_base)
+        c.chatbot.scenario_selector.check_compatibility(c.chatbot.rag_graph)
+    return {"chat_api": c}
+

 @output_on_top
 @op("Knowledge base")
+def knowledge_base(
+    *,
+    nodes_path="nodes.pickle",
+    edges_path="edges.pickle",
+    template_cluster_path="tempclusters.pickle",
+):
+    return {
+        "nodes_path": nodes_path,
+        "edges_path": edges_path,
+        "template_cluster_path": template_cluster_path,
+    }
+

 @op("View", view="table_view")
 def view(input):
+    columns = [str(c) for c in input.keys() if not str(c).startswith("_")]
+    v = {
+        "dataframes": {
+            "df": {
+                "columns": columns,
+                "data": [[input[c] for c in columns]],
+            }
+        }
+    }
+    return v
+

 async def api_service(request):
+    """
+    Serves a chat endpoint that matches LynxScribe's interface.
+    To access it you need to add the "module" and "workspace"
+    parameters.
+    The workspace must contain exactly one "Chat API" node.
+
+    curl -X POST ${LYNXKITE_URL}/api/service \
+    -H "Content-Type: application/json" \
+    -d '{
+        "module": "server.lynxscribe_ops",
+        "workspace": "LynxScribe demo",
+        "session_id": "b43215a0-428f-11ef-9454-0242ac120002",
+        "question": "what does the fox say",
+        "history": [],
+        "user_id": "x",
+        "meta_inputs": {}
+    }'
+    """
+    import pathlib
+    from . import workspace
+
+    DATA_PATH = pathlib.Path.cwd() / "data"
+    path = DATA_PATH / request["workspace"]
+    assert path.is_relative_to(DATA_PATH)
+    assert path.exists(), f"Workspace {path} does not exist"
+    ws = workspace.load(path)
+    contexts = ops.EXECUTORS[ENV](ws)
+    nodes = [op for op in ws.nodes if op.data.title == "Chat API"]
+    [node] = nodes
+    context = contexts[node.id]
+    chat_api = context.last_result["chat_api"]
+    request = ChatAPIRequest(
+        session_id=request["session_id"],
+        question=request["question"],
+        history=request["history"],
+    )
+    response = await chat_api.answer(request)
+    return response
server/main.py (CHANGED)
(The updated file is shown below. Lines added by this PR are prefixed with "+"; unmarked lines are unchanged context; "@@" headers mark runs of unchanged lines that are not shown.)
@@ -9,30 +9,34 @@ from . import workspace
 here = pathlib.Path(__file__).parent
 lynxkite_modules = {}
 for _, name, _ in pkgutil.iter_modules([str(here)]):
+    if name.endswith("_ops") and not name.startswith("test_"):
+        print(f"Importing {name}")
+        name = f"server.{name}"
         lynxkite_modules[name] = importlib.import_module(name)

 app = fastapi.FastAPI()


+
 @app.get("/api/catalog")
 def get_catalog():
     return {
         k: {op.name: op.model_dump() for op in v.values()}
+        for k, v in ops.CATALOGS.items()
+    }


 class SaveRequest(workspace.BaseConfig):
     path: str
     ws: workspace.Workspace

+
 def save(req: SaveRequest):
     path = DATA_PATH / req.path
     assert path.is_relative_to(DATA_PATH)
     workspace.save(req.ws, path)

+
 @app.post("/api/save")
 def save_and_execute(req: SaveRequest):
     save(req)
@@ -40,6 +44,7 @@ def save_and_execute(req: SaveRequest):
     save(req)
     return req.ws

+
 @app.get("/api/load")
 def load(path: str):
     path = DATA_PATH / path
@@ -48,31 +53,41 @@ def load(path: str):
         return workspace.Workspace()
     return workspace.load(path)

+
+DATA_PATH = pathlib.Path.cwd() / "data"
+

 @dataclasses.dataclass(order=True)
 class DirectoryEntry:
     name: str
     type: str

+
 @app.get("/api/dir/list")
 def list_dir(path: str):
     path = DATA_PATH / path
     assert path.is_relative_to(DATA_PATH)
+    return sorted(
+        [
+            DirectoryEntry(
+                p.relative_to(DATA_PATH), "directory" if p.is_dir() else "workspace"
+            )
+            for p in path.iterdir()
+        ]
+    )
+

 @app.post("/api/dir/mkdir")
 def make_dir(req: dict):
+    path = DATA_PATH / req["path"]
     assert path.is_relative_to(DATA_PATH)
     assert not path.exists()
     path.mkdir()
     return list_dir(path.parent)

+
 @app.post("/api/service")
 async def service(req: dict):
+    """Executors can provide extra HTTP APIs through the /api/service endpoint."""
+    module = lynxkite_modules[req["module"]]
     return await module.api_service(req)