Commit 0213da5 by darabos
Parent: 9cc1fee

Fix caching, move outputs on top.

data/LynxScribe demo CHANGED
@@ -56,7 +56,7 @@
     ],
     "data": [
       [
-        "Az élet titka sokak számára különböző lehet, és sok tényezőtől függ. Néhány kulcselem, ami segíthet megtalálni az élet értelmét vagy titkát:\n\n- **Kapcsolatok**: A barátok és a család közelsége fontos az érzelmi jólét szempontjából.\n- **Önmegvalósítás**: Mindenkinek más a célja és álma, érdemes dolgozni azon, hogy elérjük őket.\n- **Tanulás**: Folyamatosan fejlődjünk és tanuljunk, hogy jobban megértsük a világot.\n- **Egészség**: A fizikai és mentális egészség megőrzése kulcsfontosságú az életminőség szempontjából.\n- **Kibékülés**: Békélj meg a múltaddal, és tanulj meg megbocsátani önmagadnak és másoknak.\n\nEzek az elemek hozzájárulhatnak ahhoz, hogy az életet gazdagabbnak és értékesebbnek érezd. Van valami konkrét aspektus az élet titkáról, amiről szívesen beszélnél?\n\nPlease visit <a href='https://www.linkedin.com/in/g%c3%a1bor-benedek-95578717' target='_blank'>https://www.linkedin.com/in/g%c3%a1bor-benedek-95578717</a> for further information."
+        "Az élet titka sok ember számára különböző lehet, és sok tényezőtől függ, mint például a személyes értékek, tapasztalatok és célok. Néhány általános gondolat az élet titkairól:\n\n- **Kapcsolatok**: A szeretet és az emberi kapcsolatok nagyon fontosak, hiszen ezek adhatják az élet értelmét.\n- **Önmegvalósítás**: Az, hogy megtaláljuk a szenvedélyeinket és céljainkat, segíthet abban, hogy boldogan éljünk.\n- **Folyamatos tanulás**: Az élet folyamatos tanulás, amely segít fejlődni és alkalmazkodni a változásokhoz.\n- **Egészség**: A fizikai és mentális egészség megőrzése alapvető az életminőség szempontjából.\n\nEzek persze csak általános nézőpontok, és mindenki másképp találhatja meg a saját életének a titkát. Te mivel kapcsolatban keresed az élet titkát?\n\nPlease visit <a href='https://www.linkedin.com/in/g%c3%a1bor-benedek-95578717' target='_blank'>https://www.linkedin.com/in/g%c3%a1bor-benedek-95578717</a> for further information."
       ]
     ]
   }
@@ -114,7 +114,7 @@
         "type": {
           "type": "None"
         },
-        "position": "right"
+        "position": "top"
       }
     },
     "type": "basic",
@@ -122,8 +122,8 @@
       }
     },
     "position": {
-      "x": -996.3183837866898,
-      "y": 1329.9037704510513
+      "x": -312.5774211084781,
+      "y": 1093.4019527511366
     },
     "parentId": null
   },
@@ -163,7 +163,7 @@
         "type": {
           "type": "None"
         },
-        "position": "right"
+        "position": "top"
       }
     },
     "type": "basic",
@@ -171,8 +171,8 @@
       }
     },
     "position": {
-      "x": -1076.3726648689906,
-      "y": 1126.1701539825485
+      "x": -549.1300345090008,
+      "y": 1086.4852248156676
     },
     "parentId": null
   },
@@ -217,7 +217,7 @@
         "type": {
          "type": "<class 'inspect._empty'>"
         },
-        "position": "left"
+        "position": "bottom"
       }
     },
     "outputs": {
@@ -226,7 +226,7 @@
         "type": {
           "type": "None"
         },
-        "position": "right"
+        "position": "top"
       }
     },
     "type": "basic",
@@ -234,7 +234,7 @@
       }
     },
     "position": {
-      "x": -57.80584961387282,
+      "x": -46.94726514341976,
       "y": 235.19823621492515
     },
     "parentId": null
@@ -283,7 +283,7 @@
         "type": {
           "type": "None"
         },
-        "position": "right"
+        "position": "top"
       }
     },
     "type": "basic",
@@ -291,8 +291,8 @@
       }
     },
     "position": {
-      "x": -428.0531718264389,
-      "y": 174.62875974530755
+      "x": 382.20164582795104,
+      "y": 533.2833307141879
     },
     "parentId": null
   },
@@ -303,10 +303,7 @@
     "title": "RAG chatbot",
     "params": {
       "negative_answer": "I'm sorry, but the data I've been trained on does not contain any information related to your question.",
-      "min_information": 2,
-      "max_information": 3,
-      "min_summary": 2,
-      "max_summary": 3,
+      "limits_by_type": "{\"information\": [2, 3], \"summary\": [2, 3]}",
       "strict_limits": true,
       "max_results": 5
     },
@@ -322,32 +319,11 @@
         "type": "<class 'str'>"
       }
     },
-    "min_information": {
-      "name": "min_information",
-      "default": 2,
-      "type": {
-        "type": "<class 'int'>"
-      }
-    },
-    "max_information": {
-      "name": "max_information",
-      "default": 3,
-      "type": {
-        "type": "<class 'int'>"
-      }
-    },
-    "min_summary": {
-      "name": "min_summary",
-      "default": 2,
+    "limits_by_type": {
+      "name": "limits_by_type",
+      "default": "{}",
       "type": {
-        "type": "<class 'int'>"
-      }
-    },
-    "max_summary": {
-      "name": "max_summary",
-      "default": 3,
-      "type": {
-        "type": "<class 'int'>"
+        "type": "<class 'str'>"
       }
     },
     "strict_limits": {
@@ -394,7 +370,7 @@
         "type": {
           "type": "None"
         },
-        "position": "right"
+        "position": "top"
       }
     },
     "type": "basic",
@@ -403,12 +379,12 @@
       "beingResized": false
     },
     "position": {
-      "x": -647.9563055161224,
-      "y": 528.5816378646354
+      "x": -521.6507639530705,
+      "y": 547.294980747757
     },
     "parentId": null,
-    "width": 417,
-    "height": 494
+    "width": 336,
+    "height": 349
   },
   {
     "id": "RAG graph 1",
@@ -443,7 +419,7 @@
         "type": {
           "type": "None"
         },
-        "position": "right"
+        "position": "top"
      }
     },
     "type": "basic",
@@ -451,8 +427,8 @@
       }
     },
     "position": {
-      "x": -1018.3991667849547,
-      "y": 882.7108232430365
+      "x": -817.8208895639339,
+      "y": 1014.836542916127
     },
     "parentId": null
   },
@@ -492,7 +468,7 @@
         "type": {
           "type": "None"
         },
-        "position": "right"
+        "position": "top"
       }
     },
     "type": "basic",
@@ -501,8 +477,8 @@
       "beingResized": false
     },
     "position": {
-      "x": -1992.8382657219915,
-      "y": 898.0883240074281
+      "x": -1053.794625339574,
+      "y": 1347.7711940497127
     },
     "parentId": null,
     "width": 275,
@@ -544,7 +520,7 @@
         "type": {
           "type": "None"
         },
-        "position": "right"
+        "position": "top"
       }
     },
     "type": "basic",
@@ -552,8 +528,8 @@
       }
     },
     "position": {
-      "x": -1601.7383061140106,
-      "y": 1187.790118541483
+      "x": -749.98604638686,
+      "y": 1293.5978526690794
     },
     "parentId": null
   },
@@ -585,7 +561,7 @@
         "type": {
           "type": "None"
         },
-        "position": "right"
+        "position": "top"
       }
     },
     "type": "basic",
@@ -593,8 +569,8 @@
       }
     },
     "position": {
-      "x": -1830.5553990810897,
-      "y": 1406.239623213993
+      "x": -714.2838040349482,
+      "y": 1469.7242636905507
     },
     "parentId": null
   },
@@ -634,7 +610,7 @@
         "type": {
           "type": "None"
         },
-        "position": "right"
+        "position": "top"
       }
     },
     "type": "basic",
@@ -642,8 +618,8 @@
       }
     },
     "position": {
-      "x": -206.839661001107,
-      "y": 954.055575798662
+      "x": 0.08889822620079713,
+      "y": 1044.7639853229612
     },
     "parentId": null
   },
@@ -664,7 +640,7 @@
         "type": {
          "type": "<class 'inspect._empty'>"
         },
-        "position": "left"
+        "position": "bottom"
       }
     },
     "outputs": {
@@ -673,7 +649,7 @@
         "type": {
           "type": "None"
         },
-        "position": "right"
+        "position": "top"
       }
     },
     "type": "basic",
@@ -784,7 +760,7 @@
         "type": {
           "type": "None"
         },
-        "position": "right"
+        "position": "top"
       }
     },
     "type": "basic",
@@ -792,8 +768,8 @@
       }
     },
     "position": {
-      "x": -202.83662881345157,
-      "y": 1123.8190429357237
+      "x": 233.69759202223884,
+      "y": 1041.6145468043276
     },
     "parentId": null
   },
@@ -849,7 +825,7 @@
         "type": {
           "type": "None"
         },
-        "position": "right"
+        "position": "top"
       }
     },
     "type": "basic",
@@ -857,8 +833,8 @@
       }
     },
     "position": {
-      "x": -192.5486444668937,
-      "y": 1414.696184081429
+      "x": 513.2761671440603,
+      "y": 1034.8547191984255
     },
     "parentId": null
   }
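Two things change in the workspace JSON above: the connector anchors move from the left/right sides to top/bottom (the "move outputs on top" half of the commit message), and the RAG chatbot's four integer limit parameters are collapsed into a single JSON-encoded string parameter, limits_by_type. A minimal sketch of how that string value round-trips, using the exact value from the diff:

import json

raw = '{"information": [2, 3], "summary": [2, 3]}'
limits = json.loads(raw)
assert limits == {'information': [2, 3], 'summary': [2, 3]}  # [min, max] per answer type

The "[min, max] per answer type" reading follows from the parameters this string replaces (min_information/max_information, min_summary/max_summary).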
requirements.txt CHANGED
@@ -2,6 +2,7 @@ fastapi
 matplotlib
 networkx
 numpy
+orjson
 pandas
 scipy
 uvicorn[standard]
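orjson is pulled in for the cache-key change in server/executors/one_by_one.py below: orjson.dumps returns compact bytes and accepts a default= callback for values it cannot encode, which makes its output directly usable as a dictionary key. A quick illustration:

import orjson

key = orjson.dumps({'x': 1, 'y': [2, 3]})
assert key == b'{"x":1,"y":[2,3]}'  # bytes are hashable, so this can key a dict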
server/executors/one_by_one.py CHANGED
@@ -1,8 +1,8 @@
 from .. import ops
 from .. import workspace
-import fastapi
-import json
+import orjson
 import pandas as pd
+import pydantic
 import traceback
 import inspect
 import typing
@@ -63,6 +63,15 @@ def get_stages(ws, catalog):
         stages.append(set(nodes))
     return stages
 
+
+def _default_serializer(obj):
+    if isinstance(obj, pydantic.BaseModel):
+        return obj.dict()
+    return {"__nonserializable__": id(obj)}
+
+def make_cache_key(obj):
+    return orjson.dumps(obj, default=_default_serializer)
+
 EXECUTOR_OUTPUT_CACHE = {}
 
 def execute(ws, catalog, cache=None):
@@ -101,10 +110,10 @@ def execute(ws, catalog, cache=None):
         inputs = [
             batch_inputs[(n, i.name)] if i.position in 'top or bottom' else task
             for i in op.inputs.values()]
-        if cache:
-            key = json.dumps(fastapi.encoders.jsonable_encoder((inputs, params)))
+        if cache is not None:
+            key = make_cache_key((inputs, params))
             if key not in cache:
-                cache[key] = op.func(*inputs, **params)
+                cache[key] = op(*inputs, **params)
             result = cache[key]
         else:
             result = op(*inputs, **params)
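Two fixes are bundled in this hunk: `if cache:` becomes `if cache is not None:` (an empty dict is falsy, so a fresh cache was previously never populated at all), and the cached branch now calls op(...) like the uncached branch instead of op.func(...). The id()-based fallback in _default_serializer carries a deliberate trade-off, sketched below with a hypothetical Opaque class:

import orjson

def _default_serializer(obj):
    # same fallback as in the diff: unencodable objects are keyed by identity
    return {'__nonserializable__': id(obj)}

class Opaque:
    pass

a, b = Opaque(), Opaque()
# Two distinct live objects can never collide on one cache entry...
assert orjson.dumps(a, default=_default_serializer) != orjson.dumps(b, default=_default_serializer)
# ...but a logically equal object created later gets a different id, i.e. a cache miss.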
server/lynxscribe_ops.py CHANGED
@@ -14,22 +14,27 @@ from lynxscribe.components.chat_api import ChatAPI, ChatAPIRequest, ChatAPIRespo
 
 from . import ops
 import asyncio
+import json
 from .executors import one_by_one
 
 ENV = 'LynxScribe'
 one_by_one.register(ENV)
 op = ops.op_registration(ENV)
+output_on_top = ops.output_position(output="top")
 
+@output_on_top
 @op("Vector store")
 def vector_store(*, name='chromadb', collection_name='lynx'):
     vector_store = get_vector_store(name=name, collection_name=collection_name)
     return {'vector_store': vector_store}
 
+@output_on_top
 @op("LLM")
 def llm(*, name='openai'):
     llm = get_llm_engine(name=name)
     return {'llm': llm}
 
+@output_on_top
 @ops.input_position(llm="bottom")
 @op("Text embedder")
 def text_embedder(llm, *, model='text-embedding-ada-002'):
@@ -37,6 +42,7 @@ def text_embedder(llm, *, model='text-embedding-ada-002'):
     text_embedder = TextEmbedder(llm=llm, model=model)
     return {'text_embedder': text_embedder}
 
+@output_on_top
 @ops.input_position(vector_store="bottom", text_embedder="bottom")
 @op("RAG graph")
 def rag_graph(vector_store, text_embedder):
@@ -47,6 +53,7 @@ def rag_graph(vector_store, text_embedder):
     )
     return {'rag_graph': rag_graph}
 
+@output_on_top
 @op("Scenario selector")
 def scenario_selector(*, scenario_file: str, node_types='intent_cluster'):
     scenarios = load_config(scenario_file)
@@ -59,28 +66,31 @@ def scenario_selector(*, scenario_file: str, node_types='intent_cluster'):
 
 DEFAULT_NEGATIVE_ANSWER = "I'm sorry, but the data I've been trained on does not contain any information related to your question."
 
+@output_on_top
 @ops.input_position(rag_graph="bottom", scenario_selector="bottom", llm="bottom")
 @op("RAG chatbot")
 def rag_chatbot(
         rag_graph, scenario_selector, llm, *,
         negative_answer=DEFAULT_NEGATIVE_ANSWER,
-        min_information=2, max_information=3,
-        min_summary=2, max_summary=3,
+        limits_by_type='{}',
         strict_limits=True, max_results=5):
     rag_graph = rag_graph[0]['rag_graph']
     scenario_selector = scenario_selector[0]['scenario_selector']
     llm = llm[0]['llm']
+    limits_by_type = json.loads(limits_by_type)
     rag_chatbot = RAGChatbot(
         rag_graph=rag_graph,
         scenario_selector=scenario_selector,
         llm=llm,
         negative_answer=negative_answer,
-        limits_by_type=dict(information=[min_information, max_information], summary=[min_summary, max_summary]),
+        limits_by_type=limits_by_type,
         strict_limits=strict_limits,
         max_results=max_results,
     )
     return {'chatbot': rag_chatbot}
 
+@output_on_top
+@ops.input_position(processor="bottom")
 @op("Chat processor")
 def chat_processor(processor, *, _ctx: one_by_one.Context):
     cfg = _ctx.last_result or {'question_processors': [], 'answer_processors': [], 'masks': []}
@@ -98,10 +108,12 @@ def chat_processor(processor, *, _ctx: one_by_one.Context):
     chat_processor = ChatProcessor(question_processors=question_processors, answer_processors=answer_processors)
     return {'chat_processor': chat_processor, **cfg}
 
+@output_on_top
 @op("Truncate history")
 def truncate_history(*, max_tokens=10000, language='English'):
     return {'question_processor': TruncateHistory(max_tokens=max_tokens, language=language.lower())}
 
+@output_on_top
 @op("Mask")
 def mask(*, name='', regex='', exceptions='', mask_pattern=''):
     exceptions = [e.strip() for e in exceptions.split(',') if e.strip()]
@@ -119,11 +131,13 @@ def test_chat_api(message, chat_api):
 def input_chat(*, chat: str):
     return {'text': chat}
 
-@ops.input_position(chatbot="bottom", chat_processor="bottom")
+@output_on_top
+@ops.input_position(chatbot="bottom", chat_processor="bottom", knowledge_base="bottom")
 @op("Chat API")
 def chat_api(chatbot, chat_processor, knowledge_base, *, model='gpt-4o-mini'):
     chatbot = chatbot[0]['chatbot']
     chat_processor = chat_processor[0]['chat_processor']
+    knowledge_base = knowledge_base[0]
     c = ChatAPI(
         chatbot=chatbot,
         chat_processor=chat_processor,
@@ -134,6 +148,7 @@ def chat_api(chatbot, chat_processor, knowledge_base, *, model='gpt-4o-mini'):
     c.chatbot.scenario_selector.check_compatibility(c.chatbot.rag_graph)
     return {'chat_api': c}
 
+@output_on_top
 @op("Knowledge base")
 def knowledge_base(*, nodes_path='nodes.pickle', edges_path='edges.pickle', template_cluster_path='tempclusters.pickle'):
     return {'nodes_path': nodes_path, 'edges_path': edges_path, 'template_cluster_path': template_cluster_path}
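This file is the workspace-facing half of the commit title: output_on_top = ops.output_position(output="top") is applied to every registered op, which is what rewrote all the "position": "right" output anchors to "top" in data/LynxScribe demo above. For a newly registered op the pattern would look like this (hypothetical op name and parameter, shown for illustration only; ops.output_position is assumed to mirror ops.input_position, as this diff suggests):

@output_on_top
@op("My op")
def my_op(*, greeting='hello'):
    return {'output': greeting}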