Commit f4b99cd
Parent(s): 6a9c224

Remove dependence on response annotators and replace them with data transformations.

Files changed:
- OpenAIChatAtomicFlow.py (+3, -52)
- OpenAIChatAtomicFlow.yaml (+2, -1)
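The commit removes the `MessageAnnotator`-based response parsing (`_set_up_response_annotators`, `_response_parsing`, `_get_annotator_with_key`) and instead builds `input_data_transformations` and `output_data_transformations` from the config. The `_set_up_data_transformations` helper that `instantiate_from_config` now calls is not defined in this diff (it presumably lives on the base class); below is a minimal sketch of what it could look like, assuming it mirrors the hydra-based instantiation of the removed `_set_up_response_annotators`:

# Hypothetical sketch: this helper is called in the diff below, but its
# definition is not part of this commit (it presumably lives on the base
# class). Assumes it mirrors the removed _set_up_response_annotators.
from copy import deepcopy
from typing import Dict, List, Optional

import hydra.utils


def _set_up_data_transformations(data_transformation_configs: Optional[List[Dict]]):
    data_transformations = []
    if data_transformation_configs is not None:
        for transformation_config in deepcopy(data_transformation_configs):
            # Each list entry is assumed to be a hydra-instantiable config dict.
            data_transformations.append(
                hydra.utils.instantiate(transformation_config, _convert_="partial")
            )
    return data_transformations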
OpenAIChatAtomicFlow.py
CHANGED

@@ -10,14 +10,12 @@ from langchain import PromptTemplate
 import langchain
 from langchain.schema import HumanMessage, AIMessage, SystemMessage
 
-from flows.message_annotators.abstract import MessageAnnotator
 from flows.base_flows.abstract import AtomicFlow
 from flows.datasets import GenericDemonstrationsDataset
 
 from flows import utils
 from flows.messages.flow_message import UpdateMessage_ChatMessage
 from flows.utils.caching_utils import flow_run_cache
-from flows.utils.general_helpers import validate_parameters
 
 log = utils.get_pylogger(__name__)
 
@@ -40,7 +38,6 @@ class OpenAIChatAtomicFlow(AtomicFlow):
     query_message_prompt_template: Optional[PromptTemplate] = None
     demonstrations: GenericDemonstrationsDataset = None
     demonstrations_response_template: PromptTemplate = None
-    response_annotators: Optional[Dict[str, MessageAnnotator]] = {}
 
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
@@ -57,10 +54,6 @@ class OpenAIChatAtomicFlow(AtomicFlow):
         super().set_up_flow_state()
         self.flow_state["previous_messages"] = []
 
-    @classmethod
-    def _validate_parameters(cls, kwargs):
-        validate_parameters(cls, kwargs)
-
     @classmethod
     def _set_up_prompts(cls, config):
         kwargs = {}
@@ -84,21 +77,13 @@ class OpenAIChatAtomicFlow(AtomicFlow):
         #
         # return kwargs
 
-    @classmethod
-    def _set_up_response_annotators(cls, config):
-        response_annotators = config.get("response_annotators", {})
-        response_annotators = deepcopy(response_annotators)
-        if len(response_annotators) > 0:
-            for key, config in response_annotators.items():
-                response_annotators[key] = hydra.utils.instantiate(config, _convert_="partial")
-        return {"response_annotators": response_annotators}
-
     @classmethod
     def instantiate_from_config(cls, config):
         flow_config = deepcopy(config)
 
         kwargs = {"flow_config": flow_config}
-        kwargs["
+        kwargs["input_data_transformations"] = cls._set_up_data_transformations(config["input_data_transformations"])
+        kwargs["output_data_transformations"] = cls._set_up_data_transformations(config["output_data_transformations"])
 
         # ~~~ Set up prompts ~~~
         kwargs.update(cls._set_up_prompts(flow_config))
@@ -106,9 +91,6 @@ class OpenAIChatAtomicFlow(AtomicFlow):
         # # ~~~ Set up demonstration templates ~~~
         # kwargs.update(cls._set_up_demonstration_templates(flow_config))
 
-        # ~~~ Set up response annotators ~~~
-        kwargs.update(cls._set_up_response_annotators(flow_config))
-
         # ~~~ Instantiate flow ~~~
         return cls(**kwargs)
 
@@ -142,30 +124,6 @@ class OpenAIChatAtomicFlow(AtomicFlow):
         # input_variables = self.demonstrations_response_template.input_variables
         # return self.demonstrations_response_template.format(**{k: sample_data[k] for k in input_variables}), []
 
-    def _get_annotator_with_key(self, key: str):
-        for _, ra in self.response_annotators.items():
-            if ra.key == key:
-                return ra
-
-    def _response_parsing(self, response: str, output_keys: List[str]):
-        target_annotators = [ra for _, ra in self.response_annotators.items() if ra.key in output_keys]
-
-        parsed_outputs = {}
-        for ra in target_annotators:
-            parsed_out = ra(response)
-            parsed_outputs.update(parsed_out)
-
-        if "raw_response" in output_keys:
-            parsed_outputs["raw_response"] = response
-        else:
-            log.warning("The raw response is not logged because it was not requested as per the expected output.")
-
-        if len(parsed_outputs) == 0:
-            raise Exception(f"The output dictionary is empty. "
-                            f"None of the expected outputs: `{str(output_keys)}` were found.")
-
-        return parsed_outputs
-
     # def _add_demonstrations(self):
     # if self.demonstrations is not None:
     #     for example in self.demonstrations:
@@ -288,11 +246,4 @@ class OpenAIChatAtomicFlow(AtomicFlow):
             content=response
         )
 
-
-        output_data = self._response_parsing(
-            response=response,
-            output_keys=input_data["output_keys"]
-        )
-        # self._state_update_dict(update_data=output_data)  # ToDo: Is this necessary? When?
-
-        return output_data
+        return response
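With `_response_parsing` gone, `run` now returns the raw response string, and any parsing is expected to happen through the configured data transformations instead. The transformation interface itself is not part of this diff; a sketch of the assumed contract, where each transformation is a callable from data dict to data dict applied in order:

# Sketch of the assumed contract only: each configured transformation is a
# callable mapping a data dict to a data dict, applied in sequence.
from typing import Callable, Dict, List


def apply_data_transformations(data: Dict, transformations: List[Callable[[Dict], Dict]]) -> Dict:
    for transformation in transformations:
        data = transformation(data)
    return data


# Hypothetical usage with the raw response that run() now returns:
# output_data = apply_data_transformations({"raw_response": response},
#                                          output_data_transformations)

This keeps the flow itself parser-agnostic: what used to be annotator classes keyed by output name becomes an ordered pipeline applied to the flow's inputs and outputs.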
OpenAIChatAtomicFlow.yaml
CHANGED

@@ -18,7 +18,7 @@ system_name: system
 user_name: user
 assistant_name: assistant
 
-
+output_data_transformations: []
 
 system_message_prompt_template:
   _target_: langchain.PromptTemplate
@@ -35,6 +35,7 @@ human_message_prompt_template:
     - "query"
   template_format: jinja2
 default_human_input_key: "query"
+input_data_transformations: []
 
 query_message_prompt_template:
   _target_: langchain.PromptTemplate
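Both new keys default to empty lists, so existing configs remain valid. A populated entry would presumably follow the same hydra `_target_` convention as the prompt templates in this file; the class path and arguments below are illustrative assumptions, not something this commit defines:

# Illustrative only: the _target_ path and its arguments are assumptions,
# not defined by this commit.
output_data_transformations:
  - _target_: flows.data_transformations.KeyRename
    old_key2new_key:
      raw_response: answer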