from .node import Node
from .portsSpecSchema import NodePorts, ConfSchema
__all__ = ['Output_Collector', 'OUTPUT_ID', 'OUTPUT_TYPE']
class Output_Collector(Node):
def meta_setup(self):
return super().meta_setup()
def ports_setup(self):
return NodePorts(inports={}, outports={})
def conf_schema(self):
return ConfSchema()
def process(self, inputs):
return super().process(inputs)
# TODO: DO NOT RELY ON special OUTPUT_ID.
# OUTPUT_ID = 'f291b900-bd19-11e9-aca3-a81e84f29b0f_uni_output'
OUTPUT_ID = 'collector_id_fd9567b6'
OUTPUT_TYPE = Output_Collector.__name__
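# Hedged usage sketch (not part of the original module): a task spec referencing
# the output collector through OUTPUT_ID/OUTPUT_TYPE, similar to what
# TaskGraph._run builds internally. The upstream "some_task.out_port" input is
# an illustrative assumption.
def _example_output_collector_task_spec():
    from .taskSpecSchema import TaskSpecSchema
    return {
        TaskSpecSchema.task_id: OUTPUT_ID,
        TaskSpecSchema.node_type: OUTPUT_TYPE,
        TaskSpecSchema.conf: {},
        # TaskGraph._run passes a list of "taskid.oport" strings as inputs
        TaskSpecSchema.inputs: ['some_task.out_port'],
    }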
# === end of greenflow/greenflow/dataframe_flow/output_collector_node.py (repo: fsi-samples-main) ===
from greenflow.dataframe_flow.portsSpecSchema import NodePorts
from greenflow.dataframe_flow.metaSpec import MetaData
__all__ = ['TemplateNodeMixin']
class TemplateNodeMixin:
'''This mixin is used with Nodes that use attributes for managing ports
and meta.
:ivar port_inports: Ports dictionary for input ports.
:ivar port_outports: Ports dictionary for output ports.
:ivar meta_inports: Metadata dictionary for input ports.
:ivar meta_outports: Metadata dictionary for output ports.
'''
def init(self):
"""
Used to initialize the Node. Called from the node constructor.
All children should run the parent init first in their constructor, e.g.
def init(self):
TemplateNodeMixin.init(self)
....
In this function, define the static ports and meta setup. Note,
only static information can be used, including the self.conf
information. If information from
self.get_connected_inports() and self.get_input_meta() is needed,
please define it in the update() function instead.
Define the template ports setup via self.template_ports_setup.
E.g.
port_inports = {
"port0_name": {
PortsSpecSchema.port_type: ["type0", "type1"]
},
"port1_name": {
PortsSpecSchema.port_type: "${conf:some_type}",
PortsSpecSchema.dynamic: {
# choice can be True/False, a list of types, or a string
# True: generate outports matching the
# connected dynamic input ports, using
# the same type as the dynamic port
# False: do not generate matching outports
# list of types or string: same as the True condition,
# but use the specified types
PortsSpecSchema.DYN_MATCH: ['type0', 'type1']
}
},
...
}
port_outports = {
"port0_name": {
PortsSpecSchema.port_type: ["type0", "type1"]
},
"port1_name": {
PortsSpecSchema.port_type: "${port:port0_name}"
},
...
}
self.template_ports_setup(in_ports=port_inports,
out_ports=port_outports)
Define the template metadata setup via self.template_meta_setup.
E.g.
meta_inports = {
"port0_name": {
"name0": "type0",
"name1": "type1",
"name2": "type2",
},
"port1_name": {
"${conf:abc}": "type0",
"name1": "type1",
"name2": "${conf:type1}",
},
...
}
meta_outports = {
"port0_name": {
MetaDataSchema.META_REF_INPUT: "port0_name",
MetaDataSchema.META_OP: MetaDataSchema.META_OP_ADDITION,
MetaDataSchema.META_DATA: {
"${conf:abc}": "type0",
"name1": "type1",
"name2": "${conf:type1}",
},
# order is optional
MetaDataSchema.META_ORDER: {
"${conf:abc}": 0,
"name2": -1
}
},
"port1_name": {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
MetaDataSchema.META_DATA: {
"${conf:abc}": "type0",
"name1": "type1",
"name2": "${conf:type1}",
},
# order is optional
MetaDataSchema.META_ORDER: {
"${conf:abc}": -1,
}
},
...
}
self.template_meta_setup(in_ports=meta_inports,
out_ports=meta_outports)
"""
if not hasattr(self, '__port_inports'):
self.__port_inports = {}
if not hasattr(self, '__port_outports'):
self.__port_outports = {}
if not hasattr(self, '__meta_inports'):
self.__meta_inports = {}
if not hasattr(self, '__meta_outports'):
self.__meta_outports = {}
def template_ports_setup(self, in_ports=None, out_ports=None):
if in_ports is not None:
self.__port_inports = in_ports
if out_ports is not None:
self.__port_outports = out_ports
return NodePorts(inports=self.__port_inports,
outports=self.__port_outports)
def template_meta_setup(self, in_ports=None, out_ports=None):
if in_ports is not None:
self.__meta_inports = in_ports
if out_ports is not None:
self.__meta_outports = out_ports
return MetaData(inports=self.__meta_inports,
outports=self.__meta_outports)
def update(self):
'''Updates state of a Node with resolved ports and meta.
'''
ports_template = \
NodePorts(inports=self.__port_inports,
outports=self.__port_outports)
ports = self._resolve_ports(ports_template)
port_inports = ports.inports
meta_template = \
MetaData(inports=self.__meta_inports,
outports=self.__meta_outports)
meta = self._resolve_meta(meta_template, port_inports)
self.__port_inports = ports.inports
self.__port_outports = ports.outports
self.__meta_inports = meta.inports
self.__meta_outports = meta.outports
def ports_setup(self):
ports = NodePorts(inports=self.__port_inports,
outports=self.__port_outports)
return self.ports_setup_ext(ports)
def meta_setup(self):
meta = MetaData(inports=self.__meta_inports,
outports=self.__meta_outports)
return self.meta_setup_ext(meta)
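# Hedged usage sketch (not part of the original module): a minimal class using
# TemplateNodeMixin as described in the init() docstring above. In practice the
# mixin is combined with a greenflow Node subclass; the port name, column names,
# and the use of `dict` as a port type are illustrative assumptions only.
class _ExampleTemplateNode(TemplateNodeMixin):

    def init(self):
        TemplateNodeMixin.init(self)
        from greenflow.dataframe_flow.portsSpecSchema import PortsSpecSchema
        from greenflow.dataframe_flow.metaSpec import MetaDataSchema
        port_inports = {
            'in': {PortsSpecSchema.port_type: dict},
        }
        port_outports = {
            'out': {PortsSpecSchema.port_type: dict},
        }
        meta_inports = {
            'in': {'column0': 'float64'},
        }
        meta_outports = {
            'out': {
                MetaDataSchema.META_REF_INPUT: 'in',
                MetaDataSchema.META_OP: MetaDataSchema.META_OP_ADDITION,
                MetaDataSchema.META_DATA: {'column1': 'float64'},
            },
        }
        self.template_ports_setup(in_ports=port_inports,
                                  out_ports=port_outports)
        self.template_meta_setup(in_ports=meta_inports,
                                 out_ports=meta_outports)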
# === end of greenflow/greenflow/dataframe_flow/template_node_mixin.py (repo: fsi-samples-main) ===
import warnings
import copy
import traceback
import cloudpickle
import base64
from types import ModuleType
from collections import OrderedDict
import ruamel.yaml
from ._node_flow import _CLEANUP
from .task import Task
from .taskSpecSchema import TaskSpecSchema
from .portsSpecSchema import PortsSpecSchema
from .util import get_encoded_class
from .config_nodes_modules import get_node_obj
from .output_collector_node import (Output_Collector, OUTPUT_TYPE, OUTPUT_ID)
__all__ = ['TaskGraph', 'Output_Collector']
server_task_graph = None
def add_module_from_base64(module_name, class_str):
class_obj = cloudpickle.loads(base64.b64decode(class_str))
class_name = class_obj.__name__
import sys
if module_name in sys.modules:
mod = sys.modules[module_name]
else:
mod = ModuleType(module_name)
sys.modules[module_name] = mod
setattr(mod, class_name, class_obj)
return class_obj
class Results(object):
def __init__(self, values):
self.values = tuple([i[1] for i in values])
self.__keys = tuple([i[0] for i in values])
self.__dict = OrderedDict(values)
def __iter__(self):
return iter(self.values)
def __getitem__(self, key):
if isinstance(key, int):
return self.values[key]
else:
return self.__dict[key]
def __len__(self):
return len(self.values)
def __repr__(self):
return "Results"+self.__dict.__repr__()[11:]
def __str__(self):
return "Results"+self.__dict.__str__()[11:]
def __contains__(self, key):
return True if key in self.__dict else False
def get_keys(self):
return self.__keys
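# Hedged usage sketch (not part of the original module): Results supports both
# positional and key-based access. The "taskid.port" keys and values below are
# illustrative assumptions.
def _example_results_access():
    res = Results([('task_a.out', 1), ('task_b.out', 2)])
    assert res[0] == 1               # positional access into .values
    assert res['task_b.out'] == 2    # access by "taskid.port" key
    assert 'task_a.out' in res
    return list(res)                 # iterating yields the values: [1, 2]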
def formated_result(result):
import ipywidgets as widgets
from IPython.display import display
from ipywidgets import Output
outputs = [Output() for i in range(len(result))]
for i in range(len(result)):
with outputs[i]:
display(result[i])
tab = widgets.Tab()
tab.children = outputs
for i in range(len(result)):
tab.set_title(i, result.get_keys()[i])
return tab
class TaskGraph(object):
''' TaskGraph class that is used to store the graph.
'''
__SETUP_YAML_ONCE = False
@staticmethod
def setup_yaml():
'''Set up a yaml representer so OrderedDict entries are written out in order.'''
# https://stackoverflow.com/a/8661021
# https://stackoverflow.com/questions/47692094/lambda-works-
# defined-function-does-not # noqa
# represent_dict_order = lambda dumper, data: \
# dumper.represent_mapping('tag:yaml.org,2002:map', data.items())
def represent_dict_order(dumper, data):
return dumper.represent_mapping('tag:yaml.org,2002:map',
data.items())
ruamel.yaml.add_representer(OrderedDict, represent_dict_order)
TaskGraph.__SETUP_YAML_ONCE = True
def __init__(self, task_spec_list=None):
'''
:param task_spec_list: List of task-spec dicts per TaskSpecSchema.
'''
self.__task_list = {}
self.__node_dict = {}
self.__index = None
# this is the server widget that this taskgraph is associated with
self.__widget = None
error_msg = 'Task-id "{}" already in the task graph. Set '\
'replace=True to replace existing task with extended task.'
self.__extend(task_spec_list=task_spec_list, replace=False,
error_msg=error_msg)
def __extend(self, task_spec_list=None, replace=False, error_msg=None):
tspec_list = dict() if task_spec_list is None else task_spec_list
if error_msg is None:
error_msg = 'Task-id "{}" already in the task graph. Set '\
'replace=True to replace existing task.'
for tspec in tspec_list:
task = Task(tspec)
task_id = task[TaskSpecSchema.task_id]
if task_id in self.__task_list and not replace:
raise Exception(error_msg.format(task_id))
self.__task_list[task_id] = task
if self.__widget is not None:
self.__widget.value = self.export_task_speclist()
def extend(self, task_spec_list=None, replace=False):
'''
Add more task-spec dicts to the graph
:param task_spec_list: List of task-spec dicts per TaskSpecSchema.
'''
error_msg = 'Task-id "{}" already in the task graph. Set '\
'replace=True to replace existing task with extended task.'
self.__extend(task_spec_list=task_spec_list, replace=replace,
error_msg=error_msg)
def __contains__(self, task_id):
return True if task_id in self.__task_list else False
def __len__(self):
return len(self.__task_list)
def __iter__(self):
self.__index = 0
self.__tlist = list(self.__task_list.values())
return self
def __next__(self):
idx = self.__index
if idx is None or idx == len(self.__tlist):
self.__index = None
raise StopIteration
task = self.__tlist[idx]
self.__index = idx + 1
return task
def __getitem__(self, key):
# FIXME: This is inconsistent. Above for __contains__, __iter__, and
# __next__, the returned object is a Task instance. Here however
# the returned object is a Node instance.
if not self.__node_dict:
warnings.warn(
'Task graph internal state empty. Did you build the task '
'graph? Run ".build()"',
RuntimeWarning)
elif key not in self.__node_dict:
warnings.warn(
'Task graph missing task id "{}". Check the spelling of the '
'task id.'.format(key),
RuntimeWarning)
return self.__node_dict[key]
def __find_roots(self, node, inputs, consider_load=True):
"""
Find the root nodes that `node` depends on.
Arguments
-------
node: Node
the leaf node for which we need to find the dependent input (root) nodes
inputs: list
list used to collect all the root nodes that are found
consider_load: bool
whether to treat nodes that load from a cache file as roots
Returns
-----
None
"""
if (node.visited):
return
node.visited = True
if len(node.inputs) == 0:
inputs.append(node)
return
if consider_load and node.load:
inputs.append(node)
return
for node_in in node.inputs:
inode = node_in['from_node']
self.__find_roots(inode, inputs, consider_load)
def start_labwidget(self):
from IPython.display import display
display(self.draw())
@staticmethod
def register_lab_node(module_name, class_obj):
"""
Register the node class for Greenflowlab. It puts the class_obj
into sys.modules under `module_name`. It registers the node
class in the JupyterLab kernel space, communicates with the
client to populate the add-nodes menus, and syncs with the
JupyterLab server space to register the node class.
The latest registered `class_obj` overwrites the old one.
Arguments
-------
module_name: str
the module name for `class_obj`. It will also be the menu name for
the node. Note, if '.' is used inside 'module_name', the client
will automatically construct hierarchical menus based on '.'
class_obj: Node
The node class that is a subclass of the greenflow 'Node'. It is
usually defined dynamically so it can be registered.
Returns
-----
None
"""
global server_task_graph
if server_task_graph is None:
server_task_graph = TaskGraph()
server_task_graph.start_labwidget()
server_task_graph.register_node(module_name, class_obj)
@staticmethod
def load_taskgraph(filename):
"""
Load a yaml file into a TaskGraph object.
Arguments
-------
filename: str
the filename pointing to the yaml file in the filesystem
Returns
-----
object
the TaskGraph instance
"""
with open(filename) as f:
yaml = ruamel.yaml.YAML(typ='safe')
yaml.constructor.yaml_constructors[
u'tag:yaml.org,2002:timestamp'] = \
yaml.constructor.yaml_constructors[u'tag:yaml.org,2002:str']
obj = yaml.load(f)
t = TaskGraph(obj)
return t
def export_task_speclist(self):
tlist_od = [] # task list ordered
for task in self:
tod = OrderedDict([(TaskSpecSchema.task_id, 'idholder'),
(TaskSpecSchema.node_type, 'typeholder'),
(TaskSpecSchema.conf, 'confholder'),
(TaskSpecSchema.inputs, 'inputsholder')
])
tod.update(task._task_spec)
if not isinstance(tod[TaskSpecSchema.node_type], str):
tod[TaskSpecSchema.node_type] = tod[
TaskSpecSchema.node_type].__name__
tlist_od.append(tod)
return tlist_od
def save_taskgraph(self, filename):
"""
Write a list of tasks i.e. taskgraph to a yaml file.
Arguments
-------
filename: str
The filename to write a yaml file to.
"""
if not TaskGraph.__SETUP_YAML_ONCE:
TaskGraph.setup_yaml()
# we want -id to be first in the resulting yaml file.
tlist_od = self.export_task_speclist()
with open(filename, 'w') as fh:
ruamel.yaml.dump(tlist_od, fh, default_flow_style=False)
def viz_graph(self, show_ports=False, pydot_options=None):
"""
Generate the visualization of the graph for JupyterLab.
Returns
-----
nx.DiGraph
"""
import networkx as nx
G = nx.DiGraph()
if pydot_options:
G.graph['graph'] = pydot_options
# instantiate objects
for itask in self:
task_inputs = itask[TaskSpecSchema.inputs]
to_task = itask[TaskSpecSchema.task_id]
to_type = itask[TaskSpecSchema.node_type]
if to_task == "":
to_task = OUTPUT_TYPE
for iport_or_tid in task_inputs:
# iport_or_tid: it is either to_port or task id (tid) b/c
# if using ports API task_inputs is a dictionary otherwise
# task_inputs is a list.
taskin_and_oport = task_inputs[iport_or_tid] \
if isinstance(task_inputs, dict) else iport_or_tid
isplit = taskin_and_oport.split('.')
from_task = isplit[0]
from_port = isplit[1] if len(isplit) > 1 else None
if show_ports and from_port is not None:
to_port = iport_or_tid
common_tip = taskin_and_oport
G.add_edge(from_task, common_tip, label=from_port)
G.add_edge(common_tip, to_task, label=to_port)
tnode = G.nodes[common_tip]
tnode.update({
# 'label': '',
'shape': 'point'})
else:
G.add_edge(from_task, to_task)
# draw output ports
if show_ports:
if (to_type == OUTPUT_TYPE):
continue
task_node = get_node_obj(itask, tgraph_mixin=True)
# task_outputs = itask.get(TaskSpecSchema.outputs, [])
for pout in task_node._get_output_ports():
out_tip = '{}.{}'.format(
itask[TaskSpecSchema.task_id], pout)
G.add_edge(to_task, out_tip, label=pout)
tnode = G.nodes[out_tip]
tnode.update({
# 'label': '',
'shape': 'point'})
return G
def _build(self, replace=None, profile=False):
"""
Compute the graph structure of the nodes. It will set the input and
output nodes for each of the nodes.
Arguments
-------
replace: dict
conf parameters replacement
"""
self.__node_dict.clear()
replace = dict() if replace is None else replace
# check if there are item in the replace that is not in the graph
task_ids = set([task[TaskSpecSchema.task_id] for task in self])
for rkey in replace.keys():
if rkey not in task_ids:
warnings.warn(
'Replace task-id {} not found in task-graph'.format(rkey),
RuntimeWarning)
# instantiate node objects
for task in self:
task_id = task[TaskSpecSchema.task_id]
nodetype = task[TaskSpecSchema.node_type]
nodetype = nodetype if isinstance(nodetype, str) else \
nodetype.__name__
if nodetype == OUTPUT_TYPE:
output_task = task
node = get_node_obj(output_task, tgraph_mixin=True)
else:
node = get_node_obj(task, replace.get(task_id), profile,
tgraph_mixin=True)
self.__node_dict[task_id] = node
# build the graph
for task_id in self.__node_dict:
node = self.__node_dict[task_id]
task_inputs = node._task_obj[TaskSpecSchema.inputs]
for iport in task_inputs:
# node_inputs should be a dict with entries:
# {iport: taskid.oport}
input_task = task_inputs[iport].split('.')
dst_port = iport
input_id = input_task[0]
# src_port = input_task[1] if len(input_task) > 1 else None
src_port = input_task[1]
try:
input_node = self.__node_dict[input_id]
except KeyError:
raise LookupError(
'Missing task "{}". Add task spec to TaskGraph.'
.format(input_id))
node.inputs.append({
'from_node': input_node,
'from_port': src_port,
'to_port': dst_port
})
# input_node.outputs.append(node)
input_node.outputs.append({
'to_node': node,
'to_port': dst_port,
'from_port': src_port
})
def build(self, replace=None, profile=None):
"""
Compute the graph structure of the nodes. It will set the input and
output nodes for each of the nodes.
Arguments
-------
replace: dict
conf parameters replacement
"""
profile = False if profile is None else profile
# make connection only
self._build(replace=replace, profile=profile)
# Columns type checking is done in the :meth:`TaskGraph._run` after the
# outputs are specified and participating tasks are determined.
# this part is to update each of the node so dynamic inputs can be
# processed
self.breadth_first_update()
def breadth_first_update(self, extra_roots=None, extra_updated=None):
"""
Do a breadth first graph traversal and update nodes.
Update each node following the causal order. A child node is
only added to the queue once all of its parents have been updated.
Each node is only updated once.
extra_roots and extra_updated should be empty for a normal graph. They
are used for composite nodes when the graph is connected to another
graph.
"""
queue = []
updated = set() if extra_updated is None else extra_updated
for k in self.__node_dict.keys():
if len(self.__node_dict[k].inputs) == 0:
queue.append(self.__node_dict[k])
queue.extend(extra_roots if extra_roots is not None else [])
while (len(queue) != 0):
node_to_update = queue.pop(0)
# print('update {}'.format(node_to_update.uid))
if node_to_update not in updated:
node_to_update.update()
updated.add(node_to_update)
for element in node_to_update.outputs:
child = element['to_node']
if all([i['from_node'] in updated for i in child.inputs]):
queue.append(child)
# print('----done----')
def __str__(self):
out_str = ""
for k in self.__node_dict.keys():
out_str += k + ": " + str(self.__node_dict[k]) + "\n"
return out_str
def reset(self):
self.__node_dict.clear()
self.__task_list.clear()
self.__index = None
def register_node(self, module_name, classObj):
"""
Check `TaskGraph.register_lab_node`
"""
if self.__widget is not None:
encoded_class = get_encoded_class(classObj)
cacheCopy = copy.deepcopy(self.__widget.cache)
cacheCopy['register'] = {
"module": module_name,
"class": encoded_class
}
add_module_from_base64(module_name, encoded_class)
self.__widget.cache = cacheCopy
def _run(self, outputs=None, replace=None, profile=None, formated=False,
build=True):
replace = dict() if replace is None else replace
if build:
self.build(replace, profile)
else:
if replace:
warnings.warn(
'Replace is specified, but build is set to False. No '
'replacement will be done. Either set build=True or '
'first build with replace then call run.',
RuntimeWarning)
if profile is not None:
warnings.warn(
'Profile is specified, but build is set to False. '
'Profile will be done according to last build. '
'Alternatively either set build=True or first build with '
'desired profile option then call run.',
RuntimeWarning)
# Reset visited status to run the taskgraph. This is done during
# build, but since not building need to reset here.
for inode in self.__node_dict.values():
inode.visited = False
using_output_node = False
if outputs is None:
graph_outputs = []
outputs = graph_outputs # reference copy
# Find the output collector in the task graph.
for task in self:
# FIXME: Note the inconsistency of getting a task
# "for task in self" yet also retrieving a node
# via "self[task_id]".
node = self[task[TaskSpecSchema.task_id]]
if node.node_type_str == OUTPUT_TYPE:
using_output_node = True
outputs_collector_node = node
for input_item in outputs_collector_node.inputs:
from_node_id = input_item['from_node'].uid
fromStr = from_node_id + '.' + input_item['from_port']
graph_outputs.append(fromStr)
break
if not using_output_node:
warnings.warn(
'Outputs not specified and output collector not found '
'in the task graph. Nothing will be run.',
RuntimeWarning)
result = Results([])
if formated:
return formated_result(result)
else:
return result
if using_output_node:
# This is rewiring the graph which should not be needed.
# clean all the connections to this output node
# for inode in self.__node_dict.values():
# inode.outputs = list(filter(
# lambda x: x['to_node'] != outputs_collector_node,
# inode.outputs))
outputs_collector_node.input_df.clear()
else:
# This does make it possible to temporarily have 2 output
# collectors in a task graph. This 2nd collector is cleaned up.
output_task = Task({
# Use a slightly different uid to differentiate from an
# output node that might be part of the task graph.
TaskSpecSchema.task_id: '{}-outspec'.format(OUTPUT_ID),
TaskSpecSchema.conf: {},
TaskSpecSchema.node_type: Output_Collector,
TaskSpecSchema.inputs: outputs
})
outputs_collector_node = get_node_obj(output_task,
tgraph_mixin=True)
for task_id in outputs:
nodeid_oport = task_id.split('.')
nodeid = nodeid_oport[0]
oport = nodeid_oport[1]
try:
onode = self.__node_dict[nodeid]
except KeyError as err:
raise RuntimeError('Missing nodeid: {}. Did you build the '
'task graph?'.format(nodeid)) from err
dummy_port = task_id
outputs_collector_node.inputs.append({
'from_node': onode,
'from_port': oport,
'to_port': dummy_port
})
onode.outputs.append({
'to_node': outputs_collector_node,
'to_port': dummy_port,
'from_port': oport
})
outputs_collector_node.clear_input = False
inputs = []
self.__find_roots(outputs_collector_node, inputs, consider_load=True)
# Validate metadata prior to running heavy compute
for node in self.__node_dict.values():
if not node.visited:
continue
# Run ports validation.
PortsSpecSchema.validate_ports(node.ports_setup())
node.validate_connected_ports()
# Run meta setup in case the required meta are calculated
# within the meta_setup and are NodeTaskGraphMixin dependent.
# node.meta_setup()
node.validate_connected_metadata()
if self.__widget is not None:
def progress_fun(uid):
cacheCopy = copy.deepcopy(self.__widget.cache)
nodes = list(filter(lambda x: x['id'] == uid,
cacheCopy['nodes']
if 'nodes' in cacheCopy else []))
if len(nodes) > 0:
current_node = nodes[0]
current_node['busy'] = True
self.__widget.cache = cacheCopy
for i in inputs:
i.flow(progress_fun)
# clean up the progress
def cleanup():
import time
cacheCopy = copy.deepcopy(self.__widget.cache)
for node in cacheCopy.get('nodes', []):
node['busy'] = False
time.sleep(1)
self.__widget.cache = cacheCopy
import threading
t = threading.Thread(target=cleanup)
t.start()
else:
for i in inputs:
i.flow()
results_dfs_dict = outputs_collector_node.input_df
port_map = {}
for input_item in outputs_collector_node.inputs:
from_node_id = input_item['from_node'].uid
fromStr = from_node_id + '.' + input_item['from_port']
port_map[fromStr] = input_item['to_port']
results_task_ids = outputs
results = []
for task_id in results_task_ids:
results.append((task_id, results_dfs_dict[port_map[task_id]]))
# clean the results afterwards
outputs_collector_node.input_df.clear()
if not using_output_node:
# Remove the output collector that's not part of the task graph.
for inode in self.__node_dict.values():
inode.outputs = list(filter(
lambda x: x['to_node'] != outputs_collector_node,
inode.outputs))
del outputs_collector_node
# Prevent memory leaks. Clean up the task graph.
for inode in self.__node_dict.values():
# if not inode.visited:
inode.input_df.clear()
result = Results(results)
# Cleanup logic for any plugin that used "register_cleanup".
self.run_cleanup()
if formated:
return formated_result(result)
else:
return result
def run_cleanup(self, ui_clean=False):
for v in _CLEANUP.values():
v(ui_clean)
def run(self, outputs=None, replace=None, profile=None, formated=False,
build=True):
"""
Flow the dataframes in the graph to do the data science computations.
Arguments
-------
outputs: list
a list of output "task_id.port" specifiers for which to return the
final results
replace: dict
a dict that defines the conf parameters replacement
profile: Boolean
whether to profile the processing time of the nodes or not
Returns
-----
tuple
the results corresponding to the outputs list
"""
if formated:
# cap_run = out.capture(clear_output=True)(self._run)
# result = cap_run(outputs=outputs, replace=replace,
# profile=profile, formated=formated)
try:
err = ""
result = None
result = self._run(outputs=outputs, replace=replace,
profile=profile, formated=formated,
build=build)
except Exception:
err = traceback.format_exc()
finally:
import ipywidgets
out = ipywidgets.Output(layout={'border': '1px solid black'})
out.append_stderr(err)
if result is None:
result = ipywidgets.Tab()
result.set_title(len(result.children), 'std output')
result.children = result.children + (out,)
return result
else:
return self._run(outputs=outputs, replace=replace, profile=profile,
formated=formated, build=build)
def to_pydot(self, show_ports=False, pydot_options=None):
import networkx as nx
nx_graph = self.viz_graph(show_ports=show_ports,
pydot_options=pydot_options)
to_pydot = nx.drawing.nx_pydot.to_pydot
pdot = to_pydot(nx_graph)
return pdot
def get_widget(self):
if self.__widget is None:
from greenflowlab.greenflowmodel import GreenflowWidget
widget = GreenflowWidget()
widget.value = self.export_task_speclist()
widget.set_taskgraph(self)
self.__widget = widget
return self.__widget
def del_widget(self):
del self.__widget
self.__widget = None
def draw(self, show='lab', fmt='png', show_ports=False,
pydot_options=None):
'''
:param show: str; One of 'ipynb', 'lab'
:param fmt: str; 'png' or 'svg'. Only used if show='ipynb'
:param show_ports: boolean; Labels intermediate ports between nodes in
the taskgraph. Only used if show='ipynb'
:param pydot_options: dict; Passed to the graph attribute of a graphviz
generated dot graph. Only used when show='ipynb'. Refer to:
https://graphviz.org/doc/info/attrs.html
Example: pydot_options={'rankdir': 'LR'} to draw left-to-right
'''
if show in ('ipynb',):
pdot = self.to_pydot(show_ports, pydot_options)
pdot_out = pdot.create(format=fmt)
if fmt in ('svg',):
from IPython.display import SVG as Image # @UnusedImport
else:
from IPython.display import Image # @Reimport
plt = Image(pdot_out)
return plt
else:
widget = self.get_widget()
return widget
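# Hedged usage sketch (not part of the original class): a typical load/run flow.
# The yaml filename and the "node_a.out_port" output specifier are illustrative
# assumptions; run() builds the graph by default (build=True).
def _example_taskgraph_run():
    task_graph = TaskGraph.load_taskgraph('workflow.yaml')
    # Request a specific "taskid.oport" output instead of relying on an
    # Output_Collector node inside the graph.
    (result,) = task_graph.run(outputs=['node_a.out_port'])
    return result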
# === end of greenflow/greenflow/dataframe_flow/taskGraph.py (repo: fsi-samples-main) ===
from .node import * # noqa: F401,F403
from .taskSpecSchema import * # noqa: F401,F403
from .taskGraph import * # noqa: F401,F403
from .portsSpecSchema import * # noqa: F401,F403
from .metaSpec import * # noqa: F401,F403
import sys
try:
# For python 3.8 and later
import importlib.metadata as importlib_metadata
except ImportError:
# prior to python 3.8 need to install importlib-metadata
import importlib_metadata
# load all the plugins from entry points
for entry_point in \
importlib_metadata.entry_points().get('greenflow.plugin', ()):
mod = entry_point.load()
name = entry_point.name
sys.modules[name] = mod
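# Hedged sketch (not part of the original module): a plugin package could
# advertise a module through the 'greenflow.plugin' entry-point group consumed
# by the loop above. Package and module names here are illustrative assumptions.
#
#     # setup.py of a hypothetical plugin
#     from setuptools import setup
#     setup(
#         name='my_greenflow_plugin',
#         py_modules=['my_plugin_nodes'],
#         entry_points={
#             'greenflow.plugin': ['my_plugin_nodes = my_plugin_nodes'],
#         },
#     )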
# === end of greenflow/greenflow/dataframe_flow/__init__.py (repo: fsi-samples-main) ===
import os
import sys
from pathlib import Path
import copy
import pkgutil
import inspect
from collections import namedtuple
import configparser
import importlib
from .util import get_file_path
from .taskSpecSchema import TaskSpecSchema
from .task import Task
from ._node import _Node
from ._node_flow import NodeTaskGraphMixin
from .output_collector_node import (Output_Collector, OUTPUT_TYPE)
DEFAULT_MODULE = os.getenv('GREENFLOW_PLUGIN_MODULE', "greenflow.plugin_nodes")
MODULE_CACHE = {}
class ConfigParser(configparser.ConfigParser):
"""Can get options() without defaults
"""
def options(self, section, no_defaults=True, **kwargs):
if no_defaults:
try:
return list(self._sections[section].keys())
except KeyError:
raise configparser.NoSectionError(section)
else:
return super().options(section, **kwargs)
def get_greenflow_config_modules():
if 'GREENFLOW_CONFIG' not in os.environ:
os.environ['GREENFLOW_CONFIG'] = os.getcwd() + '/greenflowrc'
print('\nGREENFLOW_CONFIG NOT SET. SETTING TO: {}'
.format(os.environ['GREENFLOW_CONFIG']))
config = ConfigParser(defaults=os.environ)
greenflow_cfg = os.getenv('GREENFLOW_CONFIG', None)
if Path(greenflow_cfg).is_file():
config.read(greenflow_cfg)
if 'ModuleFiles' not in config:
return {}
modules_names = config.options('ModuleFiles')
modules_list = {imod: config['ModuleFiles'][imod]
for imod in modules_names}
return modules_list
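# Hedged sketch (not part of the original module): an illustrative greenflowrc
# file read by get_greenflow_config_modules above. Only the [ModuleFiles]
# section is consumed; the module names and paths below are assumptions.
#
#     [ModuleFiles]
#     my_custom_nodes = /path/to/my_custom_nodes.py
#     my_node_package = /path/to/my_node_package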
# helper function to append a path to sys.path (also run on dask/ray workers)
def append_path(path):
if path not in sys.path:
sys.path.append(path)
def import_submodules(package, recursive=True, _main_package=None):
"""Import all submodules of a module, recursively, including subpackages.
Finds members of those packages. If a member is a greenflow Node subclass
then sets the top level package attribute with the class. This is done so
that the class can be accessed via:
NodeClass = getattr(mod, node_type)
Where mod is the package.
:param package: package (name or actual module)
:type package: module, str
"""
if isinstance(package, str):
package = importlib.import_module(package)
_main_package = package if _main_package is None else _main_package
# for loader, name, is_pkg
for _, name, is_pkg in pkgutil.walk_packages(package.__path__):
full_name = package.__name__ + '.' + name
mod = importlib.import_module(full_name)
if recursive and is_pkg:
# import_submodules(full_name, _main_package=_main_package)
import_submodules(mod, _main_package=_main_package)
for node in inspect.getmembers(mod):
nodecls = node[1]
if not inspect.isclass(nodecls):
continue
if not issubclass(nodecls, _Node):
continue
if nodecls.__name__ == 'Node':
continue
try:
getattr(_main_package, nodecls.__name__)
except AttributeError:
setattr(_main_package, nodecls.__name__, nodecls)
def load_modules(pathfile, name=None):
"""
Given a .py file or a package directory path, load it as a python
module and put it into sys.modules.
@param pathfile
string, file or directory name
@param name
string, optional module name to register the module under
@returns
namedtuple Load(path, mod): the absolute parent directory of the
module and the loaded module
"""
key = (pathfile, name)
if key in MODULE_CACHE:
return MODULE_CACHE[key]
filename = Path(pathfile)
if not filename.exists():
filename = get_file_path(str(filename))
filename = Path(filename)
if name is None:
modulename = filename.stem
else:
modulename = name
module_dir = str(filename.parent.absolute())
if filename.is_dir():
modulepath = filename.joinpath('__init__.py')
modulename = filename.stem
else:
modulepath = filename
spec = importlib.util.spec_from_file_location(modulename, str(modulepath))
mod = importlib.util.module_from_spec(spec)
sys.modules[spec.name] = mod
if filename.is_dir():
import_submodules(mod)
spec.loader.exec_module(mod)
Load = namedtuple("Load", "path mod")
loaded = Load(module_dir, mod)
MODULE_CACHE[key] = loaded
return loaded
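# Hedged usage sketch (not part of the original module): loading a node module
# from a file path and pulling a class out of it. The path and the class name
# 'CustomNode' are illustrative assumptions.
def _example_load_modules():
    loaded = load_modules('/path/to/custom_nodes.py')
    append_path(loaded.path)
    return getattr(loaded.mod, 'CustomNode')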
def get_node_tgraphmixin_instance(NodeClass, task):
'''Instantiate a node using task and NodeClass with NodeTaskGraphMixin
mixin. The returned node instance is a task graph aware node meaning it
can query task graph state such as its connections. Refer to the API of
NodeTaskGraphMixin.
:type NodeClass: a class type of a Node implementation
:type task: an instance of class::Task
'''
class NodeInTaskGraph(NodeTaskGraphMixin, NodeClass):
def __init__(self, task):
NodeClass.__init__(self, task)
NodeTaskGraphMixin.__init__(self)
def __repr__(self):
'''Override repr to show the name and path of the plugin
node class.'''
return '<{} {}.{} object at {}>'.format(
self.__class__.__name__,
NodeClass.__module__,
NodeClass.__name__,
hex(id(self)))
return NodeInTaskGraph(task)
def get_node_obj(task, replace=None, profile=False, tgraph_mixin=False,
dask_ray_setup=True):
"""
Instantiate a node instance for a task given the replacement setup.
Arguments
-------
replace: dict
conf parameters replacement
profile: Boolean
profile the node computation
Returns
-----
object
Node instance
"""
replace = dict() if replace is None else replace
task_spec = copy.copy(task._task_spec)
task_spec.update(replace)
# node_id = task_spec[TaskSpecSchema.task_id]
modulepath = task_spec.get(TaskSpecSchema.filepath)
module_name = task_spec.get(TaskSpecSchema.module)
node_type = task_spec[TaskSpecSchema.node_type]
task = Task(task_spec)
NodeClass = None
module_dir = None
if isinstance(node_type, str):
modules = get_greenflow_config_modules()
if modulepath is not None:
loaded = load_modules(modulepath)
module_dir = loaded.path
mod = loaded.mod
NodeClass = getattr(mod, node_type)
elif (module_name is not None):
if module_name in sys.modules:
mod = sys.modules[module_name]
else:
loaded = load_modules(
modules[module_name], name=module_name)
module_dir = loaded.path
mod = loaded.mod
try:
NodeClass = getattr(mod, node_type)
except AttributeError:
pass
elif node_type == OUTPUT_TYPE:
# Output collector does not reside in default plugins
NodeClass = Output_Collector
else:
try:
global DEFAULT_MODULE
plugmod = os.getenv('GREENFLOW_PLUGIN_MODULE',
DEFAULT_MODULE)
MODLIB = importlib.import_module(plugmod)
NodeClass = getattr(MODLIB, node_type)
except AttributeError:
for key in modules:
loaded = load_modules(modules[key], name=key)
module_dir = loaded.path
mod = loaded.mod
try:
NodeClass = getattr(mod, node_type)
break
except AttributeError:
continue
if NodeClass is None:
raise Exception("Cannot find the Node Class:" +
node_type)
if module_dir:
append_path(module_dir)
if module_dir and dask_ray_setup:
try:
# Add python path to all the client workers
# assume all the workers share the same directory
# structure
import dask.distributed
client = dask.distributed.client.default_client()
client.run(append_path, module_dir)
except (ValueError, ImportError):
pass
try:
import ray
def ray_append_path(worker):
import sys # @Reimport
if module_dir not in sys.path:
sys.path.append(module_dir)
# TODO: This could be a Ray Driver functionality. Add
# module path to all workers.
ray.worker.global_worker.run_function_on_all_workers(
ray_append_path)
except (ValueError, ImportError):
pass
elif issubclass(node_type, _Node):
NodeClass = node_type
else:
raise Exception("Node type not supported: {}".format(node_type))
assert issubclass(NodeClass, _Node), \
'Node-type is not a subclass of "Node" class.'
if tgraph_mixin:
node = get_node_tgraphmixin_instance(NodeClass, task)
else:
node = NodeClass(task)
node.profile = profile
return node
# === end of greenflow/greenflow/dataframe_flow/config_nodes_modules.py (repo: fsi-samples-main) ===
from greenflow._common import _namedtuple_with_defaults
__all__ = ['MetaData', 'MetaDataSchema']
_MetaData = _namedtuple_with_defaults(
'_MetaData',
['inports', 'outports'],
{'inports': dict(), 'outports': dict()}
)
class MetaData(_MetaData):
'''Node metadata must be set up for inputs and outputs. The validation
logic checks whether the metadata passed to the inputs meets the input
requirements, and whether the produced calculation results match the
output metadata.
:ivar inports: Dictionary defining input metadata, which specifies the
input requirements
:ivar outports: Dictionary defining output metadata
Empty dicts default:
metadata = MetaData()
metadata.inports and metadata.outports are empty dicts
Example with port specs:
inports = {
'iport0_name': {
"column0": "float64",
"column1": "float64",
},
'iport1_name': {
"column0": "float64",
"column1": "float64",
"column2": "float64",
}
}
outports = {
'oport0_name': {
"column0": "float64",
"column1": "float64",
"column2": "float64",
},
'oport1_name': {
"column0": "float64",
"column1": "float64",
"column2": "float64",
"column3": "float64",
}
}
metadata = MetaData(inports=inports, outports=outports)
The inports/outports are nested dictionaries. The outer dictionary is keyed
by port name with metadata obj being the value of the outer dictionary. The
metadata obj is a dictionary with keys/fields which can be serialized into
JSON.
'''
class MetaDataSchema:
'''Fields used in meta specs (see TemplateNodeMixin): META_OP selects how
the output meta is derived from the input meta (deletion, addition, or
retention of columns), META_REF_INPUT names the input port the operation
refers to, META_DATA holds the column-name to type mapping, and
META_ORDER optionally controls column ordering.'''
META_OP_DELETION = 'deletion'
META_OP_ADDITION = 'addition'
META_OP_RETENTION = 'retention'
META_REF_INPUT = 'input'
META_OP = 'meta_op'
META_DATA = 'data'
META_ORDER = 'order'
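# Hedged usage sketch (not part of the original module): a meta_outports entry
# using the MetaDataSchema fields, as consumed by TemplateNodeMixin templates.
# Port and column names are illustrative assumptions.
def _example_meta_outport_spec():
    return {
        'out_port': {
            # derive output meta from the meta arriving on this input port
            MetaDataSchema.META_REF_INPUT: 'in_port',
            # append the META_DATA columns to the referenced input meta
            MetaDataSchema.META_OP: MetaDataSchema.META_OP_ADDITION,
            MetaDataSchema.META_DATA: {'signal': 'float64'},
            # optional column ordering (see the TemplateNodeMixin docstring)
            MetaDataSchema.META_ORDER: {'signal': -1},
        }
    }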
# === end of greenflow/greenflow/dataframe_flow/metaSpec.py (repo: fsi-samples-main) ===
from collections.abc import Mapping
from itertools import chain
from typing import Iterable
from greenflow._common import _namedtuple_with_defaults
__all__ = ['PortsSpecSchema', 'NodePorts', 'ConfSchema']
_NodePorts = _namedtuple_with_defaults(
'_NodePorts',
['inports', 'outports'],
{'inports': dict(), 'outports': dict()}
)
_ConfSchema = _namedtuple_with_defaults(
'_ConfSchema',
['json', 'ui'],
{'json': dict(), 'ui': dict()}
)
class NodePorts(_NodePorts):
'''Node ports must be defined for inputs and outputs.
:ivar inports: Dictionary defining port specs for input ports
:ivar outports: Dictionary defining port specs for output ports
Empty dicts default:
node_ports = NodePorts()
node_ports.inports and node_ports.outports are empty dicts
Example with port specs:
inports = {
'iport0_name': {
PortsSpecSchema.port_type: cudf.DataFrame
},
'iport1_name': {
PortsSpecSchema.port_type: cudf.DataFrame,
PortsSpecSchema.optional: True
}
}
outports = {
'oport0_name': {
PortsSpecSchema.port_type: cudf.DataFrame
},
'oport1_name': {
PortsSpecSchema.port_type: cudf.DataFrame,
PortsSpecSchema.optional: True
}
}
node_ports = NodePorts(inports=inports, outports=outports)
The inports/outports are nested dictionaries. The outer dictionary is keyed
by port name with port spec being the value of the outer dictionary. The
port spec is a dictionary with keys/fields per PortsSpecSchema class.
'''
class ConfSchema(_ConfSchema):
''' ConfSchema must be defined for Node conf JSON.
:ivar json: Dictionary defining the JSON schema for the node conf
:ivar ui: Dictionary defining the UI schema for the node conf
Empty dicts default:
confSchema = ConfSchema()
confSchema.json and confSchema.ui are empty dicts
Examples:
const schema = {
type: "boolean",
enum: [true, false]
};
const uiSchema={
"ui:enumDisabled": [true],
};
confSchema = ConfSchema(json=schema, ui=uiSchema)
'''
class PortsSpecSchema(object):
'''Outline fields expected in a ports definition for a node implementation.
:cvar type: The type of instance for the port. This can also be a
list of types if inputs can be of multiple types. Ex.:
[cudf.DataFrame, pd.DataFrame]
Optional port setting.
Default: [] Empty list.
:cvar optional: Boolean to indicate whether a given port is optional,
i.e. the input or output might be missing.
Optional port setting.
Default: False i.e. if port defined it is assumed required.
'''
port_type = 'type'
optional = 'optional'
dynamic = 'dynamic'
DYN_MATCH = 'matching_outputs'
@classmethod
def _typecheck(cls, schema_field, value):
if (schema_field == cls.port_type):
def check_ptype(val):
err_msg = 'Port type must be a pythonic '\
'type i.e type(port_type) == type. Instead got: {}, {}'
assert isinstance(val,
type), err_msg.format(type(val), val)
if isinstance(value, Iterable):
for ptype in value:
check_ptype(ptype)
else:
check_ptype(value)
elif schema_field == cls.optional:
assert isinstance(value, bool), 'Optional field must be a '\
'boolean. Instead got: {}'.format(value)
elif schema_field == cls.dynamic:
assert isinstance(value, bool), 'Dynamic field must be a '\
'boolean. Instead got: {}'.format(value)
else:
raise KeyError('Unknown schema field "{}" in the port spec.'.format(
schema_field))
# _schema_req_fields = []
@classmethod
def validate_ports(cls, node_ports):
'''
:type node_ports: NodePorts
'''
if not isinstance(node_ports, NodePorts):
raise AssertionError(
'Ports definition must be of type NodePorts. Instead got: '
'{}'.format(type(node_ports)))
if not isinstance(node_ports.inports, Mapping):
raise AssertionError(
'Input ports must be defined as a Mapping or dict. Instead '
'got: {}'.format(node_ports.inports))
if not isinstance(node_ports.outports, Mapping):
raise AssertionError(
'Output ports must be defined as a Mapping or dict. Instead '
'got: {}'.format(node_ports.outports))
for port_name, port_spec in chain(node_ports.inports.items(),
node_ports.outports.items()):
assert isinstance(port_name, str), \
'Port names must be strings. Instead got: {}'.format(port_name)
if not isinstance(port_spec, Mapping):
raise Exception(
'Port spec must be dict. Invalid port spec for port '
'"{}" port spec: {}'.format(port_name, port_spec))
for port_field, field_val in port_spec.items():
cls._typecheck(port_field, field_val)
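# Hedged usage sketch (not part of the original module): building a NodePorts
# definition and validating it. The port names and use of builtin dict/list as
# port types are illustrative assumptions.
def _example_validate_ports():
    inports = {
        'in': {PortsSpecSchema.port_type: [dict, list]},
    }
    outports = {
        'out': {PortsSpecSchema.port_type: dict,
                PortsSpecSchema.optional: True},
    }
    node_ports = NodePorts(inports=inports, outports=outports)
    # raises on malformed specs, returns None when the spec is valid
    PortsSpecSchema.validate_ports(node_ports)
    return node_ports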
# === end of greenflow/greenflow/dataframe_flow/portsSpecSchema.py (repo: fsi-samples-main) ===
from collections.abc import Iterable
import warnings
import dask
from dask.dataframe import DataFrame as DaskDataFrame
from dask.dataframe import from_delayed
import copy
from dask.base import is_dask_collection
from dask.distributed import Future
from .portsSpecSchema import PortsSpecSchema
from .taskSpecSchema import TaskSpecSchema
from .metaSpec import MetaData
from ._node import _Node
from ._node_taskgraph_extension_mixin import NodeTaskGraphExtensionMixin
from .output_collector_node import OUTPUT_TYPE
__all__ = ['NodeTaskGraphMixin', 'register_validator',
'register_copy_function', 'register_cleanup']
# class NodeIncomingEdge(object):
# from_node = 'from_node'
# from_port = 'from_port'
# to_node = 'to_port'
#
#
# class NodeOutgoingEdge(object):
# to_node = 'to_node'
# to_port = 'to_port'
# from_port = 'from_port'
# dictionary of validators with function signature (val, meta, node) -> bool
_VALIDATORS = {}
# dictionary of object copy functions
_COPYS = {}
# dictionary of clean up functions
_CLEANUP = {}
def _get_nodetype(node):
'''Identify the implementation node class. A node might be mixed in with
other classes. Ideally get the primary implementation class.
'''
nodetypes = node.__class__.mro()
keeptypes = []
for nodet in nodetypes:
# Exclude base Node classes i.e. _Node, NodeTaskGraphMixin, Node.
# Using nodet.__name__ != 'Node' to avoid cyclic dependencies.
if issubclass(nodet, _Node) and \
not issubclass(nodet, NodeTaskGraphMixin) and \
not issubclass(nodet, NodeTaskGraphExtensionMixin) and \
nodet is not _Node and \
nodet.__name__ != 'Node':
keeptypes.append(nodet)
return keeptypes
def register_validator(typename: type,
fun) -> None:
# print('register validator for', typename)
_VALIDATORS[typename] = fun
def register_copy_function(typename: type,
fun) -> None:
# print('register validator for', typename)
_COPYS[typename] = fun
def register_cleanup(name: str,
fun) -> None:
# print('register validator for', typename)
_CLEANUP[name] = fun
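# Hedged usage sketch (not part of the original module): registering a validator
# and a copy function for plain Python dict outputs. The validator signature
# (value, meta, node) -> bool and the copy signature (value) -> value follow the
# call sites in __validate_output and __make_copy below; the dict-based checks
# are illustrative assumptions.
def _example_register_dict_handlers():
    def _validate_dict(value, meta, node):
        # accept any dict whose keys cover the expected meta keys
        return meta is None or set(meta).issubset(value)

    def _copy_dict(value):
        return dict(value)

    register_validator(dict, _validate_dict)
    register_copy_function(dict, _copy_dict)
    register_cleanup('noop', lambda ui_clean: None)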
class NodeTaskGraphMixin(NodeTaskGraphExtensionMixin):
'''Relies on mixing in with a Node class that has the following attributes
and methods:
ATTRIBUTES
----------
_task_obj
uid
node_type_str
conf
load
save
delayed_process
infer_meta
METHODS
-------
process
load_cache
save_cache
_get_input_ports
_get_output_ports
'''
def __getstate__(self):
state = self.__dict__.copy()
if 'input_df' in state:
del state['input_df']
# print('state', state)
return state
def __getitem__(self, key):
return getattr(self, key)
def __setstate__(self, state):
self.__dict__.update(state)
def __init__(self):
self.inputs = []
self.outputs = []
self.visited = False
self.input_df = {}
# input_df format:
# {
# iport0: df_for_iport0,
# iport1: df_for_iport1,
# }
# Note: that even though the "df" terminology is used the type is
# user configurable i.e. "df" is just some python object which is
# typically a data container.
self.clear_input = True
def update(self):
"""
Within a task graph context override a Node's update to cache: ports,
metadata, input connections, and input metadata.
"""
# this will filter out the primary class with ports_setup
nodecls_list = _get_nodetype(self)
for icls in nodecls_list:
if not hasattr(icls, 'update'):
continue
# Within update resolve ports and meta via class NodeExtensionMixin
# methods _resolve_ports and _resolve_meta.
icls.update(self)
break
# cache it after update
NodeTaskGraphExtensionMixin.cache_update_result(self)
# cache the conf_schema too
for icls in nodecls_list:
if not hasattr(icls, 'conf_schema'):
continue
self.conf_schema_cache = self.conf_schema()
break
else:
pass
def conf_schema(self):
if hasattr(self, 'conf_schema_cache'):
return self.conf_schema_cache
nodecls_list = _get_nodetype(self)
for icls in nodecls_list:
if not hasattr(icls, 'conf_schema'):
continue
return icls.conf_schema(self)
def meta_setup(self):
if hasattr(self, 'meta_data_cache'):
return self.meta_data_cache
nodecls_list = _get_nodetype(self)
meta = MetaData()
for icls in nodecls_list:
if not hasattr(icls, 'meta_setup'):
continue
meta = icls.meta_setup(self)
break
return meta
def ports_setup(self):
"""
Override the super class ports_setup so it can calculate the dynamic
ports.
If ports information is needed, ports_setup should be used.
:return: Node ports
:rtype: NodePorts
"""
if hasattr(self, 'ports_setup_cache'):
ports = self.ports_setup_cache
else:
# this will filter out the primary class with ports_setup
nodecls_list = _get_nodetype(self)
for icls in nodecls_list:
if not hasattr(icls, 'ports_setup'):
continue
ports = icls.ports_setup(self)
break
else:
raise Exception('ports_setup method missing')
# note, currently can only handle one dynamic port per node
port_type = PortsSpecSchema.port_type
inports = ports.inports
dy = PortsSpecSchema.dynamic
for key in inports:
if dy in inports[key] and inports[key][dy]:
types = inports[key][port_type]
break
else:
return ports
if hasattr(self, 'inputs'):
has_dynamic = False
for inp in self.inputs:
to_port = inp['to_port']
if to_port in inports and not inports[to_port].get(dy, False):
# skip connected non dynamic ports
continue
else:
has_dynamic = True
if has_dynamic:
connected_inports = self.get_connected_inports()
for inp in self.inputs:
to_port = inp['to_port']
if to_port in inports and (not inports[to_port].get(
dy, False)):
# skip connected non dynamic ports
continue
else:
if to_port in connected_inports:
types = connected_inports[to_port]
inports[inp['from_node'].uid+'@'+inp['from_port']] = {
port_type: types, dy: True}
return ports
def __validate_output(self, node_output: dict):
output_meta = self.meta_setup().outports
# Validate each port
out_ports = self._get_output_ports(full_port_spec=True)
for pname, pspec in out_ports.items():
# only validate it if it is connected
if not self.outport_connected(pname):
# if the port is not connected skip it
# print('port {} is not connected'.format(pname))
continue
out_optional = pspec.get('optional', False)
if pname not in node_output:
if out_optional:
continue
else:
raise Exception('Node "{}" did not produce output "{}"'
.format(self.uid, pname))
out_val = node_output[pname]
out_type = type(out_val)
expected_type = pspec.get(PortsSpecSchema.port_type)
if expected_type:
if not isinstance(expected_type, list):
expected_type = [expected_type]
# if self.delayed_process and \
# cudf.DataFrame in expected_type and \
# dask_cudf.DataFrame not in expected_type:
# expected_type.append(dask_cudf.DataFrame)
match = False
for expected in expected_type:
if issubclass(out_type, expected):
match = True
break
if not match:
raise TypeError(
'Node "{}" output port "{}" produced wrong type '
'"{}". Expected type "{}"'
.format(self.uid, pname, out_type, expected_type))
# cudf_types_tuple = (cudf.DataFrame, dask_cudf.DataFrame)
# if out_type in cudf_types_tuple:
# if len(out_val.columns) == 0 and out_optional:
# continue
if out_type in _VALIDATORS:
validator = _VALIDATORS[out_type]
meta_to_val = output_meta.get(pname)
val_flag = validator(out_val, meta_to_val, self)
if not val_flag:
raise Exception("not valid output")
def __input_ready(self):
if not isinstance(self.load, bool) or self.load:
return True
for ient in self.inputs:
iport = ient['to_port']
if iport not in self.input_df:
return False
return True
def __get_input_df(self):
return self.input_df
def get_input_meta(self, port_name=None):
"""
If port_name is None, get the meta information of all connected inputs.
returns
dict, key is the current node input port name, value is the column
names and types
If port_name is not None, get the metadata for that input port. If it
doesn't exist, return None.
"""
if hasattr(self, 'input_meta_cache'):
if port_name is None:
return self.input_meta_cache
elif port_name in self.input_meta_cache:
return self.input_meta_cache[port_name]
else:
# Run the logic below to find the port
# Warning: Something might not be right if this happens.
# Perhaps the cache needs to be reset.
pass
output = {}
if not hasattr(self, 'inputs'):
return output
out_port_names = []
to_port_names = []
from_port_names = []
meta_data_list = []
for node_input in self.inputs:
from_node = node_input['from_node']
meta_data = copy.deepcopy(from_node.meta_setup())
from_port_name = node_input['from_port']
to_port_name = node_input['to_port']
if port_name is not None and port_name == to_port_name:
return meta_data.outports[from_port_name]
if from_port_name not in meta_data.outports:
if self.node_type_str == OUTPUT_TYPE:
continue
nodetype_list = _get_nodetype(self)
warnings.warn(
'node "{}" node-type "{}" to port "{}", from node "{}" '
'node-type "{}" oport "{}" missing oport in metadata for '
'node "{}" output meta: {}'.format(
self.uid, nodetype_list, to_port_name,
from_node.uid, _get_nodetype(from_node),
from_port_name, from_node.uid, meta_data.outports)
)
else:
out_port_name = from_node.uid+'@'+from_port_name
out_port_names.append(out_port_name)
to_port_names.append(to_port_name)
from_port_names.append(from_port_name)
meta_data_list.append(meta_data)
if port_name is not None:
return None
if len(out_port_names) > 0:
dy = PortsSpecSchema.dynamic
ports = self.ports_setup()
inports = ports.inports
for out_port_name, to_port_name, from_port_name, meta_data in zip(
out_port_names, to_port_names, from_port_names,
meta_data_list):
if out_port_name in inports and inports[out_port_name].get(
dy, False):
output[out_port_name] = meta_data.outports[from_port_name]
else:
output[to_port_name] = meta_data.outports[from_port_name]
return output
def __set_input_df(self, to_port, df):
self.input_df[to_port] = df
def flow(self, progress_fun=None):
"""
progress_fun is used to show the progress of computation;
it is a function that takes the node id as an argument.
Flow from this node to do the computation:
* check whether all the input dataframes are ready
* call its process function to manipulate the input dataframes
* set the resulting dataframes on the children nodes as inputs
* flow each of the children nodes
"""
if progress_fun is not None:
progress_fun(self.uid)
input_ready = self.__input_ready()
if not input_ready:
return
inputs_data = self.__get_input_df()
output_df = self.__call__(inputs_data)
if self.clear_input:
self.input_df = {}
for out in self.outputs:
onode = out['to_node']
iport = out['to_port']
oport = out['from_port']
# Prevent memory leaks.
if not onode.visited:
continue
if oport is not None:
if oport not in output_df:
if onode.node_type_str == OUTPUT_TYPE:
onode_msg = 'is listed in task-graph outputs'
else:
onode_msg = 'is required as input to node "{}"'.format(
onode.uid)
err_msg = 'ERROR: Missing output port "{}" from '\
'node "{}". This output {}.'.format(
oport, self.uid, onode_msg)
raise Exception(err_msg)
df = output_df[oport]
onode.__set_input_df(iport, df)
if onode.visited:
onode.flow(progress_fun)
def __make_copy(self, df_obj):
typeObj = df_obj.__class__
if typeObj in _COPYS:
return _COPYS[typeObj](df_obj)
else:
return df_obj
def __check_dly_processing_prereq(self, inputs: dict):
'''At least one input must be a dask DataFrame type. Output types must
be specified as cudf.DataFrame or dask_cudf.DataFrame. (Functionality
could also be extended to support dask dataframes backed by pandas, but
currently only cudf/dask_cudf dataframes are supported.)
'''
# check if dask future or delayed
ivals = inputs.values()
if not any((is_dask_collection(iv) for iv in ivals)) and \
not any((isinstance(iv, Future) for iv in ivals)):
# None of the inputs are Delayed or Futures so no intention of
# using delayed processing. Return False and avoid printing
# non-applicable warning.
return False
use_delayed = False
for ival in ivals:
if isinstance(ival, DaskDataFrame):
use_delayed = True
break
# NOTE: Currently only support delayed processing when one of the
# inputs is a dask_cudf.DataFrame. In the future might generalize
# to support dask processing of other delayed/future type inputs.
if not use_delayed:
warn_msg = \
'None of the Node "{}" inputs '\
'is a dask_cudf.DataFrame. Ignoring '\
'"delayed_process" setting.'.format(self.uid)
warnings.warn(warn_msg)
return use_delayed
def __delayed_call(self, inputs: dict):
'''Delayed processing called when self.delayed_process is set. To
handle delayed processing automatically, prerequisites are checked via
call to:
:meth:`__check_dly_processing_prereq`
Additionally all input dask_cudf dataframes have to be partitioned
the same i.e. equal number of partitions.
@param inputs: dict
key: iport name string, value: input data
'''
def df_copy(df_in):
'''Used for delayed unpacking.'''
# Needed for the same reason as __make_copy: to prevent column
# additions in the input data frames. In python objects are passed
# by reference and dataframes are mutable.
# Handle the case when dask_cudf.DataFrames are source frames
# which appear as cudf.DataFrame in a dask-delayed function.
return self.__make_copy(df_in)
def get_pout(out_dict, port):
'''Get the output in out_dict at key port. Used for delayed
unpacking.'''
# DEBUGGING
# try:
# from dask.distributed import get_worker
# worker = get_worker()
# print('worker{} get_pout NODE "{}" port "{}" worker: {}'
# .format(worker.name, self.uid, port, worker))
# except Exception as err:
# print(err)
df_out = out_dict.get(port)
return self.__make_copy(df_out)
inputs_not_dly = {}
for iport, inarg in inputs.items():
# dcudf not necessarily a dask cudf frame
if not isinstance(inarg, DaskDataFrame):
# TODO: There could be cases where these non-delayed args are
# mutable. In that case USER BEWARE. Could copy here to
# deal with that. Shallow copy would be preferred but not
# 100% reliable.
inputs_not_dly[iport] = inarg
inputs_dly = {}
# A dask_cudf object will return a list of dask delayed objects via
# the to_delayed() API. The logic below assumes (otherwise it errors) that
# all inputs are dask_cudf objects and are distributed in the same
# manner. Ex. inputs_dly:
# inputs_dly = {
# p0: {
# iport0: ddf_dly_i0_p0,
# iport1: ddf_dly_i1_p0,
# ... for all iports
# },
# p1: {
# iport0: ddf_dly_i0_p1,
# iport1: ddf_dly_i1_p1,
# ... for all iports
# },
# ... for all partitions
# i_x - iport
# p_x - partition index
npartitions = None
for iport, inarg in inputs.items():
# dcudf not necessarily a dask cudf frame
if not isinstance(inarg, DaskDataFrame):
continue
dcudf = inarg
ddf_dly_list = dcudf.to_delayed()
npartitions_ = len(ddf_dly_list)
if npartitions is None:
npartitions = npartitions_
if npartitions != npartitions_:
raise Exception(
'Error DASK_CUDF PARTITIONS MISMATCH: Node "{}" input "{}"'
' has {} npartitions and other inputs have {} partitions'
.format(self.uid, iport, npartitions_, npartitions))
for idly, dly in enumerate(ddf_dly_list):
# very important to use a shallow copy of inputs_not_dly
inputs_dly.setdefault(idly, inputs_not_dly.copy()).update({
# iport: dly.persist() # DON'T PERSIST HERE
iport: dask.delayed(df_copy)(dly)
})
# DEBUGGING
# print('INPUTS_DLY:\n{}'.format(inputs_dly))
outputs_dly = {}
# Formulate a list of delayed objects for each output port to be able
# to call from_delayed to synthesize a dask_cudf object.
# Ex. outputs_dly:
# outputs_dly = {
# o0: [ddf_dly_o0_p0, ddf_dly_o0_p1, ... _pN]
# o1: [ddf_dly_o1_p0, ddf_dly_o1_p1, ... _pN]
# ... for all output ports
# }
# o_x - output port
# p_x - delayed partition
# VERY IMPORTANT TO USE PERSIST:
# https://docs.dask.org/en/latest/dataframe-api.html#dask.dataframe.DataFrame.persist
# Otherwise process will run several times.
for inputs_ in inputs_dly.values():
output_df_dly = dask.delayed(self.decorate_process())(inputs_)
# output_df_dly_per = output_df_dly.persist()
output_df_dly_per = output_df_dly
for oport in self._get_output_ports():
oport_out = dask.delayed(get_pout)(output_df_dly_per, oport)
# outputs_dly.setdefault(oport, []).append(oport_out.persist())
outputs_dly.setdefault(oport, []).append(oport_out)
# DEBUGGING
# print('OUTPUTS_DLY:\n{}'.format(outputs_dly))
output_df = {}
# A dask_cudf object is synthesized from a list of delayed objects.
# Per outputs_dly above use dask_cudf.from_delayed API.
connected_outs = set([out['from_port'] for out in self.outputs])
for oport, port_spec in \
self._get_output_ports(full_port_spec=True).items():
if (oport not in connected_outs):
continue
port_type = port_spec.get(PortsSpecSchema.port_type, type(None))
if not isinstance(port_type, Iterable):
port_type = [port_type]
# DEBUGGING
# print('__DELAYED_CALL node "{}" port "{}" port type "{}"'.format(
# self.uid, oport, port_type))
if any([issubclass(p_type,
DaskDataFrame) for p_type in port_type]):
if self.infer_meta:
output_df[oport] = from_delayed(outputs_dly[oport])
else:
meta_data = self.meta_setup().outports
output_df[oport] = from_delayed(outputs_dly[oport],
meta=meta_data[oport])
else:
# outputs_dly[oport] is currently a list. Run compute on each
# partition, and keep the first one.
            # This is not very generalizable
# TODO: Check for equivalency and output a warning in case
# outputs don't match from different partitions.
output_df[oport] = \
[iout.compute() for iout in outputs_dly[oport]][0]
return output_df
def outport_connected(self, port_name):
"""
        Test whether this node's output port is connected.
        @params port_name
        string, output port name
        returns
        boolean, whether this port is connected or not
"""
found = False
for iout in self.outputs:
oport = iout['from_port']
if (port_name == oport):
found = True
break
return found
def get_connected_inports(self):
"""
get all the connected input port information
returns
dict, key is the current node input port name, value is the port
type passed from parent
"""
if hasattr(self, 'input_connections_cache'):
return self.input_connections_cache
def get_type(type_def):
if isinstance(type_def, list):
return type_def
else:
return [type_def]
output = {}
if not hasattr(self, 'inputs'):
return output
for node_input in self.inputs:
from_node = node_input['from_node']
ports = from_node.ports_setup()
from_port_name = node_input['from_port']
to_port_name = node_input['to_port']
if from_port_name in ports.outports:
oport_types = get_type(
ports.outports[from_port_name][PortsSpecSchema.port_type])
output[to_port_name] = oport_types
else:
continue
return output
def decorate_process(self):
def timer(*argv):
import time
start = time.time()
result = self.process(*argv)
end = time.time()
print('id:%s process time:%.3fs' % (self.uid, end-start))
return result
if self.profile:
return timer
else:
return self.process
def __call__(self, inputs_data):
if self.load:
if isinstance(self.load, bool):
output_df = self.load_cache()
else:
output_df = self.load
else:
# nodes with ports take dictionary as inputs
inputs = {iport: self.__make_copy(data_input)
for iport, data_input in inputs_data.items()}
if not self.delayed_process:
output_df = self.decorate_process()(inputs)
else:
use_delayed = self.__check_dly_processing_prereq(inputs)
if use_delayed:
output_df = self.__delayed_call(inputs)
else:
output_df = self.decorate_process()(inputs)
if self.node_type_str != OUTPUT_TYPE and output_df is None:
raise Exception("None output")
else:
self.__validate_output(output_df)
if self.save:
self.save_cache(output_df)
return output_df
def _validate_connected_ports(self):
"""
Validate the connected port types match
"""
if self.node_type_str == OUTPUT_TYPE:
# Don't validate for Output_Collector
return
self_nodetype = _get_nodetype(self)
msgfmt = '"{task}":"{nodetype}" {inout} port "{ioport}" {inout} port '\
'type(s) "{ioport_types}"'
iports_connected = self.get_connected_inports()
iports_spec = self._get_input_ports(full_port_spec=True)
for iport in iports_connected.keys():
iport_spec = iports_spec[iport]
iport_type = iport_spec[PortsSpecSchema.port_type]
iport_types = [iport_type] \
if not isinstance(iport_type, Iterable) else iport_type
for ient in self.inputs:
# find input node edge entry with to_port, from_port, from_node
if not iport == ient['to_port']:
continue
ientnode = ient
break
else:
intask = self._task_obj[TaskSpecSchema.inputs][iport]
# this should never happen
raise LookupError(
'Task "{}" not connected to "{}.{}". Add task spec to '
'TaskGraph.'.format(intask, self.uid, iport))
from_node = ientnode['from_node']
oport = ientnode['from_port']
oports_spec = from_node._get_output_ports(full_port_spec=True)
oport_spec = oports_spec[oport]
oport_type = oport_spec[PortsSpecSchema.port_type]
oport_types = [oport_type] \
if not isinstance(oport_type, Iterable) else oport_type
for optype in oport_types:
if issubclass(optype, tuple(iport_types)):
break
else:
# Port types do not match
msgi = msgfmt.format(
task=self.uid,
nodetype=self_nodetype,
inout='input',
ioport=iport,
ioport_types=iport_types)
msgo = msgfmt.format(
task=from_node.uid,
nodetype=_get_nodetype(from_node),
inout='output',
ioport=oport,
ioport_types=oport_types)
errmsg = 'Port Types Validation\n{}\n{}\n'\
'Connected nodes do not have matching port types. Fix '\
'port types.'.format(msgo, msgi)
raise TypeError(errmsg)
def _validate_connected_metadata(self):
"""
Validate the connected metadata match the requirements.
metadata.inports specify the required metadata.
"""
metadata = self.meta_setup()
# as current behavior of matching in the validate_required
def validate_required(iport, kcol, kval, ientnode, icols):
node = ientnode['from_node']
oport = ientnode['from_port']
src_task = '{}.{}'.format(node.uid, oport)
src_type = _get_nodetype(node)
# incoming "task.port":"Node-type":{{column:column-type}}
msgi = \
'"{task}":"{nodetype}" produces metadata {colinfo}'.format(
task=src_task,
nodetype=src_type,
colinfo=icols)
dst_task = '{}.{}'.format(self.uid, iport)
dst_type = _get_nodetype(self)
# expecting "task.port":"Node-type":{{column:column-type}}
msge = \
'"{task}":"{nodetype}" requires metadata {colinfo}'.format(
task=dst_task,
nodetype=dst_type,
colinfo={kcol: kval})
header = \
'Meta Validation\n'\
'Format "task.port":"Node-type":{{column:column-type}}'
info_msg = '{}\n{}\n{}'.format(header, msgi, msge)
if kcol not in icols:
err_msg = \
'Task "{}" missing required column "{}" '\
'from "{}".'.format(self.uid, kcol, src_task)
out_err = '{}\n{}'.format(info_msg, err_msg)
raise LookupError(out_err)
ival = icols[kcol]
if kval != ival:
if kval is None:
# bypass None type
return
# special case for 'date'
if (kval == 'date' and ival
in ('datetime64[ms]', 'date', 'datetime64[ns]')):
return
else:
err_msg = 'Task "{}" column "{}" expected type "{}" got '\
'type "{}" instead.'.format(self.uid, kcol, kval, ival)
out_err = '{}\n{}'.format(info_msg, err_msg)
raise LookupError(out_err)
inputs_meta = self.get_input_meta()
required = metadata.inports
if not required:
return
inports = self._get_input_ports(full_port_spec=True)
for iport in inports:
if iport not in required:
continue
required_iport = required[iport]
if iport not in inputs_meta:
# if iport is dynamic, skip warning
dy = PortsSpecSchema.dynamic
if inports[iport].get(dy, False):
continue
if inports[iport].get(PortsSpecSchema.optional, False):
continue
# Is it possible that iport not connected? If so iport should
# not be in required. Should raise an exception here.
warn_msg = \
'Task "{}" Node Type "{}" missing required port "{}" in '\
'incoming meta data. Should the port be connected?'.format(
self.uid, _get_nodetype(self), iport)
warnings.warn(warn_msg)
continue
incoming_meta = inputs_meta[iport]
for ient in self.inputs:
# find input node edge entry with to_port, from_port, from_node
if not iport == ient['to_port']:
continue
ientnode = ient
break
else:
intask = self._task_obj[TaskSpecSchema.inputs][iport]
# this should never happen
raise LookupError(
'Task "{}" not connected to "{}.{}". Add task spec to '
'TaskGraph.'.format(intask, self.uid, iport))
for key, val in required_iport.items():
validate_required(iport, key, val,
ientnode, incoming_meta)
| fsi-samples-main | greenflow/greenflow/dataframe_flow/_node_flow.py |
import abc
from .task import Task
from .taskSpecSchema import TaskSpecSchema
from .portsSpecSchema import (ConfSchema, NodePorts)
from .metaSpec import MetaData
from ._node import _Node
from ._node_extension_mixin import NodeExtensionMixin
__all__ = ['Node']
class _PortsMixin(object):
    '''The mixed-in class must provide a "ports_setup" method (it does not
    have to implement it, i.e. it may rely on NotImplementedError); otherwise
    an AttributeError is raised.
'''
def __get_io_port(self, io=None, full_port_spec=False):
input_ports, output_ports = self.ports_setup()
if io in ('in',):
io_ports = input_ports
else:
io_ports = output_ports
if io_ports is None:
io_ports = dict()
if not full_port_spec:
io_ports = list(io_ports.keys())
return io_ports
def _get_input_ports(self, full_port_spec=False):
return self.__get_io_port(io='in', full_port_spec=full_port_spec)
def _get_output_ports(self, full_port_spec=False):
return self.__get_io_port(io='out', full_port_spec=full_port_spec)
class Node(NodeExtensionMixin, _PortsMixin, _Node):
'''Base class for implementing greenflow plugins i.e. nodes. A node
processes tasks within a greenflow task graph.
Within the context of a task graph the Node class is mixed in with classes
NodeTaskGraphMixin and NodeTaskGraphExtensionMixin.
    Must implement the following methods:
:meth: ports_setup
Defines ports for the node. Refer to ports_setup docstring for
further details.
:meth: meta_setup
Define expected metadata and resulting metadata.
Ex. When processing dataframes define expected columns:
def meta_setup(self):
metadata = MetaData()
required = {
'iport0_name': {'x': 'float64',
'y': 'float64'}
'iport1_name': some_dict,
etc.
}
out_cols = {
'oport0_name': {'x': 'float64',
'y': 'float64',
'z': 'int64'}
'oport1_name': some_dict,
etc.
}
metadata.inports = required
metadata.outports = out_cols
return metadata
Refer to meta_setup docstring for further details.
:meth: conf_schema
Define the json schema for the Node configuration. The client
        can automatically generate the UI elements based on the schema.
        Refer to the conf_schema docstring for further details.
:meth: process
        Main functionality or processing logic of the Node. Refer to
process docstring for further details.
'''
cache_dir = '.cache'
def __init__(self, task):
        # make sure it is a task object
assert isinstance(task, Task)
self._task_obj = task # save the task obj
self.uid = task[TaskSpecSchema.task_id]
node_type = task[TaskSpecSchema.node_type]
self.node_type_str = node_type if isinstance(node_type, str) else \
node_type.__name__
self.conf = task[TaskSpecSchema.conf]
self.load = task.get(TaskSpecSchema.load, False)
self.save = task.get(TaskSpecSchema.save, False)
self.delayed_process = False
        # eagerly infer the metadata, costly
self.infer_meta = False
        # run the customized column setup
self.init()
self.profile = False # by default, do not profile
# PortsSpecSchema.validate_ports(self.ports_setup())
@abc.abstractmethod
def ports_setup(self) -> NodePorts:
"""Virtual method for specifying inputs/outputs ports.
Note: Within the context of a task graph the
NodeTaskGraphMixin.ports_setup is invoked and forwards the call
to the Node's implementation class ports_setup that is
implementation of this method.
Must return an instance of NodePorts that adheres to PortsSpecSchema.
Refer to PortsSpecSchema and NodePorts in module:
greenflow.dataframe_flow.portsSpecSchema
Ex. ports for inputs and outputs. (typical case)
inports = {
'iport0_name': {
PortsSpecSchema.port_type: cudf.DataFrame
},
'iport1_name': {
PortsSpecSchema.port_type: cudf.DataFrame,
PortsSpecSchema.optional: True
PortsSpecSchema.dynamic: True
}
}
outports = {
'oport0_name': {
PortsSpecSchema.port_type: cudf.DataFrame
},
'oport1_name': {
PortsSpecSchema.port_type: cudf.DataFrame,
PortsSpecSchema.optional: True
}
}
node_ports = NodePorts(inports=inports, outports=outports)
return node_ports
The output port type can be dynamically calculated based on the input
port types. The input port type can be obtained by
`self.get_connected_inports` method.
:return: Node ports
:rtype: NodePorts
"""
raise NotImplementedError
def outport_connected(self, portname) -> bool:
"""
        Test whether this node's output port is connected. It is typically
        used to compute a result for an output port only when that port is
        connected.
        @params port_name
        string, output port name
        returns
        boolean, whether this port is connected or not
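        Ex. (an illustrative sketch; 'sum_port' and compute_sum are
        hypothetical):
            def process(self, inputs):
                output = {}
                if self.outport_connected('sum_port'):
                    output['sum_port'] = compute_sum(inputs)  # hypothetical
                return output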
"""
# this method will be implemented by NodeTaskGraphMixin
pass
def update(self):
"""
Use the update method when relying on the dynamic information from
the parent nodes.
Call the self._resolve_ports and self._resolve_meta within update
if using ports and meta templates. Refer to class NodeExtensionMixin
and corresponding methods _resolve_ports and _resolve_meta.
Refer to usage examples of class TemplateNodeMixin.update.
"""
pass
def get_connected_inports(self) -> dict:
"""
        Get all the connected input port information. It is used by an
        individual node to determine the output port types.
returns
dict, key is the current node input port name, value is the port
type passed from parent
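        Ex. (an illustrative sketch; the port name and type are hypothetical):
            connected = self.get_connected_inports()
            # e.g. {'points_df_in': [cudf.DataFrame]}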
"""
# this method will be implemented by NodeTaskGraphMixin
return {}
def init(self):
"""
Initialize the node. Usually it is used to set self.delayed_process
        flag and other special initialization.
The self.delayed_process flag is by default set to False. It can be
        overwritten here to True. For native dataframe API calls, dask_cudf
        supports distributed computation. But the dask dataframe does
not support GPU customized kernels directly. We can use to_delayed and
from_delayed low level interfaces of dask dataframe to add this
support. In order to use Dask (for distributed computation i.e.
multi-gpu in examples later on) we set the flag and the framework
handles dask dataframes automatically under the hood.
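        Ex. (an illustrative sketch of enabling delayed processing):
            def init(self):
                self.delayed_process = True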
"""
pass
def get_input_meta(self, port_name=None):
"""
        Get the input meta information. It is usually used by an individual
        node to compute the output meta information.
        if port_name is None
        returns
        dict, key is the node input port name, value is the metadata dict
        if port_name is provided
        returns
        the meta data sent to the input port with name `port_name`. If it
        is not connected, return None
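        Ex. (an illustrative sketch; port and column names are hypothetical):
            in_meta = self.get_input_meta('points_df_in')
            # e.g. {'x': 'float64', 'y': 'float64'}, or None if unconnected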
"""
# this method will be implemented by NodeTaskGraphMixin
return {}
def conf_schema(self) -> ConfSchema:
"""Virtual method for specifying configuration schema. Implement if
desire to use the UI client to help fill the conf forms.
The schema standard is specified by
[JSON Schema](https://json-schema.org/)
The UI Client side uses this [open source React component]
(https://github.com/rjsf-team/react-jsonschema-form)
To learn how to write JSON schema, please refer to this [document]
(https://react-jsonschema-form.readthedocs.io/en/latest/).
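        Ex. (a minimal sketch; the field names are hypothetical):
            json = {
                "title": "My Node configure",
                "type": "object",
                "properties": {
                    "npts": {"type": "number",
                             "description": "number of points"}
                },
                "required": ["npts"],
            }
            ui = {"npts": {"ui:widget": "updown"}}
            return ConfSchema(json=json, ui=ui)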
:return: Conf Schema
:rtype: ConfSchema
"""
return ConfSchema(json={}, ui={})
@abc.abstractmethod
def meta_setup(self) -> MetaData:
"""
        All children classes should implement this.
It is used to compute the required input and output meta data.
Note: Within the context of a task graph the
NodeTaskGraphMixin.meta_setup is invoked and forwards the call
to the Node's implementation class meta_setup that is
implementation of this method.
`inports` defines the required metadata for the node.
        metadata consists of python dictionaries, which can be serialized into
        JSON. The output metadata is calculated based on the input meta data.
        It is passed to the downstream nodes to do metadata validation.
returns:
:return: MetaData
"""
return MetaData(inports={}, outports={})
@abc.abstractmethod
def process(self, inputs, **kwargs) -> dict:
"""
        Process the input dataframe. Children classes are required to override
        this.
Arguments
-------
inputs: dictionary
the inputs is a dictionary keyed by port name as defined in
ports_setup.
Ex.:
inputs = {
iport0: df0,
iport1: df1,
etc.
}
The task-spec for inputs is a dictionary keyed by port names
with values being task-ids of input tasks "." port output of
the input tasks. Ex.:
TaskSpecSchema.inputs: {
iport0: some_task_id.some_oport,
iport1: some_other_task_id.some_oport,
etc.
}
Within the process access the dataframes (data inputs) as:
df0 = inputs[iport0] # from some_task_id some_oport
df1 = inputs[iport1] # from some_other_task_id some_oport
etc.
Returns
-------
dataframe
The output can be anything representable in python. Typically it's
a processed dataframe.
        It returns a dictionary keyed by output ports (as defined in
        ports_setup). Ex.:
df = cudf.DataFrame() # or maybe it can from an input
# do some calculations and populate df.
return {oport: df}
        If there are multiple output ports, the computation can be done
on demand depending on whether the output port is connected or
not. The output connection can be queried by
`self.outport_connected` method
"""
output = None
return output
# Validation methods ######################################
def validate_connected_ports(self) -> None:
"""
        Validate that the connected port types match. If not overwritten, it
        uses the default implementation.
"""
if hasattr(self, '_validate_connected_ports'):
self._validate_connected_ports()
def validate_connected_metadata(self) -> None:
"""
Validate the connected metadata match the requirements.
metadata.inports specify the required metadata.
        If not overwritten, it uses the default implementation.
"""
if hasattr(self, '_validate_connected_metadata'):
self._validate_connected_metadata()
def load_cache(self, filename=None) -> dict:
"""
        Defines the behavior of how to load the cache file from the `filename`.
        Node can override this method.
        Arguments
        -------
        filename: str
        filename of the cache file. Leave as None to use default.
returns: dict
dictionary of the output from this node
"""
raise NotImplementedError
def save_cache(self, output_data: dict) -> None:
'''Defines how to save the output of a node to
filesystem cache.
:param output_data: The output from :meth:`process`.
'''
raise NotImplementedError
| fsi-samples-main | greenflow/greenflow/dataframe_flow/node.py |
from .portsSpecSchema import (PortsSpecSchema, NodePorts)
from .metaSpec import (MetaDataSchema, MetaData)
from copy import deepcopy
__all__ = ['NodeTaskGraphExtensionMixin']
class NodeTaskGraphExtensionMixin:
'''Extension logic for a Node within a taskgraph. This mixin is used with
NodeTaskGraphMixin.
'''
def reset_cache(self):
'''Delete ivars that maintain the cache state for a node.'''
if hasattr(self, 'ports_setup_cache'):
del self.ports_setup_cache
if hasattr(self, 'input_meta_cache'):
del self.input_meta_cache
if hasattr(self, 'input_connections_cache'):
del self.input_connections_cache
if hasattr(self, 'meta_data_cache'):
del self.meta_data_cache
def cache_update_result(self):
self.reset_cache()
# cache all the intermediate results
self.ports_setup_cache = self.ports_setup()
self.input_meta_cache = self.get_input_meta()
self.input_connections_cache = self.get_connected_inports()
self.meta_data_cache = self.meta_setup()
def ports_setup_ext(self, ports):
'''
1. Finds the port type by the connected node port type.
2. Set the port type to the determined type.
3. If the node is not connected, it will use the list of the types.
4. Also handles the dynamic port type.
5. Set the port type to the determined type (from the graph topology)
for the dynamic port.
:param ports: These are resolved ports.
:type ports: NodePorts
:return: Node ports
:rtype: NodePorts
'''
port_inports = ports.inports
port_outports = ports.outports
port_type = PortsSpecSchema.port_type
dy = PortsSpecSchema.dynamic
input_connections = self.get_connected_inports()
dynamic = None
inports = {}
for input_port in port_inports:
inports[input_port] = deepcopy(port_inports[input_port])
if input_port in input_connections:
determined_type = input_connections[input_port]
inports[input_port].update({port_type: determined_type})
if dy in port_inports[input_port]:
inports[input_port][dy] = True
dynamic = \
port_inports[input_port][dy][PortsSpecSchema.DYN_MATCH]
outports = {}
for output_port in port_outports:
types = port_outports[output_port][port_type]
if isinstance(types, str) and types.startswith('$'):
groups = self._sep_variable(types)
if groups[0] != 'port':
raise ValueError("expect variable {} refer to port".format(
groups[0]))
if groups[1] not in inports:
raise ValueError(
"expect variable name {} refer to a inport name"
.format(groups[1]))
input_port_name = groups[1]
outports[output_port] = {
port_type:
inports[input_port_name][port_type]
}
else:
outports[output_port] = {port_type: types}
static_inport_names = [
iport
for iport in port_inports if not inports[iport].get(dy, False)
]
for input_port in port_inports:
dynamic = None
if dy in port_inports[input_port]:
dynamic = \
port_inports[input_port][dy][PortsSpecSchema.DYN_MATCH]
if dynamic is not None:
types = None
if not isinstance(dynamic, bool):
types = dynamic
for port_name in input_connections.keys():
# if port_name not in port_inports:
if port_name not in outports and \
port_name not in static_inport_names:
if types is not None:
outports[port_name] = {port_type: types}
else:
if isinstance(dynamic, bool) and dynamic:
types = input_connections[port_name]
outports[port_name] = {port_type: types}
return NodePorts(inports=inports, outports=outports)
def meta_setup_ext(self, meta):
'''
1. Based on meta operators, calculate the output meta
2. Adjust meta data element orders based on specified order
3. Pass the meta data for dynamically added output ports
:param meta: the meta information that needs to be calculated.
        :type meta: MetaData
:return: MetaData
:rtype: MetaData
'''
input_meta = self.get_input_meta()
inports = meta.inports.copy()
metaoutports = meta.outports
outports = {}
data_accessor = MetaDataSchema.META_DATA
order_accessor = MetaDataSchema.META_ORDER
for out_port_name in metaoutports:
type_str = metaoutports[out_port_name].get(MetaDataSchema.META_OP)
if type_str is None:
# NOT A META_OP
outports[out_port_name] = metaoutports[out_port_name]
elif type_str == MetaDataSchema.META_OP_ADDITION:
input_port = \
metaoutports[out_port_name][MetaDataSchema.META_REF_INPUT]
if input_port in input_meta:
input_meta_data = input_meta[input_port]
else:
input_meta_data = inports[input_port]
outports[out_port_name] = input_meta_data.copy()
outports[out_port_name].update(
metaoutports[out_port_name][data_accessor])
elif type_str == MetaDataSchema.META_OP_RETENTION:
outports[out_port_name] = \
metaoutports[out_port_name][data_accessor].copy()
elif type_str == MetaDataSchema.META_OP_DELETION:
input_port = metaoutports[out_port_name][
MetaDataSchema.META_REF_INPUT]
if input_port in input_meta:
input_meta_data = input_meta[input_port]
else:
input_meta_data = inports[input_port]
outports[out_port_name] = input_meta_data.copy()
for key in metaoutports[out_port_name][data_accessor]:
if key in outports[out_port_name]:
del outports[out_port_name][key]
else:
raise NotImplementedError('META_OP "{}" not implemented'
.format(type_str))
# adjust the columns order
if order_accessor in metaoutports[out_port_name]:
total_properties = len(outports[out_port_name])
order_dict = metaoutports[out_port_name][order_accessor].copy()
key_lists = list(outports[out_port_name].keys())
for key in order_dict:
if order_dict[key] < 0:
order_dict[key] += total_properties
if key in key_lists:
key_lists.remove(key)
items = list(order_dict.items())
items.sort(key=lambda x: x[1])
for i in items:
key_lists.insert(i[1], i[0])
old_dict = outports[out_port_name]
outports[out_port_name] = {
k: old_dict[k]
for k in key_lists if k in old_dict
}
# handle the dynamic output meta_setup
dy = PortsSpecSchema.dynamic
port_inports = self.ports_setup().inports
static_inport_names = [
iport
for iport in port_inports if not port_inports[iport].get(dy, False)
]
for input_port in port_inports:
isdynamic = None
if dy in port_inports[input_port]:
isdynamic = port_inports[input_port][dy]
if isdynamic is not None and isdynamic:
input_connections = self.get_connected_inports()
for port_name in input_connections.keys():
if port_name not in metaoutports and \
port_name not in static_inport_names and \
port_name in input_meta:
outports[port_name] = input_meta[port_name]
return MetaData(inports=inports, outports=outports)
| fsi-samples-main | greenflow/greenflow/dataframe_flow/_node_taskgraph_extension_mixin.py |
from ._node import _Node
__all__ = ['TaskSpecSchema']
class TaskSpecSchema(object):
'''Outline fields expected in a dictionary specifying a task node.
:cvar task_id: unique id or name for the node
:cvar node_type: Plugin class i.e. subclass of Node. Specified as string
or subclass of Node
:cvar conf: Configuration for the plugin i.e. parameterization. This is a
dictionary.
:cvar filepath: Path to python module for custom plugin types.
:cvar module: optional field for the name of the module.
    :cvar inputs: List of ids of other tasks, or a dict mapping this task's
        input ports to 'task_id.output_port' of other tasks; may be empty.
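    Ex. (an illustrative task spec; ids, types, ports and filepath are
    hypothetical):
        task_spec = {
            TaskSpecSchema.task_id: 'distance_task',
            TaskSpecSchema.node_type: 'DistanceNode',
            TaskSpecSchema.filepath: 'custom_nodes.py',
            TaskSpecSchema.conf: {},
            TaskSpecSchema.inputs: {
                'points_df_in': 'points_task.points_df_out'
            }
        }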
'''
task_id = 'id'
node_type = 'type'
conf = 'conf'
filepath = 'filepath'
module = 'module'
inputs = 'inputs'
# outputs = 'outputs'
load = 'load'
save = 'save'
@classmethod
def _typecheck(cls, schema_field, value):
try:
if (schema_field == cls.task_id):
assert isinstance(value, str)
elif schema_field == cls.node_type:
assert (isinstance(value, str) or issubclass(value, _Node))
elif schema_field == cls.conf:
assert (isinstance(value, dict) or isinstance(value, list))
elif schema_field == cls.filepath:
assert isinstance(value, str)
elif schema_field == cls.module:
assert isinstance(value, str)
elif schema_field == cls.inputs:
assert (isinstance(value, list) or isinstance(value, dict))
for item in value:
assert isinstance(item, str)
# elif schema_field == cls.outputs:
# assert isinstance(value, list)
# for item in value:
# assert isinstance(item, str)
elif schema_field == cls.load:
pass
elif schema_field == cls.save:
assert isinstance(value, bool)
else:
raise KeyError(
                    'Unknown schema field "{}" in the task spec.'.format(
schema_field))
except AssertionError as e:
print(schema_field, value)
raise e
_schema_req_fields = [task_id, node_type, conf, inputs]
@classmethod
def validate(cls, task_spec):
'''
:param task_spec: A dictionary per TaskSpecSchema
'''
for ifield in cls._schema_req_fields:
if ifield not in task_spec:
raise KeyError('task spec missing required field: {}'
.format(ifield))
for task_field, field_val in task_spec.items():
cls._typecheck(task_field, field_val)
| fsi-samples-main | greenflow/greenflow/dataframe_flow/taskSpecSchema.py |
from .util import * # noqa: F403,F401
| fsi-samples-main | greenflow/greenflow/plugin_nodes/__init__.py |
from .compositeNode import CompositeNode
from .contextCompositeNode import ContextCompositeNode
__all__ = ["CompositeNode", "ContextCompositeNode"]
| fsi-samples-main | greenflow/greenflow/plugin_nodes/util/__init__.py |
from jsonpath_ng import parse
# map from python obj name to schema name
type_map = {
"dict": 'object',
'list': 'array',
'float': 'number',
'str': 'string',
'int': 'number',
'bool': 'boolean'
}
def parse_config(json_obj):
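    """Flatten a nested config/replacement object into a mapping keyed by
    JSON-schema type (e.g. 'number', 'array_string'), then by node id, where
    each entry records a leaf field's value, full json path and display string.
    A sketch of the returned structure (keys and values are illustrative):
        {'number': {'node_a': [{'value': 1.0,
                                'path': 'node_a.conf.rate',
                                'item': 'rate val: 1.0'}]}}
    """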
expr = parse('$..*') # search for all the fields in the json file
matches = expr.find(json_obj)
map_result = {}
for match in matches:
v = match.value
if type(v) == dict:
continue
element_type = ''
if type(v) == list:
if len(v) > 0:
ele = v[0]
if type(ele) == dict or type(ele) == list:
continue
else:
element_type = type_map[ele.__class__.__name__]
if v is None:
continue
result_type = type_map[v.__class__.__name__]
result_value = v
result_element_type = element_type
result_path = str(match.full_path)
node_id = result_path.split('.')[0]
if result_element_type:
type_key = result_type + '_' + result_element_type
else:
type_key = result_type
type_container = map_result.get(type_key, {})
map_result[type_key] = type_container
content_container = type_container.get(node_id, [])
type_container[node_id] = content_container
item_str = '.'.join(result_path.split('.')[2:])+" val: "+str(v)
content_container.append({'value': result_value, "path": result_path,
"item": item_str})
return map_result
| fsi-samples-main | greenflow/greenflow/plugin_nodes/util/json_util.py |
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow import TaskGraph
from greenflow.dataframe_flow.taskSpecSchema import TaskSpecSchema
from greenflow.dataframe_flow.portsSpecSchema import ConfSchema
from greenflow.dataframe_flow.portsSpecSchema import NodePorts
from greenflow.dataframe_flow.metaSpec import MetaData
import os
from greenflow.dataframe_flow.util import get_file_path
import uuid
__all__ = ["CompositeNode"]
def _get_node(port_name):
return port_name.split('@')[0]
def _get_port(port_name):
return '@'.join(port_name.split('@')[1:])
def fix_port_name(obj, subgraph_node_name):
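    """Prefix every port-name key in `obj` with '<subgraph_node_name>@'.
    Ex. (illustrative): fix_port_name({'out': spec}, 'node_a')
    returns {'node_a@out': spec}
    """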
output = {}
for key in obj.keys():
output[subgraph_node_name+'@'+key] = obj[key]
return output
def group_ports(input_list):
"""
    group input ports by node id.
    returns a dictionary; keys are node ids,
    values are lists of ports
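    Ex. (illustrative):
        group_ports(['node_a.port0', 'node_a.port1', 'node_b.out'])
        # returns {'node_a': ['port0', 'port1'], 'node_b': ['out']}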
"""
nodes_group = {}
for inp_port in input_list:
inp = inp_port.split('.')[0] # node id
port_name = inp_port.split('.')[1] # port name
if inp in nodes_group:
port_list = nodes_group.get(inp)
else:
port_list = []
nodes_group[inp] = port_list
port_list.append(port_name)
return nodes_group
class CompositeNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.task_graph = None
def update(self):
TemplateNodeMixin.update(self)
self.conf_update() # update the conf
task_graph = ""
replacementObj = {}
task_graph_obj = None
if 'taskgraph' in self.conf:
try:
task_graph = get_file_path(self.conf['taskgraph'])
except FileNotFoundError:
task_graph = None
if task_graph is not None and os.path.exists(task_graph):
# with open(task_graph) as f:
# task_graph = hashlib.md5(f.read().encode()).hexdigest()
task_graph_obj = TaskGraph.load_taskgraph(
get_file_path(self.conf['taskgraph']))
self.all_inputs = []
self.all_outputs = []
self.task_graph = task_graph_obj
self.update_replace(replacementObj, task_graph_obj)
self.replacementObj = replacementObj
extra_updated = set()
extra_roots = []
if self.task_graph is not None:
self.task_graph._build(replace=self.replacementObj)
if 'input' in self.conf:
# group input ports by node id
self.inp_groups = group_ports(self.conf['input'])
for inp in self.inp_groups.keys():
if inp in self.task_graph:
inputNode = self.task_graph[inp]
update_inputs = []
replaced_ports = set(self.inp_groups[inp])
for oldInput in inputNode.inputs:
if oldInput['to_port'] in replaced_ports:
# we want to disconnect this old one and
# connect to external node
if hasattr(self, 'inputs'):
for externalInput in self.inputs:
if (_get_node(externalInput['to_port'])
== inputNode.uid and _get_port(
externalInput['to_port'])
== oldInput['to_port']):
newInput = {}
newInput['to_port'] = _get_port(
externalInput['to_port'])
newInput[
'from_port'] = externalInput[
'from_port']
newInput[
'from_node'] = externalInput[
'from_node']
update_inputs.append(newInput)
else:
update_inputs.append(oldInput)
inputNode.inputs = update_inputs
# add all the `updated` parents to the set
for i in inputNode.inputs:
if hasattr(i['from_node'], 'ports_setup_cache'):
extra_updated.add(i['from_node'])
# if all the parents are updated, this is
# a new root node
if all([
i['from_node'] in extra_updated
for i in inputNode.inputs
]):
extra_roots.append(inputNode)
self.all_inputs.append((inputNode, inp))
if 'output' in self.conf:
self.oup_groups = group_ports(self.conf['output'])
for oup in self.oup_groups.keys():
if oup in self.task_graph:
outNode = self.task_graph[oup]
# we do not disconnect anything here, as we take extra
# outputs for composite node.
                        # Note, we rely on the fact that taskgraph.run method
# will remove the output collector from taskgraph if
# the outputlist is set
self.all_outputs.append((outNode, oup))
# outNode_fun(outNode, oup_groups[oup])
# update all the nodes and cache it
self.task_graph.breadth_first_update(extra_roots=extra_roots,
extra_updated=extra_updated)
def conf_update(self):
"""
This method is used to overwrite the conf from
external sources
"""
pass
def _make_sub_graph_connection(self, task_graph,
inputNode_fun,
outNode_fun):
"""
        Connects the current composite node's inputs and outputs to
        the subgraph task_graph's inputs and outputs.
        inputNode_fun takes the subgraph inputNode and all the input ports
        as arguments; it processes the inputNode logic.
        outNode_fun takes the subgraph outNode and all the output ports
        as arguments; it processes the outNode logic.
"""
for innode in self.all_inputs:
inputNode_fun(innode[0], self.inp_groups[innode[1]])
for outnode in self.all_outputs:
# inputNode_fun(innode[0], inp_groups[innode[1]])
outNode_fun(outnode[0], self.oup_groups[outnode[1]])
        # this part is to update each of the nodes so dynamic inputs can be
        # processed
# task_graph.cache_update_result()
def ports_setup(self):
task_graph = self.task_graph
inports = {}
outports = {}
if task_graph:
def inputNode_fun(inputNode, in_ports):
inport = {}
before_fix = inputNode.ports_setup().inports
for key in before_fix.keys():
if key in in_ports:
inport[key] = before_fix[key]
inports.update(fix_port_name(inport, inputNode.uid))
def outNode_fun(outNode, out_ports):
ouport = {}
before_fix = outNode.ports_setup().outports
for key in before_fix.keys():
if key in out_ports:
ouport[key] = before_fix[key]
outports.update(fix_port_name(ouport, outNode.uid))
self._make_sub_graph_connection(task_graph,
inputNode_fun, outNode_fun)
output_port = NodePorts(inports=inports, outports=outports)
return output_port
def meta_setup(self):
task_graph = self.task_graph
required = {}
out_meta = {}
if task_graph:
def inputNode_fun(inputNode, in_ports):
req = {}
# do meta_setup so required columns are ready
input_meta = inputNode.meta_setup().inports
for key in input_meta.keys():
if key in in_ports:
req[key] = input_meta[key]
required.update(fix_port_name(req, inputNode.uid))
def outNode_fun(outNode, out_ports):
oucols = {}
before_fix = outNode.meta_setup().outports
for key in before_fix.keys():
if key in out_ports:
oucols[key] = before_fix[key]
out_meta.update(fix_port_name(oucols,
outNode.uid))
self._make_sub_graph_connection(task_graph,
inputNode_fun, outNode_fun)
metadata = MetaData(inports=required, outports=out_meta)
return metadata
def conf_schema(self):
task_graph = self.task_graph
json = {
"title": "Composite Node configure",
"type": "object",
"description": """Use a sub taskgraph as a composite node""",
"properties": {
"taskgraph": {
"type": "string",
"description": "the taskgraph filepath"
},
"input": {
"type": "array",
"description": "the input node ids",
"items": {
"type": "string"
}
},
"output": {
"type": "array",
"description": "the output node ids",
"items": {
"type": "string"
}
},
"subnode_ids": {
"title": self.uid+" subnode ids",
"type": "array",
"items": {
"type": "string"
},
"description": """sub graph node ids that need
to be reconfigured"""
},
"subnodes_conf": {
"title": self.uid+" subnodes configuration",
"type": "object",
"properties": {}
}
},
"required": ["taskgraph"],
}
ui = {
"taskgraph": {"ui:widget": "TaskgraphSelector"},
"subnodes_conf": {}
}
if task_graph:
def inputNode_fun(inputNode, in_ports):
pass
def outNode_fun(outNode, out_ports):
pass
self._make_sub_graph_connection(task_graph,
inputNode_fun, outNode_fun)
ids_in_graph = []
in_ports = []
out_ports = []
for t in task_graph:
node_id = t.get('id')
if node_id != '':
node = task_graph[node_id]
all_ports = node.ports_setup()
for port in all_ports.inports.keys():
in_ports.append(node_id+'.'+port)
for port in all_ports.outports.keys():
out_ports.append(node_id+'.'+port)
ids_in_graph.append(node_id)
json['properties']['input']['items']['enum'] = in_ports
json['properties']['output']['items']['enum'] = out_ports
json['properties']['subnode_ids']['items']['enum'] = ids_in_graph
if 'subnode_ids' in self.conf and task_graph:
for subnodeId in self.conf['subnode_ids']:
if subnodeId in task_graph:
nodeObj = task_graph[subnodeId]
schema = nodeObj.conf_schema()
json['properties'][
"subnodes_conf"]['properties'][subnodeId] = {
"type": "object",
"properties": {
"conf": schema.json
}
}
ui["subnodes_conf"].update({
subnodeId: {
'conf': schema.ui
}
})
out_schema = ConfSchema(json=json, ui=ui)
return out_schema
def update_replace(self, replaceObj, task_graph=None, **kwargs):
        # find the other replacement conf
if 'subnodes_conf' in self.conf:
for key in self.conf['subnodes_conf'].keys():
newid = key
if newid in replaceObj:
replaceObj[newid].update(self.conf[
'subnodes_conf'][key])
else:
replaceObj[newid] = {}
replaceObj[newid].update(self.conf[
'subnodes_conf'][key])
def process(self, inputs, **kwargs):
"""
Composite computation
Arguments
-------
        inputs: dict
        dictionary of input dataframes keyed by input port name.
        Returns
        -------
        dict
        dictionary of output dataframes keyed by output port name.
"""
if 'taskgraph' in self.conf:
task_graph = self.task_graph
# task_graph = TaskGraph.load_taskgraph(
# get_file_path(self.conf['taskgraph']))
# task_graph._build()
outputLists = []
replaceObj = {}
input_feeders = []
def inputNode_fun(inputNode, in_ports):
inports = inputNode.ports_setup().inports
class InputFeed(TemplateNodeMixin, Node):
def meta_setup(self):
output = {}
for inp in inputNode.inputs:
output[inp['to_port']] = inp[
'from_node'].meta_setup().outports[
inp['from_port']]
# it will be something like { input_port: columns }
return MetaData(inports={}, outports=output)
def ports_setup(self):
# it will be something like { input_port: types }
return NodePorts(inports={}, outports=inports)
def update(self):
TemplateNodeMixin.update(self)
def conf_schema(self):
return ConfSchema()
def process(self, empty):
output = {}
for key in inports.keys():
if inputNode.uid+'@'+key in inputs:
output[key] = inputs[inputNode.uid+'@'+key]
return output
uni_id = str(uuid.uuid1())
obj = {
TaskSpecSchema.task_id: uni_id,
TaskSpecSchema.conf: {},
TaskSpecSchema.node_type: InputFeed,
TaskSpecSchema.inputs: []
}
input_feeders.append(obj)
newInputs = {}
for key in inports.keys():
if inputNode.uid+'@'+key in inputs:
newInputs[key] = uni_id+'.'+key
for inp in inputNode.inputs:
if inp['to_port'] not in in_ports:
# need to keep the old connections
newInputs[inp['to_port']] = (inp['from_node'].uid
+ '.' + inp['from_port'])
replaceObj.update({inputNode.uid: {
TaskSpecSchema.inputs: newInputs}
})
def outNode_fun(outNode, out_ports):
out_ports = outNode.ports_setup().outports
# fixed_outports = fix_port_name(out_ports, outNode.uid)
for key in out_ports.keys():
if self.outport_connected(outNode.uid+'@'+key):
outputLists.append(outNode.uid+'.'+key)
self._make_sub_graph_connection(task_graph,
inputNode_fun, outNode_fun)
task_graph.extend(input_feeders)
self.update_replace(replaceObj, task_graph, **kwargs)
result = task_graph.run(outputLists, replace=replaceObj)
output = {}
for key in result.get_keys():
splits = key.split('.')
output['@'.join(splits)] = result[key]
return output
else:
return {}
| fsi-samples-main | greenflow/greenflow/plugin_nodes/util/compositeNode.py |
from .compositeNode import CompositeNode
from greenflow.dataframe_flow.portsSpecSchema import (ConfSchema,
PortsSpecSchema,
NodePorts)
from .data_obj import ConfData
from .json_util import parse_config
from jsonpath_ng import parse
__all__ = ["ContextCompositeNode"]
default_map = {
"boolean": False,
"number": 0.0,
"string": "a string",
"array": []
}
class ContextCompositeNode(CompositeNode):
def init(self):
super().init()
self.INPUT_CONFIG = 'conf_in'
self.OUTPUT_CONFIG = 'conf_out'
def ports_setup(self):
ports = super().ports_setup()
port_type = PortsSpecSchema.port_type
inports = ports.inports
outports = ports.outports
inports[self.INPUT_CONFIG] = {
port_type: ConfData
}
outports[self.OUTPUT_CONFIG] = {
port_type: ConfData
}
output_port = NodePorts(inports=inports, outports=outports)
return output_port
def meta_setup(self):
out_meta = super().meta_setup()
out_meta.outports[self.OUTPUT_CONFIG] = self.conf
return out_meta
def conf_schema(self):
# import pdb
# pdb.set_trace()
task_graph = self.task_graph
# cache_key, task_graph, replacementObj = self._compute_hash_key()
# if cache_key in CACHE_SCHEMA:
# return CACHE_SCHEMA[cache_key]
json = {
"title": "Context Composite Node configure",
"type": "object",
"description": """Use a sub taskgraph as a composite node""",
"properties": {
"taskgraph": {
"type": "string",
"description": "the taskgraph filepath"
},
"input": {
"type": "array",
"description": "the input node ids",
"items": {
"type": "string"
}
},
"output": {
"type": "array",
"description": "the output node ids",
"items": {
"type": "string"
}
},
},
"required": ["taskgraph"]
}
ui = {
"taskgraph": {"ui:widget": "TaskgraphSelector"},
}
types = []
if 'taskgraph' in self.conf:
json['properties']['context'] = {
"type": "object",
"description": "context parameters",
"additionalProperties": {
"type": "object",
"description": """The context
parameters for this composite node""",
"properties": {
"type": {
"type": "string",
},
},
"dependencies": {
"type": {
"oneOf": []
}
}
}
}
# import pdb
# pdb.set_trace()
all_fields = parse_config(self.replacementObj)
types = list(all_fields.keys())
addional = json['properties']['context']['additionalProperties']
addional['properties']['type']['enum'] = types
typelist = addional['dependencies']['type']['oneOf']
for ty in types:
ty_splits = ty.split('_')
obj_temp = {
"properties": {
"type": {
"type": "string",
"description": "the parameter data type"
},
"value": {
"type": ty_splits[0],
"default": default_map[ty_splits[0]],
"description": "the value for this context parameter"
},
"map": {
"type": "array",
"description": """The fields of subnode's config this
parameter maps to""",
"items": {
"type": "object",
"properties": {
"node_id": {
"type": "string",
"enum": []
}
},
"dependencies": {
"node_id": {
"oneOf": [],
}
}
}
}
}
}
if len(ty_splits) > 1:
obj_temp['properties']['value']['items'] = {
"type": ty_splits[1]
}
type_container = all_fields[ty]
ids = list(type_container.keys())
obj_temp['properties']['type']['enum'] = [ty]
obj_temp['properties']['map'][
'items']['properties']['node_id']['enum'] = ids
idlist = obj_temp['properties']['map'][
'items']['dependencies']['node_id']['oneOf']
for subid in ids:
id_obj = {
"properties": {
"node_id": {
"type": "string"
},
"xpath": {
"type": "string",
}
}
}
content = type_container[subid]
paths = [i['path'] for i in content]
names = [i['item'] for i in content]
id_obj['properties']['node_id']['enum'] = [subid]
id_obj['properties']['xpath']['enum'] = paths
id_obj['properties']['xpath']['enumNames'] = names
idlist.append(id_obj)
typelist.append(obj_temp)
if 'taskgraph' in self.conf:
def inputNode_fun(inputNode, in_ports):
pass
def outNode_fun(outNode, out_ports):
pass
self._make_sub_graph_connection(task_graph,
inputNode_fun, outNode_fun)
ids_in_graph = []
in_ports = []
out_ports = []
for t in task_graph:
node_id = t.get('id')
if node_id != '':
node = task_graph[node_id]
all_ports = node.ports_setup()
for port in all_ports.inports.keys():
in_ports.append(node_id+'.'+port)
for port in all_ports.outports.keys():
out_ports.append(node_id+'.'+port)
ids_in_graph.append(node_id)
json['properties']['input']['items']['enum'] = in_ports
json['properties']['output']['items']['enum'] = out_ports
out_schema = ConfSchema(json=json, ui=ui)
return out_schema
def conf_update(self):
input_conf = self.get_input_meta(self.INPUT_CONFIG)
if input_conf is not None:
self.conf.update(input_conf)
def update_replace(self, replaceObj, task_graph, **kwargs):
"""
called inside the self.process function to get the updated
replacement object before taskgraph.run.
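        Ex. (an illustrative 'context' entry; the id, xpath and value are
        hypothetical):
            self.conf['context'] = {
                'rate': {
                    'type': 'number',
                    'value': 0.5,
                    'map': [{'node_id': 'node_a',
                             'xpath': 'node_a.conf.rate'}]
                }
            }
            # 0.5 is written into replaceObj at xpath 'node_a.conf.rate'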
"""
        # find the other replacement conf
if task_graph:
for task in task_graph:
key = task.get('id')
newid = key
conf = task.get('conf')
if newid in replaceObj:
replaceObj[newid].update({'conf': conf})
else:
replaceObj[newid] = {}
replaceObj[newid].update({'conf': conf})
# replace the numbers from the context
if 'context' in self.conf:
for key in self.conf['context'].keys():
val = self.conf['context'][key]['value']
for map_obj in self.conf['context'][key]['map']:
xpath = map_obj['xpath']
expr = parse(xpath)
expr.update(replaceObj, val)
def process(self, inputs):
if self.INPUT_CONFIG in inputs:
self.conf.update(inputs[self.INPUT_CONFIG].data)
output = {}
if self.outport_connected(self.OUTPUT_CONFIG):
conf = ConfData(self.conf)
output[self.OUTPUT_CONFIG] = conf
more_output = super().process(inputs)
output.update(more_output)
return output
| fsi-samples-main | greenflow/greenflow/plugin_nodes/util/contextCompositeNode.py |
class ConfData(object):
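    '''Light-weight wrapper used to pass a configuration dictionary between
    composite node ports.'''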
def __init__(self, data):
self.data = data
| fsi-samples-main | greenflow/greenflow/plugin_nodes/util/data_obj.py |
| fsi-samples-main | greenflow/tests/__init__.py |
| fsi-samples-main | greenflow/tests/unit/__init__.py |
'''
greenflow Node in TaskGraph Columns Validation Unit Tests
To run unittests:
# Using standard library unittest
python -m unittest -v
python -m unittest tests/unit/test_node_taskgraph_typechecking.py -v
python -m unittest discover -v tests
or
# Using pytest
# "conda install pytest" or "pip install pytest"
pytest -v tests
pytest -v tests/unit/test_node_taskgraph_typechecking.py
'''
import unittest
import copy
import warnings
from greenflow.dataframe_flow import (
Node, PortsSpecSchema, NodePorts, MetaData)
from greenflow.dataframe_flow import (TaskSpecSchema, TaskGraph)
from .utils import make_orderer
ordered, compare = make_orderer()
unittest.defaultTestLoader.sortTestMethodsUsing = compare
class MyList(list):
pass
class NodeNumGen(Node):
def ports_setup(self):
ptype = self.conf.get('port_type', list)
output_ports = {'numlist': {PortsSpecSchema.port_type: ptype}}
return NodePorts(outports=output_ports)
def meta_setup(self):
colsopt = self.conf['columns_option']
cols = {
'listnums': {'list': 'numbers'},
'mylistnums': {'list': 'numbers'},
'rangenums': {'range': 'numbers'},
'listnotnums': {'list': 'notnumbers'},
}.get(colsopt)
return MetaData(inports={}, outports={'numlist': cols})
def process(self, inputs):
colsopt = self.conf['columns_option']
outopt = self.conf.get('out_type', colsopt)
rng = range(10)
# get callables according to desired type
out = {
'listnums': lambda: list(rng),
'mylistnums': lambda: MyList(rng),
'rangenums': lambda: rng,
'listnotnums': lambda: [str(ii) for ii in rng],
}.get(outopt)
return {'numlist': out()}
class NodeNumProc(Node):
def ports_setup(self):
ptype = self.conf.get('port_type', list)
inports = {'inlist': {PortsSpecSchema.port_type: ptype}}
outports = {'sum': {PortsSpecSchema.port_type: float}}
return NodePorts(inports=inports, outports=outports)
def meta_setup(self):
required = {'inlist': {'list': 'numbers'}}
columns_out = {'sum': {'element': 'number'}}
return MetaData(inports=required, outports=columns_out)
def process(self, inputs):
inlist = inputs['inlist']
return {'sum': float(sum(inlist))}
class TestNodeTaskGraphTypechecking(unittest.TestCase):
def setUp(self):
warnings.simplefilter('ignore', category=DeprecationWarning)
self.numgen_spec = {
TaskSpecSchema.task_id: 'numgen',
TaskSpecSchema.node_type: NodeNumGen,
TaskSpecSchema.conf: {},
TaskSpecSchema.inputs: {}
}
self.numproc_spec = {
TaskSpecSchema.task_id: 'numproc',
TaskSpecSchema.node_type: NodeNumProc,
TaskSpecSchema.conf: {},
TaskSpecSchema.inputs: {
'inlist': 'numgen.numlist'
}
}
def tearDown(self):
pass
@ordered
def test_columns_name_mismatch(self):
numgen_spec = copy.deepcopy(self.numgen_spec)
numproc_spec = copy.deepcopy(self.numproc_spec)
numgen_spec[TaskSpecSchema.conf] = {'columns_option': 'rangenums'}
tspec_list = [numgen_spec, numproc_spec]
tgraph_invalid = TaskGraph(tspec_list)
with self.assertRaises(LookupError) as cm:
tgraph_invalid.run(['numproc.sum'])
outerr_msg = '{}'.format(cm.exception)
errmsg = 'Task "numproc" missing required column "list" from '\
'"numgen.numlist".'
self.assertIn(errmsg, outerr_msg)
@ordered
def test_columns_type_mismatch(self):
numgen_spec = copy.deepcopy(self.numgen_spec)
numproc_spec = copy.deepcopy(self.numproc_spec)
numgen_spec[TaskSpecSchema.conf] = {'columns_option': 'listnotnums'}
tspec_list = [numgen_spec, numproc_spec]
tgraph_invalid = TaskGraph(tspec_list)
with self.assertRaises(LookupError) as cm:
tgraph_invalid.run(['numproc.sum'])
outerr_msg = '{}'.format(cm.exception)
errmsg = 'Task "numproc" column "list" expected type "numbers" got '\
'type "notnumbers" instead.'
self.assertIn(errmsg, outerr_msg)
@ordered
def test_ports_output_type_mismatch(self):
numgen_spec = copy.deepcopy(self.numgen_spec)
numproc_spec = copy.deepcopy(self.numproc_spec)
numgen_spec[TaskSpecSchema.conf] = {
'columns_option': 'listnums',
'out_type': 'rangenums'
}
tspec_list = [numgen_spec, numproc_spec]
tgraph_invalid = TaskGraph(tspec_list)
with self.assertRaises(TypeError) as cm:
tgraph_invalid.run(['numproc.sum'])
outerr_msg = '{}'.format(cm.exception)
errmsg = 'Node "numgen" output port "numlist" produced wrong type '\
'"<class \'range\'>". Expected type "[<class \'list\'>]"'
self.assertEqual(errmsg, outerr_msg)
@ordered
def test_ports_connection_type_mismatch(self):
numgen_spec = copy.deepcopy(self.numgen_spec)
numproc_spec = copy.deepcopy(self.numproc_spec)
numgen_spec[TaskSpecSchema.conf] = {'columns_option': 'listnums'}
numproc_spec[TaskSpecSchema.conf] = {'port_type': range}
tspec_list = [numgen_spec, numproc_spec]
tgraph_invalid = TaskGraph(tspec_list)
with self.assertRaises(TypeError) as cm:
tgraph_invalid.run(['numproc.sum'])
outerr_msg = '{}'.format(cm.exception)
errmsg = 'Connected nodes do not have matching port types. '\
'Fix port types.'
self.assertIn(errmsg, outerr_msg)
@ordered
def test_ports_connection_subclass_type_mismatch(self):
numgen_spec = copy.deepcopy(self.numgen_spec)
numproc_spec = copy.deepcopy(self.numproc_spec)
numgen_spec[TaskSpecSchema.conf] = {'columns_option': 'listnums'}
numproc_spec[TaskSpecSchema.conf] = {'port_type': MyList}
tspec_list = [numgen_spec, numproc_spec]
tgraph_invalid = TaskGraph(tspec_list)
with self.assertRaises(TypeError) as cm:
tgraph_invalid.run(['numproc.sum'])
outerr_msg = '{}'.format(cm.exception)
errmsg = 'Connected nodes do not have matching port types. '\
'Fix port types.'
self.assertIn(errmsg, outerr_msg)
@ordered
def test_ports_connection_subclass_type_match(self):
numgen_spec = copy.deepcopy(self.numgen_spec)
numproc_spec = copy.deepcopy(self.numproc_spec)
numgen_spec[TaskSpecSchema.conf] = {
'port_type': MyList,
'columns_option': 'mylistnums'
}
numproc_spec[TaskSpecSchema.conf] = {'port_type': list}
tspec_list = [numgen_spec, numproc_spec]
tgraph_valid = TaskGraph(tspec_list)
sumout, = tgraph_valid.run(['numproc.sum'])
self.assertEqual(sumout, 45)
@ordered
def test_columns_and_ports_types_match(self):
numgen_spec = copy.deepcopy(self.numgen_spec)
numproc_spec = copy.deepcopy(self.numproc_spec)
numgen_spec[TaskSpecSchema.conf] = {'columns_option': 'listnums'}
tspec_list = [numgen_spec, numproc_spec]
tgraph_valid = TaskGraph(tspec_list)
sumout, = tgraph_valid.run(['numproc.sum'])
self.assertEqual(sumout, 45)
if __name__ == '__main__':
unittest.main()
| fsi-samples-main | greenflow/tests/unit/test_node_taskgraph_typechecking.py |
'''
Workflow Serialization Unit Tests
To run unittests:
# Using standard library unittest
python -m unittest -v
python -m unittest tests/unit/test_workflow_serialization.py -v
or
python -m unittest discover <test_directory>
python -m unittest discover -s <directory> -p 'test_*.py'
# Using pytest
# "conda install pytest" or "pip install pytest"
pytest -v tests
pytest -v tests/unit/test_workflow_serialization.py
'''
import os
import warnings
from io import StringIO
import yaml
import shutil
import tempfile
import unittest
from difflib import context_diff
from .utils import make_orderer
ordered, compare = make_orderer()
unittest.defaultTestLoader.sortTestMethodsUsing = compare
# ------------------------------------------- Workflow Serialization Test Cases
WORKFLOW_YAML = \
'''- id: points
type: PointNode
conf: {}
inputs: []
filepath: custom_nodes.py
- id: distance
type: DistanceNode
conf: {}
inputs:
- points
filepath: custom_nodes.py
- id: node_outputCsv
type: OutCsvNode
conf:
path: symbol_returns.csv
inputs:
- distance
'''
class TestWorkflowSerialization(unittest.TestCase):
def setUp(self):
# ignore importlib warnings.
warnings.simplefilter('ignore', category=ImportWarning)
warnings.simplefilter('ignore', category=DeprecationWarning)
# some dummy tasks
task_input = {
'id': 'points',
'type': 'PointNode',
'conf': {},
'inputs': [],
'filepath': 'custom_nodes.py'
}
task_compute = {
'id': 'distance',
'type': 'DistanceNode',
'conf': {},
'inputs': ['points'],
'filepath': 'custom_nodes.py'
}
task_output = {
'id': 'node_outputCsv',
'type': 'OutCsvNode',
'conf': {
'path': 'symbol_returns.csv'
},
'inputs': ['distance']
}
self._task_list = [task_input, task_compute, task_output]
# Create a temporary directory
self._test_dir = tempfile.mkdtemp()
def tearDown(self):
# Remove the directory after the test
shutil.rmtree(self._test_dir)
@ordered
def test_save_workflow(self):
'''Test saving a workflow to yaml:'''
from greenflow.dataframe_flow import TaskGraph
task_graph = TaskGraph(self._task_list)
workflow_file = os.path.join(self._test_dir, 'test_save_workflow.yaml')
task_graph.save_taskgraph(workflow_file)
with open(workflow_file) as wf:
workflow_str = wf.read()
        # verify the workflow contents are same as expected. Empty list if same.
cdiff = list(context_diff(WORKFLOW_YAML, workflow_str))
cdiff_empty = cdiff == []
err_msg = 'Workflow yaml contents do not match expected results.\n'\
'SHOULD HAVE SAVED:\n\n'\
'{wyaml}\n\n'\
'INSTEAD FILE CONTAINS:\n\n'\
'{fcont}\n\n'\
'DIFF:\n\n'\
'{diff}'.format(wyaml=WORKFLOW_YAML, fcont=workflow_str,
diff=''.join(cdiff))
self.assertTrue(cdiff_empty, err_msg)
@ordered
def test_load_workflow(self):
'''Test loading a workflow from yaml:'''
from greenflow.dataframe_flow import TaskGraph
workflow_file = os.path.join(self._test_dir, 'test_save_workflow.yaml')
with open(workflow_file, 'w') as wf:
wf.write(WORKFLOW_YAML)
task_list = TaskGraph.load_taskgraph(workflow_file)
all_tasks_exist = True
for t in task_list:
match = False
if t._task_spec in self._task_list:
match = True
if not match:
all_tasks_exist = False
break
with StringIO() as yf:
yaml.dump(self._task_list, yf,
default_flow_style=False, sort_keys=False)
yf.seek(0)
err_msg = 'Load workflow failed. Missing expected task items.\n'\
'EXPECTED WORKFLOW YAML:\n\n'\
'{wyaml}\n\n'\
'GOT TASKS FORMATTED AS YAML:\n\n'\
'{tlist}\n\n'.format(wyaml=WORKFLOW_YAML, tlist=yf.read())
self.assertTrue(all_tasks_exist, err_msg)
if __name__ == '__main__':
# with warnings.catch_warnings():
# warnings.simplefilter('ignore', category=ImportWarning)
# unittest.main()
unittest.main()
| fsi-samples-main | greenflow/tests/unit/test_workflow_serialization.py |
'''
greenflow Node API Unit Tests
To run unittests:
# Using standard library unittest
python -m unittest -v
python -m unittest tests/unit/test_node_api.py -v
or
python -m unittest discover <test_directory>
python -m unittest discover -s <directory> -p 'test_*.py'
# Using pytest
# "conda install pytest" or "pip install pytest"
pytest -v tests
pytest -v tests/unit/test_node_api.py
'''
import os
import unittest
from greenflow.dataframe_flow import TaskSpecSchema
from greenflow.dataframe_flow.task import Task
from greenflow.dataframe_flow._node import _Node
from greenflow.dataframe_flow.node import (Node, _PortsMixin)
from greenflow.dataframe_flow._node_flow import NodeTaskGraphMixin
from greenflow.dataframe_flow.config_nodes_modules import get_node_obj
from .utils import make_orderer
ordered, compare = make_orderer()
unittest.defaultTestLoader.sortTestMethodsUsing = compare
class TestNodeAPI(unittest.TestCase):
def setUp(self):
os.environ['GREENFLOW_CONFIG'] = ''
custom_module = '{}/custom_port_nodes.py'.format(
os.path.dirname(os.path.realpath(__file__)))
points_task_spec = {
TaskSpecSchema.task_id: 'points_task',
TaskSpecSchema.node_type: 'PointNode',
TaskSpecSchema.filepath: custom_module,
TaskSpecSchema.conf: {'npts': 1000},
TaskSpecSchema.inputs: {}
}
self.points_task = Task(points_task_spec)
distance_task_spec = {
TaskSpecSchema.task_id: 'distance_by_cudf',
TaskSpecSchema.node_type: 'DistanceNode',
TaskSpecSchema.filepath: custom_module,
TaskSpecSchema.conf: {},
TaskSpecSchema.inputs: {
'points_df_in': 'points_task.points_df_out'
}
}
self.distance_task = Task(distance_task_spec)
def tearDown(self):
pass
@ordered
def test_node_instantiation(self):
'''Test node instantiation.
1. Test that you cannot instantiate an abstract base class without
first implementing the methods requiring override.
2. Check for the base and base mixin classes in a Node class
implementation.
'''
points_task = self.points_task
# assert cannot instantiate Node without overriding meta_setup
# and process
with self.assertRaises(TypeError) as cm:
_ = Node(points_task)
err_msg = '{}'.format(cm.exception)
self.assertEqual(
err_msg,
"Can't instantiate abstract class Node with abstract methods "
"meta_setup, ports_setup, process")
points_node = get_node_obj(points_task)
self.assertIsInstance(points_node, _Node)
self.assertIsInstance(points_node, Node)
self.assertIsInstance(points_node, _PortsMixin)
self.assertNotIsInstance(points_node, NodeTaskGraphMixin)
points_node = get_node_obj(points_task, tgraph_mixin=True)
self.assertIsInstance(points_node, NodeTaskGraphMixin)
@ordered
def test_node_ports(self):
'''Test the ports related APIs such as existence of ports, input ports,
and output ports.
'''
distance_node = get_node_obj(self.distance_task)
iports = distance_node._get_input_ports()
oports = distance_node._get_output_ports()
self.assertEqual(iports, ['points_df_in'])
self.assertEqual(oports, ['distance_df', 'distance_abs_df'])
if __name__ == '__main__':
unittest.main()
| fsi-samples-main | greenflow/tests/unit/test_node_api.py |
import numpy as np
def make_orderer():
"""Keep tests in order"""
order = {}
def ordered(f):
order[f.__name__] = len(order)
return f
def compare(a, b):
return [1, -1][order[a] < order[b]]
return ordered, compare
def error_function(gpu_series, result_series):
"""
utility function to compare GPU array vs CPU array
Parameters
------
gpu_series: cudf.Series
GPU computation result series
result_series: pandas.Series
Pandas computation result series
Returns
-----
double
maximum error of the two arrays
"""
gpu_arr = gpu_series.to_array(fillna='pandas')
pan_arr = result_series.values
gpu_arr = gpu_arr[~np.isnan(gpu_arr) & ~np.isinf(gpu_arr)]
pan_arr = pan_arr[~np.isnan(pan_arr) & ~np.isinf(pan_arr)]
err = np.abs(gpu_arr - pan_arr).max()
return err
def error_function_index(gpu_series, result_series):
"""
utility function to compare GPU array vs CPU array
Parameters
------
gpu_series: cudf.Series
GPU computation result series
result_series: pandas.Series
Pandas computation result series
Returns
-----
double
maximum error of the two arrays
int
maximum index value diff
"""
err = error_function(gpu_series, result_series)
error_index = np.abs(gpu_series.index.to_array() -
result_series.index.values).max()
return err, error_index
| fsi-samples-main | greenflow/tests/unit/utils.py |
import numpy as np
import pandas as pd
from greenflow.dataframe_flow import Node, MetaData
from greenflow.dataframe_flow import NodePorts, PortsSpecSchema
from greenflow.dataframe_flow import ConfSchema
import os
import warnings
class NodeHDFCacheMixin(object):
def load_cache(self, filename=None) -> dict:
"""
Defines the behavior of how to load the cache file from the `filename`.
        Node can override this method. Default implementation assumes pandas
        dataframes.
Arguments
-------
filename: str
filename of the cache file. Leave as none to use default.
returns: dict
dictionary of the output from this node
"""
cache_dir = os.getenv('GREENFLOW_CACHE_DIR', self.cache_dir)
if filename is None:
filename = cache_dir + '/' + self.uid + '.hdf5'
output_df = {}
with pd.HDFStore(filename, mode='r') as hf:
for oport, pspec in \
self._get_output_ports(full_port_spec=True).items():
ptype = pspec.get(PortsSpecSchema.port_type)
if self.outport_connected(oport):
ptype = ([ptype] if not isinstance(ptype,
list) else ptype)
key = '{}/{}'.format(self.uid, oport)
# check hdf store for the key
if key not in hf:
raise Exception(
                            'The task "{}" port "{}" key "{}" not found in '
'the hdf file "{}". Cannot load from cache.'
.format(self.uid, oport, key, filename)
)
if pd.DataFrame not in ptype:
                        warnings.warn(
                            'Task "{}" port "{}" port type is not set to '
                            'pandas.DataFrame. Attempting to load port data '
                            'with pandas.read_hdf.'.format(self.uid, oport),
                            RuntimeWarning)
output_df[oport] = pd.read_hdf(hf, key)
return output_df
def save_cache(self, output_data: dict):
'''Defines the behavior for how to save the output of a node to
        filesystem cache. Default implementation assumes pandas dataframes.
:param output_data: The output from :meth:`process`. For saving to hdf
requires that the dataframe(s) have `to_hdf` method.
'''
cache_dir = os.getenv('GREENFLOW_CACHE_DIR', self.cache_dir)
os.makedirs(cache_dir, exist_ok=True)
filename = cache_dir + '/' + self.uid + '.hdf5'
with pd.HDFStore(filename, mode='w') as hf:
for oport, odf in output_data.items():
# check for to_hdf attribute
if not hasattr(odf, 'to_hdf'):
raise Exception(
'Task "{}" port "{}" output object is missing '
'"to_hdf" attribute. Cannot save to cache.'
.format(self.uid, oport))
dtype = '{}'.format(type(odf)).lower()
if 'dataframe' not in dtype:
                    warnings.warn(
                        'Task "{}" port "{}" output object is not a '
                        'dataframe. Attempting to save to hdf with the '
                        '"to_hdf" method.'.format(self.uid, oport),
                        RuntimeWarning)
key = '{}/{}'.format(self.uid, oport)
odf.to_hdf(hf, key, format='table', data_columns=True)
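# Cache layout sketch: with the defaults above, a task with uid 'points_task'
# and output port 'points_df_out' is written to '<cache_dir>/points_task.hdf5'
# under the HDF key 'points_task/points_df_out', which is the same key that
# load_cache() looks up when a task is run with TaskSpecSchema.load enabled.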
class PointNode(NodeHDFCacheMixin, Node):
def ports_setup(self):
input_ports = {}
output_ports = {
'points_df_out': {
PortsSpecSchema.port_type: pd.DataFrame
}
}
return NodePorts(inports=input_ports, outports=output_ports)
def conf_schema(self):
json = {
"title": "PointNode configure",
"type": "object",
"properties": {
"npts": {
"type": "number",
"description": "number of data points",
"minimum": 10
}
},
"required": ["npts"],
}
ui = {
"npts": {"ui:widget": "updown"}
}
return ConfSchema(json=json, ui=ui)
def init(self):
pass
def meta_setup(self):
columns_out = {
'points_df_out': {
'x': 'float64',
'y': 'float64'
},
'points_ddf_out': {
'x': 'float64',
'y': 'float64'
}
}
return MetaData(inports={}, outports=columns_out)
def process(self, inputs):
npts = self.conf['npts']
seed = self.conf.get('nseed')
if seed is not None:
np.random.seed(seed)
df = pd.DataFrame()
df['x'] = np.random.rand(npts)
df['y'] = np.random.rand(npts)
output = {}
if self.outport_connected('points_df_out'):
output.update({'points_df_out': df})
return output
class DistanceNode(NodeHDFCacheMixin, Node):
def ports_setup(self):
port_type = PortsSpecSchema.port_type
input_ports = {
'points_df_in': {
port_type: [pd.DataFrame]
}
}
output_ports = {
'distance_df': {
port_type: [pd.DataFrame]
},
'distance_abs_df': {
PortsSpecSchema.port_type: [pd.DataFrame]
}
}
input_connections = self.get_connected_inports()
if 'points_df_in' in input_connections:
types = input_connections['points_df_in']
# connected, use the types passed in from parent
return NodePorts(inports={'points_df_in': {port_type: types}},
outports={'distance_df': {port_type: types},
'distance_abs_df': {port_type: types},
})
else:
return NodePorts(inports=input_ports, outports=output_ports)
def conf_schema(self):
return ConfSchema()
def init(self):
self.delayed_process = True
def meta_setup(self):
req_cols = {
'x': 'float64',
'y': 'float64'
}
required = {
'points_df_in': req_cols,
}
input_meta = self.get_input_meta()
output_cols = ({
'distance_df': {
'distance_df': 'float64',
'x': 'float64',
'y': 'float64'
},
'distance_abs_df': {
'distance_abs_df': 'float64',
'x': 'float64',
'y': 'float64'
}
})
if 'points_df_in' in input_meta:
col_from_inport = input_meta['points_df_in']
# additional ports
output_cols['distance_df'].update(col_from_inport)
output_cols['distance_abs_df'].update(col_from_inport)
return MetaData(inports=required, outports=output_cols)
def process(self, inputs):
df = inputs['points_df_in']
output = {}
        if self.outport_connected('distance_df'):
            # Euclidean distance of each point from the origin
            copy_df = df.copy()
            copy_df['distance_df'] = np.sqrt((df['x'] ** 2 + df['y'] ** 2))
            output.update({'distance_df': copy_df})
        if self.outport_connected('distance_abs_df'):
            # Manhattan (L1) distance of each point from the origin
            copy_df = df.copy()
            copy_df['distance_abs_df'] = np.abs(df['x']) + np.abs(df['y'])
            output.update({'distance_abs_df': copy_df})
return output
| fsi-samples-main | greenflow/tests/unit/custom_port_nodes.py |
'''
greenflow TaskGraph API Unit Tests
To run unittests:
# Using standard library unittest
python -m unittest -v
python -m unittest tests/unit/test_taskgraph_api.py -v
or
python -m unittest discover <test_directory>
python -m unittest discover -s <directory> -p 'test_*.py'
# Using pytest
# "conda install pytest" or "pip install pytest"
pytest -v tests
pytest -v tests/unit/test_taskgraph_api.py
'''
import os
import shutil
import tempfile
from difflib import context_diff
import yaml
from io import StringIO
import warnings
import unittest
import pandas as pd
from greenflow.dataframe_flow import (TaskSpecSchema, TaskGraph)
from greenflow.dataframe_flow.config_nodes_modules import DEFAULT_MODULE
from greenflow.dataframe_flow import Node
from .utils import make_orderer
ordered, compare = make_orderer()
unittest.defaultTestLoader.sortTestMethodsUsing = compare
TASKGRAPH_YAML = \
'''- id: points_task
type: PointNode
conf:
npts: 1000
inputs: []
- id: distance_by_df
type: DistanceNode
conf: {}
inputs:
points_df_in: points_task.points_df_out
'''
class TestTaskGraphAPI(unittest.TestCase):
def setUp(self):
import gc # python garbage collector
# warmup
s = pd.Series([1, 2, 3, None, 4])
del(s)
gc.collect()
os.environ['GREENFLOW_PLUGIN_MODULE'] = 'tests.unit.custom_port_nodes'
points_task_spec = {
TaskSpecSchema.task_id: 'points_task',
TaskSpecSchema.node_type: 'PointNode',
TaskSpecSchema.conf: {'npts': 1000},
TaskSpecSchema.inputs: []
}
distance_task_spec = {
TaskSpecSchema.task_id: 'distance_by_df',
TaskSpecSchema.node_type: 'DistanceNode',
TaskSpecSchema.conf: {},
TaskSpecSchema.inputs: {
'points_df_in': 'points_task.points_df_out'
}
}
tspec_list = [points_task_spec, distance_task_spec]
self.tgraph = TaskGraph(tspec_list)
# Create a temporary directory
self._test_dir = tempfile.mkdtemp()
os.environ['GREENFLOW_CACHE_DIR'] = os.path.join(self._test_dir,
'.cache')
def tearDown(self):
global DEFAULT_MODULE
os.environ['GREENFLOW_PLUGIN_MODULE'] = DEFAULT_MODULE
os.environ['GREENFLOW_CACHE_DIR'] = Node.cache_dir
shutil.rmtree(self._test_dir)
@ordered
def test_viz_graph(self):
'''Test taskgraph to networkx graph conversion for graph visualization.
'''
nx_graph = self.tgraph.viz_graph(show_ports=True)
nx_nodes = ['points_task', 'points_task.points_df_out',
'distance_by_df', 'distance_by_df.distance_df',
'distance_by_df.distance_abs_df']
nx_edges = [('points_task', 'points_task.points_df_out'),
('points_task.points_df_out', 'distance_by_df'),
('distance_by_df', 'distance_by_df.distance_df'),
('distance_by_df', 'distance_by_df.distance_abs_df')]
self.assertEqual(list(nx_graph.nodes), nx_nodes)
self.assertEqual(list(nx_graph.edges), nx_edges)
@ordered
def test_build(self):
'''Test build of a taskgraph and that all inputs and outputs are set
        for the tasks within a taskgraph.
'''
self.tgraph.build()
points_node = self.tgraph['points_task']
distance_node = self.tgraph['distance_by_df']
onode_info = {
'to_node': distance_node,
'to_port': 'points_df_in',
'from_port': 'points_df_out'
}
self.assertIn(onode_info, points_node.outputs)
onode_cols = {'points_df_out': {'x': 'float64', 'y': 'float64'},
'points_ddf_out': {'x': 'float64', 'y': 'float64'}}
self.assertEqual(onode_cols, points_node.meta_setup().outports)
inode_info = {
'from_node': points_node,
'from_port': 'points_df_out',
'to_port': 'points_df_in'
}
self.assertIn(inode_info, distance_node.inputs)
inode_in_cols = {
'points_df_in': {
'x': 'float64',
'y': 'float64'
}
}
self.assertEqual(inode_in_cols, distance_node.get_input_meta())
inode_out_cols = {'distance_df': {'distance_df': 'float64',
'x': 'float64',
'y': 'float64'},
'distance_abs_df': {'distance_abs_df': 'float64',
'x': 'float64', 'y': 'float64'}}
self.assertEqual(inode_out_cols, distance_node.meta_setup().outports)
@ordered
def test_run(self):
'''Test that a taskgraph can run successfully.
'''
outlist = ['distance_by_df.distance_df']
# Using numpy random seed to get repeatable and deterministic results.
# For seed 2335 should get something around 761.062831178.
replace_spec = {
'points_task': {
TaskSpecSchema.conf: {
'npts': 1000,
'nseed': 2335
}
}
}
(dist_df_w_df, ) = self.tgraph.run(
outputs=outlist, replace=replace_spec)
dist_sum = dist_df_w_df['distance_df'].sum()
# self.assertAlmostEqual(dist_sum, 0.0, places, msg, delta)
self.assertAlmostEqual(dist_sum, 761.062831178) # match to 7 places
@ordered
def test_save(self):
'''Test that a taskgraph can be save to a yaml file.
'''
workflow_file = os.path.join(self._test_dir,
'test_save_taskgraph.yaml')
self.tgraph.save_taskgraph(workflow_file)
with open(workflow_file) as wf:
workflow_str = wf.read()
        # verify the workflow contents match expectations. Empty list if same.
global TASKGRAPH_YAML
cdiff = list(context_diff(TASKGRAPH_YAML, workflow_str))
cdiff_empty = cdiff == []
err_msg = 'Taskgraph yaml contents do not match expected results.\n'\
'SHOULD HAVE SAVED:\n\n'\
'{wyaml}\n\n'\
'INSTEAD FILE CONTAINS:\n\n'\
'{fcont}\n\n'\
'DIFF:\n\n'\
'{diff}'.format(wyaml=TASKGRAPH_YAML, fcont=workflow_str,
diff=''.join(cdiff))
self.assertTrue(cdiff_empty, err_msg)
@ordered
def test_load(self):
'''Test that a taskgraph can be loaded from a yaml file.
'''
workflow_file = os.path.join(self._test_dir,
'test_load_taskgraph.yaml')
global TASKGRAPH_YAML
with open(workflow_file, 'w') as wf:
wf.write(TASKGRAPH_YAML)
tspec_list = [task._task_spec for task in self.tgraph]
tgraph = TaskGraph.load_taskgraph(workflow_file)
all_tasks_exist = True
for task in tgraph:
if task._task_spec not in tspec_list:
all_tasks_exist = False
break
with StringIO() as yf:
yaml.dump(tspec_list, yf,
default_flow_style=False, sort_keys=False)
yf.seek(0)
err_msg = 'Load taskgraph failed. Missing expected task items.\n'\
'EXPECTED TASKGRAPH YAML:\n\n'\
'{wyaml}\n\n'\
'GOT TASKS FORMATTED AS YAML:\n\n'\
'{tlist}\n\n'.format(wyaml=TASKGRAPH_YAML, tlist=yf.read())
self.assertTrue(all_tasks_exist, err_msg)
@ordered
def test_save_load_cache(self):
'''Test caching of tasks outputs within a taskgraph.
1. Save points_task output to cache when running the taskgraph.
2. Load points_task df from cache when running the taskgraph.
'''
replace_spec = {'points_task': {TaskSpecSchema.save: True}}
outlist = ['distance_by_df.distance_df']
with warnings.catch_warnings():
# ignore UserWarning: Using CPU via Pandas to write HDF dataset
warnings.filterwarnings(
'ignore',
message='Using CPU via Pandas to write HDF dataset',
category=UserWarning,)
# ignore RuntimeWarning: numpy.ufunc size changed
warnings.filterwarnings('ignore',
category=RuntimeWarning,
message='numpy.ufunc size changed')
(_, ) = self.tgraph.run(outputs=outlist, replace=replace_spec)
cache_dir = os.path.join(self._test_dir, '.cache', 'points_task.hdf5')
self.assertTrue(os.path.exists(cache_dir))
replace_spec = {'points_task': {TaskSpecSchema.load: True}}
with warnings.catch_warnings():
# ignore UserWarning: Using CPU via Pandas to read HDF dataset
warnings.filterwarnings(
'ignore',
message='Using CPU via Pandas to read HDF dataset',
category=UserWarning)
(_, ) = self.tgraph.run(outputs=outlist, replace=replace_spec)
if __name__ == '__main__':
unittest.main()
| fsi-samples-main | greenflow/tests/unit/test_taskgraph_api.py |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'greenflow'
copyright = '2019, NVIDIA'
author = 'NVIDIA'
# The full version, including alpha/beta/rc tags
release = '0.1'
# -- General configuration ---------------------------------------------------
# https://wwoods.github.io/2016/06/09/easy-sphinx-documentation-without-the-boilerplate/
autoclass_content = "both" # include both class docstring and __init__
# autodoc_default_flags = [
# # Make sure that any autodoc declarations show the right members
# "members",
# "inherited-members",
# "private-members",
# "show-inheritance",
# ]
autosummary_generate = True # Make _autosummary files and include them
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
# 'numpydoc',
# https://www.sphinx-doc.org/en/master/usage/extensions/napoleon.html
'sphinx.ext.napoleon',
# 'sphinx_markdown_tables',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive',
# 'nbsphinx',
'recommonmark'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
# only import and set the theme if we're building docs locally
# otherwise, readthedocs.org uses their theme by default,
# so no need to specify it
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for intersphinx -------------------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| fsi-samples-main | greenflow/docs/source/conf.py |
import sys
import inspect
from pathlib import Path
file_ = Path(__file__)
modulespath = '{}/modules'.format(file_.resolve().parents[1])
sys.path.insert(1, modulespath)
from nemo.backends.pytorch.nm import NeuralModule
from nemo_greenflow_modules.nemoBaseNode import FeedProperty
TEMPLATE = """from greenflow.dataframe_flow import Node
from .nemoBaseNode import NeMoBase
import nemo
import {}
"""
CLASS_TEMP = """
class {}(NeMoBase, Node):
def init(self):
NeMoBase.init(self, {})
"""
def gen_module_file(module, overwrite=None):
file_str = TEMPLATE.format(module.__name__)
nodecls_list = []
for item in inspect.getmembers(module):
if inspect.ismodule(item[1]):
if item[1].__package__.startswith('nemo'):
for node in inspect.getmembers(item[1]):
if inspect.isclass(node[1]):
nodecls = node[1]
if nodecls in nodecls_list:
continue
if issubclass(nodecls, NeuralModule):
if nodecls.__module__ == 'nemo.backends.pytorch.nm':
continue
try:
# p_inports = node[1].input_ports
# p_outports = node[1].output_ports
# feeder = FeedProperty({})
# inports = p_inports.fget(feeder)
# outports = p_outports.fget(feeder)
init_fun = node[1].__init__
sig = inspect.signature(init_fun)
skip = False
for key in sig.parameters.keys():
if key == 'self':
# ignore the self
continue
para = sig.parameters[key]
if para.default != inspect._empty:
if para.default.__class__.__name__ == 'type' or para.default.__class__.__name__ == 'DataCombination':
print(para.default, para)
skip = True
break
if skip:
print(node[0], 'find class arg', para.default.__class__.__name__)
continue
class_name = node[1].__module__ + '.' + node[1].__name__
file_str += CLASS_TEMP.format(node[0] + "Node",
class_name)
nodecls_list.append(nodecls)
except Exception as e:
print(e)
print(node[0], 'is not compatible, as it uses instance for input/output ports')
continue
if overwrite is not None:
module_name = overwrite
else:
module_name = module.__name__.split('.')[-1]
with open('../modules/nemo_greenflow_modules/' + module_name + '.py', 'w') as f:
f.write(file_str)
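# Illustrative sketch of a generated file (SomeModule and its submodule path
# are hypothetical placeholders, not actual NeMo classes). For a scanned
# module such as nemo.collections.asr the emitted text would look like:
#
#   from greenflow.dataframe_flow import Node
#   from .nemoBaseNode import NeMoBase
#   import nemo
#   import nemo.collections.asr
#
#   class SomeModuleNode(NeMoBase, Node):
#       def init(self):
#           NeMoBase.init(self, nemo.collections.asr.some_submodule.SomeModule)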
import nemo.backends.pytorch.tutorials
gen_module_file(nemo.backends.pytorch.tutorials)
import nemo.backends.pytorch.common
gen_module_file(nemo.backends.pytorch.common)
import nemo.collections.asr
gen_module_file(nemo.collections.asr)
import nemo.collections.cv
gen_module_file(nemo.collections.cv)
import nemo.collections.nlp.nm
gen_module_file(nemo.collections.nlp.nm, 'nlp')
import nemo.collections.simple_gan
gen_module_file(nemo.collections.simple_gan)
import nemo.collections.tts
gen_module_file(nemo.collections.tts)
| fsi-samples-main | gQuant/util/auto_gen.py |
'''
Greenflow Cusignal Plugin
'''
from setuptools import setup, find_packages
setup(
name='greenflow_cusignal_plugin',
version='1.0',
description='greenflow cusignal plugin - RAPIDS Cusignal Nodes for Greenflow', # noqa: E501
install_requires=["greenflow", "cusignal"],
packages=find_packages(include=['greenflow_cusignal_plugin',
'greenflow_cusignal_plugin.*']),
entry_points={
'greenflow.plugin': [
'greenflow_cusignal_plugin = greenflow_cusignal_plugin',
'greenflow_cusignal_plugin.convolution = greenflow_cusignal_plugin.convolution', # noqa: E501
'greenflow_cusignal_plugin.filtering = greenflow_cusignal_plugin.filtering', # noqa: E501
'greenflow_cusignal_plugin.gensig = greenflow_cusignal_plugin.gensig', # noqa: E501
'greenflow_cusignal_plugin.spectral_analysis = greenflow_cusignal_plugin.spectral_analysis', # noqa: E501
'greenflow_cusignal_plugin.windows = greenflow_cusignal_plugin.windows' # noqa: E501
],
}
)
| fsi-samples-main | gQuant/plugins/cusignal_plugin/setup.py |
| fsi-samples-main | gQuant/plugins/cusignal_plugin/greenflow_cusignal_plugin/__init__.py |
import inspect
import numpy as np
import cupy as cp
import cusignal.windows as cuwin
import scipy.signal.windows as siwin
from greenflow.dataframe_flow import (
Node, NodePorts, PortsSpecSchema, ConfSchema, MetaData)
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
__all__ = ['CusignalWindowNode']
_DEFAULT_WIN_JSON_CONF = {
'M': {
'type': 'integer',
'title': 'M',
'description': 'Number of points in the output window. If '
'zero or less, an empty array is returned.' # noqa: E131,E501
},
'sym': {
'type': 'boolean',
'title': 'sym',
'description': 'When True (default), generates a symmetric '
'window, for use in filter design. When False, generates a ' # noqa: E131,E501
'periodic window, for use in spectral analysis.',
'default': True
}
}
_DEFAULT_WIN_RETDESC = 'Returns - window : ndarray; The window, with the '\
'maximum value normalized to 1 (though the value 1 does not appear if `M` '\
'is even and `sym` is True)'
_WINS_CONFIG = {
'general_cosine': {
'json_conf': {
'a': {
'type': 'array',
'items': {'type': 'number'},
'description': 'Sequence of weighting coefficients. This '
'uses the convention of being centered on the origin, '
'so these will typically all be positive numbers, not '
'alternating sign.'
},
},
'description': 'Generic weighted sum of cosine terms window.',
'desc-return': ''
},
'boxcar': {
'description': 'Return a boxcar or rectangular window. '
'Also known as a rectangular window or Dirichlet window, this is '
'equivalent to no window at all.',
'desc-return': 'window: ndarray; The window, with the maximum value '
'normalized to 1.'
},
'triang': {
'description': 'Return a triangular window.'
},
'parzen': {
'description': 'Return a Parzen window.',
'desc-return': ''
},
'bohman': {
'description': 'Return a Bohman window.'
},
'blackman': {
'description': 'The Blackman window is a taper formed by using the '
'first three terms of a summation of cosines. It was designed to '
'have close to the minimal leakage possible. It is close to '
'optimal, only slightly worse than a Kaiser window.'
},
'nuttall': {
'description': 'Return a minimum 4-term Blackman-Harris window '
'according to Nuttall. This variation is also called "Nuttall4c".'
},
'blackmanharris': {
'description': 'Return a minimum 4-term Blackman-Harris window.'
},
'flattop': {
'description': 'Return a flat top window.'
},
'bartlett': {
'description': 'Return a Bartlett window. The Bartlett window is very '
'similar to a triangular window, except that the end points are '
'at zero. It is often used in signal processing for tapering a '
'signal, without generating too much ripple in the frequency '
'domain.',
'desc-return': 'Returns - w : ndarray; The triangular window, with '
'the first and last samples equal to zero and the maximum value '
'normalized to 1 (though the value 1 does not appear if `M` is '
'even and `sym` is True).'
},
'hann': {
'description': 'Return a Hann window. The Hann window is a taper '
'formed by using a raised cosine or sine-squared with ends that '
'touch zero.'
},
'tukey': {
'json_conf': {
'alpha': {
'type': 'number',
'description': 'Shape parameter of the Tukey window, '
'representing the fraction of the window inside the '
'cosine tapered region. If zero, the Tukey window is '
'equivalent to a rectangular window. If one, the Tukey '
'window is equivalent to a Hann window.',
}
},
'description': 'Return a Tukey window, also known as a tapered '
'cosine window.'
},
'barthann': {
'description': 'Return a modified Bartlett-Hann window.'
},
'general_hamming': {
'json_conf': {
'alpha': {
'type': 'number',
'description': 'The window coefficient.',
}
},
'description': 'Return a generalized Hamming window. The generalized '
'Hamming window is constructed by multiplying a rectangular '
'window by one period of a cosine function'
},
'hamming': {
'description': 'Return a Hamming window. The Hamming window is a '
'taper formed by using a raised cosine with non-zero endpoints, '
'optimized to minimize the nearest side lobe.'
},
'kaiser': {
'json_conf': {
'beta': {
'type': 'number',
'description': 'Shape parameter, determines trade-off between '
'main-lobe width and side lobe level. As beta gets large, '
'the window narrows.',
}
},
'description': 'Return a Kaiser window. The Kaiser window is a taper '
'formed by using a Bessel function.'
},
'gaussian': {
'json_conf': {
'std': {
'type': 'number',
'description': 'The standard deviation, sigma.',
}
},
'description': 'Return a Gaussian window.'
},
'general_gaussian': {
'json_conf': {
'p': {
'type': 'number',
'description': 'Shape parameter. p = 1 is identical to '
'`gaussian`, p = 0.5 is the same shape as the Laplace '
'distribution.',
},
'sig': {
'type': 'number',
'description': 'The standard deviation, sigma.',
}
},
'description': 'Return a window with a generalized Gaussian shape.'
},
'chebwin': {
'json_conf': {
            'at': {
'type': 'number',
'description': 'Attenuation (in dB).',
}
},
'description': 'Return a Dolph-Chebyshev window.'
},
'cosine': {
'description': 'Return a window with a simple cosine shape.'
},
'exponential': {
'json_conf': {
'center': {
'type': 'number',
'description': 'Parameter defining the center location of '
'the window function. The default value if not given is '
'``center = (M-1) / 2``. This parameter must take its '
'default value for symmetric windows.',
},
'tau': {
'type': 'number',
'description': 'Parameter defining the decay. For '
'``center = 0`` use ``tau = -(M-1) / ln(x)`` if ``x`` is '
'the fraction of the window remaining at the end.',
}
},
'description': 'Return an exponential (or Poisson) window.'
},
'taylor': {
'json_conf': {
'nbar': {
'type': 'integer',
'description': 'Number of nearly constant level sidelobes '
'adjacent to the mainlobe.',
},
'sll': {
'type': 'number',
'description': 'Desired suppression of sidelobe level in '
'decibels (dB) relative to the DC gain of the mainlobe. '
'This should be a positive number.',
},
'norm': {
'type': 'boolean',
'description': 'When True (default), divides the window by '
'the largest (middle) value for odd-length windows or the '
'value that would occur between the two repeated middle '
'values for even-length windows such that all values are '
'less than or equal to 1. When False the DC gain will '
'remain at 1 (0 dB) and the sidelobes will be `sll` dB '
'down.',
'default': True
}
},
'description': 'Return a Taylor window. The Taylor window taper '
'function approximates the Dolph-Chebyshev window\'s constant '
'sidelobe level for a parameterized number of near-in sidelobes, '
            'but then allows a taper beyond that region. The SAR (synthetic aperture '
'radar) community commonly uses Taylor weighting for image '
'formation processing because it provides strong, selectable '
'sidelobe suppression with minimum broadening of the mainlobe.',
'desc-return': 'Returns - out : array; The window. When `norm` is '
'True (default), the maximum value is normalized to 1 (though '
'the value 1 does not appear if `M` is even and `sym` is True).'
},
}
class CusignalWindowNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
port_type = PortsSpecSchema.port_type
outports = {'window': {port_type: [cp.ndarray, np.ndarray]}}
self.template_ports_setup(out_ports=outports)
meta_outports = {'window': {}}
self.template_meta_setup(out_ports=meta_outports)
def conf_schema(self):
windows_enum = list(_WINS_CONFIG.keys())
use_cpu_conf = {'use_cpu': {
'type': 'boolean',
'description': 'use_cpu - Use CPU for computation via '
'scipy::signal.windows. Default is False and runs on '
'GPU via cusignal.',
'default': False
}}
# windows configuration
win_anyof = []
for wtype in windows_enum:
            wjson_conf = _DEFAULT_WIN_JSON_CONF.copy()
wjson_conf_update = _WINS_CONFIG[wtype].get('json_conf', {})
wjson_conf.update(wjson_conf_update)
wdesc = '{}\n{}'.format(
_WINS_CONFIG[wtype]['description'],
_WINS_CONFIG[wtype].get('desc-return', _DEFAULT_WIN_RETDESC))
wjson_conf_properties = {
'window_type': {
'type': 'string',
'default': wtype,
'readOnly': True
},
**wjson_conf,
**use_cpu_conf
}
wjson_schema = {
'title': wtype,
'description': wdesc,
'properties': wjson_conf_properties
}
win_anyof.append(wjson_schema)
json = {
            'title': 'Cusignal Window Node',
'type': 'object',
'default': 'general_cosine',
'description': 'Filter Window. Parameters updated below based on '
'selected window.',
'anyOf': win_anyof,
'required': ['window_type'],
}
return ConfSchema(json=json)
def process(self, inputs):
wintype = self.conf.get('window_type', 'general_cosine')
winmod = siwin if self.conf.get('use_cpu') else cuwin
winfn = getattr(winmod, wintype)
# Match function signature parameters from self.conf; apply defaults to
# anything not matched.
winsig = inspect.signature(winfn)
params_filter = [pp.name for pp in winsig.parameters.values()
if pp.kind == pp.POSITIONAL_OR_KEYWORD]
params_dict = {kk: self.conf[kk] for kk in params_filter
if kk in self.conf}
ba = winsig.bind(**params_dict)
ba.apply_defaults()
winout = winfn(*ba.args, **ba.kwargs)
return {'window': winout}
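# Example conf sketch (the values are illustrative, not defaults): with
#   conf = {'window_type': 'kaiser', 'M': 128, 'beta': 5.0, 'sym': True}
# process() resolves cusignal.windows.kaiser, binds the matching signature
# parameters from conf (M=128, beta=5.0, sym=True) and returns
# {'window': <array of length 128>}. Setting 'use_cpu': True routes the same
# call through scipy.signal.windows instead.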
| fsi-samples-main | gQuant/plugins/cusignal_plugin/greenflow_cusignal_plugin/windows.py |
from .gensig import *
from .wavefilereader import *
| fsi-samples-main | gQuant/plugins/cusignal_plugin/greenflow_cusignal_plugin/gensig/__init__.py |
import wave # Python standard lib.
import struct
try:
# conda install -c conda-forge pysoundfile
import soundfile as sf
except ModuleNotFoundError:
sf = None
import numpy as np
import cupy as cp
import cusignal
from greenflow.dataframe_flow import (
Node, NodePorts, PortsSpecSchema, ConfSchema, MetaData)
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
__all__ = ['IQwavefileNode']
def wave_reader(wavefile, nframes):
'''Read an IQ wavefile. Not thoroughly tested.'''
# https://stackoverflow.com/questions/19709018/convert-3-byte-stereo-wav-file-to-numpy-array
with wave.open(wavefile, 'rb') as wf:
chans = wf.getnchannels()
# nframes = wf.getnframes()
sampwidth = wf.getsampwidth()
if sampwidth == 3: # have to read this one sample at a time
            buf = b''
            for _ in range(nframes):
                fr = wf.readframes(1)
                for c in range(0, 3 * chans, 3):
                    # prepend a zero pad byte to widen each little-endian
                    # 3-byte sample to 32 bits (shifted back down below)
                    buf += b'\0' + fr[c:(c + 3)]
else:
buf = wf.readframes(nframes)
unpstr = '<{0}{1}'.format(nframes * chans,
{1:'b', 2:'h', 3:'i', 4:'i', 8:'q'}[sampwidth])
# x = list(struct.unpack(unpstr, buf))
wdata = np.array(struct.unpack(unpstr, buf))
if sampwidth == 3:
        # downshift to get +/- 2^23 range with sign extension
# x = [k >> 8 for k in x]
wdata = np.right_shift(wdata, 8)
int2float = 2 ** (sampwidth * 8 - 1) - 1
# wdata = np.array(x)
wdata_float = wdata.astype(np.float64) / int2float
# iq_data = wdata_float.view(dtype=np.complex128)
return wdata_float
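# Data layout note: the returned float array interleaves the two channels as
# [I0, Q0, I1, Q1, ...]; viewing it as complex128 (done in IQwavefileNode
# below) therefore yields the complex baseband samples [I0 + 1j*Q0, ...].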
class IQwavefileNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
outports = {
'signal': {PortsSpecSchema.port_type: [cp.ndarray, np.ndarray]},
'framerate': {PortsSpecSchema.port_type: float},
}
self.template_ports_setup(out_ports=outports)
meta_outports = {'signal': {}, 'framerate': {}}
self.template_meta_setup(out_ports=meta_outports)
def conf_schema(self):
json = {
'title': 'IQ Wavefile Node',
'type': 'object',
'description': 'Load IQ data from a *.wav file. Preferably '
'install "pysoundfile" to do this. Otherwise uses "wave", ' # noqa: E131,E501
'but it has not been well tested for variety of ways data '
'has been stored in *.wav files.',
'properties': {
'wavefile': {
'type': 'string',
'description': 'IQ Wavefile *.wav. Typically '
'recorded snippets of SDR IQ.' # noqa: E131,E501
},
'duration': {
'type': 'number',
'description': 'Number of seconds to load. Number of '
'frames loaded is dependent on framerate. Default ' # noqa: E131,E501
'1 second. Limited to max frames in file. Will '
'fail if exceeds GPU memory size.',
'default': 1.0
},
'use_cpu': {
'type': 'boolean',
'description': 'use_cpu - Returns numpy array if True. '
'Default is False and returns Cupy array.', # noqa: E131,E501
'default': False
},
},
}
ui = {'wavefile': {'ui:widget': 'FileSelector'}}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
infile = self.conf.get('wavefile')
nsecs = self.conf.get('duration', 1)
with wave.open(infile) as wf:
wparams = wf.getparams()
# buf = wf.readframes(nframes)
# int2float = (2**15 - 1)
# wdata = np.frombuffer(buf, dtype=np.int16)
# wdata_float = wdata.astype(np.float64)/int2float
# iq_data = wdata_float.view(dtype=np.complex128)
nframes = min(int(wparams.framerate * nsecs), wparams.nframes)
if sf is None:
data = wave_reader(infile, nframes)
framerate = wparams.framerate
else:
data, framerate = sf.read(infile, frames=nframes)
# IQ data
cpu_signal = data.view(dtype=np.complex128).reshape(nframes)
if self.conf.get('use_cpu', False):
out = {'signal': cpu_signal}
else:
# Create mapped, pinned memory for zero copy between CPU and GPU
gpu_signal_buf = cusignal.get_shared_mem(
nframes, dtype=np.complex128)
gpu_signal_buf[:] = cpu_signal
# zero-copy conversion from Numba CUDA array to CuPy array
gpu_signal = cp.asarray(gpu_signal_buf)
out = {'signal': gpu_signal}
out['framerate'] = float(framerate)
return out
| fsi-samples-main | gQuant/plugins/cusignal_plugin/greenflow_cusignal_plugin/gensig/wavefilereader.py |
import numpy as np
import cupy as cp
import ast
from greenflow.dataframe_flow import (
Node, NodePorts, PortsSpecSchema, ConfSchema, MetaData)
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
__all__ = ['SignalGeneratorNode']
def exec_then_eval(code):
# https://stackoverflow.com/questions/39379331/python-exec-a-code-block-and-eval-the-last-line
block = ast.parse(code, mode='exec')
# assumes last node is an expression
last = ast.Expression(block.body.pop().value)
_globals, _locals = {}, {}
exec(compile(block, '<string>', mode='exec'), _globals, _locals)
return eval(compile(last, '<string>', mode='eval'), _globals, _locals)
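# Minimal usage sketch for the 'pycode' conf consumed by SignalGeneratorNode
# below (the array contents are illustrative): the last line must be a bare
# expression whose value becomes the node output, e.g.
#   pycode = (
#       "import numpy as np\n"
#       "myout = {'out1': np.arange(10, dtype=np.float64)}\n"
#       "myout"
#   )
#   exec_then_eval(pycode)  # -> {'out1': array([0., 1., ..., 9.])}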
class SignalGeneratorNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
outports = {
'out1': {PortsSpecSchema.port_type: [cp.ndarray, np.ndarray]},
'out2': {
PortsSpecSchema.port_type: [cp.ndarray, np.ndarray],
PortsSpecSchema.optional: True
},
}
self.template_ports_setup(out_ports=outports)
meta_outports = {'out1': {}, 'out2': {}}
self.template_meta_setup(out_ports=meta_outports)
def conf_schema(self):
json = {
'title': 'Custom Signal Generator Node.',
'type': 'object',
'description': 'Inject signals into greenflow taskgraphs. Use '
'CAUTION. Only run trusted code.',
'properties': {
'pycode': {
'type': 'string',
'title': 'Signal Code',
'description': 'Enter python code to generate signal. '
'The code must have a dictionary ``myout`` variable '
'with keys: out1 and out2. The out2 port is optional. '
'The ``myout`` must be the last line. Keep it simple '
'please.'
},
},
# 'required': ['pycode'],
}
ui = {'pycode': {'ui:widget': 'textarea'}}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
pycode = self.conf.get('pycode')
# print('Task id: {}; Node type: {}\nPYCODE:\n{}'.format(
# self.uid, 'SignalGeneratorNode', pycode))
if pycode:
myout = exec_then_eval(pycode)
return myout
raise RuntimeError('Task id: {}; Node type: {}\n'
'No pycode provided. Nothing to output.'
.format(self.uid, 'SignalGeneratorNode'))
| fsi-samples-main | gQuant/plugins/cusignal_plugin/greenflow_cusignal_plugin/gensig/gensig.py |
from .custom_filter_block import *
from .resample_poly import *
| fsi-samples-main | gQuant/plugins/cusignal_plugin/greenflow_cusignal_plugin/filtering/__init__.py |
import ast
from types import ModuleType
import numpy as np
import cupy as cp
from greenflow.dataframe_flow import (Node, PortsSpecSchema, ConfSchema)
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
__all__ = ['CustomFilterNode']
def compile_user_module(code):
'''
Usage:
# code is some text/string of code to be compiled dynamically.
code = '\ndef somefn(in1, in2):\n return in1 + in2\n'
module_ = compile_user_module(code)
module_.somefn(5, 6) # returns 11 per def of somefn
'''
# https://stackoverflow.com/questions/19850143/how-to-compile-a-string-of-python-code-into-a-module-whose-functions-can-be-call
# https://stackoverflow.com/questions/39379331/python-exec-a-code-block-and-eval-the-last-line
block = ast.parse(code, mode='exec')
module_ = ModuleType('user_module')
exec(compile(block, '<string>', mode='exec'), module_.__dict__)
return module_
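# Minimal 'pycode' sketch for CustomFilterNode below (the optional 'gain'
# conf entry is an illustrative assumption, not a required field):
#   pycode = '''
#   def custom_filter(signal, conf):
#       # scale the signal by an optional 'gain' entry from the node conf
#       return signal * conf.get('gain', 1.0)
#   '''
#   module_ = compile_user_module(pycode)
#   filtered = module_.custom_filter(signal, {'gain': 0.5})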
class CustomFilterNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
port_type = PortsSpecSchema.port_type
inports = {'signal': {port_type: [cp.ndarray, np.ndarray]}}
outports = {'signal_out': {port_type: '${port:signal}'}}
self.template_ports_setup(in_ports=inports, out_ports=outports)
meta_outports = {'signal_out': {}}
self.template_meta_setup(out_ports=meta_outports)
def conf_schema(self):
json = {
'title': 'Custom Filter Node.',
'type': 'object',
'description': 'Custom filter logic. CAUTION: Only run trusted '
'code.', # noqa: E131,E501
'properties': {
'pycode': {
'type': 'string',
'title': 'Signal Code - pycode',
'description': 'Enter python code to filter a signal. '
'The code must have a function with the following ' # noqa: E131,E501
'name and signature: def custom_filter(signal, conf). '
'The ``signal`` is a cp or np array. The ``conf`` '
'is the node\'s configuration dictionary. Besides '
'"pycode" custom conf fields are not not exposed via '
'UI. If anything needs to be set do it '
'programmatically via TaskSpecSchema. The '
'`custom_filter` function must return a processed '
'signal of same type as input signal.'
},
},
# 'required': ['pycode'],
}
ui = {'pycode': {'ui:widget': 'textarea'}}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
pycode = self.conf.get('pycode')
if not pycode:
raise RuntimeError('Task id: {}; Node type: {}\n'
'No code provided. Nothing to output.'
.format(self.uid, 'CustomFilterNode'))
signal = inputs['signal']
module_ = compile_user_module(pycode)
if not hasattr(module_, 'custom_filter'):
raise RuntimeError(
'Task id: {}; Node type: {}\n'
'Pycode does not define "custom_filter" function.\n'
'Pycode provided:\n{}'
.format(self.uid, 'CustomFilterNode', pycode))
out = module_.custom_filter(signal, self.conf)
return {'signal_out': out}
| fsi-samples-main | gQuant/plugins/cusignal_plugin/greenflow_cusignal_plugin/filtering/custom_filter_block.py |
from ast import literal_eval
from fractions import Fraction
import numpy as np
import cupy as cp
from cusignal.filtering.resample import resample_poly as curesamp
from scipy.signal import resample_poly as siresamp
from greenflow.dataframe_flow import (Node, PortsSpecSchema, ConfSchema)
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..windows import _WINS_CONFIG
__all__ = ['CusignalResamplePolyNode']
_RESAMPLEPOLY_DESC = '''Resample `signal` along the given axis using polyphase
filtering. The signal is upsampled by the factor `up`, a zero-phase low-pass
FIR filter is applied, and then it is downsampled by the factor `down`.
The resulting sample rate is ``up / down`` times the original sample
rate. Values beyond the boundary of the signal are assumed to be zero
during the filtering step. Returns resampled array and new sample rate.
'''
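# Rate-conversion sketch: with samplerate=48000 arriving on the input port and
# conf 'new_samplerate'=44100, process() computes
# Fraction(44100 / 48000).limit_denominator() == Fraction(147, 160), so the
# signal is upsampled by 147, low-pass filtered, and downsampled by 160,
# giving samplerate_out = 48000 * 147 / 160 = 44100.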
class CusignalResamplePolyNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
inports = {
'signal': {PortsSpecSchema.port_type: [cp.ndarray, np.ndarray]},
'samplerate': {
PortsSpecSchema.port_type: [int, float, np.float32,
np.float64],
PortsSpecSchema.optional: True
},
'window': {
PortsSpecSchema.port_type: [cp.ndarray, np.ndarray],
PortsSpecSchema.optional: True
},
}
outports = {
'signal_out': {PortsSpecSchema.port_type: '${port:signal}'},
'samplerate_out': {
PortsSpecSchema.port_type: [int, float, np.float32,
np.float64],
PortsSpecSchema.optional: True
}
}
self.template_ports_setup(in_ports=inports, out_ports=outports)
meta_outports = {'signal_out': {}, 'samplerate_out': {}}
self.template_meta_setup(out_ports=meta_outports)
def conf_schema(self):
padtype_enum = ['constant', 'line', 'mean', 'median', 'maximum',
'minimum']
json = {
'title': 'Polyphase Filter Resample Node',
'type': 'object',
'description': _RESAMPLEPOLY_DESC,
'properties': {
'new_samplerate': {
'type': 'number',
'description': 'Desired sample rate. Specify this or the '
'up/down parameters. This is used when `samplerate` ' # noqa: E131,E501
'is passed in via ports, otherwise up/down is used. '
'If both are set then this takes precedence over '
'up/down.'
},
'up': {
'type': 'integer',
'description': 'The upsampling factor.'
},
'down': {
'type': 'integer',
'description': 'The downsampling factor.'
},
'axis': {
'type': 'integer',
'description': 'The axis of `x` that is resampled. '
'Default is 0.', # noqa: E131,E501
'default': 0,
'minimum': 0,
},
'window': {
'type': 'string',
'description': 'Desired window to use to design the '
'low-pass filter, or the FIR filter coefficients to ' # noqa: E131,E501
'employ. Window can be specified as a string, a '
'tuple, or a list. If a string choose one of '
'available windows. If a tuple refer to '
'`cusignal.windows.get_window`. The tuple format '
'specifies the first argument as the string name of '
'the window, and the next arguments the needed '
'parameters. If `window` is a list it is assumed to '
'be the FIR filter coefficients. Note that the FIR '
'filter is applied after the upsampling step, so it '
'should be designed to operate on a signal at a '
'sampling frequency higher than the original by a '
'factor of `up//gcd(up, down)`. If the port window '
'is connected it takes precedence. Default '
'("kaiser", 5.0)',
'default': '("kaiser", 5.0)'
},
'gpupath': {
'type': 'boolean',
'description': 'gpupath - Optional path for filter design.'
' gpupath == False may be desirable if filter sizes ' # noqa: E131,E501
'are small.',
'default': True
},
'use_cpu': {
'type': 'boolean',
'description': 'use_cpu - Use CPU for computation via '
'scipy::signal.resample_poly. Default is False and ' # noqa: E131,E501
'runs on GPU via cusignal.',
'default': False
},
'padtype': {
'type': 'string',
'description': 'Only used when `use_cpu` is set. Scipy '
'padtype parameter of `resample_poly`. This is not ' # noqa: E131,E501
'currently exposed in cusignal.',
'enum': padtype_enum,
'default': 'constant'
},
'cval': {
'type': 'number',
'description': 'Only used when `use_cpu` is set. Value '
'to use if `padtype="constant"`. Default is zero.' # noqa: E131,E501
}
}
}
return ConfSchema(json=json)
def process(self, inputs):
signal_in = inputs['signal']
samplerate = inputs.get('samplerate', None)
new_samplerate = self.conf.get('new_samplerate', None)
if new_samplerate and samplerate:
ud = Fraction(new_samplerate / samplerate).limit_denominator()
up = ud.numerator
down = ud.denominator
else:
up = self.conf['up']
down = self.conf['down']
if samplerate:
samplerate = inputs['samplerate']
new_samplerate = samplerate * up / down
else:
new_samplerate = up / down
axis = self.conf.get('axis', 0)
if 'window' in inputs:
            window = inputs['window']
else:
window = self.conf.get('window', ("kaiser", 5.0))
if isinstance(window, str):
windows_enum = list(_WINS_CONFIG.keys())
# window could be a simple string or python code for tuple
if window not in windows_enum:
# window should be a string that is python code
# evaluated to a tuple.
try:
window = literal_eval(window)
except Exception:
                    raise RuntimeError('Unknown window: {}'.format(window))
gpupath = self.conf.get('gpupath', True)
use_cpu = self.conf.get('use_cpu', False)
if use_cpu:
padtype = self.conf.get('padtype', 'constant')
cval = self.conf.get('cval')
signal_out = siresamp(
signal_in, up, down, axis=axis, window=window,
padtype=padtype, cval=cval)
else:
signal_out = curesamp(
signal_in, up, down, axis=axis, window=window, gpupath=gpupath)
return {'signal_out': signal_out,
'samplerate_out': new_samplerate}
| fsi-samples-main | gQuant/plugins/cusignal_plugin/greenflow_cusignal_plugin/filtering/resample_poly.py |
import numpy as np
import cupy as cp
from cusignal.spectral_analysis import welch as cuwelch
from scipy.signal.spectral import welch as siwelch
from greenflow.dataframe_flow import (
Node, NodePorts, PortsSpecSchema, ConfSchema, MetaData)
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..windows import _WINS_CONFIG
__all__ = ['WelchPSD_Node']
_WELCH_DESC = '''Estimate power spectral density using Welch's method. Welch's
method computes an estimate of the power spectral density by dividing the data
into overlapping segments, computing a modified periodogram for each segment
and averaging the periodograms.
Returns - freqs:ndarray Array of frequencies;
Pxx:ndarray Power spectral density or power spectrum of signal.
'''
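# Output-size note: with the default one-sided spectrum and a segment length
# of nperseg, `freqs` and `psd` each contain nperseg // 2 + 1 points for
# real-valued input; complex input (e.g. IQ samples) always produces a
# two-sided spectrum of nperseg points (when nfft defaults to nperseg).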
class WelchPSD_Node(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
inports = {
'signal': {PortsSpecSchema.port_type: [cp.ndarray, np.ndarray]},
'samplerate': {
PortsSpecSchema.port_type: [int, float, np.float32,
np.float64],
PortsSpecSchema.optional: True
},
'window': {
PortsSpecSchema.port_type: [cp.ndarray, np.ndarray],
PortsSpecSchema.optional: True
},
}
outports = {
'psd': {PortsSpecSchema.port_type: '${port:signal}'},
'freqs': {PortsSpecSchema.port_type: '${port:signal}'},
}
self.template_ports_setup(in_ports=inports, out_ports=outports)
meta_outports = {'psd': {}, 'freqs': {}}
self.template_meta_setup(out_ports=meta_outports)
def conf_schema(self):
windows_enum = list(_WINS_CONFIG.keys())
detrend_enum = ['constant', 'linear', 'false']
scaling_enum = ['density', 'spectrum']
average_enum = ['mean', 'median']
json = {
'title': 'Welch Power Spectral Density Node',
'type': 'object',
'description': _WELCH_DESC,
'properties': {
'samplerate': {
'type': 'number',
'description': 'fs : float, optional; Sampling frequency '
'of the `x` (input signal) time series. Defaults to ' # noqa: E131,E501
'1.0. This can also be passed at input port '
'`samplerate`. Port takes precedence over conf.',
'default': 1.0
},
'window': {
'type': 'string',
'description': 'Desired window to use. Alternatively '
'pass window via port `window`. In that case its ' # noqa: E131,E501
'length must be nperseg. Defaults to a Hann window.',
'enum': windows_enum,
'default': 'hann'
},
'nperseg': {
'type': 'integer',
'description': 'Length of each segment. Defaults to None, '
'but if window is str, is set to 256, and if window ' # noqa: E131,E501
'is array_like (passed via port `window`), is set to '
'the lesser of this setting or length of the window.',
},
'noverlap': {
'type': 'integer',
'description': 'Number of points to overlap between '
'segments. If `None`, ``noverlap = nperseg // 2``. ' # noqa: E131,E501
'Defaults to `None`.',
},
'nfft': {
'type': 'integer',
'description': 'Length of the FFT used, if a zero padded '
'FFT is desired. If `None`, the FFT length is ' # noqa: E131,E501
'`nperseg`. Defaults to `None`.',
},
'detrend': {
'type': 'string',
'description': 'Specifies how to detrend each segment. If '
'"constant", only the mean of `data` is subtracted. ' # noqa: E131,E501
'If "linear", the result of a linear least-squares '
'fit to `data` is subtracted from `data`. If '
'`detrend` is `False`, no detrending is done. '
'Default is "constant".',
'enum': detrend_enum,
'default': 'constant'
},
'return_onesided': {
'type': 'boolean',
'description': 'return_onesided - If `True`, return a '
'one-sided spectrum for real data. If `False` return ' # noqa: E131,E501
'a two-sided spectrum. Defaults to `True`, but for '
'complex data, a two-sided spectrum is always '
'returned.',
'default': True
},
'scaling': {
'type': 'string',
'description': 'Selects between computing the power '
'spectral density ("density") where `Pxx` has units ' # noqa: E131,E501
'of V**2/Hz and computing the power spectrum '
'("spectrum") where `Pxx` has units of V**2, if `x` '
'is measured in V and `fs` is measured in Hz. '
'Defaults to density',
'enum': scaling_enum,
                    'default': 'density'
},
'axis': {
'type': 'integer',
'description': 'Axis along which the periodogram is '
'computed; the default is over the last axis (i.e. ' # noqa: E131,E501
'``axis=-1``).',
'default': -1
},
'average': {
'type': 'string',
'description': '{"mean", "median"}, optional. Method to '
'use when averaging periodograms. Defaults to "mean".', # noqa: E131,E501
'enum': average_enum,
'default': 'mean'
},
'use_cpu': {
'type': 'boolean',
'description': 'Use CPU for computation via '
'scipy::signal.spectral.welch. Default is False and ' # noqa: E131,E501
'runs on GPU via cusignal.',
'default': False
},
},
}
return ConfSchema(json=json)
def process(self, inputs):
use_cpu = self.conf.get('use_cpu', False)
signal = inputs['signal']
samplerate = self.conf.get('samplerate', 1.0)
samplerate = inputs.get('samplerate', samplerate)
window = self.conf.get('window', 'hann')
window = inputs.get('window', window)
nperseg = self.conf.get('nperseg', None)
try:
nperseg = window.shape[0]
except Exception:
pass
noverlap = self.conf.get('noverlap', None)
nfft = self.conf.get('nfft', None)
detrend = self.conf.get('detrend', 'constant')
if isinstance(detrend, str):
detrend = False if detrend.lower() in ('false',) else detrend
return_onesided = self.conf.get('return_onesided', True)
scaling = self.conf.get('scaling', 'density')
axis = self.conf.get('axis', -1)
average = self.conf.get('average', 'mean')
welch_params = {
'fs': samplerate,
'window': window,
'nperseg': nperseg,
'noverlap': noverlap,
'nfft': nfft,
'detrend': detrend,
'return_onesided': return_onesided,
'scaling': scaling,
'axis': axis,
'average': average,
}
if use_cpu:
freqs, psd = siwelch(signal, **welch_params)
else:
freqs, psd = cuwelch(signal, **welch_params)
return {'psd': psd, 'freqs': freqs}
| fsi-samples-main | gQuant/plugins/cusignal_plugin/greenflow_cusignal_plugin/spectral_analysis/welchpsd.py |
from .welchpsd import *
| fsi-samples-main | gQuant/plugins/cusignal_plugin/greenflow_cusignal_plugin/spectral_analysis/__init__.py |
import numpy as np
import cupy as cp
from cusignal.convolution import correlate as cucorr
from scipy.signal import correlate as sicorr
from greenflow.dataframe_flow import (Node, PortsSpecSchema, ConfSchema)
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
__all__ = ['CusignalCorrelationNode']
_CORR_DESC = '''Cross-correlate two N-dimensional arrays.
Cross-correlate `in1` and `in2`, with the output size determined by the
`mode` argument.
Returns:
correlate : array
An N-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
'''
_CORR_MODE_DESC = '''The size of the output.
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
'''
_CORR_METHOD_DESC = '''Method to use to calculate the correlation.
``direct``
The correlation is determined directly from sums, the definition of
correlation.
``fft``
The Fast Fourier Transform is used to perform the correlation more
quickly (only available for numerical arrays.)
``auto``
Automatically chooses direct or Fourier method based on an estimate
of which is faster (default). See `convolve` Notes for more detail.
'''
class CusignalCorrelationNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
port_type = PortsSpecSchema.port_type
inports = {
'in1': {port_type: [cp.ndarray, np.ndarray]},
'in2': {port_type: [cp.ndarray, np.ndarray]}
}
outports = {
'correlate': {port_type: "${port:in1}"},
}
self.template_ports_setup(in_ports=inports, out_ports=outports)
meta_outports = {'correlate': {}}
self.template_meta_setup(out_ports=meta_outports)
def conf_schema(self):
mode_enum = ['full', 'valid', 'same']
method_enum = ['direct', 'fft', 'auto']
json = {
'title': 'Cusignal Correlation Node',
'type': 'object',
'description': _CORR_DESC,
'properties': {
'mode': {
'type': 'string',
'description': _CORR_MODE_DESC,
'enum': mode_enum,
'default': 'full'
},
'method': {
'type': 'string',
'description': _CORR_METHOD_DESC,
'enum': method_enum,
'default': 'auto'
},
'scale': {
'type': 'number',
'description': 'Scale output array i.e. out / scale',
'default': 1
},
'use_cpu': {
'type': 'boolean',
'description': 'Use CPU for computation via '
'scipy::signal.correlate. Default is False and runs ' # noqa: E131,E501
'on GPU via cusignal.',
'default': False
},
},
}
return ConfSchema(json=json)
def process(self, inputs):
mode = self.conf.get('mode', 'full')
method = self.conf.get('method', 'auto')
scale = self.conf.get('scale', 1)
use_cpu = self.conf.get('use_cpu', False)
in1 = inputs['in1']
in2 = inputs['in2']
if use_cpu:
corr = sicorr(in1, in2, mode=mode, method=method)
else:
corr = cucorr(in1, in2, mode=mode, method=method)
corr = corr if scale == 1 else corr / scale
return {'correlate': corr}
| fsi-samples-main | gQuant/plugins/cusignal_plugin/greenflow_cusignal_plugin/convolution/correlate.py |
import numpy as np
import cupy as cp
from cusignal.convolution import convolve as cuconv
from scipy.signal import convolve as siconv
from greenflow.dataframe_flow import (Node, PortsSpecSchema, ConfSchema)
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
__all__ = ['CusignalConvolveNode']
_CONV_DESC = '''Convolve two N-dimensional arrays.
Convolve `in1` and `in2`, with the output size determined by the
`mode` argument.
Returns:
convolve : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
'''
_CONV_MODE_DESC = '''mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
'''
_CONV_METHOD_DESC = '''method : str {'auto', 'direct', 'fft'}, optional
A string indicating which method to use to calculate the convolution.
``direct``
The convolution is determined directly from sums, the definition of
convolution.
``fft``
The Fourier Transform is used to perform the convolution by calling
`fftconvolve`.
``auto``
Automatically chooses direct or Fourier method based on an estimate
of which is faster (default).
'''
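# Normalization note: with 'normalize' enabled the output is divided by
# sum(in2), so convolving a signal with a window (for instance one produced
# by CusignalWindowNode) acts as a weighted moving average with unit DC gain
# rather than also scaling the signal by the window's sum.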
class CusignalConvolveNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
port_type = PortsSpecSchema.port_type
inports = {
'in1': {port_type: [cp.ndarray, np.ndarray]},
'in2': {port_type: [cp.ndarray, np.ndarray]}
}
outports = {
'convolve': {port_type: [cp.ndarray, np.ndarray]},
}
self.template_ports_setup(in_ports=inports, out_ports=outports)
meta_outports = {'convolve': {}}
self.template_meta_setup(out_ports=meta_outports)
def conf_schema(self):
mode_enum = ['full', 'valid', 'same']
method_enum = ['direct', 'fft', 'auto']
json = {
'title': 'Cusignal Convolution Node',
'type': 'object',
'description': _CONV_DESC,
'properties': {
'mode': {
'type': 'string',
'description': _CONV_MODE_DESC,
'enum': mode_enum,
'default': 'full'
},
'method': {
'type': 'string',
'description': _CONV_METHOD_DESC,
'enum': method_enum,
'default': 'auto'
},
'normalize': {
'type': 'boolean',
                    'description': 'Scale convolution by in2 (typically a '
'window) i.e. convolve(in1, in2) / sum(in2). '
'Default False.',
'default': False
},
'use_cpu': {
'type': 'boolean',
'description': 'Use CPU for computation via '
'scipy::signal.convolve. Default is False and runs on '
'GPU via cusignal.',
'default': False
},
},
}
return ConfSchema(json=json)
def process(self, inputs):
mode = self.conf.get('mode', 'full')
method = self.conf.get('method', 'auto')
normalize = self.conf.get('normalize', False)
use_cpu = self.conf.get('use_cpu', False)
in1 = inputs['in1']
in2 = inputs['in2']
if use_cpu:
conv = siconv(in1, in2, mode=mode, method=method)
if normalize:
scale = np.sum(in2)
else:
conv = cuconv(in1, in2, mode=mode, method=method)
if normalize:
scale = cp.sum(in2)
if normalize:
conv = conv if scale == 1 else conv / scale
return {'convolve': conv}
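if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): build the node from a plain
    # task dict -- the same pattern this repo's unit tests use -- and smooth
    # a square pulse with a normalized Hann window on the CPU path.  In a
    # real workflow the node is wired into a greenflow TaskGraph instead.
    from greenflow.dataframe_flow.task import Task

    demo_task = Task({'id': 'convolve_demo',
                      'type': 'CusignalConvolveNode',
                      'conf': {'mode': 'same', 'normalize': True,
                               'use_cpu': True},
                      'inputs': {}})
    demo_node = CusignalConvolveNode(demo_task)
    sig = np.repeat([0., 1., 0.], 100)
    win = np.hanning(50)
    out = demo_node.process({'in1': sig, 'in2': win})['convolve']
    print(out.shape)  # (300,) for mode='same'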
| fsi-samples-main | gQuant/plugins/cusignal_plugin/greenflow_cusignal_plugin/convolution/convolve.py |
import numpy as np
import cupy as cp
from cusignal.convolution import correlate2d as cucorr2d
from scipy.signal import correlate2d as sicorr2d
from greenflow.dataframe_flow import (Node, PortsSpecSchema, ConfSchema)
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
__all__ = ['CusignalCorrelate2dNode']
_CORR2_DESC = '''Cross-correlate two 2-dimensional arrays.
Cross correlate `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`
Returns:
correlate2d : ndarray
A 2-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`
'''
_CORR2_MODE_DESC = '''mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
'''
_CORR2_BOUNDARY_DESC = '''boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
'''
_CORR2_FILLVAL_DESC = '''fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
'''
class CusignalCorrelate2dNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
port_type = PortsSpecSchema.port_type
inports = {
'in1': {port_type: [cp.ndarray, np.ndarray]},
'in2': {port_type: [cp.ndarray, np.ndarray]}
}
outports = {
'correlate2d': {port_type: [cp.ndarray, np.ndarray]},
}
self.template_ports_setup(in_ports=inports, out_ports=outports)
meta_outports = {'correlate2d': {}}
self.template_meta_setup(out_ports=meta_outports)
def conf_schema(self):
mode_enum = ['full', 'valid', 'same']
boundary_enum = ['fill', 'wrap', 'symm']
json = {
            'title': 'Cusignal Correlation2D Node',
'type': 'object',
'description': _CORR2_DESC,
'properties': {
'mode': {
'type': 'string',
'description': _CORR2_MODE_DESC,
'enum': mode_enum,
'default': 'full'
},
'boundary': {
'type': 'string',
'description': _CORR2_BOUNDARY_DESC,
'enum': boundary_enum,
'default': 'fill'
},
'fillvalue': {
'type': 'number',
'description': _CORR2_FILLVAL_DESC,
'default': 0
},
'use_cpu': {
'type': 'boolean',
'description': 'Use CPU for computation via '
'scipy::signal.correlate2d. Default is False and runs on '
'GPU via cusignal.',
'default': False
},
},
}
return ConfSchema(json=json)
def process(self, inputs):
mode = self.conf.get('mode', 'full')
boundary = self.conf.get('boundary', 'fill')
fillvalue = self.conf.get('fillvalue', 0)
use_cpu = self.conf.get('use_cpu', False)
in1 = inputs['in1']
in2 = inputs['in2']
if use_cpu:
corr2d = sicorr2d(
in1, in2, mode=mode, boundary=boundary, fillvalue=fillvalue)
else:
corr2d = cucorr2d(
in1, in2, mode=mode, boundary=boundary, fillvalue=fillvalue)
return {'correlate2d': corr2d}
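if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): 2-D cross-correlation of a
    # random image with a small box template on the CPU path, following the
    # task-dict construction pattern used in this repo's unit tests.
    from greenflow.dataframe_flow.task import Task

    demo_task = Task({'id': 'correlate2d_demo',
                      'type': 'CusignalCorrelate2dNode',
                      'conf': {'mode': 'same', 'boundary': 'symm',
                               'use_cpu': True},
                      'inputs': {}})
    demo_node = CusignalCorrelate2dNode(demo_task)
    image = np.random.rand(64, 64)
    template = np.ones((8, 8)) / 64.0
    out = demo_node.process({'in1': image, 'in2': template})['correlate2d']
    print(out.shape)  # (64, 64) for mode='same'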
| fsi-samples-main | gQuant/plugins/cusignal_plugin/greenflow_cusignal_plugin/convolution/correlate2d.py |
from .convolve import *
from .correlate import *
from .fftconvolve import *
from .convolve2d import *
from .correlate2d import *
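# The star imports above re-export the convolution node classes (each module
# defines __all__), so a client can, for example, simply write:
#   from greenflow_cusignal_plugin.convolution import CusignalConvolveNode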
| fsi-samples-main | gQuant/plugins/cusignal_plugin/greenflow_cusignal_plugin/convolution/__init__.py |
import numpy as np
import cupy as cp
from cusignal.convolution import convolve2d as cuconv2d
from scipy.signal import convolve2d as siconv2d
from greenflow.dataframe_flow import (Node, PortsSpecSchema, ConfSchema)
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
__all__ = ['CusignalConvolve2dNode']
_CONV2_DESC = '''Convolve two 2-dimensional arrays.
Convolve `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Returns:
out : ndarray
A 2-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
'''
_CONV2_MODE_DESC = '''mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
'''
_CONV2_BOUNDARY_DESC = '''boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
'''
_CONV2_FILLVAL_DESC = '''fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
'''
class CusignalConvolve2dNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
port_type = PortsSpecSchema.port_type
inports = {
'in1': {port_type: [cp.ndarray, np.ndarray]},
'in2': {port_type: [cp.ndarray, np.ndarray]}
}
outports = {
'convolve2d': {port_type: [cp.ndarray, np.ndarray]},
}
self.template_ports_setup(in_ports=inports, out_ports=outports)
meta_outports = {'convolve2d': {}}
self.template_meta_setup(out_ports=meta_outports)
def conf_schema(self):
mode_enum = ['full', 'valid', 'same']
boundary_enum = ['fill', 'wrap', 'symm']
json = {
'title': 'Cusignal Convolution2D Node',
'type': 'object',
'description': _CONV2_DESC,
'properties': {
'mode': {
'type': 'string',
'description': _CONV2_MODE_DESC,
'enum': mode_enum,
'default': 'full'
},
'boundary': {
'type': 'string',
'description': _CONV2_BOUNDARY_DESC,
'enum': boundary_enum,
'default': 'fill'
},
'fillvalue': {
'type': 'number',
'description': _CONV2_FILLVAL_DESC,
'default': 0
},
'use_cpu': {
'type': 'boolean',
'description': 'Use CPU for computation via '
'scipy::signal.convolve2d. Default is False and runs on '
'GPU via cusignal.',
'default': False
},
},
}
return ConfSchema(json=json)
def process(self, inputs):
mode = self.conf.get('mode', 'full')
boundary = self.conf.get('boundary', 'fill')
fillvalue = self.conf.get('fillvalue', 0)
use_cpu = self.conf.get('use_cpu', False)
in1 = inputs['in1']
in2 = inputs['in2']
if use_cpu:
conv2d = siconv2d(
in1, in2, mode=mode, boundary=boundary, fillvalue=fillvalue)
else:
conv2d = cuconv2d(
in1, in2, mode=mode, boundary=boundary, fillvalue=fillvalue)
return {'convolve2d': conv2d}
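if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): blur a random image with a
    # normalized 5x5 box kernel on the CPU path, mirroring the task-dict
    # construction pattern used in this repo's unit tests.
    from greenflow.dataframe_flow.task import Task

    demo_task = Task({'id': 'convolve2d_demo',
                      'type': 'CusignalConvolve2dNode',
                      'conf': {'mode': 'same', 'boundary': 'fill',
                               'fillvalue': 0, 'use_cpu': True},
                      'inputs': {}})
    demo_node = CusignalConvolve2dNode(demo_task)
    image = np.random.rand(64, 64)
    kernel = np.ones((5, 5)) / 25.0
    out = demo_node.process({'in1': image, 'in2': kernel})['convolve2d']
    print(out.shape)  # (64, 64) for mode='same'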
| fsi-samples-main | gQuant/plugins/cusignal_plugin/greenflow_cusignal_plugin/convolution/convolve2d.py |
import numpy as np
import cupy as cp
from cusignal.convolution import fftconvolve as cufftconv
from scipy.signal import fftconvolve as sifftconv
from greenflow.dataframe_flow import (Node, PortsSpecSchema, ConfSchema)
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
__all__ = ['CusignalFFTConvolveNode']
_FFTCONV_DESC = '''Convolve two N-dimensional arrays using FFT.
Convolve `in1` and `in2` using the fast Fourier transform method, with
the output size determined by the `mode` argument.
This is generally much faster than `convolve` for large arrays (n > ~500),
but can be slower when only a few output values are needed, and can only
output float arrays (int or object array inputs will be cast to float).
As of v0.19, `convolve` automatically chooses this method or the direct
method based on an estimation of which is faster.
Returns:
out : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
'''
_FFTCONV_MODE_DESC = '''mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
'''
_FFTCONV_AXES_DESC = '''axes : int or array_like of ints or None, optional
Axes over which to compute the convolution.
The default is over all axes.
'''
class CusignalFFTConvolveNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
port_type = PortsSpecSchema.port_type
inports = {
'in1': {port_type: [cp.ndarray, np.ndarray]},
'in2': {port_type: [cp.ndarray, np.ndarray]}
}
outports = {
'fftconvolve': {port_type: [cp.ndarray, np.ndarray]},
}
self.template_ports_setup(in_ports=inports, out_ports=outports)
meta_outports = {'fftconvolve': {}}
self.template_meta_setup(out_ports=meta_outports)
def conf_schema(self):
mode_enum = ['full', 'valid', 'same']
json = {
            'title': 'Cusignal FFT Convolution Node',
'type': 'object',
'description': _FFTCONV_DESC,
'properties': {
'mode': {
'type': 'string',
'description': _FFTCONV_MODE_DESC,
'enum': mode_enum,
'default': 'full'
},
'axes': {
'type': 'array',
'items': {
'type': 'integer'
},
'description': _FFTCONV_AXES_DESC,
},
'use_cpu': {
'type': 'boolean',
'description': 'Use CPU for computation via '
'scipy::signal.fftconvolve. Default is False and ' # noqa: E131,E501
'runs on GPU via cusignal.',
'default': False
},
},
}
return ConfSchema(json=json)
def process(self, inputs):
mode = self.conf.get('mode', 'full')
axes = self.conf.get('axes', [])
use_cpu = self.conf.get('use_cpu', False)
in1 = inputs['in1']
in2 = inputs['in2']
if len(axes) == 0:
axes = None
elif len(axes) == 1:
axes = axes[0]
if use_cpu:
fftconv = sifftconv(in1, in2, mode=mode, axes=axes)
else:
cache = cp.fft.config.get_plan_cache()
cache.clear()
mempool = cp.get_default_memory_pool()
mempool.free_all_blocks()
if cache.get_size() > 0:
cache.set_size(0)
# if cache.get_memsize() != 0:
# cache.set_memsize(0)
fftconv = cufftconv(in1, in2, mode=mode, axes=axes)
return {'fftconvolve': fftconv}
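if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): FFT-based convolution of a
    # long noise signal with a Hann window on the CPU path.  Leaving 'axes'
    # out of the conf makes process() pass axes=None (all axes), as above.
    from greenflow.dataframe_flow.task import Task

    demo_task = Task({'id': 'fftconvolve_demo',
                      'type': 'CusignalFFTConvolveNode',
                      'conf': {'mode': 'same', 'use_cpu': True},
                      'inputs': {}})
    demo_node = CusignalFFTConvolveNode(demo_task)
    sig = np.random.randn(4096)
    win = np.hanning(128)
    out = demo_node.process({'in1': sig, 'in2': win})['fftconvolve']
    print(out.shape)  # (4096,) for mode='same'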
| fsi-samples-main | gQuant/plugins/cusignal_plugin/greenflow_cusignal_plugin/convolution/fftconvolve.py |
from setuptools import setup, find_packages
setup(
name='greenflow_gquant_plugin',
version='0.0.3',
install_requires=[
"greenflow", "bqplot", "tables", "ray[tune]", "matplotlib", "ray[default]",
"mplfinance"
],
packages=find_packages(include=[
'greenflow_gquant_plugin', 'greenflow_gquant_plugin.analysis',
'greenflow_gquant_plugin.backtest',
'greenflow_gquant_plugin.dataloader', 'greenflow_gquant_plugin.ml',
'greenflow_gquant_plugin.portofolio',
'greenflow_gquant_plugin.strategy',
'greenflow_gquant_plugin.cuindicator',
'greenflow_gquant_plugin.transform'
]),
entry_points={
'greenflow.plugin': [
'greenflow_gquant_plugin = greenflow_gquant_plugin',
'greenflow_gquant_plugin.analysis = greenflow_gquant_plugin.analysis',
'greenflow_gquant_plugin.backtest = greenflow_gquant_plugin.backtest',
'greenflow_gquant_plugin.dataloader = greenflow_gquant_plugin.dataloader',
'greenflow_gquant_plugin.ml = greenflow_gquant_plugin.ml',
'greenflow_gquant_plugin.portofolio = greenflow_gquant_plugin.portofolio',
'greenflow_gquant_plugin.strategy = greenflow_gquant_plugin.strategy',
'greenflow_gquant_plugin.transform = greenflow_gquant_plugin.transform'
],
})
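# Installation sketch (illustrative only): from this plugin directory run
#   pip install .
# The modules registered above under the 'greenflow.plugin' entry-point
# group are how greenflow discovers this plugin's node classes at runtime.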
| fsi-samples-main | gQuant/plugins/gquant_plugin/setup.py |
fsi-samples-main | gQuant/plugins/gquant_plugin/tests/__init__.py |
|
'''
Fractional differencing Unit Tests
To run unittests:
# Using standard library unittest
python -m unittest -v
python -m unittest tests/unit/test_fractional_diff.py -v
or
python -m unittest discover <test_directory>
python -m unittest discover -s <directory> -p 'test_*.py'
# Using pytest
# "conda install pytest" or "pip install pytest"
pytest -v tests
pytest -v tests/unit/test_fractional_diff.py
'''
import pandas as pd
import unittest
import cudf
from greenflow_gquant_plugin.cuindicator import (fractional_diff,
get_weights_floored,
port_fractional_diff)
import numpy as np
from .utils import make_orderer
import warnings
ordered, compare = make_orderer()
unittest.defaultTestLoader.sortTestMethodsUsing = compare
def frac_diff(df, d, floor=1e-3):
r"""Fractionally difference time series via CPU.
code is copied from
https://github.com/ritchieng/fractional_differencing_gpu/blob/master/notebooks/gpu_fractional_differencing.ipynb
Args:
df (pd.DataFrame): dataframe of raw time series values.
d (float): differencing value from 0 to 1 where > 1 has no FD.
floor (float): minimum value of weights, ignoring anything smaller.
"""
# Get weights window
weights = get_weights_floored(d=d, num_k=len(df), floor=floor)
weights_window_size = len(weights)
# Reverse weights
weights = weights[::-1]
# Blank fractionally differenced series to be filled
df_fd = []
# Slide window of time series,
# to calculated fractionally differenced values
# per window
for idx in range(weights_window_size, df.shape[0]):
# Dot product of weights and original values
# to get fractionally differenced values
# date_idx = df.index[idx]
df_fd.append(np.dot(weights.T,
df.iloc[idx - weights_window_size:idx]).item())
# Return FD values and weights
df_fd = pd.DataFrame(df_fd)
return df_fd, weights
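# Worked sketch (illustrative only) of the reference computation above: for
# d=0.5 and floor=1e-3, get_weights_floored returns a short weight vector w;
# after reversing it, each fractionally differenced value is the window dot
# product np.dot(w.T, x[i - len(w):i]).  On the GPU side the first
# len(w) - 1 outputs are NaN (the warm-up window), which the tests below
# either skip over or assert on explicitly.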
class TestFracDiff(unittest.TestCase):
def setUp(self):
warnings.filterwarnings('ignore', message='numpy.ufunc size changed')
array_len = int(1e4)
random_array = np.random.rand(array_len)
df = cudf.DataFrame()
df['in'] = random_array
pdf = pd.DataFrame()
pdf['in'] = random_array
# ignore importlib warnings.
self._pandas_data = pdf
self._cudf_data = df
# data set for multiple assets
size = 200
half = size // 2
self.size = size
self.half = half
np.random.seed(10)
random_array = np.random.rand(size)
indicator = np.zeros(size, dtype=np.int32)
indicator[0] = 1
indicator[half] = 1
df2 = cudf.DataFrame()
df2['in'] = random_array
df2['indicator'] = indicator
pdf_low = pd.DataFrame()
pdf_high = pd.DataFrame()
pdf_low['in'] = random_array[0:half]
pdf_high['in'] = random_array[half:]
self._cudf_data_m = df2
self._plow_data = pdf_low
self._phigh_data = pdf_high
def tearDown(self):
pass
@ordered
def test_fractional_diff(self):
'''Test frac diff method'''
for d_val in [0.1, 0.5, 1.0]:
for floor_val in [1e-3, 1e-4]:
gres, weights = fractional_diff(self._cudf_data['in'], d=d_val,
floor=floor_val)
pres, weights = frac_diff(self._pandas_data, d=d_val,
floor=floor_val)
length = weights.size
g_array = (np.array(gres)[length-1:-1])
p_array = (pres[0].values)
err = abs(g_array - p_array).max()
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_multi_fractional_diff(self):
'''Test frac diff method'''
d_val = 0.5
floor_val = 1e-3
gres, weights = port_fractional_diff(self._cudf_data_m['indicator'],
self._cudf_data_m['in'], d=d_val,
floor=floor_val)
pres, weights = frac_diff(self._plow_data, d=d_val,
floor=floor_val)
length = weights.size
g_array = (np.array(gres)[length-1:self.half-1])
        # make sure nan is set at the beginning
self.assertTrue(np.isnan(np.array(gres)[:length-1]).all())
p_array = (pres[0].values)
err = abs(g_array - p_array).max()
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
pres, weights = frac_diff(self._phigh_data, d=d_val,
floor=floor_val)
length = weights.size
g_array = (np.array(gres)[self.half+length-1:-1])
        # make sure nan is set at the beginning
self.assertTrue(np.isnan(
np.array(gres)[self.half:self.half+length-1]).all())
p_array = (pres[0].values)
err = abs(g_array - p_array).max()
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
if __name__ == '__main__':
unittest.main()
| fsi-samples-main | gQuant/plugins/gquant_plugin/tests/unit/test_fractional_diff.py |
'''
Technical Indicator for Multiple Assets Unit Tests
To run unittests:
# Using standard library unittest
python -m unittest -v
python -m unittest tests/unit/test_multi_assets_indicator.py -v
or
python -m unittest discover <test_directory>
python -m unittest discover -s <directory> -p 'test_*.py'
# Using pytest
# "conda install pytest" or "pip install pytest"
pytest -v tests
pytest -v tests/unit/test_multi_assets_indicator.py
'''
import pandas as pd
import unittest
import cudf
from .utils import make_orderer, error_function
import greenflow_gquant_plugin.cuindicator as gi
from . import technical_indicators as ti
from greenflow_gquant_plugin.cuindicator import PEwm
import numpy as np
import warnings
ordered, compare = make_orderer()
unittest.defaultTestLoader.sortTestMethodsUsing = compare
class TestMultipleAssets(unittest.TestCase):
def setUp(self):
warnings.filterwarnings('ignore', message='numpy.ufunc size changed')
warnings.simplefilter('ignore', category=ImportWarning)
warnings.simplefilter('ignore', category=DeprecationWarning)
size = 200
half = size // 2
self.size = size
self.half = half
np.random.seed(10)
random_array = np.random.rand(size)
open_array = np.random.rand(size)
close_array = np.random.rand(size)
high_array = np.random.rand(size)
low_array = np.random.rand(size)
volume_array = np.random.rand(size)
indicator = np.zeros(size, dtype=np.int32)
indicator[0] = 1
indicator[half] = 1
df = cudf.DataFrame()
df['in'] = random_array
df['open'] = open_array
df['close'] = close_array
df['high'] = high_array
df['low'] = low_array
df['volume'] = volume_array
df['indicator'] = indicator
pdf = pd.DataFrame()
pdf['in0'] = random_array[0:half]
pdf['in1'] = random_array[half:]
low_pdf = pd.DataFrame()
high_pdf = pd.DataFrame()
low_pdf['Open'] = open_array[0:half]
low_pdf['Close'] = close_array[0:half]
low_pdf['High'] = high_array[0:half]
low_pdf['Low'] = low_array[0:half]
low_pdf['Volume'] = volume_array[0:half]
high_pdf['Open'] = open_array[half:]
high_pdf['Close'] = close_array[half:]
high_pdf['High'] = high_array[half:]
high_pdf['Low'] = low_array[half:]
high_pdf['Volume'] = volume_array[half:]
self._pandas_data = pdf
self._cudf_data = df
self._plow_data = low_pdf
self._phigh_data = high_pdf
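    # The synthetic 'indicator' column flags the first row of each asset
    # (row 0 and row `half`), letting a single port_* call process both
    # assets in one pass.  Illustrative sketch of the pattern every test
    # below follows:
    #   r = gi.port_moving_average(df['indicator'], df['close'], 10)
    #   r[:half]  is checked against pandas results for the first asset
    #   r[half:]  is checked against pandas results for the second asset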
def tearDown(self):
pass
@ordered
def test_multi_assets_indicator(self):
'''Test portfolio ewm method'''
self._cudf_data['ewma'] = PEwm(3,
self._cudf_data['in'],
self._cudf_data[
'indicator'].to_gpu_array(),
thread_tile=2,
number_of_threads=2).mean()
gpu_array = self._cudf_data['ewma']
gpu_result = gpu_array[0:self.half]
cpu_result = self._pandas_data['in0'].ewm(span=3,
min_periods=3).mean()
err = error_function(gpu_result, cpu_result)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = self._pandas_data['in1'].ewm(span=3,
min_periods=3).mean()
gpu_result = gpu_array[self.half:]
err = error_function(gpu_result, cpu_result)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_macd(self):
'''Test portfolio macd method'''
n_fast = 10
n_slow = 20
r = gi.port_macd(self._cudf_data['indicator'].to_gpu_array(),
self._cudf_data['close'].to_gpu_array(),
n_fast,
n_slow)
cpu_result = ti.macd(self._plow_data, n_fast, n_slow)
err = error_function(r.MACD[:self.half], cpu_result['MACD_10_20'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r.MACDsign[:self.half],
cpu_result['MACDsign_10_20'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r.MACDdiff[:self.half],
cpu_result['MACDdiff_10_20'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = ti.macd(self._phigh_data, n_fast, n_slow)
err = error_function(r.MACD[self.half:], cpu_result['MACD_10_20'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r.MACDsign[self.half:],
cpu_result['MACDsign_10_20'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r.MACDdiff[self.half:],
cpu_result['MACDdiff_10_20'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_relative_strength_index(self):
'''Test portfolio relative strength index method'''
n = 10
r = gi.port_relative_strength_index(
self._cudf_data['indicator'],
self._cudf_data['high'],
self._cudf_data['low'],
n)
cpu_result = ti.relative_strength_index(self._plow_data, n)
err = error_function(r[:self.half], cpu_result['RSI_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = ti.relative_strength_index(self._phigh_data, n)
err = error_function(r[self.half:], cpu_result['RSI_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_trix(self):
'''Test portfolio trix'''
n = 3
r = gi.port_trix(self._cudf_data['indicator'],
self._cudf_data['close'],
n)
cpu_result = ti.trix(self._plow_data, n)
err = error_function(r[:self.half], cpu_result['Trix_3'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = ti.trix(self._phigh_data, n)
err = error_function(r[self.half:], cpu_result['Trix_3'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_average_true_range(self):
'''Test portfolio average true range'''
n = 10
r = gi.port_average_true_range(self._cudf_data['indicator'],
self._cudf_data['high'],
self._cudf_data['low'],
self._cudf_data['close'], 10)
cpu_result = ti.average_true_range(self._plow_data, n)
err = error_function(r[:self.half], cpu_result['ATR_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = ti.average_true_range(self._phigh_data, n)
err = error_function(r[self.half:], cpu_result['ATR_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_ppsr(self):
'''Test portfolio average true range'''
r = gi.port_ppsr(self._cudf_data['indicator'],
self._cudf_data['high'],
self._cudf_data['low'],
self._cudf_data['close'])
cpu_result = ti.ppsr(self._plow_data)
err = error_function(r.PP[:self.half], cpu_result['PP'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r.R1[:self.half], cpu_result['R1'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r.S1[:self.half], cpu_result['S1'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r.R2[:self.half], cpu_result['R2'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r.S2[:self.half], cpu_result['S2'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r.R3[:self.half], cpu_result['R3'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r.S3[:self.half], cpu_result['S3'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = ti.ppsr(self._phigh_data)
err = error_function(r.PP[self.half:], cpu_result['PP'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r.R1[self.half:], cpu_result['R1'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r.S1[self.half:], cpu_result['S1'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r.R2[self.half:], cpu_result['R2'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r.S2[self.half:], cpu_result['S2'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r.R3[self.half:], cpu_result['R3'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r.S3[self.half:], cpu_result['S3'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_stochastic_oscillator_k(self):
'''Test portfolio stochastic oscillator'''
r = gi.port_stochastic_oscillator_k(self._cudf_data['indicator'],
self._cudf_data['high'],
self._cudf_data['low'],
self._cudf_data['close'])
cpu_result = ti.stochastic_oscillator_k(self._plow_data)
err = error_function(r[:self.half], cpu_result['SO%k'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = ti.stochastic_oscillator_k(self._phigh_data)
err = error_function(r[self.half:], cpu_result['SO%k'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_stochastic_oscillator_d(self):
'''Test portfolio stochastic oscillator'''
n = 10
r = gi.port_stochastic_oscillator_d(self._cudf_data['indicator'],
self._cudf_data['high'],
self._cudf_data['low'],
self._cudf_data['close'],
n)
cpu_result = ti.stochastic_oscillator_d(self._plow_data, n)
err = error_function(r[:self.half], cpu_result['SO%d_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = ti.stochastic_oscillator_d(self._phigh_data, n)
err = error_function(r[self.half:], cpu_result['SO%d_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_moving_average(self):
'''Test portfolio moving average'''
n = 10
r = gi.port_moving_average(self._cudf_data['indicator'],
self._cudf_data['close'],
n)
cpu_result = ti.moving_average(self._plow_data, n)
err = error_function(r[:self.half], cpu_result['MA_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = ti.moving_average(self._phigh_data, n)
err = error_function(r[self.half:], cpu_result['MA_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_rate_of_change(self):
'''Test portfolio rate_of_change'''
n = 10
r = gi.port_rate_of_change(self._cudf_data['indicator'],
self._cudf_data['close'],
n)
cpu_result = ti.rate_of_change(self._plow_data, n)
err = error_function(r[:self.half], cpu_result['ROC_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = ti.rate_of_change(self._phigh_data, n)
err = error_function(r[self.half:], cpu_result['ROC_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
n = -10
r = gi.port_rate_of_change(self._cudf_data['indicator'],
self._cudf_data['close'],
n)
cpu_result = ti.rate_of_change(self._plow_data, n)
err = error_function(r[:self.half], cpu_result['ROC_-10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = ti.rate_of_change(self._phigh_data, n)
err = error_function(r[self.half:], cpu_result['ROC_-10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_diff(self):
'''Test portfolio diff'''
n = 10
r = gi.port_diff(self._cudf_data['indicator'],
self._cudf_data['close'],
n)
cpu_result = self._plow_data['Close'].diff(n)
err = error_function(r[:self.half], cpu_result)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = self._phigh_data['Close'].diff(n)
err = error_function(r[self.half:], cpu_result)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
n = -10
r = gi.port_diff(self._cudf_data['indicator'],
self._cudf_data['close'],
n)
cpu_result = self._plow_data['Close'].diff(n)
err = error_function(r[:self.half], cpu_result)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = self._phigh_data['Close'].diff(n)
err = error_function(r[self.half:], cpu_result)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_shift(self):
'''Test portfolio shift'''
n = 10
r = gi.port_shift(self._cudf_data['indicator'],
self._cudf_data['close'],
n)
cpu_result = self._plow_data['Close'].shift(n)
err = error_function(r[:self.half], cpu_result)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = self._phigh_data['Close'].shift(n)
err = error_function(r[self.half:], cpu_result)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
n = -10
r = gi.port_shift(self._cudf_data['indicator'],
self._cudf_data['close'],
n)
cpu_result = self._plow_data['Close'].shift(n)
err = error_function(r[:self.half], cpu_result)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = self._phigh_data['Close'].shift(n)
err = error_function(r[self.half:], cpu_result)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_bollinger_bands(self):
'''Test portfolio bollinger bands'''
n = 10
r = gi.port_bollinger_bands(self._cudf_data['indicator'],
self._cudf_data['close'],
n)
cpu_result = ti.bollinger_bands(self._plow_data, n)
err = error_function(r.b1[:self.half], cpu_result['BollingerB_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r.b2[:self.half], cpu_result['Bollinger%b_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = ti.bollinger_bands(self._phigh_data, n)
err = error_function(r.b1[self.half:], cpu_result['BollingerB_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r.b2[self.half:], cpu_result['Bollinger%b_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_average_directional_movement_index(self):
'''Test portfolio average directional movement index'''
n = 10
n_adx = 20
r = gi.port_average_directional_movement_index(
self._cudf_data['indicator'],
self._cudf_data['high'],
self._cudf_data['low'],
self._cudf_data['close'],
n, n_adx)
cpu_result = ti.average_directional_movement_index(self._plow_data,
n,
n_adx)
err = error_function(r[:self.half], cpu_result['ADX_10_20'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = ti.average_directional_movement_index(self._phigh_data,
n,
n_adx)
err = error_function(r[self.half:], cpu_result['ADX_10_20'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_vortex_indicator(self):
'''Test portfolio vortex indicator'''
n = 10
r = gi.port_vortex_indicator(
self._cudf_data['indicator'],
self._cudf_data['high'],
self._cudf_data['low'],
self._cudf_data['close'],
n)
cpu_result = ti.vortex_indicator(self._plow_data,
n)
err = error_function(r[:self.half], cpu_result['Vortex_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = ti.vortex_indicator(self._phigh_data,
n)
err = error_function(r[self.half:], cpu_result['Vortex_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_kst_oscillator(self):
'''Test portfolio kst oscillator'''
r = gi.port_kst_oscillator(
self._cudf_data['indicator'],
self._cudf_data['close'], 3, 4, 5, 6, 7, 8, 9, 10)
cpu_result = ti.kst_oscillator(self._plow_data,
3, 4, 5, 6, 7, 8, 9, 10)
err = error_function(r[:self.half], cpu_result['KST_3_4_5_6_7_8_9_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = ti.kst_oscillator(self._phigh_data,
3, 4, 5, 6, 7, 8, 9, 10)
err = error_function(r[self.half:], cpu_result['KST_3_4_5_6_7_8_9_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_mass_index(self):
'''Test portfolio mass index'''
r = gi.port_mass_index(
self._cudf_data['indicator'],
self._cudf_data['high'],
self._cudf_data['low'],
9, 25)
cpu_result = ti.mass_index(self._plow_data)
err = error_function(r[:self.half], cpu_result['Mass Index'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = ti.mass_index(self._phigh_data)
err = error_function(r[self.half:], cpu_result['Mass Index'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_true_strength_index(self):
'''Test portfolio true strength index'''
r = gi.port_true_strength_index(
self._cudf_data['indicator'],
self._cudf_data['close'],
5, 8)
cpu_result = ti.true_strength_index(self._plow_data, 5, 8)
err = error_function(r[:self.half], cpu_result['TSI_5_8'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = ti.true_strength_index(self._phigh_data, 5, 8)
err = error_function(r[self.half:], cpu_result['TSI_5_8'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_chaikin_oscillator(self):
'''Test portfolio chaikin oscillator'''
r = gi.port_chaikin_oscillator(
self._cudf_data['indicator'],
self._cudf_data['high'],
self._cudf_data['low'],
self._cudf_data['close'],
self._cudf_data['volume'],
3, 10)
cpu_result = ti.chaikin_oscillator(self._plow_data)
err = error_function(r[:self.half], cpu_result['Chaikin'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = ti.chaikin_oscillator(self._phigh_data)
err = error_function(r[self.half:], cpu_result['Chaikin'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_money_flow_index(self):
'''Test portfolio money flow index'''
r = gi.port_money_flow_index(
self._cudf_data['indicator'],
self._cudf_data['high'],
self._cudf_data['low'],
self._cudf_data['close'],
self._cudf_data['volume'],
10)
cpu_result = ti.money_flow_index(self._plow_data, 10)
err = error_function(r[:self.half], cpu_result['MFI_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = ti.money_flow_index(self._phigh_data, 10)
err = error_function(r[self.half:], cpu_result['MFI_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_on_balance_volume(self):
'''Test portfolio on balance volume'''
r = gi.port_on_balance_volume(
self._cudf_data['indicator'],
self._cudf_data['close'],
self._cudf_data['volume'],
10)
cpu_result = ti.on_balance_volume(self._plow_data, 10)
err = error_function(r[:self.half], cpu_result['OBV_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = ti.on_balance_volume(self._phigh_data, 10)
err = error_function(r[self.half:], cpu_result['OBV_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_force_index(self):
'''Test portfolio force index'''
r = gi.port_force_index(
self._cudf_data['indicator'],
self._cudf_data['close'],
self._cudf_data['volume'],
10)
cpu_result = ti.force_index(self._plow_data, 10)
err = error_function(r[:self.half], cpu_result['Force_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = ti.force_index(self._phigh_data, 10)
err = error_function(r[self.half:], cpu_result['Force_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_ease_of_movement(self):
'''Test portfolio ease of movement'''
r = gi.port_ease_of_movement(
self._cudf_data['indicator'],
self._cudf_data['high'],
self._cudf_data['low'],
self._cudf_data['volume'],
10)
cpu_result = ti.ease_of_movement(self._plow_data, 10)
err = error_function(r[:self.half], cpu_result['EoM_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = ti.ease_of_movement(self._phigh_data, 10)
err = error_function(r[self.half:], cpu_result['EoM_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_ultimate_oscillator(self):
'''Test portfolio ultimate oscillator'''
r = gi.port_ultimate_oscillator(
self._cudf_data['indicator'],
self._cudf_data['high'],
self._cudf_data['low'],
self._cudf_data['close'])
cpu_result = ti.ultimate_oscillator(self._plow_data)
err = error_function(r[:self.half], cpu_result['Ultimate_Osc'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = ti.ultimate_oscillator(self._phigh_data)
err = error_function(r[self.half:], cpu_result['Ultimate_Osc'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_donchian_channel(self):
'''Test portfolio donchian channel'''
r = gi.port_donchian_channel(
self._cudf_data['indicator'],
self._cudf_data['high'],
self._cudf_data['low'],
10)
cpu_result = ti.donchian_channel(self._plow_data, 10)
err = error_function(r[:self.half-1], cpu_result['Donchian_10'][0:99])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = ti.donchian_channel(self._phigh_data, 10)
err = error_function(r[self.half:-1], cpu_result['Donchian_10'][0:99])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_coppock_curve(self):
'''Test portfolio coppock curve'''
r = gi.port_coppock_curve(
self._cudf_data['indicator'],
self._cudf_data['close'],
10)
cpu_result = ti.coppock_curve(self._plow_data, 10)
err = error_function(r[:self.half], cpu_result['Copp_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = ti.coppock_curve(self._phigh_data, 10)
err = error_function(r[self.half:], cpu_result['Copp_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_accumulation_distribution(self):
'''Test portfolio accumulation distribution'''
r = gi.port_accumulation_distribution(
self._cudf_data['indicator'],
self._cudf_data['high'],
self._cudf_data['low'],
self._cudf_data['close'],
self._cudf_data['volume'],
10)
cpu_result = ti.accumulation_distribution(self._plow_data, 10)
err = error_function(r[:self.half], cpu_result['Acc/Dist_ROC_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = ti.accumulation_distribution(self._phigh_data, 10)
err = error_function(r[self.half:], cpu_result['Acc/Dist_ROC_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_commodity_channel_index(self):
'''Test portfolio commodity channel index'''
r = gi.port_commodity_channel_index(
self._cudf_data['indicator'],
self._cudf_data['high'],
self._cudf_data['low'],
self._cudf_data['close'],
10)
cpu_result = ti.commodity_channel_index(self._plow_data, 10)
err = error_function(r[:self.half], cpu_result['CCI_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = ti.commodity_channel_index(self._phigh_data, 10)
err = error_function(r[self.half:], cpu_result['CCI_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_port_keltner_channel(self):
'''Test portfolio keltner channel'''
r = gi.port_keltner_channel(
self._cudf_data['indicator'],
self._cudf_data['high'],
self._cudf_data['low'],
self._cudf_data['close'],
10)
cpu_result = ti.keltner_channel(self._plow_data, 10)
err = error_function(r.KelChD[:self.half], cpu_result['KelChD_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r.KelChM[:self.half], cpu_result['KelChM_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r.KelChU[:self.half], cpu_result['KelChU_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
cpu_result = ti.keltner_channel(self._phigh_data, 10)
err = error_function(r.KelChD[self.half:], cpu_result['KelChD_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r.KelChM[self.half:], cpu_result['KelChM_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r.KelChU[self.half:], cpu_result['KelChU_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
if __name__ == '__main__':
unittest.main()
| fsi-samples-main | gQuant/plugins/gquant_plugin/tests/unit/test_multi_assets_indicator.py |
'''
Technical Indicator Node Unit Tests
To run unittests:
# Using standard library unittest
python -m unittest -v
python -m unittest tests/unit/test_nodes.py -v
or
python -m unittest discover <test_directory>
python -m unittest discover -s <directory> -p 'test_*.py'
# Using pytest
# "conda install pytest" or "pip install pytest"
pytest -v tests
pytest -v tests/unit/test_nodes.py
'''
import warnings
import unittest
import cudf
from greenflow_gquant_plugin.transform import ReturnFeatureNode
from greenflow_gquant_plugin.transform import IndicatorNode
from greenflow_gquant_plugin.transform import AssetIndicatorNode
from greenflow.dataframe_flow.task import Task
from .utils import make_orderer, error_function_index
import numpy as np
import pandas as pd
ordered, compare = make_orderer()
unittest.defaultTestLoader.sortTestMethodsUsing = compare
# gt for return
ground_truth = b'\x92\x01\xef3\xec \x02@\xa3\xd5\xc0\xd96\xdd\xc0\xbf\xe2\xe1\xa7f\xab\xc0\xaa?\x8e\x9cySv\x9b\xca?vA\xc5\xd9\x8e\xd6\xd8\xbf\xbf){\x92=p\xc3\xbf\xd7\x06\xda\xca\x98\n\xd0?\xcf\xbf\xe8C\xb8\xb5\xc6?\x03\xa9\xf2H1\xcf\xca?V{\xc3\x9c\xda\xd5\xc0?\x1f\xed\xc6p\x14\xea\xe4\xbf\x84*\x911\x07\xf6\xe1\xbfQ\xdbQON&\x12@b\\H\xceZ\x19\xd3\xbfsxy\xfe\x9aT\xdf?\xc08\x16\xc2\xe0\xef\xdc\xbflI\xa2\xf0\xbfo\xee?\xc7\x0f\xffY\xa0\x00\xd7\xbf\x04l\xf3\xfcD\x16\xe1\xbf\xb9j]\x90my\xe8\xbf[\xc0B\x82\xe6\xf5\xe1?h$\xf1\x99\xec\x9f\xec?\xf1#\xdb\xb9GX\xf8?\xcd\xaf\xe4\x1b\xc0\x13\xeb?\xa8`\xa6\x8d\x8af\xbf?m\'\x07\x92j\x80\xd7\xbf<\xa4\x04\xfb\x93\xa7\xdf\xbf\xd3\xdd\xbcGk\xd1\xf3?\xf9"\xc8w\xad\xc8\x95\xbf:3\xe3\x05\xef\xf4\xca\xbf\nU\xd1\tQ[\xed?\xd3\x8f\xc0\x87J\x0f\xc5\xbf\r6\n[\xb1T\xc3?\xd5I\x83\xa6\xee\xae\xd5\xbf]\xc3\x1f\xc6L/\xe3?I\xe3K\x1b\xf6f\xd1\xbf\xaf+ =\xc9\x9c\xef\xbf{\xd6\x94\xb3R#@@\t\xe8\xe6D\x83\xb8\xfe?\xc2\x01\'3NJ\xde\xbf\xea\x98j\xdc9\x81\xe3\xbf\xb2.\xf1\x9e\xcd\xa4\xd5\xbf\xf2Q\xbf\x0c\xf1\xdd\x17@\xf4\x05\xf44>\xe6\xc7\xbf$\xfc\xee?&\x07\xcd\xbf\r\x1d\x0eg\xb6\x90\xdb\xbfu\xbe\x9d\xde\t\xaa\xf3?\x18\xa0\xb6\x11\x83\xf9\xda\xbf\x97X=\xaa\xe5\xbe\xdd?\xca\xf6\xcb_re\xda?\xdc\xc3\x92tf$\xba?\xc5\xd6{8G\x00\xec\xbf\xddo\xd4\x8c\xc8\xeb\x0e@\x06\xd7\xfe\x9c\x00\xe7\xd0?\xca\x90F\x90gi\xe8\xbf F\x85<\xd3\xa1\xef\xbf\xd6A\x15\x00\x8a$\x81@/\xfbQ$\xbb\x11\xc5\xbf\xaf\xfeh\x1f\xe0$\xe5\xbf?o\x97\x9b\x87\xc3\x92?\xc4{\xe6\xbd\xbd)\xe2\xbf\\\x16\x0fp\x8b\xdd\x07@y\xa0,;\x81\x10\xee?-\x94\xe8C]\x15\x93\xbf\xd4b\xc0gix\xe7\xbf\x9bm\x8c\xe8\xab\xcf\xe3?m\xa9\x12H\xa0P\xf7?\xbf\xc3\x11\x0e\xde\xe8\xe6\xbfT*=\xf3\xb78\x03@\x8aY"\x8c\x17\x0b\xc7\xbfOy\xe8sPB\xc3?\x98j\xe1\xbd\xcd\xbe\x8f\xbf\xb0W\xa1Dh\x9e\xea\xbf\xc0\xa3\xb1\xb4U\x8d\x11@4\xfc\xaaY\xecw\xc3?\x90\xde):Y\x01\xb4\xbf\xc5\x15\xfdg`C\xc3\xbf\xae\xa3\x88\x92\x90/\xe2\xbf\xce{^\xef\xfdR\xc3?\xb4\xbeN\xd5P>\xd5\xbf\x97\xd9e\xc3!\xd7\xe3\xbf\xd7+#\x11{\xa5\xf2?1\x1b\xc6^\xeaT\x01@\xe8\x1eV\xa0^o\xe0?\xfb\xc7\x92\x02@\x8c\xd0\xbfm\xfa\x9fef\xc0\xc7\xbf\xc7\xf0\xf4r%\x01\xe1\xbf\r\rv&\xac\xf4\xef?vh\xd4\xfcQ\xd4\xe0?\xde\xa3\xab\x04\xe3B\xe4\xbf\xfd\x03)\xaa.<\xd8\xbf\x89.\x8c\x9aN\x8e\xec\xbf\x0b2M\xb3\xc1UB@\x16\x80^\xcc\xf9Y\xee\xbf[\xb63\xe7}\xa6$@|\x0eb$\xaee\xed?%Y(\x9b\x7f{\xef\xbf\xb10[\xd1\x1flB@\x06\xaf\'\x19I\xf1\xad\xbf\xea_\xa2\x15\xf0\xaa\xe3\xbf=\xd9q\xa9U\t<@>\xef\x06{~\x13\xea\xbf\xce!\xaf\xce\x80y!@\x17*T\x8ae\xc9\xde?\xcc\xe4\xa01\x0c\x1d\xe1\xbf\x18\x9c\xc1\rC\x07\xed\xbf"k\x95 
\xfc\xcb\xf1?\xc9e\xf6b&)\x8b?\xe6^\xd1\x03\xe2\xc2!@P",%^\xf2\x85\xbfV\x06\x94]\xfeR\xef\xbf\x9d\xfa\x88\xad\xa0\xf7H@\xf5\x84\x17;\xe9\xb9\xad?$|\xa9\xb1`\xec\xe1\xbfW\x8fd\xd3\xa72\xd2\xbf\xe9\xf3\xd8!U\x04\xff?\x8d\xec7d)(\xd1\xbf\xde\x06C\x7fb\xb5\xe8\xbf\xccF$ux\x10\xe4?E\xc2\xcbD\xc0m\x05@\xceS\x8c1\xd8\xd8k?\x18\xce\xa7z"\xa8\xe7\xbf<s8M\xf2y\x05@\x0e\x1aK\xe8b\xb6\xd5\xbf_O\xb0_^\xc9\xc1?{\x92*\x07~&\xc2\xbf\xcf\xac\xcc\x80\xbaH\xcc?\xe3G\xe6\xe1\xf5\xbd\xef\xbf\x7f\x82\x84\x12\xc2a\x0e@\xa7G^\x88z\xe4:@\x9c\xd1\xf3\x92\xd8\n\xe8\xbf\x8b@\xa2\xa66Z\x08@2&@\xd9`\xd8\xe2\xbfj\x08\x1f\x9e\x84\xd1\xe9\xbf\x8f5\xc9L\x8c!#@UX\xa0\x07\x89{\xdf\xbf\\]\x89\xfe\x08]\xb7?\xe7\x92B\xcb|\xed\xff?\xe2b\xe0\xb3\x94\x8c\xe9\xbf\x80H\xbf\x93\x1b\x07\xe9?2\xf6u@;k\xe6\xbf\x97.e%\xf1\xe6\x06@\x8f\xf9\x91\xc9\xe9\x9a\xe8\xbfU7\xa1\xe7\xd4\xbf\x11@L\xadA\x91\xbe#\xde?\xcf\xba\xfc\x0f\xa3\xc5\xef\xbf\x7f+\'_\xc4\n0@1=;4M\x1e\xd8\xbf\x060E~\xc9z-@3y\x08I<P\xbd?\x8d\x02\xbd\xf2\xd08\xde\xbf\x8dU\xc4I6\xcb\xe3\xbf\x0bc"\x89\xb7\xa1\r@\xc8\x17\xf79.\x1d\xd3\xbf\x8cyq\x04\xa9\xd3\xca?`\xb2\xb5R\xc4\xad\xd6\xbf\xfcF\xc90\xbdV\xe1?\x14\x94D\xc3hh\xe2\xbf\x8e|\x0cY\xdb,\xe5\xbf?\xe7\xcf\xb3\x0c\xf5\x1b@\xb9|\x12\xef\xc1\xd9\xef\xbf\xdd\xf1U2\xdff0@\x88\xff\x1e\xe9\xdb\x97\xe9?\xab\xdc\x95\xc1\xb3|\x18@\xba_\xc38\x06\xb6\xe5\xbfK\x05\xd9\x0c\xee\xee\xd2\xbf\xaf\x0c\x85|\x80i\xc9?\xcc\xd0\x90\xab\x81\xfe\xe3\xbf\xadEW\xcb\xd4\x82\t@\x10\xd2\xeett8\xea?\x91\xc9R?\x89\x00\xbd\xbf\xf7R\xb0\xe9\xd7\x9d\xe3\xbf%\x1eK\xcc;\xe3\xb7\xbf\x1ce\x80\x92D\xe1\xf2?XO\xaf\x94\xdd\xcf\xe7?iB \x9e\xe8\x12\xd1\xbf\xd5B\xf0t\xadO\xc8?>\x89x\xa3\xc1\xeb\xda\xbf\r\x1c\xf50\xec\x9d\xb5\xbf\x95FF\xe8z\xcd\xe9?}^\xbd\x94\xba\x08\xdf\xbf\xdd\xf3I\x96\xfd\xf9\xde?\xdcl-c*W\xeb\xbf\x86\xd4\xfe\x0b\xe6\x8a\x19@n\x9f\xf5\x1c\x8a\xdf\xe0\xbf`\xa2JiRI\xfa?\x88\xfb\x95\xcb\xce\x1e\xe3\xbf\x01q\xf5V~\x8f\xf8?;\xd6\x8dX$p\xe6\xbf\xec*\xb9\x9f|`\xef\xbfI\xbd\xbd\xa5\x90\xb6>@\xc4\x92\\\x81\xcd\xda\xf1?\x9c\xcdf\xe3\xf7\xac\xd5\xbfH\xc6\x8d\xbbU\xe6\xfc?=i\xb19\xe4\xc1\xe3\xbf\xa5b\xc3oo\xa8\x05@\x97\x17\x10\x8e\x84\'\xd8\xbf\x8f=z\xe6\x1al\xd4?' # noqa #E501
# gt for indicators
g_t1 = b'\x80x\xe2\xf8Q\x0f\x7f\xbf\x04O\x11|\xec\x0e\xdd?\xa0\xbf\x98\x96\x03\x97\xca?\xf7\xbbF\xea;I\xd0?\x02\x8e\x80\xa6n\xf5\xcd?\xf85`\x18)\xd1\xc3?/\x12n\xb1\xb1\xaa\xc8?\x88^c<\xeb\xe5\xa3?d\xce[\x8f\xd73\xb0?$\xfe\xba\x87\xecq\xb2?(\x87\x87\xfcTF\xbf?\x1ezq\xeee\x13\xd2?\xccPQ\x90\xb4\xf5\xcd\xbf\xa8\xd0\xfd\xf9\x1c\xfd\xc1\xbf\x92\xfdI\xf3\xb6\xdf\x05\xc0m\xd5\xff\xd84\xb4\x00\xc0\x8c)\x9c\x0c\x86\x08\xf6\xbf\xb0\xeb\xb4x\x83\xad\xea\xbf\xf4\xdc{HY\xe3\xe1\xbf\xc0j\xb9I\x8c/\xce\xbf y\xe5\xe1\x15\xa0\xb4?L\x0b\xactJ\xe6\xd2?\x14i\\\x0c\xae\x1e\xdc?\xb8\xe2\xc0\x98\x19\x19\xd8?\x8c\xfa\x1fRG[\x06@:\xa4S\x87\xfek\x02@\xf5w\x95\xf5\xc6{\xfc?\x9a\xa5\x94Z\xee\xdc\xf5?\xfa7;\x04\xc7\xc9\xf1?\x95QY\xa3\xa3\xa3\xd2?\xc7\xfe\xe6J\xfb\x84\xa0?f\x1d\x97G\xd3s\xd7\xbf\x14\x11;z\xfbX\xc1\xbf<\xd0<\xdb\xf2\x01\xb8\xbf>bq\x8aG\x18\xc7\xbf\x98\xb0\x83\xf6\x16L\xc0\xbf`\xb9\xd6NE\x9d\x9e?GT\xdb\x9cT{\xd0?\x1a\xc1\xa9\x86\xa7\xe1\xc7?Z\xa0\xf8\x7ff\x0b\xb3?(\x8bT\\\xb2\xcc\x7f?=\xceH\xdckF\xd4?D\x1fV\x8cHG\xd6?2B\xcd5\xa3\xb1\xd0?\x8aI\xe5Kt.\xc4?Ta<\xe3$\'\xbf?p\xc5\x8b\xb5Yu\x8e?\xf0\xe0(\x8a)=\xa0\xbf4\xe1\xf02u\x85\xc3\xbf\x18\xb6\xbbYrm\xc5\xbf\xc6\xc2\xe0BV\x1d\xc5\xbf8\x8cO\xb3\xbfH\xb4\xbf\x1c\xfe|Yt,\xc4?\xd3\x99\xc8b=\x02\xc0?\xac\x99\xe6\x00\xb9\xb9\xc1?\x04U\xd9\x98\xc3\xa5\xbf?\xbeRe\x1d\xba\xe5\xc4?\xa80o\xfc#\x12\xbb?p1\\\xc4\xb5\x86\x9a?\xe4\x19\xe2Ch\x9d)@_\xc7\xe8;\xea6"@\xba\xac\xae\xa0\x8aA\x1e@Xgp\xb4\x90\xdfM\xc0$\xacd\x8d\x11\xd1E\xc0`\xf4\xf3+p`?\xc0\xba!\x19\x10\x8aZ5\xc0X\xc9\x8e\xdc\xd1Y+\xc0\xd8\xc8\xab\xd8] \x1f\xc0\xb0\x03\xac\xd4\x1a\xa1\t\xc0\x80\xf2g\x98\xa5h\xcd?\xe0\x8b\xf8\xda\x899\x05@8D\xba\x13\x1b\xf5\x11@\x0c\xd0\xe2\x95\x80\x93\x16@\xe2\x95X\x12\xd8\x13\x1a@\xe6Yu|\xd0`\x1c@\xc8\xa3h\x9e\x1c\x94\x1e@\xdcVI\x80\xb9\xb5\x1e@\xe6\xe1\x07\xcaK\x82\x1c@G\x1d\x81\x85\xd0\xbd\x1c@\xa1\xce\x95\x87e\xec\x1b@\x9e"\x88\x91\xab\x90\x1a@\x02!\x9f \xef\xf7\xef?\xf6\xa1l\xc5\xafG\xf0?\xb3\xf6\xef\x14\xfc5\xf3?\x8e|x2\x9bj\xf0?j+\x01B\xa3\xb9\xf2?V\xcc\xc6i\\\x07\xf1?\xbf\xd6S\xde\x15\xdb\xf1?B\xcc\xf6\xd9\xc6z\xf0?\x88\xc1\xa0~\xcco\xed?$Uv_\x94\xe8\xe2?\x82\xd7V\x9f\x8f\xc5\xdc?\x1f\xa5;\x85\xb9O\xe4?\xf0X*\xe7P\xd8\xe1?\xdb+\x1c\x9c\xa0(\xed?A\x8f\xc5e\xb5\xf7\xe7?\x9c,7\t\x84\x87!\xc0\x14\xaa9k\xca\xd6\x18\xc0\xc8\x82\xe1D\x9d\xfa\x10\xc0\x82\xc4\xe5\x0e:E\x04\xc0\xfc\xbf\xc7\x0c\xc5\xd5\xf6\xbfP\x86\xbf\xd5]K\xdf\xbf@{D\xfb(:\xce? 0\xd2\xd0\xecv\xe3?(\xca\x1a\xb9\x06\xa3\xea?\xd8\xb11\xa7L~\xf1?\xc81\x0f\x86\x82v\xe5?\x94\xbd\xa5\xce\xa1.\xee?\x80\xef$\xe0h\xeb\xf4?K\xe5$\'\xf9\xf5\xf5?6\xb9\xc2\xdc\x9ev\xf6?t\xc3\xd2\x16ZZ\xf0?\x94q\xff@\xa5\xd4\xf3?W0\xed\x1a\x96N\xf4?\x90y4fk\x8d\xf4?\x1e\x8c\xed\xb6O\xd7\xf2?\xa4\x17\xdcJv_\xf3?\xfd\xe7\xff\xbb#P\xf3?\x80\xcbD\x16\xc6\x8f\xe0?d/*\x1f\xf5\xba\xe2? \x96\x86\x16\x0b\x9b\xdc?\xae\xbf\xc4Vm}\xe0?\x8b\x0e/\x15\x87\xb9\xe0?tU(c\xa3\x92\xe2?\xf9\xbet\xedd\x05\xf1?\xf8\xb5\xeb\x03\x03,\xed?\\\x01\xcd\xd3h\xc2\xd8\xbf\xb0w\x04\x91\xf3\xb8\xb6\xbf\x00\xe3B^\x03\xcf\x83?P\x10\xe5,O\xee\xb7? 
^\x9e\xdct\xbf\xcb?\x90\x98=u\x9aD\xc7?\xf0QYc\x94\x86\xb8?\xd8\x87\x02\x81\xf4I\xc2?\x90?$jU\x14\xd1?\xdeIs&\xf7\xe1\xd4?D9\xee9\xc7\x07\xcb?\xe6M\xf5:\xa7\x84\xd3?\xe0\nv\x97\xaf\x18\xe4?\r\x9c\xf8\x97\xba\xdc\xe1?\x989\xe1\xf4\x96\xe4\xe0?\x00\x1b\xc8\xf0\xe4\x87\xde?,^\x04$\xf4\x1b\xda?p\x08\x8c\xdd\xd9\x97\xd7?\xfd\xb3D0=\x14\xd5?\xe0B\xe6+\x04V\xd5?\xbe\x8dx\x7f\xc6\xf7\xe0?\xc1\x87\x12`\x07~\xd7?\x14\xadB\xa8\xf3$\xff?\xd4:\xf3[u\xb5\xf5?\x82}\x00^.,\xef?\xb0F\x11\xac>\x8c\xe6?\xc0J\xfc\xa1\x03\x9c\xdd?\x90\\E\xe5\x05\'\xce?h)\x95\x9a|\xe9\xb5?P\x13ar\x88\x92\xa9\xbf@\x93\xdd\xb0iF\xbe?\x00?\x14\xa4\xe3 \x84\xbf\xac\xea\xf7\xed\x91D\xc3\xbfD7\'\xdf\xab\xaa\xe4? \x8e\xe2\xb9\x87\x9d\xb3?\xc0\xb2}\xbf\xfd \x95\xbf' # noqa #E501
g_t2 = b'&\xec\x1b\xc9\xee\x0b\xff?)\x9es\x15\x01\xff\x02@D\xec\xda\x93\x9ey\x05@4\xee1\x83s\xfc\x04@U\xaa\x1e\xd4\xc2\xfb\x04@Z\xd2\x08\xb1Q\x93\x05@\xef\x81L_uB\x06@\x1d\xa95\xfc\xee\xb5\x05@\x91\x88\xb1\xbe\xa5\xae\x06@g\xaa\x91\x1bn\xa2\x06@\x08\x08\x16h\xff\xbc\x04@\x99\xa6\xcf6\xc5\xab\x00@\x10\xdc\xcb\x1cq\x82\xfb?u\x91fh\x1e\xe6\xf4?\x97\x01\xaa\xa4\x0bu\xf3?7\xac\xf5E\x86\xe1\xf3?\xc7\xa8l\x95\xf8\x86\xf4?\xf0\xa8 z\x10\xc4\xf3?$\xa5k\x1ds\xf1\xfa?y\xf4=\xad\xa2\x0b\xff?\t(\xa4{y\xad\xfe?\xe1\x89\x8d\xe8\x9cW\xff?\xaa\xb5S\xf6\x00\x8e\x02@\x92\xceA\xac6@\x06@\x82\xd1\x1e\xa7%\xdf\x05@|\x894\x0c\xa6\xd0\x05@\x15d6\n\xf6\x01\x05@\x99\xb3KI U\x06@\xfa\xe7\xfa&\x88K\x01@!G\x87|{\xa8\x00@\x9b\x9f\x8e\xbb1\xfd\xfe?\x07\xcf\xfb\x8cJ\xba\xff?\t4)6u\x86\xfb?\xf6\x85 "\x89\xcd\xfb?\x91\xed0\n\xb8I\xfb?h\x9b/\xdfkG\xfb?\x1f\xbd\xa6\xd5\x0c\x84\x00@Z\xa5\x1c3\\:\x04@\nc\xd1\xafbm\x05@2~k\xfeAV\x04@\xfc\xb5\xc7\xcb\x05\x04\x06@[\x02\xd6-M\xec\x07@l\xc5\xb0\xcf+<\x0b@R\xdb\xb4#r\xb8\x07@\x99\xfb\xb0\xa2\xee<\x08@\xd6\x7f\x0b\xed\xe7\x82\x08@:l\x9a\xad\xac\x8d\x07@\x0b\xf0\x84\xa7+t\x03@\x8a \xf2\x100x\x03@\xe2D\'{\xd3F\x05@\x92\x96\xb5}y\xcd\x03@X\xe8\x02\xc2\x7f\xb9\x01@\x84\x8e\x8c\xe4\xeb\xf0\xfb?w\xcb\xd4\x11R\x1c\xfa??\xab1\x06\xb4\x9d\x00@\xf5\x9fz{\x18z\x00@I\xb2\xd6\x8d\xab\xf3\xfb?\xb0y\xea\xe9\xbe\xd2\xf8?z\xb4s\xb0)\xab\xf8?\x9c+\xc5\xe7\xa6\x11\xf8?>\xce\xa7\xf9\xc9(\xfb?\xe4\x8a%_\xb6\'\x00@6\xff\xea s\xec\x03@\xacY+\x86A2\x06@\x8cu@\xd6\xc5\x0f\x03@\xc6\xf6\x97V\xe5\xa3\x03@J+\xc9j^\x04\x03@3m\xffeG\x89\x02@\xe2i\xc5\x7f.\xec\x03@\n_Z<\xe7\xd1\x02@\x8d\xfd\xd6\x1e@h\x02@dWl#\xdf\xd8\x01@\x8d8\x9b\\\x93\xa1\x00@\x97\xd7\x7f\xa9\x0e\x04\x03@\xcd\xc1C\xb00\x18\x03@\xfd#\xb0\xba\x03\x82\x06@\xef\xa6\xe8K\xef\xa4\x06@A\xd5\xec\x06B\x8b\x07@\xe6\xd3\xf1\x0b\xfb\xf5\n@\xab&\x96a\x17\x05\x0b@\x9d\x10\x87\x96\x10\xa9\n@BN\xda\xd5\x01\xa3\x02@\x85T\x8cE\xad\x01\x05@@5d\xe0\xfc\x18\x05@\xf5\xdb\xeaf\xd4@\x00@:\x7fJ5l\x8d\x02@\x8a\xeb\xc7^\x1e)\x02@m\xd8\xb6\xe8J\x84\x01@\xac\x15PM\xd7\x92\xff?\x9d\xc8m\xc7fn\xff?2s\x95\xd4\x9df\xff?\xd7CM\xaf\x92\x9f\x01@Id\xe8uM+\x04@\x10wT\xf6>\xd9\x03@F\xb3\x04\xb6\xd5\x08\x06@4\xafo\xe6\x95\xf0\x03@\xbc\xd4\x1a\x04\xd6\xcd\x04@]\xcam\xc1m \t@K$r\xb2\xc9\x1a\t@\xe4\xe5\xe8\xa3AR\n@\xd9\xc4\xec\xb5\xdd\xf9\n@\xf7\xe1Y\xea{H\x08@^f\'\xe6\xe1\xdb\x05@\x01\xa1:\x19\x96\x90\x06@\x9c\\\xed\xe89\x85\x07@\'\x9e\xf4\xdd\x19\x8c\x07@\x1e%}\x9f\xe2\x15\n@\x1bd~\xad\xf4\xc4\x05@cN"\xad\x8fY\x06@V\x17|2\n[\n@?^3:\xa6K\r@\xaf2\x16hW\x0c\x0e@D\xcc\xef\xf7\x83\x99\x0e@>\xce\xe0\xb2\xc0\xe8\x0e@\xdd\xd82x\\\x02\x0b@\xe7\x97Ya\xac\xed\x0c@\xfb\x1fd2\xea\x00\t@LN\xff3\xa7{\x08@,S\xdfK?y\x08@~\xb7E\x7f+\xdd\x03@\xa77\xdcX\xb1\x95\xff?\xff\xbet\xafW\x8d\xf9?\xb1\x92@\x1f7\x83\x00@\xe7\x9c^\xab\x1d \x00@\xf3\x924\x07l\xe6\x04@\xa4l4\xd6\xfa[\x06@\x1aceu\'X\t@\xff8P\xean\xf5\t@\xebc\xaa\xef\xf4\xbe\x0b@\xb1R?d;-\x0e@\xf1\xbaq#\xdd"\x10@E\x94\xbb\xf3\x12\xd3\x11@\x98c\xaez\xb8\xff\x0f@\x9f\xbe*\xde\x08\xb7\x0e@#\xdfof\xf7\x13\t@\xd0\x15\x91(D\xb8\x06@\x11\x046o\n\x86\x05@\xee\x1e@\x11&\x0c\x02@0\x82}+j\xa1\x03@\xd5$\xcd\x1b\xbe\xa2\x01@&?\xd5&\x97-\x00@\xfb\xebg\x97<m\xf9?D\xc4\xf5\xff\xa5r\xf9?\xc6\xbe\xbf\xef]e\xf9?\x18l\x92e\\\x18\xfb?N \x1e4\xaf\'\xf7?sT\xc6\xd8\xd3[\xfb?\x1f\x94\x1c \x05\xe9\xfa?\xeb\xb0s\r\xb0\xc6\xfb?\r*S\xfb\xc8\x9d\xfc?\xc3\xd7\x18\x91\xa3\xa5\xfd?\x94\x97s\xdc.\x8f\xfe?0K\x8b\xc7\xfc\xbf\x00@\xc1\xd6\x90\xe4qw\x05@|\x8aG\x8c\xa3\xdb\x07@#\x9f6\x95>\xf7\x08@W\xfc\x81\xf3\x979\x07@\x8b\x82\x9b\xa2r 
\x07@\xb7\x7f\x8f\x85\xab\xe5\x07@\to\x19\xb8\xd9>\x08@!}\xb8\xd8,C\x07@\x8d5\x16\xcb\'u\x06@' # noqa #E501
g_t3 = b'\x95\x13\xbek\xd1O\xcc?\xf5\xdc\x9cSD\xf6\xc0?&\xb1\xee\xdb\xdcD\xca?\x91\xf4\x90\x17\x87*\xd1?\'\x8b[\xfe\x98\xc4\xe0?S\x84 "\xca\xeb\xe9?\xdd\xea\x80\xd4\xe7\x8c\xeb?\x14@]\xc6\x91\xb2\xe2?\xc8RnQ\xacL\xd9?\xdd-tK\xb8\x03\xe6?\xea\xa9\x81U\x18\x9e\xe4?\xce\xc2\x8f\xa3`\xb0\xdf??\xd4\xb9\x0f0\x89\xeb?\x08\xe7\xe1k\xaay\xe5?\x08\x14:}\xe7.\xe8?\xa4y\x17\xae\xe9j\xda?\xf3\x05\xac:uC\xea?\xab\xf9]B;2\xe0?\'\nYVy\x17\xb6\xbf\x0f\x85<o\xa5\x80\xcb?S\xc4\x00\x95\x86j\xe4?\xd8\xfa\x03\xdc0J\xd5?\xd79\xec[\xb8F\xc9?%s\xed\x94\xfbK\xcc?\x0c\xfa\x91l}\x0e\xe7?)b\x90D\xc4t\xe3?\x87q\x83\x1c\xceM\xe1?~\x13Z\x07\xaf\x08\xd9?\xae\xb6\x0b7$\x1e\xe5?\x0e\xec\x82\xcfe\xcb\xd8?\xd5/p\xdd\xe6>\xe3?\xb8\xa4\xf0\x8c\x0fU\xe9?\xe8-\x9f\xb4&\xd4\xe9?=\x19\xa57\x9b\xba\xa1?~\x12F!\xceU\xdf?\x19\xde\xdb\x85G\x8d\xe4?\x18vO\x0c\xd6\x94\xc5?a\x03\xc1R\x00%\xbb?\xaa\x02\xbc\xa4\x92\xaa\xeb?B.\x0eI\xb9{\xe6?%X\xdf\xc4\xbeR\xd5?\xf5{\x86}\xf1\xa0\xd7?\xee\x16\x05\xfb0\x12\xd3?\xde\xdbF\xea\x1aq\xe1?\x0cT}M8\x8f\xea?\xa6\xf1Ik\xbf\xee\xe8?_\x02\xdaMV\xd1\xd4?G\xcd\xbd\xa2\x0c\xdf\xd8?\x01H\xb1\x9c$e\xea?\xfc\x15\x1a\x86t\x82\xd5?\x94\xc7\xe5\xff\xcb\xea\xe8?]9\x88\t\x1a\xf2\xe3?\xf8\xe7\xb1\x8a\x97\x80\xe5?-\x97\x0b\xbc\x88!\xe4?z\xa4\x12\x0b<u\xc0?\xd2*O\xc3\xa1\xfa\xe3?\xfc\x1d5G\xf9\xa6\xe5??\x10\xcbp^\xc3\xe2?\xe1\x85\xb1\x10\xab\xec\xdf?\x9d$\xd4G\x07\xd4\xbe?\xc6\xbcI\x8c}!\xcd?\n\x8f\xf9\x93Mb\xc9?\xfaV\xb1\x89\xefc\xc5?"eE\x80\n\xbc\xd2?\x97\xdf\x12\xc1\x87\xf5\xe2?(\xcb\x1c%\xde`\xea?)\xac\xe9>\xd1@\xe5?\xb0\x83\xca\xf0Y\xbe\xe2?+\xd7\x1b\x8cmB\xd6?\xc5f\x01T\xfcr\xe2?\xe3x\xcb\x9d5\xd7\xe8?De2\xce\x81R\xd4?\xf4\xe3\x8f\x96\x9f\xb1\xc8?*N\xb7CD\xa4\xb8?h\xcbs%\x1fo\xe6?\xa8\x9d\xff\xd9b\xb9\xc6?\xabpb\xb4\xc5[\xe1?\xdd"U\xb8\xec\xf9\xea?\xcd\xd2\xad:9U\xcb?pk\xceP~\xfc\xe2?\x83\xce.\xd1l`\xe3?\x02\x01\x7fy\xe8\xa2\xc6?F`\x00\xa1\x10\x8b\xd2?*\xcdx\x13\x19\xb3\xe7?\xb6\xff\xf2\xe5E\x81\xe6?\x95\xaf`\xebB\x95\xcf?\xef\xeb\xa7\x7f}\xbf\xe7?\xa2\x86\xd2\xba6\xf2\xdf?\x97\xe6\xb1\x19\xef+\xe1?B\xaef\x9eU.\xdf?\xaeI\x1c)r\x05\xe3?a\xbf\x960\xdc\xdb\xa9?\xd85\x15\x05\x1fm\xbf?\xf0\xe4Q\xeb\xaf>\xe5?1\x07\xb0V\x08/\xd2?\x8c\xdc_\x84F\x17\xe6?\xb0%\xb4\xfd\xbf.\xd8?CE\xca\x8e\x08Q\xcd?\xce\x85Sv+\xf9\xe5?\xc9\xdbP\xc82B\xdd?\x97\x8c\xf4\xdb\xdaN\xe0?\xa2\x18\xe9\x13Gd\xee?\xb0\xeaT\x8djL\xd3?\x1c\xbf\x81\xe78p\xdd?\xe9N\x13\xb2s\x1d\xd0?\xc0y\xdd\x15%i\xe1?\x9b\x1d\xa3\xa9(\xca\xd1?\xdf!\xa0\x93\xb7\xce\xe3?_A}\xb0\xff\xf2\xe9?\xab\xc0\xb0\tq\xb4\xc9?\xb2\xda\xder\xd5\x16\xd3?\xe0Tl\x81\xcc\xba\xd2?\x95\xd8\x94l\x04\x8b\xee?\xa1\xbd\x9a\x0c\xd1\xd2\xec?\x87\x06\xd1\x06\xf4\x07\xe2?E5\n\x90R\xe8\xd6?\x08\x95\x03\x1aK.\xe9?\xc1\x83\xc7\x1d\xcf\n\xe3?\t\x0c{\x94q\xb7\xe5?e\xcaA\xd0\x8b\xa5\xdd?\xc0\\2y\x91\xfe\xe3?\x1b7L\x05\x0c~\xc8?\x00t\x83\x01\x90\x0f\xbc?_\r\xab\x02t\x1a\xe9?\x1a;"V\xf8\xcc\xbe?l\x14\xc2\x0c\\\xca\xc8?\xae\xcb\x85\x8e8\x00\xd2?\xd2\xa9\x93\xbfgZ\xea?\xe1\x19{\t\x0f(\xdb?W\x81^\x89\xa8\x88\xd8?\xd0c\xb3\xf6>\x83\xdc?\x0e\xabQ\xc4\x1c,\xd6?t\x98!\x1d\xa7\xa9\xe1?@M%\xc8\x92\xfd\xea?luH\xbd?8\xe7?\xc4\xb86tT\xb0\xd7?n\xbe\xbb\xa0\xed\x81\xd5?\x1c\xd9e]G:\xe5?\x0e\xed\xe1\xf4K\xe8\xed?Om\xd4\xb1\xffw\xe5?\x01\xb1\x1a\x00\xdb\x90\xe7?\x88\xaa\xdc\x1b\x00\x0e\xd8?K=\x0c\x02\xa9W\xd5?\xb6\xedl#\xa4\xa6\xe6?\x8e\xa4\x84\x05\xdfz\xd5?$\xecR\xc3\x85B\xe0?\xe5b\n\x83@qn?T\x17v\x80h\xdc\xe1?\x1bk<\xfc\x0e\x94\xd0?\x8a\x9b\xab<\x9f\x02\xe9?\xd2\xe7\x86\xef\xf1|\xd4?2\xbb\xf1\xc3X\xac\xe9?\xbe:&\x83\x95a\xd0?\xd3\xdb\xe4R\xed\xd5\xc0?\xeb\x15\xf7bF\xb2\xd2?\xe0\xbdx\xf4.9\xdd?x0\xd0\xe7=&\xd6?\xc3`a8QV\xe6
?\x85\xc3\x0b_R\xbb\xd7?\x03y$\xb5c:\xed?B_]\x13~\xa4\xe3?\x1d\x1bh\x8c3I\xe9?' # noqa #E501
class TestNodes(unittest.TestCase):
def setUp(self):
warnings.simplefilter('ignore', category=ImportWarning)
warnings.simplefilter('ignore', category=DeprecationWarning)
# ignore importlib warnings.
size = 200
half = size // 2
self.size = size
self.half = half
np.random.seed(10)
random_array = np.random.rand(size)
open_array = np.random.rand(size)
close_array = np.random.rand(size)
high_array = np.random.rand(size)
low_array = np.random.rand(size)
volume_array = np.random.rand(size)
indicator = np.zeros(size, dtype=np.int32)
indicator[0] = 1
indicator[half] = 1
df = cudf.DataFrame()
df['in'] = random_array
df['open'] = open_array
df['close'] = close_array
df['high'] = high_array
df['low'] = low_array
df['volume'] = volume_array
df['indicator'] = indicator
df['asset'] = 1
df['asset'].iloc[half:] = 2
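        # two synthetic assets: rows [0, half) are asset 1 and rows
        # [half, size) are asset 2; 'indicator' flags the first row of each
        # asset segment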
index = np.array(list(reversed(range(0, size))))
df.index = index
gt_index = np.concatenate([index[1:half], index[half+1:]])
self._cudf_data = df
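        # gt and gt1-gt3 decode the reference results stored above as raw
        # float64 bytes; their indexes skip the leading rows of each asset
        # segment that the corresponding indicators leave as NaN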
self.gt = pd.Series(np.frombuffer(ground_truth, dtype=np.float64),
index=gt_index)
gt_index2 = np.concatenate([index[19:half], index[half+19:]])
self.gt1 = pd.Series(np.frombuffer(g_t1, dtype=np.float64),
index=gt_index2)
self.gt2 = pd.Series(np.frombuffer(g_t2, dtype=np.float64),
index=gt_index2)
self.gt3 = pd.Series(np.frombuffer(g_t3, dtype=np.float64),
index=gt_index2)
def tearDown(self):
pass
@ordered
def test_return(self):
'''Test return feature node'''
conf = {
}
node_obj = {"id": "abc",
"type": "ReturnFeatureNode",
"conf": conf,
"inputs": {}}
task = Task(node_obj)
inN = ReturnFeatureNode(task)
o = inN.process({'stock_in': self._cudf_data})['stock_out']
err, index_err = error_function_index(o['returns'], self.gt)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
msg = "bad error %f\n" % (index_err,)
self.assertTrue(np.isclose(index_err, 0, atol=1e-6), msg)
@ordered
def test_indicator(self):
'''Test indicator node'''
conf = {
"indicators": [
{"function": "port_chaikin_oscillator",
"columns": ["high", "low", "close", "volume"],
"args": [10, 20]},
{"function": "port_bollinger_bands",
"columns": ["close"],
"args": [10],
"outputs": ["b1", "b2"]}
],
"remove_na": True
}
node_obj = {"id": "abc",
"type": "IndicatorNode",
"conf": conf,
"inputs": {}}
task = Task(node_obj)
inN = IndicatorNode(task)
o = inN.process({'stock_in': self._cudf_data})['stock_out']
err, index_err = error_function_index(o['CH_OS_10_20'], self.gt1)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
msg = "bad error %f\n" % (index_err,)
self.assertTrue(np.isclose(index_err, 0, atol=1e-6), msg)
err, index_err = error_function_index(o['BO_BA_b1_10'], self.gt2)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
msg = "bad error %f\n" % (index_err,)
self.assertTrue(np.isclose(index_err, 0, atol=1e-6), msg)
err, index_err = error_function_index(o['BO_BA_b2_10'], self.gt3)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
msg = "bad error %f\n" % (index_err,)
self.assertTrue(np.isclose(index_err, 0, atol=1e-6), msg)
@ordered
def test_asset_indicator(self):
'''Test asset indicator node'''
conf = {
}
node_obj = {"id": "abc",
"type": "AssetIndicatorNode",
"conf": conf,
"inputs": {}}
task = Task(node_obj)
inN = AssetIndicatorNode(task)
gt = self._cudf_data.to_pandas()['indicator']
o = inN.process({'stock_in':
self._cudf_data.drop('indicator', axis=1)})['stock_out']
err, index_err = error_function_index(o['indicator'], gt)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
msg = "bad error %f\n" % (index_err,)
self.assertTrue(np.isclose(index_err, 0, atol=1e-6), msg)
if __name__ == '__main__':
unittest.main()
| fsi-samples-main | gQuant/plugins/gquant_plugin/tests/unit/test_nodes.py |
'''
Workflow Serialization Unit Tests
To run unittests:
# Using standard library unittest
python -m unittest -v
python -m unittest tests/unit/test_rolling.py -v
or
python -m unittest discover <test_directory>
python -m unittest discover -s <directory> -p 'test_*.py'
# Using pytest
# "conda install pytest" or "pip install pytest"
pytest -v tests
pytest -v tests/unit/test_rolling.py
'''
import pandas as pd
import unittest
import cudf
from greenflow_gquant_plugin.cuindicator import Rolling, Ewm
from .utils import make_orderer, error_function
import numpy as np
ordered, compare = make_orderer()
unittest.defaultTestLoader.sortTestMethodsUsing = compare
class TestRolling(unittest.TestCase):
def setUp(self):
array_len = int(1e4)
self.average_window = 300
random_array = np.random.rand(array_len)
df = cudf.DataFrame()
df['in'] = random_array
pdf = pd.DataFrame()
pdf['in'] = random_array
# ignore importlib warnings.
self._pandas_data = pdf
self._cudf_data = df
def tearDown(self):
pass
@ordered
def test_rolling_functions(self):
'''Test rolling window method'''
gpu_result = Rolling(self.average_window, self._cudf_data['in']).mean()
cpu_result = self._pandas_data[
'in'].rolling(self.average_window).mean()
err = error_function(cudf.Series(gpu_result, nan_as_null=False),
cpu_result)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
gpu_result = Rolling(self.average_window, self._cudf_data['in']).max()
cpu_result = self._pandas_data['in'].rolling(self.average_window).max()
err = error_function(cudf.Series(gpu_result, nan_as_null=False),
cpu_result)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
gpu_result = Rolling(self.average_window, self._cudf_data['in']).min()
cpu_result = self._pandas_data['in'].rolling(self.average_window).min()
err = error_function(cudf.Series(gpu_result, nan_as_null=False),
cpu_result)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
gpu_result = Rolling(self.average_window, self._cudf_data['in']).sum()
cpu_result = self._pandas_data['in'].rolling(self.average_window).sum()
err = error_function(cudf.Series(gpu_result, nan_as_null=False),
cpu_result)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
gpu_result = Rolling(self.average_window, self._cudf_data['in']).std()
cpu_result = self._pandas_data['in'].rolling(self.average_window).std()
err = error_function(cudf.Series(gpu_result, nan_as_null=False),
cpu_result)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
gpu_result = Rolling(self.average_window, self._cudf_data['in']).var()
cpu_result = self._pandas_data['in'].rolling(self.average_window).var()
err = error_function(cudf.Series(gpu_result, nan_as_null=False),
cpu_result)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_ewm_functions(self):
'''Test exponential moving average method'''
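        # the GPU Ewm(window, series) result is compared against pandas
        # ewm(span=window, min_periods=window).mean() below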
gpu_result = Ewm(self.average_window, self._cudf_data['in']).mean()
cpu_result = self._pandas_data[
'in'].ewm(span=self.average_window,
min_periods=self.average_window).mean()
err = error_function(cudf.Series(gpu_result, nan_as_null=False),
cpu_result)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
if __name__ == '__main__':
unittest.main()
| fsi-samples-main | gQuant/plugins/gquant_plugin/tests/unit/test_rolling.py |
# flake8: noqa
"""
Indicators as shown by Peter Bakker at:
https://www.quantopian.com/posts/technical-analysis-indicators-without-talib-code
"""
"""
25-Mar-2018: Fixed syntax to support the newest version of Pandas. Warnings should no longer appear.
Fixed some bugs regarding min_periods and NaN.
If you find any bugs, please report to github.com/palmbook
"""
# Import Built-Ins
import logging
# Import Third-Party
import pandas as pd
import numpy as np
# Import Homebrew
# Init Logging Facilities
log = logging.getLogger(__name__)
def moving_average(df, n):
"""Calculate the moving average for the given data.
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
MA = pd.Series(df['Close'].rolling(n, min_periods=n).mean(), name='MA_' + str(n))
df = df.join(MA)
return df
def exponential_moving_average(df, n):
"""
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
EMA = pd.Series(df['Close'].ewm(span=n, min_periods=n).mean(), name='EMA_' + str(n))
df = df.join(EMA)
return df
def momentum(df, n):
"""
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
M = pd.Series(df['Close'].diff(n), name='Momentum_' + str(n))
df = df.join(M)
return df
def rate_of_change(df, n):
"""
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
M = df['Close'].diff(n - 1)
N = df['Close'].shift(n - 1)
ROC = pd.Series(M / N, name='ROC_' + str(n))
df = df.join(ROC)
return df
def average_true_range(df, n):
"""
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
i = 0
TR_l = [0]
while i < df.index[-1]:
TR = max(df.loc[i + 1, 'High'], df.loc[i, 'Close']) - min(df.loc[i + 1, 'Low'], df.loc[i, 'Close'])
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l)
ATR = pd.Series(TR_s.ewm(span=n, min_periods=n).mean(), name='ATR_' + str(n))
df = df.join(ATR)
return df
def bollinger_bands(df, n):
"""
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
MA = pd.Series(df['Close'].rolling(n, min_periods=n).mean())
MSD = pd.Series(df['Close'].rolling(n, min_periods=n).std())
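    # b1 is the normalized band width (4 * rolling std / MA); b2 is %b, the
    # position of Close within the band [MA - 2*std, MA + 2*std]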
b1 = 4 * MSD / MA
B1 = pd.Series(b1, name='BollingerB_' + str(n))
df = df.join(B1)
b2 = (df['Close'] - MA + 2 * MSD) / (4 * MSD)
B2 = pd.Series(b2, name='Bollinger%b_' + str(n))
df = df.join(B2)
return df
def ppsr(df):
"""Calculate Pivot Points, Supports and Resistances for given data
:param df: pandas.DataFrame
:return: pandas.DataFrame
"""
PP = pd.Series((df['High'] + df['Low'] + df['Close']) / 3)
R1 = pd.Series(2 * PP - df['Low'])
S1 = pd.Series(2 * PP - df['High'])
R2 = pd.Series(PP + df['High'] - df['Low'])
S2 = pd.Series(PP - df['High'] + df['Low'])
R3 = pd.Series(df['High'] + 2 * (PP - df['Low']))
S3 = pd.Series(df['Low'] - 2 * (df['High'] - PP))
psr = {'PP': PP, 'R1': R1, 'S1': S1, 'R2': R2, 'S2': S2, 'R3': R3, 'S3': S3}
PSR = pd.DataFrame(psr)
df = df.join(PSR)
return df
def stochastic_oscillator_k(df):
"""Calculate stochastic oscillator %K for given data.
:param df: pandas.DataFrame
:return: pandas.DataFrame
"""
SOk = pd.Series((df['Close'] - df['Low']) / (df['High'] - df['Low']), name='SO%k')
df = df.join(SOk)
return df
def stochastic_oscillator_d(df, n):
"""Calculate stochastic oscillator %D for given data.
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
SOk = pd.Series((df['Close'] - df['Low']) / (df['High'] - df['Low']), name='SO%k')
SOd = pd.Series(SOk.ewm(span=n, min_periods=n).mean(), name='SO%d_' + str(n))
df = df.join(SOd)
return df
def trix(df, n):
"""Calculate TRIX for given data.
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
EX1 = df['Close'].ewm(span=n, min_periods=n).mean()
EX2 = EX1.ewm(span=n, min_periods=n).mean()
EX3 = EX2.ewm(span=n, min_periods=n).mean()
i = 0
ROC_l = [np.nan]
while i + 1 <= df.index[-1]:
ROC = (EX3[i + 1] - EX3[i]) / EX3[i]
ROC_l.append(ROC)
i = i + 1
Trix = pd.Series(ROC_l, name='Trix_' + str(n))
df = df.join(Trix)
return df
def average_directional_movement_index(df, n, n_ADX):
"""Calculate the Average Directional Movement Index for given data.
:param df: pandas.DataFrame
:param n:
:param n_ADX:
:return: pandas.DataFrame
"""
i = 0
UpI = []
DoI = []
while i + 1 <= df.index[-1]:
UpMove = df.loc[i + 1, 'High'] - df.loc[i, 'High']
DoMove = df.loc[i, 'Low'] - df.loc[i + 1, 'Low']
if UpMove > DoMove and UpMove > 0:
UpD = UpMove
else:
UpD = 0
UpI.append(UpD)
if DoMove > UpMove and DoMove > 0:
DoD = DoMove
else:
DoD = 0
DoI.append(DoD)
i = i + 1
i = 0
TR_l = [0]
while i < df.index[-1]:
TR = max(df.loc[i + 1, 'High'], df.loc[i, 'Close']) - min(df.loc[i + 1, 'Low'], df.loc[i, 'Close'])
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l)
ATR = pd.Series(TR_s.ewm(span=n, min_periods=n).mean())
UpI = pd.Series(UpI)
DoI = pd.Series(DoI)
PosDI = pd.Series(UpI.ewm(span=n, min_periods=n).mean() / ATR)
NegDI = pd.Series(DoI.ewm(span=n, min_periods=n).mean() / ATR)
ADX = pd.Series((abs(PosDI - NegDI) / (PosDI + NegDI)).ewm(span=n_ADX, min_periods=n_ADX).mean(),
name='ADX_' + str(n) + '_' + str(n_ADX))
df = df.join(ADX)
return df
def macd(df, n_fast, n_slow):
"""Calculate MACD, MACD Signal and MACD difference
:param df: pandas.DataFrame
:param n_fast:
:param n_slow:
:return: pandas.DataFrame
"""
EMAfast = pd.Series(df['Close'].ewm(span=n_fast, min_periods=n_slow).mean())
EMAslow = pd.Series(df['Close'].ewm(span=n_slow, min_periods=n_slow).mean())
MACD = pd.Series(EMAfast - EMAslow, name='MACD_' + str(n_fast) + '_' + str(n_slow))
MACDsign = pd.Series(MACD.ewm(span=9, min_periods=9).mean(), name='MACDsign_' + str(n_fast) + '_' + str(n_slow))
MACDdiff = pd.Series(MACD - MACDsign, name='MACDdiff_' + str(n_fast) + '_' + str(n_slow))
df = df.join(MACD)
df = df.join(MACDsign)
df = df.join(MACDdiff)
return df
def mass_index(df):
"""Calculate the Mass Index for given data.
:param df: pandas.DataFrame
:return: pandas.DataFrame
"""
Range = df['High'] - df['Low']
EX1 = Range.ewm(span=9, min_periods=9).mean()
EX2 = EX1.ewm(span=9, min_periods=9).mean()
Mass = EX1 / EX2
MassI = pd.Series(Mass.rolling(25).sum(), name='Mass Index')
df = df.join(MassI)
return df
def vortex_indicator(df, n):
"""Calculate the Vortex Indicator for given data.
Vortex Indicator described here:
http://www.vortexindicator.com/VFX_VORTEX.PDF
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
i = 0
TR = [0]
while i < df.index[-1]:
Range = max(df.loc[i + 1, 'High'], df.loc[i, 'Close']) - min(df.loc[i + 1, 'Low'], df.loc[i, 'Close'])
TR.append(Range)
i = i + 1
i = 0
VM = [0]
while i < df.index[-1]:
Range = abs(df.loc[i + 1, 'High'] - df.loc[i, 'Low']) - abs(df.loc[i + 1, 'Low'] - df.loc[i, 'High'])
VM.append(Range)
i = i + 1
VI = pd.Series(pd.Series(VM).rolling(n).sum() / pd.Series(TR).rolling(n).sum(), name='Vortex_' + str(n))
df = df.join(VI)
return df
def kst_oscillator(df, r1, r2, r3, r4, n1, n2, n3, n4):
"""Calculate KST Oscillator for given data.
:param df: pandas.DataFrame
:param r1:
:param r2:
:param r3:
:param r4:
:param n1:
:param n2:
:param n3:
:param n4:
:return: pandas.DataFrame
"""
M = df['Close'].diff(r1 - 1)
N = df['Close'].shift(r1 - 1)
ROC1 = M / N
M = df['Close'].diff(r2 - 1)
N = df['Close'].shift(r2 - 1)
ROC2 = M / N
M = df['Close'].diff(r3 - 1)
N = df['Close'].shift(r3 - 1)
ROC3 = M / N
M = df['Close'].diff(r4 - 1)
N = df['Close'].shift(r4 - 1)
ROC4 = M / N
KST = pd.Series(
ROC1.rolling(n1).sum() + ROC2.rolling(n2).sum() * 2 + ROC3.rolling(n3).sum() * 3 + ROC4.rolling(n4).sum() * 4,
name='KST_' + str(r1) + '_' + str(r2) + '_' + str(r3) + '_' + str(r4) + '_' + str(n1) + '_' + str(
n2) + '_' + str(n3) + '_' + str(n4))
df = df.join(KST)
return df
def relative_strength_index(df, n):
"""Calculate Relative Strength Index(RSI) for given data.
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
i = 0
UpI = [0]
DoI = [0]
while i + 1 <= df.index[-1]:
UpMove = df.loc[i + 1, 'High'] - df.loc[i, 'High']
DoMove = df.loc[i, 'Low'] - df.loc[i + 1, 'Low']
if UpMove > DoMove and UpMove > 0:
UpD = UpMove
else:
UpD = 0
UpI.append(UpD)
if DoMove > UpMove and DoMove > 0:
DoD = DoMove
else:
DoD = 0
DoI.append(DoD)
i = i + 1
UpI = pd.Series(UpI)
DoI = pd.Series(DoI)
PosDI = pd.Series(UpI.ewm(span=n, min_periods=n).mean())
NegDI = pd.Series(DoI.ewm(span=n, min_periods=n).mean())
RSI = pd.Series(PosDI / (PosDI + NegDI), name='RSI_' + str(n))
df = df.join(RSI)
return df
def true_strength_index(df, r, s):
"""Calculate True Strength Index (TSI) for given data.
:param df: pandas.DataFrame
:param r:
:param s:
:return: pandas.DataFrame
"""
M = pd.Series(df['Close'].diff(1))
aM = abs(M)
EMA1 = pd.Series(M.ewm(span=r, min_periods=r).mean())
aEMA1 = pd.Series(aM.ewm(span=r, min_periods=r).mean())
EMA2 = pd.Series(EMA1.ewm(span=s, min_periods=s).mean())
aEMA2 = pd.Series(aEMA1.ewm(span=s, min_periods=s).mean())
TSI = pd.Series(EMA2 / aEMA2, name='TSI_' + str(r) + '_' + str(s))
df = df.join(TSI)
return df
def accumulation_distribution(df, n):
"""Calculate Accumulation/Distribution for given data.
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
ad = (2 * df['Close'] - df['High'] - df['Low']) / (df['High'] - df['Low']) * df['Volume']
M = ad.diff(n - 1)
N = ad.shift(n - 1)
ROC = M / N
AD = pd.Series(ROC, name='Acc/Dist_ROC_' + str(n))
df = df.join(AD)
return df
def chaikin_oscillator(df):
"""Calculate Chaikin Oscillator for given data.
:param df: pandas.DataFrame
:return: pandas.DataFrame
"""
ad = (2 * df['Close'] - df['High'] - df['Low']) / (df['High'] - df['Low']) * df['Volume']
Chaikin = pd.Series(ad.ewm(span=3, min_periods=3).mean() - ad.ewm(span=10, min_periods=10).mean(), name='Chaikin')
df = df.join(Chaikin)
return df
def money_flow_index(df, n):
"""Calculate Money Flow Index and Ratio for given data.
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
PP = (df['High'] + df['Low'] + df['Close']) / 3
i = 0
PosMF = [0]
while i < df.index[-1]:
if PP[i + 1] > PP[i]:
PosMF.append(PP[i + 1] * df.loc[i + 1, 'Volume'])
else:
PosMF.append(0)
i = i + 1
PosMF = pd.Series(PosMF)
TotMF = PP * df['Volume']
MFR = pd.Series(PosMF / TotMF)
MFI = pd.Series(MFR.rolling(n, min_periods=n).mean(), name='MFI_' + str(n))
df = df.join(MFI)
return df
def on_balance_volume(df, n):
"""Calculate On-Balance Volume for given data.
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
i = 0
OBV = [0]
while i < df.index[-1]:
if df.loc[i + 1, 'Close'] - df.loc[i, 'Close'] > 0:
OBV.append(df.loc[i + 1, 'Volume'])
if df.loc[i + 1, 'Close'] - df.loc[i, 'Close'] == 0:
OBV.append(0)
if df.loc[i + 1, 'Close'] - df.loc[i, 'Close'] < 0:
OBV.append(-df.loc[i + 1, 'Volume'])
i = i + 1
OBV = pd.Series(OBV)
OBV_ma = pd.Series(OBV.rolling(n, min_periods=n).mean(), name='OBV_' + str(n))
df = df.join(OBV_ma)
return df
def force_index(df, n):
"""Calculate Force Index for given data.
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
F = pd.Series(df['Close'].diff(n) * df['Volume'].diff(n), name='Force_' + str(n))
df = df.join(F)
return df
def ease_of_movement(df, n):
"""Calculate Ease of Movement for given data.
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
EoM = (df['High'].diff(1) + df['Low'].diff(1)) * (df['High'] - df['Low']) / (2 * df['Volume'])
Eom_ma = pd.Series(EoM.rolling(n, min_periods=n).mean(), name='EoM_' + str(n))
df = df.join(Eom_ma)
return df
def commodity_channel_index(df, n):
"""Calculate Commodity Channel Index for given data.
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
PP = (df['High'] + df['Low'] + df['Close']) / 3
CCI = pd.Series((PP - PP.rolling(n, min_periods=n).mean()) / PP.rolling(n, min_periods=n).std(),
name='CCI_' + str(n))
df = df.join(CCI)
return df
def coppock_curve(df, n):
"""Calculate Coppock Curve for given data.
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
M = df['Close'].diff(int(n * 11 / 10) - 1)
N = df['Close'].shift(int(n * 11 / 10) - 1)
ROC1 = M / N
M = df['Close'].diff(int(n * 14 / 10) - 1)
N = df['Close'].shift(int(n * 14 / 10) - 1)
ROC2 = M / N
Copp = pd.Series((ROC1 + ROC2).ewm(span=n, min_periods=n).mean(), name='Copp_' + str(n))
df = df.join(Copp)
return df
def keltner_channel(df, n):
"""Calculate Keltner Channel for given data.
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
KelChM = pd.Series(((df['High'] + df['Low'] + df['Close']) / 3).rolling(n, min_periods=n).mean(),
name='KelChM_' + str(n))
KelChU = pd.Series(((4 * df['High'] - 2 * df['Low'] + df['Close']) / 3).rolling(n, min_periods=n).mean(),
name='KelChU_' + str(n))
KelChD = pd.Series(((-2 * df['High'] + 4 * df['Low'] + df['Close']) / 3).rolling(n, min_periods=n).mean(),
name='KelChD_' + str(n))
df = df.join(KelChM)
df = df.join(KelChU)
df = df.join(KelChD)
return df
def ultimate_oscillator(df):
"""Calculate Ultimate Oscillator for given data.
:param df: pandas.DataFrame
:return: pandas.DataFrame
"""
i = 0
TR_l = [0]
BP_l = [0]
while i < df.index[-1]:
TR = max(df.loc[i + 1, 'High'], df.loc[i, 'Close']) - min(df.loc[i + 1, 'Low'], df.loc[i, 'Close'])
TR_l.append(TR)
BP = df.loc[i + 1, 'Close'] - min(df.loc[i + 1, 'Low'], df.loc[i, 'Close'])
BP_l.append(BP)
i = i + 1
UltO = pd.Series((4 * pd.Series(BP_l).rolling(7).sum() / pd.Series(TR_l).rolling(7).sum()) + (
2 * pd.Series(BP_l).rolling(14).sum() / pd.Series(TR_l).rolling(14).sum()) + (
pd.Series(BP_l).rolling(28).sum() / pd.Series(TR_l).rolling(28).sum()),
name='Ultimate_Osc')
df = df.join(UltO)
return df
def donchian_channel(df, n):
"""Calculate donchian channel of given pandas data frame.
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
i = 0
dc_l = []
while i < n - 1:
dc_l.append(0)
i += 1
i = 0
while i + n - 1 < df.index[-1]:
dc = max(df['High'].loc[i:i + n - 1]) - min(df['Low'].loc[i:i + n - 1])
dc_l.append(dc)
i += 1
donchian_chan = pd.Series(dc_l, name='Donchian_' + str(n))
donchian_chan = donchian_chan.shift(n - 1)
return df.join(donchian_chan)
def standard_deviation(df, n):
"""Calculate Standard Deviation for given data.
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
df = df.join(pd.Series(df['Close'].rolling(n, min_periods=n).std(), name='STD_' + str(n)))
return df
| fsi-samples-main | gQuant/plugins/gquant_plugin/tests/unit/technical_indicators.py |
'''
Technical Indicator Node Unit Tests
To run unittests:
# Using standard library unittest
python -m unittest -v
python -m unittest tests/unit/test_indicator_node.py -v
or
python -m unittest discover <test_directory>
python -m unittest discover -s <directory> -p 'test_*.py'
# Using pytest
# "conda install pytest" or "pip install pytest"
pytest -v tests
pytest -v tests/unit/test_indicator_node.py
'''
import warnings
import unittest
import cudf
import greenflow_gquant_plugin.cuindicator as gi
from greenflow.dataframe_flow.config_nodes_modules import \
get_node_tgraphmixin_instance
from greenflow_gquant_plugin.transform.indicatorNode import IndicatorNode
from greenflow.dataframe_flow.task import Task
from .utils import make_orderer
import numpy as np
import copy
ordered, compare = make_orderer()
unittest.defaultTestLoader.sortTestMethodsUsing = compare
class TestIndicatorNode(unittest.TestCase):
def setUp(self):
warnings.simplefilter('ignore', category=ImportWarning)
warnings.simplefilter('ignore', category=DeprecationWarning)
# ignore importlib warnings.
size = 200
half = size // 2
self.size = size
self.half = half
np.random.seed(10)
random_array = np.random.rand(size)
open_array = np.random.rand(size)
close_array = np.random.rand(size)
high_array = np.random.rand(size)
low_array = np.random.rand(size)
volume_array = np.random.rand(size)
indicator = np.zeros(size, dtype=np.int32)
indicator[0] = 1
indicator[half] = 1
df = cudf.DataFrame()
df['in'] = random_array
df['open'] = open_array
df['close'] = close_array
df['high'] = high_array
df['low'] = low_array
df['volume'] = volume_array
df['indicator'] = indicator
self._cudf_data = df
self.conf = {
"indicators": [
{"function": "port_chaikin_oscillator",
"columns": ["high", "low", "close", "volume"],
"args": [10, 20]},
{"function": "port_bollinger_bands",
"columns": ["close"],
"args": [10],
"outputs": ["b1", "b2"]}
],
"remove_na": True
}
def tearDown(self):
pass
@ordered
def test_colums(self):
        '''Test node columns requirements'''
node_obj = {"id": "abc",
"type": "IndicatorNode",
"conf": self.conf,
"inputs": {}}
task = Task(node_obj)
inN = get_node_tgraphmixin_instance(IndicatorNode, task)
# inN = IndicatorNode(task)
out_cols = inN.meta_setup().outports
col = "indicator"
msg = "bad error: %s is missing" % (col)
self.assertTrue(col in inN.meta_setup().inports['stock_in'], msg)
col = "high"
msg = "bad error: %s is missing" % (col)
self.assertTrue(col in inN.meta_setup().inports['stock_in'], msg)
col = "low"
msg = "bad error: %s is missing" % (col)
self.assertTrue(col in inN.meta_setup().inports['stock_in'], msg)
col = "close"
msg = "bad error: %s is missing" % (col)
self.assertTrue(col in inN.meta_setup().inports['stock_in'], msg)
col = "volume"
msg = "bad error: %s is missing" % (col)
self.assertTrue(col in inN.meta_setup().inports['stock_in'], msg)
col = "CH_OS_10_20"
msg = "bad error: %s is missing" % (col)
self.assertTrue(col in out_cols['stock_out'], msg)
col = "BO_BA_b1_10"
msg = "bad error: %s is missing" % (col)
self.assertTrue(col in out_cols['stock_out'], msg)
col = "BO_BA_b2_10"
msg = "bad error: %s is missing" % (col)
self.assertTrue(col in out_cols['stock_out'], msg)
@ordered
def test_drop(self):
'''Test node columns drop'''
node_obj = {"id": "abc",
"type": "IndicatorNode",
"conf": self.conf,
"inputs": {}}
task = Task(node_obj)
inN = IndicatorNode(task)
o = inN.process({"stock_in": self._cudf_data})['stock_out']
msg = "bad error: df len %d is not right" % (len(o))
self.assertTrue(len(o) == 162, msg)
newConf = copy.deepcopy(self.conf)
newConf['remove_na'] = False
node_obj = {"id": "abc",
"type": "IndicatorNode",
"conf": newConf,
"inputs": {}}
task = Task(node_obj)
inN = IndicatorNode(task)
o = inN.process({"stock_in": self._cudf_data})['stock_out']
msg = "bad error: df len %d is not right" % (len(o))
self.assertTrue(len(o) == 200, msg)
@ordered
def test_signal(self):
'''Test signal computation'''
newConf = copy.deepcopy(self.conf)
newConf['remove_na'] = False
node_obj = {"id": "abc",
"type": "IndicatorNode",
"conf": newConf,
"inputs": {}}
task = Task(node_obj)
inN = IndicatorNode(task)
o = inN.process({'stock_in': self._cudf_data})['stock_out']
# check chaikin oscillator computation
r_cudf = gi.chaikin_oscillator(self._cudf_data[:self.half]['high'],
self._cudf_data[:self.half]['low'],
self._cudf_data[:self.half]['close'],
self._cudf_data[:self.half]['volume'],
10, 20)
computed = o[:self.half]['CH_OS_10_20'].to_array('pandas')
ref = r_cudf.to_array('pandas')
err = np.abs(computed[~np.isnan(computed)] - ref[~np.isnan(ref)]).max()
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
r_cudf = gi.chaikin_oscillator(self._cudf_data[self.half:]['high'],
self._cudf_data[self.half:]['low'],
self._cudf_data[self.half:]['close'],
self._cudf_data[self.half:]['volume'],
10, 20)
computed = o[self.half:]['CH_OS_10_20'].to_array('pandas')
ref = r_cudf.to_array('pandas')
err = np.abs(computed[~np.isnan(computed)] - ref[~np.isnan(ref)]).max()
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
# check bollinger bands computation
r_cudf = gi.bollinger_bands(self._cudf_data[:self.half]['close'], 10)
computed = o[:self.half]["BO_BA_b1_10"].to_array('pandas')
ref = r_cudf.b1.to_array('pandas')
err = np.abs(computed[~np.isnan(computed)] - ref[~np.isnan(ref)]).max()
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
computed = o[:self.half]["BO_BA_b2_10"].to_array('pandas')
ref = r_cudf.b2.to_array('pandas')
err = np.abs(computed[~np.isnan(computed)] - ref[~np.isnan(ref)]).max()
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
r_cudf = gi.bollinger_bands(self._cudf_data[self.half:]['close'], 10)
computed = o[self.half:]["BO_BA_b1_10"].to_array('pandas')
ref = r_cudf.b1.to_array('pandas')
err = np.abs(computed[~np.isnan(computed)] - ref[~np.isnan(ref)]).max()
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
computed = o[self.half:]["BO_BA_b2_10"].to_array('pandas')
ref = r_cudf.b2.to_array('pandas')
err = np.abs(computed[~np.isnan(computed)] - ref[~np.isnan(ref)]).max()
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
if __name__ == '__main__':
unittest.main()
| fsi-samples-main | gQuant/plugins/gquant_plugin/tests/unit/test_indicator_node.py |
fsi-samples-main | gQuant/plugins/gquant_plugin/tests/unit/__init__.py |
|
'''
Workflow Serialization Unit Tests
To run unittests:
# Using standard library unittest
python -m unittest -v
python -m unittest tests/unit/test_util.py -v
or
python -m unittest discover <test_directory>
python -m unittest discover -s <directory> -p 'test_*.py'
# Using pytest
# "conda install pytest" or "pip install pytest"
pytest -v tests
pytest -v tests/unit/test_util.py
'''
import pandas as pd
import unittest
import cudf
from greenflow_gquant_plugin.cuindicator import shift, diff
import numpy as np
from .utils import make_orderer, error_function
ordered, compare = make_orderer()
unittest.defaultTestLoader.sortTestMethodsUsing = compare
class TestUtil(unittest.TestCase):
def setUp(self):
array_len = int(1e4)
self.average_window = 300
random_array = np.random.rand(array_len)
df = cudf.DataFrame()
df['in'] = random_array
pdf = pd.DataFrame()
pdf['in'] = random_array
# ignore importlib warnings.
self._pandas_data = pdf
self._cudf_data = df
def tearDown(self):
pass
@ordered
def test_diff_functions(self):
'''Test diff method'''
for window in [-1, -2, -3, 1, 2, 3]:
gpu_result = diff(self._cudf_data['in'], window)
cpu_result = self._pandas_data['in'].diff(window)
err = error_function(cudf.Series(gpu_result, nan_as_null=False),
cpu_result)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_shift_functions(self):
'''Test shift method'''
for window in [-1, -2, -3, 1, 2, 3]:
gpu_result = shift(self._cudf_data['in'], window)
cpu_result = self._pandas_data['in'].shift(window)
err = error_function(cudf.Series(gpu_result, nan_as_null=False),
cpu_result)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
if __name__ == '__main__':
unittest.main()
| fsi-samples-main | gQuant/plugins/gquant_plugin/tests/unit/test_util.py |
'''
Performance Unit Tests
To run unittests:
# Using standard library unittest
python -m unittest -v
python -m unittest tests/unit/test_performance.py -v
or
python -m unittest discover <test_directory>
python -m unittest discover -s <directory> -p 'test_*.py'
# Using pytest
# "conda install pytest" or "pip install pytest"
pytest -v tests
pytest -v tests/unit/test_performance.py
'''
import unittest
from greenflow import TaskGraph
from .utils import make_orderer
import cProfile
import pstats
import warnings
import os
ordered, compare = make_orderer()
unittest.defaultTestLoader.sortTestMethodsUsing = compare
class TestPerformance(unittest.TestCase):
'''Profile calls to ports_setup and meta_setup.'''
def setUp(self):
warnings.filterwarnings('ignore', message='numpy.ufunc size changed')
dirnamefn = os.path.dirname
topdir = dirnamefn(dirnamefn(dirnamefn(os.path.realpath(__file__))))
os.environ['MODULEPATH'] = str(topdir) + '/modules'
os.environ['GREENFLOW_CONFIG'] = str(topdir) + '/greenflowrc'
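        # expected call counts of ports_setup/meta_setup per module when
        # building the xgboost example task graph below; the tests assert
        # exact equality to catch regressions that add redundant setup calls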
self.ports_setup_ref = {
'ports_setup.compositeNode.py': 4,
'ports_setup.classificationGenerator.py': 2,
'ports_setup.csvStockLoader.py': 3,
'ports_setup.taskGraph.py': 5,
'ports_setup._node_flow.py': 320,
'ports_setup.template_node_mixin.py': 77,
'ports_setup_ext._node_taskgraph_extension_mixin.py': 77,
'ports_setup.output_collector_node.py': 5
}
self.meta_setup_ref = {
'meta_setup.normalizationNode.py': 2,
'meta_setup.compositeNode.py': 4,
'meta_setup.classificationGenerator.py': 2,
'meta_setup.simpleBackTest.py': 2,
'meta_setup.csvStockLoader.py': 3,
'meta_setup.taskGraph.py': 5,
'meta_setup.node.py': 5,
'meta_setup._node_flow.py': 177,
'meta_setup.template_node_mixin.py': 47,
'meta_setup_ext._node_taskgraph_extension_mixin.py': 47,
'meta_setup.output_collector_node.py': 5
}
tgraphpath = str(topdir) + \
'/taskgraphs/xgboost_example/xgboost_stock.gq.yaml'
profiler = cProfile.Profile()
profiler.enable()
graph = TaskGraph.load_taskgraph(tgraphpath)
graph.build()
profiler.disable()
stats = pstats.Stats(profiler).sort_stats('ncalls')
self.stats = stats
def tearDown(self):
pass
@ordered
def test_ports_setup_performance(self):
stats = self.stats
statkeys = self.stats.stats.keys()
keys = [k for k in statkeys if k[-1] in ('ports_setup',)] + \
[k for k in statkeys if k[-1] in ('ports_setup_ext',)]
for key in keys:
dict_key = key[-1]+'.'+key[0].split('/')[-1]
msg = "{}.{} is called {} (expected {}) times.".format(
key[0].split('/')[-1].split('.')[0], key[-1],
stats.stats[key][0], self.ports_setup_ref[dict_key])
self.assertTrue(
stats.stats[key][0] == self.ports_setup_ref[dict_key], msg)
@ordered
def test_meta_setup_performance(self):
stats = self.stats
statkeys = self.stats.stats.keys()
keys = [k for k in statkeys if k[-1] in ('meta_setup',)] + \
[k for k in statkeys if k[-1] in ('meta_setup_ext',)]
for key in keys:
dict_key = key[-1] + '.' + key[0].split('/')[-1]
msg = "{}.{} is called {} (expected {}) times.".format(
key[0].split('/')[-1].split('.')[0], key[-1],
stats.stats[key][0], self.meta_setup_ref[dict_key])
self.assertTrue(
stats.stats[key][0] == self.meta_setup_ref[dict_key], msg)
if __name__ == '__main__':
unittest.main()
| fsi-samples-main | gQuant/plugins/gquant_plugin/tests/unit/test_performance.py |
import numpy as np
def make_orderer():
"""Keep tests in order"""
order = {}
def ordered(f):
order[f.__name__] = len(order)
return f
def compare(a, b):
return [1, -1][order[a] < order[b]]
return ordered, compare
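# Typical usage (as in the unit tests in this directory):
#   ordered, compare = make_orderer()
#   unittest.defaultTestLoader.sortTestMethodsUsing = compare
# then decorate each test method with @ordered to keep definition order.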
def error_function(gpu_series, result_series):
"""
utility function to compare GPU array vs CPU array
Parameters
------
gpu_series: cudf.Series
GPU computation result series
result_series: pandas.Series
Pandas computation result series
Returns
-----
double
maximum error of the two arrays
"""
gpu_arr = gpu_series.to_array(fillna='pandas')
pan_arr = result_series.values
gpu_arr = gpu_arr[~np.isnan(gpu_arr) & ~np.isinf(gpu_arr)]
pan_arr = pan_arr[~np.isnan(pan_arr) & ~np.isinf(pan_arr)]
err = np.abs(gpu_arr - pan_arr).max()
return err
def error_function_index(gpu_series, result_series):
"""
utility function to compare GPU array vs CPU array
Parameters
------
gpu_series: cudf.Series
GPU computation result series
result_series: pandas.Series
Pandas computation result series
Returns
-----
double
maximum error of the two arrays
int
maximum index value diff
"""
err = error_function(gpu_series, result_series)
error_index = np.abs(gpu_series.index.to_array() -
result_series.index.values).max()
return err, error_index
| fsi-samples-main | gQuant/plugins/gquant_plugin/tests/unit/utils.py |
'''
Workflow Serialization Unit Tests
To run unittests:
# Using standard library unittest
python -m unittest -v
python -m unittest tests/unit/test_indicator.py -v
or
python -m unittest discover <test_directory>
python -m unittest discover -s <directory> -p 'test_*.py'
# Using pytest
# "conda install pytest" or "pip install pytest"
pytest -v tests
pytest -v tests/unit/test_indicator.py
'''
import warnings
import pandas as pd
import unittest
import pathlib
import cudf
import greenflow_gquant_plugin.cuindicator as gi
from . import technical_indicators as ti
from .utils import make_orderer, error_function
import numpy as np
ordered, compare = make_orderer()
unittest.defaultTestLoader.sortTestMethodsUsing = compare
class TestIndicator(unittest.TestCase):
def setUp(self):
# ignore importlib warnings.
path = pathlib.Path(__file__)
self._pandas_data = pd.read_csv(str(path.parent)+'/testdata.csv.gz')
self._pandas_data['Volume'] /= 1000.0
self._cudf_data = cudf.from_pandas(self._pandas_data)
warnings.simplefilter('ignore', category=ImportWarning)
warnings.simplefilter('ignore', category=DeprecationWarning)
def tearDown(self):
pass
@ordered
def test_rate_of_return(self):
        '''Test rate of change (ROC) calculation'''
r_cudf = gi.rate_of_change(self._cudf_data['Close'], 2)
r_pandas = ti.rate_of_change(self._pandas_data, 2)
err = error_function(r_cudf, r_pandas.ROC_2)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_trix(self):
""" test the trix calculation"""
r_cudf = gi.trix(self._cudf_data['Close'], 3)
r_pandas = ti.trix(self._pandas_data, 3)
err = error_function(r_cudf, r_pandas.Trix_3)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_bollinger_bands(self):
""" test the bollinger_bands """
r_cudf = gi.bollinger_bands(self._cudf_data['Close'], 20)
r_pandas = ti.bollinger_bands(self._pandas_data, 20)
err = error_function(r_cudf.b1, r_pandas['BollingerB_20'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r_cudf.b2, r_pandas['Bollinger%b_20'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_macd(self):
""" test the macd """
n_fast = 10
n_slow = 20
r_cudf = gi.macd(self._cudf_data['Close'], n_fast, n_slow)
r_pandas = ti.macd(self._pandas_data, n_fast, n_slow)
err = error_function(r_cudf.MACD, r_pandas['MACD_10_20'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r_cudf.MACDdiff, r_pandas['MACDdiff_10_20'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r_cudf.MACDsign, r_pandas['MACDsign_10_20'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_average_true_range(self):
""" test the average true range """
r_cudf = gi.average_true_range(self._cudf_data['High'],
self._cudf_data['Low'],
self._cudf_data['Close'], 10)
r_pandas = ti.average_true_range(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['ATR_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_ppsr(self):
""" test the ppsr """
r_cudf = gi.ppsr(self._cudf_data['High'], self._cudf_data['Low'],
self._cudf_data['Close'])
r_pandas = ti.ppsr(self._pandas_data)
err = error_function(r_cudf.PP, r_pandas['PP'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r_cudf.R1, r_pandas['R1'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r_cudf.S1, r_pandas['S1'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r_cudf.R2, r_pandas['R2'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r_cudf.S2, r_pandas['S2'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r_cudf.R3, r_pandas['R3'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r_cudf.S3, r_pandas['S3'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_stochastic_oscillator_k(self):
""" test the stochastic oscillator k """
r_cudf = gi.stochastic_oscillator_k(self._cudf_data['High'],
self._cudf_data['Low'],
self._cudf_data['Close'])
r_pandas = ti.stochastic_oscillator_k(self._pandas_data)
err = error_function(r_cudf, r_pandas['SO%k'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_stochastic_oscillator_d(self):
""" test the stochastic oscillator d """
r_cudf = gi.stochastic_oscillator_d(self._cudf_data['High'],
self._cudf_data['Low'],
self._cudf_data['Close'], 10)
r_pandas = ti.stochastic_oscillator_d(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['SO%d_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_average_directional_movement_index(self):
""" test the average_directional_movement_index """
r_cudf = gi.average_directional_movement_index(
self._cudf_data['High'],
self._cudf_data['Low'],
self._cudf_data['Close'],
10, 20)
r_pandas = ti.average_directional_movement_index(self._pandas_data,
10, 20)
err = error_function(r_cudf, r_pandas['ADX_10_20'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_vortex_indicator(self):
""" test the vortex_indicator """
r_cudf = gi.vortex_indicator(self._cudf_data['High'],
self._cudf_data['Low'],
self._cudf_data['Close'], 10)
r_pandas = ti.vortex_indicator(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['Vortex_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_kst_oscillator(self):
""" test the kst_oscillator """
r_cudf = gi.kst_oscillator(self._cudf_data['Close'],
3, 4, 5, 6, 7, 8, 9, 10)
r_pandas = ti.kst_oscillator(self._pandas_data,
3, 4, 5, 6, 7, 8, 9, 10)
err = error_function(r_cudf, r_pandas['KST_3_4_5_6_7_8_9_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_relative_strength_index(self):
""" test the relative_strength_index """
r_cudf = gi.relative_strength_index(self._cudf_data['High'],
self._cudf_data['Low'], 10)
r_pandas = ti.relative_strength_index(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['RSI_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_mass_index(self):
""" test the mass_index """
r_cudf = gi.mass_index(self._cudf_data['High'],
self._cudf_data['Low'], 9, 25)
r_pandas = ti.mass_index(self._pandas_data)
err = error_function(r_cudf, r_pandas['Mass Index'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_true_strength_index(self):
""" test the true_strength_index """
r_cudf = gi.true_strength_index(self._cudf_data['Close'], 5, 8)
r_pandas = ti.true_strength_index(self._pandas_data, 5, 8)
err = error_function(r_cudf, r_pandas['TSI_5_8'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_chaikin_oscillator(self):
""" test the chaikin_oscillator """
r_cudf = gi.chaikin_oscillator(self._cudf_data['High'],
self._cudf_data['Low'],
self._cudf_data['Close'],
self._cudf_data['Volume'], 3, 10)
r_pandas = ti.chaikin_oscillator(self._pandas_data)
err = error_function(r_cudf, r_pandas['Chaikin'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_money_flow_index(self):
""" test the money_flow_index """
r_cudf = gi.money_flow_index(self._cudf_data['High'],
self._cudf_data['Low'],
self._cudf_data['Close'],
self._cudf_data['Volume'], 10)
r_pandas = ti.money_flow_index(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['MFI_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_on_balance_volume(self):
""" test the on_balance_volume """
r_cudf = gi.on_balance_volume(self._cudf_data['Close'],
self._cudf_data['Volume'], 10)
r_pandas = ti.on_balance_volume(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['OBV_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_force_index(self):
""" test the force index """
r_cudf = gi.force_index(self._cudf_data['Close'],
self._cudf_data['Volume'], 10)
r_pandas = ti.force_index(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['Force_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_ease_of_movement(self):
""" test the ease_of_movement """
r_cudf = gi.ease_of_movement(self._cudf_data['High'],
self._cudf_data['Low'],
self._cudf_data['Volume'], 10)
r_pandas = ti.ease_of_movement(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['EoM_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_ultimate_oscillator(self):
""" test the ultimate_oscillator """
r_cudf = gi.ultimate_oscillator(self._cudf_data['High'],
self._cudf_data['Low'],
self._cudf_data['Close'])
r_pandas = ti.ultimate_oscillator(self._pandas_data)
err = error_function(r_cudf, r_pandas['Ultimate_Osc'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_donchian_channel(self):
""" test the donchian_channel """
r_cudf = gi.donchian_channel(self._cudf_data['High'],
self._cudf_data['Low'], 10)
r_pandas = ti.donchian_channel(self._pandas_data, 10)
err = error_function(r_cudf[:-1], r_pandas['Donchian_10'][:-1])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_keltner_channel(self):
""" test the keltner_channel """
r_cudf = gi.keltner_channel(self._cudf_data['High'],
self._cudf_data['Low'],
self._cudf_data['Close'], 10)
r_pandas = ti.keltner_channel(self._pandas_data, 10)
err = error_function(r_cudf.KelChD, r_pandas['KelChD_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r_cudf.KelChM, r_pandas['KelChM_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r_cudf.KelChU, r_pandas['KelChU_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_coppock_curve(self):
""" test the coppock_curve """
r_cudf = gi.coppock_curve(self._cudf_data['Close'], 10)
r_pandas = ti.coppock_curve(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['Copp_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_accumulation_distribution(self):
""" test the accumulation_distribution """
r_cudf = gi.accumulation_distribution(self._cudf_data['High'],
self._cudf_data['Low'],
self._cudf_data['Close'],
self._cudf_data['Volume'], 10)
r_pandas = ti.accumulation_distribution(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['Acc/Dist_ROC_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_commodity_channel_index(self):
""" test the commodity_channel_index """
r_cudf = gi.commodity_channel_index(self._cudf_data['High'],
self._cudf_data['Low'],
self._cudf_data['Close'], 10)
r_pandas = ti.commodity_channel_index(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['CCI_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_momentum(self):
""" test the momentum """
r_cudf = gi.momentum(self._cudf_data['Close'], 10)
r_pandas = ti.momentum(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['Momentum_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_moving_average(self):
""" test the moving average """
r_cudf = gi.moving_average(self._cudf_data['Close'], 10)
r_pandas = ti.moving_average(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['MA_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_exponential_moving_average(self):
""" test the exponential moving average """
r_cudf = gi.exponential_moving_average(self._cudf_data['Close'], 10)
r_pandas = ti.exponential_moving_average(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['EMA_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
if __name__ == '__main__':
unittest.main()
| fsi-samples-main | gQuant/plugins/gquant_plugin/tests/unit/test_indicator.py |
display_fun = """
const columnKeys = Object.keys(metaObj);
let header = '';
if (columnKeys.length > 0) {
header += '<table>';
header += '<tr>';
header += '<th>Column Name</th>';
for (let i = 0; i < columnKeys.length; i++) {
header += `<th>${columnKeys[i]}</th>`;
}
header += '</tr>';
header += '<tr>';
header += '<th>Type</th>';
for (let i = 0; i < columnKeys.length; i++) {
header += `<td>${metaObj[columnKeys[i]]}</td>`;
}
header += '</tr>';
header += '</table>';
}
return header;
"""
validation = {}
display = {}
display['cudf.core.dataframe.DataFrame'] = display_fun
display['dask_cudf.core.DataFrame'] = display_fun
display['pandas.core.frame.DataFrame'] = display_fun
display['dask.dataframe.core.DataFrame'] = display_fun
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/client.py |
CACHE_NAME = {} | fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/cache.py |
from .client import validation, display # noqa: F401
from greenflow.dataframe_flow._node_flow import register_validator
from greenflow.dataframe_flow._node_flow import register_copy_function
from greenflow.dataframe_flow._node_flow import register_cleanup
import traceback
import cudf
import dask_cudf
import pandas
import numpy as np
import dask.dataframe
def _validate_df(df_to_val, ref_cols, obj):
'''Validate a cudf or dask_cudf DataFrame.
:param df_to_val: A dataframe typically of type cudf.DataFrame or
dask_cudf.DataFrame.
:param ref_cols: Dictionary of column names and their expected types.
:returns: True or False based on matching all columns in the df_to_val
and columns spec in ref_cols.
:raises: Exception - Raised when invalid dataframe length or unexpected
number of columns. TODO: Create a ValidationError subclass.
'''
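    # ref_cols maps column names to expected dtype strings, e.g.
    # {'close': 'float64', 'datetime': 'date', 'sym': 'category'}; the 'date'
    # and 'category' entries are special-cased when comparing dtypes below.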
if isinstance(df_to_val, cudf.DataFrame) and \
len(df_to_val) == 0:
err_msg = 'Node "{}" produced empty output'.format(obj.uid)
raise Exception(err_msg)
if not isinstance(df_to_val, cudf.DataFrame) and \
not isinstance(df_to_val, dask_cudf.DataFrame):
return True
i_cols = df_to_val.columns
if len(i_cols) != len(ref_cols):
errmsg = 'Invalid for node "{:s}"\n'\
'Expect {:d} columns, only see {:d} columns\n'\
'Ref: {}\n'\
'Columns: {}'\
.format(obj.uid, len(ref_cols), len(i_cols), ref_cols, i_cols)
raise Exception(errmsg)
for col in ref_cols.keys():
if col not in i_cols:
print("error for node %s, column %s is not in the required "
"output df" % (obj.uid, col))
return False
if ref_cols[col] is None:
continue
err_msg = "for node {} type {}, column {} type {} "\
"does not match expected type {}".format(
obj.uid, type(obj), col, df_to_val[col].dtype,
ref_cols[col])
if ref_cols[col] == 'category':
# comparing pandas.core.dtypes.dtypes.CategoricalDtype to
# numpy.dtype causes TypeError. Instead, let's compare
# after converting all types to their string representation
# d_type_tuple = (pd.core.dtypes.dtypes.CategoricalDtype(),)
d_type_tuple = (str(pandas.CategoricalDtype()),)
elif ref_cols[col] == 'date':
# Cudf read_csv doesn't understand 'datetime64[ms]' even
# though it reads the data in as 'datetime64[ms]', but
# expects 'date' as dtype specified passed to read_csv.
d_type_tuple = ('datetime64[ms]', 'date', 'datetime64[ns]')
else:
d_type_tuple = (str(np.dtype(ref_cols[col])),)
if (str(df_to_val[col].dtype) not in d_type_tuple):
print("ERROR: {}".format(err_msg))
# Maybe raise an exception here and have the caller
# try/except the validation routine.
return False
return True
def copy_df(df_obj):
return df_obj.copy(deep=False)
def copy_dask_cudf(df_obj):
# TODO: This just makes a df_obj with a shallow copy of the
# underlying computational graph. It does not affect the
# underlying data. Why is a copy of dask graph needed?
return df_obj.copy()
def clean_dask(ui_clean):
"""
ui_clean is True if the client send
'clean' command to the greenflow backend
"""
if ui_clean:
import dask.distributed
try:
client = dask.distributed.client.default_client()
client.restart()
except Exception:
traceback.format_exc()
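# Register the dataframe validators, shallow-copy helpers and the dask cleanup
# hook with greenflow's node-flow machinery for each supported dataframe type.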
register_validator(cudf.DataFrame, _validate_df)
register_validator(dask_cudf.DataFrame, _validate_df)
register_validator(pandas.DataFrame, _validate_df)
register_validator(dask.dataframe.DataFrame, _validate_df)
register_copy_function(cudf.DataFrame, copy_df)
register_copy_function(dask_cudf.DataFrame, copy_dask_cudf)
register_copy_function(pandas.DataFrame, copy_df)
register_copy_function(dask.dataframe.DataFrame, copy_dask_cudf)
register_cleanup('cleandask', clean_dask)
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/__init__.py |
import cudf
import os
import warnings
import pandas as pd
from greenflow.dataframe_flow.portsSpecSchema import PortsSpecSchema
__all__ = ['NodeHDFCacheMixin']
class NodeHDFCacheMixin:
def load_cache(self, filename=None) -> dict:
"""
Defines the behavior of how to load the cache file from the `filename`.
Node can override this method. Default implementation assumes cudf
dataframes.
Arguments
-------
filename: str
filename of the cache file. Leave as none to use default.
returns: dict
dictionary of the output from this node
"""
cache_dir = os.getenv('GREENFLOW_CACHE_DIR', self.cache_dir)
if filename is None:
filename = cache_dir + '/' + self.uid + '.hdf5'
output_df = {}
with pd.HDFStore(filename, mode='r') as hf:
for oport, pspec in \
self._get_output_ports(full_port_spec=True).items():
ptype = pspec.get(PortsSpecSchema.port_type)
if self.outport_connected(oport):
ptype = ([ptype] if not isinstance(ptype,
list) else ptype)
key = '{}/{}'.format(self.uid, oport)
# check hdf store for the key
if key not in hf:
raise Exception(
'The task "{}" port "{}" key "{}" not found in'
'the hdf file "{}". Cannot load from cache.'
.format(self.uid, oport, key, filename)
)
if cudf.DataFrame not in ptype:
                        warnings.warn(
                            'Task "{}" port "{}" port type is not set to '
                            'cudf.DataFrame. Attempting to load port data '
                            'with cudf.read_hdf.'.format(self.uid, oport),
                            RuntimeWarning)
output_df[oport] = cudf.read_hdf(hf, key)
return output_df
def save_cache(self, output_data: dict):
'''Defines the behavior for how to save the output of a node to
filesystem cache. Default implementation assumes cudf dataframes.
:param output_data: The output from :meth:`process`. For saving to hdf
requires that the dataframe(s) have `to_hdf` method.
'''
cache_dir = os.getenv('GREENFLOW_CACHE_DIR', self.cache_dir)
os.makedirs(cache_dir, exist_ok=True)
filename = cache_dir + '/' + self.uid + '.hdf5'
with pd.HDFStore(filename, mode='w') as hf:
for oport, odf in output_data.items():
# check for to_hdf attribute
if not hasattr(odf, 'to_hdf'):
raise Exception(
'Task "{}" port "{}" output object is missing '
'"to_hdf" attribute. Cannot save to cache.'
.format(self.uid, oport))
dtype = '{}'.format(type(odf)).lower()
if 'dataframe' not in dtype:
                    warnings.warn(
                        'Task "{}" port "{}" port type is not a dataframe.'
                        ' Attempting to save to hdf with "to_hdf" method.'
                        .format(self.uid, oport),
                        RuntimeWarning)
key = '{}/{}'.format(self.uid, oport)
odf.to_hdf(hf, key, format='table', data_columns=True)
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/node_hdf_cache.py |
from greenflow.dataframe_flow import Node, PortsSpecSchema
import matplotlib as mpl
import matplotlib.pyplot as plt
from dask.dataframe import DataFrame as DaskDataFrame
import cudf
from greenflow.dataframe_flow.portsSpecSchema import ConfSchema
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
class CumReturnNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'in'
self.OUTPUT_PORT_NAME = 'cum_return'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: ["matplotlib.figure.Figure"]
}
}
cols_required = {"datetime": "datetime64[ns]",
"strategy_returns": "float64"}
        retention = {}
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
                MetaDataSchema.META_DATA: retention
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "Cumulative Return Configuration",
"type": "object",
"description": """Plot the P & L graph from the `strategy_returns` column.
""",
"properties": {
"points": {
"type": "number",
"description": "number of data points for the chart"
},
"label": {
"type": "string",
"description": "Label for the line plot"
},
},
"required": ["points"],
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
Plot the P & L graph from the `strategy_returns` column.
`label` in the `conf` defines the stock symbol name
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
Figure
"""
input_df = inputs[self.INPUT_PORT_NAME]
if isinstance(input_df, DaskDataFrame):
input_df = input_df.compute() # get the computed value
label = 'stock'
if 'label' in self.conf:
label = self.conf['label']
num_points = self.conf['points']
stride = max(len(input_df) // num_points, 1)
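        # downsample to roughly `points` samples so long return series stay
        # cheap to plot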
backend_ = mpl.get_backend()
mpl.use("Agg") # Prevent showing stuff
f = plt.figure()
        if isinstance(input_df, (cudf.DataFrame, DaskDataFrame)):
            plt.plot(
                input_df['datetime'][::stride].to_array(),
                input_df['strategy_returns'].cumsum()[::stride].to_array(),
                'b', label=label)
        else:
            plt.plot(input_df['datetime'][::stride],
                     input_df['strategy_returns'].cumsum()[::stride],
                     'b', label=label)
plt.xlabel("Time")
plt.ylabel("Cumulative return")
plt.grid(True)
mpl.use(backend_)
return {self.OUTPUT_PORT_NAME: f}
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/analysis/cumReturnNode.py |
from greenflow.dataframe_flow import Node, PortsSpecSchema
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from greenflow.dataframe_flow.portsSpecSchema import ConfSchema
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
class ImportanceCurveNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'in'
self.OUTPUT_PORT_NAME = 'importance_curve'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: ["xgboost.Booster", "builtins.dict"]
}
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: ["matplotlib.figure.Figure"]
}
}
cols_required = {}
        retention = {}
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
                MetaDataSchema.META_DATA: retention
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "Feature Importance Plot Configuration",
"type": "object",
"description": """Plot feature importance of each feature.
""",
"properties": {
"type": {
"type": "string",
"description": """
* 'weight': the number of times a feature is used to
split the data across all trees.
* 'gain': the average gain across all splits the
feature is used in.
* 'cover': the average coverage across all
splits the feature is used in.
* 'total_gain': the total gain across all splits the
feature is used in.
* 'total_cover': the total coverage across all splits
the feature is used in.
""",
"enum": ["weight", "gain", "cover",
"total_gain", "total_cover"],
"default": "gain"
},
},
"required": ["type"],
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
        Plot the feature importance of the trained model as a bar chart
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
Figure
"""
model = inputs[self.INPUT_PORT_NAME]
if isinstance(model, dict):
model = model['booster']
# x_ord = OrdinalScale()
# y_sc = LinearScale()
backend_ = mpl.get_backend()
mpl.use("Agg") # Prevent showing stuff
f = plt.figure()
data = model.get_score(importance_type=self.conf.get('type', 'gain'))
x_values = []
y_values = []
for key in data.keys():
x_values.append(key)
y_values.append(data[key])
width = 0.35 # the width of the bars
x = np.arange(len(x_values))
plt.bar(x - width/2, y_values, width, label='Feature Importance')
plt.xticks(x, x_values, rotation='vertical')
# ax = f.get_axes()[0]
# ax.set_xticks(x)
# ax.set_xticklabels(x_values)
plt.xlabel('Features')
plt.ylabel('Importance')
plt.grid(True)
f.set_figwidth(15)
f.set_figheight(8)
mpl.use(backend_)
return {self.OUTPUT_PORT_NAME: f}
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/analysis/importanceCurve.py |
from greenflow.dataframe_flow import Node, PortsSpecSchema
import mplfinance as mpf
from ipywidgets import Image
import dask_cudf
import io
import cudf
from greenflow.dataframe_flow.portsSpecSchema import ConfSchema
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
__all__ = ['BarPlotNode']
class BarPlotNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'stock_in'
self.OUTPUT_PORT_NAME = 'barplot'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: ["ipywidgets.Image"]
}
}
cols_required = {"datetime": "datetime64[ns]",
"open": "float64",
"close": "float64",
"high": "float64",
"low": "float64",
"volume": "float64"}
        retention = {}
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
                MetaDataSchema.META_DATA: retention
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "Bar Plot Node Configuration",
"type": "object",
"description": """Takes `datetime`, `open`, `close`, `high`,
`volume` columns in the dataframe to plot the bqplot figure
for financial bar data
""",
"properties": {
"points": {
"type": "number",
"description": "number of data points for the chart"
},
"label": {
"type": "string",
"description": "label for the plot"
},
},
"required": ["points"],
}
ui = {}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
        Takes `datetime`, `open`, `close`, `high`, `low`, `volume` columns in
        the dataframe to plot a candlestick chart for this stock.
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
        ipywidgets.Image
"""
stock = inputs[self.INPUT_PORT_NAME]
num_points = self.conf['points']
stride = max(len(stock) // num_points, 1)
buf = io.BytesIO()
# Construct the marks
if (isinstance(stock, cudf.DataFrame)
or isinstance(stock, dask_cudf.DataFrame)):
data_df = stock[[
'datetime', 'open', 'high', 'low', 'close', 'volume'
]].iloc[::stride].to_pandas()
else:
data_df = stock[[
'datetime', 'open', 'high', 'low', 'close', 'volume'
]].iloc[::stride]
data_df.columns = [
'Date', 'Open', 'High', 'Low', 'Close', 'Volume'
]
data_df = data_df.set_index('Date')
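        # mplfinance expects a pandas DataFrame with a DatetimeIndex and
        # columns named Open/High/Low/Close/Volume, which is why the frame is
        # renamed and re-indexed above before calling mpf.plot below.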
mpf.plot(data_df, type='candle', volume=True, savefig=buf)
buf.seek(0)
fig = Image(
value=buf.read(),
format='png',
width=600,
height=900,
)
return {self.OUTPUT_PORT_NAME: fig}
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/analysis/barPlotNode.py |
from .outCsvNode import OutCsvNode
from .sharpeRatioNode import SharpeRatioNode
from .cumReturnNode import CumReturnNode
from .barPlotNode import BarPlotNode
from .linePlotNode import LinePlotNode
from .rocCurveNode import RocCurveNode
from .importanceCurve import ImportanceCurveNode
from .exportXGBoostNode import XGBoostExportNode
from .scatterPlotNode import ScatterPlotNode
__all__ = ["OutCsvNode", "SharpeRatioNode", "CumReturnNode",
"BarPlotNode", "LinePlotNode", "RocCurveNode",
"ImportanceCurveNode", "XGBoostExportNode",
"ScatterPlotNode"]
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/analysis/__init__.py |
from greenflow.dataframe_flow import Node, PortsSpecSchema
from bqplot import (Axis, LinearScale, Figure,
DateScale, ColorScale, ColorAxis, Scatter)
import dask_cudf
import cudf
from greenflow.dataframe_flow.portsSpecSchema import ConfSchema
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
__all__ = ["ScatterPlotNode"]
scaleMap = {
"ColorScale": ColorScale,
"LinearScale": LinearScale,
"DateScale": DateScale
}
class ScatterPlotNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'in'
self.OUTPUT_PORT_NAME = 'scatter_plot'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: ["bqplot.Figure"]
}
}
cols_required = {}
if 'col_x' in self.conf:
cols_required[self.conf['col_x']] = None
if 'col_y' in self.conf:
cols_required[self.conf['col_y']] = None
if 'col_color' in self.conf:
cols_required[self.conf['col_color']] = None
        retention = {}
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
                MetaDataSchema.META_DATA: retention
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "Scatter Plot Configuration",
"type": "object",
"description": """Make a Scatter Plot.
""",
"properties": {
"points": {
"type": "number",
"description": "number of data points for the chart"
},
"title": {
"type": "string",
"description": "the plot title"
},
"col_x": {
"type": "string",
"description": "column used for X-axis"
},
"col_x_scale": {
"type": "string",
"description": "X-axis scale",
"enum": ["DateScale", "LinearScale"],
"default": "LinearScale"
},
"col_y": {
"type": "string",
"description": "column used for Y-axis"
},
"col_y_scale": {
"type": "string",
"description": "Y-axis scale",
"enum": ["DateScale", "LinearScale"],
"default": "LinearScale"
},
"col_color": {
"type": "string",
"description": "column used for color"
}
},
"required": ["points", "title", "col_x", "col_y"],
}
ui = {}
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME in input_meta:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
enums = [col for col in col_from_inport.keys()]
json['properties']['col_x']['enum'] = enums
json['properties']['col_y']['enum'] = enums
json['properties']['col_color']['enum'] = enums
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
Plot the Scatter plot
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
Figure
"""
input_df = inputs[self.INPUT_PORT_NAME]
if isinstance(input_df, dask_cudf.DataFrame):
input_df = input_df.compute() # get the computed value
num_points = self.conf['points']
stride = max(len(input_df) // num_points, 1)
sc_x = scaleMap[self.conf.get('col_x_scale', 'LinearScale')]()
sc_y = scaleMap[self.conf.get('col_y_scale', 'LinearScale')]()
x_col = self.conf['col_x']
y_col = self.conf['col_y']
ax_y = Axis(label=y_col, scale=sc_y,
orientation='vertical', side='left')
ax_x = Axis(label=x_col, scale=sc_x,
num_ticks=10, label_location='end')
m_chart = dict(top=50, bottom=70, left=50, right=100)
if 'col_color' in self.conf:
color_col = self.conf['col_color']
sc_c1 = ColorScale()
ax_c = ColorAxis(scale=sc_c1, tick_format='0.2%', label=color_col,
orientation='vertical', side='right')
if isinstance(input_df, (cudf.DataFrame, dask_cudf.DataFrame)):
scatter = Scatter(x=input_df[x_col][::stride].to_array(),
y=input_df[y_col][::stride].to_array(),
color=input_df[
color_col][::stride].to_array(),
scales={'x': sc_x,
'y': sc_y, 'color': sc_c1},
stroke='black')
else:
scatter = Scatter(x=input_df[x_col][::stride],
y=input_df[y_col][::stride],
color=input_df[color_col][::stride],
scales={'x': sc_x,
'y': sc_y, 'color': sc_c1},
stroke='black')
fig = Figure(axes=[ax_x, ax_c, ax_y], marks=[scatter],
fig_margin=m_chart,
title=self.conf['title'])
else:
if isinstance(input_df, (cudf.DataFrame, dask_cudf.DataFrame)):
scatter = Scatter(x=input_df[x_col][::stride].to_array(),
y=input_df[y_col][::stride].to_array(),
scales={'x': sc_x, 'y': sc_y},
stroke='black')
else:
scatter = Scatter(x=input_df[x_col][::stride],
y=input_df[y_col][::stride],
scales={'x': sc_x, 'y': sc_y},
stroke='black')
fig = Figure(axes=[ax_x, ax_y], marks=[scatter],
fig_margin=m_chart,
title=self.conf['title'])
return {self.OUTPUT_PORT_NAME: fig}
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/analysis/scatterPlotNode.py |
from greenflow.dataframe_flow import Node, PortsSpecSchema
import dask_cudf
from greenflow.dataframe_flow.util import get_file_path
from greenflow.dataframe_flow.portsSpecSchema import ConfSchema
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
class OutCsvNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'df_in'
self.OUTPUT_PORT_NAME = 'df_out'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:df_in}"
}
}
cols_required = {}
addition = {}
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_ADDITION,
MetaDataSchema.META_REF_INPUT: self.INPUT_PORT_NAME,
MetaDataSchema.META_DATA: addition
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
input_meta = self.get_input_meta()
json = {
"title": "Cvs output Configure",
"type": "object",
"description": """Dump the input datafram to the resulting csv file.
the output filepath is defined as `path` in the `conf`.
if only a subset of columns is needed for the csv file,
enumerate the columns in the `columns` of the `conf`
""",
"properties": {
"path": {
"type": "string",
"description": """The output filepath for the csv"""
},
"columns": {
"type": "array",
"items": {
"type": "string"
},
"description": """array of columns to be selected for
the csv"""
}
},
"required": ["path"],
}
ui = {}
if self.INPUT_PORT_NAME in input_meta:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
enums = [col for col in col_from_inport.keys()]
json['properties']['columns']['items']['enum'] = enums
return ConfSchema(json=json, ui=ui)
else:
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
        Dump the input dataframe to the resulting csv file.
        The output filepath is defined as `path` in the `conf`.
        If only a subset of columns is needed for the csv file, enumerate the
        columns in the `columns` of the `conf`.
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
dataframe
"""
raw_input_df = inputs[self.INPUT_PORT_NAME]
if 'columns' in self.conf:
raw_input_df = raw_input_df[self.conf['columns']]
if isinstance(raw_input_df, dask_cudf.DataFrame):
input_df = raw_input_df.compute() # get the computed value
else:
input_df = raw_input_df
input_df.to_pandas().to_csv(get_file_path(self.conf['path']),
index=False)
return {self.OUTPUT_PORT_NAME: raw_input_df}
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/analysis/outCsvNode.py |
from greenflow.dataframe_flow import Node, PortsSpecSchema
import math
import dask_cudf
from greenflow.dataframe_flow.portsSpecSchema import ConfSchema
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
class SharpeRatioNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'stock_in'
self.OUTPUT_PORT_NAME = 'sharpe_out'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: ["builtins.float"]
}
}
cols_required = {"strategy_returns": "float64"}
        retention = {}
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
                MetaDataSchema.META_DATA: retention
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "Calculate Sharpe Ratio configure",
"type": "object",
"description": """Compute the yearly Sharpe Ratio from the
            input dataframe `strategy_returns` column. Assumes it is a
            daily return and that there are 252 trading days per year.
""",
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
Compute the yearly Sharpe Ratio from the input dataframe
        `strategy_returns` column. Assumes it is a daily return and that
        there are 252 trading days per year.
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
float
the sharpe ratio
"""
input_df = inputs[self.INPUT_PORT_NAME]
if isinstance(input_df, dask_cudf.DataFrame):
input_df = input_df.compute() # get the computed value
daily_mean = input_df['strategy_returns'].mean()
daily_std = input_df['strategy_returns'].std()
return {self.OUTPUT_PORT_NAME: float(
daily_mean / daily_std * math.sqrt(252))}
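# Illustrative note (not part of the original node): the value above is the
# daily Sharpe ratio annualized by sqrt(252). For example, a hypothetical daily
# mean return of 0.0005 with a daily standard deviation of 0.01 gives
# 0.0005 / 0.01 * math.sqrt(252) ~= 0.79.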
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/analysis/sharpeRatioNode.py |
from greenflow.dataframe_flow import Node
# from bqplot import Axis, LinearScale, Figure, Lines, PanZoom
import dask_cudf
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn import metrics
import cudf
from greenflow.dataframe_flow.portsSpecSchema import (ConfSchema,
PortsSpecSchema)
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
class RocCurveNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'in'
self.OUTPUT_PORT_NAME = 'roc_curve'
self.OUTPUT_VALUE_NAME = 'value'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: ["matplotlib.figure.Figure"]
},
self.OUTPUT_VALUE_NAME: {
port_type: ["builtins.float"]
}
}
cols_required = {}
icols = self.get_input_meta()
if 'label' in self.conf:
label = self.conf['label']
labeltype = icols.get(self.INPUT_PORT_NAME, {}).get(label)
cols_required[label] = labeltype
if 'prediction' in self.conf:
cols_required[self.conf['prediction']] = None
        retention = {}
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
                MetaDataSchema.META_DATA: retention
},
self.OUTPUT_VALUE_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
                MetaDataSchema.META_DATA: retention
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "ROC Curve Configuration",
"type": "object",
"description": """Plot the ROC Curve for binary classification problem.
""",
"properties": {
"label": {
"type": "string",
"description": "Ground truth label column name"
},
"prediction": {
"type": "string",
"description": "prediction probablity column"
},
},
"required": ["label", "prediction"],
}
ui = {
}
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME in input_meta:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
enums = [col for col in col_from_inport.keys()]
json['properties']['label']['enum'] = enums
json['properties']['prediction']['enum'] = enums
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
Plot the ROC curve
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
Figure
"""
input_df = inputs[self.INPUT_PORT_NAME]
if isinstance(input_df, dask_cudf.DataFrame):
input_df = input_df.compute() # get the computed value
label_col = input_df[self.conf['label']].values
pred_col = input_df[self.conf['prediction']].values
if isinstance(input_df, cudf.DataFrame):
fpr, tpr, _ = metrics.roc_curve(label_col.get(),
pred_col.get())
else:
fpr, tpr, _ = metrics.roc_curve(label_col,
pred_col)
auc_value = metrics.auc(fpr, tpr)
out = {}
backend_ = mpl.get_backend()
mpl.use("Agg") # Prevent showing stuff
f = plt.figure()
if self.outport_connected(self.OUTPUT_PORT_NAME):
# linear_x = LinearScale()
# linear_y = LinearScale()
# yax = Axis(label='True Positive Rate', scale=linear_x,
# orientation='vertical')
# xax = Axis(label='False Positive Rate', scale=linear_y,
# orientation='horizontal')
# panzoom_main = PanZoom(scales={'x': [linear_x]})
curve_label = 'ROC (area = {:.2f})'.format(auc_value)
plt.plot(fpr, tpr, color='blue', label=curve_label)
# line = Lines(x=fpr, y=tpr,
# scales={'x': linear_x, 'y': linear_y},
# colors=['blue'], labels=[curve_label],
# display_legend=True)
# new_fig = Figure(marks=[line], axes=[yax, xax],
# title='ROC Curve',
# interaction=panzoom_main)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.grid(True)
plt.title('ROC Curve')
plt.legend()
mpl.use(backend_)
out.update({self.OUTPUT_PORT_NAME: f})
if self.outport_connected(self.OUTPUT_VALUE_NAME):
out.update({self.OUTPUT_VALUE_NAME: float(auc_value)})
return out
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/analysis/rocCurveNode.py |
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow.portsSpecSchema import (ConfSchema,
PortsSpecSchema)
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.util import get_file_path
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
class XGBoostExportNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'model_in'
self.OUTPUT_PORT_NAME = 'filename'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: ["xgboost.Booster", "builtins.dict"]
}
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: ["builtins.str"]
}
}
cols_required = {}
addition = {}
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_ADDITION,
MetaDataSchema.META_REF_INPUT: self.INPUT_PORT_NAME,
MetaDataSchema.META_DATA: addition
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "XGBoost Export Configure",
"type": "object",
"description": """Export the xgboost model to a file
""",
"properties": {
"path": {
"type": "string",
"description":
"""The output filepath for the xgboost
model"""
}
},
"required": ["path"],
}
ui = {}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
        Dump the xgboost model into the file given by `path` in the conf.
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
        str
            the file path where the model was saved
"""
model = inputs[self.INPUT_PORT_NAME]
if isinstance(model, dict):
model = model['booster']
pathname = get_file_path(self.conf['path'])
model.save_model(pathname)
return {self.OUTPUT_PORT_NAME: pathname}
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/analysis/exportXGBoostNode.py |
from greenflow.dataframe_flow import Node, PortsSpecSchema
import matplotlib as mpl
import matplotlib.pyplot as plt
from greenflow.dataframe_flow.portsSpecSchema import ConfSchema
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
import cudf
import dask_cudf
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
class LinePlotNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'in'
self.OUTPUT_PORT_NAME = 'lineplot'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: ["matplotlib.figure.Figure"]
}
}
cols_required = {"datetime": "datetime64[ns]"}
        retention = {}
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
                MetaDataSchema.META_DATA: retention
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
color_strings = ['black', 'yellow', 'blue',
'red', 'green', 'orange',
'magenta', 'cyan']
json = {
"title": "Line Plot Node Configuration",
"type": "object",
"description": """Plot the columns as lines""",
"properties": {
"points": {
"type": "number",
"description": "number of data points for the chart"
},
"title": {
"type": "string",
"description": "the plot title"
},
"lines": {
"type": "array",
"items": {
"type": "object",
"title": "Line Information",
"properties": {
"column": {
"type": "string",
},
"label": {
"type": "string",
},
"color": {
"type": "string",
"enum": color_strings
}
}
}
}
},
"required": ["points", "title", "lines"],
}
input_meta = self.get_input_meta()
ui = {
}
if self.INPUT_PORT_NAME in input_meta:
col_inport = input_meta[self.INPUT_PORT_NAME]
enums = [col for col in col_inport.keys()]
first_item = json['properties']['lines']['items']
first_item['properties']['column']['enum'] = enums
return ConfSchema(json=json, ui=ui)
else:
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
Plot the lines from the input dataframe. The plotted lines are the
columns in the input dataframe which are specified in the `lines` of
node's `conf`
The plot title is defined in the `title` of the node's `conf`
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
Figure
"""
input_df = inputs[self.INPUT_PORT_NAME]
num_points = self.conf['points']
stride = max(len(input_df) // num_points, 1)
backend_ = mpl.get_backend()
mpl.use("Agg") # Prevent showing stuff
f = plt.figure()
for line in self.conf['lines']:
col_name = line['column']
label_name = line['label']
color = line['color']
            if isinstance(input_df, (cudf.DataFrame, dask_cudf.DataFrame)):
                plt.plot(input_df['datetime'][::stride].to_array(),
                         input_df[col_name][::stride].to_array(),
                         color=color, label=label_name)
            else:
                plt.plot(input_df['datetime'][::stride],
                         input_df[col_name][::stride],
                         color=color, label=label_name)
plt.grid(True)
plt.legend()
mpl.use(backend_)
return {self.OUTPUT_PORT_NAME: f}
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/analysis/linePlotNode.py |
from greenflow.dataframe_flow import (
Node, PortsSpecSchema, ConfSchema, MetaDataSchema)
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
class SimpleBackTestNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'bardata_in'
self.OUTPUT_PORT_NAME = 'backtest_out'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:bardata_in}"
}
}
cols_required = {"signal": "float64",
"returns": "float64"}
addition = {"strategy_returns": "float64"}
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_ADDITION,
MetaDataSchema.META_REF_INPUT: self.INPUT_PORT_NAME,
MetaDataSchema.META_DATA: addition
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "Backtest configure",
"type": "object",
"description": """compute the `strategy_returns` by assuming invest
`signal` amount of dollars for each of the time step.""",
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
        Compute the `strategy_returns` by assuming that `signal` dollars are
        invested at each time step.
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
dataframe
"""
input_df = inputs[self.INPUT_PORT_NAME]
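        # Example with hypothetical numbers: if `signal` holds the dollar
        # position taken at each time step (e.g. 1.0 for long one dollar,
        # -1.0 for short) and `returns` is the per-period asset return
        # (e.g. 0.02), the per-period P&L computed below is 0.02 or -0.02.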
input_df['strategy_returns'] = input_df['signal'] * input_df['returns']
return {self.OUTPUT_PORT_NAME: input_df}
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/backtest/simpleBackTest.py |
from .simpleBackTest import SimpleBackTestNode
__all__ = ["SimpleBackTestNode"]
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/backtest/__init__.py |
from greenflow.dataframe_flow import Node, PortsSpecSchema
from greenflow.dataframe_flow.portsSpecSchema import ConfSchema
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
class SimpleAveragePortOpt(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'stock_in'
self.OUTPUT_PORT_NAME = 'stock_out'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:stock_in}"
}
}
cols_required = {"datetime": "datetime64[ns]",
"strategy_returns": "float64",
"asset": "int64"}
retention = {"datetime": "datetime64[ns]",
"strategy_returns": "float64"}
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
MetaDataSchema.META_DATA: retention
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "Simple Portfolio Node configure",
"type": "object",
"description": """Average the strategy returns for all the
assets """,
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
Average the strategy returns for all the assets.
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
dataframe
"""
input_df = inputs[self.INPUT_PORT_NAME]
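        # The groupby-mean below forms an equal-weighted portfolio: for every
        # timestamp, the strategy returns of all assets are averaged into a
        # single portfolio return series.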
port = input_df[['datetime', 'strategy_returns']] \
.groupby(['datetime']).mean().reset_index().sort_values('datetime')
port.columns = ['datetime', 'strategy_returns']
return {self.OUTPUT_PORT_NAME: port}
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/portofolio/simpleAveragePortOpt.py |
from .simpleAveragePortOpt import SimpleAveragePortOpt
__all__ = ["SimpleAveragePortOpt"]
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/portofolio/__init__.py |
from .splitDataNode import DataSplittingNode
from .xgboostNode import TrainXGBoostNode, InferXGBoostNode
from .forestInference import ForestInferenceNode
from .gridRandomSearchNode import GridRandomSearchNode
__all__ = ["DataSplittingNode", "TrainXGBoostNode",
"InferXGBoostNode", "ForestInferenceNode",
"GridRandomSearchNode"]
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/ml/__init__.py |
import cuml
import copy
from greenflow.dataframe_flow.portsSpecSchema import (ConfSchema,
PortsSpecSchema)
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
__all__ = ['DataSplittingNode']
class DataSplittingNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.delayed_process = True
port_type = PortsSpecSchema.port_type
self.INPUT_PORT_NAME = 'in'
self.OUTPUT_PORT_NAME_TRAIN = 'train'
self.OUTPUT_PORT_NAME_TEST = 'test'
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME_TRAIN: {
port_type: "${port:in}"
},
self.OUTPUT_PORT_NAME_TEST: {
port_type: "${port:in}"
}
}
meta_inports = {
self.INPUT_PORT_NAME: {}
}
meta_outports = {
self.OUTPUT_PORT_NAME_TRAIN: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_DELETION,
MetaDataSchema.META_REF_INPUT: self.INPUT_PORT_NAME,
MetaDataSchema.META_DATA: {}
},
self.OUTPUT_PORT_NAME_TEST: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_DELETION,
MetaDataSchema.META_REF_INPUT: self.INPUT_PORT_NAME,
MetaDataSchema.META_DATA: {}
}
}
if 'target' in self.conf:
target_col = self.conf['target']
meta_inports = {
self.INPUT_PORT_NAME: {
target_col: None
}
}
meta_outports[self.OUTPUT_PORT_NAME_TEST][
MetaDataSchema.META_ORDER] = {
target_col: -1
}
meta_outports[self.OUTPUT_PORT_NAME_TRAIN][
MetaDataSchema.META_ORDER] = {
target_col: -1,
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "Data Splitting configure",
"type": "object",
"description": """Partitions device data into two parts""",
"properties": {
"target": {"type": "string",
"description": "Target column name"},
"train_size": {"type": "number",
"description": """If float, represents the
proportion [0, 1] of the data to be assigned to
the training set. If an int, represents the
number of instances to be assigned to the
training set.""",
"default": 0.8},
"shuffle": {"type": "boolean",
"description": """Whether or not to shuffle inputs
                            before splitting"""},
"random_state": {"type": "number",
"description": """If shuffle is true, seeds
the generator. Unseeded by default"""}
},
"required": ["target"],
}
ui = {
}
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME in input_meta:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
enums = [col for col in col_from_inport.keys()]
json['properties']['target']['enum'] = enums
return ConfSchema(json=json, ui=ui)
else:
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
        Split the dataframe into train and test sets.
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
dataframe
"""
input_df = inputs[self.INPUT_PORT_NAME]
target_col = self.conf['target']
train_cols = list(input_df.columns)
if target_col in train_cols:
train_cols.remove(target_col)
conf = copy.copy(self.conf)
del conf['target']
r = cuml.train_test_split(
input_df[train_cols], input_df[target_col], **conf)
r[0].index = r[2].index
r[0][target_col] = r[2]
r[1].index = r[3].index
r[1][target_col] = r[3]
output = {}
if self.outport_connected(self.OUTPUT_PORT_NAME_TRAIN):
output.update({self.OUTPUT_PORT_NAME_TRAIN: r[0]})
if self.outport_connected(self.OUTPUT_PORT_NAME_TEST):
output.update({self.OUTPUT_PORT_NAME_TEST: r[1]})
return output
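# Hedged standalone sketch (an assumption, not part of this module) of the
# cuml.train_test_split call used in process() above; the column names and
# values are illustrative only:
#
#     import cudf
#     import cuml
#     df = cudf.DataFrame({'x': [1., 2., 3., 4.], 'y': [0, 1, 0, 1]})
#     x_train, x_test, y_train, y_test = cuml.train_test_split(
#         df[['x']], df['y'], train_size=0.8, shuffle=True, random_state=0)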
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/ml/splitDataNode.py |
from jsonpath_ng import parse
import uuid
import cudf
import pandas
from copy import deepcopy
import ray
from ray import tune
from greenflow.plugin_nodes.util.contextCompositeNode import \
ContextCompositeNode
from greenflow.plugin_nodes.util.compositeNode import (group_ports, _get_node,
_get_port)
from greenflow.dataframe_flow.portsSpecSchema import (
ConfSchema, PortsSpecSchema, NodePorts)
from greenflow.dataframe_flow.metaSpec import MetaData
from greenflow.dataframe_flow import TaskGraph
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow.util import get_file_path
from greenflow.dataframe_flow.taskSpecSchema import TaskSpecSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
__all__ = ["GridRandomSearchNode"]
_CONF_JSON = {
"description": """
Use Tune to specify a grid search
or random search for a context composite node.
""",
"definitions": {
"number": {
"type": "object",
"oneOf": [
{
"title": "randn",
"description": """Wraps
tune.sample_from around
np.random.randn.
tune.randn(10)
is equivalent to
np.random.randn(10)""",
"properties": {
"function": {
"type": "string",
"enum": [
"randn"
],
"default": "randn"
},
"args": {
"type": "array",
"items": [
{
"description": "number of samples",
"type": "number",
"default": 1.0
}
]
}
}
},
{
"title": "uniform",
"description": """Wraps tune.sample_from around
np.random.uniform""",
"properties": {
"function": {
"type": "string",
"enum": [
"uniform"
],
"default": "uniform"
},
"args": {
"type": "array",
"items": [
{
"type": "number",
"description": "Lower boundary",
"default": 0.0
},
{
"type": "number",
"description": "Upper boundary",
"default": 10.0
}
]
}
}
},
{
"title": "loguniform",
"description": """Sugar for sampling
in different orders of magnitude.,
parameters, min_bound – Lower
boundary of the output interval,
max_bound (float) – Upper boundary
of the output interval (1e-2),
base – Base of the log.""",
"properties": {
"function": {
"type": "string",
"enum": [
"loguniform"
],
"default": "loguniform"
},
"args": {
"type": "array",
"items": [
{
"type": "number",
"description": "Lower boundary",
"default": 0.0001
},
{
"type": "number",
"description": "Upper boundary",
"default": 0.01
},
{
"type": "number",
"description": "Log base",
"default": 10
}
]
}
}
},
{
"title": "choice",
"description": """Wraps tune.sample_from
around random.choice.""",
"properties": {
"function": {
"type": "string",
"enum": [
"choice"
],
"default": "choice"
},
"args": {
"type": "array",
"items": {
"type": "number"
}
}
}
},
{
"title": "grid_search",
"description": """Convenience method for
specifying grid search over a value.""",
"properties": {
"function": {
"type": "string",
"enum": [
"grid_search"
],
"default": "grid_search"
},
"args": {
"type": "array",
"items": {
"type": "number"
}
}
}
}
]
},
"string": {
"type": "object",
"oneOf": [
{
"title": "choice",
"description": """Wraps tune.sample_from
around random.choice.""",
"properties": {
"function": {
"type": "string",
"enum": [
"choice"
],
"default": "choice"
},
"args": {
"type": "array",
"items": {
"type": "string"
}
}
}
},
{
"title": "grid_search",
"description": """Convenience method for
specifying grid search over a value.""",
"properties": {
"function": {
"type": "string",
"enum": [
"grid_search"
],
"default": "grid_search"
},
"args": {
"type": "array",
"items": {
"type": "string"
}
}
}
}
]
}
},
"type": "object",
"properties": {
"parameters": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {
"type": "string"
}
},
"dependencies": {
"name": {
"oneOf": []
}
}
}
},
"metrics": {
"type": "array",
"description": """the metrics that is going to be
recorded""",
"items": {
"type": "string"
},
"default": []
},
"best": {
"description": """the metric that is used for
best configuration""",
"type": "object",
"properties": {
"metric": {
"type": "string"
},
"mode": {
"type": "string",
"enum": [
"min",
"max"
],
"default": "max"
}
}
},
"tune": {
"type": "object",
"properties": {
"local_dir": {
"type": "string",
"description": """
Local dir to save training results to.
""",
"default": "./ray"
},
"name": {
"type": "string",
"description": "Name of experiment",
"default": "exp"
},
"num_samples": {
"type": "number",
"description": """
Number of times to sample from
the hyperparameter space.
If grid_search is provided
as an argument, the grid will be
repeated num_samples of times.
""",
"default": 1
},
"resources_per_trial": {
"type": "object",
"description": """
Machine resources to allocate per trial,
e.g. {"cpu": 64, "gpu": 8}. Note that
GPUs will not be assigned unless you
specify them here.""",
"properties": {
"cpu": {
"type": "number",
"default": 1
},
"gpu": {
"type": "number",
"default": 1
}
}
}
}
}
}
}
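# Illustrative (hypothetical) `parameters` entry accepted by the schema above,
# mapping a context variable to a Tune search space; process() below turns it
# into tune.grid_search([4, 6, 8]):
#
#     {"name": "max_depth",
#      "search": {"function": "grid_search", "args": [4, 6, 8]}}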
class SeriliazableNode(object):
def __init__(self, uid, meta_data, ports_data):
self.meta_data = meta_data
self.ports_data = ports_data
self.uid = uid
def ports_setup(self):
return self.ports_data
def meta_setup(self):
return self.meta_data
def get_clean_inputs(conf, task_graph, inputs):
update_inputs = []
if 'input' in conf:
# group input ports by node id
inp_groups = group_ports(conf['input'])
for inp in inp_groups.keys():
if inp in task_graph:
inputNode = task_graph[inp]
replaced_ports = set(inp_groups[inp])
for oldInput in inputNode.inputs:
if oldInput['to_port'] in replaced_ports:
# we want to disconnect this old one and
# connect to external node
if True:
for externalInput in inputs:
if (_get_node(
externalInput[
'to_port']) == inputNode.uid
and _get_port(
externalInput[
'to_port']) == oldInput[
'to_port']):
newInput = {}
newInput['to_port'] = externalInput[
'to_port']
newInput['from_port'] = externalInput[
'from_port']
e_node = externalInput['from_node']
newInput['from_node'] = SeriliazableNode(
e_node.uid,
e_node.meta_setup(),
e_node.ports_setup())
update_inputs.append(newInput)
return update_inputs
def update_conf_for_search(conf, replaceObj, task_graph, config):
# find the other replacment conf
if task_graph:
for task in task_graph:
key = task.get('id')
newid = key
conf_obj = task.get('conf')
if newid in replaceObj:
replaceObj[newid].update({'conf': conf_obj})
else:
replaceObj[newid] = {}
replaceObj[newid].update({'conf': conf_obj})
# replace the numbers from the context
if 'context' in conf:
for key in conf['context'].keys():
val = conf['context'][key]['value']
if key in config:
val = config[key]
for map_obj in conf['context'][key]['map']:
xpath = map_obj['xpath']
expr = parse(xpath)
expr.update(replaceObj, val)
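# Minimal sketch (for illustration only, with made-up node ids) of the
# jsonpath_ng update performed above on the replacement object:
#
#     from jsonpath_ng import parse
#     replace_obj = {'node_a': {'conf': {'depth': 1}}}
#     parse('node_a.conf.depth').update(replace_obj, 8)
#     # replace_obj['node_a']['conf']['depth'] is now 8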
def get_search_fun(data_store, conf, inputs):
def search_fun(config, checkpoint_dir=None):
myinputs = {}
for key in data_store.keys():
v = ray.get(data_store[key])
if isinstance(v, pandas.DataFrame):
myinputs[key] = cudf.from_pandas(v)
else:
myinputs[key] = v
task_graph = TaskGraph.load_taskgraph(
get_file_path(conf['taskgraph']))
task_graph._build()
outputLists = []
replaceObj = {}
input_feeders = []
def inputNode_fun(inputNode, in_ports):
inports = inputNode.ports_setup().inports
class InputFeed(TemplateNodeMixin, Node):
def meta_setup(self):
output = {}
for inp in inputNode.inputs:
# output[inp['to_port']] = inp[
# 'from_node'].meta_setup().outports[
# inp['from_port']]
output[inp['to_port']] = inp[
'from_node'].meta_setup().outports[
inp['from_port']]
# it will be something like { input_port: columns }
return MetaData(inports={}, outports=output)
def ports_setup(self):
# it will be something like { input_port: types }
return NodePorts(inports={}, outports=inports)
def conf_schema(self):
return ConfSchema()
def update(self):
TemplateNodeMixin.update(self)
def process(self, empty):
output = {}
for key in inports.keys():
if (inputNode.uid+'@'+key
in myinputs):
output[key] = myinputs[
inputNode.uid+'@'+key]
return output
uni_id = str(uuid.uuid1())
obj = {
TaskSpecSchema.task_id: uni_id,
TaskSpecSchema.conf: {},
TaskSpecSchema.node_type: InputFeed,
TaskSpecSchema.inputs: []
}
input_feeders.append(obj)
newInputs = {}
for key in inports.keys():
if inputNode.uid+'@'+key in myinputs:
newInputs[key] = uni_id+'.'+key
for inp in inputNode.inputs:
if inp['to_port'] not in in_ports:
# need to keep the old connections
newInputs[inp['to_port']] = (
inp['from_node'].uid + '.' + inp['from_port'])
replaceObj.update({inputNode.uid: {
TaskSpecSchema.inputs: newInputs}
})
def outNode_fun(outNode, out_ports):
pass
outputLists = conf['metrics']
make_sub_graph_connection(conf, inputs, task_graph, inputNode_fun,
outNode_fun)
task_graph.extend(input_feeders)
update_conf_for_search(conf, replaceObj, task_graph, config)
result = task_graph.run(outputLists, replace=replaceObj)
metric_report = {item: result[item] for item in outputLists}
tune.report(**metric_report)
return search_fun
def make_sub_graph_connection(conf, inputs, task_graph, inputNode_fun,
outNode_fun):
"""
connects the current composite node's inputs and outputs to
the subgraph-task_graph's inputs and outputs.
inputNode_fun has subgraph inputNode and all the input ports
as argument, it processes the inputNode logics
    outNode_fun has subgraph outNode and all the output ports
as argument, it processes the outNode logics
"""
all_inputs = []
all_outputs = []
extra_updated = set()
extra_roots = []
if 'input' in conf:
# group input ports by node id
inp_groups = group_ports(conf['input'])
for inp in inp_groups.keys():
if inp in task_graph:
inputNode = task_graph[inp]
update_inputs = []
replaced_ports = set(inp_groups[inp])
for oldInput in inputNode.inputs:
if oldInput['to_port'] in replaced_ports:
# we want to disconnect this old one and
# connect to external node
if True:
for externalInput in inputs:
if (_get_node(
externalInput[
'to_port']) == inputNode.uid
and _get_port(
externalInput[
'to_port']) == oldInput[
'to_port']):
newInput = {}
newInput['to_port'] = _get_port(
externalInput['to_port'])
newInput['from_port'] = externalInput[
'from_port']
newInput['from_node'] = externalInput[
'from_node']
update_inputs.append(newInput)
else:
update_inputs.append(oldInput)
inputNode.inputs = update_inputs
# add all the `updated` parents to the set
for i in inputNode.inputs:
if isinstance(i['from_node'], SeriliazableNode):
extra_updated.add(i['from_node'])
# if all the parents are updated, this is
# a new root node
if all([
isinstance(i['from_node'], SeriliazableNode)
for i in inputNode.inputs
]):
extra_roots.append(inputNode)
all_inputs.append((inputNode, inp))
if 'output' in conf:
oup_groups = group_ports(conf['output'])
for oup in oup_groups.keys():
if oup in task_graph:
outNode = task_graph[oup]
# we do not disconnect anything here, as we take extra
# outputs for composite node.
                # Note, we rely on the fact that taskgraph.run method
# will remove the output collector from taskgraph if
# the outputlist is set
all_outputs.append((outNode, oup))
# outNode_fun(outNode, oup_groups[oup])
# update all the nodes and cache it
task_graph.breadth_first_update(extra_roots=extra_roots,
extra_updated=extra_updated)
for innode in all_inputs:
inputNode_fun(innode[0], inp_groups[innode[1]])
for outnode in all_outputs:
# inputNode_fun(innode[0], inp_groups[innode[1]])
outNode_fun(outnode[0], oup_groups[outnode[1]])
class GridRandomSearchNode(ContextCompositeNode):
def init(self):
ContextCompositeNode.init(self)
def ports_setup(self):
return ContextCompositeNode.ports_setup(self)
def conf_schema(self):
task_graph = self.task_graph
# replacementObj = self.replacementObj
# # cache_key, task_graph, replacementObj = self._compute_has
# cache_key, task_graph, replacementObj = self._compute_hash_key()
# if cache_key in CACHE_SCHEMA:
# return CACHE_SCHEMA[cache_key]
        # gets the input when it gets the conf
input_meta = self.get_input_meta()
json = {}
if self.INPUT_CONFIG in input_meta:
conf = input_meta[self.INPUT_CONFIG]
if 'context' in conf:
json = deepcopy(_CONF_JSON)
metrics = []
# task_graph.build(replace=replacementObj)
for t in task_graph:
node_id = t.get('id')
if node_id != '':
node = task_graph[node_id]
all_ports = node.ports_setup()
for port in all_ports.outports.keys():
types = all_ports.outports[port][
PortsSpecSchema.port_type]
if types == float:
metrics.append(node_id+'.'+port)
elif (isinstance(types, list)
and types[0] == float):
metrics.append(node_id+'.'+port)
context = conf['context']
json['properties']['parameters'][
'items']['properties']['name']['enum'] = list(
context.keys())
json['properties']['metrics'][
'items']['enum'] = metrics
if 'metrics' in self.conf:
json['properties']['best'][
'properties']['metric']['enum'] = self.conf['metrics']
options = json['properties']['parameters'][
'items']['dependencies']['name']['oneOf']
for var in context.keys():
if (context[var]['type'] == 'number' or
context[var]['type'] == 'string'):
obj = {
"properties": {
"name": {
"type": "string",
"enum": [var]
},
"search": {
"$ref": "#/definitions/{}".format(
context[var]['type'])
}
}
}
options.append(obj)
ui = {
"tune": {
"local_dir": {"ui:widget": "PathSelector"}
}
}
out_schema = ConfSchema(json=json, ui=ui)
# CACHE_SCHEMA[cache_key] = out_schema
return out_schema
def meta_setup(self):
from ray.tune import Analysis
out_meta = ContextCompositeNode.meta_setup(self)
if 'tune' in self.conf:
if 'local_dir' in self.conf['tune']:
path = self.conf['tune']['local_dir']
if 'name' in self.conf['tune']:
exp = self.conf['tune']['name']
try:
analysis = Analysis(path+'/'+exp)
if 'best' in self.conf:
best = analysis.get_best_config(
**self.conf['best'])
for key in best.keys():
self.conf['context'][key]['value'] = best[key]
print('get best', best)
out_meta.outports[self.OUTPUT_CONFIG] = self.conf
except Exception:
pass
return out_meta
def process(self, inputs):
if self.INPUT_CONFIG in inputs:
self.conf.update(inputs[self.INPUT_CONFIG].data)
output = {}
if self.outport_connected(self.OUTPUT_CONFIG):
data_store = {}
for key in inputs.keys():
v = inputs[key]
if isinstance(v, cudf.DataFrame):
# it is a work around,
# the ray.put doesn't support GPU cudf
data_store[key] = ray.put(v.to_pandas())
else:
data_store[key] = ray.put(v)
# here we need to do the hyper parameter search
config = {}
for para in self.conf['parameters']:
fun_name = para['search']['function']
fun = getattr(tune, fun_name)
if fun_name == 'grid_search' or fun_name == 'choice':
config[para['name']] = fun(para['search']['args'])
else:
config[para['name']] = fun(*para['search']['args'])
clean_inputs = get_clean_inputs(self.conf, self.task_graph,
self.inputs)
fun = get_search_fun(data_store, self.conf, clean_inputs)
analysis = tune.run(fun, **self.conf['tune'], config=config)
best = analysis.get_best_config(**self.conf['best'])
for key in best.keys():
self.conf['context'][key]['value'] = best[key]
output[self.OUTPUT_CONFIG] = self.conf
more_output = ContextCompositeNode.process(self, inputs)
output.update(more_output)
return output
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/ml/gridRandomSearchNode.py |
from greenflow.dataframe_flow import Node
import cudf
import dask_cudf
import xgboost as xgb
import dask
from greenflow.dataframe_flow.portsSpecSchema import (ConfSchema,
PortsSpecSchema)
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
import copy
from collections import OrderedDict
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
__all__ = ['TrainXGBoostNode', 'InferXGBoostNode']
class TrainXGBoostNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'in'
self.OUTPUT_PORT_NAME = 'model_out'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
}
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: ['xgboost.Booster', 'builtins.dict']
}
}
cols_required = {}
if 'columns' in self.conf and self.conf.get('include', True):
cols_required = {}
for col in self.conf['columns']:
cols_required[col] = None
meta_inports = {
self.INPUT_PORT_NAME: cols_required,
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
MetaDataSchema.META_DATA: {}
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def update(self):
TemplateNodeMixin.update(self)
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME in input_meta:
meta_inports = self.template_meta_setup().inports
meta_outports = self.template_meta_setup().outports
col_from_inport = input_meta[self.INPUT_PORT_NAME]
enums = [col for col in col_from_inport.keys()]
cols_output = {}
cols_output['train'] = OrderedDict()
cols_output['label'] = OrderedDict()
if 'columns' in self.conf:
if self.conf.get('include', True):
included_colums = self.conf['columns']
else:
included_colums = [col for col in enums
if col not in self.conf['columns']]
cols_required = {}
for col in included_colums:
if col in col_from_inport:
cols_required[col] = col_from_inport[col]
cols_output['train'][col] = col_from_inport[col]
else:
cols_required[col] = None
cols_output['train'][col] = None
if ('target' in self.conf and
self.conf['target'] in col_from_inport):
cols_required[self.conf['target']
] = col_from_inport[self.conf['target']]
cols_output['label'][
self.conf['target']] = col_from_inport[
self.conf['target']]
else:
cols_required[self.conf['target']] = None
cols_output['label'][
self.conf['target']] = None
meta_inports[self.INPUT_PORT_NAME] = cols_required
meta_outports[self.OUTPUT_PORT_NAME][
MetaDataSchema.META_DATA] = cols_output
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "XGBoost Node configure",
"type": "object",
"description": """train a XGBoost model for the input data,
""",
"properties": {
"num_of_rounds": {
"type": "number",
"description": """The number of rounds for boosting""",
"default": 100
},
"target": {
"type": "string",
"description": "the column used as dependent variable"
},
"columns": {
"type": "array",
"items": {
"type": "string",
},
"description": """columns in the input dataframe that are
considered as training features or not depending on
`include` flag."""
},
"include": {
"type": "boolean",
"description": """if set true, the `columns` are treated
as independent variables. if false, all dataframe columns
are independent variables except the `columns`""",
"default": True
},
"xgboost_parameters": {
"type": "object",
"description": "xgoobst parameters",
"properties": {
'eta': {
"type": "number",
"description": """Step size shrinkage used in
update to prevents overfitting. After each boosting
step, we can directly get the weights of new
features, and eta shrinks the feature weights to
make the boosting process more conservative.""",
"default": 0.3
},
'min_child_weight': {
"type": "number",
"description": """Minimum sum of instance weight
(hessian) needed in a child. If the tree partition
step results in a leaf node with the sum of
instance weight less than min_child_weight,
then the building process will give up further
partitioning. In linear regression task, this
simply corresponds to minimum number of instances
needed to be in each node. The larger
min_child_weight is, the more conservative
the algorithm will be.""",
"default": 1.0
},
'subsample': {
"type": "number",
"description": """Subsample ratio of the training
instances. Setting it to 0.5 means that XGBoost
would randomly sample half of the training data
prior to growing trees. and this will prevent
overfitting. Subsampling will occur once in every
boosting iteration.""",
"default": 1.0
},
'sampling_method': {
"type": "string",
"description": """The method to use to sample the
training instances.""",
"enum": ["uniform", "gradient_based"],
"default": "uniform",
},
'colsample_bytree': {
"type": "number",
"description": """is the subsample ratio of
columns when constructing each tree. Subsampling
occurs once for every tree constructed.""",
"default": 1.0
},
'colsample_bylevel': {
"type": "number",
"description": """is the subsample ratio of columns
for each level. Subsampling occurs once for every
new depth level reached in a tree. Columns are
subsampled from the set of columns chosen for the
current tree""",
"default": 1.0
},
'colsample_bynode': {
"type": "number",
"description": """is the subsample ratio of
columns for each node (split). Subsampling occurs
once every time a new split is evaluated. Columns
are subsampled from the set of columns chosen for
the current level.""",
"default": 1.0
},
'max_depth': {
"type": "integer",
"description": "Maximum depth of a tree.",
"default": 8
},
"max_leaves": {
"type": "integer",
"description": "maximum number of tree leaves",
"default": 2**8
},
"grow_policy": {
"type": "string",
"enum": ["depthwise", "lossguide"],
"description": """Controls a way new nodes are
added to the tree. Currently supported only if
tree_method is set to hist.""",
"default": "depthwise"
},
"gamma": {
"type": "number",
"description": """Minimum loss reduction required
to make a further partition on a leaf node of the
tree.""",
"default": 0.0
},
"lambda": {
"type": "number",
"description": """L2 regularization term on
weights. Increasing this value will make model
more conservative.""",
"default": 1.0
},
"alpha": {
"type": "number",
"description": """L1 regularization term on
weights. Increasing this value will make model more
conservative.""",
"default": 0.0
},
"tree_method": {
"type": "string",
"description": """The tree construction algorithm
used in XGBoost""",
"enum": ["auto", "exact", "approx", 'hist',
'gpu_hist'],
"default": "auto"
},
"single_precision_histogram": {
"type": "boolean",
"description": """for hist and `gpu_hist tree
method, Use single precision to build histograms
instead of double precision.""",
"default": False
},
"deterministic_histogram": {
"type": "boolean",
"description": """for gpu_hist tree method, Build
histogram on GPU deterministically. Histogram
building is not deterministic due to the
non-associative aspect of floating point summation.
We employ a pre-rounding routine to mitigate the
issue, which may lead to slightly lower accuracy.
Set to false to disable it.""",
"default": False
},
"objective": {
"type": "string",
"enum": ["reg:squarederror", "reg:squaredlogerror",
"reg:logistic", "reg:pseudohubererror",
"binary:logistic", "binary:logitraw",
"binary:hinge", "count:poisson",
"survival:cox", "survival:aft",
"aft_loss_distribution", "multi:softmax",
"multi:softprob", "rank:pairwise",
"rank:ndcg", "rank:map", "reg:gamma",
"reg:tweedie"
],
"description": """Specify the learning task and
the corresponding learning objective.""",
"default": "reg:squarederror"
}
}
}
},
"required": [],
}
ui = {}
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME in input_meta:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
enums = [col for col in col_from_inport.keys()]
json['properties']['columns']['items']['enum'] = enums
json['properties']['target']['enum'] = enums
return ConfSchema(json=json, ui=ui)
else:
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
dxgb_params = {
'max_depth': 8,
'max_leaves': 2 ** 8,
'tree_method': 'gpu_hist',
'objective': 'reg:squarederror',
}
# num_of_rounds = 100
if 'xgboost_parameters' in self.conf:
dxgb_params.update(self.conf['xgboost_parameters'])
input_df = inputs[self.INPUT_PORT_NAME]
if self.conf.get('include', True):
included_colums = self.conf['columns']
else:
included_colums = [col for col in input_df.columns
if col not in self.conf['columns']]
train_cols = [col for col in included_colums
if col != self.conf['target']]
# train_cols.sort()
if isinstance(input_df, dask_cudf.DataFrame):
# get the client
client = dask.distributed.client.default_client()
train = input_df[train_cols]
target = input_df[self.conf['target']]
dmatrix = xgb.dask.DaskDMatrix(client, train, label=target)
bst = xgb.dask.train(client, dxgb_params, dmatrix,
num_boost_round=self.conf["num_of_rounds"])
elif isinstance(input_df, cudf.DataFrame):
train = input_df[train_cols]
target = input_df[self.conf['target']]
dmatrix = xgb.DMatrix(train, label=target)
bst = xgb.train(dxgb_params, dmatrix,
num_boost_round=self.conf["num_of_rounds"])
return {self.OUTPUT_PORT_NAME: bst}
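# A minimal sketch of a `conf` this training node could receive, matching the
# keys read in process() above and the schema it exposes. The column names and
# parameter values below are hypothetical and for illustration only.
_example_xgboost_train_conf = {
    'target': 'sign',                 # label column (hypothetical name)
    'columns': ['ma10', 'rsi14'],     # feature columns (hypothetical names)
    'include': True,                  # treat 'columns' as the feature set
    'num_of_rounds': 100,             # boosting rounds passed to xgb.train
    'xgboost_parameters': {           # merged over the dxgb_params defaults
        'max_depth': 6,
        'eta': 0.1,
        'objective': 'binary:logistic',
    },
}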
class InferXGBoostNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'data_in'
self.INPUT_PORT_MODEL_NAME = 'model_in'
self.OUTPUT_PORT_NAME = 'out'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
self.INPUT_PORT_MODEL_NAME: {
port_type: ['xgboost.Booster', 'builtins.dict']
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:data_in}"
}
}
meta_inports = {
self.INPUT_PORT_NAME: {},
self.INPUT_PORT_MODEL_NAME: {}
}
predict = self.conf.get('prediction', 'predict')
out_cols = {predict: None}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
MetaDataSchema.META_DATA: out_cols
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def update(self):
TemplateNodeMixin.update(self)
input_meta = self.get_input_meta()
predict = self.conf.get('prediction', 'predict')
pred_contribs: bool = self.conf.get('pred_contribs', False)
meta_inports = self.template_meta_setup().inports
meta_outports = self.template_meta_setup().outports
if (self.INPUT_PORT_NAME in input_meta
and self.INPUT_PORT_MODEL_NAME in input_meta):
col_from_inport = input_meta[self.INPUT_PORT_NAME]
if 'train' in input_meta[self.INPUT_PORT_MODEL_NAME]:
required_cols = input_meta[
self.INPUT_PORT_MODEL_NAME]['train']
else:
required_cols = {}
if not pred_contribs:
col_from_inport[predict] = None # the type is not determined
else:
col_from_inport = {}
for i in range(len(required_cols)+1):
col_from_inport[i] = None
meta_inports[self.INPUT_PORT_NAME] = required_cols
meta_outports[self.OUTPUT_PORT_NAME][
MetaDataSchema.META_DATA] = col_from_inport
elif (self.INPUT_PORT_NAME not in input_meta and
self.INPUT_PORT_MODEL_NAME in input_meta):
if 'train' in input_meta[self.INPUT_PORT_MODEL_NAME]:
required_cols = input_meta[
self.INPUT_PORT_MODEL_NAME]['train']
else:
required_cols = {}
col_from_inport = copy.copy(required_cols)
if not pred_contribs:
col_from_inport[predict] = None # the type is not determined
else:
col_from_inport = {}
for i in range(len(required_cols)+1):
col_from_inport[i] = None
meta_inports[self.INPUT_PORT_NAME] = required_cols
meta_outports[self.OUTPUT_PORT_NAME][
MetaDataSchema.META_DATA] = col_from_inport
elif (self.INPUT_PORT_NAME in input_meta and
self.INPUT_PORT_MODEL_NAME not in input_meta):
col_from_inport = input_meta[self.INPUT_PORT_NAME]
if not pred_contribs:
col_from_inport[predict] = None # the type is not determined
meta_outports[self.OUTPUT_PORT_NAME][
MetaDataSchema.META_DATA] = col_from_inport
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "XGBoost Inference Node configure",
"type": "object",
"description": """make predictions for all the input
data points""",
"properties": {
"prediction": {
"type": "string",
"description": "the column name for prediction",
"default": "predict"
},
"pred_contribs": {
"type": "boolean",
"description":
"""
When this is True the output will be a matrix of size
(nsample, nfeats + 1) with each record indicating the
feature contributions (SHAP values) for that prediction.
The sum of all feature contributions is equal to the raw
untransformed margin value of the prediction. Note the
final column is the bias term.
""",
"default": False
}
},
"required": [],
}
ui = {}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
input_df = inputs[self.INPUT_PORT_NAME]
bst_model = inputs[self.INPUT_PORT_MODEL_NAME]
input_meta = self.get_input_meta()
required_cols = input_meta[
self.INPUT_PORT_MODEL_NAME]['train']
required_cols = list(required_cols.keys())
# required_cols.sort()
predict_col = self.conf.get('prediction', 'predict')
pred_contribs: bool = self.conf.get('pred_contribs', False)
if isinstance(input_df, dask_cudf.DataFrame):
# get the client
client = dask.distributed.client.default_client()
dtrain = xgb.dask.DaskDMatrix(client, input_df[required_cols])
prediction = xgb.dask.predict(client,
bst_model,
dtrain,
pred_contribs=pred_contribs)
pred_df = dask_cudf.from_dask_dataframe(
prediction.to_dask_dataframe())
pred_df.index = input_df.index
if not pred_contribs:
input_df[predict_col] = pred_df
else:
input_df = pred_df
else:
infer_dmatrix = xgb.DMatrix(input_df[required_cols])
if not pred_contribs:
prediction = cudf.Series(bst_model.predict(infer_dmatrix),
nan_as_null=False,
index=input_df.index
)
input_df[predict_col] = prediction
else:
prediction = cudf.DataFrame(bst_model.predict(
infer_dmatrix, pred_contribs=pred_contribs),
index=input_df.index)
input_df = prediction
return {self.OUTPUT_PORT_NAME: input_df}
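# A small illustrative conf for the inference node above; the values shown are
# the schema defaults. With pred_contribs=True the node replaces the input
# frame with a SHAP matrix of len(training columns) + 1 columns (the last one
# being the bias term) instead of appending a single prediction column.
_example_xgboost_infer_conf = {
    'prediction': 'predict',
    'pred_contribs': False,
}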
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/ml/xgboostNode.py |
from cuml import ForestInference
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow.portsSpecSchema import (ConfSchema,
PortsSpecSchema)
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.util import get_file_path
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
__all__ = ['ForestInferenceNode']
class ForestInferenceNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.delayed_process = True
self.INPUT_PORT_NAME = 'data_in'
self.INPUT_PORT_MODEL_NAME = 'model_file'
self.OUTPUT_PORT_NAME = 'out'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
self.INPUT_PORT_MODEL_NAME: {
port_type: ['builtins.str']
}
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:data_in}"
}
}
meta_inports = {
self.INPUT_PORT_NAME: {},
self.INPUT_PORT_MODEL_NAME: {}
}
predict = self.conf.get('prediction', 'predict')
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_ADDITION,
MetaDataSchema.META_REF_INPUT: self.INPUT_PORT_NAME,
MetaDataSchema.META_DATA: {predict: None}
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def update(self):
TemplateNodeMixin.update(self)
input_meta = self.get_input_meta()
meta_inports = self.template_meta_setup().inports
if self.INPUT_PORT_MODEL_NAME in input_meta:
if 'train' in input_meta[self.INPUT_PORT_MODEL_NAME]:
required_cols = input_meta[self.INPUT_PORT_MODEL_NAME]['train']
else:
required_cols = {}
meta_inports[self.INPUT_PORT_NAME] = required_cols
else:
if self.INPUT_PORT_NAME in input_meta:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
else:
col_from_inport = {}
enums = [col for col in col_from_inport.keys()]
if 'columns' in self.conf:
if self.conf.get('include', True):
included_colums = self.conf['columns']
else:
included_colums = [col for col in enums
if col not in self.conf['columns']]
for col in included_colums:
if col in col_from_inport:
meta_inports[
self.INPUT_PORT_NAME][col] = col_from_inport[col]
else:
meta_inports[self.INPUT_PORT_NAME][col] = None
self.template_meta_setup(
in_ports=meta_inports,
out_ports=None
)
def conf_schema(self):
json = {
"title": "Forest Inferencing Node",
"type": "object",
"description": """ForestInference provides GPU-accelerated inference
(prediction) for random forest and boosted decision tree models.
This module does not support training models. Rather, users should
train a model in another package and save it in a
treelite-compatible format. (See https://github.com/dmlc/treelite)
Currently, LightGBM, XGBoost and SKLearn GBDT and random forest
models are supported.""",
"properties": {
"columns": {
"type": "array",
"items": {
"type": "string",
},
"description": """columns in the input dataframe that
are considered as input features or not depending on `include` flag."""
},
"include": {
"type": "boolean",
"description": """if set true, the `columns` are treated as
input features if false, all dataframe columns are input
features except the `columns`""",
"default": True
},
"file": {
"type": "string",
"description": """The saved model file"""
},
"prediction": {
"type": "string",
"description": "the column name for prediction",
"default": "predict"
},
"model_type": {
"type": "string",
"description": """Format of the saved treelite model to be
load""",
"enum": ["xgboost", "lightgbm"],
"default": "xgboost"
},
},
"required": ['file'],
}
ui = {
"file": {"ui:widget": "FileSelector"},
}
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME in input_meta:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
enums = [col for col in col_from_inport.keys()]
json['properties']['columns']['items']['enum'] = enums
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
input_meta = self.get_input_meta()
predict_col = self.conf.get('prediction', 'predict')
data_df = inputs[self.INPUT_PORT_NAME]
if self.INPUT_PORT_MODEL_NAME in input_meta:
# use external information instead of conf
filename = get_file_path(inputs[self.INPUT_PORT_MODEL_NAME])
train_cols = input_meta[self.INPUT_PORT_MODEL_NAME]['train']
train_cols = list(train_cols.keys())
else:
# use the conf information
filename = get_file_path(self.conf['file'])
if 'columns' in self.conf:
if self.conf.get('include', True):
train_cols = self.conf['columns']
else:
train_cols = [col for col in data_df.columns
if col not in self.conf['columns']]
# train_cols.sort()
fm = ForestInference.load(filename,
model_type=self.conf.get("model_type",
"xgboost"))
prediction = fm.predict(data_df[train_cols])
prediction.index = data_df.index
data_df[predict_col] = prediction
return {self.OUTPUT_PORT_NAME: data_df}
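# A minimal sketch of a conf for ForestInferenceNode, assuming the model was
# trained and saved by XGBoost elsewhere; the file path and the feature column
# names below are hypothetical.
_example_forest_infer_conf = {
    'file': 'models/xgb_model.model',   # treelite-compatible model (hypothetical path)
    'model_type': 'xgboost',
    'columns': ['ma10', 'rsi14'],       # feature columns (hypothetical names)
    'include': True,
    'prediction': 'predict',
}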
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/ml/forestInference.py |
from greenflow.dataframe_flow import Node, PortsSpecSchema
from greenflow.dataframe_flow.portsSpecSchema import ConfSchema
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
__all__ = ["OneHotEncodingNode"]
class OneHotEncodingNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'in'
self.OUTPUT_PORT_NAME = 'out'
self.delayed_process = True
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:in}"
}
}
cols_required = {}
addition = {}
for col in self.conf:
for cat in col['cats']:
name = col.get('prefix')+col.get('prefix_sep', '_')+str(cat)
addition.update({name: col.get('dtype', 'float64')})
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_ADDITION,
MetaDataSchema.META_REF_INPUT: self.INPUT_PORT_NAME,
MetaDataSchema.META_DATA: addition
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def update(self):
TemplateNodeMixin.update(self)
meta_inports = self.template_meta_setup().inports
required = meta_inports[self.INPUT_PORT_NAME]
if len(self.conf) > 0:
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME not in input_meta:
for col in self.conf:
required[col['column']] = None
else:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
for col in self.conf:
col_name = col['column']
if col_name in col_from_inport:
required[col_name] = col_from_inport[col_name]
else:
required[col_name] = None
meta_inports[self.INPUT_PORT_NAME] = required
self.template_meta_setup(in_ports=meta_inports, out_ports=None)
def conf_schema(self):
json = {
"title": "One Hot Encoding configure",
"type": "array",
"description": """Encode the categorical variable by One-hot encoding
""",
"items": {
"type": "object",
"properties": {
"column": {
"type": "string",
"description": """the source column with binary
encoding for the data."""
},
"prefix": {
"type": "string",
"description": "the new column name prefix."
},
"cats": {
"type": "array",
'items': {
"type": "integer"
},
"description": "an arrya of categories"
},
"prefix_sep": {
"type": "string",
"description": """the separator between the prefix
and the category.""",
"default": "_"
},
"dtype": {
"type": "string",
"description": "the dtype for the outputs",
"enum": ["float64", "float32", "int64", "int32"],
"default": "float64"
}
}
}
}
ui = {
}
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME in input_meta:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
enums = [col for col in col_from_inport.keys()]
json['items']['properties']['column']['enum'] = enums
return ConfSchema(json=json, ui=ui)
else:
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
encode the categorical variables to one hot
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
dataframe
"""
input_df = inputs[self.INPUT_PORT_NAME]
for col in self.conf:
input_df = input_df.one_hot_encoding(**col)
return {self.OUTPUT_PORT_NAME: input_df}
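# An illustrative conf for OneHotEncodingNode: for a hypothetical categorical
# column 'sector' with categories 0-2, the node would add the float64 columns
# 'sector_0', 'sector_1' and 'sector_2' (prefix + prefix_sep + category), as
# declared in the meta setup above.
_example_onehot_conf = [
    {
        'column': 'sector',        # hypothetical source column
        'prefix': 'sector',
        'prefix_sep': '_',
        'cats': [0, 1, 2],
        'dtype': 'float64',
    },
]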
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/transform/onehotEncoding.py |
from greenflow.dataframe_flow.portsSpecSchema import (PortsSpecSchema,
ConfSchema)
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
__all__ = ['SortNode']
class SortNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'in'
self.OUTPUT_PORT_NAME = 'out'
self.delayed_process = True
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:in}"
}
}
cols_required = {}
addition = {}
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_ADDITION,
MetaDataSchema.META_REF_INPUT: self.INPUT_PORT_NAME,
MetaDataSchema.META_DATA: addition
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def update(self):
TemplateNodeMixin.update(self)
meta_inports = self.template_meta_setup().inports
required = meta_inports[self.INPUT_PORT_NAME]
if 'keys' in self.conf:
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME not in input_meta:
for col in self.conf['keys']:
required[col] = None
else:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
for col in self.conf['keys']:
if col in col_from_inport:
required[col] = col_from_inport[col]
else:
required[col] = None
meta_inports[self.INPUT_PORT_NAME] = required
self.template_meta_setup(in_ports=meta_inports, out_ports=None)
def conf_schema(self):
json = {
"title": "Sort Column configure",
"type": "object",
"description": """Sort the input frames based on a
list of columns, which are defined in the
`keys` of the node's conf""",
"properties": {
"keys": {
"type": "array",
"items": {
"type": "string"
},
"description": """array of columns to sort"""
}
},
"required": ["keys"],
}
ui = {
"keys": {
"items": {
"ui:widget": "text"
}
},
}
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME in input_meta:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
enums = [col for col in col_from_inport.keys()]
json['properties']['keys']['items']['enum'] = enums
ui = {}
return ConfSchema(json=json, ui=ui)
else:
ui = {
"column": {"ui:widget": "text"}
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
Sort the input frames based on a list of columns, which are defined
in the `keys` of the node's conf
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
dataframe
"""
input_df = inputs[self.INPUT_PORT_NAME]
return {self.OUTPUT_PORT_NAME: input_df.sort_values(self.conf['keys'])}
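# An illustrative conf for SortNode (hypothetical column names): rows are
# sorted by 'asset' first and then by 'datetime'.
_example_sort_conf = {'keys': ['asset', 'datetime']}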
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/transform/sortNode.py |
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow.portsSpecSchema import (PortsSpecSchema,
ConfSchema)
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
__all__ = ['RenameNode']
class RenameNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'in'
self.OUTPUT_PORT_NAME = 'out'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:in}"
}
}
cols_required = {}
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
MetaDataSchema.META_DATA: {}
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def update(self):
TemplateNodeMixin.update(self)
retention = {}
if 'new' in self.conf and 'old' in self.conf:
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME not in input_meta:
retention = {}
else:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
oldType = col_from_inport[self.conf['old']]
del col_from_inport[self.conf['old']]
col_from_inport[self.conf['new']] = oldType
retention = col_from_inport
meta_outports = self.template_meta_setup().outports
meta_outports[self.OUTPUT_PORT_NAME][MetaDataSchema.META_DATA] = \
retention
self.template_meta_setup(in_ports=None, out_ports=meta_outports)
def conf_schema(self):
json = {
"title": "Rename Node configure",
"type": "object",
"description": """Rename the column name in the datafame from `old` to `new`
defined in the node's conf""",
"properties": {
"old": {
"type": "string",
"description": """the old column name that need to be
replaced"""
},
"new": {
"type": "string",
"description": "the new column name"
}
},
"required": ["old", "new"],
}
ui = {
}
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME in input_meta:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
enums = [col for col in col_from_inport.keys()]
json['properties']['old']['enum'] = enums
return ConfSchema(json=json, ui=ui)
else:
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
        Rename the column name in the dataframe from `old` to `new` defined in
the node's conf
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
dataframe
"""
input_df = inputs[self.INPUT_PORT_NAME]
new_column = self.conf['new']
old_column = self.conf['old']
return {self.OUTPUT_PORT_NAME: input_df.rename(columns={
old_column: new_column})}
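# An illustrative conf for RenameNode (hypothetical column names): the column
# 'close' is renamed to 'price' in the output dataframe.
_example_rename_conf = {'old': 'close', 'new': 'price'}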
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/transform/renameNode.py |
from greenflow.dataframe_flow import Node, PortsSpecSchema
from greenflow.dataframe_flow.portsSpecSchema import ConfSchema
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
__all__ = ["AddSignIndicatorNode"]
class AddSignIndicatorNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'in'
self.OUTPUT_PORT_NAME = 'out'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:in}"
}
}
name = self.conf.get('sign', 'sign')
addition = {name: "int64"}
cols_required = {}
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_ADDITION,
MetaDataSchema.META_REF_INPUT: self.INPUT_PORT_NAME,
MetaDataSchema.META_DATA: addition
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def update(self):
TemplateNodeMixin.update(self)
meta_inports = self.template_meta_setup().inports
required = meta_inports[self.INPUT_PORT_NAME]
if 'column' in self.conf:
col_name = self.conf['column']
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME not in input_meta:
required[col_name] = None
else:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
if col_name in col_from_inport:
required[col_name] = col_from_inport[col_name]
else:
required[col_name] = None
meta_inports[self.INPUT_PORT_NAME] = required
self.template_meta_setup(in_ports=meta_inports, out_ports=None)
def conf_schema(self):
json = {
"title": "Add Sign Indicator configure",
"type": "object",
"description": """If the number is bigger than zero,
the sign is 1, otherwise the sign is 0
""",
"properties": {
"column": {
"type": "string",
"description": """the column that is used to calcuate
sign"""
},
"sign": {
"type": "string",
"description": "the sign column name",
"default": "sign"
}
},
"required": ["column"],
}
ui = {
}
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME in input_meta:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
enums = [col for col in col_from_inport.keys()]
json['properties']['column']['enum'] = enums
return ConfSchema(json=json, ui=ui)
else:
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
        Add a sign indicator column (named by `sign` in the conf) that is 1
        when the value of `column` is greater than zero and 0 otherwise
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
dataframe
"""
input_df = inputs[self.INPUT_PORT_NAME]
name = self.conf.get('sign', 'sign')
input_df[name] = (input_df[self.conf['column']] > 0).astype('int64')
return {self.OUTPUT_PORT_NAME: input_df}
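# An illustrative conf for AddSignIndicatorNode (hypothetical column name):
# adds an int64 column 'sign' that is 1 where 'returns' > 0 and 0 otherwise.
_example_sign_conf = {'column': 'returns', 'sign': 'sign'}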
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/transform/addSignIndicator.py |
from greenflow.dataframe_flow import Node, PortsSpecSchema
from greenflow.dataframe_flow.portsSpecSchema import ConfSchema
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
__all__ = ['DropNode']
class DropNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
port_type = PortsSpecSchema.port_type
self.INPUT_PORT_NAME = 'in'
self.OUTPUT_PORT_NAME = 'out'
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:in}"
}
}
meta_inports = {
self.INPUT_PORT_NAME: {}
}
dropped = {}
for k in self.conf.get('columns', {}):
dropped[k] = None
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_DELETION,
MetaDataSchema.META_REF_INPUT: self.INPUT_PORT_NAME,
MetaDataSchema.META_DATA: dropped
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def update(self):
TemplateNodeMixin.update(self)
meta_inports = self.template_meta_setup().inports
required = meta_inports[self.INPUT_PORT_NAME]
if 'columns' in self.conf:
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME not in input_meta:
for col in self.conf['columns']:
required[col] = None
else:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
for col in self.conf['columns']:
if col in col_from_inport:
required[col] = col_from_inport[col]
else:
required[col] = None
meta_inports[self.INPUT_PORT_NAME] = required
self.template_meta_setup(in_ports=meta_inports, out_ports=None)
def conf_schema(self):
json = {
"title": "Drop Column configure",
"type": "object",
"description": """Drop a few columns from the dataframe""",
"properties": {
"columns": {
"type": "array",
"items": {
"type": "string"
},
"description": """array of columns to be droped"""
}
},
"required": ["columns"],
}
ui = {
"columns": {
"items": {
"ui:widget": "text"
}
},
}
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME in input_meta:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
enums = [col for col in col_from_inport.keys()]
json['properties']['columns']['items']['enum'] = enums
ui = {}
return ConfSchema(json=json, ui=ui)
else:
ui = {
"column": {"ui:widget": "text"}
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
Drop a few columns from the dataframe that are defined in the `columns`
        in the node's conf
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
dataframe
"""
input_df = inputs[self.INPUT_PORT_NAME]
column_names = self.conf['columns']
return {self.OUTPUT_PORT_NAME: input_df.drop(column_names, axis=1)}
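# An illustrative conf for DropNode (hypothetical column names): the listed
# columns are removed from the output dataframe.
_example_drop_conf = {'columns': ['open', 'high', 'low']}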
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/transform/dropNode.py |
from .averageNode import AverageNode
from .assetFilterNode import AssetFilterNode
from .leftMergeNode import LeftMergeNode
from .returnFeatureNode import ReturnFeatureNode
from .sortNode import SortNode
from .datetimeFilterNode import DatetimeFilterNode
from .minNode import MinNode
from .maxNode import MaxNode
from .valueFilterNode import ValueFilterNode
from .renameNode import RenameNode
from .assetIndicatorNode import AssetIndicatorNode
from .dropNode import DropNode
from .indicatorNode import IndicatorNode
from .normalizationNode import NormalizationNode
from .addSignIndicator import AddSignIndicatorNode
from .linearEmbedding import LinearEmbeddingNode
from .onehotEncoding import OneHotEncodingNode
__all__ = ["AverageNode", "AssetFilterNode", "LeftMergeNode",
"ReturnFeatureNode", "SortNode",
"DatetimeFilterNode", "MinNode", "MaxNode",
"ValueFilterNode", "RenameNode", "AssetIndicatorNode",
"DropNode", "IndicatorNode", "NormalizationNode",
"AddSignIndicatorNode", "LinearEmbeddingNode",
"OneHotEncodingNode"]
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/transform/__init__.py |
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow.portsSpecSchema import (ConfSchema,
PortsSpecSchema)
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
__all__ = ["AssetFilterNode"]
class AssetFilterNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'stock_in'
self.OUTPUT_PORT_NAME = 'stock_out'
self.INPUT_MAP_NAME = 'name_map'
self.OUTPUT_ASSET_NAME = 'stock_name'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
self.INPUT_MAP_NAME: {
port_type: [
"greenflow_gquant_plugin.dataloader.stockMap.StockMap"
]
}
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:stock_in}"
},
self.OUTPUT_ASSET_NAME: {
port_type: ['builtins.str']
}
}
cols_required = {"asset": "int64"}
meta_inports = {
self.INPUT_PORT_NAME: cols_required,
self.INPUT_MAP_NAME: {}
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_ADDITION,
MetaDataSchema.META_REF_INPUT: self.INPUT_PORT_NAME,
MetaDataSchema.META_DATA: {}
},
self.OUTPUT_ASSET_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
MetaDataSchema.META_DATA: {}
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def update(self):
TemplateNodeMixin.update(self)
name = self._find_asset_name()
asset_retension = {"asset_name": name}
meta_outports = self.template_meta_setup().outports
meta_outports[self.OUTPUT_ASSET_NAME][
MetaDataSchema.META_DATA] = asset_retension
self.template_meta_setup(
in_ports=None,
out_ports=meta_outports
)
def _find_asset_name(self):
name = ""
input_meta = self.get_input_meta()
if self.outport_connected(self.OUTPUT_ASSET_NAME):
if self.INPUT_MAP_NAME in input_meta and 'asset' in self.conf:
col_from_inport = input_meta[self.INPUT_MAP_NAME]
enums = col_from_inport['asset']
enumNames = col_from_inport['asset_name']
found = False
for i, name in zip(enums, enumNames):
if i == self.conf['asset']:
found = True
break
if not found:
name = ""
return name
def conf_schema(self):
json = {
"title": "Asset Filter Node configure",
"type": "object",
"description": "select the asset based on asset id",
"properties": {
"asset": {
"type": "number",
"description": "asset id number"
}
},
"required": ["asset"],
}
ui = {
}
input_meta = self.get_input_meta()
if self.INPUT_MAP_NAME in input_meta:
col_from_inport = input_meta[self.INPUT_MAP_NAME]
enums = col_from_inport['asset']
enumNames = col_from_inport['asset_name']
json['properties']['asset']['enum'] = enums
json['properties']['asset']['enumNames'] = enumNames
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
select the asset based on asset id, which is defined in `asset` in the
nodes' conf
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
dataframe
"""
input_df = inputs[self.INPUT_PORT_NAME]
output_df = input_df.query('asset==%s' % self.conf["asset"])
output = {self.OUTPUT_PORT_NAME: output_df}
if self.outport_connected(self.OUTPUT_ASSET_NAME):
name = self._find_asset_name()
output.update({self.OUTPUT_ASSET_NAME: name})
return output
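# An illustrative conf for AssetFilterNode (hypothetical asset id): only rows
# whose 'asset' column equals 22123 are kept, i.e. the generated query string
# is 'asset==22123'.
_example_asset_filter_conf = {'asset': 22123}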
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/transform/assetFilterNode.py |
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow.portsSpecSchema import (ConfSchema,
PortsSpecSchema)
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
__all__ = ['ValueFilterNode']
class ValueFilterNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'in'
self.OUTPUT_PORT_NAME = 'out'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:in}"
}
}
cols_required = {"asset": "int64"}
addition = {}
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_ADDITION,
MetaDataSchema.META_REF_INPUT: self.INPUT_PORT_NAME,
MetaDataSchema.META_DATA: addition
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def update(self):
TemplateNodeMixin.update(self)
meta_inports = self.template_meta_setup().inports
required = meta_inports[self.INPUT_PORT_NAME]
if len(self.conf) > 0:
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME not in input_meta:
for col in self.conf:
required[col['column']] = None
else:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
for col in self.conf:
col_name = col['column']
if col_name in col_from_inport:
required[col_name] = col_from_inport[col_name]
else:
required[col_name] = None
meta_inports[self.INPUT_PORT_NAME] = required
self.template_meta_setup(in_ports=meta_inports, out_ports=None)
def conf_schema(self):
json = {
"title": "Value Filter Node configure",
"type": "array",
"description": """Filter the dataframe based on a list of
min/max values.""",
"items": {
"type": "object",
"properties": {
"column": {
"type": "string",
"description": "dataframe column to be filered on"
},
"min": {
"type": "number",
"description": "min value, inclusive"
},
"max": {
"type": "number",
"description": "max value, inclusive"
}
}
}
}
ui = {}
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME in input_meta:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
enums = [col for col in col_from_inport.keys()]
json['items']['properties']['column']['enum'] = enums
return ConfSchema(json=json, ui=ui)
else:
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
filter the dataframe based on a list of min/max values. The node's
conf is a list of column criteria. It defines the column name in
        `column`, the min value in `min` and the max value in `max`.
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
dataframe
"""
input_df = inputs[self.INPUT_PORT_NAME]
str_list = []
for column_item in self.conf:
column_name = column_item['column']
if 'min' in column_item:
minValue = column_item['min']
str_item = '%s >= %f' % (column_name, minValue)
str_list.append(str_item)
if 'max' in column_item:
maxValue = column_item['max']
str_item = '%s <= %f' % (column_name, maxValue)
str_list.append(str_item)
input_df = input_df.query(" and ".join(str_list))
return {self.OUTPUT_PORT_NAME: input_df}
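# An illustrative conf for ValueFilterNode (hypothetical columns and bounds):
# the two criteria below are joined into the query string
# 'volume >= 50.000000 and close <= 100.000000'.
_example_value_filter_conf = [
    {'column': 'volume', 'min': 50},
    {'column': 'close', 'max': 100},
]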
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/transform/valueFilterNode.py |
from greenflow.dataframe_flow import (
Node, ConfSchema, PortsSpecSchema, MetaDataSchema)
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
__all__ = ['MaxNode']
class MaxNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'in'
self.OUTPUT_PORT_NAME = 'out'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:in}"
}
}
cols_required = {"asset": "int64"}
if 'column' in self.conf:
retention = {self.conf['column']: "float64",
"asset": "int64"}
else:
retention = {"asset": "int64"}
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
MetaDataSchema.META_DATA: retention
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def update(self):
TemplateNodeMixin.update(self)
meta_inports = self.template_meta_setup().inports
required = meta_inports[self.INPUT_PORT_NAME]
if 'column' in self.conf:
col_name = self.conf['column']
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME not in input_meta:
required[col_name] = None
else:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
if col_name in col_from_inport:
required[col_name] = col_from_inport[col_name]
else:
required[col_name] = None
meta_inports[self.INPUT_PORT_NAME] = required
self.template_meta_setup(in_ports=meta_inports, out_ports=None)
def conf_schema(self):
json = {
"title": "Maximum Value Node configure",
"type": "object",
"description": "Compute the maximum value of the key column",
"properties": {
"column": {
"type": "string",
"description": "column to calculate the maximum value"
}
},
"required": ["column"],
}
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME in input_meta:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
enums = [col for col in col_from_inport.keys()]
json['properties']['column']['enum'] = enums
ui = {}
return ConfSchema(json=json, ui=ui)
else:
ui = {
"column": {"ui:widget": "text"}
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
Compute the maximum value of the key column which is defined in the
`column` of the node's conf
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
dataframe
"""
input_df = inputs[self.INPUT_PORT_NAME]
max_column = self.conf['column']
volume_df = input_df[[max_column,
"asset"]].groupby(["asset"]).max().reset_index()
volume_df.columns = ['asset', max_column]
return {self.OUTPUT_PORT_NAME: volume_df}
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/transform/maxNode.py |
import cupy as cp
import copy
from collections import OrderedDict
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow.portsSpecSchema import (ConfSchema,
PortsSpecSchema)
from greenflow.dataframe_flow.metaSpec import MetaData
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
from .data_obj import ProjectionData
__all__ = ['LinearEmbeddingNode']
SPECIAL_OUTPUT_DIM_COL = 'OUTPUT_DIM_23b1c5ce-e0bf-11ea-afcf-80e82cc76d44'
class LinearEmbeddingNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'df_in'
self.OUTPUT_PORT_NAME = 'df_out'
self.INPUT_PROJ_NAME = 'proj_data_in'
self.OUTPUT_PROJ_NAME = 'proj_data_out'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
self.INPUT_PROJ_NAME: {
port_type: [
"greenflow_gquant_plugin.transform.data_obj.ProjectionData"
]
}
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:df_in}"
},
self.OUTPUT_PROJ_NAME: {
port_type: [
"greenflow_gquant_plugin.transform.data_obj.ProjectionData"
]
},
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
def meta_setup(self):
required = {
self.INPUT_PORT_NAME: {},
self.INPUT_PROJ_NAME: {}
}
if 'columns' in self.conf and self.conf.get('include', True):
cols_required = {}
for col in self.conf['columns']:
cols_required[col] = None
required = {
self.INPUT_PORT_NAME: cols_required,
self.INPUT_PROJ_NAME: cols_required
}
output_cols = {
self.OUTPUT_PORT_NAME: required[self.INPUT_PORT_NAME],
self.OUTPUT_PROJ_NAME: required[
self.INPUT_PROJ_NAME]
}
input_meta = self.get_input_meta()
if (self.INPUT_PROJ_NAME in input_meta and
self.INPUT_PORT_NAME in input_meta):
cols_required = copy.copy(input_meta[self.INPUT_PROJ_NAME])
required = {
self.INPUT_PORT_NAME: cols_required,
self.INPUT_PROJ_NAME: cols_required
}
col_from_inport = input_meta[self.INPUT_PORT_NAME]
if SPECIAL_OUTPUT_DIM_COL in cols_required:
out_dim = cols_required[SPECIAL_OUTPUT_DIM_COL]
del cols_required[SPECIAL_OUTPUT_DIM_COL]
cols = ['em'+str(i) for i in range(out_dim)]
for col in cols:
col_from_inport[col] = None
output_cols = {
self.OUTPUT_PORT_NAME: col_from_inport,
self.OUTPUT_PROJ_NAME: cols_required
}
metadata = MetaData(inports=required, outports=output_cols)
return metadata
elif (self.INPUT_PROJ_NAME in input_meta and
self.INPUT_PORT_NAME not in input_meta):
cols_required = copy.copy(input_meta[self.INPUT_PROJ_NAME])
required = {
self.INPUT_PORT_NAME: cols_required,
self.INPUT_PROJ_NAME: cols_required
}
output = copy.copy(cols_required)
if SPECIAL_OUTPUT_DIM_COL in cols_required:
out_dim = cols_required[SPECIAL_OUTPUT_DIM_COL]
del cols_required[SPECIAL_OUTPUT_DIM_COL]
cols = ['em'+str(i) for i in range(out_dim)]
for col in cols:
output[col] = None
output_cols = {
self.OUTPUT_PORT_NAME: output,
self.OUTPUT_PROJ_NAME: cols_required
}
metadata = MetaData(inports=required, outports=output_cols)
return metadata
elif (self.INPUT_PROJ_NAME not in input_meta and
self.INPUT_PORT_NAME in input_meta):
col_from_inport = input_meta[self.INPUT_PORT_NAME]
enums = [col for col in col_from_inport.keys()]
if 'columns' in self.conf:
if self.conf.get('include', True):
included_colums = self.conf['columns']
else:
included_colums = [col for col in enums
if col not in self.conf['columns']]
cols_required = OrderedDict()
for col in included_colums:
if col in col_from_inport:
cols_required[col] = col_from_inport[col]
else:
cols_required[col] = None
required = {
self.INPUT_PORT_NAME: cols_required,
self.INPUT_PROJ_NAME: cols_required
}
col_dict = ['em'+str(i) for i in range(
self.conf['out_dimension'])]
for col in col_dict:
col_from_inport[col] = None
proj_out = copy.copy(cols_required)
proj_out[SPECIAL_OUTPUT_DIM_COL] = self.conf['out_dimension']
output_cols = {
self.OUTPUT_PORT_NAME: col_from_inport,
self.OUTPUT_PROJ_NAME: proj_out
}
metadata = MetaData(inports=required, outports=output_cols)
return metadata
metadata = MetaData(inports=required, outports=output_cols)
return metadata
def conf_schema(self):
json = {
"title": "Linear Embeding configure",
"type": "object",
"description": """Project the features randomly and linearly to a
space of different dimension. It generates the random projection
            matrix of size feature_dim x out_dimension and takes the dot product
with the input dataframe""",
"properties": {
"columns": {
"type": "array",
"description": """an array of columns that need to
be normalized, or excluded from normalization depending
on the `incldue` flag state""",
"items": {
"type": "string"
}
},
"include": {
"type": "boolean",
"description": """if set true, the `columns` need to be
normalized. if false, all dataframe columns except the
`columns` need to be normalized""",
"default": True
},
"out_dimension": {
"type": "integer",
"minimum": 0,
"description": """the projected dimension size"""
},
"seed": {
"type": "integer",
"description": """the seed number for random projection"""
}
},
"required": [],
}
ui = {}
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME in input_meta:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
enums = [col for col in col_from_inport.keys()]
json['properties']['columns']['items']['enum'] = enums
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
        project the selected columns with a random linear projection matrix,
        adding the embedded columns 'em0', 'em1', ... to the dataframe
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
dataframe
"""
input_df = inputs[self.INPUT_PORT_NAME]
if self.INPUT_PROJ_NAME in inputs:
data_in = inputs[self.INPUT_PROJ_NAME].data
input_meta = self.get_input_meta()
col_from_inport = input_meta[self.INPUT_PROJ_NAME]
proj_data = data_in
cols = []
            # it has the required columns that are used to do the mapping
for col in col_from_inport.keys():
if col != SPECIAL_OUTPUT_DIM_COL:
cols.append(col)
else:
if self.conf.get('include', True):
cols = self.conf['columns']
else:
cols = input_df.columns.difference(
self.conf['columns']).values.tolist()
# need to generate the random projection
if 'seed' in self.conf:
cp.random.seed(self.conf['seed'])
proj_data = cp.random.rand(len(cols), self.conf['out_dimension'])
# cols.sort()
# print(self.uid, cols)
# print(self.uid, proj_data)
output_matrix = input_df[cols].values.dot(proj_data)
col_dict = {'em'+str(i): output_matrix[:, i]
for i in range(proj_data.shape[1])}
# output_df = input_df[input_df.columns.difference(cols)]
output_df = input_df.assign(**col_dict)
output = {}
if self.outport_connected(self.OUTPUT_PORT_NAME):
output.update({self.OUTPUT_PORT_NAME: output_df})
if self.outport_connected(self.OUTPUT_PROJ_NAME):
payload = ProjectionData(proj_data)
output.update({self.OUTPUT_PROJ_NAME: payload})
return output
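# An illustrative conf for LinearEmbeddingNode (hypothetical feature names):
# the two listed columns are projected onto 4 random dimensions, adding the
# columns 'em0'..'em3'; the projection matrix is also emitted on the
# 'proj_data_out' port so the same mapping can be reused downstream.
_example_embedding_conf = {
    'columns': ['feat_a', 'feat_b'],
    'include': True,
    'out_dimension': 4,
    'seed': 42,
}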
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/transform/linearEmbedding.py |
from greenflow.dataframe_flow import Node, PortsSpecSchema
from greenflow.dataframe_flow.portsSpecSchema import ConfSchema
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
__all__ = ['AverageNode']
class AverageNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'stock_in'
self.OUTPUT_PORT_NAME = 'stock_out'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:stock_in}"
}
}
cols_required = {"asset": "int64"}
if 'column' in self.conf:
retention = {
"asset": "int64",
self.conf['column']: "float64",
}
else:
retention = {"asset": "int64"}
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
MetaDataSchema.META_DATA: retention
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def update(self):
TemplateNodeMixin.update(self)
meta_inports = self.template_meta_setup().inports
required = meta_inports[self.INPUT_PORT_NAME]
if 'column' in self.conf:
col_name = self.conf['column']
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME not in input_meta:
required[col_name] = None
else:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
if col_name in col_from_inport:
required[col_name] = col_from_inport[col_name]
else:
required[col_name] = None
meta_inports[self.INPUT_PORT_NAME] = required
self.template_meta_setup(in_ports=meta_inports, out_ports=None)
def conf_schema(self):
input_meta = self.get_input_meta()
json = {
"title": "Asset Average Configure",
"type": "object",
"description": """Compute the average value of the key column
which is defined in the configuration
""",
"properties": {
"column": {
"type": "string",
"description": """the column name in the dataframe
to average"""
}
},
"required": ["column"],
}
if self.INPUT_PORT_NAME in input_meta:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
enums = [col for col in col_from_inport.keys()]
json['properties']['column']['enum'] = enums
ui = {}
return ConfSchema(json=json, ui=ui)
else:
ui = {
"column": {"ui:widget": "text"}
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
Compute the average value of the key column which is defined in the
`column` of the node's conf
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
dataframe
"""
input_df = inputs[self.INPUT_PORT_NAME]
average_column = self.conf['column']
volume_df = input_df[[average_column, "asset"]] \
.groupby(["asset"]).mean().reset_index()
volume_df.columns = ['asset', average_column]
return {self.OUTPUT_PORT_NAME: volume_df}
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/transform/averageNode.py |
from greenflow.dataframe_flow import (
ConfSchema, PortsSpecSchema, MetaDataSchema)
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
__all__ = ['MinNode']
class MinNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'in'
self.OUTPUT_PORT_NAME = 'out'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:in}"
}
}
cols_required = {"asset": "int64"}
if 'column' in self.conf:
retention = {self.conf['column']: "float64",
"asset": "int64"}
else:
retention = {"asset": "int64"}
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
MetaDataSchema.META_DATA: retention
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def update(self):
TemplateNodeMixin.update(self)
meta_inports = self.template_meta_setup().inports
required = meta_inports[self.INPUT_PORT_NAME]
if 'column' in self.conf:
col_name = self.conf['column']
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME not in input_meta:
required[col_name] = None
else:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
if col_name in col_from_inport:
required[col_name] = col_from_inport[col_name]
else:
required[col_name] = None
meta_inports[self.INPUT_PORT_NAME] = required
self.template_meta_setup(in_ports=meta_inports, out_ports=None)
def conf_schema(self):
json = {
"title": "Minimum Value Node configure",
"type": "object",
"description": "Compute the minimum value of the key column",
"properties": {
"column": {
"type": "string",
"description": "column to calculate the minimum value"
}
},
"required": ["column"],
}
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME in input_meta:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
enums = [col for col in col_from_inport.keys()]
json['properties']['column']['enum'] = enums
ui = {
}
return ConfSchema(json=json, ui=ui)
else:
ui = {
"column": {"ui:widget": "text"}
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
        Compute the minimum value of the key column which is defined in the
`column` of the node's conf
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
dataframe
"""
input_df = inputs[self.INPUT_PORT_NAME]
min_column = self.conf['column']
volume_df = input_df[[min_column,
"asset"]].groupby(["asset"]).min().reset_index()
volume_df.columns = ['asset', min_column]
return {self.OUTPUT_PORT_NAME: volume_df}
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/transform/minNode.py |
from collections import OrderedDict
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow.portsSpecSchema import (ConfSchema,
PortsSpecSchema)
from greenflow.dataframe_flow.metaSpec import MetaData
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
from .data_obj import NormalizationData
__all__ = ['NormalizationNode']
class NormalizationNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'df_in'
self.OUTPUT_PORT_NAME = 'df_out'
self.INPUT_NORM_MODEL_NAME = 'norm_data_in'
self.OUTPUT_NORM_MODEL_NAME = 'norm_data_out'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
self.INPUT_NORM_MODEL_NAME: {
port_type: [
"greenflow_gquant_plugin.transform.data_obj.NormalizationData" # noqa
]
}
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:df_in}"
},
self.OUTPUT_NORM_MODEL_NAME: {
port_type: [
"greenflow_gquant_plugin.transform.data_obj.NormalizationData" # noqa
]
},
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
def meta_setup(self):
cols_required = {}
required = {
self.INPUT_PORT_NAME: cols_required,
self.INPUT_NORM_MODEL_NAME: cols_required
}
if 'columns' in self.conf and self.conf.get('include', True):
cols_required = {}
for col in self.conf['columns']:
cols_required[col] = None
required = {
self.INPUT_PORT_NAME: cols_required,
self.INPUT_NORM_MODEL_NAME: cols_required
}
output_cols = {
self.OUTPUT_PORT_NAME: required[self.INPUT_PORT_NAME],
self.OUTPUT_NORM_MODEL_NAME: required[
self.INPUT_NORM_MODEL_NAME]
}
input_meta = self.get_input_meta()
if (self.INPUT_NORM_MODEL_NAME in input_meta and
self.INPUT_PORT_NAME in input_meta):
cols_required = input_meta[self.INPUT_NORM_MODEL_NAME]
required = {
self.INPUT_PORT_NAME: cols_required,
self.INPUT_NORM_MODEL_NAME: cols_required
}
col_from_inport = input_meta[self.INPUT_PORT_NAME]
output_cols = {
self.OUTPUT_PORT_NAME: col_from_inport,
self.OUTPUT_NORM_MODEL_NAME: cols_required
}
elif (self.INPUT_NORM_MODEL_NAME in input_meta and
self.INPUT_PORT_NAME not in input_meta):
cols_required = input_meta[self.INPUT_NORM_MODEL_NAME]
required = {
self.INPUT_PORT_NAME: cols_required,
self.INPUT_NORM_MODEL_NAME: cols_required
}
output_cols = {
self.OUTPUT_PORT_NAME: cols_required,
self.OUTPUT_NORM_MODEL_NAME: cols_required
}
elif (self.INPUT_NORM_MODEL_NAME not in input_meta and
self.INPUT_PORT_NAME in input_meta):
col_from_inport = input_meta[self.INPUT_PORT_NAME]
enums = [col for col in col_from_inport.keys()]
if 'columns' in self.conf:
if self.conf.get('include', True):
included_colums = self.conf['columns']
else:
included_colums = [col for col in enums
if col not in self.conf['columns']]
cols_required = OrderedDict()
for col in included_colums:
if col in col_from_inport:
cols_required[col] = col_from_inport[col]
else:
cols_required[col] = None
required = {
self.INPUT_PORT_NAME: cols_required,
self.INPUT_NORM_MODEL_NAME: cols_required
}
output_cols = {
self.OUTPUT_PORT_NAME: col_from_inport,
self.OUTPUT_NORM_MODEL_NAME: cols_required
}
metadata = MetaData(inports=required, outports=output_cols)
# The port INPUT_NORM_MODEL_NAME connection is optional. If not
# connected do not set in required
isconnected = \
self.INPUT_NORM_MODEL_NAME in self.get_connected_inports()
if not isconnected:
metadata.inports.pop(self.INPUT_NORM_MODEL_NAME, None)
return metadata
def conf_schema(self):
json = {
"title": "Normalization Node configure",
"type": "object",
"description": "Normalize the columns to have zero mean and std 1",
"properties": {
"columns": {
"type": "array",
"description": """an array of columns that need to
be normalized, or excluded from normalization depending
                    on the `include` flag state""",
"items": {
"type": "string"
}
},
"include": {
"type": "boolean",
"description": """if set true, the `columns` need to be
                    normalized; if false, all dataframe columns except the
`columns` need to be normalized""",
"default": True
},
},
"required": [],
}
ui = {}
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME in input_meta:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
enums = [col for col in col_from_inport.keys()]
json['properties']['columns']['items']['enum'] = enums
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
normalize the data to zero mean, std 1
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
dataframe
"""
input_df = inputs[self.INPUT_PORT_NAME]
if self.INPUT_NORM_MODEL_NAME in inputs:
norm_data = inputs[self.INPUT_NORM_MODEL_NAME].data
input_meta = self.get_input_meta()
means = norm_data['mean']
stds = norm_data['std']
col_from_inport = input_meta[self.INPUT_NORM_MODEL_NAME]
cols = [i for i in col_from_inport.keys()]
# cols.sort()
else:
# need to compute the mean and std
if self.conf.get('include', True):
cols = self.conf['columns']
else:
cols = input_df.columns.difference(
self.conf['columns']).values.tolist()
# cols.sort()
means = input_df[cols].mean()
stds = input_df[cols].std()
norm = (input_df[cols] - means) / stds
col_dict = {i: norm[i] for i in cols}
norm_df = input_df.assign(**col_dict)
output = {}
if self.outport_connected(self.OUTPUT_PORT_NAME):
output.update({self.OUTPUT_PORT_NAME: norm_df})
if self.outport_connected(self.OUTPUT_NORM_MODEL_NAME):
norm_data = {"mean": means, "std": stds}
payload = NormalizationData(norm_data)
output.update({self.OUTPUT_NORM_MODEL_NAME: payload})
return output
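if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module. Running the node
    # itself requires the greenflow TaskGraph machinery, so this only mirrors
    # the computation process() performs, on a plain pandas DataFrame: the
    # selected columns are shifted to zero mean and scaled to unit standard
    # deviation, and the mean/std pair is what NormalizationData would wrap.
    import pandas as pd
    df = pd.DataFrame({"close": [10.0, 11.0, 12.0, 13.0],
                       "volume": [100.0, 120.0, 90.0, 110.0]})
    cols = ["close", "volume"]  # corresponds to conf['columns']
    means = df[cols].mean()
    stds = df[cols].std()
    norm_df = df.assign(**{c: (df[c] - means[c]) / stds[c] for c in cols})
    print(norm_df)
    print({"mean": means, "std": stds})  # the "norm model" payload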
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/transform/normalizationNode.py |
import copy
from greenflow.dataframe_flow import (ConfSchema, PortsSpecSchema)
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
from .. import cuindicator as ci
__all__ = ['IndicatorNode']
IN_DATA = {
"port_exponential_moving_average": {
"function": "port_exponential_moving_average",
"columns": ["close"],
"args": [2]
},
"port_moving_average": {
"function": "port_moving_average",
"columns": ["close"],
"args": [2]
},
"port_rate_of_change": {
"function": "port_rate_of_change",
"columns": ["close"],
"args": [2]
},
"port_diff": {"function": "port_diff",
"columns": ["close"],
"args": [-1]
},
"port_trix": {
"function": "port_trix",
"columns": ["close"],
"args": [2]
},
"port_average_directional_movement_index": {
"function": "port_average_directional_movement_index",
"columns": ["high", "low", "close"],
"args": [2, 3]
},
"port_donchian_channel": {
"function": "port_donchian_channel",
"columns": ["high", "low"],
"args": [2]
},
"port_fractional_diff": {"function": "port_fractional_diff",
"columns": ["close"],
"args": [0.9]
},
"port_chaikin_oscillator": {"function": "port_chaikin_oscillator",
"columns": ["high", "low", "close", "volume"],
"args": [2, 3]
},
"port_bollinger_bands": {"function": "port_bollinger_bands",
"columns": ["close"],
"args": [2],
"outputs": ["b1", "b2"]
},
"port_macd": {"function": "port_macd",
"columns": ["close"],
"args": [2, 3],
"outputs": ["MACDsign", "MACDdiff"]
},
"port_relative_strength_index": {
"function": "port_relative_strength_index",
"columns": ["high", "low"],
"args": [2],
},
"port_average_true_range": {"function": "port_average_true_range",
"columns": ["high", "low", "close"],
"args": [2],
},
"port_stochastic_oscillator_k": {
"function": "port_stochastic_oscillator_k",
"columns": ["high", "low", "close"],
"args": [],
},
"port_stochastic_oscillator_d": {
"function": "port_stochastic_oscillator_d",
"columns": ["high", "low", "close"],
"args": [2],
},
"port_money_flow_index": {
"function": "port_money_flow_index",
"columns": ["high", "low", "close", "volume"],
"args": [2],
},
"port_force_index": {"function": "port_force_index",
"columns": ["close", "volume"],
"args": [2],
},
"port_ultimate_oscillator": {"function": "port_ultimate_oscillator",
"columns": ["high", "low", "close"],
"args": [],
},
"port_accumulation_distribution": {
"function": "port_accumulation_distribution",
"columns": ["high", "low", "close", "volume"],
"args": [2],
},
"port_commodity_channel_index": {
"function": "port_commodity_channel_index",
"columns": ["high", "low", "close"],
"args": [2],
},
"port_on_balance_volume": {"function": "port_on_balance_volume",
"columns": ["close", "volume"],
"args": [2],
},
"port_vortex_indicator": {"function": "port_vortex_indicator",
"columns": ["high", "low", "close"],
"args": [2],
},
"port_kst_oscillator": {"function": "port_kst_oscillator",
"columns": ["close"],
"args": [3, 4, 5, 6, 7, 8, 9, 10],
},
"port_mass_index": {"function": "port_mass_index",
"columns": ["high", "low"],
"args": [2, 3],
},
"port_true_strength_index": {"function": "port_true_strength_index",
"columns": ["close"],
"args": [2, 3],
},
"port_ease_of_movement": {"function": "port_ease_of_movement",
"columns": ["high", "low", "volume"],
"args": [2],
},
"port_coppock_curve": {"function": "port_coppock_curve",
"columns": ["close"],
"args": [2],
},
"port_keltner_channel": {"function": "port_keltner_channel",
"columns": ["high", "low", "close"],
"args": [2],
"outputs": ["KelChD", "KelChM", "KelChU"]
},
"port_ppsr": {"function": "port_ppsr",
"columns": ["high", "low", "close"],
"args": [],
"outputs": ["PP", "R1", "S1", "R2", "S2", "R3", "S3"]
},
"port_shift": {"function": "port_shift",
"columns": ["returns"],
"args": [-1]
}
}
class IndicatorNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.delayed_process = True
self.INPUT_PORT_NAME = 'stock_in'
self.OUTPUT_PORT_NAME = 'stock_out'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:stock_in}"
}
}
cols_required = {'indicator': 'int32'}
addition = {}
if 'indicators' in self.conf:
indicators = self.conf['indicators']
for indicator in indicators:
functionId = indicator['function']
conf = copy.deepcopy(IN_DATA[functionId])
if 'args' in indicator:
if len(conf['args']) != 0:
conf['args'] = indicator['args']
if 'columns' in indicator:
conf['columns'] = indicator['columns']
for col in conf['columns']:
cols_required[col] = 'float64'
if 'outputs' in conf:
for out in conf['outputs']:
out_col = self._compose_name(conf, [out])
addition[out_col] = 'float64'
else:
out_col = self._compose_name(conf, [])
addition[out_col] = 'float64'
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_ADDITION,
MetaDataSchema.META_REF_INPUT: self.INPUT_PORT_NAME,
MetaDataSchema.META_DATA: addition
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def _compose_name(self, indicator, outname=[]):
name = indicator['function']
args_name = []
if 'args' in indicator:
args_name = [str(i) for i in indicator['args']]
splits = [i.upper() for i in name.split('_') if i != 'port']
if len(splits) > 2:
splits = [i[0] for i in splits] + outname + args_name
elif len(splits) == 2:
splits = [i[0:2] for i in splits] + outname + args_name
else:
splits = [splits[0]] + outname + args_name
return "_".join(splits)
def conf_schema(self):
json = {
"title": "Technical Indicator Node configure",
"type": "object",
"description": """Add technical indicators to the dataframe.
"remove_na" decides whether we want to remove the NAs
from the technical indicators""",
"properties": {
"indicators": {
"type": "array",
"items": {
"type": "object",
"anyOf": [
]
},
"description": """A list of indicators to be included"""
},
"remove_na": {
"type": "boolean",
"description": """Remove the NAs from the technical
indicators?""",
"enum": [True, False],
"default": True
}
},
"required": ["remove_na"],
}
input_meta = self.get_input_meta()
enums = []
if self.INPUT_PORT_NAME in input_meta:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
enums = [col for col in col_from_inport.keys()]
for key in IN_DATA.keys():
fun_name = " ".join(key.split('_')[1:])
out = {
"function": {
"title": fun_name,
"enum": [key],
"default": key,
},
}
args = {
"type": "array",
"items": []
}
columns = {
"type": "array",
"items": []
}
for arg in range(len(IN_DATA[key]['args'])):
item = {
"type": "number",
"title": "parameter {}".format(arg+1),
"default": IN_DATA[key]['args'][arg]
}
args['items'].append(item)
for arg in range(len(IN_DATA[key]['columns'])):
item = {
"type": "string",
"default": IN_DATA[key]['columns'][arg]
}
if len(enums) > 0:
item['enum'] = enums
columns['items'].append(item)
if (len(IN_DATA[key]['args']) > 0):
out['args'] = args
if (len(IN_DATA[key]['columns']) > 0):
out['columns'] = columns
obj = {"type": "object", "properties": out, "title": fun_name}
json['properties']['indicators']['items']['anyOf'].append(obj)
ui = {}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
Add technical indicators to the dataframe.
All technical indicators are defined in the self.conf
"remove_na" in self.conf decides whether we want to remove the NAs
from the technical indicators
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
dataframe
"""
input_df = inputs[self.INPUT_PORT_NAME]
indicators = self.conf['indicators']
for indicator in indicators:
functionId = indicator['function']
conf = copy.deepcopy(IN_DATA[functionId])
if 'args' in indicator:
                # bug workaround: ignore the argument values sent from the
                # client when the indicator takes no arguments
if len(conf['args']) != 0:
conf['args'] = indicator['args']
if 'columns' in indicator:
conf['columns'] = indicator['columns']
fun = getattr(ci, indicator['function'])
parallel = [input_df['indicator']]
data = [input_df[col] for col in conf['columns']]
ar = []
if 'args' in conf:
ar = conf['args']
v = fun(*(parallel+data+ar))
if isinstance(v, tuple) and 'outputs' in conf:
for out in conf['outputs']:
out_col = self._compose_name(conf, [out])
val = getattr(v, out)
val.index = input_df.index
input_df[out_col] = val
else:
if isinstance(v, tuple):
v = v[0]
out_col = self._compose_name(conf, [])
v.index = input_df.index
input_df[out_col] = v
# remove all the na elements, requires cudf>=0.8
if "remove_na" in self.conf and self.conf["remove_na"]:
input_df = input_df.nans_to_nulls().dropna()
return {self.OUTPUT_PORT_NAME: input_df}
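if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module. It shows the
    # shape of the `conf` this node expects; the indicator names and default
    # arguments are taken from IN_DATA above. Running the node itself needs
    # the greenflow TaskGraph machinery, so only the configuration is shown.
    example_conf = {
        "indicators": [
            {"function": "port_bollinger_bands",
             "columns": ["close"],
             "args": [10]},
            {"function": "port_macd",
             "columns": ["close"],
             "args": [12, 26]},
        ],
        "remove_na": True,
    }
    for ind in example_conf["indicators"]:
        print(ind["function"], "defaults:", IN_DATA[ind["function"]])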
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/transform/indicatorNode.py |
import datetime
from greenflow.dataframe_flow import Node, PortsSpecSchema
from greenflow.dataframe_flow.portsSpecSchema import ConfSchema
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
__all__ = ['DatetimeFilterNode']
class DatetimeFilterNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
"""
A node that is used to select datapoints based on range of time.
conf["beg"] defines the beginning of the date inclusively and
conf["end"] defines the end of the date exclusively.
all the date strs are in format of "Y-m-d".
"""
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'stock_in'
self.OUTPUT_PORT_NAME = 'stock_out'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:stock_in}"
}
}
addition = {}
cols_required = {"datetime": "datetime64[ns]"}
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_ADDITION,
MetaDataSchema.META_REF_INPUT: self.INPUT_PORT_NAME,
MetaDataSchema.META_DATA: addition
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "Asset indicator configure",
"type": "object",
"description": """Select the data based on an range of datetime""",
"properties": {
"beg": {
"type": "string",
"description": """start date, inclusive"""
},
"end": {
"type": "string",
"description": """end date, exclusive"""
}
},
"required": ["beg", "end"],
}
ui = {
"beg": {"ui:widget": "alt-date",
"ui:options": {
"yearsRange": [1985, 2025],
"hideNowButton": True,
"hideClearButton": True,
}
},
"end": {"ui:widget": "alt-date",
"ui:options": {
"yearsRange": [1985, 2025],
"hideNowButton": True,
"hideClearButton": True,
}
}
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
Select the data based on an range of datetime, which is defined in
`beg` and `end` in the nodes' conf
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
dataframe
"""
df = inputs[self.INPUT_PORT_NAME]
beg_date = datetime.datetime.strptime(self.conf['beg'],
'%Y-%m-%dT%H:%M:%S.%fZ')
end_date = datetime.datetime.strptime(self.conf['end'],
'%Y-%m-%dT%H:%M:%S.%fZ')
df = df.query('datetime<@end_date and datetime>=@beg_date',
local_dict={
'beg_date': beg_date,
'end_date': end_date
})
return {self.OUTPUT_PORT_NAME: df}
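if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module. It shows the date
    # string format process() expects in conf['beg'] / conf['end'] (the format
    # emitted by the alt-date widget) and the equivalent pandas query on a
    # plain DataFrame; running the node itself needs greenflow.
    import pandas as pd
    conf = {"beg": "2010-01-01T00:00:00.000Z",
            "end": "2011-01-01T00:00:00.000Z"}
    fmt = "%Y-%m-%dT%H:%M:%S.%fZ"
    beg_date = datetime.datetime.strptime(conf["beg"], fmt)
    end_date = datetime.datetime.strptime(conf["end"], fmt)
    df = pd.DataFrame({"datetime": pd.date_range("2009-06-01", periods=24,
                                                 freq="MS"),
                       "close": range(24)})
    # beg is inclusive, end is exclusive, matching process() above
    print(df.query('datetime < @end_date and datetime >= @beg_date'))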
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/transform/datetimeFilterNode.py |
from greenflow.dataframe_flow import Node, PortsSpecSchema
from greenflow.dataframe_flow.portsSpecSchema import ConfSchema
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
__all__ = ['ReturnFeatureNode']
class ReturnFeatureNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.delayed_process = True
self.INPUT_PORT_NAME = 'stock_in'
self.OUTPUT_PORT_NAME = 'stock_out'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:stock_in}"
}
}
cols_required = {"close": "float64",
"asset": "int64"}
addition = {"returns": "float64"}
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_ADDITION,
MetaDataSchema.META_REF_INPUT: self.INPUT_PORT_NAME,
MetaDataSchema.META_DATA: addition
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "Add Returen Feature Node configure",
"type": "object",
"description": """Add the rate of of return column based
on the `close` price for each of the asset in the dataframe.
""",
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
Add the rate of of return column based on the `close` price for each
of the asset in the dataframe. The result column is named as `returns`
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
dataframe
"""
tmp_col = "ae699380a8834957b3a8b7ad60192dd7"
input_df = inputs[self.INPUT_PORT_NAME]
shifted = input_df['close'].shift(1)
input_df['returns'] = (input_df['close'] - shifted) / shifted
input_df['returns'] = input_df['returns'].fillna(0.0)
        # mark the first row of each asset (where the asset id changes) in a
        # temporary column; its shifted `close` comes from the previous asset
        input_df[tmp_col] = (input_df['asset'] -
                             input_df['asset'].shift(1)).fillna(1)
        input_df[tmp_col] = (input_df[tmp_col] != 0).astype('int32')
        # turn the boundary rows into nulls so that dropna removes them below
        input_df[tmp_col][input_df[tmp_col] == 1] = None
        return {self.OUTPUT_PORT_NAME: input_df.dropna(
            subset=[tmp_col]).drop(tmp_col, axis=1)}
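if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module. It mirrors the
    # computation in process() on a plain pandas DataFrame: percentage returns
    # from `close`, with the first row of each asset dropped because its
    # shifted value would otherwise leak in from the previous asset.
    import pandas as pd
    df = pd.DataFrame({"asset": [1, 1, 1, 2, 2, 2],
                       "close": [10.0, 11.0, 12.1, 50.0, 55.0, 44.0]})
    shifted = df["close"].shift(1)
    df["returns"] = ((df["close"] - shifted) / shifted).fillna(0.0)
    boundary = (df["asset"] - df["asset"].shift(1)).fillna(1) != 0
    print(df[~boundary].reset_index(drop=True))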
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/transform/returnFeatureNode.py |
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow.portsSpecSchema import ConfSchema
from greenflow.dataframe_flow.portsSpecSchema import PortsSpecSchema
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
__all__ = ['LeftMergeNode']
class LeftMergeNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_LEFT_NAME = 'left'
self.INPUT_PORT_RIGHT_NAME = 'right'
self.OUTPUT_PORT_NAME = 'merged'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_LEFT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
self.INPUT_PORT_RIGHT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:left}"
}
}
cols_required = {}
meta_inports = {
self.INPUT_PORT_LEFT_NAME: cols_required,
self.INPUT_PORT_RIGHT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
MetaDataSchema.META_DATA: {}
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def update(self):
TemplateNodeMixin.update(self)
input_meta = self.get_input_meta()
output_cols = {}
if (self.INPUT_PORT_LEFT_NAME in input_meta
and self.INPUT_PORT_RIGHT_NAME in input_meta):
col_from_left_inport = input_meta[self.INPUT_PORT_LEFT_NAME]
col_from_right_inport = input_meta[self.INPUT_PORT_RIGHT_NAME]
col_from_left_inport.update(col_from_right_inport)
output_cols = col_from_left_inport
elif self.INPUT_PORT_LEFT_NAME in input_meta:
col_from_left_inport = input_meta[self.INPUT_PORT_LEFT_NAME]
output_cols = col_from_left_inport
elif self.INPUT_PORT_RIGHT_NAME in input_meta:
col_from_right_inport = input_meta[self.INPUT_PORT_RIGHT_NAME]
output_cols = col_from_right_inport
meta_data = self.template_meta_setup()
meta_outports = meta_data.outports
meta_inports = meta_data.inports
left_required = meta_inports[self.INPUT_PORT_LEFT_NAME]
right_required = meta_inports[self.INPUT_PORT_RIGHT_NAME]
if 'column' in self.conf:
col_name = self.conf['column']
input_meta = self.get_input_meta()
if self.INPUT_PORT_LEFT_NAME not in input_meta:
left_required[col_name] = None
else:
col_from_inport = input_meta[self.INPUT_PORT_LEFT_NAME]
if col_name in col_from_inport:
left_required[col_name] = col_from_inport[col_name]
else:
left_required[col_name] = None
if self.INPUT_PORT_RIGHT_NAME not in input_meta:
right_required[col_name] = None
else:
col_from_inport = input_meta[self.INPUT_PORT_RIGHT_NAME]
if col_name in col_from_inport:
right_required[col_name] = col_from_inport[col_name]
else:
right_required[col_name] = None
meta_inports[self.INPUT_PORT_LEFT_NAME] = left_required
meta_inports[self.INPUT_PORT_RIGHT_NAME] = right_required
meta_outports[self.OUTPUT_PORT_NAME][MetaDataSchema.META_DATA] = \
output_cols
self.template_meta_setup(in_ports=meta_inports,
out_ports=meta_outports)
def conf_schema(self):
json = {
"title": "DataFrame Left Merge configure",
"type": "object",
"description": """Left merge two dataframes of the same types""",
"properties": {
"column": {
"type": "string",
"description": "column name on which to do the left merge"
}
},
"required": ["column"],
}
input_meta = self.get_input_meta()
if (self.INPUT_PORT_LEFT_NAME in input_meta
and self.INPUT_PORT_RIGHT_NAME in input_meta):
col_left_inport = input_meta[self.INPUT_PORT_LEFT_NAME]
col_right_inport = input_meta[self.INPUT_PORT_RIGHT_NAME]
enums1 = set([col for col in col_left_inport.keys()])
enums2 = set([col for col in col_right_inport.keys()])
json['properties']['column']['enum'] = list(
enums1.intersection(enums2))
ui = {}
return ConfSchema(json=json, ui=ui)
else:
ui = {
"column": {"ui:widget": "text"}
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
left merge the two dataframes in the inputs. the `on column` is defined
in the `column` of the node's conf
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
dataframe
"""
df1 = inputs[self.INPUT_PORT_LEFT_NAME]
df2 = inputs[self.INPUT_PORT_RIGHT_NAME]
return {self.OUTPUT_PORT_NAME: df1.merge(df2, on=self.conf['column'],
how='left')}
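if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module. It shows the
    # pandas equivalent of process() with conf['column'] = 'asset': columns
    # from the right dataframe are attached to every matching left row.
    import pandas as pd
    left = pd.DataFrame({"asset": [1, 2, 3], "close": [10.0, 20.0, 30.0]})
    right = pd.DataFrame({"asset": [1, 2], "sector": ["tech", "energy"]})
    print(left.merge(right, on="asset", how="left"))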
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/transform/leftMergeNode.py |
class NormalizationData(object):
def __init__(self, data):
self.data = data
class ProjectionData(object):
def __init__(self, data):
self.data = data
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/transform/data_obj.py |
from greenflow.dataframe_flow import Node, PortsSpecSchema
from greenflow.dataframe_flow.portsSpecSchema import ConfSchema
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
__all__ = ['AssetIndicatorNode']
class AssetIndicatorNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.delayed_process = True
self.INPUT_PORT_NAME = 'stock_in'
self.OUTPUT_PORT_NAME = 'stock_out'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:stock_in}"
}
}
cols_required = {"asset": "int64"}
addition = {"indicator": "int32"}
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_ADDITION,
MetaDataSchema.META_REF_INPUT: self.INPUT_PORT_NAME,
MetaDataSchema.META_DATA: addition
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "Asset indicator configure",
"type": "object",
"description": """Add the indicator column in the dataframe which
set 1 at the beginning of the each of the assets, assuming the
rows are sorted so same asset are grouped together""",
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
Add the indicator column in the dataframe which set 1 at the beginning
of the each of the assets, assuming the rows are sorted so same asset
are grouped together
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
dataframe
"""
input_df = inputs[self.INPUT_PORT_NAME]
input_df['indicator'] = (input_df['asset'] -
input_df['asset'].shift(1)).fillna(1)
input_df['indicator'] = (input_df['indicator'] != 0).astype('int32')
return {self.OUTPUT_PORT_NAME: input_df}
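if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module. It mirrors the
    # computation in process() on a plain pandas DataFrame: `indicator` is 1
    # on the first row of each asset group and 0 elsewhere.
    import pandas as pd
    df = pd.DataFrame({"asset": [1, 1, 1, 2, 2, 3]})
    df["indicator"] = ((df["asset"] - df["asset"].shift(1))
                       .fillna(1) != 0).astype("int32")
    print(df)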
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/transform/assetIndicatorNode.py |
from numba import cuda
import numba
from .windows import (ewma_mean_window)
kernel_cache = {}
def get_ewm_kernel(method):
if method in kernel_cache:
return kernel_cache[method]
@cuda.jit
def kernel(in_arr, out_arr, average_length, span, arr_len, thread_tile,
min_size):
"""
This kernel is to copy input array elements into shared array.
The total window size. To compute
output element at i, it uses [i - average_length - 1, i] elements in
history.
Arguments:
in_arr: input gpu array
out_arr: output gpu_array
average_length: is the size used to compute expoential weighted
average
span: the span size for the exponential weighted average
arr_len: the input/output array length
thread_tile: each thread is responsible for `thread_tile` number
of elements
min_size: the minimum number of non-na elements
"""
shared = cuda.shared.array(shape=0,
dtype=numba.float64)
block_size = cuda.blockDim.x
tx = cuda.threadIdx.x
# Block id in a 1D grid
bid = cuda.blockIdx.x
starting_id = bid * block_size * thread_tile
# copy the thread_tile * number_of_thread_per_block into the shared
for j in range(thread_tile):
offset = tx + j * block_size
if (starting_id + offset) < arr_len:
shared[offset + average_length - 1] = in_arr[
starting_id + offset]
cuda.syncthreads()
# copy the average_length - 1 into the shared
for j in range(0, average_length - 1, block_size):
if (((tx + j) < average_length - 1) and
(starting_id - average_length + 1 + tx + j >= 0)):
shared[tx + j] = \
in_arr[starting_id - average_length + 1 + tx + j]
cuda.syncthreads()
# slice the shared memory for each threads
start_shared = tx * thread_tile
his_len = min(average_length - 1,
starting_id + tx * thread_tile)
# slice the global memory for each threads
start = starting_id + tx * thread_tile
end = min(starting_id + (tx + 1) * thread_tile, arr_len)
sub_outarr = out_arr[start:end]
sub_len = end - start
method(shared, his_len, sub_outarr,
average_length, span, sub_len,
average_length - 1 + start_shared,
min_size)
kernel_cache[method] = kernel
return kernel
class Ewm(object):
def __init__(self, span, input_arr, min_periods=None, thread_tile=48,
number_of_threads=64, expand_multiplier=10):
"""
        The Ewm class computes a rolling exponentially weighted moving
        average. It uses expand_multiplier * span elements to do the weighted
        average, so adjust expand_multiplier to trade accuracy for speed.
        Arguments:
            span: the span parameter in the exponential weighted moving average
            input_arr: the input GPU array or cudf.Series
            min_periods: the minimum number of non-na elements needed to get
            an output
            thread_tile: each thread will be responsible for `thread_tile`
            number of elements in window computation
            number_of_threads: num. of threads in a block for CUDA computation
            expand_multiplier: the number of elements used to compute the EWM
            is controlled by this constant. The higher this number, the better
            the accuracy but the slower the performance
"""
if isinstance(input_arr, numba.cuda.cudadrv.devicearray.DeviceNDArray):
self.gpu_in = input_arr
else:
self.gpu_in = input_arr.to_gpu_array()
if min_periods is None:
self.min_periods = span
else:
self.min_periods = min_periods
self.span = span
self.window = span * expand_multiplier
self.number_of_threads = number_of_threads
self.array_len = len(self.gpu_in)
self.thread_tile = thread_tile
self.number_of_blocks = \
(self.array_len + (number_of_threads * thread_tile - 1)) // \
(number_of_threads * thread_tile)
self.shared_buffer_size = \
(self.number_of_threads * self.thread_tile + self.window - 1)
def apply(self, method):
gpu_out = numba.cuda.device_array_like(self.gpu_in)
kernel = get_ewm_kernel(method)
        # launch configuration: (grid, block, stream, dynamic shared memory
        # size in bytes; 8 bytes per float64 element)
        kernel[(self.number_of_blocks,),
(self.number_of_threads,),
0,
self.shared_buffer_size * 8](self.gpu_in,
gpu_out,
self.window,
self.span,
self.array_len,
self.thread_tile,
self.min_periods)
return gpu_out
def mean(self):
return self.apply(ewma_mean_window)
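if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module. It assumes a
    # CUDA-capable GPU is visible to numba; the result should be close to
    # pandas' Series.ewm(span=span, min_periods=span).mean() on the same
    # data, with accuracy controlled by expand_multiplier.
    import numpy as np
    values = np.random.rand(1000)
    gpu_values = cuda.to_device(values)   # device array input
    ewm = Ewm(span=10, input_arr=gpu_values)
    result = ewm.mean().copy_to_host()    # exponentially weighted mean
    print(result[:10])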
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/cuindicator/ewm.py |