from .rolling import Rolling
from numba import cuda
import math
import numba
number_of_threads = 128
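# Launch configuration used throughout this module: one thread per output
# element, with the block count computed by ceiling division, e.g.
# arr_len = 1000 -> (1000 + 127) // 128 = 8 blocks.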
def diff(in_arr, n):
if n < 0:
return Rolling(1, in_arr, forward_window=-n).forward_diff()
elif n > 0:
return Rolling(n + 1, in_arr).backward_diff()
else:
return in_arr
def shift(in_arr, n):
if n < 0:
return Rolling(1, in_arr, forward_window=-n).forward_shift()
elif n > 0:
return Rolling(n + 1, in_arr).backward_shift()
else:
return in_arr
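# Illustrative usage sketch (not part of the original module), assuming a
# CUDA-capable GPU and that `Rolling` accepts device arrays; `diff`/`shift`
# mirror pandas Series.diff/Series.shift semantics on device arrays.
def _demo_diff_shift():
    import numpy as np
    arr = cuda.to_device(np.arange(8, dtype=np.float64))
    lag = diff(arr, 1)      # arr[i] - arr[i - 1], like Series.diff(1)
    lead = shift(arr, -1)   # arr[i + 1] moved to i, like Series.shift(-1)
    return lag.copy_to_host(), lead.copy_to_host()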
@cuda.jit
def ultimate_oscillator_kernel(high_arr, low_arr, close_arr, TR_arr, BP_arr,
arr_len):
i = cuda.grid(1)
if i < arr_len:
if i == 0:
TR_arr[i] = 0
BP_arr[i] = 0
else:
TR = (max(high_arr[i],
close_arr[i - 1]) - min(low_arr[i], close_arr[i - 1]))
TR_arr[i] = TR
BP = close_arr[i] - min(low_arr[i], close_arr[i - 1])
BP_arr[i] = BP
@cuda.jit
def port_ultimate_oscillator_kernel(asset_ind, high_arr, low_arr, close_arr,
TR_arr, BP_arr,
arr_len):
i = cuda.grid(1)
if i < arr_len:
if asset_ind[i] == 1:
TR_arr[i] = 0
BP_arr[i] = 0
else:
TR = (max(high_arr[i],
close_arr[i - 1]) - min(low_arr[i], close_arr[i - 1]))
TR_arr[i] = TR
BP = close_arr[i] - min(low_arr[i], close_arr[i - 1])
BP_arr[i] = BP
@cuda.jit
def moneyflow_kernel(pp_arr, volume_arr, out_arr, arr_len):
i = cuda.grid(1)
if i < arr_len:
if i == 0:
out_arr[i] = 0
else:
if pp_arr[i] > pp_arr[i - 1]:
out_arr[i] = pp_arr[i] * volume_arr[i]
else:
out_arr[i] = 0.0
@cuda.jit
def port_moneyflow_kernel(asset_ind, pp_arr, volume_arr, out_arr, arr_len):
i = cuda.grid(1)
if i < arr_len:
if asset_ind[i] == 1:
out_arr[i] = 0
else:
if pp_arr[i] > pp_arr[i - 1]:
out_arr[i] = pp_arr[i] * volume_arr[i]
else:
out_arr[i] = 0.0
@cuda.jit
def onbalance_kernel(close_arr, volume_arr, out_arr, arr_len):
i = cuda.grid(1)
if i < arr_len:
if i == 0:
out_arr[i] = 0
else:
if close_arr[i] - close_arr[i - 1] > 0:
out_arr[i] = volume_arr[i]
elif close_arr[i] - close_arr[i - 1] == 0:
out_arr[i] = 0.0
else:
out_arr[i] = -volume_arr[i]
@cuda.jit
def port_onbalance_kernel(asset_ind, close_arr, volume_arr, out_arr, arr_len):
i = cuda.grid(1)
if i < arr_len:
if asset_ind[i] == 1:
out_arr[i] = 0
else:
if close_arr[i] - close_arr[i - 1] > 0:
out_arr[i] = volume_arr[i]
elif close_arr[i] - close_arr[i - 1] == 0:
out_arr[i] = 0.0
else:
out_arr[i] = -volume_arr[i]
@cuda.jit
def average_price_kernel(high_arr, low_arr, close_arr, out_arr, arr_len):
i = cuda.grid(1)
if i < arr_len:
out_arr[i] = (high_arr[i] + low_arr[i] + close_arr[i]) / 3.0
@cuda.jit
def true_range_kernel(high_arr, low_arr, close_arr, out_arr, arr_len):
i = cuda.grid(1)
if i < arr_len:
if i == 0:
out_arr[i] = 0
else:
out_arr[i] = max(high_arr[i],
close_arr[i - 1]) - min(low_arr[i],
close_arr[i - 1])
@cuda.jit
def port_true_range_kernel(asset_ind, high_arr, low_arr, close_arr, out_arr,
arr_len):
i = cuda.grid(1)
if i < arr_len:
if asset_ind[i] == 1:
out_arr[i] = 0
else:
out_arr[i] = max(high_arr[i],
close_arr[i - 1]) - min(low_arr[i],
close_arr[i - 1])
@cuda.jit
def port_mask_kernel(asset_ind, beg, end, out_arr, arr_len):
i = cuda.grid(1)
if i < arr_len:
if asset_ind[i] == 1:
if beg + i >= 0:
for j in range(beg + i, min(end + i, arr_len)):
out_arr[j] = math.nan
else:
for j in range(beg + i + arr_len, min(end + i + arr_len,
arr_len)):
out_arr[j] = math.nan
for j in range(0, min(end + i, arr_len)):
out_arr[j] = math.nan
@cuda.jit
def port_mask_zero_kernel(asset_ind, beg, end, out_arr, arr_len):
i = cuda.grid(1)
if i < arr_len:
if asset_ind[i] == 1:
if beg + i >= 0:
for j in range(beg + i, min(end + i, arr_len)):
out_arr[j] = 0
else:
for j in range(beg + i + arr_len, min(end + i + arr_len,
arr_len)):
out_arr[j] = 0
for j in range(0, min(end + i, arr_len)):
out_arr[j] = 0
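# Note (added for clarity): wherever asset_ind == 1 marks the first row of an
# asset, the two mask kernels above overwrite the window [i + beg, i + end) of
# the output (NaN for port_mask_kernel, 0 for port_mask_zero_kernel) so that
# indicator warm-up values do not leak across asset boundaries; negative `beg`
# values are wrapped and clipped against the array bounds.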
@cuda.jit
def lowhigh_diff_kernel(high_arr, low_arr, out_arr, arr_len):
i = cuda.grid(1)
if i < arr_len:
if i == 0:
out_arr[i] = 0
else:
out_arr[i] = abs(high_arr[i] - low_arr[i - 1]) - \
abs(low_arr[i] - high_arr[i - 1])
@cuda.jit
def port_lowhigh_diff_kernel(asset_ind, high_arr, low_arr, out_arr, arr_len):
i = cuda.grid(1)
if i < arr_len:
if asset_ind[i] == 1:
out_arr[i] = 0
else:
out_arr[i] = abs(high_arr[i] - low_arr[i - 1]) - \
abs(low_arr[i] - high_arr[i - 1])
@cuda.jit
def up_down_kernel(high_arr, low_arr, upD_arr, doD_arr, arr_len):
i = cuda.grid(1)
if i < arr_len - 1:
if (math.isnan(high_arr[i]) or math.isnan(high_arr[i + 1]) or
math.isnan(low_arr[i]) or math.isnan(low_arr[i + 1])):
upD_arr[i] = math.nan
doD_arr[i] = math.nan
else:
upMove = high_arr[i + 1] - high_arr[i]
doMove = low_arr[i] - low_arr[i + 1]
if upMove > doMove and upMove > 0:
upD_arr[i] = upMove
else:
upD_arr[i] = 0
if doMove > upMove and doMove > 0:
doD_arr[i] = doMove
else:
doD_arr[i] = 0
elif i == arr_len - 1:
upD_arr[i] = math.nan
doD_arr[i] = math.nan
@cuda.jit
def abs_kernel(in_arr, out_arr, arr_len):
i = cuda.grid(1)
if i < arr_len:
if math.isnan(in_arr[i]):
out_arr[i] = math.nan
else:
out_arr[i] = abs(in_arr[i])
@cuda.jit
def binary_substract(in_arr1, in_arr2, out_arr, arr_len):
i = cuda.grid(1)
if i < arr_len:
if (math.isnan(in_arr1[i]) or math.isnan(in_arr2[i])):
out_arr[i] = math.nan
else:
out_arr[i] = in_arr1[i] - in_arr2[i]
@cuda.jit
def binary_sum(in_arr1, in_arr2, out_arr, arr_len):
i = cuda.grid(1)
if i < arr_len:
if (math.isnan(in_arr1[i]) or math.isnan(in_arr2[i])):
out_arr[i] = math.nan
else:
out_arr[i] = in_arr1[i] + in_arr2[i]
@cuda.jit
def binary_multiply(in_arr1, in_arr2, out_arr, arr_len):
i = cuda.grid(1)
if i < arr_len:
if (math.isnan(in_arr1[i]) or math.isnan(in_arr2[i])):
out_arr[i] = math.nan
else:
out_arr[i] = in_arr1[i] * in_arr2[i]
@cuda.jit
def binary_div(in_arr1, in_arr2, out_arr, arr_len):
i = cuda.grid(1)
if i < arr_len:
if (math.isnan(in_arr1[i]) or math.isnan(in_arr2[i])):
out_arr[i] = math.nan
else:
if in_arr2[i] == 0 and in_arr1[i] == 0:
out_arr[i] = math.nan
elif in_arr2[i] == 0 and in_arr1[i] > 0:
out_arr[i] = math.inf
elif in_arr2[i] == 0 and in_arr1[i] < 0:
out_arr[i] = -math.inf
else:
out_arr[i] = in_arr1[i] / in_arr2[i]
@cuda.jit
def scale_kernel(in_arr, scaler, out_arr, arr_len):
i = cuda.grid(1)
if i < arr_len:
if math.isnan(in_arr[i]):
out_arr[i] = math.nan
else:
out_arr[i] = in_arr[i] * scaler
def upDownMove(high_arr, low_arr):
upD_arr = cuda.device_array_like(high_arr)
doD_arr = cuda.device_array_like(high_arr)
array_len = len(high_arr)
number_of_blocks = (array_len + (
number_of_threads - 1)) // number_of_threads
up_down_kernel[(number_of_blocks,), (number_of_threads,)](high_arr,
low_arr,
upD_arr,
doD_arr,
array_len)
return upD_arr, doD_arr
def ultimate_osc(high_arr, low_arr, close_arr):
TR_arr = cuda.device_array_like(high_arr)
BP_arr = cuda.device_array_like(high_arr)
array_len = len(high_arr)
number_of_blocks = (array_len + (
number_of_threads - 1)) // number_of_threads
ultimate_oscillator_kernel[(number_of_blocks,),
(number_of_threads,)](high_arr,
low_arr,
close_arr,
TR_arr,
BP_arr,
array_len)
return TR_arr, BP_arr
def port_ultimate_osc(asset_ind, high_arr, low_arr, close_arr):
TR_arr = cuda.device_array_like(high_arr)
BP_arr = cuda.device_array_like(high_arr)
array_len = len(high_arr)
number_of_blocks = (array_len + (
number_of_threads - 1)) // number_of_threads
port_ultimate_oscillator_kernel[(number_of_blocks,),
(number_of_threads,)](asset_ind,
high_arr,
low_arr,
close_arr,
TR_arr,
BP_arr,
array_len)
return TR_arr, BP_arr
def abs_arr(in_arr):
out_arr = cuda.device_array_like(in_arr)
array_len = len(in_arr)
number_of_blocks = (array_len + (
number_of_threads - 1)) // number_of_threads
abs_kernel[(number_of_blocks,), (number_of_threads,)](in_arr,
out_arr,
array_len)
return out_arr
def true_range(high_arr, low_arr, close_arr):
out_arr = cuda.device_array_like(high_arr)
array_len = len(high_arr)
number_of_blocks = (array_len + (
number_of_threads - 1)) // number_of_threads
true_range_kernel[(number_of_blocks,), (number_of_threads,)](high_arr,
low_arr,
close_arr,
out_arr,
array_len)
return out_arr
def port_true_range(asset_indicator, high_arr, low_arr, close_arr):
out_arr = cuda.device_array_like(high_arr)
array_len = len(high_arr)
number_of_blocks = (array_len + (
number_of_threads - 1)) // number_of_threads
port_true_range_kernel[(number_of_blocks,),
(number_of_threads,)](asset_indicator,
high_arr,
low_arr,
close_arr,
out_arr,
array_len)
return out_arr
def port_mask_nan(asset_indicator, input_arr, beg, end):
array_len = len(input_arr)
number_of_blocks = (array_len + (
number_of_threads - 1)) // number_of_threads
port_mask_kernel[(number_of_blocks,),
(number_of_threads,)](asset_indicator,
beg,
end,
input_arr,
array_len)
def port_mask_zero(asset_indicator, input_arr, beg, end):
array_len = len(input_arr)
number_of_blocks = (array_len + (
number_of_threads - 1)) // number_of_threads
port_mask_zero_kernel[(number_of_blocks,),
(number_of_threads,)](asset_indicator,
beg,
end,
input_arr,
array_len)
def average_price(high_arr, low_arr, close_arr):
out_arr = cuda.device_array_like(high_arr)
array_len = len(high_arr)
number_of_blocks = (array_len + (
number_of_threads - 1)) // number_of_threads
average_price_kernel[(number_of_blocks,), (number_of_threads,)](high_arr,
low_arr,
close_arr,
out_arr,
array_len)
return out_arr
def money_flow(pp_arr, volume_arr):
out_arr = cuda.device_array_like(pp_arr)
array_len = len(pp_arr)
number_of_blocks = (array_len + (
number_of_threads - 1)) // number_of_threads
moneyflow_kernel[(number_of_blocks,), (number_of_threads,)](pp_arr,
volume_arr,
out_arr,
array_len)
return out_arr
def port_money_flow(asset_ind, pp_arr, volume_arr):
out_arr = cuda.device_array_like(pp_arr)
array_len = len(pp_arr)
number_of_blocks = (array_len + (
number_of_threads - 1)) // number_of_threads
port_moneyflow_kernel[(number_of_blocks,),
(number_of_threads,)](asset_ind,
pp_arr,
volume_arr,
out_arr,
array_len)
return out_arr
def onbalance_volume(close_arr, volume_arr):
out_arr = cuda.device_array_like(close_arr)
array_len = len(close_arr)
number_of_blocks = (array_len + (
number_of_threads - 1)) // number_of_threads
onbalance_kernel[(number_of_blocks,), (number_of_threads,)](close_arr,
volume_arr,
out_arr,
array_len)
return out_arr
def port_onbalance_volume(asset_ind, close_arr, volume_arr):
out_arr = cuda.device_array_like(close_arr)
array_len = len(close_arr)
number_of_blocks = (array_len + (
number_of_threads - 1)) // number_of_threads
port_onbalance_kernel[(number_of_blocks,),
(number_of_threads,)](asset_ind,
close_arr,
volume_arr,
out_arr,
array_len)
return out_arr
def lowhigh_diff(high_arr, low_arr):
out_arr = cuda.device_array_like(high_arr)
array_len = len(high_arr)
number_of_blocks = \
(array_len + (number_of_threads - 1)) // number_of_threads
lowhigh_diff_kernel[(number_of_blocks,), (number_of_threads,)](high_arr,
low_arr,
out_arr,
array_len)
return out_arr
def port_lowhigh_diff(asset_ind, high_arr, low_arr):
out_arr = cuda.device_array_like(high_arr)
array_len = len(high_arr)
number_of_blocks = \
(array_len + (number_of_threads - 1)) // number_of_threads
port_lowhigh_diff_kernel[(number_of_blocks,),
(number_of_threads,)](asset_ind,
high_arr,
low_arr,
out_arr,
array_len)
return out_arr
def substract(in_arr1, in_arr2):
out_arr = cuda.device_array_like(in_arr1)
array_len = len(in_arr1)
number_of_blocks = (array_len + (
number_of_threads - 1)) // number_of_threads
binary_substract[(number_of_blocks,), (number_of_threads,)](in_arr1,
in_arr2,
out_arr,
array_len)
return out_arr
def summation(in_arr1, in_arr2):
out_arr = cuda.device_array_like(in_arr1)
array_len = len(in_arr1)
number_of_blocks = (array_len + (
number_of_threads - 1)) // number_of_threads
binary_sum[(number_of_blocks,), (number_of_threads,)](in_arr1,
in_arr2,
out_arr,
array_len)
return out_arr
def multiply(in_arr1, in_arr2):
out_arr = cuda.device_array_like(in_arr1)
array_len = len(in_arr1)
number_of_blocks = (array_len + (
number_of_threads - 1)) // number_of_threads
binary_multiply[(number_of_blocks,), (number_of_threads,)](in_arr1,
in_arr2,
out_arr,
array_len)
return out_arr
def division(in_arr1, in_arr2):
out_arr = cuda.device_array_like(in_arr1)
array_len = len(in_arr1)
number_of_blocks = (array_len + (
number_of_threads - 1)) // number_of_threads
binary_div[(number_of_blocks,), (number_of_threads,)](in_arr1,
in_arr2,
out_arr,
array_len)
return out_arr
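# Illustrative check of the division conventions implemented by binary_div
# (hypothetical helper, not part of the original module): 0/0 -> nan,
# positive/0 -> inf, negative/0 -> -inf, and NaN inputs propagate.
def _demo_division():
    import numpy as np
    num = cuda.to_device(np.array([0.0, 1.0, -1.0, math.nan]))
    den = cuda.to_device(np.array([0.0, 0.0, 0.0, 1.0]))
    return division(num, den).copy_to_host()  # [nan, inf, -inf, nan]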
def scale(in_arr1, scaler):
out_arr = cuda.device_array_like(in_arr1)
array_len = len(in_arr1)
number_of_blocks = (array_len + (
number_of_threads - 1)) // number_of_threads
scale_kernel[(number_of_blocks,), (number_of_threads,)](in_arr1,
scaler,
out_arr,
array_len)
return out_arr
@cuda.jit
def cumsum_kernel(in_arr, out_arr, block_arr, arr_len):
shared = cuda.shared.array(shape=0, dtype=numba.float64)
num_threads = cuda.blockDim.x
tx = cuda.threadIdx.x
bid = cuda.blockIdx.x
partial_sum_offset = num_threads * 2
starting_id = bid * partial_sum_offset
# load the in_arr to shared
for j in range(2):
offset = tx + j * num_threads
if (offset + starting_id) < arr_len:
shared[offset] = in_arr[offset + starting_id]
else:
shared[offset] = 0.0
cuda.syncthreads()
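    # up-sweep (reduce) phase of the work-efficient Blelloch scan:
    # build a binary tree of partial sums in place in shared memory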
offset = 1
d = num_threads
while d > 0:
cuda.syncthreads()
if (tx < d):
ai = offset*(2*tx+1)-1
bi = offset*(2*tx+2)-1
shared[bi] += shared[ai]
offset *= 2
d = d // 2
if (tx == 0):
block_arr[bid] = shared[2 * num_threads - 1]
shared[2 * num_threads - 1] = 0.0
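    # down-sweep phase: convert the partial-sum tree into an exclusive
    # prefix scan (the block total was saved to block_arr just above)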
d = 1
while d < 2 * num_threads:
offset = offset // 2
cuda.syncthreads()
if tx < d:
ai = offset*(2*tx+1)-1
bi = offset*(2*tx+2)-1
t = shared[ai]
shared[ai] = shared[bi]
shared[bi] += t
d *= 2
cuda.syncthreads()
# load back to the output
for j in range(2):
offset = tx + j * num_threads
if (offset + starting_id) < arr_len and offset + 1 < 2 * num_threads:
out_arr[offset + starting_id] = shared[offset + 1]
if tx == 0:
arr_id = min(arr_len - 1, starting_id + 2 * num_threads - 1)
out_arr[arr_id] = block_arr[bid]
@cuda.jit
def correct_kernel(in_arr, block_arr, arr_len):
num_threads = cuda.blockDim.x
tx = cuda.threadIdx.x
bid = cuda.blockIdx.x
partial_sum_offset = num_threads * 2
starting_id = bid * partial_sum_offset
for j in range(2):
offset = tx + j * num_threads
lookup = bid - 1
if lookup >= 0 and (offset + starting_id) < arr_len:
in_arr[offset + starting_id] += block_arr[lookup]
def cumsum(g_input, number_of_threads=1024):
array_len = len(g_input)
number_of_blocks = (array_len + (
number_of_threads * 2 - 1)) // (number_of_threads * 2)
shared_buffer_size = (number_of_threads * 2)
block_summary = numba.cuda.device_array(number_of_blocks)
gpu_out = numba.cuda.device_array_like(g_input)
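    # the 4th launch parameter is dynamic shared memory in bytes:
    # 8 bytes per float64 element of the scan buffer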
cumsum_kernel[(number_of_blocks,),
(number_of_threads,),
0,
shared_buffer_size * 8](g_input,
gpu_out,
block_summary,
array_len)
if (number_of_blocks == 1):
return gpu_out
else:
block_sum = cumsum(block_summary)
correct_kernel[(number_of_blocks,),
(number_of_threads,)](gpu_out,
block_sum,
array_len)
return gpu_out
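# Illustrative usage sketch (not part of the original module), assuming a
# GPU: cumsum scans each block in shared memory, recursively scans the
# per-block totals, then correct_kernel adds each block's prefix back in.
def _demo_cumsum():
    import numpy as np
    data = cuda.to_device(np.ones(10_000))
    return cumsum(data).copy_to_host()[-1]  # expected: 10000.0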
# --- end of file: gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/cuindicator/util.py (repo: fsi-samples-main) ---
from numba import cuda
import numba
from .windows import (portfolio_ewma_mean_window)
kernel_cache = {}
def get_ewm_kernel(method):
if method in kernel_cache:
return kernel_cache[method]
@cuda.jit
def kernel(asset_indicator, in_arr, out_arr, average_length, span, arr_len,
thread_tile, min_size):
"""
This kernel is to copy input array elements into shared array.
The total window size. To compute
output element at i, it uses [i - average_length - 1, i] elements in
history.
Arguments:
in_arr: input gpu array
out_arr: output gpu_array
average_length: is the size used to compute expoential weighted
average
span: the span size for the exponential weighted average
arr_len: the input/output array length
thread_tile: each thread is responsible for `thread_tile` number
of elements
min_size: the minimum number of non-na elements
"""
shared = cuda.shared.array(shape=0,
dtype=numba.float64)
block_size = cuda.blockDim.x
tx = cuda.threadIdx.x
# Block id in a 1D grid
bid = cuda.blockIdx.x
starting_id = bid * block_size * thread_tile
# copy the thread_tile * number_of_thread_per_block into the shared
for j in range(thread_tile):
offset = tx + j * block_size
if (starting_id + offset) < arr_len:
shared[offset + average_length - 1] = in_arr[
starting_id + offset]
cuda.syncthreads()
# copy the average_length - 1 into the shared
for j in range(0, average_length - 1, block_size):
if (((tx + j) < average_length - 1) and
(starting_id - average_length + 1 + tx + j >= 0)):
shared[tx + j] = \
in_arr[starting_id - average_length + 1 + tx + j]
cuda.syncthreads()
# slice the shared memory for each threads
start_shared = tx * thread_tile
his_len = min(average_length - 1,
starting_id + tx * thread_tile)
# slice the global memory for each threads
start = starting_id + tx * thread_tile
end = min(starting_id + (tx + 1) * thread_tile, arr_len)
sub_outarr = out_arr[start:end]
sub_len = end - start
method(asset_indicator, shared, his_len, sub_outarr,
average_length, span, sub_len,
average_length - 1 + start_shared,
min_size, start)
kernel_cache[method] = kernel
return kernel
class PEwm(object):
def __init__(self, span, input_arr, asset_indicator, min_periods=None,
thread_tile=48, number_of_threads=64, expand_multiplier=10):
"""
The Ewm class that is used to do rolling exponential weighted moving
average. It uses expand_multiplier * span elements to do the weighted
average. So adjust expand_multiplier to adjust accuracy.
Arguments:
span: the span parameter in the exponential weighted moving average
input_arr: the input GPU array or cudf.Series
min_periods: the minimum number of non-na elements need to get an
output
thread_tile: each thread will be responsible for `thread_tile`
number of elements in window computation
number_of_threads: num. of threads in a block for CUDA computation
expand_multiplier: the number of elements used computing EWM is
controled by this constant. The higher this
number, the better the accuracy but slower in
performance
"""
if isinstance(input_arr, numba.cuda.cudadrv.devicearray.DeviceNDArray):
self.gpu_in = input_arr
else:
self.gpu_in = input_arr.to_gpu_array()
if min_periods is None:
self.min_periods = span
else:
self.min_periods = min_periods
self.span = span
self.window = span * expand_multiplier
self.number_of_threads = number_of_threads
self.array_len = len(self.gpu_in)
self.thread_tile = thread_tile
self.number_of_blocks = (self.array_len +
(number_of_threads * thread_tile - 1)) // (
number_of_threads * thread_tile)
self.shared_buffer_size = \
(self.number_of_threads * self.thread_tile + self.window - 1)
if isinstance(asset_indicator,
numba.cuda.cudadrv.devicearray.DeviceNDArray):
self.asset_indicator = asset_indicator
else:
self.asset_indicator = asset_indicator.to_gpu_array()
def apply(self, method):
gpu_out = numba.cuda.device_array_like(self.gpu_in)
kernel = get_ewm_kernel(method)
kernel[(self.number_of_blocks,),
(self.number_of_threads,),
0,
self.shared_buffer_size * 8](self.asset_indicator,
self.gpu_in,
gpu_out,
self.window,
self.span,
self.array_len,
self.thread_tile,
self.min_periods)
return gpu_out
def mean(self):
return self.apply(portfolio_ewma_mean_window)
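# Hypothetical usage sketch (not part of the original module), assuming a
# GPU; asset_indicator marks the first row of each asset with 1.0 so the
# EWMA restarts at every asset boundary.
def _demo_pewm_mean():
    import numpy as np
    prices = cuda.to_device(np.arange(10, dtype=np.float64))
    boundary = cuda.to_device(
        np.array([1, 0, 0, 0, 0, 1, 0, 0, 0, 0], dtype=np.float64))
    return PEwm(3, prices, boundary).mean().copy_to_host()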
# --- end of file: gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/cuindicator/pewm.py (repo: fsi-samples-main) ---
from .util import shift, diff
from .rolling import Rolling
from .ewm import Ewm
from .pewm import PEwm
import cudf
import collections
import math
import numba
from .util import (substract, summation, multiply,
division, upDownMove, abs_arr,
true_range, lowhigh_diff, money_flow,
average_price, onbalance_volume,
ultimate_osc, scale, port_true_range,
port_mask_nan, port_lowhigh_diff,
port_money_flow, port_onbalance_volume,
port_ultimate_osc, port_mask_zero)
def moving_average(close_arr, n):
"""Calculate the moving average for the given data.
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps
    :return: moving average in cudf.Series
"""
MA = Rolling(n, close_arr).mean()
return cudf.Series(MA, nan_as_null=False)
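# Example (illustrative, assuming `df['close']` is a float64 cudf.Series):
#     ma20 = moving_average(df['close'], 20)
# the first n - 1 values are typically the NaN warm-up period.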
def exponential_moving_average(close_arr, n):
"""Calculate the exponential weighted moving average for the given data.
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps
    :return: exponential weighted moving average in cudf.Series
"""
EMA = Ewm(n, close_arr).mean()
return cudf.Series(EMA, nan_as_null=False)
def port_exponential_moving_average(asset_indicator, close_arr, n):
"""Calculate the port exponential weighted moving average
for the given data.
:param asset_indicator: the indicator of beginning of the stock
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps
    :return: exponential weighted moving average in cudf.Series
"""
EMA = PEwm(n, close_arr, asset_indicator).mean()
return cudf.Series(EMA, nan_as_null=False)
def port_moving_average(asset_indicator, close_arr, n):
"""Calculate the port moving average for the given data.
:param asset_indicator: the indicator of beginning of the stock
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps
    :return: moving average in cudf.Series
"""
MA = Rolling(n, close_arr).mean()
port_mask_nan(asset_indicator.to_gpu_array(), MA, 0, n - 1)
return cudf.Series(MA, nan_as_null=False)
def momentum(close_arr, n):
"""Calculate the momentum for the given data.
    :param close_arr: close price of the bar, expect series from cudf
    :param n: time steps
    :return: momentum in cudf.Series
"""
return cudf.Series(diff(close_arr, n), nan_as_null=False)
def rate_of_change(close_arr, n):
""" Calculate the rate of return
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps
:return: rate of change in cu.Series
"""
M = diff(close_arr, n - 1)
N = shift(close_arr, n - 1)
return cudf.Series(division(M, N), nan_as_null=False)
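# Note (added for clarity): with M = close_t - close_{t-(n-1)} and
# N = close_{t-(n-1)}, the returned M / N is the (n-1)-period simple return,
# matching pandas' close.diff(n - 1) / close.shift(n - 1).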
def port_rate_of_change(asset_indicator, close_arr, n):
""" Calculate the port rate of return
:param asset_indicator: the indicator of beginning of the stock
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps
:return: rate of change in cu.Series
"""
M = diff(close_arr, n - 1)
N = shift(close_arr, n - 1)
out = division(M, N)
if n - 1 >= 0:
port_mask_nan(asset_indicator.to_gpu_array(), out, 0, n - 1)
else:
port_mask_nan(asset_indicator.to_gpu_array(), out, n - 1, 0)
return cudf.Series(out, nan_as_null=False)
def port_diff(asset_indicator, close_arr, n):
""" Calculate the port diff
:param asset_indicator: the indicator of beginning of the stock
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps
    :return: diff in cudf.Series
"""
M = diff(close_arr.to_gpu_array(), n)
if n >= 0:
port_mask_nan(asset_indicator.to_gpu_array(), M, 0, n)
else:
port_mask_nan(asset_indicator.to_gpu_array(), M, n, 0)
return cudf.Series(M, nan_as_null=False)
def port_shift(asset_indicator, close_arr, n):
""" Calculate the port diff
:param asset_indicator: the indicator of beginning of the stock
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps
:return: shift in cu.Series
"""
M = shift(close_arr.to_gpu_array(), n)
if n >= 0:
port_mask_nan(asset_indicator.to_gpu_array(), M, 0, n)
else:
port_mask_nan(asset_indicator.to_gpu_array(), M, n, 0)
return cudf.Series(M, nan_as_null=False)
def bollinger_bands(close_arr, n):
"""Calculate the Bollinger Bands.
See https://www.investopedia.com/terms/b/bollingerbands.asp for details
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps
:return: b1 b2
"""
MA = Rolling(n, close_arr).mean()
MSD = Rolling(n, close_arr).std()
close_arr_gpu = numba.cuda.device_array_like(close_arr.to_gpu_array())
close_arr_gpu[:] = close_arr.to_gpu_array()[:]
close_arr_gpu[0:n-1] = math.nan
MSD_4 = scale(MSD, 4.0)
b1 = division(MSD_4, MA)
b2 = division(summation(substract(close_arr_gpu, MA), scale(MSD, 2.0)),
MSD_4)
out = collections.namedtuple('Bollinger', 'b1 b2')
return out(b1=cudf.Series(b1, nan_as_null=False),
b2=cudf.Series(b2, nan_as_null=False))
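# Note (added for clarity): b1 = 4 * MSD / MA is a bandwidth-style measure
# (band width relative to the moving average), and b2 appears to correspond
# to the %b statistic, (close - lower band) / (upper band - lower band),
# with bands placed at MA +/- 2 * MSD.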
def port_bollinger_bands(asset_indicator, close_arr, n):
"""Calculate the port Bollinger Bands.
See https://www.investopedia.com/terms/b/bollingerbands.asp for details
:param asset_indicator: the indicator of beginning of the stock
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps
:return: b1 b2
"""
MA = Rolling(n, close_arr).mean()
port_mask_nan(asset_indicator.to_gpu_array(), MA, 0, n - 1)
MSD = Rolling(n, close_arr).std()
port_mask_nan(asset_indicator.to_gpu_array(), MSD, 0, n - 1)
close_arr_gpu = numba.cuda.device_array_like(close_arr.to_gpu_array())
close_arr_gpu[:] = close_arr.to_gpu_array()[:]
close_arr_gpu[0:n-1] = math.nan
MSD_4 = scale(MSD, 4.0)
b1 = division(MSD_4, MA)
b2 = division(summation(substract(close_arr_gpu, MA), scale(MSD, 2.0)),
MSD_4)
out = collections.namedtuple('Bollinger', 'b1 b2')
return out(b1=cudf.Series(b1, nan_as_null=False),
b2=cudf.Series(b2, nan_as_null=False))
def trix(close_arr, n):
"""Calculate TRIX for given data.
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps
:return: trix indicator in cudf.Series
"""
EX1 = Ewm(n, close_arr).mean()
EX2 = Ewm(n, EX1).mean()
EX3 = Ewm(n, EX2).mean()
return rate_of_change(cudf.Series(EX3, nan_as_null=False), 2)
def port_trix(asset_indicator, close_arr, n):
"""Calculate the port trix.
:param asset_indicator: the indicator of beginning of the stock
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps
    :return: trix indicator in cudf.Series
"""
EX1 = PEwm(n, close_arr, asset_indicator).mean()
EX2 = PEwm(n, EX1, asset_indicator).mean()
EX3 = PEwm(n, EX2, asset_indicator).mean()
return rate_of_change(cudf.Series(EX3, nan_as_null=False), 2)
def macd(close_arr, n_fast, n_slow):
"""Calculate MACD, MACD Signal and MACD difference
:param close_arr: close price of the bar, expect series from cudf
:param n_fast: fast time steps
:param n_slow: slow time steps
:return: MACD MACDsign MACDdiff
"""
EMAfast = Ewm(n_fast, close_arr).mean()
EMAslow = Ewm(n_slow, close_arr).mean()
MACD = substract(EMAfast, EMAslow)
average_window = 9
MACDsign = Ewm(average_window, MACD).mean()
MACDdiff = substract(MACD, MACDsign)
out = collections.namedtuple('MACD', 'MACD MACDsign MACDdiff')
return out(MACD=cudf.Series(MACD, nan_as_null=False),
MACDsign=cudf.Series(MACDsign, nan_as_null=False),
MACDdiff=cudf.Series(MACDdiff, nan_as_null=False))
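# Example (illustrative, assuming a cudf.Series of prices):
#     res = macd(df['close'], 12, 26)  # classic 12/26 with a 9-period signal
#     hist = res.MACDdiff              # MACD histogram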
def port_macd(asset_indicator, close_arr, n_fast, n_slow):
"""Calculate MACD, MACD Signal and MACD difference
:param asset_indicator: the indicator of beginning of the stock
:param close_arr: close price of the bar, expect series from cudf
:param n_fast: fast time steps
:param n_slow: slow time steps
:return: MACD MACDsign MACDdiff
"""
EMAfast = PEwm(n_fast, close_arr, asset_indicator).mean()
EMAslow = PEwm(n_slow, close_arr, asset_indicator).mean()
MACD = substract(EMAfast, EMAslow)
average_window = 9
MACDsign = PEwm(average_window, MACD, asset_indicator).mean()
MACDdiff = substract(MACD, MACDsign)
out = collections.namedtuple('MACD', 'MACD MACDsign MACDdiff')
return out(MACD=cudf.Series(MACD, nan_as_null=False),
MACDsign=cudf.Series(MACDsign, nan_as_null=False),
MACDdiff=cudf.Series(MACDdiff, nan_as_null=False))
def average_true_range(high_arr, low_arr, close_arr, n):
"""Calculate the Average True Range
See https://www.investopedia.com/terms/a/atr.asp for details
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps
:return: average true range indicator
"""
tr = true_range(high_arr.to_gpu_array(), low_arr.to_gpu_array(),
close_arr.to_gpu_array())
ATR = Ewm(n, tr).mean()
return cudf.Series(ATR, nan_as_null=False)
def port_average_true_range(asset_indicator, high_arr,
low_arr, close_arr, n):
"""Calculate the port Average True Range
See https://www.investopedia.com/terms/a/atr.asp for details
:param asset_indicator: the indicator of beginning of the stock
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps
:return: average true range indicator
"""
tr = port_true_range(asset_indicator.to_gpu_array(),
high_arr.to_gpu_array(),
low_arr.to_gpu_array(),
close_arr.to_gpu_array())
ATR = PEwm(n, tr, asset_indicator).mean()
return cudf.Series(ATR, nan_as_null=False)
def ppsr(high_arr, low_arr, close_arr):
"""Calculate Pivot Points, Supports and Resistances for given data
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param close_arr: close price of the bar, expect series from cudf
:return: PP R1 S1 R2 S2 R3 S3
"""
high_gpu = high_arr.to_gpu_array()
low_gpu = low_arr.to_gpu_array()
close_gpu = close_arr.to_gpu_array()
PP = average_price(high_gpu, low_gpu, close_gpu)
R1 = substract(scale(PP, 2.0), low_gpu)
S1 = substract(scale(PP, 2.0), high_gpu)
R2 = substract(summation(PP, high_gpu), low_gpu)
S2 = summation(substract(PP, high_gpu), low_gpu)
R3 = summation(high_gpu, scale(substract(PP, low_gpu), 2.0))
S3 = substract(low_gpu, scale(substract(high_gpu, PP), 2.0))
out = collections.namedtuple('PPSR', 'PP R1 S1 R2 S2 R3 S3')
return out(PP=cudf.Series(PP, nan_as_null=False),
R1=cudf.Series(R1, nan_as_null=False),
S1=cudf.Series(S1, nan_as_null=False),
R2=cudf.Series(R2, nan_as_null=False),
S2=cudf.Series(S2, nan_as_null=False),
R3=cudf.Series(R3, nan_as_null=False),
S3=cudf.Series(S3, nan_as_null=False))
def port_ppsr(asset_indicator, high_arr, low_arr, close_arr):
"""Calculate port Pivot Points, Supports and Resistances for given data
:param asset_indicator: the indicator of beginning of the stock
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param close_arr: close price of the bar, expect series from cudf
:return: PP R1 S1 R2 S2 R3 S3
"""
high_gpu = high_arr.to_gpu_array()
low_gpu = low_arr.to_gpu_array()
close_gpu = close_arr.to_gpu_array()
PP = average_price(high_gpu, low_gpu, close_gpu)
R1 = substract(scale(PP, 2.0), low_gpu)
S1 = substract(scale(PP, 2.0), high_gpu)
R2 = substract(summation(PP, high_gpu), low_gpu)
S2 = summation(substract(PP, high_gpu), low_gpu)
R3 = summation(high_gpu, scale(substract(PP, low_gpu), 2.0))
S3 = substract(low_gpu, scale(substract(high_gpu, PP), 2.0))
out = collections.namedtuple('PPSR', 'PP R1 S1 R2 S2 R3 S3')
return out(PP=cudf.Series(PP, nan_as_null=False),
R1=cudf.Series(R1, nan_as_null=False),
S1=cudf.Series(S1, nan_as_null=False),
R2=cudf.Series(R2, nan_as_null=False),
S2=cudf.Series(S2, nan_as_null=False),
R3=cudf.Series(R3, nan_as_null=False),
S3=cudf.Series(S3, nan_as_null=False))
def stochastic_oscillator_k(high_arr, low_arr, close_arr):
"""Calculate stochastic oscillator K for given data.
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param close_arr: close price of the bar, expect series from cudf
:return: stochastic oscillator K in cudf.Series
"""
SOk = (close_arr - low_arr) / (high_arr - low_arr)
return SOk
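# Note (added for clarity): this %K is the raw, unsmoothed per-bar
# (close - low) / (high - low), on a 0-1 scale rather than the
# conventional 0-100.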
def port_stochastic_oscillator_k(asset_indicator, high_arr,
low_arr, close_arr):
"""Calculate stochastic oscillator K for given data.
:param asset_indicator: the indicator of beginning of the stock
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param close_arr: close price of the bar, expect series from cudf
:return: stochastic oscillator K in cudf.Series
"""
SOk = (close_arr - low_arr) / (high_arr - low_arr)
return SOk
def stochastic_oscillator_d(high_arr, low_arr, close_arr, n):
"""Calculate stochastic oscillator D for given data.
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps
:return: stochastic oscillator D in cudf.Series
"""
SOk = stochastic_oscillator_k(high_arr, low_arr, close_arr)
SOd = Ewm(n, SOk).mean()
return cudf.Series(SOd, nan_as_null=False)
def port_stochastic_oscillator_d(asset_indicator, high_arr, low_arr,
close_arr, n):
"""Calculate port stochastic oscillator D for given data.
:param asset_indicator: the indicator of beginning of the stock
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps
:return: stochastic oscillator D in cudf.Series
"""
SOk = stochastic_oscillator_k(high_arr, low_arr, close_arr)
SOd = PEwm(n, SOk, asset_indicator).mean()
return cudf.Series(SOd, nan_as_null=False)
def average_directional_movement_index(high_arr, low_arr, close_arr, n, n_ADX):
"""Calculate the Average Directional Movement Index for given data.
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps to do EWM average
:param n_ADX: time steps to do EWM average of ADX
:return: Average Directional Movement Index in cudf.Series
"""
UpI, DoI = upDownMove(high_arr.to_gpu_array(),
low_arr.to_gpu_array())
last_ele = len(high_arr) - 1
tr = true_range(high_arr.to_gpu_array(), low_arr.to_gpu_array(),
close_arr.to_gpu_array())
ATR = Ewm(n, tr).mean()
PosDI = division(Ewm(n, UpI).mean(), ATR)
NegDI = division(Ewm(n, DoI).mean(), ATR)
NORM = division(abs_arr(substract(PosDI, NegDI)), summation(PosDI, NegDI))
NORM[last_ele] = math.nan
ADX = cudf.Series(Ewm(n_ADX, NORM).mean(), nan_as_null=False)
return ADX
def port_average_directional_movement_index(asset_indicator,
high_arr, low_arr,
close_arr, n, n_ADX):
"""Calculate the port Average Directional Movement Index for given data.
:param asset_indicator: the indicator of beginning of the stock
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps to do EWM average
:param n_ADX: time steps to do EWM average of ADX
:return: Average Directional Movement Index in cudf.Series
"""
UpI, DoI = upDownMove(high_arr.to_gpu_array(),
low_arr.to_gpu_array())
tr = port_true_range(asset_indicator.to_gpu_array(),
high_arr.to_gpu_array(),
low_arr.to_gpu_array(),
close_arr.to_gpu_array())
ATR = PEwm(n, tr, asset_indicator).mean()
PosDI = division(PEwm(n, UpI, asset_indicator).mean(), ATR)
NegDI = division(PEwm(n, DoI, asset_indicator).mean(), ATR)
NORM = division(abs_arr(substract(PosDI, NegDI)), summation(PosDI, NegDI))
port_mask_nan(asset_indicator.to_gpu_array(), NORM, -1, 0)
ADX = cudf.Series(PEwm(n_ADX, NORM, asset_indicator).mean(),
nan_as_null=False)
return ADX
def vortex_indicator(high_arr, low_arr, close_arr, n):
"""Calculate the Vortex Indicator for given data.
Vortex Indicator described here:
http://www.vortexindicator.com/VFX_VORTEX.PDF
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps to do EWM average
:return: Vortex Indicator in cudf.Series
"""
TR = true_range(high_arr.to_gpu_array(), low_arr.to_gpu_array(),
close_arr.to_gpu_array())
VM = lowhigh_diff(high_arr.to_gpu_array(),
low_arr.to_gpu_array())
VI = division(Rolling(n, VM).sum(), Rolling(n, TR).sum())
return cudf.Series(VI, nan_as_null=False)
def port_vortex_indicator(asset_indicator, high_arr, low_arr, close_arr, n):
"""Calculate the port Vortex Indicator for given data.
Vortex Indicator described here:
http://www.vortexindicator.com/VFX_VORTEX.PDF
:param asset_indicator: the indicator of beginning of the stock
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps to do EWM average
:return: Vortex Indicator in cudf.Series
"""
TR = port_true_range(asset_indicator.to_gpu_array(),
high_arr.to_gpu_array(),
low_arr.to_gpu_array(),
close_arr.to_gpu_array())
VM = port_lowhigh_diff(asset_indicator.to_gpu_array(),
high_arr.to_gpu_array(),
low_arr.to_gpu_array())
VI = division(Rolling(n, VM).sum(), Rolling(n, TR).sum())
port_mask_nan(asset_indicator.to_gpu_array(), VI, 0, n - 1)
return cudf.Series(VI, nan_as_null=False)
def kst_oscillator(close_arr, r1, r2, r3, r4, n1, n2, n3, n4):
"""Calculate KST Oscillator for given data.
:param close_arr: close price of the bar, expect series from cudf
:param r1: r1 time steps
:param r2: r2 time steps
:param r3: r3 time steps
:param r4: r4 time steps
:param n1: n1 time steps
:param n2: n2 time steps
:param n3: n3 time steps
:param n4: n4 time steps
:return: KST Oscillator in cudf.Series
"""
M1 = diff(close_arr, r1 - 1)
N1 = shift(close_arr, r1 - 1)
M2 = diff(close_arr, r2 - 1)
N2 = shift(close_arr, r2 - 1)
M3 = diff(close_arr, r3 - 1)
N3 = shift(close_arr, r3 - 1)
M4 = diff(close_arr, r4 - 1)
N4 = shift(close_arr, r4 - 1)
term1 = Rolling(n1, division(M1, N1)).sum()
term2 = scale(Rolling(n2, division(M2, N2)).sum(), 2.0)
term3 = scale(Rolling(n3, division(M3, N3)).sum(), 3.0)
term4 = scale(Rolling(n4, division(M4, N4)).sum(), 4.0)
KST = summation(summation(summation(term1, term2), term3), term4)
return cudf.Series(KST, nan_as_null=False)
def port_kst_oscillator(asset_indicator, close_arr,
r1, r2, r3, r4, n1, n2, n3, n4):
"""Calculate port KST Oscillator for given data.
:param asset_indicator: the indicator of beginning of the stock
:param close_arr: close price of the bar, expect series from cudf
:param r1: r1 time steps
:param r2: r2 time steps
:param r3: r3 time steps
:param r4: r4 time steps
:param n1: n1 time steps
:param n2: n2 time steps
:param n3: n3 time steps
:param n4: n4 time steps
:return: KST Oscillator in cudf.Series
"""
M1 = diff(close_arr, r1 - 1)
N1 = shift(close_arr, r1 - 1)
port_mask_nan(asset_indicator.to_gpu_array(), M1, 0, r1 - 1)
port_mask_nan(asset_indicator.to_gpu_array(), N1, 0, r1 - 1)
M2 = diff(close_arr, r2 - 1)
N2 = shift(close_arr, r2 - 1)
port_mask_nan(asset_indicator.to_gpu_array(), M2, 0, r2 - 1)
port_mask_nan(asset_indicator.to_gpu_array(), N2, 0, r2 - 1)
M3 = diff(close_arr, r3 - 1)
N3 = shift(close_arr, r3 - 1)
port_mask_nan(asset_indicator.to_gpu_array(), M3, 0, r3 - 1)
port_mask_nan(asset_indicator.to_gpu_array(), N3, 0, r3 - 1)
M4 = diff(close_arr, r4 - 1)
N4 = shift(close_arr, r4 - 1)
port_mask_nan(asset_indicator.to_gpu_array(), M4, 0, r4 - 1)
port_mask_nan(asset_indicator.to_gpu_array(), N4, 0, r4 - 1)
term1 = Rolling(n1, division(M1, N1)).sum()
port_mask_nan(asset_indicator.to_gpu_array(), term1, 0, n1 - 1)
term2 = scale(Rolling(n2, division(M2, N2)).sum(), 2.0)
port_mask_nan(asset_indicator.to_gpu_array(), term2, 0, n2 - 1)
term3 = scale(Rolling(n3, division(M3, N3)).sum(), 3.0)
port_mask_nan(asset_indicator.to_gpu_array(), term3, 0, n3 - 1)
term4 = scale(Rolling(n4, division(M4, N4)).sum(), 4.0)
port_mask_nan(asset_indicator.to_gpu_array(), term4, 0, n4 - 1)
KST = summation(summation(summation(term1, term2), term3), term4)
return cudf.Series(KST, nan_as_null=False)
def relative_strength_index(high_arr, low_arr, n):
"""Calculate Relative Strength Index(RSI) for given data.
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param n: time steps to do EWM average
:return: Relative Strength Index in cudf.Series
"""
UpI, DoI = upDownMove(high_arr.to_gpu_array(),
low_arr.to_gpu_array())
UpI_s = shift(UpI, 1)
UpI_s[0] = 0
DoI_s = shift(DoI, 1)
DoI_s[0] = 0
PosDI = Ewm(n, UpI_s).mean()
NegDI = Ewm(n, DoI_s).mean()
RSI = division(PosDI, summation(PosDI, NegDI))
return cudf.Series(RSI, nan_as_null=False)
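# Note (added for clarity): the RSI here is PosDI / (PosDI + NegDI), i.e. on
# a 0-1 scale; multiply by 100 to match the conventional 0-100 RSI reading.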
def port_relative_strength_index(asset_indicator, high_arr, low_arr, n):
"""Calculate Relative Strength Index(RSI) for given data.
:param asset_indicator: the indicator of beginning of the stock
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param n: time steps to do EWM average
:return: Relative Strength Index in cudf.Series
"""
UpI, DoI = upDownMove(high_arr.to_gpu_array(),
low_arr.to_gpu_array())
UpI_s = shift(UpI, 1)
UpI_s[0] = 0
    UpI_s = cudf.Series(UpI_s, nan_as_null=False) * (
        1.0 - asset_indicator.reset_index(drop=True))
    DoI_s = shift(DoI, 1)
    DoI_s[0] = 0
    DoI_s = cudf.Series(DoI_s, nan_as_null=False) * (
        1.0 - asset_indicator.reset_index(drop=True))
PosDI = PEwm(n, UpI_s, asset_indicator).mean()
NegDI = PEwm(n, DoI_s, asset_indicator).mean()
RSI = division(PosDI, summation(PosDI, NegDI))
return cudf.Series(RSI, nan_as_null=False)
def mass_index(high_arr, low_arr, n1, n2):
"""Calculate the Mass Index for given data.
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param n1: n1 time steps
    :param n2: n2 time steps
:return: Mass Index in cudf.Series
"""
Range = high_arr - low_arr
EX1 = Ewm(n1, Range).mean()
EX2 = Ewm(n1, EX1).mean()
Mass = division(EX1, EX2)
MassI = Rolling(n2, Mass).sum()
return cudf.Series(MassI, nan_as_null=False)
def port_mass_index(asset_indicator, high_arr, low_arr, n1, n2):
"""Calculate the port Mass Index for given data.
:param asset_indicator: the indicator of beginning of the stock
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param n1: n1 time steps
    :param n2: n2 time steps
:return: Mass Index in cudf.Series
"""
Range = high_arr - low_arr
EX1 = PEwm(n1, Range, asset_indicator).mean()
EX2 = PEwm(n1, EX1, asset_indicator).mean()
Mass = division(EX1, EX2)
MassI = Rolling(n2, Mass).sum()
port_mask_nan(asset_indicator.to_gpu_array(), MassI, 0, n2 - 1)
return cudf.Series(MassI, nan_as_null=False)
def true_strength_index(close_arr, r, s):
"""Calculate True Strength Index (TSI) for given data.
:param close_arr: close price of the bar, expect series from cudf
:param r: r time steps
:param s: s time steps
:return: True Strength Index in cudf.Series
"""
M = diff(close_arr, 1)
aM = abs_arr(M)
EMA1 = Ewm(r, M).mean()
aEMA1 = Ewm(r, aM).mean()
EMA2 = Ewm(s, EMA1).mean()
aEMA2 = Ewm(s, aEMA1).mean()
TSI = division(EMA2, aEMA2)
return cudf.Series(TSI, nan_as_null=False)
def port_true_strength_index(asset_indicator, close_arr, r, s):
"""Calculate port True Strength Index (TSI) for given data.
:param asset_indicator: the indicator of beginning of the stock
:param close_arr: close price of the bar, expect series from cudf
:param r: r time steps
:param s: s time steps
:return: True Strength Index in cudf.Series
"""
M = diff(close_arr, 1)
port_mask_nan(asset_indicator.to_gpu_array(), M, 0, 1)
aM = abs_arr(M)
EMA1 = PEwm(r, M, asset_indicator).mean()
aEMA1 = PEwm(r, aM, asset_indicator).mean()
EMA2 = PEwm(s, EMA1, asset_indicator).mean()
aEMA2 = PEwm(s, aEMA1, asset_indicator).mean()
TSI = division(EMA2, aEMA2)
return cudf.Series(TSI, nan_as_null=False)
def chaikin_oscillator(high_arr, low_arr, close_arr, volume_arr, n1, n2):
"""Calculate Chaikin Oscillator for given data.
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param close_arr: close price of the bar, expect series from cudf
    :param volume_arr: volume of the bar, expect series from cudf
:param n1: n1 time steps
:param n2: n2 time steps
:return: Chaikin Oscillator indicator in cudf.Series
"""
ad = (2.0 * close_arr - high_arr - low_arr) / (
high_arr - low_arr) * volume_arr
Chaikin = cudf.Series(Ewm(n1, ad).mean(),
nan_as_null=False) - cudf.Series(Ewm(n2, ad).mean(),
nan_as_null=False)
return Chaikin
def port_chaikin_oscillator(asset_indicator, high_arr, low_arr,
close_arr, volume_arr, n1, n2):
"""Calculate port Chaikin Oscillator for given data.
:param asset_indicator: the indicator of beginning of the stock
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param close_arr: close price of the bar, expect series from cudf
    :param volume_arr: volume of the bar, expect series from cudf
:param n1: n1 time steps
:param n2: n2 time steps
:return: Chaikin Oscillator indicator in cudf.Series
"""
ad = (2.0 * close_arr - high_arr - low_arr) / (
high_arr - low_arr) * volume_arr
first = PEwm(n1, ad, asset_indicator).mean()
second = PEwm(n2, ad, asset_indicator).mean()
Chaikin = cudf.Series(substract(first, second), nan_as_null=False)
return Chaikin
def money_flow_index(high_arr, low_arr, close_arr, volume_arr, n):
"""Calculate Money Flow Index and Ratio for given data.
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param close_arr: close price of the bar, expect series from cudf
    :param volume_arr: volume of the bar, expect series from cudf
:param n: time steps
:return: Money Flow Index in cudf.Series
"""
PP = average_price(high_arr.to_gpu_array(),
low_arr.to_gpu_array(),
close_arr.to_gpu_array())
PosMF = money_flow(PP, volume_arr.to_gpu_array())
MFR = division(PosMF,
(multiply(PP, volume_arr.to_gpu_array()))) # TotMF
MFI = Rolling(n, MFR).mean()
return cudf.Series(MFI, nan_as_null=False)
def port_money_flow_index(asset_indicator, high_arr, low_arr,
close_arr, volume_arr, n):
"""Calculate port Money Flow Index and Ratio for given data.
:param asset_indicator: the indicator of beginning of the stock
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param close_arr: close price of the bar, expect series from cudf
    :param volume_arr: volume of the bar, expect series from cudf
:param n: time steps
:return: Money Flow Index in cudf.Series
"""
PP = average_price(high_arr.to_gpu_array(),
low_arr.to_gpu_array(),
close_arr.to_gpu_array())
PosMF = port_money_flow(asset_indicator.to_gpu_array(), PP,
volume_arr.to_gpu_array())
MFR = division(PosMF,
(multiply(PP, volume_arr.to_gpu_array()))) # TotMF
MFI = Rolling(n, MFR).mean()
port_mask_nan(asset_indicator.to_gpu_array(), MFI, 0, n - 1)
return cudf.Series(MFI, nan_as_null=False)
def on_balance_volume(close_arr, volume_arr, n):
"""Calculate On-Balance Volume for given data.
:param close_arr: close price of the bar, expect series from cudf
    :param volume_arr: volume of the bar, expect series from cudf
:param n: time steps
:return: On-Balance Volume in cudf.Series
"""
OBV = onbalance_volume(close_arr.to_gpu_array(),
volume_arr.to_gpu_array())
OBV_ma = Rolling(n, OBV).mean()
return cudf.Series(OBV_ma, nan_as_null=False)
def port_on_balance_volume(asset_indicator, close_arr, volume_arr, n):
"""Calculate port On-Balance Volume for given data.
:param asset_indicator: the indicator of beginning of the stock
:param close_arr: close price of the bar, expect series from cudf
    :param volume_arr: volume of the bar, expect series from cudf
:param n: time steps
:return: On-Balance Volume in cudf.Series
"""
OBV = port_onbalance_volume(asset_indicator.to_gpu_array(),
close_arr.to_gpu_array(),
volume_arr.to_gpu_array())
OBV_ma = Rolling(n, OBV).mean()
port_mask_nan(asset_indicator.to_gpu_array(), OBV_ma, 0, n - 1)
return cudf.Series(OBV_ma, nan_as_null=False)
def force_index(close_arr, volume_arr, n):
"""Calculate Force Index for given data.
:param close_arr: close price of the bar, expect series from cudf
    :param volume_arr: volume of the bar, expect series from cudf
:param n: time steps
:return: Force Index in cudf.Series
"""
F = multiply(diff(close_arr, n), diff(volume_arr, n))
return cudf.Series(F, nan_as_null=False)
def port_force_index(asset_indicator, close_arr, volume_arr, n):
"""Calculate port Force Index for given data.
:param asset_indicator: the indicator of beginning of the stock
:param close_arr: close price of the bar, expect series from cudf
    :param volume_arr: volume of the bar, expect series from cudf
:param n: time steps
:return: Force Index in cudf.Series
"""
F = multiply(diff(close_arr, n), diff(volume_arr, n))
port_mask_nan(asset_indicator.to_gpu_array(), F, 0, n)
return cudf.Series(F, nan_as_null=False)
def ease_of_movement(high_arr, low_arr, volume_arr, n):
"""Calculate Ease of Movement for given data.
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
    :param volume_arr: volume of the bar, expect series from cudf
:param n: time steps
:return: Ease of Movement in cudf.Series
"""
high_arr_gpu = high_arr.to_gpu_array()
low_arr_gpu = low_arr.to_gpu_array()
EoM = division(multiply(summation(diff(high_arr_gpu, 1),
diff(low_arr_gpu, 1)),
substract(high_arr_gpu, low_arr_gpu)),
scale(volume_arr.to_gpu_array(), 2.0))
Eom_ma = Rolling(n, EoM).mean()
return cudf.Series(Eom_ma, nan_as_null=False)
def port_ease_of_movement(asset_indicator, high_arr, low_arr, volume_arr, n):
"""Calculate port Ease of Movement for given data.
:param asset_indicator: the indicator of beginning of the stock
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
    :param volume_arr: volume of the bar, expect series from cudf
:param n: time steps
:return: Ease of Movement in cudf.Series
"""
high_arr_gpu = high_arr.to_gpu_array()
low_arr_gpu = low_arr.to_gpu_array()
EoM = division(multiply(summation(diff(high_arr_gpu, 1),
diff(low_arr_gpu, 1)),
substract(high_arr_gpu, low_arr_gpu)),
scale(volume_arr.to_gpu_array(), 2.0))
port_mask_nan(asset_indicator.to_gpu_array(), EoM, 0, 1)
Eom_ma = Rolling(n, EoM).mean()
port_mask_nan(asset_indicator.to_gpu_array(), Eom_ma, 0, n - 1)
return cudf.Series(Eom_ma, nan_as_null=False)
def ultimate_oscillator(high_arr, low_arr, close_arr):
"""Calculate Ultimate Oscillator for given data.
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param close_arr: close price of the bar, expect series from cudf
:return: Ultimate Oscillator in cudf.Series
"""
TR_l, BP_l = ultimate_osc(high_arr.to_gpu_array(),
low_arr.to_gpu_array(),
close_arr.to_gpu_array())
term1 = division(scale(Rolling(7, BP_l).sum(), 4.0),
Rolling(7, TR_l).sum())
term2 = division(scale(Rolling(14, BP_l).sum(), 2.0),
Rolling(14, TR_l).sum())
term3 = division(Rolling(28, BP_l).sum(), Rolling(28, TR_l).sum())
UltO = summation(summation(term1, term2), term3)
return cudf.Series(UltO, nan_as_null=False)
def port_ultimate_oscillator(asset_indicator, high_arr, low_arr, close_arr):
"""Calculate port Ultimate Oscillator for given data.
:param asset_indicator: the indicator of beginning of the stock
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param close_arr: close price of the bar, expect series from cudf
:return: Ultimate Oscillator in cudf.Series
"""
TR_l, BP_l = port_ultimate_osc(asset_indicator.to_gpu_array(),
high_arr.to_gpu_array(),
low_arr.to_gpu_array(),
close_arr.to_gpu_array())
term1 = division(scale(Rolling(7, BP_l).sum(), 4.0),
Rolling(7, TR_l).sum())
term2 = division(scale(Rolling(14, BP_l).sum(), 2.0),
Rolling(14, TR_l).sum())
term3 = division(Rolling(28, BP_l).sum(), Rolling(28, TR_l).sum())
port_mask_nan(asset_indicator.to_gpu_array(), term1, 0, 6)
port_mask_nan(asset_indicator.to_gpu_array(), term2, 0, 13)
port_mask_nan(asset_indicator.to_gpu_array(), term3, 0, 27)
UltO = summation(summation(term1, term2), term3)
return cudf.Series(UltO, nan_as_null=False)
def donchian_channel(high_arr, low_arr, n):
"""Calculate donchian channel of given pandas data frame.
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param n: time steps
:return: donchian channel in cudf.Series
"""
max_high = Rolling(n, high_arr).max()
min_low = Rolling(n, low_arr).min()
dc_l = substract(max_high, min_low)
dc_l[:n-1] = 0.0
donchian_chan = shift(dc_l, n - 1)
return cudf.Series(donchian_chan, nan_as_null=False)
def port_donchian_channel(asset_indicator, high_arr, low_arr, n):
"""Calculate port donchian channel of given pandas data frame.
:param asset_indicator: the indicator of beginning of the stock
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param n: time steps
:return: donchian channel in cudf.Series
"""
max_high = Rolling(n, high_arr).max()
port_mask_nan(asset_indicator.to_gpu_array(), max_high, 0, n - 1)
min_low = Rolling(n, low_arr).min()
port_mask_nan(asset_indicator.to_gpu_array(), min_low, 0, n - 1)
dc_l = substract(max_high, min_low)
# dc_l[:n-1] = 0.0
port_mask_zero(asset_indicator.to_gpu_array(), dc_l, 0, n - 1)
donchian_chan = shift(dc_l, n - 1)
port_mask_nan(asset_indicator.to_gpu_array(), donchian_chan, 0, n - 1)
return cudf.Series(donchian_chan, nan_as_null=False)
def keltner_channel(high_arr, low_arr, close_arr, n):
"""Calculate Keltner Channel for given data.
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps
:return: Keltner Channel in cudf.Series
"""
M = ((high_arr + low_arr + close_arr) / 3.0)
KelChM = cudf.Series(Rolling(n, M).mean(), nan_as_null=False)
U = ((4.0 * high_arr - 2.0 * low_arr + close_arr) / 3.0)
KelChU = cudf.Series(Rolling(n, U).mean(), nan_as_null=False)
D = ((-2.0 * high_arr + 4.0 * low_arr + close_arr) / 3.0)
KelChD = cudf.Series(Rolling(n, D).mean(), nan_as_null=False)
out = collections.namedtuple('Keltner', 'KelChM KelChU KelChD')
return out(KelChM=KelChM, KelChU=KelChU, KelChD=KelChD)
def port_keltner_channel(asset_indicator, high_arr, low_arr, close_arr, n):
"""Calculate port Keltner Channel for given data.
:param asset_indicator: the indicator of beginning of the stock
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps
:return: Keltner Channel in cudf.Series
"""
M = ((high_arr + low_arr + close_arr) / 3.0)
KelChM = Rolling(n, M).mean()
port_mask_nan(asset_indicator.to_gpu_array(), KelChM, 0, n - 1)
U = ((4.0 * high_arr - 2.0 * low_arr + close_arr) / 3.0)
KelChU = Rolling(n, U).mean()
port_mask_nan(asset_indicator.to_gpu_array(), KelChU, 0, n - 1)
D = ((-2.0 * high_arr + 4.0 * low_arr + close_arr) / 3.0)
KelChD = Rolling(n, D).mean()
port_mask_nan(asset_indicator.to_gpu_array(), KelChD, 0, n - 1)
out = collections.namedtuple('Keltner', 'KelChM KelChU KelChD')
return out(KelChM=cudf.Series(KelChM, nan_as_null=False),
KelChU=cudf.Series(KelChU, nan_as_null=False),
KelChD=cudf.Series(KelChD, nan_as_null=False))
def coppock_curve(close_arr, n):
"""Calculate Coppock Curve for given data.
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps
:return: Coppock Curve in cudf.Series
"""
M = diff(close_arr, int(n * 11 / 10) - 1)
N = shift(close_arr, int(n * 11 / 10) - 1)
ROC1 = division(M, N)
M = diff(close_arr, int(n * 14 / 10) - 1)
N = shift(close_arr, int(n * 14 / 10) - 1)
ROC2 = division(M, N)
Copp = Ewm(n, summation(ROC1, ROC2)).mean()
return cudf.Series(Copp, nan_as_null=False)
def port_coppock_curve(asset_indicator, close_arr, n):
"""Calculate port Coppock Curve for given data.
:param asset_indicator: the indicator of beginning of the stock
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps
:return: Coppock Curve in cudf.Series
"""
M = diff(close_arr, int(n * 11 / 10) - 1)
N = shift(close_arr, int(n * 11 / 10) - 1)
port_mask_nan(asset_indicator.to_gpu_array(), M, 0,
int(n * 11 / 10) - 1)
port_mask_nan(asset_indicator.to_gpu_array(), N, 0,
int(n * 11 / 10) - 1)
ROC1 = division(M, N)
M = diff(close_arr, int(n * 14 / 10) - 1)
N = shift(close_arr, int(n * 14 / 10) - 1)
port_mask_nan(asset_indicator.to_gpu_array(), M, 0,
int(n * 14 / 10) - 1)
port_mask_nan(asset_indicator.to_gpu_array(), N, 0,
int(n * 14 / 10) - 1)
ROC2 = division(M, N)
Copp = PEwm(n, summation(ROC1, ROC2), asset_indicator).mean()
return cudf.Series(Copp, nan_as_null=False)
def accumulation_distribution(high_arr, low_arr, close_arr, vol_arr, n):
"""Calculate Accumulation/Distribution for given data.
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param close_arr: close price of the bar, expect series from cudf
:param vol_arr: volume of the bar, expect series from cudf
:param n: time steps
:return: Accumulation/Distribution in cudf.Series
"""
ad = (2.0 * close_arr - high_arr - low_arr)/(high_arr - low_arr) * vol_arr
M = diff(ad, n-1)
N = shift(ad, n-1)
return cudf.Series(division(M, N), nan_as_null=False)
def port_accumulation_distribution(asset_indicator, high_arr,
low_arr, close_arr, vol_arr, n):
"""Calculate port Accumulation/Distribution for given data.
:param asset_indicator: the indicator of the beginning of each stock
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param close_arr: close price of the bar, expect series from cudf
:param vol_arr: volume of the bar, expect series from cudf
:param n: time steps
:return: Accumulation/Distribution in cudf.Series
"""
ad = (2.0 * close_arr - high_arr - low_arr)/(high_arr - low_arr) * vol_arr
M = diff(ad, n-1)
port_mask_nan(asset_indicator.to_gpu_array(), M, 0, n - 1)
N = shift(ad, n-1)
port_mask_nan(asset_indicator.to_gpu_array(), N, 0, n - 1)
return cudf.Series(division(M, N), nan_as_null=False)
def commodity_channel_index(high_arr, low_arr, close_arr, n):
"""Calculate Commodity Channel Index for given data.
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps
:return: Commodity Channel Index in cudf.Series
"""
PP = average_price(high_arr.to_gpu_array(),
low_arr.to_gpu_array(),
close_arr.to_gpu_array())
M = Rolling(n, PP).mean()
N = Rolling(n, PP).std()
CCI = division(substract(PP, M), N)
return cudf.Series(CCI, nan_as_null=False)
def port_commodity_channel_index(asset_indicator, high_arr,
low_arr, close_arr, n):
"""Calculate port Commodity Channel Index for given data.
:param asset_indicator: the indicator of the beginning of each stock
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps
:return: Commodity Channel Index in cudf.Series
"""
PP = average_price(high_arr.to_gpu_array(),
low_arr.to_gpu_array(),
close_arr.to_gpu_array())
M = Rolling(n, PP).mean()
port_mask_nan(asset_indicator.to_gpu_array(), M, 0, n - 1)
N = Rolling(n, PP).std()
port_mask_nan(asset_indicator.to_gpu_array(), N, 0, n - 1)
CCI = division(substract(PP, M), N)
return cudf.Series(CCI, nan_as_null=False)
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/cuindicator/indicator.py |
from .ewm import Ewm
from .indicator import * # noqa: F403, F401
from .pewm import PEwm
from .rolling import Rolling
from .util import (shift, diff, substract, summation,
multiply, division, scale, cumsum)
from .frac_diff import (fractional_diff, get_weights_floored,
port_fractional_diff)
__all__ = ["Ewm", "PEwm", "Rolling", "shift", "diff", "substract",
"summation", "multiply", "division", "scale", "cumsum",
"fractional_diff", "port_fractional_diff", "get_weights_floored"]
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/cuindicator/__init__.py |
from numba import cuda
import numba
from .windows import (mean_window, std_window, var_window,
min_window, max_window, sum_window,
backward_diff_window,
backward_shift_window,
forward_diff_window,
forward_shift_window)
kernel_cache = {}
def get_rolling_kernel(method):
if method in kernel_cache:
return kernel_cache[method]
@cuda.jit
def kernel(in_arr, out_arr, backward_length, forward_length,
arr_len, thread_tile, min_size):
"""
This kernel copies input array elements into a shared array and runs
the window computation. The total window size is backward_length +
forward_length. To compute the output element at i, it uses the
[i - backward_length + 1, i] elements in history and the
[i + 1, i + forward_length] elements in the future.
Arguments:
in_arr: input gpu array
out_arr: output gpu_array
backward_length: the number of history elements in the window
forward_length: the forward elements in the window
arr_len: the input/output array length
thread_tile: each thread is responsible for `thread_tile` number
of elements
min_size: the minimum number of non-na elements
"""
shared = cuda.shared.array(shape=0,
dtype=numba.float64)
block_size = cuda.blockDim.x # total number of threads
tx = cuda.threadIdx.x
# Block id in a 1D grid
bid = cuda.blockIdx.x
starting_id = bid * block_size * thread_tile
# copy the thread_tile * number_of_thread_per_block into the shared
for j in range(thread_tile):
offset = tx + j * block_size
if (starting_id + offset) < arr_len:
shared[offset + backward_length - 1] = in_arr[
starting_id + offset]
cuda.syncthreads()
# copy the backward_length - 1 into the shared
for j in range(0, backward_length - 1, block_size):
if (((tx + j) <
backward_length - 1) and (
starting_id - backward_length + 1 + tx + j >= 0)):
shared[tx + j] = \
in_arr[starting_id - backward_length + 1 + tx + j]
cuda.syncthreads()
# copy the forward_length into the shared
for j in range(0, forward_length, block_size):
element_id = (starting_id + thread_tile * block_size + tx + j)
if (((tx + j) < forward_length) and (element_id < arr_len)):
shared[thread_tile * block_size + backward_length - 1 + tx +
j] = in_arr[element_id]
cuda.syncthreads()
# slice the shared memory for each threads
start_shared = tx * thread_tile
his_len = min(backward_length - 1,
starting_id + tx * thread_tile)
future_len = max(arr_len - (starting_id + tx * thread_tile), 0)
# slice the global memory for each threads
start = starting_id + tx * thread_tile
end = min(starting_id + (tx + 1) * thread_tile, arr_len)
sub_outarr = out_arr[start:end]
sub_len = end - start
method(shared, his_len, future_len, sub_outarr,
backward_length, forward_length,
sub_len, backward_length - 1 + start_shared,
min_size
)
kernel_cache[method] = kernel
return kernel
class Rolling(object):
def __init__(self, window, input_arr, min_periods=None, forward_window=0,
thread_tile=48, number_of_threads=64):
"""
The Rolling class that is used to do rolling window computations.
The window size is `window + forward_window`. The element i uses the
[i - window + 1, i + forward_window] elements to do the window
computation.
Arguments:
window: the history window size.
input_arr: the input GPU array or cudf.Series
min_periods: the minimum number of non-na elements need to get an
output
forward_window: the windows size in the forward direction
thread_tile: each thread will be responsible for `thread_tile`
number of elements in window computation
number_of_threads: num. of threads in a block for CUDA computation
"""
if isinstance(input_arr, numba.cuda.cudadrv.devicearray.DeviceNDArray):
self.gpu_in = input_arr
else:
self.gpu_in = input_arr.to_gpu_array()
if min_periods is None:
self.min_periods = window + forward_window
else:
self.min_periods = min_periods
self.window = window
self.forward_window = forward_window
self.number_of_threads = number_of_threads
self.array_len = len(self.gpu_in)
self.gpu_out = numba.cuda.device_array_like(self.gpu_in)
self.thread_tile = thread_tile
self.number_of_blocks = \
(self.array_len + (number_of_threads * thread_tile - 1)) // \
(number_of_threads * thread_tile)
self.shared_buffer_size = (self.number_of_threads * self.thread_tile +
self.window - 1 + self.forward_window)
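# Shared-memory layout used by the rolling kernel, per block (an
# annotated sketch of the sizes computed above):
#     [0, window - 1)                          left halo: history elements
#     [window - 1, window - 1 + T)             this block's elements, where
#                                              T = number_of_threads * thread_tile
#     [window - 1 + T, ... + forward_window)   right halo: future elements
# hence shared_buffer_size = T + window - 1 + forward_window, and the
# `* 8` in the launch below converts float64 element counts to bytes.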
def apply(self, method):
gpu_out = numba.cuda.device_array_like(self.gpu_in)
# gpu_out = cudf.Series(gpu_out, nan_as_null=False)
kernel = get_rolling_kernel(method)
kernel[(self.number_of_blocks,),
(self.number_of_threads,),
0,
self.shared_buffer_size * 8](self.gpu_in,
gpu_out,
self.window,
self.forward_window,
self.array_len,
self.thread_tile,
self.min_periods)
# numba.cuda.synchronize()
return gpu_out
def mean(self):
return self.apply(mean_window)
def std(self):
return self.apply(std_window)
def var(self):
return self.apply(var_window)
def max(self):
return self.apply(max_window)
def min(self):
return self.apply(min_window)
def sum(self):
return self.apply(sum_window)
def backward_diff(self):
return self.apply(backward_diff_window)
def backward_shift(self):
return self.apply(backward_shift_window)
def forward_diff(self):
return self.apply(forward_diff_window)
def forward_shift(self):
return self.apply(forward_shift_window)
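# Usage sketch (illustrative only, not part of the original module).
# Assuming `series` is a float64 cudf.Series:
#     import cudf
#     r = Rolling(5, series)      # 5-element backward window
#     gpu_mean = r.mean()         # numba DeviceNDArray on the GPU
#     cudf.Series(gpu_mean, nan_as_null=False)  # wrap for inspection
# The first window - 1 outputs are NaN unless `min_periods` is lowered.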
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/cuindicator/rolling.py |
import numba
import cmath
import numpy as np
from numba import cuda
from .util import port_mask_nan
__all__ = ["fractional_diff", "get_weights_floored", "port_fractional_diff"]
def get_weights_floored(d, num_k, floor=1e-3):
r"""Calculate weights ($w$) for each lag ($k$) through
$w_k = -w_{k-1} \frac{d - k + 1}{k}$ provided weight above a minimum value
(floor) for the weights to prevent computation of weights for the entire
time series.
Args:
d (float): differencing value.
num_k (int): number of lags (typically length of timeseries) to
calculate w.
floor (float): minimum value for the weights for computational
efficiency.
"""
w_k = np.array([1])
k = 1
while k < num_k:
w_k_latest = -w_k[-1] * ((d - k + 1)) / k
if abs(w_k_latest) <= floor:
break
w_k = np.append(w_k, w_k_latest)
k += 1
w_k = w_k.reshape(-1, 1)
return w_k
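# Worked example (illustrative only). For d = 0.5 the recursion
# w_k = -w_{k-1} * (d - k + 1) / k yields 1, -0.5, -0.125, -0.0625, ...,
# so with floor = 0.1:
#     >>> get_weights_floored(0.5, 10, floor=0.1).ravel()
#     array([ 1.   , -0.5  , -0.125])
# The k = 3 weight, -0.0625, falls below the floor and stops the loop.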
@cuda.jit(device=True)
def conv_window(shared, history_len, out_arr, window_size,
arr_len, offset, offset2, min_size):
"""
This function is to do convolution for one thread
Arguments:
------
shared: numba.cuda.DeviceNDArray
3 chunks of data are stored in the shared memory.
The first [0, window_size - 1) elements are the history needed to
compute the first convolution element.
The next [window_size - 1, window_size - 1 + thread_tile * blockDim)
elements are the inputs allocated for this block of threads.
The last window_size elements store the kernel weights.
history_len: int
total number of historical elements available for this chunk of data
out_arr: numba.cuda.DeviceNDArray
output gpu_array of size of `thread_tile`
window_size: int
the number of elements in the kernel
arr_len: int
the chunk array length, same as `thread_tile`
offset: int
indicates the starting index of the chunk array in the shared
memory for this thread.
offset2: int
indicates the starting position of the weights/kernel array in the
shared memory.
min_size: int
the minimum number of non-na elements
"""
for i in range(arr_len):
if i + history_len < window_size-1:
out_arr[i] = np.nan
else:
s = 0.0
average_size = 0
for j in range(0, window_size):
if not (cmath.isnan(
shared[offset + i - j])):
s += (shared[offset + i - j] *
shared[offset2 + window_size - 1 - j])
average_size += 1
if average_size >= min_size:
out_arr[i] = s
else:
out_arr[i] = np.nan
@cuda.jit
def kernel(in_arr, weight_arr, out_arr, window,
arr_len, thread_tile, min_size):
"""
This kernel does a 1D convolution of the `in_arr` array with
`weight_arr` as the kernel. The result is saved in `out_arr`.
Arguments:
------
in_arr: numba.cuda.DeviceNDArray
input gpu array
weight_arr: numba.cuda.DeviceNDArray
convolution kernel gpu array
out_arr: numba.cuda.DeviceNDArray
output gpu_array
window: int
the number of elements in the weight_arr
arr_len: int
the input/output array length
thread_tile: int
each thread is responsible for `thread_tile` number of elements
min_size: int
the minimum number of non-na elements
"""
shared = cuda.shared.array(shape=0,
dtype=numba.float64)
block_size = cuda.blockDim.x # total number of threads
tx = cuda.threadIdx.x
# Block id in a 1D grid
bid = cuda.blockIdx.x
starting_id = bid * block_size * thread_tile
# copy the thread_tile * number_of_thread_per_block into the shared
for j in range(thread_tile):
offset = tx + j * block_size
if (starting_id + offset) < arr_len:
shared[offset + window - 1] = in_arr[
starting_id + offset]
cuda.syncthreads()
# copy the window - 1 into the shared
for j in range(0, window - 1, block_size):
if (((tx + j) <
window - 1) and (
starting_id - window + 1 + tx + j >= 0)):
shared[tx + j] = \
in_arr[starting_id - window + 1 + tx + j]
cuda.syncthreads()
# copy the weights into the shared
for j in range(0, window, block_size):
element_id = tx + j
if (((tx + j) < window) and (element_id < window)):
shared[thread_tile * block_size + window - 1 + tx +
j] = weight_arr[tx + j]
cuda.syncthreads()
# slice the shared memory for each threads
start_shared = tx * thread_tile
his_len = min(window - 1,
starting_id + tx * thread_tile)
# slice the global memory for each threads
start = starting_id + tx * thread_tile
end = min(starting_id + (tx + 1) * thread_tile, arr_len)
sub_outarr = out_arr[start:end]
sub_len = end - start
conv_window(shared, his_len, sub_outarr,
window, sub_len,
window - 1 + start_shared,
thread_tile * block_size + window - 1,
min_size)
def fractional_diff(input_arr, d=0.5, floor=1e-3, min_periods=None,
thread_tile=2, number_of_threads=512):
"""
The fractional difference computation method.
Arguments:
-------
input_arr: numba.cuda.DeviceNDArray or cudf.Series
the input array to compute the fractional difference
d: float
the differencing value, in the range 0 to 1
floor: float
minimum value for the weights for computational efficiency.
min_periods: int
defaults to the length of the weights. At least min_periods
non-na elements are needed to get a fractional difference value
thread_tile: int
each thread will be responsible for `thread_tile` number of
elements in window computation
number_of_threads: int
number of threads in a block for CUDA computation
Returns
-------
(numba.cuda.DeviceNDArray, np.array)
the computed fractional difference array and the weight array tuple
"""
if isinstance(input_arr, numba.cuda.cudadrv.devicearray.DeviceNDArray):
gpu_in = input_arr
else:
gpu_in = input_arr.to_gpu_array()
# compute the weights for the fractional difference
weights = get_weights_floored(d=d,
num_k=len(input_arr),
floor=floor)[::-1, 0]
weights_out = np.ascontiguousarray(weights)
weights = numba.cuda.to_device(weights_out)
window = len(weights)
if min_periods is None:
min_periods = window
else:
min_periods = min_periods
number_of_threads = number_of_threads
array_len = len(gpu_in)
# allocate the output array
gpu_out = numba.cuda.device_array_like(gpu_in)
number_of_blocks = \
(array_len + (number_of_threads * thread_tile - 1)) // \
(number_of_threads * thread_tile)
shared_buffer_size = (number_of_threads * thread_tile +
window - 1 + window)
# call the conv kernel
kernel[(number_of_blocks,),
(number_of_threads,),
0,
shared_buffer_size * 8](gpu_in,
weights,
gpu_out,
window,
array_len,
thread_tile,
min_periods)
return gpu_out, weights_out
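# Usage sketch (illustrative only, not part of the original module).
# Assuming `close` is a float64 cudf.Series of prices:
#     import cudf
#     fd, w = fractional_diff(close, d=0.5, floor=1e-3)
#     fd = cudf.Series(fd, nan_as_null=False)   # wrap the device array
# The first len(w) - 1 entries are NaN because a full weight window is
# required.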
def port_fractional_diff(asset_indicator, input_arr, d=0.5, floor=1e-3,
min_periods=None, thread_tile=2,
number_of_threads=512):
"""
Calculate the fractional differencing signal for all the financial
assets indicated by asset_indicator.
Arguments:
-------
asset_indicator: cudf.Series
the integer indicator array to indicate the start of the different
asset
input_arr: numba.cuda.DeviceNDArray or cudf.Series
the input array to compute the fractional difference
d: float
the differencing value, in the range 0 to 1
floor: float
minimum value for the weights for computational efficiency.
min_periods: int
defaults to the length of the weights. At least min_periods
non-na elements are needed to get a fractional difference value
thread_tile: int
each thread will be responsible for `thread_tile` number of
elements in window computation
number_of_threads: int
number of threads in a block for CUDA computation
Returns
-------
(numba.cuda.DeviceNDArray, np.array)
the computed fractional difference array and the weight array tuple
"""
out, weights = fractional_diff(input_arr, d=d, floor=floor,
min_periods=min_periods,
thread_tile=thread_tile,
number_of_threads=number_of_threads)
port_mask_nan(asset_indicator.to_gpu_array(), out, 0,
len(weights) - 1)
return out, weights
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/cuindicator/frac_diff.py |
import numpy as np
import math
from numba import cuda
import cmath
@cuda.jit(device=True)
def window_kernel(shared, history_len, future_len, out_arr, window_size,
forward_window_size, arr_len, offset, min_size):
"""
This function does the window computation. Only one thread is assumed
to do the computation. This thread is responsible for `arr_len`
elements in the input array, which are cached in the `shared` array.
Due to a limitation of numba, shared arrays cannot be sliced properly.
As a workaround, the passed-in `offset` position specifies the
beginning of the input array.
Arguments:
shared: a chunk of the array stored in shared memory. The first
element of the data starts at `offset`. It has `history_len`
of historical records for the first elements computation.
history_len: the history length, usually `window_size - 1`; it can
be shorter for the beginning elements of the original array.
future_len: the number of elements from the first element this
thread is responsible for to the end of the input array.
out_arr: the output array of size `arr_len`.
window_size: the window size for the window function
arr_len: the length of the output array
offset: the starting position of the input shared array for the
current thread
min_size: the minimum number of non-NA elements needed to compute
an output
"""
pass
@cuda.jit(device=True)
def portfolio_ewma_mean_window(asset_indicator, shared, history_len, out_arr,
window_size, span, arr_len, offset, min_size,
indicator_start):
"""
This function computes the exponentially-weighted moving average for
the window. See `window_kernel` for detailed arguments
"""
s = 0.0
v = 0.0
v_current = 0.0
first = False
alpha = 2 / (span + 1)
lam = 1
total_weight = 0
counter = 0
weight_scale = 1.0
weight_scale_current = 1.0
average_size = 0
for i in range(arr_len):
if asset_indicator[i + indicator_start] == 1:
history_len = -i
s = 0.0
v = 0.0
v_current = 0.0
first = False
lam = 1
total_weight = 0
counter = 0
weight_scale = 1.0
weight_scale_current = 1.0
average_size = 0
if i + history_len < span - 1:
out_arr[i] = np.nan
else:
if not first:
# print(i, i + history_len + 1, window_size, history_len)
for j in range(0, min(i + history_len + 1,
window_size)):
if (cmath.isnan(shared[i + offset - j])):
v = 0.0
weight_scale = 0.0
else:
v = shared[i + offset - j]
weight_scale = 1.0
average_size += 1
s += v * lam
counter += 1
total_weight += lam * weight_scale
lam *= (1 - alpha)
if (i + indicator_start - j >= 0 and
asset_indicator[i + indicator_start - j] == 1):
break
if average_size >= min_size:
out_arr[i] = s / total_weight
else:
out_arr[i] = np.nan
first = True
else:
if (cmath.isnan(shared[i + offset])):
v_current = 0.0
weight_scale_current = 0.0
else:
v_current = shared[i + offset]
weight_scale_current = 1.0
average_size += 1
if counter >= window_size:
if (cmath.isnan(shared[i + offset - window_size])):
v = 0.0
weight_scale = 0.0
else:
v = shared[i + offset - window_size]
weight_scale = 1.0
average_size -= 1
s -= v * lam / (1 - alpha)
total_weight -= lam / (1 - alpha) * weight_scale
else:
counter += 1
# total_weight += lam * weight_scale_current
lam *= (1 - alpha)
total_weight *= (1 - alpha)
total_weight += 1.0 * weight_scale_current
s *= (1 - alpha)
s += v_current
if average_size >= min_size:
out_arr[i] = s / total_weight
else:
out_arr[i] = np.nan
@cuda.jit(device=True)
def ewma_mean_window(shared, history_len, out_arr, window_size, span,
arr_len, offset, min_size):
"""
This function computes the exponentially-weighted moving average for
the window. See `window_kernel` for detailed arguments
"""
s = 0.0
v = 0.0
v_current = 0.0
first = False
alpha = 2 / (span + 1)
lam = 1
total_weight = 0
counter = 0
weight_scale = 1.0
weight_scale_current = 1.0
average_size = 0
for i in range(arr_len):
if i + history_len < span - 1:
out_arr[i] = np.nan
else:
if not first:
# print(i, i + history_len + 1, window_size, history_len)
for j in range(0, min(i + history_len + 1,
window_size)):
if (cmath.isnan(shared[i + offset - j])):
v = 0.0
weight_scale = 0.0
else:
v = shared[i + offset - j]
weight_scale = 1.0
average_size += 1
s += v * lam
counter += 1
total_weight += lam * weight_scale
lam *= (1 - alpha)
if average_size >= min_size:
out_arr[i] = s / total_weight
else:
out_arr[i] = np.nan
first = True
else:
if (cmath.isnan(shared[i + offset])):
v_current = 0.0
weight_scale_current = 0.0
else:
v_current = shared[i + offset]
weight_scale_current = 1.0
average_size += 1
if counter >= window_size:
if (cmath.isnan(shared[i + offset - window_size])):
v = 0.0
weight_scale = 0.0
else:
v = shared[i + offset - window_size]
weight_scale = 1.0
average_size -= 1
s -= v * lam / (1 - alpha)
total_weight -= lam / (1 - alpha) * weight_scale
else:
counter += 1
# total_weight += lam * weight_scale_current
lam *= (1 - alpha)
total_weight *= (1 - alpha)
total_weight += 1.0 * weight_scale_current
s *= (1 - alpha)
s += v_current
if average_size >= min_size:
out_arr[i] = s / total_weight
else:
out_arr[i] = np.nan
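# Reference sketch (illustrative only). The incremental update above
# (s = s * (1 - alpha) + v with alpha = 2 / (span + 1), plus a matching
# decay of total_weight) applies the same weighting as a span-based
# EWMA, so a CPU cross-check, up to the truncation implied by
# window_size and the NaN handling here, is:
#     import pandas as pd
#     pd.Series(values).ewm(span=span, min_periods=span).mean()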
@cuda.jit(device=True)
def mean_window(shared, history_len, future_len, out_arr, window_size,
forward_window_size, arr_len, offset, min_size):
"""
This function is to compute the moving average for the window
See `window_kernel` for detailed arguments
"""
first = False
s = 0.0
average_size = 0
for i in range(arr_len):
if i + history_len < window_size-1:
out_arr[i] = np.nan
elif future_len - i < forward_window_size + 1:
out_arr[i] = np.nan
else:
if not first:
for j in range(0, window_size + forward_window_size):
if not (cmath.isnan(
shared[offset + i - j + forward_window_size])):
s += shared[offset + i - j + forward_window_size]
average_size += 1
if average_size >= min_size:
out_arr[i] = s / np.float64(average_size)
else:
out_arr[i] = np.nan
first = True
else:
if not (cmath.isnan(
shared[offset + i + forward_window_size])):
s += shared[offset + i + forward_window_size]
average_size += 1
if not (cmath.isnan(
shared[offset + i - window_size])):
s -= shared[offset + i - window_size]
average_size -= 1
if average_size >= min_size:
out_arr[i] = s / np.float64(average_size)
else:
out_arr[i] = np.nan
@cuda.jit(device=True)
def var_window(shared, history_len, future_len, out_arr, window_size,
forward_window_size, arr_len, offset, min_size):
"""
This function is to compute the var for the window
See `window_kernel` for detailed arguments
"""
s = 0.0 # this is mean
var = 0.0 # this is variance
first = False
average_size = 0
for i in range(arr_len):
if i + history_len < window_size-1:
out_arr[i] = np.nan
elif future_len - i < forward_window_size + 1:
out_arr[i] = np.nan
else:
if not first:
for j in range(0, window_size + forward_window_size):
if not (cmath.isnan(
shared[offset + i - j + forward_window_size])):
s += shared[offset + i - j + forward_window_size]
var += shared[offset + i - j + forward_window_size] * \
shared[offset + i - j + forward_window_size]
average_size += 1
if average_size >= min_size:
out_arr[i] = (var - s * s / np.float64(average_size)) / \
np.float64(average_size - 1.0)
else:
out_arr[i] = np.nan
first = True
else:
if not (cmath.isnan(
shared[offset + i + forward_window_size])):
s += shared[offset + i + forward_window_size]
var += shared[offset + i + forward_window_size] * \
shared[offset + i + forward_window_size]
average_size += 1
if not (cmath.isnan(
shared[offset + i - window_size])):
s -= shared[offset + i - window_size]
var -= shared[offset + i - window_size] * \
shared[offset + i - window_size]
average_size -= 1
if average_size >= min_size:
out_arr[i] = (var - s * s / np.float64(average_size)) / \
np.float64(average_size - 1.0)
else:
out_arr[i] = np.nan
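# Note (added remark): the closed form above is the textbook shortcut
#     Var = (sum(x ** 2) - (sum(x)) ** 2 / n) / (n - 1)
# which permits O(1) updates as the window slides, at the cost of some
# numerical cancellation for large means; `std_window` below wraps the
# same expression in abs() before sqrt() to guard against small
# negative round-off.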
@cuda.jit(device=True)
def std_window(shared, history_len, future_len, out_arr, window_size,
forward_window_size, arr_len, offset, min_size):
"""
This function is to compute the std for the window
See `window_kernel` for detailed arguments
"""
s = 0.0 # this is mean
var = 0.0 # this is variance
first = False
average_size = 0
for i in range(arr_len):
if i + history_len < window_size-1:
out_arr[i] = np.nan
elif future_len - i < forward_window_size + 1:
out_arr[i] = np.nan
else:
if not first:
for j in range(0, window_size + forward_window_size):
if not (cmath.isnan(
shared[offset + i - j + forward_window_size])):
s += shared[offset + i - j + forward_window_size]
var += shared[offset + i - j + forward_window_size] * \
shared[offset + i - j + forward_window_size]
average_size += 1
if average_size >= min_size:
v = math.sqrt(
abs((var - s * s / np.float64(average_size)) /
np.float64(average_size - 1.0)))
out_arr[i] = v
else:
out_arr[i] = np.nan
first = True
else:
if not (cmath.isnan(
shared[offset + i + forward_window_size])):
s += shared[offset + i + forward_window_size]
var += shared[offset + i + forward_window_size] * \
shared[offset + i + forward_window_size]
average_size += 1
if not (cmath.isnan(
shared[offset + i - window_size])):
s -= shared[offset + i - window_size]
var -= shared[offset + i - window_size] * \
shared[offset + i - window_size]
average_size -= 1
if average_size >= min_size:
v = math.sqrt(
abs((var - s * s / np.float64(average_size)) /
np.float64(average_size - 1.0)))
out_arr[i] = v
else:
out_arr[i] = np.nan
@cuda.jit(device=True)
def sum_window(shared, history_len, future_len, out_arr, window_size,
forward_window_size, arr_len, offset, min_size):
"""
This function is to compute the sum for the window
See `window_kernel` for detailed arguments
"""
first = False
s = 0.0
average_size = 0
for i in range(arr_len):
if i + history_len < window_size-1:
out_arr[i] = np.nan
elif future_len - i < forward_window_size + 1:
out_arr[i] = np.nan
else:
if not first:
for j in range(0, window_size + forward_window_size):
if not (cmath.isnan(
shared[offset + i - j + forward_window_size])):
s += shared[offset + i - j + forward_window_size]
average_size += 1
if average_size >= min_size:
out_arr[i] = s
else:
out_arr[i] = np.nan
first = True
else:
if not (cmath.isnan(
shared[offset + i + forward_window_size])):
s += shared[offset + i + forward_window_size]
average_size += 1
if not (cmath.isnan(
shared[offset + i - window_size])):
s -= shared[offset + i - window_size]
average_size -= 1
if average_size >= min_size:
out_arr[i] = s
else:
out_arr[i] = np.nan
@cuda.jit(device=True)
def max_window(shared, history_len, future_len, out_arr, window_size,
forward_window_size, arr_len, offset, min_size):
"""
This function is to compute the max for the window
See `window_kernel` for detailed arguments
"""
for i in range(arr_len):
if i + history_len < window_size-1:
out_arr[i] = np.nan
elif future_len - i < forward_window_size + 1:
out_arr[i] = np.nan
else:
s = -np.inf # maximum
average_size = 0
for j in range(0, window_size + forward_window_size):
if not (cmath.isnan(
shared[offset + i - j + forward_window_size])):
# bigger than the max
if shared[i + offset - j + forward_window_size] > s:
s = shared[i + offset - j + forward_window_size]
average_size += 1
if average_size >= min_size:
out_arr[i] = s
else:
out_arr[i] = np.nan
@cuda.jit(device=True)
def min_window(shared, history_len, future_len, out_arr, window_size,
forward_window_size, arr_len, offset, min_size):
"""
This function is to compute the min for the window
See `window_kernel` for detailed arguments
"""
for i in range(arr_len):
if i + history_len < window_size-1:
out_arr[i] = np.nan
elif future_len - i < forward_window_size + 1:
out_arr[i] = np.nan
else:
s = np.inf # minimum
average_size = 0
for j in range(0, window_size + forward_window_size):
if not (cmath.isnan(
shared[offset + i - j + forward_window_size])):
# smaller than the min
if shared[i + offset - j + forward_window_size] < s:
s = shared[i + offset - j + forward_window_size]
average_size += 1
if average_size >= min_size:
out_arr[i] = s
else:
out_arr[i] = np.nan
@cuda.jit(device=True)
def backward_diff_window(shared, history_len, future_len, out_arr, window_size,
forward_window_size, arr_len, offset, min_size):
"""
This function is to compute the backward element difference.
See `window_kernel` for detailed arguments
"""
for i in range(arr_len):
if i + history_len < window_size-1:
out_arr[i] = np.nan
elif future_len - i < forward_window_size + 1:
out_arr[i] = np.nan
else:
if (cmath.isnan(shared[offset + i]) or
cmath.isnan(shared[offset + i - window_size + 1])):
out_arr[i] = np.nan
else:
out_arr[i] = shared[offset + i] - \
shared[offset + i - window_size + 1]
@cuda.jit(device=True)
def backward_shift_window(shared, history_len, future_len, out_arr,
window_size, forward_window_size, arr_len,
offset, min_size):
"""
This function shifts elements backward.
See `window_kernel` for detailed arguments
"""
for i in range(arr_len):
if i + history_len < window_size-1:
out_arr[i] = np.nan
elif future_len - i < forward_window_size + 1:
out_arr[i] = np.nan
else:
if (cmath.isnan(shared[offset + i - window_size + 1])):
out_arr[i] = np.nan
else:
out_arr[i] = shared[offset + i - window_size + 1]
@cuda.jit(device=True)
def forward_diff_window(shared, history_len, future_len, out_arr, window_size,
forward_window_size, arr_len, offset, min_size):
"""
This function is to compute the forward element difference.
See `window_kernel` for detailed arguments
"""
for i in range(arr_len):
if i + history_len < window_size-1:
out_arr[i] = np.nan
elif future_len - i < forward_window_size + 1:
out_arr[i] = np.nan
else:
if (cmath.isnan(shared[offset + i]) or
cmath.isnan(shared[offset + i + forward_window_size])):
out_arr[i] = np.nan
else:
out_arr[i] = shared[offset + i] - \
shared[offset + i + forward_window_size]
@cuda.jit(device=True)
def forward_shift_window(shared, history_len, future_len, out_arr, window_size,
forward_window_size, arr_len, offset, min_size):
"""
This function shifts elements forward.
See `window_kernel` for detailed arguments
"""
for i in range(arr_len):
if i + history_len < window_size-1:
out_arr[i] = np.nan
elif future_len - i < forward_window_size + 1:
out_arr[i] = np.nan
else:
if (cmath.isnan(shared[offset + i + forward_window_size])):
out_arr[i] = np.nan
else:
out_arr[i] = shared[offset + i + forward_window_size]
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/cuindicator/windows.py |
from greenflow.dataframe_flow import Node, PortsSpecSchema
from greenflow.dataframe_flow.portsSpecSchema import ConfSchema
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
__all__ = ['XGBoostStrategyNode']
class XGBoostStrategyNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
"""
This is the Node used to compute the trading signal from the XGBoost
strategy.
"""
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'stock_in'
self.OUTPUT_PORT_NAME = 'stock_out'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:stock_in}"
}
}
cols_required = {'predict': None, "asset": "int64"}
addition = {}
addition['signal'] = 'float64'
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_ADDITION,
MetaDataSchema.META_REF_INPUT: self.INPUT_PORT_NAME,
MetaDataSchema.META_DATA: addition
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "XGBoost Node configure",
"type": "object",
"description": """convert the predicted next day return as trading actions
""",
"properties": {
},
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
input_df = inputs[self.INPUT_PORT_NAME]
# convert the signal to trading action
# 1 is buy and -1 is sell
# It predicts tomorrow's return (shift -1)
# We shift 1 for trading actions so that it acts on the second day
input_df['signal'] = ((
input_df['predict'] >= 0).astype('float') * 2 - 1).shift(1)
# remove the bad data points
input_df = input_df.dropna()
return {self.OUTPUT_PORT_NAME: input_df}
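# Illustrative example (not part of the original module). With pandas,
# the same mapping from predicted next-day returns to lagged trading
# actions is:
#     import pandas as pd
#     predict = pd.Series([0.2, -0.1, 0.3])
#     signal = ((predict >= 0).astype('float') * 2 - 1).shift(1)
#     # -> [NaN, 1.0, -1.0]: day t+1 acts on the sign predicted at day t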
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/strategy/xgboostStrategyNode.py |
from .movingAverageStrategyNode import MovingAverageStrategyNode
from .portExpMovingAverageStrategyNode import PortExpMovingAverageStrategyNode
from .xgboostStrategyNode import XGBoostStrategyNode
__all__ = ["MovingAverageStrategyNode",
"PortExpMovingAverageStrategyNode",
"XGBoostStrategyNode"]
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/strategy/__init__.py |
from numba import cuda
from functools import partial
import math
import numpy as np
import cudf
import pandas as pd
from greenflow.dataframe_flow import Node, PortsSpecSchema
from greenflow.dataframe_flow.portsSpecSchema import ConfSchema
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
from .. import cuindicator as ci
@cuda.jit
def moving_average_signal_kernel(ma_fast, ma_slow, out_arr, arr_len):
i = cuda.grid(1)
if i == 0:
out_arr[i] = np.nan
if i < arr_len - 1:
if math.isnan(ma_slow[i]) or math.isnan(ma_fast[i]):
out_arr[i + 1] = np.nan
elif ma_fast[i] - ma_slow[i] > 0.00001:
# shift 1 time to make sure no peeking into the future
out_arr[i + 1] = -1.0
else:
out_arr[i + 1] = 1.0
def port_exponential_moving_average(stock_df, n_fast, n_slow):
ma_slow = ci.port_exponential_moving_average(stock_df['indicator'],
stock_df['close'],
n_slow).to_gpu_array()
ma_fast = ci.port_exponential_moving_average(stock_df['indicator'],
stock_df['close'],
n_fast).to_gpu_array()
out_arr = cuda.device_array_like(ma_fast)
number_of_threads = 256
array_len = len(stock_df)
number_of_blocks = (array_len + (
number_of_threads - 1)) // number_of_threads
moving_average_signal_kernel[(number_of_blocks,),
(number_of_threads,)](ma_fast,
ma_slow,
out_arr,
array_len)
return out_arr, ma_slow, ma_fast
def cpu_exp_moving_average(df, n_slow=1, n_fast=3):
df['exp_ma_slow'] = df['close'].ewm(span=n_slow, min_periods=n_slow).mean()
df['exp_ma_fast'] = df['close'].ewm(span=n_fast, min_periods=n_fast).mean()
df['signal'] = 1.0
# use .loc to avoid pandas chained-assignment pitfalls
df.loc[df['exp_ma_fast'] - df['exp_ma_slow'] > 0.00001, 'signal'] = -1.0
df['signal'] = df['signal'].shift(1)
df['signal'].values[0:n_slow] = np.nan
return df
class PortExpMovingAverageStrategyNode(
TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'stock_in'
self.OUTPUT_PORT_NAME = 'stock_out'
self.delayed_process = True
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:stock_in}"
}
}
cols_required = {"close": "float64",
"indicator": "int32"}
addition = {"signal": "float64",
"exp_ma_slow": "float64",
"exp_ma_fast": "float64"}
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_ADDITION,
MetaDataSchema.META_REF_INPUT: self.INPUT_PORT_NAME,
MetaDataSchema.META_DATA: addition
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "Portfollio Moving Average Strategy Node configure",
"type": "object",
"description": """Simple mean reversion trading strategy.
It computes two exponential moving average signals of the
`close` prices and decides long/short of the asset when
these two signals cross over.""",
"properties": {
"fast": {
"type": "number",
"description": "fast moving average window"
},
"slow": {
"type": "number",
"description": "slow moving average window"
}
},
"required": ["fast", "slow"],
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
Simple mean reversion trading strategy. It computes two exponential
moving average signals of the `close` prices and decides long/short
of the asset when these two signals cross over. It computes the
trading signals for all the assets in the dataframe.
The trading signal is stored as `signal` in the dataframe; a positive
value means long and a negative value means short. The resulting
moving average signals are added to the dataframe.
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
dataframe
"""
input_df = inputs[self.INPUT_PORT_NAME]
n_fast = self.conf['fast']
n_slow = self.conf['slow']
if isinstance(input_df, pd.DataFrame):
fun = partial(cpu_exp_moving_average, n_fast=n_fast, n_slow=n_slow)
input_df = input_df.groupby("asset").apply(fun)
else:
signal, slow, fast = port_exponential_moving_average(input_df,
n_fast,
n_slow)
signal = cudf.Series(signal, index=input_df.index)
slow = cudf.Series(slow, index=input_df.index)
fast = cudf.Series(fast, index=input_df.index)
input_df['signal'] = signal
input_df['exp_ma_slow'] = slow
input_df['exp_ma_slow'] = input_df['exp_ma_slow'].fillna(0.0)
input_df['exp_ma_fast'] = fast
input_df['exp_ma_fast'] = input_df['exp_ma_fast'].fillna(0.0)
# remove the bad data points
input_df = input_df.dropna()
input_df = input_df.query('indicator == 0')
return {self.OUTPUT_PORT_NAME: input_df}
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/strategy/portExpMovingAverageStrategyNode.py |
from .. import cuindicator as ci
from greenflow.dataframe_flow import Node, PortsSpecSchema
from numba import cuda
import math
import numpy as np
import cudf
from greenflow.dataframe_flow.portsSpecSchema import ConfSchema
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
@cuda.jit
def moving_average_signal_kernel(ma_fast, ma_slow, out_arr, arr_len):
i = cuda.grid(1)
if i == 0:
out_arr[i] = np.nan
if i < arr_len - 1:
if math.isnan(ma_slow[i]) or math.isnan(ma_fast[i]):
out_arr[i + 1] = np.nan
elif ma_fast[i] - ma_slow[i] > 0.00001:
# shift 1 time to make sure no peeking into the future
out_arr[i + 1] = -1.0
else:
out_arr[i + 1] = 1.0
def moving_average_signal(stock_df, n_fast, n_slow):
ma_slow = ci.moving_average(stock_df['close'],
n_slow).to_gpu_array()
ma_fast = ci.moving_average(stock_df['close'],
n_fast).to_gpu_array()
out_arr = cuda.device_array_like(ma_fast)
array_len = len(ma_slow)
number_of_threads = 256
number_of_blocks = (array_len + (
number_of_threads - 1)) // number_of_threads
moving_average_signal_kernel[(number_of_blocks,),
(number_of_threads,)](ma_fast,
ma_slow,
out_arr,
array_len)
return out_arr, ma_slow, ma_fast
class MovingAverageStrategyNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'stock_in'
self.OUTPUT_PORT_NAME = 'stock_out'
self.delayed_process = True
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:stock_in}"
}
}
cols_required = {"close": "float64"}
addition = {"signal": "float64",
"ma_slow": "float64",
"ma_fast": "float64"}
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_ADDITION,
MetaDataSchema.META_REF_INPUT: self.INPUT_PORT_NAME,
MetaDataSchema.META_DATA: addition
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "Moving Average Strategy Node configure",
"type": "object",
"description": """Simple mean reversion trading strategy.
It computes two moving average signals of the `close`
prices and decides long/short of asset when these two
signals cross over.select the asset based on asset id""",
"properties": {
"fast": {
"type": "number",
"description": "fast moving average window"
},
"slow": {
"type": "number",
"description": "slow moving average window"
}
},
"required": ["fast", "slow"],
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
Simple mean reversion trading strategy. It computes two moving average
signals of the `close` prices and decides long/short of the asset when
these two signals cross over.
The trading signal is stored as `signal` in the dataframe; a positive
value means long and a negative value means short. The resulting
moving average signals are added to the dataframe.
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
dataframe
"""
input_df = inputs[self.INPUT_PORT_NAME]
n_fast = self.conf['fast']
n_slow = self.conf['slow']
signal, slow, fast = moving_average_signal(input_df, n_fast, n_slow)
signal = cudf.Series(signal, index=input_df.index)
slow = cudf.Series(slow, index=input_df.index)
fast = cudf.Series(fast, index=input_df.index)
input_df['signal'] = signal
input_df['ma_slow'] = slow
input_df['ma_slow'] = input_df['ma_slow'].fillna(0.0)
input_df['ma_fast'] = fast
input_df['ma_fast'] = input_df['ma_fast'].fillna(0.0)
input_df = input_df.dropna()
return {self.OUTPUT_PORT_NAME: input_df}
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/strategy/movingAverageStrategyNode.py |
class StockMap(object):
def __init__(self):
pass
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/dataloader/stockMap.py |
from greenflow.dataframe_flow import Node
import cudf
from greenflow.dataframe_flow.portsSpecSchema import (
PortsSpecSchema, NodePorts, ConfSchema)
from greenflow.dataframe_flow.metaSpec import MetaData
from ..cache import CACHE_NAME
from greenflow.dataframe_flow.util import get_file_path
from .stockMap import StockMap
from ..node_hdf_cache import NodeHDFCacheMixin
STOCK_NAME_PORT_NAME = 'stock_name'
STOCK_MAP_PORT_NAME = 'map_data'
class StockNameLoader(NodeHDFCacheMixin, Node):
def _compute_hash_key(self):
return hash((self.uid, self.conf['file']))
def ports_setup(self):
input_ports = {}
output_ports = {
STOCK_NAME_PORT_NAME: {
PortsSpecSchema.port_type: cudf.DataFrame
},
STOCK_MAP_PORT_NAME: {
PortsSpecSchema.port_type: StockMap
}
}
return NodePorts(inports=input_ports, outports=output_ports)
def meta_setup(self):
required = {}
column_types = {"asset": "int64",
"asset_name": "object"}
out_cols = {
STOCK_NAME_PORT_NAME: column_types,
}
if self.outport_connected(STOCK_MAP_PORT_NAME):
if 'file' in self.conf:
hash_key = self._compute_hash_key()
if hash_key in CACHE_NAME:
out_cols.update({
STOCK_MAP_PORT_NAME: CACHE_NAME[hash_key]})
else:
path = get_file_path(self.conf['file'])
name_df = cudf.read_csv(path)[['SM_ID', 'SYMBOL']]
name_df.columns = ["asset", 'asset_name']
pdf = name_df.to_pandas()
column_data = pdf.to_dict('list')
CACHE_NAME[hash_key] = column_data
out_cols.update({STOCK_MAP_PORT_NAME: column_data})
metadata = MetaData(inports=required, outports=out_cols)
return metadata
def conf_schema(self):
json = {
"title": "Stock name csv file loader configure",
"type": "object",
"description": "Load the stock name data from the csv file",
"properties": {
"file": {
"type": "string",
"description": "stock name csv file with full path"
}
},
"required": ["file"],
}
ui = {
"file": {"ui:widget": "CsvFileSelector"}
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
Load the csv file mapping stock id to symbol name into a cudf DataFrame
Arguments
-------
inputs: list
empty list
Returns
-------
cudf.DataFrame
"""
output = {}
if self.outport_connected(STOCK_NAME_PORT_NAME):
path = get_file_path(self.conf['file'])
name_df = cudf.read_csv(path)[['SM_ID', 'SYMBOL']]
# change the names
name_df.columns = ["asset", 'asset_name']
output.update({STOCK_NAME_PORT_NAME: name_df})
if self.outport_connected(STOCK_MAP_PORT_NAME):
output.update({STOCK_MAP_PORT_NAME: StockMap()})
return output
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/dataloader/stockNameLoader.py |
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow.portsSpecSchema import (
PortsSpecSchema, NodePorts, ConfSchema)
from greenflow.dataframe_flow.metaSpec import MetaData
import cudf
import dask_cudf
import cuml
import copy
import cuml.dask.datasets.classification
from ..node_hdf_cache import NodeHDFCacheMixin
CUDF_PORT_NAME = 'cudf_out'
DASK_CUDF_PORT_NAME = 'dask_cudf_out'
class ClassificationData(NodeHDFCacheMixin, Node):
def ports_setup(self):
input_ports = {}
output_ports = {
CUDF_PORT_NAME: {
PortsSpecSchema.port_type: cudf.DataFrame
},
DASK_CUDF_PORT_NAME: {
PortsSpecSchema.port_type: dask_cudf.DataFrame
}
}
return NodePorts(inports=input_ports, outports=output_ports)
def meta_setup(self):
column_types = {}
total_features = self.conf.get("n_features", 20)
dtype = self.conf.get("dtype", "float32")
for i in range(total_features):
column_types["x"+str(i)] = dtype
column_types['y'] = 'int64'
inputs = {
}
out_cols = {
CUDF_PORT_NAME: column_types,
DASK_CUDF_PORT_NAME: column_types,
}
metadata = MetaData(inports=inputs, outports=out_cols)
return metadata
def conf_schema(self):
json = {
"title": "Classification Node configure",
"type": "object",
"description": """Generate random dataframe for classification
tasks. Generate a random n-class classification problem. This
initially creates clusters of points normally distributed
(std=1) about vertices of an n_informative-dimensional hypercube
with sides of length 2*class_sep and assigns an equal number of
clusters to each class.""",
"properties": {
"n_samples": {"type": "number",
"description": "The number of samples.",
"default": 100},
"n_features": {"type": "number",
"description": """The total number of features.
These comprise n_informative informative
features, n_redundant redundant features,
n_repeated duplicated features and
n_features-n_informative-n_redundant-n_repeated
useless features drawn at random.""",
"default": 20},
"n_informative": {"type": "number",
"description": """The number of informative
features. Each class is composed of a number
of gaussian clusters each located around the
vertices of a hypercube in a subspace of
dimension n_informative. For each cluster,
informative features are drawn independently
from N(0, 1) and then randomly linearly
combined within each cluster in order to add
covariance. The clusters are then placed on
the vertices of the hypercube.""",
"default": 2},
"n_redundant": {"type": "number",
"description": """The number of redundant
features. These features are generated as
random linear combinations of the informative
features.""",
"default": 2},
"n_repeated": {"type": "number",
"description": """The number of duplicated
features, drawn randomly from the informative
and the redundant features.""",
"default": 0},
"n_classes": {"type": "number",
"description": """The number of classes (or
labels) of the classification problem.""",
"default": 2},
"n_clusters_per_class": {"type": "number",
"description": """The number of
clusters per class.""",
"default": 2},
"weights": {"type": "array",
"items": {
"type": "number"
},
"description": """The proportions of samples
assigned to each class. If None, then classes are
balanced. Note that if len(weights) ==
n_classes - 1, then the last class weight is
automatically inferred. More than n_samples
samples may be returned if the sum of weights
exceeds 1."""},
"flip_y": {"type": "number",
"description": """The fraction of samples whose
class is assigned randomly. Larger values introduce
noise in the labels and make the classification
task harder.""",
"default": 0.01},
"class_sep": {"type": "number",
"description": """The factor multiplying the
hypercube size. Larger values spread out the
clusters/classes and make the classification
task easier.""",
"default": 1.0},
"hypercube": {"type": "boolean",
"description": """If True, the clusters are put
on the vertices of a hypercube. If False, the
clusters are put on the vertices of a random
polytope.""",
"default": True},
"shift": {"type": "number",
"description": """Shift features by the specified
value. If None, then features are shifted by a
random value drawn in [-class_sep, class_sep].""",
"default": 0.0},
"scale": {"type": "number",
"description": """Multiply features by the specified
value. If None, then features are scaled by a random
value drawn in [1, 100]. Note that scaling happens
after shifting.""",
"default": 1.0},
"shuffle": {"type": "boolean",
"description": """Shuffle the samples and the
features.""",
"default": True},
"random_state": {"type": "number",
"description": """Determines random number
generation for dataset creation. Pass an int
for reproducible output across multiple
function calls. See Glossary."""},
"order": {"type": "string",
"description": "The order of the generated samples",
"enum": ["F", "C"],
"default": "F"},
"dtype": {"type": "string",
"description": "Dtype of the generated samples",
"enum": ["float64", "float32"],
"default": "float64"},
"n_parts": {"type": "number",
"description": """used for Dask dataframe, number
of partitions to generate (this can be greater
than the number of workers""",
"default": 4}
}
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
Generate the fake data for classification
Arguments
-------
inputs: list
empty list
Returns
-------
cudf.DataFrame
"""
output = {}
def get_cudf(offset=None):
conf = copy.copy(self.conf)
if 'n_parts' in conf:
del conf['n_parts']
x, y = cuml.datasets.make_classification(**conf)
df = cudf.DataFrame({'x'+str(i): x[:, i]
for i in range(x.shape[1])})
df['y'] = y
if offset is not None:
df.index += offset
return df
if self.outport_connected(CUDF_PORT_NAME):
df = get_cudf()
output.update({CUDF_PORT_NAME: df})
if self.outport_connected(DASK_CUDF_PORT_NAME):
def mapfun(x):
return x.get()
x, y = cuml.dask.datasets.classification.make_classification(
**self.conf)
ddf = x.map_blocks(mapfun,
dtype=x.dtype).to_dask_dataframe()
out = dask_cudf.from_dask_dataframe(ddf)
out.columns = ['x'+str(i) for i in range(x.shape[1])]
out['y'] = y.astype('int64')
output.update({DASK_CUDF_PORT_NAME: out})
return output
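# Usage sketch (illustrative only, not part of the original module).
# The node wraps cuml's generator; a direct single-GPU call mirroring
# the `get_cudf` helper above looks like:
#     import cudf
#     import cuml
#     x, y = cuml.datasets.make_classification(n_samples=100, n_features=4)
#     df = cudf.DataFrame({'x' + str(i): x[:, i] for i in range(x.shape[1])})
#     df['y'] = y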
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/dataloader/classificationGenerator.py |
from .csvStockLoader import CsvStockLoader
from .stockNameLoader import StockNameLoader
from .classificationGenerator import ClassificationData
__all__ = ["CsvStockLoader", "StockNameLoader", "ClassificationData"]
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/dataloader/__init__.py |
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow.portsSpecSchema import (
PortsSpecSchema, NodePorts, ConfSchema)
from greenflow.dataframe_flow.metaSpec import MetaData
import cudf
import dask_cudf
import pandas as pd
from greenflow.dataframe_flow.util import get_file_path
from ..node_hdf_cache import NodeHDFCacheMixin
CUDF_PORT_NAME = 'cudf_out'
DASK_CUDF_PORT_NAME = 'dask_cudf_out'
PANDAS_PORT_NAME = 'pandas_out'
class CsvStockLoader(NodeHDFCacheMixin, Node):
def ports_setup(self):
input_ports = {}
output_ports = {
CUDF_PORT_NAME: {
PortsSpecSchema.port_type: cudf.DataFrame
},
DASK_CUDF_PORT_NAME: {
PortsSpecSchema.port_type: dask_cudf.DataFrame
},
PANDAS_PORT_NAME: {
PortsSpecSchema.port_type: pd.DataFrame
}
}
return NodePorts(inports=input_ports, outports=output_ports)
def meta_setup(self):
column_types = {
"datetime": "datetime64[ns]",
"open": "float64",
"close": "float64",
"high": "float64",
"low": "float64",
"asset": "int64",
"volume": "float64"
}
out_cols = {
CUDF_PORT_NAME: column_types,
DASK_CUDF_PORT_NAME: column_types,
PANDAS_PORT_NAME: column_types
}
required = {}
metadata = MetaData(inports=required, outports=out_cols)
return metadata
def conf_schema(self):
json = {
"title": "Stock csv data loader configure",
"type": "object",
"description": "Load the stock daily bar data from the csv file",
"properties": {
"file": {
"type": "string",
"description": "stock csv data file with full path"
},
"path": {
"type": "string",
"description": "path to the directory for csv files"
}
}
}
ui = {
"file": {"ui:widget": "CsvFileSelector"},
"path": {"ui:widget": "PathSelector"}
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
Load the end-of-day stock CSV data into a cuDF dataframe
Arguments
-------
inputs: list
empty list
Returns
-------
cudf.DataFrame
"""
output = {}
if self.outport_connected(CUDF_PORT_NAME):
path = get_file_path(self.conf['file'])
df = cudf.read_csv(path)
# extract the year, month, day
ymd = df['DTE'].astype(
'str').str.extract(r'(\d\d\d\d)(\d\d)(\d\d)')
# construct the standard datetime str
df['DTE'] = ymd[0].str.cat(
ymd[1],
'-').str.cat(ymd[2], '-').astype('datetime64[ns]')
df = df[['DTE', 'OPEN', 'CLOSE', 'HIGH', 'LOW', 'SM_ID', 'VOLUME']]
df['VOLUME'] /= 1000
# change the names
df.columns = ['datetime', 'open', 'close',
'high', 'low', "asset", 'volume']
output.update({CUDF_PORT_NAME: df})
if self.outport_connected(PANDAS_PORT_NAME):
path = get_file_path(self.conf['file'])
df = pd.read_csv(path,
converters={'DTE':
lambda x: pd.Timestamp(str(x))})
df = df[['DTE', 'OPEN',
'CLOSE', 'HIGH',
'LOW', 'SM_ID', 'VOLUME']]
df['VOLUME'] /= 1000
df.columns = ['datetime', 'open', 'close', 'high',
'low', "asset", 'volume']
output.update({PANDAS_PORT_NAME: df})
if self.outport_connected(DASK_CUDF_PORT_NAME):
path = get_file_path(self.conf['path'])
df = dask_cudf.read_csv(path+'/*.csv',
parse_dates=['datetime'])
output.update({DASK_CUDF_PORT_NAME: df})
return output
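# Reference sketch (illustrative only). The YYYYMMDD-to-datetime
# conversion above, shown on a tiny example:
#     import cudf
#     s = cudf.Series([20190102, 20190103])
#     ymd = s.astype('str').str.extract(r'(\d\d\d\d)(\d\d)(\d\d)')
#     dte = ymd[0].str.cat(ymd[1], '-').str.cat(ymd[2], '-')
#     dte.astype('datetime64[ns]')   # -> 2019-01-02, 2019-01-03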
| fsi-samples-main | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/dataloader/csvStockLoader.py |
import greenflow
from greenflow.dataframe_flow.portsSpecSchema import ConfSchema
import json
data = """{
"conf": {
"input": [
"train_norm.df_in",
"test_norm.df_in"
],
"output": [
"train_infer.out",
"test_infer.out",
"train_xgboost.model_out",
"train_norm.df_out",
"test_norm.df_out"
],
"subnode_ids": [
"train_norm",
"train_xgboost"
],
"subnodes_conf": {},
"taskgraph": "taskgraphs/xgboost_example/xgboost_model.gq.yaml"
}
}
"""
class CustXGBoostNode(greenflow.plugin_nodes.util.CompositeNode):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# modify the self.conf to the one that this Composite node wants
global data
node_conf = self.conf
data_obj = json.loads(data)
data_obj['conf']['subnodes_conf'].update(node_conf)
self.conf = data_obj['conf']
def conf_schema(self):
full_schema = super().conf_schema()
full_schema_json = full_schema.json
ui = full_schema.ui
json = {
"title": "CustXGBoostNode configure",
"type": "object",
"description": "Enter your node description here",
"properties": {
}
}
item_dict = full_schema_json['properties'][
'subnodes_conf']['properties']
for key in item_dict.keys():
json['properties'][key] = item_dict[key]
return ConfSchema(json=json, ui=ui)
| fsi-samples-main | gQuant/plugins/gquant_plugin/modules/my_node.py |
import math
import numpy as np
from numba import cuda
import cupy
import cudf
import dask_cudf
import dask
from greenflow.dataframe_flow import Node, MetaData
from greenflow.dataframe_flow import NodePorts, PortsSpecSchema
from greenflow.dataframe_flow import ConfSchema
import copy
class PointNode(Node):
def ports_setup(self):
input_ports = {}
output_ports = {
'points_df_out': {
PortsSpecSchema.port_type: cudf.DataFrame
},
'points_ddf_out': {
PortsSpecSchema.port_type: dask_cudf.DataFrame
},
}
return NodePorts(inports=input_ports, outports=output_ports)
def conf_schema(self):
json = {
"title": "PointNode configure",
"type": "object",
"properties": {
"npts": {
"type": "number",
"description": "number of data points",
"minimum": 10
},
"npartitions": {
"type": "number",
"description": "num of partitions in the Dask dataframe",
"minimum": 1
}
},
"required": ["npts", "npartitions"],
}
ui = {
"npts": {"ui:widget": "updown"},
"npartitions": {"ui:widget": "updown"}
}
return ConfSchema(json=json, ui=ui)
def init(self):
pass
def meta_setup(self):
columns_out = {
'points_df_out': {
'x': 'float64',
'y': 'float64'
},
'points_ddf_out': {
'x': 'float64',
'y': 'float64'
}
}
return MetaData(inports={}, outports=columns_out)
def process(self, inputs):
npts = self.conf['npts']
df = cudf.DataFrame()
df['x'] = np.random.rand(npts)
df['y'] = np.random.rand(npts)
output = {}
if self.outport_connected('points_df_out'):
output.update({'points_df_out': df})
if self.outport_connected('points_ddf_out'):
npartitions = self.conf['npartitions']
ddf = dask_cudf.from_cudf(df, npartitions=npartitions)
output.update({'points_ddf_out': ddf})
return output
class DistanceNode(Node):
def ports_setup(self):
port_type = PortsSpecSchema.port_type
input_ports = {
'points_df_in': {
port_type: [cudf.DataFrame, dask_cudf.DataFrame]
}
}
output_ports = {
'distance_df': {
port_type: [cudf.DataFrame, dask_cudf.DataFrame]
},
'distance_abs_df': {
                port_type: [cudf.DataFrame, dask_cudf.DataFrame]
}
}
input_connections = self.get_connected_inports()
if 'points_df_in' in input_connections:
types = input_connections['points_df_in']
# connected, use the types passed in from parent
return NodePorts(inports={'points_df_in': {port_type: types}},
outports={'distance_df': {port_type: types},
'distance_abs_df': {port_type: types},
})
else:
return NodePorts(inports=input_ports, outports=output_ports)
def conf_schema(self):
return ConfSchema()
def init(self):
self.delayed_process = True
def meta_setup(self):
req_cols = {
'x': 'float64',
'y': 'float64'
}
required = {
'points_df_in': req_cols,
}
input_meta = self.get_input_meta()
output_cols = ({
'distance_df': {
'x': 'float64',
'y': 'float64',
'distance_cudf': 'float64',
},
'distance_abs_df': {
'x': 'float64',
'y': 'float64',
'distance_abs_cudf': 'float64',
}
})
if 'points_df_in' in input_meta:
col_from_inport = input_meta['points_df_in']
# additional ports
output_cols['distance_df'].update(col_from_inport)
output_cols['distance_abs_df'].update(col_from_inport)
return MetaData(inports=required, outports=output_cols)
def process(self, inputs):
df = inputs['points_df_in']
output = {}
if self.outport_connected('distance_df'):
copy_df = df.copy()
copy_df['distance_cudf'] = (df['x'] ** 2 + df['y'] ** 2).sqrt()
output.update({'distance_df': copy_df})
if self.outport_connected('distance_abs_df'):
copy_df = df.copy()
copy_df['distance_abs_cudf'] = df['x'].abs() + df['y'].abs()
output.update({'distance_abs_df': copy_df})
return output
@cuda.jit
def distance_kernel(x, y, distance, array_len):
# ii - overall thread index
ii = cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x
if ii < array_len:
distance[ii] = math.sqrt(x[ii] ** 2 + y[ii] ** 2)
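# Note (illustrative): the manual thread-index computation above is
# equivalent to numba.cuda.grid(1).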
class NumbaDistanceNode(Node):
def ports_setup(self):
port_type = PortsSpecSchema.port_type
input_ports = {
'points_df_in': {
port_type: [cudf.DataFrame,
dask_cudf.DataFrame]
}
}
output_ports = {
'distance_df': {
port_type: [cudf.DataFrame,
dask_cudf.DataFrame]
}
}
input_connections = self.get_connected_inports()
if 'points_df_in' in input_connections:
types = input_connections['points_df_in']
# connected
return NodePorts(inports={'points_df_in': {port_type: types}},
outports={'distance_df': {port_type: types}})
else:
return NodePorts(inports=input_ports, outports=output_ports)
def init(self):
self.delayed_process = True
    def meta_setup(self):
required_cols = {'x': 'float64', 'y': 'float64'}
required = {
'points_df_in': required_cols,
'distance_df': required_cols
}
input_meta = self.get_input_meta()
output_cols = ({
'distance_df': {
'x': 'float64',
'y': 'float64',
'distance_numba': 'float64',
}
})
if 'points_df_in' in input_meta:
col_from_inport = input_meta['points_df_in']
# additional ports
output_cols['distance_df'].update(col_from_inport)
return MetaData(inports=required, outports=output_cols)
def conf_schema(self):
return ConfSchema()
def process(self, inputs):
df = inputs['points_df_in']
# DEBUGGING
# try:
# from dask.distributed import get_worker
# worker = get_worker()
# print('worker{} process NODE "{}" worker: {}'.format(
# worker.name, self.uid, worker))
# except (ValueError, ImportError):
# pass
number_of_threads = 16
number_of_blocks = ((len(df) - 1) // number_of_threads) + 1
        # Allocate an (uninitialized) device array to hold per-row distances.
darr = cuda.device_array(len(df))
distance_kernel[(number_of_blocks,), (number_of_threads,)](
df['x'],
df['y'],
darr,
len(df))
df['distance_numba'] = darr
return {'distance_df': df}
kernel_string = r'''
extern "C" __global__
void compute_distance(const double* x, const double* y,
double* distance, int arr_len) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < arr_len){
distance[tid] = sqrt(x[tid]*x[tid] + y[tid]*y[tid]);
}
}
'''
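def _demo_compute_distance(n=1024):
    # Hypothetical standalone sketch (assumes a CUDA-capable GPU with cupy
    # installed): compile and launch the raw kernel above on random points,
    # mirroring the block/thread sizing used by CupyDistanceNode below.
    x = cupy.random.rand(n)
    y = cupy.random.rand(n)
    out = cupy.empty(n, dtype=cupy.float64)
    number_of_threads = 16
    number_of_blocks = (n - 1) // number_of_threads + 1
    kernel = cupy.RawKernel(kernel_string, 'compute_distance')
    kernel((number_of_blocks,), (number_of_threads,), (x, y, out, n))
    return out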
class CupyDistanceNode(Node):
def ports_setup(self):
port_type = PortsSpecSchema.port_type
input_ports = {
'points_df_in': {
port_type: [cudf.DataFrame,
dask_cudf.DataFrame]
}
}
output_ports = {
'distance_df': {
port_type: [cudf.DataFrame,
dask_cudf.DataFrame]
}
}
input_connections = self.get_connected_inports()
if 'points_df_in' in input_connections:
types = input_connections['points_df_in']
# connected
return NodePorts(inports={'points_df_in': {port_type: types}},
outports={'distance_df': {port_type: types}})
else:
return NodePorts(inports=input_ports, outports=output_ports)
def init(self):
self.delayed_process = True
    def meta_setup(self):
cols_required = {'x': 'float64',
'y': 'float64'}
required = {
'points_df_in': cols_required,
'distance_df': cols_required
}
input_meta = self.get_input_meta()
output_cols = ({
'distance_df': {
'x': 'float64',
'y': 'float64',
'distance_cupy': 'float64',
}
})
if 'points_df_in' in input_meta:
col_from_inport = input_meta['points_df_in']
# additional ports
output_cols['distance_df'].update(col_from_inport)
return MetaData(inports=required, outports=output_cols)
def conf_schema(self):
return ConfSchema()
def get_kernel(self):
raw_kernel = cupy.RawKernel(kernel_string, 'compute_distance')
return raw_kernel
def process(self, inputs):
df = inputs['points_df_in']
cupy_x = cupy.asarray(df['x'])
cupy_y = cupy.asarray(df['y'])
number_of_threads = 16
number_of_blocks = (len(df) - 1) // number_of_threads + 1
dis = cupy.ndarray(len(df), dtype=cupy.float64)
self.get_kernel()((number_of_blocks,), (number_of_threads,),
(cupy_x, cupy_y, dis, len(df)))
df['distance_cupy'] = dis
return {'distance_df': df}
class DistributedNode(Node):
def ports_setup(self):
input_ports = {
'points_df_in': {
PortsSpecSchema.port_type: cudf.DataFrame
}
}
output_ports = {
'points_ddf_out': {
PortsSpecSchema.port_type: dask_cudf.DataFrame
}
}
return NodePorts(inports=input_ports, outports=output_ports)
def init(self):
pass
    def meta_setup(self):
cols_required = {
'x': 'float64',
'y': 'float64'
}
required = {
'points_df_in': cols_required,
'points_ddf_out': cols_required
}
input_meta = self.get_input_meta()
output_cols = ({
'points_ddf_out': {
'x': 'float64',
'y': 'float64'
}
})
if 'points_df_in' in input_meta:
col_from_inport = input_meta['points_df_in']
# additional ports
output_cols['points_ddf_out'].update(col_from_inport)
return MetaData(inports=required, outports=output_cols)
def conf_schema(self):
json = {
"title": "DistributedNode configure",
"type": "object",
"properties": {
"npartitions": {
"type": "number",
"description": "num of partitions in the Dask dataframe",
"minimum": 1
}
},
"required": ["npartitions"],
}
ui = {
"npartitions": {"ui:widget": "updown"}
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
npartitions = self.conf['npartitions']
df = inputs['points_df_in']
ddf = dask_cudf.from_cudf(df, npartitions=npartitions)
return {'points_ddf_out': ddf}
class VerifyNode(Node):
def ports_setup(self):
input_ports = {
'df1': {
PortsSpecSchema.port_type: [cudf.DataFrame,
dask_cudf.DataFrame]
},
'df2': {
PortsSpecSchema.port_type: [cudf.DataFrame,
dask_cudf.DataFrame]
}
}
output_ports = {
'max_diff': {
PortsSpecSchema.port_type: float
}
}
connections = self.get_connected_inports()
for key in input_ports:
if key in connections:
# connected
types = connections[key]
input_ports[key].update({PortsSpecSchema.port_type: types})
return NodePorts(inports=input_ports, outports=output_ports)
def meta_setup(self):
        required = {
"df1": {},
"df2": {}
}
return MetaData(inports=required, outports={'max_diff': {}})
def conf_schema(self):
json = {
"title": "VerifyNode configure",
"type": "object",
"properties": {
"df1_col": {
"type": "string",
"description": "dataframe1 column name"
},
"df2_col": {
"type": "string",
"description": "dataframe2 column name"
}
},
"required": ["df1_col", "df2_col"],
}
ui = {
"df1_col": {"ui:widget": "text"},
"df2_col": {"ui:widget": "text"}
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
df1 = inputs['df1']
df2 = inputs['df2']
col_df1 = self.conf['df1_col']
col_df2 = self.conf['df2_col']
df1_col = df1[col_df1]
if isinstance(df1, dask_cudf.DataFrame):
# df1_col = df1_col.compute()
pass
df2_col = df2[col_df2]
if isinstance(df2, dask_cudf.DataFrame):
# df2_col = df2_col.compute()
pass
max_difference = (df1_col - df2_col).abs().max()
if isinstance(max_difference, dask.dataframe.core.Scalar):
max_difference = float(max_difference.compute())
max_difference = float(max_difference)
# print('Max Difference: {}'.format(max_difference))
# assert(max_difference < 1e-8)
return {'max_diff': max_difference}
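def _demo_verify(df1, df2):
    # Minimal sketch (illustrative) of the comparison VerifyNode performs:
    # the largest absolute elementwise difference between two columns, e.g.
    # the 'distance_cudf' and 'distance_numba' outputs of the nodes above.
    return float((df1['distance_cudf'] - df2['distance_numba']).abs().max())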
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/custom_port_nodes.py |
import ipywidgets as widgets
def getXGBoostWidget(replace_spec, task_graph, plot_figures):
def getRangeSlider(val0, val1, des=""):
return widgets.IntRangeSlider(value=[val0, val1],
min=1,
max=60,
step=1,
description=des,
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True)
def getSlider(val, des=""):
return widgets.IntSlider(value=val,
min=1,
max=60,
step=1,
description=des,
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
out = widgets.Output(layout={'border': '1px solid black'})
with out:
indicators = \
replace_spec['technical_indicator']['conf']['indicators']
chaikin_selector = getRangeSlider(indicators[0]['args'][0],
indicators[0]['args'][1], "Chaikin")
def chaikin_selection(*stocks):
with out:
indicators[0]['args'][0] = chaikin_selector.value[0]
indicators[0]['args'][1] = chaikin_selector.value[1]
chaikin_selector.observe(chaikin_selection, 'value')
bollinger_selector = getSlider(indicators[1]['args'][0], "bollinger")
def bollinger_selection(*stocks):
with out:
indicators[1]['args'][0] = bollinger_selector.value
bollinger_selector.observe(bollinger_selection, 'value')
macd_selector = getRangeSlider(indicators[2]['args'][0],
indicators[2]['args'][1],
"MACD")
def macd_selection(*stocks):
with out:
indicators[2]['args'][0] = macd_selector.value[0]
indicators[2]['args'][1] = macd_selector.value[1]
macd_selector.observe(macd_selection, 'value')
rsi_selector = getSlider(indicators[3]['args'][0], "Relative Str")
def rsi_selection(*stocks):
with out:
indicators[3]['args'][0] = rsi_selector.value
rsi_selector.observe(rsi_selection, 'value')
atr_selector = getSlider(indicators[4]['args'][0], "ATR")
def atr_selection(*stocks):
with out:
indicators[4]['args'][0] = atr_selector.value
atr_selector.observe(atr_selection, 'value')
sod_selector = getSlider(indicators[6]['args'][0], "Sto Osc")
def sod_selection(*stocks):
with out:
indicators[6]['args'][0] = sod_selector.value
sod_selector.observe(sod_selection, 'value')
mflow_selector = getSlider(indicators[7]['args'][0], "Money F")
def mflow_selection(*stocks):
with out:
indicators[7]['args'][0] = mflow_selector.value
mflow_selector.observe(mflow_selection, 'value')
findex_selector = getSlider(indicators[8]['args'][0], "Force Index")
def findex_selection(*stocks):
with out:
indicators[8]['args'][0] = findex_selector.value
findex_selector.observe(findex_selection, 'value')
adis_selector = getSlider(indicators[10]['args'][0], "Ave DMI")
def adis_selection(*stocks):
with out:
indicators[10]['args'][0] = adis_selector.value
adis_selector.observe(adis_selection, 'value')
ccindex_selector = getSlider(indicators[11]['args'][0], "Comm Cha")
def ccindex_selection(*stocks):
with out:
indicators[11]['args'][0] = ccindex_selector.value
ccindex_selector.observe(ccindex_selection, 'value')
bvol_selector = getSlider(indicators[12]['args'][0], "On Balance")
def bvol_selection(*stocks):
with out:
indicators[12]['args'][0] = bvol_selector.value
bvol_selector.observe(bvol_selection, 'value')
vindex_selector = getSlider(indicators[13]['args'][0], "Vortex")
def vindex_selection(*stocks):
with out:
indicators[13]['args'][0] = vindex_selector.value
vindex_selector.observe(vindex_selection, 'value')
mindex_selector = getRangeSlider(indicators[15]['args'][0],
indicators[15]['args'][1],
"Mass Index")
def mindex_selection(*stocks):
with out:
indicators[15]['args'][0] = mindex_selector.value[0]
indicators[15]['args'][1] = mindex_selector.value[1]
mindex_selector.observe(mindex_selection, 'value')
tindex_selector = getRangeSlider(indicators[16]['args'][0],
indicators[16]['args'][1],
"True Strength")
def tindex_selection(*stocks):
with out:
indicators[16]['args'][0] = tindex_selector.value[0]
indicators[16]['args'][1] = tindex_selector.value[1]
tindex_selector.observe(tindex_selection, 'value')
emove_selector = getSlider(indicators[17]['args'][0], "Easy Move")
def emove_selection(*stocks):
with out:
indicators[17]['args'][0] = emove_selector.value
emove_selector.observe(emove_selection, 'value')
cc_selector = getSlider(indicators[18]['args'][0], "Cppock Curve")
def cc_selection(*stocks):
with out:
indicators[18]['args'][0] = cc_selector.value
cc_selector.observe(cc_selection, 'value')
kchannel_selector = getSlider(indicators[19]['args'][0],
"Keltner Channel")
def kchannel_selection(*stocks):
with out:
indicators[19]['args'][0] = kchannel_selector.value
kchannel_selector.observe(kchannel_selection, 'value')
button = widgets.Button(
description='Compute',
disabled=False,
button_style='',
tooltip='Click me')
def on_button_clicked(b):
with out:
print("Button clicked.")
w.children = (w.children[0], widgets.Label("Busy...."),)
o_gpu = task_graph.run(replace=replace_spec)
figure_combo = plot_figures(o_gpu)
w.children = (w.children[0], figure_combo,)
button.on_click(on_button_clicked)
selectors = widgets.VBox([chaikin_selector, bollinger_selector,
macd_selector, rsi_selector, atr_selector,
sod_selector, mflow_selector, findex_selector,
adis_selector, ccindex_selector, bvol_selector,
vindex_selector, mindex_selector,
tindex_selector, emove_selector, cc_selector,
kchannel_selector, button])
w = widgets.VBox([selectors])
return w
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/plotutils.py |
'''
'''
import sys
from collections import OrderedDict
import re
import numpy as np
from greenflow.dataframe_flow import Node
import logging
# logging.config.dictConfig({
# 'version': 1,
# 'disable_existing_loggers': False
# })
_DISTRIB_FORMATTER = None
def init_workers_logger():
'''Initialize logger within all workers. Meant to be run as:
client.run(init_workers_logger)
'''
global _DISTRIB_FORMATTER
distrib_logger = logging.getLogger('distributed.worker')
formatter = logging.Formatter(
'%(asctime)s.%(msecs)03d %(name)s:%(levelname)s: %(message)s',
datefmt='%H:%M:%S'
)
if _DISTRIB_FORMATTER is None:
_DISTRIB_FORMATTER = distrib_logger.handlers[0].formatter
distrib_logger.handlers[0].setFormatter(formatter)
def restore_workers_logger():
'''Restore logger within all workers. Meant to be run as:
client.run(restore_workers_logger)
Run this after printing worker logs i.e. after:
wlogs = client.get_worker_logs()
    # print entries from wlogs
'''
global _DISTRIB_FORMATTER
distrib_logger = logging.getLogger('distributed.worker')
if _DISTRIB_FORMATTER is not None:
distrib_logger.handlers[0].setFormatter(_DISTRIB_FORMATTER)
_DISTRIB_FORMATTER = None
_CONFIGLOG = True
class MortgagePluginsLoggerMgr(object):
'''Logger manager for greenflow mortgage plugins.
When using this log manager to hijack dask distributed.worker logger
(worker is not None), must first initialize worker loggers via:
client.run(init_workers_logger)
    After printing out entries from worker logs, restore worker loggers via:
client.run(restore_workers_logger)
WARNING: HIJACKING Dask Distributed logger within dask-workers!!! This
is NOT a great implementation. Done to capture and display logs in Jupyter.
TODO: Implement a server/client logger per example:
https://docs.python.org/3/howto/logging-cookbook.html#sending-and-receiving-logging-events-across-a-network
'''
def __init__(self, worker=None, logname='mortgage_plugins'):
if worker is None:
logger = self._get_mortgage_plugins_logger()
console_handler = None
else:
# WARNING: HIJACKING Dask Distributed logger!!!
logger = logging.getLogger('distributed.worker.' + logname)
console_handler = self._config_log_handler(
logger, propagate=True, addtimestamp=True)
self._logger = logger
self._console_handler = console_handler
@staticmethod
def _config_log_handler(logger, propagate=True, addtimestamp=False):
'''Configure logger handler with streaming to stdout and formatter. Add
the handler to the logger.
'''
if addtimestamp:
formatter = logging.Formatter(
'%(asctime)s.%(msecs)03d %(name)s:%(levelname)s: %(message)s',
datefmt='%H:%M:%S'
)
else:
formatter = logging.Formatter(
'%(name)s:%(levelname)s: %(message)s')
        console_handler = logging.StreamHandler(sys.stdout)  # console handler
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
logger.setLevel(logging.INFO)
logger.propagate = propagate
# logger.info('CONFIGURING LOGGER')
return console_handler
@classmethod
def _get_mortgage_plugins_logger(cls):
'''Obtain a logger for mortgage plugins. Used when the running process
is not a dask-worker.
'''
logger = logging.getLogger(__name__)
global _CONFIGLOG
if _CONFIGLOG:
cls._config_log_handler(logger, propagate=False)
_CONFIGLOG = False
        # There should only be one handler. With Dask there's a race
        # condition that can leave multiple logging handlers attached.
while len(logger.handlers) > 1:
logger.handlers.pop()
return logger
def get_logger(self):
        '''Get the logger being managed by an instance of this log manager.'''
return self._logger
def cleanup(self):
'''Clean up the logger.'''
if self._console_handler is not None:
self._logger.removeHandler(self._console_handler)
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')
def convert(name):
'''Convert CamelCase to snake_case.
https://stackoverflow.com/a/1176023/3457624
'''
s1 = first_cap_re.sub(r'\1_\2', name)
return all_cap_re.sub(r'\1_\2', s1).lower()
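# For example: convert('CsvMortgageAcquisitionDataLoader') returns
# 'csv_mortgage_acquisition_data_loader'; the plugins below use this to
# derive per-class logger names.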
class CsvMortgageAcquisitionDataLoader(Node):
'''greenflow task/node to read in a mortgage acquisition CSV file into a cudf
dataframe. Configuration requirements:
'conf': {
'csvfile_names': path to mortgage seller names csv datafile
'csvfile_acqdata': path to mortgage acquisition csv datafile
}
'''
cols_dtypes = OrderedDict([
('loan_id', 'int64'),
# ('orig_channel', 'category'),
('orig_channel', 'int32'),
# ('seller_name', 'category'),
('seller_name', 'int32'),
('orig_interest_rate', 'float64'),
('orig_upb', 'int64'),
('orig_loan_term', 'int64'),
('orig_date', 'date'),
('first_pay_date', 'date'),
('orig_ltv', 'float64'),
('orig_cltv', 'float64'),
('num_borrowers', 'float64'),
('dti', 'float64'),
('borrower_credit_score', 'float64'),
# ('first_home_buyer', 'category'),
('first_home_buyer', 'int32'),
# ('loan_purpose', 'category'),
('loan_purpose', 'int32'),
# ('property_type', 'category'),
('property_type', 'int32'),
('num_units', 'int64'),
# ('occupancy_status', 'category'),
('occupancy_status', 'int32'),
# ('property_state', 'category'),
('property_state', 'int32'),
('zip', 'int64'),
('mortgage_insurance_percent', 'float64'),
# ('product_type', 'category'),
('product_type', 'int32'),
('coborrow_credit_score', 'float64'),
('mortgage_insurance_type', 'float64'),
# ('relocation_mortgage_indicator', 'category')
('relocation_mortgage_indicator', 'int32')
])
def meta_setup(self):
self.addition = self.cols_dtypes
def process(self, inputs):
'''
'''
import cudf
worker = None
try:
from dask.distributed import get_worker
worker = get_worker()
except (ValueError, ImportError):
pass
logname = convert(self.__class__.__name__)
logmgr = MortgagePluginsLoggerMgr(worker, logname)
logger = logmgr.get_logger()
worker_name = ''
if worker is not None:
worker_name = 'WORKER {} '.format(worker.name)
col_names_path = self.conf['csvfile_names']
cols_dtypes = OrderedDict([
('seller_name', 'int32'),
('new', 'int32'),
])
cols = list(cols_dtypes.keys())
dtypes = list(cols_dtypes.values())
names_gdf = cudf.read_csv(
col_names_path,
names=cols, dtype=dtypes,
delimiter='|', skiprows=1)
acquisition_path = self.conf['csvfile_acqdata']
cols = list(self.addition.keys())
dtypes = list(self.addition.values())
logger.info(worker_name + 'LOADING: {}'.format(acquisition_path))
acq_gdf = cudf.read_csv(
acquisition_path,
names=cols, dtype=dtypes,
delimiter='|', skiprows=1)
acq_gdf = acq_gdf.merge(names_gdf, how='left', on=['seller_name'])
acq_gdf['seller_name'] = acq_gdf['new']
acq_gdf.drop_column('new')
logmgr.cleanup()
return acq_gdf
class CsvMortgagePerformanceDataLoader(Node):
'''greenflow task/node to read in a mortgage performance CSV file into a cudf
dataframe. Configuration requirements:
'conf': {
'csvfile_perfdata': path to mortgage performance csv datafile
}
'''
cols_dtypes = OrderedDict([
('loan_id', 'int64'),
('monthly_reporting_period', 'date'),
# ('servicer', 'category'),
('servicer', 'int32'),
('interest_rate', 'float64'),
('current_actual_upb', 'float64'),
('loan_age', 'float64'),
('remaining_months_to_legal_maturity', 'float64'),
('adj_remaining_months_to_maturity', 'float64'),
('maturity_date', 'date'),
('msa', 'float64'),
('current_loan_delinquency_status', 'int32'),
# ('mod_flag', 'category'),
('mod_flag', 'int32'),
# ('zero_balance_code', 'category'),
('zero_balance_code', 'int32'),
('zero_balance_effective_date', 'date'),
('last_paid_installment_date', 'date'),
('foreclosed_after', 'date'),
('disposition_date', 'date'),
('foreclosure_costs', 'float64'),
('prop_preservation_and_repair_costs', 'float64'),
('asset_recovery_costs', 'float64'),
('misc_holding_expenses', 'float64'),
('holding_taxes', 'float64'),
('net_sale_proceeds', 'float64'),
('credit_enhancement_proceeds', 'float64'),
('repurchase_make_whole_proceeds', 'float64'),
('other_foreclosure_proceeds', 'float64'),
('non_interest_bearing_upb', 'float64'),
('principal_forgiveness_upb', 'float64'),
# ('repurchase_make_whole_proceeds_flag', 'category'),
('repurchase_make_whole_proceeds_flag', 'int32'),
('foreclosure_principal_write_off_amount', 'float64'),
# ('servicing_activity_indicator', 'category')
('servicing_activity_indicator', 'int32')
])
def meta_setup(self):
self.addition = self.cols_dtypes
def process(self, inputs):
'''
'''
import cudf
worker = None
try:
from dask.distributed import get_worker
worker = get_worker()
except (ValueError, ImportError):
pass
logname = convert(self.__class__.__name__)
logmgr = MortgagePluginsLoggerMgr(worker, logname)
logger = logmgr.get_logger()
worker_name = ''
if worker is not None:
worker_name = 'WORKER {} '.format(worker.name)
performance_path = self.conf['csvfile_perfdata']
logger.info(worker_name + 'LOADING: {}'.format(performance_path))
cols = list(self.addition.keys())
dtypes = list(self.addition.values())
mortgage_gdf = cudf.read_csv(
performance_path,
names=cols, dtype=dtypes,
delimiter='|', skiprows=1)
logmgr.cleanup()
return mortgage_gdf
class CreateEverFeatures(Node):
    '''greenflow task/node to calculate delinquency status period features.
Refer to meta_setup method for the columns produced.
'''
def meta_setup(self):
self.required = OrderedDict([
('loan_id', 'int64'),
('current_loan_delinquency_status', 'int32')
])
self.retention = {
'loan_id': 'int64',
'ever_30': 'int8',
'ever_90': 'int8',
'ever_180': 'int8'
}
def process(self, inputs):
'''
'''
gdf = inputs[0]
everdf = gdf[['loan_id', 'current_loan_delinquency_status']]
everdf = everdf.groupby('loan_id', method='hash', as_index=False).max()
everdf['ever_30'] = \
(everdf['current_loan_delinquency_status'] >= 1).astype('int8')
everdf['ever_90'] = \
(everdf['current_loan_delinquency_status'] >= 3).astype('int8')
everdf['ever_180'] = \
(everdf['current_loan_delinquency_status'] >= 6).astype('int8')
everdf.drop_column('current_loan_delinquency_status')
return everdf
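# Worked example (illustrative) for CreateEverFeatures: a loan whose maximum
# current_loan_delinquency_status is 4 yields ever_30=1 and ever_90=1 but
# ever_180=0, since 4 >= 1 and 4 >= 3 while 4 < 6.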
class CreateDelinqFeatures(Node):
    '''greenflow task/node to calculate delinquency features.
Refer to meta_setup method for the columns produced.
'''
def meta_setup(self):
self.required = OrderedDict([
('loan_id', 'int64'),
('monthly_reporting_period', 'date'),
('current_loan_delinquency_status', 'int32')
])
self.retention = {
'loan_id': 'int64',
'delinquency_30': 'date',
'delinquency_90': 'date',
'delinquency_180': 'date'
}
def process(self, inputs):
'''
'''
perf_df = inputs[0]
delinq_gdf = perf_df[[
'loan_id', 'monthly_reporting_period',
'current_loan_delinquency_status']]
delinq_30 = delinq_gdf.query('current_loan_delinquency_status >= 1')[[
'loan_id', 'monthly_reporting_period']]\
.groupby('loan_id', method='hash', as_index=False).min()
delinq_30['delinquency_30'] = delinq_30['monthly_reporting_period']
delinq_30.drop_column('monthly_reporting_period')
delinq_90 = delinq_gdf.query('current_loan_delinquency_status >= 3')[[
'loan_id', 'monthly_reporting_period']]\
.groupby('loan_id', method='hash', as_index=False).min()
delinq_90['delinquency_90'] = delinq_90['monthly_reporting_period']
delinq_90.drop_column('monthly_reporting_period')
delinq_180 = delinq_gdf.query('current_loan_delinquency_status >= 6')[[
'loan_id', 'monthly_reporting_period']]\
.groupby('loan_id', method='hash', as_index=False).min()
delinq_180['delinquency_180'] = delinq_180['monthly_reporting_period']
delinq_180.drop_column('monthly_reporting_period')
delinq_merge = delinq_30.merge(
delinq_90, how='left', on=['loan_id'], type='hash')
delinq_merge['delinquency_90'] = delinq_merge['delinquency_90']\
.fillna(np.dtype('datetime64[ms]').type('1970-01-01')
.astype('datetime64[ms]'))
delinq_merge = delinq_merge.merge(
delinq_180, how='left', on=['loan_id'], type='hash')
delinq_merge['delinquency_180'] = delinq_merge['delinquency_180']\
.fillna(np.dtype('datetime64[ms]').type('1970-01-01')
.astype('datetime64[ms]'))
del(delinq_30)
del(delinq_90)
del(delinq_180)
return delinq_merge
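# Note (illustrative): loans that never reach the 90- or 180-day delinquency
# buckets come out of the left merges above with nulls, which are filled with
# the 1970-01-01 epoch as a sentinel date so downstream joins see no nulls.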
class JoinPerfEverDelinqFeatures(Node):
    '''greenflow task/node to merge delinquency features. Merges dataframes
produced by CreateEverFeatures and CreateDelinqFeatures.
Refer to meta_setup method for the columns produced.
'''
cols_dtypes = {
'timestamp': 'date',
'delinquency_12': 'int32',
'upb_12': 'float64',
'ever_30': 'int8',
'ever_90': 'int8',
'ever_180': 'int8',
'delinquency_30': 'date',
'delinquency_90': 'date',
'delinquency_180': 'date'
}
def meta_setup(self):
'''
'''
self.retention = {
'loan_id': 'int64',
'timestamp_month': 'int32',
'timestamp_year': 'int32'
}
self.retention.update(self.cols_dtypes)
def __join_ever_delinq_features(self, everdf_in, delinqdf_in):
everdf = everdf_in.merge(
delinqdf_in, on=['loan_id'], how='left', type='hash')
everdf['delinquency_30'] = everdf['delinquency_30']\
.fillna(np.dtype('datetime64[ms]').type('1970-01-01')
.astype('datetime64[ms]'))
everdf['delinquency_90'] = everdf['delinquency_90']\
.fillna(np.dtype('datetime64[ms]').type('1970-01-01')
.astype('datetime64[ms]'))
everdf['delinquency_180'] = everdf['delinquency_180']\
.fillna(np.dtype('datetime64[ms]').type('1970-01-01')
.astype('datetime64[ms]'))
return everdf
def process(self, inputs):
'''
'''
perf_df = inputs[0]
# if using JoinEverDelinqFeatures. Seems unnecessary
# ever_delinq_df = inputs[1]
everdf_in = inputs[1]
delinqdf_in = inputs[2]
ever_delinq_df = \
self.__join_ever_delinq_features(everdf_in, delinqdf_in)
test = perf_df[[
'loan_id',
'monthly_reporting_period',
'current_loan_delinquency_status',
'current_actual_upb'
]]
test['timestamp'] = test['monthly_reporting_period']
test.drop_column('monthly_reporting_period')
test['timestamp_month'] = test['timestamp'].dt.month
test['timestamp_year'] = test['timestamp'].dt.year
test['delinquency_12'] = test['current_loan_delinquency_status']
test.drop_column('current_loan_delinquency_status')
test['upb_12'] = test['current_actual_upb']
test.drop_column('current_actual_upb')
test['upb_12'] = test['upb_12'].fillna(999999999)
test['delinquency_12'] = test['delinquency_12'].fillna(-1)
joined_df = test.merge(
ever_delinq_df, how='left', on=['loan_id'], type='hash')
joined_df['ever_30'] = joined_df['ever_30'].fillna(-1)
joined_df['ever_90'] = joined_df['ever_90'].fillna(-1)
joined_df['ever_180'] = joined_df['ever_180'].fillna(-1)
joined_df['delinquency_30'] = joined_df['delinquency_30'].fillna(-1)
joined_df['delinquency_90'] = joined_df['delinquency_90'].fillna(-1)
joined_df['delinquency_180'] = joined_df['delinquency_180'].fillna(-1)
joined_df['timestamp_month'] = \
joined_df['timestamp_month'].astype('int32')
joined_df['timestamp_year'] = \
joined_df['timestamp_year'].astype('int32')
return joined_df
class Create12MonFeatures(Node):
    '''greenflow task/node to calculate delinquency features over 12 months.
Refer to meta_setup method for the columns produced.
'''
def meta_setup(self):
'''
'''
self.retention = {
'loan_id': 'int64',
'delinquency_12': 'int32',
'upb_12': 'float64',
'timestamp_month': 'int8',
'timestamp_year': 'int16'
}
def process(self, inputs):
'''
'''
import cudf
perf_ever_delinq_df = inputs[0]
testdfs = []
n_months = 12
for y in range(1, n_months + 1):
tmpdf = perf_ever_delinq_df[[
'loan_id', 'timestamp_year', 'timestamp_month',
'delinquency_12', 'upb_12'
]]
tmpdf['josh_months'] = \
tmpdf['timestamp_year'] * 12 + tmpdf['timestamp_month']
tmpdf['josh_mody_n'] = \
((tmpdf['josh_months'].astype('float64') - 24000 - y) / 12)\
.floor()
tmpdf = tmpdf.groupby(
['loan_id', 'josh_mody_n'], method='hash', as_index=False)\
.agg({'delinquency_12': 'max', 'upb_12': 'min'})
# tmpdf['delinquency_12'] = \
# (tmpdf['max_delinquency_12'] > 3).astype('int32')
# tmpdf.drop_column('max_delinquency_12')
#
# tmpdf['delinquency_12'] += \
# (tmpdf['min_upb_12'] == 0).astype('int32')
#
# tmpdf['upb_12'] = tmpdf['min_upb_12']
# tmpdf.drop_column('min_upb_12')
tmpdf['timestamp_year'] = \
(((tmpdf['josh_mody_n'] * n_months) + 24000 + (y - 1)) / 12)\
.floor().astype('int16')
tmpdf.drop_column('josh_mody_n')
tmpdf['timestamp_month'] = np.int8(y)
testdfs.append(tmpdf)
test_12mon_feat_df = cudf.concat(testdfs)
return test_12mon_feat_df
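# Worked example (illustrative) of the josh_mody_n arithmetic above:
# 24000 == 2000 * 12, so josh_months counts months from year 0 and
# josh_mody_n buckets them into 12-month windows offset by y. With
# timestamp_year=2000, timestamp_month=3 and y=1:
#   josh_months = 2000 * 12 + 3 = 24003
#   josh_mody_n = floor((24003 - 24000 - 1) / 12) = 0
#   timestamp_year = floor((0 * 12 + 24000 + 0) / 12) = 2000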
def _null_workaround(df):
'''Fix up null entries in dataframes. This is specific to the mortgage
workflow.
'''
for column, data_type in df.dtypes.items():
if str(data_type) == "category":
df[column] = df[column]\
.astype('int32').fillna(np.dtype(np.int32).type(-1))
if str(data_type) in \
['int8', 'int16', 'int32', 'int64', 'float32', 'float64']:
df[column] = df[column]\
.fillna(np.dtype(data_type).type(-1)).astype(data_type)
return df
class FinalPerfDelinq(Node):
'''Merge performance dataframe with calculated features dataframes.
Refer to meta_setup method for the columns produced.
'''
cols_dtypes = dict()
cols_dtypes.update(CsvMortgagePerformanceDataLoader.cols_dtypes)
cols_dtypes.update(JoinPerfEverDelinqFeatures.cols_dtypes)
def meta_setup(self):
'''
'''
self.retention = self.cols_dtypes
@staticmethod
def __combine_joined_12_mon(perf_ever_delinq_df, test_12mon_df):
perf_ever_delinq_df.drop_column('delinquency_12')
perf_ever_delinq_df.drop_column('upb_12')
perf_ever_delinq_df['timestamp_year'] = \
perf_ever_delinq_df['timestamp_year'].astype('int16')
perf_ever_delinq_df['timestamp_month'] = \
perf_ever_delinq_df['timestamp_month'].astype('int8')
return perf_ever_delinq_df.merge(
test_12mon_df,
how='left',
on=['loan_id', 'timestamp_year', 'timestamp_month'],
type='hash')
@classmethod
def __final_performance_delinquency(
cls, perf_df, perf_ever_delinq_df, test_12mon_df):
joined_df = \
cls.__combine_joined_12_mon(perf_ever_delinq_df, test_12mon_df)
merged = _null_workaround(perf_df)
joined_df = _null_workaround(joined_df)
joined_df['timestamp_month'] = \
joined_df['timestamp_month'].astype('int8')
joined_df['timestamp_year'] = \
joined_df['timestamp_year'].astype('int16')
merged['timestamp_month'] = merged['monthly_reporting_period'].dt.month
merged['timestamp_month'] = merged['timestamp_month'].astype('int8')
merged['timestamp_year'] = merged['monthly_reporting_period'].dt.year
merged['timestamp_year'] = merged['timestamp_year'].astype('int16')
merged = merged.merge(
joined_df, how='left',
on=['loan_id', 'timestamp_year', 'timestamp_month'], type='hash')
merged.drop_column('timestamp_month')
merged.drop_column('timestamp_year')
return merged
def process(self, inputs):
'''
'''
perf_df = inputs[0].copy()
perf_ever_delinq_df = inputs[1].copy()
test_12mon_df = inputs[2]
final_perf_df = self.__final_performance_delinquency(
perf_df, perf_ever_delinq_df, test_12mon_df)
return final_perf_df
class JoinFinalPerfAcqClean(Node):
'''Merge acquisition dataframe with dataframe produced by FinalPerfDelinq.
Refer to meta_setup method for the columns produced.
'''
_drop_list = [
'loan_id',
'orig_date',
'first_pay_date',
'seller_name',
'monthly_reporting_period',
'last_paid_installment_date',
'maturity_date',
'ever_30', 'ever_90', 'ever_180',
'delinquency_30', 'delinquency_90', 'delinquency_180',
'upb_12',
'zero_balance_effective_date',
'foreclosed_after',
'disposition_date',
'timestamp'
]
cols_dtypes = dict()
cols_dtypes.update(FinalPerfDelinq.cols_dtypes)
cols_dtypes.update(CsvMortgageAcquisitionDataLoader.cols_dtypes)
# all float64, int32 and int64 types are converted to float32 types.
for icol, itype in cols_dtypes.items():
if itype in ('float64', 'int32', 'int64',):
cols_dtypes[icol] = 'float32'
# The only exception is delinquency_12 which becomes int8
cols_dtypes.update({'delinquency_12': 'int8'})
for col in _drop_list:
cols_dtypes.pop(col)
def meta_setup(self):
'''
'''
self.retention = self.cols_dtypes
@classmethod
def __last_mile_cleaning(cls, df, cols_to_keep=tuple()):
drop_list = cls._drop_list
for column in drop_list:
if column in cols_to_keep:
continue
if column not in df.columns:
continue
df.drop_column(column)
        for col, dtype in df.dtypes.items():
if str(dtype) == 'category':
df[col] = df[col].cat.codes
df[col] = df[col].astype('float32')
if 'delinquency_12' in df.columns:
df['delinquency_12'] = df['delinquency_12'] > 0
df['delinquency_12'] = \
df['delinquency_12'].fillna(False).astype('int8')
for column in df.columns:
df[column] = df[column].fillna(-1)
# return df.to_arrow(preserve_index=False)
return df
def process(self, inputs):
'''
'''
perf_df = inputs[0].copy()
acq_df = inputs[1].copy()
perf_df = _null_workaround(perf_df)
acq_df = _null_workaround(acq_df)
cols_to_keep = ('loan_id', 'seller_name',)
perf_df = self.__last_mile_cleaning(perf_df, cols_to_keep=cols_to_keep)
# cleaning acq_df causes out of memory error during merge!? rapids 0.14
# acq_df = self.__last_mile_cleaning(acq_df, cols_to_keep=cols_to_keep)
acq_df['seller_name'] = acq_df['seller_name'].astype('category')
perf_acq_df = perf_df.merge(
acq_df, how='left', on=['loan_id'], type='hash')
perf_acq_df = self.__last_mile_cleaning(perf_acq_df)
return perf_acq_df
def mortgage_greenflow_run(run_params_dict):
'''Using dataframe-flow runs the tasks/workflow specified in the
run_params_dict. Expected run_params_dict ex:
run_params_dict = {
'replace_spec': replace_spec,
'task_spec_list': greenflow_task_spec_list,
'out_list': out_list
}
greenflow_task_spec_list - Mortgage ETL workflow list of task-specs. Refer to
module mortgage_common function mortgage_etl_workflow_def.
out_list - Expected to specify one output which should be the final
dataframe produced by the mortgage ETL workflow.
:param run_params_dict: Dictionary with parameters and greenflow task list to
run mortgage workflow.
'''
from greenflow.dataframe_flow import TaskGraph
task_spec_list = run_params_dict['task_spec_list']
out_list = run_params_dict['out_list']
replace_spec = run_params_dict['replace_spec']
task_graph = TaskGraph(task_spec_list)
(final_perf_acq_df,) = task_graph.run(out_list, replace_spec)
return final_perf_acq_df
def print_ram_usage(worker_name='', logger=None):
'''Display host RAM usage on the system using free -m command.'''
import os
logmgr = None
if logger is None:
logmgr = MortgagePluginsLoggerMgr()
logger = logmgr.get_logger()
tot_m, used_m, free_m = \
map(int, os.popen('free -t -m').readlines()[-1].split()[1:])
logger.info(
worker_name + 'HOST RAM (MB) TOTAL {}; USED {}; FREE {}'
.format(tot_m, used_m, free_m))
if logmgr is not None:
logmgr.cleanup()
def mortgage_workflow_runner(mortgage_run_params_dict_list):
'''Runs the mortgage_greenflow_run for each entry in the
mortgage_run_params_dict_list. Each entry is a run_params_dict.
Expected run_params_dict:
run_params_dict = {
'replace_spec': replace_spec,
'task_spec_list': greenflow_task_spec_list,
'out_list': out_list
}
:param mortgage_run_params_dict_list: List of run_params_dict
'''
import os # @Reimport
import gc
import pyarrow as pa
# count = len(mortgage_run_params_dict_list)
# print('LOGGER: ', logger)
worker = None
try:
from dask.distributed import get_worker
worker = get_worker()
except (ValueError, ImportError):
pass
logname = 'mortgage_workflow_runner'
logmgr = MortgagePluginsLoggerMgr(worker, logname)
logger = logmgr.get_logger()
worker_name = ''
if worker is not None:
worker_name = 'WORKER {} '.format(worker.name)
logger.info(worker_name + 'RUNNING MORTGAGE Greenflow DataframeFlow')
logger.info(worker_name + 'NCCL_P2P_DISABLE: {}'.format(
os.environ.get('NCCL_P2P_DISABLE')))
logger.info(worker_name + 'CUDA_VISIBLE_DEVICES: {}'.format(
os.environ.get('CUDA_VISIBLE_DEVICES')))
# cpu_df_concat_pandas = None
final_perf_acq_arrow_concat = None
for ii, run_params_dict in enumerate(mortgage_run_params_dict_list):
# performance_path = run_params_dict['csvfile_perfdata']
# logger.info(worker_name + 'LOADING: {}'.format(performance_path))
final_perf_acq_gdf = mortgage_greenflow_run(run_params_dict)
# CONCATENATE DATAFRAMES AS THEY ARE CALCULATED
# cpu_df_pandas = gpu_df.to_pandas()
# if cpu_df_concat_pandas is None:
# cpu_df_concat_pandas = cpu_df_pandas
# else:
# cpu_df_concat_pandas = \
# pd.concat([cpu_df_concat_pandas, cpu_df_pandas])
# del(cpu_df_pandas)
final_perf_acq_arrow = \
final_perf_acq_gdf.to_arrow(preserve_index=False)
if final_perf_acq_arrow_concat is None:
final_perf_acq_arrow_concat = final_perf_acq_arrow
else:
final_perf_acq_arrow_concat = pa.concat_tables([
final_perf_acq_arrow_concat, final_perf_acq_arrow])
del(final_perf_acq_gdf)
logger.info(worker_name + 'LOADED {} FRAMES'.format(ii + 1))
print_ram_usage(worker_name, logger)
logger.info(worker_name + 'RUN PYTHON GARBAGE COLLECTION TO MAYBE CLEAR '
'CPU AND GPU MEMORY')
gc.collect()
print_ram_usage(worker_name, logger)
# df_concat = cpu_df_concat_pandas
# delinq_df = df_concat[['delinquency_12']]
# indexes_besides_delinq = \
# df_concat.columns.difference(['delinquency_12'])
# mortgage_feat_df = df_concat[list(indexes_besides_delinq)]
# del(df_concat)
logger.info(worker_name + 'USING ARROW')
cpu_df_concat_arrow = final_perf_acq_arrow_concat
delinq_arrow_col = cpu_df_concat_arrow.column('delinquency_12')
mortgage_feat_arrow_table = cpu_df_concat_arrow.drop(['delinquency_12'])
# logger.info(worker_name + 'ARROW TO CUDF')
# delinq_arrow_table = pa.Table.from_arrays([delinq_arrow_col])
# delinq_df = cudf.DataFrame.from_arrow(delinq_arrow_table)
# mortgage_feat_df = cudf.DataFrame.from_arrow(mortgage_feat_arrow_table)
logger.info(worker_name + 'ARROW TO PANDAS')
delinq_df = delinq_arrow_col.to_pandas()
mortgage_feat_df = mortgage_feat_arrow_table.to_pandas()
del(delinq_arrow_col)
del(mortgage_feat_arrow_table)
# clear CPU/GPU memory
gc.collect()
print_ram_usage(worker_name, logger)
logmgr.cleanup()
return (mortgage_feat_df, delinq_df)
class MortgageWorkflowRunner(Node):
'''Runs the mortgage greenflow workflow and returns the mortgage features
dataframe and mortgage delinquency dataframe. These can be passed on
to xgboost for training.
conf: {
'mortgage_run_params_dict_list': REQUIRED. List of dictionaries of
mortgage run params.
}
mortgage_run_param_dict = {
'replace_spec': replace_spec,
'task_spec_list': greenflow_task_spec_list,
'out_list': out_list
}
Returns: mortgage_feat_df_pandas, delinq_df_pandas
DataframeFlow will return a tuple so unpack as tuple of tuples:
((mortgage_feat_df_pandas, delinq_df_pandas),)
'''
def meta_setup(self):
'''
'''
pass
def process(self, inputs):
logmgr = MortgagePluginsLoggerMgr()
logger = logmgr.get_logger()
mortgage_run_params_dict_list = \
self.conf['mortgage_run_params_dict_list']
count = len(mortgage_run_params_dict_list)
logger.info('TRYING TO LOAD {} FRAMES'.format(count))
mortgage_feat_df_pandas, delinq_df_pandas = \
mortgage_workflow_runner(mortgage_run_params_dict_list)
logmgr.cleanup()
return mortgage_feat_df_pandas, delinq_df_pandas
class XgbMortgageTrainer(Node):
'''Trains an XGBoost booster.
Configuration:
conf: {
'delete_dataframes': OPTIONAL. Boolean (True or False). Delete the
intermediate mortgage dataframes from which an xgboost dmatrix
is created. This is to potentially clear up CPU/GPU memory.
'xgb_gpu_params': REQUIRED. Dictionary of xgboost trainer
parameters.
}
Example of xgb_gpu_params:
xgb_gpu_params = {
'nround': 100,
'max_depth': 8,
'max_leaves': 2 ** 8,
'alpha': 0.9,
'eta': 0.1,
'gamma': 0.1,
'learning_rate': 0.1,
'subsample': 1,
'reg_lambda': 1,
'scale_pos_weight': 2,
'min_child_weight': 30,
'tree_method': 'gpu_hist',
'n_gpus': 1,
'loss': 'ls',
# 'objective': 'gpu:reg:linear',
'objective': 'reg:squarederror',
'max_features': 'auto',
'criterion': 'friedman_mse',
'grow_policy': 'lossguide',
'verbose': True
}
Inputs:
mortgage_feat_df_pandas, delinq_df_pandas = inputs[0]
These inputs are provided by MortgageWorkflowRunner.
Outputs:
bst - XGBoost trained booster model.
'''
def meta_setup(self):
'''
'''
pass
def process(self, inputs):
import gc # python standard lib garbage collector
import xgboost as xgb
logmgr = MortgagePluginsLoggerMgr()
logger = logmgr.get_logger()
mortgage_feat_df_pandas, delinq_df_pandas = inputs[0]
delete_dataframes = self.conf.get('delete_dataframes')
xgb_gpu_params = self.conf['xgb_gpu_params']
logger.info('JUST BEFORE DMATRIX')
print_ram_usage()
logger.info('CREATING DMATRIX')
# DMatrix directly from dataframe requires xgboost from rapidsai:
# https://github.com/rapidsai/xgboost
# Convert to DMatrix for XGBoost training.
xgb_dmatrix = xgb.DMatrix(mortgage_feat_df_pandas, delinq_df_pandas)
# logger.info('XGB_DMATRIX:\n', xgb_dmatrix)
logger.info('JUST AFTER DMATRIX')
print_ram_usage()
# clear CPU/GPU memory
if delete_dataframes:
del(mortgage_feat_df_pandas)
del(delinq_df_pandas)
gc.collect()
logger.info('CLEAR MEMORY JUST BEFORE XGBOOST TRAINING')
print_ram_usage()
logger.info('RUNNING XGBOOST TRAINING')
# booster object
bst = xgb.train(
xgb_gpu_params, xgb_dmatrix,
num_boost_round=xgb_gpu_params['nround'])
logmgr.cleanup()
return bst
# RMM - RAPIDS Memory Manager.
# IMPORTANT!!! IF USING RMM START CLIENT prior to any cudf imports and that
# means prior to any greenflow imports, 3rd party libs with cudf, etc.
# This is needed if distributing workflows to workers.
def initialize_rmm_pool():
import rmm
return rmm.reinitialize(
pool_allocator=True, # default is False
managed_memory=False
)
def initialize_rmm_no_pool():
import rmm
return rmm.reinitialize(
pool_allocator=False, # default is False
managed_memory=False
)
def finalize_rmm():
import rmm
return rmm.rmm.librmm.rmm_finalize()
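def _demo_rmm_lifecycle(client):
    # Hypothetical sketch of how the RMM helpers above are driven from a
    # dask.distributed Client (this mirrors their use inside
    # DaskMortgageWorkflowRunner below).
    client.run(initialize_rmm_pool)      # pooled allocator for the heavy ETL
    # ... run GPU workflows here ...
    client.run(finalize_rmm)             # tear the pool down
    client.run(initialize_rmm_no_pool)   # back to the default allocator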
def print_distributed_dask_hijacked_logs(wlogs, logger, filters=None):
'''Prints (uses logger.info) the log entries from worker logs
(wlogs = client.get_worker_logs()). Filters what is printed based on
keywords in the filters. If filters is None then prints everything.
    :param filters: A tuple, even for a single entry, e.g. ('somestr',).
'''
# print('WORKER LOGS:\n{}'.format(json.dumps(wlogs, indent=2)))
for iworker_log in wlogs.values():
for _, msg in iworker_log:
# if 'distributed.worker.' in msg:
# if filter in msg:
if filters is None:
logger.info(msg)
continue
if any(ff in msg for ff in filters):
logger.info(msg)
class DaskMortgageWorkflowRunner(Node):
'''Runs the mortgage greenflow workflow and returns the mortgage features
dataframe and mortgage delinquency dataframe. These can be passed on
to xgboost for training.
conf: {
'mortgage_run_params_dict_list': REQUIRED. List of dictionaries of
mortgage run params.
'client': REQUIRED. Dask distributed client. Runs with distributed
dask.
'use_rmm': OPTIONAL. Boolean (True or False). Use RAPIDS Memory
Manager.,
'filter_dask_logger': OPTIONAL. Boolean to display hijacked
dask.distributed log. If False (default) then doesn't display.
}
Format of expected mortgage run params:
mortgage_run_param_dict = {
'replace_spec': replace_spec,
'task_spec_list': greenflow_task_spec_list,
'out_list': out_list
}
Returns: dask-distributed Futures where each future holds a tuple:
mortgage_feat_df_pandas, delinq_df_pandas
The number of futures returned corresponds to the number of workers
obtained from the client.
DataframeFlow will return a tuple so unpack as tuple of tuples in
whatever operates on the future:
((mortgage_feat_df_pandas, delinq_df_pandas),)
'''
def meta_setup(self):
'''
'''
pass
def process(self, inputs):
from dask.distributed import wait
logmgr = MortgagePluginsLoggerMgr()
logger = logmgr.get_logger()
filter_dask_logger = self.conf.get('filter_dask_logger')
client = self.conf['client']
client.run(init_workers_logger)
use_rmm = self.conf.get('use_rmm')
if use_rmm:
rmm_init_results = client.run(initialize_rmm_pool)
            logger.info('RMM INIT RESULTS:\n{}'.format(rmm_init_results))
mortgage_run_params_dict_list = \
self.conf['mortgage_run_params_dict_list']
workers_names = \
[iw['name'] for iw in client.scheduler_info()['workers'].values()]
nworkers = len(workers_names)
count = len(mortgage_run_params_dict_list)
logger.info('TRYING TO LOAD {} FRAMES'.format(count))
# Make a list of size nworkers where each element is a sublist of
# mortgage_run_params_dict_list.
subset_sz = count // nworkers
mortgage_run_params_dict_list_chunks = [
mortgage_run_params_dict_list[iw * subset_sz:(iw + 1) * subset_sz]
if iw < (nworkers - 1) else
mortgage_run_params_dict_list[iw * subset_sz:]
for iw in range(nworkers)]
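        # Illustrative arithmetic: with count=10 and nworkers=3, subset_sz is
        # 3 and the chunks have sizes [3, 3, 4]; the last worker absorbs the
        # remainder.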
logger.info(
'SPLIT MORTGAGE DATA INTO {} CHUNKS AMONGST {} WORKERS'
.format(len(mortgage_run_params_dict_list_chunks), nworkers))
# For debugging. Add entry 'csvfile_perfdata' to run_params_dict.
# for ii, ichunk in enumerate(mortgage_run_params_dict_list_chunks):
# files_in_chunk = \
# [iparam['csvfile_perfdata'] for iparam in ichunk]
# logger.info('CHUNK {} FILES TO LOAD: {}'.format(
# ii, files_in_chunk))
# List of dask Futures of PyArrow Tables from final_perf_acq cudf
# dataframe
mortgage_feat_df_delinq_df_pandas_futures = client.map(
mortgage_workflow_runner,
mortgage_run_params_dict_list_chunks)
wait(mortgage_feat_df_delinq_df_pandas_futures)
if filter_dask_logger:
wlogs = client.get_worker_logs()
print_distributed_dask_hijacked_logs(
wlogs, logger,
('mortgage_workflow_runner',
convert(CsvMortgagePerformanceDataLoader.__name__),
convert(CsvMortgageAcquisitionDataLoader.__name__))
)
client.run(restore_workers_logger)
cinfo = client.who_has(mortgage_feat_df_delinq_df_pandas_futures)
logger.info('CLIENT INFO WHO HAS WHAT: {}'.format(str(cinfo)))
if use_rmm:
client.run(finalize_rmm)
client.run(initialize_rmm_no_pool)
logmgr.cleanup()
return mortgage_feat_df_delinq_df_pandas_futures
class DaskXgbMortgageTrainer(Node):
'''Trains an XGBoost booster using Dask-XGBoost
Configuration:
conf: {
'delete_dataframes': OPTIONAL. Boolean (True or False). Delete the
intermediate mortgage dataframes from which an xgboost dmatrix
            is created. This is to potentially clear up CPU/GPU memory.
'dxgb_gpu_params': REQUIRED. Dictionary of dask-xgboost trainer
parameters.
'client': REQUIRED. Dask distributed client. Runs with distributed
dask.
'create_dmatrix_serially': OPTIONAL. Boolean (True or False) Might
be able to process more data/dataframes. Creating a dmatrix
takes a lot of host memory. Set delete_dataframes to True as
well to hopefully help with memory.
'filter_dask_logger': OPTIONAL. Boolean to display hijacked
dask.distributed log.
}
Example of dxgb_gpu_params:
dxgb_gpu_params = {
'nround': 100,
'max_depth': 8,
'max_leaves': 2 ** 8,
'alpha': 0.9,
'eta': 0.1,
'gamma': 0.1,
'learning_rate': 0.1,
'subsample': 1,
'reg_lambda': 1,
'scale_pos_weight': 2,
'min_child_weight': 30,
'tree_method': 'gpu_hist',
'n_gpus': 1,
'distributed_dask': True,
'loss': 'ls',
# 'objective': 'gpu:reg:linear',
'objective': 'reg:squarederror',
'max_features': 'auto',
'criterion': 'friedman_mse',
'grow_policy': 'lossguide',
'verbose': True
}
Inputs:
mortgage_feat_df_delinq_df_pandas_futures = inputs[0]
These inputs are provided by DaskMortgageWorkflowRunner.
Outputs:
bst - XGBoost trained booster model.
'''
def meta_setup(self):
'''
'''
pass
def process(self, inputs):
import gc # python standard lib garbage collector
import xgboost as xgb
from dask.delayed import delayed
from dask.distributed import (wait, get_worker)
import dask_xgboost as dxgb_gpu
logmgr = MortgagePluginsLoggerMgr()
logger = logmgr.get_logger()
filter_dask_logger = self.conf.get('filter_dask_logger')
client = self.conf['client']
client.run(init_workers_logger)
dxgb_gpu_params = self.conf['dxgb_gpu_params']
delete_dataframes = self.conf.get('delete_dataframes')
create_dmatrix_serially = self.conf.get('create_dmatrix_serially')
mortgage_feat_df_delinq_df_pandas_futures = inputs[0]
# TODO: Update to xgb.dask.DaskDMatrix and xgb.dask.train API. Refer to
# https://medium.com/rapids-ai/a-new-official-dask-api-for-xgboost-e8b10f3d1eb7
def make_xgb_dmatrix(
mortgage_feat_df_delinq_df_pandas_tuple,
delete_dataframes=None):
worker = get_worker()
logname = 'make_xgb_dmatrix'
logmgr = MortgagePluginsLoggerMgr(worker, logname)
logger = logmgr.get_logger()
logger.info('CREATING DMATRIX ON WORKER {}'.format(worker.name))
(mortgage_feat_df, delinq_df) = \
mortgage_feat_df_delinq_df_pandas_tuple
dmat = xgb.DMatrix(mortgage_feat_df, delinq_df)
if delete_dataframes:
del(mortgage_feat_df)
del(delinq_df)
# del(mortgage_feat_df_delinq_df_pandas_tuple)
gc.collect()
logmgr.cleanup()
return dmat
dmatrix_delayed_list = []
nworkers = len(mortgage_feat_df_delinq_df_pandas_futures)
if create_dmatrix_serially:
logger.info('CREATING DMATRIX SERIALLY ACROSS {} WORKERS'
.format(nworkers))
else:
logger.info('CREATING DMATRIX IN PARALLEL ACROSS {} WORKERS'
.format(nworkers))
for ifut in mortgage_feat_df_delinq_df_pandas_futures:
dmat_delayed = delayed(make_xgb_dmatrix)(ifut, delete_dataframes)
dmat_delayed_persist = dmat_delayed.persist()
if create_dmatrix_serially:
# TODO: For multinode efficiency need to poll the futures
# such that only doing serial dmatrix creation on the
# same node, but across nodes should be in parallel.
wait(dmat_delayed_persist)
dmatrix_delayed_list.append(dmat_delayed_persist)
wait(dmatrix_delayed_list)
if filter_dask_logger:
wlogs = client.get_worker_logs()
print_distributed_dask_hijacked_logs(
wlogs, logger, ('make_xgb_dmatrix',)
)
client.run(restore_workers_logger)
logger.info('JUST AFTER DMATRIX')
print_ram_usage()
logger.info('RUNNING XGBOOST TRAINING USING DASK-XGBOOST')
labels = None
bst = dxgb_gpu.train(
client, dxgb_gpu_params, dmatrix_delayed_list, labels,
num_boost_round=dxgb_gpu_params['nround'])
logmgr.cleanup()
return bst
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/mortgage_e2e_gquant/mortgage_greenflow_plugins.py |
'''
'''
import os
from greenflow.dataframe_flow import (TaskSpecSchema, TaskGraph)
from mortgage_common import (
mortgage_etl_workflow_def, generate_mortgage_greenflow_run_params_list,
MortgageTaskNames)
def main():
_basedir = os.path.dirname(__file__)
# mortgage_data_path = '/datasets/rapids_data/mortgage'
mortgage_data_path = os.path.join(_basedir, 'mortgage_data')
# Using some default csv files for testing.
# csvfile_names = os.path.join(mortgage_data_path, 'names.csv')
# acq_data_path = os.path.join(mortgage_data_path, 'acq')
# perf_data_path = os.path.join(mortgage_data_path, 'perf')
# csvfile_acqdata = os.path.join(acq_data_path, 'Acquisition_2000Q1.txt')
# csvfile_perfdata = \
# os.path.join(perf_data_path, 'Performance_2000Q1.txt_0')
# mortgage_etl_workflow_def(
# csvfile_names, csvfile_acqdata, csvfile_perfdata)
greenflow_task_spec_list = mortgage_etl_workflow_def()
start_year = 2000
end_year = 2001 # end_year is inclusive
# end_year = 2016 # end_year is inclusive
# part_count = 16 # the number of data files to train against
part_count = 12 # the number of data files to train against
# part_count = 4 # the number of data files to train against
mortgage_run_params_dict_list = generate_mortgage_greenflow_run_params_list(
mortgage_data_path, start_year, end_year, part_count,
greenflow_task_spec_list)
_basedir = os.path.dirname(__file__)
mortgage_lib_module = os.path.join(_basedir, 'mortgage_greenflow_plugins.py')
mortgage_workflow_runner_task = {
TaskSpecSchema.task_id:
MortgageTaskNames.mortgage_workflow_runner_task_name,
TaskSpecSchema.node_type: 'MortgageWorkflowRunner',
TaskSpecSchema.conf: {
'mortgage_run_params_dict_list': mortgage_run_params_dict_list
},
TaskSpecSchema.inputs: [],
TaskSpecSchema.filepath: mortgage_lib_module
}
    # Can be multi-gpu: set ngpus > 1. This is different from dask-xgboost,
    # which is distributed multi-gpu, i.e. dask-xgboost can distribute across
    # one node or multiple nodes. In distributed mode the dmatrix is
    # distributed as well.
ngpus = 1
xgb_gpu_params = {
'nround': 100,
'max_depth': 8,
'max_leaves': 2 ** 8,
'alpha': 0.9,
'eta': 0.1,
'gamma': 0.1,
'learning_rate': 0.1,
'subsample': 1,
'reg_lambda': 1,
'scale_pos_weight': 2,
'min_child_weight': 30,
'tree_method': 'gpu_hist',
'n_gpus': ngpus,
# 'distributed_dask': True,
'loss': 'ls',
# 'objective': 'gpu:reg:linear',
'objective': 'reg:squarederror',
'max_features': 'auto',
'criterion': 'friedman_mse',
'grow_policy': 'lossguide',
'verbose': True
}
xgb_trainer_task = {
TaskSpecSchema.task_id: MortgageTaskNames.xgb_trainer_task_name,
TaskSpecSchema.node_type: 'XgbMortgageTrainer',
TaskSpecSchema.conf: {
'delete_dataframes': False,
'xgb_gpu_params': xgb_gpu_params
},
TaskSpecSchema.inputs: [
MortgageTaskNames.mortgage_workflow_runner_task_name
],
TaskSpecSchema.filepath: mortgage_lib_module
}
task_spec_list = [mortgage_workflow_runner_task, xgb_trainer_task]
task_graph = TaskGraph(task_spec_list)
# out_list = [MortgageTaskNames.mortgage_workflow_runner_task_name]
# ((mortgage_feat_df_pandas, delinq_df_pandas),) = task_graph.run(out_list)
out_list = [MortgageTaskNames.xgb_trainer_task_name]
(bst,) = task_graph.run(out_list)
print('XGBOOST BOOSTER:\n', bst)
if __name__ == '__main__':
main()
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/mortgage_e2e_gquant/mortgage_run_workflow_local.py |
'''
Collection of functions to run the mortgage example.
'''
import os
from glob import glob
class MortgageTaskNames(object):
'''Task names commonly used by scripts for naming tasks when creating
a greenflow mortgage workflow.
'''
load_acqdata_task_name = 'acqdata'
load_perfdata_task_name = 'perfdata'
ever_feat_task_name = 'ever_features'
delinq_feat_task_name = 'delinq_features'
join_perf_ever_delinq_feat_task_name = 'join_perf_ever_delinq_features'
create_12mon_feat_task_name = 'create_12mon_features'
final_perf_delinq_task_name = 'final_perf_delinq_features'
final_perf_acq_task_name = 'final_perf_acq_df'
mortgage_workflow_runner_task_name = 'mortgage_workflow_runner'
xgb_trainer_task_name = 'xgb_trainer'
dask_mortgage_workflow_runner_task_name = 'dask_mortgage_workflow_runner'
dask_xgb_trainer_task_name = 'dask_xgb_trainer'
def mortgage_etl_workflow_def(
csvfile_names=None, csvfile_acqdata=None,
csvfile_perfdata=None):
'''Define the ETL (extract-transform-load) portion of the mortgage
workflow.
:returns: greenflow task-spec list. Currently a simple list of dictionaries.
Each dict is a task-spec per TaskSpecSchema.
:rtype: list
'''
from greenflow.dataframe_flow import TaskSpecSchema
_basedir = os.path.dirname(__file__)
mortgage_lib_module = os.path.join(_basedir, 'mortgage_greenflow_plugins.py')
# print('CSVFILE_ACQDATA: ', csvfile_acqdata)
# print('CSVFILE_PERFDATA: ', csvfile_perfdata)
# load acquisition
load_acqdata_task = {
TaskSpecSchema.task_id: MortgageTaskNames.load_acqdata_task_name,
TaskSpecSchema.node_type: 'CsvMortgageAcquisitionDataLoader',
TaskSpecSchema.conf: {
'csvfile_names': csvfile_names,
'csvfile_acqdata': csvfile_acqdata
},
TaskSpecSchema.inputs: [],
TaskSpecSchema.filepath: mortgage_lib_module
}
# load performance data
load_perfdata_task = {
TaskSpecSchema.task_id: MortgageTaskNames.load_perfdata_task_name,
TaskSpecSchema.node_type: 'CsvMortgagePerformanceDataLoader',
TaskSpecSchema.conf: {
'csvfile_perfdata': csvfile_perfdata
},
TaskSpecSchema.inputs: [],
TaskSpecSchema.filepath: mortgage_lib_module
}
# calculate loan delinquency stats
ever_feat_task = {
TaskSpecSchema.task_id: MortgageTaskNames.ever_feat_task_name,
TaskSpecSchema.node_type: 'CreateEverFeatures',
TaskSpecSchema.conf: dict(),
TaskSpecSchema.inputs: [MortgageTaskNames.load_perfdata_task_name],
TaskSpecSchema.filepath: mortgage_lib_module
}
delinq_feat_task = {
TaskSpecSchema.task_id: MortgageTaskNames.delinq_feat_task_name,
TaskSpecSchema.node_type: 'CreateDelinqFeatures',
TaskSpecSchema.conf: dict(),
TaskSpecSchema.inputs: [MortgageTaskNames.load_perfdata_task_name],
TaskSpecSchema.filepath: mortgage_lib_module
}
join_perf_ever_delinq_feat_task = {
TaskSpecSchema.task_id:
MortgageTaskNames.join_perf_ever_delinq_feat_task_name,
TaskSpecSchema.node_type: 'JoinPerfEverDelinqFeatures',
TaskSpecSchema.conf: dict(),
TaskSpecSchema.inputs: [
MortgageTaskNames.load_perfdata_task_name,
MortgageTaskNames.ever_feat_task_name,
MortgageTaskNames.delinq_feat_task_name
],
TaskSpecSchema.filepath: mortgage_lib_module
}
create_12mon_feat_task = {
TaskSpecSchema.task_id: MortgageTaskNames.create_12mon_feat_task_name,
TaskSpecSchema.node_type: 'Create12MonFeatures',
TaskSpecSchema.conf: dict(),
TaskSpecSchema.inputs: [
MortgageTaskNames.join_perf_ever_delinq_feat_task_name
],
TaskSpecSchema.filepath: mortgage_lib_module
}
final_perf_delinq_task = {
TaskSpecSchema.task_id: MortgageTaskNames.final_perf_delinq_task_name,
TaskSpecSchema.node_type: 'FinalPerfDelinq',
TaskSpecSchema.conf: dict(),
TaskSpecSchema.inputs: [
MortgageTaskNames.load_perfdata_task_name,
MortgageTaskNames.join_perf_ever_delinq_feat_task_name,
MortgageTaskNames.create_12mon_feat_task_name
],
TaskSpecSchema.filepath: mortgage_lib_module
}
final_perf_acq_task = {
TaskSpecSchema.task_id: MortgageTaskNames.final_perf_acq_task_name,
TaskSpecSchema.node_type: 'JoinFinalPerfAcqClean',
TaskSpecSchema.conf: dict(),
TaskSpecSchema.inputs: [
MortgageTaskNames.final_perf_delinq_task_name,
MortgageTaskNames.load_acqdata_task_name
],
TaskSpecSchema.filepath: mortgage_lib_module
}
task_spec_list = [
load_acqdata_task, load_perfdata_task,
ever_feat_task, delinq_feat_task, join_perf_ever_delinq_feat_task,
create_12mon_feat_task, final_perf_delinq_task, final_perf_acq_task
]
return task_spec_list
def generate_mortgage_greenflow_run_params_list(
mortgage_data_path, start_year, end_year, part_count,
greenflow_task_spec_list):
'''For the specified range of years, and a limit (part_count) on the number
of performance files, generate a list of run_params_dict entries.
run_params_dict = {
'replace_spec': replace_spec,
'task_spec_list': greenflow_task_spec_list,
'out_list': out_list
}
replace_spec - passed to the dataframe-flow run command's replace option.
replace_spec = {
MortgageTaskNames.load_acqdata_task_name: {
TaskSpecSchema.conf: {
'csvfile_names': csvfile_names,
'csvfile_acqdata': csvfile_acqdata
}
},
MortgageTaskNames.load_perfdata_task_name: {
TaskSpecSchema.conf: {
'csvfile_perfdata': csvfile_perfdata
}
}
}
out_list - Expected to specify one output which should be the final
dataframe produced by the mortgage ETL workflow.
Example:
from greenflow.dataframe_flow import TaskGraph
task_spec_list = run_params_dict['task_spec_list']
out_list = run_params_dict['out_list']
replace_spec = run_params_dict['replace_spec']
task_graph = TaskGraph(task_spec_list)
(final_perf_acq_df,) = task_graph.run(out_list, replace_spec)
:param str mortgage_data_path: Path to mortgage data. Should have a file
"names.csv" and two subdirectories "acq" and "perf".
:param int start_year: Start year is used to traverse the appropriate range
of directories with corresponding year(s) in mortgage data.
:param int end_year: End year is used to traverse the appropriate range
of directories with corresponding year(s) in mortgage data.
:param int part_count: Limit on how many performance files to load. There
is a single corresponding acquisition file for each year and quarter.
Performance files are very large CSV files (on the order of 1 GB) and are
broken into chunks, i.e. for a given year and quarter there can be several
file chunks: *.txt_0, *.txt_1, etc.
:param greenflow_task_spec_list: Mortgage ETL workflow list of tasks. Refer to
function mortgage_etl_workflow_def.
:returns: list of run_params_dict
:rtype: list
'''
from greenflow.dataframe_flow import TaskSpecSchema
csvfile_names = os.path.join(mortgage_data_path, 'names.csv')
acq_data_path = os.path.join(mortgage_data_path, 'acq')
perf_data_path = os.path.join(mortgage_data_path, 'perf')
quarter = 1
year = start_year
count = 0
out_list = [MortgageTaskNames.final_perf_acq_task_name]
mortgage_run_params_dict_list = []
while year <= end_year:
if count >= part_count:
break
perf_data_files = glob(os.path.join(
perf_data_path, "Performance_{}Q{}*".format(year, quarter)))
csvfile_acqdata = os.path.join(
acq_data_path, "Acquisition_{}Q{}.txt".format(year, quarter))
for csvfile_perfdata in perf_data_files:
if count >= part_count:
break
replace_spec = {
MortgageTaskNames.load_acqdata_task_name: {
TaskSpecSchema.conf: {
'csvfile_names': csvfile_names,
'csvfile_acqdata': csvfile_acqdata
}
},
MortgageTaskNames.load_perfdata_task_name: {
TaskSpecSchema.conf: {
'csvfile_perfdata': csvfile_perfdata
}
}
}
# Uncomment 'csvfile_perfdata' for debugging chunks in
# DaskMortgageWorkflowRunner.
run_params_dict = {
# 'csvfile_perfdata': csvfile_perfdata,
'replace_spec': replace_spec,
'task_spec_list': greenflow_task_spec_list,
'out_list': out_list
}
mortgage_run_params_dict_list.append(run_params_dict)
count += 1
quarter += 1
if quarter == 5:
year += 1
quarter = 1
return mortgage_run_params_dict_list
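# Hedged usage sketch (illustrative, not part of the original module):
# consume the run-params list above with greenflow's TaskGraph, following
# the example already given in the docstring. The data path, year range,
# and part count below are assumptions for illustration.
# from greenflow.dataframe_flow import TaskGraph
# task_spec_list = mortgage_etl_workflow_def()
# run_params_list = generate_mortgage_greenflow_run_params_list(
#     '/path/to/mortgage_data', 2000, 2001, 4, task_spec_list)
# for run_params_dict in run_params_list:
#     task_graph = TaskGraph(run_params_dict['task_spec_list'])
#     (final_perf_acq_df,) = task_graph.run(
#         run_params_dict['out_list'], run_params_dict['replace_spec'])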
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/mortgage_e2e_gquant/mortgage_common.py |
'''
'''
import os
try:
# Disable NCCL P2P. Only necessary for versions of NCCL < 2.4
# https://rapidsai.github.io/projects/cudf/en/0.8.0/dask-xgb-10min.html#Disable-NCCL-P2P.-Only-necessary-for-versions-of-NCCL-%3C-2.4
os.environ["NCCL_P2P_DISABLE"] = "1"
except Exception:
pass
import json
from dask_cuda import LocalCUDACluster
from dask.distributed import Client
# from distributed import Client
from mortgage_common import (
mortgage_etl_workflow_def, generate_mortgage_greenflow_run_params_list,
MortgageTaskNames)
def main():
memory_limit = 128e9
threads_per_worker = 4
cluster = LocalCUDACluster(
memory_limit=memory_limit,
threads_per_worker=threads_per_worker)
client = Client(cluster)
sched_info = client.scheduler_info()
print('CLIENT: {}'.format(client))
print('SCHEDULER INFO:\n{}'.format(json.dumps(sched_info, indent=2)))
# Importing here in case RMM is used later on. Must start client prior
# to importing cudf stuff if using RMM.
from greenflow.dataframe_flow import (TaskSpecSchema, TaskGraph)
# workers_names = \
# [iw['name'] for iw in client.scheduler_info()['workers'].values()]
# nworkers = len(workers_names)
_basedir = os.path.dirname(__file__)
# mortgage_data_path = '/datasets/rapids_data/mortgage'
mortgage_data_path = os.path.join(_basedir, 'mortgage_data')
# Using some default csv files for testing.
# csvfile_names = os.path.join(mortgage_data_path, 'names.csv')
# acq_data_path = os.path.join(mortgage_data_path, 'acq')
# perf_data_path = os.path.join(mortgage_data_path, 'perf')
# csvfile_acqdata = os.path.join(acq_data_path, 'Acquisition_2000Q1.txt')
# csvfile_perfdata = \
# os.path.join(perf_data_path, 'Performance_2000Q1.txt_0')
# mortgage_etl_workflow_def(
# csvfile_names, csvfile_acqdata, csvfile_perfdata)
greenflow_task_spec_list = mortgage_etl_workflow_def()
start_year = 2000
end_year = 2001 # end_year is inclusive
# end_year = 2016 # end_year is inclusive
# part_count = 16 # the number of data files to train against
# create_dmatrix_serially - When False, and there is not enough host RAM on
# the node, creating the dmatrix becomes a race condition. Ensure there is
# enough host RAM, otherwise set this to True.
# create_dmatrix_serially = False
# able to run 18 parts with create_dmatrix_serially set to True
part_count = 18 # the number of data files to train against
create_dmatrix_serially = True
# part_count = 4 # the number of data files to train against
# Use RAPIDS Memory Manager. Seems to work fine without it.
use_rmm = False
# Clean up intermediate dataframes in the xgboost training task.
delete_dataframes = True
mortgage_run_params_dict_list = generate_mortgage_greenflow_run_params_list(
mortgage_data_path, start_year, end_year, part_count,
greenflow_task_spec_list)
_basedir = os.path.dirname(__file__)
mortgage_lib_module = os.path.join(_basedir, 'mortgage_greenflow_plugins.py')
filter_dask_logger = False
mortgage_workflow_runner_task = {
TaskSpecSchema.task_id:
MortgageTaskNames.dask_mortgage_workflow_runner_task_name,
TaskSpecSchema.node_type: 'DaskMortgageWorkflowRunner',
TaskSpecSchema.conf: {
'mortgage_run_params_dict_list': mortgage_run_params_dict_list,
'client': client,
'use_rmm': use_rmm,
'filter_dask_logger': filter_dask_logger,
},
TaskSpecSchema.inputs: [],
TaskSpecSchema.filepath: mortgage_lib_module
}
dxgb_gpu_params = {
'nround': 100,
'max_depth': 8,
'max_leaves': 2 ** 8,
'alpha': 0.9,
'eta': 0.1,
'gamma': 0.1,
'learning_rate': 0.1,
'subsample': 1,
'reg_lambda': 1,
'scale_pos_weight': 2,
'min_child_weight': 30,
'tree_method': 'gpu_hist',
'n_gpus': 1,
'distributed_dask': True,
'loss': 'ls',
# 'objective': 'gpu:reg:linear',
'objective': 'reg:squarederror',
'max_features': 'auto',
'criterion': 'friedman_mse',
'grow_policy': 'lossguide',
'verbose': True
}
dxgb_trainer_task = {
TaskSpecSchema.task_id: MortgageTaskNames.dask_xgb_trainer_task_name,
TaskSpecSchema.node_type: 'DaskXgbMortgageTrainer',
TaskSpecSchema.conf: {
'create_dmatrix_serially': create_dmatrix_serially,
'delete_dataframes': delete_dataframes,
'dxgb_gpu_params': dxgb_gpu_params,
'client': client,
'filter_dask_logger': filter_dask_logger
},
TaskSpecSchema.inputs: [
MortgageTaskNames.dask_mortgage_workflow_runner_task_name
],
TaskSpecSchema.filepath: mortgage_lib_module
}
task_spec_list = [mortgage_workflow_runner_task, dxgb_trainer_task]
out_list = [MortgageTaskNames.dask_xgb_trainer_task_name]
task_graph = TaskGraph(task_spec_list)
(bst,) = task_graph.run(out_list)
print('XGBOOST BOOSTER:\n', bst)
if __name__ == '__main__':
main()
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/mortgage_e2e_gquant/mortgage_run_workflow_daskdistrib.py |
import ipywidgets as widgets
from bqplot.colorschemes import CATEGORY20
from bqplot import Axis, Figure, LinearScale, Lines
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import chaikin_oscillator as indicator_fun # noqa #F401
def get_para_widgets():
para_selector = widgets.IntRangeSlider(value=[10, 30],
min=3,
max=60,
step=1,
description="Ch Oscillator:",
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True)
para_selector_widgets = [para_selector]
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
widget = para_selector_widgets[0]
return (stock_df["high"], stock_df["low"], stock_df["close"],
stock_df["volume"], widget.value[0], widget.value[1])
def process_outputs(output, stock_df):
output.index = stock_df.index
stock_df['out'] = output
stock_df['out'] = stock_df['out'].fillna(0)
return stock_df
def create_figure(stock, dt_scale, sc, color_id, f,
indicator_figure_height, figure_width,
add_new_indicator):
sc_co = LinearScale()
ax_y = Axis(label='Ch Osc', scale=sc_co, orientation='vertical')
new_line = Lines(x=stock.datetime.to_array(), y=stock['out'].to_array(),
scales={'x': dt_scale, 'y': sc_co},
colors=[CATEGORY20[color_id[0]]])
new_fig = Figure(marks=[new_line], axes=[ax_y])
new_fig.layout.height = indicator_figure_height
new_fig.layout.width = figure_width
figs = [new_line]
# add new figure
add_new_indicator(new_fig)
return figs
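# Hedged driver sketch (illustrative, not part of the original plugin):
# how a hosting notebook might wire these hooks together. Here stock_df
# (a cudf DataFrame with datetime/high/low/close/volume columns), dt_scale,
# sc, f, and add_new_indicator are assumed to be supplied by that notebook.
# widgets_list = get_para_widgets()
# out = indicator_fun(*get_parameters(stock_df, widgets_list))
# stock_df = process_outputs(out, stock_df)
# figs = create_figure(stock_df, dt_scale, sc, [0], f,
#                      '300px', '800px', add_new_indicator)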
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/ch_oscillator.py |
import ipywidgets as widgets
from bqplot.colorschemes import CATEGORY20
from bqplot import Axis, Figure, LinearScale, Lines
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import on_balance_volume as indicator_fun # noqa #F401
def get_para_widgets():
para_selector = widgets.IntSlider(
min=2, max=60, description="On Balance volume")
para_selector_widgets = [para_selector]
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
return (stock_df["close"], stock_df["volume"]) + tuple(
[w.value for w in para_selector_widgets])
def process_outputs(output, stock_df):
output.index = stock_df.index
stock_df['out'] = output
stock_df['out'] = output.fillna(0)
return stock_df
def create_figure(stock, dt_scale, sc,
color_id, f, indicator_figure_height,
figure_width, add_new_indicator):
sc_co = LinearScale()
ax_y = Axis(label='On Balance volume',
scale=sc_co, orientation='vertical')
new_line = Lines(x=stock.datetime.to_array(), y=stock['out'].to_array(),
scales={'x': dt_scale, 'y': sc_co},
colors=[CATEGORY20[color_id[0]]])
new_fig = Figure(marks=[new_line], axes=[ax_y])
new_fig.layout.height = indicator_figure_height
new_fig.layout.width = figure_width
figs = [new_line]
# add new figure
add_new_indicator(new_fig)
return figs
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/on_balance_volume.py |
import ipywidgets as widgets
from bqplot.colorschemes import CATEGORY20
from bqplot import Axis, Figure, LinearScale, Lines
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import mass_index as indicator_fun # noqa #F401
def get_para_widgets():
para_selector = widgets.IntRangeSlider(value=[10, 30],
min=3,
max=60,
step=1,
description="Mass Index:",
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True)
para_selector_widgets = [para_selector]
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
widget = para_selector_widgets[0]
return (stock_df["high"], stock_df["low"], widget.value[0],
widget.value[1])
def process_outputs(output, stock_df):
output.index = stock_df.index
stock_df['out'] = output
stock_df['out'] = stock_df['out'].fillna(0)
return stock_df
def create_figure(stock, dt_scale, sc, color_id, f,
indicator_figure_height, figure_width,
add_new_indicator):
sc_co = LinearScale()
ax_y = Axis(label='Mass Index', scale=sc_co, orientation='vertical')
new_line = Lines(x=stock.datetime.to_array(), y=stock['out'].to_array(),
scales={'x': dt_scale, 'y': sc_co},
colors=[CATEGORY20[color_id[0]]])
new_fig = Figure(marks=[new_line], axes=[ax_y])
new_fig.layout.height = indicator_figure_height
new_fig.layout.width = figure_width
figs = [new_line]
# add new figure
add_new_indicator(new_fig)
return figs
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/mass_index.py |
import ipywidgets as widgets
from bqplot.colorschemes import CATEGORY20
from bqplot import Axis, Figure, LinearScale, Lines
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import money_flow_index as indicator_fun # noqa #F401
def get_para_widgets():
para_selector = widgets.IntSlider(
min=2, max=60, description="Money Flow Index")
para_selector_widgets = [para_selector]
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
return (stock_df["high"],
stock_df["low"], stock_df["close"],
stock_df["volume"]) + tuple(
[w.value for w in para_selector_widgets])
def process_outputs(output, stock_df):
output.index = stock_df.index
stock_df['out'] = output
stock_df['out'] = output.fillna(0)
return stock_df
def create_figure(stock, dt_scale, sc,
color_id, f, indicator_figure_height,
figure_width, add_new_indicator):
sc_co = LinearScale()
ax_y = Axis(label='Money Flow Index', scale=sc_co, orientation='vertical')
new_line = Lines(x=stock.datetime.to_array(), y=stock['out'].to_array(),
scales={'x': dt_scale, 'y': sc_co},
colors=[CATEGORY20[color_id[0]]])
new_fig = Figure(marks=[new_line], axes=[ax_y])
new_fig.layout.height = indicator_figure_height
new_fig.layout.width = figure_width
figs = [new_line]
# add new figure
add_new_indicator(new_fig)
return figs
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/money_flow_index.py |
import ipywidgets as widgets
from bqplot.colorschemes import CATEGORY20
from bqplot import Axis, Figure, LinearScale, Lines
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import kst_oscillator as indicator_fun # noqa #F401
def get_para_widgets():
para_selector = widgets.IntSlider(
min=2, max=60, description="KST Oscillator")
para_selector_widgets = [para_selector]
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
param = [w.value for w in para_selector_widgets]
param_grp = [param[0] + i for i in range(8)]
return (stock_df["close"], param_grp[0], param_grp[1],
param_grp[2], param_grp[3],
param_grp[4], param_grp[5], param_grp[6], param_grp[7])
def process_outputs(output, stock_df):
output.index = stock_df.index
stock_df['out'] = output
stock_df['out'] = output.fillna(0)
return stock_df
def create_figure(stock, dt_scale, sc, color_id, f,
indicator_figure_height, figure_width,
add_new_indicator):
sc_co = LinearScale()
ax_y = Axis(label='KST Oscillator', scale=sc_co, orientation='vertical')
new_line = Lines(x=stock.datetime.to_array(), y=stock['out'].to_array(),
scales={'x': dt_scale, 'y': sc_co},
colors=[CATEGORY20[color_id[0]]])
new_fig = Figure(marks=[new_line], axes=[ax_y])
new_fig.layout.height = indicator_figure_height
new_fig.layout.width = figure_width
figs = [new_line]
# add new figure
add_new_indicator(new_fig)
return figs
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/kst_oscillator.py |
import ipywidgets as widgets
from bqplot.colorschemes import CATEGORY20
from bqplot import Axis, Figure, LinearScale, Lines
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import accumulation_distribution as indicator_fun # noqa #F401
def get_para_widgets():
para_selector = widgets.IntSlider(
min=2, max=60, description="Accumulation Distribution")
para_selector_widgets = [para_selector]
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
return (stock_df["high"],
stock_df["low"],
stock_df["close"],
stock_df["volume"]) + tuple(
[w.value for w in para_selector_widgets])
def process_outputs(output, stock_df):
output.index = stock_df.index
stock_df['out'] = output
stock_df['out'] = output.fillna(0)
return stock_df
def create_figure(stock, dt_scale, sc, color_id,
f, indicator_figure_height, figure_width, add_new_indicator):
sc_co = LinearScale()
ax_y = Axis(label='Accumulation Distribution',
scale=sc_co, orientation='vertical')
new_line = Lines(
x=stock.datetime.to_array(), y=stock['out'].to_array(),
scales={'x': dt_scale, 'y': sc_co}, colors=[CATEGORY20[color_id[0]]])
new_fig = Figure(marks=[new_line], axes=[ax_y])
new_fig.layout.height = indicator_figure_height
new_fig.layout.width = figure_width
figs = [new_line]
# add new figure
add_new_indicator(new_fig)
return figs
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/accumulation_distribution.py |
import ipywidgets as widgets
from bqplot.colorschemes import CATEGORY20
from bqplot import Lines
import math
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import moving_average as indicator_fun # noqa #F401
def get_para_widgets():
para_selector = widgets.IntSlider(min=2, max=60, description="avg periods")
para_selector_widgets = [para_selector]
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
return (stock_df["close"],) + tuple(
[w.value for w in para_selector_widgets])
def process_outputs(output, stock_df):
output.index = stock_df.index
stock_df['out'] = output
stock_df['out'] = stock_df['out'].fillna(math.inf)
return stock_df
def create_figure(stock, dt_scale, sc, color_id, f,
indicator_figure_height, figure_width, add_new_indicator):
line = Lines(x=stock.datetime.to_array(), y=stock['out'].to_array(),
scales={'x': dt_scale, 'y': sc},
colors=[CATEGORY20[color_id[0]]])
figs = [line]
f.marks = f.marks + figs
return figs
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/ma.py |
import ipywidgets as widgets
from bqplot.colorschemes import CATEGORY20
from bqplot import Axis, Figure, LinearScale, Lines
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import true_strength_index as indicator_fun # noqa #F401
def get_para_widgets():
para_selector = widgets.IntRangeSlider(value=[10, 30],
min=3,
max=60,
step=1,
description="True Strength Index",
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True)
para_selector_widgets = [para_selector]
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
widget = para_selector_widgets[0]
return (stock_df["close"], widget.value[0], widget.value[1])
def process_outputs(output, stock_df):
output.index = stock_df.index
stock_df['out'] = output
stock_df['out'] = stock_df['out'].fillna(0)
return stock_df
def create_figure(stock, dt_scale, sc, color_id,
f, indicator_figure_height,
figure_width, add_new_indicator):
sc_co = LinearScale()
ax_y = Axis(label='True Strength Index',
scale=sc_co, orientation='vertical')
new_line = Lines(x=stock.datetime.to_array(), y=stock['out'].to_array(),
scales={'x': dt_scale, 'y': sc_co},
colors=[CATEGORY20[color_id[0]]])
new_fig = Figure(marks=[new_line], axes=[ax_y])
new_fig.layout.height = indicator_figure_height
new_fig.layout.width = figure_width
figs = [new_line]
# add new figure
add_new_indicator(new_fig)
return figs
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/true_strength_index.py |
import ipywidgets as widgets
from bqplot.colorschemes import CATEGORY20
from bqplot import Axis, Figure, LinearScale, Lines
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import stochastic_oscillator_d as indicator_fun # noqa #F401
def get_para_widgets():
para_selector = widgets.IntSlider(
min=2, max=60, description="Stochastic Oscillator D")
para_selector_widgets = [para_selector]
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
return (stock_df["high"], stock_df["low"],
stock_df["close"]) + tuple(
[w.value for w in para_selector_widgets])
def process_outputs(output, stock_df):
output.index = stock_df.index
stock_df['out'] = output
stock_df['out'] = output.fillna(0)
return stock_df
def create_figure(stock, dt_scale, sc,
color_id, f, indicator_figure_height,
figure_width, add_new_indicator):
sc_co = LinearScale()
ax_y = Axis(label='Stochastic Oscillator D',
scale=sc_co, orientation='vertical')
new_line = Lines(x=stock.datetime.to_array(),
y=stock['out'].to_array(),
scales={'x': dt_scale, 'y': sc_co},
colors=[CATEGORY20[color_id[0]]])
new_fig = Figure(marks=[new_line], axes=[ax_y])
new_fig.layout.height = indicator_figure_height
new_fig.layout.width = figure_width
figs = [new_line]
# add new figure
add_new_indicator(new_fig)
return figs
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/stochastic_oscillator_d.py |
import ipywidgets as widgets
from bqplot.colorschemes import CATEGORY20
from bqplot import Axis, Figure, LinearScale, Lines
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import rate_of_change as indicator_fun # noqa #F401
def get_para_widgets():
para_selector = widgets.IntSlider(
min=2, max=60, description="Rate of Change")
para_selector_widgets = [para_selector]
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
return (stock_df["close"],) + tuple(
[w.value for w in para_selector_widgets])
def process_outputs(output, stock_df):
output.index = stock_df.index
stock_df['out'] = output
stock_df['out'] = output.fillna(0)
return stock_df
def create_figure(stock, dt_scale, sc,
color_id, f, indicator_figure_height,
figure_width, add_new_indicator):
sc_co = LinearScale()
ax_y = Axis(label='Rate of Change', scale=sc_co, orientation='vertical')
new_line = Lines(x=stock.datetime.to_array(), y=stock['out'].to_array(),
scales={'x': dt_scale, 'y': sc_co},
colors=[CATEGORY20[color_id[0]]])
new_fig = Figure(marks=[new_line], axes=[ax_y])
new_fig.layout.height = indicator_figure_height
new_fig.layout.width = figure_width
figs = [new_line]
# add new figure
add_new_indicator(new_fig)
return figs
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/rate_of_change.py |
import ipywidgets as widgets
from bqplot import Axis, Figure, LinearScale, Lines
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import macd as indicator_fun # noqa #F401
def get_para_widgets():
para_selector = widgets.IntRangeSlider(value=[10, 30],
min=3,
max=60,
step=1,
description="MACD:",
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True)
para_selector_widgets = [para_selector]
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
widget = para_selector_widgets[0]
return (stock_df["close"], widget.value[0], widget.value[1])
def process_outputs(output, stock_df):
output.MACD.index = stock_df.index
output.MACDsign.index = stock_df.index
output.MACDdiff.index = stock_df.index
stock_df['out0'] = output.MACD
stock_df['out0'] = stock_df['out0'].fillna(0)
stock_df['out1'] = output.MACDsign
stock_df['out1'] = stock_df['out1'].fillna(0)
stock_df['out2'] = output.MACDdiff
stock_df['out2'] = stock_df['out2'].fillna(0)
return stock_df
def create_figure(stock, dt_scale, sc, color_id,
f, indicator_figure_height, figure_width, add_new_indicator):
sc_co = LinearScale()
ax_y = Axis(label='MACD', scale=sc_co, orientation='vertical')
new_line = Lines(x=stock.datetime.to_array(),
y=[stock['out0'].to_array(),
stock['out1'].to_array(),
stock['out2'].to_array()],
scales={'x': dt_scale, 'y': sc_co})
new_fig = Figure(marks=[new_line], axes=[ax_y])
new_fig.layout.height = indicator_figure_height
new_fig.layout.width = figure_width
figs = [new_line]
# add new figure
add_new_indicator(new_fig)
return figs
def update_figure(stock, objects):
line = objects[0]
with line.hold_trait_notifications():
line.y = [stock['out0'].to_array(),
stock['out1'].to_array(), stock['out2'].to_array()]
line.x = stock.datetime.to_array()
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/macd.py |
import ipywidgets as widgets
from bqplot.colorschemes import CATEGORY20
from bqplot import Axis, Figure, LinearScale, Lines
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import donchian_channel as indicator_fun # noqa #F401
def get_para_widgets():
para_selector = widgets.IntSlider(
min=2, max=60, description="Donchian Channel")
para_selector_widgets = [para_selector]
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
return (stock_df["high"], stock_df["low"]) + tuple(
[w.value for w in para_selector_widgets])
def process_outputs(output, stock_df):
output.index = stock_df.index
stock_df['out'] = output
stock_df['out'] = output.fillna(0)
return stock_df
def create_figure(stock, dt_scale, sc, color_id, f,
indicator_figure_height, figure_width, add_new_indicator):
sc_co = LinearScale()
ax_y = Axis(label='Donchian Channel', scale=sc_co, orientation='vertical')
new_line = Lines(x=stock.datetime.to_array(), y=stock['out'].to_array(),
scales={'x': dt_scale, 'y': sc_co},
colors=[CATEGORY20[color_id[0]]])
new_fig = Figure(marks=[new_line], axes=[ax_y])
new_fig.layout.height = indicator_figure_height
new_fig.layout.width = figure_width
figs = [new_line]
# add new figure
add_new_indicator(new_fig)
return figs
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/donchian_channel.py |
import ipywidgets as widgets
from bqplot.colorschemes import CATEGORY20
from bqplot import Axis, Figure, LinearScale, Lines
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import average_directional_movement_index as indicator_fun # noqa #F401
def get_para_widgets():
para_selector = widgets.IntRangeSlider(value=[10, 30],
min=3,
max=60,
step=1,
description="ADMI:",
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True)
para_selector_widgets = [para_selector]
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
widget = para_selector_widgets[0]
return (stock_df["high"], stock_df["low"], stock_df["close"],
widget.value[0], widget.value[1])
def process_outputs(output, stock_df):
output.index = stock_df.index
stock_df['out'] = output
stock_df['out'] = stock_df['out'].fillna(0)
return stock_df
def create_figure(stock, dt_scale, sc, color_id, f,
indicator_figure_height, figure_width, add_new_indicator):
sc_co = LinearScale()
ax_y = Axis(label='ADMI', scale=sc_co, orientation='vertical')
new_line = Lines(x=stock.datetime.to_array(), y=stock['out'].to_array(),
scales={'x': dt_scale, 'y': sc_co},
colors=[CATEGORY20[color_id[0]]])
new_fig = Figure(marks=[new_line], axes=[ax_y])
new_fig.layout.height = indicator_figure_height
new_fig.layout.width = figure_width
figs = [new_line]
# add new figure
add_new_indicator(new_fig)
return figs
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/admi.py |
import ipywidgets as widgets
from bqplot.colorschemes import CATEGORY20
from bqplot import Axis, Figure, LinearScale, Lines
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import force_index as indicator_fun # noqa #F401
def get_para_widgets():
para_selector = widgets.IntSlider(min=2, max=60, description="Force Index")
para_selector_widgets = [para_selector]
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
return (stock_df["close"], stock_df["volume"]) + tuple(
[w.value for w in para_selector_widgets])
def process_outputs(output, stock_df):
output.index = stock_df.index
stock_df['out'] = output
stock_df['out'] = output.fillna(0)
return stock_df
def create_figure(stock, dt_scale, sc, color_id,
f, indicator_figure_height,
figure_width, add_new_indicator):
sc_co = LinearScale()
ax_y = Axis(label='Force Index', scale=sc_co, orientation='vertical')
new_line = Lines(x=stock.datetime.to_array(), y=stock['out'].to_array(),
scales={'x': dt_scale, 'y': sc_co},
colors=[CATEGORY20[color_id[0]]])
new_fig = Figure(marks=[new_line], axes=[ax_y])
new_fig.layout.height = indicator_figure_height
new_fig.layout.width = figure_width
figs = [new_line]
# add new figure
add_new_indicator(new_fig)
return figs
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/force_index.py |
from bqplot.colorschemes import CATEGORY20
from bqplot import Axis, Figure, LinearScale, Lines
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import stochastic_oscillator_k as indicator_fun # noqa #F401
def get_para_widgets():
para_selector_widgets = []
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
return (stock_df["high"], stock_df["low"], stock_df["close"])
def process_outputs(output, stock_df):
output.index = stock_df.index
stock_df['out'] = output
stock_df['out'] = output.fillna(0)
return stock_df
def create_figure(stock, dt_scale, sc, color_id,
f, indicator_figure_height,
figure_width, add_new_indicator):
sc_co = LinearScale()
ax_y = Axis(label='Stochastic Oscillator K',
scale=sc_co, orientation='vertical')
new_line = Lines(x=stock.datetime.to_array(), y=stock['out'].to_array(),
scales={'x': dt_scale, 'y': sc_co},
colors=[CATEGORY20[color_id[0]]])
new_fig = Figure(marks=[new_line], axes=[ax_y])
new_fig.layout.height = indicator_figure_height
new_fig.layout.width = figure_width
figs = [new_line]
# add new figure
add_new_indicator(new_fig)
return figs
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/stochastic_oscillator_k.py |
import ipywidgets as widgets
from bqplot.colorschemes import CATEGORY20
from bqplot import Axis, Figure, LinearScale, Lines
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import bollinger_bands as indicator_fun # noqa #F401
def get_para_widgets():
para_selector = widgets.IntSlider(
min=2, max=60, description="Bollinger Bands")
para_selector_widgets = [para_selector]
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
return (stock_df["close"],) + tuple(
[w.value for w in para_selector_widgets])
def process_outputs(output, stock_df):
output.b1.index = stock_df.index
output.b2.index = stock_df.index
stock_df['out0'] = output.b1
stock_df['out0'] = stock_df['out0'].fillna(0)
stock_df['out1'] = output.b2
stock_df['out1'] = stock_df['out1'].fillna(0)
return stock_df
def create_figure(stock, dt_scale, sc, color_id,
f, indicator_figure_height, figure_width,
add_new_indicator):
sc_co = LinearScale()
sc_co2 = LinearScale()
ax_y = Axis(label='Bollinger b1', scale=sc_co, orientation='vertical')
ax_y2 = Axis(label='Bollinger b2', scale=sc_co2,
orientation='vertical', side='right')
new_line = Lines(x=stock.datetime.to_array(),
y=stock['out0'].to_array(),
scales={'x': dt_scale, 'y': sc_co},
colors=[CATEGORY20[color_id[0]]])
new_line2 = Lines(x=stock.datetime.to_array(), y=stock['out1'].to_array(),
scales={'x': dt_scale, 'y': sc_co2},
colors=[CATEGORY20[(color_id[0] + 1) % len(CATEGORY20)]])
new_fig = Figure(marks=[new_line, new_line2], axes=[ax_y, ax_y2])
new_fig.layout.height = indicator_figure_height
new_fig.layout.width = figure_width
figs = [new_line, new_line2]
add_new_indicator(new_fig)
return figs
def update_figure(stock, objects):
line = objects[0]
line2 = objects[1]
with line.hold_trait_notifications(), line2.hold_trait_notifications():
line.y = stock['out0'].to_array()
line.x = stock.datetime.to_array()
line2.y = stock['out1'].to_array()
line2.x = stock.datetime.to_array()
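# Hedged refresh sketch (illustrative): recompute and redraw after a slider
# change using the hooks above. The observer wiring and the stock_df,
# widgets_list, and figs names are assumptions about the hosting notebook.
# def on_change(_):
#     out = indicator_fun(*get_parameters(stock_df, widgets_list))
#     update_figure(process_outputs(out, stock_df), figs)
# widgets_list[0].observe(on_change, names='value')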
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/bollinger_bands.py |
import ipywidgets as widgets
from bqplot.colorschemes import CATEGORY20
from bqplot import Axis, Figure, LinearScale, Lines
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import coppock_curve as indicator_fun # noqa #F401
def get_para_widgets():
para_selector = widgets.IntSlider(
min=2, max=60, description="Coppock Curve")
para_selector_widgets = [para_selector]
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
return (stock_df["close"],) + tuple(
[w.value for w in para_selector_widgets])
def process_outputs(output, stock_df):
output.index = stock_df.index
stock_df['out'] = output
stock_df['out'] = output.fillna(0)
return stock_df
def create_figure(stock, dt_scale, sc,
color_id, f, indicator_figure_height,
figure_width, add_new_indicator):
sc_co = LinearScale()
ax_y = Axis(label='Coppock Curve', scale=sc_co, orientation='vertical')
new_line = Lines(x=stock.datetime.to_array(), y=stock['out'].to_array(),
scales={'x': dt_scale, 'y': sc_co},
colors=[CATEGORY20[color_id[0]]])
new_fig = Figure(marks=[new_line], axes=[ax_y])
new_fig.layout.height = indicator_figure_height
new_fig.layout.width = figure_width
figs = [new_line]
# add new figure
add_new_indicator(new_fig)
return figs
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/coppock_curve.py |
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/__init__.py |
import ipywidgets as widgets
from bqplot.colorschemes import CATEGORY20
from bqplot import Axis, Figure, LinearScale, Lines
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import vortex_indicator as indicator_fun # noqa #F401
def get_para_widgets():
para_selector = widgets.IntSlider(
min=2, max=60, description="Vortex Indicator")
para_selector_widgets = [para_selector]
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
return (stock_df["high"], stock_df["low"],
stock_df["close"]) + tuple(
[w.value for w in para_selector_widgets])
def process_outputs(output, stock_df):
output.index = stock_df.index
stock_df['out'] = output
stock_df['out'] = output.fillna(0)
return stock_df
def create_figure(stock, dt_scale, sc, color_id,
f, indicator_figure_height,
figure_width, add_new_indicator):
sc_co = LinearScale()
ax_y = Axis(label='Vortex Indicator', scale=sc_co, orientation='vertical')
new_line = Lines(x=stock.datetime.to_array(),
y=stock['out'].to_array(),
scales={'x': dt_scale, 'y': sc_co},
colors=[CATEGORY20[color_id[0]]])
new_fig = Figure(marks=[new_line], axes=[ax_y])
new_fig.layout.height = indicator_figure_height
new_fig.layout.width = figure_width
figs = [new_line]
# add new figure
add_new_indicator(new_fig)
return figs
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/vortex_indicator.py |
import ipywidgets as widgets
from bqplot import Axis, Figure, LinearScale, Lines
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import keltner_channel as indicator_fun # noqa #F401
def get_para_widgets():
para_selector = widgets.IntSlider(min=2, max=60,
description="Keltner Channel")
para_selector_widgets = [para_selector]
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
return (stock_df["high"],
stock_df["low"],
stock_df["close"]) + tuple([
w.value for w in para_selector_widgets])
def process_outputs(output, stock_df):
output.KelChM.index = stock_df.index
output.KelChU.index = stock_df.index
output.KelChD.index = stock_df.index
stock_df['out0'] = output.KelChM
stock_df['out0'] = stock_df['out0'].fillna(0)
stock_df['out1'] = output.KelChU
stock_df['out1'] = stock_df['out1'].fillna(0)
stock_df['out2'] = output.KelChD
stock_df['out2'] = stock_df['out2'].fillna(0)
return stock_df
def create_figure(stock, dt_scale,
sc, color_id, f,
indicator_figure_height, figure_width, add_new_indicator):
sc_co = LinearScale()
ax_y = Axis(label='Keltner Channel', scale=sc_co, orientation='vertical')
new_line = Lines(x=stock.datetime.to_array(), y=[stock['out0'].to_array(),
stock['out1'].to_array(),
stock['out2'].to_array()],
scales={'x': dt_scale, 'y': sc_co})
new_fig = Figure(marks=[new_line], axes=[ax_y])
new_fig.layout.height = indicator_figure_height
new_fig.layout.width = figure_width
figs = [new_line]
# add new figure
add_new_indicator(new_fig)
return figs
def update_figure(stock, objects):
line = objects[0]
with line.hold_trait_notifications():
line.y = [stock['out0'].to_array(), stock['out1'].to_array(),
stock['out2'].to_array()]
line.x = stock.datetime.to_array()
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/keltner_channel.py |
import ipywidgets as widgets
from bqplot.colorschemes import CATEGORY20
from bqplot import Lines
import math
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import exponential_moving_average as indicator_fun # noqa #F401
def get_para_widgets():
para_selector = widgets.IntSlider(
min=2, max=60, description="ewa avg periods")
para_selector_widgets = [para_selector]
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
return (stock_df["close"],) + tuple(
[w.value for w in para_selector_widgets])
def process_outputs(output, stock_df):
output.index = stock_df.index
stock_df['out'] = output
stock_df['out'] = stock_df['out'].fillna(math.inf)
return stock_df
def create_figure(stock, dt_scale, sc, color_id, f,
indicator_figure_height, figure_width, add_new_indicator):
line = Lines(x=stock.datetime.to_array(), y=stock['out'].to_array(),
scales={'x': dt_scale, 'y': sc},
colors=[CATEGORY20[color_id[0]]])
figs = [line]
f.marks = f.marks + figs
return figs
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/ewa.py |
from bqplot.colorschemes import CATEGORY20
from bqplot import Axis, Figure, LinearScale, Lines
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import ultimate_oscillator as indicator_fun # noqa #F401
def get_para_widgets():
para_selector_widgets = []
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
return (stock_df["high"], stock_df["low"], stock_df["close"])
def process_outputs(output, stock_df):
output.index = stock_df.index
stock_df['out'] = output
stock_df['out'] = output.fillna(0)
return stock_df
def create_figure(stock, dt_scale, sc, color_id, f,
indicator_figure_height,
figure_width, add_new_indicator):
sc_co = LinearScale()
ax_y = Axis(label='Ultimate Oscillator',
scale=sc_co, orientation='vertical')
new_line = Lines(x=stock.datetime.to_array(),
y=stock['out'].to_array(),
scales={'x': dt_scale, 'y': sc_co},
colors=[CATEGORY20[color_id[0]]])
new_fig = Figure(marks=[new_line], axes=[ax_y])
new_fig.layout.height = indicator_figure_height
new_fig.layout.width = figure_width
figs = [new_line]
# add new figure
add_new_indicator(new_fig)
return figs
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/ultimate_oscillator.py |
import ipywidgets as widgets
from bqplot.colorschemes import CATEGORY20
from bqplot import Axis, Figure, LinearScale, Lines
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import commodity_channel_index as indicator_fun # noqa #F401
def get_para_widgets():
para_selector = widgets.IntSlider(
min=2, max=60, description="Commodity Channel Index")
para_selector_widgets = [para_selector]
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
return (stock_df["high"],
stock_df["low"],
stock_df["close"]) + tuple(
[w.value for w in para_selector_widgets])
def process_outputs(output, stock_df):
output.index = stock_df.index
stock_df['out'] = output
stock_df['out'] = output.fillna(0)
return stock_df
def create_figure(stock, dt_scale, sc, color_id,
f, indicator_figure_height, figure_width, add_new_indicator):
sc_co = LinearScale()
ax_y = Axis(label='CCI', scale=sc_co, orientation='vertical')
new_line = Lines(x=stock.datetime.to_array(), y=stock['out'].to_array(),
scales={'x': dt_scale, 'y': sc_co},
colors=[CATEGORY20[color_id[0]]])
new_fig = Figure(marks=[new_line], axes=[ax_y])
new_fig.layout.height = indicator_figure_height
new_fig.layout.width = figure_width
figs = [new_line]
# add new figure
add_new_indicator(new_fig)
return figs
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/commodity_channel_index.py |
import ipywidgets as widgets
from bqplot.colorschemes import CATEGORY20
from bqplot import Axis, Figure, LinearScale, Lines
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import relative_strength_index as indicator_fun # noqa #F401
def get_para_widgets():
para_selector = widgets.IntSlider(min=2, max=60, description="RSI")
para_selector_widgets = [para_selector]
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
return (stock_df["high"], stock_df["low"]) + tuple(
[w.value for w in para_selector_widgets])
def process_outputs(output, stock_df):
output.index = stock_df.index
stock_df['out'] = output
stock_df['out'] = stock_df['out'].fillna(0)
return stock_df
def create_figure(stock, dt_scale, sc, color_id,
f, indicator_figure_height,
figure_width, add_new_indicator):
sc_co = LinearScale()
ax_y = Axis(label='RSI', scale=sc_co, orientation='vertical')
new_line = Lines(x=stock.datetime.to_array(),
y=stock['out'].to_array(),
scales={'x': dt_scale, 'y': sc_co},
colors=[CATEGORY20[color_id[0]]])
new_fig = Figure(marks=[new_line], axes=[ax_y])
new_fig.layout.height = indicator_figure_height
new_fig.layout.width = figure_width
figs = [new_line]
# add new figure
add_new_indicator(new_fig)
return figs
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/rsi.py |
import ipywidgets as widgets
from bqplot.colorschemes import CATEGORY20
from bqplot import Axis, Figure, LinearScale, Lines
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import average_true_range as indicator_fun # noqa #F401
def get_para_widgets():
para_selector = widgets.IntSlider(
min=2, max=60, description="Average True Range")
para_selector_widgets = [para_selector]
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
return (stock_df["high"],
stock_df["low"],
stock_df["close"]) + tuple(
[w.value for w in para_selector_widgets])
def process_outputs(output, stock_df):
output.index = stock_df.index
stock_df['out'] = output
stock_df['out'] = output.fillna(0)
return stock_df
def create_figure(stock, dt_scale, sc, color_id, f,
indicator_figure_height, figure_width, add_new_indicator):
sc_co = LinearScale()
ax_y = Axis(label='Average True Range',
scale=sc_co, orientation='vertical')
new_line = Lines(x=stock.datetime.to_array(),
y=stock['out'].to_array(),
scales={'x': dt_scale, 'y': sc_co},
colors=[CATEGORY20[color_id[0]]])
new_fig = Figure(marks=[new_line], axes=[ax_y])
new_fig.layout.height = indicator_figure_height
new_fig.layout.width = figure_width
figs = [new_line]
# add new figure
add_new_indicator(new_fig)
return figs
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/average_true_range.py |
import ipywidgets as widgets
from bqplot.colorschemes import CATEGORY20
from bqplot import Axis, Figure, LinearScale, Lines
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import exponential_moving_average as indicator_fun # noqa #F401
def get_para_widgets():
para_selector = widgets.IntSlider(min=2, max=60, description="TRIX")
para_selector_widgets = [para_selector]
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
return (stock_df["close"],) + tuple(
[w.value for w in para_selector_widgets])
def process_outputs(output, stock_df):
output.index = stock_df.index
stock_df['out'] = output
stock_df['out'] = stock_df['out'].fillna(0)
return stock_df
def create_figure(stock, dt_scale, sc, color_id,
f, indicator_figure_height,
figure_width, add_new_indicator):
sc_co = LinearScale()
ax_y = Axis(label='TRIX', scale=sc_co, orientation='vertical')
new_line = Lines(x=stock.datetime.to_array(),
y=stock['out'].to_array(),
scales={'x': dt_scale, 'y': sc_co},
colors=[CATEGORY20[color_id[0]]])
new_fig = Figure(marks=[new_line], axes=[ax_y])
new_fig.layout.height = indicator_figure_height
new_fig.layout.width = figure_width
figs = [new_line]
# add new figure
add_new_indicator(new_fig)
return figs
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/trix.py |
import ipywidgets as widgets
from bqplot.colorschemes import CATEGORY20
from bqplot import Axis, Figure, LinearScale, Lines
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import ease_of_movement as indicator_fun # noqa #F401
def get_para_widgets():
para_selector = widgets.IntSlider(
min=2, max=60, description="Ease of Movement")
para_selector_widgets = [para_selector]
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
return (stock_df["high"],
stock_df["low"],
stock_df["volume"]) + tuple([
w.value for w in para_selector_widgets])
def process_outputs(output, stock_df):
output.index = stock_df.index
stock_df['out'] = output
stock_df['out'] = output.fillna(0)
return stock_df
def create_figure(stock, dt_scale, sc, color_id, f, indicator_figure_height,
figure_width, add_new_indicator):
sc_co = LinearScale()
ax_y = Axis(label='Ease of Movement', scale=sc_co, orientation='vertical')
new_line = Lines(x=stock.datetime.to_array(), y=stock['out'].to_array(),
scales={'x': dt_scale, 'y': sc_co},
colors=[CATEGORY20[color_id[0]]])
new_fig = Figure(marks=[new_line], axes=[ax_y])
new_fig.layout.height = indicator_figure_height
new_fig.layout.width = figure_width
figs = [new_line]
# add new figure
add_new_indicator(new_fig)
return figs
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/ease_of_movement.py |
from bqplot.colorschemes import CATEGORY20
from bqplot import Axis, Figure, LinearScale, Lines
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import ppsr as indicator_fun # noqa #F401
def get_para_widgets():
para_selector_widgets = []
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
return (stock_df["high"], stock_df["low"], stock_df["close"])
def process_outputs(output, stock_df):
output.PP.index = stock_df.index
output.R1.index = stock_df.index
output.S1.index = stock_df.index
output.R2.index = stock_df.index
output.S2.index = stock_df.index
output.R3.index = stock_df.index
output.S3.index = stock_df.index
stock_df['out0'] = output.PP
stock_df['out0'] = stock_df['out0'].fillna(0)
stock_df['out1'] = output.R1
stock_df['out1'] = stock_df['out1'].fillna(0)
stock_df['out2'] = output.S1
stock_df['out2'] = stock_df['out2'].fillna(0)
stock_df['out3'] = output.R2
stock_df['out3'] = stock_df['out3'].fillna(0)
stock_df['out4'] = output.S2
stock_df['out4'] = stock_df['out4'].fillna(0)
stock_df['out5'] = output.R3
stock_df['out5'] = stock_df['out5'].fillna(0)
stock_df['out6'] = output.S3
stock_df['out6'] = stock_df['out6'].fillna(0)
return stock_df
def create_figure(stock, dt_scale, sc, color_id,
f, indicator_figure_height,
figure_width, add_new_indicator):
sc_co = LinearScale()
sc_co2 = LinearScale()
sc_co3 = LinearScale()
sc_co4 = LinearScale()
sc_co5 = LinearScale()
sc_co6 = LinearScale()
sc_co7 = LinearScale()
ax_y = Axis(label='PPSR PP', scale=sc_co, orientation='vertical')
ax_y2 = Axis(label='PPSR R1', scale=sc_co2,
orientation='vertical', side='right')
ax_y3 = Axis(label='PPSR S1', scale=sc_co3,
orientation='vertical', side='right')
ax_y4 = Axis(label='PPSR R2', scale=sc_co4,
orientation='vertical', side='right')
ax_y5 = Axis(label='PPSR S2', scale=sc_co5,
orientation='vertical', side='right')
ax_y6 = Axis(label='PPSR R3', scale=sc_co6,
orientation='vertical', side='right')
ax_y7 = Axis(label='PPSR S3', scale=sc_co7,
orientation='vertical', side='right')
new_line = Lines(x=stock.datetime.to_array(), y=stock['out0'].to_array(),
scales={'x': dt_scale, 'y': sc_co},
colors=[CATEGORY20[color_id[0]]])
new_line2 = Lines(x=stock.datetime.to_array(), y=stock['out1'].to_array(),
scales={'x': dt_scale, 'y': sc_co2},
colors=[CATEGORY20[(color_id[0] + 1) % len(CATEGORY20)]])
new_line3 = Lines(x=stock.datetime.to_array(), y=stock['out2'].to_array(),
scales={'x': dt_scale, 'y': sc_co3},
colors=[CATEGORY20[(color_id[0] + 2) % len(CATEGORY20)]])
new_line4 = Lines(x=stock.datetime.to_array(), y=stock['out3'].to_array(),
scales={'x': dt_scale, 'y': sc_co4},
colors=[CATEGORY20[(color_id[0] + 3) % len(CATEGORY20)]])
new_line5 = Lines(x=stock.datetime.to_array(), y=stock['out4'].to_array(),
scales={'x': dt_scale, 'y': sc_co5},
colors=[CATEGORY20[(color_id[0] + 4) % len(CATEGORY20)]])
new_line6 = Lines(x=stock.datetime.to_array(), y=stock['out5'].to_array(),
scales={'x': dt_scale, 'y': sc_co6},
colors=[CATEGORY20[(color_id[0] + 5) % len(CATEGORY20)]])
new_line7 = Lines(x=stock.datetime.to_array(), y=stock['out6'].to_array(),
scales={'x': dt_scale, 'y': sc_co7},
colors=[CATEGORY20[(color_id[0] + 6) % len(CATEGORY20)]])
new_fig = Figure(marks=[new_line, new_line2, new_line3, new_line4,
new_line5, new_line6, new_line7],
axes=[ax_y, ax_y2, ax_y3, ax_y4, ax_y5, ax_y6, ax_y7])
new_fig.layout.height = indicator_figure_height
new_fig.layout.width = figure_width
figs = [new_line, new_line2, new_line3,
new_line4, new_line5, new_line6, new_line7]
add_new_indicator(new_fig)
return figs
def update_figure(stock, objects):
# refresh all seven PPSR lines (PP, R1, S1, R2, S2, R3, S3)
for i, line in enumerate(objects):
with line.hold_trait_notifications():
line.y = stock['out{}'.format(i)].to_array()
line.x = stock.datetime.to_array()
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/parabolic_sar.py |
import ipywidgets as widgets
from bqplot.colorschemes import CATEGORY20
from bqplot import Axis, Figure, LinearScale, Lines
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import momentum as indicator_fun # noqa #F401
def get_para_widgets():
para_selector = widgets.IntSlider(min=2, max=60, description="Momentum")
para_selector_widgets = [para_selector]
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
return (stock_df["close"],) + tuple(
[w.value for w in para_selector_widgets])
def process_outputs(output, stock_df):
output.index = stock_df.index
stock_df['out'] = output
stock_df['out'] = output.fillna(0)
return stock_df
def create_figure(stock, dt_scale, sc, color_id,
f, indicator_figure_height, figure_width,
add_new_indicator):
sc_co = LinearScale()
ax_y = Axis(label='Momentum', scale=sc_co, orientation='vertical')
new_line = Lines(x=stock.datetime.to_array(), y=stock['out'].to_array(),
scales={'x': dt_scale, 'y': sc_co},
colors=[CATEGORY20[color_id[0]]])
new_fig = Figure(marks=[new_line], axes=[ax_y])
new_fig.layout.height = indicator_figure_height
new_fig.layout.width = figure_width
figs = [new_line]
# add new figure
add_new_indicator(new_fig)
return figs
| fsi-samples-main | gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/momentum.py |
from setuptools import setup, find_packages
setup(
name='greenflow_nemo_plugin',
packages=find_packages(include=['greenflow_nemo_plugin',
'greenflow_nemo_plugin.nemo_util']),
entry_points={
'greenflow.plugin':
['greenflow_nemo_plugin = greenflow_nemo_plugin',
'greenflow_nemo_plugin.asr = greenflow_nemo_plugin.asr',
'greenflow_nemo_plugin.cv = greenflow_nemo_plugin.cv',
'greenflow_nemo_plugin.nlp = greenflow_nemo_plugin.nlp',
'greenflow_nemo_plugin.util = greenflow_nemo_plugin.nemo_util',
'greenflow_nemo_plugin.gan = greenflow_nemo_plugin.simple_gan',
'greenflow_nemo_plugin.tts = greenflow_nemo_plugin.tts',
'greenflow_nemo_plugin.tutorials = greenflow_nemo_plugin.tutorials'],
}
)
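# Hedged discovery sketch (illustrative, not part of setup.py): enumerating
# the 'greenflow.plugin' entry points registered above with the standard
# library. Dict-style access assumes Python < 3.10; on 3.10+ use
# entry_points(group='greenflow.plugin') instead.
# from importlib.metadata import entry_points
# for ep in entry_points().get('greenflow.plugin', []):
#     print(ep.name, '->', ep.value)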
| fsi-samples-main | gQuant/plugins/nemo_plugin/setup.py |
from greenflow.dataframe_flow import Node
from .nemoBaseNode import NeMoBase
import nemo
import nemo.collections.simple_gan
class DiscriminatorLossNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.simple_gan.gan.DiscriminatorLoss)
class GradientPenaltyNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.simple_gan.gan.GradientPenalty)
class InterpolateImageNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.simple_gan.gan.InterpolateImage)
class MnistGanDataLayerNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.simple_gan.gan.MnistGanDataLayer)
class RandomDataLayerNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.simple_gan.gan.RandomDataLayer)
class SimpleDiscriminatorNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.simple_gan.gan.SimpleDiscriminator)
class SimpleGeneratorNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.simple_gan.gan.SimpleGenerator)
| fsi-samples-main | gQuant/plugins/nemo_plugin/greenflow_nemo_plugin/simple_gan.py |
from greenflow.dataframe_flow import Node
from .nemoBaseNode import NeMoBase
import nemo
import nemo.backends.pytorch.tutorials
class DialogDataLayerNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.backends.pytorch.tutorials.chatbot.modules.DialogDataLayer)
class EncoderRNNNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.backends.pytorch.tutorials.chatbot.modules.EncoderRNN)
class GreedyLuongAttnDecoderRNNNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.backends.pytorch.tutorials.chatbot.modules.GreedyLuongAttnDecoderRNN)
class LuongAttnDecoderRNNNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.backends.pytorch.tutorials.chatbot.modules.LuongAttnDecoderRNN)
class MaskedXEntropyLossNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.backends.pytorch.tutorials.chatbot.modules.MaskedXEntropyLoss)
class CrossEntropyLossNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.backends.pytorch.tutorials.toys.CrossEntropyLoss)
class L1LossNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.backends.pytorch.tutorials.toys.L1Loss)
class MSELossNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.backends.pytorch.tutorials.toys.MSELoss)
class RealFunctionDataLayerNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.backends.pytorch.tutorials.toys.RealFunctionDataLayer)
class TaylorNetNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.backends.pytorch.tutorials.toys.TaylorNet)
| fsi-samples-main | gQuant/plugins/nemo_plugin/greenflow_nemo_plugin/tutorials.py |
validation_fun = """
//first check types
const reqElement = required['element'];
const outElement = outputs['element'];
if (
outElement['types'][0] !== 'VoidType' &&
reqElement['types'][0] !== 'VoidType'
) {
if (
outElement['types'].findIndex(
(d) => d === reqElement['types'][0]
) < 0
) {
// req type should be generic,
// out type should be specific, i.e. subclass of req
// first required element type should be the parent type of the output element
return false;
}
if (outElement['fields'] !== reqElement['fields']) {
return false;
}
if (outElement['parameters'] !== reqElement['parameters']) {
return false;
}
}
const reqAxes = required['axes'];
const outAxes = outputs['axes'];
if (reqAxes.length === 0) {
return true;
}
if (reqAxes.length !== outAxes.length) {
return false;
}
for (let i = 0; i < reqAxes.length; i++) {
if (reqAxes[i]['kind'] !== outAxes[i]['kind']) {
return false;
}
if (
reqAxes[i]['size'] !== null &&
outAxes[i]['size'] !== null &&
reqAxes[i]['size'] !== outAxes[i]['size']
) {
return false;
}
}
return true;
"""
display_fun = """
const axes = metaObj['axes'];
const element = metaObj['element'];
let header = '';
header += '<table>';
header += '<tr>';
header += '<th>Axes: </th>';
if ('axes' in metaObj && axes.length > 0) {
for (let i = 0; i < axes.length; i++) {
if (axes[i]['size']) {
header += `<th>${i === 0 ? '(' : ''}${axes[i]['kind']}(${
axes[i]['size']
})${i === axes.length - 1 ? ')' : ''}</th>`;
} else {
header += `<th>${i === 0 ? '(' : ''}${axes[i]['kind']}${
i === axes.length - 1 ? ')' : ''
}</th>`;
}
}
} else {
header += '<th>()</th>';
}
header += '</tr>';
header += '</table>';
header += '<ul>';
if ('types' in element) {
header += `<li>Element Type: ${element['types'][0]}</li>`;
if ('fields' in element && element['fields'] !== 'None') {
header += `<li>Element fields: ${element['fields']}</li>`;
}
if ('parameters' in element && element['parameters'] !== '{}') {
header += `<li>Element parameters: ${element['parameters']}</li>`;
}
}
header += '</ul>';
return header;
"""
validation = {}
display = {}
validation['nemo.core.neural_types.neural_type.NmTensor'] = validation_fun
display['nemo.core.neural_types.neural_type.NmTensor'] = display_fun
| fsi-samples-main | gQuant/plugins/nemo_plugin/greenflow_nemo_plugin/client.py |
from greenflow.dataframe_flow import Node
from .nemoBaseNode import NeMoBase
import nemo
import nemo.collections.tts
class AudioDataLayerNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.tts.data_layers.AudioDataLayer)
class FastSpeechNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.tts.fastspeech_modules.FastSpeech)
class FastSpeechDataLayerNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.tts.fastspeech_modules.FastSpeechDataLayer)
class FastSpeechLossNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.tts.fastspeech_modules.FastSpeechLoss)
class MakeGateNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.tts.tacotron2_modules.MakeGate)
class Tacotron2DecoderNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.tts.tacotron2_modules.Tacotron2Decoder)
class Tacotron2DecoderInferNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.tts.tacotron2_modules.Tacotron2DecoderInfer)
class Tacotron2EncoderNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.tts.tacotron2_modules.Tacotron2Encoder)
class Tacotron2LossNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.tts.tacotron2_modules.Tacotron2Loss)
class Tacotron2PostnetNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.tts.tacotron2_modules.Tacotron2Postnet)
class TextEmbeddingNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.tts.tacotron2_modules.TextEmbedding)
class LenSamplerNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.tts.talknet_modules.LenSampler)
class TalkNetNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.tts.talknet_modules.TalkNet)
class TalkNetDataLayerNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.tts.talknet_modules.TalkNetDataLayer)
class TalkNetDursLossNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.tts.talknet_modules.TalkNetDursLoss)
class TalkNetMelsLossNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.tts.talknet_modules.TalkNetMelsLoss)
class WaveGlowInferNMNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.tts.waveglow_modules.WaveGlowInferNM)
class WaveGlowLossNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.tts.waveglow_modules.WaveGlowLoss)
class WaveGlowNMNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.tts.waveglow_modules.WaveGlowNM)
| fsi-samples-main | gQuant/plugins/nemo_plugin/greenflow_nemo_plugin/tts.py |
from .client import validation, display # noqa: #401
from greenflow.dataframe_flow._node_flow import register_cleanup
def clean_nemo(ui_clean):
"""
ui_clean is True if the client sends the
'clean' command to the greenflow backend
"""
try:
import nemo
nf = nemo.core.NeuralModuleFactory.get_default_factory()
except ModuleNotFoundError:
nf = None
if nf is not None:
nf.reset_trainer()
if ui_clean:
state = nemo.utils.app_state.AppState()
state._module_registry.clear()
state.active_graph.modules.clear()
register_cleanup('cleannemo', clean_nemo)
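# register_cleanup hooks clean_nemo into greenflow's run lifecycle: the
# NeMo trainer is reset after each run, and on an explicit UI 'clean'
# command the module registry and active graph modules are cleared as well.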
| fsi-samples-main | gQuant/plugins/nemo_plugin/greenflow_nemo_plugin/__init__.py |
from greenflow.dataframe_flow import Node
from .nemoBaseNode import NeMoBase
import nemo
import nemo.collections.asr
class AudioPreprocessorNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.asr.audio_preprocessing.AudioPreprocessor)
class AudioToMFCCPreprocessorNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.asr.audio_preprocessing.AudioToMFCCPreprocessor)
class AudioToMelSpectrogramPreprocessorNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.asr.audio_preprocessing.AudioToMelSpectrogramPreprocessor)
class AudioToSpectrogramPreprocessorNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.asr.audio_preprocessing.AudioToSpectrogramPreprocessor)
class CropOrPadSpectrogramAugmentationNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.asr.audio_preprocessing.CropOrPadSpectrogramAugmentation)
class MultiplyBatchNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.asr.audio_preprocessing.MultiplyBatch)
class SpectrogramAugmentationNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.asr.audio_preprocessing.SpectrogramAugmentation)
class TimeStretchAugmentationNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.asr.audio_preprocessing.TimeStretchAugmentation)
class BeamSearchDecoderWithLMNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.asr.beam_search_decoder.BeamSearchDecoderWithLM)
class ContextNetDecoderForCTCNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.asr.contextnet.ContextNetDecoderForCTC)
class ContextNetEncoderNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.asr.contextnet.ContextNetEncoder)
class JasperEncoderNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.asr.jasper.JasperEncoder)
class AudioToSpeechLabelDataLayerNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.asr.data_layer.AudioToSpeechLabelDataLayer)
class AudioToTextDataLayerNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.asr.data_layer.AudioToTextDataLayer)
class KaldiFeatureDataLayerNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.asr.data_layer.KaldiFeatureDataLayer)
class TarredAudioToTextDataLayerNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.asr.data_layer.TarredAudioToTextDataLayer)
class TranscriptDataLayerNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.asr.data_layer.TranscriptDataLayer)
class GreedyCTCDecoderNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.asr.greedy_ctc_decoder.GreedyCTCDecoder)
class JasperDecoderForCTCNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.asr.jasper.JasperDecoderForCTC)
class JasperDecoderForClassificationNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.asr.jasper.JasperDecoderForClassification)
class JasperDecoderForSpkrClassNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.asr.jasper.JasperDecoderForSpkrClass)
class CTCLossNMNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.asr.losses.CTCLossNM)
class ASRConvCTCModelNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.asr.models.asrconvctcmodel.ASRConvCTCModel)
class JasperNetNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.asr.models.asrconvctcmodel.JasperNet)
class QuartzNetNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.asr.models.asrconvctcmodel.QuartzNet)
| fsi-samples-main | gQuant/plugins/nemo_plugin/greenflow_nemo_plugin/asr.py |
from greenflow.dataframe_flow import Node
from .nemoBaseNode import NeMoBase
import nemo
import nemo.collections.nlp.nm
class BertInferDataLayerNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.nlp.nm.data_layers.bert_inference_datalayer.BertInferDataLayer)
class BertPretrainingDataLayerNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.nlp.nm.data_layers.lm_bert_datalayer.BertPretrainingDataLayer)
class BertPretrainingPreprocessedDataLayerNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.nlp.nm.data_layers.lm_bert_datalayer.BertPretrainingPreprocessedDataLayer)
class TextDataLayerNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.nlp.nm.data_layers.text_datalayer.TextDataLayer)
class MaskedLogLossNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.nlp.nm.losses.masked_xentropy_loss.MaskedLogLoss)
class SGDDialogueStateLossNMNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.nlp.nm.losses.sgd_loss.SGDDialogueStateLossNM)
class SmoothedCrossEntropyLossNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.nlp.nm.losses.smoothed_cross_entropy_loss.SmoothedCrossEntropyLoss)
class SpanningLossNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.nlp.nm.losses.spanning_loss.SpanningLoss)
class AlbertNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.nlp.nm.trainables.common.huggingface.albert_nm.Albert)
class BERTNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.nlp.nm.trainables.common.huggingface.bert_nm.BERT)
class BeamSearchTranslatorNMNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.nlp.nm.trainables.common.transformer.transformer_nm.BeamSearchTranslatorNM)
class BertTokenClassifierNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.nlp.nm.trainables.common.token_classification_nm.BertTokenClassifier)
class EncoderRNNNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.nlp.nm.trainables.common.encoder_rnn.EncoderRNN)
class GreedyLanguageGeneratorNMNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.nlp.nm.trainables.common.transformer.transformer_nm.GreedyLanguageGeneratorNM)
class JointIntentSlotClassifierNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.nlp.nm.trainables.joint_intent_slot.joint_intent_slot_classifier_nm.JointIntentSlotClassifier)
class PunctCapitTokenClassifierNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.nlp.nm.trainables.punctuation_capitalization.punctuation_capitalization_classifier_nm.PunctCapitTokenClassifier)
class RobertaNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.nlp.nm.trainables.common.huggingface.roberta_nm.Roberta)
class SGDDecoderNMNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.nlp.nm.trainables.dialogue_state_tracking.sgd.sgd_decoder_nm.SGDDecoderNM)
class SGDEncoderNMNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.nlp.nm.trainables.dialogue_state_tracking.sgd.sgd_encoder_nm.SGDEncoderNM)
class SequenceClassifierNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.nlp.nm.trainables.common.sequence_classification_nm.SequenceClassifier)
class SequenceRegressionNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.nlp.nm.trainables.common.sequence_regression_nm.SequenceRegression)
class TRADEGeneratorNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.nlp.nm.trainables.dialogue_state_tracking.trade_generator_nm.TRADEGenerator)
class TokenClassifierNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.nlp.nm.trainables.common.token_classification_nm.TokenClassifier)
class TransformerDecoderNMNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.nlp.nm.trainables.common.transformer.transformer_nm.TransformerDecoderNM)
class TransformerEncoderNMNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.nlp.nm.trainables.common.transformer.transformer_nm.TransformerEncoderNM)
| fsi-samples-main | gQuant/plugins/nemo_plugin/greenflow_nemo_plugin/nlp.py |
from greenflow.dataframe_flow import (
ConfSchema, PortsSpecSchema, NodePorts, MetaData)
from nemo.core.neural_types import NmTensor
import inspect
from greenflow.plugin_nodes.util.json_util import type_map
from collections import OrderedDict
from greenflow.dataframe_flow.util import get_file_path
from nemo.backends.pytorch.nm import (DataLayerNM,
TrainableNM, LossNM)
import nemo
__all__ = ["NeMoBase"]
default_type = 'number'
share_weight = 'share_weight'
class FeedProperty(object):
def __init__(self, conf):
self.__dict__.update(conf)
def serialize_type(neural_type):
output = {}
axes = []
if neural_type.axes is None:
pass
else:
for ax in neural_type.axes:
axes.append({'kind': str(ax.kind),
'size': ax.size})
output['axes'] = axes
ele = {}
ele_type = neural_type.elements_type
ele['types'] = [cla.__name__ for cla in ele_type.__class__.mro()
if cla.__name__ != 'ABC' and cla.__name__ != 'object']
ele['fields'] = str(ele_type.fields)
ele['parameters'] = str(ele_type.type_parameters)
output['element'] = ele
return output
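# Illustrative sketch (hypothetical type, not taken from the NeMo docs) of
# what serialize_type returns for a batched, variable-length tensor type:
# {'axes': [{'kind': 'AxisKind.Batch', 'size': None},
# {'kind': 'AxisKind.Time', 'size': None}],
# 'element': {'types': ['ChannelType', 'ElementType'],
# 'fields': 'None', 'parameters': '{}'}}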
def get_parameters(class_obj, conf):
init_fun = class_obj.__init__
sig = inspect.signature(init_fun)
hasEmpty = False
init_para = {}
for key in sig.parameters.keys():
if key == 'self':
# ignore the self
continue
if key in conf:
init_para[key] = conf[key]
else:
hasEmpty = True
break
if not hasEmpty:
return init_para
else:
return None
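# get_parameters returns a kwargs dict only when *every* non-self __init__
# parameter of class_obj is present in conf; otherwise it returns None so
# the caller can postpone construction until the conf is complete.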
def get_conf_parameters(class_obj):
init_fun = class_obj.__init__
sig = inspect.signature(init_fun)
init_para = OrderedDict()
for key in sig.parameters.keys():
if key == 'self':
# ignore the self
continue
para = sig.parameters[key]
default_val = None
if para.default == inspect._empty:
p_type = default_type
elif para.default is None:
p_type = default_type
else:
if para.default.__class__.__name__ not in type_map:
print(para.default, type(para.default))
p_type = default_type
else:
p_type = type_map[para.default.__class__.__name__]
default_val = para.default
init_para[para.name] = (p_type, default_val)
return init_para
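# Sketch of the returned mapping for a hypothetical module with
# __init__(self, voc_size, name='enc'):
# OrderedDict([('voc_size', ('number', None)), ('name', ('string', 'enc'))])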
class NeMoBase:
def init(self, class_obj):
if nemo.core.NeuralModuleFactory.get_default_factory() is None:
nemo.core.NeuralModuleFactory()
self.instanceClass = class_obj
self.instance = None
self.file_fields = []
conf_para = get_conf_parameters(class_obj)
self.fix_type = {}
self.INPUT_NM = 'in_nm'
self.OUTPUT_NM = 'out_nm'
for key in conf_para.keys():
if key.find('name') >= 0:
self.fix_type[key] = "string"
if key.find('model') >= 0:
self.fix_type[key] = "string"
if key.find('file') >= 0:
self.file_fields.append(key)
for f in self.file_fields:
self.fix_type[f] = 'string'
if f in self.conf and self.conf[f]:
self.conf[f] = get_file_path(self.conf[f])
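# For non-data-layer modules, eagerly instantiate so that ports and meta
# can be inspected; trainable modules honoring the 'Reuse' share_weight
# option first look for an already-registered instance of the same class.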
if not issubclass(class_obj, DataLayerNM):
try:
if issubclass(self.instanceClass, TrainableNM):
input_meta = self.get_input_meta()
if self.INPUT_NM in input_meta:
if (share_weight in self.conf and
self.conf[share_weight] == 'Reuse'):
self.conf = input_meta[self.INPUT_NM]
app = nemo.utils.app_state.AppState()
ins = None
for mod in app._module_registry:
if isinstance(mod, self.instanceClass):
ins = mod
break
if ins is None:
ins = class_obj(**self.conf)
if self.instance is None:
self.instance = ins
except Exception as e:
print(e)
def _clean_dup(self):
app = nemo.utils.app_state.AppState()
if 'name' in self.conf:
if app._module_registry.has(self.conf['name']):
existing = app._module_registry[self.conf['name']]
app._module_registry.remove(existing)
removeList = []
for mod in app._module_registry:
if isinstance(mod, self.instanceClass):
# remove the duplicate instances
removeList.append(mod)
for mod in removeList:
app._module_registry.remove(mod)
def ports_setup(self):
port_type = PortsSpecSchema.port_type
if self.instance is not None:
inports = self.instance.input_ports
outports = self.instance.output_ports
else:
try:
p_inports = self.instanceClass.input_ports
p_outports = self.instanceClass.output_ports
feeder = FeedProperty(self.conf)
inports = p_inports.fget(feeder)
outports = p_outports.fget(feeder)
except Exception:
inports = None
outports = None
o_inports = {}
o_outports = {}
if inports is not None:
for k in inports.keys():
o_inports[k] = {port_type: NmTensor}
if outports is not None:
for k in outports.keys():
o_outports[k] = {port_type: NmTensor}
if issubclass(self.instanceClass, TrainableNM):
# added the port for tying the weights
o_inports[self.INPUT_NM] = {port_type: TrainableNM}
o_outports[self.OUTPUT_NM] = {port_type: TrainableNM}
elif issubclass(self.instanceClass, LossNM):
o_outports[self.OUTPUT_NM] = {port_type: LossNM}
elif issubclass(self.instanceClass, DataLayerNM):
o_outports[self.OUTPUT_NM] = {port_type: DataLayerNM}
return NodePorts(inports=o_inports, outports=o_outports)
def meta_setup(self):
input_meta = self.get_input_meta()
if issubclass(self.instanceClass, TrainableNM):
input_meta = self.get_input_meta()
if self.INPUT_NM in input_meta:
if (share_weight in self.conf and
self.conf[share_weight] == 'Reuse'):
self.conf = input_meta[self.INPUT_NM]
if self.instance is not None:
inports = self.instance.input_ports
outports = self.instance.output_ports
else:
try:
p_inports = self.instanceClass.input_ports
p_outports = self.instanceClass.output_ports
feeder = FeedProperty(self.conf)
inports = p_inports.fget(feeder)
outports = p_outports.fget(feeder)
except Exception:
inports = None
outports = None
required = {}
out_meta = {}
if inports is not None:
for k in inports.keys():
required[k] = serialize_type(inports[k])
if outports is not None:
for k in outports.keys():
out_meta[k] = serialize_type(outports[k])
if self.instance is not None:
out_meta[self.OUTPUT_NM] = self.conf
metadata = MetaData(inports=required, outports=out_meta)
return metadata
def conf_schema(self):
conf_para = get_conf_parameters(self.instanceClass)
class_doc = self.instanceClass.__doc__
desc = "" if class_doc is None else class_doc
init_doc = self.instanceClass.__init__.__doc__
desc += "" if init_doc is None else init_doc
json = {
"title": "NeMo "+self.instanceClass.__name__+" Node",
"type": "object",
"description": desc,
"properties": {
},
}
ui = {
}
for f in self.file_fields:
if f in conf_para:
ui[f] = {"ui:widget": "FileSelector"}
for p in conf_para.keys():
stype = conf_para[p][0]
if p in self.fix_type:
stype = self.fix_type[p]
json['properties'][p] = {
"type": stype,
"default": conf_para[p][1]
}
if issubclass(self.instanceClass, TrainableNM):
if share_weight in conf_para:
print('warning, share_weight parameter name collision')
json['properties'][share_weight] = {
"type": 'string',
"description": """Weight Sharing between Modules: Reuse,
re-use neural modules between training, evaluation and
inference graphs; Copy: copy weights between modules. Subsequent
update of weights in one module will not affect weights in the
other module. This means that the weights will get DIFFERENT
gradients on the update step. Tying: the default, ties
weights between two or more modules. Tied weights are identical
across all modules. Gradients to the weights will be the
SAME.""",
"enum": ['Reuse', 'Copy', 'Tying'],
"default": 'Tying'
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
para = get_parameters(self.instanceClass, self.conf)
app = nemo.utils.app_state.AppState()
self.instance = None
if issubclass(self.instanceClass, TrainableNM):
if self.INPUT_NM in inputs:
inputIn = inputs[self.INPUT_NM]
if (share_weight in self.conf and
self.conf[share_weight] == 'Reuse'):
self.instance = inputIn
if para is not None and self.instance is None:
self._clean_dup()
self.instance = self.instanceClass(**para)
if self.instance is None:
return {}
if issubclass(self.instanceClass, TrainableNM):
if self.INPUT_NM in inputs:
inputIn = inputs[self.INPUT_NM]
if (share_weight in self.conf and
self.conf[share_weight] == 'Reuse'):
pass
elif (share_weight in self.conf and
self.conf[share_weight] == 'Copy'):
self.instance.set_weights(inputIn.get_weights())
else:
self.instance.tie_weights_with(inputIn,
list(
inputIn.get_weights(
).keys()))
inputsCopy = OrderedDict()
for k in self.instance.input_ports.keys():
if k in inputs:
inputsCopy[k] = inputs[k]
instanceName = self.instance.name
if instanceName in app.active_graph._modules:
del app.active_graph._modules[instanceName]
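# Invoke the module to extend the NeMo DAG. A module with multiple output
# ports returns a namedtuple-like result that is unpacked per port name;
# a single NmTensor is keyed under the module's only output port.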
o = self.instance(**inputsCopy)
if isinstance(o, tuple):
output = {}
for key in self.instance.output_ports.keys():
output[key] = getattr(o, key)
else:
key = list(self.instance.output_ports.keys())[0]
output = {key: o}
# if self.uid == 'eval_data':
# print(inputs, output)
# for k in output.keys():
# print(output[k].name)
# print(output[k].unique_name)
output[self.OUTPUT_NM] = self.instance
return output
| fsi-samples-main | gQuant/plugins/nemo_plugin/greenflow_nemo_plugin/nemoBaseNode.py |
from greenflow.dataframe_flow import Node
from .nemoBaseNode import NeMoBase
import nemo
import nemo.backends.pytorch.common
class BCEWithLogitsLossNMNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.backends.pytorch.common.losses.BCEWithLogitsLossNM)
class CrossEntropyLossNMNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.backends.pytorch.common.losses.CrossEntropyLossNM)
class LossAggregatorNMNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.backends.pytorch.common.losses.LossAggregatorNM)
class MSELossNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.backends.pytorch.common.losses.MSELoss)
class SequenceLossNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.backends.pytorch.common.losses.SequenceLoss)
class SequenceEmbeddingNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.backends.pytorch.common.other.SequenceEmbedding)
class ZerosLikeNMNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.backends.pytorch.common.other.ZerosLikeNM)
class DecoderRNNNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.backends.pytorch.common.rnn.DecoderRNN)
class EncoderRNNNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.backends.pytorch.common.rnn.EncoderRNN)
class NeMoModelNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.core.nemo_model.NeMoModel)
class NeuralModuleNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.core.neural_modules.NeuralModule)
class BeamSearchNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.backends.pytorch.common.search.BeamSearch)
class GreedySearchNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.backends.pytorch.common.search.GreedySearch)
class ZerosDataLayerNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.backends.pytorch.common.zero_data.ZerosDataLayer)
| fsi-samples-main | gQuant/plugins/nemo_plugin/greenflow_nemo_plugin/common.py |
from greenflow.dataframe_flow import Node
from .nemoBaseNode import NeMoBase
import nemo
import nemo.collections.cv
class CIFAR100DataLayerNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.cv.modules.data_layers.cifar100_datalayer.CIFAR100DataLayer)
class CIFAR10DataLayerNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.cv.modules.data_layers.cifar10_datalayer.CIFAR10DataLayer)
class MNISTDataLayerNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.cv.modules.data_layers.mnist_datalayer.MNISTDataLayer)
class STL10DataLayerNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.cv.modules.data_layers.stl10_datalayer.STL10DataLayer)
class NLLLossNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.cv.modules.losses.nll_loss.NLLLoss)
class NonLinearityNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.cv.modules.non_trainables.non_linearity.NonLinearity)
class ReshapeTensorNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.cv.modules.non_trainables.reshape_tensor.ReshapeTensor)
class ConvNetEncoderNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.cv.modules.trainables.convnet_encoder.ConvNetEncoder)
class FeedForwardNetworkNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.cv.modules.trainables.feed_forward_network.FeedForwardNetwork)
class ImageEncoderNode(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.cv.modules.trainables.image_encoder.ImageEncoder)
class LeNet5Node(NeMoBase, Node):
def init(self):
NeMoBase.init(self, nemo.collections.cv.modules.trainables.lenet5.LeNet5)
| fsi-samples-main | gQuant/plugins/nemo_plugin/greenflow_nemo_plugin/cv.py |
from greenflow_gquant_plugin.ml import GridRandomSearchNode
from greenflow.plugin_nodes.util.contextCompositeNode import ContextCompositeNode # noqa #E402
from greenflow.dataframe_flow.portsSpecSchema import (ConfSchema, # noqa #E402
NodePorts)
from greenflow.dataframe_flow import TaskGraph # noqa #E402
from greenflow.dataframe_flow import Node # noqa #E402
from greenflow.dataframe_flow.util import get_file_path # noqa #E402
from greenflow.dataframe_flow.cache import CACHE_SCHEMA # noqa #E402
from greenflow.dataframe_flow.taskSpecSchema import TaskSpecSchema # noqa #E402
import cudf # noqa #E402
import uuid # noqa #E402
import pandas # noqa #E402
import copy # noqa #E402
__all__ = ["NemoHyperTuneNode"]
_SCHED_CONF = {
"type": "object",
"description": """distributed implementations of early
stopping algorithms such as Median Stopping Rule,
HyperBand, and ASHA.""",
"properties": {
"name": {
"type": "string",
"enum": ["AsyncHyperBandScheduler",
"HyperBandScheduler",
"MedianStoppingRule"]
},
},
"dependencies": {
"name": {
"oneOf": [
{
"properties": {
"name": {
"type": "string",
"enum": ["AsyncHyperBandScheduler"]
},
"parameters": {
"type": "object",
"properties": {
"time_attr": {
"type": "string",
"description": """A training result
attr to use for comparing time.
Note that you can pass in something
non-temporal such as
training_iteration as a measure of
progress, the only requirement is that
the attribute should increase
monotonically.""",
"enum": ["training_iteration"],
"default": "training_iteration"
},
"max_t": {
"type": "number",
"description": """max time units per
trial. Trials will be stopped after
max_t time units (determined by
time_attr) have passed.""",
"default": 100.0
},
"grace_period": {
"type": "number",
"description": """Only stop trials at
least this old in time. The units are
the same as the attribute named by
time_attr""",
"default": 1.0
},
"reduction_factor": {
"type": "number",
"description": """Used to set halving
rate and amount. This is simply a
unit-less scalar.""",
"default": 4.0
},
"brackets": {
"type": "integer",
"description": """Number of brackets.
Each bracket has a different halving
rate, specified by the reduction
factor.""",
"default": 1
}
}
}
}
},
{
"properties": {
"name": {
"type": "string",
"enum": ["HyperBandScheduler"]
},
"parameters": {
"type": "object",
"properties": {
"time_attr": {
"type": "string",
"description": """A training result attr to
use for comparing time. Note that you
can pass in something non-temporal such
as training_iteration as a measure of
progress, the only requirement is that
the attribute should increase
monotonically.""",
"enum": ["training_iteration"],
"default": "training_iteration"
},
"max_t": {
"type": "number",
"description": """max time units per
trial. Trials will be stopped after
max_t time units (determined by
time_attr) have passed.""",
"default": 100.0
},
"reduction_factor": {
"type": "number",
"description": """Used to set halving
rate and amount. This is simply a
unit-less scalar.""",
"default": 4.0
},
}
}
}
},
{
"properties": {
"name": {
"type": "string",
"enum": ["MedianStoppingRule"]
},
"parameters": {
"type": "object",
"properties": {
"time_attr": {
"type": "string",
"description": """A training result attr to
use for comparing time. Note that you
can pass in something non-temporal such
as training_iteration as a measure of
progress, the only requirement is that
the attribute should increase
monotonically.""",
"enum": ["training_iteration"],
"default": "training_iteration"
},
"grace_period": {
"type": "number",
"description": """Only stop trials at
least this old in time. The units are
the same as the attribute named by
time_attr""",
"default": 60.0
},
"min_samples_required": {
"type": "integer",
"description": """Minimum number of
trials to compute median over.""",
"default": 3
},
"min_time_slice": {
"type": "number",
"description": """Each trial runs at
least this long before yielding
(assuming it isn’t stopped).
Note: trials ONLY yield if there
are not enough samples to evaluate
performance for the current result
AND there are other trials waiting to
run. The units are the same as the
attribute named by time_attr.""",
"default": 0.0
},
"hard_stop": {
"type": "boolean",
"description": """If False, pauses
trials instead of stopping them.
When all other trials are complete,
paused trials will be resumed and
allowed to run FIFO.""",
"default": True
},
}
}
}
},
]
}
}
}
class NemoHyperTuneNode(GridRandomSearchNode):
def init(self):
GridRandomSearchNode.init(self)
def ports_setup(self):
return GridRandomSearchNode.ports_setup(self)
def meta_setup(self):
return GridRandomSearchNode.meta_setup(self)
def conf_schema(self):
cache_key, task_graph, _ = self._compute_hash_key()
if cache_key in CACHE_SCHEMA:
return CACHE_SCHEMA[cache_key]
tensors = []
if task_graph is not None:
for task in task_graph:
if task.get('type') == 'NemoTrainNode':
conf = task.get('conf')
if ('eval_callback' in conf and
'eval_tensors' in conf['eval_callback']):
tensors = conf['eval_callback']['eval_tensors']
tensors = [t.split('@')[-1] for t in tensors]
print(tensors)
conf = GridRandomSearchNode.conf_schema(self)
json = conf.json
if 'properties' in json:
del json['properties']['metrics']
json['properties']['best'][
'properties']['metric']['enum'] = tensors
json['properties']['scheduler'] = copy.deepcopy(_SCHED_CONF)
return ConfSchema(json=json, ui=conf.ui)
def process(self, inputs):
_, task_graph, _ = self._compute_hash_key()
train_id = None
if task_graph is not None:
for task in task_graph:
if task.get('type') == 'NemoTrainNode':
conf = task.get('conf')
if ('eval_callback' in conf and
'eval_tensors' in conf['eval_callback']):
tensors = conf['eval_callback']['eval_tensors']
tensors = [t.split('@')[-1] for t in tensors]
train_id = task.get('id')
if train_id is None:
print('no train node detected')
return {}
import ray
from ray import tune
if self.INPUT_CONFIG in inputs:
self.conf.update(inputs[self.INPUT_CONFIG].data)
output = {}
if self.outport_connected(self.OUTPUT_CONFIG):
data_store = {}
for key in inputs.keys():
v = inputs[key]
if isinstance(v, cudf.DataFrame):
# it is a work around,
# the ray.put doesn't support GPU cudf
data_store[key] = ray.put(v.to_pandas())
else:
data_store[key] = ray.put(v)
# here we need to do the hyper parameter search
def search_fun(config, checkpoint_dir=None):
myinputs = {}
for key in data_store.keys():
v = ray.get(data_store[key])
if isinstance(v, pandas.DataFrame):
myinputs[key] = cudf.from_pandas(v)
else:
myinputs[key] = v
task_graph = TaskGraph.load_taskgraph(
get_file_path(self.conf['taskgraph']))
task_graph.build()
outputLists = [train_id+'.'+'checkpoint_dir']
replaceObj = {}
input_feeders = []
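# Each input node of the sub taskgraph gets rewired to an ad-hoc InputFeed
# node that serves the ray.get-rehydrated inputs, so the graph can run
# inside a Tune trial worker without the original upstream connections.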
def inputNode_fun(inputNode, in_ports):
inports = inputNode.ports_setup().inports
class InputFeed(Node):
def meta_setup(self):
output = {}
for inp in inputNode.inputs:
output[inp['to_port']] = inp[
'from_node'].meta_setup()[
inp['from_port']]
# it will be something like { input_port: columns }
return output
def ports_setup(self):
# it will be something like { input_port: types }
return NodePorts(inports={}, outports=inports)
def conf_schema(self):
return ConfSchema()
def process(self, empty):
output = {}
for key in inports.keys():
if (inputNode.uid+'@'+key
in myinputs):
output[key] = myinputs[
inputNode.uid+'@'+key]
return output
uni_id = str(uuid.uuid1())
obj = {
TaskSpecSchema.task_id: uni_id,
TaskSpecSchema.conf: {},
TaskSpecSchema.node_type: InputFeed,
TaskSpecSchema.inputs: []
}
input_feeders.append(obj)
newInputs = {}
for key in inports.keys():
if inputNode.uid+'@'+key in myinputs:
newInputs[key] = uni_id+'.'+key
for inp in inputNode.inputs:
if inp['to_port'] not in in_ports:
# need to keep the old connections
newInputs[inp['to_port']] = (
inp['from_node'].uid + '.' + inp['from_port'])
replaceObj.update({inputNode.uid: {
TaskSpecSchema.inputs: newInputs}
})
def outNode_fun(outNode, out_ports):
pass
self._make_sub_graph_connection(task_graph,
inputNode_fun, outNode_fun)
task_graph.extend(input_feeders)
self.update_conf_for_search(replaceObj, task_graph, config)
task_graph.run(outputLists, replace=replaceObj)
# metric_report = {item: result[item] for item in outputLists}
# tune.report(**metric_report)
config = {}
for para in self.conf['parameters']:
fun_name = para['search']['function']
fun = getattr(tune, fun_name)
if fun_name == 'grid_search' or fun_name == 'choice':
config[para['name']] = fun(para['search']['args'])
else:
config[para['name']] = fun(*para['search']['args'])
scheduler_instance = None
if 'scheduler' in self.conf and 'name' in self.conf['scheduler']:
import ray.tune.schedulers
sconf = self.conf['scheduler']
name = sconf['name']
scheduler = getattr(ray.tune.schedulers, name)
para = sconf['parameters']
para.update(self.conf['best'])
print(para)
scheduler_instance = scheduler(**para)
if scheduler_instance is None:
analysis = tune.run(search_fun, **self.conf['tune'],
config=config)
else:
analysis = tune.run(search_fun, **self.conf['tune'],
config=config,
scheduler=scheduler_instance)
best = analysis.get_best_config(**self.conf['best'])
print('best parameter', best)
for key in best.keys():
self.conf['context'][key]['value'] = best[key]
output[self.OUTPUT_CONFIG] = self.conf
# TODO: Fix the check point directory loading. Ray creates checkpoint
# directories under its "tune->local_dir". The directory names are
taken from the taskgraph on which the HPO is being performed.
# These checkpoint directories within ray subdirectory need to
# override or deal with the checkpoint directories that might be
# set for the taskgraph for which the HPO is being performed.
# print('NemoHyperTuneNode CONF:\n{}'.format(self.conf))
more_output = ContextCompositeNode.process(self, inputs)
output.update(more_output)
return output
| fsi-samples-main | gQuant/plugins/nemo_plugin/greenflow_nemo_plugin/nemo_util/nemoHPO.py |
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow import (
ConfSchema, PortsSpecSchema, NodePorts, MetaData)
from nemo.core.neural_types import NmTensor
import nemo
import numpy
import copy
__all__ = ["NemoTrainNode"]
class CallBack(object):
def __init__(self):
self.counter = 0
def __call__(self, global_vars):
import ray
from ray import tune
reports = {}
for key in global_vars.keys():
value = numpy.array(global_vars[key]).mean()
print('eval:', key, value)
reports[key] = value
if ray.is_initialized():
reports['training_iteration'] = self.counter
tune.report(**reports)
self.counter += 1
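# CallBack is used below as EvaluatorCallback's user_epochs_done_callback:
# it averages the accumulated eval metrics and, when Ray is initialized,
# reports them to Tune together with a monotonically increasing
# 'training_iteration' counter that schedulers such as ASHA compare on.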
class NemoTrainNode(Node):
def init(self):
self.OUTPUT_PORT_NAME = 'checkpoint_dir'
def ports_setup(self):
dy = PortsSpecSchema.dynamic
port_type = PortsSpecSchema.port_type
o_inports = {}
o_outports = {}
o_inports['input_tensor'] = {port_type: NmTensor, dy: True}
# if hasattr(self, 'inputs'):
# for inp in self.inputs:
# # TODO: Move TaskGrah rewire logic here instead of in
# # chartEngine.tsx ChartEngine._fixNeMoPorts
# o_inports[inp['from_node'].uid+'@'+inp['from_port']] = {
# port_type: NmTensor}
o_outports[self.OUTPUT_PORT_NAME] = {port_type: str}
return NodePorts(inports=o_inports, outports=o_outports)
def meta_setup(self):
required = {}
output = {}
output['axes'] = []
output['element'] = {}
output['element']['types'] = ['VoidType']
output['element']['fields'] = 'None'
output['element']['parameters'] = '{}'
required = self.get_input_meta()
required['input_tensor'] = copy.deepcopy(output)
metadata = MetaData(inports=required,
outports={self.OUTPUT_PORT_NAME: {}})
return metadata
def conf_schema(self):
json = {
"title": "NeMo Train Node",
"type": "object",
"description": "Node used to train a NeMo neural network",
"properties": {
"parameters": {
"type": "object",
"description": "parameters for train method",
"properties": {
"tensors_to_optimize": {
"type": "array",
"items": {
"type": "string",
}
},
"batches_per_step": {
"type": "number",
"default": None
},
"stop_on_nan_loss": {
"type": "boolean",
"default": False
},
"synced_batchnorm": {
"type": "boolean",
"default": False
},
"synced_batchnorm_groupsize": {
"type": "number",
"default": 0
},
"gradient_predivide": {
"type": "boolean",
"default": False
},
"amp_max_loss_scale": {
"type": "number",
"default": 16777216.0
},
"reset": {
"type": "boolean",
"default": False
}
}
},
"check_point": {
"type": "object",
"description": "parameters for checkpoint method",
"properties": {
"folder": {
"type": "string",
"description": """A path where checkpoints are to
be stored and loaded from if load_from_folder
is None"""
},
"load_from_folder": {
"type": ["string", "null"],
"description": """A path where checkpoints can be
loaded from""",
"default": None
},
"step_freq": {
"type": ["integer", "null"],
"description": """How often in terms of steps to
save checkpoints. One of step_freq or epoch_freq
is required""",
"default": None
},
"epoch_freq": {
"type": ["integer", "null"],
"description": """How often in terms of epochs to
save checkpoints. One of step_freq or epoch_freq
is required.""",
"default": None
},
"checkpoints_to_keep": {
"type": "integer",
"description": """Number of most recent
checkpoints to keep. Older checkpoints will be
deleted.""",
"default": 4
},
"force_load": {
"type": "boolean",
"description": """Whether to crash if loading
is unsuccessful.""",
"default": False
}
}
},
"simple_logger": {
"type": "object",
"description": """A simple callback that prints tensors
to screen. Its default option is to print the training
loss every 100 steps. Additional tensors can be printed
by adding them to the tensors_to_log argument.""",
"properties": {
"step_freq": {
"type": "integer",
"description": """The frequency of printing to
screen. Defaults to every 100 steps""",
"default": 100
},
"tensors_to_log": {
"type": "array",
"description": """A list of NmTensors which will
be printed every step_freq steps.""",
"items": {
"type": "string",
}
}
}
},
"eval_callback": {
"type": "object",
"description": """Used to report the statistics of
evaluation dataset""",
"properties": {
"eval_step": {
"type": ["integer", "null"],
"description": """The frequency of running eval""",
"default": None
},
"eval_tensors": {
"type": "array",
"description": """A list of NmTensors which will
be evaluated every eval_step steps.""",
"items": {
"type": "string",
}
}
}
},
"warmup_policy": {
"type": "object",
"description": """Choose a warm up policy""",
"properties": {
"name": {
"type": "string",
"enum": ["WarmupPolicy", "WarmupHoldPolicy",
"SquareAnnealing", "SquareRootAnnealing",
"CosineAnnealing", "WarmupAnnealing",
"InverseSquareRootAnnealing",
"PolynomialDecayAnnealing",
"PolynomialHoldDecayAnnealing"]
},
},
"dependencies": {
"name": {
"oneOf": [
{
"properties": {
"name": {
"type": "string",
"enum": ["WarmupPolicy"]
},
"parameters": {
"type": "object",
"properties": {
"warmup_steps": {
"type": ["integer",
"null"],
"description": """Number of
training steps in
warmup stage""",
"default": None
},
"warmup_ratio": {
"type": ["number", "null"],
"description": """Ratio of
warmup steps to total
steps""",
"default": None
},
"total_steps": {
"type": ["integer",
"null"],
"description": """Total number
of steps while training or
`None` for infinite
training""",
"default": None
}
}
},
}
},
{
"properties": {
"name": {
"type": "string",
"enum": ["WarmupHoldPolicy"]
},
"parameters": {
"type": "object",
"properties": {
"warmup_steps": {
"type": ["integer",
"null"],
"description": """Number of
training steps in
warmup stage""",
"default": None
},
"warmup_ratio": {
"type": ["number",
"null"],
"description": """Ratio of
warmup steps to total
steps""",
"default": None
},
"total_steps": {
"type": ["integer",
"null"],
"description": """Total number
of steps while training or
`None` for infinite
training""",
"default": None
},
"hold_steps": {
"type": ["integer",
"null"],
"description": """Number of
training steps to hold the
learning rate after warm
up""",
"default": None
},
"hold_ratio": {
"type": ["integer",
"null"],
"description": """Ratio of hold
steps to total steps""",
"default": None
},
"min_lr": {
"type": "number",
"description": """minimum learing
rate""",
"default": 0.0
},
}
},
}
},
{
"properties": {
"name": {
"type": "string",
"enum": ["SquareAnnealing"]
},
"parameters": {
"type": "object",
"properties": {
"warmup_steps": {
"type": ["integer",
"null"],
"description": """Number of
training steps in
warmup stage""",
"default": None
},
"warmup_ratio": {
"type": ["number", "null"],
"description": """Ratio of
warmup steps to total
steps""",
"default": None
},
"total_steps": {
"type": ["integer",
"null"],
"description": """Total number
of steps while training or
`None` for infinite
training""",
"default": None
},
"min_lr": {
"type": "number",
"description": """minimum learing
rate""",
"default": 0.00001
},
}
},
}
},
{
"properties": {
"name": {
"type": "string",
"enum": ["SquareRootAnnealing"]
},
"parameters": {
"type": "object",
"properties": {
"warmup_steps": {
"type": ["integer",
"null"],
"description": """Number of
training steps in
warmup stage""",
"default": None
},
"warmup_ratio": {
"type": ["number", "null"],
"description": """Ratio of
warmup steps to total
steps""",
"default": None
},
"total_steps": {
"type": ["integer",
"null"],
"description": """Total number
of steps while training or
`None` for infinite
training""",
"default": None
},
"min_lr": {
"type": "number",
"description": """minimum learing
rate""",
"default": 0.0
},
}
},
}
},
{
"properties": {
"name": {
"type": "string",
"enum": ["CosineAnnealing"]
},
"parameters": {
"type": "object",
"properties": {
"warmup_steps": {
"type": ["integer",
"null"],
"description": """Number of
training steps in
warmup stage""",
"default": None
},
"warmup_ratio": {
"type": ["number", "null"],
"description": """Ratio of
warmup steps to total
steps""",
"default": None
},
"total_steps": {
"type": ["integer",
"null"],
"description": """Total number
of steps while training or
`None` for infinite
training""",
"default": None
},
"min_lr": {
"type": "number",
"description": """minimum learing
rate""",
"default": 0.0
},
}
},
}
},
{
"properties": {
"name": {
"type": "string",
"enum": ["WarmupAnnealing"]
},
"parameters": {
"type": "object",
"properties": {
"warmup_steps": {
"type": ["integer",
"null"],
"description": """Number of
training steps in
warmup stage""",
"default": None
},
"warmup_ratio": {
"type": ["number", "null"],
"description": """Ratio of
warmup steps to total
steps""",
"default": None
},
"total_steps": {
"type": ["integer",
"null"],
"description": """Total number
of steps while training or
`None` for infinite
training""",
"default": None
},
}
},
}
},
{
"properties": {
"name": {
"type": "string",
"enum": [
"InverseSquareRootAnnealing"]
},
"parameters": {
"type": "object",
"properties": {
"warmup_steps": {
"type": ["integer",
"null"],
"description": """Number of
training steps in
warmup stage""",
"default": None
},
"warmup_ratio": {
"type": ["number", "null"],
"description": """Ratio of
warmup steps to total
steps""",
"default": None
},
"total_steps": {
"type": ["integer",
"null"],
"description": """Total number
of steps while training or
`None` for infinite
training""",
"default": None
},
}
},
}
},
{
"properties": {
"name": {
"type": "string",
"enum": [
"PolynomialDecayAnnealing"]
},
"parameters": {
"type": "object",
"properties": {
"warmup_steps": {
"type": ["integer",
"null"],
"description": """Number of
training steps in
warmup stage""",
"default": None
},
"warmup_ratio": {
"type": ["number", "null"],
"description": """Ratio of
warmup steps to total
steps""",
"default": None
},
"total_steps": {
"type": ["integer",
"null"],
"description": """Total number
of steps while training or
`None` for infinite
training""",
"default": None
},
"min_lr": {
"type": "number",
"description": """minimum learing
rate""",
"default": 0.0
},
"power": {
"type": "number",
"default": 1.0
},
"cycle": {
"type": "boolean",
"default": False
},
}
},
}
},
{
"properties": {
"name": {
"type": "string",
"enum": [
"PolynomialHoldDecayAnnealing"]
},
"parameters": {
"type": "object",
"properties": {
"warmup_steps": {
"type": ["integer",
"null"],
"description": """Number of
training steps in
warmup stage""",
"default": None
},
"warmup_ratio": {
"type": ["number", "null"],
"description": """Ratio of
warmup steps to total
steps""",
"default": None
},
"total_steps": {
"type": ["integer",
"null"],
"description": """Total number
of steps while training or
`None` for infinite
training""",
"default": None
},
"hold_steps": {
"type": ["integer",
"null"],
"description": """Number of
training steps to hold the
learning rate after warm
up""",
"default": None
},
"hold_ratio": {
"type": ["integer",
"null"],
"description": """Ratio of hold
steps to total steps""",
"default": None
},
"min_lr": {
"type": "number",
"description": """minimum learing
rate""",
"default": 0.0
},
"power": {
"type": "number",
"default": 1.0
},
"cycle": {
"type": "boolean",
"default": False
},
}
},
}
}
]
}
}
},
"optimizer": {
"type": "object",
"description": """The optimization algorithm""",
"properties": {
"name": {
"type": "string",
"enum": ["sgd", "adam", "fused_adam", "adam_w",
"novograd", "fused_novograd",
"fused_lamb"]
},
},
"dependencies": {
"name": {
"oneOf": [
{
"properties": {
"name": {
"type": "string",
"enum": ["sgd"]
},
"parameters": {
"type": "object",
"properties": {
"num_epochs": {
"type": "integer",
"default": 10
},
"lr": {
"type": "number",
"default": 0.001
},
"momentum": {
"type": "number",
"default": 0.9
},
"weight_decay": {
"type": "number",
"default": 0.0
},
}
},
}
},
{
"properties": {
"name": {
"type": "string",
"enum": ["adam"]
},
"parameters": {
"type": "object",
"properties": {
"num_epochs": {
"type": "integer",
"default": 10
},
"lr": {
"type": "number",
"default": 0.001
},
"betas": {
"type": "array",
"items": [
{
"type": "number",
"default": 0.9
},
{
"type": "number",
"default": 0.999
}
]
},
"eps": {
"type": "number",
"default": 0.000000001
},
"weight_decay": {
"type": "number",
"default": 0.0
},
"amsgrad": {
"type": "boolean",
"default": False
},
}
},
}
},
{
"properties": {
"name": {
"type": "string",
"enum": ["fused_adam"]
},
"parameters": {
"type": "object",
"properties": {
"num_epochs": {
"type": "integer",
"default": 10
},
"lr": {
"type": "number",
"default": 0.001
},
"betas": {
"type": "array",
"items": [
{
"type": "number",
"default": 0.9
},
{
"type": "number",
"default": 0.999
}
]
},
"eps": {
"type": "number",
"default": 0.00000001,
},
"weight_decay": {
"type": "number",
"default": 0.0
}
}
},
}
},
{
"properties": {
"name": {
"type": "string",
"enum": ["adam_w"]
},
"parameters": {
"type": "object",
"properties": {
"num_epochs": {
"type": "integer",
"default": 10
},
"lr": {
"type": "number",
"default": 0.001
},
"betas": {
"type": "array",
"items": [
{
"type": "number",
"default": 0.9
},
{
"type": "number",
"default": 0.999
}
]
},
"eps": {
"type": "number",
"default": 0.00000001,
},
"weight_decay": {
"type": "number",
"default": 0.0
},
"amsgrad": {
"type": "boolean",
"default": False
},
}
},
}
},
{
"properties": {
"name": {
"type": "string",
"enum": ["novograd"]
},
"parameters": {
"type": "object",
"properties": {
"num_epochs": {
"type": "integer",
"default": 10
},
"lr": {
"type": "number",
"default": 0.001
},
"betas": {
"type": "array",
"items": [
{
"type": "number",
"default": 0.9
},
{
"type": "number",
"default": 0.999
}
]
},
"luc": {
"type": "boolean",
"default": False
},
"luc_eta": {
"type": "number",
"default": 0.001
},
"weight_decay": {
"type": "number",
"default": 0.0
},
}
},
}
},
{
"properties": {
"name": {
"type": "string",
"enum": ["fused_novograd"]
},
"parameters": {
"type": "object",
"properties": {
"num_epochs": {
"type": "integer",
"default": 10
},
"lr": {
"type": "number",
"default": 0.001
},
"betas": {
"type": "array",
"items": [
{
"type": "number",
"default": 0.9
},
{
"type": "number",
"default": 0.999
}
]
},
"reg_inside_moment": {
"type": "boolean",
"default": True
},
"grad_averaging": {
"type": "boolean",
"default": False
},
"weight_decay": {
"type": "number",
"default": 0.0
},
}
},
}
},
{
"properties": {
"name": {
"type": "string",
"enum": ["fused_lamb"]
},
"parameters": {
"type": "object",
"properties": {
"num_epochs": {
"type": "integer",
"default": 10
},
"lr": {
"type": "number",
"default": 0.001
},
}
},
}
}
]
}
}
}
}
}
ui = {
"check_point": {
"folder": {"ui:widget": "PathSelector"},
"load_from_folder": {"ui:widget": "PathSelector"},
},
"warmup_policy": {
"parameters": {
"warmup_steps": {"ui:widget": "updown"},
"total_steps": {"ui:widget": "updown"},
"warmup_ratio": {"ui:widget": "updown"},
"hold_steps": {"ui:widget": "updown"},
"hold_ratio": {"ui:widget": "updown"}
},
},
"eval_callback": {
"eval_step": {"ui:widget": "updown"},
}
}
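# Populate the tensor-selection enums from the node's connected inputs:
# values use the internal 'uid@port' key while enumNames show 'uid.port'.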
enum = []
enumNames = []
count = 1
if hasattr(self, 'inputs'):
for i in self.inputs:
enum.append(i['from_node'].uid+'@'+i['from_port'])
enumNames.append(i['from_node'].uid+'.'+i['from_port'])
count += 1
json['properties']['parameters'][
'properties']["tensors_to_optimize"][
'items']['enum'] = enum
json['properties']['parameters'][
'properties']["tensors_to_optimize"][
'items']['enumNames'] = enumNames
json['properties']['simple_logger'][
'properties']["tensors_to_log"][
'items']['enum'] = enum
json['properties']['simple_logger'][
'properties']["tensors_to_log"][
'items']['enumNames'] = enumNames
json['properties']['eval_callback'][
'properties']["eval_tensors"][
'items']['enum'] = enum
json['properties']['eval_callback'][
'properties']["eval_tensors"][
'items']['enumNames'] = enumNames
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
nf = nemo.core.NeuralModuleFactory.get_default_factory()
log_conf = copy.copy(self.conf["simple_logger"])
log_conf['tensors_to_log'] = [
inputs[i] for i in log_conf['tensors_to_log']]
log_callback = nemo.core.SimpleLogger(**log_conf)
check_callback = nemo.core.CheckpointCallback(
**self.conf['check_point'])
all_args = copy.copy(self.conf['parameters'])
all_args['tensors_to_optimize'] = [
inputs[i] for i in all_args['tensors_to_optimize']]
# eval callbacks
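# eval_iter_callback accumulates, per evaluation batch, the mean of every
# tensor whose unique name starts with a watched eval tensor name; the
# CallBack instance then averages and reports them when the epoch is done.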
def eval_iter_callback(tensors, global_vars):
for e_name in eval_names:
if e_name not in global_vars:
global_vars[e_name] = []
for e_name in eval_names:
for kv, v in tensors.items():
if kv.startswith(e_name):
global_vars[e_name].append(v[0].cpu().numpy().mean())
all_args['callbacks'] = [check_callback, log_callback]
if ('eval_callback' in self.conf and
'eval_tensors' in self.conf['eval_callback'] and
len(self.conf['eval_callback']['eval_tensors']) > 0):
eval_conf = copy.copy(self.conf['eval_callback'])
eval_conf['eval_tensors'] = [
inputs[i] for i in eval_conf['eval_tensors']]
eval_names = [i.name for i in eval_conf['eval_tensors']]
eval_conf['user_iter_callback'] = eval_iter_callback
eval_conf['user_epochs_done_callback'] = CallBack()
eval_callback = nemo.core.EvaluatorCallback(**eval_conf)
all_args['callbacks'].append(eval_callback)
all_args['optimizer'] = self.conf['optimizer']['name']
all_args['optimization_params'] = self.conf['optimizer']['parameters']
if 'warmup_policy' in self.conf and 'name' in self.conf[
"warmup_policy"]:
policy_name = self.conf["warmup_policy"]['name']
from nemo.utils import lr_policies
policy_class = getattr(lr_policies, policy_name)
lr_policy = policy_class(
**self.conf["warmup_policy"]['parameters'])
all_args['lr_policy'] = lr_policy
nf.train(**all_args)
log_directory = ''
if (('step_freq' in self.conf['check_point']
and self.conf['check_point']['step_freq'] is not None)
or ('epoch_freq' in self.conf['check_point'] and
self.conf['check_point']['epoch_freq'] is not None)):
log_directory = self.conf['check_point']['folder']
return {
self.OUTPUT_PORT_NAME: log_directory,
}
| fsi-samples-main | gQuant/plugins/nemo_plugin/greenflow_nemo_plugin/nemo_util/trainNemo.py |
# from .toy import TaylorNetNode
# from .toy import MSELossNode
# from .toy import RealFunctionDataNode
from .trainNemo import NemoTrainNode
from .inferNemo import NemoInferNode
from .nemoHPO import NemoHyperTuneNode
__all__ = ["NemoTrainNode", "NemoInferNode", "NemoHyperTuneNode"]
| fsi-samples-main | gQuant/plugins/nemo_plugin/greenflow_nemo_plugin/nemo_util/__init__.py |
from pathlib import Path
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow import (
ConfSchema, PortsSpecSchema, NodePorts, MetaData)
from nemo.core.neural_types import NmTensor
import nemo
import copy
__all__ = ["NemoInferNode"]
def _isempty(pp):
'''pp is pathlib Path
:type pp: Path
'''
if not pp.is_dir():
return True
try:
next(pp.rglob('*'))
except StopIteration:
return True
return False
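# Usage sketch (illustrative paths): _isempty(Path('/no/such/dir')) is
# True, and a directory containing at least one entry anywhere below it
# yields False.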
class NemoInferNode(Node):
def init(self):
self.OUTPUT_PORT_NAME = 'torch_tensor'
self.INPUT_PORT_NAME = 'log_dir'
def ports_setup(self):
port_type = PortsSpecSchema.port_type
dy = PortsSpecSchema.dynamic
o_inports = {}
o_inports[self.INPUT_PORT_NAME] = {port_type: str}
o_inports['input_tensor'] = {port_type: NmTensor, dy: True}
# if hasattr(self, 'inputs'):
# for inp in self.inputs:
# if inp['to_port'] in (self.INPUT_PORT_NAME,):
# continue
# # TODO: Move TaskGrah rewire logic here instead of in
# # chartEngine.tsx ChartEngine._fixNeMoPorts
# o_inports[inp['from_node'].uid+'@'+inp['from_port']] = {
# port_type: NmTensor}
o_outports = {}
o_outports[self.OUTPUT_PORT_NAME] = {port_type: list}
return NodePorts(inports=o_inports, outports=o_outports)
def meta_setup(self):
required = {}
output = {}
output['axes'] = []
output['element'] = {}
output['element']['types'] = ['VoidType']
output['element']['fields'] = 'None'
output['element']['parameters'] = '{}'
required = self.get_input_meta()
required['input_tensor'] = copy.deepcopy(output)
metadata = MetaData(inports=required,
outports={self.OUTPUT_PORT_NAME: {}})
return metadata
def conf_schema(self):
json = {
"title": "NeMo Infer Node",
"type": "object",
"description": """Node used to run NeMo neural network inference
to obtain values for tensors""",
"properties": {
"tensors": {
"type": "array",
"description": """List of NeMo tensors
that we want to get values of""",
"items": {
"type": "string",
}
},
"checkpoint_dir": {
"type": ["string", "null"],
"description": """Path to checkpoint directory.
Default is None which does not load checkpoints.""",
"default": None
},
"ckpt_pattern": {
"type": "string",
"description": """Pattern used to check for checkpoints inside
checkpoint_dir. Default is '' which matches any checkpoints
inside checkpoint_dir.""",
"default": '',
},
"verbose": {
"type": "boolean",
"description": """Controls printing.""",
"default": True
},
"cache": {
"type": "boolean",
"description": """If True, cache all `tensors` and intermediate
tensors so that future calls that have use_cache set will
avoid computation.""",
"default": False
},
"use_cache": {
"type": "boolean",
"description": """Values from `tensors` will be always re-computed.
It will re-use intermediate tensors from the DAG leading to
`tensors`. If you want something to be re-computed, put it into
`tensors` list.""",
"default": False
},
"offload_to_cpu": {
"type": "boolean",
"description": """If True, all evaluated tensors are moved to
cpu memory after each inference batch.""",
"default": True
}
}
}
ui = {
"checkpoint_dir": {"ui:widget": "PathSelector"},
}
enum = []
enumNames = []
count = 1
if hasattr(self, 'inputs'):
for i in self.inputs:
if i['to_port'] in (self.INPUT_PORT_NAME,):
continue
enum.append(i['from_node'].uid+'@'+i['from_port'])
enumNames.append(i['from_node'].uid+'.'+i['from_port'])
count += 1
json['properties']["tensors"]['items']['enum'] = enum
json['properties']["tensors"]['items']['enumNames'] = enumNames
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
nf = nemo.core.NeuralModuleFactory.get_default_factory()
conf = copy.copy(self.conf)
log_dir = inputs.get(self.INPUT_PORT_NAME, conf['checkpoint_dir'])
        if log_dir is not None and not _isempty(Path(log_dir)):
conf['checkpoint_dir'] = log_dir
conf['tensors'] = [inputs[i] for i in conf['tensors']]
result = nf.infer(**conf)
return {self.OUTPUT_PORT_NAME: result}
| fsi-samples-main | gQuant/plugins/nemo_plugin/greenflow_nemo_plugin/nemo_util/inferNemo.py |
from setuptools import setup, find_packages
setup(
name='example_plugin',
packages=find_packages(include=['example']),
entry_points={
'greenflow.plugin': [
'custom_nodes = example',
],
}
)
| fsi-samples-main | gQuant/plugins/simple_example/setup.py |
display_fun = """
const columnKeys = Object.keys(metaObj);
let header = '';
if (columnKeys.length > 0) {
header += '<table>';
header += '<tr>';
header += '<th>Column Name</th>';
for (let i = 0; i < columnKeys.length; i++) {
header += `<th>${columnKeys[i]}</th>`;
}
header += '</tr>';
header += '<tr>';
header += '<th>Type</th>';
for (let i = 0; i < columnKeys.length; i++) {
header += `<td>${metaObj[columnKeys[i]]}</td>`;
}
header += '</tr>';
header += '</table>';
}
return header;
"""
validation = {}
display = {}
display['pandas.core.frame.DataFrame'] = display_fun
| fsi-samples-main | gQuant/plugins/simple_example/example/client.py |
from .distanceNode import DistanceNode
from .pointNode import PointNode
import pandas as pd
import numpy as np
from .client import validation, display  # noqa: F401
from greenflow.dataframe_flow._node_flow import register_validator
from greenflow.dataframe_flow._node_flow import register_copy_function
def _validate_df(df_to_val, ref_cols, obj):
'''Validate a pandas DataFrame.
:param df_to_val: A dataframe typically of type pd.DataFrame
:param ref_cols: Dictionary of column names and their expected types.
:returns: True or False based on matching all columns in the df_to_val
and columns spec in ref_cols.
:raises: Exception - Raised when invalid dataframe length or unexpected
number of columns. TODO: Create a ValidationError subclass.
'''
if (isinstance(df_to_val, pd.DataFrame) and len(df_to_val) == 0):
err_msg = 'Node "{}" produced empty output'.format(obj.uid)
raise Exception(err_msg)
if not isinstance(df_to_val, pd.DataFrame):
return True
i_cols = df_to_val.columns
if len(i_cols) != len(ref_cols):
print("expect %d columns, only see %d columns"
% (len(ref_cols), len(i_cols)))
print("ref:", ref_cols)
print("columns", i_cols)
raise Exception("not valid for node %s" % (obj.uid))
for col in ref_cols.keys():
if col not in i_cols:
print("error for node %s, column %s is not in the required "
"output df" % (obj.uid, col))
return False
if ref_cols[col] is None:
continue
err_msg = "for node {} type {}, column {} type {} "\
"does not match expected type {}".format(
obj.uid, type(obj), col, df_to_val[col].dtype,
ref_cols[col])
if ref_cols[col] == 'category':
# comparing pandas.core.dtypes.dtypes.CategoricalDtype to
# numpy.dtype causes TypeError. Instead, let's compare
# after converting all types to their string representation
# d_type_tuple = (pd.core.dtypes.dtypes.CategoricalDtype(),)
d_type_tuple = (str(pd.CategoricalDtype()),)
elif ref_cols[col] == 'date':
# Cudf read_csv doesn't understand 'datetime64[ms]' even
# though it reads the data in as 'datetime64[ms]', but
# expects 'date' as dtype specified passed to read_csv.
d_type_tuple = ('datetime64[ms]', 'date', 'datetime64[ns]')
else:
d_type_tuple = (str(np.dtype(ref_cols[col])),)
if (str(df_to_val[col].dtype) not in d_type_tuple):
print("ERROR: {}".format(err_msg))
# Maybe raise an exception here and have the caller
# try/except the validation routine.
return False
return True
def copy_df(df_obj):
return df_obj.copy(deep=False)
register_validator(pd.DataFrame, _validate_df)
register_copy_function(pd.DataFrame, copy_df)
| fsi-samples-main | gQuant/plugins/simple_example/example/__init__.py |
import numpy as np
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow import PortsSpecSchema
from greenflow.dataframe_flow import ConfSchema
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
class DistanceNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.delayed_process = True
port_type = PortsSpecSchema.port_type
self.INPUT_PORT_NAME = "points_df_in"
self.OUTPUT_PORT_NAME = "distance_df"
self.ABS_OUTPUT_PORT_NAME = "distance_abs_df"
port_inports = {
self.INPUT_PORT_NAME: {
port_type: ["pandas.DataFrame"]
}
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:points_df_in}"
},
self.ABS_OUTPUT_PORT_NAME: {
port_type: "${port:points_df_in}"
},
}
req_cols = {
'x': 'float64',
'y': 'float64'
}
meta_inports = {
self.INPUT_PORT_NAME: req_cols
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_ADDITION,
MetaDataSchema.META_REF_INPUT: self.INPUT_PORT_NAME,
MetaDataSchema.META_DATA: {
'distance_cudf': 'float64',
}
},
self.ABS_OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_ADDITION,
MetaDataSchema.META_REF_INPUT: self.INPUT_PORT_NAME,
MetaDataSchema.META_DATA: {
'distance_abs_cudf': 'float64',
}
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
return ConfSchema()
def process(self, inputs):
df = inputs[self.INPUT_PORT_NAME]
output = {}
if self.outport_connected(self.OUTPUT_PORT_NAME):
copy_df = df.copy()
copy_df['distance_cudf'] = np.sqrt((df['x'] ** 2 + df['y'] ** 2))
output.update({self.OUTPUT_PORT_NAME: copy_df})
if self.outport_connected(self.ABS_OUTPUT_PORT_NAME):
copy_df = df.copy()
copy_df['distance_abs_cudf'] = np.abs(df['x']) + np.abs(df['y'])
output.update({self.ABS_OUTPUT_PORT_NAME: copy_df})
return output
| fsi-samples-main | gQuant/plugins/simple_example/example/distanceNode.py |
import numpy as np
import pandas as pd
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow import PortsSpecSchema
from greenflow.dataframe_flow import ConfSchema
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
class PointNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.OUTPUT_PORT_NAME = 'points_df_out'
port_inports = {}
port_outports = {
self.OUTPUT_PORT_NAME: {
PortsSpecSchema.port_type: ["pandas.DataFrame"]
}
}
meta_inports = {}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
MetaDataSchema.META_DATA: {
'x': 'float64',
'y': 'float64'
}
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "PointNode configure",
"type": "object",
"properties": {
"npts": {
"type": "number",
"description": "number of data points",
"minimum": 10
}
},
"required": ["npts"],
}
ui = {
"npts": {"ui:widget": "updown"}
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
npts = self.conf['npts']
df = pd.DataFrame()
df['x'] = np.random.rand(npts)
df['y'] = np.random.rand(npts)
output = {}
if self.outport_connected(self.OUTPUT_PORT_NAME):
output.update({self.OUTPUT_PORT_NAME: df})
return output
| fsi-samples-main | gQuant/plugins/simple_example/example/pointNode.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
from setuptools import setup, find_packages
setup(
name='greenflow_hrp_plugin',
install_requires=[
"matplotlib", "shap"
],
packages=find_packages(include=['greenflow_hrp_plugin']),
entry_points={
'greenflow.plugin': [
'investment_nodes = greenflow_hrp_plugin',
],
}
)
| fsi-samples-main | gQuant/plugins/hrp_plugin/setup.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
python -m unittest tests/unit/test_hrp_weight.py -v
'''
import unittest
import cupy
from greenflow_hrp_plugin.kernels import HRP_weights
import numpy as np
import pandas as pd
def compute_HRP_weights(covariances, res_order):
weights = pd.Series(1, index=res_order)
clustered_alphas = [res_order]
while len(clustered_alphas) > 0:
clustered_alphas = [cluster[start:end] for cluster in clustered_alphas
for start, end in ((0, len(cluster) // 2),
(len(cluster) // 2, len(cluster)))
if len(cluster) > 1]
for subcluster in range(0, len(clustered_alphas), 2):
left_cluster = clustered_alphas[subcluster]
right_cluster = clustered_alphas[subcluster + 1]
left_subcovar = covariances[left_cluster, :][:, left_cluster]
inv_diag = 1 / cupy.diag(left_subcovar)
parity_w = inv_diag * (1 / cupy.sum(inv_diag))
left_cluster_var = cupy.dot(
parity_w, cupy.dot(left_subcovar, parity_w))
right_subcovar = covariances[right_cluster, :][:, right_cluster]
inv_diag = 1 / cupy.diag(right_subcovar)
parity_w = inv_diag * (1 / cupy.sum(inv_diag))
right_cluster_var = cupy.dot(
parity_w, cupy.dot(right_subcovar, parity_w))
alloc_factor = 1 - left_cluster_var / \
(left_cluster_var + right_cluster_var)
weights[left_cluster] *= alloc_factor.item()
weights[right_cluster] *= 1 - alloc_factor.item()
return weights
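# Worked example of the recursive bisection above (illustration only): with
# two assets [a, b], clustered_alphas splits into [a] and [b]; each cluster
# variance reduces to the diagonal entry, so the left allocation factor is
# var_b / (var_a + var_b) -- the lower-variance asset receives the larger
# weight, which is the defining property of HRP's inverse-variance split.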
class TestHRPWeight(unittest.TestCase):
def setUp(self):
self.assets = 10
self.samples = 5
self.numbers = 30
seq = 100
cupy.random.seed(10)
self.cov_matrix = cupy.zeros(
(self.samples, self.numbers, self.assets, self.assets))
self.order_matrix = cupy.random.randint(
0, self.assets, (self.samples, self.numbers, self.assets))
for i in range(self.samples):
for j in range(self.numbers):
cov = cupy.cov(cupy.random.rand(self.assets, seq))
self.cov_matrix[i, j] = cov
order = cupy.arange(self.assets)
cupy.random.shuffle(order)
self.order_matrix[i, j] = order
def test_order(self):
num_months = self.numbers
total_samples = self.samples
assets = self.assets
number_of_threads = 1
number_of_blocks = num_months * total_samples
weights = cupy.ones((total_samples, num_months, assets))
HRP_weights[(number_of_blocks,), (number_of_threads,)](
weights,
self.cov_matrix,
self.order_matrix,
assets,
num_months)
for i in range(self.samples):
for j in range(self.numbers):
cpu_weights = compute_HRP_weights(
self.cov_matrix[i][j], self.order_matrix[i][j].get())
cpu_weights = cpu_weights[range(self.assets)].values
self.assertTrue(np.allclose(cpu_weights, weights[i][j].get()))
| fsi-samples-main | gQuant/plugins/hrp_plugin/tests/unit/test_hrp_weight.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
python -m unittest tests/unit/test_distance.py -v
'''
import unittest
import pandas as pd
import cupy
import cudf
from greenflow_hrp_plugin.kernels import _get_log_return_matrix
from greenflow_hrp_plugin.kernels import _get_month_start_pos
from greenflow_hrp_plugin.kernels import compute_cov, MAX_YEARS
from scipy.spatial.distance import squareform
import math
class TestDistance(unittest.TestCase):
def create_df(self):
date_df = cudf.DataFrame()
date_df['date'] = pd.date_range('1/1/1990', '12/31/1991', freq='B')
full_df = cudf.concat([date_df, date_df])
sample_id = cupy.repeat(cupy.arange(2), len(date_df))
full_df['sample_id'] = sample_id
full_df['year'] = full_df['date'].dt.year
full_df['month'] = full_df['date'].dt.month-1
cupy.random.seed(3)
full_df[0] = cupy.random.rand(len(full_df))
full_df[1] = cupy.random.rand(len(full_df))
full_df[2] = cupy.random.rand(len(full_df))
return full_df
def setUp(self):
self.df = self.create_df()
def test_months_start(self):
log_return = self.df
first_sample = log_return['sample_id'].min().item()
all_dates = log_return[first_sample == log_return['sample_id']]['date']
months_start = _get_month_start_pos(all_dates)
print(type(months_start))
self.assertTrue(months_start[0].item() == 0)
for i in range(1, len(months_start)):
start_day_month = log_return.iloc[months_start[i].item(
)]['date'].dt.month
last_day_month = log_return.iloc[(
months_start[i].item()-1)]['date'].dt.month
diff = start_day_month.values[0] - last_day_month.values[0]
self.assertTrue(abs(diff) != 0)
def test_distance(self):
total_samples = 2
window = 6
log_return = self.df
first_sample = log_return['sample_id'].min().item()
all_dates = log_return[first_sample == log_return['sample_id']]['date']
months_start = _get_month_start_pos(all_dates)
log_return_ma = _get_log_return_matrix(total_samples, log_return)
_, assets, timelen = log_return_ma.shape
number_of_threads = 256
num_months = len(months_start) - window
number_of_blocks = num_months * total_samples
means = cupy.zeros((total_samples, num_months, assets))
cov = cupy.zeros((total_samples, num_months, assets, assets))
distance = cupy.zeros(
(total_samples, num_months, (assets - 1) * assets // 2))
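        # Numba CUDA launch configuration: [grid, block, stream, dynamic
        # shared memory in bytes] -- one block per (sample, month) window,
        # with 256 * MAX_YEARS doubles of shared scratch space.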
compute_cov[(number_of_blocks, ), (number_of_threads, ), 0,
256 * MAX_YEARS * 8](means, cov, distance, log_return_ma,
months_start, num_months, assets,
timelen, window)
print('return shape', log_return_ma.shape)
num = 0
for sample in range(2):
for num in range(num_months):
truth = (
log_return_ma[sample, :, months_start[num]:months_start[
num + window]].mean(axis=1))
compute = means[sample][num]
self.assertTrue(cupy.allclose(compute, truth))
for sample in range(2):
for num in range(num_months):
s = log_return_ma[sample, :, months_start[num]:months_start[
num + window]]
truth = (cupy.cov(s, bias=True))
compute = cov[sample][num]
self.assertTrue(cupy.allclose(compute, truth))
for sample in range(2):
for num in range(num_months):
cov_m = cov[sample][num]
corr_m = cov_m.copy()
for i in range(3):
for j in range(3):
corr_m[i, j] = corr_m[i, j] / \
math.sqrt(cov_m[i, i] * cov_m[j, j])
dis = cupy.sqrt((1.0 - corr_m)/2.0)
res = cupy.zeros_like(dis)
for i in range(3):
for j in range(3):
res[i, j] = cupy.sqrt(
((dis[i, :] - dis[j, :])**2).sum())
truth = (squareform(res.get()))
compute = distance[sample][num]
self.assertTrue(cupy.allclose(compute, truth))
| fsi-samples-main | gQuant/plugins/hrp_plugin/tests/unit/test_distance.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
python -m unittest tests/unit/test_max_drawdown.py -v
'''
import unittest
import pandas as pd
import cupy
import numpy as np
import cudf
from greenflow_hrp_plugin.kernels import _get_log_return_matrix
from greenflow_hrp_plugin.kernels import _get_month_start_pos
from greenflow_hrp_plugin.kernels import drawdown_kernel
class TestMaxDrawdown(unittest.TestCase):
def create_df(self):
date_df = cudf.DataFrame()
date_df['date'] = pd.date_range('1/1/1990', '12/31/1992', freq='B')
full_df = cudf.concat([date_df, date_df])
sample_id = cupy.repeat(cupy.arange(2), len(date_df))
full_df['sample_id'] = sample_id
full_df['year'] = full_df['date'].dt.year
full_df['month'] = full_df['date'].dt.month-1
cupy.random.seed(3)
full_df[0] = cupy.random.normal(0, 0.02, len(full_df))
full_df[1] = cupy.random.normal(0, 0.02, len(full_df))
full_df[2] = cupy.random.normal(0, 0.02, len(full_df))
return full_df
def setUp(self):
self.df = self.create_df()
def compute_drawdown(self, times):
cumsum = np.cumsum(times)
cumsum = np.exp(cumsum)
maxreturn = np.maximum.accumulate(np.concatenate([np.array([1.0]),
cumsum]))[1:]
drawdown = cumsum/maxreturn - 1.0
return -drawdown.min()
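    # Worked example (illustration only): for daily log-returns
    # [0.1, -0.3, 0.2] the equity curve exp(cumsum) is roughly
    # [1.105, 0.819, 1.0] and the running peak is 1.105 throughout,
    # so the maximum drawdown is 1 - 0.819 / 1.105, about 0.259.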
def test_max_drawdown(self):
total_samples = 2
window = 12
log_return = self.df
first_sample = log_return['sample_id'].min().item()
all_dates = log_return[first_sample == log_return['sample_id']]['date']
all_dates = all_dates.reset_index(drop=True)
months_start = _get_month_start_pos(all_dates)
log_return_ma = _get_log_return_matrix(total_samples, log_return)
_, assets, timelen = log_return_ma.shape
number_of_threads = 128
num_months = len(months_start) - window
number_of_blocks = num_months * total_samples
drawdown = cupy.zeros((total_samples, num_months, assets))
drawdown_kernel[(number_of_blocks, ),
(number_of_threads, )](drawdown, log_return_ma,
months_start, window)
for s in range(total_samples):
for a in range(assets):
for i in range(num_months):
gpu_drawdown = drawdown[s][i][a]
cpu_drawdown = self.compute_drawdown(
log_return_ma[s][a][
months_start[i]:months_start[i+window]].get())
self.assertTrue(cupy.allclose(gpu_drawdown,
cpu_drawdown))
| fsi-samples-main | gQuant/plugins/hrp_plugin/tests/unit/test_max_drawdown.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
| fsi-samples-main | gQuant/plugins/hrp_plugin/tests/unit/__init__.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
python -m unittest tests/unit/test_leverage.py -v
'''
import unittest
import pandas as pd
import cupy
import cudf
from greenflow_hrp_plugin.kernels import _get_log_return_matrix
from greenflow_hrp_plugin.kernels import _get_month_start_pos
from greenflow_hrp_plugin.kernels import leverage_for_target_vol, MAX_YEARS
import math
class TestLeverage(unittest.TestCase):
def create_df(self):
date_df = cudf.DataFrame()
date_df['date'] = pd.date_range('1/1/1990', '12/31/1991', freq='B')
full_df = cudf.concat([date_df, date_df])
sample_id = cupy.repeat(cupy.arange(2), len(date_df))
full_df['sample_id'] = sample_id
full_df['year'] = full_df['date'].dt.year
full_df['month'] = full_df['date'].dt.month-1
cupy.random.seed(3)
full_df['portfolio'] = cupy.random.rand(len(full_df))
return full_df
def setUp(self):
self.df = self.create_df()
def test_months_start(self):
log_return = self.df
first_sample = log_return['sample_id'].min().item()
all_dates = log_return[first_sample == log_return['sample_id']]['date']
months_start = _get_month_start_pos(all_dates)
self.assertTrue(months_start[0].item() == 0)
for i in range(1, len(months_start)):
start_day_month = log_return.iloc[months_start[i].item(
)]['date'].dt.month
last_day_month = log_return.iloc[(
months_start[i].item()-1)]['date'].dt.month
diff = start_day_month.values[0] - last_day_month.values[0]
self.assertTrue(abs(diff) != 0)
    def test_leverage(self):
total_samples = 2
# window = 3
long_window = 59
short_window = 19
target_vol = 0.05
log_return = self.df
first_sample = log_return['sample_id'].min().item()
all_dates = log_return[first_sample == log_return['sample_id']]['date']
all_dates = all_dates.reset_index(drop=True)
months_start = _get_month_start_pos(all_dates)
for window in range(len(months_start)):
if (months_start[window] - long_window) > 0:
break
print(window)
print('offset', months_start[window] - long_window)
port_return_ma = log_return['portfolio'].values.reshape(
total_samples, -1)
number_of_threads = 256
num_months = len(months_start) - window
        if num_months == 0:  # no full window available; use all the data
num_months = 1
number_of_blocks = num_months * total_samples
leverage = cupy.zeros((total_samples, num_months))
leverage_for_target_vol[(number_of_blocks, ), (number_of_threads, ), 0,
256 * MAX_YEARS * 8](leverage, port_return_ma,
months_start, num_months,
window,
long_window, short_window,
target_vol)
for sample in range(2):
for num in range(num_months):
end_id = months_start[num + window]
mean = port_return_ma[sample,
end_id - long_window:end_id].mean()
sd_long = cupy.sqrt(
((port_return_ma[sample, end_id - long_window:end_id] -
mean)**2).mean())
# print('long', sd_long)
mean = (port_return_ma[sample,
end_id - short_window:end_id].mean())
sd_short = cupy.sqrt(
((port_return_ma[sample, end_id - short_window:end_id] -
mean)**2).mean())
# print('sort', sd_short)
max_sd = max(sd_long, sd_short)
lev = target_vol / (max_sd * math.sqrt(252))
# print(lev)
# print(leverage[sample, num], lev-leverage[sample, num])
# compute = means[sample][num]
self.assertTrue(cupy.allclose(leverage[sample, num], lev))
| fsi-samples-main | gQuant/plugins/hrp_plugin/tests/unit/test_leverage.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
python -m unittest tests/unit/test_bootstrap.py -v
'''
import unittest
import cupy
from greenflow_hrp_plugin.kernels import boot_strap
class TestBootstrap(unittest.TestCase):
def setUp(self):
pass
def test_bootstrap(self):
number_samples = 2
block_size = 2
number_of_threads = 256
length, assets = (6, 2)
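        # Block bootstrap: the kernel resamples contiguous blocks of
        # ``block_size`` rows from the reference series, the idea being to
        # preserve short-range autocorrelation in each synthetic path.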
ref = cupy.array([[1.0, 2.0], [2.0, 3.0], [3.0, 4.0], [4.0, 5.0],
[5.0, 6.0]])
output = cupy.zeros((number_samples, assets, length)) # output results
num_positions = (
length - 2
) // block_size + 1
# number of positions to sample to cover the whole seq length
# sample starting position, exclusive
sample_range = length - block_size
print('price_len', length, 'sample range', sample_range)
sample_positions = cupy.array([0, 1, 2, 3, 2, 1])
number_of_blocks = len(sample_positions)
boot_strap[(number_of_blocks,), (number_of_threads,)](
output,
ref.T,
block_size,
num_positions,
sample_positions)
truth0 = cupy.array([[0., 1., 2., 2., 3., 3.],
[0., 2., 3., 3., 4., 4.]])
truth1 = cupy.array([[0., 4., 5., 3., 4., 2.],
[0., 5., 6., 4., 5., 3.]])
self.assertTrue(cupy.allclose(truth0, output[0]))
self.assertTrue(cupy.allclose(truth1, output[1]))
print(output)
| fsi-samples-main | gQuant/plugins/hrp_plugin/tests/unit/test_bootstrap.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
python -m unittest tests/unit/test_order.py -v
'''
import unittest
import cupy
from greenflow_hrp_plugin.kernels import single_linkage
from scipy.spatial.distance import squareform
from scipy.cluster.hierarchy import linkage
import numpy as np
def seriation(Z, N, cur_index):
"""Returns the order implied by a hierarchical tree (dendrogram).
:param Z: A hierarchical tree (dendrogram).
:param N: The number of points given to the clustering process.
:param cur_index: The position in the tree for the recursive traversal.
:return: The order implied by the hierarchical tree Z.
"""
if cur_index < N:
return [cur_index]
else:
left = int(Z[cur_index - N, 0])
right = int(Z[cur_index - N, 1])
return (seriation(Z, N, left) + seriation(Z, N, right))
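# Worked example (illustration only): for N = 3 points whose linkage matrix
# Z first merges points 0 and 1 into cluster 3 and then merges cluster 3
# with point 2 into cluster 4, seriation(Z, 3, 4) recurses into (3, 2) and
# returns [0, 1, 2] -- the left-to-right leaf order of the dendrogram.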
class TestOrder(unittest.TestCase):
def setUp(self):
self.assets = 10
self.samples = 5
self.numbers = 30
seq = 100
self.distance = cupy.zeros(
(self.samples, self.numbers, self.assets * (self.assets-1) // 2))
cupy.random.seed(10)
for i in range(self.samples):
for j in range(self.numbers):
cov = cupy.cov(cupy.random.rand(self.assets, seq))
dia = cupy.diag(cov)
corr = cov / cupy.sqrt(cupy.outer(dia, dia))
dist = (1.0 - corr) / 2.0
self.distance[i, j] = cupy.array(squareform(dist.get()))
def test_order(self):
num_months = self.numbers
total_samples = self.samples
assets = self.assets
number_of_threads = 1
number_of_blocks = num_months * total_samples
output = cupy.zeros((total_samples, num_months, assets-1, 3))
orders = cupy.zeros(
(total_samples, num_months, assets), dtype=cupy.int64)
single_linkage[(number_of_blocks,), (number_of_threads,)](
output,
orders,
self.distance,
num_months, assets)
for i in range(self.samples):
for j in range(self.numbers):
                gpu_order = orders[i][j]
                gpu_linkage = output[i][j]
                cpu_linkage = linkage(self.distance[i][j].get())
cpu_order = seriation(cpu_linkage, assets, assets*2 - 2)
self.assertTrue(np.allclose(gpu_order.get(), cpu_order))
self.assertTrue(np.allclose(
gpu_linkage.get(), cpu_linkage[:, :-1]))
| fsi-samples-main | gQuant/plugins/hrp_plugin/tests/unit/test_order.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
from greenflow.dataframe_flow import (ConfSchema, PortsSpecSchema)
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from greenflow.dataframe_flow import Node
class FeatureNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.delayed_process = True
self.infer_meta = False
self.SIGNAL_DF = 'signal_df'
self.FEATURE_DF = 'feature_df'
self.OUTPUT_PORT_NAME = 'out'
port_type = PortsSpecSchema.port_type
port_inports = {
self.SIGNAL_DF: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
self.FEATURE_DF: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
],
PortsSpecSchema.optional: True
}
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:signal_df}"
},
}
self.template_ports_setup(in_ports=port_inports,
out_ports=port_outports)
def conf_schema(self):
json = {
"title": "Calculate the std and mean across assets as features",
"type": "object",
"properties": {
"name": {
"type": "string",
"title": "Feature Name"
}
},
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def update(self):
TemplateNodeMixin.update(self)
meta_outports = self.template_meta_setup().outports
meta_inports = self.template_meta_setup().inports
required = {
'year': 'int16',
'month': 'int16',
'sample_id': 'int64',
}
name = self.conf.get("name", "feature")
input_meta = self.get_input_meta()
if self.FEATURE_DF not in input_meta:
col_from_inport = required.copy()
else:
col_from_inport = input_meta[self.FEATURE_DF].copy()
meta_inports[self.SIGNAL_DF] = required
meta_inports[self.FEATURE_DF] = required
# additional ports
cols = {
name+"_mean": "float64",
name+"_std": "float64"
}
col_from_inport.update(cols)
meta_outports[self.OUTPUT_PORT_NAME] = col_from_inport
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def process(self, inputs):
df = inputs[self.SIGNAL_DF]
name = self.conf.get("name", "feature")
if self.FEATURE_DF not in inputs:
output_df = df[['year', 'month', 'sample_id']].copy()
else:
output_df = inputs[self.FEATURE_DF]
output = {}
if self.outport_connected(self.OUTPUT_PORT_NAME):
col = list(df.columns)
col.remove('sample_id')
col.remove('year')
col.remove('month')
mean_val = df[col].values.mean(axis=1)
std_val = df[col].values.std(axis=1)
output_df[name+'_mean'] = mean_val
output_df[name+'_std'] = std_val
output.update({self.OUTPUT_PORT_NAME: output_df})
return output
| fsi-samples-main | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/featureNode.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
from greenflow.dataframe_flow import (ConfSchema, PortsSpecSchema)
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from .kernels import get_weights
import cudf
import math
class HRPWeightNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.COV_IN = 'covariance_df'
self.ORDER_IN = 'asset_order_df'
self.OUTPUT_PORT_NAME = 'out'
self.delayed_process = True
self.infer_meta = False
port_type = PortsSpecSchema.port_type
port_inports = {
self.COV_IN: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
self.ORDER_IN: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
}
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:covariance_df}"
},
}
self.template_ports_setup(in_ports=port_inports,
out_ports=port_outports)
def conf_schema(self):
json = {
"title": "Compute the HRP weights",
"type": "object",
"properties": {
},
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def update(self):
TemplateNodeMixin.update(self)
meta_outports = self.template_meta_setup().outports
meta_inports = self.template_meta_setup().inports
required = {
'month': 'int16',
'year': 'int16',
'sample_id': 'int64',
}
meta_inports[self.COV_IN] = required
meta_inports[self.ORDER_IN] = required
json = {}
input_meta = self.get_input_meta()
if self.COV_IN in input_meta:
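            # The covariance input carries assets**2 value columns plus the
            # three id columns (sample_id, year, month), hence the sqrt.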
assets = int(math.sqrt(len(input_meta[self.COV_IN]) - 3))
for i in range(assets):
json[i] = 'float64'
elif self.ORDER_IN in input_meta:
assets = len(input_meta[self.ORDER_IN]) - 3
for i in range(assets):
json[i] = 'float64'
json.update(required)
meta_outports[self.OUTPUT_PORT_NAME] = json
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def process(self, inputs):
input_meta = self.get_input_meta()
df_cov = inputs[self.COV_IN]
df_order = inputs[self.ORDER_IN]
all_sample_ids = df_cov['sample_id'].unique()
# print(all_sample_ids - df_order['sample_id'].unique())
total_samples = len(all_sample_ids)
if self.COV_IN in input_meta:
assets = int(math.sqrt(len(input_meta[self.COV_IN]) - 3))
elif self.ORDER_IN in input_meta:
assets = len(input_meta[self.ORDER_IN]) - 3
output = {}
col = list(df_cov.columns)
col.remove('sample_id')
col.remove('year')
col.remove('month')
cov = df_cov[col].values
cov = cov.reshape(
total_samples, -1, assets, assets)
_, num_months, _, _ = cov.shape
col = list(df_order.columns)
col.remove('sample_id')
col.remove('year')
col.remove('month')
order = df_order[col].values
order = order.reshape(
total_samples, -1, assets)
weights = get_weights(total_samples, cov,
order, num_months, assets)
weights = weights.reshape(-1, assets)
weight_df = cudf.DataFrame(weights)
weight_df['month'] = df_order['month']
weight_df['year'] = df_order['year']
weight_df['sample_id'] = df_order['sample_id']
output.update({self.OUTPUT_PORT_NAME: weight_df})
return output
| fsi-samples-main | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/hrpWeight.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
from greenflow.dataframe_flow import (ConfSchema, PortsSpecSchema)
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from greenflow.dataframe_flow import Node
import cudf
import cupy
class PortfolioNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.delayed_process = True
self.infer_meta = False
self.RETURN_IN = 'return_df'
self.WEIGHT_IN = 'weight_df'
self.TRANS_IN = 'transaction_df'
self.OUTPUT_PORT_NAME = 'out'
port_type = PortsSpecSchema.port_type
port_inports = {
self.RETURN_IN: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
self.WEIGHT_IN: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
self.TRANS_IN: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
}
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:return_df}"
},
}
self.template_ports_setup(in_ports=port_inports,
out_ports=port_outports)
def conf_schema(self):
json = {
"title": "Construct the portfolio",
"type": "object",
"properties": {
},
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def update(self):
TemplateNodeMixin.update(self)
meta_outports = self.template_meta_setup().outports
meta_inports = self.template_meta_setup().inports
return_required = {
"date": "datetime64[ns]",
'sample_id': 'int64',
'year': 'int16',
'month': 'int16',
}
weight_required = {
'sample_id': 'int64',
'year': 'int16',
'month': 'int16',
}
tran_required = {
'sample_id': 'int64',
'year': 'int16',
'month': 'int16',
}
addition = {
'portfolio': 'float64'
}
input_meta = self.get_input_meta()
if self.RETURN_IN not in input_meta:
col_from_inport = return_required.copy()
else:
col_from_inport = input_meta[self.RETURN_IN].copy()
meta_inports[self.RETURN_IN] = return_required
meta_inports[self.WEIGHT_IN] = weight_required
meta_inports[self.TRANS_IN] = tran_required
col_from_inport.update(addition)
# additional ports
meta_outports[self.OUTPUT_PORT_NAME] = col_from_inport
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def process(self, inputs):
input_meta = self.get_input_meta()
if self.RETURN_IN in input_meta:
assets = len(input_meta[self.RETURN_IN]) - 4
elif self.WEIGHT_IN in input_meta:
assets = len(input_meta[self.WEIGHT_IN]) - 3
elif self.TRANS_IN in input_meta:
assets = len(input_meta[self.TRANS_IN]) - 3
return_df = inputs[self.RETURN_IN]
weight_df = inputs[self.WEIGHT_IN]
date_df = return_df[['date', 'sample_id', 'year', 'month']]
expand_table = date_df.reset_index().merge(
weight_df, on=['sample_id', 'year', 'month'],
how='left').set_index('index')
price_table = return_df[list(range(assets))]
weight_table = expand_table[list(range(assets))]
if self.TRANS_IN in input_meta:
tran_df = inputs[self.TRANS_IN]
tran_expand_table = date_df.reset_index().merge(
tran_df, on=['sample_id', 'year',
'month'], how='left').set_index('index')
tran_expand_table = tran_expand_table.sort_index().dropna()
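            # Mark month boundaries: build a 0/1 mask from changes in
            # year * 12 + month so that transaction costs are charged only
            # on the first row of each month, when the weights change.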
months = (tran_expand_table['year'] * 12 +
tran_expand_table['month']).values
months = ((months[1:] - months[:-1]) != 0).astype(cupy.float64)
months = cupy.pad(months, ((1, 0)), mode='constant')
months[0] = 1.0
tran_table = tran_expand_table[list(range(assets))].values
tran_table = tran_table * months[:, None]
tran_table = cudf.DataFrame(tran_table)
tran_table.index = tran_expand_table.index
apply_table = (price_table * weight_table).sort_index().dropna()
# hack to fix the column names
apply_table.columns = list(range(assets))
apply_weight = (apply_table - tran_table).sum(axis=1)
else:
apply_weight = (price_table * weight_table).sum(axis=1)
return_df['portfolio'] = apply_weight.astype('float64')
return_df = return_df.dropna()
output = {}
output.update({self.OUTPUT_PORT_NAME: return_df})
return output
| fsi-samples-main | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/portfolioNode.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
validation = {}
display = {}
| fsi-samples-main | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/client.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow.portsSpecSchema import ConfSchema
from greenflow.dataframe_flow.portsSpecSchema import PortsSpecSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
import cudf
class MergeNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_LEFT_NAME = 'left'
self.INPUT_PORT_RIGHT_NAME = 'right'
self.OUTPUT_PORT_NAME = 'merged'
self.delayed_process = True
self.infer_meta = False
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_LEFT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
self.INPUT_PORT_RIGHT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
}
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:left}"
},
}
self.template_ports_setup(in_ports=port_inports,
out_ports=port_outports)
def update(self):
TemplateNodeMixin.update(self)
meta_outports = self.template_meta_setup().outports
meta_inports = self.template_meta_setup().inports
cols_required = {}
input_meta = self.get_input_meta()
if (self.INPUT_PORT_LEFT_NAME in input_meta
and self.INPUT_PORT_RIGHT_NAME in input_meta):
col_from_left_inport = input_meta[self.INPUT_PORT_LEFT_NAME]
col_from_right_inport = input_meta[self.INPUT_PORT_RIGHT_NAME]
col_from_left_inport.update(col_from_right_inport)
meta_outports[self.OUTPUT_PORT_NAME] = col_from_left_inport
elif self.INPUT_PORT_LEFT_NAME in input_meta:
col_from_left_inport = input_meta[self.INPUT_PORT_LEFT_NAME]
meta_outports[self.OUTPUT_PORT_NAME] = col_from_left_inport
elif self.INPUT_PORT_RIGHT_NAME in input_meta:
col_from_right_inport = input_meta[self.INPUT_PORT_RIGHT_NAME]
meta_outports[self.OUTPUT_PORT_NAME] = col_from_right_inport
else:
meta_outports[self.OUTPUT_PORT_NAME] = {}
meta_inports[self.INPUT_PORT_RIGHT_NAME] = cols_required
meta_inports[self.INPUT_PORT_LEFT_NAME] = cols_required
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "DataFrame Merge configure",
"type": "object",
"description": """Merge two dataframes""",
"properties": {
"column": {
"type": "string",
"description": "column name on which to do the merge"
}
},
"required": ["column"],
}
input_meta = self.get_input_meta()
if (self.INPUT_PORT_LEFT_NAME in input_meta
and self.INPUT_PORT_RIGHT_NAME in input_meta):
col_left_inport = input_meta[self.INPUT_PORT_LEFT_NAME]
col_right_inport = input_meta[self.INPUT_PORT_RIGHT_NAME]
enums1 = set([col for col in col_left_inport.keys()])
enums2 = set([col for col in col_right_inport.keys()])
json['properties']['column']['enum'] = list(
enums1.intersection(enums2))
ui = {}
return ConfSchema(json=json, ui=ui)
else:
ui = {
"column": {"ui:widget": "text"}
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
        Inner-merge the two dataframes in the inputs. The join column is
        defined in the `column` field of the node's conf.
        Arguments
        -------
        inputs: list
            list of input dataframes.
Returns
-------
dataframe
"""
df1 = inputs[self.INPUT_PORT_LEFT_NAME]
df2 = inputs[self.INPUT_PORT_RIGHT_NAME]
return {self.OUTPUT_PORT_NAME: cudf.merge(df1, df2,
on=self.conf['column'],
how='inner')}
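# A minimal usage sketch (illustration only, with made-up data): the node is
# normally driven by the greenflow task graph, but the underlying cudf call
# behaves as below.
if __name__ == '__main__':
    left = cudf.DataFrame({'sample_id': [0, 1, 2], 'a': [1.0, 2.0, 3.0]})
    right = cudf.DataFrame({'sample_id': [1, 2, 3], 'b': [4.0, 5.0, 6.0]})
    # The inner merge keeps only sample_ids present in both frames: 1 and 2.
    print(cudf.merge(left, right, on='sample_id', how='inner'))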
| fsi-samples-main | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/mergeNode.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
from greenflow.dataframe_flow import ConfSchema, PortsSpecSchema
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from greenflow.dataframe_flow import Node
import math
import datetime
import cudf
import cupy
from .kernels import get_drawdown_metric
class PerformanceMetricNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.delayed_process = True
self.infer_meta = False
self.INPUT_PORT_NAME = 'in'
self.RET_DF = 'ret_df'
self.SD_DF = 'sd_df'
self.SHARPE_DF = 'sharpe_df'
self.CALMAR_DF = 'calmar_df'
self.MDD_DF = 'maxdd_df'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
}
}
port_outports = {
self.RET_DF: {
port_type: "${port:in}"
},
self.SD_DF: {
port_type: "${port:in}"
},
self.SHARPE_DF: {
port_type: "${port:in}"
},
self.CALMAR_DF: {
port_type: "${port:in}"
},
self.MDD_DF: {
port_type: "${port:in}"
}
}
required = {
"date": "datetime64[ns]",
'sample_id': 'int64',
'portfolio': 'float64'
}
output = {
'sample_id': 'int64',
'portfolio': 'float64',
}
meta_inports = {
self.INPUT_PORT_NAME: required
}
meta_outports = {
self.RET_DF: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
MetaDataSchema.META_DATA: output
},
self.SD_DF: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
MetaDataSchema.META_DATA: output
},
self.SHARPE_DF: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
MetaDataSchema.META_DATA: output
},
self.CALMAR_DF: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
MetaDataSchema.META_DATA: output
},
self.MDD_DF: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
MetaDataSchema.META_DATA: output
}
}
self.template_ports_setup(in_ports=port_inports,
out_ports=port_outports)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "Compute the Sharpe Ratio",
"type": "object",
"properties": {
},
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
df = inputs[self.INPUT_PORT_NAME]
output = {}
df = df.sort_values(['date'])
group_obj = df.groupby('sample_id')
beg = datetime.datetime.utcfromtimestamp(
group_obj.nth(0)['date'].values[0].item() // 1e9)
end = datetime.datetime.utcfromtimestamp(
group_obj.nth(-1)['date'].values[0].item() // 1e9)
total_days = (end - beg).days
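        # Annualize the return: exp(sum of daily log-returns) is the total
        # growth factor over total_days days; raising it to 365 / total_days
        # converts it to an annual growth factor before subtracting 1.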
total = cudf.exp(group_obj['portfolio'].sum())
avg_return = cupy.power(total, (365/total_days)) - 1.0
return_series = cudf.Series(avg_return)
return_series.index = total.index
mean_df = cudf.DataFrame({'portfolio': return_series})
# mean_df = df.groupby(['sample_id']).agg({'portfolio': 'mean'})
std_df = df.groupby(['sample_id']).agg(
{'portfolio': 'std'}) * math.sqrt(252)
if self.outport_connected(self.SHARPE_DF):
            # Sharpe ratio with the risk-free rate taken as zero:
            # annualized return divided by annualized volatility
out_df = (mean_df / std_df).reset_index()
output.update({self.SHARPE_DF: out_df})
if self.outport_connected(self.SD_DF):
output.update({self.SD_DF: std_df.reset_index()})
if self.outport_connected(self.RET_DF):
output.update({self.RET_DF: mean_df.reset_index()})
if (self.outport_connected(self.MDD_DF) or
self.outport_connected(self.CALMAR_DF)):
all_sample_ids = df['sample_id'].unique()
total_samples = len(all_sample_ids)
drawdown, all_dates = get_drawdown_metric(df, total_samples)
drawdown_series = cudf.Series(
cupy.abs(drawdown.reshape(total_samples)))
drawdown_series.index = mean_df.index
drawdown_df = cudf.DataFrame({'portfolio': drawdown_series})
if self.outport_connected(self.MDD_DF):
output.update({self.MDD_DF: drawdown_df.reset_index()})
if self.outport_connected(self.CALMAR_DF):
calmar_df = (mean_df / drawdown_df).reset_index()
output.update({self.CALMAR_DF: calmar_df})
return output
| fsi-samples-main | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/performanceMetricNode.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
from .loadCsvNode import LoadCsvNode
from .bootstrapNode import BootstrapNode
from .logReturnNode import LogReturnNode
from .distanceNode import DistanceNode
from .hierarchicalClusteringNode import HierarchicalClusteringNode
from .hrpWeight import HRPWeightNode
from .portfolioNode import PortfolioNode
from .performanceMetricNode import PerformanceMetricNode
from .nrpWeightNode import NRPWeightNode
from .maxDrawdownNode import MaxDrawdownNode
from .featureNode import FeatureNode
from .aggregateTimeFeature import AggregateTimeFeatureNode
from .mergeNode import MergeNode
from .diffNode import DiffNode
from .rSquaredNode import RSquaredNode
from .shapSummaryPlotNode import ShapSummaryPlotPlotNode
from .leverageNode import LeverageNode
from .rawDataNode import RawDataNode
from .transactionCostNode import TransactionCostNode
__all__ = ["LoadCsvNode", "BootstrapNode", "LogReturnNode",
"DistanceNode", "HierarchicalClusteringNode", "HRPWeightNode",
"PortfolioNode", "PerformanceMetricNode", "NRPWeightNode",
"MaxDrawdownNode", "FeatureNode", "AggregateTimeFeatureNode",
"MergeNode", "DiffNode", "RSquaredNode", "ShapSummaryPlotPlotNode",
"LeverageNode", "RawDataNode", "TransactionCostNode"]
| fsi-samples-main | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/__init__.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
from greenflow.dataframe_flow import ConfSchema, PortsSpecSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow import Node
from .kernels import compute_leverage
import cupy
import cudf
class LeverageNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.delayed_process = True
self.infer_meta = False
self.LEVERAGE_DF = 'lev_df'
self.INPUT_PORT_NAME = "in"
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
}
}
port_outports = {
self.LEVERAGE_DF: {
port_type: "${port:in}"
},
}
sub_dict = {
"date": "datetime64[ns]",
'sample_id': 'int64',
'year': 'int16',
'month': 'int16',
'portfolio': "float64",
}
meta_inports = {
self.INPUT_PORT_NAME: sub_dict
}
meta_outports = {
self.LEVERAGE_DF: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
MetaDataSchema.META_DATA: sub_dict
}
}
self.template_ports_setup(in_ports=port_inports,
out_ports=port_outports)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "Compute the Leverage to match the target volatility",
"type": "object",
"properties": {
"target_vol": {
'type': "number",
"title": "Target Volativity",
"description": """The target volatility to match""",
"default": 0.05
},
"long_window": {
'type': "integer",
"title": "Long window size",
"description": """the large number of days in the past to compute
volatility""",
"default": 59
},
"short_window": {
'type': "integer",
"title": "Short window size",
"description": """the small number of days in the past to compute
volatility""",
"default": 19
}
},
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
df = inputs[self.INPUT_PORT_NAME]
all_sample_ids = df['sample_id'].unique()
total_samples = len(all_sample_ids)
lev, all_dates, window = compute_leverage(total_samples, df,
**self.conf)
total_samples, num_months = lev.shape
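        # Reconstruct (year, month) labels for each monthly leverage value:
        # month ids count from the first month in the data, are offset by
        # the starting calendar month, and skip the warm-up ``window``.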
months_id = all_dates.dt.year*12 + (all_dates.dt.month-1)
months_id = months_id - months_id.min()
mid = (cupy.arange(months_id.max() + 1) +
(all_dates.dt.month - 1)[0])[window:]
minyear = all_dates.dt.year.min()
if len(mid) == 0:
mid = cupy.array([0])
months = mid % 12
years = mid // 12 + minyear
output = {}
df_lev = cudf.DataFrame(
{'leverage': lev.reshape(total_samples * num_months)})
df_lev['year'] = cupy.concatenate(
[years]*total_samples).astype(cupy.int16)
df_lev['month'] = cupy.concatenate(
[months]*total_samples).astype(cupy.int16)
df_lev['sample_id'] = cupy.repeat(cupy.arange(
total_samples) + all_sample_ids.min(), len(mid))
date_df = df[['date', 'sample_id', 'year', 'month', 'portfolio']]
expand_table = date_df.reset_index().merge(
df_lev, on=['sample_id', 'year', 'month'],
how='left').set_index('index')
expand_table['portfolio'] = expand_table[
'portfolio'] * expand_table['leverage']
expand_table = expand_table.dropna()[[
'date', 'sample_id', 'year', 'month', 'portfolio'
]]
output.update({self.LEVERAGE_DF: expand_table})
return output
| fsi-samples-main | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/leverageNode.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
from greenflow.dataframe_flow import (ConfSchema, PortsSpecSchema)
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
class DiffNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.delayed_process = True
self.infer_meta = False
self.OUTPUT_PORT_NAME = 'out'
self.DIFF_A = 'diff_a'
self.DIFF_B = 'diff_b'
port_type = PortsSpecSchema.port_type
port_inports = {
self.DIFF_A: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
self.DIFF_B: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
}
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:diff_a}"
},
}
col_required = {
'sample_id': 'int64',
'portfolio': 'float64',
}
meta_inports = {
self.DIFF_A: col_required,
self.DIFF_B: col_required
}
output_meta = {
'sample_id': 'int64',
'portfolio': 'float64',
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
MetaDataSchema.META_DATA: output_meta
}
}
self.template_ports_setup(in_ports=port_inports,
out_ports=port_outports)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "Calculate Sharpe diff",
"type": "object",
"properties": {
},
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
df_a = inputs[self.DIFF_A].set_index('sample_id')
df_b = inputs[self.DIFF_B].set_index('sample_id')
# df = df.drop('datetime', axis=1)
output = {}
diff = df_a - df_b
output.update({self.OUTPUT_PORT_NAME: diff.reset_index()})
return output
| fsi-samples-main | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/diffNode.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
from greenflow.dataframe_flow import ConfSchema, PortsSpecSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from greenflow.dataframe_flow import Node
from .kernels import get_orders
import math
import cudf
class HierarchicalClusteringNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.delayed_process = True
self.infer_meta = False
self.INPUT_PORT_NAME = 'in'
self.OUTPUT_PORT_NAME = 'out'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
}
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:in}"
},
}
self.template_ports_setup(in_ports=port_inports,
out_ports=port_outports)
def conf_schema(self):
json = {
"title": "Hierachical Clustering Node",
"type": "object",
"properties": {
},
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def update(self):
TemplateNodeMixin.update(self)
meta_outports = self.template_meta_setup().outports
meta_inports = self.template_meta_setup().inports
required = {
'month': 'int16',
'year': 'int16',
'sample_id': 'int64',
}
meta_inports[self.INPUT_PORT_NAME] = required
json = {}
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME in input_meta:
num = len(input_meta[self.INPUT_PORT_NAME]) - 3
assets = (1 + int(math.sqrt(1 + 8 * num))) // 2
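# num counts the assets*(assets-1)/2 condensed-distance columns
# (the 3 id columns removed), so assets solves a*(a-1)/2 = num,
# i.e. a = (1 + sqrt(1 + 8*num)) / 2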
for i in range(assets):
json[i] = 'int64'
json.update(required)
meta_outports[self.OUTPUT_PORT_NAME] = json
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def process(self, inputs):
input_meta = self.get_input_meta()
df = inputs[self.INPUT_PORT_NAME]
all_sample_ids = df['sample_id'].unique()
total_samples = len(all_sample_ids)
if self.INPUT_PORT_NAME in input_meta:
num = len(input_meta[self.INPUT_PORT_NAME]) - 3
assets = (1 + int(math.sqrt(1 + 8 * num))) // 2
output = {}
col = list(df.columns)
col.remove('sample_id')
col.remove('year')
col.remove('month')
distance = df[col].values
distance = distance.reshape(
total_samples, -1, assets*(assets-1)//2)
_, num_months, _ = distance.shape
orders = get_orders(total_samples, num_months, assets, distance)
orders = orders.reshape(-1, assets)
order_df = cudf.DataFrame(orders)
order_df['month'] = df['month']
order_df['year'] = df['year']
order_df['sample_id'] = df['sample_id']
output.update({self.OUTPUT_PORT_NAME: order_df})
return output
| fsi-samples-main | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/hierarchicalClusteringNode.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
from greenflow.dataframe_flow import ConfSchema
from greenflow.dataframe_flow.portsSpecSchema import PortsSpecSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow import Node
import cudf
class AggregateTimeFeatureNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'in'
self.OUTPUT_PORT_NAME = 'out'
self.delayed_process = True
self.infer_meta = False
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
}
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:in}"
}
}
cols_required = {
'sample_id': 'int64',
'year': 'int16',
'month': 'int16',
}
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
MetaDataSchema.META_DATA: {}
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def update(self):
TemplateNodeMixin.update(self)
input_meta = self.get_input_meta()
meta_outports = self.template_meta_setup().outports
meta_inports = self.template_meta_setup().inports
required = meta_inports[self.INPUT_PORT_NAME]
if self.INPUT_PORT_NAME not in input_meta:
col_from_inport = {
'sample_id': 'int64'
}
col_ref = {}
else:
col_from_inport = {
'sample_id': 'int64'
}
col_ref = input_meta[self.INPUT_PORT_NAME].copy()
for key in col_ref.keys():
if key in required:
continue
new_key = key+"_mean"
col_from_inport[new_key] = col_ref[key]
for key in col_ref.keys():
if key in required:
continue
new_key = key+"_std"
col_from_inport[new_key] = col_ref[key]
meta_outports[self.OUTPUT_PORT_NAME] = col_from_inport
self.template_meta_setup(
in_ports=None,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "Aggregate feature across time, get mean and std",
"type": "object",
"properties": {
},
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
df = inputs[self.INPUT_PORT_NAME]
output = {}
col = list(df.columns)
col.remove('year')
col.remove('month')
mdf = df[col].groupby('sample_id').mean()
mdf.columns = [c+"_mean" for c in mdf.columns]
sdf = df[col].groupby('sample_id').std()
sdf.columns = [c+"_std" for c in sdf.columns]
out = cudf.merge(mdf, sdf,
left_index=True,
right_index=True).reset_index()
output.update({self.OUTPUT_PORT_NAME: out})
return output
| fsi-samples-main | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/aggregateTimeFeature.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
from numba import cuda
import numba
import cupy
import math
MAX_ASSETS = 32
MAX_YEARS = 24
PARENT_MAX_ASSETS = 2 * MAX_ASSETS - 1
SUM_LEN = 256 * 32
# MAX_SHARE = 256 * MAX_YEARS * 4
@cuda.jit
def boot_strap(result, ref, block_size, num_positions, positions):
sample, assets, length = result.shape
i = cuda.threadIdx.x
sample_id = cuda.blockIdx.x // num_positions
position_id = cuda.blockIdx.x % num_positions
sample_at = positions[cuda.blockIdx.x]
for k in range(i, block_size*assets, cuda.blockDim.x):
asset_id = k // block_size
loc = k % block_size
if (position_id * block_size + loc + 1 < length):
result[sample_id, asset_id, position_id * block_size +
loc + 1] = ref[asset_id, sample_at + loc]
@cuda.jit(device=True)
def gpu_sum(array):
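# parallel tree reduction over the shared buffer: each pass halves the
# active length and adds the upper half into the lower half, so after
# log2(SUM_LEN) passes the block-wide sum lands in array[0]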
i = cuda.threadIdx.x
total_len = SUM_LEN
length = total_len
while length > 0:
length = length // 2
for k in range(i, length, cuda.blockDim.x):
if k+length < total_len:
array[k] += array[k + length]
cuda.syncthreads()
@cuda.jit
def compute_cov(means, cov, distance, returns, months_starts, num_months,
assets, time_len, window):
"""
means of size [sample, months, assets]
num_months is the number of rebalance steps, e.g. 60 - 12 when the
window size is one year (12 months)
"""
shared = cuda.shared.array(shape=0, dtype=numba.float32)
shared_buffer_size = shared.size
i = cuda.threadIdx.x
sample_id = cuda.blockIdx.x // num_months
step_id = cuda.blockIdx.x % num_months
start_id = months_starts[step_id]
end_id = months_starts[
step_id +
window] if step_id + window < months_starts.size else time_len
for a in range(assets):
# copy asset return to shared
for k in range(i, shared_buffer_size, cuda.blockDim.x):
shared[k] = 0
cuda.syncthreads()
for k in range(i + start_id, end_id, cuda.blockDim.x):
shared[k - start_id] = returns[sample_id, a, k]
cuda.syncthreads()
gpu_sum(shared)
if i == 0:
means[sample_id, step_id, a] = shared[0] / (end_id - start_id)
cuda.syncthreads()
for a in range(assets):
for b in range(a, assets):
# copy asset return to shared
for k in range(i, shared_buffer_size, cuda.blockDim.x):
shared[k] = 0
cuda.syncthreads()
mean_a = means[sample_id, step_id, a]
mean_b = means[sample_id, step_id, b]
for k in range(i + start_id, end_id, cuda.blockDim.x):
shared[k - start_id] = (returns[sample_id, a, k] - mean_a) * (
returns[sample_id, b, k] - mean_b)
cuda.syncthreads()
gpu_sum(shared)
if i == 0:
cov[sample_id, step_id, a, b] = shared[0] / (end_id - start_id)
cov[sample_id, step_id, b, a] = shared[0] / (end_id - start_id)
cuda.syncthreads()
# compute distance
for k in range(i, assets*assets, cuda.blockDim.x):
a = k // assets
b = k % assets
if b > a:
var_a = cov[sample_id, step_id, a, a]
var_b = cov[sample_id, step_id, b, b]
cov_ab = cov[sample_id, step_id, a, b]
dis_ab = math.sqrt((1.0 - cov_ab / math.sqrt(var_a * var_b)) / 2.0)
offset = (2 * assets - 1 - a) * a // 2 + (b - a - 1)
shared[offset] = dis_ab
# distance[sample_id, step_id, offset] = dis_ab
cuda.syncthreads()
# compute distance of the distance
for k in range(i, assets*assets, cuda.blockDim.x):
a = k // assets
b = k % assets
if b > a:
summ = 0.0
for col_id in range(assets):
if col_id > a:
offset_a = (2 * assets - 1 - a) * a // 2 + (col_id - a - 1)
val_a = shared[offset_a]
elif col_id < a:
offset_a = (2 * assets - 1 - col_id) * col_id // 2 + (
a - col_id - 1)
val_a = shared[offset_a]
else:
val_a = 0.0
if col_id > b:
offset_b = (2 * assets - 1 - b) * b // 2 + (col_id - b - 1)
val_b = shared[offset_b]
elif col_id < b:
offset_b = (2 * assets - 1 - col_id) * col_id // 2 + (
b - col_id - 1)
val_b = shared[offset_b]
else:
val_b = 0.0
summ += (val_a - val_b) * (val_a - val_b)
offset = (2 * assets - 1 - a) * a // 2 + (b - a - 1)
distance[sample_id, step_id, offset] = math.sqrt(summ)
@cuda.jit
def leverage_for_target_vol(leverage, returns, months_starts, num_months,
window, long_window,
short_window, target_vol):
"""
each block computes the leverage for one rebalancing month,
leverage of shape [sample, months]
returns of shape [sample, time_len]
num_months is the number of rebalance steps, e.g. 60 - 12 when the
window size is one year (12 months)
"""
# shared = cuda.shared.array(MAX_SHARE, dtype=numba.float64)
shared = cuda.shared.array(shape=0, dtype=numba.float32)
total_samples, time_len = returns.shape
# means = cuda.shared.array(1, dtype=numba.float64)
# sds = cuda.shared.array(2, dtype=numba.float64)
# means = shared[-1:]
# sds = shared[-3:-1]
annual_const = math.sqrt(252.)
shared_buffer_size = shared.size
i = cuda.threadIdx.x
sample_id = cuda.blockIdx.x // num_months
step_id = cuda.blockIdx.x % num_months
start_id = months_starts[step_id]
end_id = months_starts[
step_id +
window] if step_id + window < months_starts.size else time_len
# calculate the means for the long window
start_id = end_id - long_window
# copy asset return to shared
for k in range(i, shared_buffer_size, cuda.blockDim.x):
shared[k] = 0
cuda.syncthreads()
for k in range(i + start_id, end_id, cuda.blockDim.x):
shared[k - start_id] = returns[sample_id, k]
cuda.syncthreads()
gpu_sum(shared)
cuda.syncthreads()
means = shared[0] / (end_id - start_id)
# calculate the std for the long window
# copy asset return to shared
for k in range(i, shared_buffer_size, cuda.blockDim.x):
shared[k] = 0
cuda.syncthreads()
for k in range(i + start_id, end_id, cuda.blockDim.x):
shared[k - start_id] = (returns[sample_id, k] -
means) * (returns[sample_id, k] - means)
cuda.syncthreads()
gpu_sum(shared)
sd_long = math.sqrt(shared[0] / (end_id - start_id))
# calculate the means for the short window
start_id = end_id - short_window
# copy asset return to shared
for k in range(i, shared_buffer_size, cuda.blockDim.x):
shared[k] = 0
cuda.syncthreads()
for k in range(i + start_id, end_id, cuda.blockDim.x):
shared[k - start_id] = returns[sample_id, k]
cuda.syncthreads()
gpu_sum(shared)
cuda.syncthreads()
means = shared[0] / (end_id - start_id)
cuda.syncthreads()
# calculate the std for the short window
for k in range(i, shared_buffer_size, cuda.blockDim.x):
shared[k] = 0
cuda.syncthreads()
for k in range(i + start_id, end_id, cuda.blockDim.x):
shared[k - start_id] = (returns[sample_id, k] - means) * (
returns[sample_id, k] - means)
cuda.syncthreads()
gpu_sum(shared)
sd_short = math.sqrt(shared[0] / (end_id - start_id))
if i == 0:
lev = target_vol / (max(sd_short, sd_long)*annual_const)
leverage[sample_id, step_id] = lev
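# the leverage scales realized vol to the target: lev = target_vol /
# (max(sd_short, sd_long) * sqrt(252)); the more conservative (larger)
# of the long- and short-window vols, annualized, sets the exposure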
@cuda.jit(device=True)
def find(x, parent):
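# union-find root lookup with full path compression: the first loop
# walks to the root, the second repoints every visited node at it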
p = x
while parent[x] != x:
x = parent[x]
while parent[p] != x:
p, parent[p] = parent[p], x
return x
@cuda.jit(device=True)
def label(Z, n, parent):
"""Correctly label clusters in unsorted dendrogram."""
next_label = n
for i in range(n - 1):
x, y = int(Z[i, 0]), int(Z[i, 1])
x_root, y_root = find(x, parent), find(y, parent)
if x_root < y_root:
Z[i, 0], Z[i, 1] = x_root, y_root
else:
Z[i, 0], Z[i, 1] = y_root, x_root
parent[x_root] = next_label
parent[y_root] = next_label
next_label += 1
@cuda.jit(device=True)
def mergeSort(a, L, R):
current_size = 1
# outer loop: one pass per run length current_size
while current_size < len(a):
left = 0
# inner loop: merge each adjacent pair of runs in this pass
while left < len(a)-1:
# mid = last index of the left run
mid = min((left + current_size - 1), (len(a)-1))
# right = left + 2*current_size - 1, clamped to the last index
if 2 * current_size + left - 1 > len(a)-1:
right = len(a) - 1
else:
right = 2 * current_size + left - 1
# Merge call for each sub array
merge(a, left, mid, right, L, R)
left = left + current_size*2
# double the run size for the next pass
current_size = 2 * current_size
@cuda.jit(device=True)
def merge(a, ll, m, r, L, R):
n1 = m - ll + 1
n2 = r - m
L[:, :] = 0
R[:, :] = 0
for i in range(0, n1):
L[i, 0] = a[ll + i, 0]
L[i, 1] = a[ll + i, 1]
L[i, 2] = a[ll + i, 2]
for i in range(0, n2):
R[i, 0] = a[m + i + 1, 0]
R[i, 1] = a[m + i + 1, 1]
R[i, 2] = a[m + i + 1, 2]
i, j, k = 0, 0, ll
while i < n1 and j < n2:
if L[i, 2] > R[j, 2]:
a[k, 0] = R[j, 0]
a[k, 1] = R[j, 1]
a[k, 2] = R[j, 2]
j += 1
else:
a[k, 0] = L[i, 0]
a[k, 1] = L[i, 1]
a[k, 2] = L[i, 2]
i += 1
k += 1
while i < n1:
a[k, 0] = L[i, 0]
a[k, 1] = L[i, 1]
a[k, 2] = L[i, 2]
i += 1
k += 1
while j < n2:
a[k, 0] = R[j, 0]
a[k, 1] = R[j, 1]
a[k, 2] = R[j, 2]
j += 1
k += 1
@cuda.jit(device=True)
def condensed_index(n, i, j):
"""
Calculate the index of element (i, j) of an n x n distance matrix
in its condensed (flattened upper-triangle) form.
"""
if i < j:
return n * i - (i * (i + 1) // 2) + (j - i - 1)
elif i > j:
return n * j - (j * (j + 1) // 2) + (i - j - 1)
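# illustrative mapping for n = 4: (0,1)->0 (0,2)->1 (0,3)->2 (1,2)->3
# (1,3)->4 (2,3)->5, e.g. condensed_index(4, 2, 1) = 4*1 - 1 + 0 = 3
# (callers never query i == j, which is why that case returns nothing)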
@cuda.jit(device=True)
def my_seriation(Z, N, stack, result):
"""Returns the order implied by a hierarchical tree (dendrogram).
:param Z: A hierarchical tree (dendrogram).
:param N: The number of points given to the clustering process.
:param cur_index: The position in the tree for the recursive traversal.
:return: The order implied by the hierarchical tree Z.
"""
o_point = -1
stack_point = 0
stack[0] = N + N - 2
while stack_point >= 0:
v = stack[stack_point]
stack_point -= 1
left = int(Z[v - N, 0])
right = int(Z[v - N, 1])
if right >= N:
stack_point += 1
stack[stack_point] = right
if left >= N:
stack_point += 1
stack[stack_point] = left
if left < N:
o_point += 1
result[o_point] = left
if right < N:
o_point += 1
result[o_point] = right
return result
@cuda.jit
def single_linkage(output, orders, dists, num_months, n):
"""
dists is shape [sample, months, distance]
output is of shape [sample, months, n-1, 3]
"""
large = 1e200
merged = cuda.shared.array(MAX_ASSETS, dtype=numba.int64)
merged[:] = 0
D = cuda.shared.array(MAX_ASSETS, dtype=numba.float64)
D[:] = large
L = cuda.shared.array(shape=(MAX_ASSETS, 3), dtype=numba.float64)
R = cuda.shared.array(shape=(MAX_ASSETS, 3), dtype=numba.float64)
parent = cuda.shared.array(PARENT_MAX_ASSETS, dtype=numba.int64)
for k in range(PARENT_MAX_ASSETS):
parent[k] = k
stack = cuda.shared.array(MAX_ASSETS, dtype=numba.int64)
sample_id = cuda.blockIdx.x // num_months
step_id = cuda.blockIdx.x % num_months
x = 0
for k in range(n - 1):
current_min = large
merged[x] = 1
for i in range(n):
if merged[i] == 1:
continue
dis_id = int(condensed_index(n, x, i))
dist = dists[sample_id, step_id, dis_id]
# print(k, i, dis_id, dist, D[i])
if D[i] > dist:
D[i] = dist
if D[i] < current_min:
y = i
current_min = D[i]
output[sample_id, step_id, k, 0] = x
output[sample_id, step_id, k, 1] = y
output[sample_id, step_id, k, 2] = current_min
x = y
# # Sort Z by cluster distances.
mergeSort(output[sample_id, step_id], L, R)
# # Find correct cluster labels and compute cluster sizes inplace.
label(output[sample_id, step_id], n, parent)
my_seriation(output[sample_id, step_id], n,
stack, orders[sample_id, step_id])
@cuda.jit
def HRP_weights(weights, covariances, res_order, N, num_months):
"""
covariances, [samples, number, N, N]
res_order, [sample, number, N]
"""
start_pos = cuda.shared.array(MAX_ASSETS, dtype=numba.int64)
end_pos = cuda.shared.array(MAX_ASSETS, dtype=numba.int64)
old_start_pos = cuda.shared.array(MAX_ASSETS, dtype=numba.int64)
old_end_pos = cuda.shared.array(MAX_ASSETS, dtype=numba.int64)
parity_w = cuda.shared.array(MAX_ASSETS, dtype=numba.float64)
sample_id = cuda.blockIdx.x // num_months
step_id = cuda.blockIdx.x % num_months
cluster_num = 1
old_cluster_num = 1
start_pos[0] = 0
end_pos[0] = N
old_start_pos[0] = 0
old_end_pos[0] = N
while cluster_num > 0:
cluster_num = 0
for i in range(old_cluster_num):
start = old_start_pos[i]
end = old_end_pos[i]
half = (end - start) // 2
if half > 0:
start_pos[cluster_num] = start
end_pos[cluster_num] = start + half
cluster_num += 1
if half > 0:
start_pos[cluster_num] = start + half
end_pos[cluster_num] = end
cluster_num += 1
for subcluster in range(0, cluster_num, 2):
left_s = start_pos[subcluster]
left_e = end_pos[subcluster]
right_s = start_pos[subcluster+1]
right_e = end_pos[subcluster+1]
summ = 0.0
for i in range(left_s, left_e):
idd = res_order[sample_id, step_id, i]
parity_w[i - left_s] = 1.0 / \
covariances[sample_id, step_id, idd, idd]
# print('parity', i, parity_w[i - left_s])
summ += parity_w[i - left_s]
for i in range(left_s, left_e):
parity_w[i - left_s] *= 1.0 / summ
summ = 0.0
for i in range(left_s, left_e):
idd_x = res_order[sample_id, step_id, i]
for j in range(left_s, left_e):
idd_y = res_order[sample_id, step_id, j]
summ += parity_w[i - left_s]*parity_w[j - left_s] * \
covariances[sample_id, step_id, idd_x, idd_y]
left_cluster_var = summ
summ = 0.0
for i in range(right_s, right_e):
idd = res_order[sample_id, step_id, i]
parity_w[i - right_s] = 1.0 / \
covariances[sample_id, step_id, idd, idd]
summ += parity_w[i - right_s]
for i in range(right_s, right_e):
parity_w[i - right_s] *= 1.0 / summ
summ = 0.0
for i in range(right_s, right_e):
idd_x = res_order[sample_id, step_id, i]
for j in range(right_s, right_e):
idd_y = res_order[sample_id, step_id, j]
summ += parity_w[i - right_s]*parity_w[j - right_s] * \
covariances[sample_id, step_id, idd_x, idd_y]
right_cluster_var = summ
alloc_factor = 1 - left_cluster_var / \
(left_cluster_var + right_cluster_var)
for i in range(left_s, left_e):
idd = res_order[sample_id, step_id, i]
weights[sample_id, step_id, idd] *= alloc_factor
for i in range(right_s, right_e):
idd = res_order[sample_id, step_id, i]
weights[sample_id, step_id, idd] *= 1 - alloc_factor
for i in range(cluster_num):
old_start_pos[i] = start_pos[i]
old_end_pos[i] = end_pos[i]
old_cluster_num = cluster_num
@cuda.jit
def drawdown_kernel(drawdown, returns, months_starts, window):
"""
returns, [samples, assets, length]
drawdown, [samples, months, assets]
num_months is the number of rebalance steps, e.g. 60 - 12 when the
window size is one year (12 months)
"""
# shared = cuda.shared.array(shape=0, dtype=numba.float64)
# shared_buffer_size = shared.size
total_samples, assets, time_len = returns.shape
_, num_months, _ = drawdown.shape
i = cuda.threadIdx.x
sample_id = cuda.blockIdx.x // num_months
step_id = cuda.blockIdx.x % num_months
start_id = months_starts[step_id]
end_id = months_starts[
step_id +
window] if step_id + window < months_starts.size else time_len
for a in range(i, assets, cuda.blockDim.x):
cumsum = 0.0
currentMax = 1.0
minDrawDown = 100.0
for k in range(start_id, end_id):
cumsum += returns[sample_id, a, k]
value = math.exp(cumsum)
if value > currentMax:
currentMax = value
currDrawdown = value / currentMax - 1.0
if currDrawdown < minDrawDown:
minDrawDown = currDrawdown
drawdown[sample_id, step_id, a] = -minDrawDown
def get_drawdown(log_return, total_samples, negative=False, window=12):
first_sample = log_return['sample_id'].min().item()
all_dates = log_return[first_sample == log_return['sample_id']]['date']
all_dates = all_dates.reset_index(drop=True)
months_start = _get_month_start_pos(all_dates)
log_return_ma = _get_log_return_matrix(total_samples, log_return)
if negative:
log_return_ma = -1.0 * log_return_ma
_, assets, timelen = log_return_ma.shape
number_of_threads = 128
num_months = len(months_start) - window
if num_months == 0: # use all the months to compute
num_months = 1
number_of_blocks = num_months * total_samples
drawdown = cupy.zeros((total_samples, num_months, assets))
drawdown_kernel[(number_of_blocks, ),
(number_of_threads, )](drawdown, log_return_ma,
months_start, window)
return drawdown, all_dates
def get_drawdown_metric(log_return, total_samples):
first_sample = log_return['sample_id'].min().item()
all_dates = log_return[first_sample == log_return['sample_id']]['date']
all_dates = all_dates.reset_index(drop=True)
months_start = _get_month_start_pos(all_dates)
# log_return_ma = _get_log_return_matrix(total_samples, log_return)
port_return_ma = log_return['portfolio'].values.reshape(
total_samples, 1, -1)
_, assets, timelen = port_return_ma.shape
number_of_threads = 128
window = len(months_start)
num_months = len(months_start) - window
if num_months == 0: # use all the months to compute
num_months = 1
number_of_blocks = num_months * total_samples
drawdown = cupy.zeros((total_samples, num_months, assets))
drawdown_kernel[(number_of_blocks, ),
(number_of_threads, )](drawdown, port_return_ma,
months_start, window)
return drawdown, all_dates
def get_weights(total_samples, cov, orders, num_months, assets):
number_of_threads = 1
number_of_blocks = num_months * total_samples
weights = cupy.ones((total_samples, num_months, assets))
HRP_weights[(number_of_blocks,), (number_of_threads,)](
weights,
cov,
orders,
assets,
num_months)
return weights
def get_orders(total_samples, num_months, assets, distance):
number_of_threads = 1
number_of_blocks = num_months * total_samples
output = cupy.zeros((total_samples, num_months, assets-1, 3))
orders = cupy.zeros((total_samples, num_months, assets), dtype=cupy.int64)
single_linkage[(number_of_blocks,), (number_of_threads,)](
output,
orders,
distance,
num_months, assets)
return orders
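# illustrative call (assumed shapes): `distance` is the condensed
# distance block produced by compute_cov_distance, of shape
# [samples, months, assets*(assets-1)//2]; the returned `orders` holds
# each dendrogram's leaf order, of shape [samples, months, assets]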
def run_bootstrap(v, number_samples=2, block_size=60, number_of_threads=256):
"""
@v, stock price matrix. [time, stocks]
@number_samples, number of samples
@block_size, sample block size
"""
length, assets = v.shape # get the time length and the number of assets,
init_prices = v[0, :].reshape(1, -1, 1) # initial prices for all assets
v = cupy.log(v)
# compute the price difference, dimension of [length -1, assets]
ref = cupy.diff(v, axis=0)
# output results
output = cupy.zeros((number_samples, assets, length))
# sample starting position, exclusive
sample_range = length - block_size
# number of positions to sample to cover the whole seq length
num_positions = (length - 2) // block_size + 1
sample_positions = cupy.random.randint(
0, sample_range,
num_positions * number_samples) # compute random starting positions
number_of_blocks = len(sample_positions)
boot_strap[(number_of_blocks,), (number_of_threads,)](
output,
ref.T,
block_size,
num_positions,
sample_positions)
# results have shape [number_samples, number of assets, time]
# output = output.reshape(number_samples, assets, length)
# convert it into prices
return (cupy.exp(output.cumsum(axis=2)) * init_prices)
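# minimal usage sketch (illustrative only, synthetic data):
# import cupy
# prices = cupy.exp(cupy.random.randn(252, 10).cumsum(axis=0) * 0.01)
# paths = run_bootstrap(prices, number_samples=4, block_size=60)
# paths.shape -> (4, 10, 252): bootstrapped price paths per sample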
def _get_month_start_pos(all_dates):
months_id = all_dates.dt.year*12 + (all_dates.dt.month-1)
months_id = months_id - months_id.min()
# months_id = months_id[1:]
month_start = months_id - months_id.shift(1)
month_start[0] = 1
months_start = cupy.where((month_start == 1).values)[0]
# print('month start position', months_start)
return months_start
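# e.g. for daily dates spanning Jan-Mar the result looks like
# [0, 21, 40] (exact values depend on the calendar): the row index of
# the first observation of each month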
def _get_log_return_matrix(total_samples, log_return):
col = list(log_return.columns)
col.remove('date')
col.remove('sample_id')
col.remove('year')
col.remove('month')
log_return_ma = log_return[col].values
log_return_ma = log_return_ma.reshape(total_samples, -1, len(col))
log_return_ma = log_return_ma.transpose((0, 2, 1))
# sample #, assets dim, time length
return log_return_ma
def compute_cov_distance(total_samples,
log_return,
window=12):
first_sample = log_return['sample_id'].min().item()
all_dates = log_return[first_sample == log_return['sample_id']]['date']
all_dates = all_dates.reset_index(drop=True)
months_start = _get_month_start_pos(all_dates)
log_return_ma = _get_log_return_matrix(total_samples, log_return)
_, assets, timelen = log_return_ma.shape
number_of_threads = 256
num_months = len(months_start) - window
# print('num', num_months, len(months_start), window)
if num_months == 0: # this case, use all the data to compute
num_months = 1
number_of_blocks = num_months * total_samples
means = cupy.zeros((total_samples, num_months, assets))
cov = cupy.zeros((total_samples, num_months, assets, assets))
distance = cupy.zeros(
(total_samples, num_months, (assets - 1) * assets // 2))
compute_cov[(number_of_blocks, ), (number_of_threads, ), 0,
256 * MAX_YEARS * 8](means, cov, distance, log_return_ma,
months_start, num_months, assets, timelen,
window)
return means, cov, distance, all_dates
def compute_leverage(total_samples,
log_return,
long_window=59,
short_window=19,
target_vol=0.05):
first_sample = log_return['sample_id'].min().item()
all_dates = log_return[first_sample == log_return['sample_id']]['date']
all_dates = all_dates.reset_index(drop=True)
months_start = _get_month_start_pos(all_dates)
for window in range(len(months_start)):
if (months_start[window] - long_window) > 0:
break
port_return_ma = log_return['portfolio'].values.reshape(total_samples, -1)
number_of_threads = 256
num_months = len(months_start) - window
if num_months == 0: # this case, use all the data to compute
num_months = 1
number_of_blocks = num_months * total_samples
leverage = cupy.zeros((total_samples, num_months))
leverage_for_target_vol[(number_of_blocks, ), (number_of_threads, ), 0,
256 * MAX_YEARS * 8](leverage, port_return_ma,
months_start, num_months,
window, long_window,
short_window, target_vol)
return leverage, all_dates, window
| fsi-samples-main | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/kernels.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow.portsSpecSchema import (ConfSchema,
PortsSpecSchema,
NodePorts)
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
import cudf
from xgboost import Booster
import pandas as pd
from matplotlib.figure import Figure
from dask.dataframe import DataFrame as DaskDataFrame
import shap
class ShapSummaryPlotPlotNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.SHAP_INPUT_PORT_NAME = 'shap_in'
self.MODEL_INPUT_PORT_NAME = 'model_in'
self.DATA_INPUT_PORT_NAME = 'data_in'
self.OUTPUT_PORT_NAME = 'summary_plot'
port_type = PortsSpecSchema.port_type
port_inports = {
self.SHAP_INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
self.MODEL_INPUT_PORT_NAME: {
port_type: [
"xgboost.Booster", "builtins.dict",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
self.DATA_INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
}
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "matplotlib.figure.Figure"
},
}
meta_inports = {
self.MODEL_INPUT_PORT_NAME: {},
self.DATA_INPUT_PORT_NAME: {},
self.SHAP_INPUT_PORT_NAME: {}
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
MetaDataSchema.META_DATA: {}
}
}
self.template_ports_setup(in_ports=port_inports,
out_ports=port_outports)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "Shap Summary Plot Node",
"type": "object",
"description": """Plot the Shap summary""",
"properties": {
"max_display": {
"type": "integer",
"description": """
How many top features to include in the plot
(default is 20, or 7 for interaction plots)
""",
"default": 20
},
"plot_type": {
"type": "string",
"description": """
"dot" (default for single output), "bar" (default for
multi-output), "violin",
""",
"enum": ["dot", "bar", "violin"]
}
}
}
# input_meta = self.get_input_meta()
ui = {
}
return ConfSchema(json=json, ui=ui)
def ports_setup(self):
types = [cudf.DataFrame,
DaskDataFrame,
pd.DataFrame]
port_type = PortsSpecSchema.port_type
input_ports = {
self.SHAP_INPUT_PORT_NAME: {
port_type: types
},
self.MODEL_INPUT_PORT_NAME: {
port_type: [Booster, dict]
},
self.DATA_INPUT_PORT_NAME: {
port_type: types
}
}
output_ports = {
self.OUTPUT_PORT_NAME: {
port_type: Figure
}
}
input_connections = self.get_connected_inports()
if (self.SHAP_INPUT_PORT_NAME in input_connections):
determined_type = input_connections[self.SHAP_INPUT_PORT_NAME]
input_ports[self.SHAP_INPUT_PORT_NAME] = {
port_type: determined_type
}
if (self.DATA_INPUT_PORT_NAME in input_connections):
determined_type = input_connections[self.DATA_INPUT_PORT_NAME]
input_ports[self.DATA_INPUT_PORT_NAME] = {
port_type: determined_type
}
if (self.MODEL_INPUT_PORT_NAME in input_connections):
determined_type = input_connections[self.MODEL_INPUT_PORT_NAME]
input_ports[self.MODEL_INPUT_PORT_NAME] = {
port_type: determined_type
}
ports = NodePorts(inports=input_ports, outports=output_ports)
return ports
def process(self, inputs):
"""
Plot the SHAP summary for the input data. The SHAP values come from
the `shap_in` port, the feature dataframe from the `data_in` port,
and the plot options (e.g. `max_display`, `plot_type`) from the
node's `conf`
Arguments
-------
inputs: dict
dict of input dataframes keyed by port name.
Returns
-------
Figure
"""
import matplotlib.pyplot as pl
pl.figure()
shap_values = inputs[self.SHAP_INPUT_PORT_NAME]
df = inputs[self.DATA_INPUT_PORT_NAME]
if isinstance(shap_values, DaskDataFrame):
shap_values = shap_values.compute()
if isinstance(df, DaskDataFrame):
df = df.compute()
if isinstance(shap_values, cudf.DataFrame):
shap_values = shap_values.values.get()
else:
shap_values = shap_values.values
if isinstance(df, cudf.DataFrame):
df = df.to_pandas()
input_meta = self.get_input_meta()
required_cols = input_meta[
self.MODEL_INPUT_PORT_NAME]['train']
df = df[required_cols]
self.conf['show'] = False
# max_display = self.conf.get('max_display', 20)
# plot_type = self.conf.get('plot_type', 'bar')
shap.summary_plot(shap_values[:, :-1],
df, **self.conf)
f = pl.gcf()
return {self.OUTPUT_PORT_NAME: f}
| fsi-samples-main | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/shapSummaryPlotNode.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
from greenflow.dataframe_flow import ConfSchema, PortsSpecSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from greenflow.dataframe_flow import Node
import math
import cupy
import cudf
class NRPWeightNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'in'
self.OUTPUT_PORT_NAME = 'out'
self.delayed_process = True
self.infer_meta = False
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
}
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:in}"
},
}
self.template_ports_setup(in_ports=port_inports,
out_ports=port_outports)
def conf_schema(self):
json = {
"title": "Compute the Sharpe Ratio",
"type": "object",
"properties": {
},
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def update(self):
TemplateNodeMixin.update(self)
meta_outports = self.template_meta_setup().outports
meta_inports = self.template_meta_setup().inports
required = {
'month': 'int16',
'year': 'int16',
'sample_id': 'int64',
}
meta_inports[self.INPUT_PORT_NAME] = required
json = {}
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME in input_meta:
assets = int(math.sqrt(len(input_meta[self.INPUT_PORT_NAME]) - 3))
for i in range(assets):
json[i] = 'float64'
json.update(required)
meta_outports[self.OUTPUT_PORT_NAME] = json
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def process(self, inputs):
df = inputs[self.INPUT_PORT_NAME]
all_sample_ids = df['sample_id'].unique()
total_samples = len(all_sample_ids)
# df = df.drop('datetime', axis=1)
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME in input_meta:
assets = int(math.sqrt(len(input_meta[self.INPUT_PORT_NAME]) - 3))
output = {}
data_ma = df[list(range(assets*assets))].values
data_ma = data_ma.reshape(total_samples, -1, assets, assets)
diagonzied = cupy.diagonal(data_ma, 0, 2, 3)
diagonzied = cupy.sqrt(1.0 / diagonzied) # inverse volatility (1/std)
diagonzied = diagonzied / diagonzied.sum(axis=2, keepdims=True)
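# the rows now hold naive risk-parity (inverse-volatility) weights:
# w_i = (1/sigma_i) / sum_j (1/sigma_j), so lower-variance assets
# receive proportionally larger allocations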
diagonzied = diagonzied.reshape(-1, assets)
weight_df = cudf.DataFrame(diagonzied)
weight_df['month'] = df['month']
weight_df['year'] = df['year']
weight_df['sample_id'] = df['sample_id']
output.update({self.OUTPUT_PORT_NAME: weight_df})
return output
| fsi-samples-main | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/nrpWeightNode.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
import cudf
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow import PortsSpecSchema
from greenflow.dataframe_flow import ConfSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from .kernels import run_bootstrap
import cupy
import dask
import dask_cudf
from collections import OrderedDict
class BootstrapNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'in'
self.OUTPUT_PORT_NAME = 'out'
self.OUTPUT_DASK_PORT = 'dask_df'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:in}"
},
self.OUTPUT_DASK_PORT: {
port_type: ["dask_cudf.DataFrame", "dask.dataframe.DataFrame"]
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
def update(self):
TemplateNodeMixin.update(self)
meta_outports = self.template_meta_setup().outports
meta_inports = self.template_meta_setup().inports
col_required = {
"date": "date"
}
input_meta = self.get_input_meta()
json = OrderedDict()
if self.INPUT_PORT_NAME in input_meta:
assets = len(input_meta[self.INPUT_PORT_NAME]) - 1
for i in range(assets):
json[i] = 'float64'
json['date'] = "datetime64[ns]"
json['sample_id'] = 'int64'
json['year'] = 'int16'
json['month'] = 'int16'
meta_inports[self.INPUT_PORT_NAME] = col_required
meta_outports[self.OUTPUT_DASK_PORT] = json
meta_outports[self.OUTPUT_PORT_NAME] = json
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "Generate bootstrap dataframe",
"type": "object",
"properties": {
"samples": {
"type": "integer",
"description": "Number of samples to bootstrap"
},
"partitions": {
"type": "integer",
"description": "Number of partitions for Dask Dataframe"
},
"offset": {
"type": "integer",
"description": "Sample id offset",
"default": 0
},
},
"required": ["samples"],
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def _process(self, df, partition_id):
number_samples = self.conf['samples']
all_dates = df['date']
cols = list(df.columns)
cols.remove('date')
price_matrix = df[cols].values
result = run_bootstrap(price_matrix, number_samples=number_samples)
# print('bootstrap')
total_samples, assets, length = result.shape
datetime_col = cudf.concat([all_dates] *
total_samples).reset_index(drop=True)
result = result.transpose([0, 2, 1]).reshape(-1, assets)
df = cudf.DataFrame(result)
df['date'] = datetime_col
sample_id = cupy.repeat(cupy.arange(0, total_samples), length)
df['sample_id'] = sample_id + partition_id * number_samples
df['year'] = df['date'].dt.year
df['month'] = df['date'].dt.month - 1
return df
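# the result is a long-format frame: one row per (sample, date), with
# integer asset columns 0..assets-1 plus date/sample_id/year/month ids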
def process(self, inputs):
df = inputs[self.INPUT_PORT_NAME]
# df = df.drop('datetime', axis=1)
output = {}
if self.outport_connected(self.OUTPUT_PORT_NAME):
offset = self.conf.get('offset', 0)
out_df = self._process(df, offset)
output.update({self.OUTPUT_PORT_NAME: out_df})
if self.outport_connected(self.OUTPUT_DASK_PORT):
partitions = self.conf['partitions']
out_dfs = [
dask.delayed(self._process)(df, i) for i in range(partitions)
]
meta = self.meta_setup().outports[self.OUTPUT_DASK_PORT]
meta['date'] = 'datetime64[ns]'
dask_df = dask_cudf.from_delayed(
out_dfs, meta=meta)
output.update({self.OUTPUT_DASK_PORT: dask_df})
return output
| fsi-samples-main | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/bootstrapNode.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
import cudf
from greenflow.dataframe_flow import Node, MetaData
from greenflow.dataframe_flow import NodePorts, PortsSpecSchema
from greenflow.dataframe_flow.util import get_file_path
from greenflow.dataframe_flow import ConfSchema
class LoadCsvNode(Node):
def ports_setup(self):
input_ports = {}
output_ports = {
'df_out': {
PortsSpecSchema.port_type: cudf.DataFrame
}
}
return NodePorts(inports=input_ports, outports=output_ports)
def conf_schema(self):
json = {
"title": "Load stock data",
"type": "object",
"properties": {
"csvfile": {
"type": "string",
"description": "csv tick data"
},
"17assets": {
"type": "boolean",
"description": "17 assets dataset"
}
},
"required": ["csvfile"],
}
ui = {
"csvfile": {"ui:widget": "CsvFileSelector"}
}
return ConfSchema(json=json, ui=ui)
def init(self):
pass
def meta_setup(self):
df_out_10 = {
'date': 'date',
'AAA': 'float64',
'BBB': 'float64',
'CCC': 'float64',
'DDD': 'float64',
'EEE': 'float64',
'FFF': 'float64',
'GGG': 'float64',
'HHH': 'float64',
'III': 'float64',
'JJJ': 'float64',
}
df_out_17 = {
'date': 'date',
'BZA Index (Equities)': 'float64',
'CLA Comdty (Commodities)': 'float64',
'CNA Comdty (Fixed Income)': 'float64',
'ESA Index (Equities)': 'float64',
'G A Comdty (Fixed Income)': 'float64',
'GCA Comdty (Commodities)': 'float64',
'HIA Index (Equities)': 'float64',
'NKA Index (Equities)': 'float64',
'NQA Index (Equities)': 'float64',
'RXA Comdty (Fixed Income)': 'float64',
'SIA Comdty (Commodities)': 'float64',
'SMA Index (Equities)': 'float64',
'TYA Comdty (Fixed Income)': 'float64',
'VGA Index (Equities)': 'float64',
'XMA Comdty (Fixed Income)': 'float64',
'XPA Index (Equities)': 'float64',
'Z A Index (Equities)': 'float64',
}
assets_17 = self.conf.get('17assets', False)
columns_out = {
}
columns_out['df_out'] = df_out_17 if assets_17 else df_out_10
return MetaData(inports={}, outports=columns_out)
def process(self, inputs):
import dask.distributed
try:
client = dask.distributed.client.default_client()
except ValueError:
from dask_cuda import LocalCUDACluster
cluster = LocalCUDACluster()
from dask.distributed import Client
client = Client(cluster) # noqa
print('start new Cluster')
filename = get_file_path(self.conf['csvfile'])
df = cudf.read_csv(filename, parse_dates=[0])
df.columns = ['date']+[c for c in df.columns][1:]
output = {}
if self.outport_connected('df_out'):
output.update({'df_out': df})
return output
| fsi-samples-main | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/loadCsvNode.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
from greenflow.dataframe_flow import ConfSchema, PortsSpecSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow import Node
from dask.dataframe import DataFrame as DaskDataFrame
class RSquaredNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'in'
self.OUTPUT_PORT_NAME = 'out'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "builtins.float"
},
}
meta_inports = {
self.INPUT_PORT_NAME: {}
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_ADDITION,
MetaDataSchema.META_REF_INPUT: self.INPUT_PORT_NAME,
MetaDataSchema.META_DATA: {}
}
}
self.template_ports_setup(in_ports=port_inports,
out_ports=port_outports)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "Compute the R-squared score for regression problems",
"type": "object",
"properties": {
"columns": {
"type": "array",
"items": {
"type": "string"
},
"description": """Two columns used to compute the
R-squared score""",
"minItems": 2,
"maxItems": 2
}
},
"required": ["columns"]
}
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME in input_meta:
col_from_inport = input_meta[self.INPUT_PORT_NAME]
enums = [col for col in col_from_inport.keys()]
json['properties']['columns']['items']['enum'] = enums
ui = {}
return ConfSchema(json=json, ui=ui)
else:
ui = {
"column": {"ui:widget": "text"}
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
df = inputs[self.INPUT_PORT_NAME]
# df = df.drop('datetime', axis=1)
output = {}
subdf = df[self.conf['columns']]
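# R-squared of a simple linear fit between two series equals their
# squared Pearson correlation, hence corr()[0, 1] ** 2 below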
if isinstance(subdf, DaskDataFrame):
result = subdf.corr().compute().values[0, 1]**2
else:
result = subdf.corr().values[0, 1]**2
output.update({self.OUTPUT_PORT_NAME: result.item()})
return output
| fsi-samples-main | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/rSquaredNode.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
from greenflow.dataframe_flow import ConfSchema, PortsSpecSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from greenflow.dataframe_flow import Node
from .kernels import compute_cov_distance
import cupy
import cudf
class DistanceNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.delayed_process = True
self.infer_meta = False
self.INPUT_PORT_NAME = 'in'
self.COV_DF = 'cov_df'
self.MEAN_DF = 'mean_df'
self.STD_DF = 'std_df'
self.CORR_DF = 'corr_df'
self.DISTANCE_DF = 'distance_df'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
}
}
port_outports = {
self.MEAN_DF: {
port_type: "${port:in}"
},
self.STD_DF: {
port_type: "${port:in}"
},
self.COV_DF: {
port_type: "${port:in}"
},
self.CORR_DF: {
port_type: "${port:in}"
},
self.DISTANCE_DF: {
port_type: "${port:in}"
}
}
self.template_ports_setup(in_ports=port_inports,
out_ports=port_outports)
def update(self):
TemplateNodeMixin.update(self)
meta_outports = self.template_meta_setup().outports
meta_inports = self.template_meta_setup().inports
sub_dict = {
'year': 'int16',
'month': 'int16',
'sample_id': 'int64',
}
required = {
"date": "datetime64[ns]",
}
required.update(sub_dict)
meta_inports[self.INPUT_PORT_NAME] = required
json_cov = {}
json_dis = {}
json_mean = {}
json_corr = {}
json_std = {}
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME in input_meta:
assets = len(input_meta[self.INPUT_PORT_NAME]) - 4
for i in range(assets*assets):
json_cov[i] = 'float64'
for i in range(assets):
json_mean[i] = 'float64'
json_std[i] = 'float64'
for i in range(assets*(assets-1)//2):
json_dis[i] = 'float64'
json_corr[i] = 'float64'
json_cov.update(sub_dict)
json_dis.update(sub_dict)
json_mean.update(sub_dict)
json_std.update(sub_dict)
json_corr.update(sub_dict)
meta_outports[self.MEAN_DF] = json_mean
meta_outports[self.STD_DF] = json_std
meta_outports[self.COV_DF] = json_cov
meta_outports[self.CORR_DF] = json_corr
meta_outports[self.DISTANCE_DF] = json_dis
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "Compute the Distance Matrix and Cov df",
"type": "object",
"properties": {
"window": {
'type': "integer",
"title": "Window size",
"description": """the number of months used to compute the
distance and vairance"""
}
},
"required": ["window"],
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
df = inputs[self.INPUT_PORT_NAME]
all_sample_ids = df['sample_id'].unique()
total_samples = len(all_sample_ids)
window = self.conf['window']
means, cov, distance, all_dates = compute_cov_distance(total_samples,
df,
window=window)
total_samples, num_months, assets, assets = cov.shape
months_id = all_dates.dt.year*12 + (all_dates.dt.month-1)
months_id = months_id - months_id.min()
mid = (cupy.arange(months_id.max() + 1) +
(all_dates.dt.month - 1)[0])[window:]
minyear = all_dates.dt.year.min()
if len(mid) == 0:
mid = cupy.array([0])
months = mid % 12
years = mid // 12 + minyear
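# mid enumerates the calendar months present in the data (offset by
# the first month and trimmed by the look-back window), so each
# rebalance step maps back to a (year, month) label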
output = {}
# print(num_months, len(mid))
if self.outport_connected(self.MEAN_DF):
df_mean = cudf.DataFrame(
means.reshape(total_samples*num_months, -1))
df_mean['year'] = cupy.concatenate(
[years]*total_samples).astype(cupy.int16)
df_mean['month'] = cupy.concatenate(
[months]*total_samples).astype(cupy.int16)
df_mean['sample_id'] = cupy.repeat(cupy.arange(
total_samples) + all_sample_ids.min(), len(mid))
output.update({self.MEAN_DF: df_mean})
if self.outport_connected(self.STD_DF):
data_ma = cov.reshape(total_samples*num_months, assets, assets)
diagonzied = cupy.diagonal(data_ma, 0, 1, 2) # get var
diagonzied = cupy.sqrt(diagonzied) # get std
df_std = cudf.DataFrame(diagonzied)
df_std['year'] = cupy.concatenate(
[years]*total_samples).astype(cupy.int16)
df_std['month'] = cupy.concatenate(
[months]*total_samples).astype(cupy.int16)
df_std['sample_id'] = cupy.repeat(cupy.arange(
total_samples) + all_sample_ids.min(), len(mid))
output.update({self.STD_DF: df_std})
if self.outport_connected(self.COV_DF):
df_cov = cudf.DataFrame(cov.reshape(total_samples*num_months, -1))
df_cov['year'] = cupy.concatenate(
[years]*total_samples).astype(cupy.int16)
df_cov['month'] = cupy.concatenate(
[months]*total_samples).astype(cupy.int16)
df_cov['sample_id'] = cupy.repeat(cupy.arange(
total_samples) + all_sample_ids.min(), len(mid))
output.update({self.COV_DF: df_cov})
if self.outport_connected(self.CORR_DF):
dis_ma = distance.reshape(total_samples*num_months, -1)
dis_ma = 1 - 2.0 * dis_ma
df_corr = cudf.DataFrame(dis_ma)
df_corr['year'] = cupy.concatenate(
[years]*total_samples).astype(cupy.int16)
df_corr['month'] = cupy.concatenate(
[months]*total_samples).astype(cupy.int16)
df_corr['sample_id'] = cupy.repeat(cupy.arange(
total_samples) + all_sample_ids.min(), len(mid))
output.update({self.CORR_DF: df_corr})
if self.outport_connected(self.DISTANCE_DF):
df_dis = cudf.DataFrame(distance.reshape(total_samples*num_months,
-1))
df_dis['year'] = cupy.concatenate(
[years]*total_samples).astype(cupy.int16)
df_dis['month'] = cupy.concatenate(
[months]*total_samples).astype(cupy.int16)
df_dis['sample_id'] = cupy.repeat(cupy.arange(
total_samples) + all_sample_ids.min(), len(mid))
output.update({self.DISTANCE_DF: df_dis})
return output
| fsi-samples-main | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/distanceNode.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow import PortsSpecSchema
from greenflow.dataframe_flow import ConfSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from collections import OrderedDict
class RawDataNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'in'
self.OUTPUT_PORT_NAME = 'out'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame"
]
},
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:in}"
},
}
self.template_ports_setup(in_ports=port_inports,
out_ports=port_outports)
def conf_schema(self):
json = {
"title": "Pass along the raw dataframe dataframe",
"type": "object",
"properties": {
},
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def update(self):
TemplateNodeMixin.update(self)
meta_outports = self.template_meta_setup().outports
meta_inports = self.template_meta_setup().inports
required = {
"date": "date"
}
input_meta = self.get_input_meta()
json = OrderedDict()
if self.INPUT_PORT_NAME in input_meta:
assets = len(input_meta[self.INPUT_PORT_NAME]) - 1
for i in range(assets):
json[i] = 'float64'
json['date'] = "datetime64[ns]"
json['sample_id'] = 'int64'
json['year'] = 'int16'
json['month'] = 'int16'
meta_inports[self.INPUT_PORT_NAME] = required
meta_outports[self.OUTPUT_PORT_NAME] = json
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def _process(self, df, partition_id):
all_dates = df['date']
cols = list(df.columns)
cols.remove('date')
df = df[cols]
df.columns = list(range(len(df.columns)))
df['date'] = all_dates
df['sample_id'] = partition_id
df['year'] = df['date'].dt.year
df['month'] = df['date'].dt.month - 1
return df
def process(self, inputs):
df = inputs[self.INPUT_PORT_NAME]
# df = df.drop('datetime', axis=1)
output = {}
offset = self.conf.get('offset', 0)
out_df = self._process(df, offset)
output.update({self.OUTPUT_PORT_NAME: out_df})
return output
| fsi-samples-main | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/rawDataNode.py |
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
from greenflow.dataframe_flow import ConfSchema, PortsSpecSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from greenflow.dataframe_flow import Node
from .kernels import get_drawdown
import cupy
import cudf
class MaxDrawdownNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.delayed_process = True
self.infer_meta = False
self.INPUT_PORT_NAME = 'logreturn_df'
self.OUTPUT_PORT_NAME = "out"
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
}
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:logreturn_df}"
},
}
self.template_ports_setup(in_ports=port_inports,
out_ports=port_outports)
def conf_schema(self):
json = {
"title": "Compute the Maximum Drawdown Matrix Dataframe",
"type": "object",
"properties": {
"window": {
'type': "integer",
"title": "Window size",
"description": """the number of months used to compute the
distance and vairance"""
},
"negative": {
'type': "boolean",
"title": "Negative return",
"description": """Compute
max drawdown on negative return""",
"default": False
}
},
"required": ["window"],
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def update(self):
TemplateNodeMixin.update(self)
meta_outports = self.template_meta_setup().outports
meta_inports = self.template_meta_setup().inports
sub_dict = {
'year': 'int16',
'month': 'int16',
'sample_id': 'int64',
}
required = {
"date": "datetime64[ns]",
}
required.update(sub_dict)
meta_inports[self.INPUT_PORT_NAME] = required
json_drawdown = {}
input_meta = self.get_input_meta()
if self.INPUT_PORT_NAME in input_meta:
assets = len(input_meta[self.INPUT_PORT_NAME]) - 4
for i in range(assets):
json_drawdown[i] = 'float64'
json_drawdown.update(sub_dict)
meta_outports[self.OUTPUT_PORT_NAME] = json_drawdown
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def process(self, inputs):
df = inputs[self.INPUT_PORT_NAME]
all_sample_ids = df['sample_id'].unique()
total_samples = len(all_sample_ids)
window = self.conf['window']
negative = self.conf.get("negative", False)
drawdown, all_dates = get_drawdown(df, total_samples,
negative=negative, window=window)
total_samples, num_months, assets = drawdown.shape
months_id = all_dates.dt.year*12 + (all_dates.dt.month-1)
months_id = months_id - months_id.min()
mid = (cupy.arange(months_id.max() + 1) +
(all_dates.dt.month - 1)[0])[window:]
minyear = all_dates.dt.year.min()
if len(mid) == 0:
mid = cupy.array([0])
months = mid % 12
years = mid // 12 + minyear
output = {}
df_drawdown = cudf.DataFrame(
drawdown.reshape(total_samples*num_months, -1))
df_drawdown['year'] = cupy.concatenate(
[years]*total_samples).astype(cupy.int16)
df_drawdown['month'] = cupy.concatenate(
[months]*total_samples).astype(cupy.int16)
df_drawdown['sample_id'] = cupy.repeat(cupy.arange(
total_samples) + all_sample_ids.min(), len(mid))
output.update({self.OUTPUT_PORT_NAME: df_drawdown})
return output
| fsi-samples-main | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/maxDrawdownNode.py |