filename (string lengths 13-19) | text (string lengths 134-1.04M)
---|---
the-stack_106_18177
|
import tensorflow as tf
import numpy as np
import os
from model.tf_l0_models import *
class New_Start_L0_Mnist( object ):
def __init__(self, opts):
self.sess = tf.Session()
self.opts = opts
self.init()
self.saver = tf.train.Saver( max_to_keep = 100 )
def init( self ):
self.add_input_placeholder()
self.construct_clf()
self.construct_loss()
self.sess.run( [ tf.global_variables_initializer() ] )
if not os.path.exists( self.opts.cpt_path ): os.mkdir( self.opts.cpt_path )
def add_input_placeholder( self ):
opts= self.opts
self.in_sample = tf.placeholder( tf.float32, [ None ] + opts.sample_shape )
self.in_label = tf.placeholder( tf.float32, [ None ] + opts.label_shape )
def construct_clf( self ):
with tf.variable_scope( "nn" ) as scope:
self.l0_obj = L0_Dense( 784, 300, activation = tf.nn.leaky_relu, weight_decay = 1., lamba = 1/50000 , name = "l0" )
self.l0_train, self.l0_test, self.l0_regu = self.l0_obj.build( self.in_sample, self.in_sample )
self.l1_obj = L0_Dense( 300, 100, activation = tf.nn.leaky_relu, weight_decay = 1., lamba = 1/50000, name = "l1" )
self.l1_train, self.l1_test, self.l1_regu = self.l1_obj.build( self.l0_train, self.l0_test )
self.logit_obj = L0_Dense( 100, 10, weight_decay = 1./10, lamba = 1/50000., name = "logit" )
self.logit_train, self.logit_test, self.logit_regu = self.logit_obj.build( self.l1_train, self.l1_test )
self.prediction = tf.nn.softmax( self.logit_test )
def construct_loss( self ):
self.classifier_loss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits_v2( logits = self.logit_train, labels = self.in_label ) )
self.classifier_loss = self.classifier_loss \
-0.5*( tf.reduce_mean( self.l0_regu ) + tf.reduce_mean( self.l1_regu ) )
#self.loss = tf.reduce_mean( tf.square( self.in_label - self.prediction ) )
self.optim_nn = tf.train.AdamOptimizer( self.opts.lr, beta1 = 0.9, beta2 = 0.99 ).minimize( loss = self.classifier_loss, var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='nn') )
def calc_sparsity( self, in_sample, in_label, thresh = 0.95 ):
l0_feat_gate_test, l1_feat_gate_test = self.sess.run( [ self.l0_obj.mask_test, self.l1_obj.mask_test ],
feed_dict = { self.in_sample : in_sample, self.in_label: in_label } )
l0_feat_gate_test = l0_feat_gate_test.ravel()
l1_feat_gate_test = l1_feat_gate_test.ravel()
print( l0_feat_gate_test[ :10 ] )
l0 = np.sum( l0_feat_gate_test <= 0.01 )
l1 = np.sum( l1_feat_gate_test <= 0.01 )
print( "l0 gate", l0, "out of", l0_feat_gate_test.shape[0], "l1_gate", l1, "out of", l1_feat_gate_test.shape )
def train( self ):
self.loss_list = []
self.accu_list = []
max_accu = 0
max_accu_iter = 0
for i in range( 0, self.opts.train_iter + 1 ):
in_sample, in_label = self.opts.data_source.next_batch()
# print(in_label)
self.sess.run( self.optim_nn, feed_dict = { self.in_sample : in_sample, self.in_label: in_label } )
# for p in range( 5 ):
# self.sess.run( self.optim_gate, feed_dict = { self.in_sample : in_sample, self.in_label: in_label } )
if i % 100 == 0:
nn_loss = self.sess.run( self.classifier_loss, feed_dict = { self.in_sample : in_sample, self.in_label: in_label } )
# gate_loss = self.sess.run( self.gate_loss, feed_dict = { self.in_sample : in_sample, self.in_label: in_label } )
print( "iter: ", i, "NN LOSS: ", nn_loss )
print("-----------------")
if i % 1000 == 0:
in_sample, in_label = self.opts.data_source.get_test()
self.calc_sparsity( in_sample, in_label )
accu = self.predict( in_sample, in_label )
if accu > max_accu:
max_accu = accu
max_accu_iter = i
print( "Iter: ", i, "Accu: ", accu, "Max Accu: ", max_accu, "Max Accu Iter: ", max_accu_iter )
print("-------------------------------------")
self.accu_list.append( accu )
if i != 0 and i % 20000 == 0:
path = self.opts.cpt_path +"/"+ str( i )
os.mkdir( path )
path += "/model.ckpt"
self.saver.save( self.sess, path )
def predict( self, sample, label ):
res = self.sess.run( self.prediction, feed_dict = { self.in_sample : sample, self.in_label: label } )
res = np.argmax( res, axis = 1 )
true = np.argmax( label, axis = 1 )
print( res[:10] )
print( true[:10])
accu = np.sum(res == true) / res.shape[0]
return accu
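def _example_usage():
    # Illustrative sketch only, not part of the original file: shows how the
    # classifier above is wired up. The `opts` attribute names are inferred
    # from how `opts` is used in the class; the data source is a placeholder
    # that must expose next_batch() and get_test().
    import types
    opts = types.SimpleNamespace(
        sample_shape=[784],        # flattened 28x28 MNIST images
        label_shape=[10],          # one-hot digit labels
        lr=1e-3,                   # Adam learning rate
        cpt_path="./checkpoints",  # checkpoint directory
        train_iter=100000,         # number of training iterations
        data_source=None,          # replace with a real batch feeder
    )
    model = New_Start_L0_Mnist(opts)
    model.train()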
|
the-stack_106_18178
|
from PyQt5 import uic
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QApplication, QWidget, QMessageBox
import sys
import modeller
class MainWindow(QWidget):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self._ui = uic.loadUi("window.ui", self)
@property
def parameters(self):
u = self._ui
return {
'client_m': float(u.le_client_m.text()),
'client_d': float(u.le_client_d.text()),
'op0_m': float(u.le_op0_m.text()),
'op0_d': float(u.le_op0_d.text()),
'op1_m': float(u.le_op1_m.text()),
'op1_d': float(u.le_op1_d.text()),
'op2_m': float(u.le_op2_m.text()),
'op2_d': float(u.le_op2_d.text()),
'comp0_m': float(u.le_comp0_m.text()),
'comp1_m': float(u.le_comp1_m.text()),
'c_count': float(u.le_client_count.text())
}
@pyqtSlot()  # without the decorator, the auto-connected clicked handler may fire twice
def on_pushButton_model_clicked(self):
self._ui.le_lost_clients.setText('{:.4f}'.format(modeller.event_based_modelling(**self.parameters)))
def main():
app = QApplication(sys.argv)
window = MainWindow()
window.show()
return app.exec()
if __name__ == '__main__':
sys.exit(main())
|
the-stack_106_18180
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests the format of human readable logs.
It checks the response of the API configuration calls and the logs that show
up in the configured logging FIFO.
"""
import json
import os
import re
from time import strptime
import host_tools.logging as log_tools
# Array of supported log levels of the current logging system.
# Do not change order of values inside this array as logic depends on this.
LOG_LEVELS = ["ERROR", "WARN", "INFO", "DEBUG"]
def to_formal_log_level(log_level):
"""Convert a pretty-print log level into the related log level code.
Turns a pretty-formatted log level (i.e. Warning) into the one actually
being logged (i.e. WARN).
:param log_level: pretty formatted log level
:return: actual level being logged
"""
if log_level == "Error":
return LOG_LEVELS[0]
if log_level == "Warning":
return LOG_LEVELS[1]
if log_level == "Info":
return LOG_LEVELS[2]
if log_level == "Debug":
return LOG_LEVELS[3]
return ""
def check_log_message(log_str, instance_id, level, show_level, show_origin):
"""Ensure correctness of the logged message.
Parse the string representing the logs and look for the parts
that should be there.
The log line should look like this:
YYYY-MM-DDTHH:MM:SS.NNNNNNNNN [ID:LEVEL:FILE:LINE] MESSAGE
where LEVEL and FILE:LINE are both optional.
e.g.:
`2018-09-09T12:52:00.123456789 [MYID:WARN:/path/to/file.rs:52] warning`
"""
(timestamp, tag, _) = log_str.split(' ')[:3]
timestamp = timestamp[:-10]
strptime(timestamp, "%Y-%m-%dT%H:%M:%S")
pattern = "\\[(" + instance_id + ")"
if show_level:
pattern += ":(" + "|".join(LOG_LEVELS) + ")"
if show_origin:
pattern += ":([^:]+/[^:]+):([0-9]+)"
pattern += "\\]"
mo = re.match(pattern, tag)
assert mo is not None
if show_level:
tag_level = mo.group(2)
tag_level_no = LOG_LEVELS.index(tag_level)
configured_level_no = LOG_LEVELS.index(to_formal_log_level(level))
assert tag_level_no <= configured_level_no
def test_no_origin_logs(test_microvm_with_ssh):
"""Check that logs do not contain the origin (i.e file and line number)."""
_test_log_config(
microvm=test_microvm_with_ssh,
show_level=True,
show_origin=False
)
def test_no_level_logs(test_microvm_with_ssh):
"""Check that logs do not contain the level."""
_test_log_config(
microvm=test_microvm_with_ssh,
show_level=False,
show_origin=True
)
def test_no_nada_logs(test_microvm_with_ssh):
"""Check that logs do not contain either level or origin."""
_test_log_config(
microvm=test_microvm_with_ssh,
show_level=False,
show_origin=False
)
def test_info_logs(test_microvm_with_ssh):
"""Check output of logs when minimum level to be displayed is info."""
_test_log_config(microvm=test_microvm_with_ssh)
def test_warn_logs(test_microvm_with_ssh):
"""Check output of logs when minimum level to be displayed is warning."""
_test_log_config(
microvm=test_microvm_with_ssh,
log_level='Warning'
)
def test_error_logs(test_microvm_with_ssh):
"""Check output of logs when minimum level of logs displayed is error."""
_test_log_config(
microvm=test_microvm_with_ssh,
log_level='Error'
)
def test_dirty_page_metrics(test_microvm_with_api):
"""Check the `dirty_pages` metric."""
microvm = test_microvm_with_api
microvm.spawn()
microvm.basic_config()
# Configure logging.
log_fifo_path = os.path.join(microvm.path, 'log_fifo')
metrics_fifo_path = os.path.join(microvm.path, 'metrics_fifo')
log_fifo = log_tools.Fifo(log_fifo_path)
metrics_fifo = log_tools.Fifo(metrics_fifo_path)
response = microvm.logger.put(
log_fifo=microvm.create_jailed_resource(log_fifo.path),
metrics_fifo=microvm.create_jailed_resource(metrics_fifo.path),
level='Error',
show_level=False,
show_log_origin=False,
options=['LogDirtyPages']
)
assert microvm.api_session.is_good_response(response.status_code)
microvm.start()
lines = metrics_fifo.sequential_reader(3)
for line in lines:
assert int(json.loads(line)['memory']['dirty_pages']) >= 0
# TODO force metrics flushing and get real data without waiting for
# Firecracker to flush periodically.
def log_file_contains_strings(log_fifo, string_list):
"""Check if the log file contains all strings in string_list.
We search for each string in the string_list array only in the
first 100 lines of the log.
"""
log_lines = log_fifo.sequential_reader(100)
for log_line in log_lines:
for text in string_list:
if text in log_line:
string_list.remove(text)
break
if not string_list:
return True
return False
def test_api_requests_logs(test_microvm_with_api):
"""Test that API requests are logged."""
microvm = test_microvm_with_api
microvm.spawn()
microvm.basic_config()
# Configure logging.
log_fifo_path = os.path.join(microvm.path, 'log_fifo')
metrics_fifo_path = os.path.join(microvm.path, 'metrics_fifo')
log_fifo = log_tools.Fifo(log_fifo_path)
metrics_fifo = log_tools.Fifo(metrics_fifo_path)
response = microvm.logger.put(
log_fifo=microvm.create_jailed_resource(log_fifo.path),
metrics_fifo=microvm.create_jailed_resource(metrics_fifo.path),
level='Info',
show_level=True,
show_log_origin=True,
options=[]
)
assert response.status_code == 204
expected_log_strings = []
# Check that a Put request on /machine-config is logged.
response = microvm.machine_cfg.put(vcpu_count=4)
assert microvm.api_session.is_good_response(response.status_code)
# We are not interested in the actual body. Just check that the log
# message also has the string "body" in it.
expected_log_strings.append(
"The API server received a synchronous Put request "
"on \"/machine-config\" with body"
)
# Check that a Get request on /machine-config is logged without the
# body.
response = microvm.machine_cfg.get()
assert response.status_code == 200
expected_log_strings.append(
"The API server received a synchronous Get request "
"on \"/machine-config\"."
)
# Check that all requests on /mmds are logged without the body.
dummy_json = {
'latest': {
'meta-data': {
'ami-id': 'dummy'
}
}
}
response = microvm.mmds.put(json=dummy_json)
assert response.status_code == 204
expected_log_strings.append(
"The API server received a synchronous Put request on \"/mmds\"."
)
response = microvm.mmds.patch(json=dummy_json)
assert response.status_code == 204
expected_log_strings.append(
"The API server received a synchronous Patch request on \"/mmds\"."
)
response = microvm.mmds.get()
assert response.status_code == 200
expected_log_strings.append(
"The API server received a synchronous Get request on \"/mmds\"."
)
assert log_file_contains_strings(log_fifo, expected_log_strings)
# pylint: disable=W0102
def _test_log_config(
microvm,
log_level='Info',
show_level=True,
show_origin=True,
options=[]
):
"""Exercises different scenarios for testing the logging config."""
microvm.spawn()
microvm.basic_config()
# Configure logging.
log_fifo_path = os.path.join(microvm.path, 'log_fifo')
metrics_fifo_path = os.path.join(microvm.path, 'metrics_fifo')
log_fifo = log_tools.Fifo(log_fifo_path)
metrics_fifo = log_tools.Fifo(metrics_fifo_path)
response = microvm.logger.put(
log_fifo=microvm.create_jailed_resource(log_fifo.path),
metrics_fifo=microvm.create_jailed_resource(metrics_fifo.path),
level=log_level,
show_level=show_level,
show_log_origin=show_origin,
options=options
)
assert microvm.api_session.is_good_response(response.status_code)
microvm.start()
lines = log_fifo.sequential_reader(20)
for line in lines:
check_log_message(
line,
microvm.id,
log_level,
show_level,
show_origin
)
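def _example_check_log_message():
    # Illustrative sketch only, not part of the original test suite: feeds the
    # example line from check_log_message's docstring through the same
    # pattern-building logic to show what each regex group captures.
    line = '2018-09-09T12:52:00.123456789 [MYID:WARN:/path/to/file.rs:52] warning'
    tag = line.split(' ')[1]
    pattern = "\\[(MYID):(" + "|".join(LOG_LEVELS) + "):([^:]+/[^:]+):([0-9]+)\\]"
    mo = re.match(pattern, tag)
    assert mo.group(1) == 'MYID'              # instance id
    assert mo.group(2) == 'WARN'              # log level token
    assert mo.group(3) == '/path/to/file.rs'  # origin file
    assert mo.group(4) == '52'                # origin line number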
|
the-stack_106_18183
|
import os
from io import StringIO
from unittest.mock import Mock
import pytest
from doit.exceptions import InvalidCommand
from doit import reporter, runner
from doit.cmd_run import Run
from tests.conftest import tasks_sample, CmdFactory
class TestCmdRun(object):
def testProcessRun(self, dependency1, depfile_name):
output = StringIO()
cmd_run = CmdFactory(Run, backend='dbm', dep_file=depfile_name,
task_list=tasks_sample())
result = cmd_run._execute(output)
assert 0 == result
got = output.getvalue().split("\n")[:-1]
assert [". t1", ". t2", ". g1.a", ". g1.b", ". t3"] == got
@pytest.mark.skipif('not runner.MRunner.available()')
def testProcessRunMP(self, dependency1, depfile_name):
output = StringIO()
cmd_run = CmdFactory(Run, backend='dbm', dep_file=depfile_name,
task_list=tasks_sample())
result = cmd_run._execute(output, num_process=1)
assert 0 == result
got = output.getvalue().split("\n")[:-1]
assert [". t1", ". t2", ". g1.a", ". g1.b", ". t3"] == got
def testProcessRunMThread(self, dependency1, depfile_name):
output = StringIO()
cmd_run = CmdFactory(Run, backend='dbm', dep_file=depfile_name,
task_list=tasks_sample())
result = cmd_run._execute(output, num_process=1, par_type='thread')
assert 0 == result
got = output.getvalue().split("\n")[:-1]
assert [". t1", ". t2", ". g1.a", ". g1.b", ". t3"] == got
def testInvalidParType(self, dependency1, depfile_name):
output = StringIO()
cmd_run = CmdFactory(Run, backend='dbm', dep_file=depfile_name,
task_list=tasks_sample())
pytest.raises(InvalidCommand, cmd_run._execute,
output, num_process=1, par_type='not_exist')
def testMP_not_available(self, dependency1, depfile_name,
capsys, monkeypatch):
# make sure MRunner wont be used
monkeypatch.setattr(runner.MRunner, "available",
Mock(return_value=False))
output = StringIO()
cmd_run = CmdFactory(Run, backend='dbm', dep_file=depfile_name,
task_list=tasks_sample())
result = cmd_run._execute(output, num_process=1)
assert 0 == result
got = output.getvalue().split("\n")[:-1]
assert [". t1", ". t2", ". g1.a", ". g1.b", ". t3"] == got
err = capsys.readouterr()[1]
assert "WARNING:" in err
assert "parallel using threads" in err
def testProcessRunFilter(self, depfile_name):
output = StringIO()
cmd_run = CmdFactory(Run, backend='dbm', dep_file=depfile_name,
task_list=tasks_sample(), sel_tasks=["g1.a"])
cmd_run._execute(output)
got = output.getvalue().split("\n")[:-1]
assert [". g1.a"] == got
def testProcessRunSingle(self, depfile_name):
output = StringIO()
cmd_run = CmdFactory(Run, backend='dbm', dep_file=depfile_name,
task_list=tasks_sample(), sel_tasks=["t3"])
cmd_run._execute(output, single=True)
got = output.getvalue().split("\n")[:-1]
# t1 is a dependency of t3 but not included
assert [". t3"] == got
def testProcessRunSingleSubtasks(self, depfile_name):
output = StringIO()
task_list = tasks_sample()
assert task_list[4].name == 'g1.b'
task_list[4].task_dep = ['t3']
cmd_run = CmdFactory(Run, backend='dbm', dep_file=depfile_name,
task_list=task_list, sel_tasks=["g1"])
cmd_run._execute(output, single=True)
got = output.getvalue().split("\n")[:-1]
# t3 is a dependency of g1.b but not included
assert [". g1.a", ". g1.b"] == got
def testProcessRunEmptyFilter(self, depfile_name):
output = StringIO()
cmd_run = CmdFactory(Run, backend='dbm', dep_file=depfile_name,
task_list=tasks_sample(), sel_tasks=[])
cmd_run._execute(output)
got = output.getvalue().split("\n")[:-1]
assert [] == got
class MyReporter(reporter.ConsoleReporter):
def get_status(self, task):
self.outstream.write('MyReporter.start %s\n' % task.name)
class TestCmdRunReporter(object):
def testReporterInstance(self, depfile_name):
output = StringIO()
cmd_run = CmdFactory(Run, backend='dbm', dep_file=depfile_name,
task_list=[tasks_sample()[0]])
cmd_run._execute(output, reporter=MyReporter(output, {}))
got = output.getvalue().split("\n")[:-1]
assert 'MyReporter.start t1' == got[0]
def testCustomReporter(self, depfile_name):
output = StringIO()
cmd_run = CmdFactory(Run, backend='dbm', dep_file=depfile_name,
task_list=[tasks_sample()[0]])
cmd_run._execute(output, reporter=MyReporter)
got = output.getvalue().split("\n")[:-1]
assert 'MyReporter.start t1' == got[0]
def testPluginReporter(self, depfile_name):
output = StringIO()
cmd_run = CmdFactory(
Run, backend='dbm',
dep_file=depfile_name,
task_list=[tasks_sample()[0]],
config={'REPORTER':{'my': 'tests.test_cmd_run:MyReporter'}})
cmd_run._execute(output, reporter='my')
got = output.getvalue().split("\n")[:-1]
assert 'MyReporter.start t1' == got[0]
class TestCmdRunOptions(object):
def test_outfile(self, depfile_name):
cmd_run = CmdFactory(Run, backend='dbm', dep_file=depfile_name,
task_list=tasks_sample(), sel_tasks=["g1.a"])
cmd_run._execute('test.out')
try:
outfile = open('test.out', 'r')
got = outfile.read()
outfile.close()
assert ". g1.a\n" == got
finally:
if os.path.exists('test.out'):
os.remove('test.out')
|
the-stack_106_18184
|
a = []
abc = int(input("Enter the number of terms: "))
for i in range(abc):
b = int(input())
a.append(b)
n = len(a)
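# Bubble sort: each outer pass moves the largest remaining element to the end of the unsorted part.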
for i in range(n):
for j in range(0,n-i-1):
if(a[j]>a[j+1]):
a[j],a[j+1] = a[j+1],a[j]
print(a)
|
the-stack_106_18186
|
"""Storage handers."""
# pylint: disable=import-outside-toplevel
from homeassistant.helpers.json import JSONEncoder
from custom_components.hacs.const import VERSION_STORAGE
from .logger import getLogger
_LOGGER = getLogger()
def get_store_for_key(hass, key):
"""Create a Store object for the key."""
key = key if "/" in key else f"hacs.{key}"
from homeassistant.helpers.storage import Store
return Store(hass, VERSION_STORAGE, key, encoder=JSONEncoder)
async def async_load_from_store(hass, key):
"""Load the retained data from store and return de-serialized data."""
store = get_store_for_key(hass, key)
restored = await store.async_load()
if restored is None:
return {}
return restored
async def async_save_to_store(hass, key, data):
"""Generate dynamic data to store and save it to the filesystem."""
current = await async_load_from_store(hass, key)
if current is None or current != data:
await get_store_for_key(hass, key).async_save(data)
return
_LOGGER.debug(
"Did not store data for '%s'. Content did not change",
key if "/" in key else f"hacs.{key}",
)
async def async_remove_store(hass, key):
"""Remove a store element that should no longer be used"""
if "/" not in key:
return
await get_store_for_key(hass, key).async_remove()
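async def _example_store_roundtrip(hass):
    # Illustrative sketch only, not part of the original module: saves a small
    # dict under the "hacs.demo" key and reads it back. `hass` is the running
    # Home Assistant instance supplied by the surrounding integration.
    await async_save_to_store(hass, "demo", {"installed": ["integration/foo"]})
    restored = await async_load_from_store(hass, "demo")
    assert restored == {"installed": ["integration/foo"]}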
|
the-stack_106_18187
|
import os
import intake
import pandas as pd
import pytest
import xarray as xr
from intake_esm import config
from intake_esm.core import ESMMetadataStoreCatalog
here = os.path.abspath(os.path.dirname(__file__))
def test_build_collection():
with config.set({'database-directory': './tests/test_collections'}):
collection_input_definition = os.path.join(here, 'gmet-test.yml')
col = intake.open_esm_metadatastore(
collection_input_definition=collection_input_definition, overwrite_existing=True
)
assert isinstance(col.df, pd.DataFrame)
def test_search():
with config.set({'database-directory': './tests/test_collections'}):
col = intake.open_esm_metadatastore(collection_name='gmet_test')
cat = col.search(
member_id=[1, 2],
time_range=['19800101-19801231', '19810101-19811231', '19820101-19821231'],
)
assert isinstance(cat.query_results, pd.DataFrame)
assert not cat.query_results.empty
def test_to_xarray():
with config.set({'database-directory': './tests/test_collections'}):
col = intake.open_esm_metadatastore(collection_name='gmet_test')
cat = col.search(direct_access=True)
ds = cat.to_xarray(chunks={'time': 1}, decode_times=True)
assert isinstance(ds, xr.Dataset)
assert 'member_id' in ds.coords
|
the-stack_106_18189
|
"""Tests for the Config Entry Flow helper."""
from unittest.mock import patch, Mock
import pytest
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.helpers import config_entry_flow
from tests.common import (
MockConfigEntry, MockModule, mock_coro, mock_integration)
@pytest.fixture
def discovery_flow_conf(hass):
"""Register a handler."""
handler_conf = {
'discovered': False,
}
async def has_discovered_devices(hass):
"""Mock if we have discovered devices."""
return handler_conf['discovered']
with patch.dict(config_entries.HANDLERS):
config_entry_flow.register_discovery_flow(
'test', 'Test', has_discovered_devices,
config_entries.CONN_CLASS_LOCAL_POLL)
yield handler_conf
@pytest.fixture
def webhook_flow_conf(hass):
"""Register a handler."""
with patch.dict(config_entries.HANDLERS):
config_entry_flow.register_webhook_flow(
'test_single', 'Test Single', {}, False)
config_entry_flow.register_webhook_flow(
'test_multiple', 'Test Multiple', {}, True)
yield {}
async def test_single_entry_allowed(hass, discovery_flow_conf):
"""Test only a single entry is allowed."""
flow = config_entries.HANDLERS['test']()
flow.hass = hass
MockConfigEntry(domain='test').add_to_hass(hass)
result = await flow.async_step_user()
assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
assert result['reason'] == 'single_instance_allowed'
async def test_user_no_devices_found(hass, discovery_flow_conf):
"""Test if no devices found."""
flow = config_entries.HANDLERS['test']()
flow.hass = hass
flow.context = {
'source': config_entries.SOURCE_USER
}
result = await flow.async_step_confirm(user_input={})
assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
assert result['reason'] == 'no_devices_found'
async def test_user_has_confirmation(hass, discovery_flow_conf):
"""Test user requires no confirmation to setup."""
flow = config_entries.HANDLERS['test']()
flow.hass = hass
discovery_flow_conf['discovered'] = True
result = await flow.async_step_user()
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
async def test_discovery_single_instance(hass, discovery_flow_conf):
"""Test we ask for confirmation via discovery."""
flow = config_entries.HANDLERS['test']()
flow.hass = hass
MockConfigEntry(domain='test').add_to_hass(hass)
result = await flow.async_step_discovery({})
assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
assert result['reason'] == 'single_instance_allowed'
async def test_discovery_confirmation(hass, discovery_flow_conf):
"""Test we ask for confirmation via discovery."""
flow = config_entries.HANDLERS['test']()
flow.hass = hass
result = await flow.async_step_discovery({})
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
assert result['step_id'] == 'confirm'
result = await flow.async_step_confirm({})
assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
async def test_multiple_discoveries(hass, discovery_flow_conf):
"""Test we only create one instance for multiple discoveries."""
mock_integration(hass, MockModule('test'))
result = await hass.config_entries.flow.async_init(
'test', context={'source': config_entries.SOURCE_DISCOVERY}, data={})
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
# Second discovery
result = await hass.config_entries.flow.async_init(
'test', context={'source': config_entries.SOURCE_DISCOVERY}, data={})
assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
async def test_only_one_in_progress(hass, discovery_flow_conf):
"""Test a user initialized one will finish and cancel discovered one."""
mock_integration(hass, MockModule('test'))
# Discovery starts flow
result = await hass.config_entries.flow.async_init(
'test', context={'source': config_entries.SOURCE_DISCOVERY}, data={})
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
# User starts flow
result = await hass.config_entries.flow.async_init(
'test', context={'source': config_entries.SOURCE_USER}, data={})
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
# Discovery flow has not been aborted
assert len(hass.config_entries.flow.async_progress()) == 2
# Discovery should be aborted once user confirms
result = await hass.config_entries.flow.async_configure(
result['flow_id'], {})
assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert len(hass.config_entries.flow.async_progress()) == 0
async def test_import_no_confirmation(hass, discovery_flow_conf):
"""Test import requires no confirmation to set up."""
flow = config_entries.HANDLERS['test']()
flow.hass = hass
discovery_flow_conf['discovered'] = True
result = await flow.async_step_import(None)
assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
async def test_import_single_instance(hass, discovery_flow_conf):
"""Test import doesn't create second instance."""
flow = config_entries.HANDLERS['test']()
flow.hass = hass
discovery_flow_conf['discovered'] = True
MockConfigEntry(domain='test').add_to_hass(hass)
result = await flow.async_step_import(None)
assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
async def test_webhook_single_entry_allowed(hass, webhook_flow_conf):
"""Test only a single entry is allowed."""
flow = config_entries.HANDLERS['test_single']()
flow.hass = hass
MockConfigEntry(domain='test_single').add_to_hass(hass)
result = await flow.async_step_user()
assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
assert result['reason'] == 'one_instance_allowed'
async def test_webhook_multiple_entries_allowed(hass, webhook_flow_conf):
"""Test multiple entries are allowed when specified."""
flow = config_entries.HANDLERS['test_multiple']()
flow.hass = hass
MockConfigEntry(domain='test_multiple').add_to_hass(hass)
hass.config.api = Mock(base_url='http://example.com')
result = await flow.async_step_user()
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
async def test_webhook_config_flow_registers_webhook(hass, webhook_flow_conf):
"""Test setting up an entry creates a webhook."""
flow = config_entries.HANDLERS['test_single']()
flow.hass = hass
hass.config.api = Mock(base_url='http://example.com')
result = await flow.async_step_user(user_input={})
assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result['data']['webhook_id'] is not None
async def test_webhook_create_cloudhook(hass, webhook_flow_conf):
"""Test only a single entry is allowed."""
assert await setup.async_setup_component(hass, 'cloud', {})
async_setup_entry = Mock(return_value=mock_coro(True))
async_unload_entry = Mock(return_value=mock_coro(True))
mock_integration(hass, MockModule(
'test_single',
async_setup_entry=async_setup_entry,
async_unload_entry=async_unload_entry,
async_remove_entry=config_entry_flow.webhook_async_remove_entry,
))
result = await hass.config_entries.flow.async_init(
'test_single', context={'source': config_entries.SOURCE_USER})
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
coro = mock_coro({
'cloudhook_url': 'https://example.com'
})
with patch('hass_nabucasa.cloudhooks.Cloudhooks.async_create',
return_value=coro) as mock_create, \
patch('homeassistant.components.cloud.async_active_subscription',
return_value=True), \
patch('homeassistant.components.cloud.async_is_logged_in',
return_value=True):
result = await hass.config_entries.flow.async_configure(
result['flow_id'], {})
assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result['description_placeholders']['webhook_url'] == \
'https://example.com'
assert len(mock_create.mock_calls) == 1
assert len(async_setup_entry.mock_calls) == 1
with patch('hass_nabucasa.cloudhooks.Cloudhooks.async_delete',
return_value=coro) as mock_delete:
result = \
await hass.config_entries.async_remove(result['result'].entry_id)
assert len(mock_delete.mock_calls) == 1
assert result['require_restart'] is False
|
the-stack_106_18191
|
import numpy as np
import scipy.sparse
import time
import torch
import torch.nn.functional as F
from texttable import Texttable
def get_n_params(model):
pp=0
for p in list(model.parameters()):
nn=1
for s in list(p.size()):
nn = nn*s
pp += nn
return pp
def args_print(args):
_dict = vars(args)
t = Texttable()
t.add_row(["Parameter", "Value"])
for k in _dict:
t.add_row([k, _dict[k]])
print(t.draw())
def gen_features(rows, cols, v, m, n, y, p=False):
"""
rows: adj row index
cols: adj col index
v: adj value
m, n: adj shape (m, n)
y: feature matrix
return: row-normalized adj @ y (ndarray)
"""
s = time.time()
print("start:--------------------")
x = scipy.sparse.coo_matrix((v, (rows, cols)), (m, n))
print('x.shape', x.shape)
if p: print(x.toarray())
norm = x.sum(axis=1)
print('norm.shape', norm.shape)
if p: print(norm)
x_norm = x.multiply(1 / (norm + 0.00001))
print('x_norm.shape', x_norm.shape)
if p: print(x_norm.toarray())
out = x_norm.dot(y)
print('out.shape', out.shape)
print(f"time: {time.time() - s:.4f}s---------" )
return out
class MsgNorm(torch.nn.Module):
def __init__(self, learn_msg_scale=False):
super(MsgNorm, self).__init__()
self.msg_scale = torch.nn.Parameter(torch.Tensor([1.0]),
requires_grad=learn_msg_scale)
self.reset_parameters()
def forward(self, x, msg, p=2):
msg = F.normalize(msg, p=p, dim=1)
x_norm = x.norm(p=p, dim=1, keepdim=True)
msg = msg * x_norm * self.msg_scale
return msg
def reset_parameters(self):
torch.nn.init.ones_(self.msg_scale)
class EarlyStopping:
"""Early stops the training if validation loss doesn't improve after a given patience."""
def __init__(self, patience, delta=0):
"""
Args:
patience (int): How long to wait after the last time the validation loss improved.
delta (float): Minimum change in the monitored quantity to qualify as an improvement.
Default: 0
"""
self.patience = patience
self.counter = 0
self.best_score = None
self.early_stop = False
self.val_loss_min = np.Inf
self.delta = delta
def __call__(self, val_loss):
score = val_loss
if self.best_score is None:
self.best_score = score
elif score < self.best_score - self.delta:
self.counter += 1
print('EarlyStopping counter: {0} out of {1}'.format(self.counter, self.patience))
if self.counter >= self.patience:
self.early_stop = True
else:
self.best_score = score
self.counter = 0
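def _example_gen_features():
    # Illustrative sketch only, not part of the original module: runs
    # gen_features on a tiny 2x2 adjacency matrix, adj = [[1, 1], [0, 1]]
    # given in COO form, to show the row normalization.
    rows = np.array([0, 0, 1])
    cols = np.array([0, 1, 1])
    vals = np.array([1.0, 1.0, 1.0])
    feats = np.eye(2)
    out = gen_features(rows, cols, vals, 2, 2, feats)
    # Row 0 averages its two neighbours (~[0.5, 0.5]); row 1 keeps ~[0, 1],
    # both up to the 1e-5 smoothing added to the row sums.
    print(out)
def _example_msg_norm():
    # Illustrative sketch only: MsgNorm rescales an aggregated message to the
    # L2 norm of the node features, times a (here fixed) learnable scale.
    layer = MsgNorm(learn_msg_scale=False)
    x = torch.randn(4, 8)
    msg = torch.randn(4, 8)
    out = layer(x, msg)
    assert out.shape == (4, 8)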
|
the-stack_106_18193
|
# Copyright (c) 2013 - 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import urllib
from cinder import context
from cinder import exception
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume
from cinder.tests.unit.volume.drivers.dell_emc import vxflexos
from cinder.tests.unit.volume.drivers.dell_emc.vxflexos import mocks
from cinder.volume import configuration
class TestDeleteVolume(vxflexos.TestVxFlexOSDriver):
"""Test cases for ``VxFlexOSDriver.delete_volume()``"""
def setUp(self):
"""Setup a test case environment.
Creates a fake volume object and sets up the required API responses.
"""
super(TestDeleteVolume, self).setUp()
ctx = context.RequestContext('fake', 'fake', auth_token=True)
self.volume = fake_volume.fake_volume_obj(
ctx, **{'provider_id': fake.PROVIDER_ID})
self.volume_name_2x_enc = urllib.parse.quote(
urllib.parse.quote(self.driver._id_to_base64(self.volume.id))
)
self.HTTPS_MOCK_RESPONSES = {
self.RESPONSE_MODE.Valid: {
'types/Volume/instances/getByName::' +
self.volume_name_2x_enc: self.volume.id,
'instances/Volume::{}/action/removeMappedSdc'.format(
self.volume.provider_id): self.volume.provider_id,
'instances/Volume::{}/action/removeVolume'.format(
self.volume.provider_id
): self.volume.provider_id,
},
self.RESPONSE_MODE.BadStatus: {
'types/Volume/instances/getByName::' +
self.volume_name_2x_enc: mocks.MockHTTPSResponse(
{
'errorCode': 401,
'message': 'BadStatus Volume Test',
}, 401
),
'instances/Volume::{}/action/removeVolume'.format(
self.volume.provider_id
): mocks.MockHTTPSResponse(
{
'errorCode': 401,
'message': 'BadStatus Volume Test',
}, 401
),
},
}
def test_bad_login_and_volume(self):
self.set_https_response_mode(self.RESPONSE_MODE.BadStatus)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_volume,
self.volume)
def test_delete_volume(self):
"""Setting the unmap volume before delete flag for tests """
self.override_config('vxflexos_unmap_volume_before_deletion', True,
configuration.SHARED_CONF_GROUP)
self.driver.delete_volume(self.volume)
|
the-stack_106_18194
|
import importlib.abc
import importlib.machinery
import importlib.util
import os
import platform
import shutil
import sys
import tempfile
import time
import weakref
from pathlib import Path
import pytest
from jinja2 import Environment
from jinja2 import loaders
from jinja2 import PackageLoader
from jinja2.exceptions import TemplateNotFound
from jinja2.loaders import split_template_path
class TestLoaders:
def test_dict_loader(self, dict_loader):
env = Environment(loader=dict_loader)
tmpl = env.get_template("justdict.html")
assert tmpl.render().strip() == "FOO"
pytest.raises(TemplateNotFound, env.get_template, "missing.html")
def test_package_loader(self, package_loader):
env = Environment(loader=package_loader)
tmpl = env.get_template("test.html")
assert tmpl.render().strip() == "BAR"
pytest.raises(TemplateNotFound, env.get_template, "missing.html")
def test_filesystem_loader_overlapping_names(self, filesystem_loader):
t2_dir = Path(filesystem_loader.searchpath[0]) / ".." / "templates2"
# Make "foo" show up before "foo/test.html".
filesystem_loader.searchpath.insert(0, t2_dir)
e = Environment(loader=filesystem_loader)
e.get_template("foo")
# This would raise NotADirectoryError if "t2/foo" wasn't skipped.
e.get_template("foo/test.html")
def test_choice_loader(self, choice_loader):
env = Environment(loader=choice_loader)
tmpl = env.get_template("justdict.html")
assert tmpl.render().strip() == "FOO"
tmpl = env.get_template("test.html")
assert tmpl.render().strip() == "BAR"
pytest.raises(TemplateNotFound, env.get_template, "missing.html")
def test_function_loader(self, function_loader):
env = Environment(loader=function_loader)
tmpl = env.get_template("justfunction.html")
assert tmpl.render().strip() == "FOO"
pytest.raises(TemplateNotFound, env.get_template, "missing.html")
def test_prefix_loader(self, prefix_loader):
env = Environment(loader=prefix_loader)
tmpl = env.get_template("a/test.html")
assert tmpl.render().strip() == "BAR"
tmpl = env.get_template("b/justdict.html")
assert tmpl.render().strip() == "FOO"
pytest.raises(TemplateNotFound, env.get_template, "missing")
def test_caching(self):
changed = False
class TestLoader(loaders.BaseLoader):
def get_source(self, environment, template):
return "foo", None, lambda: not changed
env = Environment(loader=TestLoader(), cache_size=-1)
tmpl = env.get_template("template")
assert tmpl is env.get_template("template")
changed = True
assert tmpl is not env.get_template("template")
changed = False
def test_no_cache(self):
mapping = {"foo": "one"}
env = Environment(loader=loaders.DictLoader(mapping), cache_size=0)
assert env.get_template("foo") is not env.get_template("foo")
def test_limited_size_cache(self):
mapping = {"one": "foo", "two": "bar", "three": "baz"}
loader = loaders.DictLoader(mapping)
env = Environment(loader=loader, cache_size=2)
t1 = env.get_template("one")
t2 = env.get_template("two")
assert t2 is env.get_template("two")
assert t1 is env.get_template("one")
env.get_template("three")
loader_ref = weakref.ref(loader)
assert (loader_ref, "one") in env.cache
assert (loader_ref, "two") not in env.cache
assert (loader_ref, "three") in env.cache
def test_cache_loader_change(self):
loader1 = loaders.DictLoader({"foo": "one"})
loader2 = loaders.DictLoader({"foo": "two"})
env = Environment(loader=loader1, cache_size=2)
assert env.get_template("foo").render() == "one"
env.loader = loader2
assert env.get_template("foo").render() == "two"
def test_dict_loader_cache_invalidates(self):
mapping = {"foo": "one"}
env = Environment(loader=loaders.DictLoader(mapping))
assert env.get_template("foo").render() == "one"
mapping["foo"] = "two"
assert env.get_template("foo").render() == "two"
def test_split_template_path(self):
assert split_template_path("foo/bar") == ["foo", "bar"]
assert split_template_path("./foo/bar") == ["foo", "bar"]
pytest.raises(TemplateNotFound, split_template_path, "../foo")
class TestFileSystemLoader:
searchpath = (Path(__file__) / ".." / "res" / "templates").resolve()
@staticmethod
def _test_common(env):
tmpl = env.get_template("test.html")
assert tmpl.render().strip() == "BAR"
tmpl = env.get_template("foo/test.html")
assert tmpl.render().strip() == "FOO"
pytest.raises(TemplateNotFound, env.get_template, "missing.html")
def test_searchpath_as_str(self):
filesystem_loader = loaders.FileSystemLoader(str(self.searchpath))
env = Environment(loader=filesystem_loader)
self._test_common(env)
def test_searchpath_as_pathlib(self):
filesystem_loader = loaders.FileSystemLoader(self.searchpath)
env = Environment(loader=filesystem_loader)
self._test_common(env)
def test_searchpath_as_list_including_pathlib(self):
filesystem_loader = loaders.FileSystemLoader(
["/tmp/templates", self.searchpath]
)
env = Environment(loader=filesystem_loader)
self._test_common(env)
def test_caches_template_based_on_mtime(self):
filesystem_loader = loaders.FileSystemLoader(self.searchpath)
env = Environment(loader=filesystem_loader)
tmpl1 = env.get_template("test.html")
tmpl2 = env.get_template("test.html")
assert tmpl1 is tmpl2
os.utime(self.searchpath / "test.html", (time.time(), time.time()))
tmpl3 = env.get_template("test.html")
assert tmpl1 is not tmpl3
@pytest.mark.parametrize(
("encoding", "expect"),
[
("utf-8", "文字化け"),
("iso-8859-1", "æ\x96\x87\xe5\xad\x97\xe5\x8c\x96\xe3\x81\x91"),
],
)
def test_uses_specified_encoding(self, encoding, expect):
loader = loaders.FileSystemLoader(self.searchpath, encoding=encoding)
e = Environment(loader=loader)
t = e.get_template("mojibake.txt")
assert t.render() == expect
def test_filename_normpath(self):
"""Nested template names should only contain ``os.sep`` in the
loaded filename.
"""
loader = loaders.FileSystemLoader(self.searchpath)
e = Environment(loader=loader)
t = e.get_template("foo/test.html")
assert t.filename == str(self.searchpath / "foo" / "test.html")
class TestModuleLoader:
archive = None
def compile_down(self, prefix_loader, zip="deflated"):
log = []
self.reg_env = Environment(loader=prefix_loader)
if zip is not None:
fd, self.archive = tempfile.mkstemp(suffix=".zip")
os.close(fd)
else:
self.archive = tempfile.mkdtemp()
self.reg_env.compile_templates(self.archive, zip=zip, log_function=log.append)
self.mod_env = Environment(loader=loaders.ModuleLoader(self.archive))
return "".join(log)
def teardown(self):
if hasattr(self, "mod_env"):
if os.path.isfile(self.archive):
os.remove(self.archive)
else:
shutil.rmtree(self.archive)
self.archive = None
def test_log(self, prefix_loader):
log = self.compile_down(prefix_loader)
assert (
'Compiled "a/foo/test.html" as '
"tmpl_a790caf9d669e39ea4d280d597ec891c4ef0404a" in log
)
assert "Finished compiling templates" in log
assert (
'Could not compile "a/syntaxerror.html": '
"Encountered unknown tag 'endif'" in log
)
def _test_common(self):
tmpl1 = self.reg_env.get_template("a/test.html")
tmpl2 = self.mod_env.get_template("a/test.html")
assert tmpl1.render() == tmpl2.render()
tmpl1 = self.reg_env.get_template("b/justdict.html")
tmpl2 = self.mod_env.get_template("b/justdict.html")
assert tmpl1.render() == tmpl2.render()
def test_deflated_zip_compile(self, prefix_loader):
self.compile_down(prefix_loader, zip="deflated")
self._test_common()
def test_stored_zip_compile(self, prefix_loader):
self.compile_down(prefix_loader, zip="stored")
self._test_common()
def test_filesystem_compile(self, prefix_loader):
self.compile_down(prefix_loader, zip=None)
self._test_common()
def test_weak_references(self, prefix_loader):
self.compile_down(prefix_loader)
self.mod_env.get_template("a/test.html")
key = loaders.ModuleLoader.get_template_key("a/test.html")
name = self.mod_env.loader.module.__name__
assert hasattr(self.mod_env.loader.module, key)
assert name in sys.modules
# unset all, ensure the module is gone from sys.modules
self.mod_env = None
try:
import gc
gc.collect()
except BaseException:
pass
assert name not in sys.modules
def test_choice_loader(self, prefix_loader):
self.compile_down(prefix_loader)
self.mod_env.loader = loaders.ChoiceLoader(
[self.mod_env.loader, loaders.DictLoader({"DICT_SOURCE": "DICT_TEMPLATE"})]
)
tmpl1 = self.mod_env.get_template("a/test.html")
assert tmpl1.render() == "BAR"
tmpl2 = self.mod_env.get_template("DICT_SOURCE")
assert tmpl2.render() == "DICT_TEMPLATE"
def test_prefix_loader(self, prefix_loader):
self.compile_down(prefix_loader)
self.mod_env.loader = loaders.PrefixLoader(
{
"MOD": self.mod_env.loader,
"DICT": loaders.DictLoader({"test.html": "DICT_TEMPLATE"}),
}
)
tmpl1 = self.mod_env.get_template("MOD/a/test.html")
assert tmpl1.render() == "BAR"
tmpl2 = self.mod_env.get_template("DICT/test.html")
assert tmpl2.render() == "DICT_TEMPLATE"
def test_path_as_pathlib(self, prefix_loader):
self.compile_down(prefix_loader)
mod_path = self.mod_env.loader.module.__path__[0]
mod_loader = loaders.ModuleLoader(Path(mod_path))
self.mod_env = Environment(loader=mod_loader)
self._test_common()
def test_supports_pathlib_in_list_of_paths(self, prefix_loader):
self.compile_down(prefix_loader)
mod_path = self.mod_env.loader.module.__path__[0]
mod_loader = loaders.ModuleLoader([Path(mod_path), "/tmp/templates"])
self.mod_env = Environment(loader=mod_loader)
self._test_common()
@pytest.fixture()
def package_dir_loader(monkeypatch):
monkeypatch.syspath_prepend(Path(__file__).parent)
return PackageLoader("res")
@pytest.mark.parametrize(
("template", "expect"), [("foo/test.html", "FOO"), ("test.html", "BAR")]
)
def test_package_dir_source(package_dir_loader, template, expect):
source, name, up_to_date = package_dir_loader.get_source(None, template)
assert source.rstrip() == expect
assert name.endswith(os.path.join(*split_template_path(template)))
assert up_to_date()
def test_package_dir_list(package_dir_loader):
templates = package_dir_loader.list_templates()
assert "foo/test.html" in templates
assert "test.html" in templates
@pytest.fixture()
def package_file_loader(monkeypatch):
monkeypatch.syspath_prepend(Path(__file__).parent / "res")
return PackageLoader("__init__")
@pytest.mark.parametrize(
("template", "expect"), [("foo/test.html", "FOO"), ("test.html", "BAR")]
)
def test_package_file_source(package_file_loader, template, expect):
source, name, up_to_date = package_file_loader.get_source(None, template)
assert source.rstrip() == expect
assert name.endswith(os.path.join(*split_template_path(template)))
assert up_to_date()
def test_package_file_list(package_file_loader):
templates = package_file_loader.list_templates()
assert "foo/test.html" in templates
assert "test.html" in templates
@pytest.fixture()
def package_zip_loader(monkeypatch):
package_zip = (Path(__file__) / ".." / "res" / "package.zip").resolve()
monkeypatch.syspath_prepend(package_zip)
return PackageLoader("t_pack")
@pytest.mark.parametrize(
("template", "expect"), [("foo/test.html", "FOO"), ("test.html", "BAR")]
)
def test_package_zip_source(package_zip_loader, template, expect):
source, name, up_to_date = package_zip_loader.get_source(None, template)
assert source.rstrip() == expect
assert name.endswith(os.path.join(*split_template_path(template)))
assert up_to_date is None
@pytest.mark.xfail(
platform.python_implementation() == "PyPy",
reason="PyPy's zipimporter doesn't have a '_files' attribute.",
raises=TypeError,
)
def test_package_zip_list(package_zip_loader):
assert package_zip_loader.list_templates() == ["foo/test.html", "test.html"]
@pytest.mark.parametrize("package_path", ["", ".", "./"])
def test_package_zip_omit_curdir(package_zip_loader, package_path):
"""PackageLoader should not add or include "." or "./" in the root
path, it is invalid in zip paths.
"""
loader = PackageLoader("t_pack", package_path)
assert loader.package_path == ""
source, _, _ = loader.get_source(None, "templates/foo/test.html")
assert source.rstrip() == "FOO"
def test_pep_451_import_hook():
class ImportHook(importlib.abc.MetaPathFinder, importlib.abc.Loader):
def find_spec(self, name, path=None, target=None):
if name != "res":
return None
spec = importlib.machinery.PathFinder.find_spec(name)
return importlib.util.spec_from_file_location(
name,
spec.origin,
loader=self,
submodule_search_locations=spec.submodule_search_locations,
)
def create_module(self, spec):
return None # default behaviour is fine
def exec_module(self, module):
return None # we need this to satisfy the interface, it's wrong
# ensure we restore `sys.meta_path` after putting in our loader
before = sys.meta_path[:]
try:
sys.meta_path.insert(0, ImportHook())
package_loader = PackageLoader("res")
assert "test.html" in package_loader.list_templates()
finally:
sys.meta_path[:] = before
|
the-stack_106_18195
|
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.ticker as mticker
import matplotlib.ticker as mtick
import numpy as np
import os
from copy import copy
import pdb
import pandas as pd
plt.rcParams['font.size'] = 14
plt.rcParams['axes.linewidth'] = 2
# Usage: summarize.print_num_virus(self.cell_li)
def print_num_virus(cell_li):
total_virions = sum([cell.num_virus for cell in cell_li])
print("Total virus =", total_virions)
return total_virions
# Usage: summarize.num_cells_infected_by_time(self.cell_li, t)
def num_cells_infected_by_time(cell_li, t):
count = 0
for cell in cell_li:
if cell.t < t:
count += 1
return count
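def _example_num_cells_infected_by_time():
    # Illustrative sketch only, not part of the original module: cells can be
    # any objects with a `t` attribute holding the infection time in seconds.
    from types import SimpleNamespace
    cells = [SimpleNamespace(t=3600), SimpleNamespace(t=7200), SimpleNamespace(t=90000)]
    assert num_cells_infected_by_time(cells, 24 * 3600) == 2  # only the first two fall within day one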
# Usage: summarize.find_infectible_perc(self.inf_model.infected_dict)
def find_infectible_perc(infected_dict):
count1 = 0
count2 = 0
for x in infected_dict:
for y in infected_dict[x]:
if infected_dict[x][y] == 1:
count1 += 1
else:
count2 += 1
print("Num infected cells =", count1)
print("Num uninfected cells =", count2)
def output_to_csv(virus_prod_times, fname):
df = pd.DataFrame(virus_prod_times)
df.to_csv(fname)
# Usage: summarize.virus_production_times(self.cell_li, self.sim_folder, self.sim, "viral_load_adv")
def virus_production_times(cell_li, sim_folder, sim, name = "viral_load_adv"):
virus_prod_times = np.zeros(int(sim.num_days*1440) + 1)
virus_prod_times = virus_prod_times[720:] # cut off first 0.5 days
virus_per_min = sim.virions_shed_per_day/(24*60)
for cell in cell_li:
start = int(cell.t/60 + sim.latency_time*24*60) - 720 # cell.t is in s, sim.latency_time in days
finish = int(sim.num_days*24*60) - 720 + 1
virus_prod_times[start:finish] = virus_prod_times[start:finish] + virus_per_min*np.arange(1,finish-start+1)
fig, ax = plt.subplots()
ax.plot(np.arange(finish)/1440, virus_prod_times)
ax.ticklabel_format(axis = "y", scilimits = (0, 0), style = "sci")
plt.xticks(np.arange(0, sim.num_days, 0.5))
plt.xlim((0, sim.num_days - 0.5))
plt.ylim((0, np.max(virus_prod_times)))
plt.title("Total viral load over time")
plt.xlabel("Time (days)")
plt.ylabel("Number of virions")
plt.savefig(os.path.join(sim_folder, "viral_load_adv" + str(sim.v_adv) + ".png"))
output_to_csv(virus_prod_times, os.path.join(sim_folder, "total_viral_load_over_time.csv"))
# Usage: summarize.free_virions_over_time(self.free_virion_dist, self.sim_folder, self.sim, "free_virions")
def free_virions_over_time(free_virion_dist, sim_folder, sim, name = "free_virions"):
plt.clf()
free_virion_dist = free_virion_dist[720:] # cut off first 0.5 days
free_virion_dist = free_virion_dist[:int(len(free_virion_dist)//(1440/2)*1440/2) + 1]
plt.plot(np.arange(len(free_virion_dist))/1440, free_virion_dist)
plt.xlabel("Time (days)")
plt.ylabel("Number of free virions")
plt.title("Free virions over time")
plt.xticks(np.arange(0, sim.num_days - 0.5 + 0.1, 0.5))
plt.ticklabel_format(axis = "y", style = "sci", scilimits = (-3, 3))
plt.tight_layout()
plt.savefig(os.path.join(sim_folder, name + ".png"))
def flux_out(exit_times, sim_folder, name = "flux"):
np.savetxt(os.path.join(sim_folder, name + ".txt"), exit_times/(3600*24), fmt = "%10.5f")
def get_gen_len(sim):
df = pd.read_csv("C:/Users/charg/Desktop/Jan 2021/lung_parameters.csv", sep = "\t")
length = float(df[df["Generation"] == sim.gen]["Length (cm)"])*10000
max_x = length//4
return max_x
# Usage: summarize.infection_map_adv(self.infected_coord_dict, self.sim_folder, self.sim, "default", "infection_map_adv")
def infection_map_adv(infected_coord_dict, sim_folder, sim, mode, name = "infection_map_adv", plot_only_gen = True):
min_x, max_x, min_y, max_y = np.inf, -np.inf, np.inf, -np.inf
for pos in infected_coord_dict:
min_x, max_x, min_y, max_y = min(min_x, pos[0]), max(max_x, pos[0]), min(min_y, pos[1]), max(max_y, pos[1])
if plot_only_gen:
max_x = get_gen_len(sim)
min_x = int(min_x)
max_x = int(max_x) + 1
# For y (horizontal in plot), start the first infected cell in the center.
max_y = int(abs(np.maximum(-min_y, max_y + 1)))
min_y = -max_y
map_x = max_x - min_x + 1
map_y = max_y - min_y + 1
if mode == "default":
map_img = np.zeros((max_x - min_x + 1, max_y - min_y + 1))
zero_loc = (-min_x, -min_y)
elif mode == "compressed":
map_img = np.zeros((int((max_x - min_x)/sim.v_adv) + 1, max_y - min_y + 1))
zero_loc = (int(-min_x/sim.v_adv), -min_y)
else: # log
map_img = np.zeros((max_y - min_y + 1, max_y - min_y + 1))
scale = (max_y - min_y)/np.log10(max_x - min_x + 1)
for pos in infected_coord_dict:
infected_time = infected_coord_dict[pos]/(24*3600)
climit = round(2*sim.num_days)/2
if infected_time <= climit:
if 1:
#try:
if mode == "default":
cur_pos = (int(pos[0] - min_x), int(pos[1] - min_y))
elif mode == "compressed":
cur_pos = (int((pos[0] - min_x)/sim.v_adv), int(pos[1] - min_y))
else:
cur_pos = (int(np.log10(pos[0] - min_x + 1)*scale), int(pos[1] - min_y))
if cur_pos[0] < map_img.shape[0]:
cur_val = map_img[cur_pos]
if cur_val > 0:
map_img[cur_pos] = np.minimum(infected_time - 0.5, cur_val)
else:
map_img[cur_pos] = infected_time - 0.5
map_img = map_img/(map_img > 0)
fig, ax = plt.subplots()
newcmp = copy(plt.get_cmap('coolwarm'))
newcmp.set_bad('black')
if sim.gen != "nasal":
pos = ax.imshow(map_img, cmap = newcmp, interpolation = "none", origin = "lower", vmin = 0, vmax = sim.num_days - 0.5)
else:
pos = ax.imshow(map_img, cmap = newcmp, interpolation = "none", vmin = 0, vmax = sim.num_days - 0.5)
cbar = fig.colorbar(pos)
cbar.set_label("Infection time (days)")
cbar.set_ticks(np.arange(0, sim.num_days - 0.5, 0.25))
ax.axis("off")
plt.savefig(os.path.join(sim_folder, "infection_map_adv" + str(sim.v_adv) + "_" + mode + ".png"))
np.savetxt(os.path.join(sim_folder, "infection_map_adv" + str(sim.v_adv) + "_" + mode + ".txt"), map_img, fmt = "%10.5f")
return (map_x, map_y), map_img.shape, zero_loc
# Usage: summarize.write_summary_data(self.cell_li, total_virions, map, self.sim_folder, self.sim, "summary_data")
def write_summary_data(cell_li, total_virions, map, map_img_dim, zero_loc, sim_folder, sim, name = "summary_data"):
with open(os.path.join(sim_folder, name + ".txt"), "w") as f:
f.writelines("Generation: " + str(sim.gen) + "\n")
f.writelines("Advection: " + str(sim.v_adv) + "\n")
f.writelines("Cell dimensions: " + str(map[0]) + " x " + str(map[1]) + " (# cells)" + "\n")
f.writelines("Map dimensions: " + str(map_img_dim[0]) + " x " + str(map_img_dim[1]) + "\n")
f.writelines("Origin location: " + str(zero_loc[0]) + " x " + str(zero_loc[1]) + "\n")
f.writelines("Total virus: " + str(total_virions) + "\n")
f.writelines("Total cells infected: " + str(len(cell_li)) + "\n")
for i in range(1, int(2*sim.num_days) + 1):
f.writelines("Total cells infected by time t = " + str((i - 1)/2) + " days: " + \
str(num_cells_infected_by_time(cell_li, i*12*3600)) + "\n")
|
the-stack_106_18196
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base constants and handlers."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import base64
import datetime
import hmac
import http.cookies
import json
import logging
import os
import sys
import time
import traceback
import backports.functools_lru_cache
from core.domain import config_domain
from core.domain import config_services
from core.domain import user_services
from core.platform import models
import feconf
import python_utils
import utils
import webapp2
current_user_services = models.Registry.import_current_user_services()
(user_models,) = models.Registry.import_models([models.NAMES.user])
ONE_DAY_AGO_IN_SECS = -24 * 60 * 60
DEFAULT_CSRF_SECRET = 'oppia csrf secret'
CSRF_SECRET = config_domain.ConfigProperty(
'oppia_csrf_secret', {'type': 'unicode'},
'Text used to encrypt CSRF tokens.', DEFAULT_CSRF_SECRET)
def _clear_login_cookies(response_headers):
"""Clears login cookies from the given response headers.
Args:
response_headers: webapp2.ResponseHeaders. Response headers are used
to give a more detailed context of the response.
"""
# App Engine sets the ACSID cookie for http:// and the SACSID cookie
# for https:// . We just unset both below. We also unset dev_appserver_login
# cookie used in local server.
for cookie_name in [b'ACSID', b'SACSID', b'dev_appserver_login']:
cookie = http.cookies.SimpleCookie()
cookie[cookie_name] = ''
cookie[cookie_name]['expires'] = (
datetime.datetime.utcnow() +
datetime.timedelta(seconds=ONE_DAY_AGO_IN_SECS)
).strftime('%a, %d %b %Y %H:%M:%S GMT')
response_headers.add_header(*cookie.output().split(b': ', 1))
@backports.functools_lru_cache.lru_cache(maxsize=128)
def load_template(filename):
"""Return the HTML file contents at filepath.
Args:
filename: str. Name of the requested HTML file.
Returns:
str. The HTML file content.
"""
filepath = os.path.join(feconf.FRONTEND_TEMPLATES_DIR, filename)
with python_utils.open_file(filepath, 'r') as f:
html_text = f.read()
return html_text
class LogoutPage(webapp2.RequestHandler):
"""Class which handles the logout URL."""
def get(self):
"""Logs the user out, and returns them to a specified follow-up
page (or the home page if no follow-up page is specified).
"""
_clear_login_cookies(self.response.headers)
url_to_redirect_to = (
python_utils.convert_to_bytes(
self.request.get('redirect_url', '/')))
self.redirect(url_to_redirect_to)
class UserFacingExceptions(python_utils.OBJECT):
"""This class contains all the exception class definitions used."""
class NotLoggedInException(Exception):
"""Error class for users that are not logged in (error code 401)."""
pass
class InvalidInputException(Exception):
"""Error class for invalid input on the user side (error code 400)."""
pass
class UnauthorizedUserException(Exception):
"""Error class for unauthorized access."""
pass
class PageNotFoundException(Exception):
"""Error class for a page not found error (error code 404)."""
pass
class InternalErrorException(Exception):
"""Error class for an internal server side error (error code 500)."""
pass
class TemporaryMaintenanceException(Exception):
"""Error class for when the server is currently down for temporary
maintenance (error code 503).
"""
pass
class BaseHandler(webapp2.RequestHandler):
"""Base class for all Oppia handlers."""
# Whether to check POST and PUT payloads for CSRF tokens prior to
# processing them. Can be overridden by subclasses if this check is
# not necessary.
REQUIRE_PAYLOAD_CSRF_CHECK = True
# Whether to redirect requests corresponding to a logged-in user who has
# not completed signup in to the signup page. This ensures that logged-in
# users have agreed to the latest terms.
REDIRECT_UNFINISHED_SIGNUPS = True
# What format the get method returns when exception raised, json or html.
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_HTML
POST_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
PUT_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
DELETE_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
def __init__(self, request, response): # pylint: disable=super-init-not-called
# Set self.request, self.response and self.app.
self.initialize(request, response)
self.start_time = datetime.datetime.utcnow()
# Initializes the return dict for the handlers.
self.values = {}
if self.request.get('payload'):
self.payload = json.loads(self.request.get('payload'))
else:
self.payload = None
self.iframed = False
self.is_super_admin = (
current_user_services.is_current_user_super_admin())
if feconf.ENABLE_MAINTENANCE_MODE and not self.is_super_admin:
return
self.gae_id = current_user_services.get_current_gae_id()
self.user_id = None
self.username = None
self.partially_logged_in = False
self.user_is_scheduled_for_deletion = False
if self.gae_id:
user_settings = user_services.get_user_settings_by_gae_id(
self.gae_id, strict=False)
if user_settings is None:
# If the user settings are not yet created and the request leads
# to signup page create a new user settings. Otherwise logout
# the not-fully registered user.
email = current_user_services.get_current_user_email()
if 'signup?' in self.request.uri:
user_settings = user_services.create_new_user(
self.gae_id, email)
else:
logging.error(
'Cannot find user %s with email %s on page %s'
% (self.gae_id, email, self.request.uri))
_clear_login_cookies(self.response.headers)
return
self.values['user_email'] = user_settings.email
self.user_id = user_settings.user_id
if user_settings.deleted:
self.user_is_scheduled_for_deletion = user_settings.deleted
elif (self.REDIRECT_UNFINISHED_SIGNUPS and not
user_services.has_fully_registered_account(
user_settings.user_id)):
self.partially_logged_in = True
else:
self.username = user_settings.username
self.values['username'] = self.username
# In order to avoid too many datastore writes, we do not bother
# recording a log-in if the current time is sufficiently close
# to the last log-in time.
if (user_settings.last_logged_in is None or
not utils.are_datetimes_close(
datetime.datetime.utcnow(),
user_settings.last_logged_in)):
user_services.record_user_logged_in(self.user_id)
self.role = (
feconf.ROLE_ID_GUEST
if self.user_id is None else user_settings.role)
self.user = user_services.UserActionsInfo(self.user_id)
self.values['is_moderator'] = user_services.is_at_least_moderator(
self.user_id)
self.values['is_admin'] = user_services.is_admin(self.user_id)
self.values['is_topic_manager'] = (
user_services.is_topic_manager(self.user_id))
self.values['is_super_admin'] = self.is_super_admin
def dispatch(self):
"""Overrides dispatch method in webapp2 superclass.
Raises:
Exception. The CSRF token is missing.
UnauthorizedUserException. The CSRF token is invalid.
"""
# If the request is to the old demo server, redirect it permanently to
# the new demo server.
if self.request.uri.startswith('https://oppiaserver.appspot.com'):
self.redirect(
b'https://oppiatestserver.appspot.com', permanent=True)
return
if feconf.ENABLE_MAINTENANCE_MODE and not self.is_super_admin:
self.handle_exception(
self.TemporaryMaintenanceException(
'Oppia is currently being upgraded, and the site should '
'be up and running again in a few hours. '
'Thanks for your patience!'),
self.app.debug)
return
if self.user_is_scheduled_for_deletion:
self.redirect(
'/logout?redirect_url=%s' % feconf.PENDING_ACCOUNT_DELETION_URL)
return
if self.partially_logged_in:
self.redirect('/logout?redirect_url=%s' % self.request.uri)
return
try:
# If this is a CSRF request and the user is not yet loaded produce
# an error. The user might not be loaded due to an eventual
# consistency that does not guarantee that the UserAuthDetailsModel
# will be returned by a query even when we are sure that the model
# was added to the datastore. More info in #10951.
if 'csrf' in self.request.uri and self.gae_id and not self.user_id:
raise self.UnauthorizedUserException('User details not found.')
if self.payload is not None and self.REQUIRE_PAYLOAD_CSRF_CHECK:
# If user opens a new tab during signup process, the user_id
# parameter is set to None and this causes the signup session
# to expire. The code here checks if user is on the signup
# page and the user_id is None, if that is the case an exception
# is raised which is handled by the frontend by showing a
# continue to registration modal.
if 'signup' in self.request.uri and not self.user_id:
raise self.UnauthorizedUserException(
'Registration session expired.')
csrf_token = self.request.get('csrf_token')
if not csrf_token:
raise self.UnauthorizedUserException(
'Missing CSRF token. Changes were not saved. '
'Please report this bug.')
is_csrf_token_valid = CsrfTokenManager.is_csrf_token_valid(
self.user_id, csrf_token)
if not is_csrf_token_valid:
raise self.UnauthorizedUserException(
'Your session has expired, and unfortunately your '
'changes cannot be saved. Please refresh the page.')
except Exception as e:
logging.error('%s: payload %s', e, self.payload)
self.handle_exception(e, self.app.debug)
return
super(BaseHandler, self).dispatch()
def get(self, *args, **kwargs): # pylint: disable=unused-argument
"""Base method to handle GET requests.
Raises:
PageNotFoundException. Page not found error (error code 404).
"""
raise self.PageNotFoundException
def post(self, *args): # pylint: disable=unused-argument
"""Base method to handle POST requests.
Raises:
PageNotFoundException. Page not found error (error code 404).
"""
raise self.PageNotFoundException
def put(self, *args): # pylint: disable=unused-argument
"""Base method to handle PUT requests.
Raises:
PageNotFoundException. Page not found error (error code 404).
"""
raise self.PageNotFoundException
def delete(self, *args): # pylint: disable=unused-argument
"""Base method to handle DELETE requests.
Raises:
PageNotFoundException. Page not found error (error code 404).
"""
raise self.PageNotFoundException
def render_json(self, values):
"""Prepares JSON response to be sent to the client.
Args:
values: dict. The key-value pairs to encode in the JSON response.
"""
self.response.content_type = b'application/json; charset=utf-8'
self.response.headers[b'Content-Disposition'] = (
b'attachment; filename="oppia-attachment.txt"')
self.response.headers[b'Strict-Transport-Security'] = (
b'max-age=31536000; includeSubDomains')
self.response.headers[b'X-Content-Type-Options'] = b'nosniff'
self.response.headers[b'X-Xss-Protection'] = b'1; mode=block'
json_output = json.dumps(values, cls=utils.JSONEncoderForHTML)
self.response.write('%s%s' % (feconf.XSSI_PREFIX, json_output))
def render_downloadable_file(self, values, filename, content_type):
"""Prepares downloadable content to be sent to the client.
Args:
values: dict. The key-value pairs to include in the response.
filename: str. The name of the file to be rendered.
content_type: str. The type of file to be rendered.
"""
self.response.headers[b'Content-Type'] = python_utils.convert_to_bytes(
content_type)
self.response.headers[
b'Content-Disposition'] = python_utils.convert_to_bytes(
'attachment; filename=%s' % filename)
self.response.write(values)
def render_template(self, filepath, iframe_restriction='DENY'):
"""Prepares an HTML response to be sent to the client.
Args:
filepath: str. The template filepath.
iframe_restriction: str or None. Possible values are
'DENY' and 'SAMEORIGIN':
DENY: Strictly prevents the template to load in an iframe.
SAMEORIGIN: The template can only be displayed in a frame
on the same origin as the page itself.
"""
self.response.cache_control.no_cache = True
self.response.cache_control.must_revalidate = True
self.response.headers[b'Strict-Transport-Security'] = (
b'max-age=31536000; includeSubDomains')
self.response.headers[b'X-Content-Type-Options'] = b'nosniff'
self.response.headers[b'X-Xss-Protection'] = b'1; mode=block'
if iframe_restriction is not None:
if iframe_restriction in ['SAMEORIGIN', 'DENY']:
self.response.headers[
b'X-Frame-Options'] = python_utils.convert_to_bytes(
iframe_restriction)
else:
raise Exception(
'Invalid X-Frame-Options: %s' % iframe_restriction)
self.response.expires = 'Mon, 01 Jan 1990 00:00:00 GMT'
self.response.pragma = 'no-cache'
self.response.write(load_template(filepath))
def _render_exception_json_or_html(self, return_type, values):
"""Renders an error page, or an error JSON response.
Args:
return_type: str. Indicator to return JSON or HTML.
values: dict. The key-value pairs to include in the response.
"""
method = self.request.environ['REQUEST_METHOD']
if return_type == feconf.HANDLER_TYPE_HTML and method == 'GET':
self.values.update(values)
if self.iframed:
self.render_template(
'error-iframed.mainpage.html', iframe_restriction=None)
elif values['status_code'] == 503:
self.render_template('maintenance-page.mainpage.html')
else:
self.render_template(
'error-page-%s.mainpage.html' % values['status_code'])
else:
if return_type != feconf.HANDLER_TYPE_JSON and (
return_type != feconf.HANDLER_TYPE_DOWNLOADABLE):
logging.warning(
'Not a recognized return type: defaulting to render JSON.')
self.render_json(values)
def _render_exception(self, error_code, values):
"""Renders an error page, or an error JSON response.
Args:
error_code: int. The HTTP status code (expected to be one of
400, 401, 404 or 500).
values: dict. The key-value pairs to include in the response.
"""
# The error codes here should be in sync with the error pages
# generated via webpack.common.config.ts.
assert error_code in [400, 401, 404, 500, 503]
values['status_code'] = error_code
method = self.request.environ['REQUEST_METHOD']
if method == 'GET':
self._render_exception_json_or_html(
self.GET_HANDLER_ERROR_RETURN_TYPE, values)
elif method == 'POST':
self._render_exception_json_or_html(
self.POST_HANDLER_ERROR_RETURN_TYPE, values)
elif method == 'PUT':
self._render_exception_json_or_html(
self.PUT_HANDLER_ERROR_RETURN_TYPE, values)
elif method == 'DELETE':
self._render_exception_json_or_html(
self.DELETE_HANDLER_ERROR_RETURN_TYPE, values)
else:
logging.warning('Not a recognized request method.')
self._render_exception_json_or_html(None, values)
def handle_exception(self, exception, unused_debug_mode):
"""Overwrites the default exception handler.
Args:
exception: Exception. The exception that was thrown.
unused_debug_mode: bool. True if the web application is running
in debug mode.
"""
if isinstance(exception, self.NotLoggedInException):
# This checks if the response should be JSON or HTML.
# For GET requests, there is no payload, so we check against
# GET_HANDLER_ERROR_RETURN_TYPE.
# Otherwise, we check whether self.payload exists.
if (self.payload is not None or
self.GET_HANDLER_ERROR_RETURN_TYPE ==
feconf.HANDLER_TYPE_JSON):
self.error(401)
self._render_exception(
401, {
'error': (
'You must be logged in to access this resource.')})
else:
self.redirect(
current_user_services.create_login_url(self.request.uri))
return
logging.error(b''.join(traceback.format_exception(*sys.exc_info())))
if isinstance(exception, self.PageNotFoundException):
logging.warning('Invalid URL requested: %s', self.request.uri)
self.error(404)
self._render_exception(
404, {
'error': 'Could not find the page %s.' % self.request.uri})
return
logging.error('Exception raised: %s', exception)
if isinstance(exception, self.UnauthorizedUserException):
self.error(401)
self._render_exception(401, {'error': python_utils.convert_to_bytes(
exception)})
return
if isinstance(exception, self.InvalidInputException):
self.error(400)
self._render_exception(400, {'error': python_utils.convert_to_bytes(
exception)})
return
if isinstance(exception, self.InternalErrorException):
self.error(500)
self._render_exception(500, {'error': python_utils.convert_to_bytes(
exception)})
return
if isinstance(exception, self.TemporaryMaintenanceException):
self.error(503)
self._render_exception(503, {'error': python_utils.convert_to_bytes(
exception)})
return
self.error(500)
self._render_exception(
500, {'error': python_utils.convert_to_bytes(exception)})
InternalErrorException = UserFacingExceptions.InternalErrorException
InvalidInputException = UserFacingExceptions.InvalidInputException
NotLoggedInException = UserFacingExceptions.NotLoggedInException
PageNotFoundException = UserFacingExceptions.PageNotFoundException
UnauthorizedUserException = UserFacingExceptions.UnauthorizedUserException
TemporaryMaintenanceException = (
UserFacingExceptions.TemporaryMaintenanceException)
class Error404Handler(BaseHandler):
"""Handles 404 errors."""
pass
class CsrfTokenManager(python_utils.OBJECT):
"""Manages page/user tokens in memcache to protect against CSRF."""
# Max age of the token (48 hours).
_CSRF_TOKEN_AGE_SECS = 60 * 60 * 48
# Default user id for non-logged-in users.
_USER_ID_DEFAULT = 'non_logged_in_user'
@classmethod
def init_csrf_secret(cls):
"""Verify that non-default CSRF secret exists; creates one if not."""
# Any non-default value is fine.
if CSRF_SECRET.value and CSRF_SECRET.value != DEFAULT_CSRF_SECRET:
return
# Initialize to random value.
config_services.set_property(
feconf.SYSTEM_COMMITTER_ID, CSRF_SECRET.name,
base64.urlsafe_b64encode(os.urandom(20)))
@classmethod
def _create_token(cls, user_id, issued_on):
"""Creates a new CSRF token.
Args:
user_id: str|None. The user_id for which the token is generated.
issued_on: float. The timestamp at which the token was issued.
Returns:
str. The generated CSRF token.
"""
cls.init_csrf_secret()
        # The token has the form '<issued_on>/<digest>', where the digest is an
        # HMAC (keyed with CSRF_SECRET) over the user id and the issue time.
if user_id is None:
user_id = cls._USER_ID_DEFAULT
# Round time to seconds.
issued_on = int(issued_on)
digester = hmac.new(python_utils.convert_to_bytes(CSRF_SECRET.value))
digester.update(python_utils.convert_to_bytes(user_id))
digester.update(':')
digester.update(python_utils.convert_to_bytes(issued_on))
digest = digester.digest()
token = '%s/%s' % (issued_on, base64.urlsafe_b64encode(digest))
return token
@classmethod
def _get_current_time(cls):
"""Returns the current server time.
Returns:
float. The time in seconds as floating point number.
"""
return time.time()
@classmethod
def create_csrf_token(cls, user_id):
"""Creates a CSRF token for the given user_id.
Args:
user_id: str|None. The user_id for whom the token is generated.
Returns:
str. The generated CSRF token.
"""
return cls._create_token(user_id, cls._get_current_time())
@classmethod
def is_csrf_token_valid(cls, user_id, token):
"""Validates a given CSRF token.
Args:
user_id: str|None. The user_id to validate the CSRF token against.
token: str. The CSRF token to validate.
Returns:
bool. Whether the given CSRF token is valid.
"""
try:
parts = token.split('/')
if len(parts) != 2:
return False
issued_on = int(parts[0])
age = cls._get_current_time() - issued_on
if age > cls._CSRF_TOKEN_AGE_SECS:
return False
authentic_token = cls._create_token(user_id, issued_on)
if authentic_token == token:
return True
return False
except Exception:
return False
class CsrfTokenHandler(BaseHandler):
"""Handles sending CSRF tokens to the frontend."""
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
REDIRECT_UNFINISHED_SIGNUPS = False
def get(self):
csrf_token = CsrfTokenManager.create_csrf_token(
self.user_id)
self.render_json({
'token': csrf_token,
})
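# --- Illustrative sketch (not part of Oppia): a minimal JSON handler built on
# BaseHandler. The class name and payload key below are hypothetical; the sketch
# only shows how GET_HANDLER_ERROR_RETURN_TYPE and render_json fit together.
class ExampleGreetingHandler(BaseHandler):
    """Hypothetical handler demonstrating the BaseHandler contract."""
    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    def get(self):
        # self.username is populated by BaseHandler.__init__ for logged-in,
        # fully registered users; otherwise it stays None.
        self.render_json({'greeting': 'Hello, %s!' % (self.username or 'guest')})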
|
the-stack_106_18197
|
"""Tests for the mfa setup flow."""
from openpeerpower import data_entry_flow
from openpeerpower.auth import auth_manager_from_config
from openpeerpower.components.auth import mfa_setup_flow
from openpeerpower.setup import async_setup_component
from tests.common import CLIENT_ID, MockUser, ensure_auth_manager_loaded
async def test_ws_setup_depose_mfa(opp, opp_ws_client):
"""Test set up mfa module for current user."""
opp.auth = await auth_manager_from_config(
opp,
provider_configs=[
{
"type": "insecure_example",
"users": [
{
"username": "test-user",
"password": "test-pass",
"name": "Test Name",
}
],
}
],
module_configs=[
{
"type": "insecure_example",
"id": "example_module",
"data": [{"user_id": "mock-user", "pin": "123456"}],
}
],
)
ensure_auth_manager_loaded(opp.auth)
await async_setup_component(opp, "auth", {"http": {}})
user = MockUser(id="mock-user").add_to_opp(opp)
cred = await opp.auth.auth_providers[0].async_get_or_create_credentials(
{"username": "test-user"}
)
await opp.auth.async_link_user(user, cred)
refresh_token = await opp.auth.async_create_refresh_token(user, CLIENT_ID)
access_token = opp.auth.async_create_access_token(refresh_token)
client = await opp_ws_client(opp, access_token)
await client.send_json({"id": 10, "type": mfa_setup_flow.WS_TYPE_SETUP_MFA})
result = await client.receive_json()
assert result["id"] == 10
assert result["success"] is False
assert result["error"]["code"] == "no_module"
await client.send_json(
{
"id": 11,
"type": mfa_setup_flow.WS_TYPE_SETUP_MFA,
"mfa_module_id": "example_module",
}
)
result = await client.receive_json()
assert result["id"] == 11
assert result["success"]
flow = result["result"]
assert flow["type"] == data_entry_flow.RESULT_TYPE_FORM
assert flow["handler"] == "example_module"
assert flow["step_id"] == "init"
assert flow["data_schema"][0] == {"type": "string", "name": "pin"}
await client.send_json(
{
"id": 12,
"type": mfa_setup_flow.WS_TYPE_SETUP_MFA,
"flow_id": flow["flow_id"],
"user_input": {"pin": "654321"},
}
)
result = await client.receive_json()
assert result["id"] == 12
assert result["success"]
flow = result["result"]
assert flow["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert flow["handler"] == "example_module"
assert flow["data"]["result"] is None
await client.send_json(
{
"id": 13,
"type": mfa_setup_flow.WS_TYPE_DEPOSE_MFA,
"mfa_module_id": "invalid_id",
}
)
result = await client.receive_json()
assert result["id"] == 13
assert result["success"] is False
assert result["error"]["code"] == "disable_failed"
await client.send_json(
{
"id": 14,
"type": mfa_setup_flow.WS_TYPE_DEPOSE_MFA,
"mfa_module_id": "example_module",
}
)
result = await client.receive_json()
assert result["id"] == 14
assert result["success"]
assert result["result"] == "done"
|
the-stack_106_18199
|
"""Setup for the tqdl package."""
# !/usr/bin/env python
# -*- coding: utf-8 -*-
import setuptools
import versioneer
INSTALL_REQUIRES = ['requests', 'tqdm']
TEST_REQUIRES = [
# testing and coverage
'pytest', 'coverage', 'pytest-cov',
# unmandatory dependencies of the package itself
# NONE
# to be able to run `python setup.py checkdocs`
'collective.checkdocs', 'pygments',
]
with open('README.rst') as f:
README = f.read()
setuptools.setup(
author="Shay Palachy",
author_email="[email protected]",
name='tqdl',
license="MIT",
description='requests-based file downloads with tqdm progress bars',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
long_description=README,
url='https://github.com/shaypal5/tqdl',
packages=setuptools.find_packages(),
include_package_data=True,
python_requires=">=3.5",
    install_requires=INSTALL_REQUIRES,
extras_require={
'test': TEST_REQUIRES + INSTALL_REQUIRES,
},
classifiers=[
# Trove classifiers
# (https://pypi.python.org/pypi?%3Aaction=list_classifiers)
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
the-stack_106_18200
|
import torch
import torch.nn as nn
from collections import defaultdict
THRESHOLD = 0.5
INIT_RANGE = 0.5
EPSILON = 1e-10
class Binarize(torch.autograd.Function):
"""Deterministic binarization."""
@staticmethod
def forward(ctx, X):
y = torch.where(X > 0, torch.ones_like(X), torch.zeros_like(X))
return y
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output.clone()
return grad_input
class BinarizeLayer(nn.Module):
"""Implement the feature discretization and binarization."""
def __init__(self, n, input_dim, use_not=False, left=None, right=None):
super(BinarizeLayer, self).__init__()
self.n = n
self.input_dim = input_dim
self.disc_num = input_dim[0]
self.use_not = use_not
if self.use_not:
self.disc_num *= 2
self.output_dim = self.disc_num + self.n * self.input_dim[1] * 2
self.layer_type = 'binarization'
self.dim2id = {i: i for i in range(self.output_dim)}
self.register_buffer('left', left)
self.register_buffer('right', right)
if self.input_dim[1] > 0:
if self.left is not None and self.right is not None:
cl = self.left + torch.rand(self.n, self.input_dim[1]) * (self.right - self.left)
cr = self.left + torch.rand(self.n, self.input_dim[1]) * (self.right - self.left)
else:
cl = 3. * (2. * torch.rand(self.n, self.input_dim[1]) - 1.)
cr = 3. * (2. * torch.rand(self.n, self.input_dim[1]) - 1.)
self.register_buffer('cl', cl)
self.register_buffer('cr', cr)
def forward(self, x):
if self.input_dim[1] > 0:
x_disc, x = x[:, 0: self.input_dim[0]], x[:, self.input_dim[0]:]
x = x.unsqueeze(-1)
if self.use_not:
x_disc = torch.cat((x_disc, 1 - x_disc), dim=1)
return torch.cat((x_disc, Binarize.apply(x - self.cl.t()).view(x.shape[0], -1),
1 - Binarize.apply(x - self.cr.t()).view(x.shape[0], -1)), dim=1)
if self.use_not:
x = torch.cat((x, 1 - x), dim=1)
return x
def binarized_forward(self, x):
with torch.no_grad():
return self.forward(x)
def clip(self):
if self.input_dim[1] > 0 and self.left is not None and self.right is not None:
self.cl.data = torch.where(self.cl.data > self.right, self.right, self.cl.data)
self.cl.data = torch.where(self.cl.data < self.left, self.left, self.cl.data)
self.cr.data = torch.where(self.cr.data > self.right, self.right, self.cr.data)
self.cr.data = torch.where(self.cr.data < self.left, self.left, self.cr.data)
def get_bound_name(self, feature_name, mean=None, std=None):
bound_name = []
for i in range(self.input_dim[0]):
bound_name.append(feature_name[i])
if self.use_not:
for i in range(self.input_dim[0]):
bound_name.append('~' + feature_name[i])
if self.input_dim[1] > 0:
for c, op in [(self.cl, '>'), (self.cr, '<')]:
c = c.detach().cpu().numpy()
for i, ci in enumerate(c.T):
fi_name = feature_name[self.input_dim[0] + i]
for j in ci:
if mean is not None and std is not None:
j = j * std[fi_name] + mean[fi_name]
bound_name.append('{} {} {:.3f}'.format(fi_name, op, j))
return bound_name
class Product(torch.autograd.Function):
"""Tensor product function."""
@staticmethod
def forward(ctx, X):
y = (-1. / (-1. + torch.sum(torch.log(X), dim=1)))
ctx.save_for_backward(X, y)
return y
@staticmethod
def backward(ctx, grad_output):
X, y, = ctx.saved_tensors
grad_input = grad_output.unsqueeze(1) * (y.unsqueeze(1) ** 2 / (X + EPSILON))
return grad_input
class EstimatedProduct(torch.autograd.Function):
"""Tensor product function with a estimated derivative."""
@staticmethod
def forward(ctx, X):
y = (-1. / (-1. + torch.sum(torch.log(X), dim=1)))
ctx.save_for_backward(X, y)
return y
@staticmethod
def backward(ctx, grad_output):
X, y, = ctx.saved_tensors
grad_input = grad_output.unsqueeze(1) * ((-1. / (-1. + torch.log(y.unsqueeze(1) ** 2))) / (X + EPSILON))
return grad_input
class LRLayer(nn.Module):
"""The LR layer is used to learn the linear part of the data."""
def __init__(self, n, input_dim):
super(LRLayer, self).__init__()
self.n = n
self.input_dim = input_dim
self.output_dim = self.n
self.layer_type = 'linear'
self.fc1 = nn.Linear(self.input_dim, self.output_dim)
def forward(self, x):
return self.fc1(x)
def binarized_forward(self, x):
return self.forward(x)
def clip(self):
for param in self.fc1.parameters():
param.data.clamp_(-1.0, 1.0)
class ConjunctionLayer(nn.Module):
"""The conjunction layer is used to learn the conjunction of nodes."""
def __init__(self, n, input_dim, use_not=False, estimated_grad=False):
super(ConjunctionLayer, self).__init__()
self.n = n
self.use_not = use_not
self.input_dim = input_dim if not use_not else input_dim * 2
self.output_dim = self.n
self.layer_type = 'conjunction'
self.W = nn.Parameter(INIT_RANGE * torch.rand(self.n, self.input_dim))
self.Product = EstimatedProduct if estimated_grad else Product
self.node_activation_cnt = None
def forward(self, x):
if self.use_not:
x = torch.cat((x, 1 - x), dim=1)
return self.Product.apply(1 - (1 - x).unsqueeze(-1) * self.W.t())
def binarized_forward(self, x):
if self.use_not:
x = torch.cat((x, 1 - x), dim=1)
Wb = Binarize.apply(self.W - THRESHOLD)
return torch.prod(1 - (1 - x).unsqueeze(-1) * Wb.t(), dim=1)
def clip(self):
self.W.data.clamp_(0.0, 1.0)
class DisjunctionLayer(nn.Module):
"""The disjunction layer is used to learn the disjunction of nodes."""
def __init__(self, n, input_dim, use_not=False, estimated_grad=False):
super(DisjunctionLayer, self).__init__()
self.n = n
self.use_not = use_not
self.input_dim = input_dim if not use_not else input_dim * 2
self.output_dim = self.n
self.layer_type = 'disjunction'
self.W = nn.Parameter(INIT_RANGE * torch.rand(self.n, self.input_dim))
self.Product = EstimatedProduct if estimated_grad else Product
self.node_activation_cnt = None
def forward(self, x):
if self.use_not:
x = torch.cat((x, 1 - x), dim=1)
return 1 - self.Product.apply(1 - x.unsqueeze(-1) * self.W.t())
def binarized_forward(self, x):
if self.use_not:
x = torch.cat((x, 1 - x), dim=1)
Wb = Binarize.apply(self.W - THRESHOLD)
return 1 - torch.prod(1 - x.unsqueeze(-1) * Wb.t(), dim=1)
def clip(self):
self.W.data.clamp_(0.0, 1.0)
def extract_rules(prev_layer, skip_connect_layer, layer, pos_shift=0):
dim2id = defaultdict(lambda: -1)
rules = {}
tmp = 0
rule_list = []
Wb = (layer.W > 0.5).type(torch.int).detach().cpu().numpy()
if skip_connect_layer is not None:
shifted_dim2id = {(k + prev_layer.output_dim): (-2, v) for k, v in skip_connect_layer.dim2id.items()}
prev_dim2id = {k: (-1, v) for k, v in prev_layer.dim2id.items()}
merged_dim2id = defaultdict(lambda: -1, {**shifted_dim2id, **prev_dim2id})
else:
merged_dim2id = {k: (-1, v) for k, v in prev_layer.dim2id.items()}
for ri, row in enumerate(Wb):
if layer.node_activation_cnt[ri + pos_shift] == 0 or layer.node_activation_cnt[ri + pos_shift] == layer.forward_tot:
dim2id[ri + pos_shift] = -1
continue
rule = {}
bound = {}
if prev_layer.layer_type == 'binarization' and prev_layer.input_dim[1] > 0:
c = torch.cat((prev_layer.cl.t().reshape(-1), prev_layer.cr.t().reshape(-1))).detach().cpu().numpy()
for i, w in enumerate(row):
if w > 0 and merged_dim2id[i][1] != -1:
if prev_layer.layer_type == 'binarization' and i >= prev_layer.disc_num:
ci = i - prev_layer.disc_num
bi = ci // prev_layer.n
if bi not in bound:
bound[bi] = [i, c[ci]]
rule[(-1, i)] = 1
else:
if (ci < c.shape[0] // 2 and layer.layer_type == 'conjunction') or \
(ci >= c.shape[0] // 2 and layer.layer_type == 'disjunction'):
func = max
else:
func = min
bound[bi][1] = func(bound[bi][1], c[ci])
if bound[bi][1] == c[ci]:
del rule[(-1, bound[bi][0])]
rule[(-1, i)] = 1
bound[bi][0] = i
else:
rule[merged_dim2id[i]] = 1
rule = tuple(sorted(rule.keys()))
if rule not in rules:
rules[rule] = tmp
rule_list.append(rule)
dim2id[ri + pos_shift] = tmp
tmp += 1
else:
dim2id[ri + pos_shift] = rules[rule]
return dim2id, rule_list
class UnionLayer(nn.Module):
"""The union layer is used to learn the rule-based representation."""
def __init__(self, n, input_dim, use_not=False, estimated_grad=False):
super(UnionLayer, self).__init__()
self.n = n
self.use_not = use_not
self.input_dim = input_dim
self.output_dim = self.n * 2
self.layer_type = 'union'
self.forward_tot = None
self.node_activation_cnt = None
self.dim2id = None
self.rule_list = None
self.rule_name = None
self.con_layer = ConjunctionLayer(self.n, self.input_dim, use_not=use_not, estimated_grad=estimated_grad)
self.dis_layer = DisjunctionLayer(self.n, self.input_dim, use_not=use_not, estimated_grad=estimated_grad)
def forward(self, x):
return torch.cat([self.con_layer(x), self.dis_layer(x)], dim=1)
def binarized_forward(self, x):
return torch.cat([self.con_layer.binarized_forward(x),
self.dis_layer.binarized_forward(x)], dim=1)
def clip(self):
self.con_layer.clip()
self.dis_layer.clip()
def get_rules(self, prev_layer, skip_connect_layer):
self.con_layer.forward_tot = self.dis_layer.forward_tot = self.forward_tot
self.con_layer.node_activation_cnt = self.dis_layer.node_activation_cnt = self.node_activation_cnt
con_dim2id, con_rule_list = extract_rules(prev_layer, skip_connect_layer, self.con_layer)
dis_dim2id, dis_rule_list = extract_rules(prev_layer, skip_connect_layer, self.dis_layer, self.con_layer.W.shape[0])
shift = max(con_dim2id.values()) + 1
dis_dim2id = {k: (-1 if v == -1 else v + shift) for k, v in dis_dim2id.items()}
dim2id = defaultdict(lambda: -1, {**con_dim2id, **dis_dim2id})
rule_list = (con_rule_list, dis_rule_list)
self.dim2id = dim2id
self.rule_list = rule_list
return dim2id, rule_list
def get_rule_description(self, prev_rule_name, wrap=False):
self.rule_name = []
for rl, op in zip(self.rule_list, ('&', '|')):
for rule in rl:
name = ''
for i, ri in enumerate(rule):
op_str = ' {} '.format(op) if i != 0 else ''
var_str = ('({})' if wrap else '{}').format(prev_rule_name[2 + ri[0]][ri[1]])
name += op_str + var_str
self.rule_name.append(name)
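# --- Illustrative sketch (assumption: not part of the original module). It wires a
# BinarizeLayer into a UnionLayer on random data just to show the expected tensor
# shapes; all sizes below are arbitrary.
if __name__ == '__main__':
    torch.manual_seed(0)
    batch, disc_num, cont_num, k = 4, 3, 2, 5
    binarize = BinarizeLayer(k, (disc_num, cont_num))
    union = UnionLayer(n=6, input_dim=binarize.output_dim)
    x = torch.cat([torch.randint(0, 2, (batch, disc_num)).float(),
                   torch.randn(batch, cont_num)], dim=1)
    h = binarize(x)   # (batch, disc_num + 2 * k * cont_num) binary features
    out = union(h)    # (batch, 2 * 6): conjunction outputs then disjunction outputs
    print(h.shape, out.shape)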
|
the-stack_106_18201
|
#Take two lists, say for example these two:
# a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
# b = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
#and write a program that returns a list that contains only the elements that are common between the lists
#(without duplicates). Make sure your program works on two lists of different sizes.
#Extras:
#Randomly generate two lists to test this
#Write this in one line of Python (don’t worry if you can’t figure this out at this point - we’ll get to it soon)
import random
a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
b = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
c = [int(1000*random.random()) for i in range(10000)]
d = [int(1000*random.random()) for i in range(10000)]
print([num for num in a if num in b])
print(len([num for num in c if num in d]))
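# A possible one-liner for the "without duplicates" extra (assumption: output order
# does not matter): set intersection keeps each common element exactly once.
print(list(set(a) & set(b)))
print(len(set(c) & set(d)))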
|
the-stack_106_18203
|
import logging
import os
from dataclasses import dataclass
from typing import Any, Dict, List
from urllib.parse import urljoin
import requests
from eth_utils import to_checksum_address
logger = logging.getLogger(__name__)
@dataclass
class CoinMarketCapToken:
id: int # CoinMarketCap id
name: str
symbol: str
token_address: str # For tokens
logo_uri: str
class CoinMarketCapClient:
base_url = 'https://pro-api.coinmarketcap.com/'
base_logo_uri = 'https://s2.coinmarketcap.com/static/img/coins/200x200/'
def __init__(self, api_token: str):
self.api_token = api_token
self.headers = {
'Accepts': 'application/json',
'X-CMC_PRO_API_KEY': api_token,
}
self.http_session = requests.session()
    def download_file(self, url: str, target_folder: str, local_filename: str) -> str:
        if not os.path.exists(target_folder):
            os.makedirs(target_folder)
        r = self.http_session.get(url, stream=True)
        if not r.ok:
            logger.warning("Image not found for url %s", url)
            return None
        with open(os.path.join(target_folder, local_filename), 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
return local_filename
def get_map(self) -> List[Dict[str, Any]]:
"""
[
{'id': 1659,
'name': 'Gnosis',
'symbol': 'GNO',
'slug': 'gnosis-gno',
'is_active': 1,
'rank': 137,
'first_historical_data': '2017-05-01T20:09:54.000Z',
'last_historical_data': '2020-06-15T09:24:12.000Z',
'platform': {'id': 1027,
'name': 'Ethereum',
'symbol': 'ETH',
'slug': 'ethereum',
'token_address': '0x6810e776880c02933d47db1b9fc05908e5386b96'}
}, ...
]
:return:
"""
relative_url = 'v1/cryptocurrency/map'
url = urljoin(self.base_url, relative_url)
parameters = {
# 'listing_status': 'active',
# 'start': '1',
'limit': '5000',
}
try:
return self.http_session.get(url, headers=self.headers, params=parameters).json().get('data', [])
except IOError:
logger.warning('Problem getting tokens from coinmarketcap', exc_info=True)
return []
def get_ethereum_tokens(self) -> List[CoinMarketCapToken]:
tokens = []
for token in self.get_map():
if token and token['is_active'] and token['platform'] and token['platform']['symbol'] == 'ETH':
try:
checksummed_address = to_checksum_address(token['platform']['token_address'])
tokens.append(CoinMarketCapToken(token['id'], token['name'], token['symbol'], checksummed_address,
urljoin(self.base_logo_uri, f'{token["id"]}.png'))
)
except ValueError:
logger.warning('Invalid address %s for token %s with id %d', token['platform']['token_address'],
token['name'], token['id'])
return tokens
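# --- Illustrative usage sketch (assumption: not part of the original module).
# 'YOUR-API-KEY' is a placeholder, and get_ethereum_tokens() issues a real HTTP
# request to the CoinMarketCap API when executed.
if __name__ == '__main__':
    client = CoinMarketCapClient('YOUR-API-KEY')
    for eth_token in client.get_ethereum_tokens()[:5]:
        print(eth_token.symbol, eth_token.token_address, eth_token.logo_uri)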
|
the-stack_106_18204
|
"""
HDF5 Pricing File Format
------------------------
At the top level, the file is keyed by country (to support regional
files containing multiple countries).
Within each country, there are 4 subgroups:
``/data``
^^^^^^^^^
Each field (OHLCV) is stored in a dataset as a 2D array, with a row per
sid and a column per session. This differs from the more standard
orientation of dates x sids, because it allows each compressed block to
contain contiguous values for the same sid, which allows for better
compression.
.. code-block:: none
/data
/open
/high
/low
/close
/volume
``/index``
^^^^^^^^^^
Contains two datasets, the index of sids (aligned to the rows of the
OHLCV 2D arrays) and index of sessions (aligned to the columns of the
OHLCV 2D arrays) to use for lookups.
.. code-block:: none
/index
/sid
/day
``/lifetimes``
^^^^^^^^^^^^^^
Contains two datasets, start_date and end_date, defining the lifetime
for each asset, aligned to the sids index.
.. code-block:: none
/lifetimes
/start_date
/end_date
``/currency``
^^^^^^^^^^^^^
Contains a single dataset, ``code``, aligned to the sids index, which contains
the listing currency of each sid.
Example
^^^^^^^
Sample layout of the full file with multiple countries.
.. code-block:: none
|- /US
| |- /data
| | |- /open
| | |- /high
| | |- /low
| | |- /close
| | |- /volume
| |
| |- /index
| | |- /sid
| | |- /day
| |
| |- /lifetimes
| | |- /start_date
| | |- /end_date
| |
| |- /currency
| |- /code
|
|- /CA
|- /data
| |- /open
| |- /high
| |- /low
| |- /close
| |- /volume
|
|- /index
| |- /sid
| |- /day
|
|- /lifetimes
| |- /start_date
| |- /end_date
|
|- /currency
|- /code
"""
from functools import partial
import h5py
import logbook
import numpy as np
import pandas as pd
from six import iteritems, raise_from, viewkeys
from six.moves import reduce
from zipline.data.bar_reader import (
NoDataAfterDate,
NoDataBeforeDate,
NoDataForSid,
NoDataOnDate,
)
from zipline.data.session_bars import CurrencyAwareSessionBarReader
from zipline.utils.memoize import lazyval
from zipline.utils.numpy_utils import bytes_array_to_native_str_object_array
from zipline.utils.pandas_utils import check_indexes_all_same
log = logbook.Logger('HDF5DailyBars')
VERSION = 0
DATA = 'data'
INDEX = 'index'
LIFETIMES = 'lifetimes'
CURRENCY = 'currency'
CODE = 'code'
SCALING_FACTOR = 'scaling_factor'
OPEN = 'open'
HIGH = 'high'
LOW = 'low'
CLOSE = 'close'
VOLUME = 'volume'
FIELDS = (OPEN, HIGH, LOW, CLOSE, VOLUME)
DAY = 'day'
SID = 'sid'
START_DATE = 'start_date'
END_DATE = 'end_date'
# XXX is reserved for "transactions involving no currency".
MISSING_CURRENCY = 'XXX'
DEFAULT_SCALING_FACTORS = {
# Retain 3 decimal places for prices.
OPEN: 1000,
HIGH: 1000,
LOW: 1000,
CLOSE: 1000,
# Volume is expected to be a whole integer.
VOLUME: 1,
}
def coerce_to_uint32(a, scaling_factor):
"""
Returns a copy of the array as uint32, applying a scaling factor to
maintain precision if supplied.
"""
return (a * scaling_factor).round().astype('uint32')
def days_and_sids_for_frames(frames):
"""
Returns the date index and sid columns shared by a list of dataframes,
ensuring they all match.
Parameters
----------
frames : list[pd.DataFrame]
A list of dataframes indexed by day, with a column per sid.
Returns
-------
days : np.array[datetime64[ns]]
The days in these dataframes.
sids : np.array[int64]
The sids in these dataframes.
Raises
------
ValueError
If the dataframes passed are not all indexed by the same days
and sids.
"""
if not frames:
days = np.array([], dtype='datetime64[ns]')
sids = np.array([], dtype='int64')
return days, sids
# Ensure the indices and columns all match.
check_indexes_all_same(
[frame.index for frame in frames],
message='Frames have mismatched days.',
)
check_indexes_all_same(
[frame.columns for frame in frames],
message='Frames have mismatched sids.',
)
return frames[0].index.values, frames[0].columns.values
class HDF5DailyBarWriter(object):
"""
Class capable of writing daily OHLCV data to disk in a format that
can be read efficiently by HDF5DailyBarReader.
Parameters
----------
filename : str
The location at which we should write our output.
date_chunk_size : int
The number of days per chunk in the HDF5 file. If this is
greater than the number of days in the data, the chunksize will
match the actual number of days.
See Also
--------
zipline.data.hdf5_daily_bars.HDF5DailyBarReader
"""
def __init__(self, filename, date_chunk_size):
self._filename = filename
self._date_chunk_size = date_chunk_size
def h5_file(self, mode):
return h5py.File(self._filename, mode)
def write(self,
country_code,
frames,
currency_codes=None,
scaling_factors=None):
"""
Write the OHLCV data for one country to the HDF5 file.
Parameters
----------
country_code : str
The ISO 3166 alpha-2 country code for this country.
frames : dict[str, pd.DataFrame]
A dict mapping each OHLCV field to a dataframe with a row
for each date and a column for each sid. The dataframes need
to have the same index and columns.
currency_codes : pd.Series, optional
Series mapping sids to 3-digit currency code values for those sids'
listing currencies. If not passed, missing currencies will be
written.
scaling_factors : dict[str, float], optional
A dict mapping each OHLCV field to a scaling factor, which
is applied (as a multiplier) to the values of field to
efficiently store them as uint32, while maintaining desired
precision. These factors are written to the file as metadata,
which is consumed by the reader to adjust back to the original
float values. Default is None, in which case
DEFAULT_SCALING_FACTORS is used.
"""
if scaling_factors is None:
scaling_factors = DEFAULT_SCALING_FACTORS
# Note that this functions validates that all of the frames
# share the same days and sids.
days, sids = days_and_sids_for_frames(list(frames.values()))
# XXX: We should make this required once we're using it everywhere.
if currency_codes is None:
currency_codes = pd.Series(index=sids, data=MISSING_CURRENCY)
# Currency codes should match dataframe columns.
check_sids_arrays_match(
sids,
currency_codes.index.values,
message="currency_codes sids do not match data sids:",
)
# Write start and end dates for each sid.
start_date_ixs, end_date_ixs = compute_asset_lifetimes(frames)
if len(sids):
chunks = (len(sids), min(self._date_chunk_size, len(days)))
else:
# h5py crashes if we provide chunks for empty data.
chunks = None
with self.h5_file(mode='a') as h5_file:
# ensure that the file version has been written
h5_file.attrs['version'] = VERSION
country_group = h5_file.create_group(country_code)
self._write_index_group(country_group, days, sids)
self._write_lifetimes_group(
country_group,
start_date_ixs,
end_date_ixs,
)
self._write_currency_group(country_group, currency_codes)
self._write_data_group(
country_group,
frames,
scaling_factors,
chunks,
)
def write_from_sid_df_pairs(self,
country_code,
data,
currency_codes=None,
scaling_factors=None):
"""
Parameters
----------
country_code : str
The ISO 3166 alpha-2 country code for this country.
data : iterable[tuple[int, pandas.DataFrame]]
The data chunks to write. Each chunk should be a tuple of
sid and the data for that asset.
currency_codes : pd.Series, optional
Series mapping sids to 3-digit currency code values for those sids'
listing currencies. If not passed, missing currencies will be
written.
scaling_factors : dict[str, float], optional
A dict mapping each OHLCV field to a scaling factor, which
is applied (as a multiplier) to the values of field to
efficiently store them as uint32, while maintaining desired
precision. These factors are written to the file as metadata,
which is consumed by the reader to adjust back to the original
float values. Default is None, in which case
DEFAULT_SCALING_FACTORS is used.
"""
data = list(data)
if not data:
empty_frame = pd.DataFrame(
data=None,
index=np.array([], dtype='datetime64[ns]'),
columns=np.array([], dtype='int64'),
)
            return self.write(
                country_code,
                {f: empty_frame.copy() for f in FIELDS},
                currency_codes=currency_codes,
                scaling_factors=scaling_factors,
            )
sids, frames = zip(*data)
ohlcv_frame = pd.concat(frames)
# Repeat each sid for each row in its corresponding frame.
sid_ix = np.repeat(sids, [len(f) for f in frames])
# Add id to the index, so the frame is indexed by (date, id).
ohlcv_frame.set_index(sid_ix, append=True, inplace=True)
frames = {
field: ohlcv_frame[field].unstack()
for field in FIELDS
}
return self.write(
country_code=country_code,
frames=frames,
scaling_factors=scaling_factors,
currency_codes=currency_codes
)
def _write_index_group(self, country_group, days, sids):
"""Write /country/index.
"""
index_group = country_group.create_group(INDEX)
self._log_writing_dataset(index_group)
index_group.create_dataset(SID, data=sids)
# h5py does not support datetimes, so they need to be stored
# as integers.
index_group.create_dataset(DAY, data=days.astype(np.int64))
def _write_lifetimes_group(self,
country_group,
start_date_ixs,
end_date_ixs):
"""Write /country/lifetimes
"""
lifetimes_group = country_group.create_group(LIFETIMES)
self._log_writing_dataset(lifetimes_group)
lifetimes_group.create_dataset(START_DATE, data=start_date_ixs)
lifetimes_group.create_dataset(END_DATE, data=end_date_ixs)
def _write_currency_group(self, country_group, currencies):
"""Write /country/currency
"""
currency_group = country_group.create_group(CURRENCY)
self._log_writing_dataset(currency_group)
currency_group.create_dataset(
CODE,
data=currencies.values.astype(dtype='S3'),
)
def _write_data_group(self,
country_group,
frames,
scaling_factors,
chunks):
"""Write /country/data
"""
data_group = country_group.create_group(DATA)
self._log_writing_dataset(data_group)
for field in FIELDS:
frame = frames[field]
# Sort rows by increasing sid, and columns by increasing date.
frame.sort_index(inplace=True)
frame.sort_index(axis='columns', inplace=True)
data = coerce_to_uint32(
frame.T.fillna(0).values,
scaling_factors[field],
)
dataset = data_group.create_dataset(
field,
compression='lzf',
shuffle=True,
data=data,
chunks=chunks,
)
self._log_writing_dataset(dataset)
dataset.attrs[SCALING_FACTOR] = scaling_factors[field]
log.debug(
'Writing dataset {} to file {}',
dataset.name, self._filename
)
def _log_writing_dataset(self, dataset):
log.debug("Writing {} to file {}", dataset.name, self._filename)
def compute_asset_lifetimes(frames):
"""
Parameters
----------
frames : dict[str, pd.DataFrame]
A dict mapping each OHLCV field to a dataframe with a row for
each date and a column for each sid, as passed to write().
Returns
-------
start_date_ixs : np.array[int64]
The index of the first date with non-nan values, for each sid.
end_date_ixs : np.array[int64]
The index of the last date with non-nan values, for each sid.
"""
# Build a 2D array (dates x sids), where an entry is True if all
# fields are nan for the given day and sid.
is_null_matrix = np.logical_and.reduce(
[frames[field].isnull().values for field in FIELDS],
)
if not is_null_matrix.size:
empty = np.array([], dtype='int64')
return empty, empty.copy()
# Offset of the first null from the start of the input.
start_date_ixs = is_null_matrix.argmin(axis=0)
# Offset of the last null from the **end** of the input.
end_offsets = is_null_matrix[::-1].argmin(axis=0)
# Offset of the last null from the start of the input
end_date_ixs = is_null_matrix.shape[0] - end_offsets - 1
return start_date_ixs, end_date_ixs
def convert_price_with_scaling_factor(a, scaling_factor):
conversion_factor = (1.0 / scaling_factor)
zeroes = (a == 0)
return np.where(zeroes, np.nan, a.astype('float64')) * conversion_factor
class HDF5DailyBarReader(CurrencyAwareSessionBarReader):
"""
Parameters
    ----------
country_group : h5py.Group
The group for a single country in an HDF5 daily pricing file.
"""
def __init__(self, country_group):
self._country_group = country_group
self._postprocessors = {
OPEN: partial(convert_price_with_scaling_factor,
scaling_factor=self._read_scaling_factor(OPEN)),
HIGH: partial(convert_price_with_scaling_factor,
scaling_factor=self._read_scaling_factor(HIGH)),
LOW: partial(convert_price_with_scaling_factor,
scaling_factor=self._read_scaling_factor(LOW)),
CLOSE: partial(convert_price_with_scaling_factor,
scaling_factor=self._read_scaling_factor(CLOSE)),
VOLUME: lambda a: a,
}
@classmethod
def from_file(cls, h5_file, country_code):
"""
Construct from an h5py.File and a country code.
Parameters
----------
h5_file : h5py.File
An HDF5 daily pricing file.
country_code : str
The ISO 3166 alpha-2 country code for the country to read.
"""
if h5_file.attrs['version'] != VERSION:
raise ValueError(
'mismatched version: file is of version %s, expected %s' % (
h5_file.attrs['version'],
VERSION,
),
)
return cls(h5_file[country_code])
@classmethod
def from_path(cls, path, country_code):
"""
Construct from a file path and a country code.
Parameters
----------
path : str
The path to an HDF5 daily pricing file.
country_code : str
The ISO 3166 alpha-2 country code for the country to read.
"""
return cls.from_file(h5py.File(path), country_code)
def _read_scaling_factor(self, field):
return self._country_group[DATA][field].attrs[SCALING_FACTOR]
def load_raw_arrays(self,
columns,
start_date,
end_date,
assets):
"""
Parameters
----------
columns : list of str
'open', 'high', 'low', 'close', or 'volume'
start_date: Timestamp
Beginning of the window range.
end_date: Timestamp
End of the window range.
assets : list of int
The asset identifiers in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
(minutes in range, sids) with a dtype of float64, containing the
values for the respective field over start and end dt range.
"""
self._validate_timestamp(start_date)
self._validate_timestamp(end_date)
start = start_date.asm8
end = end_date.asm8
date_slice = self._compute_date_range_slice(start, end)
n_dates = date_slice.stop - date_slice.start
# Create a buffer into which we'll read data from the h5 file.
# Allocate an extra row of space that will always contain null values.
# We'll use that space to provide "data" for entries in ``assets`` that
# are unknown to us.
full_buf = np.zeros((len(self.sids) + 1, n_dates), dtype=np.uint32)
# We'll only read values into this portion of the read buf.
mutable_buf = full_buf[:-1]
# Indexer that converts an array aligned to self.sids (which is what we
# pull from the h5 file) into an array aligned to ``assets``.
#
# Unknown assets will have an index of -1, which means they'll always
# pull from the last row of the read buffer. We allocated an extra
# empty row above so that these lookups will cause us to fill our
# output buffer with "null" values.
sid_selector = self._make_sid_selector(assets)
out = []
for column in columns:
# Zero the buffer to prepare to receive new data.
mutable_buf.fill(0)
dataset = self._country_group[DATA][column]
# Fill the mutable portion of our buffer with data from the file.
dataset.read_direct(
mutable_buf,
np.s_[:, date_slice],
)
# Select data from the **full buffer**. Unknown assets will pull
# from the last row, which is always empty.
out.append(self._postprocessors[column](full_buf[sid_selector].T))
return out
def _make_sid_selector(self, assets):
"""
Build an indexer mapping ``self.sids`` to ``assets``.
Parameters
----------
assets : list[int]
List of assets requested by a caller of ``load_raw_arrays``.
Returns
-------
index : np.array[int64]
Index array containing the index in ``self.sids`` for each location
in ``assets``. Entries in ``assets`` for which we don't have a sid
will contain -1. It is caller's responsibility to handle these
values correctly.
"""
assets = np.array(assets)
sid_selector = self.sids.searchsorted(assets)
unknown = np.in1d(assets, self.sids, invert=True)
sid_selector[unknown] = -1
return sid_selector
def _compute_date_range_slice(self, start_date, end_date):
# Get the index of the start of dates for ``start_date``.
start_ix = self.dates.searchsorted(start_date)
# Get the index of the start of the first date **after** end_date.
end_ix = self.dates.searchsorted(end_date, side='right')
return slice(start_ix, end_ix)
def _validate_assets(self, assets):
"""Validate that asset identifiers are contained in the daily bars.
Parameters
----------
assets : array-like[int]
The asset identifiers to validate.
Raises
------
NoDataForSid
If one or more of the provided asset identifiers are not
contained in the daily bars.
"""
missing_sids = np.setdiff1d(assets, self.sids)
if len(missing_sids):
raise NoDataForSid(
'Assets not contained in daily pricing file: {}'.format(
missing_sids
)
)
def _validate_timestamp(self, ts):
if ts.asm8 not in self.dates:
raise NoDataOnDate(ts)
@lazyval
def dates(self):
return self._country_group[INDEX][DAY][:].astype('datetime64[ns]')
@lazyval
def sids(self):
return self._country_group[INDEX][SID][:].astype('int64', copy=False)
@lazyval
def asset_start_dates(self):
return self.dates[self._country_group[LIFETIMES][START_DATE][:]]
@lazyval
def asset_end_dates(self):
return self.dates[self._country_group[LIFETIMES][END_DATE][:]]
@lazyval
def _currency_codes(self):
bytes_array = self._country_group[CURRENCY][CODE][:]
return bytes_array_to_native_str_object_array(bytes_array)
def currency_codes(self, sids):
"""Get currencies in which prices are quoted for the requested sids.
Parameters
----------
sids : np.array[int64]
Array of sids for which currencies are needed.
Returns
-------
currency_codes : np.array[object]
Array of currency codes for listing currencies of ``sids``.
"""
# Find the index of requested sids in our stored sids.
ixs = self.sids.searchsorted(sids, side='left')
result = self._currency_codes[ixs]
# searchsorted returns the index of the next lowest sid if the lookup
# fails. Fill these sids with the special "missing" sentinel.
not_found = (self.sids[ixs] != sids)
result[not_found] = None
return result
@property
def last_available_dt(self):
"""
Returns
-------
dt : pd.Timestamp
The last session for which the reader can provide data.
"""
return pd.Timestamp(self.dates[-1], tz='UTC')
@property
def trading_calendar(self):
"""
Returns the zipline.utils.calendar.trading_calendar used to read
the data. Can be None (if the writer didn't specify it).
"""
raise NotImplementedError(
'HDF5 pricing does not yet support trading calendars.'
)
@property
def first_trading_day(self):
"""
Returns
-------
dt : pd.Timestamp
The first trading day (session) for which the reader can provide
data.
"""
return pd.Timestamp(self.dates[0], tz='UTC')
@lazyval
def sessions(self):
"""
Returns
-------
sessions : DatetimeIndex
All session labels (unioning the range for all assets) which the
reader can provide.
"""
return pd.to_datetime(self.dates, utc=True)
def get_value(self, sid, dt, field):
"""
Retrieve the value at the given coordinates.
Parameters
----------
sid : int
The asset identifier.
dt : pd.Timestamp
The timestamp for the desired data point.
field : string
The OHLVC name for the desired data point.
Returns
-------
value : float|int
The value at the given coordinates, ``float`` for OHLC, ``int``
for 'volume'.
Raises
------
NoDataOnDate
If the given dt is not a valid market minute (in minute mode) or
session (in daily mode) according to this reader's tradingcalendar.
"""
self._validate_assets([sid])
self._validate_timestamp(dt)
sid_ix = self.sids.searchsorted(sid)
dt_ix = self.dates.searchsorted(dt.asm8)
value = self._postprocessors[field](
self._country_group[DATA][field][sid_ix, dt_ix]
)
# When the value is nan, this dt may be outside the asset's lifetime.
# If that's the case, the proper NoDataOnDate exception is raised.
# Otherwise (when there's just a hole in the middle of the data), the
# nan is returned.
if np.isnan(value):
if dt.asm8 < self.asset_start_dates[sid_ix]:
raise NoDataBeforeDate()
if dt.asm8 > self.asset_end_dates[sid_ix]:
raise NoDataAfterDate()
return value
def get_last_traded_dt(self, asset, dt):
"""
Get the latest day on or before ``dt`` in which ``asset`` traded.
If there are no trades on or before ``dt``, returns ``pd.NaT``.
Parameters
----------
asset : zipline.asset.Asset
The asset for which to get the last traded day.
dt : pd.Timestamp
The dt at which to start searching for the last traded day.
Returns
-------
last_traded : pd.Timestamp
The day of the last trade for the given asset, using the
input dt as a vantage point.
"""
sid_ix = self.sids.searchsorted(asset.sid)
# Used to get a slice of all dates up to and including ``dt``.
dt_limit_ix = self.dates.searchsorted(dt.asm8, side='right')
# Get the indices of all dates with nonzero volume.
nonzero_volume_ixs = np.ravel(
np.nonzero(self._country_group[DATA][VOLUME][sid_ix, :dt_limit_ix])
)
if len(nonzero_volume_ixs) == 0:
return pd.NaT
return pd.Timestamp(self.dates[nonzero_volume_ixs][-1], tz='UTC')
class MultiCountryDailyBarReader(CurrencyAwareSessionBarReader):
"""
Parameters
    ----------
readers : dict[str -> SessionBarReader]
A dict mapping country codes to SessionBarReader instances to
service each country.
"""
def __init__(self, readers):
self._readers = readers
self._country_map = pd.concat([
pd.Series(index=reader.sids, data=country_code)
for country_code, reader in iteritems(readers)
])
@classmethod
def from_file(cls, h5_file):
"""
Construct from an h5py.File.
Parameters
----------
h5_file : h5py.File
An HDF5 daily pricing file.
"""
return cls({
country: HDF5DailyBarReader.from_file(h5_file, country)
for country in h5_file.keys()
})
@classmethod
def from_path(cls, path):
"""
Construct from a file path.
Parameters
----------
path : str
Path to an HDF5 daily pricing file.
"""
return cls.from_file(h5py.File(path))
@property
def countries(self):
"""A set-like object of the country codes supplied by this reader.
"""
return viewkeys(self._readers)
def _country_code_for_assets(self, assets):
        # Workaround for pandas 1.1.1/1.1.2: Series.get() unexpectedly returns None
        # when a mix of existing and missing labels is passed, so look sids up one
        # at a time instead.
# country_codes = self._country_map.get(assets)
country_codes = [self._country_map.get(a) for a in assets]
# In some versions of pandas (observed in 0.22), Series.get()
# returns None if none of the labels are in the index.
# if country_codes is not None:
# unique_country_codes = country_codes.dropna().unique()
# num_countries = len(unique_country_codes)
# else:
# num_countries = 0
        # Keep only the unique, non-missing country codes.
unique_country_codes = list(set([c for c in country_codes if c]))
num_countries = len(unique_country_codes)
if num_countries == 0:
raise ValueError('At least one valid asset id is required.')
elif num_countries > 1:
raise NotImplementedError(
(
'Assets were requested from multiple countries ({}),'
' but multi-country reads are not yet supported.'
).format(unique_country_codes)
)
# return np.asscalar(unique_country_codes)
        # OK for numpy > 1.16:
# return unique_country_codes.item()
return unique_country_codes[0]
def load_raw_arrays(self,
columns,
start_date,
end_date,
assets):
"""
Parameters
----------
columns : list of str
'open', 'high', 'low', 'close', or 'volume'
start_date: Timestamp
Beginning of the window range.
end_date: Timestamp
End of the window range.
assets : list of int
The asset identifiers in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
(minutes in range, sids) with a dtype of float64, containing the
values for the respective field over start and end dt range.
"""
country_code = self._country_code_for_assets(assets)
return self._readers[country_code].load_raw_arrays(
columns,
start_date,
end_date,
assets,
)
@property
def last_available_dt(self):
"""
Returns
-------
dt : pd.Timestamp
The last session for which the reader can provide data.
"""
return max(
reader.last_available_dt for reader in self._readers.values()
)
@property
def trading_calendar(self):
"""
Returns the zipline.utils.calendar.trading_calendar used to read
the data. Can be None (if the writer didn't specify it).
"""
raise NotImplementedError(
'HDF5 pricing does not yet support trading calendars.'
)
@property
def first_trading_day(self):
"""
Returns
-------
dt : pd.Timestamp
The first trading day (session) for which the reader can provide
data.
"""
return min(
reader.first_trading_day for reader in self._readers.values()
)
@property
def sessions(self):
"""
Returns
-------
sessions : DatetimeIndex
All session labels (unioning the range for all assets) which the
reader can provide.
"""
return pd.to_datetime(
reduce(
np.union1d,
(reader.dates for reader in self._readers.values()),
),
utc=True,
)
def get_value(self, sid, dt, field):
"""
Retrieve the value at the given coordinates.
Parameters
----------
sid : int
The asset identifier.
dt : pd.Timestamp
The timestamp for the desired data point.
field : string
            The OHLCV name for the desired data point.
Returns
-------
value : float|int
The value at the given coordinates, ``float`` for OHLC, ``int``
for 'volume'.
Raises
------
NoDataOnDate
If the given dt is not a valid market minute (in minute mode) or
            session (in daily mode) according to this reader's trading calendar.
NoDataForSid
If the given sid is not valid.
"""
try:
country_code = self._country_code_for_assets([sid])
except ValueError as exc:
raise_from(
NoDataForSid(
'Asset not contained in daily pricing file: {}'.format(sid)
),
exc
)
return self._readers[country_code].get_value(sid, dt, field)
def get_last_traded_dt(self, asset, dt):
"""
Get the latest day on or before ``dt`` in which ``asset`` traded.
If there are no trades on or before ``dt``, returns ``pd.NaT``.
Parameters
----------
asset : zipline.asset.Asset
The asset for which to get the last traded day.
dt : pd.Timestamp
The dt at which to start searching for the last traded day.
Returns
-------
last_traded : pd.Timestamp
The day of the last trade for the given asset, using the
input dt as a vantage point.
"""
country_code = self._country_code_for_assets([asset.sid])
return self._readers[country_code].get_last_traded_dt(asset, dt)
def currency_codes(self, sids):
"""Get currencies in which prices are quoted for the requested sids.
Assumes that a sid's prices are always quoted in a single currency.
Parameters
----------
sids : np.array[int64]
Array of sids for which currencies are needed.
Returns
-------
currency_codes : np.array[S3]
Array of currency codes for listing currencies of ``sids``.
"""
country_code = self._country_code_for_assets(sids)
return self._readers[country_code].currency_codes(sids)
def check_sids_arrays_match(left, right, message):
"""Check that two 1d arrays of sids are equal
"""
if len(left) != len(right):
raise ValueError(
"{}:\nlen(left) ({}) != len(right) ({})".format(
message, len(left), len(right)
)
)
diff = (left != right)
if diff.any():
(bad_locs,) = np.where(diff)
raise ValueError(
"{}:\n Indices with differences: {}".format(message, bad_locs)
)
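# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; the HDF5 path, sids and dates are
# placeholders). load_raw_arrays() still serves a single country per call, as
# enforced by MultiCountryDailyBarReader._country_code_for_assets above.
# ---------------------------------------------------------------------------
def _example_multi_country_read(path, sids, start, end):
    """Open a multi-country daily pricing file and read close prices."""
    reader = MultiCountryDailyBarReader.from_path(path)
    closes, = reader.load_raw_arrays(['close'], start, end, sids)
    return closes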
|
the-stack_106_18205
|
import os
file = os.path.dirname(__file__)
def Cadastrar(i1, i2, c3):
    # Read and trim the user name and password from the input widgets.
    nome = i1.input.get().strip()
    senha = i2.input.get().strip()
    # The messages below are the app's Portuguese UI strings.
    if senha == "" and nome == "":
        c3.caixa["text"] = "Os campos Usuário e Senha devem ser preenchidos"
    elif senha == "" and nome != "":
        c3.caixa["text"] = "O campo Senha deve ser preenchido"
    elif senha != "" and nome == "":
        c3.caixa["text"] = "O campo Nome deve ser preenchido"
    else:
        # Only open the accounts file for valid input; the context manager
        # closes it even if the write fails (the original left the handle
        # open on the error branches).
        with open(os.path.join(file, "Contas.txt"), "a") as arquivo:
            arquivo.write(f"{nome} {senha}\n")
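# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original app). The real i1, i2 and c3
# arguments are GUI widgets; the classes below are hypothetical stand-ins that
# only mimic the attributes Cadastrar() actually touches.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    class _FakeVar:
        def __init__(self, value):
            self._value = value

        def get(self):
            return self._value

    class _FakeEntry:
        def __init__(self, text):
            self.input = _FakeVar(text)

    class _FakeLabel:
        def __init__(self):
            self.caixa = {"text": ""}

    status = _FakeLabel()
    Cadastrar(_FakeEntry("alice"), _FakeEntry("s3cret"), status)
    print(status.caixa["text"] or "account appended to Contas.txt")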
|
the-stack_106_18206
|
from otree.api import (
models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,
Currency as c, currency_range
)
import random
author = 'Charlotte'
doc = """
Two simultaneous Prisoner's dilemma/donation games between two players with two different payoffs.
For the pairings to match that of crosstalk, the app uses a four player structure,
even if participants only interact in pairs of two
Random last round past a minimum number of rounds (50% chance of a further round each time).
Waitpage assigning the round numbers and pairing by arrival time, with a waiting time limited to 5min thanks to
a javascript.
"""
class Constants(BaseConstants):
"""
Here we set our variables that are constants throughout the game.
We set the number of players in a group, the number of rounds (see subsession), the payoffs for each game.
"""
name_in_url = 'multi_prisoner'
players_per_group = 4
num_rounds = 50
"""variables for randomish end round, used in the intro app at the mo"""
min_rounds = 3
proba_next_round = 0.5
conversion = '20pts = £0.05'
"""
Donation game payoffs
b = benefit, c = cost, dd = both defect
"""
b_high = c(5)
c_high = c(1)
dd_high = c(0)
endowment_high = c_high
b_low = c(2)
c_low = c(1)
dd_low = c(0)
endowment_low = c_low
"""Without endowment!! (for the round results)"""
sucker_high = -c_high
temptation_high = b_high
reward_high = b_high - c_high
sucker_low = -c_low
temptation_low = b_low
reward_low = b_low - c_low
class Subsession(BaseSubsession):
"""
Instead of creating_session() we need to use group_by_arrival_time_method().
The function makes sure that only high players play with high players.
I could only implement that retroactively though and assign treatment in the intro app.
    The inconvenient part is that if 3 people read the instructions, 2 become high and 1 becomes low,
    and if one of the high players gives up and quits, the other two cannot play together.
"""
def get_random_number_of_rounds(self):
"""
        Creates the random-ish number of rounds a group plays for. Participants play for at least
        Constants.min_rounds rounds, then have a 50% chance of another round, applied again after
        each additional round. This function returns the resulting last round number.
"""
number_of_rounds = Constants.min_rounds
while Constants.proba_next_round < random.random():
number_of_rounds += 1
return number_of_rounds
def group_by_arrival_time_method(subsession, waiting_players):
"""
Using the number generated above, it is assigned to each participants in newly formed group when they are
        in the waitroom. This function is from oTree but had to be tweaked a little to allow assigning a variable
        after the group is formed rather than grouping the players based on a pre-assigned variable.
We form the group of four here rather than let group_by_arrival_time do it automatically (with the Constants)
"""
if len(waiting_players) >= Constants.players_per_group:
players = [p for _, p in zip(range(4), waiting_players)]
last_round = subsession.get_random_number_of_rounds()
for p in players:
p.participant.vars['last_round'] = last_round
                p.last_round = p.participant.vars['last_round']  # participant.vars do not appear in the data but player vars do.
return players
class Group(BaseGroup):
pass
class Player(BasePlayer):
"""
These are all variables that depend on a real person's action.
The options for the demographics survey & the decisions in the game.
The last_round variable field is here too.
Any variable defined in Player class becomes a new field attached to the player.
"""
last_round = models.IntegerField()
left_hanging = models.CurrencyField()
age = models.IntegerField(
verbose_name='What is your age?',
min=18, max=100)
gender = models.StringField(
choices=['Female', 'Male', 'Other'],
verbose_name='What gender do you identify as?',
widget=widgets.RadioSelect)
income = models.StringField(
choices=['£9.999 or below', '£10.000 - £29.999', '£30.000 - £49.999',
'£50.000 - £69.999', '£70.000 - £89.999', '£90.000 or over', 'Prefer not to say'],
verbose_name='What is the total combined income of your household?',
widget=widgets.RadioSelect)
education = models.StringField(
choices=['No formal education', 'GCSE or equivalent', 'A-Levels or equivalent', 'Vocational training',
'Undergraduate degree', 'Postgraduate degree', 'Prefer not to say'],
verbose_name='What is the highest level of education you have completed?',
widget=widgets.RadioSelect)
ethnicity = models.StringField(
choices=['Asian/Asian British', 'Black/African/Caribbean/Black British', 'Mixed/Multiple Ethnic groups',
'White', 'Other'],
verbose_name='What is your ethnicity?',
widget=widgets.RadioSelect)
comment_box = models.LongStringField(
verbose_name=''
)
decision_high = models.IntegerField(
choices=[
[1, f'You lose {Constants.c_high} pts for Participant 2 to receive {Constants.b_high} pts.'],
[0, 'You lose 0 pts for Participant 2 to receive 0 pts.'],
],
doc="""This player's decision""",
widget=widgets.RadioSelect
)
decision_low = models.IntegerField(
choices=[
[1, f'You lose {Constants.c_low} pts for Participant 2 to receive {Constants.b_low} pts.'],
[0, 'You lose 0 pts for Participant 2 to receive 0 pts.'],
],
doc="""This player's decision""",
widget=widgets.RadioSelect
)
payoff_high = models.CurrencyField()
payoff_low = models.CurrencyField()
total_payoff = models.CurrencyField()
def get_opponent(self):
"""
        Since we have 4 players in a group but only want pairs to play each other, we need our own custom function
to assign the correct opponent to the correct player. Hence we cannot just use get_others_in_group()
as there are 3 possible opponents and we want just one.
We create a dictionary (matches) that matches the correct opponent with each player.
We create a list of all the possible opponents in the group (so 3 players without oneself).
        Then for each player, we pick the matching opponent id from the dict and compare it to the 3 other players;
        the player whose id matches is added to the new opponent list.
"""
matches = {1: [2], 2: [1], 3: [4], 4: [3]}
list_opponents = self.get_others_in_group()
# print(self.get_others_in_group())
# print(self.id_in_group)
opponent = []
        for opponent_id in matches[self.id_in_group]:  # picks the matching opponent id from the matches dict
for other_player in list_opponents: #
if other_player.id_in_group == opponent_id:
opponent.append(other_player)
# print('subgroup is', self.opponent)
return opponent
def set_payoff(self):
"""
The payoff function layout is from the prisoner template. There is one matrix per game using two separate
decision variables. Bottom lines calculate the payoff based on actual choices with, again, one for each game.
        They are added up for the round total (self.total_payoff). The opponent variable needs to match our new
        get_opponent function.
"""
opponent = self.get_opponent()
# print([opponent.id_in_group for opponent in opponents])
payoff_matrix_high = {
1:
{
1: Constants.endowment_high + (Constants.b_high - Constants.c_high),
0: Constants.endowment_high + (-Constants.c_high)
},
0:
{
1: Constants.endowment_high + Constants.b_high,
0: Constants.endowment_high + Constants.dd_high
}
}
self.payoff_high = payoff_matrix_high[self.decision_high][opponent[0].decision_high]
payoff_matrix_low = {
1:
{
1: Constants.endowment_low + (Constants.b_low - Constants.c_low),
0: Constants.endowment_low + (-Constants.c_low)
},
0:
{
1: Constants.endowment_low + Constants.b_low,
0: Constants.endowment_low + Constants.dd_low
}
}
self.payoff_low = payoff_matrix_low[self.decision_low][opponent[0].decision_low]
self.total_payoff = self.payoff_high + self.payoff_low
self.payoff = self.payoff_high + self.payoff_low
# print('self.total_payoff', self.total_payoff)
# print('Player ID', self.id_in_group)
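# ---------------------------------------------------------------------------
# Worked example (not part of the oTree app): the donation-game payoffs that
# set_payoff() encodes, written as a plain function so the matrix logic can be
# checked outside oTree. The numbers mirror Constants for the "high" game
# (b = 5, cost = 1, endowment = cost = 1); this is an illustrative sketch only.
# ---------------------------------------------------------------------------
def _donation_payoff(my_decision, opponent_decision, b=5, cost=1):
    """Return my payoff for one donation game, where 1 = give and 0 = keep."""
    endowment = cost
    matrix = {
        1: {1: endowment + (b - cost), 0: endowment - cost},
        0: {1: endowment + b, 0: endowment + 0},
    }
    return matrix[my_decision][opponent_decision]

if __name__ == "__main__":
    # Mutual cooperation beats mutual defection, but unilateral defection pays most.
    assert _donation_payoff(1, 1) == 5   # both give: 1 + (5 - 1)
    assert _donation_payoff(1, 0) == 0   # I give, opponent keeps: 1 - 1
    assert _donation_payoff(0, 1) == 6   # I keep, opponent gives: 1 + 5
    assert _donation_payoff(0, 0) == 1   # both keep: endowment only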
|
the-stack_106_18207
|
import io
import os
from collections import OrderedDict
from pathlib import Path
from typing import Optional
import yaml
from serverless.aws.features.stepfunctions import StepFunctions
from serverless.service.configuration import Configuration
from serverless.service.functions import FunctionManager
from serverless.service.package import Package
from serverless.service.plugins import PluginsManager
from serverless.service.resources import ResourceManager
from serverless.service.types import Identifier, Provider, YamlOrderedDict
class Builder:
def __init__(self, service):
self.service = service
self.function = service.provider.function_builder
class PreSetAttributesBuilder(Builder):
def __init__(self, service, preset):
super().__init__(service)
self._preset = preset
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __getattr__(self, item):
def wrapper(*args, **kwargs):
return getattr(self.function, item)(*args, **{**kwargs, **self._preset})
return wrapper
class Service(OrderedDict, yaml.YAMLObject):
yaml_tag = "!Service"
def __init__(
self,
name: str,
description: str,
provider: Provider,
config: Optional[Configuration] = None,
custom: Optional[dict] = None,
regions=None,
**kwds,
):
super().__init__(**kwds)
self.service = Identifier(name)
self.package = Package(["!./**/**", f"{self.service.snake}/**"])
self.variablesResolutionMode = 20210326
self.custom = YamlOrderedDict(vars="${file(./variables.yml):${sls:stage}}", **(custom or {}))
self.config = config or Configuration()
self.regions = regions
provider.configure(self)
self.provider = provider
self.plugins = PluginsManager(self)
self.functions = FunctionManager(self)
self.resources = ResourceManager(self, description)
self.builder = Builder(self)
self.stepFunctions = StepFunctions(self)
self.features = []
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, item):
return self.get(item)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.builder = Builder(self)
return self
def preset(self, **kwargs):
self.builder = PreSetAttributesBuilder(self, kwargs)
return self.builder
def enable(self, feature):
self.features.append(feature)
feature.enable(self)
def render(self, output=None, auto_generated_warning=True):
if "SERVERLESS_BUILDER_DISABLE_RENDER" in os.environ:
return
import __main__ as main
output = output if output else open(Path(main.__file__).stem, "w+")
content = (
"# DO NOT edit this file directly, it was generated based on serverless.yml.py\n\n" + str(self)
if auto_generated_warning
else str(self)
)
output.write(content)
def __str__(self):
buf = io.StringIO()
yaml.dump(self, buf, sort_keys=False, indent=2, width=1000)
buf.seek(0)
tmp_buf = io.StringIO()
for line in buf:
if line.split(":")[0] in ("provider", "plugins", "package", "custom", "functions", "resources", "vars"):
tmp_buf.write("\n")
tmp_buf.write(line)
tmp_buf.seek(0)
return tmp_buf.read()
def has(self, feature):
return len(list(filter(lambda x: isinstance(x, feature), self.features))) > 0
@classmethod
def to_yaml(cls, dumper, data):
data.pop("builder", None)
data.pop("config", None)
data.pop("function_builder", None)
if not data.stepFunctions.stateMachines:
data.pop("stepFunctions", None)
for plugin in data.plugins.all():
plugin.pre_render(data)
for feature in data.features:
feature.pre_render(data)
data.pop("features", None)
data.pop("regions", None)
return dumper.represent_dict(data)
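# ---------------------------------------------------------------------------
# Standalone sketch (not part of this package): the __getattr__ trick used by
# PreSetAttributesBuilder, reduced to hypothetical classes so the kwarg-merging
# behaviour can be seen in isolation. `FunctionBuilder.http` below is invented
# for illustration and is not an API of this library.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    class FunctionBuilder:
        def http(self, name, **kwargs):
            return {"name": name, **kwargs}

    class Preset:
        def __init__(self, builder, preset):
            self._builder, self._preset = builder, preset

        def __getattr__(self, item):
            def wrapper(*args, **kwargs):
                # Preset values win over per-call values, as in PreSetAttributesBuilder.
                return getattr(self._builder, item)(*args, **{**kwargs, **self._preset})
            return wrapper

    preset = Preset(FunctionBuilder(), {"timeout": 30})
    print(preset.http("ping", memory=128))  # {'name': 'ping', 'memory': 128, 'timeout': 30}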
|
the-stack_106_18212
|
#!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Outlines document text given an image.
Example (the argument parser below expects an image directory followed by the image file):
    python doctext.py <image_dir> resources/text_menu.jpg
"""
# [START vision_document_text_tutorial]
# [START vision_document_text_tutorial_imports]
import os
import argparse
from enum import Enum
import io
from google.cloud import vision
from google.cloud.vision import types
from PIL import Image, ImageDraw
# [END vision_document_text_tutorial_imports]
PAGES_DIR = os.path.join(os.path.sep, 'midata', 'manceps', 'unredact', 'mooney', 'pages')
class FeatureType(Enum):
PAGE = 1
BLOCK = 2
PARA = 3
WORD = 4
SYMBOL = 5
def draw_boxes(image, bounds, color):
"""Draw a border around the image using the hints in the vector list."""
draw = ImageDraw.Draw(image)
for bound in bounds:
draw.polygon([
bound.vertices[0].x, bound.vertices[0].y,
bound.vertices[1].x, bound.vertices[1].y,
bound.vertices[2].x, bound.vertices[2].y,
bound.vertices[3].x, bound.vertices[3].y], None, color)
return image
def detect_document(image_file):
"""Returns google cloud vision document object (nested json dict) given an image file."""
# [START vision_document_text_tutorial_detect_bounds]
client = vision.ImageAnnotatorClient()
with io.open(image_file, 'rb') as image_file:
content = image_file.read()
image = types.Image(content=content)
response = client.document_text_detection(image=image)
return response.full_text_annotation
def get_document_bounds(document, feature):
"""Returns document bounds given an image file or a document object already processed through GCV OCR."""
document = detect_document(document) if isinstance(document, str) else document
bounds = []
# Collect specified feature bounds by enumerating all document features
for page in document.pages:
for block in page.blocks:
for paragraph in block.paragraphs:
for word in paragraph.words:
for symbol in word.symbols:
if (feature == FeatureType.SYMBOL):
bounds.append(symbol.bounding_box)
if (feature == FeatureType.WORD):
bounds.append(word.bounding_box)
if (feature == FeatureType.PARA):
bounds.append(paragraph.bounding_box)
if (feature == FeatureType.BLOCK):
bounds.append(block.bounding_box)
if (feature == FeatureType.PAGE):
bounds.append(block.bounding_box)
# The list `bounds` contains the coordinates of the bounding boxes.
# [END vision_document_text_tutorial_detect_bounds]
return bounds
def render_doc_text(detect_file, out_file='doctext_outfile.jpg'):
document = detect_document(detect_file)
image = Image.open(detect_file)
bounds = get_document_bounds(document, FeatureType.PAGE)
draw_boxes(image, bounds, 'blue')
bounds = get_document_bounds(document, FeatureType.PARA)
draw_boxes(image, bounds, 'red')
bounds = get_document_bounds(document, FeatureType.WORD)
draw_boxes(image, bounds, 'yellow')
if out_file:
image.save(out_file)
else:
image.show()
if __name__ == '__main__':
# [START vision_document_text_tutorial_run_application]
parser = argparse.ArgumentParser()
parser.add_argument('detect_dir', help='Directory of images for text detection.')
parser.add_argument('detect_file', help='The image for text detection.')
parser.add_argument('-out_file', help='Optional output file', default=0)
parser.add_argument('-out_dir', help='Optional output directory', default=0)
args = parser.parse_args()
render_doc_text(detect_file=args.detect_file, out_file=args.out_file)
# [END vision_document_text_tutorial_run_application]
# [END vision_document_text_tutorial]
|
the-stack_106_18213
|
import datetime
from Poem.api.models import MyAPIKey
from Poem.api.views import NotFound
from Poem.poem import models as poem_models
from django.db.models import Q
from django_tenants.utils import get_public_schema_name
from rest_framework import status
from rest_framework.authentication import SessionAuthentication
from rest_framework.response import Response
from rest_framework.views import APIView
from .utils import error_response
class ListAPIKeys(APIView):
authentication_classes = (SessionAuthentication,)
def get(self, request, name=None):
if name:
try:
if request.tenant.schema_name == get_public_schema_name():
regular_user = None
regular_user_no_perms = None
else:
userprofile = poem_models.UserProfile.objects.get(
user=request.user
)
regular_user = not request.user.is_superuser and (
len(userprofile.groupsofaggregations.all()) > 0 or
len(userprofile.groupsofmetricprofiles.all()) > 0 or
len(userprofile.groupsofthresholdsprofiles.all()) > 0
)
regular_user_no_perms = not request.user.is_superuser and (
len(userprofile.groupsofaggregations.all()) == 0 and
len(userprofile.groupsofmetricprofiles.all()) == 0 and
len(userprofile.groupsofthresholdsprofiles.all()) == 0
)
if request.user.is_superuser or (
regular_user and name.startswith('WEB-API')
) or (
regular_user_no_perms and name == 'WEB-API-RO'
):
apikey = MyAPIKey.objects.get(name=name)
api_format = dict(
id=apikey.id,
name=apikey.name,
token=apikey.token,
created=datetime.datetime.strftime(
apikey.created, '%Y-%m-%d %H:%M:%S'
),
revoked=apikey.revoked
)
else:
return error_response(
detail='You do not have permission for fetching this '
'API key.',
status_code=status.HTTP_401_UNAUTHORIZED
)
except MyAPIKey.DoesNotExist:
raise NotFound(status=404, detail='API key not found')
except poem_models.UserProfile.DoesNotExist:
raise NotFound(
status=404,
detail='User profile for authenticated user not found.'
)
else:
if request.user.is_superuser:
apikeys = MyAPIKey.objects.all().order_by('name')
else:
apikeys = MyAPIKey.objects.filter(
name__startswith='WEB-API'
).order_by('name')
api_format = [
dict(
id=e.id,
name=e.name,
created=datetime.datetime.strftime(
e.created, '%Y-%m-%d %H:%M:%S'
),
revoked=e.revoked
) for e in apikeys
]
return Response(api_format)
def put(self, request):
if request.user.is_superuser:
try:
names = MyAPIKey.objects.filter(
~Q(id=request.data['id'])
).values_list('name', flat=True)
if request.data['name'] not in names:
obj = MyAPIKey.objects.get(id=request.data['id'])
obj.name = request.data['name']
obj.revoked = request.data['revoked']
obj.save()
else:
return error_response(
detail='API key with this name already exists',
status_code=status.HTTP_400_BAD_REQUEST
)
return Response(status=status.HTTP_201_CREATED)
except MyAPIKey.DoesNotExist:
raise NotFound(status=404, detail='API key not found')
else:
return error_response(
detail='You do not have permission to change API keys.',
status_code=status.HTTP_401_UNAUTHORIZED
)
def post(self, request):
if request.user.is_superuser:
names = MyAPIKey.objects.get_usable_keys().values_list(
'name', flat=True
)
if request.data['name'] not in names:
token = request.data.get('token', False)
if token:
MyAPIKey.objects.create_key(
name=request.data['name'],
token=token
)
else:
MyAPIKey.objects.create_key(
name=request.data['name']
)
return Response(status=status.HTTP_201_CREATED)
else:
return error_response(
detail='API key with this name already exists',
status_code=status.HTTP_400_BAD_REQUEST
)
else:
return error_response(
detail='You do not have permission to add API keys.',
status_code=status.HTTP_401_UNAUTHORIZED
)
def delete(self, request, name=None):
if request.user.is_superuser:
if name:
try:
apikey = MyAPIKey.objects.get(name=name)
apikey.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
except MyAPIKey.DoesNotExist:
raise NotFound(status=404, detail='API key not found')
else:
return error_response(
detail='API key name must be defined',
status_code=status.HTTP_400_BAD_REQUEST
)
else:
return error_response(
detail='You do not have permission to delete API keys.',
status_code=status.HTTP_401_UNAUTHORIZED
)
class ListPublicAPIKey(APIView):
authentication_classes = ()
permission_classes = ()
def get(self, request):
try:
apikey = MyAPIKey.objects.get(name='WEB-API-RO')
api_format = dict(
id=apikey.id,
name=apikey.name,
token=apikey.token,
created=datetime.datetime.strftime(apikey.created,
'%Y-%m-%d %H:%M:%S'),
revoked=apikey.revoked
)
except MyAPIKey.DoesNotExist:
raise NotFound(status=404, detail='API key not found')
return Response(api_format)
|
the-stack_106_18214
|
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import json
import os
import warnings
from unittest import mock
import nose.tools as nt
from IPython.core import display
from IPython.core.getipython import get_ipython
from IPython.utils.io import capture_output
from IPython.utils.tempdir import NamedFileInTemporaryDirectory
from IPython import paths as ipath
from IPython.testing.tools import AssertNotPrints
import IPython.testing.decorators as dec
def test_image_size():
"""Simple test for display.Image(args, width=x,height=y)"""
thisurl = 'http://www.google.fr/images/srpr/logo3w.png'
img = display.Image(url=thisurl, width=200, height=200)
nt.assert_equal(u'<img src="%s" width="200" height="200"/>' % (thisurl), img._repr_html_())
img = display.Image(url=thisurl, metadata={'width':200, 'height':200})
nt.assert_equal(u'<img src="%s" width="200" height="200"/>' % (thisurl), img._repr_html_())
img = display.Image(url=thisurl, width=200)
nt.assert_equal(u'<img src="%s" width="200"/>' % (thisurl), img._repr_html_())
img = display.Image(url=thisurl)
nt.assert_equal(u'<img src="%s"/>' % (thisurl), img._repr_html_())
img = display.Image(url=thisurl, unconfined=True)
nt.assert_equal(u'<img src="%s" class="unconfined"/>' % (thisurl), img._repr_html_())
def test_image_mimes():
fmt = get_ipython().display_formatter.format
for format in display.Image._ACCEPTABLE_EMBEDDINGS:
mime = display.Image._MIMETYPES[format]
img = display.Image(b'garbage', format=format)
data, metadata = fmt(img)
nt.assert_equal(sorted(data), sorted([mime, 'text/plain']))
def test_geojson():
gj = display.GeoJSON(data={
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [-81.327, 296.038]
},
"properties": {
"name": "Inca City"
}
},
url_template="http://s3-eu-west-1.amazonaws.com/whereonmars.cartodb.net/{basemap_id}/{z}/{x}/{y}.png",
layer_options={
"basemap_id": "celestia_mars-shaded-16k_global",
"attribution": "Celestia/praesepe",
"minZoom": 0,
"maxZoom": 18,
})
nt.assert_equal(u'<IPython.core.display.GeoJSON object>', str(gj))
def test_retina_png():
here = os.path.dirname(__file__)
img = display.Image(os.path.join(here, "2x2.png"), retina=True)
nt.assert_equal(img.height, 1)
nt.assert_equal(img.width, 1)
data, md = img._repr_png_()
nt.assert_equal(md['width'], 1)
nt.assert_equal(md['height'], 1)
def test_embed_svg_url():
import gzip
from io import BytesIO
svg_data = b'<svg><circle x="0" y="0" r="1"/></svg>'
url = 'http://test.com/circle.svg'
gzip_svg = BytesIO()
with gzip.open(gzip_svg, 'wb') as fp:
fp.write(svg_data)
gzip_svg = gzip_svg.getvalue()
def mocked_urlopen(*args, **kwargs):
class MockResponse:
def __init__(self, svg):
self._svg_data = svg
self.headers = {'content-type': 'image/svg+xml'}
def read(self):
return self._svg_data
if args[0] == url:
return MockResponse(svg_data)
elif args[0] == url + 'z':
ret= MockResponse(gzip_svg)
ret.headers['content-encoding']= 'gzip'
return ret
return MockResponse(None)
with mock.patch('urllib.request.urlopen', side_effect=mocked_urlopen):
svg = display.SVG(url=url)
nt.assert_true(svg._repr_svg_().startswith('<svg'))
svg = display.SVG(url=url + 'z')
nt.assert_true(svg._repr_svg_().startswith('<svg'))
# do it for real: 6.1kB of data
url = "https://upload.wikimedia.org/wikipedia/commons/3/30/Vector-based_example.svg"
svg = display.SVG(url=url)
nt.assert_true(svg._repr_svg_().startswith('<svg'))
def test_retina_jpeg():
here = os.path.dirname(__file__)
img = display.Image(os.path.join(here, "2x2.jpg"), retina=True)
nt.assert_equal(img.height, 1)
nt.assert_equal(img.width, 1)
data, md = img._repr_jpeg_()
nt.assert_equal(md['width'], 1)
nt.assert_equal(md['height'], 1)
def test_base64image():
display.Image("iVBORw0KGgoAAAANSUhEUgAAAAEAAAABAQMAAAAl21bKAAAAA1BMVEUAAACnej3aAAAAAWJLR0QAiAUdSAAAAAlwSFlzAAALEwAACxMBAJqcGAAAAAd0SU1FB94BCRQnOqNu0b4AAAAKSURBVAjXY2AAAAACAAHiIbwzAAAAAElFTkSuQmCC")
def test_image_filename_defaults():
'''test format constraint, and validity of jpeg and png'''
tpath = ipath.get_ipython_package_dir()
nt.assert_raises(ValueError, display.Image, filename=os.path.join(tpath, 'testing/tests/badformat.zip'),
embed=True)
nt.assert_raises(ValueError, display.Image)
nt.assert_raises(ValueError, display.Image, data='this is not an image', format='badformat', embed=True)
# check boths paths to allow packages to test at build and install time
imgfile = os.path.join(tpath, 'core/tests/2x2.png')
img = display.Image(filename=imgfile)
nt.assert_equal('png', img.format)
nt.assert_is_not_none(img._repr_png_())
img = display.Image(filename=os.path.join(tpath, 'testing/tests/logo.jpg'), embed=False)
nt.assert_equal('jpeg', img.format)
nt.assert_is_none(img._repr_jpeg_())
def _get_inline_config():
from ipykernel.pylab.config import InlineBackend
return InlineBackend.instance()
@dec.skip_without('matplotlib')
def test_set_matplotlib_close():
cfg = _get_inline_config()
cfg.close_figures = False
display.set_matplotlib_close()
assert cfg.close_figures
display.set_matplotlib_close(False)
assert not cfg.close_figures
_fmt_mime_map = {
'png': 'image/png',
'jpeg': 'image/jpeg',
'pdf': 'application/pdf',
'retina': 'image/png',
'svg': 'image/svg+xml',
}
@dec.skip_without('matplotlib')
def test_set_matplotlib_formats():
from matplotlib.figure import Figure
formatters = get_ipython().display_formatter.formatters
for formats in [
('png',),
('pdf', 'svg'),
('jpeg', 'retina', 'png'),
(),
]:
active_mimes = {_fmt_mime_map[fmt] for fmt in formats}
display.set_matplotlib_formats(*formats)
for mime, f in formatters.items():
if mime in active_mimes:
nt.assert_in(Figure, f)
else:
nt.assert_not_in(Figure, f)
@dec.skip_without('matplotlib')
def test_set_matplotlib_formats_kwargs():
from matplotlib.figure import Figure
ip = get_ipython()
cfg = _get_inline_config()
cfg.print_figure_kwargs.update(dict(foo='bar'))
kwargs = dict(quality=10)
display.set_matplotlib_formats('png', **kwargs)
formatter = ip.display_formatter.formatters['image/png']
f = formatter.lookup_by_type(Figure)
cell = f.__closure__[0].cell_contents
expected = kwargs
expected.update(cfg.print_figure_kwargs)
nt.assert_equal(cell, expected)
def test_display_available():
"""
Test that display is available without import
We don't really care if it's in builtin or anything else, but it should
always be available.
"""
ip = get_ipython()
with AssertNotPrints('NameError'):
ip.run_cell('display')
try:
ip.run_cell('del display')
except NameError:
pass # it's ok, it might be in builtins
# even if deleted it should be back
with AssertNotPrints('NameError'):
ip.run_cell('display')
def test_textdisplayobj_pretty_repr():
p = display.Pretty("This is a simple test")
nt.assert_equal(repr(p), '<IPython.core.display.Pretty object>')
nt.assert_equal(p.data, 'This is a simple test')
p._show_mem_addr = True
nt.assert_equal(repr(p), object.__repr__(p))
def test_displayobject_repr():
h = display.HTML('<br />')
nt.assert_equal(repr(h), '<IPython.core.display.HTML object>')
h._show_mem_addr = True
nt.assert_equal(repr(h), object.__repr__(h))
h._show_mem_addr = False
nt.assert_equal(repr(h), '<IPython.core.display.HTML object>')
j = display.Javascript('')
nt.assert_equal(repr(j), '<IPython.core.display.Javascript object>')
j._show_mem_addr = True
nt.assert_equal(repr(j), object.__repr__(j))
j._show_mem_addr = False
nt.assert_equal(repr(j), '<IPython.core.display.Javascript object>')
@mock.patch('warnings.warn')
def test_encourage_iframe_over_html(m_warn):
display.HTML()
m_warn.assert_not_called()
display.HTML('<br />')
m_warn.assert_not_called()
display.HTML('<html><p>Lots of content here</p><iframe src="http://a.com"></iframe>')
m_warn.assert_not_called()
display.HTML('<iframe src="http://a.com"></iframe>')
m_warn.assert_called_with('Consider using IPython.display.IFrame instead')
m_warn.reset_mock()
display.HTML('<IFRAME SRC="http://a.com"></IFRAME>')
m_warn.assert_called_with('Consider using IPython.display.IFrame instead')
def test_progress():
p = display.ProgressBar(10)
nt.assert_in('0/10',repr(p))
p.html_width = '100%'
p.progress = 5
nt.assert_equal(p._repr_html_(), "<progress style='width:100%' max='10' value='5'></progress>")
def test_progress_iter():
with capture_output(display=False) as captured:
for i in display.ProgressBar(5):
out = captured.stdout
nt.assert_in('{0}/5'.format(i), out)
out = captured.stdout
nt.assert_in('5/5', out)
def test_json():
d = {'a': 5}
lis = [d]
metadata = [
{'expanded': False, 'root': 'root'},
{'expanded': True, 'root': 'root'},
{'expanded': False, 'root': 'custom'},
{'expanded': True, 'root': 'custom'},
]
json_objs = [
display.JSON(d),
display.JSON(d, expanded=True),
display.JSON(d, root='custom'),
display.JSON(d, expanded=True, root='custom'),
]
for j, md in zip(json_objs, metadata):
nt.assert_equal(j._repr_json_(), (d, md))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
j = display.JSON(json.dumps(d))
nt.assert_equal(len(w), 1)
nt.assert_equal(j._repr_json_(), (d, metadata[0]))
json_objs = [
display.JSON(lis),
display.JSON(lis, expanded=True),
display.JSON(lis, root='custom'),
display.JSON(lis, expanded=True, root='custom'),
]
for j, md in zip(json_objs, metadata):
nt.assert_equal(j._repr_json_(), (lis, md))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
j = display.JSON(json.dumps(lis))
nt.assert_equal(len(w), 1)
nt.assert_equal(j._repr_json_(), (lis, metadata[0]))
def test_video_embedding():
"""use a tempfile, with dummy-data, to ensure that video embedding doesn't crash"""
v = display.Video("http://ignored")
assert not v.embed
html = v._repr_html_()
nt.assert_not_in('src="data:', html)
nt.assert_in('src="http://ignored"', html)
with nt.assert_raises(ValueError):
v = display.Video(b'abc')
with NamedFileInTemporaryDirectory('test.mp4') as f:
f.write(b'abc')
f.close()
v = display.Video(f.name)
assert not v.embed
html = v._repr_html_()
nt.assert_not_in('src="data:', html)
v = display.Video(f.name, embed=True)
html = v._repr_html_()
nt.assert_in('src="data:video/mp4;base64,YWJj"',html)
v = display.Video(f.name, embed=True, mimetype='video/other')
html = v._repr_html_()
nt.assert_in('src="data:video/other;base64,YWJj"',html)
v = display.Video(b'abc', embed=True, mimetype='video/mp4')
html = v._repr_html_()
nt.assert_in('src="data:video/mp4;base64,YWJj"',html)
v = display.Video(u'YWJj', embed=True, mimetype='video/xyz')
html = v._repr_html_()
nt.assert_in('src="data:video/xyz;base64,YWJj"',html)
def test_html_metadata():
s = "<h1>Test</h1>"
h = display.HTML(s, metadata={"isolated": True})
nt.assert_equal(h._repr_html_(), (s, {"isolated": True}))
def test_display_id():
ip = get_ipython()
with mock.patch.object(ip.display_pub, 'publish') as pub:
handle = display.display('x')
nt.assert_is(handle, None)
handle = display.display('y', display_id='secret')
nt.assert_is_instance(handle, display.DisplayHandle)
handle2 = display.display('z', display_id=True)
nt.assert_is_instance(handle2, display.DisplayHandle)
nt.assert_not_equal(handle.display_id, handle2.display_id)
nt.assert_equal(pub.call_count, 3)
args, kwargs = pub.call_args_list[0]
nt.assert_equal(args, ())
nt.assert_equal(kwargs, {
'data': {
'text/plain': repr('x')
},
'metadata': {},
})
args, kwargs = pub.call_args_list[1]
nt.assert_equal(args, ())
nt.assert_equal(kwargs, {
'data': {
'text/plain': repr('y')
},
'metadata': {},
'transient': {
'display_id': handle.display_id,
},
})
args, kwargs = pub.call_args_list[2]
nt.assert_equal(args, ())
nt.assert_equal(kwargs, {
'data': {
'text/plain': repr('z')
},
'metadata': {},
'transient': {
'display_id': handle2.display_id,
},
})
def test_update_display():
ip = get_ipython()
with mock.patch.object(ip.display_pub, 'publish') as pub:
with nt.assert_raises(TypeError):
display.update_display('x')
display.update_display('x', display_id='1')
display.update_display('y', display_id='2')
args, kwargs = pub.call_args_list[0]
nt.assert_equal(args, ())
nt.assert_equal(kwargs, {
'data': {
'text/plain': repr('x')
},
'metadata': {},
'transient': {
'display_id': '1',
},
'update': True,
})
args, kwargs = pub.call_args_list[1]
nt.assert_equal(args, ())
nt.assert_equal(kwargs, {
'data': {
'text/plain': repr('y')
},
'metadata': {},
'transient': {
'display_id': '2',
},
'update': True,
})
def test_display_handle():
ip = get_ipython()
handle = display.DisplayHandle()
nt.assert_is_instance(handle.display_id, str)
handle = display.DisplayHandle('my-id')
nt.assert_equal(handle.display_id, 'my-id')
with mock.patch.object(ip.display_pub, 'publish') as pub:
handle.display('x')
handle.update('y')
args, kwargs = pub.call_args_list[0]
nt.assert_equal(args, ())
nt.assert_equal(kwargs, {
'data': {
'text/plain': repr('x')
},
'metadata': {},
'transient': {
'display_id': handle.display_id,
}
})
args, kwargs = pub.call_args_list[1]
nt.assert_equal(args, ())
nt.assert_equal(kwargs, {
'data': {
'text/plain': repr('y')
},
'metadata': {},
'transient': {
'display_id': handle.display_id,
},
'update': True,
})
|
the-stack_106_18215
|
"""Subgraph structure that belongs to the Optimum-Path Forest.
"""
import numpy as np
import opfython.stream.loader as loader
import opfython.stream.parser as p
import opfython.utils.constants as c
import opfython.utils.exception as e
import opfython.utils.logging as l
from opfython.core import Node
logger = l.get_logger(__name__)
class Subgraph:
"""A Subgraph class is used as a collection of Nodes and the basic structure to work with OPF.
"""
def __init__(self, X=None, Y=None, I=None, from_file=None):
"""Initialization method.
Args:
X (np.array): Array of features.
Y (np.array): Array of labels.
I (np.array): Array of indexes.
from_file (bool): Whether Subgraph should be directly created from a file.
"""
# Number of nodes
self.n_nodes = 0
# Number of features
self.n_features = 0
# List of nodes
self.nodes = []
# List of indexes of ordered nodes
self.idx_nodes = []
# Whether the subgraph is trained or not
self.trained = False
# Checks if data should be loaded from a file
if from_file:
X, Y = self._load(from_file)
# Checks if data has been properly loaded
if X is not None:
# Checks if labels are provided or not
if Y is None:
                # If not, creates an all-zeros label array
                Y = np.zeros(len(X), dtype=int)
self._build(X, Y, I)
else:
logger.error('Subgraph has not been properly created.')
@property
def n_nodes(self):
"""int: Number of nodes.
"""
return len(self.nodes)
@n_nodes.setter
def n_nodes(self, n_nodes):
if not isinstance(n_nodes, int):
raise e.TypeError('`n_nodes` should be an integer')
if n_nodes < 0:
raise e.ValueError('`n_nodes` should be >= 0')
self._n_nodes = n_nodes
@property
def n_features(self):
"""int: Number of features.
"""
return self._n_features
@n_features.setter
def n_features(self, n_features):
if not isinstance(n_features, int):
raise e.TypeError('`n_features` should be an integer')
if n_features < 0:
raise e.ValueError('`n_features` should be >= 0')
self._n_features = n_features
@property
def nodes(self):
"""list: List of nodes that belongs to the Subgraph.
"""
return self._nodes
@nodes.setter
def nodes(self, nodes):
if not isinstance(nodes, list):
raise e.TypeError('`nodes` should be a list')
self._nodes = nodes
@property
def idx_nodes(self):
"""list: List of ordered nodes indexes.
"""
return self._idx_nodes
@idx_nodes.setter
def idx_nodes(self, idx_nodes):
if not isinstance(idx_nodes, list):
raise e.TypeError('`idx_nodes` should be a list')
self._idx_nodes = idx_nodes
@property
def trained(self):
"""bool: Indicate whether the subgraph is trained.
"""
return self._trained
@trained.setter
def trained(self, trained):
if not isinstance(trained, bool):
raise e.TypeError('`trained` should be a boolean')
self._trained = trained
def _load(self, file_path):
"""Loads and parses a dataframe from a file.
Args:
file_path (str): File to be loaded.
Returns:
Arrays holding the features and labels.
"""
# Getting file extension
extension = file_path.split('.')[-1]
if extension == 'csv':
data = loader.load_csv(file_path)
elif extension == 'txt':
data = loader.load_txt(file_path)
elif extension == 'json':
data = loader.load_json(file_path)
else:
raise e.ArgumentError('File extension not recognized. It should be `.csv`, `.json` or `.txt`')
X, Y = p.parse_loader(data)
return X, Y
def _build(self, X, Y, I):
"""This method serves as the object building process.
        One can define several commands here that do not necessarily
        need to be in its initialization.
        Args:
            X (np.array): Features array.
            Y (np.array): Labels array.
            I (np.array): Array of indexes, or None to use positional indexes.
        """
# Iterate over every possible sample
for i, (feature, label) in enumerate(zip(X, Y)):
# Checks if indexes are supplied
if I is not None:
node = Node(I[i].item(), label.item(), feature)
else:
node = Node(i, label.item(), feature)
# Appends the node to the list
self.nodes.append(node)
# Calculates the number of features
self.n_features = self.nodes[0].features.shape[0]
def destroy_arcs(self):
"""Destroy the arcs present in the subgraph.
"""
for i in range(self.n_nodes):
# Reset the number of adjacent nodes and adjacency list
self.nodes[i].n_plateaus = 0
self.nodes[i].adjacency = []
def mark_nodes(self, i):
"""Marks a node and its whole path as relevant.
Args:
i (int): An identifier of the node to start the marking.
"""
# While the node still has a predecessor
while self.nodes[i].pred != c.NIL:
# Marks current node as relevant
self.nodes[i].relevant = c.RELEVANT
# Gathers the predecessor node of current node
i = self.nodes[i].pred
# Marks the first node as relevant
self.nodes[i].relevant = c.RELEVANT
def reset(self):
"""Resets the subgraph predecessors and arcs.
"""
for i in range(self.n_nodes):
self.nodes[i].pred = c.NIL
self.nodes[i].relevant = c.IRRELEVANT
self.destroy_arcs()
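# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): building a Subgraph directly from
# in-memory arrays instead of a file.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    X = np.asarray([[0.1, 0.2], [0.4, 0.4], [0.9, 0.8]])
    Y = np.asarray([1, 1, 2])

    g = Subgraph(X=X, Y=Y)
    print(g.n_nodes, g.n_features)  # 3 2

    # Arcs can be cleared between Optimum-Path Forest runs.
    g.destroy_arcs()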
|
the-stack_106_18216
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from doc_builder.convert_to_notebook import (
_re_copyright,
_re_header,
_re_math_delimiter,
_re_python_code,
_re_youtube,
expand_links,
parse_input_output,
split_frameworks,
)
class ConvertToNotebookTester(unittest.TestCase):
def test_re_math_delimiter(self):
self.assertEqual(_re_math_delimiter.search("\\\\(lala\\\\)").groups()[0], "lala")
self.assertListEqual(_re_math_delimiter.findall("\\\\(lala\\\\)xx\\\\(loulou\\\\)"), ["lala", "loulou"])
def test_re_copyright(self):
self.assertIsNotNone(
_re_copyright.search("<!--Copyright 2021 Hugging Face\n more more more\n--> rest of text")
)
def test_re_youtube(self):
self.assertEqual(_re_youtube.search('<Youtube id="tiZFewofSLM"/>').groups()[0], "tiZFewofSLM")
def test_re_header(self):
self.assertIsNotNone(_re_header.search("# Title"))
self.assertIsNotNone(_re_header.search("### Subesection"))
self.assertIsNone(_re_header.search("Title"))
def test_re_python(self):
self.assertIsNotNone(_re_python_code.search("```py"))
self.assertIsNotNone(_re_python_code.search("```python"))
self.assertIsNone(_re_python_code.search("```bash"))
self.assertIsNone(_re_python_code.search("```"))
def test_parse_inputs_output(self):
expected = "from transformers import pipeline\n\nclassifier = pipeline('sentiment-analysis')"
doctest_lines_no_output = [
">>> from transformers import pipeline",
"",
">>> classifier = pipeline('sentiment-analysis')",
]
doctest_lines_with_output = [
">>> from transformers import pipeline",
"",
">>> classifier = pipeline('sentiment-analysis')",
"output",
]
regular_lines = ["from transformers import pipeline", "", "classifier = pipeline('sentiment-analysis')"]
self.assertListEqual(parse_input_output(regular_lines), [(expected, None)])
self.assertListEqual(parse_input_output(doctest_lines_no_output), [(expected, None)])
self.assertListEqual(parse_input_output(doctest_lines_with_output), [(expected, "output")])
def test_parse_inputs_output_multiple_outputs(self):
expected_1 = "from transformers import pipeline"
expected_2 = "classifier = pipeline('sentiment-analysis')"
doctest_lines_with_output = [
">>> from transformers import pipeline",
"output 1",
">>> classifier = pipeline('sentiment-analysis')",
"output 2",
]
self.assertListEqual(
parse_input_output(doctest_lines_with_output), [(expected_1, "output 1"), (expected_2, "output 2")]
)
doctest_lines_with_one_output = [
">>> from transformers import pipeline",
"output 1",
">>> classifier = pipeline('sentiment-analysis')",
]
self.assertListEqual(
parse_input_output(doctest_lines_with_one_output), [(expected_1, "output 1"), (expected_2, None)]
)
    def test_split_frameworks(self):
test_content = """
Intro
```py
common_code_sample
```
Content
<frameworkcontent>
<pt>
```py
pt_sample
```
</pt>
<tf>
```py
tf_sample
```
</tf>
</frameworkcontent>
End
"""
mixed_content = """
Intro
```py
common_code_sample
```
Content
```py
pt_sample
```
```py
tf_sample
```
End
"""
pt_content = """
Intro
```py
common_code_sample
```
Content
```py
pt_sample
```
End
"""
tf_content = """
Intro
```py
common_code_sample
```
Content
```py
tf_sample
```
End
"""
for expected, obtained in zip([mixed_content, pt_content, tf_content], split_frameworks(test_content)):
self.assertEqual(expected, obtained)
def test_expand_links(self):
page_info = {"package_name": "transformers", "page": "quicktour.html"}
self.assertEqual(
expand_links("Checkout the [task summary](task-summary)", page_info),
"Checkout the [task summary](https://huggingface.co/docs/transformers/master/en/task-summary)",
)
self.assertEqual(
expand_links("Checkout the [`Trainer`](/docs/transformers/master/en/trainer#Trainer)", page_info),
"Checkout the [`Trainer`](https://huggingface.co/docs/transformers/master/en/trainer#Trainer)",
)
page_info = {"package_name": "datasets", "page": "quicktour.html", "version": "stable", "language": "fr"}
self.assertEqual(
expand_links("Checkout the [task summary](task-summary)", page_info),
"Checkout the [task summary](https://huggingface.co/docs/datasets/stable/fr/task-summary)",
)
page_info = {"package_name": "transformers", "page": "data/quicktour.html"}
self.assertEqual(
expand_links("Checkout the [task summary](task-summary)", page_info),
"Checkout the [task summary](https://huggingface.co/docs/transformers/master/en/data/task-summary)",
)
|
the-stack_106_18217
|
"""Support for Netatmo energy devices (relays, thermostats and valves)."""
from __future__ import annotations
import logging
from abc import ABC
from collections import defaultdict
from typing import Any
from .auth import AbstractAsyncAuth, NetatmoOAuth2
from .exceptions import InvalidRoom, NoSchedule
from .helpers import _BASE_URL, extract_raw_data
LOG = logging.getLogger(__name__)
_GETHOMESDATA_REQ = _BASE_URL + "api/homesdata"
_GETHOMESTATUS_REQ = _BASE_URL + "api/homestatus"
_SETTHERMMODE_REQ = _BASE_URL + "api/setthermmode"
_SETROOMTHERMPOINT_REQ = _BASE_URL + "api/setroomthermpoint"
_GETROOMMEASURE_REQ = _BASE_URL + "api/getroommeasure"
_SWITCHHOMESCHEDULE_REQ = _BASE_URL + "api/switchhomeschedule"
class AbstractHomeData(ABC):
"""Abstract class of Netatmo energy devices."""
raw_data: dict = defaultdict(dict)
homes: dict = defaultdict(dict)
modules: dict = defaultdict(dict)
rooms: dict = defaultdict(dict)
schedules: dict = defaultdict(dict)
zones: dict = defaultdict(dict)
setpoint_duration: dict = defaultdict(dict)
def process(self) -> None:
"""Process data from API."""
self.homes = {d["id"]: d for d in self.raw_data}
for item in self.raw_data:
home_id = item.get("id")
if not (home_name := item.get("name")):
home_name = "Unknown"
self.homes[home_id]["name"] = home_name
if "modules" not in item:
continue
for module in item["modules"]:
self.modules[home_id][module["id"]] = module
self.setpoint_duration[home_id] = item.get(
"therm_setpoint_default_duration",
)
for room in item.get("rooms", []):
self.rooms[home_id][room["id"]] = room
for schedule in item.get("schedules", []):
schedule_id = schedule["id"]
self.schedules[home_id][schedule_id] = schedule
if schedule_id not in self.zones[home_id]:
self.zones[home_id][schedule_id] = {}
for zone in schedule["zones"]:
self.zones[home_id][schedule_id][zone["id"]] = zone
def _get_selected_schedule(self, home_id: str) -> dict:
"""Get the selected schedule for a given home ID."""
for value in self.schedules.get(home_id, {}).values():
if "selected" in value.keys():
return value
return {}
def get_hg_temp(self, home_id: str) -> float | None:
"""Return frost guard temperature value."""
return self._get_selected_schedule(home_id).get("hg_temp")
def get_away_temp(self, home_id: str) -> float | None:
"""Return the configured away temperature value."""
return self._get_selected_schedule(home_id).get("away_temp")
def get_thermostat_type(self, home_id: str, room_id: str) -> str | None:
"""Return the thermostat type of the room."""
for module in self.modules.get(home_id, {}).values():
if module.get("room_id") == room_id:
return module.get("type")
return None
def is_valid_schedule(self, home_id: str, schedule_id: str):
"""Check if valid schedule."""
schedules = (
self.schedules[home_id][s]["id"] for s in self.schedules.get(home_id, {})
)
return schedule_id in schedules
class HomeData(AbstractHomeData):
"""Class of Netatmo energy devices."""
def __init__(self, auth: NetatmoOAuth2) -> None:
"""Initialize the Netatmo home data.
Arguments:
auth {NetatmoOAuth2} -- Authentication information with a valid access token
"""
self.auth = auth
def update(self) -> None:
"""Fetch and process data from API."""
resp = self.auth.post_request(url=_GETHOMESDATA_REQ)
self.raw_data = extract_raw_data(resp.json(), "homes")
self.process()
def switch_home_schedule(self, home_id: str, schedule_id: str) -> Any:
"""Switch the schedule for a give home ID."""
if not self.is_valid_schedule(home_id, schedule_id):
raise NoSchedule(f"{schedule_id} is not a valid schedule id")
post_params = {"home_id": home_id, "schedule_id": schedule_id}
resp = self.auth.post_request(url=_SWITCHHOMESCHEDULE_REQ, params=post_params)
LOG.debug("Response: %s", resp)
class AsyncHomeData(AbstractHomeData):
"""Class of Netatmo energy devices."""
def __init__(self, auth: AbstractAsyncAuth) -> None:
"""Initialize the Netatmo home data.
Arguments:
auth {AbstractAsyncAuth} -- Authentication information with a valid access token
"""
self.auth = auth
async def async_update(self):
"""Fetch and process data from API."""
resp = await self.auth.async_post_request(url=_GETHOMESDATA_REQ)
assert not isinstance(resp, bytes)
self.raw_data = extract_raw_data(await resp.json(), "homes")
self.process()
async def async_switch_home_schedule(self, home_id: str, schedule_id: str) -> None:
"""Switch the schedule for a give home ID."""
if not self.is_valid_schedule(home_id, schedule_id):
raise NoSchedule(f"{schedule_id} is not a valid schedule id")
resp = await self.auth.async_post_request(
url=_SWITCHHOMESCHEDULE_REQ,
params={"home_id": home_id, "schedule_id": schedule_id},
)
LOG.debug("Response: %s", resp)
class AbstractHomeStatus(ABC):
"""Abstract class of the Netatmo home status."""
raw_data: dict = defaultdict(dict)
rooms: dict = defaultdict(dict)
thermostats: dict = defaultdict(dict)
valves: dict = defaultdict(dict)
relays: dict = defaultdict(dict)
def process(self) -> None:
"""Process data from API."""
for room in self.raw_data.get("rooms", []):
self.rooms[room["id"]] = room
for module in self.raw_data.get("modules", []):
if module["type"] in {"NATherm1", "OTM"}:
self.thermostats[module["id"]] = module
elif module["type"] == "NRV":
self.valves[module["id"]] = module
elif module["type"] in {"OTH", "NAPlug"}:
self.relays[module["id"]] = module
def get_room(self, room_id: str) -> dict:
"""Return room data for a given room id."""
for value in self.rooms.values():
if value["id"] == room_id:
return value
raise InvalidRoom(f"No room with ID {room_id}")
def get_thermostat(self, room_id: str) -> dict:
"""Return thermostat data for a given room id."""
for value in self.thermostats.values():
if value["id"] == room_id:
return value
raise InvalidRoom(f"No room with ID {room_id}")
def get_relay(self, room_id: str) -> dict:
"""Return relay data for a given room id."""
for value in self.relays.values():
if value["id"] == room_id:
return value
raise InvalidRoom(f"No room with ID {room_id}")
def get_valve(self, room_id: str) -> dict:
"""Return valve data for a given room id."""
for value in self.valves.values():
if value["id"] == room_id:
return value
raise InvalidRoom(f"No room with ID {room_id}")
def set_point(self, room_id: str) -> float | None:
"""Return the setpoint of a given room."""
return self.get_room(room_id).get("therm_setpoint_temperature")
def set_point_mode(self, room_id: str) -> str | None:
"""Return the setpointmode of a given room."""
return self.get_room(room_id).get("therm_setpoint_mode")
def measured_temperature(self, room_id: str) -> float | None:
"""Return the measured temperature of a given room."""
return self.get_room(room_id).get("therm_measured_temperature")
def boiler_status(self, module_id: str) -> bool | None:
"""Return the status of the boiler status."""
return self.get_thermostat(module_id).get("boiler_status")
class HomeStatus(AbstractHomeStatus):
"""Class of the Netatmo home status."""
def __init__(self, auth: NetatmoOAuth2, home_id: str):
"""Initialize the Netatmo home status.
Arguments:
auth {NetatmoOAuth2} -- Authentication information with a valid access token
home_id {str} -- ID for targeted home
"""
self.auth = auth
self.home_id = home_id
def update(self) -> None:
"""Fetch and process data from API."""
resp = self.auth.post_request(
url=_GETHOMESTATUS_REQ,
params={"home_id": self.home_id},
)
self.raw_data = extract_raw_data(resp.json(), "home")
self.process()
def set_thermmode(
self,
mode: str,
end_time: int = None,
schedule_id: str = None,
) -> str | None:
"""Set thermotat mode."""
post_params = {"home_id": self.home_id, "mode": mode}
if end_time is not None and mode in {"hg", "away"}:
post_params["endtime"] = str(end_time)
if schedule_id is not None and mode == "schedule":
post_params["schedule_id"] = schedule_id
return self.auth.post_request(url=_SETTHERMMODE_REQ, params=post_params).json()
def set_room_thermpoint(
self,
room_id: str,
mode: str,
temp: float = None,
end_time: int = None,
) -> str | None:
"""Set room themperature set point."""
post_params = {"home_id": self.home_id, "room_id": room_id, "mode": mode}
# Temp and endtime should only be send when mode=='manual', but netatmo api can
# handle that even when mode == 'home' and these settings don't make sense
if temp is not None:
post_params["temp"] = str(temp)
if end_time is not None:
post_params["endtime"] = str(end_time)
return self.auth.post_request(
url=_SETROOMTHERMPOINT_REQ,
params=post_params,
).json()
class AsyncHomeStatus(AbstractHomeStatus):
"""Class of the Netatmo home status."""
def __init__(self, auth: AbstractAsyncAuth, home_id: str):
"""Initialize the Netatmo home status.
Arguments:
auth {AbstractAsyncAuth} -- Authentication information with a valid access token
home_id {str} -- ID for targeted home
"""
self.auth = auth
self.home_id = home_id
async def async_update(self) -> None:
"""Fetch and process data from API."""
resp = await self.auth.async_post_request(
url=_GETHOMESTATUS_REQ,
params={"home_id": self.home_id},
)
assert not isinstance(resp, bytes)
self.raw_data = extract_raw_data(await resp.json(), "home")
self.process()
async def async_set_thermmode(
self,
mode: str,
end_time: int = None,
schedule_id: str = None,
) -> str | None:
"""Set thermotat mode."""
post_params = {"home_id": self.home_id, "mode": mode}
if end_time is not None and mode in {"hg", "away"}:
post_params["endtime"] = str(end_time)
if schedule_id is not None and mode == "schedule":
post_params["schedule_id"] = schedule_id
resp = await self.auth.async_post_request(
url=_SETTHERMMODE_REQ,
params=post_params,
)
assert not isinstance(resp, bytes)
return await resp.json()
async def async_set_room_thermpoint(
self,
room_id: str,
mode: str,
temp: float = None,
end_time: int = None,
) -> str | None:
"""Set room themperature set point."""
post_params = {"home_id": self.home_id, "room_id": room_id, "mode": mode}
# Temp and endtime should only be send when mode=='manual', but netatmo api can
# handle that even when mode == 'home' and these settings don't make sense
if temp is not None:
post_params["temp"] = str(temp)
if end_time is not None:
post_params["endtime"] = str(end_time)
resp = await self.auth.async_post_request(
url=_SETROOMTHERMPOINT_REQ,
params=post_params,
)
assert not isinstance(resp, bytes)
return await resp.json()
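# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the library): how HomeData and HomeStatus
# are typically chained. Obtaining a NetatmoOAuth2 instance with a valid access
# token is out of scope here and left to the caller.
# ---------------------------------------------------------------------------
def _example_energy_workflow(auth):
    """Illustrative only: fetch homes, then the live status of the first home."""
    home_data = HomeData(auth)
    home_data.update()

    home_id = next(iter(home_data.homes))
    status = HomeStatus(auth, home_id)
    status.update()

    return {
        "away_temp": home_data.get_away_temp(home_id),
        "frost_guard_temp": home_data.get_hg_temp(home_id),
        "rooms": list(status.rooms),
    }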
|
the-stack_106_18218
|
# coding: utf-8
"""
Properties
All HubSpot objects store data in default and custom properties. These endpoints provide access to read and modify object properties in HubSpot. # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import six
class OpenApiException(Exception):
"""The base exception class for all OpenAPIExceptions"""
class ApiTypeError(OpenApiException, TypeError):
def __init__(self, msg, path_to_item=None, valid_classes=None, key_type=None):
"""Raises an exception for TypeErrors
Args:
msg (str): the exception message
Keyword Args:
            path_to_item (list): a list of keys and indices to get to the
current_item
None if unset
valid_classes (tuple): the primitive classes that current item
should be an instance of
None if unset
key_type (bool): False if our value is a value in a dict
True if it is a key in a dict
False if our item is an item in a list
None if unset
"""
self.path_to_item = path_to_item
self.valid_classes = valid_classes
self.key_type = key_type
full_msg = msg
if path_to_item:
full_msg = "{0} at {1}".format(msg, render_path(path_to_item))
super(ApiTypeError, self).__init__(full_msg)
class ApiValueError(OpenApiException, ValueError):
def __init__(self, msg, path_to_item=None):
"""
Args:
msg (str): the exception message
Keyword Args:
            path_to_item (list): the path to the exception in the
received_data dict. None if unset
"""
self.path_to_item = path_to_item
full_msg = msg
if path_to_item:
full_msg = "{0} at {1}".format(msg, render_path(path_to_item))
super(ApiValueError, self).__init__(full_msg)
class ApiKeyError(OpenApiException, KeyError):
def __init__(self, msg, path_to_item=None):
"""
Args:
msg (str): the exception message
Keyword Args:
path_to_item (None/list) the path to the exception in the
received_data dict
"""
self.path_to_item = path_to_item
full_msg = msg
if path_to_item:
full_msg = "{0} at {1}".format(msg, render_path(path_to_item))
super(ApiKeyError, self).__init__(full_msg)
class ApiException(OpenApiException):
def __init__(self, status=None, reason=None, http_resp=None):
if http_resp:
self.status = http_resp.status
self.reason = http_resp.reason
self.body = http_resp.data
self.headers = http_resp.getheaders()
else:
self.status = status
self.reason = reason
self.body = None
self.headers = None
def __str__(self):
"""Custom error messages for exception"""
error_message = "({0})\n" "Reason: {1}\n".format(self.status, self.reason)
if self.headers:
error_message += "HTTP response headers: {0}\n".format(self.headers)
if self.body:
error_message += "HTTP response body: {0}\n".format(self.body)
return error_message
def render_path(path_to_item):
"""Returns a string representation of a path"""
result = ""
for pth in path_to_item:
if isinstance(pth, six.integer_types):
result += "[{0}]".format(pth)
else:
result += "['{0}']".format(pth)
return result
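# Minimal self-check sketch (assumption: this module is run directly as a script;
# the values below are illustrative and not part of the generated API):
if __name__ == "__main__":
    assert render_path(["data", 0, "name"]) == "['data'][0]['name']"
    try:
        raise ApiValueError("invalid value", path_to_item=["data", 0, "name"])
    except ApiValueError as exc:
        print(exc)  # prints: invalid value at ['data'][0]['name']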
|
the-stack_106_18221
|
from bomber_monkey.utils.vector import Vector
def test_equal():
v = Vector.create(2, 4)
assert v == [2, 4]
assert not v == [2, 5]
assert not v == [3, 4]
def test_add():
v1 = Vector.create(4, 6)
v2 = Vector.create(3, 3)
v3 = v1 + v2
assert v3 == [7, 9]
def test_modify():
v = Vector.create(2, 4)
v.x += 1
v.y *= 5
assert v == [3, 20]
def test_operators():
v = Vector.create(5, 6)
v += 2
assert v == [7, 8]
v = v + 3
assert v == [10, 11]
v = -v
assert v == [-10, -11]
v = v * 4
v *= -3
assert v == [120, 132]
v = v % 7
assert v == [1, 6]
v += 4
v = v % 7
assert v == [5, 3]
def test_add_tuple():
v = Vector.create(5, 6)
v += (10, 20)
assert v == [15, 26]
def test_mul_tuple():
v = Vector.create(2, 3)
v *= (5, 6)
assert v == [10, 18]
|
the-stack_106_18222
|
import unittest
import os
import torch
from tests.util import create_config, get_dataset_folder
from kge import Dataset
from kge.indexing import KvsAllIndex
class TestDataset(unittest.TestCase):
def setUp(self) -> None:
self.dataset_name = "dataset_test"
self.dataset_folder = get_dataset_folder(self.dataset_name)
self.config = create_config(self.dataset_name)
self.remove_pickle_files()
def tearDown(self) -> None:
self.remove_pickle_files()
def remove_pickle_files(self):
dataset_files = os.listdir(self.dataset_folder)
for item in dataset_files:
if item.endswith(".pckl"):
os.remove(os.path.join(self.dataset_folder, item))
def test_store_data_pickle(self):
# this will create new pickle files for train, valid, test
dataset = Dataset.create(
config=self.config, folder=self.dataset_folder, preload_data=True
)
pickle_filenames = [
"train.del-t.pckl",
"valid.del-t.pckl",
"test.del-t.pckl",
"entity_ids.del-True-t-False.pckl",
"relation_ids.del-True-t-False.pckl",
]
for filename in pickle_filenames:
self.assertTrue(
os.path.isfile(os.path.join(self.dataset_folder, filename)),
msg=filename,
)
def test_store_index_pickle(self):
dataset = Dataset.create(
config=self.config, folder=self.dataset_folder, preload_data=True
)
for index_key in dataset.index_functions.keys():
dataset.index(index_key)
pickle_filename = os.path.join(
self.dataset_folder,
Dataset._to_valid_filename(f"index-{index_key}.pckl"),
)
self.assertTrue(
os.path.isfile(os.path.join(self.dataset_folder, pickle_filename)),
msg=pickle_filename,
)
def test_data_pickle_correctness(self):
# this will create new pickle files for train, valid, test
dataset = Dataset.create(
config=self.config, folder=self.dataset_folder, preload_data=True
)
# create new dataset which loads the triples from stored pckl files
dataset_load_by_pickle = Dataset.create(
config=self.config, folder=self.dataset_folder, preload_data=True
)
for split in dataset._triples.keys():
self.assertTrue(
torch.all(
torch.eq(dataset_load_by_pickle.split(split), dataset.split(split))
)
)
self.assertEqual(dataset._meta, dataset_load_by_pickle._meta)
def test_index_pickle_correctness(self):
def _create_dataset_and_indexes():
data = Dataset.create(
config=self.config, folder=self.dataset_folder, preload_data=True
)
indexes = []
for index_key in data.index_functions.keys():
indexes.append(data.index(index_key))
return data, indexes
# this will create new pickle files for train, valid, test
dataset, dataset_indexes = _create_dataset_and_indexes()
# create new dataset. This will load the triples from stored pickle files
# from previous dataset creation
(
dataset_load_by_pickle,
dataset_indexes_by_pickle,
) = _create_dataset_and_indexes()
for index, index_by_pickle in zip(dataset_indexes, dataset_indexes_by_pickle):
self.assertEqualTorch(index, index_by_pickle)
def assertEqualTorch(self, first, second, msg=None):
"""Compares first and second using ==, except for PyTorch tensors,
where `torch.eq` is used."""
# TODO factor out to utility class
self.assertEqual(type(first), type(second), msg=msg)
if isinstance(first, dict):
self.assertEqual(len(first), len(second), msg=msg)
for key in first.keys():
self.assertTrue(key in second, msg=msg)
self.assertEqualTorch(first[key], second[key], msg=msg)
elif isinstance(first, list):
self.assertEqual(len(first), len(second), msg=msg)
for i in range(len(first)):
self.assertEqualTorch(first[i], second[i], msg=msg)
elif isinstance(first, KvsAllIndex):
first_attributes = [a for a in dir(first) if not a.startswith("__")]
second_attributes = [a for a in dir(second) if not a.startswith("__")]
for first_attribute, second_attribute in zip(
first_attributes, second_attributes
):
self.assertEqualTorch(first_attribute, second_attribute)
else:
if type(first) is torch.Tensor:
self.assertTrue(torch.all(torch.eq(first, second)), msg=msg)
else:
self.assertEqual(first, second, msg=msg)
|
the-stack_106_18223
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common modules used by many agents."""
from __future__ import absolute_import
from __future__ import division
import tensorflow as tf
def get_vars(scope_name):
"""Returns variables in scope."""
scope_var = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope_name)
return scope_var
def encoder(inputs,
embeddings,
n_hidden_unit,
trainable=True,
reuse=False,
name='encoder',
time_major=False,
cell_collection=None):
"""One layer GRU unit encoder.
Args:
inputs: a batch of sequences of integers (indices of tokens)
embeddings: word embedding matrix
n_hidden_unit: number of hidden units the encoder has
trainable: whether the weights are trainable
reuse: whether to reuse the parameters
name: optional name of the encoder
time_major: whether the format is time major
cell_collection: optional list to put the encoder cell in
Returns:
encoder_outputs: output of the encoder
encoder_final_state: the final hidden state of the encoder
"""
with tf.variable_scope(name, reuse=reuse):
input_embedding = tf.nn.embedding_lookup(embeddings, inputs)
encoder_cell = tf.contrib.rnn.GRUCell(n_hidden_unit, trainable=trainable)
encoder_outputs, encoder_final_state = tf.nn.dynamic_rnn(
encoder_cell,
input_embedding,
dtype=tf.float32,
time_major=time_major,
)
if cell_collection: cell_collection.append(encoder_cell)
return encoder_outputs, encoder_final_state
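# Shape sketch for encoder (illustrative): with batched token indices of shape
# [B, T], an embedding matrix of shape [vocab_size, d], and n_hidden_unit = 64,
# encoder_outputs has shape [B, T, 64] and encoder_final_state has shape [B, 64]
# (batch-major, since time_major defaults to False).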
def stack_dense_layer(inputs, layer_cfg):
"""Stack dense layers per layer_cfg.
Args:
inputs: input tensor
layer_cfg: list of integer specifying the number of units at each layer
Returns:
output after all layers
"""
for cfg in layer_cfg[:-1]:
inputs = tf.layers.dense(inputs, cfg, activation=tf.nn.relu)
return tf.layers.dense(inputs, layer_cfg[-1])
def stack_conv_layer(inputs, layer_cfg, padding='same'):
"""Stack convolution layers per layer_cfg.
Args:
inputs: input tensor
layer_cfg: list of integer tuples specifying the parameter each layer;
each tuple should be (channel, kernel size, strides)
padding: what kind of padding the conv layers use
Returns:
output after all layers
"""
for cfg in layer_cfg[:-1]:
inputs = tf.layers.conv2d(
inputs,
filters=cfg[0],
kernel_size=cfg[1],
strides=cfg[2],
activation=tf.nn.relu,
padding=padding
)
final_cfg = layer_cfg[-1]
return tf.layers.conv2d(
inputs, final_cfg[0], final_cfg[1], final_cfg[2], padding=padding)
def film_params(sentence_embedding, n_layer_channel):
"""Generate FiLM parameters from a sentence embedding.
Generate FiLM parameters from a sentence embedding. This method assumes a
batch dimension exists.
Args:
sentence_embedding: a tensor containing batched sentenced embedding to be
transformed
n_layer_channel: a list of integers specifying how many channels are
at each hidden layer to be FiLM'ed
Returns:
A tuple of tensors the same length as n_layer_channel. Each element
contains all gamma_i and beta_i for a single hidden layer.
"""
n_total = sum(n_layer_channel) * 2
all_params = tf.layers.dense(sentence_embedding, n_total)
return tf.split(all_params, [c*2 for c in n_layer_channel], 1)
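# Shape sketch for film_params (illustrative): with sentence_embedding of shape
# [B, 64] and n_layer_channel = [16, 32], n_total is 96 and the returned split
# has shapes [B, 32] and [B, 64], i.e. the concatenated (gamma_i, beta_i) pairs
# for each FiLM'ed hidden layer.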
def vector_tensor_product(a, b):
""""Conduct an outer product between two tensors.
Instead of conducting scalar multiplication like regular outer product, this
operation does 1-D vector concatenation. It also does it over entire batch.
Args:
a: a tensor of shape [B, ?, d_a]
b: a tensor of shape [B, ?, d_b]
Returns:
a tensor of shape [B, ?, ?, d_a + d_b]
"""
# a shape: [B, ?, d], b shape: [B, ?, d]
variable_length = tf.shape(a)[1] # variable_len = ?
a = tf.expand_dims(a, axis=2) # a shape: [B, ?, 1, d]
b = tf.expand_dims(b, axis=2) # b shape: [B, ?, 1, d]
a = tf.tile(a, multiples=[1, 1, variable_length, 1]) # a shape: [B, ?, ?, d]
b = tf.tile(b, multiples=[1, 1, variable_length, 1]) # b shape: [B, ?, ?, d]
b = tf.transpose(b, perm=[0, 2, 1, 3]) # b shape: [B, ?, ?, d]
return tf.concat([a, b], axis=-1) # shape: [B, ?, ?, 2*d]
def tensor_concat(a, b, c):
"""Do tensor product between 3 vectors."""
  # a shape = [B, da, de], b shape = [B, db, de], c shape = [B, dc, de]
dim_a, dim_b, dim_c = tf.shape(a)[1], tf.shape(b)[1], tf.shape(c)[1]
a = tf.expand_dims(a, axis=2) # [B, da, 1, de]
b = tf.expand_dims(b, axis=2) # [B, db, 1, de]
c = tf.expand_dims(c, axis=2) # [B, dc, 1, de]
c = tf.expand_dims(c, axis=3) # [B, dc, 1, 1, de]
a = tf.tile(a, multiples=[1, 1, dim_b, 1]) # [B, da, db, de]
b = tf.tile(b, multiples=[1, 1, dim_a, 1]) # [B, db, da, de]
c = tf.tile(c, multiples=[1, 1, dim_a, dim_b, 1]) # [B, dc, da, db, de]
b = tf.transpose(b, perm=[0, 2, 1, 3]) # [B, da, db, de]
ab = tf.concat([a, b], axis=-1) # [B, da, db, de*2]
ab = tf.expand_dims(ab, axis=3) # [B, da, db, 1, de*2]
ab = tf.tile(ab, multiples=[1, 1, 1, dim_c, 1]) # [B, da, db, dc, 2*de]
c = tf.transpose(c, perm=[0, 2, 3, 1, 4]) # [B, da, db, dc, de]
abc = tf.concat([ab, c], axis=-1) # [B, da, db, dc, 3*de]
return tf.identity(abc)
def factor_concat(factors):
"""Generalization of tensor_concat to any numbers of batched 2D tensors."""
assert len(factors) >= 2
primary_fac = factors[0]
final_factor_shape = [-1, primary_fac.get_shape()[1]]
for fac in factors[1:]:
primary_fac = tf.expand_dims(primary_fac, axis=2)
fac = tf.expand_dims(fac, axis=2)
fac_shape = fac.get_shape()
primary_shape = primary_fac.get_shape()
# tiling primary to match the shape of fac
primary_fac = tf.tile(primary_fac, multiples=[1, 1, fac_shape[1], 1])
# tiling current fac to the shape of primary
fac = tf.tile(fac, multiples=[1, 1, primary_shape[1], 1])
# transpose the current fac
fac = tf.transpose(fac, perm=[0, 2, 1, 3])
primary_fac = tf.concat([primary_fac, fac], axis=-1)
primary_fac = tf.reshape(
primary_fac,
shape=[
-1,
fac_shape[1]*primary_shape[1],
primary_fac.get_shape()[-1]
]
)
final_factor_shape.append(fac_shape[1])
return primary_fac
|
the-stack_106_18224
|
from dateutil.parser import parserinfo, parser
class BrParserInfo(parserinfo):
JUMP = [" ", ".", ",", ";", "-", "/", "'",
"as", "a", "e", "de", "do", "da", "em"]
WEEKDAYS = [("Seg", "Segunda"),
("Ter", "Terça"),
("Qua", "Quarta"),
("Qui", "Quinta"),
("Sex", "Sexta"),
("Sab", "Sábado"),
("Dom", "Domingo")]
MONTHS = [("Jan", "Janeiro"),
("Fev", "Fevereiro"),
("Mar", "Março"),
("Abr", "Abril"),
("Mai", "Maio"),
("Jun", "Junho"),
("Jul", "Julho"),
("Aug", "Agosto"),
("Set", "Setembro"),
("Out", "Outubro"),
("Nov", "Novembro"),
("Dez", "Dezembro")]
HMS = [("h", "hora", "horas", "hs"),
("m", "minuto", "minutos", "min"),
("s", "segundo", "segundos")]
AMPM = [("am", "a"),
("pm", "p")]
UTCZONE = ["UTC", "GMT", "Z", "z"]
PERTAIN = ["de"]
TZOFFSET = {}
def __init__(self, dayfirst=True, yearfirst=False):
super().__init__(dayfirst, yearfirst)
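# Minimal usage sketch (assumption: python-dateutil is installed and this module
# is run directly as a script; the date strings are illustrative):
if __name__ == "__main__":
    from datetime import datetime
    br_parser = parser(BrParserInfo())
    assert br_parser.parse("12 de Março de 2020") == datetime(2020, 3, 12)
    assert br_parser.parse("Sexta, 1 de Maio de 2020") == datetime(2020, 5, 1)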
|
the-stack_106_18226
|
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test basic for Creating tokens ."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import os
import json
import http.client
import urllib.parse
class ManagedBasicsTest (BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [["-txindex=1"]]
def setup_chain(self):
super().setup_chain()
        # Append rpcauth to litecoin.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcuser = "rpcuser=rpcuser💻"
rpcpassword = "rpcpassword=rpcpassword🔑"
with open(os.path.join(self.options.tmpdir+"/node0", "litecoin.conf"), 'a', encoding='utf8') as f:
f.write(rpcauth+"\n")
def run_test(self):
self.log.info("Preparing the workspace...")
# mining 200 blocks
self.nodes[0].generate(200)
url = urllib.parse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
addresses = []
accounts = ["john", "doe", "pat", "dan", "david", "lihki"]
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
self.log.info("Creating sender address")
addresses = tradelayer_createAddresses(accounts, conn, headers)
self.log.info("Funding addresses with LTC")
amount = 0.1
tradelayer_fundingAddresses(addresses, amount, conn, headers)
self.log.info("Checking the LTC balance in every account")
tradelayer_checkingBalance(accounts, amount, conn, headers)
self.log.info("Self Attestation for addresses")
tradelayer_selfAttestation(addresses,conn, headers)
self.log.info("Checking attestations")
out = tradelayer_HTTP(conn, headers, False, "tl_list_attestation")
# self.log.info(out)
result = []
registers = out['result']
for addr in addresses:
for i in registers:
if i['att sender'] == addr and i['att receiver'] == addr and i['kyc_id'] == 0:
result.append(True)
assert_equal(result, [True, True, True, True, True, True])
self.log.info("Creating new tokens (sendissuancemanaged)")
array = [0]
params = str([addresses[0], 2, 0, "lihki", "", "", array]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_sendissuancemanaged",params)
# self.log.info(out)
self.nodes[0].generate(1)
self.log.info("Checking the property")
params = str([4])
out = tradelayer_HTTP(conn, headers, True, "tl_getproperty",params)
assert_equal(out['result']['propertyid'],4)
assert_equal(out['result']['name'],'lihki')
assert_equal(out['result']['issuer'], addresses[0])
assert_equal(out['result']['data'],'')
assert_equal(out['result']['url'],'')
assert_equal(out['result']['divisible'],True)
assert_equal(out['result']['totaltokens'],'0.00000000')
self.log.info("Checking token balance equal zero in every address")
for addr in addresses:
params = str([addr, 4]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'0.00000000')
assert_equal(out['result']['reserve'],'0.00000000')
self.log.info("Sending 2000 tokens to receiver (sendgrant)")
params = str([addresses[0], addresses[0], 4, "1000"]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_sendgrant",params)
# self.log.info(out)
self.nodes[0].generate(1)
self.log.info("Checking tokens in receiver address")
params = str([addresses[0], 4]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'1000.00000000')
assert_equal(out['result']['reserve'],'0.00000000')
self.log.info("Checking the property (with new issuer)")
params = str([4])
out = tradelayer_HTTP(conn, headers, True, "tl_getproperty",params)
assert_equal(out['result']['propertyid'],4)
assert_equal(out['result']['name'],'lihki')
assert_equal(out['result']['issuer'], addresses[0])
assert_equal(out['result']['data'],'')
assert_equal(out['result']['url'],'')
assert_equal(out['result']['divisible'],True)
assert_equal(out['result']['totaltokens'],'1000.00000000')
self.log.info("Checking all addresses")
self.log.info(addresses)
self.log.info("Testing sendtomany")
params = '["' + addresses[0] + '", {"' + addresses[2] + '":10,"' + addresses[3] + '":20,"' + addresses[4] + '": 5.647,"' + addresses[5] + '": 0.5}, 4]'
self.log.info(params)
out = tradelayer_HTTP(conn, headers, False, "tl_sendmany",params)
self.log.info(out)
assert_equal(out['error'], None)
self.nodes[0].generate(1)
self.log.info("Checking balances")
params = str([addresses[0], 4]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'963.85300000')
assert_equal(out['result']['reserve'],'0.00000000')
params = str([addresses[2], 4]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'10.00000000')
assert_equal(out['result']['reserve'],'0.00000000')
params = str([addresses[3], 4]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'20.00000000')
assert_equal(out['result']['reserve'],'0.00000000')
params = str([addresses[4], 4]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'5.64700000')
assert_equal(out['result']['reserve'],'0.00000000')
params = str([addresses[5], 4]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'0.50000000')
assert_equal(out['result']['reserve'],'0.00000000')
conn.close()
self.stop_nodes()
if __name__ == '__main__':
ManagedBasicsTest ().main ()
|
the-stack_106_18228
|
# Python program for mail merge
# Names are in the file names.txt
# Body of the mail is in body.txt
# open names.txt for reading
with open("names.txt", 'r', encoding='utf-8') as names_file:
# open body.txt for reading
with open("body.txt", 'r', encoding='utf-8') as body_file:
# read entire content of the body
body = body_file.read()
# iterate over names
for name in names_file:
mail = "Hello " + name.strip() + "\n" + body
# write the mails to individual files
with open(name.strip()+".txt", 'w', encoding='utf-8') as mail_file:
mail_file.write(mail)
|
the-stack_106_18229
|
from itertools import product
from typing import TYPE_CHECKING, Optional, Union, Tuple, Callable
from gym import spaces
import numpy as np
from highway_env import utils
from highway_env.vehicle.dynamics import BicycleVehicle
from highway_env.vehicle.kinematics import Vehicle
from highway_env.vehicle.controller import MDPVehicle
if TYPE_CHECKING:
from highway_env.envs.common.abstract import AbstractEnv
Action = Union[int, np.ndarray]
class ActionType(object):
"""A type of action specifies its definition space, and how actions are executed in the environment"""
def __init__(self, env: 'AbstractEnv', **kwargs) -> None:
self.env = env
self.__controlled_vehicle = None
def space(self) -> spaces.Space:
"""The action space."""
raise NotImplementedError
@property
def vehicle_class(self) -> Callable:
"""
The class of a vehicle able to execute the action.
Must return a subclass of :py:class:`highway_env.vehicle.kinematics.Vehicle`.
"""
raise NotImplementedError
def act(self, action: Action) -> None:
"""
Execute the action on the ego-vehicle.
Most of the action mechanics are actually implemented in vehicle.act(action), where
vehicle is an instance of the specified :py:class:`highway_env.envs.common.action.ActionType.vehicle_class`.
        Some pre-processing can be applied to the action based on the ActionType configurations.
:param action: the action to execute
"""
raise NotImplementedError
@property
def controlled_vehicle(self):
"""The vehicle acted upon.
If not set, the first controlled vehicle is used by default."""
return self.__controlled_vehicle or self.env.vehicle
@controlled_vehicle.setter
def controlled_vehicle(self, vehicle):
self.__controlled_vehicle = vehicle
class ContinuousAction(ActionType):
"""
    A continuous action space for throttle and/or steering angle.
If both throttle and steering are enabled, they are set in this order: [throttle, steering]
The space intervals are always [-1, 1], but are mapped to throttle/steering intervals through configurations.
"""
ACCELERATION_RANGE = (-5, 5.0)
"""Acceleration range: [-x, x], in m/s²."""
STEERING_RANGE = (-np.pi / 4, np.pi / 4)
"""Steering angle range: [-x, x], in rad."""
def __init__(self,
env: 'AbstractEnv',
acceleration_range: Optional[Tuple[float, float]] = None,
steering_range: Optional[Tuple[float, float]] = None,
longitudinal: bool = True,
lateral: bool = True,
dynamical: bool = False,
clip: bool = True,
**kwargs) -> None:
"""
Create a continuous action space.
:param env: the environment
:param acceleration_range: the range of acceleration values [m/s²]
:param steering_range: the range of steering values [rad]
:param longitudinal: enable throttle control
:param lateral: enable steering control
:param dynamical: whether to simulate dynamics (i.e. friction) rather than kinematics
:param clip: clip action to the defined range
"""
super().__init__(env)
self.acceleration_range = acceleration_range if acceleration_range else self.ACCELERATION_RANGE
self.steering_range = steering_range if steering_range else self.STEERING_RANGE
self.lateral = lateral
self.longitudinal = longitudinal
if not self.lateral and not self.longitudinal:
raise ValueError("Either longitudinal and/or lateral control must be enabled")
self.dynamical = dynamical
self.clip = clip
self.size = 2 if self.lateral and self.longitudinal else 1
self.last_action = np.zeros(self.size)
def space(self) -> spaces.Box:
return spaces.Box(-1., 1., shape=(self.size,), dtype=np.float32)
@property
def vehicle_class(self) -> Callable:
return Vehicle if not self.dynamical else BicycleVehicle
def act(self, action: np.ndarray) -> None:
if self.clip:
action = np.clip(action, -1, 1)
if self.longitudinal and self.lateral:
self.controlled_vehicle.act({
"acceleration": utils.lmap(action[0], [-1, 1], self.acceleration_range),
"steering": utils.lmap(action[1], [-1, 1], self.steering_range),
})
elif self.longitudinal:
self.controlled_vehicle.act({
"acceleration": utils.lmap(action[0], [-1, 1], self.acceleration_range),
"steering": 0,
})
elif self.lateral:
self.controlled_vehicle.act({
"acceleration": 0,
"steering": utils.lmap(action[0], [-1, 1], self.steering_range)
})
self.last_action = action
class DiscreteAction(ContinuousAction):
def __init__(self,
env: 'AbstractEnv',
acceleration_range: Optional[Tuple[float, float]] = None,
steering_range: Optional[Tuple[float, float]] = None,
longitudinal: bool = True,
lateral: bool = True,
dynamical: bool = False,
clip: bool = True,
actions_per_axis: int = 3,
**kwargs) -> None:
super().__init__(env, acceleration_range=acceleration_range, steering_range=steering_range,
longitudinal=longitudinal, lateral=lateral, dynamical=dynamical, clip=clip)
self.actions_per_axis = actions_per_axis
def space(self) -> spaces.Discrete:
return spaces.Discrete(self.actions_per_axis**self.size)
def act(self, action: int) -> None:
cont_space = super().space()
        axes = np.linspace(cont_space.low, cont_space.high, self.actions_per_axis).T
        all_actions = list(product(*axes))
super().act(all_actions[action])
class DiscreteMetaAction(ActionType):
"""
    A discrete action space of meta-actions: lane changes and cruise-control set-points.
"""
ACTIONS_ALL = {
0: 'LANE_LEFT',
1: 'IDLE',
2: 'LANE_RIGHT',
3: 'FASTER',
4: 'SLOWER'
}
"""A mapping of action indexes to labels."""
ACTIONS_LONGI = {
0: 'SLOWER',
1: 'IDLE',
2: 'FASTER'
}
ACTIONS_LONGI_FAST = {
0: 'FASTER'
}
ACTIONS_LONGI_SLOW = {
0: 'SLOWER'
}
"""A mapping of longitudinal action indexes to labels."""
ACTIONS_LAT = {
0: 'LANE_LEFT',
1: 'IDLE',
2: 'LANE_RIGHT'
}
"""A mapping of lateral action indexes to labels."""
def __init__(self,
env: 'AbstractEnv',
longitudinal: bool = True,
lateral: bool = True,
fast: bool = False,
slow: bool = False,
**kwargs) -> None:
"""
Create a discrete action space of meta-actions.
:param env: the environment
:param longitudinal: include longitudinal actions
:param lateral: include lateral actions
"""
super().__init__(env)
self.longitudinal = longitudinal
self.lateral = lateral
self.actions = self.ACTIONS_ALL if longitudinal and lateral \
else self.ACTIONS_LONGI if longitudinal \
else self.ACTIONS_LAT if lateral \
else self.ACTIONS_LONGI_FAST if fast \
else self.ACTIONS_LONGI_SLOW if slow \
else None
if self.actions is None:
raise ValueError("At least longitudinal or lateral actions must be included")
self.actions_indexes = {v: k for k, v in self.actions.items()}
def space(self) -> spaces.Space:
return spaces.Discrete(len(self.actions))
@property
def vehicle_class(self) -> Callable:
return MDPVehicle
def act(self, action: int) -> None:
self.controlled_vehicle.act(self.actions[int(action)])
if self.controlled_vehicle.new_action:
self.controlled_vehicle.new_action = self.actions_indexes[self.controlled_vehicle.new_action]
class MultiAgentAction(ActionType):
def __init__(self,
env: 'AbstractEnv',
action_config: dict,
**kwargs) -> None:
super().__init__(env)
self.action_config = action_config
self.agents_action_types = []
for vehicle in self.env.controlled_vehicles:
action_type = action_factory(self.env, self.action_config)
action_type.controlled_vehicle = vehicle
self.agents_action_types.append(action_type)
def space(self) -> spaces.Space:
# return self.agents_action_types[0].space()
return spaces.Tuple([action_type.space() for action_type in self.agents_action_types])
@property
def vehicle_class(self) -> Callable:
return action_factory(self.env, self.action_config).vehicle_class
def act(self, action: Action) -> None:
assert isinstance(action, tuple)
for agent_action, action_type in zip(action, self.agents_action_types):
action_type.act(agent_action)
def action_factory(env: 'AbstractEnv', config: dict) -> ActionType:
if config["type"] == "ContinuousAction":
return ContinuousAction(env, **config["action_config"])
if config["type"] == "DiscreteAction":
return DiscreteAction(env, **config["action_config"])
elif config["type"] == "DiscreteMetaAction":
return DiscreteMetaAction(env, **config["action_config"])
elif config["type"] == "MultiAgentAction":
return MultiAgentAction(env, **config)
else:
raise ValueError("Unknown action type")
|
the-stack_106_18230
|
#!/usr/bin/env python3
import filecmp # https://stackoverflow.com/questions/254350
import os
import platform # https://stackoverflow.com/questions/1854
import shutil
# Copy following files:
# If there are changes:
# target/release/raspberry-web -> /usr/local/bin/
# raspberry-web-db/raspberry-web.sqlite -> /usr/local/raspberry-web/database/
# If it does not exist
# config/configuration.toml -> /usr/local/raspberry-web/
# If on Linux, and if it does not exist
# config/raspberry-web.service -> /etc/systemd/system/
# Colors for highlighting status
# https://svn.blender.org/svnroot/bf-blender/trunk/blender/build_files/scons/tools/bcolors.py
# https://stackoverflow.com/questions/287871/print-in-terminal-with-colors
okgreen = "\033[1;32;40m"
failred = "\033[1;31;40m"
highlight = "\033[1;34;40m"
endc = "\033[0m"
OK = okgreen + " OK" + endc
ERROR = failred + "ERROR: " + endc
# Path to crate root
crate_root = os.path.dirname(os.getcwd()) + "/"
# Filenames
binary = "raspberry-web"
database = "raspberry-web.sqlite"
config = "configuration.toml"
service = "raspberry-web.service"
# Source paths
binary_source = crate_root + "target/release/"
config_source = crate_root + "config/"
database_source = crate_root + "raspberry-web-db/"
systemd_source = crate_root + "config/"
# Target paths
binary_target = "/usr/local/bin/"
config_target = "/usr/local/raspberry-web/"
database_target = "/usr/local/raspberry-web/database/"
systemd_target = "/etc/systemd/system/"
def mkdir_if_not_exists(path):
""" Create directory 'path' if it does not exist
"""
if not os.path.exists(path):
os.makedirs(path)
print("Created directory" + highlight + path + endc + OK)
else:
print(highlight + path + endc + " is already present" + OK)
def cp_if_dst_different(source, target):
""" Copy file from 'source' to 'target' if target file is different than source,
or if it does not exist. Overwrite if target is different than source.
Return true if file is copied, false otherwise
"""
if os.path.isfile(target):
if not filecmp.cmp(source, target, shallow=False):
shutil.copy2(source, target)
print("Added new version of file " + highlight + target + endc + OK)
return True
else:
print(highlight + target + endc + " already present in same version" + OK)
return False
else:
shutil.copy2(source, target)
print("Added file " + highlight + target + endc + OK)
return True
def cp_if_dst_nonexistant(source, target):
""" Copy file from 'source' to 'target' if target file does not exist.
Return true if file is copied, false otherwise
"""
if not os.path.isfile(target):
shutil.copy2(source, target)
print("Added file " + highlight + target + endc + OK)
return True
else:
print(highlight + target + endc + " already present" + OK)
return False
def check_file_exists_or_exit(path):
""" Usecase: check if source files are present - else exit.
"""
if os.path.isfile(path):
print("Found source file " + highlight + path + endc + OK)
else:
print(ERROR + highlight + path + endc + " not present.")
exit()
# Supported OS's for this installer
supported_os = ['Linux', 'Darwin']
if __name__ == "__main__":
# Check if OS is supported
system = platform.system()
if system not in supported_os:
print(ERROR + highlight + system + endc + " is not supported.")
exit()
# Check if source files exists - else exit:
print("Checking source files...")
    source_files = [
        binary_source+binary,
        config_source+config,
        database_source+database,
        systemd_source+service]
    for path in source_files:
check_file_exists_or_exit(path)
print()
# Create config_path and database_path if they do not exist
print("Checking target directories...")
for path in [config_target, database_target]:
mkdir_if_not_exists(path)
print()
# Binary
print("Copying files...")
binary_copied = cp_if_dst_different(binary_source+binary, binary_target+binary)
if binary_copied:
os.chmod(binary_target+binary, 0o755)
# Configuration
config_copied = cp_if_dst_nonexistant(config_source+config, config_target+config)
if config_copied:
os.chmod(config_target+config, 0o644)
# Database
    database_copied = cp_if_dst_nonexistant(database_source+database, database_target+database)
if database_copied:
os.chmod(database_target+database, 0o644)
# Service - only on Linux
if system == 'Linux':
service_copied = cp_if_dst_nonexistant(systemd_source+service, systemd_target+service)
if service_copied:
os.chmod(systemd_target+service, 0o644)
else:
print("Not adding " + highlight + service + endc + " to " + highlight + systemd_target \
+ endc + " since we are running " + highlight + system + endc + OK)
print()
print(okgreen + "Done." + endc)
|
the-stack_106_18233
|
from ..utils.utils import *
#==============================================================================
""" Implementation of quick sort algorithm.
This modules provides two functions for sorting a list using quicksort
algorithm.
"""
def quick_sort(a):
"""Sorts given list using Quicksort algorithm.
Args:
a (list): The list to be sorted
"""
def _quick_sort(a, low, high):
if low >= high:
return
pivot_idx = partition(a, low, high)
_quick_sort(a, low, pivot_idx - 1)
_quick_sort(a, pivot_idx + 1, high)
def partition(a, low, high):
pivot, idx = a[high], low
for i in range(low, high):
if a[i] < pivot:
swap(a, idx, i)
idx = idx + 1
swap(a, idx, high)
return idx
    # call the quicksort procedure
_quick_sort(a, 0, len(a) - 1)
def quick_sort_v2(a):
"""Sorts given list using Quicksort algorithm.
Args:
a (list): The list to be sorted
"""
def _quick_sort_v2(a, low, high):
if low >= high:
return
pi = partition_v2(a, low, high) # pi -> Pivot Index
_quick_sort_v2(a, low, pi - 1)
_quick_sort_v2(a, pi + 1, high)
def partition_v2(a, low, high):
"""partition the subarray a[low, ..., high].
Partitions the subarray a[low, ..., high] so that a[lo..j-1]
<= a[j] <= a[j+1..hi] and return the index j.
Args:
            a : the array to be partitioned.
low : the lower index into the specified array.
high: the upper index into the specified array.
"""
pivot, i, j = a[low], low + 1, high
while True:
while a[i] < pivot: # find item on low to swap
if i == high:
break
i += 1
while a[j] >= pivot: # find item on high to swap
if j == low:
break # redundant since a[low] acts as sentinel
j -= 1
if i >= j:
break # check if pointers cross
swap(a, i, j)
swap(a, low, j) # put partitioning item pivot at a[j]
# now, a[low ... j-1] <= a[j] <= a[j+1 ... high]
return j
    # call the quicksort procedure
_quick_sort_v2(a, 0, len(a) - 1)
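# Usage sketch (illustrative; both functions sort the given list in place and
# rely on the `swap` helper pulled in by the package-relative import above):
#
#     data = [5, 2, 9, 1, 5, 6]
#     quick_sort(data)        # data -> [1, 2, 5, 5, 6, 9]
#     data = [3, 0, -4, 8]
#     quick_sort_v2(data)     # data -> [-4, 0, 3, 8]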
|
the-stack_106_18234
|
import _plotly_utils.basevalidators
class NameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="name", parent_name="splom.dimension", **kwargs):
super(NameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
the-stack_106_18235
|
import numpy as np
import matplotlib.pyplot as plt
import sympy as sp
def onehat(x, u):
if x < u - 1:
return 0
if x > u + 1:
return 0
if x < u:
return x - u + 1
if x > u:
return -x + u + 1
def onehat_vec(x, u):
z1 = x < u - 1
z2 = x > u + 1
z = ~np.logical_or(z1, z2)
xl = x < u
xr = x > u
return ((xl * (x - u + 1)) + (xr * (-x + u + 1)))*z
def onehat_vec2(x, i, n):
z1 = x < (i-1)/(n+1)
z2 = x > (i+1)/(n+1)
z = ~np.logical_or(z1, z2)
xl = x < i/(n+1)
xr = x > i/(n+1)
xlv = (n + 1)*x - i + 1
xrv = -(n + 1)*x + i + 1
return xl * xlv * z + xr * xrv * z
if __name__ == "__main__":
# y = []
# p = np.random.rand(3)*2.-1
# for x in np.linspace(-2, 2):
# yy = np.sum([pp*onehat(x, u) for (pp, u) in zip(p, [-1, 0, 1])])
# y.append(yy)
# plt.plot(y)
# plt.show()
# nn = [4]
# bb = (0, 1)
# xx = np.linspace(*bb, 1000)
# for n in nn:
# uu = np.arange(n)+1
# pp = np.random.normal(size=(len(uu),))
# f = []
# for u in uu:
# f.append(onehat_vec((n+1)*xx, u))
# plt.figure()
# fa = np.array(f)
# plt.plot(fa.T)
# plt.plot((pp[:, None] * fa).sum(axis=0), color='black')
# plt.show()
nn = [4, 10]
bb = (0, 1)
xx = np.linspace(*bb, 1000)
for n in nn:
pp = np.random.normal(size=(n,))
f = []
for i in range(1, n+1):
f.append(onehat_vec2(xx, i, n))
plt.figure()
fa = np.array(f)
plt.plot(fa.T)
plt.plot((pp[:, None] * fa).sum(axis=0), color='black')
plt.show()
|
the-stack_106_18236
|
#!/usr/bin/env python3
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import glob
import swifter
from tqdm.auto import tqdm
tqdm.pandas()
from bs4 import BeautifulSoup
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import os
os.makedirs("classified", exist_ok=True)
analyzer = SentimentIntensityAnalyzer()
files = sorted(glob.glob("data/climate_tweets_*.csv"))
#print(files)
pd.set_option('display.max_colwidth', -1)
def classify(row):
soup = BeautifulSoup(row.html, "lxml")
s = []
for child in soup.find("p").children:
if child.name == None:
s.append(child)
elif child.name == "img":
s.append(child["alt"])
else:
s.append(child.text)
text_with_emoji = " ".join(s)
row["text_with_emoji"] = text_with_emoji
vs = analyzer.polarity_scores(text_with_emoji)
for k,v in vs.items():
row[k] = v
return row
for f in tqdm(files):
df = pd.read_csv(f, sep=";")
df = df.swifter.allow_dask_on_strings().apply(classify, axis=1)
print("Complete - writing csv")
new_filename = "classified/" + os.path.splitext(os.path.basename(f))[0] + ".csv"
df.to_csv(new_filename, sep=";", index=False)
|
the-stack_106_18239
|
import copy
import os
import numpy as np
import torch
import torch.nn as nn
from reinforcement_learning.policy import LearningPolicy
from reinforcement_learning.replay_buffer import ReplayBuffer
# https://lilianweng.github.io/lil-log/2018/04/08/policy-gradient-algorithms.html
class EpisodeBuffers:
def __init__(self):
self.reset()
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory)
def reset(self):
self.memory = {}
def get_transitions(self, handle):
return self.memory.get(handle, [])
def push_transition(self, handle, transition):
transitions = self.get_transitions(handle)
transitions.append(transition)
self.memory.update({handle: transitions})
# Actor module
class FeatureExtractorNetwork(nn.Module):
def __init__(self, state_size, device, hidsize1=512, hidsize2=256):
super(FeatureExtractorNetwork, self).__init__()
self.device = device
self.nn_layer_outputsize = hidsize2
self.model = nn.Sequential(
nn.Linear(state_size, hidsize1),
nn.Tanh(),
nn.Linear(hidsize1, hidsize2),
nn.Tanh()
).to(self.device)
def forward(self, X):
return self.model(X)
def save(self, filename):
# print("Saving model from checkpoint:", filename)
torch.save(self.model.state_dict(), filename + ".ppo_feature_extractor")
def _load(self, obj, filename):
if os.path.exists(filename):
print(' >> ', filename)
try:
obj.load_state_dict(torch.load(filename, map_location=self.device))
except:
print(" >> failed!")
return obj
def load(self, filename):
print("load model from file", filename)
self.model = self._load(self.model, filename + ".ppo_feature_extractor")
class ActorNetwork(nn.Module):
def __init__(self, state_size, action_size, device, feature_extractor_model: FeatureExtractorNetwork = None,
hidsize=256,
learning_rate=0.5e-3):
super(ActorNetwork, self).__init__()
self.device = device
self.feature_extractor_model = feature_extractor_model
self.model = nn.Sequential(
nn.Linear(state_size, hidsize) if (self.feature_extractor_model is None)
else nn.Linear(feature_extractor_model.nn_layer_outputsize, hidsize),
nn.Tanh(),
nn.Linear(hidsize, hidsize),
nn.Tanh(),
nn.Linear(hidsize, action_size),
nn.Softmax(dim=-1)
).to(self.device)
self.optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate)
def forward(self, input):
if self.feature_extractor_model is None:
return self.model(input)
return self.model(self.feature_extractor_model(input))
def get_actor_dist(self, state):
probs = self.forward(state)
dist = torch.distributions.Categorical(probs=probs)
return dist, probs
def evaluate(self, states, actions):
dist, action_probs = self.get_actor_dist(states)
action_logprobs = dist.log_prob(actions)
dist_entropy = dist.entropy()
return action_logprobs, dist_entropy
def save(self, filename):
torch.save(self.model.state_dict(), filename + ".ppo_actor")
torch.save(self.optimizer.state_dict(), filename + ".ppo_optimizer_actor")
def _load(self, obj, filename):
if os.path.exists(filename):
print(' >> ', filename)
try:
obj.load_state_dict(torch.load(filename, map_location=self.device))
except:
print(" >> failed!")
return obj
def load(self, filename):
print("load model from file", filename)
self.model = self._load(self.model, filename + ".ppo_actor")
self.optimizer = self._load(self.optimizer, filename + ".ppo_optimizer_actor")
# Critic module
class CriticNetwork(nn.Module):
def __init__(self, state_size, device, feature_extractor_model: FeatureExtractorNetwork = None, hidsize=256,
learning_rate=0.5e-3):
super(CriticNetwork, self).__init__()
self.device = device
self.feature_extractor_model = feature_extractor_model
self.model = nn.Sequential(
nn.Linear(state_size, hidsize) if (self.feature_extractor_model is None)
else nn.Linear(feature_extractor_model.nn_layer_outputsize, hidsize),
nn.Tanh(),
nn.Linear(hidsize, 1)
).to(self.device)
self.optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate)
def forward(self, input):
if self.feature_extractor_model is None:
return self.model(input)
return self.model(self.feature_extractor_model(input))
def evaluate(self, states):
state_value = self.forward(states)
return torch.squeeze(state_value)
def save(self, filename):
torch.save(self.model.state_dict(), filename + ".ppo_critic")
torch.save(self.optimizer.state_dict(), filename + ".ppo_optimizer_critic")
def _load(self, obj, filename):
if os.path.exists(filename):
print(' >> ', filename)
try:
obj.load_state_dict(torch.load(filename, map_location=self.device))
except:
print(" >> failed!")
return obj
def load(self, filename):
print("load model from file", filename)
self.model = self._load(self.model, filename + ".ppo_critic")
self.optimizer = self._load(self.optimizer, filename + ".ppo_optimizer_critic")
class PPOPolicy(LearningPolicy):
def __init__(self, state_size, action_size, use_replay_buffer=False, in_parameters=None,
buffer_size=10_000, batch_size=1024, K_epoch=10,
use_shared_feature_extractor=False, clip_grad_norm=0.5,
enable_replay_curiosity_sampling=True,
skip_unfinished_agent=0.0):
print(">> PPOPolicy")
super(PPOPolicy, self).__init__()
# parameters
self.state_size = state_size
self.action_size = action_size
self.ppo_parameters = in_parameters
if self.ppo_parameters is not None:
self.hidsize = self.ppo_parameters.hidden_size
self.buffer_size = self.ppo_parameters.buffer_size
self.batch_size = self.ppo_parameters.batch_size
self.learning_rate = self.ppo_parameters.learning_rate
self.gamma = self.ppo_parameters.gamma
# Device
if self.ppo_parameters.use_gpu and torch.cuda.is_available():
self.device = torch.device("cuda:0")
print("🐇 Using GPU")
else:
self.device = torch.device("cpu")
print("🐢 Using CPU")
else:
self.hidsize = 128
self.learning_rate = 0.5e-4
self.gamma = 0.99
self.buffer_size = buffer_size
self.batch_size = batch_size
self.device = torch.device("cpu")
self.K_epoch = K_epoch
self.surrogate_eps_clip = 0.2
self.weight_loss = 0.5
self.weight_entropy = 0.001
self.lmbda = 0.9
self.skip_unfinished_agent = skip_unfinished_agent
self.buffer_min_size = 0
self.use_replay_buffer = use_replay_buffer
self.enable_replay_curiosity_sampling = enable_replay_curiosity_sampling
self.enable_replay_curiosity_fix_size_batch_size = True
self.current_episode_memory = EpisodeBuffers()
self.agent_done = {}
self.memory = ReplayBuffer(action_size, self.buffer_size, self.batch_size, self.device)
self.loss = 0
self.feature_extractor_model = None
if use_shared_feature_extractor:
self.feature_extractor_model = FeatureExtractorNetwork(state_size,
self.device,
hidsize1=self.hidsize,
hidsize2=self.hidsize)
self.actor = ActorNetwork(state_size,
action_size,
self.device,
feature_extractor_model=self.feature_extractor_model,
hidsize=self.hidsize,
learning_rate=self.learning_rate)
self.critic = CriticNetwork(state_size,
self.device,
feature_extractor_model=self.feature_extractor_model,
hidsize=self.hidsize,
learning_rate=2.0 * self.learning_rate)
self.loss_function = nn.MSELoss()
self.clip_grad_norm = clip_grad_norm
def set_loss_function(self, nn_loss_function):
# nn.BCEWithLogitsLoss()
# nn.MSELoss() * default
# nn.SmoothL1Loss()
self.loss_function = nn_loss_function
def rollout_extra_reward(self, transitions_array, all_done):
return 0
def reset(self, env):
pass
def act(self, handle, state, eps=0.0):
action, _, _ = self.act_intern(handle, state, eps)
return action
def act_intern(self, handle, state, eps=0.0):
torch_state = torch.tensor(state, dtype=torch.float).to(self.device)
dist, action_probs = self.actor.get_actor_dist(torch_state)
action = dist.sample()
return action.item(), dist, action_probs
def step(self, handle, state, action, reward, next_state, done):
if self.agent_done.get(handle, False):
return # remove? if not Flatland?
# record transitions ([state] -> [action] -> [reward, next_state, done])
torch_action = torch.tensor(action, dtype=torch.float).to(self.device)
torch_state = torch.tensor(state, dtype=torch.float).to(self.device)
torch_next_state = torch.tensor(state, dtype=torch.float).to(self.device)
# evaluate actor
dist, _ = self.actor.get_actor_dist(torch_state)
value = self.critic.evaluate(torch_state).detach().cpu().numpy()
next_value = self.critic.evaluate(torch_next_state).detach().cpu().numpy()
action_logprobs = dist.log_prob(torch_action)
transition = (state, action, reward, next_state, action_logprobs.item(), done, value, next_value)
self.current_episode_memory.push_transition(handle, transition)
if done:
self.agent_done.update({handle: done})
def _push_transitions_to_replay_buffer(self,
state_list,
action_list,
reward_list,
state_next_list,
done_list,
prob_a_list,
advantages_list):
for idx in range(len(reward_list)):
state_i = state_list[idx]
action_i = action_list[idx]
reward_i = reward_list[idx]
state_next_i = state_next_list[idx]
done_i = done_list[idx]
prob_action_i = prob_a_list[idx]
advantage_i = advantages_list[idx]
self.memory.add(state_i, action_i, reward_i, state_next_i, done_i, prob_action_i, advantage_i)
def _rollout_episode_buffer(self, transitions_array, all_done):
# build empty lists(arrays)
state_list, action_list, return_list, state_next_list, prob_a_list, done_list, advantages_list = \
[], [], [], [], [], [], []
# set discounted_reward to zero
discounted_reward = 0
extra_reward = self.rollout_extra_reward(transitions_array, all_done)
for transition in transitions_array[::-1]:
state_i, action_i, reward_i, state_next_i, prob_action_i, done_i, value_i, next_value_i = transition
reward_i += extra_reward
extra_reward = 0
state_list.insert(0, state_i)
action_list.insert(0, action_i)
done_list.insert(0, int(done_i))
mask_i = 1.0 - int(done_i)
discounted_reward = reward_i + self.gamma * mask_i * discounted_reward
return_list.insert(0, discounted_reward)
advantages_list.insert(0, discounted_reward - value_i)
state_next_list.insert(0, state_next_i)
prob_a_list.insert(0, prob_action_i)
if self.use_replay_buffer:
self._push_transitions_to_replay_buffer(state_list, action_list,
return_list, state_next_list,
done_list, prob_a_list, advantages_list)
# convert data to torch tensors
states, actions, returns, states_next, dones, prob_actions, advantages = \
torch.tensor(state_list, dtype=torch.float).to(self.device), \
torch.tensor(action_list).to(self.device), \
torch.tensor(return_list, dtype=torch.float).to(self.device), \
torch.tensor(state_next_list, dtype=torch.float).to(self.device), \
torch.tensor(done_list, dtype=torch.float).to(self.device), \
torch.tensor(prob_a_list).to(self.device), \
torch.tensor(advantages_list).to(self.device),
# Normalize the rewards and advantages
returns = (returns - returns.mean()) / (returns.std() + 1.0e-8)
advantages = (advantages - advantages.mean()) / (advantages.std() + 1.0e-8)
return states, actions, returns, states_next, dones, prob_actions, advantages
def _sample_replay_buffer(self, states, actions, returns, states_next, dones, probs_action, advantages):
# https://arxiv.org/pdf/1611.01224v2.pdf
if len(self.memory) > self.buffer_min_size and len(self.memory) > self.batch_size:
states, actions, returns, states_next, dones, probs_action, advantages = self.memory.sample()
states = torch.squeeze(states)
actions = torch.squeeze(actions)
returns = torch.squeeze(returns)
states_next = torch.squeeze(states_next)
dones = torch.squeeze(dones)
probs_action = torch.squeeze(probs_action)
advantages = torch.squeeze(advantages)
            # curiosity filtering - keep only the 50 percent with the largest squared difference
if self.enable_replay_curiosity_sampling:
                # Focus on observed returns that differ most from what the critic expects (estimate) - surprise
deltas = (returns - self.critic.evaluate(states).detach()).pow(2.0)
# Find indices for filtering
indices = torch.nonzero(deltas.ge(deltas.median()), as_tuple=False).squeeze(1)
# Apply filter
states = torch.index_select(states, 0, indices)
actions = torch.index_select(actions, 0, indices)
returns = torch.index_select(returns, 0, indices)
states_next = torch.index_select(states_next, 0, indices)
dones = torch.index_select(dones, 0, indices)
probs_action = torch.index_select(probs_action, 0, indices)
advantages = torch.index_select(advantages, 0, indices)
if self.enable_replay_curiosity_fix_size_batch_size:
                    # Enforce fixed-size batches -> fill up to batch_size by appending randomly sampled data
states2, actions2, returns2, states_next2, dones2, probs_action2, advantages2 = \
self.memory.sample(k_samples=max(10, self.memory.batch_size - len(indices)))
# concatenate the data
states = torch.cat((states, torch.squeeze(states2)))
actions = torch.cat((actions, torch.squeeze(actions2)))
returns = torch.cat((returns, torch.squeeze(returns2)))
states_next = torch.cat((states_next, torch.squeeze(states_next2)))
dones = torch.cat((dones, torch.squeeze(dones2)))
probs_action = torch.cat((probs_action, torch.squeeze(probs_action2)))
advantages = torch.cat((advantages, torch.squeeze(advantages2)))
return states, actions, returns, states_next, dones, probs_action, advantages
def train_net(self):
# All agents have to propagate their experiences made during past episode
all_done = True
for handle in range(len(self.current_episode_memory)):
all_done = all_done and self.agent_done.get(handle, False)
for handle in range(len(self.current_episode_memory)):
if (not self.agent_done.get(handle, False)) and (np.random.random() < self.skip_unfinished_agent):
continue
# Extract agent's episode history (list of all transitions)
agent_episode_history = self.current_episode_memory.get_transitions(handle)
if len(agent_episode_history) > 0:
# Convert the replay buffer to torch tensors (arrays)
states, actions, returns, states_next, dones, probs_action, advantages = \
self._rollout_episode_buffer(agent_episode_history, all_done)
# Optimize policy for K epochs:
do_k_epoch = int(np.ceil(max(1.0, self.K_epoch / max(1, len(self.agent_done)))))
for k_loop in range(do_k_epoch):
# update by random sampling
if self.use_replay_buffer:
states, actions, returns, states_next, dones, probs_action, advantages = \
self._sample_replay_buffer(
states, actions, returns, states_next, dones, probs_action, advantages
)
# Evaluating actions (actor) and values (critic)
logprobs, dist_entropy = self.actor.evaluate(states, actions)
# Finding the ratios (pi_thetas / pi_thetas_replayed):
delta_logprobs = logprobs - probs_action.detach()
ratios = torch.exp(delta_logprobs)
# Calculate the current values
state_values = self.critic.evaluate(states)
                    # Finding Surrogate Loss
surr1 = ratios * advantages
surr2 = torch.clamp(ratios,
1. - self.surrogate_eps_clip,
1. + self.surrogate_eps_clip) * advantages
                    # The loss function is used to estimate the gradient; an entropy-based
                    # heuristic penalizes the policy when it becomes deterministic, because then
                    # the gradient becomes very flat and is no longer useful.
loss_actor = \
-torch.min(surr1, surr2).mean() \
- self.weight_entropy * dist_entropy.mean()
loss_critic = \
self.weight_loss * self.loss_function(state_values, returns)
loss = \
loss_actor + \
loss_critic
# Make a gradient step -> update actor and critic
self.actor.optimizer.zero_grad()
self.critic.optimizer.zero_grad()
loss_actor.backward()
loss_critic.backward()
torch.nn.utils.clip_grad_norm_(self.actor.model.parameters(), self.clip_grad_norm)
torch.nn.utils.clip_grad_norm_(self.critic.model.parameters(), self.clip_grad_norm)
self.actor.optimizer.step()
self.critic.optimizer.step()
# Transfer the current loss to the agents loss (information) for debug purpose only
self.loss = loss.mean().detach().cpu().numpy()
def end_episode(self, train):
if train:
self.train_net()
# Reset all collect transition data
self.current_episode_memory.reset()
self.agent_done = {}
# Checkpointing methods
def save(self, filename):
# print("Saving model from checkpoint:", filename)
if self.feature_extractor_model is not None:
self.feature_extractor_model.save(filename)
self.actor.save(filename)
self.critic.save(filename)
def _load(self, obj, filename):
if os.path.exists(filename):
print(' >> ', filename)
try:
obj.load_state_dict(torch.load(filename, map_location=self.device))
except:
print(" >> failed!")
else:
print(" >> file not found!")
return obj
def load(self, filename):
print("load policy and optimizer from file", filename)
if self.feature_extractor_model is not None:
self.feature_extractor_model.load(filename)
self.actor.load(filename)
self.critic.load(filename)
def clone(self):
policy = PPOPolicy(self.state_size, self.action_size)
if self.feature_extractor_model is not None:
policy.feature_extractor_model = copy.deepcopy(self.feature_extractor_model)
policy.actor = copy.deepcopy(self.actor)
policy.critic = copy.deepcopy(self.critic)
return self
class FLATLandPPOPolicy(PPOPolicy):
def __init__(self, state_size, action_size, use_replay_buffer=False, in_parameters=None,
buffer_size=10_000, batch_size=1024, K_epoch=10,
use_shared_feature_extractor=False, clip_grad_norm=0.5, enable_replay_curiosity_sampling=True,
skip_unfinished_agent=0.0):
print(">> FLATLandPPOPolicy")
super(FLATLandPPOPolicy, self).__init__(state_size, action_size, use_replay_buffer, in_parameters,
buffer_size, batch_size, K_epoch, use_shared_feature_extractor,
clip_grad_norm, enable_replay_curiosity_sampling,
skip_unfinished_agent)
self.deadlocked_agent = {}
def act(self, handle, state, eps=0.0):
return super(FLATLandPPOPolicy, self).act(handle, state, eps)
def rollout_extra_reward(self, transitions_array, all_done):
return 0
if all_done and len(self.deadlocked_agent.keys()) == 0:
return pow(len(self.agent_done), 2.0)
return 0
def end_episode(self, train):
super(FLATLandPPOPolicy, self).end_episode(train)
self.deadlocked_agent = {}
def shape_reward(self, handle, action, state, reward, done, deadlocked=None):
if self.deadlocked_agent.get(handle, False):
return 0.0
if self.agent_done.get(handle, False):
return 0.0
is_deadlocked = False
if deadlocked is not None:
is_deadlocked = deadlocked
if is_deadlocked:
self.deadlocked_agent.update({handle: True})
return -0.01
if done:
return 1.0 + 1000.0 / (1.0 + len(self.current_episode_memory.get_transitions(handle)))
return -0.00001
|
the-stack_106_18240
|
from dacy.augmenters.keyboard import Keyboard, qwerty_da_array
from dacy.augmenters import create_keyboard_augmenter
from spacy.lang.da import Danish
from spacy.training import Example
def test_Keyboard():
kb = Keyboard(keyboard_array = qwerty_da_array)
assert kb.coordinate("q") == (1, 0)
assert kb.is_shifted("q") is False
assert kb.euclidian_distance("q", "a") <= 1
assert len(set(kb.all_keys())) > 28*2
assert "w" in kb.get_neighboors("q")
kb.create_distance_dict()
def test_make_keyboard_augmenter():
aug = create_keyboard_augmenter(doc_level=1, char_level=1, keyboard="QWERTY_DA")
nlp = Danish()
doc = nlp("q")
example = Example(doc, doc)
examples = aug(nlp, example)
example_aug = next(examples)
assert example_aug.x.text in "12wsa"
|
the-stack_106_18241
|
"""
Document Library
Copyright: 2011-2021 (c) Sahana Software Foundation
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("DocumentEntityModel",
"DocumentLibrary",
"DocumentTagModel",
"DocumentCKEditorModel",
"DocumentDataCardModel",
"doc_image_represent",
"doc_document_list_layout",
)
import os
from io import BytesIO
from uuid import uuid4
from gluon import *
from gluon.storage import Storage
from ..core import *
# =============================================================================
class DocumentEntityModel(DataModel):
names = ("doc_entity",
)
def model(self):
T = current.T
settings = current.deployment_settings
# ---------------------------------------------------------------------
# Document-referencing entities
#
entity_types = {"asset_asset": T("Asset"),
"cap_resource": T("CAP Resource"),
"cms_post": T("Post"),
"cr_shelter": T("Shelter"),
"deploy_mission": T("Mission"),
"dc_response": T(settings.get_dc_response_label()),
"dvr_case": T("Case"),
"dvr_case_activity": T("Case Activity"),
"event_event": T("Event"),
"event_incident": T("Incident"),
"event_incident_report": T("Incident Report"),
"event_scenario": T("Scenario"),
"event_sitrep": T("Situation Report"),
"fin_expense": T("Expense"),
"fire_station": T("Fire Station"),
"hms_hospital": T("Hospital"),
"hrm_human_resource": T("Human Resource"),
"hrm_training_event_report": T("Training Event Report"),
"inv_adj": T("Stock Adjustment"),
"inv_recv": T("Incoming Shipment"),
"inv_send": T("Sent Shipment"),
"inv_warehouse": T("Warehouse"),
"pr_group": T("Team"),
"project_project": T("Project"),
"project_activity": T("Project Activity"),
"project_task": T("Task"),
"org_facility": T("Facility"),
"org_group": T("Organization Group"),
"org_office": T("Office"),
"req_need": T("Need"),
"req_need_response": T("Activity Group"),
"req_req": T("Request"),
"security_seized_item": T("Seized Item"),
}
tablename = "doc_entity"
self.super_entity(tablename, "doc_id", entity_types)
# Components
self.add_components(tablename,
doc_document = "doc_id",
doc_image = "doc_id",
)
# =============================================================================
class DocumentLibrary(DataModel):
names = ("doc_document",
"doc_document_id",
"doc_image",
)
def model(self):
T = current.T
db = current.db
s3 = current.response.s3
settings = current.deployment_settings
person_comment = self.pr_person_comment
person_id = self.pr_person_id
location_id = self.gis_location_id
organisation_id = self.org_organisation_id
NONE = current.messages["NONE"]
# Shortcuts
add_components = self.add_components
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
folder = current.request.folder
super_link = self.super_link
# ---------------------------------------------------------------------
# Documents
#
tablename = "doc_document"
define_table(tablename,
# Instance
super_link("source_id", "stats_source"),
# Component not instance
super_link("doc_id", "doc_entity"),
# @ToDo: Remove since Site Instances are doc entities?
super_link("site_id", "org_site"),
Field("file", "upload",
label = T("File"),
autodelete = True,
length = current.MAX_FILENAME_LENGTH,
represent = self.doc_file_represent,
# upload folder needs to be visible to the download() function as well as the upload
uploadfolder = os.path.join(folder,
"uploads"),
),
Field("mime_type",
readable = False,
writable = False,
),
Field("name", length=128,
# Allow Name to be added onvalidation
requires = IS_LENGTH(128),
label = T("Name")
),
Field("url",
label = T("URL"),
represent = lambda url: \
url and A(url, _href=url) or NONE,
requires = IS_EMPTY_OR(IS_URL()),
),
# Mailmerge template?
Field("is_template", "boolean",
default = False,
readable = False,
writable = False,
),
person_id(
# Enable when-required
label = T("Author"),
readable = False,
writable = False,
comment = person_comment(T("Author"),
T("The Author of this Document (optional)"))
),
organisation_id(# Enable when-required
readable = False,
writable = False,
),
s3_date(label = T("Date Published"),
),
# @ToDo: Move location to link table
location_id(# Enable when-required
readable = False,
writable = False,
),
s3_comments(),
Field("has_been_indexed", "boolean",
default = False,
readable = False,
writable = False,
),
Field("checksum",
readable = False,
writable = False,
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Reference Document"),
title_display = T("Document Details"),
title_list = T("Documents"),
title_update = T("Edit Document"),
label_list_button = T("List Documents"),
label_delete_button = T("Delete Document"),
msg_record_created = T("Document added"),
msg_record_modified = T("Document updated"),
msg_record_deleted = T("Document deleted"),
msg_list_empty = T("No Documents found")
)
# Filter Widgets
# - define in-template if-required
# Resource Configuration
if settings.get_base_solr_url():
onaccept = self.document_onaccept
ondelete = self.document_ondelete
else:
onaccept = None
ondelete = None
configure(tablename,
context = {"organisation": "organisation_id",
"person": "person_id",
"site": "site_id",
},
deduplicate = self.document_duplicate,
list_layout = doc_document_list_layout,
onaccept = onaccept,
ondelete = ondelete,
onvalidation = self.document_onvalidation,
super_entity = "stats_source",
)
# Reusable field
represent = doc_DocumentRepresent(lookup = tablename,
fields = ("name", "file", "url"),
labels = "%(name)s",
show_link = True)
document_id = S3ReusableField("document_id", "reference %s" % tablename,
label = T("Document"),
ondelete = "CASCADE",
represent = represent,
requires = IS_ONE_OF(db,
"doc_document.id",
represent),
)
add_components(tablename,
doc_document_tag = document_id,
msg_attachment = document_id,
)
# ---------------------------------------------------------------------
# Images
#
# @ToDo: Field to determine which is the default image to use for
# e.g. a Map popup (like the profile picture)
# readable/writable=False except in the cases where-needed
#
doc_image_type_opts = {1: T("Photograph"),
2: T("Map"),
3: T("Document Scan"),
99: T("other")
}
tablename = "doc_image"
define_table(tablename,
# Component not instance
super_link("doc_id", "doc_entity"),
super_link("pe_id", "pr_pentity"), # @ToDo: Remove & make Persons doc entities instead?
super_link("site_id", "org_site"), # @ToDo: Remove since Site Instances are doc entities?
Field("file", "upload",
autodelete = True,
label = T("File"),
length = current.MAX_FILENAME_LENGTH,
represent = doc_image_represent,
requires = IS_EMPTY_OR(
IS_IMAGE(extensions = (s3.IMAGE_EXTENSIONS)),
# Distinguish from prepop
null = "",
),
# upload folder needs to be visible to the download() function as well as the upload
uploadfolder = os.path.join(folder,
"uploads",
"images"),
widget = S3ImageCropWidget((600, 600)),
),
Field("mime_type",
readable = False,
writable = False,
),
Field("name", length=128,
label = T("Name"),
# Allow Name to be added onvalidation
requires = IS_LENGTH(128),
),
Field("url",
label = T("URL"),
requires = IS_EMPTY_OR(IS_URL()),
),
Field("type", "integer",
default = 1,
label = T("Image Type"),
represent = represent_option(doc_image_type_opts),
requires = IS_IN_SET(doc_image_type_opts,
zero=None),
),
person_id(label = T("Author"),
),
organisation_id(),
s3_date(label = T("Date Taken"),
),
# @ToDo: Move location to link table
location_id(),
s3_comments(),
Field("checksum",
readable = False,
writable = False,
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Photo"),
title_display = T("Photo Details"),
title_list = T("Photos"),
title_update = T("Edit Photo"),
label_list_button = T("List Photos"),
label_delete_button = T("Delete Photo"),
msg_record_created = T("Photo added"),
msg_record_modified = T("Photo updated"),
msg_record_deleted = T("Photo deleted"),
msg_list_empty = T("No Photos found"))
# Resource Configuration
configure(tablename,
deduplicate = self.document_duplicate,
onvalidation = lambda form: \
self.document_onvalidation(form, document=False)
)
# ---------------------------------------------------------------------
# Pass model-global names to response.s3
#
return {"doc_document_id": document_id,
}
# -------------------------------------------------------------------------
def defaults(self):
""" Safe defaults if the module is disabled """
return {"doc_document_id": S3ReusableField.dummy("document_id"),
}
# -------------------------------------------------------------------------
@staticmethod
def doc_file_represent(filename):
"""
File representation
Args:
filename: the stored file name (field value)
Returns:
a link to download the file
"""
if filename:
try:
# Check whether file exists and extract the original
# file name from the stored file name
origname = current.db.doc_document.file.retrieve(filename)[0]
except IOError:
return current.T("File not found")
else:
return A(origname,
_href=URL(c="default", f="download", args=[filename]))
else:
return current.messages["NONE"]
# -------------------------------------------------------------------------
@staticmethod
def document_duplicate(item):
""" Import item de-duplication """
data = item.data
query = None
filename = data.get("file")
if filename:
table = item.table
query = (table.file == filename)
else:
url = data.get("url")
if url:
table = item.table
query = (table.url == url)
if query:
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def document_onvalidation(form, document=True):
""" Form validation for both, documents and images """
form_vars = form.vars
doc = form_vars.file
if doc is None:
            # If this is a prepop, then the file is not in the form
            # Interactive forms with an empty doc have this as "" not None
return
if not document:
encoded_file = form_vars.get("imagecrop-data", None)
if encoded_file:
# S3ImageCropWidget
import base64
metadata, encoded_file = encoded_file.split(",")
#filename, datatype, enctype = metadata.split(";")
filename = metadata.split(";", 1)[0]
f = Storage()
f.filename = uuid4().hex + filename
f.file = BytesIO(base64.b64decode(encoded_file))
doc = form_vars.file = f
if not form_vars.name:
form_vars.name = filename
if not hasattr(doc, "file"):
# Record update without new file upload => keep existing
record_id = current.request.post_vars.id
if record_id:
db = current.db
if document:
tablename = "doc_document"
else:
tablename = "doc_image"
table = db[tablename]
record = db(table.id == record_id).select(table.file,
limitby = (0, 1),
).first()
if record:
doc = record.file
if not hasattr(doc, "file") and not doc and not form_vars.url:
if document:
msg = current.T("Either file upload or document URL required.")
else:
msg = current.T("Either file upload or image URL required.")
if "file" in form_vars:
form.errors.file = msg
if "url" in form_vars:
form.errors.url = msg
if hasattr(doc, "file"):
name = form_vars.name
if not name:
# Use filename as document/image title
form_vars.name = doc.filename
# Do a checksum on the file to see if it's a duplicate
#import cgi
#if isinstance(doc, cgi.FieldStorage) and doc.filename:
# f = doc.file
# form_vars.checksum = doc_checksum(f.read())
# f.seek(0)
# if not form_vars.name:
# form_vars.name = doc.filename
#if form_vars.checksum is not None:
# # Duplicate allowed if original version is deleted
# query = ((table.checksum == form_vars.checksum) & \
# (table.deleted == False))
# result = db(query).select(table.name,
# limitby=(0, 1)).first()
# if result:
# doc_name = result.name
# form.errors["file"] = "%s %s" % \
# (T("This file already exists on the server as"), doc_name)
# -------------------------------------------------------------------------
@staticmethod
def document_onaccept(form):
"""
Build a full-text index
"""
form_vars = form.vars
doc = form_vars.file
table = current.db.doc_document
document = json.dumps({"filename": doc,
"name": table.file.retrieve(doc)[0],
"id": form_vars.id,
})
current.s3task.run_async("document_create_index",
args = [document],
)
# -------------------------------------------------------------------------
@staticmethod
def document_ondelete(row):
"""
Remove the full-text index
"""
db = current.db
table = db.doc_document
record = db(table.id == row.id).select(table.file,
limitby=(0, 1)).first()
document = json.dumps({"filename": record.file,
"id": row.id,
})
current.s3task.run_async("document_delete_index",
args = [document],
)
# =============================================================================
class DocumentTagModel(DataModel):
"""
Document Tags
"""
names = ("doc_document_tag",)
def model(self):
T = current.T
# ---------------------------------------------------------------------
# Document Tags
# - Key-Value extensions
# - can be used to provide conversions to external systems
# - can be a Triple Store for Semantic Web support
# - can be used to add a document type
# - can be used to add custom fields
#
tablename = "doc_document_tag"
self.define_table(tablename,
self.doc_document_id(),
# key is a reserved word in MySQL
Field("tag",
label = T("Key"),
),
Field("value",
label = T("Value"),
),
s3_comments(),
*s3_meta_fields())
self.configure(tablename,
deduplicate = S3Duplicate(primary = ("document_id",
"tag",
),
),
)
# Pass names back to global scope (s3.*)
return None
# =============================================================================
def doc_image_represent(filename):
"""
Represent an image as a clickable thumbnail
Args:
filename: name of the image file
"""
if not filename:
return current.messages["NONE"]
return DIV(A(IMG(_src=URL(c="default", f="download",
args=filename),
_height=40),
_class="zoom",
_href=URL(c="default", f="download",
args=filename)))
# @todo: implement/activate the JavaScript for this:
#anchor = "zoom-media-image-%s" % uuid4()
#return DIV(A(IMG(_src=URL(c="default", f="download",
#args=filename),
#_height=40),
#_class="zoom",
#_href="#%s" % anchor),
#DIV(IMG(_src=URL(c="default", f="download",
#args=filename),
#_width=600),
#_id="%s" % anchor,
#_class="hide"))
# =============================================================================
def doc_checksum(docstr):
""" Calculate a checksum for a file """
import hashlib
converted = hashlib.sha1(docstr).hexdigest()
return converted
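# Illustrative usage (hypothetical path): the helper expects raw bytes, e.g.
#
#     with open(filepath, "rb") as f:
#         checksum = doc_checksum(f.read())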
# =============================================================================
def doc_document_list_layout(list_id, item_id, resource, rfields, record):
"""
Default dataList item renderer for Documents, e.g. on the HRM Profile
NB The CSS classes here refer to static/themes/bootstrap/cards.css & newsfeed.css
- so this CSS either needs moving to core or else this needs modifying for default CSS
Args:
list_id: the HTML ID of the list
item_id: the HTML ID of the item
resource: the CRUDResource to render
rfields: the S3ResourceFields to render
record: the record as dict
"""
record_id = record["doc_document.id"]
item_class = "thumbnail"
raw = record._row
title = record["doc_document.name"]
filename = raw["doc_document.file"] or ""
url = raw["doc_document.url"] or ""
comments = raw["doc_document.comments"] or ""
if filename:
try:
# Check whether file exists and extract the original
# file name from the stored file name
origname = current.s3db.doc_document.file.retrieve(filename)[0]
except (IOError, TypeError):
origname = current.messages["NONE"]
doc_url = URL(c="default", f="download", args=[filename])
body = P(ICON("attachment"),
" ",
SPAN(A(origname,
_href=doc_url,
)
),
" ",
_class="card_1_line",
)
elif url:
body = P(ICON("link"),
" ",
SPAN(A(url,
_href=url,
)),
" ",
_class="card_1_line",
)
else:
# Shouldn't happen!
body = P(_class="card_1_line")
# Edit Bar
permit = current.auth.s3_has_permission
table = current.s3db.doc_document
if permit("update", table, record_id=record_id):
edit_btn = A(ICON("edit"),
_href=URL(c="doc", f="document",
args=[record_id, "update.popup"],
vars={"refresh": list_id,
"record": record_id}),
_class="s3_modal",
_title=current.T("Edit Document"),
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(ICON("delete"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Render the item
item = DIV(DIV(ICON("icon"),
SPAN(" %s" % title,
_class="card-title"),
edit_bar,
_class="card-header",
),
DIV(DIV(DIV(body,
P(SPAN(comments),
" ",
_class="card_manylines",
),
_class="media",
),
_class="media-body",
),
_class="media",
),
_class=item_class,
_id=item_id,
)
return item
# =============================================================================
class doc_DocumentRepresent(S3Represent):
""" Representation of Documents """
def link(self, k, v, row=None):
"""
Represent a (key, value) as hypertext link.
Args:
k: the key (doc_document.id)
v: the representation of the key
row: the row with this key
"""
if row:
try:
filename = row["doc_document.file"]
url = row["doc_document.url"]
except AttributeError:
return v
else:
if filename:
url = URL(c="default", f="download", args=filename)
return A(v, _href=url)
elif url:
return A(v, _href=url)
return v
# =============================================================================
class DocumentCKEditorModel(DataModel):
"""
Storage for Images used by CKEditor
- and hence the s3_richtext_widget
Based on https://github.com/timrichardson/web2py_ckeditor4
"""
names = ("doc_ckeditor",
"doc_filetype",
)
def model(self):
#T = current.T
# ---------------------------------------------------------------------
# Images
#
tablename = "doc_ckeditor"
self.define_table(tablename,
Field("title", length=255),
Field("filename", length=255),
Field("flength", "integer"),
Field("mime_type", length=128),
Field("upload", "upload",
#uploadfs = self.settings.uploadfs,
requires = [IS_NOT_EMPTY(),
IS_LENGTH(maxsize=10485760, # 10 Mb
minsize=0)],
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"doc_filetype": self.doc_filetype,
}
# -------------------------------------------------------------------------
@staticmethod
def doc_filetype(filename):
"""
Takes a filename and returns a category based on the file type.
Categories: word, excel, powerpoint, flash, pdf, image, video, audio, archive, other.
"""
ftype = "other"
parts = os.path.splitext(filename)
if len(parts) > 1:
ext = parts[1][1:].lower()
if ext in ("png", "jpg", "jpeg", "gif"):
ftype = "image"
elif ext in ("avi", "mp4", "m4v", "ogv", "wmv", "mpg", "mpeg"):
ftype = "video"
elif ext in ("mp3", "m4a", "wav", "ogg", "aiff"):
ftype = "audio"
elif ext in ("zip", "7z", "tar", "gz", "tgz", "bz2", "rar"):
ftype = "archive"
elif ext in ("doc", "docx", "dot", "dotx", "rtf"):
ftype = "word"
elif ext in ("xls", "xlsx", "xlt", "xltx", "csv"):
ftype = "excel"
elif ext in ("ppt", "pptx"):
ftype = "powerpoint"
elif ext in ("flv", "swf"):
ftype = "flash"
elif ext == "pdf":
ftype = "pdf"
return ftype
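# Illustrative behaviour of the mapping above:
#
#     doc_filetype("report.pdf")      # -> "pdf"
#     doc_filetype("photo.JPG")       # -> "image"  (extension check is case-insensitive)
#     doc_filetype("backup.tar.gz")   # -> "archive" (only the last suffix is inspected)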
# =============================================================================
class DocumentDataCardModel(DataModel):
"""
Model to manage context-specific features of printable
data cards (S3PDFCard)
"""
names = ("doc_card_config",
"doc_card_types",
"doc_update_card_type_requires",
)
def model(self):
T = current.T
#db = current.db
s3 = current.response.s3
#define_table = self.define_table
crud_strings = s3.crud_strings
# ---------------------------------------------------------------------
# Card Types
#
card_types = {"VOLID": T("Volunteer ID Card"),
}
# ---------------------------------------------------------------------
# Card Configuration
#
uploadfolder = os.path.join(current.request.folder, "uploads", "signatures")
tablename = "doc_card_config"
self.define_table(tablename,
# Context links (e.g. Organisation):
self.org_organisation_id(),
# Card Type:
Field("card_type",
label = T("Card Type"),
requires = IS_IN_SET(card_types,
sort = True,
zero = None,
),
represent = represent_option(card_types),
),
# Card Feature Configurations:
Field("authority_statement", "text",
label = T("Authority Statement"),
represent = s3_text_represent,
widget = s3_comments_widget,
),
Field("org_statement", "text",
label = T("Organization Statement"),
represent = s3_text_represent,
widget = s3_comments_widget,
),
Field("signature", "upload",
label = T("Signature"),
autodelete = True,
length = current.MAX_FILENAME_LENGTH,
represent = doc_image_represent,
requires = IS_EMPTY_OR(IS_IMAGE(extensions=(s3.IMAGE_EXTENSIONS)),
# Distinguish from prepop
null = "",
),
uploadfolder = uploadfolder,
),
Field("signature_text", "text",
label = T("Signature Text"),
represent = s3_text_represent,
widget = s3_comments_widget,
),
Field("validity_period", "integer",
default = 12,
label = T("Validity Period (Months)"),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0)),
represent = lambda v: (T("%(months)s months") % {"months": v}) if v else "-",
),
s3_comments(),
*s3_meta_fields())
# Table configuration
self.configure(tablename,
deduplicate = S3Duplicate(primary=("organisation_id", "card_type"),
),
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Card Configuration"),
title_display = T("Card Configuration Details"),
title_list = T("Card Configuration"),
title_update = T("Edit Card Configuration"),
label_list_button = T("List Card Configurations"),
label_delete_button = T("Delete Card Configuration"),
msg_record_created = T("Card Configuration created"),
msg_record_modified = T("Card Configuration updated"),
msg_record_deleted = T("Card Configuration deleted"),
msg_list_empty = T("No Card Configuration currently registered"),
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"doc_card_types": card_types,
"doc_update_card_type_requires": self.update_card_type_requires,
}
# -------------------------------------------------------------------------
@classmethod
def defaults(cls):
""" Safe defaults for names in case the module is disabled """
return {"doc_card_types": {},
"doc_update_card_type_requires": cls.update_card_type_requires,
}
# -------------------------------------------------------------------------
@staticmethod
def update_card_type_requires(record_id, organisation_id):
"""
Make sure each card type can be defined only once per org
Args:
record_id: the current doc_card_config record ID
(when currently editing a record)
organisation_id: the organisation record ID
"""
s3db = current.s3db
# Find out which card types are already configured for this org
table = s3db.doc_card_config
query = (table.organisation_id == organisation_id) & \
(table.deleted == False)
rows = current.db(query).select(table.id,
table.card_type,
)
this = None
defined = set()
for row in rows:
if str(row.id) == str(record_id):
this = row.card_type
defined.add(row.card_type)
# Determine which card types can still be configured
card_types = {k: v for (k, v) in s3db.doc_card_types.items()
if k == this or k not in defined}
# Limit selection to these card types
table.card_type.requires = IS_IN_SET(card_types,
sort = True,
zero = None,
)
if not card_types:
# No further card types can be configured
s3db.configure("doc_card_config",
insertable = False,
)
elif this and list(card_types.keys()) == [this]:
# All other types are already configured => can't change this
table.card_type.writable = False
# END =========================================================================
|
the-stack_106_18242
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
def prerelease_local_scheme(version):
"""Return local scheme version unless building on master in CircleCI.
This function returns the local scheme version number
(e.g. 0.0.0.dev<N>+g<HASH>) unless building on CircleCI for a
pre-release in which case it ignores the hash and produces a
PEP440 compliant pre-release version number (e.g. 0.0.0.dev<N>).
"""
from setuptools_scm.version import get_local_node_and_date
if os.getenv('CIRCLE_BRANCH') == 'master':
return ''
else:
return get_local_node_and_date(version)
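# Illustrative outcome (version numbers are examples only): if setuptools_scm
# computes 0.0.0.dev42, a CircleCI master build publishes "0.0.0.dev42", while
# any other build keeps the local suffix, e.g. "0.0.0.dev42+g1a2b3c4".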
with open('README.rst') as f:
readme = f.read()
installReqs = [
'boto3',
'botocore',
# CherryPy version is restricted due to a bug in versions >=11.1
# https://github.com/cherrypy/cherrypy/issues/1662
'CherryPy',
'click',
'click-plugins',
'dogpile.cache',
'filelock',
"funcsigs ; python_version < '3'",
'jsonschema',
'Mako',
'passlib [bcrypt,totp]',
'pymongo>=3.6',
'PyYAML',
'psutil',
'pyOpenSSL',
# python-dateutil is pinned because of github.com/boto/botocore/issues/1872
'python-dateutil<=2.8.0',
'pytz',
'requests',
"shutilwhich ; python_version < '3'",
'six>=1.9',
]
extrasReqs = {
'sftp': [
'paramiko'
],
'mount': [
'fusepy>=3.0'
]
}
setup(
name='girder',
use_scm_version={'local_scheme': prerelease_local_scheme},
setup_requires=['setuptools-scm'],
description='Web-based data management platform',
long_description=readme,
author='Kitware, Inc.',
author_email='[email protected]',
url='https://girder.readthedocs.org',
license='Apache 2.0',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6'
],
packages=find_packages(
exclude=('girder.test', 'tests.*', 'tests', '*.plugin_tests.*', '*.plugin_tests')
),
include_package_data=True,
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
install_requires=installReqs,
extras_require=extrasReqs,
zip_safe=False,
entry_points={
'console_scripts': [
'girder-server = girder.cli.serve:main',
'girder-sftpd = girder.cli.sftpd:main',
'girder-shell = girder.cli.shell:main',
'girder = girder.cli:main'
],
'girder.cli_plugins': [
'serve = girder.cli.serve:main',
'mount = girder.cli.mount:main',
'shell = girder.cli.shell:main',
'sftpd = girder.cli.sftpd:main',
'build = girder.cli.build:main'
]
}
)
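# Illustrative install commands (not executed by this file):
#
#   pip install .               # core Girder server
#   pip install .[sftp,mount]   # include the optional SFTP and FUSE-mount extras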
|
the-stack_106_18243
|
# encoding: utf-8
import codecs
import os
from xlwt import *
import xlrd
import numpy as np
import xgboost as xgb
from xgboost import plot_importance
import time
import pickle
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from pylab import mpl
mpl.rcParams['font.sans-serif'] = ['Arial Unicode MS']  # Default font so matplotlib can render CJK characters
mpl.rcParams['axes.unicode_minus'] = False  # Avoid minus signs rendering as boxes in saved figures
# Parameters
version = 'v1_0'
date = '20210408-20210414'
dataset = 'all'
# Data paths: model and test set
feature_dir = "/Users/sunhaowen/Documents/data/3.ivr_log/final_data/{}".format(version)
# Load the model
bst = pickle.load(open(feature_dir + "/model.dat", "rb"))
def read_txt(file_name):
with codecs.open(file_name, "r") as fr:
data = fr.read()
data = [line for line in data.split("\n") if line]
return data
def write_txt(data, file_name):
fw = open(file_name, "w+")
for line in data:
if isinstance(line, list):
fw.write("\t".join([str(elem) for elem in line]) + "\n")
else:
fw.write(str(line) + "\n")
fw.close()
# Open the test file
data_dir = feature_dir
if not os.path.exists(data_dir):
os.makedirs(data_dir)
file_name = 'acfeature_{}_'.format(date)
xg_test = xgb.DMatrix(data_dir + "/" + file_name + "{}.txt".format(dataset))
data = read_txt(data_dir + "/" + file_name + "{}.txt".format(dataset))
# Extract the labels (Y)
test_Y = []
for line in data:
line_split = line.split(" ")
test_Y.append(int(line_split[0]))
test_Y = np.array(test_Y)
param = {}
param['num_class'] = 13
# Note: this convention has been changed since xgboost-unity
# get prediction, this is in 1D array, need reshape to (ndata, nclass)
time_begin = time.time()
yprob = bst.predict(xg_test).reshape(test_Y.shape[0], param['num_class'])
ylabel = np.argmax(yprob, axis=1)
time_end = time.time()
print('Prediction time: {}'.format(time_end - time_begin))
print('predicting, classification error=%f' % (
sum(int(ylabel[i]) != test_Y[i] for i in range(len(test_Y))) / float(len(test_Y))))
result = []
for cur_label, line in zip(test_Y, yprob):
one_result = []
sorted_result = sorted(enumerate(line), key=lambda x: x[1], reverse=True)
for index in range(len(sorted_result)):
        # Keep only the top-N predictions (top-1 here)
if index >= 1:
break
one_result.append(str(int(sorted_result[index][0])) + "_" + str(float(sorted_result[index][1])))
result.append(str(int(cur_label)) + "\t" + "\t".join(one_result))
# Write predictions to file
write_txt(result, feature_dir + "/pred_result/pred_{}_{}_py.txt".format(date, dataset))
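# A minimal follow-up sketch (reusing ylabel and test_Y computed above) for a
# per-class error breakdown with numpy:
#
# for cls in range(param['num_class']):
#     mask = test_Y == cls
#     if mask.sum():
#         err = np.mean(ylabel[mask] != test_Y[mask])
#         print("class {}: n={}, error={:.4f}".format(cls, mask.sum(), err))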
|
the-stack_106_18244
|
from threading import Thread
from typing import Optional
from torch.utils.data.dataset import IterableDataset as TorchIterableDataset
import persia.env as env
from persia.ctx import cnt_ctx
from persia.logger import get_default_logger
from persia.prelude import (
PyPersiaBatchDataChannel,
PyPersiaBatchDataReceiver,
PyPersiaBatchDataSender,
init_responder,
)
_logger = get_default_logger()
class IterableDataset(TorchIterableDataset):
r"""IterableChannelBase wrap the PyPersiaBatchDataChannel that provide the channel sender and
receiver.
Arguments:
buffer_size (int): PyPersiaBatchDataChannel buffer size
"""
def __init__(self, buffer_size: int):
self.persia_batch_channel = PyPersiaBatchDataChannel(buffer_size)
@property
def receiver(self) -> PyPersiaBatchDataReceiver:
"""Get PyPersiaBatchDataReceiver python wrapper"""
return self.persia_batch_channel.get_receiver()
@property
def sender(self) -> PyPersiaBatchDataSender:
"""Get PyPersiaBatchDataSender python wrapper"""
return self.persia_batch_channel.get_sender()
class StreamingDataset(IterableDataset):
r"""NatsStreamingChannel receive data from nats publisher
Arguments:
buffer_size (int): PyPersiaBatchDataChannel buffer size
"""
def __init__(
self,
buffer_size: int,
):
super(StreamingDataset, self).__init__(buffer_size)
self.initialized = False
def __iter__(self):
if not self.initialized:
world_size = env.get_world_size()
assert world_size != -1, "WORLD_SIZE not set"
init_responder(world_size, self.sender)
_logger.info("initialize the responder")
self.initialized = True
while True:
yield None
class PersiaDataset(IterableDataset):
r"""Persia data channel that provide asynchronous data handler feature to improve the performance of data preprocess.
Not support synchronous data handler temporary.
Arguments:
buffer_size (int): PyPersiaBatchDataChannel buffer size
async_iterator (bool, optional): launch the thread to generate the data asynchronous
"""
def __init__(
self,
buffer_size: int,
async_iterator: bool = True,
):
super(PersiaDataset, self).__init__(
buffer_size,
)
self.async_iterator = async_iterator
def fetch_data(self, sender: PyPersiaBatchDataSender):
"""Callback function to put the data into PyPersiaBatchDataSender
Arguments:
sender (PyPersiaBatchDataSender): PersiaBatchData sender channel to send the generate data
to the PersiaBatchData receive channel
"""
raise NotImplementedError("implement this function to fetch data")
def __iter__(self):
if self.async_iterator:
handler = Thread(target=self.fetch_data, args=(self.sender,), daemon=True)
handler.start()
for _val in range(len(self)):
yield _val
if self.async_iterator:
handler.join()
class Dataloder(object):
r"""Dataloder provide the interface to fetch the PythonBatchData from PyForward
wrapper.
Arguments:
dataset (IterableChannelBase): dataset for Dataloder to retrive replica info and sender channel
forward_buffer_size: (int, optional): gpu forward channel buffer size, this args effect the gpu memory cost
is_training (bool, optional): whether current forward status is training or not
timeout_ms (int, optional): timeout for PyForward to fetch data, millisecond unit
num_workers (int, optional): spawn thread worker number for PyForward to lookup embedding and PythonBatchData prefetch
reproducible (bool, optional): iterate the data in fixed order, make the dataflow deterministic
embedding_staleness (int, optional): max number of batched staleness embedding each rank. A staleness embedding means it prefetched from embedding server before gradient updated.
"""
def __init__(
self,
dataset: IterableDataset,
forward_buffer_size: int = 10,
is_training: bool = True,
timeout_ms: int = 1000 * 60 * 10,
num_workers: int = 10,
reproducible: bool = False,
embedding_staleness: Optional[int] = None,
):
# dynamic import the PyForward due to conditional compilation
from persia.prelude import PyForward
self.dataset = dataset
self.timeout_ms = timeout_ms
self.num_workers = num_workers
current_ctx = cnt_ctx()
        assert current_ctx is not None, "Current context is None!"
self.forward_engine = PyForward(
forward_buffer_size,
is_training,
reproducible,
embedding_staleness,
)
self.forward_engine.set_input_channel(dataset.receiver)
self.forward_engine.launch(self.num_workers)
def __iter__(self):
for _ in self.dataset:
try:
yield self.forward_engine.get_batch(self.timeout_ms)
except TimeoutError:
_logger.warn("get_batch time out, stop iter stream data")
break
def __len__(self):
return len(self.dataset)
def __del__(self):
self.forward_engine.shutdown()
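# A minimal usage sketch (assumptions: a persia training context from
# persia.ctx is already entered, WORLD_SIZE is set for the responder, and the
# training-loop body is elided):
#
# dataset = StreamingDataset(buffer_size=15)
# loader = Dataloder(dataset, forward_buffer_size=10, is_training=True)
# for batch in loader:
#     ...  # unpack the PythonBatchData and run the forward/backward pass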
|
the-stack_106_18245
|
"""
Convert Datasets for the Step Placement models into datasets for the
Step Generation model.
"""
import os
import numpy as np
from sklearn.metrics import f1_score
import h5py
import warnings
from deepSM import SMData
from deepSM import SMDUtils
from deepSM import StepPlacement
from deepSM import utils
from deepSM import bpm_estimator
import torch
from torch.utils import data as datautils
__version__ = '1-0'
from importlib import reload
reload(utils)
def compute_thresh(model, dataset):
"""
Not in use. See bin/get_thresholds.py.
"""
# Currently unused, due to gen dataset directly using labels.
# Should use this for final prediction pipeline.
# Chosen from optimization of F1 score.
    loader = datautils.DataLoader(dataset)
    output_list, labels_list = model.predict(loader, return_list=True)
outputs = torch.cat(list(map(lambda x: x[0,:,0], output_list)))
labels = torch.cat(list(map(lambda x: x[0,:], labels_list)))
probs = torch.sigmoid(outputs).numpy()
def f1_fn(thresh):
preds = (probs > thresh).astype(int)
return f1_score(labels, preds)
scores = []
    threshes = np.linspace(1e-2, 0.5, 1000)
for i in threshes:
scores.append(f1_fn(i))
thresh_idx = np.argmax(scores)
thresh = threshes[thresh_idx]
f1 = scores[thresh_idx]
print(f"Threshold: {thresh} F1 score: {f1}")
return thresh
def get_generation_features(smd, bpm, frame_idxs_list=None, use_labels=True):
"""
Builds the step generation features from a step placement dataset.
Computes the appropriate STFT features, changes in time, and the
frame index of each timestep.
frame_idxs_list should be a list of preds.
If frame_idxs is None, then use step pos labels.
"""
diff_order = []
diff_features = {}
# Iterate through each data point (difficulty) of the dataset.
for i, d in enumerate(smd):
# Mantain order of difficulties.
diff_code = np.where(d['diff'][0])[0][0]
diff = utils.inv_difficulties[diff_code] # String
diff_order.append(diff)
if frame_idxs_list is None:
frame_idxs = np.where(d['step_pos_labels'])[0]
else:
frame_idxs = frame_idxs_list[i]
fft_features = d['fft_features'][frame_idxs]
if use_labels:
step_type_labels = d['step_type_labels'][frame_idxs]
# Compute delta_time, delta_beat
        bps = 60 / bpm  # seconds per beat
delta_frame = frame_idxs[1:] - frame_idxs[:-1]
delta_time = delta_frame * 512/44100
delta_beat = delta_time / bps
# 4 measures of time before and after the first and last notes.
beats_before = np.r_[12, delta_beat]
beats_after = np.r_[delta_beat, 12]
diff_features[diff] = {
'fft_features': fft_features,
'beats_before': beats_before,
'beats_after': beats_after,
'frame_idxs': frame_idxs
}
if use_labels:
diff_features[diff]['step_type_labels'] = step_type_labels
return diff_order, diff_features
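# Worked example of the timing arithmetic above (illustrative numbers): at
# bpm = 120 one beat lasts 60 / 120 = 0.5 s, one STFT hop spans
# 512 / 44100 ≈ 0.0116 s, so a gap of 43 frames is 43 * 0.0116 ≈ 0.499 s,
# i.e. delta_beat ≈ 1.0.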
def convert_dataset(
dataset_name,
new_dataset_name=None,
raw_data_name=None,
thresh=None,
model_name=None,
base_path=utils.BASE_PATH):
"""
datasets assumed to be stored in {base_path}/datasets/{dataset_name}.
Output will be in {base_path}/datasets/{new_dataset_name}.
If raw_data_name is points to a directory, then use the sm files for
bpm estimation.
If model is provided, then steps will be based off of predictions, and
threshold will be estimated.
"""
if new_dataset_name is None:
new_dataset_name = f"{dataset_name}_gen_{__version__}"
print(f"New dataset name: {new_dataset_name}")
new_ds_path = f"{base_path}/datasets/{new_dataset_name}"
if not os.path.isdir(new_ds_path):
os.mkdir(new_ds_path)
smds = SMDUtils.get_dataset_from_file(
dataset_name, 'placement', chunk_size=-1, concat=False)
# Compute threshold if model is provided.
if model_name is not None:
model = StepPlacement.RegularizedRecurrentStepPlacementModel()
model.load_state_dict(torch.load(model_name))
model.cuda()
thresh_ds = datautils.ConcatDataset(smds[:10])
thresh = compute_thresh(model, thresh_ds)
# Get BPM estimations.
for smd in smds:
print(smd.song_name)
n_diffs = len(smd)
if raw_data_name is None:
# Compute BPM.
pos_labels = []
for d in smd:
pos_labels.append(d['step_pos_labels'])
pos_labels = np.concatenate(pos_labels)
# For training, use ground truth step positions.
bpm = bpm_estimator.est_bpm(pos_labels)
else:
sm = SMData.SMFile(smd.song_name, raw_data_name, base_path)
try:
bpm = bpm_estimator.true_bpm(sm)
except ValueError as e:
print(e)
print(f"Skipping song {smd.song_name}")
continue
bps = 60 / bpm # Seconds per beat
frame_idxs = None
if model_name is not None:
predict_loader = datautils.DataLoader(smd)
outputs_list, labels_list = model.predict(
predict_loader, return_list=True)
outputs_list = list(map(lambda l: l[0,:,0], outputs_list))
labels_list = list(map(lambda l: l[0, :], labels_list))
frame_idxs = list(map(
lambda outputs: np.where(outputs > thresh)[0],
outputs_list))
diff_order, diff_features = get_generation_features(
smd, bpm, frame_idxs)
song_path = f'{new_ds_path}/{smd.song_name}'
fname = f'{song_path}/{smd.song_name}.h5'
if not os.path.isdir(song_path):
os.mkdir(song_path)
with h5py.File(fname, 'w') as hf:
hf.attrs['song_name'] = smd.song_name
hf.attrs['diff_names'] = np.array(diff_order).astype('S9')
for diff in diff_order:
diff_group = hf.create_group(diff)
diff_data = diff_features[diff]
for key in diff_data.keys():
diff_group.create_dataset(key, data=diff_data[key])
return new_dataset_name
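# Minimal usage sketch (paths and dataset names below are placeholders):
#
# if __name__ == '__main__':
#     convert_dataset(
#         dataset_name='fraxtil_placement',
#         raw_data_name='fraxtil_raw',       # use .sm files for true-BPM estimation
#         model_name='models/placement.pt',  # optional: derive steps from predictions
#     )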
|
the-stack_106_18247
|
# Copyright (c) 2015 Infoblox Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import netaddr
from neutron_lib import constants
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_utils import uuidutils
import webob.exc
from neutron.common import constants as n_const
from neutron.common import ipv6_utils
from neutron.db import ipam_backend_mixin
from neutron.db import ipam_pluggable_backend
from neutron.db import models_v2
from neutron.ipam import requests as ipam_req
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db_base
class UseIpamMixin(object):
def setUp(self):
cfg.CONF.set_override("ipam_driver", 'internal')
super(UseIpamMixin, self).setUp()
class TestIpamHTTPResponse(UseIpamMixin, test_db_base.TestV2HTTPResponse):
pass
class TestIpamPorts(UseIpamMixin, test_db_base.TestPortsV2):
pass
class TestIpamNetworks(UseIpamMixin, test_db_base.TestNetworksV2):
pass
class TestIpamSubnets(UseIpamMixin, test_db_base.TestSubnetsV2):
pass
class TestIpamSubnetPool(UseIpamMixin, test_db_base.TestSubnetPoolsV2):
pass
class TestDbBasePluginIpam(test_db_base.NeutronDbPluginV2TestCase):
def setUp(self):
cfg.CONF.set_override("ipam_driver", 'internal')
super(TestDbBasePluginIpam, self).setUp()
self.tenant_id = uuidutils.generate_uuid()
self.subnet_id = uuidutils.generate_uuid()
def _prepare_mocks(self, address_factory=None, subnet_factory=None):
if address_factory is None:
address_factory = ipam_req.AddressRequestFactory
if subnet_factory is None:
subnet_factory = ipam_req.SubnetRequestFactory
mocks = {
'driver': mock.Mock(),
'subnet': mock.Mock(),
'subnets': mock.Mock(),
'subnet_request': ipam_req.SpecificSubnetRequest(
self.tenant_id,
self.subnet_id,
'10.0.0.0/24',
'10.0.0.1',
[netaddr.IPRange('10.0.0.2', '10.0.0.254')]),
}
mocks['driver'].get_subnet.return_value = mocks['subnet']
mocks['driver'].allocate_subnet.return_value = mocks['subnet']
mocks['driver'].get_allocator.return_value = mocks['subnets']
mocks['subnets'].allocate.return_value = (
mock.sentinel.address, mock.sentinel.subnet_id)
mocks['driver'].get_subnet_request_factory.return_value = (
subnet_factory)
mocks['driver'].get_address_request_factory.return_value = (
address_factory)
mocks['subnet'].get_details.return_value = mocks['subnet_request']
return mocks
def _prepare_ipam(self):
mocks = self._prepare_mocks()
mocks['ipam'] = ipam_pluggable_backend.IpamPluggableBackend()
return mocks
def _prepare_mocks_with_pool_mock(self, pool_mock, address_factory=None,
subnet_factory=None):
mocks = self._prepare_mocks(address_factory=address_factory,
subnet_factory=subnet_factory)
pool_mock.get_instance.return_value = mocks['driver']
return mocks
def _get_allocate_mock(self, subnet_id, auto_ip='10.0.0.2',
fail_ip='127.0.0.1',
exception=None):
if exception is None:
exception = n_exc.InvalidInput(error_message='SomeError')
def allocate_mock(request):
if type(request) == ipam_req.SpecificAddressRequest:
if request.address == netaddr.IPAddress(fail_ip):
raise exception
else:
return str(request.address), subnet_id
else:
return auto_ip, subnet_id
return allocate_mock
def _get_deallocate_mock(self, fail_ip='127.0.0.1', exception=None):
if exception is None:
exception = n_exc.InvalidInput(error_message='SomeError')
def deallocate_mock(ip):
if str(ip) == fail_ip:
raise exception
return deallocate_mock
def _validate_allocate_calls(self, expected_calls, mocks):
self.assertTrue(mocks['subnets'].allocate.called)
actual_calls = mocks['subnets'].allocate.call_args_list
self.assertEqual(len(expected_calls), len(actual_calls))
        for i, call in enumerate(expected_calls):
            if call['ip_address']:
                self.assertIsInstance(actual_calls[i][0][0],
                                      ipam_req.SpecificAddressRequest)
                self.assertEqual(netaddr.IPAddress(call['ip_address']),
                                 actual_calls[i][0][0].address)
            else:
                self.assertIsInstance(actual_calls[i][0][0],
                                      ipam_req.AnyAddressRequest)
def _convert_to_ips(self, data):
ips = [{'ip_address': ip,
'subnet_id': data[ip][1],
'subnet_cidr': data[ip][0]} for ip in data]
return sorted(ips, key=lambda t: t['subnet_cidr'])
def _gen_subnet_id(self):
return uuidutils.generate_uuid()
def test_deallocate_single_ip(self):
mocks = self._prepare_ipam()
ip = '192.168.12.45'
data = {ip: ['192.168.12.0/24', self._gen_subnet_id()]}
ips = self._convert_to_ips(data)
mocks['ipam']._ipam_deallocate_ips(mock.ANY, mocks['driver'],
mock.ANY, ips)
mocks['driver'].get_subnet.assert_called_once_with(data[ip][1])
mocks['subnet'].deallocate.assert_called_once_with(ip)
def test_deallocate_multiple_ips(self):
mocks = self._prepare_ipam()
data = {'192.168.43.15': ['192.168.43.0/24', self._gen_subnet_id()],
'172.23.158.84': ['172.23.128.0/17', self._gen_subnet_id()],
'8.8.8.8': ['8.0.0.0/8', self._gen_subnet_id()]}
ips = self._convert_to_ips(data)
mocks['ipam']._ipam_deallocate_ips(mock.ANY, mocks['driver'],
mock.ANY, ips)
get_calls = [mock.call(data[ip][1]) for ip in data]
mocks['driver'].get_subnet.assert_has_calls(get_calls, any_order=True)
ip_calls = [mock.call(ip) for ip in data]
mocks['subnet'].deallocate.assert_has_calls(ip_calls, any_order=True)
def _single_ip_allocate_helper(self, mocks, ip, network, subnet):
ips = [{'subnet_cidr': network,
'subnet_id': subnet}]
if ip:
ips[0]['ip_address'] = ip
allocated_ips = mocks['ipam']._ipam_allocate_ips(
mock.ANY, mocks['driver'], mock.ANY, ips)
mocks['driver'].get_allocator.assert_called_once_with([subnet])
self.assertTrue(mocks['subnets'].allocate.called)
request = mocks['subnets'].allocate.call_args[0][0]
return {'ips': allocated_ips,
'request': request}
def test_allocate_single_fixed_ip(self):
mocks = self._prepare_ipam()
ip = '192.168.15.123'
subnet_id = self._gen_subnet_id()
mocks['subnets'].allocate.return_value = ip, subnet_id
results = self._single_ip_allocate_helper(mocks,
ip,
'192.168.15.0/24',
subnet_id)
self.assertIsInstance(results['request'],
ipam_req.SpecificAddressRequest)
self.assertEqual(netaddr.IPAddress(ip), results['request'].address)
self.assertEqual(ip, results['ips'][0]['ip_address'],
'Should allocate the same ip as passed')
def test_allocate_single_any_ip(self):
mocks = self._prepare_ipam()
network = '192.168.15.0/24'
ip = '192.168.15.83'
subnet_id = self._gen_subnet_id()
mocks['subnets'].allocate.return_value = ip, subnet_id
results = self._single_ip_allocate_helper(mocks, '', network,
subnet_id)
self.assertIsInstance(results['request'], ipam_req.AnyAddressRequest)
self.assertEqual(ip, results['ips'][0]['ip_address'])
def test_allocate_eui64_ip(self):
mocks = self._prepare_ipam()
ip = {'subnet_id': self._gen_subnet_id(),
'subnet_cidr': '2001:470:abcd::/64',
'mac': '6c:62:6d:de:cf:49',
'eui64_address': True}
eui64_ip = ipv6_utils.get_ipv6_addr_by_EUI64(ip['subnet_cidr'],
ip['mac'])
mocks['ipam']._ipam_allocate_ips(mock.ANY, mocks['driver'],
mock.ANY, [ip])
request = mocks['subnets'].allocate.call_args[0][0]
self.assertIsInstance(request, ipam_req.AutomaticAddressRequest)
self.assertEqual(eui64_ip, request.address)
def test_allocate_multiple_ips(self):
mocks = self._prepare_ipam()
subnet_id = self._gen_subnet_id()
data = {'': ['172.23.128.0/17', subnet_id],
'192.168.43.15': ['192.168.43.0/24', self._gen_subnet_id()],
'8.8.8.8': ['8.0.0.0/8', self._gen_subnet_id()]}
ips = self._convert_to_ips(data)
mocks['subnets'].allocate.side_effect = self._get_allocate_mock(
subnet_id, auto_ip='172.23.128.94')
mocks['ipam']._ipam_allocate_ips(
mock.ANY, mocks['driver'], mock.ANY, ips)
get_calls = [mock.call([data[ip][1]]) for ip in data]
mocks['driver'].get_allocator.assert_has_calls(
get_calls, any_order=True)
self._validate_allocate_calls(ips, mocks)
def _test_allocate_multiple_ips_with_exception(self,
exc_on_deallocate=False):
mocks = self._prepare_ipam()
fail_ip = '192.168.43.15'
auto_ip = '172.23.128.94'
subnet_id = self._gen_subnet_id()
data = {'': ['172.23.128.0/17', subnet_id],
fail_ip: ['192.168.43.0/24', self._gen_subnet_id()],
'8.8.8.8': ['8.0.0.0/8', self._gen_subnet_id()]}
ips = self._convert_to_ips(data)
mocks['subnets'].allocate.side_effect = self._get_allocate_mock(
subnet_id, auto_ip=auto_ip, fail_ip=fail_ip,
exception=db_exc.DBDeadlock())
# Exception should be raised on attempt to allocate second ip.
# Revert action should be performed for the already allocated ips,
# In this test case only one ip should be deallocated
# and original error should be reraised
self.assertRaises(db_exc.DBDeadlock,
mocks['ipam']._ipam_allocate_ips,
mock.ANY,
mocks['driver'],
mock.ANY,
ips)
# get_subnet should be called only for the first two networks
get_calls = [mock.call([data[ip][1]]) for ip in ['', fail_ip]]
mocks['driver'].get_allocator.assert_has_calls(
get_calls, any_order=True)
# Allocate should be called for the first two ips only
self._validate_allocate_calls(ips[:-1], mocks)
# Deallocate should be called for the first ip only
mocks['subnet'].deallocate.assert_called_once_with(auto_ip)
def test_allocate_multiple_ips_with_exception(self):
self._test_allocate_multiple_ips_with_exception()
def test_allocate_multiple_ips_with_exception_on_rollback(self):
# Validate that original exception is not replaced with one raised on
# rollback (during deallocate)
self._test_allocate_multiple_ips_with_exception(exc_on_deallocate=True)
def test_deallocate_multiple_ips_with_exception(self):
mocks = self._prepare_ipam()
fail_ip = '192.168.43.15'
data = {fail_ip: ['192.168.43.0/24', self._gen_subnet_id()],
'0.10.8.8': ['0.10.0.0/8', self._gen_subnet_id()]}
ips = self._convert_to_ips(data)
mocks['subnet'].deallocate.side_effect = self._get_deallocate_mock(
fail_ip=fail_ip, exception=db_exc.DBDeadlock())
mocks['subnet'].allocate.side_effect = ValueError('Some-error')
# Validate that exception from deallocate (DBDeadlock) is not replaced
# by exception from allocate (ValueError) in rollback block,
# so original exception is not changed
self.assertRaises(db_exc.DBDeadlock,
mocks['ipam']._ipam_deallocate_ips,
mock.ANY,
mocks['driver'],
mock.ANY,
ips)
mocks['subnets'].allocate.assert_called_once_with(mock.ANY)
@mock.patch('neutron.ipam.driver.Pool')
def test_create_subnet_over_ipam(self, pool_mock):
mocks = self._prepare_mocks_with_pool_mock(pool_mock)
cidr = '192.168.0.0/24'
allocation_pools = [{'start': '192.168.0.2', 'end': '192.168.0.254'}]
with self.subnet(allocation_pools=allocation_pools,
cidr=cidr):
pool_mock.get_instance.assert_called_once_with(None, mock.ANY)
self.assertTrue(mocks['driver'].allocate_subnet.called)
request = mocks['driver'].allocate_subnet.call_args[0][0]
self.assertIsInstance(request, ipam_req.SpecificSubnetRequest)
self.assertEqual(netaddr.IPNetwork(cidr), request.subnet_cidr)
@mock.patch('neutron.ipam.driver.Pool')
def test_create_ipv6_pd_subnet_over_ipam(self, pool_mock):
mocks = self._prepare_mocks_with_pool_mock(pool_mock)
cfg.CONF.set_override('ipv6_pd_enabled', True)
cidr = n_const.PROVISIONAL_IPV6_PD_PREFIX
allocation_pools = [netaddr.IPRange('::2', '::ffff:ffff:ffff:ffff')]
with self.subnet(cidr=None, ip_version=6,
subnetpool_id=constants.IPV6_PD_POOL_ID,
ipv6_ra_mode=n_const.IPV6_SLAAC,
ipv6_address_mode=n_const.IPV6_SLAAC):
pool_mock.get_instance.assert_called_once_with(None, mock.ANY)
self.assertTrue(mocks['driver'].allocate_subnet.called)
request = mocks['driver'].allocate_subnet.call_args[0][0]
self.assertIsInstance(request, ipam_req.SpecificSubnetRequest)
self.assertEqual(netaddr.IPNetwork(cidr), request.subnet_cidr)
self.assertEqual(allocation_pools, request.allocation_pools)
@mock.patch('neutron.ipam.driver.Pool')
def test_create_subnet_over_ipam_with_rollback(self, pool_mock):
mocks = self._prepare_mocks_with_pool_mock(pool_mock)
mocks['driver'].allocate_subnet.side_effect = ValueError
cidr = '10.0.2.0/24'
with self.network() as network:
self._create_subnet(self.fmt, network['network']['id'],
cidr, expected_res_status=500)
pool_mock.get_instance.assert_called_once_with(None, mock.ANY)
self.assertTrue(mocks['driver'].allocate_subnet.called)
request = mocks['driver'].allocate_subnet.call_args[0][0]
self.assertIsInstance(request, ipam_req.SpecificSubnetRequest)
self.assertEqual(netaddr.IPNetwork(cidr), request.subnet_cidr)
# Verify no subnet was created for network
req = self.new_show_request('networks', network['network']['id'])
res = req.get_response(self.api)
net = self.deserialize(self.fmt, res)
self.assertEqual(0, len(net['network']['subnets']))
@mock.patch('neutron.ipam.driver.Pool')
def test_ipam_subnet_deallocated_if_create_fails(self, pool_mock):
mocks = self._prepare_mocks_with_pool_mock(pool_mock)
cidr = '10.0.2.0/24'
with mock.patch.object(
ipam_backend_mixin.IpamBackendMixin, '_save_subnet',
side_effect=ValueError), self.network() as network:
self._create_subnet(self.fmt, network['network']['id'],
cidr, expected_res_status=500)
pool_mock.get_instance.assert_any_call(None, mock.ANY)
self.assertEqual(2, pool_mock.get_instance.call_count)
self.assertTrue(mocks['driver'].allocate_subnet.called)
request = mocks['driver'].allocate_subnet.call_args[0][0]
self.assertIsInstance(request, ipam_req.SpecificSubnetRequest)
self.assertEqual(netaddr.IPNetwork(cidr), request.subnet_cidr)
# Verify remove ipam subnet was called
mocks['driver'].remove_subnet.assert_called_once_with(
self.subnet_id)
@mock.patch('neutron.ipam.driver.Pool')
def test_update_subnet_over_ipam(self, pool_mock):
mocks = self._prepare_mocks_with_pool_mock(pool_mock)
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}]
with self.subnet(allocation_pools=allocation_pools,
cidr=cidr) as subnet:
data = {'subnet': {'allocation_pools': [
{'start': '10.0.0.10', 'end': '10.0.0.20'},
{'start': '10.0.0.30', 'end': '10.0.0.40'}]}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(200, res.status_code)
pool_mock.get_instance.assert_any_call(None, mock.ANY)
self.assertEqual(2, pool_mock.get_instance.call_count)
self.assertTrue(mocks['driver'].update_subnet.called)
request = mocks['driver'].update_subnet.call_args[0][0]
self.assertIsInstance(request, ipam_req.SpecificSubnetRequest)
self.assertEqual(netaddr.IPNetwork(cidr), request.subnet_cidr)
ip_ranges = [netaddr.IPRange(p['start'],
p['end']) for p in data['subnet']['allocation_pools']]
self.assertEqual(ip_ranges, request.allocation_pools)
@mock.patch('neutron.ipam.driver.Pool')
def test_delete_subnet_over_ipam(self, pool_mock):
mocks = self._prepare_mocks_with_pool_mock(pool_mock)
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
subnet = self._make_subnet(self.fmt, network, gateway_ip,
cidr, ip_version=4)
req = self.new_delete_request('subnets', subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
pool_mock.get_instance.assert_any_call(None, mock.ANY)
self.assertEqual(2, pool_mock.get_instance.call_count)
mocks['driver'].remove_subnet.assert_called_once_with(
subnet['subnet']['id'])
@mock.patch('neutron.ipam.driver.Pool')
def test_delete_subnet_over_ipam_with_rollback(self, pool_mock):
mocks = self._prepare_mocks_with_pool_mock(pool_mock)
mocks['driver'].remove_subnet.side_effect = ValueError
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
subnet = self._make_subnet(self.fmt, network, gateway_ip,
cidr, ip_version=4)
req = self.new_delete_request('subnets', subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPServerError.code, res.status_int)
pool_mock.get_instance.assert_any_call(None, mock.ANY)
self.assertEqual(2, pool_mock.get_instance.call_count)
mocks['driver'].remove_subnet.assert_called_once_with(
subnet['subnet']['id'])
# Verify subnet was recreated after failed ipam call
subnet_req = self.new_show_request('subnets',
subnet['subnet']['id'])
raw_res = subnet_req.get_response(self.api)
sub_res = self.deserialize(self.fmt, raw_res)
self.assertIn(sub_res['subnet']['cidr'], cidr)
self.assertIn(sub_res['subnet']['gateway_ip'],
gateway_ip)
@mock.patch('neutron.ipam.driver.Pool')
def test_create_port_ipam(self, pool_mock):
mocks = self._prepare_mocks_with_pool_mock(pool_mock)
auto_ip = '10.0.0.2'
expected_calls = [{'ip_address': ''}]
with self.subnet() as subnet:
mocks['subnets'].allocate.side_effect = self._get_allocate_mock(
subnet['subnet']['id'], auto_ip=auto_ip)
with self.port(subnet=subnet) as port:
ips = port['port']['fixed_ips']
self.assertEqual(1, len(ips))
self.assertEqual(ips[0]['ip_address'], auto_ip)
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
self._validate_allocate_calls(expected_calls, mocks)
@mock.patch('neutron.ipam.driver.Pool')
def test_create_port_ipam_with_rollback(self, pool_mock):
mocks = self._prepare_mocks_with_pool_mock(pool_mock)
mocks['subnet'].allocate.side_effect = ValueError
with self.network() as network:
with self.subnet(network=network):
net_id = network['network']['id']
data = {
'port': {'network_id': net_id,
'tenant_id': network['network']['tenant_id']}}
port_req = self.new_create_request('ports', data)
res = port_req.get_response(self.api)
self.assertEqual(webob.exc.HTTPServerError.code,
res.status_int)
# verify no port left after failure
req = self.new_list_request('ports', self.fmt,
"network_id=%s" % net_id)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(0, len(res['ports']))
@mock.patch('neutron.ipam.driver.Pool')
def test_update_port_ipam(self, pool_mock):
mocks = self._prepare_mocks_with_pool_mock(pool_mock)
auto_ip = '10.0.0.2'
new_ip = '10.0.0.15'
expected_calls = [{'ip_address': ip} for ip in ['', new_ip]]
with self.subnet() as subnet:
mocks['subnets'].allocate.side_effect = self._get_allocate_mock(
subnet['subnet']['id'], auto_ip=auto_ip)
with self.port(subnet=subnet) as port:
ips = port['port']['fixed_ips']
self.assertEqual(1, len(ips))
self.assertEqual(auto_ip, ips[0]['ip_address'])
# Update port with another new ip
data = {"port": {"fixed_ips": [{
'subnet_id': subnet['subnet']['id'],
'ip_address': new_ip}]}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
ips = res['port']['fixed_ips']
self.assertEqual(1, len(ips))
self.assertEqual(new_ip, ips[0]['ip_address'])
# Allocate should be called for the first two networks
self._validate_allocate_calls(expected_calls, mocks)
# Deallocate should be called for the first ip only
mocks['subnet'].deallocate.assert_called_once_with(auto_ip)
@mock.patch('neutron.ipam.driver.Pool')
def test_delete_port_ipam(self, pool_mock):
mocks = self._prepare_mocks_with_pool_mock(pool_mock)
auto_ip = '10.0.0.2'
with self.subnet() as subnet:
mocks['subnets'].allocate.side_effect = self._get_allocate_mock(
subnet['subnet']['id'], auto_ip=auto_ip)
with self.port(subnet=subnet) as port:
ips = port['port']['fixed_ips']
self.assertEqual(1, len(ips))
self.assertEqual(auto_ip, ips[0]['ip_address'])
req = self.new_delete_request('ports', port['port']['id'])
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
mocks['subnet'].deallocate.assert_called_once_with(auto_ip)
def test_recreate_port_ipam(self):
with self.subnet() as subnet:
subnet_cidr = subnet['subnet']['cidr']
with self.port(subnet=subnet) as port:
ips = port['port']['fixed_ips']
self.assertEqual(1, len(ips))
orig_ip = ips[0]['ip_address']
self.assertIn(netaddr.IPAddress(ips[0]['ip_address']),
netaddr.IPSet(netaddr.IPNetwork(subnet_cidr)))
req = self.new_delete_request('ports', port['port']['id'])
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
with self.port(subnet=subnet, fixed_ips=ips) as port:
ips = port['port']['fixed_ips']
self.assertEqual(1, len(ips))
self.assertEqual(orig_ip, ips[0]['ip_address'])
def test_recreate_port_ipam_specific_ip(self):
with self.subnet() as subnet:
ip = '10.0.0.2'
fixed_ip_data = [{'subnet_id': subnet['subnet']['id'],
'ip_address': ip}]
with self.port(subnet=subnet, fixed_ips=fixed_ip_data) as port:
ips = port['port']['fixed_ips']
self.assertEqual(1, len(ips))
self.assertEqual(ip, ips[0]['ip_address'])
req = self.new_delete_request('ports', port['port']['id'])
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
with self.port(subnet=subnet, fixed_ips=ips) as port:
ips = port['port']['fixed_ips']
self.assertEqual(1, len(ips))
self.assertEqual(ip, ips[0]['ip_address'])
@mock.patch('neutron.ipam.driver.Pool')
def test_update_ips_for_port_passes_port_dict_to_factory(self, pool_mock):
address_factory = mock.Mock()
mocks = self._prepare_mocks_with_pool_mock(
pool_mock, address_factory=address_factory)
context = mock.Mock()
new_ips = mock.Mock()
original_ips = mock.Mock()
mac = mock.Mock()
ip_dict = {'ip_address': '192.1.1.10',
'subnet_id': uuidutils.generate_uuid()}
changes = ipam_pluggable_backend.IpamPluggableBackend.Changes(
add=[ip_dict], original=[], remove=[])
changes_mock = mock.Mock(return_value=changes)
fixed_ips_mock = mock.Mock(return_value=changes.add)
mocks['ipam'] = ipam_pluggable_backend.IpamPluggableBackend()
mocks['ipam']._get_changed_ips_for_port = changes_mock
mocks['ipam']._ipam_get_subnets = mock.Mock()
mocks['ipam']._test_fixed_ips_for_port = fixed_ips_mock
mocks['ipam']._update_ips_for_pd_subnet = mock.Mock(return_value=[])
port_dict = {'device_owner': uuidutils.generate_uuid(),
'network_id': uuidutils.generate_uuid()}
mocks['ipam']._update_ips_for_port(context, port_dict,
original_ips, new_ips, mac)
mocks['driver'].get_address_request_factory.assert_called_once_with()
mocks['ipam']._ipam_get_subnets.assert_called_once_with(
context, network_id=port_dict['network_id'], host=None)
# Validate port_dict is passed into address_factory
address_factory.get_request.assert_called_once_with(context,
port_dict,
ip_dict)
@mock.patch('neutron.ipam.driver.Pool')
def test_update_ips_for_port_passes_port_id_to_factory(self, pool_mock):
port_id = mock.Mock()
network_id = uuidutils.generate_uuid()
address_factory = mock.Mock()
mocks = self._prepare_mocks_with_pool_mock(
pool_mock, address_factory=address_factory)
context = mock.Mock()
ip_dict = {'ip_address': '192.1.1.10',
'subnet_id': uuidutils.generate_uuid()}
port_dict = {'port': {'device_owner': uuidutils.generate_uuid(),
'network_id': network_id,
'fixed_ips': [ip_dict]}}
subnets = [{'id': ip_dict['subnet_id'],
'network_id': network_id,
'cidr': '192.1.1.0/24',
'ip_version': 4,
'ipv6_address_mode': None,
'ipv6_ra_mode': None}]
get_subnets_mock = mock.Mock(return_value=subnets)
get_subnet_mock = mock.Mock(return_value=subnets[0])
mocks['ipam'] = ipam_pluggable_backend.IpamPluggableBackend()
mocks['ipam']._ipam_get_subnets = get_subnets_mock
mocks['ipam']._get_subnet = get_subnet_mock
mocks['ipam'].allocate_ips_for_port_and_store(context,
port_dict,
port_id)
mocks['driver'].get_address_request_factory.assert_called_once_with()
port_dict_with_id = port_dict['port'].copy()
port_dict_with_id['id'] = port_id
# Validate port id is added to port dict before address_factory call
address_factory.get_request.assert_called_once_with(context,
port_dict_with_id,
ip_dict)
# Verify incoming port dict is not changed ('id' is not added to it)
self.assertIsNone(port_dict['port'].get('id'))
def _test_update_db_subnet(self, pool_mock, subnet, expected_subnet,
old_pools):
subnet_factory = mock.Mock()
context = mock.Mock()
mocks = self._prepare_mocks_with_pool_mock(
pool_mock, subnet_factory=subnet_factory)
mocks['ipam'] = ipam_pluggable_backend.IpamPluggableBackend()
mocks['ipam'].update_db_subnet(context, id, subnet, old_pools)
mocks['driver'].get_subnet_request_factory.assert_called_once_with()
subnet_factory.get_request.assert_called_once_with(context,
expected_subnet,
None)
@mock.patch('neutron.ipam.driver.Pool')
def test_update_db_subnet_unchanged_pools(self, pool_mock):
old_pools = [netaddr.IPRange('192.1.1.2', '192.1.1.254')]
subnet = {'id': uuidutils.generate_uuid(),
'network_id': uuidutils.generate_uuid(),
'cidr': '192.1.1.0/24',
'ipv6_address_mode': None,
'ipv6_ra_mode': None}
subnet_with_pools = subnet.copy()
subnet_with_pools['allocation_pools'] = old_pools
        # if subnet has no allocation pools set, then old pools have to
# be added to subnet dict passed to request factory
self._test_update_db_subnet(pool_mock, subnet, subnet_with_pools,
old_pools)
@mock.patch('neutron.ipam.driver.Pool')
def test_update_db_subnet_new_pools(self, pool_mock):
old_pools = [netaddr.IPRange('192.1.1.2', '192.1.1.254')]
subnet = {'id': uuidutils.generate_uuid(),
'network_id': uuidutils.generate_uuid(),
'cidr': '192.1.1.0/24',
'allocation_pools': [
netaddr.IPRange('192.1.1.10', '192.1.1.254')],
'ipv6_address_mode': None,
'ipv6_ra_mode': None}
# make a copy of subnet for validation, since update_subnet changes
# incoming subnet dict
expected_subnet = subnet.copy()
# validate that subnet passed to request factory is the same as
# incoming one, i.e. new pools in it are not overwritten by old pools
self._test_update_db_subnet(pool_mock, subnet, expected_subnet,
old_pools)
@mock.patch('neutron.ipam.driver.Pool')
def test_update_db_subnet_new_pools_exception(self, pool_mock):
context = mock.Mock()
mocks = self._prepare_mocks_with_pool_mock(pool_mock)
mocks['ipam'] = ipam_pluggable_backend.IpamPluggableBackend()
new_port = {'fixed_ips': [{'ip_address': '192.168.1.20',
'subnet_id': 'some-id'},
{'ip_address': '192.168.1.50',
'subnet_id': 'some-id'}]}
db_port = models_v2.Port(id='id', network_id='some-net-id')
old_port = {'fixed_ips': [{'ip_address': '192.168.1.10',
'subnet_id': 'some-id'},
{'ip_address': '192.168.1.50',
'subnet_id': 'some-id'}]}
changes = mocks['ipam'].Changes(
add=[{'ip_address': '192.168.1.20',
'subnet_id': 'some-id'}],
original=[{'ip_address': '192.168.1.50',
'subnet_id': 'some-id'}],
remove=[{'ip_address': '192.168.1.10',
'subnet_id': 'some-id'}])
mocks['ipam']._make_port_dict = mock.Mock(return_value=old_port)
mocks['ipam']._update_ips_for_port = mock.Mock(return_value=changes)
mocks['ipam']._update_db_port = mock.Mock(
side_effect=db_exc.DBDeadlock)
# emulate raising exception on rollback actions
mocks['ipam']._ipam_deallocate_ips = mock.Mock(side_effect=ValueError)
mocks['ipam']._ipam_allocate_ips = mock.Mock(side_effect=ValueError)
        # Validate original exception (DBDeadlock) is not overridden by
# exception raised on rollback (ValueError)
self.assertRaises(db_exc.DBDeadlock,
mocks['ipam'].update_port_with_ips,
context,
db_port,
new_port,
mock.Mock())
mocks['ipam']._ipam_deallocate_ips.assert_called_once_with(
context, mocks['driver'], db_port,
changes.add, revert_on_fail=False)
mocks['ipam']._ipam_allocate_ips.assert_called_once_with(
context, mocks['driver'], db_port,
changes.remove, revert_on_fail=False)
|
the-stack_106_18248
|
# -*- coding: utf-8 -*-
"""Cisco Identity Services Engine PAN HA API wrapper.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import *
from past.builtins import basestring
from ...restsession import RestSession
from ...utils import (
check_type,
dict_from_items_with_values,
apply_path_params,
dict_of_str,
get_next_page,
)
class PanHa(object):
"""Identity Services Engine PAN HA API (version: 3.0.0).
Wraps the Identity Services Engine PAN HA
API and exposes the API as native Python
methods that return native Python objects.
"""
def __init__(self, session, object_factory, request_validator):
"""Initialize a new PanHa
object with the provided RestSession.
Args:
session(RestSession): The RESTful session object to be used for
API calls to the Identity Services Engine service.
Raises:
TypeError: If the parameter types are incorrect.
"""
check_type(session, RestSession)
super(PanHa, self).__init__()
self._session = session
self._object_factory = object_factory
self._request_validator = request_validator
def get_pan_ha_status(self,
headers=None,
**query_parameters):
"""In a high availability configuration, the Primary Administration
Node (PAN) is in the active state. The Secondary PAN
(backup PAN) is in the standby state, which means it
receives all configuration updates from the Primary PAN,
but is not active in the ISE network. You can configure
        ISE to automatically promote the secondary PAN when
the primary PAN becomes unavailable.
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
            RestResponse: REST response with the following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
pass
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
_params = {
}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
e_url = ('/api/v1/deployment/pan-ha')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
_api_response = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
_api_response = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_daa171ab765a02a714c48376b3790d_v3_0_0', _api_response)
def enable_pan_ha(self,
failed_attempts=None,
is_enabled=None,
polling_interval=None,
primary_health_check_node=None,
secondary_health_check_node=None,
headers=None,
payload=None,
active_validation=True,
**query_parameters):
"""To deploy the auto-failover feature, you must have at least
three nodes, where two of the nodes assume the
Administration persona, and one node acts as the health
check node. A health check node is a non-administration
node and can be a Policy Service, Monitoring, or pxGrid
node, or a combination of these. If the PANs are in
different data centers, you must have a health check
node for each PAN.
Args:
failed_attempts(integer): failedAttempts, property of
the request body.
is_enabled(boolean): isEnabled, property of the request
body.
polling_interval(integer): pollingInterval, property of
the request body.
primary_health_check_node(string):
primaryHealthCheckNode, property of the
request body. Constraints: maxLength set
to 64 and minLength set to 1.
secondary_health_check_node(string):
secondaryHealthCheckNode, property of
the request body. Constraints: maxLength
set to 64 and minLength set to 1.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
payload(dict): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
            RestResponse: REST response with the following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
pass
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
is_xml_payload = 'application/xml' in _headers.get('Content-Type', [])
if active_validation and is_xml_payload:
check_type(payload, basestring)
if active_validation and not is_xml_payload:
check_type(payload, dict)
_params = {
}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
if is_xml_payload:
_payload = payload
else:
_tmp_payload = {
'isEnabled':
is_enabled,
'primaryHealthCheckNode':
primary_health_check_node,
'secondaryHealthCheckNode':
secondary_health_check_node,
'pollingInterval':
polling_interval,
'failedAttempts':
failed_attempts,
}
_payload = {
'request': dict_from_items_with_values(_tmp_payload)
}
_payload.update(payload or {})
_payload = dict_from_items_with_values(_payload)
if active_validation and not is_xml_payload:
self._request_validator('jsd_fc9a4ee495785518bd2251b6b4fb41f4_v3_0_0')\
.validate(_payload)
e_url = ('/api/v1/deployment/pan-ha')
endpoint_full_url = apply_path_params(e_url, path_params)
request_params = {'data': _payload} if is_xml_payload else {'json': _payload}
if with_custom_headers:
_api_response = self._session.post(endpoint_full_url, params=_params,
headers=_headers,
**request_params)
else:
_api_response = self._session.post(endpoint_full_url, params=_params,
**request_params)
return self._object_factory('bpm_fc9a4ee495785518bd2251b6b4fb41f4_v3_0_0', _api_response)
def disable_pan_ha(self,
headers=None,
**query_parameters):
"""Disable the automatic PAN failover.
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
            RestResponse: REST response with the following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
pass
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
_params = {
}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
e_url = ('/api/v1/deployment/pan-ha')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
_api_response = self._session.delete(endpoint_full_url, params=_params,
headers=_headers)
else:
_api_response = self._session.delete(endpoint_full_url, params=_params)
return self._object_factory('bpm_a1e3cde0c3f254b39caaaf7c907ae67e_v3_0_0', _api_response)
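# Usage sketch (not part of the generated wrapper): assuming the SDK exposes an
# instance of this class as `api.pan_ha` on a configured client object, the three
# endpoints above map to calls like the following; the node names and numeric
# values are placeholders.
#
#     api.pan_ha.get_pan_ha_status()
#     api.pan_ha.enable_pan_ha(is_enabled=True,
#                              primary_health_check_node='health-node-1',
#                              secondary_health_check_node='health-node-2',
#                              polling_interval=120,
#                              failed_attempts=5)
#     api.pan_ha.disable_pan_ha()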
|
the-stack_106_18251
|
import scrapy
from ..items import Mobile
class AmazonScraper(scrapy.Spider):
name = "amazon_scraper"
# How many pages you want to scrape
no_of_pages = 1
# Headers to fix 503 service unavailable error
    # Spoof headers to make servers think the request is coming from a browser ;)
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.2840.71 Safari/539.36'}
def start_requests(self):
# starting urls for scraping
urls = ["https://www.amazon.in/s?k=mobile&ref=nb_sb_noss_2"]
for url in urls: yield scrapy.Request(url = url, callback = self.parse, headers = self.headers)
def parse(self, response):
self.no_of_pages -= 1
# print(response.text)
mobiles = response.xpath("//a[@class='a-link-normal a-text-normal']").xpath("@href").getall()
# print(len(mobiles))
for mobile in mobiles:
final_url = response.urljoin(mobile)
yield scrapy.Request(url=final_url, callback = self.parse_mobile, headers = self.headers)
# break
# print(final_url)
# print(response.body)
# title = response.xpath("//span[@class='a-size-medium a-color-base a-text-normal']//text()").getall()
# title = response.css('span').getall()
# print(title)
if(self.no_of_pages > 0):
next_page_url = response.xpath("//ul[@class='a-pagination']/li[@class='a-last']/a").xpath("@href").get()
final_url = response.urljoin(next_page_url)
yield scrapy.Request(url = final_url, callback = self.parse, headers = self.headers)
def parse_mobile(self, response):
title = response.xpath("//span[@id='productTitle']//text()").get() or response.xpath("//h1[@id='title']//text()").get()
brand = response.xpath("//a[@id='bylineInfo']//text()").get() or "not specified"
rating = response.xpath("//div[@id='averageCustomerReviews_feature_div']").xpath("//span[@class='a-icon-alt']//text()").get()
price = response.xpath("//span[@id='priceblock_ourprice']//text()") or response.xpath("//span[@id='priceblock_dealprice']//text()")
print(price)
if len(price) > 1: price = price[1].get()
elif len(price) == 1: price = price[0].get()
else : price = price.get()
colour = response.xpath("//div[@id='variation_color_name']/div/span[@class='selection']//text()").get() or "not defined"
instock = response.xpath("//div[@id='availability']").xpath("//span[@class='a-size-medium a-color-success']//text()").get() or "Out Stock"
instock = instock.strip() == "In stock."
reviews = response.xpath("//div[@class='a-expander-content reviewText review-text-content a-expander-partial-collapse-content']/span//text()").getall()
description_raw = response.xpath("//div[@id='featurebullets_feature_div']//span[@class='a-list-item']//text()").getall()
img_url = response.xpath("//img[@id='landingImage']/@data-old-hires").get() or response.xpath("//img[@id='imgBlkFront']/@src").get()
description = []
for description_temp in description_raw:
description.append(description_temp.strip())
print(title, brand, rating, price, colour, instock, img_url)
# print(final_review)
# print(reviews)
# print(description)
yield Mobile(title = title.strip(), brand = brand.strip(), rating = rating.strip(), price = price.strip(), colour = colour.strip(), instock = instock, reviews = reviews, description = description, image_urls = [img_url])
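# Run sketch (assumes this spider lives in a standard Scrapy project whose
# items.py defines the Mobile item imported above). From the project root:
#
#     scrapy crawl amazon_scraper -o mobiles.json
#
# -o writes the yielded Mobile items to a feed file; raise no_of_pages above to
# follow more result pages.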
|
the-stack_106_18254
|
from pathlib import Path
import pandas as pd
def structure_id_path_to_string(structure_id_path):
"""
Given a path (as a list of structure ids) to a specific structure,
return as a string of "/" separated structure ids
Parameters
----------
structure_id_path : list
list of ints defining the path to a region (which is the last element)
Returns
-------
str:
"/" separated string of structure ids
"""
path_string = "/"
for element in structure_id_path:
path_string = path_string + str(element) + "/"
return path_string
def get_parent_id(structure_id_path, root=997):
"""
Given a path (as a list of structure ids) to a specific structure,
return the id of the parent structure
Parameters
----------
structure_id_path : list
list of ints defining the path to a region (which is the last element)
root : int (optional)
Value for the root (whole brain) structure that has no parent.
Returns
-------
int or None :
id of the parent structure (or None if no parent)
"""
if structure_id_path == [root]:
return None
else:
return int(structure_id_path[-2])
def convert_structure_json_to_csv(
structure_json_path, destination_path=None, root=997
):
"""
Converts an atlas structure json dictionary to csv. For cellfinder
compatibility and ease of browsing.
Parameters
----------
structure_json_path : str or Path object
path to the json file
destination_path : str or Path object (optional)
Where to save the resulting csv file. Defaults to the same directory
as the json file.
"""
structure_json_path = Path(structure_json_path)
df = pd.read_json(structure_json_path)
df = df.drop(columns=["rgb_triplet"])
df["parent_structure_id"] = df["structure_id_path"].apply(
get_parent_id, root=root
)
df["structure_id_path"] = df["structure_id_path"].apply(
structure_id_path_to_string
)
df = df.sort_values("name")
if destination_path is None:
destination_path = structure_json_path.with_suffix(".csv")
df.to_csv(destination_path, index=False)
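# Usage sketch (not part of the original module; the file name is hypothetical):
#
#     convert_structure_json_to_csv("structures.json")
#
# This writes structures.csv next to the json file, with a parent_structure_id
# column and the structure_id_path flattened to a "/"-separated string.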
|
the-stack_106_18255
|
from .Scraper import Scraper
from .ConnectionScraper import ConnectionScraper
import json
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException, NoSuchElementException
import time
from .Profile import Profile
from .utils import AnyEC
class ProfileScraper(Scraper):
"""
Scraper for Personal LinkedIn Profiles. See inherited Scraper class for
details about the constructor.
"""
MAIN_SELECTOR = '.scaffold-layout__main'
ERROR_SELECTOR = '.profile-unavailable'
def scrape_by_email(self, email):
self.load_profile_page(
'https://www.linkedin.com/sales/gmail/profile/proxy/{}'.format(email))
return self.get_profile()
def scrape(self, url='', user=None):
self.load_profile_page(url, user)
return self.get_profile()
def load_profile_page(self, url='', user=None):
"""Load profile page and all async content
Params:
- url {str}: url of the profile to be loaded
Raises:
ValueError: If link doesn't match a typical profile url
"""
if user:
url = 'https://www.linkedin.com/in/' + user
if 'com/in/' not in url and 'sales/gmail/profile/proxy/' not in url:
            raise ValueError(
                "URL must look like ...com/in/NAME or ...com/sales/gmail/profile/proxy/EMAIL")
self.driver.get(url)
# Wait for page to load dynamically via javascript
try:
myElem = WebDriverWait(self.driver, self.timeout).until(AnyEC(
EC.presence_of_element_located(
(By.CSS_SELECTOR, self.MAIN_SELECTOR)),
EC.presence_of_element_located(
(By.CSS_SELECTOR, self.ERROR_SELECTOR))
))
except TimeoutException as e:
raise ValueError(
"""Took too long to load profile. Common problems/solutions:
1. Invalid LI_AT value: ensure that yours is correct (they
update frequently)
2. Slow Internet: increase the time out parameter in the Scraper
constructor
3. Invalid e-mail address (or user does not allow e-mail scrapes) on scrape_by_email call
""")
# Check if we got the 'profile unavailable' page
try:
self.driver.find_element_by_css_selector(self.MAIN_SELECTOR)
except:
raise ValueError(
'Profile Unavailable: Profile link does not match any current Linkedin Profiles')
# Scroll to the bottom of the page incrementally to load any lazy-loaded content
self.scroll_to_bottom()
self.expand_given_recommendations()
def expand_given_recommendations(self):
try:
given_recommendation_tab = self.driver.find_element_by_css_selector(
'section.pv-recommendations-section button[aria-selected="false"].artdeco-tab')
# Scrolls the desired element into view
self.driver.execute_script(
"arguments[0].scrollIntoView(false);", given_recommendation_tab)
given_recommendation_tab.click()
self.click_expandable_buttons()
# self.scroll_to_bottom()
except:
pass
def get_profile(self):
try:
profile = self.driver.find_element_by_css_selector(
self.MAIN_SELECTOR).get_attribute("outerHTML")
except:
raise Exception(
"Could not find profile wrapper html. This sometimes happens for exceptionally long profiles. Try decreasing scroll-increment.")
contact_info = self.get_contact_info()
return Profile(profile + contact_info)
def get_contact_info(self):
try:
# Scroll to top to put clickable button in view
self.driver.execute_script("window.scrollTo(0, 0);")
button = self.driver.find_element_by_css_selector(
'a[data-control-name="contact_see_more"]')
button.click()
contact_info = self.wait_for_el('.pv-contact-info')
return contact_info.get_attribute('outerHTML')
except Exception as e:
print(e)
return ""
def get_mutual_connections(self):
try:
link = self.driver.find_element_by_partial_link_text(
'Mutual Connection')
except NoSuchElementException as e:
print("NO MUTUAL CONNS")
return []
with ConnectionScraper(scraperInstance=self) as cs:
cs.driver.get(link.get_attribute('href'))
cs.wait_for_el('.search-s-facet--facetNetwork form button')
return cs.scrape_all_pages()
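# Usage sketch (not part of the original module): constructor arguments are
# handled by the inherited Scraper class, so the bare constructor below is an
# assumption; scrape(), scrape_by_email() and the context-manager style mirror
# how ConnectionScraper is used above. The user name and e-mail are placeholders.
#
#     with ProfileScraper() as ps:
#         profile = ps.scrape(user='some-user')
#         # or: ps.scrape(url='https://www.linkedin.com/in/some-user')
#         # or: ps.scrape_by_email('someone@example.com')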
|
the-stack_106_18257
|
#!/usr/bin/env python
"""cert_util.py: X509 certificate parsing utility.
Usage:
cert_util.py <command> [flags] [cert_file ...]
Known commands:
print: print information about the certificates in given files
Each file must contain either one or more PEM-encoded certificates,
or a single DER certificate.
For example:
cert_util.py print cert.pem - pretty-print the certificate(s)
cert_util.py print c1.pem c2.pem - pretty-print certificates from
multiple files
cert_util.py print cert.der - both PEM and DER are accepted formats
(use --filetype to force a format)
cert_util.py print --debug cert.pem - print full ASN.1 structure
cert_util.py print --subject cert.pem - print the subject name
cert_util.py print --issuer cert.pem - print the issuer name
cert_util.py print --fingerprint cert.pem
- print the SHA-1 fingerprint
cert_util.py print --fingerprint --digest="sha256" cert.pem
- print the SHA-256 fingerprint
"""
from __future__ import print_function
import sys
from absl import flags as gflags
from ct.crypto import cert
from ct.crypto import error
from ct.crypto import pem
from ct.crypto.asn1 import print_util
FLAGS = gflags.FLAGS
gflags.DEFINE_bool("subject", False, "Print option: prints certificate subject")
gflags.DEFINE_bool("issuer", False, "Print option: prints certificate issuer")
gflags.DEFINE_bool("fingerprint", False, "Print option: prints certificate "
"fingerprint")
gflags.DEFINE_string("digest", "sha1", "Print option: fingerprint digest to use")
gflags.DEFINE_bool("debug", False,
"Print option: prints full ASN.1 debug information")
gflags.DEFINE_string("filetype", "", "Read option: specify an input file "
"format (pem or der). If no format is specified, the "
"parser attempts to detect the format automatically.")
gflags.register_validator("filetype", lambda value: not value or
value.lower() in {"pem", "der"},
message="--filetype must be one of pem or der")
def print_cert(certificate):
if not FLAGS.subject and not FLAGS.issuer and not FLAGS.fingerprint:
if FLAGS.debug:
print("%r" % certificate)
else:
print(certificate)
else:
if FLAGS.subject:
print("subject:\n%s" % certificate.print_subject_name())
if FLAGS.issuer:
print("issuer:\n%s" % certificate.print_issuer_name())
if FLAGS.fingerprint:
# Print in a format familiar from OpenSSL.
print("%s fingerprint: %s\n" % (
FLAGS.digest.upper(), print_util.bytes_to_hex(
certificate.fingerprint(hashfunc=FLAGS.digest))))
def print_certs(cert_file):
"""Print the certificates, or parts thereof, as specified by flags."""
# If no format is specified, try PEM first, and automatically fall back
# to DER. The advantage is that usage is more convenient; the disadvantage
# is that error messages are less helpful because we don't know the expected
# file format.
printed = False
if not FLAGS.filetype or FLAGS.filetype.lower() == "pem":
if not FLAGS.filetype:
print("Attempting to read PEM")
try:
for c in cert.certs_from_pem_file(cert_file, strict_der=False):
print_cert(c)
printed = True
except pem.PemError as e:
if not printed:
# Immediate error
print("File is not a valid PEM file: %s" % e)
else:
exit_with_message("Error while scanning PEM blocks: %s" % e)
except error.ASN1Error as e:
exit_with_message("Bad DER encoding: %s" % e)
if not printed and FLAGS.filetype.lower() != "pem":
if not FLAGS.filetype:
print("Attempting to read raw DER")
try:
print_cert(cert.Certificate.from_der_file(cert_file,
strict_der=False))
except error.ASN1Error as e:
            exit_with_message("Failed to parse DER from %s: %s" % (cert_file, e))
def exit_with_message(error_message):
print(error_message)
print("Use --helpshort or --help to get help.")
sys.exit(1)
def main(argv):
if len(argv) <= 1 or argv[1][0] == "-":
# No command. Parse flags anyway to trigger help flags.
try:
argv = FLAGS(argv)
exit_with_message("No command")
except gflags.Error as e:
exit_with_message("Error parsing flags: %s" % e)
argv = argv[1:]
try:
argv = FLAGS(argv)
except gflags.Error as e:
exit_with_message("Error parsing flags: %s" % e)
command, argv = argv[0], argv[1:]
if command != "print":
exit_with_message("Unknown command %s" % command)
if not argv:
exit_with_message("No certificate file given")
for filename in argv:
print_certs(filename)
sys.exit(0)
if __name__ == "__main__":
main(sys.argv)
|
the-stack_106_18258
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt, cstr
from frappe.model.mapper import get_mapped_doc
from frappe.model.document import Document
class VehicleLog(Document):
def validate(self):
if flt(self.odometer) < flt(self.last_odometer):
frappe.throw(_("Current Odometer Value should be greater than Last Odometer Value {0}").format(self.last_odometer))
def on_submit(self):
frappe.db.set_value("Vehicle", self.license_plate, "last_odometer", self.odometer)
def on_cancel(self):
distance_travelled = self.odometer - self.last_odometer
        if distance_travelled > 0:
updated_odometer_value = int(frappe.db.get_value("Vehicle", self.license_plate, "last_odometer")) - distance_travelled
frappe.db.set_value("Vehicle", self.license_plate, "last_odometer", updated_odometer_value)
@frappe.whitelist()
def make_expense_claim(docname):
expense_claim = frappe.db.exists("Expense Claim", {"vehicle_log": docname})
if expense_claim:
frappe.throw(_("Expense Claim {0} already exists for the Vehicle Log").format(expense_claim))
vehicle_log = frappe.get_doc("Vehicle Log", docname)
service_expense = sum([flt(d.expense_amount) for d in vehicle_log.service_detail])
claim_amount = service_expense + (flt(vehicle_log.price) * flt(vehicle_log.fuel_qty) or 1)
if not claim_amount:
frappe.throw(_("No additional expenses has been added"))
exp_claim = frappe.new_doc("Expense Claim")
exp_claim.employee = vehicle_log.employee
exp_claim.vehicle_log = vehicle_log.name
exp_claim.remark = _("Expense Claim for Vehicle Log {0}").format(vehicle_log.name)
exp_claim.append("expenses", {
"expense_date": vehicle_log.date,
"description": _("Vehicle Expenses"),
"amount": claim_amount
})
return exp_claim.as_dict()
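# Usage sketch (assumption: called from server-side code with an existing,
# submitted Vehicle Log; the document name below is hypothetical):
#
#     claim_dict = make_expense_claim("HR-VLOG-00001")
#     claim = frappe.get_doc(claim_dict)
#     claim.insert()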
|
the-stack_106_18259
|
from swampdragon import route_handler
from swampdragon.route_handler import BaseRouter
class ChatRouter(BaseRouter):
route_name = 'chat-route'
valid_verbs = ['chat', 'subscribe']
def get_subscription_channels(self, **kwargs):
return ['chatroom']
def chat(self, *args, **kwargs):
errors = {}
        if 'name' not in kwargs or len(kwargs['name']) == 0:
            errors['name'] = 'Specify a name'
        if 'message' not in kwargs or len(kwargs['message']) == 0:
            errors['message'] = 'Enter a chat message'
if errors:
self.send_error(errors)
else:
self.send({'status': 'ok'})
self.publish(self.get_subscription_channels(), kwargs)
route_handler.register(ChatRouter)
|
the-stack_106_18260
|
#!/usr/bin/python3
#######
#
# Runs command line programs and then saves the output to a file
#
#######
import subprocess
import sys
#######
# A check to make sure that we aren't going to try to read from/write to something that doesn't exist
if (len(sys.argv) != 4):
print("Correct format is <program name> <input filename> <output file name> <num of nodes>")
sys.exit()
# create list of graph files
#
# opens and closes the INPUT file
# reads in all the lines, removes the newline character, and stores them in the graphList list
with open(sys.argv[1]) as f:
graphList = f.read().splitlines()
# open the OUTPUT file in APPEND mode
outfile = open(sys.argv[2], "a")
runtimeMicroSec = 0
runtimeMilliSec = 0.000
timeString = ""
a = len(graphList)
# loop over the list
#
for i in list(range(a)):
# zero out the timeString
timeString = ""
# set the graphfilename
graphfilename = graphList[i]
# print name of graph file to outfile
#
outfile.write(graphfilename + "\n")
outfile.write("\n")
j = 1
while (j <= int(sys.argv[3])):
timeString = "" # zero it out
outfile.write("Number of nodes: " + str(j) + "\n")
outfile.write("\n")
for k in range(5):
runtimeMicroSec = 0
runtimeMilliSec = 0.000
result = subprocess.run(['./CUDA_FW', graphfilename, 'next', 'no_save_dist', 'no_save_next', 'no_console', 'no_check'], stdout=subprocess.PIPE).stdout.decode('utf-8')
runtimeMicroSec = int(result)
runtimeMilliSec = runtimeMicroSec / 1000
if (k < 4):
timeString += str(runtimeMilliSec) + ","
else:
timeString += (str(runtimeMilliSec) + " milliseconds\n")
#### end k for
outfile.write(timeString + "\n")
outfile.write("\n")
j += 1
#### end j for
outfile.write("-------\n")
outfile.write("\n")
#### end i for
outfile.close()
|
the-stack_106_18261
|
import sys, yaml, os, pytz, pyaml, json, re
from os.path import exists, join, isdir
from subprocess import Popen, PIPE
from copy import deepcopy
from elasticsearch.helpers import scan
if (sys.version_info > (3, 0)):
PY3 = True
string = str
else:
PY3 = False
string = basestring
Exception = StandardError
try:
from textwrap import indent
except ImportError:
def indent(s, ind):
result = []
for line in s.splitlines(1):
result.append(ind+line)
return ''.join(result)
import logging
log = logging.getLogger('elastico.util')
from datetime import datetime,date
from dateutil.parser import parse as dt_parse
def start_of_day(dt):
return datetime.combine(to_dt(dt).date(), datetime.min.time())
def end_of_day(dt):
return datetime.combine(to_dt(dt).date(), datetime.max.time())
def dt_isoformat(dt, sep='T', timespec='seconds'):
if not isinstance(dt, (datetime, date)):
dt = dt_parse(dt)
try:
result = dt.isoformat(sep, timespec)
result = result.rsplit('+', 1)[0]
except TypeError:
result = dt.isoformat(sep)
result = result.rsplit('+', 1)[0]
if timespec == 'hours':
result = result.split(':')[0]
elif timespec == 'minutes':
result = result.rsplit(':', 1)[0]
elif timespec == 'seconds':
if '.' in result:
result = result.rsplit('.', 1)[0]
else:
raise Exception("timespec %s not supported", timespec)
return result+"Z"
def to_dt(x, *args, **kwargs):
if isinstance(x, date):
x = dt_isoformat(x)
if not isinstance(x, datetime):
x = dt_parse(x, *args, **kwargs)
if x.tzinfo is None:
return pytz.UTC.localize(x)
else:
return x
def get_netrc_login_data(data, name):
"""
raises LookupError, in case "name" not in "data"
:returns:
"""
# netrc configuration
nrc = data.get(name, {})
return get_netrc_login_data_from_value(nrc)
def get_netrc_login_data_from_value(nrc):
if not nrc:
raise LookupError("no netrc data present")
if not isinstance(nrc, dict):
filename = None
machine = nrc
else:
filename = nrc.get('file')
machine = nrc.get('machine')
if machine is None:
raise LookupError("no netrc data present")
if nrc:
import netrc
(user, account, password) = netrc.netrc(filename).authenticators(machine)
return (user, password)
def read_config_dir(path, config, name, recursive=False):
'''read configuration files and extend config
Read all yaml files from directory `path` (recursive) and extract all YAML
documents (also multidocument YAML files) and append to configuration list
named `name`.
'''
if name not in config:
config[name] = []
path = path.format(**config)
if not exists(path): return
if recursive:
for root, dirs, files in os.walk(path):
for fn in files:
with open(join(root, fn), 'r') as f:
for _doc in yaml.load_all(f):
config[name].append(_doc)
else:
for fn in os.listdir(path):
_fn = join(path, fn)
if isdir(_fn): continue
with open(_fn, 'r') as f:
for _doc in yaml.load_all(f):
config[name].append(_doc)
def get_config_value(config, key, default=None):
key_parts = key.split('.')
try:
result = format_value(config, config.get(key_parts[0], default))
except Exception as e:
log.debug("error in formatting %s", e)
return default
for k in key_parts[1:]:
if k not in result:
return default
result = result[k]
return result
def format_value(data, current=None):
if current is None:
current = data
if isinstance(current, string):
return current.format(**data)
if isinstance(current, (list, tuple)):
return [format_value(data, v) for v in current]
if isinstance(current, dict):
result = {}
for k,v in current.items():
result[k] = format_value(data, v)
return result
else:
return current
#
# except Exception as e:
# log.debug("error formatting %s: %s", current, e)
# return default
def first_value(d):
'''return the first value of dictionary d'''
if PY3:
return list(d.values())[0]
else:
return d.values()[0]
def write_output(config, data):
output_format = config.get('output_format', 'yaml')
if output_format == 'yaml':
pyaml.p(data)
elif output_format == 'json':
print(json.dumps(data, indent=2))
def sendmail(host='localhost', port=0, use_ssl=False,
username=None, password=None,
sender=None, recipients=[], message=''):
log.debug("sendmail")
if use_ssl:
from smtplib import SMTP_SSL as SMTP
else:
from smtplib import SMTP
smtp = SMTP()
smtp.connect(host=host, port=port)
# if user and password are given, use them to smtp.login(user, pass)
if username is not None:
smtp.login(username, password)
result = smtp.sendmail(sender, recipients, message)
smtp.quit()
return result
def run_command(kwargs, data=None):
log = logging.getLogger('elastico.util.command')
log.debug("run_command -- kwargs=%s", kwargs)
if isinstance(kwargs, string):
kwargs = {'args': kwargs, 'shell': True}
elif isinstance(kwargs, (list, tuple)):
kwargs = {'args': kwargs}
else:
kwargs = dict(kwargs)
if isinstance(kwargs['args'], string):
if 'shell' not in kwargs:
kwargs['shell'] = True
def _get_capture_value(name):
if name in kwargs:
return kwargs.pop(name)
elif data is not None and name in data:
return data[name]
else:
            return False
capture_stdout = _get_capture_value('stdout')
capture_stderr = _get_capture_value('stderr')
if 'input' in kwargs:
input = kwargs.pop('input')
kwargs['stdin'] = PIPE
else:
input = None
log.info("Popen -- kwargs=%s", kwargs)
p = Popen(stdout=PIPE, stderr=PIPE, **kwargs)
(stdout, stderr) = p.communicate(input)
result = p.wait()
if data is not None:
_result = {}
if capture_stdout:
if stdout.count("\n".encode('utf-8')) == 1:
stdout = stdout.strip()
_result['stdout'] = stdout
if capture_stderr:
_result['stderr'] = stderr
_result['exit_code'] = result
data['result'] = _result
return (result, stdout, stderr)
    log.debug("capture_stdout=%s, capture_stderr=%s", capture_stdout, capture_stderr)
    return (result, stdout, stderr)
def stripped(s, count=100):
if len(s) > count:
s = s[:count]+"..."
return s
def get_alerts(_alerts, context):
from .config import Config
if isinstance(_alerts, list):
result = {}
for a in _alerts:
result[a['type']] = a
_alerts = result
else:
for k,v in _alerts:
if 'type' not in v:
v['type'] = k
return Config.object(_alerts)
def F(s):
return s.format(**sys._getframe(1).f_locals)
def slugify(s, strip_=False, prefix_=None, suffix_=None):
log.debug("func='slugify' s=%r", s)
result = re.sub(r'[^\w]+', '_', s.lower())
if result == '_':
result = 'x'
if prefix_ is not None:
if result.startswith('_'):
result = prefix_+result
if suffix_ is not None:
if result.endswith('_'):
result = result+suffix_
if strip_:
result = result.strip('_')
# if result.endswith('_'):
# result += 'x'
# if result.startswith('_'):
# #result = 'x'+result
log.debug("func='slugify' result=%r", result)
return result
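# Minimal self-check sketch (not part of the original module): exercises a few of
# the pure helpers above; the expected values in the comments were worked out by
# hand and are only illustrative.
if __name__ == '__main__':
    print(slugify('Hello, World!', strip_=True))   # -> 'hello_world'
    print(dt_isoformat('2018-01-02 03:04:05'))     # -> '2018-01-02T03:04:05Z'
    print(first_value({'a': 1}))                   # -> 1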
|
the-stack_106_18262
|
# The Raw ElasticSearch functions, no frills, just wrappers around the HTTP calls
import requests, json, urllib
from models import QueryBuilder
class ESWireException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
##################################################################
## Connection to the index
class Connection(object):
def __init__(self, host, index, port=9200, auth=None, verify_ssl=True):
self.host = host
self.index = index
self.port = port
self.auth = auth
self.verify_ssl = verify_ssl
# make sure that host starts with "http://" or equivalent
if not self.host.startswith("http"):
self.host = "http://" + self.host
# some people might tack the port onto the host
if len(self.host.split(":")) > 2:
self.port = self.host[self.host.rindex(":") + 1:]
self.host = self.host[:self.host.rindex(":")]
def make_connection(connection, host, port, index, auth=None):
if connection is not None:
return connection
return Connection(host, index, port, auth)
####################################################################
## URL management
def elasticsearch_url(connection, type=None, endpoint=None, params=None, omit_index=False):
index = connection.index
host = connection.host
port = connection.port
# normalise the indexes input
if omit_index:
index = ""
elif index is None and not omit_index:
index = "_all"
if isinstance(index, list):
index = ",".join(index)
# normalise the types input
if type is None:
type = ""
if isinstance(type, list):
type = ",".join(type)
# normalise the host
if not host.startswith("http"):
host = "http://" + host
if host.endswith("/"):
host = host[:-1]
if port is not None:
host += ":" + str(port)
host += "/"
url = host + index
if type is not None and type != "":
url += "/" + type
if endpoint is not None:
if not url.endswith("/"):
url += "/"
url += endpoint
# FIXME: NOT URL SAFE - do this properly
if params is not None:
args = []
for k, v in params.iteritems():
args.append(k + "=" + v)
q = "&".join(args)
url += "?" + q
return url
###############################################################
## HTTP Requests
def _do_head(url, conn, **kwargs):
if conn.auth is not None:
if kwargs is None:
kwargs = {}
kwargs["auth"] = conn.auth
kwargs["verify"] = conn.verify_ssl
return requests.head(url, **kwargs)
def _do_get(url, conn, **kwargs):
if conn.auth is not None:
if kwargs is None:
kwargs = {}
kwargs["auth"] = conn.auth
kwargs["verify"] = conn.verify_ssl
return requests.get(url, **kwargs)
def _do_post(url, conn, data=None, **kwargs):
if conn.auth is not None:
if kwargs is None:
kwargs = {}
kwargs["auth"] = conn.auth
kwargs["verify"] = conn.verify_ssl
return requests.post(url, data, **kwargs)
def _do_put(url, conn, data=None, **kwargs):
if conn.auth is not None:
if kwargs is None:
kwargs = {}
kwargs["auth"] = conn.auth
kwargs["verify"] = conn.verify_ssl
return requests.put(url, data, **kwargs)
def _do_delete(url, conn, **kwargs):
if conn.auth is not None:
if kwargs is None:
kwargs = {}
kwargs["auth"] = conn.auth
kwargs["verify"] = conn.verify_ssl
return requests.delete(url, **kwargs)
# 2016-11-09 TD : A new search interface returning different output formats, e.g. csv
# : Needs plugin org.codelibs/elasticsearch-dataformat/[version tag] ,
# : (see https://github.com/codelibs/elasticsearch-dataformat for any details!)
###############################################################
## Dataformat Search
def data(connection, type=None, query=None, fmt="csv", method="POST", url_params=None):
if url_params is None:
url_params = { "format" : fmt }
elif not isinstance(url_params, dict):
url_params = { "format" : fmt }
else:
url_params["format"] = fmt
url = elasticsearch_url(connection, type, "_data", url_params)
if query is None:
query = QueryBuilder.match_all()
if not isinstance(query, dict):
query = QueryBuilder.query_string(query)
resp = None
if method == "POST":
headers = {"content-type" : "application/json"}
resp = _do_post(url, connection, data=json.dumps(query), headers=headers)
elif method == "GET":
resp = _do_get(url + "&source=" + urllib.quote_plus(json.dumps(query)), connection)
return resp
###############################################################
## Regular Search
def search(connection, type=None, query=None, method="POST", url_params=None):
url = elasticsearch_url(connection, type, "_search", url_params)
if query is None:
query = QueryBuilder.match_all()
if not isinstance(query, dict):
query = QueryBuilder.query_string(query)
resp = None
if method == "POST":
headers = {"content-type" : "application/json"}
resp = _do_post(url, connection, data=json.dumps(query), headers=headers)
elif method == "GET":
resp = _do_get(url + "?source=" + urllib.quote_plus(json.dumps(query)), connection)
return resp
def unpack_result(requests_response):
j = requests_response.json()
return unpack_json_result(j)
def unpack_json_result(j):
objects = [i.get("_source") if "_source" in i else i.get("fields") for i in j.get('hits', {}).get('hits', [])]
return objects
def get_facet_terms(json_result, facet_name):
return json_result.get("facets", {}).get(facet_name, {}).get("terms", [])
#################################################################
## Scroll search
# 2018-12-19 TD : raise default keepalive value to '10m'
# def initialise_scroll(connection, type=None, query=None, keepalive="1m"):
def initialise_scroll(connection, type=None, query=None, keepalive="10m"):
return search(connection, type, query, url_params={"scroll" : keepalive})
# 2018-12-19 TD : see simply previous comment
# def scroll_next(connection, scroll_id, keepalive="1m"):
def scroll_next(connection, scroll_id, keepalive="10m"):
url = elasticsearch_url(connection, endpoint="_search/scroll", params={"scroll_id" : scroll_id, "scroll" : keepalive}, omit_index=True)
resp = _do_get(url, connection)
return resp
def scroll_timedout(requests_response):
return requests_response.status_code == 500
def unpack_scroll(requests_response):
j = requests_response.json()
objects = unpack_json_result(j)
sid = j.get("_scroll_id")
return objects, sid
#################################################################
## Record retrieval
def get(connection, type, id):
url = elasticsearch_url(connection, type, endpoint=id)
resp = _do_get(url, connection)
return resp
def unpack_get(requests_response):
j = requests_response.json()
return j.get("_source")
def mget(connection, type, ids, fields=None):
if ids is None:
raise ESWireException("mget requires one or more ids")
docs = {"docs" : []}
if fields is None:
docs = {"ids" : ids}
else:
fields = [] if fields is None else fields if isinstance(fields, list) else [fields]
for id in ids:
docs["docs"].append({"_id" : id, "fields" : fields})
url = elasticsearch_url(connection, type, endpoint="_mget")
resp = _do_post(url, connection, data=json.dumps(docs))
return resp
def unpack_mget(requests_response):
j = requests_response.json()
objects = [i.get("_source") if "_source" in i else i.get("fields") for i in j.get("docs")]
return objects
####################################################################
## Mappings
def put_mapping(connection, type=None, mapping=None, make_index=True, es_version="0.90.13"):
if mapping is None:
raise ESWireException("cannot put empty mapping")
if not index_exists(connection):
if make_index:
create_index(connection)
else:
raise ESWireException("index '" + str(connection.index) + "' does not exist")
if es_version.startswith("0.9"):
url = elasticsearch_url(connection, type, "_mapping")
r = _do_put(url, connection, json.dumps(mapping))
return r
elif es_version.startswith("1."):
url = elasticsearch_url(connection, "_mapping", type)
r = _do_put(url, connection, json.dumps(mapping))
return r
elif es_version.startswith("2."):
url = elasticsearch_url(connection, "_mapping", type)
r = _do_put(url, connection, json.dumps(mapping))
return r
def has_mapping(connection, type, es_version="0.90.13"):
if es_version.startswith("0.9"):
url = elasticsearch_url(connection, type, endpoint="_mapping")
resp = _do_get(url, connection)
return resp.status_code == 200
elif es_version.startswith("1."):
url = elasticsearch_url(connection, "_mapping", type)
resp = _do_get(url, connection)
return resp.status_code == 200
elif es_version.startswith("2."):
url = elasticsearch_url(connection, "_mapping", type)
resp = _do_get(url, connection)
return resp.status_code == 200
def get_mapping(connection, type, es_version="0.90.13"):
if es_version.startswith("0.9"):
url = elasticsearch_url(connection, type, endpoint="_mapping")
resp = _do_get(url, connection)
return resp
elif es_version.startswith("1."):
url = elasticsearch_url(connection, "_mapping", type)
resp = _do_get(url, connection)
return resp
elif es_version.startswith("2."):
url = elasticsearch_url(connection, "_mapping", type)
resp = _do_get(url, connection)
return resp
##########################################################
## Existence checks
def type_exists(connection, type, es_version="0.90.13"):
url = elasticsearch_url(connection, type)
if es_version.startswith("0"):
resp = _do_get(url, connection)
else:
resp = _do_head(url, connection)
return resp.status_code == 200
def index_exists(connection):
iurl = elasticsearch_url(connection, endpoint="_mapping")
resp = _do_get(iurl, connection)
return resp.status_code == 200
###########################################################
## Index create
def create_index(connection, mapping=None):
iurl = elasticsearch_url(connection)
if mapping is None:
resp = _do_post(iurl, connection)
else:
resp = _do_post(iurl, connection, data=json.dumps(mapping))
return resp
############################################################
## Store records
def store(connection, type, record, id=None, params=None):
url = elasticsearch_url(connection, type, endpoint=id, params=params)
resp = None
if id is not None:
resp = _do_put(url, connection, data=json.dumps(record))
else:
resp = _do_post(url, connection, data=json.dumps(record))
return resp
def bulk(connection, type, records, idkey='id'):
data = ''
for r in records:
data += json.dumps( {'index':{'_id':r[idkey]}} ) + '\n'
data += json.dumps( r ) + '\n'
url = elasticsearch_url(connection, type, endpoint="_bulk")
resp = _do_post(url, connection, data=data)
return resp
############################################################
## Delete records
def delete(connection, type=None, id=None):
url = elasticsearch_url(connection, type, endpoint=id)
resp = _do_delete(url, connection)
return resp
def delete_by_query(connection, type, query, es_version="0.90.13"):
url = elasticsearch_url(connection, type, endpoint="_query")
if "query" in query and es_version.startswith("0.9"):
# we have to unpack the query, as the endpoint covers that
query = query["query"]
resp = _do_delete(url, connection, data=json.dumps(query))
return resp
##############################################################
## Refresh
def refresh(connection):
url = elasticsearch_url(connection, endpoint="_refresh")
resp = _do_post(url, connection)
return resp
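#################################################################
## Usage sketch (not part of the original module): host, index and type names
## are hypothetical and a running Elasticsearch node is assumed.
#
# conn = Connection("localhost", "myindex", port=9200)
# if not index_exists(conn):
#     create_index(conn)
# resp = search(conn, type="record", query="title:test")
# records = unpack_result(resp)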
|
the-stack_106_18263
|
#!/usr/bin/env python3
from reportlab.pdfgen.canvas import Canvas
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfbase import pdfmetrics
from random import randint
def printHeader(doc,topmargin=770, leftmargin=30):
doc.drawString(leftmargin,topmargin,"Naam:________________________________")
def printColumns(doc, exercises, columns=2,topmargin=740, bottommargin=50, leftmargin=30):
number = len(exercises)
rows = int(number/columns)
space = int((topmargin-bottommargin)//((number/columns)-1))
som = []
for x in range(0,number):
som.append("{: >3d} {:^3s} {: >3d} {:_<15s}".format(*exercises[x],"="))
if len(som) == 2:
regel = "{:<40}{:<40}".format(*som)
doc.drawString(leftmargin,topmargin - ((x//2)*space),regel)
som = []
def addPlusExcercises(number, start=1, end=9):
return generateExerciseList(number,'+', start,end)
def addMinusExcercises(number, start=1, end=9):
return generateExerciseList(number,'-', start,end)
def addMultipleExcercises(number, start=1, end=9):
return generateExerciseList(number,'x', start,end)
def addPartExcercises(number, start=1, end=9):
return generateExerciseList(number,'/', start,end)
def validateOperands(lvalue, rvalue, operator):
if operator == '-':
if lvalue < rvalue:
lvalue, rvalue = rvalue, lvalue
if operator == '/':
if rvalue == 0:
rvalue = 1
return (lvalue,operator,rvalue)
def generateExerciseList(number, operator, start=1, end=9):
exercises = []
for x in range(0,number):
duplicate = True
while duplicate:
if(operator == '/'):
lvalue = randint(2, 10)
else:
lvalue = randint(start, end)
rvalue = randint(start, end)
som = validateOperands(rvalue,lvalue,operator)
if som not in exercises:
duplicate = False
exercises.append(som)
return exercises
def mix(exercises):
mixed = []
while (len(exercises) != 0):
mixed.append( exercises.pop(randint(0, len(exercises)-1)))
return mixed
if __name__ == "__main__":
exercises = []
# exercises += addMultipleExcercises(4)
# exercises += addMultipleExcercises(10,3,7)
# exercises += addMultipleExcercises(10,6,9)
# exercises += addPlusExcercises(5,15,70)
# exercises += addMinusExcercises(5,15,70)
exercises += addPartExcercises(30,10,100)
canvas = Canvas("worksheet.pdf")
pdfmetrics.registerFont(TTFont("OpenDyslexicMono-Regular", "fonts/OpenDyslexicMono-Regular.ttf"))
canvas.setFont('OpenDyslexicMono-Regular', 10)
printHeader(canvas)
printColumns(canvas, mix(exercises))
canvas.save()
|
the-stack_106_18264
|
__author__ = 'Aaron Yang'
__email__ = '[email protected]'
__date__ = '6/29/2020 11:37 AM'
def merge_dictionaries(a, b):
return {**a, **b}
a = { 'x': 1, 'y': 2}
b = { 'y': 3, 'z': 4}
print(merge_dictionaries(a, b))
# {'y': 3, 'x': 1, 'z': 4}
def most_frequent(list):
print(list.count(1)) # 3
return sorted(set(list), key = list.count, reverse = True)
list = [1,2,1,2,3,2,1,4,2]
print(most_frequent(list))
print(list[::-1])
|
the-stack_106_18265
|
# Reference: https://github.com/RocketFlash/CAP_augmentation
import cv2
import numpy as np
import random
from glob import glob
import random
# how to use: aug = cap_aug(p=0.5, n_objects_range=[1, 3], glob_split='_', retry_iters=30, min_inter_area=10, glob_suffix='*.png'); aug(image, annots)
# the glob path for the pasted object images is PATH_ROOT + CLS + glob_split + OBJ + glob_suffix
PATH_ROOT = r'/path/to/pasted objects/'
CLS = ['car', 'person', 'bus', 'motorbike', 'bicycle']
PROB_CLS = [0.0, 0.1, 0.3, 0.3, 0.3]
OBJ = ['small', 'medium', 'large']
PROB_OBJ = [[1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1/3, 1/3, 1/3], [1/3, 1/3, 1/3], [1.0, 0.0, 0.0]]
# so the files of pasted objects match: /path/to/pasted objects/(car|person|bus|motorbike|bicycle)_(small|medium|large)*.png (used as a glob pattern)
def resize_keep_ar(image, height=500, scale=None):
"image:HWC"
if scale is not None:
image = cv2.resize(image, None, fx=float(scale), fy=float(scale))
else:
r = height / float(image.shape[0])
width = r * image.shape[1]
width = 1 if int(width) == 0 else int(width)
image = cv2.resize(image, (int(width), int(height)))
return image
class CAP_AUG_Multiclass(object):
    '''
    cap_augs - list of CAP_AUG instances, one per class
    probabilities - list of probabilities, one per augmentation
    p - probability of applying the whole multiclass augmentation
    '''
def __init__(self, cap_augs, probabilities, p=0.5):
self.cap_augs = cap_augs
self.probabilities = probabilities
self.p = p
assert len(self.cap_augs) == len(self.probabilities)
def __call__(self, image, annots=None):
if random.uniform(0, 1) > self.p:
return image, annots
return self.generate_objects(image, annots)
def generate_objects(self, image, annots=None):
result_image = image.copy()
total_result_coords = []
total_semantic_masks = []
total_instance_masks = []
for cap_aug, p in zip(self.cap_augs, self.probabilities):
# return image_dst, {'coords_all': coords_all, 'semantic_mask': semantic_mask, 'instance_mask': instance_mask}
if p >= np.random.uniform(0, 1, size=1):
result_image, result_dict = cap_aug(result_image, annots)
# result_image, result_coords, semantic_mask, instance_mask =
if annots is None:
total_result_coords.append(result_dict['coords_all'])
total_semantic_masks.append(result_dict['semantic_mask'])
total_instance_masks.append(result_dict['instance_mask'])
else:
# print(result_dict)
annots['bbox'] = np.vstack((result_dict['bbox'], annots['bbox']))
annots['cls'] = np.hstack((np.reshape(result_dict['cls'], (-1)), annots['cls']))
# print(annots)
if annots is None:
if len(total_result_coords) > 0:
total_result_coords = np.vstack(total_result_coords)
return result_image, {'total_result_coords': total_result_coords, 'total_semantic_masks': total_semantic_masks, 'total_instance_masks': total_instance_masks}
else:
return result_image, annots
class CAP_AUG(object):
'''
source_images - list of images paths
bev_transform - bird's eye view transformation
probability_map - mask with probability values
    mean_h_norm - mean normalized height
n_objects_range - [min, max] number of objects
s_range - range of scales of original image size
h_range - range of objects heights
if bev_transform is not None range in meters, else in pixels
x_range - if bev_transform is None -> range in the image coordinate system (in pixels) [int, int]
else -> range in camera coordinate system (in meters) [float, float]
y_range - if bev_transform is None -> range in the image coordinate system (in pixels) [int, int]
else -> range in camera coordinate system (in meters) [float, float]
z_range - if bev_transform is None -> range in the image coordinate system (in pixels) [int, int]
else -> range in camera coordinate system (in meters) [float, float]
objects_idxs - objects indexes from dataset to paste [idx1, idx2, ...]
random_h_flip - source image random horizontal flip
random_v_flip - source image random vertical flip
histogram_matching - apply histogram matching
hm_offset - histogram matching offset
blending_coeff - coefficient of image blending
image_format - color image format : {bgr, rgb}
coords_format - output coordinates format: {xyxy, xywh, yolo}
    normilized_range - range in normalized image coordinates (all values are in range [0, 1])
class_idx - class id to result bounding boxes, output bboxes will be in [x1, y1, x2, y2, class_idx] format
albu_transforms - albumentations transformations applied to pasted objects
'''
def __init__(self, source_images,
retry_iters=50,
min_inter_area=10,
n_objects_range=[1, 3],
h_range=None,
z_range=None,
y_range=None,
s_range=None,
x_range=None,
# s_range=[0.5, 1.5],
# x_range=[200, 500],
# y_range=[100, 300],
# z_range=[0, 0],
objects_idxs=None,
random_h_flip=True,
random_v_flip=False,
image_format='bgr',
coords_format='xyxy',
class_idx=None,
albu_transforms=None):
if isinstance(source_images, str):
self.source_images = glob(source_images)
else: # list
self.source_images = source_images
self.retry_iters = retry_iters
self.min_inter_area = min_inter_area
self.bboxes = []
self.n_objects_range = n_objects_range
self.s_range = s_range
self.h_range = h_range
self.x_range = x_range
self.y_range = y_range
self.z_range = z_range
self.objects_idxs = objects_idxs
self.random_h_flip = random_h_flip
self.random_v_flip = random_v_flip
self.image_format = image_format
self.coords_format = coords_format
self.class_idx = class_idx
self.albu_transforms = albu_transforms
def __call__(self, image, annots=None):
# img HWC
# ann['bbox'] N*4 xyxy
# ann['cls'] 1*N
if annots is not None:
# self.bboxes = np.hstack((annots['bbox'][:, [1, 0, 3, 2]], np.reshape(annots['cls'], (-1, 1))))
self.bboxes = list(annots['bbox'][:, [1, 0, 3, 2]])
h, w, _ = image.shape
self.h_range = [min(16, h), min(64, h)]
# self.s_range = [0.5, 1.5]
self.x_range = [0, w]
self.y_range = [0, h]
image_dst, coords_all, semantic_mask, instance_mask = self.generate_objects(image)
if annots is None:
return image_dst, {'coords_all': coords_all, 'semantic_mask': semantic_mask, 'instance_mask': instance_mask}
else:
# print('corrds_all:'+str(coords_all))
bboxtmp, clstmp = np.hsplit(np.array(coords_all), [4])
# annots['bbox'] = np.vstack((bboxtmp[:, [1, 0, 3, 2]], annots['bbox']))
# annots['cls'] = np.vstack((np.reshape(clstmp, (-1)), annots['cls']))
ann = dict()
ann['bbox'] = bboxtmp[:, [1, 0, 3, 2]]
ann['cls'] = np.reshape(clstmp, (-1))
# print('ann:'+str(ann))
return image_dst, ann
def select_image(self, object_idx):
source_image_path = self.source_images[object_idx]
image_src = cv2.imread(str(source_image_path), cv2.IMREAD_UNCHANGED)
if image_src.shape[2] == 4:
if self.image_format == 'rgb':
image_src = cv2.cvtColor(image_src, cv2.COLOR_BGRA2RGBA)
return image_src
if self.image_format == 'rgb':
image_src = cv2.cvtColor(image_src, cv2.COLOR_BGR2RGBA)
else:
image_src = cv2.cvtColor(image_src, cv2.COLOR_BGR2BGRA)
return image_src
def check_bbox_no_overlap(self, x1, y1, x2, y2, coords_all):
for bbox in (self.bboxes+coords_all):
x3, y3, x4, y4 = bbox
left_max = max(x1, x3)
top_max = max(y1, y3)
right_min = min(x2, x4)
bottom_min = min(y2, y4)
inter = max(0, (right_min-left_max)) * max(0, (bottom_min-top_max))
if inter > self.min_inter_area:
return False
return True
def generate_objects(self, image):
n_objects = random.randint(*self.n_objects_range)
if self.objects_idxs is None:
objects_idxs = [random.randint(0, len(self.source_images)-1) for _ in range(n_objects)]
else:
objects_idxs = self.objects_idxs
image_dst = image.copy()
dst_h, dst_w, _ = image_dst.shape
coords_all = []
semantic_mask = np.zeros((dst_h, dst_w), dtype=np.uint8)
instance_mask = np.zeros((dst_h, dst_w), dtype=np.uint8)
for i in range(n_objects):
src_img = self.select_image(objects_idxs[i])
h, w, _ = src_img.shape
for _ in range(self.retry_iters):
point = np.random.randint(low=[self.x_range[0], self.y_range[0]],
high=[self.x_range[1], self.y_range[1]],
size=(2))
height = scale = None
if self.s_range is not None:
scale = random.uniform(*self.s_range)
# w = round(w * scale)
# h = round(h * scale)
elif self.h_range is not None:
height = random.randint(*self.h_range)
# r = height / float(h)
# h = height
# w = r * w
else:
print("s_range and h_range is both None.")
src_img = resize_keep_ar(src_img, height=height, scale=scale)
h, w, _ = src_img.shape
if w <= 0 or h <= 0 or w >= dst_w or h >= dst_h:
continue
x1, x2 = point[0]-w, point[0]
y1, y2 = point[1]-h, point[1]
# print(f'{h} {w} {x1} {y1} {x2} {y2}')
if x1 < 0 or y1 < 0:
continue
if not self.check_bbox_no_overlap(x1, y1, x2, y2, coords_all):
continue
# print(f'{h} {w} {x1} {y1} {x2} {y2}')
image_dst, mask = self.paste_object(image_dst, src_img, x1, y1, x2, y2)
curr_mask = mask/255
curr_mask = curr_mask.astype(np.uint8)
curr_mask_ins = curr_mask*(i+1)
roi_mask_sem = semantic_mask[y1:y2, x1:x2]
roi_mask_ins = instance_mask[y1:y2, x1:x2]
mask_inv = cv2.bitwise_not(curr_mask*255)
img_sem_bg = cv2.bitwise_and(roi_mask_sem, roi_mask_sem, mask=mask_inv)
img_ins_bg = cv2.bitwise_and(roi_mask_ins, roi_mask_ins, mask=mask_inv)
dst_sem = cv2.add(img_sem_bg, curr_mask)
dst_ins = cv2.add(img_ins_bg, curr_mask_ins)
semantic_mask[y1:y2, x1:x2] = dst_sem
instance_mask[y1:y2, x1:x2] = dst_ins
coords_all.append([x1, y1, x2, y2])
break
coords_all = np.array(coords_all)
if self.coords_format == 'yolo':
x = coords_all.copy()
x = x.astype(float)
dw = 1./dst_w
dh = 1./dst_h
ws = (coords_all[:, 2] - coords_all[:, 0])
hs = (coords_all[:, 3] - coords_all[:, 1])
x[:, 0] = dw * ((coords_all[:, 0] + ws/2.0)-1)
x[:, 1] = dh * ((coords_all[:, 1] + hs/2.0)-1)
x[:, 2] = dw * ws
x[:, 3] = dh * hs
coords_all = x
elif self.coords_format == 'xywh':
x = coords_all.copy()
x[:, 2] = coords_all[:, 2] - coords_all[:, 0]
x[:, 3] = coords_all[:, 3] - coords_all[:, 1]
coords_all = x
if self.class_idx is not None:
coords_all = np.c_[coords_all, self.class_idx*np.ones(len(coords_all))]
# print(coords_all)
return image_dst, coords_all, semantic_mask, instance_mask
def paste_object(self, image_dst, image_src, x1, y1, x2, y2):
src_h, src_w, _ = image_src.shape
y1_m, y2_m = 0, src_h
x1_m, x2_m = 0, src_w
if self.random_h_flip:
if random.uniform(0, 1) > 0.5:
image_src = cv2.flip(image_src, 1)
if self.random_v_flip:
if random.uniform(0, 1) > 0.5:
image_src = cv2.flip(image_src, 0)
# Simple cut and paste without preprocessing
mask_src = image_src[:, :, 3]
rgb_img = image_src[:, :, :3]
        # no resizing here; the whole source image is assumed to be the object being pasted
if self.albu_transforms is not None:
transformed = self.albu_transforms(image=rgb_img, mask=mask_src)
rgb_img = transformed['image']
mask_src = transformed['mask']
mask_inv = cv2.bitwise_not(mask_src)
# print(f'{src_h} {src_w} {x1} {y1} {x2} {y2}')
# print(image_dst[y1:y2, x1:x2].shape)
# print(image_dst.shape)
# print(mask_inv[y1_m:y2_m, x1_m:x2_m].shape)
# print(type(mask_inv[y1_m:y2_m, x1_m:x2_m][0, 0]))
img1_bg = cv2.bitwise_and(image_dst[y1:y2, x1:x2], image_dst[y1:y2,
x1:x2], mask=mask_inv[y1_m:y2_m, x1_m:x2_m])
img2_fg = cv2.bitwise_and(rgb_img[y1_m:y2_m, x1_m:x2_m], rgb_img[y1_m:y2_m,
x1_m:x2_m], mask=mask_src[y1_m:y2_m, x1_m:x2_m])
out_img = cv2.add(img1_bg, img2_fg)
mask_visible = mask_src[y1_m:y2_m, x1_m:x2_m]
image_dst[y1:y2, x1:x2] = out_img
return image_dst, mask_visible
def cap_aug(p=0.5, n_objects_range=[1, 3], glob_split='_', retry_iters=30, min_inter_area=10, glob_suffix='*.png'):
'''
    return an instance of CAP_AUG_Multiclass
'''
    # prefix sums of the class probabilities (used below to sample classes by comparing against a uniform draw)
prob_cls_presum_value = 0
prob_cls_presum = []
for prob in PROB_CLS:
prob_cls_presum_value += prob
prob_cls_presum.append(prob_cls_presum_value)
prob_obj_presum = []
for cls in PROB_OBJ:
prob_obj_presum_value = 0
prob_obj_presum_list = []
for prob in cls:
prob_obj_presum_value += prob
prob_obj_presum_list.append(prob_obj_presum_value)
prob_obj_presum.append(prob_obj_presum_list)
n_objects = random.randint(*n_objects_range)
    # build glob prefixes (class + object size) for the sampled objects;
    # path_prefix maps each prefix to [number of objects to paste, class index]
path_prefix = {}
cap_augs = []
for _ in range(n_objects):
prefix = PATH_ROOT
prob_cls_idx_value = random.uniform(0, 1)
for i in range(len(CLS)):
if prob_cls_idx_value < prob_cls_presum[i]:
# cls_idx.append(i)
prefix += CLS[i]
prob_obj_idx_value = random.uniform(0, 1)
for j in range(len(OBJ)):
if prob_obj_idx_value < prob_obj_presum[i][j]:
prefix += glob_split + OBJ[j]
if prefix in path_prefix.keys():
path_prefix[prefix][0] += 1
else:
path_prefix[prefix] = [1, i+1]
break
break
for glob_prefix, [num, idx] in path_prefix.items():
cap_augs.append(CAP_AUG(source_images=glob_prefix+glob_suffix,
retry_iters=retry_iters,
min_inter_area=min_inter_area,
s_range=[0.5, 1.5],
n_objects_range=[num, num],
random_h_flip=True,
image_format='bgr',
coords_format='xyxy',
class_idx=idx,
albu_transforms=None))
return CAP_AUG_Multiclass(cap_augs=cap_augs,
probabilities=[1]*len(cap_augs),
p=p)
# cap_aug()
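# Minimal usage sketch (illustrative only, not part of the original module): the
# background path below is a placeholder, and PATH_ROOT above must point to a real
# directory of RGBA cutout PNGs, otherwise the globs resolve to empty lists.
if __name__ == '__main__':
    background = cv2.imread('/path/to/background.jpg')  # HWC BGR frame, assumed to exist
    annots = {'bbox': np.zeros((0, 4)), 'cls': np.zeros((0,))}  # start with no boxes
    augment = cap_aug(p=1.0, n_objects_range=[2, 4])
    augmented, new_annots = augment(background, annots)
    print(new_annots['bbox'].shape, new_annots['cls'].shape)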
|
the-stack_106_18266
|
import os
import random
import soundfile as sf
import torch
import yaml
import json
import argparse
import pandas as pd
from tqdm import tqdm
from pprint import pprint
from asteroid.metrics import get_metrics
from asteroid.losses import PITLossWrapper, pairwise_neg_sisdr
from asteroid.data.wham_dataset import WhamDataset
from asteroid.utils import tensors_to_device
from model import load_best_model
parser = argparse.ArgumentParser()
parser.add_argument('--task', type=str, required=True,
help='One of `enh_single`, `enh_both`, '
'`sep_clean` or `sep_noisy`')
parser.add_argument('--test_dir', type=str, required=True,
help='Test directory including the json files')
parser.add_argument('--use_gpu', type=int, default=0,
help='Whether to use the GPU for model execution')
parser.add_argument('--exp_dir', default='exp/tmp',
help='Experiment root')
parser.add_argument('--n_save_ex', type=int, default=50,
help='Number of audio examples to save, -1 means all')
compute_metrics = ['si_sdr', 'sdr', 'sir', 'sar', 'stoi']
def main(conf):
model = load_best_model(conf['train_conf'], conf['exp_dir'])
# Handle device placement
if conf['use_gpu']:
model.cuda()
model_device = next(model.parameters()).device
test_set = WhamDataset(conf['test_dir'], conf['task'],
sample_rate=conf['sample_rate'],
nondefault_nsrc=model.masker.n_src,
segment=None) # Uses all segment length
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from='pw_mtx')
# Randomly choose the indexes of sentences to save.
ex_save_dir = os.path.join(conf['exp_dir'], 'examples/')
if conf['n_save_ex'] == -1:
conf['n_save_ex'] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf['n_save_ex'])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
est_sources = model(mix[None, None])
loss, reordered_sources = loss_func(est_sources, sources[None],
return_est=True)
mix_np = mix[None].cpu().data.numpy()
sources_np = sources.squeeze().cpu().data.numpy()
est_sources_np = reordered_sources.squeeze().cpu().data.numpy()
utt_metrics = get_metrics(mix_np, sources_np, est_sources_np,
sample_rate=conf['sample_rate'])
utt_metrics['mix_path'] = test_set.mix[idx][0]
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, 'ex_{}/'.format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np[0],
conf['sample_rate'])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(local_save_dir + "s{}.wav".format(src_idx+1), src,
conf['sample_rate'])
for src_idx, est_src in enumerate(est_sources_np):
sf.write(local_save_dir + "s{}_estimate.wav".format(src_idx+1),
est_src, conf['sample_rate'])
# Write local metrics to the example folder.
with open(local_save_dir + 'metrics.json', 'w') as f:
json.dump(utt_metrics, f, indent=0)
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(conf['exp_dir'], 'all_metrics.csv'))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = 'input_' + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + '_imp'] = ldf.mean()
print('Overall metrics :')
pprint(final_results)
with open(os.path.join(conf['exp_dir'], 'final_metrics.json'), 'w') as f:
json.dump(final_results, f, indent=0)
if __name__ == '__main__':
args = parser.parse_args()
arg_dic = dict(vars(args))
# Load training config
conf_path = os.path.join(args.exp_dir, 'conf.yml')
with open(conf_path) as f:
train_conf = yaml.safe_load(f)
arg_dic['sample_rate'] = train_conf['data']['sample_rate']
arg_dic['train_conf'] = train_conf
if args.task != arg_dic['train_conf']['data']['task']:
print("Warning : the task used to test is different than "
"the one from training, be sure this is what you want.")
main(arg_dic)
|
the-stack_106_18269
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
=============
TAP plus
=============
@author: Juan Carlos Segovia
@contact: [email protected]
European Space Astronomy Centre (ESAC)
European Space Agency (ESA)
Created on 30 jun. 2016
"""
from astropy.extern import six
try:
from six.moves.tkinter import Tk as TKTk
except ImportError:
TKTk = None
try:
from six.moves.tkinter import Toplevel as TKToplevel
except ImportError:
TKToplevel = None
try:
from six.moves.tkinter import Button as TKButton
except ImportError:
TKButton = None
try:
from six.moves.tkinter import Label as TKLabel
except ImportError:
TKLabel = None
try:
from six.moves.tkinter import Entry as TKEntry
except ImportError:
TKEntry = None
class LoginDialog(object):
def __init__(self, host):
self.__interna_init()
self.__host = host
self.__initialized = False
if TKTk is not None:
self.__create_content()
self.__initialized = True
def __interna_init(self):
self.__rootFrame = None
self.__top = None
self.__usrEntry = None
self.__pwdEntry = None
self.__accepted = False
self.__host = None
self.__usr = None
self.__pwd = None
def __cancel_action(self):
self.__accepted = False
self.__rootFrame.destroy()
def __login_action(self):
self.__accepted = True
self.__usr = self.__usrEntry.get()
self.__pwd = self.__pwdEntry.get()
self.__rootFrame.destroy()
def __enter_action(self, event):
self.__login_action()
def __create_content(self):
self.__rootFrame = TKTk()
self.__rootFrame.withdraw()
self.__top = TKToplevel(self.__rootFrame)
self.__top.title("Login")
self.__top.protocol("WM_DELETE_WINDOW", self.__rootFrame.destroy)
self.__top.bind('<Return>', self.__enter_action)
self.__top.update_idletasks()
width = self.__top.winfo_width()
height = self.__top.winfo_height()
x = (self.__top.winfo_screenwidth() // 2) - (width // 2)
y = (self.__top.winfo_screenheight() // 2) - (height // 2)
self.__top.geometry("+%d+%d" % (x, y))
row = 0
expLabel = TKLabel(self.__top, text='Login to host:')
expLabel.grid(row=row, column=0, columnspan=4, padx=5, pady=2)
row = row+1
urlLabel = TKLabel(self.__top, text=self.__host)
urlLabel.grid(row=row, column=0, columnspan=4, padx=5, pady=2)
row = row+1
usrLabel = TKLabel(self.__top, text='User')
usrLabel.grid(row=row, column=0, columnspan=2, padx=20, pady=5)
self.__usrEntry = TKEntry(self.__top, width=20)
self.__usrEntry.grid(row=row, column=2, columnspan=2, padx=5, pady=5)
row = row+1
pwdLabel = TKLabel(self.__top, text='Password')
pwdLabel.grid(row=row, column=0, columnspan=2, padx=20, pady=5)
self.__pwdEntry = TKEntry(self.__top, width=20, show="*")
self.__pwdEntry.grid(row=row, column=2, columnspan=2, padx=5, pady=5)
row = row+1
cancelButton = TKButton(self.__top, text='Cancel', command=self.__cancel_action)
cancelButton.grid(row=row, column=1, padx=5, pady=5)
loginButton = TKButton(self.__top, text='Login', command=self.__login_action)
loginButton.grid(row=row, column=2, padx=5, pady=5)
def show_login(self):
if self.__initialized:
self.__usrEntry.focus_set()
self.__rootFrame.mainloop()
else:
print("tkinter python module is not available.\n\
Please, install tkinter module or use command line login utility.")
def is_accepted(self):
return self.__accepted
def get_user(self):
return self.__usr
def get_password(self):
return self.__pwd
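# Usage sketch (assumes a desktop session where tkinter is importable; the host below
# is only an example string shown in the dialog):
#   dialog = LoginDialog("https://gea.esac.esa.int/tap-server/tap")
#   dialog.show_login()
#   if dialog.is_accepted():
#       user, password = dialog.get_user(), dialog.get_password()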
|
the-stack_106_18270
|
#!/usr/bin/env python3
# coding:utf-8
# $Id: tktest06a_array.py 1303 $
# SPDX-License-Identifier: BSD-2-Clause
import tkinter as tk
from tkinter import ttk
root = tk.Tk()
height = 5
width = 5
for i in range(height): #Rows
for j in range(width): #Columns
entrée = str((i, j))
b = tk.Canvas(root)
lbl = ttk.Label(b, text=entrée)
        cell = ttk.Entry(b)  # ttk.Entry has no 'text' option; passing it would raise TclError
lbl.grid(row=i, column=j, sticky='ew')
cell.grid(row=i, column=j + 1, sticky='ew')
b.grid(row=i, column=j, sticky='ew')
tk.mainloop()
|
the-stack_106_18272
|
from urllib.parse import urljoin
import graphene
from django.conf import settings
from ....product.templatetags.product_images import get_thumbnail
from ...translations.enums import LanguageCodeEnum
from ..enums import (
AccountErrorCode,
AppErrorCode,
AttributeErrorCode,
ChannelErrorCode,
CheckoutErrorCode,
CollectionErrorCode,
DiscountErrorCode,
ExportErrorCode,
GiftCardErrorCode,
InvoiceErrorCode,
JobStatusEnum,
MenuErrorCode,
MetadataErrorCode,
OrderErrorCode,
OrderSettingsErrorCode,
PageErrorCode,
PaymentErrorCode,
PermissionEnum,
PermissionGroupErrorCode,
PluginErrorCode,
ProductErrorCode,
ShippingErrorCode,
ShopErrorCode,
StockErrorCode,
TranslationErrorCode,
UploadErrorCode,
WarehouseErrorCode,
WebhookErrorCode,
WeightUnitsEnum,
WishlistErrorCode,
)
from .money import VAT
class CountryDisplay(graphene.ObjectType):
code = graphene.String(description="Country code.", required=True)
country = graphene.String(description="Country name.", required=True)
vat = graphene.Field(VAT, description="Country tax.")
class LanguageDisplay(graphene.ObjectType):
code = LanguageCodeEnum(
description="ISO 639 representation of the language name.", required=True
)
language = graphene.String(description="Full name of the language.", required=True)
class Permission(graphene.ObjectType):
code = PermissionEnum(description="Internal code for permission.", required=True)
name = graphene.String(
description="Describe action(s) allowed to do by permission.", required=True
)
class Meta:
description = "Represents a permission object in a friendly form."
class Error(graphene.ObjectType):
field = graphene.String(
description=(
"Name of a field that caused the error. A value of `null` indicates that "
"the error isn't associated with a particular field."
),
required=False,
)
message = graphene.String(description="The error message.")
class Meta:
description = "Represents an error in the input of a mutation."
class AccountError(Error):
code = AccountErrorCode(description="The error code.", required=True)
class AppError(Error):
code = AppErrorCode(description="The error code.", required=True)
permissions = graphene.List(
graphene.NonNull(PermissionEnum),
description="List of permissions which causes the error.",
required=False,
)
class AttributeError(Error):
code = AttributeErrorCode(description="The error code.", required=True)
class StaffError(AccountError):
permissions = graphene.List(
graphene.NonNull(PermissionEnum),
description="List of permissions which causes the error.",
required=False,
)
groups = graphene.List(
graphene.NonNull(graphene.ID),
description="List of permission group IDs which cause the error.",
required=False,
)
users = graphene.List(
graphene.NonNull(graphene.ID),
description="List of user IDs which causes the error.",
required=False,
)
class ChannelError(Error):
code = ChannelErrorCode(description="The error code.", required=True)
class CheckoutError(Error):
code = CheckoutErrorCode(description="The error code.", required=True)
variants = graphene.List(
graphene.NonNull(graphene.ID),
description="List of varint IDs which causes the error.",
required=False,
)
class ProductWithoutVariantError(Error):
products = graphene.List(
graphene.NonNull(graphene.ID),
description="List of products IDs which causes the error.",
)
class DiscountError(ProductWithoutVariantError):
code = DiscountErrorCode(description="The error code.", required=True)
channels = graphene.List(
graphene.NonNull(graphene.ID),
description="List of channels IDs which causes the error.",
required=False,
)
class ExportError(Error):
code = ExportErrorCode(description="The error code.", required=True)
class MenuError(Error):
code = MenuErrorCode(description="The error code.", required=True)
class OrderSettingsError(Error):
code = OrderSettingsErrorCode(description="The error code.", required=True)
class MetadataError(Error):
code = MetadataErrorCode(description="The error code.", required=True)
class OrderError(Error):
code = OrderErrorCode(description="The error code.", required=True)
warehouse = graphene.ID(
description="Warehouse ID which causes the error.",
required=False,
)
order_line = graphene.ID(
description="Order line ID which causes the error.",
required=False,
)
variants = graphene.List(
graphene.NonNull(graphene.ID),
description="List of product variants that are associated with the error",
required=False,
)
class InvoiceError(Error):
code = InvoiceErrorCode(description="The error code.", required=True)
class PermissionGroupError(Error):
code = PermissionGroupErrorCode(description="The error code.", required=True)
permissions = graphene.List(
graphene.NonNull(PermissionEnum),
description="List of permissions which causes the error.",
required=False,
)
users = graphene.List(
graphene.NonNull(graphene.ID),
description="List of user IDs which causes the error.",
required=False,
)
class ProductError(Error):
code = ProductErrorCode(description="The error code.", required=True)
attributes = graphene.List(
graphene.NonNull(graphene.ID),
description="List of attributes IDs which causes the error.",
required=False,
)
class CollectionError(ProductWithoutVariantError):
code = CollectionErrorCode(description="The error code.", required=True)
class ProductChannelListingError(ProductError):
channels = graphene.List(
graphene.NonNull(graphene.ID),
description="List of channels IDs which causes the error.",
required=False,
)
class CollectionChannelListingError(ProductError):
channels = graphene.List(
graphene.NonNull(graphene.ID),
description="List of channels IDs which causes the error.",
required=False,
)
class BulkProductError(ProductError):
index = graphene.Int(
description="Index of an input list item that caused the error."
)
warehouses = graphene.List(
graphene.NonNull(graphene.ID),
description="List of warehouse IDs which causes the error.",
required=False,
)
channels = graphene.List(
graphene.NonNull(graphene.ID),
description="List of channel IDs which causes the error.",
required=False,
)
class ShopError(Error):
code = ShopErrorCode(description="The error code.", required=True)
class ShippingError(Error):
code = ShippingErrorCode(description="The error code.", required=True)
warehouses = graphene.List(
graphene.NonNull(graphene.ID),
description="List of warehouse IDs which causes the error.",
required=False,
)
channels = graphene.List(
graphene.NonNull(graphene.ID),
description="List of channels IDs which causes the error.",
required=False,
)
class PageError(Error):
code = PageErrorCode(description="The error code.", required=True)
attributes = graphene.List(
graphene.NonNull(graphene.ID),
description="List of attributes IDs which causes the error.",
required=False,
)
class PaymentError(Error):
code = PaymentErrorCode(description="The error code.", required=True)
class GiftCardError(Error):
code = GiftCardErrorCode(description="The error code.", required=True)
class PluginError(Error):
code = PluginErrorCode(description="The error code.", required=True)
class StockError(Error):
code = StockErrorCode(description="The error code.", required=True)
class BulkStockError(ProductError):
index = graphene.Int(
description="Index of an input list item that caused the error."
)
class UploadError(Error):
code = UploadErrorCode(description="The error code.", required=True)
class WarehouseError(Error):
code = WarehouseErrorCode(description="The error code.", required=True)
class WebhookError(Error):
code = WebhookErrorCode(description="The error code.", required=True)
class WishlistError(ProductWithoutVariantError):
code = WishlistErrorCode(description="The error code.", required=True)
class TranslationError(Error):
code = TranslationErrorCode(description="The error code.", required=True)
class SeoInput(graphene.InputObjectType):
title = graphene.String(description="SEO title.")
description = graphene.String(description="SEO description.")
class Weight(graphene.ObjectType):
unit = WeightUnitsEnum(description="Weight unit.", required=True)
value = graphene.Float(description="Weight value.", required=True)
class Meta:
description = "Represents weight value in a specific weight unit."
class Image(graphene.ObjectType):
url = graphene.String(required=True, description="The URL of the image.")
alt = graphene.String(description="Alt text for an image.")
class Meta:
description = "Represents an image."
@staticmethod
def get_adjusted(image, alt, size, rendition_key_set, info):
"""Return Image adjusted with given size."""
if size:
url = get_thumbnail(
image_file=image,
size=size,
method="thumbnail",
rendition_key_set=rendition_key_set,
)
else:
url = image.url
url = info.context.build_absolute_uri(url)
return Image(url, alt)
class File(graphene.ObjectType):
url = graphene.String(required=True, description="The URL of the file.")
content_type = graphene.String(
required=False, description="Content type of the file."
)
@staticmethod
def resolve_url(root, info):
return info.context.build_absolute_uri(urljoin(settings.MEDIA_URL, root.url))
class PriceRangeInput(graphene.InputObjectType):
gte = graphene.Float(description="Price greater than or equal to.", required=False)
lte = graphene.Float(description="Price less than or equal to.", required=False)
class DateRangeInput(graphene.InputObjectType):
gte = graphene.Date(description="Start date.", required=False)
lte = graphene.Date(description="End date.", required=False)
class DateTimeRangeInput(graphene.InputObjectType):
gte = graphene.DateTime(description="Start date.", required=False)
lte = graphene.DateTime(description="End date.", required=False)
class IntRangeInput(graphene.InputObjectType):
gte = graphene.Int(description="Value greater than or equal to.", required=False)
lte = graphene.Int(description="Value less than or equal to.", required=False)
class TaxType(graphene.ObjectType):
"""Representation of tax types fetched from tax gateway."""
description = graphene.String(description="Description of the tax type.")
tax_code = graphene.String(
description="External tax code used to identify given tax group."
)
class Job(graphene.Interface):
status = JobStatusEnum(description="Job status.", required=True)
created_at = graphene.DateTime(
description="Created date time of job in ISO 8601 format.", required=True
)
updated_at = graphene.DateTime(
description="Date time of job last update in ISO 8601 format.", required=True
)
message = graphene.String(description="Job message.")
@classmethod
def resolve_type(cls, instance, _info):
"""Map a data object to a Graphene type."""
MODEL_TO_TYPE_MAP = {
# <DjangoModel>: <GrapheneType>
}
return MODEL_TO_TYPE_MAP.get(type(instance))
|
the-stack_106_18276
|
import warnings
import matplotlib.pyplot as plt
import mmcv
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmdet.core import get_classes
from mmdet.datasets.pipelines import Compose
from mmdet.models import build_detector
from mmdet.ops import RoIAlign, RoIPool
def init_detector(config, checkpoint=None, device='cuda:0'):
"""Initialize a detector from config file.
Args:
config (str or :obj:`mmcv.Config`): Config file path or the config
object.
checkpoint (str, optional): Checkpoint path. If left as None, the model
will not load any weights.
Returns:
nn.Module: The constructed detector.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
config.model.pretrained = None
model = build_detector(config.model, test_cfg=config.test_cfg)
if checkpoint is not None:
checkpoint = load_checkpoint(model, checkpoint)
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
else:
warnings.simplefilter('once')
warnings.warn('Class names are not saved in the checkpoint\'s '
'meta data, use COCO classes by default.')
model.CLASSES = get_classes('coco')
model.cfg = config # save the config in the model for convenience
model.to(device)
model.eval()
return model
class LoadImage(object):
"""A simple pipeline to load image"""
def __call__(self, results):
"""Call function to load images into results
Args:
results (dict): A result dict contains the file name
of the image to be read.
Returns:
dict: ``results`` will be returned containing loaded image.
"""
if isinstance(results['img'], str):
results['filename'] = results['img']
results['ori_filename'] = results['img']
else:
results['filename'] = None
results['ori_filename'] = None
img = mmcv.imread(results['img'])
results['img'] = img
results['img_fields'] = ['img']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
return results
def inference_detector(model, img):
"""Inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
        img (str or ndarray): Image file path or a loaded image.
    Returns:
        The detection result for the given image.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# build the data pipeline
test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
test_pipeline = Compose(test_pipeline)
# prepare data
data = dict(img=img)
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
else:
# Use torchvision ops for CPU mode instead
for m in model.modules():
if isinstance(m, (RoIPool, RoIAlign)):
if not m.aligned:
# aligned=False is not implemented on CPU
# set use_torchvision on-the-fly
m.use_torchvision = True
warnings.warn('We set use_torchvision=True in CPU mode.')
# just get the actual data from DataContainer
data['img_metas'] = data['img_metas'][0].data
# forward the model
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
return result
async def async_inference_detector(model, img):
"""Async inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
        img (str or ndarray): Image file path or a loaded image.
Returns:
Awaitable detection results.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# build the data pipeline
test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
test_pipeline = Compose(test_pipeline)
# prepare data
data = dict(img=img)
data = test_pipeline(data)
data = scatter(collate([data], samples_per_gpu=1), [device])[0]
# We don't restore `torch.is_grad_enabled()` value during concurrent
# inference since execution can overlap
torch.set_grad_enabled(False)
result = await model.aforward_test(rescale=True, **data)
return result
def show_result_pyplot(model, img, result, score_thr=0.3, fig_size=(15, 10)):
"""Visualize the detection results on the image.
Args:
model (nn.Module): The loaded detector.
img (str or np.ndarray): Image filename or loaded image.
result (tuple[list] or list): The detection result, can be either
(bbox, segm) or just bbox.
score_thr (float): The threshold to visualize the bboxes and masks.
fig_size (tuple): Figure size of the pyplot figure.
"""
if hasattr(model, 'module'):
model = model.module
img = model.show_result(img, result, score_thr=score_thr, show=False)
plt.figure(figsize=fig_size)
plt.imshow(mmcv.bgr2rgb(img))
plt.show()
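# Minimal end-to-end sketch (illustrative only): the config, checkpoint and image paths
# are placeholders and must point to real files from an mmdetection installation.
if __name__ == '__main__':
    cfg_path = 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'  # assumed to exist
    ckpt_path = 'checkpoints/faster_rcnn_r50_fpn_1x_coco.pth'        # assumed to exist
    detector = init_detector(cfg_path, ckpt_path, device='cuda:0')
    result = inference_detector(detector, 'demo/demo.jpg')
    show_result_pyplot(detector, 'demo/demo.jpg', result, score_thr=0.3)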
|
the-stack_106_18277
|
import datetime
import numpy
import pandas
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
# load dataset
dataframe = pandas.read_csv("data/Car_sales.csv")
dataset = dataframe.values
# split into input (X) and output (Y) variables
prices = []
features = []
makes = {}
models = {}
vehicle_type = {}
makes_i = 0
models_i = 0
vehicle_types_i = 0
# Go through the dataset and find all the possible make, model, type classes.
for i in dataset:
d = list(i)
i_make = d[0]
i_model = d[1]
i_vehicle_type = d[4]
if i_make.lower() not in makes:
makes[i_make.lower()] = makes_i
makes_i += 1
if i_model.lower() not in models:
models[i_model.lower()] = models_i
models_i += 1
if i_vehicle_type.lower() not in vehicle_type:
vehicle_type[i_vehicle_type.lower()] = vehicle_types_i
vehicle_types_i += 1
# Clean the data.
for i in dataset:
p = i[5]
# Any nulls in the data will kill the neural network.
if str(p) == "nan":
continue
prices.append(i[5])
# Make a copy to operate on
d = list(i)
# Find the indices for the categorical data.
i_make = makes[d[0].lower()]
i_model = models[d[1].lower()]
i_vehicle_type = vehicle_type[d[4].lower()]
# Get the year, we could do it like the reg plates for a bit more accuracy.
i_made = datetime.datetime.strptime(d[14], "%m/%d/%Y")
i_made = i_made.year
# Get rid of the string values.
del d[14]
del d[4]
del d[1]
del d[0]
# Add our cleaned values to the end.
d.append(i_make)
d.append(i_model)
d.append(i_vehicle_type)
d.append(i_made)
d = list(numpy.nan_to_num(d))
features.append(numpy.array(d))
X = numpy.array(features)
Y = numpy.array(prices)
fifth_of_data = int(len(X) / 5)
x_train = X[:fifth_of_data * 4]
y_train = Y[:fifth_of_data * 4]
x_test = X[(fifth_of_data * 4) + 1:]
y_test = Y[(fifth_of_data * 4) + 1:]
def baseline_model():
model = Sequential()
model.add(Dense(13, input_dim=len(features[0]), kernel_initializer='normal', activation='relu'))
model.add(Dense(1, kernel_initializer='normal'))
model.compile(loss='mean_squared_error', optimizer='adam')
return model
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
# evaluate model with standardized dataset
estimator = KerasRegressor(build_fn=baseline_model, epochs=100, batch_size=5, verbose=0)
kfold = KFold(n_splits=10, shuffle=True, random_state=seed)  # random_state only takes effect (and recent scikit-learn only accepts it) with shuffle=True
results = cross_val_score(estimator, X, Y, cv=kfold)
print("Results: %.2f (%.2f) MSE" % (results.mean(), results.std()))
model = baseline_model()
model.fit(x_train, y_train, epochs=100, batch_size=5, verbose=0)
for i in range(len(x_test)):  # iterate over the held-out test set, not the full dataset
y = y_test[i]
pred = model.predict(x_test[i].reshape(1, -1))
print ("predicted price (thousands)", pred[0][0], "actual", y)
|
the-stack_106_18278
|
# modify from https://github.com/rosinality/stylegan2-pytorch/blob/master/op/fused_act.py # noqa:E501
import torch
import torch.nn.functional as F
from torch import nn
from torch.autograd import Function
from ..utils import ext_loader
ext_module = ext_loader.load_ext('_ext', ['fused_bias_leakyrelu'])
class FusedBiasLeakyReLUFunctionBackward(Function):
"""Calculate second order deviation.
This function is to compute the second order deviation for the fused leaky
relu operation.
"""
@staticmethod
def forward(ctx, grad_output, out, negative_slope, scale):
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
empty = grad_output.new_empty(0)
grad_input = ext_module.fused_bias_leakyrelu(
grad_output,
empty,
out,
act=3,
grad=1,
alpha=negative_slope,
scale=scale)
dim = [0]
if grad_input.ndim > 2:
dim += list(range(2, grad_input.ndim))
grad_bias = grad_input.sum(dim).detach()
return grad_input, grad_bias
@staticmethod
def backward(ctx, gradgrad_input, gradgrad_bias):
out, = ctx.saved_tensors
        # The second order derivative, in fact, contains two parts; the
        # first part is zero. Thus, we directly consider the second part,
        # which is similar to the first order derivative in implementation.
gradgrad_out = ext_module.fused_bias_leakyrelu(
gradgrad_input,
gradgrad_bias,
out,
act=3,
grad=1,
alpha=ctx.negative_slope,
scale=ctx.scale)
return gradgrad_out, None, None, None
class FusedBiasLeakyReLUFunction(Function):
@staticmethod
def forward(ctx, input, bias, negative_slope, scale):
empty = input.new_empty(0)
out = ext_module.fused_bias_leakyrelu(
input,
bias,
empty,
act=3,
grad=0,
alpha=negative_slope,
scale=scale)
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
return out
@staticmethod
def backward(ctx, grad_output):
out, = ctx.saved_tensors
grad_input, grad_bias = FusedBiasLeakyReLUFunctionBackward.apply(
grad_output, out, ctx.negative_slope, ctx.scale)
return grad_input, grad_bias, None, None
class FusedBiasLeakyReLU(nn.Module):
"""Fused bias leaky ReLU.
This function is introduced in the StyleGAN2:
http://arxiv.org/abs/1912.04958
The bias term comes from the convolution operation. In addition, to keep
the variance of the feature map or gradients unchanged, they also adopt a
    scale similar to Kaiming initialization. However, since the
    :math:`1 + \alpha^2` is too small, we can just ignore it. Therefore, the
    final scale is just :math:`\sqrt{2}`. Of course, you may change it with # noqa: W605, E501
your own scale.
TODO: Implement the CPU version.
Args:
        num_channels (int): The channel number of the feature map.
negative_slope (float, optional): Same as nn.LeakyRelu.
Defaults to 0.2.
scale (float, optional): A scalar to adjust the variance of the feature
map. Defaults to 2**0.5.
"""
def __init__(self, num_channels, negative_slope=0.2, scale=2**0.5):
super(FusedBiasLeakyReLU, self).__init__()
self.bias = nn.Parameter(torch.zeros(num_channels))
self.negative_slope = negative_slope
self.scale = scale
def forward(self, input):
return fused_bias_leakyrelu(input, self.bias, self.negative_slope,
self.scale)
def fused_bias_leakyrelu(input, bias, negative_slope=0.2, scale=2**0.5):
"""Fused bias leaky ReLU function.
This function is introduced in the StyleGAN2:
http://arxiv.org/abs/1912.04958
The bias term comes from the convolution operation. In addition, to keep
the variance of the feature map or gradients unchanged, they also adopt a
    scale similar to Kaiming initialization. However, since the
    :math:`1 + \alpha^2` is too small, we can just ignore it. Therefore, the
    final scale is just :math:`\sqrt{2}`. Of course, you may change it with # noqa: W605, E501
your own scale.
Args:
input (torch.Tensor): Input feature map.
bias (nn.Parameter): The bias from convolution operation.
negative_slope (float, optional): Same as nn.LeakyRelu.
Defaults to 0.2.
scale (float, optional): A scalar to adjust the variance of the feature
map. Defaults to 2**0.5.
Returns:
torch.Tensor: Feature map after non-linear activation.
"""
if not input.is_cuda:
return bias_leakyrelu_ref(input, bias, negative_slope, scale)
return FusedBiasLeakyReLUFunction.apply(input, bias.to(input.dtype),
negative_slope, scale)
def bias_leakyrelu_ref(x, bias, negative_slope=0.2, scale=2**0.5):
if bias is not None:
assert bias.ndim == 1
assert bias.shape[0] == x.shape[1]
x = x + bias.reshape([-1 if i == 1 else 1 for i in range(x.ndim)])
x = F.leaky_relu(x, negative_slope)
if scale != 1:
x = x * scale
return x
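# Usage sketch (assumes this module is imported from a compiled mmcv build; the
# relative ext_loader import above prevents running this file as a standalone script):
#   act = FusedBiasLeakyReLU(num_channels=64)
#   out = act(torch.randn(2, 64, 16, 16))  # CPU tensors fall back to bias_leakyrelu_ref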
|
the-stack_106_18279
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Top-level module of TensorFlow. By convention, we refer to this module as
`tf` instead of `tensorflow`, following the common practice of importing
TensorFlow via the command `import tensorflow as tf`.
The primary function of this module is to import all of the public TensorFlow
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
Note that the file `__init__.py` in the TensorFlow source code tree is actually
only a placeholder to enable test cases to run. The TensorFlow build replaces
this file with a file generated from [`api_template.__init__.py`](https://www.github.com/tensorflow/tensorflow/blob/master/tensorflow/api_template.__init__.py)
"""
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
import distutils as _distutils
import inspect as _inspect
import logging as _logging
import os as _os
import site as _site
import six as _six
import sys as _sys
from tensorflow.python.tools import module_util as _module_util
from tensorflow.python.util.lazy_loader import LazyLoader as _LazyLoader
# Make sure code inside the TensorFlow codebase can use tf2.enabled() at import.
_os.environ['TF2_BEHAVIOR'] = '1'
from tensorflow.python import tf2 as _tf2
_tf2.enable()
# API IMPORTS PLACEHOLDER
# WRAPPER_PLACEHOLDER
# Make sure directory containing top level submodules is in
# the __path__ so that "from tensorflow.foo import bar" works.
# We're using bitwise, but there's nothing special about that.
_API_MODULE = _sys.modules[__name__].bitwise
_tf_api_dir = _os.path.dirname(_os.path.dirname(_API_MODULE.__file__))
_current_module = _sys.modules[__name__]
if not hasattr(_current_module, '__path__'):
__path__ = [_tf_api_dir]
elif _tf_api_dir not in __path__:
__path__.append(_tf_api_dir)
# Hook external TensorFlow modules.
# Import compat before trying to import summary from tensorboard, so that
# reexport_tf_summary can get compat from sys.modules. Only needed if using
# lazy loading.
_current_module.compat.v2 # pylint: disable=pointless-statement
try:
from tensorboard.summary._tf import summary
_current_module.__path__ = (
[_module_util.get_parent_dir(summary)] + _current_module.__path__)
setattr(_current_module, "summary", summary)
except ImportError:
_logging.warning(
"Limited tf.summary API due to missing TensorBoard installation.")
# Lazy-load estimator.
_estimator_module = "tensorflow_estimator.python.estimator.api._v2.estimator"
estimator = _LazyLoader("estimator", globals(), _estimator_module)
_module_dir = _module_util.get_parent_dir_for_name(_estimator_module)
if _module_dir:
_current_module.__path__ = [_module_dir] + _current_module.__path__
setattr(_current_module, "estimator", estimator)
if _os.environ.get("_PREFER_OSS_KERAS", False):
try:
from keras.api._v2 import keras
_current_module.__path__ = (
[_module_util.get_parent_dir(keras)] + _current_module.__path__)
setattr(_current_module, "keras", keras)
except ImportError:
pass
else:
try:
from .python.keras.api._v2 import keras
_current_module.__path__ = (
[_module_util.get_parent_dir(keras)] + _current_module.__path__)
setattr(_current_module, "keras", keras)
except ImportError:
pass
# Explicitly import lazy-loaded modules to support autocompletion.
# pylint: disable=g-import-not-at-top
if not _six.PY2:
import typing as _typing
if _typing.TYPE_CHECKING:
from tensorflow_estimator.python.estimator.api._v2 import estimator
# pylint: enable=g-import-not-at-top
# Enable TF2 behaviors
from tensorflow.python.compat import v2_compat as _compat # pylint: disable=g-import-not-at-top
_compat.enable_v2_behavior()
_major_api_version = 2
# Load all plugin libraries from site-packages/tensorflow-plugins if we are
# running under pip.
# TODO(gunan): Enable setting an environment variable to define arbitrary plugin
# directories.
# TODO(gunan): Find a better location for this code snippet.
from tensorflow.python.framework import load_library as _ll
from tensorflow.python.lib.io import file_io as _fi
# Get sitepackages directories for the python installation.
_site_packages_dirs = []
if _site.ENABLE_USER_SITE and _site.USER_SITE is not None:
_site_packages_dirs += [_site.USER_SITE]
_site_packages_dirs += [_p for _p in _sys.path if 'site-packages' in _p]
if 'getsitepackages' in dir(_site):
_site_packages_dirs += _site.getsitepackages()
if 'sysconfig' in dir(_distutils):
_site_packages_dirs += [_distutils.sysconfig.get_python_lib()]
_site_packages_dirs = list(set(_site_packages_dirs))
# Find the location of this exact file.
_current_file_location = _inspect.getfile(_inspect.currentframe())
def _running_from_pip_package():
return any(
_current_file_location.startswith(dir_) for dir_ in _site_packages_dirs)
if _running_from_pip_package():
# TODO(gunan): Add sanity checks to loaded modules here.
for _s in _site_packages_dirs:
# Load first party dynamic kernels.
_main_dir = _os.path.join(_s, 'tensorflow/core/kernels')
if _os.path.exists(_main_dir):
_ll.load_library(_main_dir)
# Load third party dynamic kernels.
_plugin_dir = _os.path.join(_s, 'tensorflow-plugins')
if _os.path.exists(_plugin_dir):
_ll.load_library(_plugin_dir)
# Load Pluggable Device Library
_ll.load_pluggable_device_library(_plugin_dir)
# Add module aliases
if hasattr(_current_module, 'keras'):
losses = keras.losses
metrics = keras.metrics
optimizers = keras.optimizers
initializers = keras.initializers
setattr(_current_module, "losses", losses)
setattr(_current_module, "metrics", metrics)
setattr(_current_module, "optimizers", optimizers)
setattr(_current_module, "initializers", initializers)
# pylint: enable=undefined-variable
# Delete modules that should be hidden from dir().
# Don't fail if these modules are not available.
# For e.g. this file will be originally placed under tensorflow/_api/v1 which
# does not have 'python', 'core' directories. Then, it will be copied
# to tensorflow/ which does have these two directories.
# pylint: disable=undefined-variable
try:
del python
except NameError:
pass
try:
del core
except NameError:
pass
try:
del compiler
except NameError:
pass
# __all__ PLACEHOLDER
|
the-stack_106_18285
|
# Copyright (c) 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from cinder.common import constants
from cinder import exception
from cinder import objects
from cinder.objects import fields
from cinder.tests.unit import fake_service
from cinder.tests.unit import utils
from cinder.tests.unit import volume as base
import cinder.volume
from cinder.volume import manager
from cinder.volume import rpcapi as volume_rpcapi
CONF = cfg.CONF
@ddt.ddt
class ReplicationTestCase(base.BaseVolumeTestCase):
def setUp(self):
super(ReplicationTestCase, self).setUp()
self.host = 'host@backend#pool'
self.manager = manager.VolumeManager(host=self.host)
@mock.patch('cinder.objects.VolumeList.get_all')
@mock.patch('cinder.volume.driver.BaseVD.failover_host',
side_effect=exception.InvalidReplicationTarget(''))
@ddt.data(('backend2', 'default', fields.ReplicationStatus.FAILED_OVER),
('backend2', 'backend3', fields.ReplicationStatus.FAILED_OVER),
(None, 'backend2', fields.ReplicationStatus.ENABLED),
('', 'backend2', fields.ReplicationStatus.ENABLED))
@ddt.unpack
def test_failover_host_invalid_target(self, svc_backend, new_backend,
expected, mock_failover,
mock_getall):
"""Test replication failover_host with invalid_target.
When failingover fails due to an invalid target exception we return
replication_status to its previous status, and we decide what that is
depending on the currect active backend.
"""
svc = utils.create_service(
self.context,
{'host': self.host,
'binary': constants.VOLUME_BINARY,
'active_backend_id': svc_backend,
'replication_status': fields.ReplicationStatus.FAILING_OVER})
self.manager.failover_host(self.context, new_backend)
mock_getall.assert_called_once_with(self.context,
filters={'host': self.host})
mock_failover.assert_called_once_with(self.context,
mock_getall.return_value,
secondary_id=new_backend)
db_svc = objects.Service.get_by_id(self.context, svc.id)
self.assertEqual(expected, db_svc.replication_status)
@mock.patch('cinder.volume.driver.BaseVD.failover_host',
mock.Mock(side_effect=exception.VolumeDriverException('')))
def test_failover_host_driver_exception(self):
svc = utils.create_service(
self.context,
{'host': self.host,
'binary': constants.VOLUME_BINARY,
'active_backend_id': None,
'replication_status': fields.ReplicationStatus.FAILING_OVER})
self.manager.failover_host(self.context, mock.sentinel.backend_id)
db_svc = objects.Service.get_by_id(self.context, svc.id)
self.assertEqual(fields.ReplicationStatus.FAILOVER_ERROR,
db_svc.replication_status)
@mock.patch('cinder.objects.Service.is_up', True)
@mock.patch.object(volume_rpcapi.VolumeAPI, 'failover')
@mock.patch.object(cinder.db, 'conditional_update')
@mock.patch.object(objects.ServiceList, 'get_all')
def test_failover(self, mock_get_all, mock_db_update, mock_failover):
"""Test replication failover."""
service = fake_service.fake_service_obj(self.context,
binary='cinder-volume')
mock_get_all.return_value = [service]
mock_db_update.return_value = {'replication_status': 'enabled'}
volume_api = cinder.volume.api.API()
volume_api.failover(self.context, host=CONF.host, cluster_name=None)
mock_failover.assert_called_once_with(self.context, service, None)
@mock.patch.object(volume_rpcapi.VolumeAPI, 'failover')
@mock.patch.object(cinder.db, 'conditional_update')
@mock.patch.object(cinder.db, 'service_get_all')
def test_failover_unexpected_status(self, mock_db_get_all, mock_db_update,
mock_failover):
"""Test replication failover unexpected status."""
mock_db_get_all.return_value = [fake_service.fake_service_obj(
self.context,
binary='cinder-volume')]
mock_db_update.return_value = None
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidInput,
volume_api.failover,
self.context,
host=CONF.host,
cluster_name=None)
@mock.patch.object(volume_rpcapi.VolumeAPI, 'freeze_host')
@mock.patch.object(cinder.db, 'conditional_update', return_value=1)
@mock.patch.object(cinder.objects.ServiceList, 'get_all')
def test_freeze_host(self, mock_get_all, mock_db_update,
mock_freeze):
"""Test replication freeze_host."""
service = fake_service.fake_service_obj(self.context,
binary='cinder-volume')
mock_get_all.return_value = [service]
mock_freeze.return_value = True
volume_api = cinder.volume.api.API()
volume_api.freeze_host(self.context, host=CONF.host, cluster_name=None)
mock_freeze.assert_called_once_with(self.context, service)
@mock.patch.object(volume_rpcapi.VolumeAPI, 'freeze_host')
@mock.patch.object(cinder.db, 'conditional_update')
@mock.patch.object(cinder.db, 'service_get_all')
def test_freeze_host_unexpected_status(self, mock_get_all,
mock_db_update,
mock_freeze):
"""Test replication freeze_host unexpected status."""
mock_get_all.return_value = [fake_service.fake_service_obj(
self.context,
binary='cinder-volume')]
mock_db_update.return_value = None
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidInput,
volume_api.freeze_host,
self.context,
host=CONF.host,
cluster_name=None)
@mock.patch.object(volume_rpcapi.VolumeAPI, 'thaw_host')
@mock.patch.object(cinder.db, 'conditional_update', return_value=1)
@mock.patch.object(cinder.objects.ServiceList, 'get_all')
def test_thaw_host(self, mock_get_all, mock_db_update,
mock_thaw):
"""Test replication thaw_host."""
service = fake_service.fake_service_obj(self.context,
binary='cinder-volume')
mock_get_all.return_value = [service]
mock_thaw.return_value = True
volume_api = cinder.volume.api.API()
volume_api.thaw_host(self.context, host=CONF.host, cluster_name=None)
mock_thaw.assert_called_once_with(self.context, service)
@mock.patch.object(volume_rpcapi.VolumeAPI, 'thaw_host')
@mock.patch.object(cinder.db, 'conditional_update')
@mock.patch.object(cinder.db, 'service_get_all')
def test_thaw_host_unexpected_status(self, mock_get_all,
mock_db_update,
mock_thaw):
"""Test replication thaw_host unexpected status."""
mock_get_all.return_value = [fake_service.fake_service_obj(
self.context,
binary='cinder-volume')]
mock_db_update.return_value = None
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidInput,
volume_api.thaw_host,
self.context,
host=CONF.host, cluster_name=None)
@mock.patch('cinder.volume.driver.BaseVD.failover_completed')
def test_failover_completed(self, completed_mock):
rep_field = fields.ReplicationStatus
svc = objects.Service(self.context, host=self.volume.host,
binary=constants.VOLUME_BINARY,
replication_status=rep_field.ENABLED)
svc.create()
self.volume.failover_completed(
self.context,
{'active_backend_id': 'secondary',
'replication_status': rep_field.FAILED_OVER})
service = objects.Service.get_by_id(self.context, svc.id)
self.assertEqual('secondary', service.active_backend_id)
self.assertEqual('failed-over', service.replication_status)
completed_mock.assert_called_once_with(self.context, 'secondary')
@mock.patch('cinder.volume.driver.BaseVD.failover_completed', wraps=True)
def test_failover_completed_driver_failure(self, completed_mock):
rep_field = fields.ReplicationStatus
svc = objects.Service(self.context, host=self.volume.host,
binary=constants.VOLUME_BINARY,
replication_status=rep_field.ENABLED)
svc.create()
self.volume.failover_completed(
self.context,
{'active_backend_id': 'secondary',
'replication_status': rep_field.FAILED_OVER})
service = objects.Service.get_by_id(self.context, svc.id)
self.assertEqual('secondary', service.active_backend_id)
self.assertEqual(rep_field.ERROR, service.replication_status)
self.assertTrue(service.disabled)
self.assertIsNotNone(service.disabled_reason)
completed_mock.assert_called_once_with(self.context, 'secondary')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.failover_completed')
def test_finish_failover_non_clustered(self, completed_mock):
svc = mock.Mock(is_clustered=None)
self.volume.finish_failover(self.context, svc, mock.sentinel.updates)
svc.update.assert_called_once_with(mock.sentinel.updates)
svc.save.assert_called_once_with()
completed_mock.assert_not_called()
@mock.patch('cinder.volume.rpcapi.VolumeAPI.failover_completed')
def test_finish_failover_clustered(self, completed_mock):
svc = mock.Mock(cluster_name='cluster_name')
updates = {'status': 'error'}
self.volume.finish_failover(self.context, svc, updates)
completed_mock.assert_called_once_with(self.context, svc, updates)
svc.cluster.status = 'error'
svc.cluster.save.assert_called_once()
@ddt.data(None, 'cluster_name')
@mock.patch('cinder.volume.manager.VolumeManager.finish_failover')
@mock.patch('cinder.volume.manager.VolumeManager._get_my_volumes')
def test_failover_manager(self, cluster, get_vols_mock, finish_mock):
"""Test manager's failover method for clustered and not clustered."""
rep_field = fields.ReplicationStatus
svc = objects.Service(self.context, host=self.volume.host,
binary=constants.VOLUME_BINARY,
cluster_name=cluster,
replication_status=rep_field.ENABLED)
svc.create()
vol = objects.Volume(self.context, host=self.volume.host)
vol.create()
get_vols_mock.return_value = [vol]
with mock.patch.object(self.volume, 'driver') as driver:
called, not_called = driver.failover_host, driver.failover
if cluster:
called, not_called = not_called, called
called.return_value = ('secondary', [{'volume_id': vol.id,
'updates': {'status': 'error'}}])
self.volume.failover(self.context,
secondary_backend_id='secondary')
not_called.assert_not_called()
called.assert_called_once_with(self.context, [vol],
secondary_id='secondary')
expected_update = {'replication_status': rep_field.FAILED_OVER,
'active_backend_id': 'secondary',
'disabled': True,
'disabled_reason': 'failed-over'}
finish_mock.assert_called_once_with(self.context, svc, expected_update)
volume = objects.Volume.get_by_id(self.context, vol.id)
self.assertEqual('error', volume.status)
@ddt.data(('host1', None), (None, 'mycluster'))
@ddt.unpack
def test_failover_api_fail_multiple_results(self, host, cluster):
"""Fail if we try to failover multiple backends in the same request."""
rep_field = fields.ReplicationStatus
clusters = [
objects.Cluster(self.context,
name='mycluster@backend1',
replication_status=rep_field.ENABLED,
binary=constants.VOLUME_BINARY),
objects.Cluster(self.context,
name='mycluster@backend2',
replication_status=rep_field.ENABLED,
binary=constants.VOLUME_BINARY)
]
clusters[0].create()
clusters[1].create()
services = [
objects.Service(self.context, host='host1@backend1',
cluster_name=clusters[0].name,
replication_status=rep_field.ENABLED,
binary=constants.VOLUME_BINARY),
objects.Service(self.context, host='host1@backend2',
cluster_name=clusters[1].name,
replication_status=rep_field.ENABLED,
binary=constants.VOLUME_BINARY),
]
services[0].create()
services[1].create()
self.assertRaises(exception.Invalid,
self.volume_api.failover, self.context, host,
cluster)
def test_failover_api_not_found(self):
self.assertRaises(exception.ServiceNotFound, self.volume_api.failover,
self.context, 'host1', None)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.failover')
def test_failover_api_success_multiple_results(self, failover_mock):
"""Succeed to failover multiple services for the same backend."""
rep_field = fields.ReplicationStatus
cluster_name = 'mycluster@backend1'
cluster = objects.Cluster(self.context,
name=cluster_name,
replication_status=rep_field.ENABLED,
binary=constants.VOLUME_BINARY)
cluster.create()
services = [
objects.Service(self.context, host='host1@backend1',
cluster_name=cluster_name,
replication_status=rep_field.ENABLED,
binary=constants.VOLUME_BINARY),
objects.Service(self.context, host='host2@backend1',
cluster_name=cluster_name,
replication_status=rep_field.ENABLED,
binary=constants.VOLUME_BINARY),
]
services[0].create()
services[1].create()
self.volume_api.failover(self.context, None, cluster_name,
mock.sentinel.secondary_id)
for service in services + [cluster]:
self.assertEqual(rep_field.ENABLED, service.replication_status)
service.refresh()
self.assertEqual(rep_field.FAILING_OVER,
service.replication_status)
failover_mock.assert_called_once_with(self.context, mock.ANY,
mock.sentinel.secondary_id)
self.assertEqual(services[0].id, failover_mock.call_args[0][1].id)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.failover')
def test_failover_api_success_multiple_results_not_updated(self,
failover_mock):
"""Succeed to failover even if a service is not updated."""
rep_field = fields.ReplicationStatus
cluster_name = 'mycluster@backend1'
cluster = objects.Cluster(self.context,
name=cluster_name,
replication_status=rep_field.ENABLED,
binary=constants.VOLUME_BINARY)
cluster.create()
services = [
objects.Service(self.context, host='host1@backend1',
cluster_name=cluster_name,
replication_status=rep_field.ENABLED,
binary=constants.VOLUME_BINARY),
objects.Service(self.context, host='host2@backend1',
cluster_name=cluster_name,
replication_status=rep_field.ERROR,
binary=constants.VOLUME_BINARY),
]
services[0].create()
services[1].create()
self.volume_api.failover(self.context, None, cluster_name,
mock.sentinel.secondary_id)
for service in services[:1] + [cluster]:
service.refresh()
self.assertEqual(rep_field.FAILING_OVER,
service.replication_status)
services[1].refresh()
self.assertEqual(rep_field.ERROR, services[1].replication_status)
failover_mock.assert_called_once_with(self.context, mock.ANY,
mock.sentinel.secondary_id)
self.assertEqual(services[0].id, failover_mock.call_args[0][1].id)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.failover')
def test_failover_api_fail_multiple_results_not_updated(self,
failover_mock):
"""Fail if none of the services could be updated."""
rep_field = fields.ReplicationStatus
cluster_name = 'mycluster@backend1'
cluster = objects.Cluster(self.context,
name=cluster_name,
replication_status=rep_field.ENABLED,
binary=constants.VOLUME_BINARY)
cluster.create()
down_time = timeutils.datetime.datetime(1970, 1, 1)
services = [
# This service is down
objects.Service(self.context, host='host1@backend1',
cluster_name=cluster_name,
replication_status=rep_field.ENABLED,
created_at=down_time,
updated_at=down_time,
modified_at=down_time,
binary=constants.VOLUME_BINARY),
# This service is not with the right replication status
objects.Service(self.context, host='host2@backend1',
cluster_name=cluster_name,
replication_status=rep_field.ERROR,
binary=constants.VOLUME_BINARY),
]
services[0].create()
services[1].create()
self.assertRaises(exception.InvalidInput,
self.volume_api.failover, self.context, None,
cluster_name, mock.sentinel.secondary_id)
for service in services:
svc = objects.Service.get_by_id(self.context, service.id)
self.assertEqual(service.replication_status,
svc.replication_status)
cluster.refresh()
self.assertEqual(rep_field.ENABLED, cluster.replication_status)
failover_mock.assert_not_called()
|
the-stack_106_18286
|
# Copyright 2006 James Tauber and contributors
# Copyright (C) 2009 Luke Kenneth Casson Leighton <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import DOM
from pyjamas import Factory
from pyjamas.ui.Composite import Composite
from pyjamas.ui import Event
from pyjamas.ui.HTML import HTML
from pyjamas.ui.Label import Label
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.ClickDelegatePanel import ClickDelegatePanel
from pyjamas.ui import HasAlignment
class TabBar(Composite):
STYLENAME_DEFAULT = "gwt-TabBarItem"
def __init__(self, **kwargs):
if not kwargs.has_key('StyleName'): kwargs['StyleName']="gwt-TabBar"
# this is awkward: HorizontalPanel is the composite,
        # so we pop the element from kwargs here (if one was given) and pass it in to HorizontalPanel.
element = None
if kwargs.has_key('Element'):
element = kwargs.pop('Element')
self.panel = HorizontalPanel(Element=element)
self.selectedTab = None
self.tabListeners = []
self.panel.setVerticalAlignment(HasAlignment.ALIGN_BOTTOM)
first = HTML(" ", True)
rest = HTML(" ", True)
first.setStyleName("gwt-TabBarFirst")
rest.setStyleName("gwt-TabBarRest")
first.setHeight("100%")
rest.setHeight("100%")
self.panel.add(first)
self.panel.add(rest)
first.setHeight("100%")
self.panel.setCellHeight(first, "100%")
self.panel.setCellWidth(rest, "100%")
Composite.__init__(self, self.panel, **kwargs)
self.sinkEvents(Event.ONCLICK)
def addTab(self, text, asHTML=False):
self.insertTab(text, asHTML, self.getTabCount())
def addTabListener(self, listener):
self.tabListeners.append(listener)
def getSelectedTab(self):
if self.selectedTab is None:
return -1
return self.panel.getWidgetIndex(self.selectedTab) - 1
def getTabCount(self):
return self.panel.getWidgetCount() - 2
def getTabWidget(self, index):
if index >= self.getTabCount():
return None
delPanel = self.panel.getWidget(index + 1)
focusablePanel = delPanel.getFocusablePanel()
widget = focusablePanel.getWidget()
return widget
def getTabHTML(self, index):
widget = self.getTabWidget(index)
if hasattr(widget, "getHTML"):
return widget.getHTML()
elif hasattr(widget, "getText"): # assume it's a Label if it has getText
return widget.getText()
else:
fpe = DOM.getParent(self.focusablePanel.getElement())
return DOM.getInnerHTML(fpe)
def createTabTextWrapper(self):
return None
def insertTab(self, text, asHTML, beforeIndex=None):
""" 1st arg can, instead of being 'text', be a widget.
1st arg can also be None, which results in a blank
space between tabs. Use this to push subsequent
tabs out to the right hand end of the TabBar.
(the "blank" tab, by not being focussable, is not
clickable).
"""
if beforeIndex is None:
beforeIndex = asHTML
asHTML = False
if (beforeIndex < 0) or (beforeIndex > self.getTabCount()):
#throw new IndexOutOfBoundsException();
pass
if text is None:
text = HTML(" ", True)
text.setWidth("100%")
text.setStyleName("gwt-TabBarRest")
self.panel.insert(text, beforeIndex + 1)
self.panel.setCellWidth(text, "100%")
return
istext = isinstance(text, basestring)
if istext:
if asHTML:
item = HTML(text)
else:
item = Label(text)
item.setWordWrap(False)
else:
# passing in a widget, it's expected to have its own style
item = text
self.insertTabWidget(item, beforeIndex)
def insertTabWidget(self, widget, beforeIndex):
delWidget = ClickDelegatePanel(self, widget, self, self)
delWidget.setStyleName(self.STYLENAME_DEFAULT)
focusablePanel = delWidget.getFocusablePanel()
self.panel.insert(delWidget, beforeIndex + 1)
self.setStyleName(DOM.getParent(delWidget.getElement()),
self.STYLENAME_DEFAULT + "-wrapper", True)
#print "insertTabWidget", DOM.getParent(delWidget.getElement()), DOM.getAttribute(DOM.getParent(delWidget.getElement()), "className")
def onClick(self, sender=None):
for i in range(1, self.panel.getWidgetCount() - 1):
if DOM.isOrHasChild(self.panel.getWidget(i).getElement(),
sender.getElement()):
return self.selectTab(i - 1)
return False
def removeTab(self, index):
self.checkTabIndex(index)
toRemove = self.panel.getWidget(index + 1)
if toRemove == self.selectedTab:
self.selectedTab = None
self.panel.remove(toRemove)
def removeTabListener(self, listener):
self.tabListeners.remove(listener)
def selectTab(self, index):
self.checkTabIndex(index)
for listener in self.tabListeners:
if not listener.onBeforeTabSelected(self, index):
return False
self.setSelectionStyle(self.selectedTab, False)
if index == -1:
self.selectedTab = None
return True
self.selectedTab = self.panel.getWidget(index + 1)
self.setSelectionStyle(self.selectedTab, True)
for listener in self.tabListeners:
listener.onTabSelected(self, index)
return True
def checkTabIndex(self, index):
if (index < -1) or (index >= self.getTabCount()):
#throw new IndexOutOfBoundsException();
pass
def setSelectionStyle(self, item, selected):
if item is not None:
if selected:
item.addStyleName("gwt-TabBarItem-selected")
self.setStyleName(DOM.getParent(item.getElement()),
"gwt-TabBarItem-wrapper-selected", True)
else:
item.removeStyleName("gwt-TabBarItem-selected")
self.setStyleName(DOM.getParent(item.getElement()),
"gwt-TabBarItem-wrapper-selected", False)
Factory.registerClass('pyjamas.ui.TabBar', 'TabBar', TabBar)
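# Usage sketch (not part of the original pyjamas module; assumes a pyjamas
# environment and a listener object implementing onBeforeTabSelected/onTabSelected):
#
#   bar = TabBar()
#   bar.addTab("Plain tab")
#   bar.addTab("<b>HTML tab</b>", asHTML=True)
#   bar.addTabListener(my_listener)   # my_listener is hypothetical
#   bar.selectTab(0)                  # notifies listeners via onTabSelected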
|
the-stack_106_18287
|
"""
Adapted from https://github.com/lukemelas/simple-bert
"""
import numpy as np
from torch import nn
from torch import Tensor
from torch.nn import functional as F
import torch
def split_last(x, shape):
"split the last dimension to giveTransformern shape"
shape = list(shape)
assert shape.count(-1) <= 1
if -1 in shape:
shape[shape.index(-1)] = int(x.size(-1) / -np.prod(shape))
return x.view(*x.size()[:-1], *shape)
def merge_last(x, n_dims):
"merge the last n_dims to a dimension"
s = x.size()
assert n_dims > 1 and n_dims < len(s)
return x.view(*s[:-n_dims], -1)
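# Minimal sanity-check sketch for the two helpers above (not part of the original
# file; the shapes are assumed example values):
#
#   x = torch.randn(4, 10, 8)                 # (B, S, D)
#   h = split_last(x, (2, -1))                # -> (4, 10, 2, 4), i.e. (B, S, H, W)
#   assert merge_last(h, 2).shape == x.shape  # merging the last two dims restores (B, S, D)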
class MultiHeadedSelfAttention(nn.Module):
"""Multi-Headed Dot Product Attention"""
def __init__(self, dim, num_heads, dropout):
super().__init__()
self.proj_q = nn.Linear(dim, dim)
self.proj_k = nn.Linear(dim, dim)
self.proj_v = nn.Linear(dim, dim)
self.drop = nn.Dropout(dropout)
self.n_heads = num_heads
self.scores = None # for visualization
def forward(self, x, mask):
"""
x, q(query), k(key), v(value) : (B(batch_size), S(seq_len), D(dim))
mask : (B(batch_size) x S(seq_len))
* split D(dim) into (H(n_heads), W(width of head)) ; D = H * W
"""
# (B, S, D) -proj-> (B, S, D) -split-> (B, S, H, W) -trans-> (B, H, S, W)
q, k, v = self.proj_q(x), self.proj_k(x), self.proj_v(x)
q, k, v = (split_last(x, (self.n_heads, -1)).transpose(1, 2) for x in [q, k, v])
# (B, H, S, W) @ (B, H, W, S) -> (B, H, S, S) -softmax-> (B, H, S, S)
scores = q @ k.transpose(-2, -1) / np.sqrt(k.size(-1))
if mask is not None:
mask = mask[:, None, None, :].float()
scores -= 10000.0 * (1.0 - mask)
scores = self.drop(F.softmax(scores, dim=-1))
# (B, H, S, S) @ (B, H, S, W) -> (B, H, S, W) -trans-> (B, S, H, W)
h = (scores @ v).transpose(1, 2).contiguous()
# -merge-> (B, S, D)
h = merge_last(h, 2)
self.scores = scores
return h
class PositionWiseFeedForward(nn.Module):
"""FeedForward Neural Networks for each position"""
def __init__(self, dim, ff_dim):
super().__init__()
self.fc1 = nn.Linear(dim, ff_dim)
self.fc2 = nn.Linear(ff_dim, dim)
def forward(self, x):
# (B, S, D) -> (B, S, D_ff) -> (B, S, D)
return self.fc2(F.gelu(self.fc1(x)))
class Block(nn.Module):
"""Transformer Block"""
def __init__(self, dim, num_heads, ff_dim, dropout):
super().__init__()
self.attn = MultiHeadedSelfAttention(dim, num_heads, dropout)
self.proj = nn.Linear(dim, dim)
self.norm1 = nn.LayerNorm(dim, eps=1e-6)
self.pwff = PositionWiseFeedForward(dim, ff_dim)
self.norm2 = nn.LayerNorm(dim, eps=1e-6)
self.drop = nn.Dropout(dropout)
def forward(self, x, mask):
h = self.drop(self.proj(self.attn(self.norm1(x), mask)))
x = x + h
h = self.drop(self.pwff(self.norm2(x)))
x = x + h
return x
class Transformer(nn.Module):
"""Transformer with Self-Attentive Blocks"""
def __init__(self, num_layers, dim, num_heads, ff_dim, dropout):
super().__init__()
self.blocks = nn.ModuleList([
Block(dim, num_heads, ff_dim, dropout) for _ in range(num_layers)])
    def forward(self, x, mask=None, output_layer_ind=-1):
        for i, block in enumerate(self.blocks):
            x = block(x, mask)
            if i == output_layer_ind:  # if output_layer_ind is -1, the whole network is applied
                break
return x
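# Usage sketch for the Transformer above (not part of the original file; all
# sizes are assumed example values):
#
#   model = Transformer(num_layers=2, dim=64, num_heads=4, ff_dim=128, dropout=0.1)
#   out = model(torch.randn(8, 16, 64))   # -> (8, 16, 64); mask defaults to None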
class AnomalyTransformer(nn.Module):
"""Transformer with Self-Attentive Blocks"""
def __init__(self, num_layers, dim, num_heads, ff_dim, dropout):
super().__init__()
self.blocks = nn.ModuleList([
Block(dim, num_heads, ff_dim, dropout) for _ in range(num_layers)])
self.cloned_blocks = nn.ModuleList([
Block(dim, num_heads, ff_dim, dropout) for _ in range(num_layers)])
# if isinstance(clone_block_ind, int):
# clone_block_ind = [clone_block_ind]
#
# self.clone_block_ind = clone_block_ind
# if self.clone_block_ind == -1:
# self.clone_block_ind = num_layers - 1
#
# self.cloned_block = Block(dim, num_heads, ff_dim, dropout)
    def forward(self, x, mask=None, clone_block_ind=None):
        if clone_block_ind is None:
            clone_block_ind = list(range(len(self.blocks)))
        if isinstance(clone_block_ind, int):
            if clone_block_ind == -1:
                clone_block_ind = len(self.blocks) - 1
            clone_block_ind = [clone_block_ind]
origin_block_outputs, cloned_block_outputs = [], []
for i, block in enumerate(self.blocks):
_x = x
x = block(x, mask)
if i in clone_block_ind:
cloned_block = self.cloned_blocks[i]
cloned_x = cloned_block(_x, mask)
origin_block_outputs.append(x)
cloned_block_outputs.append(cloned_x)
return torch.stack(origin_block_outputs), torch.stack(cloned_block_outputs)
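# Return-shape sketch for AnomalyTransformer.forward (assumed sizes): with
# clone_block_ind=None every block is cloned, so both returned tensors are
# stacked per selected block:
#
#   model = AnomalyTransformer(num_layers=2, dim=64, num_heads=4, ff_dim=128, dropout=0.1)
#   orig, cloned = model(torch.randn(8, 16, 64))
#   # orig.shape == cloned.shape == (2, 8, 16, 64), i.e. (blocks, B, S, D)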
class OlderAnomalyTransformer(nn.Module):
"""Transformer with Self-Attentive Blocks"""
def __init__(self, num_layers, dim, num_heads, ff_dim, dropout, clone_block_ind):
super().__init__()
self.blocks = nn.ModuleList([
Block(dim, num_heads, ff_dim, dropout) for _ in range(num_layers)])
self.clone_block_ind = clone_block_ind
if self.clone_block_ind == -1:
self.clone_block_ind = num_layers - 1
self.cloned_block = Block(dim, num_heads, ff_dim, dropout)
def forward(self, x, mask=None, output_layer_ind=-1):
for i, block in enumerate(self.blocks):
_x = x
x = block(x, mask)
# if i==output_layer_ind: #if output_layer_ind is -1 it will aplly the whole network
# break
if i == self.clone_block_ind:
origin_block_outputs = x
cloned_block_outputs = self.cloned_block(_x, mask)
break
return origin_block_outputs, cloned_block_outputs
|
the-stack_106_18289
|
from django.shortcuts import render, get_object_or_404
from django.shortcuts import redirect
from django.contrib.auth import authenticate, login, update_session_auth_hash
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import UserCreationForm
from Crypto.Util import asn1
from base64 import b64decode
from .forms import *
from django.shortcuts import resolve_url
from django.template.response import TemplateResponse
from django.utils.http import is_safe_url, urlsafe_base64_decode
from django.contrib.sites.shortcuts import get_current_site
from django.http import HttpResponseRedirect, QueryDict
from . import forms
from django.contrib.auth import forms as auth_forms
from django.contrib.auth import update_session_auth_hash
from . import models
from django.contrib.auth import models as authmodels
from django.core.files.storage import FileSystemStorage
import datetime
from django import forms as djangoforms
from django.core.urlresolvers import reverse
from Crypto.PublicKey import RSA
from Crypto import Random
import base64
import os
import binascii
from django.db.models import Q
from django.views.decorators.csrf import csrf_exempt
import json
@csrf_exempt
def fda_getreports(request,username,password):
user = authenticate(username=username, password=password)
login(request, user)
your_reports = models.Report.objects.filter(owned_by=user)
other_reports = models.Report.objects.filter(private=False).exclude(owned_by=user)
viewable_reports = []
for your_report in your_reports:
num_attachments = len(your_report.files.all())
report_data = {"report_id": your_report.id, "title": your_report.short_desc, "attachments": num_attachments}
viewable_reports.append(report_data)
for other_report in other_reports:
num_attachments = len(other_report.files.all())
report_data = {"report_id": other_report.id, "title": other_report.short_desc,
"attachments": num_attachments}
viewable_reports.append(report_data)
return HttpResponse(json.dumps(viewable_reports))
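# Response sketch for fda_getreports (structure taken from the dicts built above;
# the values are hypothetical):
#   '[{"report_id": 3, "title": "Q1 report", "attachments": 2}, ...]'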
@csrf_exempt
def fda_login(request, username, password):
# print("password: " + password)
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
return HttpResponse("true")
return HttpResponse("false")
@csrf_exempt
def fda_report_files(request, report_id):
user = request.user
if user is not None:
report = models.Report.objects.filter(pk=report_id)[0]
downloadable_files = []
for file in report.files.all():
filedata = {"id": file.id, "file_name": file.file_attached.name, "is_encrypted": file.is_encrypted,
"file_hash": file.file_hash}
downloadable_files.append(filedata)
return HttpResponse(json.dumps(downloadable_files))
return HttpResponse('Login Failed!')
def register_success(request):
return render(request, 'fileshare/register_success.html')
@login_required(login_url='login')
def main(request):
your_reports = models.Report.objects.filter(owned_by=request.user)
num_reports = len(your_reports)
other_reports = models.Report.objects.filter(private=False).exclude(owned_by=request.user)
folders = models.Folder.objects.filter(owned_by=request.user)
activity = models.Activity.objects.filter(owned_by=request.user).order_by('time').reverse()
if request.method == 'POST':
folder_form = FolderForm(request.POST)
search_form = SearchForm(request.POST)
if search_form.is_valid():
cd = search_form.cleaned_data
# request.session['param'] = cd.get('param')
# request.session['query'] = cd.get('query')
# return redirect('search_results')
elif folder_form.is_valid():
folder = models.Folder.objects.create(
name=request.POST.get('name'),
owned_by=request.user,
created=datetime.datetime.now()
)
folder.save()
newactivity = models.Activity.objects.create(owned_by=request.user, time=datetime.datetime.now(),
description="Created " + str(folder.name))
newactivity.save()
else:
folder_form = FolderForm()
search_form = SearchForm()
return render(request, 'fileshare/main.html',
{'your_reports': your_reports, 'num_reports': num_reports, 'other_reports': other_reports,
'folder_form': folder_form, 'folders': folders, 'search_form': search_form, 'activity': activity})
@login_required(login_url='login')
def create_report(request):
if request.method == 'POST':
report_form = ReportForm(request.POST, request.FILES)
if report_form.is_valid():
newdoc = models.Report.objects.create(
owned_by=request.user,
created=datetime.datetime.now(),
last_modified=datetime.datetime.now(),
last_modified_by=request.user.username,
short_desc=report_form.cleaned_data['short_desc'],
long_desc=report_form.cleaned_data['long_desc'],
private=report_form.cleaned_data['private'],
is_encrypted=report_form.cleaned_data['is_encrypted']
)
# newdoc.save()
json_data = request.POST.get('file_hash')
if json_data != "":
file_hashes = json.loads(json_data)
print("file_hash: " + json_data)
for f in request.FILES.getlist('files'):
fHash = getFileHashFromData(file_hashes, f.name)
d = models.Documents.objects.create(file_attached=f,
is_encrypted=report_form.cleaned_data['is_encrypted'],
file_hash=fHash)
newdoc.files.add(d)
newdoc.save()
newactivity = models.Activity.objects.create(owned_by=request.user, time=datetime.datetime.now(),
description="Created " + newdoc.short_desc)
newactivity.save()
return redirect('main')
else:
report_form = ReportForm()
return render(request, 'fileshare/create_report.html', {'report_form': report_form})
@login_required(login_url='login')
def view_report(request, report_id):
report = get_object_or_404(models.Report, pk=report_id)
files = report.files
encrypted = report.is_encrypted
report_comments = report.comments
# if(request.user.is_staff == False):
if report.private and request.user != report.owned_by and request.user.is_staff is False:
return redirect('main')
elif request.method == "POST":
print("here1")
update_form = ReportForm(request.POST, request.FILES, instance=report)
comment_form = ReportCommentsForm(request.POST)
if request.POST.get('action')[0] == "f":
print("here2")
report.last_modified = datetime.datetime.now()
report.last_modified_by = request.user.username
d = get_object_or_404(models.Documents, pk=request.POST.get('action')[1:])
# remove document from the file system
fs = FileSystemStorage()
fs.delete(d.file_attached)
# remove document from database
d.delete()
report.save()
newactivity = models.Activity.objects.create(owned_by=request.user, time=datetime.datetime.now(),
description="Modified " +
str(report.short_desc))
newactivity.save()
elif comment_form.is_valid():
c = models.ReportComments.objects.create(
creator=request.user.profile,
timestamp=datetime.datetime.now(),
comment=request.POST.get('comment')
)
report.comments.add(c)
report.save()
c.save()
newactivity = models.Activity.objects.create(owned_by=request.user, time=datetime.datetime.now(),
description="You commented on " +
str(report.short_desc))
newactivity.save()
if (report.owned_by != request.user):
newactivity = models.Activity.objects.create(owned_by=report.owned_by, time=datetime.datetime.now(),
description=str(request.user.username) + " commented on " +
str(report.short_desc))
newactivity.save()
elif update_form.is_valid():
print("here3")
if request.POST.get('action') == "Save Changes":
print("here4")
report.last_modified = datetime.datetime.now()
report.last_modified_by = request.user.username
is_encrypted = not request.POST.get('is_encrypted', None) == None
json_data = request.POST.get('file_hash')
if json_data != "":
file_hashes = json.loads(json_data)
print("file_hash: " + json_data)
for f in request.FILES.getlist('files'):
fHash = getFileHashFromData(file_hashes, f.name)
d = models.Documents.objects.create(file_attached=f, is_encrypted=is_encrypted, file_hash=fHash)
report.files.add(d)
report.save()
update_form.save()
newactivity = models.Activity.objects.create(owned_by=request.user, time=datetime.datetime.now(),
description="Modified " +
str(report.short_desc))
newactivity.save()
return redirect('main')
if request.POST.get('action')[0] == "f":
print("here5")
report.last_modified = datetime.datetime.now()
report.last_modified_by = request.user.username
d = get_object_or_404(models.Documents, pk=request.POST.get('action')[1:])
d.delete()
newactivity = models.Activity.objects.create(owned_by=request.user, time=datetime.datetime.now(),
description="Modified " +
str(report.short_desc))
newactivity.save()
return redirect('main')
else:
print("here6")
report.delete()
return redirect('main')
else:
update_form = ReportForm(instance=report,initial={'is_encrypted':report.is_encrypted,'private':report.private})
comment_form = ReportCommentsForm()
return render(request, 'fileshare/view_report.html',
{'report': report, 'update_form': update_form, 'files': files, 'num_files': files.count(),
'encrypted': encrypted, 'comment_form': comment_form, 'report_comments': report_comments})
def getFileHashFromData(data, filename):
for file in data:
if file["filename"] == filename:
return file["file_hash"]
@login_required(login_url='login')
def view_group_report(request, report_id, profilegroup_id):
report = get_object_or_404(models.Report, pk=report_id)
group = get_object_or_404(models.ProfileGroup, pk=profilegroup_id)
files = report.files
encrypted = report.is_encrypted
if request.user.profile not in group.members.all():
return redirect('main')
return render(request, 'fileshare/view_group_report.html',
{'report': report, 'group': group, 'encrypted': encrypted, 'files': files,
'num_files': files.count()})
@login_required(login_url='login')
def user_delete_report(request, report_id):
report = get_object_or_404(models.Report, pk=report_id)
newactivity = models.Activity.objects.create(owned_by=request.user, time=datetime.datetime.now(),
description="Deleted " + report.short_desc)
newactivity.save()
# remove all file attachments from the file system
fs = FileSystemStorage()
files = report.files.all()
for file in files:
fs.delete(file.file_attached)
file.delete()
# remove report from database
report.delete()
return HttpResponseRedirect('/main')
@login_required(login_url='login')
def account_update_success(request):
return render(request, 'fileshare/account_update_success.html')
@login_required(login_url='login')
def account(request):
return render(request, 'fileshare/account.html')
@login_required(login_url='login')
def messages(request):
user = request.user
user.profile.unreadmessages = "false"
user.profile.save()
form = forms.messageForm(request.POST or None)
if request.method == 'POST':
if (request.POST['newmessagefield'] == "Yes"):
# sender data
newconvo = models.Conversation.objects.create(sender=user,
reciever=models.User.objects.get(id=request.POST['sender']),
reciever_name=user.username + "-" + models.User.objects.get(
id=request.POST[
'sender']).username,
recently_used=datetime.datetime.now(), unreadmessages="0"
)
newconvo.save()
if (request.POST['thekey'] != "True"):
newmessage = models.Message.objects.create(owned_by=newconvo,
sender=user,
messagecontent=request.POST['messagecontent'],
time=datetime.datetime.now(), key=request.POST['thekey'])
newmessage.save()
# reciever data
newconvo2 = models.Conversation.objects.create(reciever=user,
sender=models.User.objects.get(id=request.POST['sender']),
reciever_name=models.User.objects.get(
id=request.POST[
'sender']).username + "-" + user.username,
recently_used=datetime.datetime.now(), unreadmessages="1"
)
newconvo2.save()
otheruser = newconvo2.sender
print(otheruser.username)
print(otheruser.profile.unreadmessages)
otheruser.profile.unreadmessages = "true"
otheruser.profile.save()
print(otheruser.profile.unreadmessages)
if (request.POST['thekey'] == "True"):
thekey = RSA.importKey(newconvo2.sender.profile.publickey)
messagetoencrypt = str(request.POST['messagecontent'])
encryptedmessage = thekey.encrypt(messagetoencrypt.encode(), 1)
encryptedmessage = encryptedmessage[0];
encryptedmessage = base64.b16encode(encryptedmessage)
encryptedmessage = str(encryptedmessage, 'ascii')
newmessage2 = models.Message.objects.create(owned_by=newconvo2,
sender=user,
messagecontent=encryptedmessage,
time=datetime.datetime.now(), key=request.POST['thekey'])
newmessage2.save()
else:
newmessage2 = models.Message.objects.create(owned_by=newconvo2,
sender=user,
messagecontent=str(request.POST['messagecontent']),
time=datetime.datetime.now(), key=request.POST['thekey'])
newmessage2.save()
return redirect("/messages")
elif form.is_valid():
# sender data
if (request.POST['thekey'] != "True"):
newmessage = models.Message.objects.create(owned_by=form.cleaned_data['owned_by'],
sender=user,
messagecontent=str(request.POST['messagecontent']),
time=datetime.datetime.now(), key=request.POST['thekey'])
newmessage.save()
convo = form.cleaned_data['owned_by']
convo.recently_used = newmessage.time
convo.save()
convo = form.cleaned_data['owned_by']
# reciever data
convo2 = models.Conversation.objects.get(reciever=convo.sender, sender=convo.reciever)
if (request.POST['thekey'] == "True"):
thekey = RSA.importKey(convo2.sender.profile.publickey)
messagetoencrypt = str(request.POST['messagecontent'])
encryptedmessage = thekey.encrypt(messagetoencrypt.encode(), 1)
encryptedmessage = encryptedmessage[0];
encryptedmessage = base64.b16encode(encryptedmessage)
encryptedmessage = str(encryptedmessage, 'ascii')
newmessage2 = models.Message.objects.create(owned_by=convo2,
sender=user,
messagecontent=encryptedmessage,
time=datetime.datetime.now(), key=request.POST['thekey'])
newmessage2.save()
else:
newmessage2 = models.Message.objects.create(owned_by=convo2,
sender=user,
messagecontent=str(request.POST['messagecontent']),
time=datetime.datetime.now(), key=request.POST['thekey'])
newmessage2.save()
convo2.recently_used = newmessage2.time
currentcount = int(convo2.unreadmessages)
currentcount += 1
convo2.unreadmessages = str(currentcount)
convo2.save()
otheruser = convo2.sender
print(otheruser.username)
print(otheruser.profile.unreadmessages)
otheruser.profile.unreadmessages = "true"
otheruser.profile.save()
print(otheruser.profile.unreadmessages)
return redirect("/messages")
conversation_list = models.Conversation.objects.all().filter(sender=user).order_by('recently_used').reverse()
message_list = []
for convo in conversation_list:
message_list.append(models.Message.objects.all().filter(owned_by=convo).order_by('time').reverse)
reciever_list = models.User.objects.all()
forms.messageForm.base_fields['owned_by'] = djangoforms.ModelChoiceField(queryset=conversation_list, required=False)
form = forms.messageForm()
return render(request, 'fileshare/messages.html',
{'reciever_list': reciever_list, 'conversation_list': conversation_list, 'message_list': message_list,
'form': form})
@login_required(login_url='login')
def update_profile(request):
user = request.user
form = forms.UpdateProfile(request.POST or None,
initial={'first_name': user.first_name, 'last_name': user.last_name,
'email': user.email})
if request.method == 'POST':
if form.is_valid():
user.first_name = request.POST['first_name']
user.last_name = request.POST['last_name']
user.email = request.POST['email']
user.save()
newactivity = models.Activity.objects.create(owned_by=request.user, time=datetime.datetime.now(),
description="Updated profile.")
newactivity.save()
return HttpResponseRedirect('/account/view')
context = {
"form": form
}
return render(request, "fileshare/account_update.html", context)
@login_required(login_url='login')
def password_change(request):
form = auth_forms.PasswordChangeForm(user=request.user, data=request.POST)
if request.method == 'POST':
if form.is_valid():
form.save()
update_session_auth_hash(request, form.user)
newactivity = models.Activity.objects.create(owned_by=request.user, time=datetime.datetime.now(),
description="Changed password.")
newactivity.save()
return HttpResponseRedirect('/logout')
context = {
"form": form
}
return render(request, "fileshare/changepassword.html", context)
@login_required(login_url='login')
def deletemessage(request, message_pk):
query = models.Message.objects.get(pk=message_pk)
query.delete()
return redirect("/messages")
@login_required(login_url='login')
def create_group(request):
if request.method == 'POST':
group_form = GroupForm(request.POST)
members = models.Profile.objects.all().exclude(user=request.user)
if group_form.is_valid():
group_form.save()
instance = models.ProfileGroup.objects.get(name=request.POST.get('name'))
instance.members.add(request.user.profile)
request.user.profile.groups_in.add(instance)
members_added = request.POST.getlist('members')
for m in members_added:
m = get_object_or_404(models.Profile, pk=m)
m.groups_in.add(instance)
instance.members.add(m)
newactivity = models.Activity.objects.create(owned_by=m.user, time=datetime.datetime.now(),
description=str(
request.user.username) + " added you to " + str(
instance.name))
newactivity.save()
m.save()
instance.creator = request.user
instance.save()
request.user.profile.save()
newactivity = models.Activity.objects.create(owned_by=request.user, time=datetime.datetime.now(),
description="Created " + str(instance.name))
newactivity.save()
return redirect('main') # group made successfully
else:
# group_form=GroupForm()
members = models.Profile.objects.all().exclude(user=request.user)
return render(request, 'fileshare/create_group.html', {'members': members})
@login_required(login_url='login')
def view_group(request, group_id):
group = get_object_or_404(models.ProfileGroup, pk=group_id)
# private_reports = request.user.profile.reports_owned.filter(private=True)
private_reports = models.Report.objects.filter(owned_by=request.user, private=True).exclude(
id__in=group.reports.all())
all_users = models.User.objects.all()
group_comments = group.comments
if request.user.profile not in group.members.all() and not request.user.is_staff:
return redirect('main')
elif request.method == "POST":
update_form = UpdateGroupForm(request.POST, instance=group)
comment_form = ReportCommentsForm(request.POST)
action = request.POST.get('action')
if action != "Save Changes":
if action[0] == 'a':
report = get_object_or_404(models.Report, pk=action[1:])
group.reports.add(report)
group.save()
newactivity = models.Activity.objects.create(owned_by=request.user, time=datetime.datetime.now(),
description="Added " +
str(report.short_desc) + " to " + str(
group.name))
newactivity.save()
elif action[0] == 'r':
report = get_object_or_404(models.Report, pk=action[1:])
newactivity = models.Activity.objects.create(owned_by=request.user, time=datetime.datetime.now(),
description="Removed " +
str(report.short_desc) + " from " + str(
group.name))
newactivity.save()
group.reports.remove(report)
group.save()
elif action[0] == 'p':
m = get_object_or_404(models.Profile, pk=action[1:])
m.groups_in.add(group)
group.members.add(m)
group.save()
newactivity = models.Activity.objects.create(owned_by=request.user, time=datetime.datetime.now(),
description="Added " +
str(m.user.username) + " to " + str(
group.name))
newactivity.save()
newactivity = models.Activity.objects.create(owned_by=m.user, time=datetime.datetime.now(),
description=str(request.user) + " added you to " + str(
group.name))
newactivity.save()
elif action == 'l':
request.user.profile.groups_in.remove(group)
newactivity = models.Activity.objects.create(owned_by=request.user, time=datetime.datetime.now(),
description="You left " +
str(group.name))
newactivity.save()
group.members.remove(request.user.profile)
group.save()
return redirect('main')
elif action == 'e':
group.delete()
return redirect('main')
elif action == 'c':
c = models.ReportComments.objects.create(
creator=request.user.profile,
timestamp=datetime.datetime.now(),
comment=request.POST.get('comment')
)
group.comments.add(c)
group.save()
c.save()
else:
m = get_object_or_404(models.Profile, pk=action)
m.groups_in.remove(group)
group.members.remove(m)
group.save()
newactivity = models.Activity.objects.create(owned_by=request.user, time=datetime.datetime.now(),
description="Removed " +
str(m.user.username) + " from " + str(
group.name))
newactivity.save()
newactivity = models.Activity.objects.create(owned_by=m.user, time=datetime.datetime.now(),
description=str(request.user) + " removed you from " + str(
group.name))
newactivity.save()
elif update_form.is_valid():
if request.POST.get('action') == "Save Changes":
newactivity = models.Activity.objects.create(owned_by=request.user, time=datetime.datetime.now(),
description="Edited " +
str(group.name))
newactivity.save()
update_form.save()
else:
newactivity = models.Activity.objects.create(owned_by=request.user, time=datetime.datetime.now(),
description="Removed yourself from " +
str(group.name))
newactivity.save()
group.delete()
return redirect('main')
else:
update_form = UpdateGroupForm(instance=group)
comment_form = ReportCommentsForm()
return render(request, 'fileshare/view_group.html',
{'group': group, 'update_form': update_form, 'private_reports': private_reports,
'all_users': all_users, 'comment_form': comment_form, 'group_comments': group_comments})
@login_required(login_url='login')
def view_folder(request, folder_id):
folder = get_object_or_404(models.Folder, pk=folder_id)
all_reports = models.Report.objects.filter(owned_by=request.user)
able_to_add = all_reports.exclude(id__in=folder.reports.all())
if folder.owned_by != request.user:
return redirect('main')
elif request.method == "POST":
update_form = FolderForm(request.POST, instance=folder)
action = request.POST.get('action')
if action != "view" and action != "Update" and action != "Delete":
report = get_object_or_404(models.Report, pk=action[1:])
if action[0] == 'a':
folder.reports.add(report)
report.in_folder = True
newactivity = models.Activity.objects.create(owned_by=request.user, time=datetime.datetime.now(),
description="Added " +
str(report.short_desc) + " to " + str(
folder.name))
newactivity.save()
else:
folder.reports.remove(report)
report.in_folder = False
newactivity = models.Activity.objects.create(owned_by=request.user, time=datetime.datetime.now(),
description="Removed " +
str(report.short_desc) + " from " + str(
folder.name))
newactivity.save()
folder.save()
elif update_form.is_valid():
if action == "Update":
update_form.save()
newactivity = models.Activity.objects.create(owned_by=request.user, time=datetime.datetime.now(),
description="Updated " + str(folder.name))
newactivity.save()
return redirect('main')
else:
newactivity = models.Activity.objects.create(owned_by=request.user, time=datetime.datetime.now(),
description="Deleted " +
str(folder.name))
newactivity.save()
folder.delete()
return redirect('main')
else:
update_form = FolderForm(instance=folder)
return render(request, 'fileshare/view_folder.html',
{'folder': folder, 'update_form': update_form, 'all_reports': all_reports,
'able_to_add': able_to_add})
def register(request):
if request.method == 'POST':
register_form = signup_form(request.POST)
try:
theuser = authmodels.User.objects.get(username=request.POST['username'])
print("here")
return render(request, 'fileshare/register.html',
{'form': register_form, 'errormessage': "Username already exists!"})
except:
if register_form.is_valid():
# print(register_form.clean_password2())
print("here2")
if (register_form.clean_password2()):
return render(request, 'fileshare/register.html',
{'form': register_form, 'errormessage': "Passwords do not match."})
else:
newuser = authmodels.User.objects.create(username=request.POST['username'],
first_name=request.POST['first_name'],
last_name=request.POST['last_name'],
email=request.POST['email']
)
newuser.set_password(register_form.cleaned_data['password1'])
newuser.save()
random_generator = Random.new().read
key = RSA.generate(1024, random_generator)
pubkey = key.publickey()
newuser.profile.publickey = pubkey.exportKey()
newuser.profile.save()
newactivity = models.Activity.objects.create(owned_by=newuser, time=datetime.datetime.now(),
description="Account created.")
newactivity.save()
# return HttpResponseRedirect('/register/success/'+str(newuser.id))
# return(register_success(request,newuser.id))
return render(request, 'fileshare/register_success.html', {'key': str(key.exportKey())})
else:
register_form = signup_form()
return render(request, 'fileshare/register.html', {'form': signup_form()})
# site manager views
@login_required(login_url='login')
def sitemanager(request):
if (request.user.is_staff):
return render(request, 'fileshare/sitemanager.html')
@login_required(login_url='login')
def manage_users(request):
if (request.user.is_staff):
all_users = models.User.objects.all()
return render(request, 'fileshare/manage_users.html', {'all_users': all_users})
@login_required(login_url='login')
def manage_reports(request):
if (request.user.is_staff):
all_reports = models.Report.objects.all()
return render(request, 'fileshare/manage_reports.html', {'all_reports': all_reports})
@login_required(login_url='login')
def manage_groups(request):
if (request.user.is_staff):
all_groups = models.ProfileGroup.objects.all()
return render(request, 'fileshare/manage_groups.html', {'all_groups': all_groups})
@login_required(login_url='login')
def edit_user(request, user_id):
profile = models.Profile.objects.filter(user_id=user_id)
# print(profile[0].user.username)
return render(request, 'fileshare/edit_user.html', {'profile': profile[0]})
@login_required(login_url='login')
def sm_update_user(request):
profile = models.Profile.objects.filter(pk=request.POST['profile_id'])[0]
user = profile.user
user.first_name = request.POST['first_name']
user.last_name = request.POST['last_name']
user.email = request.POST['email']
user.is_active = not request.POST.get('is_active', None) == None
user.is_staff = not request.POST.get('is_staff', None) == None
user.save()
newactivity = models.Activity.objects.create(owned_by=request.user, time=datetime.datetime.now(),
description="Updated " + str(user.username))
newactivity.save()
return render(request, 'fileshare/user_update_success.html', {'profile': profile})
@login_required(login_url='login')
def delete_report(request, report_id):
report = get_object_or_404(models.Report, pk=report_id)
newactivity = models.Activity.objects.create(owned_by=request.user, time=datetime.datetime.now(),
description="Deleted " + str(report.short_desc))
newactivity.save()
report.delete()
return HttpResponseRedirect('/manage_reports.html')
# return render(request, 'fileshare/manage_reports.html')
# return render(request, 'fileshare/sm_update_user.html')
# def edit_group(request):
# all_groups = models.ProfileGroup.objects.all()
# return render(request, 'fileshare/edit_group.html')
# def update_user_permissions(request):
def test(request):
return HttpResponse("test")
@login_required(login_url='login')
def decrypt_message(request, message_pk):
query = models.Message.objects.get(pk=message_pk)
if request.method == 'POST':
decrypt_form = DecryptMessageForm(request.POST)
password = request.POST['password']
print(password)
password = password[2:]
password = password[:-1]
password2 = password.replace('\\n', '\n')
password = password[0:24] + password2[24:-25] + password[-25:]
print(password)
originalmessage = binascii.unhexlify(query.messagecontent)
try:
thekey = RSA.importKey(password)
pubkey = thekey.publickey()
            decryptedmessage = thekey.decrypt(originalmessage)
        except Exception:
            return render(request, 'fileshare/decrypt_message.html', {'message': "Invalid RSA key."})
        return render(request, 'fileshare/decrypt_message.html', {'message': decryptedmessage})
else:
decrypt_form = DecryptMessageForm()
return render(request, 'fileshare/decrypt_message.html', {'form': decrypt_form})
@login_required(login_url='login')
def updateunread(request, message_pk):
query = models.Conversation.objects.get(pk=message_pk)
currentcount = query.unreadmessages
query.unreadmessages = 0
query.save()
# usercount = int(request.user.profile.unreadmessages)
# request.user.profile.save()
return HttpResponse("success")
@login_required(login_url='login')
def search_results(request):
query = request.POST.get('search')
param = request.POST.get('parameter')
search_form = SearchForm(request.POST)
day = None
if query == "" and param in ["desc", "owner", "modified_by"]:
return redirect('main')
usernames = []
if param == "desc":
results = models.Report.objects.filter(
Q(short_desc__icontains=query) | Q(long_desc__icontains=query)
).exclude(~Q(owned_by=request.user), Q(private=True))
elif param == "owner":
usernames = models.User.objects.filter(username__icontains=query)
results = models.Report.objects.filter(Q(owned_by__in=usernames)).exclude(~Q(owned_by=request.user),
Q(private=True))
elif param == "modified_by":
results = models.Report.objects.filter(Q(last_modified_by__icontains=query)).exclude(~Q(owned_by=request.user),
Q(private=True))
elif param == "created":
datefield = search_form.fields['datepicker']
m = request.POST.get('datepicker_month')
d = request.POST.get('datepicker_day')
y = request.POST.get('datepicker_year')
day = datetime.date(int(y), int(m), int(d))
new_end = day + datetime.timedelta(days=1)
results = models.Report.objects.filter(
Q(created__range=[day, new_end])
).exclude(~Q(owned_by=request.user), Q(private=True))
elif param == "modified":
datefield = search_form.fields['datepicker']
m = request.POST.get('datepicker_month')
d = request.POST.get('datepicker_day')
y = request.POST.get('datepicker_year')
day = datetime.date(int(y), int(m), int(d))
new_end = day + datetime.timedelta(days=1)
results = models.Report.objects.filter(
Q(last_modified__range=[day, new_end])
).exclude(~Q(owned_by=request.user), Q(private=True))
else:
results = Q()
# Add support for searching by date created and date modified??
return render(request, 'fileshare/search_results.html',
{'query': query, 'results': results, 'usernames': usernames, 'param': param, 'date': day})
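# Sketch of the POST parameters read by search_results (names taken from the code
# above; the values are hypothetical):
#   {'search': 'quarterly', 'parameter': 'desc'}                 # text search
#   {'parameter': 'created', 'datepicker_month': '5',
#    'datepicker_day': '12', 'datepicker_year': '2016'}          # date search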
|
the-stack_106_18290
|
# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed
# under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import github.webhook
from github.util import _create_github_api_object
from model.concourse import (
JobMappingSet,
)
from model.webhook_dispatcher import (
WebhookDispatcherDeploymentConfig,
)
from util import info, warning, ctx, create_url_from_attributes
def sync_org_webhooks(whd_deployment_cfg: WebhookDispatcherDeploymentConfig,):
'''Syncs required organization webhooks for a given webhook dispatcher instance'''
    failed_hooks = 0  # count failures across all organizations
    for organization_name, github_api, webhook_url in \
            _enumerate_required_org_webhooks(whd_deployment_cfg=whd_deployment_cfg):
        webhook_syncer = github.webhook.GithubWebHookSyncer(github_api)
try:
webhook_syncer.create_or_update_org_hook(
organization_name=organization_name,
webhook_url=webhook_url,
skip_ssl_validation=False,
)
info(f'Created/updated organization hook for organization "{organization_name}"')
except Exception as e:
failed_hooks += 1
warning(f'org: {organization_name} - error: {e}')
if failed_hooks != 0:
warning('Some webhooks could not be set - for more details see above.')
def _enumerate_required_org_webhooks(
whd_deployment_cfg: WebhookDispatcherDeploymentConfig,
):
'''Returns tuples of 'github orgname', 'github api object' and 'webhook url' '''
cfg_factory = ctx().cfg_factory()
whd_cfg_name = whd_deployment_cfg.webhook_dispatcher_config_name()
whd_cfg = cfg_factory.webhook_dispatcher(whd_cfg_name)
concourse_cfg_names = whd_cfg.concourse_config_names()
concourse_cfgs = map(cfg_factory.concourse, concourse_cfg_names)
for concourse_cfg in concourse_cfgs:
job_mapping_set = cfg_factory.job_mapping(concourse_cfg.job_mapping_cfg_name())
for github_orgname, github_cfg_name in _enumerate_github_org_configs(job_mapping_set):
github_api = _create_github_api_object(
github_cfg=cfg_factory.github(github_cfg_name),
)
webhook_url = create_url_from_attributes(
netloc=whd_deployment_cfg.ingress_host(),
scheme='https',
path='github-webhook',
params='',
query='{name}={value}'.format(
name=github.webhook.DEFAULT_ORG_HOOK_QUERY_KEY,
value=whd_cfg_name
),
fragment=''
)
yield (github_orgname, github_api, webhook_url)
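# Sketch of the webhook URL produced above, assuming create_url_from_attributes
# assembles a standard URL from its parts (host and config name are hypothetical):
#   https://webhooks.example.org/github-webhook?<DEFAULT_ORG_HOOK_QUERY_KEY>=my-whd-cfg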
def _enumerate_github_org_configs(job_mapping_set: JobMappingSet,):
'''Returns tuples of github org names and github config names'''
for _, job_mapping in job_mapping_set.job_mappings().items():
github_org_configs = job_mapping.github_organisations()
for github_org_config in github_org_configs:
yield (github_org_config.org_name(), github_org_config.github_cfg_name())
|
the-stack_106_18293
|
# utils.py
# Copyright (C) 2008, 2009 Michael Trier ([email protected]) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
import contextlib
from functools import wraps
import getpass
import logging
import os
import platform
import subprocess
import re
import shutil
import stat
from sys import maxsize
import time
from unittest import SkipTest
from gitdb.util import (# NOQA @IgnorePep8
make_sha,
LockedFD, # @UnusedImport
file_contents_ro, # @UnusedImport
file_contents_ro_filepath, # @UnusedImport
LazyMixin, # @UnusedImport
to_hex_sha, # @UnusedImport
to_bin_sha, # @UnusedImport
bin_to_hex, # @UnusedImport
hex_to_bin, # @UnusedImport
)
from git.compat import is_win
import os.path as osp
from .exc import InvalidGitRepositoryError
# NOTE: Some of the unused imports might be used/imported by others.
# Handle once test-cases are back up and running.
# Most of these are unused here, but are for use by git-python modules so these
# don't see gitdb all the time. Flake of course doesn't like it.
__all__ = ["stream_copy", "join_path", "to_native_path_linux",
"join_path_native", "Stats", "IndexFileSHA1Writer", "Iterable", "IterableList",
"BlockingLockFile", "LockFile", 'Actor', 'get_user_id', 'assure_directory_exists',
'RemoteProgress', 'CallableRemoteProgress', 'rmtree', 'unbare_repo',
'HIDE_WINDOWS_KNOWN_ERRORS']
log = logging.getLogger(__name__)
#: We need an easy way to see if Appveyor TCs start failing,
#: so the errors marked with this var are considered "acknowledged" ones, awaiting remedy,
#: till then, we wish to hide them.
HIDE_WINDOWS_KNOWN_ERRORS = is_win and os.environ.get('HIDE_WINDOWS_KNOWN_ERRORS', True)
HIDE_WINDOWS_FREEZE_ERRORS = is_win and os.environ.get('HIDE_WINDOWS_FREEZE_ERRORS', True)
#{ Utility Methods
def unbare_repo(func):
"""Methods with this decorator raise InvalidGitRepositoryError if they
encounter a bare repository"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if self.repo.bare:
raise InvalidGitRepositoryError("Method '%s' cannot operate on bare repositories" % func.__name__)
# END bare method
return func(self, *args, **kwargs)
# END wrapper
return wrapper
@contextlib.contextmanager
def cwd(new_dir):
old_dir = os.getcwd()
os.chdir(new_dir)
try:
yield new_dir
finally:
os.chdir(old_dir)
def rmtree(path):
"""Remove the given recursively.
:note: we use shutil rmtree but adjust its behaviour to see whether files that
couldn't be deleted are read-only. Windows will not remove them in that case"""
def onerror(func, path, exc_info):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
try:
func(path) # Will scream if still not possible to delete.
except Exception as ex:
if HIDE_WINDOWS_KNOWN_ERRORS:
raise SkipTest("FIXME: fails with: PermissionError\n {}".format(ex)) from ex
raise
return shutil.rmtree(path, False, onerror)
def rmfile(path):
"""Ensure file deleted also on *Windows* where read-only files need special treatment."""
if osp.isfile(path):
if is_win:
os.chmod(path, 0o777)
os.remove(path)
def stream_copy(source, destination, chunk_size=512 * 1024):
"""Copy all data from the source stream into the destination stream in chunks
of size chunk_size
:return: amount of bytes written"""
br = 0
while True:
chunk = source.read(chunk_size)
destination.write(chunk)
br += len(chunk)
if len(chunk) < chunk_size:
break
# END reading output stream
return br
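# Behaviour sketch (hypothetical in-memory streams): data is copied in 512 KiB
# chunks and the number of bytes written is returned.
#   import io
#   src, dst = io.BytesIO(b'x' * 1024), io.BytesIO()
#   assert stream_copy(src, dst) == 1024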
def join_path(a, *p):
"""Join path tokens together similar to osp.join, but always use
'/' instead of possibly '\' on windows."""
path = a
for b in p:
if not b:
continue
if b.startswith('/'):
path += b[1:]
elif path == '' or path.endswith('/'):
path += b
else:
path += '/' + b
# END for each path token to add
return path
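# Quick examples of join_path's '/'-only joining (sketch, not in the original):
#   join_path('foo', 'bar')      -> 'foo/bar'
#   join_path('foo/', '/bar')    -> 'foo/bar'
#   join_path('', 'bar', 'baz')  -> 'bar/baz'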
if is_win:
def to_native_path_windows(path):
return path.replace('/', '\\')
def to_native_path_linux(path):
return path.replace('\\', '/')
__all__.append("to_native_path_windows")
to_native_path = to_native_path_windows
else:
# no need for any work on linux
def to_native_path_linux(path):
return path
to_native_path = to_native_path_linux
def join_path_native(a, *p):
"""
As join path, but makes sure an OS native path is returned. This is only
needed to play it safe on my dear windows and to assure nice paths that only
use '\'"""
return to_native_path(join_path(a, *p))
def assure_directory_exists(path, is_file=False):
"""Assure that the directory pointed to by path exists.
:param is_file: If True, path is assumed to be a file and handled correctly.
Otherwise it must be a directory
:return: True if the directory was created, False if it already existed"""
if is_file:
path = osp.dirname(path)
# END handle file
if not osp.isdir(path):
os.makedirs(path, exist_ok=True)
return True
return False
def _get_exe_extensions():
PATHEXT = os.environ.get('PATHEXT', None)
return tuple(p.upper() for p in PATHEXT.split(os.pathsep)) \
if PATHEXT \
        else (('.BAT', '.COM', '.EXE') if is_win else ())
def py_where(program, path=None):
# From: http://stackoverflow.com/a/377028/548792
winprog_exts = _get_exe_extensions()
def is_exec(fpath):
return osp.isfile(fpath) and os.access(fpath, os.X_OK) and (
os.name != 'nt' or not winprog_exts or any(fpath.upper().endswith(ext)
for ext in winprog_exts))
progs = []
if not path:
path = os.environ["PATH"]
for folder in path.split(os.pathsep):
folder = folder.strip('"')
if folder:
exe_path = osp.join(folder, program)
for f in [exe_path] + ['%s%s' % (exe_path, e) for e in winprog_exts]:
if is_exec(f):
progs.append(f)
return progs
def _cygexpath(drive, path):
if osp.isabs(path) and not drive:
        ## Invoked from `cygpath()` directly with `D:Apps\123`?
        #  It's an error; leave the path alone (only the slashes get normalized below).
p = path
else:
p = path and osp.normpath(osp.expandvars(osp.expanduser(path)))
if osp.isabs(p):
if drive:
# Confusing, maybe a remote system should expand vars.
p = path
else:
p = cygpath(p)
elif drive:
p = '/cygdrive/%s/%s' % (drive.lower(), p)
return p.replace('\\', '/')
_cygpath_parsers = (
## See: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
## and: https://www.cygwin.com/cygwin-ug-net/using.html#unc-paths
(re.compile(r"\\\\\?\\UNC\\([^\\]+)\\([^\\]+)(?:\\(.*))?"),
(lambda server, share, rest_path: '//%s/%s/%s' % (server, share, rest_path.replace('\\', '/'))),
False
),
(re.compile(r"\\\\\?\\(\w):[/\\](.*)"),
_cygexpath,
False
),
(re.compile(r"(\w):[/\\](.*)"),
_cygexpath,
False
),
(re.compile(r"file:(.*)", re.I),
(lambda rest_path: rest_path),
True),
(re.compile(r"(\w{2,}:.*)"), # remote URL, do nothing
(lambda url: url),
False),
)
def cygpath(path):
"""Use :meth:`git.cmd.Git.polish_url()` instead, that works on any environment."""
if not path.startswith(('/cygdrive', '//')):
for regex, parser, recurse in _cygpath_parsers:
match = regex.match(path)
if match:
path = parser(*match.groups())
if recurse:
path = cygpath(path)
break
else:
path = _cygexpath(None, path)
return path
_decygpath_regex = re.compile(r"/cygdrive/(\w)(/.*)?")
def decygpath(path):
m = _decygpath_regex.match(path)
if m:
drive, rest_path = m.groups()
path = '%s:%s' % (drive.upper(), rest_path or '')
return path.replace('/', '\\')
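# Illustrative sketch, not part of the original module: converting a native
# Windows path to its Cygwin form and back.  Exact results depend on the
# environment, since _cygexpath() expands '~' and environment variables.
def _example_cygpath_roundtrip():
    cyg = cygpath(r'C:\Users\foo')   # e.g. '/cygdrive/c/Users/foo'
    win = decygpath(cyg)             # e.g. 'C:\\Users\\foo'
    return cyg, win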
#: Store boolean flags denoting if a specific Git executable
#: is from a Cygwin installation (since `functools.lru_cache()` was unavailable on PY2).
_is_cygwin_cache = {}
def is_cygwin_git(git_executable):
if not is_win:
return False
#from subprocess import check_output
is_cygwin = _is_cygwin_cache.get(git_executable)
if is_cygwin is None:
is_cygwin = False
try:
git_dir = osp.dirname(git_executable)
if not git_dir:
res = py_where(git_executable)
git_dir = osp.dirname(res[0]) if res else None
## Just a name given, not a real path.
uname_cmd = osp.join(git_dir, 'uname')
process = subprocess.Popen([uname_cmd], stdout=subprocess.PIPE,
universal_newlines=True)
uname_out, _ = process.communicate()
#retcode = process.poll()
is_cygwin = 'CYGWIN' in uname_out
except Exception as ex:
log.debug('Failed checking if running in CYGWIN due to: %r', ex)
_is_cygwin_cache[git_executable] = is_cygwin
return is_cygwin
def get_user_id():
""":return: string identifying the currently active system user as name@node"""
return "%s@%s" % (getpass.getuser(), platform.node())
def finalize_process(proc, **kwargs):
"""Wait for the process (clone, fetch, pull or push) and handle its errors accordingly"""
## TODO: No close proc-streams??
proc.wait(**kwargs)
def expand_path(p, expand_vars=True):
try:
p = osp.expanduser(p)
if expand_vars:
p = osp.expandvars(p)
return osp.normpath(osp.abspath(p))
except Exception:
return None
#} END utilities
#{ Classes
class RemoteProgress(object):
"""
Handler providing an interface to parse progress information emitted by git-push
and git-fetch and to dispatch callbacks allowing subclasses to react to the progress.
"""
_num_op_codes = 9
BEGIN, END, COUNTING, COMPRESSING, WRITING, RECEIVING, RESOLVING, FINDING_SOURCES, CHECKING_OUT = \
[1 << x for x in range(_num_op_codes)]
STAGE_MASK = BEGIN | END
OP_MASK = ~STAGE_MASK
DONE_TOKEN = 'done.'
TOKEN_SEPARATOR = ', '
__slots__ = ('_cur_line',
'_seen_ops',
'error_lines', # Lines that started with 'error:' or 'fatal:'.
                 'other_lines')  # Lines not denoting progress (e.g. push-infos).
re_op_absolute = re.compile(r"(remote: )?([\w\s]+):\s+()(\d+)()(.*)")
re_op_relative = re.compile(r"(remote: )?([\w\s]+):\s+(\d+)% \((\d+)/(\d+)\)(.*)")
def __init__(self):
self._seen_ops = []
self._cur_line = None
self.error_lines = []
self.other_lines = []
def _parse_progress_line(self, line):
"""Parse progress information from the given line as retrieved by git-push
or git-fetch.
- Lines that do not contain progress info are stored in :attr:`other_lines`.
- Lines that seem to contain an error (i.e. start with error: or fatal:) are stored
in :attr:`error_lines`."""
# handle
# Counting objects: 4, done.
# Compressing objects: 50% (1/2)
# Compressing objects: 100% (2/2)
# Compressing objects: 100% (2/2), done.
self._cur_line = line = line.decode('utf-8') if isinstance(line, bytes) else line
if self.error_lines or self._cur_line.startswith(('error:', 'fatal:')):
self.error_lines.append(self._cur_line)
return
# find escape characters and cut them away - regex will not work with
# them as they are non-ascii. As git might expect a tty, it will send them
last_valid_index = None
for i, c in enumerate(reversed(line)):
if ord(c) < 32:
                # it's a (negative) slice index into the line
last_valid_index = -i - 1
# END character was non-ascii
# END for each character in line
if last_valid_index is not None:
line = line[:last_valid_index]
# END cut away invalid part
line = line.rstrip()
cur_count, max_count = None, None
match = self.re_op_relative.match(line)
if match is None:
match = self.re_op_absolute.match(line)
if not match:
self.line_dropped(line)
self.other_lines.append(line)
return
# END could not get match
op_code = 0
_remote, op_name, _percent, cur_count, max_count, message = match.groups()
# get operation id
if op_name == "Counting objects":
op_code |= self.COUNTING
elif op_name == "Compressing objects":
op_code |= self.COMPRESSING
elif op_name == "Writing objects":
op_code |= self.WRITING
elif op_name == 'Receiving objects':
op_code |= self.RECEIVING
elif op_name == 'Resolving deltas':
op_code |= self.RESOLVING
elif op_name == 'Finding sources':
op_code |= self.FINDING_SOURCES
elif op_name == 'Checking out files':
op_code |= self.CHECKING_OUT
else:
# Note: On windows it can happen that partial lines are sent
# Hence we get something like "CompreReceiving objects", which is
# a blend of "Compressing objects" and "Receiving objects".
# This can't really be prevented, so we drop the line verbosely
# to make sure we get informed in case the process spits out new
# commands at some point.
self.line_dropped(line)
# Note: Don't add this line to the other lines, as we have to silently
# drop it
return
# END handle op code
# figure out stage
if op_code not in self._seen_ops:
self._seen_ops.append(op_code)
op_code |= self.BEGIN
# END begin opcode
if message is None:
message = ''
# END message handling
message = message.strip()
if message.endswith(self.DONE_TOKEN):
op_code |= self.END
message = message[:-len(self.DONE_TOKEN)]
# END end message handling
message = message.strip(self.TOKEN_SEPARATOR)
self.update(op_code,
cur_count and float(cur_count),
max_count and float(max_count),
message)
def new_message_handler(self):
"""
:return:
a progress handler suitable for handle_process_output(), passing lines on to this Progress
handler in a suitable format"""
def handler(line):
return self._parse_progress_line(line.rstrip())
# end
return handler
def line_dropped(self, line):
"""Called whenever a line could not be understood and was therefore dropped."""
pass
def update(self, op_code, cur_count, max_count=None, message=''):
"""Called whenever the progress changes
:param op_code:
Integer allowing to be compared against Operation IDs and stage IDs.
            Stage IDs are BEGIN and END. BEGIN and END will each only be set once
            per Operation ID. It may be that BEGIN and END are set at once in case only
one progress message was emitted due to the speed of the operation.
Between BEGIN and END, none of these flags will be set
Operation IDs are all held within the OP_MASK. Only one Operation ID will
be active per call.
:param cur_count: Current absolute count of items
:param max_count:
The maximum count of items we expect. It may be None in case there is
no maximum number of items or if it is (yet) unknown.
:param message:
In case of the 'WRITING' operation, it contains the amount of bytes
transferred. It may possibly be used for other purposes as well.
You may read the contents of the current line in self._cur_line"""
pass
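# Minimal sketch, added for illustration: a RemoteProgress subclass that simply
# prints percentages.  Only the update() callback and the op-code semantics of
# the class above are relied upon; everything else here is hypothetical.
class _PrintingProgress(RemoteProgress):
    def update(self, op_code, cur_count, max_count=None, message=''):
        if max_count:
            print('%3.0f%% %s' % (100.0 * float(cur_count) / float(max_count), message))
        else:
            print('%s %s' % (cur_count, message))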
class CallableRemoteProgress(RemoteProgress):
"""An implementation forwarding updates to any callable"""
__slots__ = ('_callable')
def __init__(self, fn):
self._callable = fn
super(CallableRemoteProgress, self).__init__()
def update(self, *args, **kwargs):
self._callable(*args, **kwargs)
class Actor(object):
"""Actors hold information about a person acting on the repository. They
can be committers and authors or anything with a name and an email as
mentioned in the git log entries."""
# PRECOMPILED REGEX
name_only_regex = re.compile(r'<(.*)>')
name_email_regex = re.compile(r'(.*) <(.*?)>')
# ENVIRONMENT VARIABLES
# read when creating new commits
env_author_name = "GIT_AUTHOR_NAME"
env_author_email = "GIT_AUTHOR_EMAIL"
env_committer_name = "GIT_COMMITTER_NAME"
env_committer_email = "GIT_COMMITTER_EMAIL"
# CONFIGURATION KEYS
conf_name = 'name'
conf_email = 'email'
__slots__ = ('name', 'email')
def __init__(self, name, email):
self.name = name
self.email = email
def __eq__(self, other):
return self.name == other.name and self.email == other.email
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash((self.name, self.email))
def __str__(self):
return self.name
def __repr__(self):
return '<git.Actor "%s <%s>">' % (self.name, self.email)
@classmethod
def _from_string(cls, string):
"""Create an Actor from a string.
:param string: is the string, which is expected to be in regular git format
John Doe <[email protected]>
:return: Actor """
m = cls.name_email_regex.search(string)
if m:
name, email = m.groups()
return Actor(name, email)
else:
m = cls.name_only_regex.search(string)
if m:
return Actor(m.group(1), None)
# assume best and use the whole string as name
return Actor(string, None)
# END special case name
# END handle name/email matching
@classmethod
def _main_actor(cls, env_name, env_email, config_reader=None):
actor = Actor('', '')
user_id = None # We use this to avoid multiple calls to getpass.getuser()
def default_email():
nonlocal user_id
if not user_id:
user_id = get_user_id()
return user_id
def default_name():
return default_email().split('@')[0]
for attr, evar, cvar, default in (('name', env_name, cls.conf_name, default_name),
('email', env_email, cls.conf_email, default_email)):
try:
val = os.environ[evar]
setattr(actor, attr, val)
except KeyError:
if config_reader is not None:
setattr(actor, attr, config_reader.get_value('user', cvar, default()))
# END config-reader handling
if not getattr(actor, attr):
setattr(actor, attr, default())
# END handle name
# END for each item to retrieve
return actor
@classmethod
def committer(cls, config_reader=None):
"""
:return: Actor instance corresponding to the configured committer. It behaves
similar to the git implementation, such that the environment will override
configuration values of config_reader. If no value is set at all, it will be
generated
:param config_reader: ConfigReader to use to retrieve the values from in case
they are not set in the environment"""
return cls._main_actor(cls.env_committer_name, cls.env_committer_email, config_reader)
@classmethod
def author(cls, config_reader=None):
"""Same as committer(), but defines the main author. It may be specified in the environment,
but defaults to the committer"""
return cls._main_actor(cls.env_author_name, cls.env_author_email, config_reader)
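# Illustrative sketch, not part of the original module: parsing an actor from a
# git-style "Name <email>" string and reading the configured committer.
def _example_actor_usage():
    a = Actor._from_string('John Doe <jdoe@example.com>')
    assert (a.name, a.email) == ('John Doe', 'jdoe@example.com')
    # Falls back to environment variables, then to name@node, when unconfigured.
    return Actor.committer(config_reader=None)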
class Stats(object):
"""
Represents stat information as presented by git at the end of a merge. It is
created from the output of a diff operation.
``Example``::
c = Commit( sha1 )
s = c.stats
s.total # full-stat-dict
s.files # dict( filepath : stat-dict )
``stat-dict``
A dictionary with the following keys and values::
deletions = number of deleted lines as int
insertions = number of inserted lines as int
lines = total number of lines changed as int, or deletions + insertions
``full-stat-dict``
In addition to the items in the stat-dict, it features additional information::
files = number of changed files as int"""
__slots__ = ("total", "files")
def __init__(self, total, files):
self.total = total
self.files = files
@classmethod
def _list_from_string(cls, repo, text):
"""Create a Stat object from output retrieved by git-diff.
:return: git.Stat"""
hsh = {'total': {'insertions': 0, 'deletions': 0, 'lines': 0, 'files': 0}, 'files': {}}
for line in text.splitlines():
(raw_insertions, raw_deletions, filename) = line.split("\t")
insertions = raw_insertions != '-' and int(raw_insertions) or 0
deletions = raw_deletions != '-' and int(raw_deletions) or 0
hsh['total']['insertions'] += insertions
hsh['total']['deletions'] += deletions
hsh['total']['lines'] += insertions + deletions
hsh['total']['files'] += 1
hsh['files'][filename.strip()] = {'insertions': insertions,
'deletions': deletions,
'lines': insertions + deletions}
return Stats(hsh['total'], hsh['files'])
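# Minimal sketch, added for illustration: the text parsed here is tab-separated
# numstat-style git-diff output (insertions, deletions, path per line).
def _example_stats_parsing():
    numstat = "3\t1\tREADME.md\n10\t0\tsetup.py"
    stats = Stats._list_from_string(None, numstat)
    assert stats.total['files'] == 2
    assert stats.total['insertions'] == 13
    assert stats.total['deletions'] == 1
    return stats.files['README.md']['lines']  # 4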
class IndexFileSHA1Writer(object):
"""Wrapper around a file-like object that remembers the SHA1 of
    the data written to it. It will write a sha when the stream is closed
    or if asked for explicitly using write_sha.
    Only useful for the index file.
:note: Based on the dulwich project"""
__slots__ = ("f", "sha1")
def __init__(self, f):
self.f = f
self.sha1 = make_sha(b"")
def write(self, data):
self.sha1.update(data)
return self.f.write(data)
def write_sha(self):
sha = self.sha1.digest()
self.f.write(sha)
return sha
def close(self):
sha = self.write_sha()
self.f.close()
return sha
def tell(self):
return self.f.tell()
class LockFile(object):
"""Provides methods to obtain, check for, and release a file based lock which
should be used to handle concurrent access to the same file.
As we are a utility class to be derived from, we only use protected methods.
Locks will automatically be released on destruction"""
__slots__ = ("_file_path", "_owns_lock")
def __init__(self, file_path):
self._file_path = file_path
self._owns_lock = False
def __del__(self):
self._release_lock()
def _lock_file_path(self):
""":return: Path to lockfile"""
return "%s.lock" % (self._file_path)
def _has_lock(self):
""":return: True if we have a lock and if the lockfile still exists
:raise AssertionError: if our lock-file does not exist"""
return self._owns_lock
def _obtain_lock_or_raise(self):
"""Create a lock file as flag for other instances, mark our instance as lock-holder
:raise IOError: if a lock was already present or a lock file could not be written"""
if self._has_lock():
return
lock_file = self._lock_file_path()
if osp.isfile(lock_file):
raise IOError("Lock for file %r did already exist, delete %r in case the lock is illegal" %
(self._file_path, lock_file))
try:
flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
if is_win:
flags |= os.O_SHORT_LIVED
fd = os.open(lock_file, flags, 0)
os.close(fd)
except OSError as e:
raise IOError(str(e)) from e
self._owns_lock = True
def _obtain_lock(self):
"""The default implementation will raise if a lock cannot be obtained.
Subclasses may override this method to provide a different implementation"""
return self._obtain_lock_or_raise()
def _release_lock(self):
"""Release our lock if we have one"""
if not self._has_lock():
return
        # if someone removed our file beforehand, let's just flag this issue
# instead of failing, to make it more usable.
lfp = self._lock_file_path()
try:
rmfile(lfp)
except OSError:
pass
self._owns_lock = False
class BlockingLockFile(LockFile):
"""The lock file will block until a lock could be obtained, or fail after
a specified timeout.
:note: If the directory containing the lock was removed, an exception will
be raised during the blocking period, preventing hangs as the lock
can never be obtained."""
__slots__ = ("_check_interval", "_max_block_time")
def __init__(self, file_path, check_interval_s=0.3, max_block_time_s=maxsize):
"""Configure the instance
        :param check_interval_s:
            Period of time to sleep until the lock is checked the next time.
        :param max_block_time_s: Maximum amount of seconds we may wait for the lock;
            by default we wait a nearly unlimited amount of time"""
super(BlockingLockFile, self).__init__(file_path)
self._check_interval = check_interval_s
self._max_block_time = max_block_time_s
def _obtain_lock(self):
"""This method blocks until it obtained the lock, or raises IOError if
it ran out of time or if the parent directory was not available anymore.
If this method returns, you are guaranteed to own the lock"""
starttime = time.time()
maxtime = starttime + float(self._max_block_time)
while True:
try:
super(BlockingLockFile, self)._obtain_lock()
except IOError as e:
                # sanity check: if the directory leading to the lockfile is not
# readable anymore, raise an exception
curtime = time.time()
if not osp.isdir(osp.dirname(self._lock_file_path())):
msg = "Directory containing the lockfile %r was not readable anymore after waiting %g seconds" % (
self._lock_file_path(), curtime - starttime)
raise IOError(msg) from e
# END handle missing directory
if curtime >= maxtime:
msg = "Waited %g seconds for lock at %r" % (maxtime - starttime, self._lock_file_path())
raise IOError(msg) from e
# END abort if we wait too long
time.sleep(self._check_interval)
else:
break
# END endless loop
class IterableList(list):
"""
    List of iterable objects that allows querying an object by id or by named index::
heads = repo.heads
heads.master
heads['master']
heads[0]
It requires an id_attribute name to be set which will be queried from its
contained items to have a means for comparison.
A prefix can be specified which is to be used in case the id returned by the
items always contains a prefix that does not matter to the user, so it
can be left out."""
__slots__ = ('_id_attr', '_prefix')
def __new__(cls, id_attr, prefix=''):
return super(IterableList, cls).__new__(cls)
def __init__(self, id_attr, prefix=''):
self._id_attr = id_attr
self._prefix = prefix
def __contains__(self, attr):
# first try identity match for performance
try:
rval = list.__contains__(self, attr)
if rval:
return rval
except (AttributeError, TypeError):
pass
# END handle match
# otherwise make a full name search
try:
getattr(self, attr)
return True
except (AttributeError, TypeError):
return False
# END handle membership
def __getattr__(self, attr):
attr = self._prefix + attr
for item in self:
if getattr(item, self._id_attr) == attr:
return item
# END for each item
return list.__getattribute__(self, attr)
def __getitem__(self, index):
if isinstance(index, int):
return list.__getitem__(self, index)
try:
return getattr(self, index)
except AttributeError as e:
raise IndexError("No item found with id %r" % (self._prefix + index)) from e
# END handle getattr
def __delitem__(self, index):
delindex = index
if not isinstance(index, int):
delindex = -1
name = self._prefix + index
for i, item in enumerate(self):
if getattr(item, self._id_attr) == name:
delindex = i
break
# END search index
# END for each item
if delindex == -1:
raise IndexError("Item with name %s not found" % name)
# END handle error
# END get index to delete
list.__delitem__(self, delindex)
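# Illustrative sketch, not part of the original module: items are looked up by
# the attribute named at construction time ('name' here is an assumption).
def _example_iterable_list():
    class _Item(object):
        def __init__(self, name):
            self.name = name
    items = IterableList('name')
    items.extend([_Item('master'), _Item('develop')])
    assert items['master'] is items[0]
    assert items.develop is items[1]
    assert 'master' in items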
class Iterable(object):
"""Defines an interface for iterable items which is to assure a uniform
way to retrieve and iterate items within the git repository"""
__slots__ = ()
_id_attribute_ = "attribute that most suitably identifies your instance"
@classmethod
def list_items(cls, repo, *args, **kwargs):
"""
        Find all items of this type - subclasses can specify args and kwargs differently.
        If no args are given, subclasses are obliged to return all items.
        :note: Favor the iter_items method as it avoids eagerly collecting all
            items into a list.
        :return: list(Item,...) list of item instances"""
out_list = IterableList(cls._id_attribute_)
out_list.extend(cls.iter_items(repo, *args, **kwargs))
return out_list
@classmethod
def iter_items(cls, repo, *args, **kwargs):
"""For more information about the arguments, see list_items
:return: iterator yielding Items"""
raise NotImplementedError("To be implemented by Subclass")
#} END classes
class NullHandler(logging.Handler):
def emit(self, record):
pass
|
the-stack_106_18294
|
#!/usr/bin/env python
import auturing
import argparse
import os
import sys
import logging
import numpy as np
def main():
LAUNCHER_DIR = os.path.join(os.path.dirname(__file__))
PYCKAGE_RESOURCES_DIR = os.path.join(os.path.abspath(os.path.join(LAUNCHER_DIR,os.pardir)),"resources")
parser = argparse.ArgumentParser()
parser.add_argument('program_name',
type=str)
parser.add_argument('--tape',"-t",
help='Input tape (comma-separated values) <DEFAULT:0>',
default="0")
parser.add_argument('--begin_state',"-b",
help='Begin state of the program <DEFAULT: "e1">',
default="e1")
parser.add_argument('--head_position',
help='Head position on the tape at start <DEFAULT:0>',
type=int,
default=0)
    parser.add_argument('--return_begin',
                        help='Index starting from which the tape should be returned <DEFAULT: 0>',
                        type=int,
                        default=0)
    parser.add_argument('--return_end',
                        help='Index until which the tape should be returned <DEFAULT: end>',
                        default=None)
    parser.add_argument('--max_steps',
                        help='Maximum number of simulation steps, 0 means no limit <DEFAULT: 0>',
                        default=0,
                        type=int)
    parser.add_argument('--verbosity',"-v",
                        help='Choose your verbosity. Default: CRITICAL',
                        required=False,
                        default="CRITICAL",
                        choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"])
parser.add_argument('--progress_bar',"-p",
help='Displays a progress bar',
action='store_true')
args = parser.parse_args()
if args.max_steps == 0:
args.max_steps = np.inf
assert args.max_steps > 0
verboselevel = "logging."+str(args.verbosity)
logging.basicConfig(level=eval(verboselevel),
format='%(asctime)s %(message)s',
stream=sys.stdout)
show_progress_bar = False
if args.progress_bar:
show_progress_bar = True
args = vars(args)
args["tape"] = [int(x.strip()) for x in args["tape"].split(',')]
assert len(args["tape"]) >= 0
logging.debug('[DEBUG] Received tape {}'.format(args["tape"]))
auturing.run_automata.runit(loglevel = verboselevel, progress_bar = show_progress_bar, resources = PYCKAGE_RESOURCES_DIR, args=args)
if __name__ == "__main__":
main()
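# Example invocation (illustrative only; the script name, program name and
# tape content below are assumptions):
#
#   python launcher.py my_program --tape "1,1,0,1" --begin_state e1 \
#       --head_position 0 --max_steps 1000 -v DEBUG --progress_bar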
|
the-stack_106_18300
|
import math
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class ASFormerMultiStageModel(nn.Module):
def __init__(self,device, num_stages, num_layers, num_f_maps, dim, num_classes):
super(ASFormerMultiStageModel, self).__init__()
self.num_classes = num_classes
self.conv_cls = nn.Conv1d(num_f_maps, num_classes, 1)
self.conv_bound = nn.Conv1d(num_f_maps, 1, 1)
self.stage1 = ASFormerSingleStageModel(device,num_layers, num_f_maps, dim, num_classes)
self.stages = nn.ModuleList([copy.deepcopy(Decoder(device,num_layers,2,2, num_f_maps, num_classes, num_classes,att_type='sliding_att', alpha=exponential_descrease(s))) for s in range(num_stages-1)])
def forward(self, x, mask):
out = self.stage1(x, mask) # feature = self.shared_layers(x, mask)
out_cls = self.conv_cls(out)
outputs = out_cls.unsqueeze(0)
for s in self.stages:
if self.num_classes == 1:
out_cls,_ = s(torch.sigmoid(out_cls) * mask[:, 0:1, :],out*mask[:,0:1, :], mask)
else:
out_cls,_ = s(F.softmax(out_cls, dim=1) * mask[:, 0:1, :],out*mask[:,0:1, :], mask)
outputs = torch.cat((outputs, out_cls.unsqueeze(0)), dim=0)
return outputs
class ASFormerSingleStageModel(nn.Module):
def __init__(self,device, num_layers, num_f_maps, dim, num_classes):
super(ASFormerSingleStageModel, self).__init__()
self.num_classes = num_classes
self.conv_1x1 = nn.Conv1d(dim, num_f_maps, 1)
self.layers=Encoder(device,num_layers, 2, 2, num_f_maps, dim, num_classes, 0.3, att_type='sliding_att', alpha=1)
#self.conv_out = nn.Conv1d(num_f_maps, num_classes, 1)
def forward(self, x, mask):
# out = self.conv_1x1(x)
out = self.layers(x, mask)
#out = self.conv_out(out) * mask[:, 0:1, :]
return out
def exponential_descrease(idx_decoder, p=3):
return math.exp(-p*idx_decoder)
class AttentionHelper(nn.Module):
def __init__(self):
super(AttentionHelper, self).__init__()
self.softmax = nn.Softmax(dim=-1)
def scalar_dot_att(self, proj_query, proj_key, proj_val, padding_mask):
'''
scalar dot attention.
:param proj_query: shape of (B, C, L)
:param proj_key: shape of (B, C, L)
:param proj_val: shape of (B, C, L)
        :param padding_mask: shape of (B, 1, L), broadcast over the channel dimension
        :return: tuple of (attention value of shape (B, C, L), attention weights of shape (B, L, L))
'''
m, c1, l1 = proj_query.shape
m, c2, l2 = proj_key.shape
assert c1 == c2
energy = torch.bmm(proj_query.permute(0, 2, 1), proj_key) # out of shape (B, L1, L2)
attention = energy / np.sqrt(c1)
attention = attention + torch.log(padding_mask + 1e-6) # mask the zero paddings. log(1e-6) for zero paddings
attention = self.softmax(attention)
attention = attention * padding_mask
attention = attention.permute(0,2,1)
out = torch.bmm(proj_val, attention)
return out, attention
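# Minimal sketch, added for illustration: scalar_dot_att takes query/key/value
# of shape (B, C, L) plus a padding mask broadcastable against (B, L, L); the
# toy sizes below are assumptions.
def _example_scalar_dot_att():
    helper = AttentionHelper()
    B, C, L = 2, 8, 16
    q, k, v = torch.randn(B, C, L), torch.randn(B, C, L), torch.randn(B, C, L)
    mask = torch.ones(B, 1, L)
    out, att = helper.scalar_dot_att(q, k, v, mask)
    assert out.shape == (B, C, L) and att.shape == (B, L, L)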
class AttLayer(nn.Module):
def __init__(self, device,q_dim, k_dim, v_dim, r1, r2, r3, bl, stage, att_type): # r1 = r2
super(AttLayer, self).__init__()
self.query_conv = nn.Conv1d(in_channels=q_dim, out_channels=q_dim // r1, kernel_size=1)
self.key_conv = nn.Conv1d(in_channels=k_dim, out_channels=k_dim // r2, kernel_size=1)
self.value_conv = nn.Conv1d(in_channels=v_dim, out_channels=v_dim // r3, kernel_size=1)
self.conv_out = nn.Conv1d(in_channels=v_dim // r3, out_channels=v_dim, kernel_size=1)
self.device=device
self.bl = bl
self.stage = stage
self.att_type = att_type
assert self.att_type in ['normal_att', 'block_att', 'sliding_att']
assert self.stage in ['encoder','decoder']
self.att_helper = AttentionHelper()
self.window_mask = self.construct_window_mask()
def construct_window_mask(self):
'''
construct window mask of shape (1, l, l + l//2 + l//2)
'''
window_mask = torch.zeros((1, self.bl, self.bl + 2* (self.bl //2)))
for i in range(self.bl):
window_mask[:, :, i:i+self.bl] = 1
return window_mask.to(self.device)
def forward(self, x1, x2, mask):
        # x1: feature of the current (self) stream, used for query and key
        # x2: feature passed in from the encoder, used for value in the decoder stage
query = self.query_conv(x1)
key = self.key_conv(x1)
if self.stage == 'decoder':
assert x2 is not None
value = self.value_conv(x2)
else:
value = self.value_conv(x1)
if self.att_type == 'normal_att':
return self._normal_self_att(query, key, value, mask)
elif self.att_type == 'block_att':
return self._block_wise_self_att(query, key, value, mask)
elif self.att_type == 'sliding_att':
return self._sliding_window_self_att(query, key, value, mask)
def _normal_self_att(self,q,k,v, mask):
m_batchsize, c1, L = q.size()
_,c2,L = k.size()
_,c3,L = v.size()
padding_mask = torch.ones((m_batchsize, 1, L)).to(self.device) * mask[:,0:1,:]
output, attentions = self.att_helper.scalar_dot_att(q, k, v, padding_mask)
output = self.conv_out(F.relu(output))
output = output[:, :, 0:L]
return output * mask[:, 0:1, :]
def _block_wise_self_att(self, q,k,v, mask):
m_batchsize, c1, L = q.size()
_,c2,L = k.size()
_,c3,L = v.size()
nb = L // self.bl
if L % self.bl != 0:
q = torch.cat([q, torch.zeros((m_batchsize, c1, self.bl - L % self.bl)).to(self.device)], dim=-1)
k = torch.cat([k, torch.zeros((m_batchsize, c2, self.bl - L % self.bl)).to(self.device)], dim=-1)
v = torch.cat([v, torch.zeros((m_batchsize, c3, self.bl - L % self.bl)).to(self.device)], dim=-1)
nb += 1
padding_mask = torch.cat([torch.ones((m_batchsize, 1, L)).to(self.device) * mask[:,0:1,:], torch.zeros((m_batchsize, 1, self.bl * nb - L)).to(self.device)],dim=-1)
q = q.reshape(m_batchsize, c1, nb, self.bl).permute(0, 2, 1, 3).reshape(m_batchsize * nb, c1, self.bl)
padding_mask = padding_mask.reshape(m_batchsize, 1, nb, self.bl).permute(0, 2, 1, 3).reshape(m_batchsize * nb,1, self.bl)
k = k.reshape(m_batchsize, c2, nb, self.bl).permute(0, 2, 1, 3).reshape(m_batchsize * nb, c2, self.bl)
v = v.reshape(m_batchsize, c3, nb, self.bl).permute(0, 2, 1, 3).reshape(m_batchsize * nb, c3, self.bl)
output, attentions = self.att_helper.scalar_dot_att(q, k, v, padding_mask)
output = self.conv_out(F.relu(output))
output = output.reshape(m_batchsize, nb, c3, self.bl).permute(0, 2, 1, 3).reshape(m_batchsize, c3, nb * self.bl)
output = output[:, :, 0:L]
return output * mask[:, 0:1, :]
def _sliding_window_self_att(self, q,k,v, mask):
# block operation
m_batchsize, c1, L = q.size()
_, c2, _ = k.size()
_, c3, _ = v.size()
# assert m_batchsize == 1
# currently, we only accept input with batch size 1
# padding zeros for the last segment
nb = L // self.bl
if L % self.bl != 0:
q = torch.cat([q, torch.zeros((m_batchsize, c1, self.bl - L % self.bl)).to(self.device)], dim=-1)
k = torch.cat([k, torch.zeros((m_batchsize, c2, self.bl - L % self.bl)).to(self.device)], dim=-1)
v = torch.cat([v, torch.zeros((m_batchsize, c3, self.bl - L % self.bl)).to(self.device)], dim=-1)
nb += 1
padding_mask = torch.cat([torch.ones((m_batchsize, 1, L)).to(self.device) * mask[:,0:1,:], torch.zeros((m_batchsize, 1, self.bl * nb - L)).to(self.device)],dim=-1)
# sliding window approach, by splitting query_proj and key_proj into shape (c1, l) x (c1, 2l)
# sliding window for query_proj: reshape
q = q.reshape(m_batchsize, c1, nb, self.bl).permute(0, 2, 1, 3).reshape(m_batchsize * nb, c1, self.bl)
# sliding window approach for key_proj
# 1. add paddings at the start and end
k = torch.cat([torch.zeros(m_batchsize, c2, self.bl // 2).to(self.device), k, torch.zeros(m_batchsize, c2, self.bl // 2).to(self.device)], dim=-1)
v = torch.cat([torch.zeros(m_batchsize, c3, self.bl // 2).to(self.device), v, torch.zeros(m_batchsize, c3, self.bl // 2).to(self.device)], dim=-1)
padding_mask = torch.cat([torch.zeros(m_batchsize, 1, self.bl // 2).to(self.device), padding_mask, torch.zeros(m_batchsize, 1, self.bl // 2).to(self.device)], dim=-1)
# 2. reshape key_proj of shape (m_batchsize*nb, c1, 2*self.bl)
k = torch.cat([k[:,:, i*self.bl:(i+1)*self.bl+(self.bl//2)*2] for i in range(nb)], dim=0) # special case when self.bl = 1
v = torch.cat([v[:,:, i*self.bl:(i+1)*self.bl+(self.bl//2)*2] for i in range(nb)], dim=0)
# 3. construct window mask of shape (1, l, 2l), and use it to generate final mask
padding_mask = torch.cat([padding_mask[:,:, i*self.bl:(i+1)*self.bl+(self.bl//2)*2] for i in range(nb)], dim=0) # of shape (m*nb, 1, 2l)
final_mask = self.window_mask.repeat(m_batchsize * nb, 1, 1) * padding_mask
output, attention = self.att_helper.scalar_dot_att(q, k, v, final_mask)
output = self.conv_out(F.relu(output))
output = output.reshape(m_batchsize, nb, -1, self.bl).permute(0, 2, 1, 3).reshape(m_batchsize, -1, nb * self.bl)
output = output[:, :, 0:L]
return output * mask[:, 0:1, :]
class MultiHeadAttLayer(nn.Module):
def __init__(self, device,q_dim, k_dim, v_dim, r1, r2, r3, bl, stage, att_type, num_head):
super(MultiHeadAttLayer, self).__init__()
# assert v_dim % num_head == 0
self.conv_out = nn.Conv1d(v_dim * num_head, v_dim, 1)
self.layers = nn.ModuleList(
[copy.deepcopy(AttLayer(device,q_dim, k_dim, v_dim, r1, r2, r3, bl, stage, att_type)) for i in range(num_head)])
self.dropout = nn.Dropout(p=0.5)
def forward(self, x1, x2, mask):
out = torch.cat([layer(x1, x2, mask) for layer in self.layers], dim=1)
out = self.conv_out(self.dropout(out))
return out
class ConvFeedForward(nn.Module):
def __init__(self, dilation, in_channels, out_channels):
super(ConvFeedForward, self).__init__()
self.layer = nn.Sequential(
nn.Conv1d(in_channels, out_channels, 3, padding=dilation, dilation=dilation),
nn.ReLU()
)
def forward(self, x):
return self.layer(x)
class FCFeedForward(nn.Module):
def __init__(self, in_channels, out_channels):
super(FCFeedForward, self).__init__()
self.layer = nn.Sequential(
nn.Conv1d(in_channels, out_channels, 1), # conv1d equals fc
nn.ReLU(),
nn.Dropout(),
nn.Conv1d(out_channels, out_channels, 1)
)
def forward(self, x):
return self.layer(x)
class AttModule(nn.Module):
def __init__(self, device,dilation, in_channels, out_channels, r1, r2, att_type, stage, alpha):
super(AttModule, self).__init__()
self.feed_forward = ConvFeedForward(dilation, in_channels, out_channels)
self.instance_norm = nn.InstanceNorm1d(in_channels, track_running_stats=False)
self.att_layer = AttLayer(device,in_channels, in_channels, out_channels, r1, r1, r2, dilation, att_type=att_type, stage=stage) # dilation
self.conv_1x1 = nn.Conv1d(out_channels, out_channels, 1)
self.dropout = nn.Dropout()
self.alpha = alpha
def forward(self, x, f, mask):
out = self.feed_forward(x)
out = self.alpha * self.att_layer(self.instance_norm(out), f, mask) + out
out = self.conv_1x1(out)
out = self.dropout(out)
return (x + out) * mask[:, 0:1, :]
class PositionalEncoding(nn.Module):
"Implement the PE function."
def __init__(self, d_model, max_len=10000):
super(PositionalEncoding, self).__init__()
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2) *
-(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).permute(0,2,1) # of shape (1, d_model, l)
self.pe = nn.Parameter(pe, requires_grad=True)
# self.register_buffer('pe', pe)
def forward(self, x):
return x + self.pe[:, :, 0:x.shape[2]]
class Encoder(nn.Module):
def __init__(self,device, num_layers, r1, r2, num_f_maps, input_dim, num_classes, channel_masking_rate, att_type, alpha):
super(Encoder, self).__init__()
self.conv_1x1 = nn.Conv1d(input_dim, num_f_maps, 1) # fc layer
# self.position_en = PositionalEncoding(d_model=num_f_maps)
self.layers = nn.ModuleList(
[AttModule(device,2 ** i, num_f_maps, num_f_maps, r1, r2, att_type, 'encoder', alpha) for i in # 2**i
range(num_layers)])
# self.conv_out = nn.Conv1d(num_f_maps, num_classes, 1)
self.dropout = nn.Dropout2d(p=channel_masking_rate)
self.channel_masking_rate = channel_masking_rate
def forward(self, x, mask):
'''
:param x: (N, C, L)
:param mask:
:return:
'''
if self.channel_masking_rate > 0:
x = x.unsqueeze(2)
x = self.dropout(x)
x = x.squeeze(2)
feature = self.conv_1x1(x)
# feature = self.position_en(feature)
for layer in self.layers:
feature = layer(feature, None, mask)
# out = self.conv_out(feature) * mask[:, 0:1, :]
return feature
class Decoder(nn.Module):
def __init__(self,device, num_layers, r1, r2, num_f_maps, input_dim, num_classes, att_type, alpha):
        super(Decoder, self).__init__()
        # self.position_en = PositionalEncoding(d_model=num_f_maps)
self.conv_1x1 = nn.Conv1d(input_dim, num_f_maps, 1)
self.layers = nn.ModuleList(
[AttModule(device,2 ** i, num_f_maps, num_f_maps, r1, r2, att_type, 'decoder', alpha) for i in # 2 ** i
range(num_layers)])
self.conv_out = nn.Conv1d(num_f_maps, num_classes, 1)
def forward(self, x, fencoder, mask):
feature = self.conv_1x1(x)
for layer in self.layers:
feature = layer(feature, fencoder, mask)
out = self.conv_out(feature) * mask[:, 0:1, :]
return out, feature
class MyTransformer(nn.Module):
def __init__(self,device, num_decoders, num_layers, r1, r2, num_f_maps, input_dim, num_classes, channel_masking_rate):
super(MyTransformer, self).__init__()
self.encoder = Encoder(device,num_layers, r1, r2, num_f_maps, input_dim, num_classes, channel_masking_rate, att_type='sliding_att', alpha=1)
self.decoders = nn.ModuleList([copy.deepcopy(Decoder(device,num_layers, r1, r2, num_f_maps, num_classes, num_classes, att_type='sliding_att', alpha=exponential_descrease(s))) for s in range(num_decoders)]) # num_decoders
self.activation = nn.Softmax(dim=1)
def forward(self, x, mask):
outputs = []
        out, feature = self.encoder(x, mask)  # NOTE: the Encoder above was modified to return only `feature`; this two-value unpacking matches the original ASFormer Encoder that also returned class logits
outputs.append(self.activation(out))
for decoder in self.decoders:
out, feature = decoder(F.softmax(out, dim=1) * mask[:, 0:1, :], feature* mask[:, 0:1, :], mask)
outputs.append(self.activation(out))
return outputs
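# Illustrative sketch, not part of the original module: running the multi-stage
# model on a dummy batch; all sizes below are made-up assumptions.  Note that
# the mask carries `num_classes` channels, of which only channel 0 is consumed.
def _example_asformer_forward():
    device = torch.device('cpu')
    model = ASFormerMultiStageModel(device, num_stages=3, num_layers=4,
                                    num_f_maps=16, dim=32, num_classes=5)
    x = torch.randn(1, 32, 100)    # (N, feature_dim, T)
    mask = torch.ones(1, 5, 100)   # ones mark valid frames
    outputs = model(x, mask)
    return outputs.shape           # (num_stages, N, num_classes, T)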
|
the-stack_106_18302
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for exporting TensorFlow symbols to the API.
Exporting a function or a class:
To export a function or a class use the tf_export decorator. For example:
```python
@tf_export('foo', 'bar.foo')
def foo(...):
...
```
If a function is assigned to a variable, you can export it by calling
tf_export explicitly. For example:
```python
foo = get_foo(...)
tf_export('foo', 'bar.foo')(foo)
```
Exporting a constant
```python
foo = 1
tf_export('consts.foo').export_constant(__name__, 'foo')
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import sys
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
ESTIMATOR_API_NAME = 'estimator'
KERAS_API_NAME = 'keras'
TENSORFLOW_API_NAME = 'tensorflow'
# List of subpackage names used by TensorFlow components. Have to check that
# TensorFlow core repo does not export any symbols under these names.
SUBPACKAGE_NAMESPACES = [ESTIMATOR_API_NAME]
_Attributes = collections.namedtuple(
'ExportedApiAttributes', ['names', 'constants'])
# Attribute values must be unique to each API.
API_ATTRS = {
TENSORFLOW_API_NAME: _Attributes(
'_tf_api_names',
'_tf_api_constants'),
ESTIMATOR_API_NAME: _Attributes(
'_estimator_api_names',
'_estimator_api_constants'),
KERAS_API_NAME: _Attributes(
'_keras_api_names',
'_keras_api_constants')
}
API_ATTRS_V1 = {
TENSORFLOW_API_NAME: _Attributes(
'_tf_api_names_v1',
'_tf_api_constants_v1'),
ESTIMATOR_API_NAME: _Attributes(
'_estimator_api_names_v1',
'_estimator_api_constants_v1'),
KERAS_API_NAME: _Attributes(
'_keras_api_names_v1',
'_keras_api_constants_v1')
}
class SymbolAlreadyExposedError(Exception):
"""Raised when adding API names to symbol that already has API names."""
pass
class InvalidSymbolNameError(Exception):
"""Raised when trying to export symbol as an invalid or unallowed name."""
pass
def get_canonical_name_for_symbol(
symbol, api_name=TENSORFLOW_API_NAME,
add_prefix_to_v1_names=False):
"""Get canonical name for the API symbol.
Args:
symbol: API function or class.
api_name: API name (tensorflow or estimator).
add_prefix_to_v1_names: Specifies whether a name available only in V1
should be prefixed with compat.v1.
Returns:
    Canonical name for the API symbol (e.g. initializers.zeros) if a
    canonical name could be determined. Otherwise, returns None.
"""
if not hasattr(symbol, '__dict__'):
return None
api_names_attr = API_ATTRS[api_name].names
_, undecorated_symbol = tf_decorator.unwrap(symbol)
if api_names_attr not in undecorated_symbol.__dict__:
return None
api_names = getattr(undecorated_symbol, api_names_attr)
deprecated_api_names = undecorated_symbol.__dict__.get(
'_tf_deprecated_api_names', [])
canonical_name = get_canonical_name(api_names, deprecated_api_names)
if canonical_name:
return canonical_name
# If there is no V2 canonical name, get V1 canonical name.
api_names_attr = API_ATTRS_V1[api_name].names
api_names = getattr(undecorated_symbol, api_names_attr)
v1_canonical_name = get_canonical_name(api_names, deprecated_api_names)
if add_prefix_to_v1_names:
return 'compat.v1.%s' % v1_canonical_name
return v1_canonical_name
def get_canonical_name(api_names, deprecated_api_names):
"""Get preferred endpoint name.
Args:
api_names: API names iterable.
deprecated_api_names: Deprecated API names iterable.
Returns:
Returns one of the following in decreasing preference:
- first non-deprecated endpoint
- first endpoint
- None
"""
non_deprecated_name = next(
(name for name in api_names if name not in deprecated_api_names),
None)
if non_deprecated_name:
return non_deprecated_name
if api_names:
return api_names[0]
return None
def get_v1_names(symbol):
"""Get a list of TF 1.* names for this symbol.
Args:
symbol: symbol to get API names for.
Returns:
List of all API names for this symbol including TensorFlow and
Estimator names.
"""
names_v1 = []
tensorflow_api_attr_v1 = API_ATTRS_V1[TENSORFLOW_API_NAME].names
estimator_api_attr_v1 = API_ATTRS_V1[ESTIMATOR_API_NAME].names
keras_api_attr_v1 = API_ATTRS_V1[KERAS_API_NAME].names
if not hasattr(symbol, '__dict__'):
return names_v1
if tensorflow_api_attr_v1 in symbol.__dict__:
names_v1.extend(getattr(symbol, tensorflow_api_attr_v1))
if estimator_api_attr_v1 in symbol.__dict__:
names_v1.extend(getattr(symbol, estimator_api_attr_v1))
if keras_api_attr_v1 in symbol.__dict__:
names_v1.extend(getattr(symbol, keras_api_attr_v1))
return names_v1
def get_v2_names(symbol):
"""Get a list of TF 2.0 names for this symbol.
Args:
symbol: symbol to get API names for.
Returns:
List of all API names for this symbol including TensorFlow and
Estimator names.
"""
names_v2 = []
tensorflow_api_attr = API_ATTRS[TENSORFLOW_API_NAME].names
estimator_api_attr = API_ATTRS[ESTIMATOR_API_NAME].names
keras_api_attr = API_ATTRS[KERAS_API_NAME].names
if not hasattr(symbol, '__dict__'):
return names_v2
if tensorflow_api_attr in symbol.__dict__:
names_v2.extend(getattr(symbol, tensorflow_api_attr))
if estimator_api_attr in symbol.__dict__:
names_v2.extend(getattr(symbol, estimator_api_attr))
if keras_api_attr in symbol.__dict__:
names_v2.extend(getattr(symbol, keras_api_attr))
return names_v2
def get_v1_constants(module):
"""Get a list of TF 1.* constants in this module.
Args:
module: TensorFlow module.
Returns:
List of all API constants under the given module including TensorFlow and
Estimator constants.
"""
constants_v1 = []
tensorflow_constants_attr_v1 = API_ATTRS_V1[TENSORFLOW_API_NAME].constants
estimator_constants_attr_v1 = API_ATTRS_V1[ESTIMATOR_API_NAME].constants
if hasattr(module, tensorflow_constants_attr_v1):
constants_v1.extend(getattr(module, tensorflow_constants_attr_v1))
if hasattr(module, estimator_constants_attr_v1):
constants_v1.extend(getattr(module, estimator_constants_attr_v1))
return constants_v1
def get_v2_constants(module):
"""Get a list of TF 2.0 constants in this module.
Args:
module: TensorFlow module.
Returns:
List of all API constants under the given module including TensorFlow and
Estimator constants.
"""
constants_v2 = []
tensorflow_constants_attr = API_ATTRS[TENSORFLOW_API_NAME].constants
estimator_constants_attr = API_ATTRS[ESTIMATOR_API_NAME].constants
if hasattr(module, tensorflow_constants_attr):
constants_v2.extend(getattr(module, tensorflow_constants_attr))
if hasattr(module, estimator_constants_attr):
constants_v2.extend(getattr(module, estimator_constants_attr))
return constants_v2
class api_export(object): # pylint: disable=invalid-name
"""Provides ways to export symbols to the TensorFlow API."""
def __init__(self, *args, **kwargs): # pylint: disable=g-doc-args
"""Export under the names *args (first one is considered canonical).
Args:
*args: API names in dot delimited format.
**kwargs: Optional keyed arguments.
v1: Names for the TensorFlow V1 API. If not set, we will use V2 API
names both for TensorFlow V1 and V2 APIs.
overrides: List of symbols that this is overriding
          (those overridden api exports will be removed). Note: passing overrides
has no effect on exporting a constant.
api_name: Name of the API you want to generate (e.g. `tensorflow` or
`estimator`). Default is `tensorflow`.
allow_multiple_exports: Allow symbol to be exported multiple time under
different names.
"""
self._names = args
self._names_v1 = kwargs.get('v1', args)
if 'v2' in kwargs:
raise ValueError('You passed a "v2" argument to tf_export. This is not '
'what you want. Pass v2 names directly as positional '
'arguments instead.')
self._api_name = kwargs.get('api_name', TENSORFLOW_API_NAME)
self._overrides = kwargs.get('overrides', [])
self._allow_multiple_exports = kwargs.get('allow_multiple_exports', False)
self._validate_symbol_names()
def _validate_symbol_names(self):
"""Validate you are exporting symbols under an allowed package.
We need to ensure things exported by tf_export, estimator_export, etc.
export symbols under disjoint top-level package names.
For TensorFlow, we check that it does not export anything under subpackage
names used by components (estimator, keras, etc.).
For each component, we check that it exports everything under its own
subpackage.
Raises:
InvalidSymbolNameError: If you try to export symbol under disallowed name.
"""
all_symbol_names = set(self._names) | set(self._names_v1)
if self._api_name == TENSORFLOW_API_NAME:
for subpackage in SUBPACKAGE_NAMESPACES:
if any(n.startswith(subpackage) for n in all_symbol_names):
raise InvalidSymbolNameError(
'@tf_export is not allowed to export symbols under %s.*' % (
subpackage))
else:
if not all(n.startswith(self._api_name) for n in all_symbol_names):
raise InvalidSymbolNameError(
'Can only export symbols under package name of component. '
'e.g. tensorflow_estimator must export all symbols under '
'tf.estimator')
def __call__(self, func):
"""Calls this decorator.
Args:
func: decorated symbol (function or class).
Returns:
The input function with _tf_api_names attribute set.
Raises:
SymbolAlreadyExposedError: Raised when a symbol already has API names
and kwarg `allow_multiple_exports` not set.
"""
api_names_attr = API_ATTRS[self._api_name].names
api_names_attr_v1 = API_ATTRS_V1[self._api_name].names
# Undecorate overridden names
for f in self._overrides:
_, undecorated_f = tf_decorator.unwrap(f)
delattr(undecorated_f, api_names_attr)
delattr(undecorated_f, api_names_attr_v1)
_, undecorated_func = tf_decorator.unwrap(func)
self.set_attr(undecorated_func, api_names_attr, self._names)
self.set_attr(undecorated_func, api_names_attr_v1, self._names_v1)
return func
def set_attr(self, func, api_names_attr, names):
# Check for an existing api. We check if attribute name is in
# __dict__ instead of using hasattr to verify that subclasses have
# their own _tf_api_names as opposed to just inheriting it.
if api_names_attr in func.__dict__:
if not self._allow_multiple_exports:
raise SymbolAlreadyExposedError(
'Symbol %s is already exposed as %s.' %
(func.__name__, getattr(func, api_names_attr))) # pylint: disable=protected-access
setattr(func, api_names_attr, names)
def export_constant(self, module_name, name):
"""Store export information for constants/string literals.
Export information is stored in the module where constants/string literals
are defined.
e.g.
```python
foo = 1
bar = 2
tf_export("consts.foo").export_constant(__name__, 'foo')
tf_export("consts.bar").export_constant(__name__, 'bar')
```
Args:
module_name: (string) Name of the module to store constant at.
name: (string) Current constant name.
"""
module = sys.modules[module_name]
api_constants_attr = API_ATTRS[self._api_name].constants
api_constants_attr_v1 = API_ATTRS_V1[self._api_name].constants
if not hasattr(module, api_constants_attr):
setattr(module, api_constants_attr, [])
# pylint: disable=protected-access
getattr(module, api_constants_attr).append(
(self._names, name))
if not hasattr(module, api_constants_attr_v1):
setattr(module, api_constants_attr_v1, [])
getattr(module, api_constants_attr_v1).append(
(self._names_v1, name))
def kwarg_only(f):
"""A wrapper that throws away all non-kwarg arguments."""
f_argspec = tf_inspect.getargspec(f)
def wrapper(*args, **kwargs):
if args:
raise TypeError(
'{f} only takes keyword args (possible keys: {kwargs}). '
'Please pass these args as kwargs instead.'
.format(f=f.__name__, kwargs=f_argspec.args))
return f(**kwargs)
return tf_decorator.make_decorator(f, wrapper, decorator_argspec=f_argspec)
tf_export = functools.partial(api_export, api_name=TENSORFLOW_API_NAME)
estimator_export = functools.partial(api_export, api_name=ESTIMATOR_API_NAME)
keras_export = functools.partial(api_export, api_name=KERAS_API_NAME)
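# Minimal sketch, added for illustration: exporting a helper under a V2 name
# plus an extra V1-only alias.  The symbol and its endpoint names are hypothetical.
@tf_export('experimental.dummy_identity',
           v1=['experimental.dummy_identity', 'compat_dummy_identity'])
def _dummy_identity(x):
  """Returns its input unchanged (illustrative only)."""
  return x
# After decoration, the V2 endpoints are recorded on `_tf_api_names` and the
# V1 endpoints on `_tf_api_names_v1` of the undecorated function.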
|
the-stack_106_18306
|
from tests import TestCase
from src.masonite.mail import Mailable
class Welcome(Mailable):
def build(self):
return (
self.to("[email protected]")
.subject("Masonite 4")
.from_("[email protected]")
.text("text from Masonite!")
.html("<h1>Hello from Masonite!</h1>")
)
class TestSMTPDriver(TestCase):
def setUp(self):
super().setUp()
self.fake("mail")
def tearDown(self):
super().tearDown()
self.restore("mail")
def test_mock_mail(self):
self.fake("mail")
welcome_email = self.application.make("mail").mailable(Welcome()).send()
(
welcome_email.seeEmailCc("")
.seeEmailBcc("")
.seeEmailContains("Hello from Masonite!")
.seeEmailContains("text from Masonite!")
.seeEmailFrom("[email protected]")
.seeEmailCountEquals(1)
.send()
.seeEmailCountEquals(2)
)
def test_mock_mail_sending(self):
self.fake("mail")
welcome_email = self.application.make("mail").mailable(Welcome())
(welcome_email.seeEmailWasNotSent().send().seeEmailWasSent())
|
the-stack_106_18307
|
import re
from .common import InfoExtractor
from ..utils import (
float_or_none,
ExtractorError,
)
class UplynkIE(InfoExtractor):
IE_NAME = 'uplynk'
_VALID_URL = r'https?://.*?\.uplynk\.com/(?P<path>ext/[0-9a-f]{32}/(?P<external_id>[^/?&]+)|(?P<id>[0-9a-f]{32}))\.(?:m3u8|json)(?:.*?\bpbs=(?P<session_id>[^&]+))?'
_TEST = {
'url': 'http://content.uplynk.com/e89eaf2ce9054aa89d92ddb2d817a52e.m3u8',
'info_dict': {
'id': 'e89eaf2ce9054aa89d92ddb2d817a52e',
'ext': 'mp4',
'title': '030816-kgo-530pm-solar-eclipse-vid_web.mp4',
'uploader_id': '4413701bf5a1488db55b767f8ae9d4fa',
},
'params': {
# m3u8 download
'skip_download': True,
},
}
def _extract_uplynk_info(self, uplynk_content_url):
path, external_id, video_id, session_id = re.match(UplynkIE._VALID_URL, uplynk_content_url).groups()
display_id = video_id or external_id
formats, subtitles = self._extract_m3u8_formats_and_subtitles(
'http://content.uplynk.com/%s.m3u8' % path,
display_id, 'mp4', 'm3u8_native')
if session_id:
for f in formats:
f['extra_param_to_segment_url'] = 'pbs=' + session_id
self._sort_formats(formats)
asset = self._download_json('http://content.uplynk.com/player/assetinfo/%s.json' % path, display_id)
if asset.get('error') == 1:
            raise ExtractorError('%s said: %s' % (self.IE_NAME, asset['msg']), expected=True)
return {
'id': asset['asset'],
'title': asset['desc'],
'thumbnail': asset.get('default_poster_url'),
'duration': float_or_none(asset.get('duration')),
'uploader_id': asset.get('owner'),
'formats': formats,
'subtitles': subtitles,
}
def _real_extract(self, url):
return self._extract_uplynk_info(url)
class UplynkPreplayIE(UplynkIE):
IE_NAME = 'uplynk:preplay'
_VALID_URL = r'https?://.*?\.uplynk\.com/preplay2?/(?P<path>ext/[0-9a-f]{32}/(?P<external_id>[^/?&]+)|(?P<id>[0-9a-f]{32}))\.json'
_TEST = None
def _real_extract(self, url):
path, external_id, video_id = self._match_valid_url(url).groups()
display_id = video_id or external_id
preplay = self._download_json(url, display_id)
content_url = 'http://content.uplynk.com/%s.m3u8' % path
session_id = preplay.get('sid')
if session_id:
content_url += '?pbs=' + session_id
return self._extract_uplynk_info(content_url)
|
the-stack_106_18309
|
"""
Record Arrays
=============
Record arrays expose the fields of structured arrays as properties.
Most commonly, ndarrays contain elements of a single type, e.g. floats,
integers, bools etc. However, it is possible for elements to be combinations
of these using structured types, such as::
>>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', np.int64), ('y', np.float64)])
>>> a
array([(1, 2.), (1, 2.)], dtype=[('x', '<i8'), ('y', '<f8')])
Here, each element consists of two fields: x (an int), and y (a float).
This is known as a structured array. The different fields are analogous
to columns in a spread-sheet. The different fields can be accessed as
one would a dictionary::
>>> a['x']
array([1, 1])
>>> a['y']
array([2., 2.])
Record arrays allow us to access fields as properties::
>>> ar = np.rec.array(a)
>>> ar.x
array([1, 1])
>>> ar.y
array([2., 2.])
"""
import os
import warnings
from collections import Counter, OrderedDict
from . import numeric as sb
from . import numerictypes as nt
from numpy.compat import (
isfileobj, os_fspath, contextlib_nullcontext
)
from numpy.core.overrides import set_module
from .arrayprint import get_printoptions
# All of the functions allow formats to be a dtype
__all__ = ['record', 'recarray', 'format_parser']
ndarray = sb.ndarray
_byteorderconv = {'b':'>',
'l':'<',
'n':'=',
'B':'>',
'L':'<',
'N':'=',
'S':'s',
's':'s',
'>':'>',
'<':'<',
'=':'=',
'|':'|',
'I':'|',
'i':'|'}
# formats regular expression
# allows multidimension spec with a tuple syntax in front
# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 '
# are equally allowed
numfmt = nt.typeDict
# taken from OrderedDict recipes in the Python documentation
# https://docs.python.org/3.3/library/collections.html#ordereddict-examples-and-recipes
class _OrderedCounter(Counter, OrderedDict):
"""Counter that remembers the order elements are first encountered"""
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, OrderedDict(self))
def __reduce__(self):
return self.__class__, (OrderedDict(self),)
def find_duplicate(list):
"""Find duplication in a list, return a list of duplicated elements"""
return [
item
for item, counts in _OrderedCounter(list).items()
if counts > 1
]
@set_module('numpy')
class format_parser:
"""
Class to convert formats, names, titles description to a dtype.
After constructing the format_parser object, the dtype attribute is
the converted data-type:
``dtype = format_parser(formats, names, titles).dtype``
Attributes
----------
dtype : dtype
The converted data-type.
Parameters
----------
formats : str or list of str
The format description, either specified as a string with
comma-separated format descriptions in the form ``'f8, i4, a5'``, or
a list of format description strings in the form
``['f8', 'i4', 'a5']``.
names : str or list/tuple of str
The field names, either specified as a comma-separated string in the
form ``'col1, col2, col3'``, or as a list or tuple of strings in the
form ``['col1', 'col2', 'col3']``.
An empty list can be used, in that case default field names
('f0', 'f1', ...) are used.
titles : sequence
Sequence of title strings. An empty list can be used to leave titles
out.
aligned : bool, optional
If True, align the fields by padding as the C-compiler would.
Default is False.
byteorder : str, optional
If specified, all the fields will be changed to the
provided byte-order. Otherwise, the default byte-order is
used. For all available string specifiers, see `dtype.newbyteorder`.
See Also
--------
dtype, typename, sctype2char
Examples
--------
>>> np.format_parser(['<f8', '<i4', '<a5'], ['col1', 'col2', 'col3'],
... ['T1', 'T2', 'T3']).dtype
dtype([(('T1', 'col1'), '<f8'), (('T2', 'col2'), '<i4'), (('T3', 'col3'), 'S5')])
`names` and/or `titles` can be empty lists. If `titles` is an empty list,
titles will simply not appear. If `names` is empty, default field names
will be used.
>>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],
... []).dtype
dtype([('col1', '<f8'), ('col2', '<i4'), ('col3', '<S5')])
>>> np.format_parser(['<f8', '<i4', '<a5'], [], []).dtype
dtype([('f0', '<f8'), ('f1', '<i4'), ('f2', 'S5')])
"""
def __init__(self, formats, names, titles, aligned=False, byteorder=None):
self._parseFormats(formats, aligned)
self._setfieldnames(names, titles)
self._createdtype(byteorder)
def _parseFormats(self, formats, aligned=False):
""" Parse the field formats """
if formats is None:
raise ValueError("Need formats argument")
if isinstance(formats, list):
dtype = sb.dtype(
[('f{}'.format(i), format_) for i, format_ in enumerate(formats)],
aligned,
)
else:
dtype = sb.dtype(formats, aligned)
fields = dtype.fields
if fields is None:
dtype = sb.dtype([('f1', dtype)], aligned)
fields = dtype.fields
keys = dtype.names
self._f_formats = [fields[key][0] for key in keys]
self._offsets = [fields[key][1] for key in keys]
self._nfields = len(keys)
def _setfieldnames(self, names, titles):
"""convert input field names into a list and assign to the _names
attribute """
if names:
if type(names) in [list, tuple]:
pass
elif isinstance(names, str):
names = names.split(',')
else:
raise NameError("illegal input names %s" % repr(names))
self._names = [n.strip() for n in names[:self._nfields]]
else:
self._names = []
# if the names are not specified, they will be assigned as
# "f0, f1, f2,..."
# if not enough names are specified, they will be assigned as "f[n],
# f[n+1],..." etc. where n is the number of specified names..."
self._names += ['f%d' % i for i in range(len(self._names),
self._nfields)]
# check for redundant names
_dup = find_duplicate(self._names)
if _dup:
raise ValueError("Duplicate field names: %s" % _dup)
if titles:
self._titles = [n.strip() for n in titles[:self._nfields]]
else:
self._titles = []
titles = []
if self._nfields > len(titles):
self._titles += [None] * (self._nfields - len(titles))
def _createdtype(self, byteorder):
dtype = sb.dtype({
'names': self._names,
'formats': self._f_formats,
'offsets': self._offsets,
'titles': self._titles,
})
if byteorder is not None:
byteorder = _byteorderconv[byteorder[0]]
dtype = dtype.newbyteorder(byteorder)
self.dtype = dtype
class record(nt.void):
"""A data-type scalar that allows field access as attribute lookup.
"""
# manually set name and module so that this class's type shows up
# as numpy.record when printed
__name__ = 'record'
__module__ = 'numpy'
def __repr__(self):
if get_printoptions()['legacy'] == '1.13':
return self.__str__()
return super(record, self).__repr__()
def __str__(self):
if get_printoptions()['legacy'] == '1.13':
return str(self.item())
return super(record, self).__str__()
def __getattribute__(self, attr):
if attr in ('setfield', 'getfield', 'dtype'):
return nt.void.__getattribute__(self, attr)
try:
return nt.void.__getattribute__(self, attr)
except AttributeError:
pass
fielddict = nt.void.__getattribute__(self, 'dtype').fields
res = fielddict.get(attr, None)
if res:
obj = self.getfield(*res[:2])
# if it has fields return a record,
# otherwise return the object
try:
dt = obj.dtype
except AttributeError:
#happens if field is Object type
return obj
if dt.names is not None:
return obj.view((self.__class__, obj.dtype))
return obj
else:
raise AttributeError("'record' object has no "
"attribute '%s'" % attr)
def __setattr__(self, attr, val):
if attr in ('setfield', 'getfield', 'dtype'):
raise AttributeError("Cannot set '%s' attribute" % attr)
fielddict = nt.void.__getattribute__(self, 'dtype').fields
res = fielddict.get(attr, None)
if res:
return self.setfield(val, *res[:2])
else:
if getattr(self, attr, None):
return nt.void.__setattr__(self, attr, val)
else:
raise AttributeError("'record' object has no "
"attribute '%s'" % attr)
def __getitem__(self, indx):
obj = nt.void.__getitem__(self, indx)
# copy behavior of record.__getattribute__,
if isinstance(obj, nt.void) and obj.dtype.names is not None:
return obj.view((self.__class__, obj.dtype))
else:
# return a single element
return obj
def pprint(self):
"""Pretty-print all fields."""
# pretty-print all fields
names = self.dtype.names
maxlen = max(len(name) for name in names)
fmt = '%% %ds: %%s' % maxlen
rows = [fmt % (name, getattr(self, name)) for name in names]
return "\n".join(rows)
# The recarray is almost identical to a standard array (which supports
# named fields already) The biggest difference is that it can use
# attribute-lookup to find the fields and it is constructed using
# a record.
# If byteorder is given it forces a particular byteorder on all
# the fields (and any subfields)
class recarray(ndarray):
"""Construct an ndarray that allows field access using attributes.
    Arrays may have a data-type containing fields, analogous
    to columns in a spreadsheet.  An example is ``[(x, int), (y, float)]``,
where each entry in the array is a pair of ``(int, float)``. Normally,
these attributes are accessed using dictionary lookups such as ``arr['x']``
and ``arr['y']``. Record arrays allow the fields to be accessed as members
of the array, using ``arr.x`` and ``arr.y``.
Parameters
----------
shape : tuple
Shape of output array.
dtype : data-type, optional
The desired data-type. By default, the data-type is determined
from `formats`, `names`, `titles`, `aligned` and `byteorder`.
formats : list of data-types, optional
A list containing the data-types for the different columns, e.g.
``['i4', 'f8', 'i4']``. `formats` does *not* support the new
convention of using types directly, i.e. ``(int, float, int)``.
Note that `formats` must be a list, not a tuple.
Given that `formats` is somewhat limited, we recommend specifying
`dtype` instead.
names : tuple of str, optional
The name of each column, e.g. ``('x', 'y', 'z')``.
buf : buffer, optional
By default, a new array is created of the given shape and data-type.
If `buf` is specified and is an object exposing the buffer interface,
the array will use the memory from the existing buffer. In this case,
the `offset` and `strides` keywords are available.
Other Parameters
----------------
titles : tuple of str, optional
Aliases for column names. For example, if `names` were
``('x', 'y', 'z')`` and `titles` is
``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then
``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``.
byteorder : {'<', '>', '='}, optional
Byte-order for all fields.
aligned : bool, optional
Align the fields in memory as the C-compiler would.
strides : tuple of ints, optional
Buffer (`buf`) is interpreted according to these strides (strides
define how many bytes each array element, row, column, etc.
occupy in memory).
offset : int, optional
Start reading buffer (`buf`) from this offset onwards.
order : {'C', 'F'}, optional
Row-major (C-style) or column-major (Fortran-style) order.
Returns
-------
rec : recarray
Empty array of the given shape and type.
See Also
--------
rec.fromrecords : Construct a record array from data.
record : fundamental data-type for `recarray`.
format_parser : determine a data-type from formats, names, titles.
Notes
-----
This constructor can be compared to ``empty``: it creates a new record
array but does not fill it with data. To create a record array from data,
use one of the following methods:
1. Create a standard ndarray and convert it to a record array,
using ``arr.view(np.recarray)``
2. Use the `buf` keyword.
3. Use `np.rec.fromrecords`.
Examples
--------
Create an array with two fields, ``x`` and ``y``:
>>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', '<f8'), ('y', '<i8')])
>>> x
array([(1., 2), (3., 4)], dtype=[('x', '<f8'), ('y', '<i8')])
>>> x['x']
array([1., 3.])
View the array as a record array:
>>> x = x.view(np.recarray)
>>> x.x
array([1., 3.])
>>> x.y
array([2, 4])
Create a new, empty record array:
>>> np.recarray((2,),
... dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP
rec.array([(-1073741821, 1.2249118382103472e-301, 24547520),
(3471280, 1.2134086255804012e-316, 0)],
dtype=[('x', '<i4'), ('y', '<f8'), ('z', '<i4')])
"""
# manually set name and module so that this class's type shows
# up as "numpy.recarray" when printed
__name__ = 'recarray'
__module__ = 'numpy'
def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None,
formats=None, names=None, titles=None,
byteorder=None, aligned=False, order='C'):
if dtype is not None:
descr = sb.dtype(dtype)
else:
descr = format_parser(formats, names, titles, aligned, byteorder).dtype
if buf is None:
self = ndarray.__new__(subtype, shape, (record, descr), order=order)
else:
self = ndarray.__new__(subtype, shape, (record, descr),
buffer=buf, offset=offset,
strides=strides, order=order)
return self
def __array_finalize__(self, obj):
if self.dtype.type is not record and self.dtype.names is not None:
# if self.dtype is not np.record, invoke __setattr__ which will
# convert it to a record if it is a void dtype.
self.dtype = self.dtype
def __getattribute__(self, attr):
# See if ndarray has this attr, and return it if so. (note that this
# means a field with the same name as an ndarray attr cannot be
# accessed by attribute).
try:
return object.__getattribute__(self, attr)
except AttributeError: # attr must be a fieldname
pass
# look for a field with this name
fielddict = ndarray.__getattribute__(self, 'dtype').fields
try:
res = fielddict[attr][:2]
except (TypeError, KeyError):
raise AttributeError("recarray has no attribute %s" % attr)
obj = self.getfield(*res)
# At this point obj will always be a recarray, since (see
# PyArray_GetField) the type of obj is inherited. Next, if obj.dtype is
# non-structured, convert it to an ndarray. Then if obj is structured
# with void type convert it to the same dtype.type (eg to preserve
# numpy.record type if present), since nested structured fields do not
# inherit type. Don't do this for non-void structures though.
if obj.dtype.names is not None:
if issubclass(obj.dtype.type, nt.void):
return obj.view(dtype=(self.dtype.type, obj.dtype))
return obj
else:
return obj.view(ndarray)
# Save the dictionary.
# If the attr is a field name and not in the saved dictionary
# Undo any "setting" of the attribute and do a setfield
# Thus, you can't create attributes on-the-fly that are field names.
def __setattr__(self, attr, val):
# Automatically convert (void) structured types to records
# (but not non-void structures, subarrays, or non-structured voids)
if attr == 'dtype' and issubclass(val.type, nt.void) and val.names is not None:
val = sb.dtype((record, val))
newattr = attr not in self.__dict__
try:
ret = object.__setattr__(self, attr, val)
except Exception:
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
if attr not in fielddict:
raise
else:
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
if attr not in fielddict:
return ret
if newattr:
# We just added this one or this setattr worked on an
# internal attribute.
try:
object.__delattr__(self, attr)
except Exception:
return ret
try:
res = fielddict[attr][:2]
except (TypeError, KeyError):
raise AttributeError("record array has no attribute %s" % attr)
return self.setfield(val, *res)
def __getitem__(self, indx):
obj = super(recarray, self).__getitem__(indx)
# copy behavior of getattr, except that here
# we might also be returning a single element
if isinstance(obj, ndarray):
if obj.dtype.names is not None:
obj = obj.view(type(self))
if issubclass(obj.dtype.type, nt.void):
return obj.view(dtype=(self.dtype.type, obj.dtype))
return obj
else:
return obj.view(type=ndarray)
else:
# return a single element
return obj
def __repr__(self):
repr_dtype = self.dtype
if self.dtype.type is record or not issubclass(self.dtype.type, nt.void):
# If this is a full record array (has numpy.record dtype),
# or if it has a scalar (non-void) dtype with no records,
# represent it using the rec.array function. Since rec.array
# converts dtype to a numpy.record for us, convert back
# to non-record before printing
if repr_dtype.type is record:
repr_dtype = sb.dtype((nt.void, repr_dtype))
prefix = "rec.array("
fmt = 'rec.array(%s,%sdtype=%s)'
else:
# otherwise represent it using np.array plus a view
# This should only happen if the user is playing
# strange games with dtypes.
prefix = "array("
fmt = 'array(%s,%sdtype=%s).view(numpy.recarray)'
# get data/shape string. logic taken from numeric.array_repr
if self.size > 0 or self.shape == (0,):
lst = sb.array2string(
self, separator=', ', prefix=prefix, suffix=',')
else:
# show zero-length shape unless it is (0,)
lst = "[], shape=%s" % (repr(self.shape),)
lf = '\n'+' '*len(prefix)
if get_printoptions()['legacy'] == '1.13':
lf = ' ' + lf # trailing space
return fmt % (lst, lf, repr_dtype)
def field(self, attr, val=None):
if isinstance(attr, int):
names = ndarray.__getattribute__(self, 'dtype').names
attr = names[attr]
fielddict = ndarray.__getattribute__(self, 'dtype').fields
res = fielddict[attr][:2]
if val is None:
obj = self.getfield(*res)
if obj.dtype.names is not None:
return obj
return obj.view(ndarray)
else:
return self.setfield(val, *res)
def _deprecate_shape_0_as_None(shape):
if shape == 0:
warnings.warn(
"Passing `shape=0` to have the shape be inferred is deprecated, "
"and in future will be equivalent to `shape=(0,)`. To infer "
"the shape and suppress this warning, pass `shape=None` instead.",
FutureWarning, stacklevel=3)
return None
else:
return shape
def fromarrays(arrayList, dtype=None, shape=None, formats=None,
names=None, titles=None, aligned=False, byteorder=None):
"""Create a record array from a (flat) list of arrays
Parameters
----------
arrayList : list or tuple
List of array-like objects (such as lists, tuples,
and ndarrays).
dtype : data-type, optional
valid dtype for all arrays
shape : int or tuple of ints, optional
Shape of the resulting array. If not provided, inferred from
``arrayList[0]``.
formats, names, titles, aligned, byteorder :
If `dtype` is ``None``, these arguments are passed to
`numpy.format_parser` to construct a dtype. See that function for
detailed documentation.
Returns
-------
np.recarray
Record array consisting of given arrayList columns.
Examples
--------
>>> x1=np.array([1,2,3,4])
>>> x2=np.array(['a','dd','xyz','12'])
>>> x3=np.array([1.1,2,3,4])
>>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c')
>>> print(r[1])
(2, 'dd', 2.0) # may vary
>>> x1[1]=34
>>> r.a
array([1, 2, 3, 4])
>>> x1 = np.array([1, 2, 3, 4])
>>> x2 = np.array(['a', 'dd', 'xyz', '12'])
>>> x3 = np.array([1.1, 2, 3,4])
>>> r = np.core.records.fromarrays(
... [x1, x2, x3],
... dtype=np.dtype([('a', np.int32), ('b', 'S3'), ('c', np.float32)]))
>>> r
rec.array([(1, b'a', 1.1), (2, b'dd', 2. ), (3, b'xyz', 3. ),
(4, b'12', 4. )],
dtype=[('a', '<i4'), ('b', 'S3'), ('c', '<f4')])
"""
arrayList = [sb.asarray(x) for x in arrayList]
# NumPy 1.19.0, 2020-01-01
shape = _deprecate_shape_0_as_None(shape)
if shape is None:
shape = arrayList[0].shape
elif isinstance(shape, int):
shape = (shape,)
if formats is None and dtype is None:
# go through each object in the list to see if it is an ndarray
# and determine the formats.
formats = [obj.dtype for obj in arrayList]
if dtype is not None:
descr = sb.dtype(dtype)
else:
descr = format_parser(formats, names, titles, aligned, byteorder).dtype
_names = descr.names
# Determine shape from data-type.
if len(descr) != len(arrayList):
raise ValueError("mismatch between the number of fields "
"and the number of arrays")
d0 = descr[0].shape
nn = len(d0)
if nn > 0:
shape = shape[:-nn]
for k, obj in enumerate(arrayList):
nn = descr[k].ndim
testshape = obj.shape[:obj.ndim - nn]
if testshape != shape:
raise ValueError("array-shape mismatch in array %d" % k)
_array = recarray(shape, descr)
# populate the record array (makes a copy)
for i in range(len(arrayList)):
_array[_names[i]] = arrayList[i]
return _array
def fromrecords(recList, dtype=None, shape=None, formats=None, names=None,
titles=None, aligned=False, byteorder=None):
"""Create a recarray from a list of records in text form.
Parameters
----------
recList : sequence
data in the same field may be heterogeneous - they will be promoted
to the highest data type.
dtype : data-type, optional
valid dtype for all arrays
shape : int or tuple of ints, optional
shape of each array.
formats, names, titles, aligned, byteorder :
If `dtype` is ``None``, these arguments are passed to
`numpy.format_parser` to construct a dtype. See that function for
detailed documentation.
If both `formats` and `dtype` are None, then this will auto-detect
formats. Use list of tuples rather than list of lists for faster
processing.
Returns
-------
np.recarray
record array consisting of given recList rows.
Examples
--------
>>> r=np.core.records.fromrecords([(456,'dbe',1.2),(2,'de',1.3)],
... names='col1,col2,col3')
>>> print(r[0])
(456, 'dbe', 1.2)
>>> r.col1
array([456, 2])
>>> r.col2
array(['dbe', 'de'], dtype='<U3')
>>> import pickle
>>> pickle.loads(pickle.dumps(r))
rec.array([(456, 'dbe', 1.2), ( 2, 'de', 1.3)],
dtype=[('col1', '<i8'), ('col2', '<U3'), ('col3', '<f8')])
"""
if formats is None and dtype is None: # slower
obj = sb.array(recList, dtype=object)
arrlist = [sb.array(obj[..., i].tolist()) for i in range(obj.shape[-1])]
return fromarrays(arrlist, formats=formats, shape=shape, names=names,
titles=titles, aligned=aligned, byteorder=byteorder)
if dtype is not None:
descr = sb.dtype((record, dtype))
else:
descr = format_parser(formats, names, titles, aligned, byteorder).dtype
try:
retval = sb.array(recList, dtype=descr)
except (TypeError, ValueError):
# NumPy 1.19.0, 2020-01-01
shape = _deprecate_shape_0_as_None(shape)
if shape is None:
shape = len(recList)
if isinstance(shape, int):
shape = (shape,)
if len(shape) > 1:
raise ValueError("Can only deal with 1-d array.")
_array = recarray(shape, descr)
for k in range(_array.size):
_array[k] = tuple(recList[k])
# list of lists instead of list of tuples ?
# 2018-02-07, 1.14.1
warnings.warn(
"fromrecords expected a list of tuples, may have received a list "
"of lists instead. In the future that will raise an error",
FutureWarning, stacklevel=2)
return _array
else:
if shape is not None and retval.shape != shape:
retval.shape = shape
res = retval.view(recarray)
return res
def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,
names=None, titles=None, aligned=False, byteorder=None):
"""Create a (read-only) record array from binary data contained in
a string"""
if dtype is None and formats is None:
raise TypeError("fromstring() needs a 'dtype' or 'formats' argument")
if dtype is not None:
descr = sb.dtype(dtype)
else:
descr = format_parser(formats, names, titles, aligned, byteorder).dtype
itemsize = descr.itemsize
# NumPy 1.19.0, 2020-01-01
shape = _deprecate_shape_0_as_None(shape)
if shape in (None, -1):
shape = (len(datastring) - offset) // itemsize
_array = recarray(shape, descr, buf=datastring, offset=offset)
return _array
def get_remaining_size(fd):
try:
fn = fd.fileno()
except AttributeError:
return os.path.getsize(fd.name) - fd.tell()
st = os.fstat(fn)
size = st.st_size - fd.tell()
return size
def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
names=None, titles=None, aligned=False, byteorder=None):
"""Create an array from binary file data
Parameters
----------
fd : str or file type
If file is a string or a path-like object then that file is opened,
else it is assumed to be a file object. The file object must
support random access (i.e. it must have tell and seek methods).
dtype : data-type, optional
valid dtype for all arrays
shape : int or tuple of ints, optional
shape of each array.
offset : int, optional
Position in the file to start reading from.
formats, names, titles, aligned, byteorder :
If `dtype` is ``None``, these arguments are passed to
`numpy.format_parser` to construct a dtype. See that function for
detailed documentation
Returns
-------
np.recarray
record array consisting of data enclosed in file.
Examples
--------
>>> from tempfile import TemporaryFile
>>> a = np.empty(10,dtype='f8,i4,a5')
>>> a[5] = (0.5,10,'abcde')
>>>
>>> fd=TemporaryFile()
>>> a = a.newbyteorder('<')
>>> a.tofile(fd)
>>>
>>> _ = fd.seek(0)
>>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10,
... byteorder='<')
>>> print(r[5])
(0.5, 10, 'abcde')
>>> r.shape
(10,)
"""
if dtype is None and formats is None:
raise TypeError("fromfile() needs a 'dtype' or 'formats' argument")
# NumPy 1.19.0, 2020-01-01
shape = _deprecate_shape_0_as_None(shape)
if shape is None:
shape = (-1,)
elif isinstance(shape, int):
shape = (shape,)
if isfileobj(fd):
# file already opened
ctx = contextlib_nullcontext(fd)
else:
# open file
ctx = open(os_fspath(fd), 'rb')
with ctx as fd:
if offset > 0:
fd.seek(offset, 1)
size = get_remaining_size(fd)
if dtype is not None:
descr = sb.dtype(dtype)
else:
descr = format_parser(formats, names, titles, aligned, byteorder).dtype
itemsize = descr.itemsize
shapeprod = sb.array(shape).prod(dtype=nt.intp)
shapesize = shapeprod * itemsize
if shapesize < 0:
shape = list(shape)
shape[shape.index(-1)] = size // -shapesize
shape = tuple(shape)
shapeprod = sb.array(shape).prod(dtype=nt.intp)
nbytes = shapeprod * itemsize
if nbytes > size:
raise ValueError(
"Not enough bytes left in file for specified shape and type")
# create the array
_array = recarray(shape, descr)
nbytesread = fd.readinto(_array.data)
if nbytesread != nbytes:
raise IOError("Didn't read as many bytes as expected")
return _array
def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None,
names=None, titles=None, aligned=False, byteorder=None, copy=True):
"""Construct a record array from a wide-variety of objects.
"""
if ((isinstance(obj, (type(None), str)) or isfileobj(obj)) and
formats is None and dtype is None):
raise ValueError("Must define formats (or dtype) if object is "
"None, string, or an open file")
kwds = {}
if dtype is not None:
dtype = sb.dtype(dtype)
elif formats is not None:
dtype = format_parser(formats, names, titles,
aligned, byteorder).dtype
else:
kwds = {'formats': formats,
'names': names,
'titles': titles,
'aligned': aligned,
'byteorder': byteorder
}
if obj is None:
if shape is None:
raise ValueError("Must define a shape if obj is None")
return recarray(shape, dtype, buf=obj, offset=offset, strides=strides)
elif isinstance(obj, bytes):
return fromstring(obj, dtype, shape=shape, offset=offset, **kwds)
elif isinstance(obj, (list, tuple)):
if isinstance(obj[0], (tuple, list)):
return fromrecords(obj, dtype=dtype, shape=shape, **kwds)
else:
return fromarrays(obj, dtype=dtype, shape=shape, **kwds)
elif isinstance(obj, recarray):
if dtype is not None and (obj.dtype != dtype):
new = obj.view(dtype)
else:
new = obj
if copy:
new = new.copy()
return new
elif isfileobj(obj):
return fromfile(obj, dtype=dtype, shape=shape, offset=offset)
elif isinstance(obj, ndarray):
if dtype is not None and (obj.dtype != dtype):
new = obj.view(dtype)
else:
new = obj
if copy:
new = new.copy()
return new.view(recarray)
else:
interface = getattr(obj, "__array_interface__", None)
if interface is None or not isinstance(interface, dict):
raise ValueError("Unknown input type")
obj = sb.array(obj)
if dtype is not None and (obj.dtype != dtype):
obj = obj.view(dtype)
return obj.view(recarray)
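# A minimal usage sketch (illustrative only): these helpers are normally
# reached through numpy's public ``np.rec`` namespace, e.g. building a record
# array from plain arrays and reading its fields back as attributes.
if __name__ == '__main__':
    import numpy as np
    ages = np.array([25, 31])
    names = np.array(['ada', 'bob'])
    people = np.rec.fromarrays([ages, names], names='age,name')
    assert people.age[1] == 31        # field access as an attribute
    assert people[0].name == 'ada'    # a row comes back as a ``record`` scalar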
|
the-stack_106_18310
|
# A user is presented with the menu text below. The program lets them list all
# of their tasks, add a task to the list, delete a task, or quit.
def main():
problem1()
def getList():
print("Congratulations! You're running Superman's Task List program")
print("What would you like to do?")
print("1. List all tasks.")
print("2. Add a task to the list")
print("3. Delete a Task")
print("0. To quit to the program")
toDo = []
def problem1():
    UserInput = ""
    while (UserInput != "0"):
        getList()
        UserInput = input("What would you like to do? ")
        # print every task in the list
        if (UserInput == "1"):
            for task in toDo:
                print(task)
        # add a task
        elif (UserInput == "2"):
            addTask = input("Add a task: ")
            toDo.append(addTask)
        # remove a task, if it is on the list
        elif (UserInput == "3"):
            removeTask = input("Delete a task: ")
            if removeTask in toDo:
                toDo.remove(removeTask)
            else:
                print("No such task:", removeTask)
if __name__ == '__main__':
main()
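# A non-interactive sketch of the same list operations the menu drives
# (hypothetical helper, illustrative only): append() adds a task and
# remove() deletes a task by its exact text.
def demo_tasks():
    tasks = []
    tasks.append("feed the dog")
    tasks.append("buy milk")
    tasks.remove("buy milk")
    return tasks  # ['feed the dog']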
|
the-stack_106_18313
|
from __future__ import print_function # Python 2/3 compatibility
import boto3
from boto3.session import Session
import json
import decimal
# Helper class to convert a DynamoDB item to JSON.
# class DecimalEncoder(json.JSONEncoder):
# def default(self, o):
# if isinstance(o, decimal.Decimal):
# if abs(o) % 1 > 0:
# return float(o)
# else:
# return int(o)
# return super(DecimalEncoder, self).default(o)
def getLocationDB(event, context):
    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
    # Credentials are picked up from the environment / AWS config;
    # never hard-code an access key and secret key here.
    CornellLocs = dynamodb.Table('CornellLocations111')
    hall_name = "MPS lab"
    city = "Ithaca"
    location = "Rhodes 153"
    response = CornellLocs.put_item(
        Item={
            'Hall_Name': hall_name,
            'City': city,
            'Location': location
        }
    )
# print("PutItem succeeded:")
# print(json.dumps(response, indent=4, cls=DecimalEncoder))
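# Illustrative read-back sketch (hypothetical, not wired into the handler
# above); it assumes 'Hall_Name' is the table's partition key, which is not
# shown here.
def getLocationItem(event, context):
    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
    table = dynamodb.Table('CornellLocations111')
    response = table.get_item(Key={'Hall_Name': 'MPS lab'})
    # 'Item' is absent from the response when no row matches the key
    return response.get('Item')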
|
the-stack_106_18315
|
# -*- coding: utf-8 -*-
R"""
Created on Sun May 23 01:00:41 2021
@author: Christian
"""
from planesections import EulerBeam, OpenSeesAnalyzer, RecordOutput, plotMoment,plotShear
import numpy as np
import openseespy.opensees as op
x = np.linspace(0,5,80)
fixed = np.array([1,1,0.])
P = np.array([0.,1000.,0.])
q = np.array([0.,-1000.])
beam = EulerBeam()
beam.addNodes(x)
beam.setFixity(.4, fixed)
beam.setFixity(4.60, fixed)
beam.addVerticalLoad(0, -1000.)
beam.addVerticalLoad(2.5, -1000.)
beam.addVerticalLoad(5, -1000.)
beam.addDistLoad(0,5, q)
beam.plot()
analysis = OpenSeesAnalyzer(beam)
analysis.runAnalysis()
RecordOutput(beam)
plotMoment(beam)
plotShear(beam)
|
the-stack_106_18316
|
# String encodings and numeric representations
import binascii
import codecs
import string
from .types import (
is_string,
is_text,
)
def decode_hex(value):
if not is_text(value):
raise TypeError('Value must be an instance of str')
return codecs.decode(remove_0x_prefix(value), 'hex')
def encode_hex(value):
if not is_string(value):
raise TypeError('Value must be an instance of str or unicode')
binary_hex = codecs.encode(value, 'hex')
return add_0x_prefix(binary_hex.decode('ascii'))
def is_0x_prefixed(value):
if not is_text(value):
raise TypeError(
"is_0x_prefixed requires text typed arguments. Got: {0}".format(repr(value))
)
return value.startswith('0x') or value.startswith('0X')
def remove_0x_prefix(value):
if is_0x_prefixed(value):
return value[2:]
return value
def add_0x_prefix(value):
if is_0x_prefixed(value):
return value
return '0x' + value
def is_hex(value):
if not is_text(value):
raise TypeError('is_hex requires text typed arguments. Got: {0}'.format(repr(value)))
elif value.lower() == '0x':
return True
unprefixed_value = remove_0x_prefix(value)
if len(unprefixed_value) % 2 != 0:
value_to_decode = '0' + unprefixed_value
else:
value_to_decode = unprefixed_value
if any(char not in string.hexdigits for char in value_to_decode):
return False
try:
value_as_bytes = codecs.decode(value_to_decode, 'hex')
except binascii.Error:
return False
except TypeError:
return False
else:
return bool(value_as_bytes)
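# Illustrative round trip (standard library only, so it runs on its own):
# it mirrors what encode_hex, decode_hex and the 0x-prefix helpers above do
# together.
if __name__ == '__main__':
    import codecs
    raw = b'\xde\xad\xbe\xef'
    hexed = '0x' + codecs.encode(raw, 'hex').decode('ascii')   # ~ encode_hex(raw)
    assert hexed == '0xdeadbeef'
    assert codecs.decode(hexed[2:], 'hex') == raw              # ~ decode_hex(hexed)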
|
the-stack_106_18317
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import sys
import time
from torch.optim.lr_scheduler import StepLR
import torchvision.utils as vutils
from lib.loss import *
from tensorboardX import SummaryWriter
from torch import optim
from torch.backends import cudnn
from torch.utils.data import DataLoader
from configs.config_agri_v2 import *
from lib.lr_schd import init_params_lr, adjust_learning_rate
from lib.measure import *
from lib.visual import *
from tools.agri_models import load_model
cudnn.benchmark = True
prepare_gt(VAL_ROOT)
prepare_gt(TRAIN_ROOT)
train_args = agriculture2021_configs(
net_name='PAGNet_rx50',
data='Agriculture2021',
bands_list=['RGB', 'NIR'],
k_folder=6, # default 6 cv folders
kf=4, # 0, 1, ..., 5 , the index of cv folder for val
note='training'
)
# train_args.optim = 'SGD' # 'Adam' as default
train_args.non_val = False  # False by default; if set to True, the val-set is folded into the training set (overfit sanity check only)
train_args.input_size = [384, 384] # 224, 384, 448, 512
train_args.scale_rate = 384./512. #
train_args.val_size = [384, 384]
train_args.train_batch = 16 # 24, 18, 16, 12
train_args.snapshot = ''  # set to a checkpoint file name to resume training, e.g. with different hyperparameters
# output training configuration to a text file
train_args.ckpt_path=os.path.abspath(os.curdir)
writer = SummaryWriter(os.path.join(train_args.save_path, 'tblog'))
visualize, restore = get_visualize(train_args)
# Remember to use num_workers=0 when creating the DataBunch.
def random_seed(seed_value, use_cuda=True):
np.random.seed(seed_value) # cpu vars
torch.manual_seed(seed_value) # cpu vars
random.seed(seed_value) # Python
if use_cuda:
torch.cuda.manual_seed(seed_value)
torch.cuda.manual_seed_all(seed_value) # gpu vars
torch.backends.cudnn.deterministic = True # needed
torch.backends.cudnn.benchmark = False
def main():
random_seed(train_args.seeds)
train_args.write2txt()
net = load_model(name=train_args.model, classes=train_args.nb_classes,
m_views=train_args.m_views, kernel=train_args.kernel, hidden_ch=train_args.hidden_ch,
heads=train_args.heads, depth=train_args.depth,
dropout=train_args.dropout, activation=train_args.activation,
shortcut=train_args.shortcut, ndvi=train_args.ndvi,
)
net, start_epoch = train_args.resume_train(net)
net.cuda()
net.train()
# prepare dataset for training and validation
train_set, val_set = train_args.get_dataset()
train_loader = DataLoader(dataset=train_set, batch_size=train_args.train_batch, num_workers=0, shuffle=True)
val_loader = DataLoader(dataset=val_set, batch_size=train_args.val_batch, num_workers=0)
criterion = ACW_loss().cuda()
params = init_params_lr(net, train_args)
if train_args.optim == 'Adam':
optimizer = optim.Adam(params, amsgrad=True)
else:
optimizer = optim.SGD(params, momentum=train_args.momentum, nesterov=True)
if train_args.lrschd == 'Cosin':
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, train_args.cosin_epoch, train_args.min_lr)
else:
lr_scheduler = StepLR(optimizer, step_size=train_args.steps, gamma=train_args.gamma)
new_ep = 0
while True:
starttime = time.time()
train_main_loss = AverageMeter()
cls_trian_loss = AverageMeter()
start_lr = train_args.lr
train_args.lr = optimizer.param_groups[0]['lr']
num_iter = len(train_loader)
curr_iter = ((start_epoch + new_ep) - 1) * num_iter
print('---curr_iter: {}, num_iter per epoch: {}---'.format(curr_iter, num_iter))
for i, (inputs, labels) in enumerate(train_loader):
sys.stdout.flush()
inputs, labels = inputs.cuda(), labels.cuda(),
N = inputs.size(0) * inputs.size(2) * inputs.size(3)
optimizer.zero_grad()
outputs, cost = net(inputs)
main_loss = criterion(outputs, labels)
loss = main_loss + cost
loss.backward()
optimizer.step()
lr_scheduler.step(epoch=(start_epoch + new_ep))
adjust_learning_rate(optimizer, curr_iter, train_args)
train_main_loss.update(main_loss.item(), N)
curr_iter += 1
writer.add_scalar('main_loss', train_main_loss.avg, curr_iter)
writer.add_scalar('lr', optimizer.param_groups[0]['lr'], curr_iter)
if (i + 1) % train_args.print_freq == 0:
newtime = time.time()
print('[epoch %d], [iter %d / %d], [loss %.5f, cls %.5f], [lr %.10f], [time %.3f]' %
(start_epoch + new_ep, i + 1, num_iter, train_main_loss.avg,
cls_trian_loss.avg,
optimizer.param_groups[0]['lr'], newtime - starttime))
starttime = newtime
validate(net, val_set, val_loader, criterion, optimizer, start_epoch + new_ep, new_ep)
new_ep += 1
def validate(net, val_set, val_loader, criterion, optimizer, epoch, new_ep):
net.eval()
val_loss = AverageMeter()
inputs_all, gts_all, predictions_all = [], [], []
with torch.no_grad():
for vi, (inputs, gts) in enumerate(val_loader):
inputs, gts = inputs.cuda(), gts.cuda()
N = inputs.size(0) * inputs.size(2) * inputs.size(3)
outputs = net(inputs)
val_loss.update(criterion(outputs, gts).item(), N)
if random.random() > train_args.save_rate:
inputs_all.append(None)
else:
inputs_all.append(inputs.data.squeeze(0).cpu())
gts_all.append(gts.data.squeeze(0).cpu().numpy())
predictions = outputs.data.max(1)[1].squeeze(1).squeeze(0).cpu().numpy()
predictions_all.append(predictions)
update_ckpt(net, optimizer, epoch, new_ep, val_loss,
inputs_all, gts_all, predictions_all)
net.train()
return val_loss, inputs_all, gts_all, predictions_all
def update_ckpt(net, optimizer, epoch, new_ep, val_loss,
inputs_all, gts_all, predictions_all):
avg_loss = val_loss.avg
acc, acc_cls, mean_iu, fwavacc, f1 = evaluate(predictions_all, gts_all, train_args.nb_classes)
writer.add_scalar('val_loss', avg_loss, epoch)
writer.add_scalar('acc', acc, epoch)
writer.add_scalar('acc_cls', acc_cls, epoch)
writer.add_scalar('mean_iu', mean_iu, epoch)
writer.add_scalar('fwavacc', fwavacc, epoch)
writer.add_scalar('f1_score', f1, epoch)
updated = train_args.update_best_record(epoch, avg_loss, acc, acc_cls, mean_iu, fwavacc, f1)
    # save best record and snapshot parameters
val_visual = []
snapshot_name = 'epoch_%d_loss_%.5f_acc_%.5f_acc-cls_%.5f_mean-iu_%.5f_fwavacc_%.5f_f1_%.5f_' % (
epoch, avg_loss, acc, acc_cls, mean_iu, fwavacc, f1
)
if updated or (new_ep % 2 == 0) or (train_args.best_record['val_loss'] > avg_loss):
torch.save(net.state_dict(), os.path.join(train_args.save_path, snapshot_name + '.pth'))
# train_args.update_best_record(epoch, val_loss.avg, acc, acc_cls, mean_iu, fwavacc, f1)
if train_args.save_pred:
if updated or (new_ep % 5 == 0):
val_visual = visual_ckpt(epoch, new_ep, inputs_all, gts_all, predictions_all)
if len(val_visual) > 0:
val_visual = torch.stack(val_visual, 0)
val_visual = vutils.make_grid(val_visual, nrow=3, padding=5)
writer.add_image(snapshot_name, val_visual)
def visual_ckpt(epoch, new_ep, inputs_all, gts_all, predictions_all):
val_visual = []
if train_args.save_pred:
to_save_dir = os.path.join(train_args.save_path, str(epoch) + '_' + str(new_ep))
check_mkdir(to_save_dir)
for idx, data in enumerate(zip(inputs_all, gts_all, predictions_all)):
if data[0] is None:
continue
if train_args.val_batch == 1:
input_pil = restore(data[0][0:3, :, :])
gt_pil = colorize_mask(data[1], train_args.palette)
predictions_pil = colorize_mask(data[2], train_args.palette)
else:
input_pil = restore(data[0][0][0:3, :, :]) # only for the first 3 bands
# input_pil = restore(data[0][0])
gt_pil = colorize_mask(data[1][0], train_args.palette)
predictions_pil = colorize_mask(data[2][0], train_args.palette)
# if train_args['val_save_to_img_file']:
if train_args.save_pred:
input_pil.save(os.path.join(to_save_dir, '%d_input.png' % idx))
predictions_pil.save(os.path.join(to_save_dir, '%d_prediction.png' % idx))
gt_pil.save(os.path.join(to_save_dir, '%d_gt.png' % idx))
val_visual.extend([visualize(input_pil.convert('RGB')), visualize(gt_pil.convert('RGB')),
visualize(predictions_pil.convert('RGB'))])
return val_visual
def check_mkdir(dir_name):
if not os.path.exists(dir_name):
os.mkdir(dir_name)
if __name__ == '__main__':
main()
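# Conceptual sketch (illustrative only) of the running-average bookkeeping the
# AverageMeter objects above provide: update(value, n) accumulates a weighted
# sum and .avg exposes the running mean.
class _ToyAverageMeter(object):
    def __init__(self):
        self.sum, self.count, self.avg = 0.0, 0, 0.0
    def update(self, value, n=1):
        self.sum += value * n
        self.count += n
        self.avg = self.sum / self.count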
|
the-stack_106_18318
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Axe Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from time import *
'''
multikeysporks.py
Test logic for using several signer keys for spork broadcasts.
We set 5 possible spork-signing keys and set the minimum number of
required signers to 3. We check that 1 or 2 signers can't set the spork
value, that any 3 signers can change the spork value, and that another
3 signers can change it again.
'''
class MultiKeySporkTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 5
self.setup_clean_chain = True
self.is_network_split = False
def setup_network(self):
self.nodes = []
# secret(base58): 931wyuRNVYvhg18Uu9bky5Qg1z4QbxaJ7fefNBzjBPiLRqcd33F
# keyid(hex): 60f0f57f71f0081f1aacdd8432340a33a526f91b
# address(base58): yNsMZhEhYqv14TgdYb1NS2UmNZjE8FSJxa
# secret(base58): 91vbXGMSWKGHom62986XtL1q2mQDA12ngcuUNNe5NfMSj44j7g3
# keyid(hex): 43dff2b09de2f904f688ec14ee6899087b889ad0
# address(base58): yfLSXFfipnkgYioD6L8aUNyfRgEBuJv48h
# secret(base58): 92bxUjPT5AhgXuXJwfGGXqhomY2SdQ55MYjXyx9DZNxCABCSsRH
# keyid(hex): d9aa5fa00cce99101a4044e65dc544d1579890de
# address(base58): ygcG5S2pQz2U1UAaHvU6EznKZW7yapKMA7
# secret(base58): 934yPXiVGf4RCY2qTs2Bt5k3TEtAiAg12sMxCt8yVWbSU7p3fuD
# keyid(hex): 0b23935ce0bea3b997a334f6fa276c9fa17687b2
# address(base58): ycbRQWbovrhQMTuxg9p4LAuW5SCMAKqPrn
# secret(base58): 92Cxwia363Wg2qGF1fE5z4GKi8u7r1nrWQXdtsj2ACZqaDPSihD
# keyid(hex): 1d1098b2b1f759b678a0a7a098637a9b898adcac
# address(base58): yc5TGfcHYoLCrcbVy4umsiDjsYUn39vLui
self.nodes.append(start_node(0, self.options.tmpdir,
["-sporkkey=931wyuRNVYvhg18Uu9bky5Qg1z4QbxaJ7fefNBzjBPiLRqcd33F",
"-sporkaddr=ygcG5S2pQz2U1UAaHvU6EznKZW7yapKMA7",
"-sporkaddr=yfLSXFfipnkgYioD6L8aUNyfRgEBuJv48h",
"-sporkaddr=yNsMZhEhYqv14TgdYb1NS2UmNZjE8FSJxa",
"-sporkaddr=ycbRQWbovrhQMTuxg9p4LAuW5SCMAKqPrn",
"-sporkaddr=yc5TGfcHYoLCrcbVy4umsiDjsYUn39vLui",
"-minsporkkeys=3"]))
self.nodes.append(start_node(1, self.options.tmpdir,
["-sporkkey=91vbXGMSWKGHom62986XtL1q2mQDA12ngcuUNNe5NfMSj44j7g3",
"-sporkaddr=ygcG5S2pQz2U1UAaHvU6EznKZW7yapKMA7",
"-sporkaddr=yfLSXFfipnkgYioD6L8aUNyfRgEBuJv48h",
"-sporkaddr=yNsMZhEhYqv14TgdYb1NS2UmNZjE8FSJxa",
"-sporkaddr=ycbRQWbovrhQMTuxg9p4LAuW5SCMAKqPrn",
"-sporkaddr=yc5TGfcHYoLCrcbVy4umsiDjsYUn39vLui",
"-minsporkkeys=3"]))
self.nodes.append(start_node(2, self.options.tmpdir,
["-sporkkey=92bxUjPT5AhgXuXJwfGGXqhomY2SdQ55MYjXyx9DZNxCABCSsRH",
"-sporkaddr=ygcG5S2pQz2U1UAaHvU6EznKZW7yapKMA7",
"-sporkaddr=yfLSXFfipnkgYioD6L8aUNyfRgEBuJv48h",
"-sporkaddr=yNsMZhEhYqv14TgdYb1NS2UmNZjE8FSJxa",
"-sporkaddr=ycbRQWbovrhQMTuxg9p4LAuW5SCMAKqPrn",
"-sporkaddr=yc5TGfcHYoLCrcbVy4umsiDjsYUn39vLui",
"-minsporkkeys=3"]))
self.nodes.append(start_node(3, self.options.tmpdir,
["-sporkkey=934yPXiVGf4RCY2qTs2Bt5k3TEtAiAg12sMxCt8yVWbSU7p3fuD",
"-sporkaddr=ygcG5S2pQz2U1UAaHvU6EznKZW7yapKMA7",
"-sporkaddr=yfLSXFfipnkgYioD6L8aUNyfRgEBuJv48h",
"-sporkaddr=yNsMZhEhYqv14TgdYb1NS2UmNZjE8FSJxa",
"-sporkaddr=ycbRQWbovrhQMTuxg9p4LAuW5SCMAKqPrn",
"-sporkaddr=yc5TGfcHYoLCrcbVy4umsiDjsYUn39vLui",
"-minsporkkeys=3"]))
self.nodes.append(start_node(4, self.options.tmpdir,
["-sporkkey=92Cxwia363Wg2qGF1fE5z4GKi8u7r1nrWQXdtsj2ACZqaDPSihD",
"-sporkaddr=ygcG5S2pQz2U1UAaHvU6EznKZW7yapKMA7",
"-sporkaddr=yfLSXFfipnkgYioD6L8aUNyfRgEBuJv48h",
"-sporkaddr=yNsMZhEhYqv14TgdYb1NS2UmNZjE8FSJxa",
"-sporkaddr=ycbRQWbovrhQMTuxg9p4LAuW5SCMAKqPrn",
"-sporkaddr=yc5TGfcHYoLCrcbVy4umsiDjsYUn39vLui",
"-minsporkkeys=3"]))
# connect nodes at start
for i in range(0, 5):
for j in range(i, 5):
connect_nodes(self.nodes[i], j)
def get_test_spork_state(self, node):
info = node.spork('show')
# use InstantSend spork for tests
return info['SPORK_2_INSTANTSEND_ENABLED']
def set_test_spork_state(self, node, value):
# use InstantSend spork for tests
node.spork('SPORK_2_INSTANTSEND_ENABLED', value)
def wait_for_test_spork_state(self, node, value):
start = time()
got_state = False
while True:
if self.get_test_spork_state(node) == value:
got_state = True
break
if time() > start + 10:
break
sleep(0.1)
return got_state
def run_test(self):
# check test spork default state
for node in self.nodes:
assert(self.get_test_spork_state(node) == 0)
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
# first and second signers set spork value
self.set_test_spork_state(self.nodes[0], 1)
self.set_test_spork_state(self.nodes[1], 1)
# spork change requires at least 3 signers
for node in self.nodes:
assert(not self.wait_for_test_spork_state(node, 1))
# third signer set spork value
self.set_test_spork_state(self.nodes[2], 1)
# now spork state is changed
for node in self.nodes:
assert(self.wait_for_test_spork_state(node, 1))
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
# now set the spork again with other signers to test
# old and new spork messages interaction
self.set_test_spork_state(self.nodes[2], 2)
self.set_test_spork_state(self.nodes[3], 2)
self.set_test_spork_state(self.nodes[4], 2)
for node in self.nodes:
assert(self.wait_for_test_spork_state(node, 2))
if __name__ == '__main__':
MultiKeySporkTest().main()
|
the-stack_106_18319
|
# Load and prepare the dataset
import nltk
from nltk.corpus import movie_reviews
from nltk.util import ngrams
import random
import sys
import re
from emoji import UNICODE_EMOJI
from bisect import bisect_left
import math
from sklearn.metrics import classification_report
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.naive_bayes import MultinomialNB,BernoulliNB
from sklearn.linear_model import LogisticRegression,SGDClassifier
from sklearn.svm import SVC, LinearSVC, NuSVC
# Appending our src directory to sys.path so that we can import modules.
sys.path.append('../..')
from src.tn.lib.sentimoji import get_emoji_sentiment_rank
nltk.download('movie_reviews')
#nltk_documents = [(list(movie_reviews.words(fileid)), category)
# for category in movie_reviews.categories()
# for fileid in movie_reviews.fileids(category)]
def load_docs(source):
documents = []
with open(source, 'r', encoding='utf-8') as inf:
# skipping header row
next(inf)
for line in inf:
(review, cat) = re.split('\t', line.strip())
words = review.split()
document = (list(words), cat)
documents.append(document)
# Commenting this because this might bias the distribution
# if cat != 'Positive':
# for i in range(5):
# # oversampling to correct bias
# documents.append(document)
return documents
# Define the feature extractor
def document_features(document, feature_sets):
document_words = set(document)
# TODO: use bigrams in both training and testing
# document_bigrams = set(list(nltk.bigrams(document)))
features = {}
if ('bag_of_words' in feature_sets):
document_bag_of_words_feature(document_words, features)
if ('emojis' in feature_sets):
document_emoji_feature(document_words, features)
if ('length' in feature_sets):
document_length_feature(document_words, features)
if ('ngram' in feature_sets):
for size in feature_sets['ngram']:
document_ngram_feature(document, features, size)
return(features)
def get_bag_of_all_words():
if not hasattr(get_bag_of_all_words, "bag_of_words"):
get_bag_of_all_words.bag_of_words = {}
imdb_words = list(nltk.FreqDist(w.lower()
for w in movie_reviews.words()))[:1000]
training_words = nltk.FreqDist(w.lower()
for d in training_documents for w in d[0])
training_words = list(training_words)[:3000]
all_words = imdb_words + training_words
word_features = all_words
for word in word_features:
get_bag_of_all_words.bag_of_words['contains({})'.format(
word)] = (False)
return get_bag_of_all_words.bag_of_words
# The bag-of-words feature extractor. Marks the occurrence of words from the
# universal dictionary.
def document_bag_of_words_feature(document_words, features):
bag_of_words = get_bag_of_all_words()
features.update(bag_of_words)
for word in document_words:
features['contains({})'.format(word)] = (True)
def get_all_emojis():
if not hasattr(get_all_emojis, "all_emojis"):
get_all_emojis.all_emojis = {}
for c in UNICODE_EMOJI:
get_all_emojis.all_emojis['has-emoji({})'.format(c)] = (False)
return get_all_emojis.all_emojis
# The emoji feature classifier
def document_emoji_feature(document_words, features):
all_emojis = get_all_emojis()
features.update(all_emojis)
allchars = set(''.join(document_words))
score = 0.0
for c in allchars:
features['has-emoji({})'.format(c)] = (True)
sentiment = get_emoji_sentiment_rank(c)
if sentiment is not False:
score += sentiment['sentiment_score']
features['emoji-positive'] = (False)
features['emoji-negative'] = (False)
features['emoji-neutral'] = (False)
if score > 0.2:
features['emoji-positive'] = (True)
elif score < -0.2:
features['emoji-negative'] = (True)
else:
features['emoji-neutral'] = (True)
def document_length_feature(document_words, features):
features['word-count'] = len(document_words)
# doclen = sum(len(word) for word in document_words)
# features['doc-length'] = get_range(doclen)
# features['avg-word-length'] = int(round(features['doc-length']/len(document_words)))
def get_range(doclen):
ranges = ["1-10", "11-20", "21-30", "31-40", "41-50", "51-60", "61-70", "71-80", "81-90", "91-100", "101-110", "111-120", "121-130", "131-140",
"141-150", "151-160", "161-170", "171-180", "181-190", "191-200", ">200"]
    breakpoints = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100,
                   110, 120, 130, 140, 150, 160, 170, 180, 190, 200, math.inf]
index = bisect_left(breakpoints, doclen)
return ranges[index]
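# Worked example of the bucketing above (illustrative only): the breakpoints
# are upper bounds, so bisect_left places a 37-character document at index 3,
# i.e. the "31-40" range, and a 195-character one in "191-200".
assert get_range(37) == "31-40"
assert get_range(195) == "191-200"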
# Similar to the bag-of-words filter, but for n-grams
def get_all_ngrams(n):
if not hasattr(get_all_ngrams, "all_ngrams"):
get_all_ngrams.all_ngrams = {}
imdb_ngrams = list(ngrams(movie_reviews.words(), n))[:1000]
training_ngrams = []
for d in training_documents:
training_ngrams.extend(ngrams(d[0], n))
training_ngrams = training_ngrams[:3000]
total_ngrams = imdb_ngrams + training_ngrams
for ngram in total_ngrams:
get_all_ngrams.all_ngrams['contains({})'.format(
"-".join(ngram))] = (False)
return get_all_ngrams.all_ngrams
def document_ngram_feature(doc, features, n):
all_ngrams = get_all_ngrams(n)
doc_ngrams = list(ngrams(doc, n))
features.update(all_ngrams)
for ngram in doc_ngrams:
features['contains({})'.format("-".join(ngram))] = (True)
# Output classification in sklearn report format -
# https://scikit-learn.org/stable/modules/generated/sklearn.metrics.classification_report.html
# The inputset is the documents and not the document features
def get_classifier_metrics_report(classifier, inputset, features):
    refset, guesset = [], []
for (d,c) in inputset:
refset.append(c)
guesset.append(classifier.classify(document_features(d, features)))
return classification_report(refset, guesset)
training_documents = load_docs("../../resources/data/tamil_train.tsv")
testing_documents = load_docs("../../resources/data/tamil_dev.tsv")
# random.shuffle(documents)
# test_size = int(len(documents)/20.0)
feature_filters = [{'length': 1}, {'bag_of_words': 1}, {'length': 1, 'ngram': [5]},
{'length': 1, 'ngram': [4]}, {'emojis': 1}, {'emojis': 1, 'ngram': [2, 3, 4]},
{'bag_of_words': 1, 'ngram': [2, 3, 4], 'length': 1, 'emojis': 1}]
# feature_filters = [{'length': 1}, {'bag_of_words': 1}]
for filter in feature_filters:
# Train Naive Bayes classifier
train_set = [
(document_features(d, filter), c) for (d, c) in training_documents]
test_set = testing_documents[2000:]
# classifier = nltk.NaiveBayesClassifier.train(train_set)
print(filter)
NB_classifier = nltk.NaiveBayesClassifier.train(train_set)
report = get_classifier_metrics_report(NB_classifier, test_set, filter)
print("Classification report for NaiveBayesian classifier %s\n" % (report))
MNB_classifier = SklearnClassifier(MultinomialNB())
MNB_classifier.train(train_set)
report = get_classifier_metrics_report(MNB_classifier, test_set, filter)
print("Classification report for MNB classifier %s\n" % (report))
BNB_classifier = SklearnClassifier(BernoulliNB())
BNB_classifier.train(train_set)
report = get_classifier_metrics_report(BNB_classifier, test_set, filter)
print("Classification report for BNB classifier %s\n" % (report))
LogisticRegression_classifier = SklearnClassifier(LogisticRegression())
LogisticRegression_classifier.train(train_set)
report = get_classifier_metrics_report(LogisticRegression_classifier, test_set, filter)
print("Classification report for LR classifier %s\n" % (report))
SGDClassifier_classifier = SklearnClassifier(SGDClassifier())
SGDClassifier_classifier.train(train_set)
report = get_classifier_metrics_report(SGDClassifier_classifier, test_set, filter)
print("Classification report for SGD classifier %s\n" % (report))
SVC_classifier = SklearnClassifier(SVC())
SVC_classifier.train(train_set)
report = get_classifier_metrics_report(SVC_classifier, test_set, filter)
print("Classification report for SVC classifier %s\n" % (report))
LinearSVC_classifier = SklearnClassifier(LinearSVC())
LinearSVC_classifier.train(train_set)
report = get_classifier_metrics_report(LinearSVC_classifier, test_set, filter)
print("Classification report for LSVC classifier %s\n" % (report))
# Test the classifier
# print("{} -> {}". format(str(filter),
# nltk.classify.accuracy(classifier, test_set)))
# Classify a few docs and check
# for(d, c) in documents[:100]:
# guess = classifier.classify(document_features(
# d, {'length' : 1 ,'ngram': 4}))
# if(guess != c):
# print('Got It Wrong correct={} guess={} comment={}'.format(
# c, guess, ' '.join(d)))
# else:
# print('Got It Right guess={} comment={}'.format(
# guess, ' '.join(d).strip()))
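# A minimal sketch of the NLTK classifier API used above, on made-up toy
# features (illustrative only): training data is a list of
# (feature_dict, label) pairs and classify() takes a single feature dict.
toy_train = [({'contains(good)': True}, 'Positive'),
             ({'contains(bad)': True}, 'Negative')]
toy_clf = nltk.NaiveBayesClassifier.train(toy_train)
print("Toy classifier:", toy_clf.classify({'contains(good)': True}))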
|
the-stack_106_18321
|
import inspect
import logging
import os
import re
import subprocess
from typing import Dict, Any
from pyhttpd.certs import CertificateSpec
from pyhttpd.conf import HttpdConf
from pyhttpd.env import HttpdTestEnv, HttpdTestSetup
log = logging.getLogger(__name__)
class H2TestSetup(HttpdTestSetup):
def __init__(self, env: 'HttpdTestEnv'):
super().__init__(env=env)
def make(self):
super().make(add_modules=["http2", "proxy_http2"])
self._add_h2test()
def _add_h2test(self):
p = subprocess.run([self.env.apxs, '-c', 'mod_h2test.c'],
capture_output=True,
cwd=os.path.join(self.env.local_dir, 'mod_h2test'))
rv = p.returncode
if rv != 0:
log.error(f"compiling md_h2test failed: {p.stderr}")
raise Exception(f"compiling md_h2test failed: {p.stderr}")
modules_conf = os.path.join(self.env.server_dir, 'conf/modules.conf')
with open(modules_conf, 'a') as fd:
# load our test module which is not installed
fd.write(f"LoadModule h2test_module \"{self.env.local_dir}/mod_h2test/.libs/mod_h2test.so\"\n")
class H2TestEnv(HttpdTestEnv):
def __init__(self, pytestconfig=None, setup_dirs=True):
super().__init__(pytestconfig=pytestconfig,
local_dir=os.path.dirname(inspect.getfile(H2TestEnv)),
add_base_conf=[
"H2MinWorkers 1",
"H2MaxWorkers 64",
"Protocols h2 http/1.1 h2c",
],
interesting_modules=["http2", "proxy_http2", "h2test"])
self.add_cert_specs([
CertificateSpec(domains=[
f"push.{self._http_tld}",
f"hints.{self._http_tld}",
f"ssl.{self._http_tld}",
f"pad0.{self._http_tld}",
f"pad1.{self._http_tld}",
f"pad2.{self._http_tld}",
f"pad3.{self._http_tld}",
f"pad8.{self._http_tld}",
]),
CertificateSpec(domains=[f"noh2.{self.http_tld}"], key_type='rsa2048'),
])
self.httpd_error_log.set_ignored_lognos([
'AH02032',
'AH01276',
'AH01630',
'AH00135',
'AH02261', # Re-negotiation handshake failed (our test_101)
])
self.httpd_error_log.set_ignored_patterns([
re.compile(r'.*malformed header from script \'hecho.py\': Bad header: x.*'),
])
if setup_dirs:
self._setup = H2TestSetup(env=self)
self._setup.make()
self.issue_certs()
self.setup_data_1k_1m()
def setup_data_1k_1m(self):
s90 = "01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678\n"
with open(os.path.join(self.gen_dir, "data-1k"), 'w') as f:
for i in range(10):
f.write(f"{i:09d}-{s90}")
with open(os.path.join(self.gen_dir, "data-10k"), 'w') as f:
for i in range(100):
f.write(f"{i:09d}-{s90}")
with open(os.path.join(self.gen_dir, "data-100k"), 'w') as f:
for i in range(1000):
f.write(f"{i:09d}-{s90}")
with open(os.path.join(self.gen_dir, "data-1m"), 'w') as f:
for i in range(10000):
f.write(f"{i:09d}-{s90}")
class H2Conf(HttpdConf):
def __init__(self, env: HttpdTestEnv, extras: Dict[str, Any] = None):
super().__init__(env=env, extras=HttpdConf.merge_extras(extras, {
f"cgi.{env.http_tld}": [
"SSLOptions +StdEnvVars",
"AddHandler cgi-script .py",
]
}))
def start_vhost(self, domains, port=None, doc_root="htdocs", with_ssl=False):
super().start_vhost(domains=domains, port=port, doc_root=doc_root, with_ssl=with_ssl)
if f"noh2.{self.env.http_tld}" in domains:
protos = ["http/1.1"]
elif port == self.env.https_port or with_ssl is True:
protos = ["h2", "http/1.1"]
else:
protos = ["h2c", "http/1.1"]
if f"test2.{self.env.http_tld}" in domains:
protos = reversed(protos)
self.add(f"Protocols {' '.join(protos)}")
return self
def add_vhost_noh2(self):
domains = [f"noh2.{self.env.http_tld}", f"noh2-alias.{self.env.http_tld}"]
self.start_vhost(domains=domains, port=self.env.https_port, doc_root="htdocs/noh2")
self.add(["Protocols http/1.1", "SSLOptions +StdEnvVars"])
self.end_vhost()
self.start_vhost(domains=domains, port=self.env.http_port, doc_root="htdocs/noh2")
self.add(["Protocols http/1.1", "SSLOptions +StdEnvVars"])
self.end_vhost()
return self
def add_vhost_test1(self, proxy_self=False, h2proxy_self=False):
return super().add_vhost_test1(proxy_self=proxy_self, h2proxy_self=h2proxy_self)
def add_vhost_test2(self):
return super().add_vhost_test2()
|
the-stack_106_18322
|
#!/usr/bin/env python
# MBUtil: a tool for MBTiles files
# Supports importing, exporting, and more
#
# (c) Development Seed 2012
# Licensed under BSD
# for additional reference on schema see:
# https://github.com/mapbox/node-mbtiles/blob/master/lib/schema.sql
import sqlite3, sys, logging, time, os, json, zlib, gzip, re, StringIO, math
logger = logging.getLogger(__name__)
def flip_y(zoom, y):
return (2**zoom-1) - y
def mbtiles_setup(cur):
cur.execute("""
create table tiles (
zoom_level integer,
tile_column integer,
tile_row integer,
tile_data blob);
""")
cur.execute("""create table metadata
(name text, value text);""")
cur.execute("""create unique index tiles_index on tiles
(zoom_level, tile_column, tile_row);""")
def mbtiles_connect(mbtiles_file, silent):
try:
con = sqlite3.connect(mbtiles_file)
return con
except Exception as e:
if not silent:
logger.error("Could not connect to database")
logger.exception(e)
sys.exit(1)
def optimize_connection(cur):
cur.execute("""PRAGMA synchronous=0""")
cur.execute("""PRAGMA locking_mode=EXCLUSIVE""")
cur.execute("""PRAGMA journal_mode=DELETE""")
def optimize_database(cur, silent):
if not silent:
logger.debug('analyzing db')
cur.execute("""ANALYZE;""")
if not silent:
logger.debug('cleaning db')
# Workaround for python>=3.6.0,python<3.6.2
# https://bugs.python.org/issue28518
cur.isolation_level = None
cur.execute("""VACUUM;""")
cur.isolation_level = '' # reset default value of isolation_level
def get_dirs(path):
return [name for name in os.listdir(path)
if os.path.isdir(os.path.join(path, name))]
def read_tiles(zoom_level, depth, base_tile_id, directory_path, image_format, silent, cur):
if depth > 0:
for sub_dir in get_dirs(directory_path):
idx = int(sub_dir)
tile_id = base_tile_id + (idx * (10**(depth * 3)))
read_tiles(zoom_level, depth - 1, tile_id, os.path.join(directory_path, sub_dir), image_format, silent, cur)
else:
maxx = 180
minx = -180
tile_size = 4
if zoom_level == 1:
tile_size = 1
elif zoom_level == 2:
tile_size = 0.25
n_columns = math.ceil((maxx - minx) / tile_size)
for current_file in os.listdir(directory_path):
            file_name, ext = current_file.split('.', 1)
            if (ext != image_format):
                continue
f = open(os.path.join(directory_path, current_file), 'rb')
file_content = f.read()
f.close()
tile_id = base_tile_id + int(file_name)
tile_row = round(tile_id / n_columns)
tile_col = tile_id % n_columns
if not silent:
logger.debug(' Read tile %i with zoom %i (%i, %i)' % (tile_id, zoom_level, tile_col, tile_row))
blob = StringIO.StringIO()
with gzip.GzipFile(fileobj=blob, mode="w", compresslevel=6) as f:
f.write(file_content)
cur.execute("""insert into tiles (zoom_level,
tile_column, tile_row, tile_data) values
(?, ?, ?, ?);""",
(zoom_level, tile_col, tile_row, sqlite3.Binary(blob.getvalue())))
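# Illustrative helper (hypothetical, not used by the tool): it repeats the
# column/row arithmetic from read_tiles for a single tile id, e.g. at zoom 1
# the grid is ceil(360 / 1) = 360 columns wide, so tile id 725 maps to
# column 725 % 360 = 5 and row round(725 / 360.0) = 2.
def _tile_col_row(tile_id, zoom_level):
    maxx, minx = 180, -180
    tile_size = {1: 1, 2: 0.25}.get(zoom_level, 4)
    n_columns = math.ceil((maxx - minx) / tile_size)
    return tile_id % n_columns, round(tile_id / n_columns)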
def disk_to_mbtiles(directory_path, mbtiles_file, **kwargs):
silent = kwargs.get('silent')
if not silent:
logger.info("Importing disk to MBTiles")
logger.debug("%s --> %s" % (directory_path, mbtiles_file))
con = mbtiles_connect(mbtiles_file, silent)
cur = con.cursor()
optimize_connection(cur)
mbtiles_setup(cur)
#~ image_format = 'gph'
image_format = kwargs.get('format', 'gph')
try:
metadata = json.load(open(os.path.join(directory_path, 'metadata.json'), 'r'))
image_format = kwargs.get('format')
for name, value in metadata.items():
cur.execute('insert into metadata (name, value) values (?, ?)',
(name, value))
if not silent:
logger.info('metadata from metadata.json restored')
except IOError:
if not silent:
logger.warning('metadata.json not found')
for zoom_dir in get_dirs(directory_path):
z = int(zoom_dir)
depth = 2 if (z>=2) else 1
read_tiles(z, depth, 0, os.path.join(directory_path, zoom_dir), image_format, silent, cur)
if not silent:
logger.debug('tiles (and grids) inserted.')
optimize_database(con, silent)
def mbtiles_metadata_to_disk(mbtiles_file, **kwargs):
silent = kwargs.get('silent')
if not silent:
logger.debug("Exporting MBTiles metatdata from %s" % (mbtiles_file))
con = mbtiles_connect(mbtiles_file, silent)
metadata = dict(con.execute('select name, value from metadata;').fetchall())
if not silent:
logger.debug(json.dumps(metadata, indent=2))
def mbtiles_to_disk(mbtiles_file, directory_path, **kwargs):
silent = kwargs.get('silent')
if not silent:
logger.debug("Exporting MBTiles to disk")
logger.debug("%s --> %s" % (mbtiles_file, directory_path))
con = mbtiles_connect(mbtiles_file, silent)
os.mkdir("%s" % directory_path)
metadata = dict(con.execute('select name, value from metadata;').fetchall())
json.dump(metadata, open(os.path.join(directory_path, 'metadata.json'), 'w'), indent=4)
count = con.execute('select count(zoom_level) from tiles;').fetchone()[0]
done = 0
base_path = directory_path
if not os.path.isdir(base_path):
os.makedirs(base_path)
# if interactivity
formatter = metadata.get('formatter')
if formatter:
layer_json = os.path.join(base_path, 'layer.json')
formatter_json = {"formatter":formatter}
open(layer_json, 'w').write(json.dumps(formatter_json))
tiles = con.execute('select zoom_level, tile_column, tile_row, tile_data from tiles;')
t = tiles.fetchone()
while t:
z = t[0]
x = t[1]
y = t[2]
if kwargs.get('scheme') == 'xyz':
y = flip_y(z,y)
if not silent:
logger.debug('flipping')
tile_dir = os.path.join(base_path, str(z), str(x))
elif kwargs.get('scheme') == 'wms':
tile_dir = os.path.join(base_path,
"%02d" % (z),
"%03d" % (int(x) / 1000000),
"%03d" % ((int(x) / 1000) % 1000),
"%03d" % (int(x) % 1000),
"%03d" % (int(y) / 1000000),
"%03d" % ((int(y) / 1000) % 1000))
else:
tile_dir = os.path.join(base_path, str(z), str(x))
if not os.path.isdir(tile_dir):
os.makedirs(tile_dir)
if kwargs.get('scheme') == 'wms':
tile = os.path.join(tile_dir,'%03d.%s' % (int(y) % 1000, kwargs.get('format', 'png')))
else:
tile = os.path.join(tile_dir,'%s.%s' % (y, kwargs.get('format', 'png')))
f = open(tile, 'wb')
f.write(t[3])
f.close()
done = done + 1
if not silent:
logger.info('%s / %s tiles exported' % (done, count))
t = tiles.fetchone()
# grids
callback = kwargs.get('callback')
done = 0
|
the-stack_106_18323
|
import ipaddress
import json
import logging
import os
import pytest
import random
import re
import sys
import time
import yaml
from tests.common.fixtures.ptfhost_utils import ptf_portmap_file # lgtm[py/unused-import]
from tests.common.helpers.assertions import pytest_assert, pytest_require
from tests.common.mellanox_data import is_mellanox_device as isMellanoxDevice
from tests.common.utilities import wait_until
from tests.common.dualtor.dual_tor_utils import upper_tor_host,lower_tor_host
from tests.common.dualtor.mux_simulator_control import mux_server_url, toggle_all_simulator_ports
from tests.common.dualtor.constants import UPPER_TOR, LOWER_TOR
from tests.common.utilities import check_qos_db_fv_reference_with_table
logger = logging.getLogger(__name__)
class QosBase:
"""
Common APIs
"""
SUPPORTED_T0_TOPOS = ["t0", "t0-64", "t0-116", "t0-35", "dualtor-56", "dualtor", "t0-80", "t0-backend"]
SUPPORTED_T1_TOPOS = ["t1-lag", "t1-64-lag", "t1-backend"]
SUPPORTED_PTF_TOPOS = ['ptf32', 'ptf64']
SUPPORTED_ASIC_LIST = ["td2", "th", "th2", "spc1", "spc2", "spc3", "td3", "th3"]
TARGET_QUEUE_WRED = 3
TARGET_LOSSY_QUEUE_SCHED = 0
TARGET_LOSSLESS_QUEUE_SCHED = 3
buffer_model_initialized = False
buffer_model = None
def isBufferInApplDb(self, dut_asic):
if not self.buffer_model_initialized:
self.buffer_model = dut_asic.run_redis_cmd(
argv = [
"redis-cli", "-n", "4", "hget",
"DEVICE_METADATA|localhost", "buffer_model"
]
)
self.buffer_model_initialized = True
logger.info(
"Buffer model is {}, buffer tables will be fetched from {}".
format(
self.buffer_model or "not defined",
"APPL_DB" if self.buffer_model else "CONFIG_DB"
)
)
return self.buffer_model
@pytest.fixture(scope='class', autouse=True)
def dutTestParams(self, duthosts, rand_one_dut_hostname, tbinfo, ptf_portmap_file):
"""
Prepares DUT host test params
Args:
duthost (AnsibleHost): Device Under Test (DUT)
tbinfo (Fixture, dict): Map containing testbed information
            ptfPortMapFile (Fixture, str): filename residing on the PTF host that contains port map information
Returns:
dutTestParams (dict): DUT host test params
"""
duthost = duthosts[rand_one_dut_hostname]
mgFacts = duthost.get_extended_minigraph_facts(tbinfo)
topo = tbinfo["topo"]["name"]
yield {
"topo": topo,
"hwsku": mgFacts["minigraph_hwsku"],
"basicParams": {
"router_mac": '' if topo in self.SUPPORTED_T0_TOPOS else duthost.facts["router_mac"],
"server": duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'],
"port_map_file": ptf_portmap_file,
"sonic_asic_type": duthost.facts['asic_type'],
"sonic_version": duthost.os_version
}
}
def runPtfTest(self, ptfhost, testCase='', testParams={}):
"""
Runs QoS SAI test case on PTF host
Args:
ptfhost (AnsibleHost): Packet Test Framework (PTF)
testCase (str): SAI tests test case name
testParams (dict): Map of test params required by testCase
Returns:
None
Raises:
RunAnsibleModuleFail if ptf test fails
"""
pytest_assert(ptfhost.shell(
argv = [
"ptf",
"--test-dir",
"saitests",
testCase,
"--platform-dir",
"ptftests",
"--platform",
"remote",
"-t",
";".join(["{}={}".format(k, repr(v)) for k, v in testParams.items()]),
"--disable-ipv6",
"--disable-vxlan",
"--disable-geneve",
"--disable-erspan",
"--disable-mpls",
"--disable-nvgre",
"--log-file",
"/tmp/{0}.log".format(testCase),
"--test-case-timeout",
"600"
],
chdir = "/root",
)["rc"] == 0, "Failed when running test '{0}'".format(testCase))
class QosSaiBase(QosBase):
"""
QosSaiBase contains collection of pytest fixtures that ready the
testbed for QoS SAI test cases.
"""
def __computeBufferThreshold(self, dut_asic, bufferProfile):
"""
Computes buffer threshold for dynamic threshold profiles
Args:
dut_asic (SonicAsic): Device ASIC Under Test (DUT)
            bufferProfile (dict, inout): Map of buffer profile attributes
Returns:
Updates bufferProfile with computed buffer threshold
"""
if self.isBufferInApplDb(dut_asic):
db = "0"
keystr = "BUFFER_POOL_TABLE:"
else:
db = "4"
keystr = "BUFFER_POOL|"
if check_qos_db_fv_reference_with_table(dut_asic) == True:
pool = bufferProfile["pool"].encode("utf-8").translate(None, "[]")
else:
pool = keystr + bufferProfile["pool"].encode("utf-8")
bufferSize = int(
dut_asic.run_redis_cmd(
argv = ["redis-cli", "-n", db, "HGET", pool, "size"]
)[0]
)
bufferScale = 2**float(bufferProfile["dynamic_th"])
bufferScale /= (bufferScale + 1)
bufferProfile.update(
{"static_th": int(bufferProfile["size"]) + int(bufferScale * bufferSize)}
)
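    # Worked example of the computation above (numbers are illustrative): with
    # dynamic_th = 1 the scale is 2**1 / (2**1 + 1) = 2/3, so a 9000000-byte pool
    # and a 0-byte reserved size give static_th = 0 + int(2/3 * 9000000) = 6000000.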
def __updateVoidRoidParams(self, dut_asic, bufferProfile):
"""
Updates buffer profile with VOID/ROID params
Args:
dut_asic (SonicAsic): Device Under Test (DUT)
            bufferProfile (dict, inout): Map of buffer profile attributes
Returns:
Updates bufferProfile with VOID/ROID obtained from Redis db
"""
if check_qos_db_fv_reference_with_table(dut_asic) == True:
if self.isBufferInApplDb(dut_asic):
bufferPoolName = bufferProfile["pool"].encode("utf-8").translate(
None, "[]").replace("BUFFER_POOL_TABLE:",''
)
else:
bufferPoolName = bufferProfile["pool"].encode("utf-8").translate(
None, "[]").replace("BUFFER_POOL|",''
)
else:
bufferPoolName = bufferProfile["pool"].encode("utf-8")
bufferPoolVoid = dut_asic.run_redis_cmd(
argv = [
"redis-cli", "-n", "2", "HGET",
"COUNTERS_BUFFER_POOL_NAME_MAP", bufferPoolName
]
)[0].encode("utf-8")
bufferProfile.update({"bufferPoolVoid": bufferPoolVoid})
bufferPoolRoid = dut_asic.run_redis_cmd(
argv = ["redis-cli", "-n", "1", "HGET", "VIDTORID", bufferPoolVoid]
)[0].encode("utf-8").replace("oid:",'')
bufferProfile.update({"bufferPoolRoid": bufferPoolRoid})
def __getBufferProfile(self, request, dut_asic, os_version, table, port, priorityGroup):
"""
Get buffer profile attribute from Redis db
Args:
request (Fixture): pytest request object
dut_asic(SonicAsic): Device Under Test (DUT)
table (str): Redis table name
port (str): DUT port alias
priorityGroup (str): QoS priority group
Returns:
bufferProfile (dict): Map of buffer profile attributes
"""
if self.isBufferInApplDb(dut_asic):
db = "0"
keystr = "{0}:{1}:{2}".format(table, port, priorityGroup)
bufkeystr = "BUFFER_PROFILE_TABLE:"
else:
db = "4"
keystr = "{0}|{1}|{2}".format(table, port, priorityGroup)
bufkeystr = "BUFFER_PROFILE|"
if check_qos_db_fv_reference_with_table(dut_asic) == True:
bufferProfileName = dut_asic.run_redis_cmd(
argv = ["redis-cli", "-n", db, "HGET", keystr, "profile"]
)[0].encode("utf-8").translate(None, "[]")
else:
bufferProfileName = bufkeystr + dut_asic.run_redis_cmd(
argv = ["redis-cli", "-n", db, "HGET", keystr, "profile"])[0].encode("utf-8")
result = dut_asic.run_redis_cmd(
argv = ["redis-cli", "-n", db, "HGETALL", bufferProfileName]
)
it = iter(result)
bufferProfile = dict(zip(it, it))
bufferProfile.update({"profileName": bufferProfileName})
# Update profile static threshold value if profile threshold is dynamic
if "dynamic_th" in bufferProfile.keys():
self.__computeBufferThreshold(dut_asic, bufferProfile)
if "pg_lossless" in bufferProfileName:
pytest_assert(
"xon" in bufferProfile.keys() and "xoff" in bufferProfile.keys(),
"Could not find xon and/or xoff values for profile '{0}'".format(
bufferProfileName
)
)
if "201811" not in os_version:
self.__updateVoidRoidParams(dut_asic, bufferProfile)
return bufferProfile
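    # Example of a returned lossless profile (keys and values are illustrative and
    # depend on the SKU):
    #   {'profileName': 'BUFFER_PROFILE|pg_lossless_100000_300m_profile',
    #    'pool': 'ingress_lossless_pool', 'size': '0', 'dynamic_th': '1',
    #    'xon': '18432', 'xoff': '32256', 'static_th': 6000000}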
def __getSharedHeadroomPoolSize(self, request, dut_asic):
"""
Get shared headroom pool size from Redis db
Args:
request (Fixture): pytest request object
dut_asic (SonicAsic): Device Under Test (DUT)
Returns:
            size (str): size of shared headroom pool
None if shared headroom pool isn't enabled
"""
if self.isBufferInApplDb(dut_asic):
db = "0"
keystr = "BUFFER_POOL_TABLE:ingress_lossless_pool"
else:
db = "4"
keystr = "BUFFER_POOL|ingress_lossless_pool"
result = dut_asic.run_redis_cmd(
argv = ["redis-cli", "-n", db, "HGETALL", keystr]
)
it = iter(result)
ingressLosslessPool = dict(zip(it, it))
return ingressLosslessPool.get("xoff")
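    # Equivalent CLI call when the buffer model lives in CONFIG_DB (illustrative):
    #   redis-cli -n 4 HGETALL "BUFFER_POOL|ingress_lossless_pool"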
def __getEcnWredParam(self, dut_asic, table, port):
"""
Get ECN/WRED parameters from Redis db
Args:
dut_asic (SonicAsic): Device Under Test (DUT)
table (str): Redis table name
port (str): DUT port alias
Returns:
wredProfile (dict): Map of ECN/WRED attributes
"""
if check_qos_db_fv_reference_with_table(dut_asic) == True:
wredProfileName = dut_asic.run_redis_cmd(
argv = [
"redis-cli", "-n", "4", "HGET",
"{0}|{1}|{2}".format(table, port, self.TARGET_QUEUE_WRED),
"wred_profile"
]
)[0].encode("utf-8").translate(None, "[]")
else:
wredProfileName = "WRED_PROFILE|" + dut_asic.run_redis_cmd(
argv = [
"redis-cli", "-n", "4", "HGET",
"{0}|{1}|{2}".format(table, port, self.TARGET_QUEUE_WRED),
"wred_profile"
]
)[0].encode("utf-8")
result = dut_asic.run_redis_cmd(
argv = ["redis-cli", "-n", "4", "HGETALL", wredProfileName]
)
it = iter(result)
wredProfile = dict(zip(it, it))
return wredProfile
def __getWatermarkStatus(self, dut_asic):
"""
Get watermark status from Redis db
Args:
dut_asic (SonicAsic): Device Under Test (DUT)
Returns:
watermarkStatus (str): Watermark status
"""
watermarkStatus = dut_asic.run_redis_cmd(
argv = [
"redis-cli", "-n", "4", "HGET",
"FLEX_COUNTER_TABLE|QUEUE_WATERMARK", "FLEX_COUNTER_STATUS"
]
)[0].encode("utf-8")
return watermarkStatus
def __getSchedulerParam(self, dut_asic, port, queue):
"""
Get scheduler parameters from Redis db
Args:
dut_asic (SonicAsic): Device Under Test (DUT)
port (str): DUT port alias
queue (str): QoS queue
Returns:
SchedulerParam (dict): Map of scheduler parameters
"""
if check_qos_db_fv_reference_with_table(dut_asic) == True:
schedProfile = dut_asic.run_redis_cmd(
argv = [
"redis-cli", "-n", "4", "HGET",
"QUEUE|{0}|{1}".format(port, queue), "scheduler"
]
)[0].encode("utf-8").translate(None, "[]")
else:
schedProfile = "SCHEDULER|" + dut_asic.run_redis_cmd(
argv = [
"redis-cli", "-n", "4", "HGET",
"QUEUE|{0}|{1}".format(port, queue), "scheduler"
]
)[0].encode("utf-8")
schedWeight = dut_asic.run_redis_cmd(
argv = ["redis-cli", "-n", "4", "HGET", schedProfile, "weight"]
)[0].encode("utf-8")
return {"schedProfile": schedProfile, "schedWeight": schedWeight}
def __assignTestPortIps(self, mgFacts):
"""
Assign IPs to test ports of DUT host
Args:
mgFacts (dict): Map of DUT minigraph facts
Returns:
dutPortIps (dict): Map of port index to IPs
"""
dutPortIps = {}
if len(mgFacts["minigraph_vlans"]) > 0:
#TODO: handle the case when there are multiple vlans
testVlan = next(iter(mgFacts["minigraph_vlans"]))
testVlanMembers = mgFacts["minigraph_vlans"][testVlan]["members"]
testVlanIp = None
for vlan in mgFacts["minigraph_vlan_interfaces"]:
if mgFacts["minigraph_vlans"][testVlan]["name"] in vlan["attachto"]:
testVlanIp = ipaddress.ip_address(unicode(vlan["addr"]))
break
pytest_assert(testVlanIp, "Failed to obtain vlan IP")
vlan_id = None
if 'type' in mgFacts["minigraph_vlans"][testVlan]:
vlan_type = mgFacts["minigraph_vlans"][testVlan]['type']
if vlan_type is not None and "Tagged" in vlan_type:
vlan_id = mgFacts["minigraph_vlans"][testVlan]['vlanid']
for i in range(len(testVlanMembers)):
portIndex = mgFacts["minigraph_ptf_indices"][testVlanMembers[i]]
portIpMap = {'peer_addr': str(testVlanIp + portIndex + 1)}
if vlan_id is not None:
portIpMap['vlan_id'] = vlan_id
dutPortIps.update({portIndex: portIpMap})
return dutPortIps
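    # Example return value (port indices and addresses are illustrative):
    #   {28: {'peer_addr': '192.168.0.30'}, 29: {'peer_addr': '192.168.0.31', 'vlan_id': '1000'}}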
def __buildTestPorts(self, request, testPortIds, testPortIps, src_port_ids, dst_port_ids):
"""
Build map of test ports index and IPs
Args:
request (Fixture): pytest request object
testPortIds (list): List of QoS SAI test port IDs
testPortIps (list): List of QoS SAI test port IPs
Returns:
testPorts (dict): Map of test ports index and IPs
"""
dstPorts = request.config.getoption("--qos_dst_ports")
srcPorts = request.config.getoption("--qos_src_ports")
if dstPorts is None:
if dst_port_ids:
pytest_assert(
len(set(testPortIds).intersection(set(dst_port_ids))) == len(set(dst_port_ids)),
"Dest port id passed in qos.yml not valid"
)
dstPorts = dst_port_ids
elif len(testPortIds) >= 4:
dstPorts = [0, 2, 3]
elif len(testPortIds) == 3:
dstPorts = [0, 2, 2]
else:
dstPorts = [0, 0, 0]
if srcPorts is None:
if src_port_ids:
pytest_assert(
len(set(testPortIds).intersection(set(src_port_ids))) == len(set(src_port_ids)),
"Source port id passed in qos.yml not valid"
)
# To verify ingress lossless speed/cable-length randomize the source port.
srcPorts = [random.choice(src_port_ids)]
else:
srcPorts = [1]
pytest_assert(len(testPortIds) >= 2, "Provide at least 2 test ports")
logging.debug(
"Test Port IDs:{} IPs:{}".format(testPortIds, testPortIps)
)
logging.debug("Test Port dst:{}, src:{}".format(dstPorts, srcPorts))
pytest_assert(
len(set(dstPorts).intersection(set(srcPorts))) == 0,
"Duplicate destination and source ports '{0}'".format(
set(dstPorts).intersection(set(srcPorts))
)
)
#TODO: Randomize port selection
dstPort = dstPorts[0] if dst_port_ids else testPortIds[dstPorts[0]]
dstVlan = testPortIps[dstPort]['vlan_id'] if 'vlan_id' in testPortIps[dstPort] else None
dstPort2 = dstPorts[1] if dst_port_ids else testPortIds[dstPorts[1]]
dstVlan2 = testPortIps[dstPort2]['vlan_id'] if 'vlan_id' in testPortIps[dstPort2] else None
dstPort3 = dstPorts[2] if dst_port_ids else testPortIds[dstPorts[2]]
dstVlan3 = testPortIps[dstPort3]['vlan_id'] if 'vlan_id' in testPortIps[dstPort3] else None
srcPort = srcPorts[0] if src_port_ids else testPortIds[srcPorts[0]]
srcVlan = testPortIps[srcPort]['vlan_id'] if 'vlan_id' in testPortIps[srcPort] else None
return {
"dst_port_id": dstPort,
"dst_port_ip": testPortIps[dstPort]['peer_addr'],
"dst_port_vlan": dstVlan,
"dst_port_2_id": dstPort2,
"dst_port_2_ip": testPortIps[dstPort2]['peer_addr'],
"dst_port_2_vlan": dstVlan2,
'dst_port_3_id': dstPort3,
"dst_port_3_ip": testPortIps[dstPort3]['peer_addr'],
"dst_port_3_vlan": dstVlan3,
"src_port_id": srcPorts[0] if src_port_ids else testPortIds[srcPorts[0]],
"src_port_ip": testPortIps[srcPorts[0] if src_port_ids else testPortIds[srcPorts[0]]]["peer_addr"],
"src_port_vlan": srcVlan
}
@pytest.fixture(scope='class', autouse=True)
def dutConfig(
self, request, duthosts, rand_one_dut_hostname, tbinfo,
enum_frontend_asic_index
):
"""
Build DUT host config pertaining to QoS SAI tests
Args:
request (Fixture): pytest request object
duthost (AnsibleHost): Device Under Test (DUT)
Returns:
dutConfig (dict): Map of DUT config containing dut interfaces,
test port IDs, test port IPs, and test ports
"""
duthost = duthosts[rand_one_dut_hostname]
dut_asic = duthost.asic_instance(enum_frontend_asic_index)
dutLagInterfaces = []
dutPortIps = {}
testPortIps = {}
mgFacts = duthost.get_extended_minigraph_facts(tbinfo)
topo = tbinfo["topo"]["name"]
testPortIds = []
# LAG ports in T1 TOPO need to be removed in Mellanox devices
if topo in self.SUPPORTED_T0_TOPOS or isMellanoxDevice(duthost):
pytest_assert(
not duthost.sonichost.is_multi_asic, "Fixture not supported on T0 multi ASIC"
)
for _, lag in mgFacts["minigraph_portchannels"].items():
for intf in lag["members"]:
dutLagInterfaces.append(mgFacts["minigraph_ptf_indices"][intf])
testPortIds = set(mgFacts["minigraph_ptf_indices"][port]
for port in mgFacts["minigraph_ports"].keys())
testPortIds -= set(dutLagInterfaces)
if isMellanoxDevice(duthost):
                # The last port is used for uplink from the DUT switch
testPortIds -= {len(mgFacts["minigraph_ptf_indices"]) - 1}
testPortIds = sorted(testPortIds)
pytest_require(len(testPortIds) != 0, "Skip test since no ports are available for testing")
# get current DUT port IPs
dutPortIps = {}
if 'backend' in topo:
intf_map = mgFacts["minigraph_vlan_sub_interfaces"]
else:
intf_map = mgFacts["minigraph_interfaces"]
for portConfig in intf_map:
intf = portConfig["attachto"].split(".")[0]
if ipaddress.ip_interface(portConfig['peer_addr']).ip.version == 4:
portIndex = mgFacts["minigraph_ptf_indices"][intf]
if portIndex in testPortIds:
portIpMap = {'peer_addr': portConfig["peer_addr"]}
if 'vlan' in portConfig:
portIpMap['vlan_id'] = portConfig['vlan']
dutPortIps.update({portIndex: portIpMap})
testPortIps = self.__assignTestPortIps(mgFacts)
elif topo in self.SUPPORTED_T1_TOPOS:
for iface,addr in dut_asic.get_active_ip_interfaces(tbinfo).items():
vlan_id = None
if iface.startswith("Ethernet"):
if "." in iface:
iface, vlan_id = iface.split(".")
portIndex = mgFacts["minigraph_ptf_indices"][iface]
portIpMap = {'peer_addr': addr["peer_ipv4"]}
if vlan_id is not None:
portIpMap['vlan_id'] = vlan_id
dutPortIps.update({portIndex: portIpMap})
elif iface.startswith("PortChannel"):
portName = next(
iter(mgFacts["minigraph_portchannels"][iface]["members"])
)
portIndex = mgFacts["minigraph_ptf_indices"][portName]
portIpMap = {'peer_addr': addr["peer_ipv4"]}
dutPortIps.update({portIndex: portIpMap})
testPortIds = sorted(dutPortIps.keys())
else:
pytest.skip("Unsupported testbed type - {}".format(topo))
# restore currently assigned IPs
testPortIps.update(dutPortIps)
qosConfigs = {}
with open(r"qos/files/qos.yml") as file:
qosConfigs = yaml.load(file, Loader=yaml.FullLoader)
vendor = duthost.facts["asic_type"]
hostvars = duthost.host.options['variable_manager']._hostvars[duthost.hostname]
dutAsic = None
for asic in self.SUPPORTED_ASIC_LIST:
vendorAsic = "{0}_{1}_hwskus".format(vendor, asic)
if vendorAsic in hostvars.keys() and mgFacts["minigraph_hwsku"] in hostvars[vendorAsic]:
dutAsic = asic
break
pytest_assert(dutAsic, "Cannot identify DUT ASIC type")
dutTopo = "topo-"
if dutTopo + topo in qosConfigs['qos_params'].get(dutAsic, {}):
dutTopo = dutTopo + topo
else:
# Default topo is any
dutTopo = dutTopo + "any"
# Support of passing source and dest ptf port id from qos.yml
# This is needed when on some asic port are distributed across
# multiple buffer pipes.
src_port_ids = None
dst_port_ids = None
try:
if "src_port_ids" in qosConfigs['qos_params'][dutAsic][dutTopo]:
src_port_ids = qosConfigs['qos_params'][dutAsic][dutTopo]["src_port_ids"]
if "dst_port_ids" in qosConfigs['qos_params'][dutAsic][dutTopo]:
dst_port_ids = qosConfigs['qos_params'][dutAsic][dutTopo]["dst_port_ids"]
except KeyError:
pass
testPorts = self.__buildTestPorts(request, testPortIds, testPortIps, src_port_ids, dst_port_ids)
yield {
"dutInterfaces" : {
index: port for port, index in mgFacts["minigraph_ptf_indices"].items()
},
"testPortIds": testPortIds,
"testPortIps": testPortIps,
"testPorts": testPorts,
"qosConfigs": qosConfigs,
"dutAsic" : dutAsic,
"dutTopo" : dutTopo
}
@pytest.fixture(scope='class')
def ssh_tunnel_to_syncd_rpc(
self, duthosts, rand_one_dut_hostname, enum_frontend_asic_index,
swapSyncd
):
duthost = duthosts[rand_one_dut_hostname]
dut_asic = duthost.asic_instance(enum_frontend_asic_index)
dut_asic.create_ssh_tunnel_sai_rpc()
yield
dut_asic.remove_ssh_tunnel_sai_rpc()
@pytest.fixture(scope='class')
def updateIptables(
self, duthosts, rand_one_dut_hostname, enum_frontend_asic_index, swapSyncd
):
"""
Update iptables on DUT host with drop rule for BGP SYNC packets
Args:
duthost (AnsibleHost): Device Under Test (DUT)
swapSyncd (Fixture): swapSyncd fixture is required to run prior to updating iptables
Returns:
None
"""
duthost = duthosts[rand_one_dut_hostname]
dut_asic = duthost.asic_instance(enum_frontend_asic_index)
ipVersions = [{"ip_version": "ipv4"}, {"ip_version": "ipv6"}]
logger.info("Add ip[6]tables rule to drop BGP SYN Packet from peer so that we do not ACK back")
for ipVersion in ipVersions:
dut_asic.bgp_drop_rule(state="present", **ipVersion)
yield
logger.info("Remove ip[6]tables rule to drop BGP SYN Packet from Peer")
for ipVersion in ipVersions:
dut_asic.bgp_drop_rule(state="absent", **ipVersion)
@pytest.fixture(scope='class')
def stopServices(
self, duthosts, rand_one_dut_hostname, enum_frontend_asic_index,
swapSyncd, enable_container_autorestart, disable_container_autorestart,
tbinfo, upper_tor_host, lower_tor_host, toggle_all_simulator_ports
):
"""
Stop services (lldp-syncs, lldpd, bgpd) on DUT host prior to test start
Args:
duthost (AnsibleHost): Device Under Test (DUT)
            swapSyncd (Fixture): swapSyncd fixture is required to run prior to stopping services
Returns:
None
"""
if 'dualtor' in tbinfo['topo']['name']:
duthost = upper_tor_host
duthost_lower = lower_tor_host
else:
duthost = duthosts[rand_one_dut_hostname]
dut_asic = duthost.asic_instance(enum_frontend_asic_index)
def updateDockerService(host, docker="", action="", service=""):
"""
Helper function to update docker services
Args:
host (AnsibleHost): Ansible host that is running docker
docker (str): docker container name
action (str): action to apply to service running within docker
service (str): service name running within docker
Returns:
None
"""
host.command(
"docker exec {docker} supervisorctl {action} {service}".format(
docker=docker,
action=action,
service=service
)
)
logger.info("{}ed {}".format(action, service))
services = [
{"docker": dut_asic.get_docker_name("lldp"), "service": "lldp-syncd"},
{"docker": dut_asic.get_docker_name("lldp"), "service": "lldpd"},
{"docker": dut_asic.get_docker_name("bgp"), "service": "bgpd"},
{"docker": dut_asic.get_docker_name("bgp"), "service": "bgpmon"},
]
feature_list = ['lldp', 'bgp', 'syncd', 'swss']
if 'dualtor' in tbinfo['topo']['name']:
disable_container_autorestart(duthost_lower, testcase="test_qos_sai", feature_list=feature_list)
disable_container_autorestart(duthost, testcase="test_qos_sai", feature_list=feature_list)
for service in services:
updateDockerService(duthost, action="stop", **service)
""" Stop mux container for dual ToR """
if 'dualtor' in tbinfo['topo']['name']:
file = "/usr/local/bin/write_standby.py"
backup_file = "/usr/local/bin/write_standby.py.bkup"
toggle_all_simulator_ports(UPPER_TOR)
try:
duthost.shell("ls %s" % file)
duthost.shell("sudo cp {} {}".format(file,backup_file))
duthost.shell("sudo rm {}".format(file))
duthost.shell("sudo touch {}".format(file))
except:
pytest.skip('file {} not found'.format(file))
duthost_lower.shell('sudo config feature state mux disabled')
duthost.shell('sudo config feature state mux disabled')
yield
for service in services:
updateDockerService(duthost, action="start", **service)
""" Start mux conatiner for dual ToR """
if 'dualtor' in tbinfo['topo']['name']:
try:
duthost.shell("ls %s" % backup_file)
duthost.shell("sudo cp {} {}".format(backup_file,file))
duthost.shell("sudo chmod +x {}".format(file))
duthost.shell("sudo rm {}".format(backup_file))
except:
pytest.skip('file {} not found'.format(backup_file))
duthost.shell('sudo config feature state mux enabled')
duthost_lower.shell('sudo config feature state mux enabled')
logger.info("Start mux container for dual ToR testbed")
enable_container_autorestart(duthost, testcase="test_qos_sai", feature_list=feature_list)
if 'dualtor' in tbinfo['topo']['name']:
enable_container_autorestart(duthost_lower, testcase="test_qos_sai", feature_list=feature_list)
@pytest.fixture(autouse=True)
def updateLoganalyzerExceptions(self, rand_one_dut_hostname, loganalyzer):
"""
Update loganalyzer ignore regex list
Args:
duthost (AnsibleHost): Device Under Test (DUT)
loganalyzer (Fixture): log analyzer fixture
Returns:
None
"""
if loganalyzer:
ignoreRegex = [
".*ERR monit.*'lldpd_monitor' process is not running.*",
".*ERR monit.* 'lldp\|lldpd_monitor' status failed.*-- 'lldpd:' is not running.*",
".*ERR monit.*'lldp_syncd' process is not running.*",
".*ERR monit.*'lldp\|lldp_syncd' status failed.*-- 'python.* -m lldp_syncd' is not running.*",
".*ERR monit.*'bgpd' process is not running.*",
".*ERR monit.*'bgp\|bgpd' status failed.*-- '/usr/lib/frr/bgpd' is not running.*",
".*ERR monit.*'bgpcfgd' process is not running.*",
".*ERR monit.*'bgp\|bgpcfgd' status failed.*-- '/usr/bin/python.* /usr/local/bin/bgpcfgd' is not running.*",
".*ERR syncd#syncd:.*brcm_sai_set_switch_attribute:.*updating switch mac addr failed.*",
".*ERR monit.*'bgp\|bgpmon' status failed.*'/usr/bin/python.* /usr/local/bin/bgpmon' is not running.*",
".*ERR monit.*bgp\|fpmsyncd.*status failed.*NoSuchProcess process no longer exists.*",
".*WARNING syncd#SDK:.*check_attribs_metadata: Not implemented attribute.*",
".*WARNING syncd#SDK:.*sai_set_attribute: Failed attribs check, key:Switch ID.*",
".*WARNING syncd#SDK:.*check_rate: Set max rate to 0.*"
]
loganalyzer[rand_one_dut_hostname].ignore_regex.extend(ignoreRegex)
yield
@pytest.fixture(scope='class', autouse=True)
def disablePacketAging(
self, duthosts, rand_one_dut_hostname, stopServices
):
"""
disable packet aging on DUT host
Args:
duthost (AnsibleHost): Device Under Test (DUT)
            stopServices (Fixture): stopServices fixture is required to run prior to disabling packet aging
Returns:
None
"""
duthost = duthosts[rand_one_dut_hostname]
if isMellanoxDevice(duthost):
logger.info("Disable Mellanox packet aging")
duthost.copy(src="qos/files/mellanox/packets_aging.py", dest="/tmp")
duthost.command("docker cp /tmp/packets_aging.py syncd:/")
duthost.command("docker exec syncd python /packets_aging.py disable")
yield
if isMellanoxDevice(duthost):
logger.info("Enable Mellanox packet aging")
duthost.command("docker exec syncd python /packets_aging.py enable")
duthost.command("docker exec syncd rm -rf /packets_aging.py")
@pytest.fixture(scope='class', autouse=True)
def dutQosConfig(
self, duthosts, enum_frontend_asic_index, rand_one_dut_hostname,
dutConfig, ingressLosslessProfile, ingressLossyProfile,
egressLosslessProfile, egressLossyProfile, sharedHeadroomPoolSize,
tbinfo
):
"""
Prepares DUT host QoS configuration
Args:
duthost (AnsibleHost): Device Under Test (DUT)
            ingressLosslessProfile (Fixture): ingressLosslessProfile fixture is required to run prior to collecting
QoS configuration
Returns:
QoSConfig (dict): Map containing DUT host QoS configuration
"""
duthost = duthosts[rand_one_dut_hostname]
dut_asic = duthost.asic_instance(enum_frontend_asic_index)
mgFacts = duthost.get_extended_minigraph_facts(tbinfo)
pytest_assert("minigraph_hwsku" in mgFacts, "Could not find DUT SKU")
profileName = ingressLosslessProfile["profileName"]
logger.info("Lossless Buffer profile selected is {}".format(profileName))
if self.isBufferInApplDb(dut_asic):
profile_pattern = "^BUFFER_PROFILE_TABLE\:pg_lossless_(.*)_profile$"
else:
profile_pattern = "^BUFFER_PROFILE\|pg_lossless_(.*)_profile"
m = re.search(profile_pattern, profileName)
pytest_assert(m.group(1), "Cannot find port speed/cable length")
portSpeedCableLength = m.group(1)
qosConfigs = dutConfig["qosConfigs"]
dutAsic = dutConfig["dutAsic"]
dutTopo = dutConfig["dutTopo"]
if isMellanoxDevice(duthost):
current_file_dir = os.path.dirname(os.path.realpath(__file__))
sub_folder_dir = os.path.join(current_file_dir, "files/mellanox/")
if sub_folder_dir not in sys.path:
sys.path.append(sub_folder_dir)
import qos_param_generator
qpm = qos_param_generator.QosParamMellanox(qosConfigs['qos_params']['mellanox'][dutTopo], dutAsic,
portSpeedCableLength,
dutConfig,
ingressLosslessProfile,
ingressLossyProfile,
egressLosslessProfile,
egressLossyProfile,
sharedHeadroomPoolSize
)
qosParams = qpm.run()
else:
qosParams = qosConfigs['qos_params'][dutAsic][dutTopo]
yield {
"param": qosParams,
"portSpeedCableLength": portSpeedCableLength,
}
@pytest.fixture(scope='class')
def releaseAllPorts(
self, duthosts, rand_one_dut_hostname, ptfhost, dutTestParams,
updateIptables, ssh_tunnel_to_syncd_rpc
):
"""
Release all paused ports prior to running QoS SAI test cases
Args:
ptfhost (AnsibleHost): Packet Test Framework (PTF)
dutTestParams (Fixture, dict): DUT host test params
updateIptables (Fixture, dict): updateIptables to run prior to releasing paused ports
Returns:
None
Raises:
RunAnsibleModuleFail if ptf test fails
"""
self.runPtfTest(
ptfhost, testCase="sai_qos_tests.ReleaseAllPorts",
testParams=dutTestParams["basicParams"]
)
@pytest.fixture(scope='class', autouse=True)
def populateArpEntries(
self, duthosts, enum_frontend_asic_index, rand_one_dut_hostname,
ptfhost, dutTestParams, dutConfig, releaseAllPorts,
):
"""
Update ARP entries of QoS SAI test ports
Args:
duthost (AnsibleHost): Device Under Test (DUT)
ptfhost (AnsibleHost): Packet Test Framework (PTF)
dutTestParams (Fixture, dict): DUT host test params
dutConfig (Fixture, dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs,
and test ports
releaseAllPorts (Fixture, dict): releaseAllPorts to run prior to updating ARP entries
Returns:
None
Raises:
RunAnsibleModuleFail if ptf test fails
"""
duthost = duthosts[rand_one_dut_hostname]
dut_asic = duthost.asic_instance(enum_frontend_asic_index)
saiQosTest = None
if dutTestParams["topo"] in self.SUPPORTED_T0_TOPOS:
saiQosTest = "sai_qos_tests.ARPpopulate"
elif dutTestParams["topo"] in self.SUPPORTED_PTF_TOPOS:
saiQosTest = "sai_qos_tests.ARPpopulatePTF"
else:
result = dut_asic.command("arp -n")
pytest_assert(result["rc"] == 0, "failed to run arp command on {0}".format(duthost.hostname))
if result["stdout"].find("incomplete") == -1:
saiQosTest = "sai_qos_tests.ARPpopulate"
if saiQosTest:
testParams = dutTestParams["basicParams"]
testParams.update(dutConfig["testPorts"])
self.runPtfTest(
ptfhost, testCase=saiQosTest, testParams=testParams
)
@pytest.fixture(scope='class', autouse=True)
def dut_disable_ipv6(self, duthosts, rand_one_dut_hostname):
duthost = duthosts[rand_one_dut_hostname]
duthost.shell("sysctl -w net.ipv6.conf.all.disable_ipv6=1")
yield
duthost.shell("sysctl -w net.ipv6.conf.all.disable_ipv6=0")
@pytest.fixture(scope='class', autouse=True)
def sharedHeadroomPoolSize(
self, request, duthosts, enum_frontend_asic_index,
rand_one_dut_hostname
):
"""
        Retrieves shared headroom pool size
Args:
request (Fixture): pytest request object
duthost (AnsibleHost): Device Under Test (DUT)
Returns:
size: shared headroom pool size
none if it is not defined
"""
duthost = duthosts[rand_one_dut_hostname]
yield self.__getSharedHeadroomPoolSize(
request,
duthost.asic_instance(enum_frontend_asic_index)
)
@pytest.fixture(scope='class', autouse=True)
def ingressLosslessProfile(
self, request, duthosts, enum_frontend_asic_index,
rand_one_dut_hostname, dutConfig
):
"""
        Retrieves ingress lossless profile
Args:
request (Fixture): pytest request object
duthost (AnsibleHost): Device Under Test (DUT)
dutConfig (Fixture, dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs,
and test ports
Returns:
ingressLosslessProfile (dict): Map of ingress lossless buffer profile attributes
"""
duthost = duthosts[rand_one_dut_hostname]
dut_asic = duthost.asic_instance(enum_frontend_asic_index)
yield self.__getBufferProfile(
request,
dut_asic,
duthost.os_version,
"BUFFER_PG_TABLE" if self.isBufferInApplDb(dut_asic) else "BUFFER_PG",
dutConfig["dutInterfaces"][dutConfig["testPorts"]["src_port_id"]],
"3-4"
)
@pytest.fixture(scope='class', autouse=True)
def ingressLossyProfile(
self, request, duthosts, enum_frontend_asic_index,
rand_one_dut_hostname, dutConfig
):
"""
        Retrieves ingress lossy profile
Args:
request (Fixture): pytest request object
duthost (AnsibleHost): Device Under Test (DUT)
dutConfig (Fixture, dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs,
and test ports
Returns:
ingressLossyProfile (dict): Map of ingress lossy buffer profile attributes
"""
duthost = duthosts[rand_one_dut_hostname]
dut_asic = duthost.asic_instance(enum_frontend_asic_index)
yield self.__getBufferProfile(
request,
dut_asic,
duthost.os_version,
"BUFFER_PG_TABLE" if self.isBufferInApplDb(dut_asic) else "BUFFER_PG",
dutConfig["dutInterfaces"][dutConfig["testPorts"]["src_port_id"]],
"0"
)
@pytest.fixture(scope='class', autouse=True)
def egressLosslessProfile(
self, request, duthosts, enum_frontend_asic_index,
rand_one_dut_hostname, dutConfig
):
"""
        Retrieves egress lossless profile
Args:
request (Fixture): pytest request object
duthost (AnsibleHost): Device Under Test (DUT)
dutConfig (Fixture, dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs,
and test ports
Returns:
egressLosslessProfile (dict): Map of egress lossless buffer profile attributes
"""
duthost = duthosts[rand_one_dut_hostname]
dut_asic = duthost.asic_instance(enum_frontend_asic_index)
yield self.__getBufferProfile(
request,
dut_asic,
duthost.os_version,
"BUFFER_QUEUE_TABLE" if self.isBufferInApplDb(dut_asic) else "BUFFER_QUEUE",
dutConfig["dutInterfaces"][dutConfig["testPorts"]["src_port_id"]],
"3-4"
)
@pytest.fixture(scope='class', autouse=True)
def egressLossyProfile(
self, request, duthosts, enum_frontend_asic_index,
rand_one_dut_hostname, dutConfig
):
"""
        Retrieves egress lossy profile
Args:
request (Fixture): pytest request object
duthost (AnsibleHost): Device Under Test (DUT)
dutConfig (Fixture, dict): Map of DUT config containing dut interfaces,
test port IDs, test port IPs, and test ports
Returns:
egressLossyProfile (dict): Map of egress lossy buffer profile attributes
"""
duthost = duthosts[rand_one_dut_hostname]
dut_asic = duthost.asic_instance(enum_frontend_asic_index)
yield self.__getBufferProfile(
request,
dut_asic,
duthost.os_version,
"BUFFER_QUEUE_TABLE" if self.isBufferInApplDb(dut_asic) else "BUFFER_QUEUE",
dutConfig["dutInterfaces"][dutConfig["testPorts"]["src_port_id"]],
"0-2"
)
@pytest.fixture(scope='class')
def losslessSchedProfile(
self, duthosts, enum_frontend_asic_index, rand_one_dut_hostname,
dutConfig
):
"""
        Retrieves lossless scheduler profile
Args:
duthost (AnsibleHost): Device Under Test (DUT)
dutConfig (Fixture, dict): Map of DUT config containing dut interfaces,
test port IDs, test port IPs, and test ports
Returns:
losslessSchedProfile (dict): Map of scheduler parameters
"""
duthost = duthosts[rand_one_dut_hostname]
yield self.__getSchedulerParam(
duthost.asic_instance(enum_frontend_asic_index),
dutConfig["dutInterfaces"][dutConfig["testPorts"]["src_port_id"]],
self.TARGET_LOSSLESS_QUEUE_SCHED
)
@pytest.fixture(scope='class')
def lossySchedProfile(
self, duthosts, enum_frontend_asic_index, rand_one_dut_hostname,
dutConfig
):
"""
        Retrieves lossy scheduler profile
Args:
duthost (AnsibleHost): Device Under Test (DUT)
dutConfig (Fixture, dict): Map of DUT config containing dut interfaces,
test port IDs, test port IPs, and test ports
Returns:
lossySchedProfile (dict): Map of scheduler parameters
"""
duthost = duthosts[rand_one_dut_hostname]
yield self.__getSchedulerParam(
duthost.asic_instance(enum_frontend_asic_index),
dutConfig["dutInterfaces"][dutConfig["testPorts"]["src_port_id"]],
self.TARGET_LOSSY_QUEUE_SCHED
)
@pytest.fixture
def updateSchedProfile(
self, duthosts, enum_frontend_asic_index, rand_one_dut_hostname,
dutQosConfig, losslessSchedProfile, lossySchedProfile
):
"""
Updates lossless/lossy scheduler profiles
Args:
duthost (AnsibleHost): Device Under Test (DUT)
dutQosConfig (Fixture, dict): Map containing DUT host QoS configuration
losslessSchedProfile (Fixture, dict): Map of lossless scheduler parameters
lossySchedProfile (Fixture, dict): Map of lossy scheduler parameters
Returns:
None
"""
duthost = duthosts[rand_one_dut_hostname]
def updateRedisSchedParam(schedParam):
"""
Helper function to updates lossless/lossy scheduler profiles
Args:
schedParam (dict): Scheduler params to be set
Returns:
None
"""
duthost.asic_instance(enum_frontend_asic_index).run_redis_cmd(
argv = [
"redis-cli",
"-n",
"4",
"HSET",
schedParam["profile"],
"weight",
schedParam["qosConfig"]
]
)
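        # Equivalent CLI call for reference (profile name and weight are illustrative):
        #   redis-cli -n 4 HSET "SCHEDULER|scheduler.0" weight 15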
wrrSchedParams = [
{
"profile": lossySchedProfile["schedProfile"],
"qosConfig": dutQosConfig["param"]["wrr_chg"]["lossy_weight"]
},
{
"profile": losslessSchedProfile["schedProfile"],
"qosConfig": dutQosConfig["param"]["wrr_chg"]["lossless_weight"]
},
]
for schedParam in wrrSchedParams:
updateRedisSchedParam(schedParam)
yield
schedProfileParams = [
{
"profile": lossySchedProfile["schedProfile"],
"qosConfig": lossySchedProfile["schedWeight"]
},
{
"profile": losslessSchedProfile["schedProfile"],
"qosConfig": losslessSchedProfile["schedWeight"]
},
]
for schedParam in schedProfileParams:
updateRedisSchedParam(schedParam)
@pytest.fixture
def resetWatermark(
self, duthosts, enum_frontend_asic_index, rand_one_dut_hostname
):
"""
Reset queue watermark
Args:
duthost (AnsibleHost): Device Under Test (DUT)
Returns:
None
"""
duthost = duthosts[rand_one_dut_hostname]
dut_asic = duthost.asic_instance(enum_frontend_asic_index)
dut_asic.command("counterpoll watermark enable")
dut_asic.command("sleep 70")
dut_asic.command("counterpoll watermark disable")
class QosSaiBaseMasic(QosBase):
def build_port_ips(self, asic_index, ifaces, mg_facts):
"""
Returns list of port index and IP address for a given ASIC
"""
dut_port_ips = dict()
for iface, addr in ifaces.items():
if iface.startswith("Ethernet"):
portIndex = mg_facts["minigraph_ptf_indices"][iface]
elif iface.startswith("PortChannel"):
portName = mg_facts["minigraph_portchannels"][iface]["members"][0]
portIndex = mg_facts["minigraph_ptf_indices"][portName]
dut_port_ips.update({
portIndex: {
"ipv4": addr["peer_ipv4"],
"bgp_neighbor": addr["bgp_neighbor"]
}
})
return {asic_index: dut_port_ips}
def get_backend_ip_ifs(self, duthost, frontend_asic):
"""
On a frontend ASIC return a dict of interfaces with
backend ASIC names
"""
pytest_assert(
frontend_asic in duthost.get_frontend_asic_ids(),
"{} is not frontend ASIC ID".format(frontend_asic)
)
ip_ifs = duthost.asic_instance(
frontend_asic
).show_ip_interface()["ansible_facts"]["ip_interfaces"]
# Find backend interface names
return {intf: ip["bgp_neighbor"].lower() for intf, ip in ip_ifs.items()
if ip["bgp_neighbor"].lower().startswith("asic")}
def check_v4route_backend_nhop(self, duthost, frontend_asic, route):
"""
On frontend ASIC Check if v4 address has at least one backend
ASIC nexthop
Returns:
False if not nexthops with backend ASICs
"""
cmd = 'vtysh -n {} -c "show ip route {} json"'.format(
frontend_asic, route
)
result = duthost.command(cmd)
pytest_assert(result["rc"] == 0, cmd)
route_info = json.loads(result["stdout"])
nhop = route_info[route_info.keys().pop()][0]
nhop_ifs = {x["interfaceName"] for x in nhop["nexthops"]}
backend_ifs = set(self.get_backend_ip_ifs(
duthost, frontend_asic).keys()
)
return len(nhop_ifs.intersection(backend_ifs))
def backend_ip_if_admin_state(
self, duthost, test_asic, frontend_asic, admin_state
):
"""
On a frontend ASIC bring down ports (channels) towards backend ASICs
other than the ASIC under test, so that traffic always goes via
backend ASIC under test
"""
def is_intf_status(asic, intf, oper_state):
intf_status = duthost.asic_instance(asic).show_interface(
command="status", include_internal_intfs=True
)["ansible_facts"]["int_status"]
if intf_status[intf]["oper_state"] == oper_state:
return True
return False
oper_state = "up" if admin_state == "startup" else "down"
ip_ifs = self.get_backend_ip_ifs(duthost, frontend_asic)
for intf, asic in ip_ifs.items():
if asic != "asic{}".format(test_asic):
if admin_state == "startup":
duthost.asic_instance(frontend_asic).startup_interface(intf)
else:
duthost.asic_instance(frontend_asic).shutdown_interface(intf)
# wait for port status to change
pytest_assert(
wait_until(
10, 1, 0, is_intf_status, frontend_asic, intf,
oper_state
),
"Failed to update port status {} {}".format(
intf, admin_state
)
)
def find_asic_traffic_ports(self, duthost, ptfhost, test_params):
"""
For a given pair of source IP and destination IP, identify
the path taken by the L3 packet. Path implies the backend ASIC
and its tx and rx ports. The path is identified by sending
a burst of packets and finding the difference in interface
counters before and after the burst.
Assert is thrown if multiple ports or multiple backend ASICs
have similar interface counters.
"""
def find_traffic_ports(asic_id, c1, c2, diff):
rx_port = None
tx_port = None
a1 = c1[asic_id]["ansible_facts"]["int_counter"]
a2 = c2[asic_id]["ansible_facts"]["int_counter"]
for port in a2.keys():
rx_diff = int(a2[port]["RX_OK"]) - int(a1[port]["RX_OK"])
if rx_diff >= diff:
pytest_assert(
rx_port is None,
"Multiple rx ports with {} rx packets".format(diff)
)
rx_port = port
tx_diff = int(a2[port]["TX_OK"]) - int(a1[port]["TX_OK"])
if tx_diff >= diff:
pytest_assert(
tx_port is None,
"Multiple tx ports with {} tx packets".format(diff)
)
tx_port = port
# return rx, tx ports that have a packet count difference of > diff
return rx_port, tx_port
test_params["count"] = 100
duthost.command("sonic-clear counters")
cnt_before = duthost.show_interface(
command="counter", asic_index="all", include_internal_intfs=True
)
# send a burst of packets from a given src IP to dst IP
self.runPtfTest(
ptfhost, testCase="sai_qos_tests.PacketTransmit",
testParams=test_params
)
time.sleep(8)
cnt_after = duthost.show_interface(
command="counter", asic_index="all", include_internal_intfs=True
)
asic_idx = None
rx_port = None
tx_port = None
# identify the backend ASIC and the rx, tx ports on that ASIC
# that forwarded the traffic
for asic in duthost.get_backend_asic_ids():
rx, tx = find_traffic_ports(
asic, cnt_before, cnt_after, test_params["count"]
)
if rx and tx:
pytest_assert(
rx_port is None and tx_port is None,
"Multiple backend ASICs with rx/tx ports"
)
rx_port, tx_port, asic_idx = rx, tx, asic
pytest_assert(asic_idx is not None, "ASIC, rx and tx ports not found")
return ({
"test_src_port_name": rx_port,
"test_dst_port_name": tx_port,
"asic_under_test": asic_idx,
}
)
@pytest.fixture(scope='class')
def build_ip_interface(
self, duthosts, rand_one_dut_hostname, swapSyncd, tbinfo
):
"""
builds a list of active IP interfaces and port index
for each ASIC
Returns:
{
asic_index: {
portIndex: {
"ipv4": peer ipv4,
"bgp_neighbor": BGP neighbor
}
.
.
}
.
.
}
"""
duthost = duthosts[rand_one_dut_hostname]
topo = tbinfo["topo"]["name"]
if topo not in self.SUPPORTED_T1_TOPOS:
pytest.skip("unsupported topology {}".format(topo))
pytest_require(duthost.is_multi_asic, "Not a multi asic platform")
mg_facts = duthost.get_extended_minigraph_facts(tbinfo)
ip_ifaces = duthost.get_active_ip_interfaces(tbinfo, asic_index="all")
port_ips = dict()
for idx in range(len(ip_ifaces)):
port_ips.update(self.build_port_ips(idx, ip_ifaces[idx], mg_facts))
yield port_ips
@pytest.fixture(scope='class')
def build_test_ports(self, build_ip_interface):
"""
This fixture builds a list of active L3 interface ports on each
ASIC so that source and destination interfaces can be selected
from different ASICs. Returns a dict of 'src' and 'dst' interfaces
along with the ASIC ID
        Only frontend ASICs connected to T0 devices are reachable end
to end on multi ASIC platform.
"""
# find asics with T0 neighbors
ports = dict()
for k, v in build_ip_interface.items():
try:
port_index = next(iter(v))
port_info = v[port_index]
if port_info["bgp_neighbor"].lower().endswith("t0"):
ports.update({k: v})
except StopIteration:
continue
pytest_assert(
            len(ports) >= 2, "Ports from at least two ASICs required"
)
test_ports = dict()
keys = ports.keys()
src_asic = keys.pop(0)
test_ports.update({"src": {src_asic: ports[src_asic]}})
test_ports.update({"dst": dict()})
for dst_asic in keys:
test_ports["dst"].update({dst_asic: ports[dst_asic]})
yield test_ports
@pytest.fixture(scope='class')
def get_test_ports(self, build_test_ports):
"""
Fixture to select test ports from a given list of active L3
interfaces from multiple frontend ASICs. The source and
destination port will be on different ASICs.
        Fixture also returns the source and destination ASIC IDs
"""
# source port
src_asic = build_test_ports["src"].keys().pop(0)
src_port_ids = build_test_ports["src"][src_asic].keys()
src_port_id = src_port_ids.pop(0)
src_port_ip = build_test_ports["src"][src_asic][src_port_id]["ipv4"]
# destination port
dst_asic = build_test_ports["dst"].keys().pop(0)
dst_port_ids = build_test_ports["dst"][dst_asic].keys()
dst_port_id = dst_port_ids.pop(0)
dst_port_ip = build_test_ports["dst"][dst_asic][dst_port_id]["ipv4"]
return {
"dst_port_id": dst_port_id,
"dst_port_ip": dst_port_ip,
"dst_asic": dst_asic,
"src_port_id": src_port_id,
"src_port_ip": src_port_ip,
"src_asic": src_asic,
}
|
the-stack_106_18324
|
"""
Deals with multipart POST requests.
The code is adapted from the recipe found at :
http://code.activestate.com/recipes/146306/
No author name was given.
Author : Alexis Mignon (c)
email : [email protected]
Date : 06/08/2011
"""
import httplib
import mimetypes
import urlparse
def posturl(url, fields, files):
urlparts = urlparse.urlsplit(url)
return post_multipart(urlparts[1], urlparts[2], fields, files)
def post_multipart(host, selector, fields, files):
"""
Post fields and files to an http host as multipart/form-data.
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for data to be
uploaded as files.
Return the server's response page.
"""
content_type, body = encode_multipart_formdata(fields, files)
h = httplib.HTTPConnection(host)
headers = {"Content-Type": content_type, 'content-length': str(len(body))}
h.request("POST", selector, headers=headers)
h.send(body)
r = h.getresponse()
data = r.read()
h.close()
return r, data
def encode_multipart_formdata(fields, files):
"""
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for data to be
uploaded as files.
Return (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
CRLF = '\r\n'
L = []
for (key, value) in fields:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"' % key)
L.append('')
L.append(value)
for (key, filename, value) in files:
filename = filename.encode("utf8")
L.append('--' + BOUNDARY)
L.append(
'Content-Disposition: form-data; name="%s"; filename="%s"' % (
key, filename
)
)
L.append('Content-Type: %s' % get_content_type(filename))
L.append('')
L.append(value)
L.append('--' + BOUNDARY + '--')
L.append('')
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
def get_content_type(filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
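# Minimal usage sketch (URL, field names and file contents are illustrative):
#
#   fields = [("title", "holiday photo")]
#   files = [("photo", "beach.jpg", open("beach.jpg", "rb").read())]
#   response, data = posturl("http://example.com/upload", fields, files)
#
# posturl() splits the URL, post_multipart() builds the multipart/form-data body
# via encode_multipart_formdata() and returns the httplib response and its payload.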
|
the-stack_106_18326
|
# Task 04. Odd and Even Sum
def odd_even_sum(digit_as_str):
odd_nums = [int(x) for x in digit_as_str if not int(x) % 2 == 0]
even_nums = [int(x) for x in digit_as_str if int(x) % 2 == 0]
return sum(odd_nums), sum(even_nums)
number_string = input()
odd_sum, even_sum = odd_even_sum(number_string)
print(f'Odd sum = {odd_sum}, Even sum = {even_sum}')
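# Example: for the input "123456" the odd digits sum to 1 + 3 + 5 = 9 and the even
# digits to 2 + 4 + 6 = 12, so the script prints "Odd sum = 9, Even sum = 12".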
|
the-stack_106_18327
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import dataclasses
import logging
import re
import textwrap
import time
from collections import defaultdict, deque
from contextlib import closing
from datetime import datetime
from distutils.version import StrictVersion
from typing import Any, cast, Dict, List, Optional, Tuple, TYPE_CHECKING, Union
from urllib import parse
import pandas as pd
import simplejson as json
from flask_babel import gettext as __, lazy_gettext as _
from sqlalchemy import Column, literal_column, types
from sqlalchemy.engine.base import Engine
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.engine.result import RowProxy
from sqlalchemy.engine.url import URL
from sqlalchemy.orm import Session
from sqlalchemy.sql.expression import ColumnClause, Select
from superset import app, cache, is_feature_enabled, security_manager
from superset.db_engine_specs.base import BaseEngineSpec
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.exceptions import SupersetTemplateException
from superset.models.sql_lab import Query
from superset.models.sql_types.presto_sql_types import (
Array,
Interval,
Map,
Row,
TinyInteger,
)
from superset.result_set import destringify
from superset.sql_parse import ParsedQuery
from superset.utils import core as utils
if TYPE_CHECKING:
# prevent circular imports
from superset.models.core import Database
COLUMN_NOT_RESOLVED_ERROR_REGEX = "line (.+?): .*Column '(.+?)' cannot be resolved"
TABLE_DOES_NOT_EXIST_ERROR_REGEX = ".*Table (.+?) does not exist"
QueryStatus = utils.QueryStatus
config = app.config
logger = logging.getLogger(__name__)
def get_children(column: Dict[str, str]) -> List[Dict[str, str]]:
"""
Get the children of a complex Presto type (row or array).
For arrays, we return a single list with the base type:
>>> get_children(dict(name="a", type="ARRAY(BIGINT)"))
[{"name": "a", "type": "BIGINT"}]
For rows, we return a list of the columns:
>>> get_children(dict(name="a", type="ROW(BIGINT,FOO VARCHAR)"))
[{'name': 'a._col0', 'type': 'BIGINT'}, {'name': 'a.foo', 'type': 'VARCHAR'}]
:param column: dictionary representing a Presto column
:return: list of dictionaries representing children columns
"""
pattern = re.compile(r"(?P<type>\w+)\((?P<children>.*)\)")
match = pattern.match(column["type"])
if not match:
raise Exception(f"Unable to parse column type {column['type']}")
group = match.groupdict()
type_ = group["type"].upper()
children_type = group["children"]
if type_ == "ARRAY":
return [{"name": column["name"], "type": children_type}]
if type_ == "ROW":
nameless_columns = 0
columns = []
for child in utils.split(children_type, ","):
parts = list(utils.split(child.strip(), " "))
if len(parts) == 2:
name, type_ = parts
name = name.strip('"')
else:
name = f"_col{nameless_columns}"
type_ = parts[0]
nameless_columns += 1
columns.append({"name": f"{column['name']}.{name.lower()}", "type": type_})
return columns
raise Exception(f"Unknown type {type_}!")
class PrestoEngineSpec(BaseEngineSpec):
engine = "presto"
engine_name = "Presto"
_time_grain_expressions = {
None: "{col}",
"PT1S": "date_trunc('second', CAST({col} AS TIMESTAMP))",
"PT1M": "date_trunc('minute', CAST({col} AS TIMESTAMP))",
"PT1H": "date_trunc('hour', CAST({col} AS TIMESTAMP))",
"P1D": "date_trunc('day', CAST({col} AS TIMESTAMP))",
"P1W": "date_trunc('week', CAST({col} AS TIMESTAMP))",
"P1M": "date_trunc('month', CAST({col} AS TIMESTAMP))",
"P0.25Y": "date_trunc('quarter', CAST({col} AS TIMESTAMP))",
"P1Y": "date_trunc('year', CAST({col} AS TIMESTAMP))",
"P1W/1970-01-03T00:00:00Z": "date_add('day', 5, date_trunc('week', "
"date_add('day', 1, CAST({col} AS TIMESTAMP))))",
"1969-12-28T00:00:00Z/P1W": "date_add('day', -1, date_trunc('week', "
"date_add('day', 1, CAST({col} AS TIMESTAMP))))",
}
@classmethod
def get_allow_cost_estimate(cls, version: Optional[str] = None) -> bool:
return version is not None and StrictVersion(version) >= StrictVersion("0.319")
@classmethod
def get_table_names(
cls, database: "Database", inspector: Inspector, schema: Optional[str]
) -> List[str]:
tables = super().get_table_names(database, inspector, schema)
if not is_feature_enabled("PRESTO_SPLIT_VIEWS_FROM_TABLES"):
return tables
views = set(cls.get_view_names(database, inspector, schema))
actual_tables = set(tables) - views
return list(actual_tables)
@classmethod
def get_view_names(
cls, database: "Database", inspector: Inspector, schema: Optional[str]
) -> List[str]:
"""Returns an empty list
get_table_names() function returns all table names and view names,
and get_view_names() is not implemented in sqlalchemy_presto.py
https://github.com/dropbox/PyHive/blob/e25fc8440a0686bbb7a5db5de7cb1a77bdb4167a/pyhive/sqlalchemy_presto.py
"""
if not is_feature_enabled("PRESTO_SPLIT_VIEWS_FROM_TABLES"):
return []
if schema:
sql = (
"SELECT table_name FROM information_schema.views "
"WHERE table_schema=%(schema)s"
)
params = {"schema": schema}
else:
sql = "SELECT table_name FROM information_schema.views"
params = {}
engine = cls.get_engine(database, schema=schema)
with closing(engine.raw_connection()) as conn:
with closing(conn.cursor()) as cursor:
cursor.execute(sql, params)
results = cursor.fetchall()
return [row[0] for row in results]
@classmethod
def _create_column_info(
cls, name: str, data_type: types.TypeEngine
) -> Dict[str, Any]:
"""
Create column info object
:param name: column name
:param data_type: column data type
:return: column info object
"""
return {"name": name, "type": f"{data_type}"}
@classmethod
def _get_full_name(cls, names: List[Tuple[str, str]]) -> str:
"""
Get the full column name
:param names: list of all individual column names
:return: full column name
"""
return ".".join(column[0] for column in names if column[0])
@classmethod
def _has_nested_data_types(cls, component_type: str) -> bool:
"""
Check if string contains a data type. We determine if there is a data type by
whitespace or multiple data types by commas
:param component_type: data type
:return: boolean
"""
comma_regex = r",(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)"
white_space_regex = r"\s(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)"
return (
re.search(comma_regex, component_type) is not None
or re.search(white_space_regex, component_type) is not None
)
@classmethod
def _split_data_type(cls, data_type: str, delimiter: str) -> List[str]:
"""
Split data type based on given delimiter. Do not split the string if the
delimiter is enclosed in quotes
:param data_type: data type
:param delimiter: string separator (i.e. open parenthesis, closed parenthesis,
comma, whitespace)
:return: list of strings after breaking it by the delimiter
"""
return re.split(
r"{}(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)".format(delimiter), data_type
)
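    # Illustrative call: _split_data_type('a varchar, b row(c bigint)', ",") yields
    # ['a varchar', ' b row(c bigint)']; commas enclosed in double quotes are kept.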
@classmethod
def _parse_structural_column( # pylint: disable=too-many-locals,too-many-branches
cls,
parent_column_name: str,
parent_data_type: str,
result: List[Dict[str, Any]],
) -> None:
"""
Parse a row or array column
:param result: list tracking the results
"""
formatted_parent_column_name = parent_column_name
# Quote the column name if there is a space
if " " in parent_column_name:
formatted_parent_column_name = f'"{parent_column_name}"'
full_data_type = f"{formatted_parent_column_name} {parent_data_type}"
original_result_len = len(result)
# split on open parenthesis ( to get the structural
# data type and its component types
data_types = cls._split_data_type(full_data_type, r"\(")
stack: List[Tuple[str, str]] = []
for data_type in data_types:
# split on closed parenthesis ) to track which component
# types belong to what structural data type
inner_types = cls._split_data_type(data_type, r"\)")
for inner_type in inner_types:
# We have finished parsing multiple structural data types
if not inner_type and stack:
stack.pop()
elif cls._has_nested_data_types(inner_type):
# split on comma , to get individual data types
single_fields = cls._split_data_type(inner_type, ",")
for single_field in single_fields:
single_field = single_field.strip()
# If component type starts with a comma, the first single field
# will be an empty string. Disregard this empty string.
if not single_field:
continue
# split on whitespace to get field name and data type
field_info = cls._split_data_type(single_field, r"\s")
# check if there is a structural data type within
# overall structural data type
column_type = cls.get_sqla_column_type(field_info[1])
if column_type is None:
column_type = types.String()
logger.info(
"Did not recognize type %s of column %s",
field_info[1],
field_info[0],
)
if field_info[1] == "array" or field_info[1] == "row":
stack.append((field_info[0], field_info[1]))
full_parent_path = cls._get_full_name(stack)
result.append(
cls._create_column_info(full_parent_path, column_type)
)
else: # otherwise this field is a basic data type
full_parent_path = cls._get_full_name(stack)
column_name = "{}.{}".format(
full_parent_path, field_info[0]
)
result.append(
cls._create_column_info(column_name, column_type)
)
# If the component type ends with a structural data type, do not pop
# the stack. We have run across a structural data type within the
# overall structural data type. Otherwise, we have completely parsed
# through the entire structural data type and can move on.
if not (inner_type.endswith("array") or inner_type.endswith("row")):
stack.pop()
# We have an array of row objects (i.e. array(row(...)))
elif inner_type in ("array", "row"):
# Push a dummy object to represent the structural data type
stack.append(("", inner_type))
# We have an array of a basic data types(i.e. array(varchar)).
elif stack:
# Because it is an array of a basic data type. We have finished
# parsing the structural data type and can move on.
stack.pop()
# Unquote the column name if necessary
if formatted_parent_column_name != parent_column_name:
for index in range(original_result_len, len(result)):
result[index]["name"] = result[index]["name"].replace(
formatted_parent_column_name, parent_column_name
)
@classmethod
def _show_columns(
cls, inspector: Inspector, table_name: str, schema: Optional[str]
) -> List[RowProxy]:
"""
Show presto column names
:param inspector: object that performs database schema inspection
:param table_name: table name
:param schema: schema name
:return: list of column objects
"""
quote = inspector.engine.dialect.identifier_preparer.quote_identifier
full_table = quote(table_name)
if schema:
full_table = "{}.{}".format(quote(schema), full_table)
columns = inspector.bind.execute("SHOW COLUMNS FROM {}".format(full_table))
return columns
column_type_mappings = (
(re.compile(r"^boolean.*", re.IGNORECASE), types.Boolean()),
(re.compile(r"^tinyint.*", re.IGNORECASE), TinyInteger()),
(re.compile(r"^smallint.*", re.IGNORECASE), types.SmallInteger()),
(re.compile(r"^integer.*", re.IGNORECASE), types.Integer()),
(re.compile(r"^bigint.*", re.IGNORECASE), types.BigInteger()),
(re.compile(r"^real.*", re.IGNORECASE), types.Float()),
(re.compile(r"^double.*", re.IGNORECASE), types.Float()),
(re.compile(r"^decimal.*", re.IGNORECASE), types.DECIMAL()),
(
re.compile(r"^varchar(\((\d+)\))*$", re.IGNORECASE),
lambda match: types.VARCHAR(int(match[2])) if match[2] else types.String(),
),
(
re.compile(r"^char(\((\d+)\))*$", re.IGNORECASE),
lambda match: types.CHAR(int(match[2])) if match[2] else types.CHAR(),
),
(re.compile(r"^varbinary.*", re.IGNORECASE), types.VARBINARY()),
(re.compile(r"^json.*", re.IGNORECASE), types.JSON()),
(re.compile(r"^date.*", re.IGNORECASE), types.DATE()),
(re.compile(r"^time.*", re.IGNORECASE), types.Time()),
(re.compile(r"^timestamp.*", re.IGNORECASE), types.TIMESTAMP()),
(re.compile(r"^interval.*", re.IGNORECASE), Interval()),
(re.compile(r"^array.*", re.IGNORECASE), Array()),
(re.compile(r"^map.*", re.IGNORECASE), Map()),
(re.compile(r"^row.*", re.IGNORECASE), Row()),
)
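    # Illustrative note (not part of the original code): each entry above pairs a
    # regex with either a ready SQLAlchemy type or a callable that builds one from
    # the regex match. For "varchar(255)" the second capture group holds the
    # length, so the lambda returns types.VARCHAR(255); for a bare "varchar" the
    # group is None and the lambda falls back to types.String().
    #
    # >>> import re
    # >>> re.search(r"^varchar(\((\d+)\))*$", "varchar(255)", re.IGNORECASE)[2]
    # '255'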
@classmethod
def get_columns(
cls, inspector: Inspector, table_name: str, schema: Optional[str]
) -> List[Dict[str, Any]]:
"""
Get columns from a Presto data source. This includes handling row and
array data types
:param inspector: object that performs database schema inspection
:param table_name: table name
:param schema: schema name
:return: a list of results that contain column info
(i.e. column name and data type)
"""
columns = cls._show_columns(inspector, table_name, schema)
result: List[Dict[str, Any]] = []
for column in columns:
# parse column if it is a row or array
if is_feature_enabled("PRESTO_EXPAND_DATA") and (
"array" in column.Type or "row" in column.Type
):
structural_column_index = len(result)
cls._parse_structural_column(column.Column, column.Type, result)
result[structural_column_index]["nullable"] = getattr(
column, "Null", True
)
result[structural_column_index]["default"] = None
continue
# otherwise column is a basic data type
column_type = cls.get_sqla_column_type(column.Type)
if column_type is None:
column_type = types.String()
logger.info(
"Did not recognize type %s of column %s",
str(column.Type),
str(column.Column),
)
column_info = cls._create_column_info(column.Column, column_type)
column_info["nullable"] = getattr(column, "Null", True)
column_info["default"] = None
result.append(column_info)
return result
@classmethod
def _is_column_name_quoted(cls, column_name: str) -> bool:
"""
Check if column name is in quotes
:param column_name: column name
:return: boolean
"""
return column_name.startswith('"') and column_name.endswith('"')
@classmethod
def _get_fields(cls, cols: List[Dict[str, Any]]) -> List[ColumnClause]:
"""
Format column clauses where names are in quotes and labels are specified
:param cols: columns
:return: column clauses
"""
column_clauses = []
# Column names are separated by periods. This regex will find periods in a
# string if they are not enclosed in quotes because if a period is enclosed in
# quotes, then that period is part of a column name.
dot_pattern = r"""\. # split on period
(?= # look ahead
(?: # create non-capture group
[^\"]*\"[^\"]*\" # two quotes
)*[^\"]*$) # end regex"""
dot_regex = re.compile(dot_pattern, re.VERBOSE)
for col in cols:
# get individual column names
col_names = re.split(dot_regex, col["name"])
# quote each column name if it is not already quoted
for index, col_name in enumerate(col_names):
if not cls._is_column_name_quoted(col_name):
col_names[index] = '"{}"'.format(col_name)
quoted_col_name = ".".join(
col_name if cls._is_column_name_quoted(col_name) else f'"{col_name}"'
for col_name in col_names
)
# create column clause in the format "name"."name" AS "name.name"
column_clause = literal_column(quoted_col_name).label(col["name"])
column_clauses.append(column_clause)
return column_clauses
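    # Illustrative example (not part of the original class): for a nested column
    # named "parent.child", the dot-aware split and quoting above build
    # literal_column('"parent"."child"').label('parent.child'), which renders as
    #
    #     "parent"."child" AS "parent.child"
    #
    # i.e. the '"name"."name" AS "name.name"' format noted in the comment.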
@classmethod
def select_star( # pylint: disable=too-many-arguments
cls,
database: "Database",
table_name: str,
engine: Engine,
schema: Optional[str] = None,
limit: int = 100,
show_cols: bool = False,
indent: bool = True,
latest_partition: bool = True,
cols: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Include selecting properties of row objects. We cannot easily break arrays into
rows, so render the whole array in its own row and skip columns that correspond
to an array's contents.
"""
cols = cols or []
presto_cols = cols
if is_feature_enabled("PRESTO_EXPAND_DATA") and show_cols:
dot_regex = r"\.(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)"
presto_cols = [
col for col in presto_cols if not re.search(dot_regex, col["name"])
]
return super().select_star(
database,
table_name,
engine,
schema,
limit,
show_cols,
indent,
latest_partition,
presto_cols,
)
@classmethod
def estimate_statement_cost( # pylint: disable=too-many-locals
cls, statement: str, database: "Database", cursor: Any, user_name: str
) -> Dict[str, Any]:
"""
Run a SQL query that estimates the cost of a given statement.
:param statement: A single SQL statement
:param database: Database instance
:param cursor: Cursor instance
        :param user_name: Effective username
:return: JSON response from Presto
"""
parsed_query = ParsedQuery(statement)
sql = parsed_query.stripped()
sql_query_mutator = config["SQL_QUERY_MUTATOR"]
if sql_query_mutator:
sql = sql_query_mutator(sql, user_name, security_manager, database)
sql = f"EXPLAIN (TYPE IO, FORMAT JSON) {sql}"
cursor.execute(sql)
# the output from Presto is a single column and a single row containing
# JSON:
#
# {
# ...
# "estimate" : {
# "outputRowCount" : 8.73265878E8,
# "outputSizeInBytes" : 3.41425774958E11,
# "cpuCost" : 3.41425774958E11,
# "maxMemory" : 0.0,
# "networkCost" : 3.41425774958E11
# }
# }
result = json.loads(cursor.fetchone()[0])
return result
@classmethod
def query_cost_formatter(
cls, raw_cost: List[Dict[str, Any]]
) -> List[Dict[str, str]]:
"""
Format cost estimate.
:param raw_cost: JSON estimate from Presto
:return: Human readable cost estimate
"""
def humanize(value: Any, suffix: str) -> str:
try:
value = int(value)
except ValueError:
return str(value)
prefixes = ["K", "M", "G", "T", "P", "E", "Z", "Y"]
prefix = ""
to_next_prefix = 1000
while value > to_next_prefix and prefixes:
prefix = prefixes.pop(0)
value //= to_next_prefix
return f"{value} {prefix}{suffix}"
cost = []
columns = [
("outputRowCount", "Output count", " rows"),
("outputSizeInBytes", "Output size", "B"),
("cpuCost", "CPU cost", ""),
("maxMemory", "Max memory", "B"),
("networkCost", "Network cost", ""),
]
for row in raw_cost:
estimate: Dict[str, float] = row.get("estimate", {})
statement_cost = {}
for key, label, suffix in columns:
if key in estimate:
statement_cost[label] = humanize(estimate[key], suffix).strip()
cost.append(statement_cost)
return cost
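    # Illustrative example (not part of the original class): for a raw_cost with a
    # single row whose "estimate" matches the sample JSON shown above
    # estimate_statement_cost ({"outputRowCount": 8.73265878E8,
    # "outputSizeInBytes": 3.41425774958E11, "cpuCost": 3.41425774958E11,
    # "maxMemory": 0.0, "networkCost": 3.41425774958E11}), the formatter returns
    #   [{"Output count": "873 M rows", "Output size": "341 GB",
    #     "CPU cost": "341 G", "Max memory": "0 B", "Network cost": "341 G"}]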
@classmethod
def adjust_database_uri(
cls, uri: URL, selected_schema: Optional[str] = None
) -> None:
database = uri.database
if selected_schema and database:
selected_schema = parse.quote(selected_schema, safe="")
if "/" in database:
database = database.split("/")[0] + "/" + selected_schema
else:
database += "/" + selected_schema
uri.database = database
@classmethod
def convert_dttm(cls, target_type: str, dttm: datetime) -> Optional[str]:
tt = target_type.upper()
if tt == utils.TemporalType.DATE:
return f"""from_iso8601_date('{dttm.date().isoformat()}')"""
if tt == utils.TemporalType.TIMESTAMP:
return f"""from_iso8601_timestamp('{dttm.isoformat(timespec="microseconds")}')""" # pylint: disable=line-too-long
return None
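    # Illustrative example (not part of the original class): for
    # datetime(2021, 1, 1, 12, 30) the method returns
    #   from_iso8601_date('2021-01-01')                        for a DATE target
    #   from_iso8601_timestamp('2021-01-01T12:30:00.000000')   for a TIMESTAMP target
    # and None for any other target type.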
@classmethod
def epoch_to_dttm(cls) -> str:
return "from_unixtime({col})"
@classmethod
def get_all_datasource_names(
cls, database: "Database", datasource_type: str
) -> List[utils.DatasourceName]:
datasource_df = database.get_df(
"SELECT table_schema, table_name FROM INFORMATION_SCHEMA.{}S "
"ORDER BY concat(table_schema, '.', table_name)".format(
datasource_type.upper()
),
None,
)
datasource_names: List[utils.DatasourceName] = []
for _unused, row in datasource_df.iterrows():
datasource_names.append(
utils.DatasourceName(
schema=row["table_schema"], table=row["table_name"]
)
)
return datasource_names
@classmethod
def expand_data( # pylint: disable=too-many-locals,too-many-branches
cls, columns: List[Dict[Any, Any]], data: List[Dict[Any, Any]]
) -> Tuple[List[Dict[Any, Any]], List[Dict[Any, Any]], List[Dict[Any, Any]]]:
"""
We do not immediately display rows and arrays clearly in the data grid. This
method separates out nested fields and data values to help clearly display
structural columns.
Example: ColumnA is a row(nested_obj varchar) and ColumnB is an array(int)
Original data set = [
{'ColumnA': ['a1'], 'ColumnB': [1, 2]},
{'ColumnA': ['a2'], 'ColumnB': [3, 4]},
]
Expanded data set = [
{'ColumnA': ['a1'], 'ColumnA.nested_obj': 'a1', 'ColumnB': 1},
{'ColumnA': '', 'ColumnA.nested_obj': '', 'ColumnB': 2},
{'ColumnA': ['a2'], 'ColumnA.nested_obj': 'a2', 'ColumnB': 3},
{'ColumnA': '', 'ColumnA.nested_obj': '', 'ColumnB': 4},
]
:param columns: columns selected in the query
:param data: original data set
:return: list of all columns(selected columns and their nested fields),
expanded data set, listed of nested fields
"""
if not is_feature_enabled("PRESTO_EXPAND_DATA"):
return columns, data, []
# process each column, unnesting ARRAY types and
# expanding ROW types into new columns
to_process = deque((column, 0) for column in columns)
all_columns: List[Dict[str, Any]] = []
expanded_columns = []
current_array_level = None
while to_process:
column, level = to_process.popleft()
if column["name"] not in [column["name"] for column in all_columns]:
all_columns.append(column)
# When unnesting arrays we need to keep track of how many extra rows
# were added, for each original row. This is necessary when we expand
# multiple arrays, so that the arrays after the first reuse the rows
# added by the first. every time we change a level in the nested arrays
# we reinitialize this.
if level != current_array_level:
unnested_rows: Dict[int, int] = defaultdict(int)
current_array_level = level
name = column["name"]
values: Optional[Union[str, List[Any]]]
if column["type"].startswith("ARRAY("):
# keep processing array children; we append to the right so that
# multiple nested arrays are processed breadth-first
to_process.append((get_children(column)[0], level + 1))
# unnest array objects data into new rows
i = 0
while i < len(data):
row = data[i]
values = row.get(name)
if isinstance(values, str):
row[name] = values = destringify(values)
if values:
# how many extra rows we need to unnest the data?
extra_rows = len(values) - 1
# how many rows were already added for this row?
current_unnested_rows = unnested_rows[i]
# add any necessary rows
missing = extra_rows - current_unnested_rows
for _ in range(missing):
data.insert(i + current_unnested_rows + 1, {})
unnested_rows[i] += 1
# unnest array into rows
for j, value in enumerate(values):
data[i + j][name] = value
# skip newly unnested rows
i += unnested_rows[i]
i += 1
if column["type"].startswith("ROW("):
# expand columns; we append them to the left so they are added
# immediately after the parent
expanded = get_children(column)
to_process.extendleft((column, level) for column in expanded[::-1])
expanded_columns.extend(expanded)
# expand row objects into new columns
for row in data:
values = row.get(name) or []
if isinstance(values, str):
row[name] = values = cast(List[Any], destringify(values))
for value, col in zip(values, expanded):
row[col["name"]] = value
data = [
{k["name"]: row.get(k["name"], "") for k in all_columns} for row in data
]
return all_columns, data, expanded_columns
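    # Illustrative note (not part of the original class): when a row's array value
    # has length n, the loop above inserts (n - 1) empty rows right after it
    # (reusing rows already added by a previously unnested array at the same
    # level) and writes one element into each of data[i] .. data[i + n - 1], which
    # is how ColumnB's [1, 2] becomes two rows in the docstring example.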
@classmethod
def extra_table_metadata(
cls, database: "Database", table_name: str, schema_name: str
) -> Dict[str, Any]:
metadata = {}
indexes = database.get_indexes(table_name, schema_name)
if indexes:
cols = indexes[0].get("column_names", [])
full_table_name = table_name
if schema_name and "." not in table_name:
full_table_name = "{}.{}".format(schema_name, table_name)
pql = cls._partition_query(full_table_name, database)
col_names, latest_parts = cls.latest_partition(
table_name, schema_name, database, show_first=True
)
if not latest_parts:
latest_parts = tuple([None] * len(col_names))
metadata["partitions"] = {
"cols": cols,
"latest": dict(zip(col_names, latest_parts)),
"partitionQuery": pql,
}
# flake8 is not matching `Optional[str]` to `Any` for some reason...
metadata["view"] = cast(
Any, cls.get_create_view(database, schema_name, table_name)
)
return metadata
@classmethod
def get_create_view(
cls, database: "Database", schema: str, table: str
) -> Optional[str]:
"""
Return a CREATE VIEW statement, or `None` if not a view.
:param database: Database instance
:param schema: Schema name
:param table: Table (view) name
"""
from pyhive.exc import DatabaseError
engine = cls.get_engine(database, schema)
with closing(engine.raw_connection()) as conn:
with closing(conn.cursor()) as cursor:
sql = f"SHOW CREATE VIEW {schema}.{table}"
try:
cls.execute(cursor, sql)
polled = cursor.poll()
while polled:
time.sleep(0.2)
polled = cursor.poll()
except DatabaseError: # not a VIEW
return None
rows = cls.fetch_data(cursor, 1)
return rows[0][0]
@classmethod
def handle_cursor(cls, cursor: Any, query: Query, session: Session) -> None:
"""Updates progress information"""
query_id = query.id
poll_interval = query.database.connect_args.get(
"poll_interval", config["PRESTO_POLL_INTERVAL"]
)
logger.info("Query %i: Polling the cursor for progress", query_id)
polled = cursor.poll()
# poll returns dict -- JSON status information or ``None``
# if the query is done
# https://github.com/dropbox/PyHive/blob/
# b34bdbf51378b3979eaf5eca9e956f06ddc36ca0/pyhive/presto.py#L178
while polled:
# Update the object and wait for the kill signal.
stats = polled.get("stats", {})
query = session.query(type(query)).filter_by(id=query_id).one()
if query.status in [QueryStatus.STOPPED, QueryStatus.TIMED_OUT]:
cursor.cancel()
break
if stats:
state = stats.get("state")
# if already finished, then stop polling
if state == "FINISHED":
break
completed_splits = float(stats.get("completedSplits"))
total_splits = float(stats.get("totalSplits"))
if total_splits and completed_splits:
progress = 100 * (completed_splits / total_splits)
logger.info(
"Query {} progress: {} / {} " # pylint: disable=logging-format-interpolation
"splits".format(query_id, completed_splits, total_splits)
)
if progress > query.progress:
query.progress = progress
session.commit()
time.sleep(poll_interval)
logger.info("Query %i: Polling the cursor for progress", query_id)
polled = cursor.poll()
@classmethod
def _extract_error_message(cls, ex: Exception) -> str:
if (
hasattr(ex, "orig")
and type(ex.orig).__name__ == "DatabaseError" # type: ignore
and isinstance(ex.orig[0], dict) # type: ignore
):
error_dict = ex.orig[0] # type: ignore
return "{} at {}: {}".format(
error_dict.get("errorName"),
error_dict.get("errorLocation"),
error_dict.get("message"),
)
if type(ex).__name__ == "DatabaseError" and hasattr(ex, "args") and ex.args:
error_dict = ex.args[0]
return error_dict.get("message", _("Unknown Presto Error"))
return utils.error_msg_from_exception(ex)
@classmethod
def _partition_query( # pylint: disable=too-many-arguments,too-many-locals
cls,
table_name: str,
database: "Database",
limit: int = 0,
order_by: Optional[List[Tuple[str, bool]]] = None,
filters: Optional[Dict[Any, Any]] = None,
) -> str:
"""Returns a partition query
:param table_name: the name of the table to get partitions from
:type table_name: str
:param limit: the number of partitions to be returned
:type limit: int
:param order_by: a list of tuples of field name and a boolean
that determines if that field should be sorted in descending
order
:type order_by: list of (str, bool) tuples
:param filters: dict of field name and filter value combinations
"""
limit_clause = "LIMIT {}".format(limit) if limit else ""
order_by_clause = ""
if order_by:
l = []
for field, desc in order_by:
                l.append(field + (" DESC" if desc else ""))
order_by_clause = "ORDER BY " + ", ".join(l)
where_clause = ""
if filters:
l = []
for field, value in filters.items():
l.append(f"{field} = '{value}'")
where_clause = "WHERE " + " AND ".join(l)
presto_version = database.get_extra().get("version")
# Partition select syntax changed in v0.199, so check here.
# Default to the new syntax if version is unset.
partition_select_clause = (
f'SELECT * FROM "{table_name}$partitions"'
if not presto_version
or StrictVersion(presto_version) >= StrictVersion("0.199")
else f"SHOW PARTITIONS FROM {table_name}"
)
sql = textwrap.dedent(
f"""\
{partition_select_clause}
{where_clause}
{order_by_clause}
{limit_clause}
"""
)
return sql
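    # Illustrative example (not part of the original class): assuming a Presto
    # version >= 0.199 (or unset), a call such as
    #   cls._partition_query("logs", database, limit=1,
    #                        order_by=[("ds", True)], filters={"ds": "2018-01-01"})
    # renders
    #   SELECT * FROM "logs$partitions"
    #   WHERE ds = '2018-01-01'
    #   ORDER BY ds DESC
    #   LIMIT 1
    # (here "database" stands for any Database instance).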
@classmethod
def where_latest_partition( # pylint: disable=too-many-arguments
cls,
table_name: str,
schema: Optional[str],
database: "Database",
query: Select,
columns: Optional[List[Dict[str, str]]] = None,
) -> Optional[Select]:
try:
col_names, values = cls.latest_partition(
table_name, schema, database, show_first=True
)
except Exception: # pylint: disable=broad-except
# table is not partitioned
return None
if values is None:
return None
column_names = {column.get("name") for column in columns or []}
for col_name, value in zip(col_names, values):
if col_name in column_names:
query = query.where(Column(col_name) == value)
return query
@classmethod
def _latest_partition_from_df(cls, df: pd.DataFrame) -> Optional[List[str]]:
if not df.empty:
return df.to_records(index=False)[0].item()
return None
@classmethod
@cache.memoize(timeout=60)
def latest_partition(
cls,
table_name: str,
schema: Optional[str],
database: "Database",
show_first: bool = False,
) -> Tuple[List[str], Optional[List[str]]]:
"""Returns col name and the latest (max) partition value for a table
:param table_name: the name of the table
:param schema: schema / database / namespace
:param database: database query will be run against
:type database: models.Database
:param show_first: displays the value for the first partitioning key
if there are many partitioning keys
:type show_first: bool
>>> latest_partition('foo_table')
(['ds'], ('2018-01-01',))
"""
indexes = database.get_indexes(table_name, schema)
if not indexes:
raise SupersetTemplateException(
f"Error getting partition for {schema}.{table_name}. "
"Verify that this table has a partition."
)
if len(indexes[0]["column_names"]) < 1:
raise SupersetTemplateException(
"The table should have one partitioned field"
)
if not show_first and len(indexes[0]["column_names"]) > 1:
raise SupersetTemplateException(
"The table should have a single partitioned field "
"to use this function. You may want to use "
"`presto.latest_sub_partition`"
)
column_names = indexes[0]["column_names"]
part_fields = [(column_name, True) for column_name in column_names]
sql = cls._partition_query(table_name, database, 1, part_fields)
df = database.get_df(sql, schema)
return column_names, cls._latest_partition_from_df(df)
@classmethod
def latest_sub_partition(
cls, table_name: str, schema: Optional[str], database: "Database", **kwargs: Any
) -> Any:
"""Returns the latest (max) partition value for a table
A filtering criteria should be passed for all fields that are
partitioned except for the field to be returned. For example,
if a table is partitioned by (``ds``, ``event_type`` and
``event_category``) and you want the latest ``ds``, you'll want
to provide a filter as keyword arguments for both
``event_type`` and ``event_category`` as in
``latest_sub_partition('my_table',
event_category='page', event_type='click')``
:param table_name: the name of the table, can be just the table
name or a fully qualified table name as ``schema_name.table_name``
:type table_name: str
:param schema: schema / database / namespace
:type schema: str
:param database: database query will be run against
:type database: models.Database
:param kwargs: keyword arguments define the filtering criteria
on the partition list. There can be many of these.
:type kwargs: str
>>> latest_sub_partition('sub_partition_table', event_type='click')
'2018-01-01'
"""
indexes = database.get_indexes(table_name, schema)
part_fields = indexes[0]["column_names"]
for k in kwargs.keys(): # pylint: disable=consider-iterating-dictionary
            if k not in part_fields:
                msg = f"Field [{k}] is not part of the partitioning key"
                raise SupersetTemplateException(msg)
if len(kwargs.keys()) != len(part_fields) - 1:
msg = (
"A filter needs to be specified for {} out of the " "{} fields."
).format(len(part_fields) - 1, len(part_fields))
raise SupersetTemplateException(msg)
for field in part_fields:
if field not in kwargs.keys():
field_to_return = field
sql = cls._partition_query(
table_name, database, 1, [(field_to_return, True)], kwargs
)
df = database.get_df(sql, schema)
if df.empty:
return ""
return df.to_dict()[field_to_return][0]
@classmethod
@cache.memoize()
def get_function_names(cls, database: "Database") -> List[str]:
"""
Get a list of function names that are able to be called on the database.
Used for SQL Lab autocomplete.
:param database: The database to get functions for
:return: A list of function names useable in the database
"""
return database.get_df("SHOW FUNCTIONS")["Function"].tolist()
@classmethod
def extract_errors(cls, ex: Exception) -> List[Dict[str, Any]]:
raw_message = cls._extract_error_message(ex)
column_match = re.search(COLUMN_NOT_RESOLVED_ERROR_REGEX, raw_message)
if column_match:
return [
dataclasses.asdict(
SupersetError(
error_type=SupersetErrorType.COLUMN_DOES_NOT_EXIST_ERROR,
message=__(
'We can\'t seem to resolve the column "%(column_name)s" at '
"line %(location)s.",
column_name=column_match.group(2),
location=column_match.group(1),
),
level=ErrorLevel.ERROR,
extra={"engine_name": cls.engine_name},
)
)
]
table_match = re.search(TABLE_DOES_NOT_EXIST_ERROR_REGEX, raw_message)
if table_match:
return [
dataclasses.asdict(
SupersetError(
error_type=SupersetErrorType.TABLE_DOES_NOT_EXIST_ERROR,
message=__(
'The table "%(table_name)s" does not exist. '
"A valid table must be used to run this query.",
table_name=table_match.group(1),
),
level=ErrorLevel.ERROR,
extra={"engine_name": cls.engine_name},
)
)
]
return [
dataclasses.asdict(
SupersetError(
error_type=SupersetErrorType.GENERIC_DB_ENGINE_ERROR,
message=cls._extract_error_message(ex),
level=ErrorLevel.ERROR,
extra={"engine_name": cls.engine_name},
)
)
]
|
the-stack_106_18328
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Efficient ImageNet input pipeline using tf.data.Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import functools
import os
from absl import logging
import tensorflow.compat.v1 as tf
import preprocessing
# The input tensors are in the range [0, 255]; we need to scale them.
MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]
STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]
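# A minimal sketch (not part of the original pipeline) of how the constants above
# are typically applied: per-channel standardization of a [0, 255] image, i.e.
# (image - MEAN_RGB) / STDDEV_RGB. The real scaling happens inside
# `preprocessing.preprocess_image`; this helper (NumPy assumed available) only
# illustrates the arithmetic for an HWC image.
def _normalize_rgb_sketch(image):
  """Returns a float32 copy of `image` standardized with MEAN_RGB/STDDEV_RGB."""
  import numpy as np  # local import, used only by this illustrative sketch
  image = np.asarray(image, dtype=np.float32)
  mean = np.array(MEAN_RGB, dtype=np.float32)
  stddev = np.array(STDDEV_RGB, dtype=np.float32)
  return (image - mean) / stddev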
def build_image_serving_input_fn(image_size):
"""Builds a serving input fn for raw images."""
def _image_serving_input_fn():
"""Serving input fn for raw images."""
def _preprocess_image(image_bytes):
"""Preprocess a single raw image."""
image = preprocessing.preprocess_image(
image_bytes=image_bytes, is_training=False, image_size=image_size)
return image
image_bytes_list = tf.placeholder(
shape=[None],
dtype=tf.string,
)
images = tf.map_fn(
_preprocess_image, image_bytes_list, back_prop=False, dtype=tf.float32)
return tf.estimator.export.ServingInputReceiver(
images, {'image_bytes': image_bytes_list})
return _image_serving_input_fn
class ImageNetTFExampleInput(object):
"""Base class for ImageNet input_fn generator.
Attributes:
image_preprocessing_fn: function to preprocess images
is_training: `bool` for whether the input is for training
use_bfloat16: If True, use bfloat16 precision; else use float32.
num_cores: `int` for the number of TPU cores
image_size: `int` for image size (both width and height).
transpose_input: 'bool' for whether to use the double transpose trick
"""
__metaclass__ = abc.ABCMeta
def __init__(self,
is_training,
use_bfloat16,
num_cores=8,
image_size=224,
transpose_input=False):
self.image_preprocessing_fn = preprocessing.preprocess_image
self.is_training = is_training
self.use_bfloat16 = use_bfloat16
self.num_cores = num_cores
self.transpose_input = transpose_input
self.image_size = image_size
def set_shapes(self, batch_size, images, labels):
"""Statically set the batch_size dimension."""
if self.transpose_input:
images.set_shape(images.get_shape().merge_with(
tf.TensorShape([None, None, None, batch_size])))
labels.set_shape(labels.get_shape().merge_with(
tf.TensorShape([batch_size])))
else:
images.set_shape(images.get_shape().merge_with(
tf.TensorShape([batch_size, None, None, None])))
labels.set_shape(labels.get_shape().merge_with(
tf.TensorShape([batch_size])))
return images, labels
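  # Illustrative note (not part of the original class): merge_with only pins the
  # dimension that is still unknown. With batch_size=1024 and transpose_input=True,
  # an image tensor of shape [224, 224, 3, None] becomes [224, 224, 3, 1024]; with
  # transpose_input=False, [None, 224, 224, 3] becomes [1024, 224, 224, 3].
  # Incompatible static shapes would raise an error instead of being overwritten.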
def dataset_parser(self, value):
"""Parses an image and its label from a serialized ResNet-50 TFExample.
Args:
value: serialized string containing an ImageNet TFExample.
Returns:
      A tuple of (image, label) from the TFExample.
"""
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, ''),
'image/class/label': tf.FixedLenFeature([], tf.int64, -1),
}
parsed = tf.parse_single_example(value, keys_to_features)
image_bytes = tf.reshape(parsed['image/encoded'], shape=[])
image = self.image_preprocessing_fn(
image_bytes=image_bytes,
is_training=self.is_training,
image_size=self.image_size,
use_bfloat16=self.use_bfloat16)
# Subtract one so that labels are in [0, 1000).
label = tf.cast(
tf.reshape(parsed['image/class/label'], shape=[]), dtype=tf.int32) - 1
return image, label
@abc.abstractmethod
def make_source_dataset(self, index, num_hosts):
"""Makes dataset of serialized TFExamples.
The returned dataset will contain `tf.string` tensors, but these strings are
serialized `TFExample` records that will be parsed by `dataset_parser`.
If self.is_training, the dataset should be infinite.
Args:
index: current host index.
num_hosts: total number of hosts.
Returns:
A `tf.data.Dataset` object.
"""
return
def input_fn(self, params):
"""Input function which provides a single batch for train or eval.
Args:
params: `dict` of parameters passed from the `TPUEstimator`.
`params['batch_size']` is always provided and should be used as the
effective batch size.
Returns:
A `tf.data.Dataset` object.
"""
# Retrieves the batch size for the current shard. The # of shards is
# computed according to the input pipeline deployment. See
# tf.estimator.tpu.RunConfig for details.
batch_size = params['batch_size']
if 'context' in params:
current_host = params['context'].current_input_fn_deployment()[1]
num_hosts = params['context'].num_hosts
else:
current_host = 0
num_hosts = 1
dataset = self.make_source_dataset(current_host, num_hosts)
# Use the fused map-and-batch operation.
#
    # For XLA, we must use fixed shapes. Because we repeat the source training
# dataset indefinitely, we can use `drop_remainder=True` to get fixed-size
# batches without dropping any training examples.
#
# When evaluating, `drop_remainder=True` prevents accidentally evaluating
# the same image twice by dropping the final batch if it is less than a full
# batch size. As long as this validation is done with consistent batch size,
# exactly the same images will be used.
dataset = dataset.apply(
tf.data.experimental.map_and_batch(
self.dataset_parser,
batch_size=batch_size,
num_parallel_batches=self.num_cores,
drop_remainder=True))
# Transpose for performance on TPU
if self.transpose_input:
dataset = dataset.map(
lambda images, labels: (tf.transpose(images, [1, 2, 3, 0]), labels),
num_parallel_calls=self.num_cores)
# Assign static batch size dimension
dataset = dataset.map(functools.partial(self.set_shapes, batch_size))
# Prefetch overlaps in-feed with training
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
class ImageNetInput(ImageNetTFExampleInput):
"""Generates ImageNet input_fn from a series of TFRecord files.
The training data is assumed to be in TFRecord format with keys as specified
in the dataset_parser below, sharded across 1024 files, named sequentially:
train-00000-of-01024
train-00001-of-01024
...
train-01023-of-01024
The validation data is in the same format but sharded in 128 files.
The format of the data required is created by the script at:
https://github.com/tensorflow/tpu/blob/master/tools/datasets/imagenet_to_gcs.py
"""
def __init__(self,
is_training,
use_bfloat16,
transpose_input,
data_dir,
image_size=224,
num_parallel_calls=64,
cache=False):
"""Create an input from TFRecord files.
Args:
is_training: `bool` for whether the input is for training
use_bfloat16: If True, use bfloat16 precision; else use float32.
transpose_input: 'bool' for whether to use the double transpose trick
data_dir: `str` for the directory of the training and validation data;
if 'null' (the literal string 'null') or implicitly False
then construct a null pipeline, consisting of empty images
and blank labels.
image_size: `int` for image size (both width and height).
num_parallel_calls: concurrency level to use when reading data from disk.
cache: if true, fill the dataset by repeating from its cache
"""
super(ImageNetInput, self).__init__(
is_training=is_training,
image_size=image_size,
use_bfloat16=use_bfloat16,
transpose_input=transpose_input)
self.data_dir = data_dir
if self.data_dir == 'null' or not self.data_dir:
self.data_dir = None
self.num_parallel_calls = num_parallel_calls
self.cache = cache
def _get_null_input(self, data):
"""Returns a null image (all black pixels).
Args:
data: element of a dataset, ignored in this method, since it produces
the same null image regardless of the element.
Returns:
a tensor representing a null image.
"""
del data # Unused since output is constant regardless of input
return tf.zeros([self.image_size, self.image_size, 3], tf.bfloat16
if self.use_bfloat16 else tf.float32)
def dataset_parser(self, value):
"""See base class."""
if not self.data_dir:
return value, tf.constant(0, tf.int32)
return super(ImageNetInput, self).dataset_parser(value)
def make_source_dataset(self, index, num_hosts):
"""See base class."""
if not self.data_dir:
tf.logging.info('Undefined data_dir implies null input')
return tf.data.Dataset.range(1).repeat().map(self._get_null_input)
# Shuffle the filenames to ensure better randomization.
file_pattern = os.path.join(
self.data_dir, 'train-*' if self.is_training else 'validation-*')
    # For multi-host training, we want each host to always process the same
# subset of files. Each host only sees a subset of the entire dataset,
# allowing us to cache larger datasets in memory.
dataset = tf.data.Dataset.list_files(file_pattern, shuffle=False)
dataset = dataset.shard(num_hosts, index)
if self.is_training and not self.cache:
dataset = dataset.repeat()
def fetch_dataset(filename):
buffer_size = 8 * 1024 * 1024 # 8 MiB per file
dataset = tf.data.TFRecordDataset(filename, buffer_size=buffer_size)
return dataset
# Read the data from disk in parallel
dataset = dataset.apply(
tf.data.experimental.parallel_interleave(
fetch_dataset, cycle_length=self.num_parallel_calls, sloppy=True))
if self.cache:
dataset = dataset.cache().apply(
tf.data.experimental.shuffle_and_repeat(1024 * 16))
else:
dataset = dataset.shuffle(1024)
return dataset
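  # Illustrative note (not part of the original class): Dataset.shard(num_hosts,
  # index) keeps every num_hosts-th file starting at `index`. Assuming the 1024
  # training shards are listed in lexicographic order, with num_hosts=8 host 3
  # would read train-00003-of-01024, train-00011-of-01024, train-00019-of-01024,
  # and so on, so each host always sees the same fixed subset of files.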
# Defines a selection of data from a Cloud Bigtable.
BigtableSelection = collections.namedtuple('BigtableSelection', [
'project', 'instance', 'table', 'prefix', 'column_family',
'column_qualifier'
])
class ImageNetBigtableInput(ImageNetTFExampleInput):
"""Generates ImageNet input_fn from a Bigtable for training or evaluation.
"""
def __init__(self, is_training, use_bfloat16, transpose_input, selection):
"""Constructs an ImageNet input from a BigtableSelection.
Args:
is_training: `bool` for whether the input is for training
use_bfloat16: If True, use bfloat16 precision; else use float32.
transpose_input: 'bool' for whether to use the double transpose trick
selection: a BigtableSelection specifying a part of a Bigtable.
"""
super(ImageNetBigtableInput, self).__init__(
is_training=is_training,
use_bfloat16=use_bfloat16,
transpose_input=transpose_input)
self.selection = selection
def make_source_dataset(self, index, num_hosts):
"""See base class."""
data = self.selection
try:
from tensorflow.contrib.cloud import BigtableClient # pylint: disable=g-import-not-at-top
except ImportError as e:
logging.exception('Bigtable is not supported in TensorFlow 2.x.')
raise e
client = BigtableClient(data.project, data.instance)
table = client.table(data.table)
ds = table.parallel_scan_prefix(data.prefix,
columns=[(data.column_family,
data.column_qualifier)])
# The Bigtable datasets will have the shape (row_key, data)
ds_data = ds.map(lambda index, data: data)
if self.is_training:
ds_data = ds_data.repeat()
return ds_data
|
the-stack_106_18329
|
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag
from recipe.serializers import TagSerializer
TAGS_URL = reverse('recipe:tag-list')
class PublicTagsApiTests(TestCase):
"""Test thje publicly available tags API"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""Test that login is required for retrieving tags"""
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
"""Test the authorized user tags API"""
def setUp(self):
self.user = get_user_model().objects.create_user(
'[email protected]',
'password123'
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_tags(self):
"""Test retrieving tags"""
Tag.objects.create(user=self.user, name='Vegan')
Tag.objects.create(user=self.user, name='Dessert')
res = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by('-name')
serializer = TagSerializer(tags, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_tags_limited_to_user(self):
"""Test that tags returned are for the authenticated user"""
user2 = get_user_model().objects.create_user(
'[email protected]',
'testpass'
)
Tag.objects.create(user=user2, name='Fruity')
tag = Tag.objects.create(user=self.user, name='Comfort Food')
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], tag.name)
|
the-stack_106_18331
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
import frappe
import time
from frappe import _, msgprint
from frappe.utils import flt, cstr, now, get_datetime_str, file_lock
from frappe.utils.background_jobs import enqueue
from frappe.model.base_document import BaseDocument, get_controller
from frappe.model.naming import set_new_name
from six import iteritems, string_types
from werkzeug.exceptions import NotFound, Forbidden
import hashlib, json
from frappe.model import optional_fields
from frappe.model.workflow import validate_workflow
from frappe.utils.file_manager import save_url
from frappe.utils.global_search import update_global_search
from frappe.integrations.doctype.webhook import run_webhooks
# once_only validation
# methods
def get_doc(*args, **kwargs):
"""returns a frappe.model.Document object.
:param arg1: Document dict or DocType name.
:param arg2: [optional] document name.
There are multiple ways to call `get_doc`
# will fetch the latest user object (with child table) from the database
user = get_doc("User", "[email protected]")
# create a new object
user = get_doc({
"doctype":"User"
"email_id": "[email protected]",
"roles: [
{"role": "System Manager"}
]
})
# create new object with keyword arguments
user = get_doc(doctype='User', email_id='[email protected]')
"""
if args:
if isinstance(args[0], BaseDocument):
# already a document
return args[0]
elif isinstance(args[0], string_types):
doctype = args[0]
elif isinstance(args[0], dict):
# passed a dict
kwargs = args[0]
else:
raise ValueError('First non keyword argument must be a string or dict')
if kwargs:
if 'doctype' in kwargs:
doctype = kwargs['doctype']
else:
raise ValueError('"doctype" is a required key')
controller = get_controller(doctype)
if controller:
return controller(*args, **kwargs)
raise ImportError(doctype)
class Document(BaseDocument):
"""All controllers inherit from `Document`."""
def __init__(self, *args, **kwargs):
"""Constructor.
:param arg1: DocType name as string or document **dict**
:param arg2: Document name, if `arg1` is DocType name.
If DocType name and document name are passed, the object will load
all values (including child documents) from the database.
"""
self.doctype = self.name = None
self._default_new_docs = {}
self.flags = frappe._dict()
if args and args[0] and isinstance(args[0], string_types):
			# first argument is doctype
if len(args)==1:
# single
self.doctype = self.name = args[0]
else:
self.doctype = args[0]
if isinstance(args[1], dict):
# filter
self.name = frappe.db.get_value(args[0], args[1], "name")
if self.name is None:
frappe.throw(_("{0} {1} not found").format(_(args[0]), args[1]),
frappe.DoesNotExistError)
else:
self.name = args[1]
self.load_from_db()
return
if args and args[0] and isinstance(args[0], dict):
# first argument is a dict
kwargs = args[0]
if kwargs:
# init base document
super(Document, self).__init__(kwargs)
self.init_valid_columns()
else:
# incorrect arguments. let's not proceed.
raise ValueError('Illegal arguments')
def reload(self):
"""Reload document from database"""
self.load_from_db()
def load_from_db(self):
"""Load document and children from database and create properties
from fields"""
if not getattr(self, "_metaclass", False) and self.meta.issingle:
single_doc = frappe.db.get_singles_dict(self.doctype)
if not single_doc:
single_doc = frappe.new_doc(self.doctype).as_dict()
single_doc["name"] = self.doctype
del single_doc["__islocal"]
super(Document, self).__init__(single_doc)
self.init_valid_columns()
self._fix_numeric_types()
else:
d = frappe.db.get_value(self.doctype, self.name, "*", as_dict=1)
if not d:
frappe.throw(_("{0} {1} not found").format(_(self.doctype), self.name), frappe.DoesNotExistError)
super(Document, self).__init__(d)
if self.name=="DocType" and self.doctype=="DocType":
from frappe.model.meta import doctype_table_fields
table_fields = doctype_table_fields
else:
table_fields = self.meta.get_table_fields()
for df in table_fields:
children = frappe.db.get_values(df.options,
{"parent": self.name, "parenttype": self.doctype, "parentfield": df.fieldname},
"*", as_dict=True, order_by="idx asc")
if children:
self.set(df.fieldname, children)
else:
self.set(df.fieldname, [])
# sometimes __setup__ can depend on child values, hence calling again at the end
if hasattr(self, "__setup__"):
self.__setup__()
def get_latest(self):
if not getattr(self, "latest", None):
self.latest = frappe.get_doc(self.doctype, self.name)
return self.latest
def check_permission(self, permtype='read', permlevel=None):
"""Raise `frappe.PermissionError` if not permitted"""
if not self.has_permission(permtype):
self.raise_no_permission_to(permlevel or permtype)
def has_permission(self, permtype="read", verbose=False):
"""Call `frappe.has_permission` if `self.flags.ignore_permissions`
is not set.
:param permtype: one of `read`, `write`, `submit`, `cancel`, `delete`"""
if self.flags.ignore_permissions:
return True
return frappe.has_permission(self.doctype, permtype, self, verbose=verbose)
def raise_no_permission_to(self, perm_type):
"""Raise `frappe.PermissionError`."""
frappe.flags.error_message = _('Insufficient Permission for {0}').format(self.doctype)
raise frappe.PermissionError
def insert(self, ignore_permissions=None, ignore_links=None, ignore_if_duplicate=False, ignore_mandatory=None):
"""Insert the document in the database (as a new document).
This will check for user permissions and execute `before_insert`,
`validate`, `on_update`, `after_insert` methods if they are written.
:param ignore_permissions: Do not check permissions if True."""
if self.flags.in_print:
return
self.flags.notifications_executed = []
if ignore_permissions!=None:
self.flags.ignore_permissions = ignore_permissions
if ignore_links!=None:
self.flags.ignore_links = ignore_links
if ignore_mandatory!=None:
self.flags.ignore_mandatory = ignore_mandatory
self.set("__islocal", True)
self.check_permission("create")
self._set_defaults()
self.set_user_and_timestamp()
self.set_docstatus()
self.check_if_latest()
self.run_method("before_insert")
self._validate_links()
self.set_new_name()
self.set_parent_in_children()
self.validate_higher_perm_levels()
self.flags.in_insert = True
self.run_before_save_methods()
self._validate()
self.set_docstatus()
self.flags.in_insert = False
# run validate, on update etc.
# parent
if getattr(self.meta, "issingle", 0):
self.update_single(self.get_valid_dict())
else:
try:
self.db_insert()
except frappe.DuplicateEntryError as e:
if not ignore_if_duplicate:
raise e
# children
for d in self.get_all_children():
d.db_insert()
self.run_method("after_insert")
self.flags.in_insert = True
if self.get("amended_from"):
self.copy_attachments_from_amended_from()
self.run_post_save_methods()
self.flags.in_insert = False
# delete __islocal
if hasattr(self, "__islocal"):
delattr(self, "__islocal")
return self
def save(self, *args, **kwargs):
"""Wrapper for _save"""
return self._save(*args, **kwargs)
def _save(self, ignore_permissions=None, ignore_version=None):
"""Save the current document in the database in the **DocType**'s table or
`tabSingles` (for single types).
This will check for user permissions and execute
`validate` before updating, `on_update` after updating triggers.
:param ignore_permissions: Do not check permissions if True.
:param ignore_version: Do not save version if True."""
if self.flags.in_print:
return
self.flags.notifications_executed = []
if ignore_permissions!=None:
self.flags.ignore_permissions = ignore_permissions
if ignore_version!=None:
self.flags.ignore_version = ignore_version
if self.get("__islocal") or not self.get("name"):
self.insert()
return
self.check_permission("write", "save")
self.set_user_and_timestamp()
self.set_docstatus()
self.check_if_latest()
self.set_parent_in_children()
self.set_name_in_children()
self.validate_higher_perm_levels()
self._validate_links()
self.run_before_save_methods()
if self._action != "cancel":
self._validate()
if self._action == "update_after_submit":
self.validate_update_after_submit()
self.set_docstatus()
# parent
if self.meta.issingle:
self.update_single(self.get_valid_dict())
else:
self.db_update()
self.update_children()
self.run_post_save_methods()
return self
def copy_attachments_from_amended_from(self):
'''Copy attachments from `amended_from`'''
from frappe.desk.form.load import get_attachments
#loop through attachments
for attach_item in get_attachments(self.doctype, self.amended_from):
#save attachments to new doc
save_url(attach_item.file_url, attach_item.file_name, self.doctype, self.name, "Home/Attachments", attach_item.is_private)
def update_children(self):
'''update child tables'''
for df in self.meta.get_table_fields():
self.update_child_table(df.fieldname, df)
def update_child_table(self, fieldname, df=None):
'''sync child table for given fieldname'''
rows = []
if not df:
df = self.meta.get_field(fieldname)
for d in self.get(df.fieldname):
d.db_update()
rows.append(d.name)
if df.options in (self.flags.ignore_children_type or []):
# do not delete rows for this because of flags
# hack for docperm :(
return
if rows:
# select rows that do not match the ones in the document
deleted_rows = frappe.db.sql("""select name from `tab{0}` where parent=%s
and parenttype=%s and parentfield=%s
and name not in ({1})""".format(df.options, ','.join(['%s'] * len(rows))),
[self.name, self.doctype, fieldname] + rows)
if len(deleted_rows) > 0:
# delete rows that do not match the ones in the document
frappe.db.sql("""delete from `tab{0}` where name in ({1})""".format(df.options,
','.join(['%s'] * len(deleted_rows))), tuple(row[0] for row in deleted_rows))
else:
# no rows found, delete all rows
frappe.db.sql("""delete from `tab{0}` where parent=%s
and parenttype=%s and parentfield=%s""".format(df.options),
(self.name, self.doctype, fieldname))
def get_doc_before_save(self):
if not getattr(self, '_doc_before_save', None):
try:
self._doc_before_save = frappe.get_doc(self.doctype, self.name)
except frappe.DoesNotExistError:
self._doc_before_save = None
frappe.clear_last_message()
return self._doc_before_save
def set_new_name(self, force=False):
"""Calls `frappe.naming.se_new_name` for parent and child docs."""
if self.flags.name_set and not force:
return
set_new_name(self)
# set name for children
for d in self.get_all_children():
set_new_name(d)
self.flags.name_set = True
def get_title(self):
'''Get the document title based on title_field or `title` or `name`'''
return self.get(self.meta.get_title_field())
def set_title_field(self):
"""Set title field based on template"""
def get_values():
values = self.as_dict()
# format values
for key, value in iteritems(values):
if value==None:
values[key] = ""
return values
if self.meta.get("title_field")=="title":
df = self.meta.get_field(self.meta.title_field)
if df.options:
self.set(df.fieldname, df.options.format(**get_values()))
elif self.is_new() and not self.get(df.fieldname) and df.default:
# set default title for new transactions (if default)
self.set(df.fieldname, df.default.format(**get_values()))
def update_single(self, d):
"""Updates values for Single type Document in `tabSingles`."""
frappe.db.sql("""delete from tabSingles where doctype=%s""", self.doctype)
for field, value in iteritems(d):
if field != "doctype":
frappe.db.sql("""insert into tabSingles(doctype, field, value)
values (%s, %s, %s)""", (self.doctype, field, value))
if self.doctype in frappe.db.value_cache:
del frappe.db.value_cache[self.doctype]
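	# Illustrative note (not part of the original method): a Single DocType is
	# stored as one row per field in `tabSingles`, e.g. a settings doctype could be
	# stored as rows like ('My Settings', 'language', 'en') and
	# ('My Settings', 'country', 'India'), which is why the update above first
	# deletes all rows for the doctype and then re-inserts the current field/value
	# pairs.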
def set_user_and_timestamp(self):
self._original_modified = self.modified
self.modified = now()
self.modified_by = frappe.session.user
if not self.creation:
self.creation = self.modified
if not self.owner:
self.owner = self.modified_by
for d in self.get_all_children():
d.modified = self.modified
d.modified_by = self.modified_by
if not d.owner:
d.owner = self.owner
if not d.creation:
d.creation = self.creation
frappe.flags.currently_saving.append((self.doctype, self.name))
def set_docstatus(self):
if self.docstatus==None:
self.docstatus=0
for d in self.get_all_children():
d.docstatus = self.docstatus
def _validate(self):
self._validate_mandatory()
self._validate_selects()
self._validate_length()
self._extract_images_from_text_editor()
self._sanitize_content()
self._save_passwords()
self.validate_workflow()
children = self.get_all_children()
for d in children:
d._validate_selects()
d._validate_length()
d._extract_images_from_text_editor()
d._sanitize_content()
d._save_passwords()
if self.is_new():
# don't set fields like _assign, _comments for new doc
for fieldname in optional_fields:
self.set(fieldname, None)
else:
self.validate_set_only_once()
def validate_workflow(self):
'''Validate if the workflow transition is valid'''
if self.meta.get_workflow():
validate_workflow(self)
def validate_set_only_once(self):
'''Validate that fields are not changed if not in insert'''
set_only_once_fields = self.meta.get_set_only_once_fields()
if set_only_once_fields and self._doc_before_save:
# document exists before saving
for field in set_only_once_fields:
fail = False
value = self.get(field.fieldname)
original_value = self._doc_before_save.get(field.fieldname)
if field.fieldtype=='Table':
fail = not self.is_child_table_same(field.fieldname)
elif field.fieldtype in ('Date', 'Datetime', 'Time'):
fail = str(value) != str(original_value)
else:
fail = value != original_value
if fail:
frappe.throw(_("Value cannot be changed for {0}").format(self.meta.get_label(field.fieldname)),
frappe.CannotChangeConstantError)
return False
def is_child_table_same(self, fieldname):
'''Validate child table is same as original table before saving'''
value = self.get(fieldname)
original_value = self._doc_before_save.get(fieldname)
same = True
if len(original_value) != len(value):
same = False
else:
# check all child entries
for i, d in enumerate(original_value):
new_child = value[i].as_dict(convert_dates_to_str = True)
original_child = d.as_dict(convert_dates_to_str = True)
# all fields must be same other than modified and modified_by
for key in ('modified', 'modified_by', 'creation'):
del new_child[key]
del original_child[key]
if original_child != new_child:
same = False
break
return same
def apply_fieldlevel_read_permissions(self):
'''Remove values the user is not allowed to read (called when loading in desk)'''
has_higher_permlevel = False
for p in self.get_permissions():
if p.permlevel > 0:
has_higher_permlevel = True
break
if not has_higher_permlevel:
return
has_access_to = self.get_permlevel_access('read')
for df in self.meta.fields:
if df.permlevel and not df.permlevel in has_access_to:
self.set(df.fieldname, None)
for table_field in self.meta.get_table_fields():
for df in frappe.get_meta(table_field.options).fields or []:
if df.permlevel and not df.permlevel in has_access_to:
for child in self.get(table_field.fieldname) or []:
child.set(df.fieldname, None)
def validate_higher_perm_levels(self):
"""If the user does not have permissions at permlevel > 0, then reset the values to original / default"""
if self.flags.ignore_permissions or frappe.flags.in_install:
return
has_access_to = self.get_permlevel_access()
high_permlevel_fields = self.meta.get_high_permlevel_fields()
if high_permlevel_fields:
self.reset_values_if_no_permlevel_access(has_access_to, high_permlevel_fields)
# check for child tables
for df in self.meta.get_table_fields():
			high_permlevel_fields = frappe.get_meta(df.options).get_high_permlevel_fields()
if high_permlevel_fields:
for d in self.get(df.fieldname):
d.reset_values_if_no_permlevel_access(has_access_to, high_permlevel_fields)
def get_permlevel_access(self, permission_type='write'):
if not hasattr(self, "_has_access_to"):
roles = frappe.get_roles()
self._has_access_to = []
for perm in self.get_permissions():
if perm.role in roles and perm.permlevel > 0 and perm.get(permission_type):
if perm.permlevel not in self._has_access_to:
self._has_access_to.append(perm.permlevel)
return self._has_access_to
def has_permlevel_access_to(self, fieldname, df=None, permission_type='read'):
if not df:
df = self.meta.get_field(fieldname)
return df.permlevel in self.get_permlevel_access(permission_type)
def get_permissions(self):
if self.meta.istable:
# use parent permissions
permissions = frappe.get_meta(self.parenttype).permissions
else:
permissions = self.meta.permissions
return permissions
def _set_defaults(self):
if frappe.flags.in_import:
return
new_doc = frappe.new_doc(self.doctype, as_dict=True)
self.update_if_missing(new_doc)
# children
for df in self.meta.get_table_fields():
new_doc = frappe.new_doc(df.options, as_dict=True)
value = self.get(df.fieldname)
if isinstance(value, list):
for d in value:
d.update_if_missing(new_doc)
def check_if_latest(self):
"""Checks if `modified` timestamp provided by document being updated is same as the
		`modified` timestamp in the database. If there is a difference, the document has been
updated in the database after the current copy was read. Will throw an error if
timestamps don't match.
Will also validate document transitions (Save > Submit > Cancel) calling
`self.check_docstatus_transition`."""
conflict = False
self._action = "save"
if not self.get('__islocal'):
if self.meta.issingle:
modified = frappe.db.sql('''select value from tabSingles
where doctype=%s and field='modified' for update''', self.doctype)
modified = modified and modified[0][0]
if modified and modified != cstr(self._original_modified):
conflict = True
else:
tmp = frappe.db.sql("""select modified, docstatus from `tab{0}`
where name = %s for update""".format(self.doctype), self.name, as_dict=True)
if not tmp:
frappe.throw(_("Record does not exist"))
else:
tmp = tmp[0]
modified = cstr(tmp.modified)
if modified and modified != cstr(self._original_modified):
conflict = True
self.check_docstatus_transition(tmp.docstatus)
if conflict:
frappe.msgprint(_("Error: Document has been modified after you have opened it") \
+ (" (%s, %s). " % (modified, self.modified)) \
+ _("Please refresh to get the latest document."),
raise_exception=frappe.TimestampMismatchError)
else:
self.check_docstatus_transition(0)
def check_docstatus_transition(self, docstatus):
"""Ensures valid `docstatus` transition.
Valid transitions are (number in brackets is `docstatus`):
- Save (0) > Save (0)
- Save (0) > Submit (1)
- Submit (1) > Submit (1)
- Submit (1) > Cancel (2)
"""
if not self.docstatus:
self.docstatus = 0
if docstatus==0:
if self.docstatus==0:
self._action = "save"
elif self.docstatus==1:
self._action = "submit"
self.check_permission("submit")
else:
raise frappe.DocstatusTransitionError(_("Cannot change docstatus from 0 to 2"))
elif docstatus==1:
if self.docstatus==1:
self._action = "update_after_submit"
self.check_permission("submit")
elif self.docstatus==2:
self._action = "cancel"
self.check_permission("cancel")
else:
raise frappe.DocstatusTransitionError(_("Cannot change docstatus from 1 to 0"))
elif docstatus==2:
raise frappe.ValidationError(_("Cannot edit cancelled document"))
def set_parent_in_children(self):
"""Updates `parent` and `parenttype` property in all children."""
for d in self.get_all_children():
d.parent = self.name
d.parenttype = self.doctype
def set_name_in_children(self):
# Set name for any new children
for d in self.get_all_children():
if not d.name:
set_new_name(d)
def validate_update_after_submit(self):
if self.flags.ignore_validate_update_after_submit:
return
self._validate_update_after_submit()
for d in self.get_all_children():
if d.is_new() and self.meta.get_field(d.parentfield).allow_on_submit:
				# in case of a new row, don't validate allow-on-submit if the table itself is allow-on-submit
continue
d._validate_update_after_submit()
# TODO check only allowed values are updated
def _validate_mandatory(self):
if self.flags.ignore_mandatory:
return
missing = self._get_missing_mandatory_fields()
for d in self.get_all_children():
missing.extend(d._get_missing_mandatory_fields())
if not missing:
return
for fieldname, msg in missing:
msgprint(msg)
if frappe.flags.print_messages:
print(self.as_json().encode("utf-8"))
raise frappe.MandatoryError('[{doctype}, {name}]: {fields}'.format(
fields=", ".join((each[0] for each in missing)),
doctype=self.doctype,
name=self.name))
def _validate_links(self):
if self.flags.ignore_links or self._action == "cancel":
return
invalid_links, cancelled_links = self.get_invalid_links()
for d in self.get_all_children():
result = d.get_invalid_links(is_submittable=self.meta.is_submittable)
invalid_links.extend(result[0])
cancelled_links.extend(result[1])
if invalid_links:
msg = ", ".join((each[2] for each in invalid_links))
frappe.throw(_("Could not find {0}").format(msg),
frappe.LinkValidationError)
if cancelled_links:
msg = ", ".join((each[2] for each in cancelled_links))
frappe.throw(_("Cannot link cancelled document: {0}").format(msg),
frappe.CancelledLinkError)
def get_all_children(self, parenttype=None):
"""Returns all children documents from **Table** type field in a list."""
ret = []
for df in self.meta.get("fields", {"fieldtype": "Table"}):
if parenttype:
if df.options==parenttype:
return self.get(df.fieldname)
value = self.get(df.fieldname)
if isinstance(value, list):
ret.extend(value)
return ret
def run_method(self, method, *args, **kwargs):
"""run standard triggers, plus those in hooks"""
if "flags" in kwargs:
del kwargs["flags"]
if hasattr(self, method) and hasattr(getattr(self, method), "__call__"):
fn = lambda self, *args, **kwargs: getattr(self, method)(*args, **kwargs)
else:
# hack! to run hooks even if method does not exist
fn = lambda self, *args, **kwargs: None
fn.__name__ = str(method)
out = Document.hook(fn)(self, *args, **kwargs)
self.run_notifications(method)
run_webhooks(self, method)
return out
def run_trigger(self, method, *args, **kwargs):
return self.run_method(method, *args, **kwargs)
def run_notifications(self, method):
'''Run notifications for this method'''
if frappe.flags.in_import or frappe.flags.in_patch or frappe.flags.in_install:
return
if self.flags.notifications_executed is None:
self.flags.notifications_executed = []
from frappe.email.doctype.notification.notification import evaluate_alert
if self.flags.notifications is None:
alerts = frappe.cache().hget('notifications', self.doctype)
if alerts is None:
alerts = frappe.get_all('Notification', fields=['name', 'event', 'method'],
filters={'enabled': 1, 'document_type': self.doctype})
frappe.cache().hset('notifications', self.doctype, alerts)
self.flags.notifications = alerts
if not self.flags.notifications:
return
def _evaluate_alert(alert):
if alert.name not in self.flags.notifications_executed:
evaluate_alert(self, alert.name, alert.event)
self.flags.notifications_executed.append(alert.name)
event_map = {
"on_update": "Save",
"after_insert": "New",
"on_submit": "Submit",
"on_cancel": "Cancel"
}
if not self.flags.in_insert:
# value change is not applicable in insert
event_map['validate'] = 'Value Change'
event_map['before_change'] = 'Value Change'
event_map['before_update_after_submit'] = 'Value Change'
for alert in self.flags.notifications:
event = event_map.get(method, None)
if event and alert.event == event:
_evaluate_alert(alert)
elif alert.event=='Method' and method == alert.method:
_evaluate_alert(alert)
@staticmethod
def whitelist(f):
f.whitelisted = True
return f
@whitelist.__func__
def _submit(self):
"""Submit the document. Sets `docstatus` = 1, then saves."""
self.docstatus = 1
self.save()
@whitelist.__func__
def _cancel(self):
"""Cancel the document. Sets `docstatus` = 2, then saves."""
self.docstatus = 2
self.save()
@whitelist.__func__
def submit(self):
"""Submit the document. Sets `docstatus` = 1, then saves."""
self._submit()
@whitelist.__func__
def cancel(self):
"""Cancel the document. Sets `docstatus` = 2, then saves."""
self._cancel()
def delete(self):
"""Delete document."""
frappe.delete_doc(self.doctype, self.name, flags=self.flags)
def run_before_save_methods(self):
"""Run standard methods before `INSERT` or `UPDATE`. Standard Methods are:
- `validate`, `before_save` for **Save**.
- `validate`, `before_submit` for **Submit**.
- `before_cancel` for **Cancel**
- `before_update_after_submit` for **Update after Submit**
Will also update title_field if set"""
self.load_doc_before_save()
self.reset_seen()
if self.flags.ignore_validate:
return
if self._action=="save":
self.run_method("validate")
self.run_method("before_save")
elif self._action=="submit":
self.run_method("validate")
self.run_method("before_submit")
elif self._action=="cancel":
self.run_method("before_cancel")
elif self._action=="update_after_submit":
self.run_method("before_update_after_submit")
self.set_title_field()
def load_doc_before_save(self):
'''Load the document from the database before saving'''
self._doc_before_save = None
if not (self.is_new()
and (getattr(self.meta, 'track_changes', False)
or self.meta.get_set_only_once_fields()
or self.meta.get_workflow())):
self.get_doc_before_save()
def run_post_save_methods(self):
"""Run standard methods after `INSERT` or `UPDATE`. Standard Methods are:
- `on_update` for **Save**.
- `on_update`, `on_submit` for **Submit**.
- `on_cancel` for **Cancel**
- `update_after_submit` for **Update after Submit**"""
if self._action=="save":
self.run_method("on_update")
elif self._action=="submit":
self.run_method("on_update")
self.run_method("on_submit")
elif self._action=="cancel":
self.run_method("on_cancel")
self.check_no_back_links_exist()
elif self._action=="update_after_submit":
self.run_method("on_update_after_submit")
self.run_method('on_change')
self.update_timeline_doc()
self.clear_cache()
self.notify_update()
update_global_search(self)
if getattr(self.meta, 'track_changes', False) and self._doc_before_save and not self.flags.ignore_version:
self.save_version()
if (self.doctype, self.name) in frappe.flags.currently_saving:
frappe.flags.currently_saving.remove((self.doctype, self.name))
self.latest = None
def clear_cache(self):
frappe.clear_document_cache(self.doctype, self.name)
def reset_seen(self):
'''Clear _seen property and set current user as seen'''
if getattr(self.meta, 'track_seen', False):
self._seen = json.dumps([frappe.session.user])
def notify_update(self):
"""Publish realtime that the current document is modified"""
frappe.publish_realtime("doc_update", {"modified": self.modified, "doctype": self.doctype, "name": self.name},
doctype=self.doctype, docname=self.name, after_commit=True)
if not self.meta.get("read_only") and not self.meta.get("issingle") and \
not self.meta.get("istable"):
data = {
"doctype": self.doctype,
"name": self.name,
"user": frappe.session.user
}
frappe.publish_realtime("list_update", data, after_commit=True)
def db_set(self, fieldname, value=None, update_modified=True, notify=False, commit=False):
'''Set a value in the document object, update the timestamp and update the database.
WARNING: This method does not trigger controller validations and should
be used very carefully.
:param fieldname: fieldname of the property to be updated, or a {"field":"value"} dictionary
:param value: value of the property to be updated
:param update_modified: default True. updates the `modified` and `modified_by` properties
:param notify: default False. run doc.notify_update() to send updates via socketio
:param commit: default False. run frappe.db.commit()
'''
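# Hedged usage sketch; the fieldnames and values below are hypothetical, not taken
# from this file, and controller validations are deliberately skipped by db_set:
#     doc.db_set('status', 'Closed')                       # single field
#     doc.db_set({'status': 'Closed', 'priority': 'Low'})   # dict form
#     doc.db_set('status', 'Closed', update_modified=False, commit=True)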
if isinstance(fieldname, dict):
self.update(fieldname)
else:
self.set(fieldname, value)
if update_modified and (self.doctype, self.name) not in frappe.flags.currently_saving:
# don't update modified timestamp if called from post save methods
# like on_update or on_submit
self.set("modified", now())
self.set("modified_by", frappe.session.user)
# to trigger notification on value change
self.run_method('before_change')
frappe.db.set_value(self.doctype, self.name, fieldname, value,
self.modified, self.modified_by, update_modified=update_modified)
self.run_method('on_change')
if notify:
self.notify_update()
self.clear_cache()
if commit:
frappe.db.commit()
def db_get(self, fieldname):
'''get database value for this fieldname'''
return frappe.db.get_value(self.doctype, self.name, fieldname)
def check_no_back_links_exist(self):
"""Check if document links to any active document before Cancel."""
from frappe.model.delete_doc import check_if_doc_is_linked, check_if_doc_is_dynamically_linked
if not self.flags.ignore_links:
check_if_doc_is_linked(self, method="Cancel")
check_if_doc_is_dynamically_linked(self, method="Cancel")
def save_version(self):
'''Save version info'''
version = frappe.new_doc('Version')
if version.set_diff(self._doc_before_save, self):
version.insert(ignore_permissions=True)
@staticmethod
def whitelist(f):
"""Decorator: Whitelist method to be called remotely via REST API."""
f.whitelisted = True
return f
@staticmethod
def hook(f):
"""Decorator: Make method `hookable` (i.e. extensible by another app).
Note: If each hooked method returns a value (dict), then all returns are
collated in one dict and returned. Ideally, don't return values in hookable
methods, set properties in the document."""
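# Hedged sketch of extending a hooked method from another app; the app, module and
# doctype names are assumptions for illustration only. In that app's hooks.py:
#     doc_events = {
#         "ToDo": {"on_update": "my_app.events.todo_on_update"},
#         "*": {"validate": "my_app.events.validate_any_doc"},
#     }
# Each handler is called as handler(doc, method, *args, **kwargs) after the standard
# controller method, and dict return values are merged into a single dict.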
def add_to_return_value(self, new_return_value):
if isinstance(new_return_value, dict):
if not self.get("_return_value"):
self._return_value = {}
self._return_value.update(new_return_value)
else:
self._return_value = new_return_value or self.get("_return_value")
def compose(fn, *hooks):
def runner(self, method, *args, **kwargs):
add_to_return_value(self, fn(self, *args, **kwargs))
for f in hooks:
add_to_return_value(self, f(self, method, *args, **kwargs))
return self._return_value
return runner
def composer(self, *args, **kwargs):
hooks = []
method = f.__name__
doc_events = frappe.get_doc_hooks()
for handler in doc_events.get(self.doctype, {}).get(method, []) \
+ doc_events.get("*", {}).get(method, []):
hooks.append(frappe.get_attr(handler))
composed = compose(f, *hooks)
return composed(self, method, *args, **kwargs)
return composer
def is_whitelisted(self, method):
fn = getattr(self, method, None)
if not fn:
raise NotFound("Method {0} not found".format(method))
elif not getattr(fn, "whitelisted", False):
raise Forbidden("Method {0} not whitelisted".format(method))
def validate_value(self, fieldname, condition, val2, doc=None, raise_exception=None):
"""Check that value of fieldname should be 'condition' val2
else throw Exception."""
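# Hedged usage sketch; fieldnames are hypothetical. `condition` is any operator
# understood by frappe.compare, e.g. '=', '!=', '>', 'in', 'not in', '^':
#     self.validate_value('qty', '>', 0)
#     self.validate_value('status', 'in', ['Open', 'Closed'])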
error_condition_map = {
"in": _("one of"),
"not in": _("none of"),
"^": _("beginning with"),
}
if not doc:
doc = self
val1 = doc.get_value(fieldname)
df = doc.meta.get_field(fieldname)
val2 = doc.cast(val2, df)
if not frappe.compare(val1, condition, val2):
label = doc.meta.get_label(fieldname)
condition_str = error_condition_map.get(condition, condition)
if doc.parentfield:
msg = _("Incorrect value in row {0}: {1} must be {2} {3}".format(doc.idx, label, condition_str, val2))
else:
msg = _("Incorrect value: {0} must be {1} {2}".format(label, condition_str, val2))
# raise passed exception or True
msgprint(msg, raise_exception=raise_exception or True)
def validate_table_has_rows(self, parentfield, raise_exception=None):
"""Raise exception if Table field is empty."""
if not (isinstance(self.get(parentfield), list) and len(self.get(parentfield)) > 0):
label = self.meta.get_label(parentfield)
frappe.throw(_("Table {0} cannot be empty").format(label), raise_exception or frappe.EmptyTableError)
def round_floats_in(self, doc, fieldnames=None):
"""Round floats for all `Currency`, `Float`, `Percent` fields for the given doc.
:param doc: Document whose numeric properties are to be rounded.
:param fieldnames: [Optional] List of fields to be rounded."""
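# Hedged usage sketch; the row object and fieldnames are hypothetical:
#     self.round_floats_in(item_row)                      # all Currency/Float/Percent fields
#     self.round_floats_in(item_row, ['rate', 'amount'])  # only the listed fields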
if not fieldnames:
fieldnames = (df.fieldname for df in
doc.meta.get("fields", {"fieldtype": ["in", ["Currency", "Float", "Percent"]]}))
for fieldname in fieldnames:
doc.set(fieldname, flt(doc.get(fieldname), self.precision(fieldname, doc.parentfield)))
def get_url(self):
"""Returns Desk URL for this document. `/desk#Form/{doctype}/{name}`"""
return "/desk#Form/{doctype}/{name}".format(doctype=self.doctype, name=self.name)
def add_comment(self, comment_type, text=None, comment_by=None, link_doctype=None, link_name=None):
"""Add a comment to this document.
:param comment_type: e.g. `Comment`. See Communication for more info."""
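# Hedged usage sketch; the comment text is illustrative only:
#     doc.add_comment('Comment', text='Approved by the review team')
#     doc.add_comment('Edited', 'Changed the due date')   # non-'Comment' types are stored as Version docs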
if comment_type=='Comment':
out = frappe.get_doc({
"doctype":"Communication",
"communication_type": "Comment",
"sender": comment_by or frappe.session.user,
"comment_type": comment_type,
"reference_doctype": self.doctype,
"reference_name": self.name,
"content": text or comment_type,
"link_doctype": link_doctype,
"link_name": link_name
}).insert(ignore_permissions=True)
else:
out = frappe.get_doc(dict(
doctype='Version',
ref_doctype= self.doctype,
docname= self.name,
data = frappe.as_json(dict(comment_type=comment_type, comment=text))
))
if comment_by:
out.owner = comment_by
out.insert(ignore_permissions=True)
return out
def add_seen(self, user=None):
'''Add the given/current user to the list of users who have seen this document (_seen)'''
if not user:
user = frappe.session.user
if self.meta.track_seen:
_seen = self.get('_seen') or []
_seen = frappe.parse_json(_seen)
if user not in _seen:
_seen.append(user)
frappe.db.set_value(self.doctype, self.name, '_seen', json.dumps(_seen), update_modified=False)
self._seen = json.dumps(_seen)
frappe.local.flags.commit = True
def add_viewed(self, user=None):
'''Add a view log entry when a user views a document'''
if not user:
user = frappe.session.user
if hasattr(self.meta, 'track_views') and self.meta.track_views:
frappe.get_doc({
"doctype": "View Log",
"viewed_by": frappe.session.user,
"reference_doctype": self.doctype,
"reference_name": self.name,
}).insert(ignore_permissions=True)
frappe.local.flags.commit = True
def get_signature(self):
"""Returns signature (hash) for private URL."""
return hashlib.sha224(get_datetime_str(self.creation).encode()).hexdigest()
def get_liked_by(self):
liked_by = getattr(self, "_liked_by", None)
if liked_by:
return json.loads(liked_by)
else:
return []
def set_onload(self, key, value):
if not self.get("__onload"):
self.set("__onload", frappe._dict())
self.get("__onload")[key] = value
def get_onload(self, key=None):
if not key:
return self.get("__onload", frappe._dict())
return self.get('__onload')[key]
def update_timeline_doc(self):
if frappe.flags.in_install or not self.meta.get("timeline_field"):
return
timeline_doctype = self.meta.get_link_doctype(self.meta.timeline_field)
timeline_name = self.get(self.meta.timeline_field)
if not (timeline_doctype and timeline_name):
return
# update timeline doc in communication if it is different than current timeline doc
frappe.db.sql("""update `tabCommunication`
set timeline_doctype=%(timeline_doctype)s, timeline_name=%(timeline_name)s
where
reference_doctype=%(doctype)s and reference_name=%(name)s
and (timeline_doctype is null or timeline_doctype != %(timeline_doctype)s
or timeline_name is null or timeline_name != %(timeline_name)s)""",
{
"doctype": self.doctype,
"name": self.name,
"timeline_doctype": timeline_doctype,
"timeline_name": timeline_name
})
def queue_action(self, action, **kwargs):
'''Run an action in the background. If the action has an inner function,
like _submit for submit, it will call that instead'''
# call _submit instead of submit, so you can override submit to call
# run_delayed based on some action
# See: Stock Reconciliation
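# Hedged usage sketch: submit through a background worker instead of the web request;
# extra keyword arguments are forwarded to the queued action:
#     doc.queue_action('submit')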
if hasattr(self, '_' + action):
action = '_' + action
if file_lock.lock_exists(self.get_signature()):
frappe.throw(_('This document is currently queued for execution. Please try again'),
title=_('Document Queued'))
self.lock()
enqueue('frappe.model.document.execute_action', doctype=self.doctype, name=self.name,
action=action, **kwargs)
def lock(self, timeout=None):
'''Creates a lock file for the given document. If timeout is set,
it will retry every second to acquire the lock for up to `timeout` seconds.
:param timeout: Timeout in seconds, default 0'''
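# Hedged usage sketch: hold the file lock while doing long-running work, retrying
# for up to 10 seconds to acquire it:
#     doc.lock(timeout=10)
#     try:
#         ...            # do the work
#     finally:
#         doc.unlock()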
signature = self.get_signature()
if file_lock.lock_exists(signature):
lock_exists = True
if timeout:
for i in range(timeout):
time.sleep(1)
if not file_lock.lock_exists(signature):
lock_exists = False
break
if lock_exists:
raise frappe.DocumentLockedError
file_lock.create_lock(signature)
def unlock(self):
'''Delete the lock file for this document'''
file_lock.delete_lock(self.get_signature())
def execute_action(doctype, name, action, **kwargs):
'''Execute an action on a document (called by background worker)'''
doc = frappe.get_doc(doctype, name)
doc.unlock()
try:
getattr(doc, action)(**kwargs)
except Exception:
frappe.db.rollback()
# add a comment (?)
if frappe.local.message_log:
msg = json.loads(frappe.local.message_log[-1]).get('message')
else:
msg = '<pre><code>' + frappe.get_traceback() + '</code></pre>'
doc.add_comment('Comment', _('Action Failed') + '<br><br>' + msg)
doc.notify_update()
|
the-stack_106_18332
|
#!/usr/bin/env python3
import signal
import traceback
import sys
import argparse
from . import Logger, DefaultConfig, RedisConfig, NetworkConfig
class Scripter:
def __init__(self,
log_dir=None, log_level=Logger.INFO, log_sysout_level=Logger.DEBUG, log_source=None):
self.logger = Logger(log_dir=log_dir, level=log_level, sysout_level=log_sysout_level, source=log_source)
self.cfg = DefaultConfig()
self.rd_cfg = RedisConfig()
self.n_cfg = NetworkConfig()
self.exit_threads = False
self.exit_code = 1
signal.signal(signal.SIGINT, self._sigint_handler)
signal.signal(signal.SIGTERM, self._sigterm_handler)
def init_arg_parser(self, description='', args={}):
self.parser = argparse.ArgumentParser(description=description)
try:
args = args.items()
except (AttributeError, TypeError):
pass
else:
for arg, desc in args:
self.parser.add_argument(arg, help=desc)
def get_args(self):
return self.parser.parse_args()
def _sigint_handler(self, sig, frame):
self.exit_threads = True
def _sigterm_handler(self, sig, frame):
self.exit_threads = True
self.exit_code = 0
def run(self):
try:
self.logger.log_info("Running script")
self.run_main()
except:
self.logger.log_crit(traceback.format_exc())
finally:
self.logger.log_info("Closing script")
self.close_script()
signal.setitimer(signal.ITIMER_REAL, 5) # seconds...
sys.exit(self.exit_code)
def run_main(self):
raise NotImplementedError
def close_script(self):
raise NotImplementedError
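# Hedged usage sketch (class, argument and log-source names are hypothetical):
# subclass Scripter, implement run_main/close_script, and poll self.exit_threads in
# long loops so SIGINT/SIGTERM can stop the script cleanly.
#
#     class MyJob(Scripter):
#         def run_main(self):
#             self.init_arg_parser('My job', {'--count': 'number of items to process'})
#             args = self.get_args()
#             while not self.exit_threads:
#                 pass   # one unit of work per iteration
#         def close_script(self):
#             pass       # release resources here
#
#     if __name__ == '__main__':
#         MyJob(log_source='my_job').run()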
|
the-stack_106_18333
|
description = 'Verify the user can create a new page from the project page'
pages = ['login',
'common',
'index',
'project_pages']
def setup(data):
common.access_golem(data.env.url, data.env.admin)
index.create_access_project('test')
common.navigate_menu('Pages')
def test(data):
store('page_name', 'page_' + random('cccc'))
project_pages.add_page(data.page_name)
project_pages.verify_page_exists(data.page_name)
|