the-stack_106_25489
from typing import List, Optional, Iterable
from ....head.database import Database as db
from ....globals import SURVEY_TYPES
from ....head.data_management import DataManager as Dm
from ....head.messages import Messages as Msg
from ....gui.TopWindow import TopWindow
from ....head.objects.survey import Survey
class ConfigCalculationActTemplate:
LIST_COLUMNS = "ŚKD [mm]",  # column label for the jet diameter, in mm
SURVEY_ARGS = "jet_diameter",
FRAME_NUMBER = 9
OUTPUT_FRAME_NUMBER = 12
NEEDED_SURVEY_TYPES = "press", "pressthru"
SHOW_VALUES = "press"
def __init__(self, top: TopWindow):
self.top = top
self.frame = top.frames[self.FRAME_NUMBER]
self.surveys = {}
self.frame.ch_fuel_cbox.bind(
"<Button>", lambda e: self.__set_fuels_cbox())
self.activate_events()
self.set_buttons()
def activate_events(self):
self.frame.ch_fuel_cbox.bind(
"<<ComboboxSelected>>", lambda e: self.__load_surveys())
def set_buttons(self):
self.frame.navi_buttons[0].configure(
command=lambda: self.start())
self.frame.navi_buttons[1].configure(
command=lambda: self.clean())
def start(self):
data = self.parse_data()
if data:
self.start_calculation(data)
def start_calculation(self, data):
"To be overwritten by the child class."
pass
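# A minimal sketch of how a child class is expected to override start_calculation:
# unpack the tuple returned by parse_data and run the actual computation. The
# ConfigPressureCalculation name and the run_pressure_model/show_results helpers
# are hypothetical, not part of this module.
#
#     class ConfigPressureCalculation(ConfigCalculationActTemplate):
#         def start_calculation(self, data):
#             fuel_name, cboxes, inputs, surveys, times = data
#             results = self.run_pressure_model(fuel_name, inputs, surveys, times)
#             self.top.frames[self.OUTPUT_FRAME_NUMBER].show_results(results)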
def parse_data(self):
message = self.frame.show_message
fuel_name = self.frame.ch_fuel_cbox.get()
if not fuel_name:
message(Msg.needs_to_choose_fuel)
return
inputs, report = self.valid_inputs(self.get_values_from_inputs())
self.point_mistakes(report)
if report:
message(self.get_msg_from_report(report))
return
cboxes, invalid_fields = self.get_valid_values_from_cboxes()
if invalid_fields:
message(Msg.needs_to_fulfil_field(invalid_fields[0]))
return
surveys = self.get_chosen_surveys()
survey_issue = self.check_surveys(surveys)
if survey_issue:
message(survey_issue)
return
times = self.get_times()
self.frame.hide_message()
return fuel_name, cboxes, inputs, surveys, times
def get_values_from_inputs(self):
if self.frame.inputs_frame:
return self.frame.inputs_frame.get_inserted_values()
def get_valid_values_from_cboxes(self):
if self.frame.cboxes_frame:
return self.frame.cboxes_frame.get_validated_values()
def get_chosen_surveys(self):
ids = tuple(self.frame.surveys_list.tree_frame.get_chosen_ids())
# flatten the per-type survey lists in the same order they were added to the tree
surveys_list = tuple(s for type_surveys in self.surveys.values() if type_surveys for s in type_surveys)
return [surveys_list[i] for i in ids]
def get_times(self):
ids = tuple(self.frame.surveys_list.tree_frame.get_chosen_ids())
lines = self.frame.surveys_list.surveys_t_lines
selected_lines = (lines[i] for i in ids)
x_values = (line.get_xdata() for line in selected_lines)
parse_val = lambda x: x[0] if isinstance(x, (list, tuple)) else x
return list(map(parse_val, x_values))
def valid_inputs(self, inputs):
values, report = Dm.to_float(inputs)
if report:
return values, report
if inputs:
names = tuple(self.flatten_nested_list(self.frame.INPUT_VARIABLES))
report = Dm.are_bigger_than_0(values, names)
return values, report
@staticmethod
def check_surveys(surveys):
jets = set(s.jet_diameter for s in surveys)
if len(jets) < 2:
return Msg.needs_2_diff_jets_diam
def point_mistakes(self, report):
if self.frame.inputs_frame:
self.frame.inputs_frame.point_entries(report)
@staticmethod
def get_msg_from_report(report):
for point in report:
if point:
return point
def flatten_nested_list(self, nested):
for item in nested:
if isinstance(item, Iterable) and not isinstance(item, str):
for val in self.flatten_nested_list(item):
yield val
else:
yield item
def clean(self):
self.frame.ch_fuel_cbox.set('')
if self.frame.inputs_frame:
self.frame.inputs_frame.clean()
if self.frame.cboxes_frame:
self.frame.cboxes_frame.clean()
self.frame.surveys_list.clean()
self.frame.hide_message()
def __set_fuels_cbox(self):
fuels = db.get_fuels_list()
self.frame.ch_fuel_cbox.config(values=fuels)
def __load_surveys(self):
fuel_name = self.frame.ch_fuel_cbox.get()
if not fuel_name:
return
for survey_type in self.NEEDED_SURVEY_TYPES:
self.surveys.update(
{survey_type: self.__load_surveys_from_db(
fuel_name, survey_type)})
self.frame.surveys_list.hide_lines()
self.__set_surveys_list()
def __set_surveys_list(self):
list_data = []
plots_data = []
for survey_type in self.surveys.keys():
if not self.surveys[survey_type]:
continue
for survey in self.surveys[survey_type]:
list_data.append(
[getattr(survey, arg) for arg in self.SURVEY_ARGS])
if self.SHOW_VALUES == "thrust" and survey.type == "pressthru":
plots_data.append((survey.values[1],
survey.sampling_time,
survey.comment))
else:
plots_data.append((survey.values[0],
survey.sampling_time,
survey.comment))
self.frame.surveys_list.tree_frame.set_data(list_data)
self.frame.surveys_list.set_plots_data(plots_data)
@staticmethod
def __load_surveys_from_db(
fuel_name: str, survey_type: str)\
-> Optional[List[Survey]]:
return db.load_surveys(fuel_name, survey_type)
the-stack_106_25493
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from heat.common import exception
from heat.common import short_id
from heat.common import template_format
from heat.engine.clients.os.keystone import fake_keystoneclient as fake_ks
from heat.engine import node_data
from heat.engine.resources.aws.iam import user
from heat.engine.resources.openstack.heat import access_policy as ap
from heat.engine import scheduler
from heat.engine import stk_defn
from heat.objects import resource_data as resource_data_object
from heat.tests import common
from heat.tests import utils
user_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Just a User",
"Parameters" : {},
"Resources" : {
"CfnUser" : {
"Type" : "AWS::IAM::User"
}
}
}
'''
user_template_password = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Just a User",
"Parameters" : {},
"Resources" : {
"CfnUser" : {
"Type" : "AWS::IAM::User",
"Properties": {
"LoginProfile": { "Password": "myP@ssW0rd" }
}
}
}
}
'''
user_accesskey_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Just a User",
"Parameters" : {},
"Resources" : {
"CfnUser" : {
"Type" : "AWS::IAM::User"
},
"HostKeys" : {
"Type" : "AWS::IAM::AccessKey",
"Properties" : {
"UserName" : {"Ref": "CfnUser"}
}
}
}
}
'''
user_policy_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Just a User",
"Parameters" : {},
"Resources" : {
"CfnUser" : {
"Type" : "AWS::IAM::User",
"Properties" : {
"Policies" : [ { "Ref": "WebServerAccessPolicy"} ]
}
},
"WebServerAccessPolicy" : {
"Type" : "OS::Heat::AccessPolicy",
"Properties" : {
"AllowedResources" : [ "WikiDatabase" ]
}
},
"WikiDatabase" : {
"Type" : "AWS::EC2::Instance",
}
}
}
'''
class UserTest(common.HeatTestCase):
def setUp(self):
super(UserTest, self).setUp()
self.stack_name = 'test_user_stack_%s' % utils.random_name()
self.username = '%s-CfnUser-aabbcc' % self.stack_name
self.fc = fake_ks.FakeKeystoneClient(username=self.username)
cfg.CONF.set_default('heat_stack_user_role', 'stack_user_role')
def create_user(self, t, stack, resource_name,
project_id='stackproject', user_id='dummy_user',
password=None):
self.m.StubOutWithMock(user.User, 'keystone')
user.User.keystone().MultipleTimes().AndReturn(self.fc)
self.m.StubOutWithMock(fake_ks.FakeKeystoneClient,
'create_stack_domain_project')
fake_ks.FakeKeystoneClient.create_stack_domain_project(
stack.id).AndReturn(project_id)
resource_defns = stack.t.resource_definitions(stack)
rsrc = user.User(resource_name,
resource_defns[resource_name],
stack)
rsrc.store()
self.m.StubOutWithMock(short_id, 'get_id')
short_id.get_id(rsrc.uuid).MultipleTimes().AndReturn('aabbcc')
self.m.StubOutWithMock(fake_ks.FakeKeystoneClient,
'create_stack_domain_user')
fake_ks.FakeKeystoneClient.create_stack_domain_user(
username=self.username, password=password,
project_id=project_id).AndReturn(user_id)
self.m.ReplayAll()
self.assertIsNone(rsrc.validate())
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
return rsrc
def test_user(self):
t = template_format.parse(user_template)
stack = utils.parse_stack(t, stack_name=self.stack_name)
rsrc = self.create_user(t, stack, 'CfnUser')
self.assertEqual('dummy_user', rsrc.resource_id)
self.assertEqual(self.username, rsrc.FnGetRefId())
self.assertRaises(exception.InvalidTemplateAttribute,
rsrc.FnGetAtt, 'Foo')
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertIsNone(rsrc.handle_suspend())
self.assertIsNone(rsrc.handle_resume())
rsrc.resource_id = None
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
rsrc.resource_id = self.fc.access
rsrc.state_set(rsrc.CREATE, rsrc.COMPLETE)
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
rsrc.state_set(rsrc.CREATE, rsrc.COMPLETE)
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_user_password(self):
t = template_format.parse(user_template_password)
stack = utils.parse_stack(t, stack_name=self.stack_name)
rsrc = self.create_user(t, stack, 'CfnUser', password=u'myP@ssW0rd')
self.assertEqual('dummy_user', rsrc.resource_id)
self.assertEqual(self.username, rsrc.FnGetRefId())
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_user_validate_policies(self):
t = template_format.parse(user_policy_template)
stack = utils.parse_stack(t, stack_name=self.stack_name)
rsrc = self.create_user(t, stack, 'CfnUser')
self.assertEqual('dummy_user', rsrc.resource_id)
self.assertEqual(self.username, rsrc.FnGetRefId())
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual([u'WebServerAccessPolicy'],
rsrc.properties['Policies'])
# OK
self.assertTrue(rsrc._validate_policies([u'WebServerAccessPolicy']))
# Resource name doesn't exist in the stack
self.assertFalse(rsrc._validate_policies([u'NoExistAccessPolicy']))
# Resource name is wrong Resource type
self.assertFalse(rsrc._validate_policies([u'NoExistAccessPolicy',
u'WikiDatabase']))
# Wrong type (AWS embedded policy format, not yet supported)
dict_policy = {"PolicyName": "AccessForCFNInit",
"PolicyDocument":
{"Statement": [{"Effect": "Allow",
"Action":
"cloudformation:DescribeStackResource",
"Resource": "*"}]}}
# However we should just ignore it to avoid breaking existing templates
self.assertTrue(rsrc._validate_policies([dict_policy]))
self.m.VerifyAll()
def test_user_create_bad_policies(self):
t = template_format.parse(user_policy_template)
t['Resources']['CfnUser']['Properties']['Policies'] = ['NoExistBad']
stack = utils.parse_stack(t, stack_name=self.stack_name)
resource_name = 'CfnUser'
resource_defns = stack.t.resource_definitions(stack)
rsrc = user.User(resource_name,
resource_defns[resource_name],
stack)
self.assertRaises(exception.InvalidTemplateAttribute,
rsrc.handle_create)
def test_user_access_allowed(self):
self.m.StubOutWithMock(ap.AccessPolicy, 'access_allowed')
ap.AccessPolicy.access_allowed('a_resource').AndReturn(True)
ap.AccessPolicy.access_allowed('b_resource').AndReturn(False)
self.m.ReplayAll()
t = template_format.parse(user_policy_template)
stack = utils.parse_stack(t, stack_name=self.stack_name)
rsrc = self.create_user(t, stack, 'CfnUser')
self.assertEqual('dummy_user', rsrc.resource_id)
self.assertEqual(self.username, rsrc.FnGetRefId())
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertTrue(rsrc.access_allowed('a_resource'))
self.assertFalse(rsrc.access_allowed('b_resource'))
self.m.VerifyAll()
def test_user_access_allowed_ignorepolicy(self):
self.m.StubOutWithMock(ap.AccessPolicy, 'access_allowed')
ap.AccessPolicy.access_allowed('a_resource').AndReturn(True)
ap.AccessPolicy.access_allowed('b_resource').AndReturn(False)
self.m.ReplayAll()
t = template_format.parse(user_policy_template)
t['Resources']['CfnUser']['Properties']['Policies'] = [
'WebServerAccessPolicy', {'an_ignored': 'policy'}]
stack = utils.parse_stack(t, stack_name=self.stack_name)
rsrc = self.create_user(t, stack, 'CfnUser')
self.assertEqual('dummy_user', rsrc.resource_id)
self.assertEqual(self.username, rsrc.FnGetRefId())
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertTrue(rsrc.access_allowed('a_resource'))
self.assertFalse(rsrc.access_allowed('b_resource'))
self.m.VerifyAll()
def test_user_refid_rsrc_id(self):
t = template_format.parse(user_template)
stack = utils.parse_stack(t)
rsrc = stack['CfnUser']
rsrc.resource_id = 'phy-rsrc-id'
self.assertEqual('phy-rsrc-id', rsrc.FnGetRefId())
def test_user_refid_convg_cache_data(self):
t = template_format.parse(user_template)
cache_data = {'CfnUser': node_data.NodeData.from_dict({
'uuid': mock.ANY,
'id': mock.ANY,
'action': 'CREATE',
'status': 'COMPLETE',
'reference_id': 'convg_xyz'
})}
stack = utils.parse_stack(t, cache_data=cache_data)
rsrc = stack.defn['CfnUser']
self.assertEqual('convg_xyz', rsrc.FnGetRefId())
class AccessKeyTest(common.HeatTestCase):
def setUp(self):
super(AccessKeyTest, self).setUp()
self.username = utils.PhysName('test_stack', 'CfnUser')
self.credential_id = 'acredential123'
self.fc = fake_ks.FakeKeystoneClient(username=self.username,
user_id='dummy_user',
credential_id=self.credential_id)
cfg.CONF.set_default('heat_stack_user_role', 'stack_user_role')
def create_user(self, t, stack, resource_name,
project_id='stackproject', user_id='dummy_user',
password=None):
self.m.StubOutWithMock(user.User, 'keystone')
user.User.keystone().MultipleTimes().AndReturn(self.fc)
self.m.ReplayAll()
rsrc = stack[resource_name]
self.assertIsNone(rsrc.validate())
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
stk_defn.update_resource_data(stack.defn, resource_name,
rsrc.node_data())
return rsrc
def create_access_key(self, t, stack, resource_name):
rsrc = stack[resource_name]
self.assertIsNone(rsrc.validate())
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
return rsrc
def test_access_key(self):
t = template_format.parse(user_accesskey_template)
stack = utils.parse_stack(t)
self.create_user(t, stack, 'CfnUser')
rsrc = self.create_access_key(t, stack, 'HostKeys')
self.m.VerifyAll()
self.assertEqual(self.fc.access,
rsrc.resource_id)
self.assertEqual(self.fc.secret,
rsrc._secret)
# Ensure the resource data has been stored correctly
rs_data = resource_data_object.ResourceData.get_all(rsrc)
self.assertEqual(self.fc.secret, rs_data.get('secret_key'))
self.assertEqual(self.fc.credential_id, rs_data.get('credential_id'))
self.assertEqual(2, len(rs_data.keys()))
self.assertEqual(utils.PhysName(stack.name, 'CfnUser'),
rsrc.FnGetAtt('UserName'))
rsrc._secret = None
self.assertEqual(self.fc.secret,
rsrc.FnGetAtt('SecretAccessKey'))
self.assertRaises(exception.InvalidTemplateAttribute,
rsrc.FnGetAtt, 'Foo')
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_access_key_get_from_keystone(self):
self.m.StubOutWithMock(user.AccessKey, 'keystone')
user.AccessKey.keystone().MultipleTimes().AndReturn(self.fc)
self.m.ReplayAll()
t = template_format.parse(user_accesskey_template)
stack = utils.parse_stack(t)
self.create_user(t, stack, 'CfnUser')
rsrc = self.create_access_key(t, stack, 'HostKeys')
# Delete the resource data for secret_key, to test that existing
# stacks which don't have the resource_data stored will continue
# working via retrieving the keypair from keystone
resource_data_object.ResourceData.delete(rsrc, 'credential_id')
resource_data_object.ResourceData.delete(rsrc, 'secret_key')
self.assertRaises(exception.NotFound,
resource_data_object.ResourceData.get_all,
rsrc)
rsrc._secret = None
rsrc._data = None
self.assertEqual(self.fc.secret,
rsrc.FnGetAtt('SecretAccessKey'))
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_access_key_no_user(self):
self.m.ReplayAll()
t = template_format.parse(user_accesskey_template)
# Set the resource properties UserName to an unknown user
t['Resources']['HostKeys']['Properties']['UserName'] = 'NonExistent'
stack = utils.parse_stack(t)
stack['CfnUser'].resource_id = self.fc.user_id
resource_defns = stack.t.resource_definitions(stack)
rsrc = user.AccessKey('HostKeys',
resource_defns['HostKeys'],
stack)
create = scheduler.TaskRunner(rsrc.create)
self.assertRaises(exception.ResourceFailure, create)
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
class AccessPolicyTest(common.HeatTestCase):
def test_accesspolicy_create_ok(self):
t = template_format.parse(user_policy_template)
stack = utils.parse_stack(t)
resource_name = 'WebServerAccessPolicy'
resource_defns = stack.t.resource_definitions(stack)
rsrc = ap.AccessPolicy(resource_name,
resource_defns[resource_name],
stack)
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
def test_accesspolicy_create_ok_empty(self):
t = template_format.parse(user_policy_template)
resource_name = 'WebServerAccessPolicy'
t['Resources'][resource_name]['Properties']['AllowedResources'] = []
stack = utils.parse_stack(t)
resource_defns = stack.t.resource_definitions(stack)
rsrc = ap.AccessPolicy(resource_name,
resource_defns[resource_name],
stack)
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
def test_accesspolicy_create_err_notfound(self):
t = template_format.parse(user_policy_template)
resource_name = 'WebServerAccessPolicy'
t['Resources'][resource_name]['Properties']['AllowedResources'] = [
'NoExistResource']
stack = utils.parse_stack(t)
self.assertRaises(exception.StackValidationFailed, stack.validate)
def test_accesspolicy_access_allowed(self):
t = template_format.parse(user_policy_template)
resource_name = 'WebServerAccessPolicy'
stack = utils.parse_stack(t)
resource_defns = stack.t.resource_definitions(stack)
rsrc = ap.AccessPolicy(resource_name,
resource_defns[resource_name],
stack)
self.assertTrue(rsrc.access_allowed('WikiDatabase'))
self.assertFalse(rsrc.access_allowed('NotWikiDatabase'))
self.assertFalse(rsrc.access_allowed(None))
the-stack_106_25496
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for librispeech dataset module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_datasets import testing
from tensorflow_datasets.audio import librispeech
class LibrispeechTest(testing.DatasetBuilderTestCase):
DATASET_CLASS = librispeech.Librispeech
BUILDER_CONFIG_NAMES_TO_TEST = ["plain_text", "subwords8k"]
SPLITS = {
"train_clean100": 2,
"train_clean360": 2,
"train_other500": 2,
"test_clean": 2,
"test_other": 2,
"dev_clean": 2,
"dev_other": 2,
}
DL_EXTRACT_RESULT = {
"train_clean100": "train-clean-100",
"train_clean360": "train-clean-360",
"train_other500": "train-other-500",
"test_clean": "test-clean",
"test_other": "test-other",
"dev_clean": "dev-clean",
"dev_other": "dev-other",
}
if __name__ == "__main__":
testing.test_main()
the-stack_106_25498
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import RobertaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from .test_configuration_common import ConfigTester
from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
import tensorflow as tf
import numpy
from transformers.modeling_tf_roberta import (
TFRobertaModel,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaForQuestionAnswering,
TFRobertaForMultipleChoice,
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class TFRobertaModelTester:
def __init__(
self, parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_input_mask = True
self.use_token_type_ids = True
self.use_labels = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = RobertaConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
return_dict=True,
)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def create_and_check_roberta_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFRobertaModel(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
inputs = [input_ids, input_mask]
result = model(inputs)
result = model(input_ids)
self.parent.assertListEqual(
list(result["last_hidden_state"].shape), [self.batch_size, self.seq_length, self.hidden_size]
)
def create_and_check_roberta_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFRobertaForMaskedLM(config=config)
result = model([input_ids, input_mask, token_type_ids])
self.parent.assertListEqual(list(result["logits"].shape), [self.batch_size, self.seq_length, self.vocab_size])
def create_and_check_roberta_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = TFRobertaForTokenClassification(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertListEqual(list(result["logits"].shape), [self.batch_size, self.seq_length, self.num_labels])
def create_and_check_roberta_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFRobertaForQuestionAnswering(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertListEqual(list(result["start_logits"].shape), [self.batch_size, self.seq_length])
self.parent.assertListEqual(list(result["end_logits"].shape), [self.batch_size, self.seq_length])
def create_and_check_roberta_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = TFRobertaForMultipleChoice(config=config)
multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
inputs = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
result = model(inputs)
self.parent.assertListEqual(list(result["logits"].shape), [self.batch_size, self.num_choices])
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFRobertaModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
TFRobertaModel,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaForQuestionAnswering,
)
if is_tf_available()
else ()
)
def setUp(self):
self.model_tester = TFRobertaModelTester(self)
self.config_tester = ConfigTester(self, config_class=RobertaConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_roberta_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_roberta_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_roberta_for_masked_lm(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_roberta_for_token_classification(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_roberta_for_question_answering(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_roberta_for_multiple_choice(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFRobertaModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_tf
class TFRobertaModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_masked_lm(self):
model = TFRobertaForMaskedLM.from_pretrained("roberta-base")
input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
output = model(input_ids)[0]
expected_shape = [1, 11, 50265]
self.assertEqual(list(output.numpy().shape), expected_shape)
# compare the actual values for a slice.
expected_slice = tf.constant(
[[[33.8802, -4.3103, 22.7761], [4.6539, -2.8098, 13.6253], [1.8228, -3.6898, 8.8600]]]
)
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
@slow
def test_inference_no_head(self):
model = TFRobertaModel.from_pretrained("roberta-base")
input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
output = model(input_ids)[0]
# compare the actual values for a slice.
expected_slice = tf.constant(
[[[-0.0231, 0.0782, 0.0074], [-0.1854, 0.0540, -0.0175], [0.0548, 0.0799, 0.1687]]]
)
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
@slow
def test_inference_classification_head(self):
model = TFRobertaForSequenceClassification.from_pretrained("roberta-large-mnli")
input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
output = model(input_ids)[0]
expected_shape = [1, 3]
self.assertEqual(list(output.numpy().shape), expected_shape)
expected_tensor = tf.constant([[-0.9469, 0.3913, 0.5118]])
self.assertTrue(numpy.allclose(output.numpy(), expected_tensor.numpy(), atol=1e-4))
the-stack_106_25504
import pytest
import torch
import torch.nn as nn
import random
import itertools
from hydra.experimental import initialize, compose
from train_gata import (
request_infos_for_train,
request_infos_for_eval,
get_game_files,
GATADoubleDQN,
TransitionCache,
Transition,
ReplayBuffer,
main,
)
from agent import EpsilonGreedyAgent
from preprocessor import PAD, UNK, BOS, EOS
from utils import increasing_mask
def test_request_infos_for_train():
infos = request_infos_for_train()
assert infos.admissible_commands is True
assert infos.description is False
assert infos.location is False
assert infos.facts is False
assert infos.last_action is False
assert infos.game is True
def test_request_infos_for_eval():
infos = request_infos_for_eval()
assert infos.admissible_commands is True
assert infos.description is True
assert infos.location is True
assert infos.facts is True
assert infos.last_action is True
assert infos.game is True
@pytest.mark.parametrize(
"dataset,difficulty_levels,training_size,expected_game_files",
[
(
"train",
[1],
1,
[
"test-data/rl_games/train_1/difficulty_level_1/train_1-level-1.z8",
],
),
(
"train",
[2],
1,
[
"test-data/rl_games/train_1/difficulty_level_2/train_1-level-2.z8",
],
),
(
"train",
[1],
2,
[
"test-data/rl_games/train_2/difficulty_level_1/train_2-level-1-1.z8",
"test-data/rl_games/train_2/difficulty_level_1/train_2-level-1-2.z8",
],
),
(
"train",
[2],
2,
[
"test-data/rl_games/train_2/difficulty_level_2/train_2-level-2-1.z8",
"test-data/rl_games/train_2/difficulty_level_2/train_2-level-2-2.z8",
],
),
(
"valid",
[1],
None,
[
"test-data/rl_games/valid/difficulty_level_1/valid-level-1-1.z8",
"test-data/rl_games/valid/difficulty_level_1/valid-level-1-2.z8",
],
),
(
"valid",
[2],
None,
[
"test-data/rl_games/valid/difficulty_level_2/valid-level-2-1.z8",
"test-data/rl_games/valid/difficulty_level_2/valid-level-2-2.z8",
],
),
(
"test",
[1],
None,
[
"test-data/rl_games/test/difficulty_level_1/test-level-1-1.z8",
"test-data/rl_games/test/difficulty_level_1/test-level-1-2.z8",
],
),
(
"test",
[2],
None,
[
"test-data/rl_games/test/difficulty_level_2/test-level-2-1.z8",
"test-data/rl_games/test/difficulty_level_2/test-level-2-2.z8",
],
),
(
"train",
[1, 2],
2,
# static since we set the seed
[
"test-data/rl_games/train_2/difficulty_level_1/train_2-level-1-1.z8",
"test-data/rl_games/train_2/difficulty_level_2/train_2-level-2-2.z8",
],
),
(
"valid",
[1, 2],
None,
[
"test-data/rl_games/valid/difficulty_level_1/valid-level-1-1.z8",
"test-data/rl_games/valid/difficulty_level_1/valid-level-1-2.z8",
"test-data/rl_games/valid/difficulty_level_2/valid-level-2-1.z8",
"test-data/rl_games/valid/difficulty_level_2/valid-level-2-2.z8",
],
),
(
"test",
[1, 2],
None,
[
"test-data/rl_games/test/difficulty_level_1/test-level-1-1.z8",
"test-data/rl_games/test/difficulty_level_1/test-level-1-2.z8",
"test-data/rl_games/test/difficulty_level_2/test-level-2-1.z8",
"test-data/rl_games/test/difficulty_level_2/test-level-2-2.z8",
],
),
],
)
def test_get_game_dirs(dataset, difficulty_levels, training_size, expected_game_files):
random.seed(42)
assert (
set(
get_game_files(
"test-data/rl_games",
dataset,
difficulty_levels,
training_size=training_size,
)
)
== set(expected_game_files)
)
def test_gata_double_dqn_default_init():
gata_ddqn = GATADoubleDQN()
# train_env is initialized with the test games
assert len(gata_ddqn.train_env.gamefiles) == 2
assert gata_ddqn.train_env.request_infos == request_infos_for_train()
assert gata_ddqn.train_env.batch_size == gata_ddqn.hparams.train_game_batch_size
assert gata_ddqn.train_env.spec.id.split("-")[1] == "train"
# val_env is initialized with the test games
assert len(gata_ddqn.val_env.gamefiles) == 2
assert gata_ddqn.val_env.request_infos == request_infos_for_eval()
assert gata_ddqn.val_env.batch_size == gata_ddqn.hparams.eval_game_batch_size
assert gata_ddqn.val_env.spec.id.split("-")[1] == "val"
# test_env is initialized with the test games
assert len(gata_ddqn.test_env.gamefiles) == 2
assert gata_ddqn.test_env.request_infos == request_infos_for_eval()
assert gata_ddqn.test_env.batch_size == gata_ddqn.hparams.eval_game_batch_size
assert gata_ddqn.test_env.spec.id.split("-")[1] == "test"
# default words
default_word_vocab = [PAD, UNK, BOS, EOS]
assert gata_ddqn.preprocessor.word_vocab == default_word_vocab
assert gata_ddqn.graph_updater.word_embeddings[0].weight.size() == (
len(default_word_vocab),
gata_ddqn.hparams.word_emb_dim,
)
# default node_vocab = ['node']
assert gata_ddqn.graph_updater.node_name_word_ids.size() == (
len(gata_ddqn.node_vocab),
1,
)
assert gata_ddqn.graph_updater.node_name_mask.size() == (
len(gata_ddqn.node_vocab),
1,
)
# default relation_vocab = ['relation', 'relation reverse']
assert gata_ddqn.graph_updater.rel_name_word_ids.size() == (
len(gata_ddqn.relation_vocab),
2,
)
assert gata_ddqn.graph_updater.rel_name_mask.size() == (
len(gata_ddqn.relation_vocab),
2,
)
# online action selector is train mode
assert gata_ddqn.action_selector.training
# target action selector is in train mode
assert gata_ddqn.target_action_selector.training
# and frozen
for param in gata_ddqn.target_action_selector.parameters():
assert param.requires_grad is False
# online and target action selectors should be initialized to be the same
for online, target in zip(
gata_ddqn.action_selector.parameters(),
gata_ddqn.target_action_selector.parameters(),
):
assert online.equal(target)
# graph updater is in eval mode
assert not gata_ddqn.graph_updater.training
# and frozen
for param in gata_ddqn.graph_updater.parameters():
assert param.requires_grad is False
def test_gata_double_dqn_update_target_action_selector():
gata_ddqn = GATADoubleDQN()
# scramble layers in the online action selector and update
gata_ddqn.action_selector.node_name_word_ids.fill_(42)
gata_ddqn.action_selector.node_embeddings = nn.Embedding(
gata_ddqn.num_nodes, gata_ddqn.hparams.node_emb_dim
)
# make sure the weights are the same after updating
gata_ddqn.update_target_action_selector()
for online, target in zip(
gata_ddqn.action_selector.parameters(),
gata_ddqn.target_action_selector.parameters(),
):
assert online.equal(target)
@pytest.mark.parametrize(
"batch_size,obs_len,prev_action_len,num_action_cands,action_cand_len",
[(1, 5, 3, 4, 10), (3, 6, 4, 5, 12)],
)
def test_gata_double_dqn_forward(
batch_size,
obs_len,
prev_action_len,
num_action_cands,
action_cand_len,
):
gata_ddqn = GATADoubleDQN()
results = gata_ddqn(
torch.randint(gata_ddqn.num_words, (batch_size, obs_len)),
increasing_mask(batch_size, obs_len),
torch.randint(gata_ddqn.num_words, (batch_size, prev_action_len)),
increasing_mask(batch_size, prev_action_len),
torch.rand(batch_size, gata_ddqn.hparams.hidden_dim),
torch.randint(
gata_ddqn.num_words, (batch_size, num_action_cands, action_cand_len)
),
increasing_mask(batch_size * num_action_cands, action_cand_len).view(
batch_size, num_action_cands, action_cand_len
),
increasing_mask(batch_size, num_action_cands),
)
assert results["action_scores"].size() == (batch_size, num_action_cands)
assert results["rnn_curr_hidden"].size() == (
batch_size,
gata_ddqn.hparams.hidden_dim,
)
assert results["current_graph"].size() == (
batch_size,
gata_ddqn.num_relations,
gata_ddqn.num_nodes,
gata_ddqn.num_nodes,
)
@pytest.mark.parametrize(
"action_scores,action_mask,actions_idx,expected",
[
(
torch.tensor([[1.0]]),
torch.tensor([[1.0]]),
torch.tensor([0]),
torch.tensor([1.0]),
),
(
torch.tensor([[1.0, 2.0]]),
torch.tensor([[1.0, 1.0]]),
torch.tensor([1]),
torch.tensor([2.0]),
),
(
torch.tensor([[1.0, 2.0]]),
torch.tensor([[1.0, 0.0]]),
torch.tensor([1]),
torch.tensor([0.0]),
),
(
torch.tensor([[1.0, 2.0], [2.0, 1.0]]),
torch.tensor([[1.0, 0.0], [1.0, 1.0]]),
torch.tensor([1, 0]),
torch.tensor([0.0, 2.0]),
),
],
)
def test_gata_double_dqn_get_q_values(
action_scores, action_mask, actions_idx, expected
):
assert GATADoubleDQN.get_q_values(action_scores, action_mask, actions_idx).equal(
expected
)
@pytest.mark.parametrize(
"batch_size,obs_len,prev_action_len,curr_action_len,"
"num_action_cands,action_cand_len",
[
(1, 8, 6, 4, 5, 3),
(3, 12, 10, 9, 8, 4),
],
)
def test_gata_double_dqn_training_step(
replay_buffer_gata_double_dqn,
batch_size,
obs_len,
prev_action_len,
curr_action_len,
num_action_cands,
action_cand_len,
):
# Note: batch_idx is not used
assert (
replay_buffer_gata_double_dqn.training_step(
{
"obs_word_ids": torch.randint(4, (batch_size, obs_len)),
"obs_mask": torch.randint(2, (batch_size, obs_len), dtype=torch.float),
"prev_action_word_ids": torch.randint(4, (batch_size, prev_action_len)),
"prev_action_mask": torch.randint(
2, (batch_size, prev_action_len), dtype=torch.float
),
"rnn_prev_hidden": torch.rand(
batch_size, replay_buffer_gata_double_dqn.hparams.hidden_dim
),
"action_cand_word_ids": torch.randint(
4, (batch_size, num_action_cands, action_cand_len)
),
"action_cand_mask": torch.randint(
2,
(batch_size, num_action_cands, action_cand_len),
dtype=torch.float,
),
"action_mask": torch.randint(
2, (batch_size, num_action_cands), dtype=torch.float
),
"actions_idx": torch.randint(num_action_cands, (batch_size,)),
"rewards": torch.rand(batch_size),
"next_obs_word_ids": torch.randint(4, (batch_size, obs_len)),
"next_obs_mask": torch.randint(
2, (batch_size, obs_len), dtype=torch.float
),
"curr_action_word_ids": torch.randint(4, (batch_size, curr_action_len)),
"curr_action_mask": torch.randint(
2, (batch_size, curr_action_len), dtype=torch.float
),
"next_action_cand_word_ids": torch.randint(
4, (batch_size, num_action_cands, action_cand_len)
),
"next_action_cand_mask": torch.randint(
2,
(batch_size, num_action_cands, action_cand_len),
dtype=torch.float,
),
"next_action_mask": torch.randint(
2, (batch_size, num_action_cands), dtype=torch.float
),
"rnn_curr_hidden": torch.rand(
batch_size, replay_buffer_gata_double_dqn.hparams.hidden_dim
),
"steps": torch.randint(1, 4, (batch_size,)),
"weights": torch.rand(batch_size),
"indices": torch.tensor(list(range(batch_size))),
},
0,
).ndimension()
== 0
)
# make sure priorities are updated
for prio in replay_buffer_gata_double_dqn.replay_buffer.priorities[:batch_size]:
assert prio != 0
def test_transition_cache_batch_add():
t_cache = TransitionCache(3)
def generate_batch(step, dones):
return {
"obs": [f"{i}: step {step} obs" for i in range(3)],
"prev_actions": [f"{i}: step {step} prev act" for i in range(3)],
"rnn_prev_hiddens": torch.rand(3, 12),
"batch_action_cands": [
[
f"{i}: step {step} act 1",
f"{i}: step {step} act 2",
f"{i}: step {step} act 3",
]
for i in range(3)
],
"actions_idx": [random.randint(0, 2) for _ in range(3)],
"cum_rewards": [random.random() for _ in range(3)],
"step_rewards": [random.random() for _ in range(3)],
"next_obs": [f"{i}: step {step} next obs" for i in range(3)],
"batch_next_action_cands": [
[
f"{i}: step {step} next act 1",
f"{i}: step {step} next act 2",
f"{i}: step {step} next act 3",
]
for i in range(3)
],
"rnn_curr_hiddens": torch.rand(3, 12),
"dones": dones,
}
def compare_batch_transition(batch, batch_num, transition):
assert transition.ob == batch["obs"][batch_num]
assert transition.prev_action == batch["prev_actions"][batch_num]
assert transition.rnn_prev_hidden.equal(batch["rnn_prev_hiddens"][batch_num])
assert transition.action_cands == batch["batch_action_cands"][batch_num]
assert transition.action_id == batch["actions_idx"][batch_num]
assert transition.cum_reward == batch["cum_rewards"][batch_num]
assert transition.step_reward == batch["step_rewards"][batch_num]
assert transition.next_ob == batch["next_obs"][batch_num]
assert (
transition.next_action_cands == batch["batch_next_action_cands"][batch_num]
)
assert transition.rnn_curr_hidden.equal(batch["rnn_curr_hiddens"][batch_num])
assert transition.done == batch["dones"][batch_num]
# add a not done step
batch_0 = generate_batch(0, [False] * 3)
t_cache.batch_add(**batch_0)
for i in range(3):
assert len(t_cache.cache[i]) == 1
compare_batch_transition(batch_0, i, t_cache.cache[i][-1])
# add a done game
batch_1 = generate_batch(1, [False, True, False])
t_cache.batch_add(**batch_1)
for i in range(3):
assert len(t_cache.cache[i]) == 2
compare_batch_transition(batch_1, i, t_cache.cache[i][-1])
# add another done step
batch_2 = generate_batch(2, [False, True, False])
t_cache.batch_add(**batch_2)
for i in range(3):
if batch_2["dones"][i]:
# it shouldn't have been added
assert len(t_cache.cache[i]) == 2
compare_batch_transition(batch_1, i, t_cache.cache[i][-1])
else:
assert len(t_cache.cache[i]) == 3
compare_batch_transition(batch_2, i, t_cache.cache[i][-1])
@pytest.mark.parametrize(
"batch_cum_rewards,batch_expected",
[
(
[
[1, 3, 3, 4],
],
[1],
),
(
[
[1, 3, 3, 4],
[1, 3, 3, 4, 6],
],
[1.0, 1.2],
),
],
)
def test_transition_cache_get_avg_rewards(batch_cum_rewards, batch_expected):
t_cache = TransitionCache(len(batch_cum_rewards))
for i, cum_rewards in enumerate(batch_cum_rewards):
for cum_reward in cum_rewards:
t_cache.cache[i].append(Transition(cum_reward=cum_reward))
for avg, expected in zip(t_cache.get_avg_rewards(), batch_expected):
assert pytest.approx(avg) == expected
@pytest.mark.parametrize(
"batch_cum_rewards,batch_expected",
[
(
[
[1, 3, 3, 4],
],
[4],
),
(
[
[1, 3, 3, 4],
[1, 3, 3, 4, 6],
],
[4, 6],
),
],
)
def test_transition_cache_get_rewards(batch_cum_rewards, batch_expected):
t_cache = TransitionCache(len(batch_cum_rewards))
for i, cum_rewards in enumerate(batch_cum_rewards):
for cum_reward in cum_rewards:
t_cache.cache[i].append(Transition(cum_reward=cum_reward))
for rewards, expected in zip(t_cache.get_game_rewards(), batch_expected):
assert rewards == expected
@pytest.mark.parametrize("expected_steps", [[1], [2, 3, 1, 5]])
def test_transition_cache_get_game_steps(expected_steps):
t_cache = TransitionCache(len(expected_steps))
for i, steps in enumerate(expected_steps):
t_cache.cache[i].extend([Transition()] * steps)
for steps, expected in zip(t_cache.get_game_steps(), expected_steps):
assert steps == expected
@pytest.fixture
def eps_greedy_agent():
gata_double_dqn = GATADoubleDQN(word_vocab_path="vocabs/word_vocab.txt")
return EpsilonGreedyAgent(
gata_double_dqn.graph_updater,
gata_double_dqn.action_selector,
gata_double_dqn.preprocessor,
0.1,
1.0,
20,
)
@pytest.fixture
def replay_buffer_gata_double_dqn():
return GATADoubleDQN(
train_game_batch_size=2,
train_max_episode_steps=5,
replay_buffer_populate_episodes=10,
yield_step_freq=10,
replay_buffer_capacity=20,
train_sample_batch_size=4,
)
@pytest.fixture
def replay_buffer():
return ReplayBuffer(20, 0.1, 4, 1e-6, 0.6, 0.4, 100000, 3, 0.9)
@pytest.mark.parametrize(
"initial_buffer,batch_transitions,expected_buffer",
[
(
[],
[[Transition(cum_reward=1)], [Transition(cum_reward=1)]],
[Transition(cum_reward=1), Transition(cum_reward=1)],
),
(
[Transition(step_reward=2), Transition(step_reward=1)],
[
[Transition(cum_reward=2), Transition(cum_reward=3)],
[Transition(cum_reward=0)],
],
[
Transition(step_reward=2),
Transition(step_reward=1),
Transition(cum_reward=2),
Transition(cum_reward=3),
],
),
],
)
def test_replay_buffer_push(
replay_buffer, initial_buffer, batch_transitions, expected_buffer
):
replay_buffer.buffer = initial_buffer
t_cache = TransitionCache(0)
t_cache.cache = batch_transitions
replay_buffer.push(t_cache)
assert replay_buffer.buffer == expected_buffer
def test_replay_buffer_extend_limited_list(replay_buffer):
replay_buffer._extend_limited_list([Transition(ob=str(i)) for i in range(10)])
assert len(replay_buffer.buffer) == 10
assert replay_buffer.buffer_next_id == 10
for i in range(10):
assert replay_buffer.buffer[i].ob == str(i)
assert replay_buffer.priorities[i] == 1.0
# mess with the priorities
replay_buffer.priorities[i] = i / 20
replay_buffer._extend_limited_list([Transition(ob=str(i)) for i in range(10, 20)])
assert len(replay_buffer.buffer) == 20
assert replay_buffer.buffer_next_id == 0
for i in range(10, 20):
assert replay_buffer.buffer[i].ob == str(i)
assert pytest.approx(replay_buffer.priorities[i]) == 0.45
# mess with the priorities
replay_buffer.priorities[i] = i / 20
replay_buffer._extend_limited_list([Transition(ob=str(i)) for i in range(20, 30)])
assert len(replay_buffer.buffer) == 20
assert replay_buffer.buffer_next_id == 10
for i in range(20, 30):
assert replay_buffer.buffer[i % 20].ob == str(i)
assert pytest.approx(replay_buffer.priorities[i % 20]) == 0.95
def test_replay_buffer_sample(replay_buffer):
replay_buffer.buffer = [
Transition(
ob=f"{i} o",
prev_action=f"{i} p a",
rnn_prev_hidden=torch.rand(16),
action_cands=[f"{i} a1", f"{i} a2"],
action_id=random.randint(0, 1),
cum_reward=random.random(),
step_reward=random.random(),
next_ob=f"{i} next o",
next_action_cands=[f"{i} next a1", f"{i} next a2"],
rnn_curr_hidden=torch.rand(16),
)
for i in range(10)
]
replay_buffer.buffer_next_id += 10
# mess with the priorities so that only the first four are drawn
replay_buffer.priorities[:4] = 1.0
sampled = replay_buffer.sample()
assert len(sampled["steps"]) == 4
for step in sampled["steps"]:
assert step in set(range(1, replay_buffer.multi_step + 1))
assert set(sampled["indices"]) == set([0, 1, 2, 3])
assert len(sampled["samples"]) == 4
for i, sample in zip(sampled["indices"], sampled["samples"]):
# cum_reward, step_reward, next_ob, next_action_cands
# rnn_curr_hidden, done
# are not the same b/c of multi-step learning
# skip them since they're tested separately anyway
expected = replay_buffer.buffer[i]
assert sample.ob == expected.ob
assert sample.prev_action == expected.prev_action
assert sample.rnn_prev_hidden.equal(expected.rnn_prev_hidden)
assert sample.action_cands == expected.action_cands
assert sample.action_id == expected.action_id
# weights are all 1's b/c they all have the same values
assert sampled["weights"] == [1.0] * 4
def test_replay_buffer_sample_multi_step(replay_buffer):
replay_buffer.buffer = [
Transition(
ob=f"{i} o",
prev_action=f"{i} p a",
rnn_prev_hidden=torch.rand(16),
action_cands=[f"{i} a1", f"{i} a2"],
action_id=random.randint(0, 1),
cum_reward=random.random(),
step_reward=random.random(),
next_ob=f"{i} next o",
next_action_cands=[f"{i} next a1", f"{i} next a2"],
rnn_curr_hidden=torch.rand(16),
done=True if (i + 1) % 5 == 0 else False,
)
for i in range(10)
]
replay_buffer.buffer_next_id += 10
# 0 steps
samples, indices, steps = replay_buffer.sample_multi_step([0, 1], [0, 0])
assert steps == [0, 0]
assert indices == [0, 1]
assert samples == replay_buffer.buffer[:2]
# 1 step
samples, indices, steps = replay_buffer.sample_multi_step([0], [1])
assert steps == [1]
assert indices == [0]
assert len(samples) == 1
sample = samples[0]
head = replay_buffer.buffer[0]
tail = replay_buffer.buffer[1]
assert sample.ob == head.ob
assert sample.prev_action == head.prev_action
assert sample.action_cands == head.action_cands
assert sample.action_id == head.action_id
assert sample.cum_reward == tail.cum_reward
assert (
sample.step_reward
== head.step_reward + tail.step_reward * replay_buffer.reward_discount
)
assert sample.next_ob == tail.next_ob
assert sample.next_action_cands == tail.next_action_cands
assert sample.rnn_curr_hidden.equal(tail.rnn_curr_hidden)
assert sample.done is False
# 2 step and tail is done
samples, indices, steps = replay_buffer.sample_multi_step([2], [2])
assert steps == [2]
assert indices == [2]
assert len(samples) == 1
sample = samples[0]
head = replay_buffer.buffer[2]
tail = replay_buffer.buffer[4]
assert sample.ob == head.ob
assert sample.prev_action == head.prev_action
assert sample.action_cands == head.action_cands
assert sample.action_id == head.action_id
assert sample.cum_reward == tail.cum_reward
assert sample.step_reward == head.step_reward + replay_buffer.buffer[
3
].step_reward * replay_buffer.reward_discount + tail.step_reward * (
replay_buffer.reward_discount ** 2
)
assert sample.next_ob == tail.next_ob
assert sample.next_action_cands == tail.next_action_cands
assert sample.rnn_curr_hidden.equal(tail.rnn_curr_hidden)
assert sample.done is True
# 2 samples with 2 step. one of them goes over
samples, indices, steps = replay_buffer.sample_multi_step([2, 3], [2, 2])
assert steps == [2]
assert indices == [2]
assert len(samples) == 1
sample = samples[0]
head = replay_buffer.buffer[2]
tail = replay_buffer.buffer[4]
assert sample.ob == head.ob
assert sample.prev_action == head.prev_action
assert sample.action_cands == head.action_cands
assert sample.action_id == head.action_id
assert sample.cum_reward == tail.cum_reward
assert sample.step_reward == head.step_reward + replay_buffer.buffer[
3
].step_reward * replay_buffer.reward_discount + tail.step_reward * (
replay_buffer.reward_discount ** 2
)
assert sample.next_ob == tail.next_ob
assert sample.next_action_cands == tail.next_action_cands
assert sample.rnn_curr_hidden.equal(tail.rnn_curr_hidden)
assert sample.done is True
# 1 sampled near the end of the buffer with 2 step.
# make sure we loop around without error
# since it's done at index 9, no sample is produced.
samples, indices, steps = replay_buffer.sample_multi_step([8], [2])
assert steps == []
assert indices == []
assert len(samples) == 0
@pytest.mark.parametrize(
"beta_from,beta_frames",
[(0.1, 20), (0.4, 100000)],
)
def test_replay_buffer_update_beta(replay_buffer, beta_from, beta_frames):
replay_buffer.beta_from = beta_from
replay_buffer.beta_frames = beta_frames
# if step is 0, beta should equal beta_from
replay_buffer.update_beta(0)
assert replay_buffer.beta == beta_from
# if step is bigger than beta_frames
# beta should equal 1.0
replay_buffer.update_beta(beta_frames)
assert pytest.approx(replay_buffer.beta) == 1.0
replay_buffer.update_beta(beta_frames + 10)
assert replay_buffer.beta == 1.0
# if step is 1, equal to one step down from beta_from
replay_buffer.update_beta(1)
assert (
replay_buffer.beta
== beta_from - (replay_buffer.beta_from - 1.0) / replay_buffer.beta_frames
)
# if step is beta_frames - 1,
# equal to one step up from 1.0
replay_buffer.update_beta(beta_frames - 1)
assert (
pytest.approx(replay_buffer.beta)
== 1.0 + (replay_buffer.beta_from - 1.0) / replay_buffer.beta_frames
)
# if step is in the middle, beta should be the mean of from and 1.0
replay_buffer.update_beta(beta_frames // 2)
assert replay_buffer.beta == pytest.approx((beta_from + 1.0) / 2)
@pytest.mark.parametrize(
"batch_idx,batch_prios",
[
([19], [3.5]),
([0, 4, 2, 7], [0.1, 0.2, 0.3, 0.4]),
],
)
def test_replay_buffer_update_priorities(replay_buffer, batch_idx, batch_prios):
replay_buffer.update_priorities(batch_idx, batch_prios)
for i, prio in zip(batch_idx, batch_prios):
assert pytest.approx(replay_buffer.priorities[i]) == prio + replay_buffer.eps
def test_gata_double_dqn_prepare_batch(replay_buffer_gata_double_dqn):
sampled = {
"samples": [
Transition(
ob=f"{i} o",
prev_action=f"{i} p a",
rnn_prev_hidden=torch.rand(
replay_buffer_gata_double_dqn.hparams.hidden_dim
),
action_cands=[f"{i} a1", f"{i} a2"],
action_id=random.randint(0, 1),
cum_reward=random.randint(0, 10),
step_reward=random.randint(0, 1),
next_ob=f"{i} next o",
next_action_cands=[f"{i} next a1", f"{i} next a2"],
rnn_curr_hidden=torch.rand(
replay_buffer_gata_double_dqn.hparams.hidden_dim
),
done=False,
)
for i in range(10)
],
"steps": torch.randint(1, 4, (10,)).tolist(),
"indices": list(i for i in range(10)),
"weights": torch.rand(10).tolist(),
}
batch_size = len(sampled["samples"])
batch = replay_buffer_gata_double_dqn.prepare_batch(sampled)
assert batch["obs_word_ids"].size() == (batch_size, 2)
assert batch["obs_mask"].size() == (batch_size, 2)
assert batch["prev_action_word_ids"].size() == (batch_size, 3)
assert batch["prev_action_mask"].size() == (batch_size, 3)
assert batch["rnn_prev_hidden"].size() == (
batch_size,
replay_buffer_gata_double_dqn.hparams.hidden_dim,
)
assert batch["rnn_prev_hidden"].equal(
torch.stack([t.rnn_prev_hidden for t in sampled["samples"]])
)
assert batch["action_cand_word_ids"].size() == (batch_size, 2, 2)
assert batch["action_cand_mask"].size() == (batch_size, 2, 2)
assert batch["action_mask"].size() == (batch_size, 2)
assert batch["actions_idx"].equal(
torch.tensor([t.action_id for t in sampled["samples"]])
)
assert batch["rewards"].equal(
torch.tensor([t.step_reward for t in sampled["samples"]])
)
assert batch["curr_action_word_ids"].size() == (batch_size, 2)
assert batch["curr_action_mask"].size() == (batch_size, 2)
assert batch["next_obs_word_ids"].size() == (batch_size, 3)
assert batch["next_obs_mask"].size() == (batch_size, 3)
assert batch["next_action_cand_word_ids"].size() == (batch_size, 2, 3)
assert batch["next_action_cand_mask"].size() == (batch_size, 2, 3)
assert batch["next_action_mask"].size() == (batch_size, 2)
assert batch["rnn_curr_hidden"].size() == (
batch_size,
replay_buffer_gata_double_dqn.hparams.hidden_dim,
)
assert batch["rnn_curr_hidden"].equal(
torch.stack([t.rnn_curr_hidden for t in sampled["samples"]])
)
assert batch["steps"].equal(torch.tensor(sampled["steps"]))
assert batch["indices"].equal(torch.tensor(sampled["indices"]))
assert batch["weights"].equal(torch.tensor(sampled["weights"]))
def test_gata_double_dqn_train_dataloader(replay_buffer_gata_double_dqn):
for batch in replay_buffer_gata_double_dqn.train_dataloader():
# sampled batch size could be less than train_sample_batch_size
batch_size = batch["obs_word_ids"].size(0)
assert (
batch_size <= replay_buffer_gata_double_dqn.hparams.train_sample_batch_size
)
assert batch["obs_word_ids"].size(0) == batch_size
assert batch["obs_mask"].size() == batch["obs_word_ids"].size()
assert batch["prev_action_word_ids"].size(0) == batch_size
assert batch["prev_action_mask"].size() == batch["prev_action_word_ids"].size()
assert batch["rnn_prev_hidden"].size() == (
batch_size,
replay_buffer_gata_double_dqn.hparams.hidden_dim,
)
assert batch["action_cand_word_ids"].size(0) == batch_size
assert batch["action_cand_mask"].size() == batch["action_cand_word_ids"].size()
assert batch["action_mask"].size(0) == batch_size
assert batch["action_mask"].size(1) == batch["action_cand_mask"].size(1)
assert batch["actions_idx"].size() == (batch_size,)
assert batch["rewards"].size() == (batch_size,)
assert batch["curr_action_word_ids"].size(0) == batch_size
assert batch["curr_action_mask"].size() == batch["curr_action_word_ids"].size()
assert batch["next_obs_word_ids"].size(0) == batch_size
assert batch["next_obs_mask"].size() == batch["next_obs_word_ids"].size()
assert batch["next_action_cand_word_ids"].size(0) == batch_size
assert (
batch["next_action_cand_mask"].size()
== batch["next_action_cand_word_ids"].size()
)
assert batch["next_action_mask"].size(0) == batch_size
assert batch["next_action_mask"].size(1) == batch["next_action_cand_mask"].size(
1
)
def test_gata_double_dqn_populate_replay_buffer(replay_buffer_gata_double_dqn):
assert len(replay_buffer_gata_double_dqn.replay_buffer.buffer) == 0
replay_buffer_gata_double_dqn.populate_replay_buffer()
assert len(replay_buffer_gata_double_dqn.replay_buffer.buffer) > 0
# make sure everything is sequential
a, b = itertools.tee(replay_buffer_gata_double_dqn.replay_buffer.buffer)
next(b, None)
for prev_t, curr_t in zip(a, b):
if prev_t.done:
# different game started, so skip
continue
# ob should be the same as previous next_ob
assert curr_t.ob == prev_t.next_ob
# prev_action should be the same as the selected action
# from previous transition
assert curr_t.prev_action == prev_t.action_cands[prev_t.action_id]
# rnn_prev_hidden should be the right size
assert prev_t.rnn_prev_hidden.size() == (
replay_buffer_gata_double_dqn.hparams.hidden_dim,
)
assert curr_t.rnn_prev_hidden.size() == (
replay_buffer_gata_double_dqn.hparams.hidden_dim,
)
# action_cands should be same as the previous next_action_cands
assert curr_t.action_cands == prev_t.next_action_cands
# cum_reward should be previous cum_reward + current step_reward
assert curr_t.cum_reward == prev_t.cum_reward + curr_t.step_reward
def test_gata_double_dqn_gen_train_batch(replay_buffer_gata_double_dqn):
# make sure gen_train_batch() produces at least one batch
def mock_play_episodes(sample, action_select_fn, episode_end_fn):
if mock_play_episodes.counter == 0:
mock_play_episodes.counter += 1
return
yield "batch"
mock_play_episodes.counter = 0
replay_buffer_gata_double_dqn.play_episodes = mock_play_episodes
# no exception should be raised
next(replay_buffer_gata_double_dqn.gen_train_batch())
def test_main(tmp_path):
with initialize(config_path="train_gata_conf"):
cfg = compose(
config_name="config",
overrides=[
"data.base_data_dir=null",
"data.train_data_size=1",
"data.train_game_batch_size=3",
"data.train_max_episode_steps=5",
"data.train_sample_batch_size=4",
"data.eval_max_episode_steps=5",
"data.eval_game_batch_size=3",
"train.training_step_freq=4",
"train.target_net_update_frequency=3",
"train.replay_buffer_populate_episodes=3",
"pl_trainer.max_epochs=2",
"pl_trainer.check_val_every_n_epoch=1",
f"+pl_trainer.default_root_dir={tmp_path}",
],
)
main(cfg)
@pytest.mark.parametrize("difficulty_level", [1, 5])
def test_main_test_only(tmp_path, difficulty_level):
with initialize(config_path="train_gata_conf"):
cfg = compose(
config_name="config",
overrides=[
"data.base_data_dir=null",
"eval.test_only=true",
"eval.checkpoint_path=test-data/test-gata.ckpt",
f"+pl_trainer.default_root_dir={tmp_path}",
"+pl_trainer.limit_test_batches=1",
],
)
main(cfg)
|
the-stack_106_25506 | from splunk_eventgen.lib.plugins.output.httpevent_core import HTTPCoreOutputPlugin
from splunk_eventgen.lib.logging_config import logger
try:
import ujson as json
except ImportError:
import json
class NoServers(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class BadConnection(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class MetricHTTPEventOutputPlugin(HTTPCoreOutputPlugin):
'''
MetricHTTPEvent output will enable events that are generated to be sent directly
to splunk metrics indexes through the HTTP event input. In order to use this output plugin,
you will need to supply an attribute 'httpeventServers' as a valid json object.
this json object should look like the following:
{servers:[{ protocol:http/https, address:127.0.0.1, port:8088, key:12345-12345-123123123123123123}]}
'''
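    # Illustrative sketch only (values below are the docstring's own placeholders, not real credentials):
    # the 'httpeventServers' attribute described above is a JSON object such as
    #   {"servers": [{"protocol": "https", "address": "127.0.0.1",
    #                 "port": 8088, "key": "12345-12345-123123123123123123"}]}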
name = 'metric_httpevent'
def __init__(self, sample, output_counter=None):
super(MetricHTTPEventOutputPlugin, self).__init__(sample, output_counter)
def flush(self, q):
logger.debug("Flush called on metric_httpevent plugin")
self._setup_REST_workers()
if len(q) > 0:
try:
payload = []
for event in q:
logger.debug("HTTPEvent proccessing event: %s" % event)
payloadFragment = {}
if event.get('_raw') is None or event['_raw'] == "\n":
logger.error('failure outputting event, does not contain _raw')
else:
logger.debug("Event contains _raw, attempting to process...")
logger.debug(event['_raw'])
fields = json.loads(event['_raw'])['fields']
payloadFragment['fields'] = fields
payloadFragment['event'] = "metric"
if event.get('source'):
logger.debug("Event contains source, adding to httpevent event")
payloadFragment['source'] = event['source']
if event.get('sourcetype'):
logger.debug("Event contains sourcetype, adding to httpevent event")
payloadFragment['sourcetype'] = event['sourcetype']
self.lastsourcetype = event['sourcetype']
if event.get('host'):
logger.debug("Event contains host, adding to httpevent event")
payloadFragment['host'] = event['host']
if event.get('_time'):
# make sure _time can be an epoch timestamp
try:
float(event.get("_time"))
logger.debug("Event contains _time, adding to httpevent event")
payloadFragment['time'] = event['_time']
except:
logger.error("Timestamp not in epoch format, ignoring event: {0}".format(event))
if event.get('index'):
logger.debug("Event contains index, adding to httpevent event")
payloadFragment['index'] = event['index']
logger.debug("Full payloadFragment: {}".format(payloadFragment))
# logger.debug("Full payloadFragment: %s" % json.dumps(payloadFragment))
payload.append(payloadFragment)
logger.debug("Metric_httpevent Finished processing events, sending all to splunk")
self._sendHTTPEvents(payload)
if self.config.httpeventWaitResponse:
for session in self.active_sessions:
response = session.result()
if not response.raise_for_status():
logger.debug("Payload successfully sent to httpevent server.")
else:
logger.error("Server returned an error while trying to send, response code: %s" %
response.status_code)
raise BadConnection(
"Server returned an error while sending, response code: %s" % response.status_code)
else:
logger.debug("Ignoring response from HTTP server, leaving metric_httpevent outputter")
except Exception as e:
logger.error('failed indexing events, reason: %s ' % e)
def load():
"""Returns an instance of the plugin"""
return MetricHTTPEventOutputPlugin
|
the-stack_106_25507 | import pathlib
from setuptools import setup, find_packages
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
setup(
name='gerrit_coverage',
url='https://github.com/tom-010/gerrit_coverage',
version='0.0.5',
author='Thomas Deniffel',
author_email='[email protected]',
packages=['gerrit_coverage'], # find_packages(),
license='Apache2',
install_requires=[
'gerrit-robo==0.0.2',
'missing-diff-lines==0.0.4'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
description='Convert the test-coverage-result while only including not covered lines in the current diff.',
long_description=README,
long_description_content_type="text/markdown",
python_requires='>=3',
include_package_data=True,
entry_points = {
'console_scripts': [
'gerrit_check_style = gerrit_coverage:check_style',
'gerrit_coverage = gerrit_coverage:check_missing_lines',
]
},
) |
the-stack_106_25508 | import re
lmps_log_file='log.lammps'
lines=None
with open(lmps_log_file,'r') as f:
lines=f.readlines()
line = lines[len(lines)-1].strip()
line = re.sub(' +',' ',line)
line = [float(s) for s in line.split(" ")]
n_data = len(line)
step = line[0]
max_replica_force = line[1]
max_atom_force = line[2]
grad_v0 = line[3]
grad_v1 = line[4]
grad_vc = line[5]
ebf = line[6]
ebr = line[7]
rdt = line[8]
rd = []
pe = []
for i in range(9,n_data):
if i % 2 == 0:
pe.append(line[i])
else:
rd.append(line[i])
#import matplotlib.pyplot as plt
#plt.plot(rd,pe)
#print(rd)
#print(pe)
n_image = len(pe)
e_0 = pe[0]
e_f = pe[n_image - 1]
e_max = max(pe)
e_barrier = e_max - 0.5*(e_0 + e_f)
print(e_0,e_f,e_max,e_barrier)
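# Interpretation sketch (assumed from the formula above): the reported barrier is measured
# from the mean of the two endpoint energies, i.e. e_barrier = e_max - (e_0 + e_f) / 2.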
|
the-stack_106_25509 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import tvm
from tvm.ir import IRModule
from tvm.meta_schedule import TuneContext
from tvm.meta_schedule.space_generator import PostOrderApply
from tvm.meta_schedule.testing.conv2d_winograd_cpu import conv2d_winograd_cpu
from tvm.meta_schedule.tune import DefaultLLVM
from tvm.target import Target
from tvm.tir.schedule import Schedule, Trace
def _get_mod():
# pylint: disable=invalid-name
def inline(sch: Schedule):
b1 = sch.get_block(name="A")
b2 = sch.get_block(name="B")
sch.compute_inline(block=b1)
sch.compute_inline(block=b2)
def input_tile_data_pad(sch: Schedule):
b78 = sch.get_block(name="input_tile")
l80 = sch.sample_compute_location(block=b78, decision=4)
sch.compute_at(block=b78, loop=l80, preserve_unit_loops=True)
b81 = sch.get_block(name="data_pad")
l83 = sch.sample_compute_location(block=b81, decision=-2)
sch.compute_at(block=b81, loop=l83, preserve_unit_loops=True)
def data_pack(sch: Schedule):
b18 = sch.get_block(name="data_pack")
l19, l20, l21, l22, l23, l24 = sch.get_loops(block=b18)
sch.unroll(loop=l19)
sch.unroll(loop=l20)
v25, v26 = sch.sample_perfect_tile(
n=2,
loop=l21,
max_innermost_factor=64,
decision=[9, 1],
)
l27, l28 = sch.split(loop=l21, factors=[v25, v26])
v29, v30 = sch.sample_perfect_tile(
n=2,
loop=l22,
max_innermost_factor=64,
decision=[32, 4],
)
l31, l32 = sch.split(loop=l22, factors=[v29, v30])
sch.unroll(loop=l23)
sch.unroll(loop=l24)
sch.reorder(l27, l31, l28, l32, l19, l20, l23, l24)
def bgemm(sch: Schedule):
bgemm = sch.get_block(name="bgemm")
write_cache = sch.cache_write(
block=bgemm,
write_buffer_index=0,
storage_scope="global",
)
sch.annotate(
block_or_loop=bgemm,
ann_key="meta_schedule.tiling_structure",
ann_val="SSRSRS",
)
# b33, b34 = b34, b33
l35, l36, l37, l38, l39 = sch.get_loops(block=bgemm)
v40, v41, v42, v43 = sch.sample_perfect_tile(
n=4,
loop=l35,
max_innermost_factor=64,
decision=[1, 2, 3, 1],
)
l44, l45, l46, l47 = sch.split(loop=l35, factors=[v40, v41, v42, v43])
v48, v49, v50, v51 = sch.sample_perfect_tile(
n=4,
loop=l36,
max_innermost_factor=64,
decision=[1, 1, 1, 6],
)
l52, l53, l54, l55 = sch.split(loop=l36, factors=[v48, v49, v50, v51])
v56, v57, v58, v59 = sch.sample_perfect_tile(
n=4,
loop=l37,
max_innermost_factor=64,
decision=[1, 1, 1, 9],
)
l60, l61, l62, l63 = sch.split(loop=l37, factors=[v56, v57, v58, v59])
v64, v65, v66, v67 = sch.sample_perfect_tile(
n=4,
loop=l38,
max_innermost_factor=64,
decision=[2, 1, 16, 4],
)
l68, l69, l70, l71 = sch.split(loop=l38, factors=[v64, v65, v66, v67])
v72, v73 = sch.sample_perfect_tile(
n=2,
loop=l39,
max_innermost_factor=64,
decision=[16, 8],
)
l74, l75 = sch.split(loop=l39, factors=[v72, v73])
sch.reorder(
# fmt: off
l44, l52, l60, l68,
l45, l53, l61, l69,
l74,
l46, l54, l62, l70,
l75,
l47, l55, l63, l71,
# fmt: on
)
sch.reverse_compute_at(block=write_cache, loop=l69, preserve_unit_loops=True)
def inverse(sch: Schedule):
b3 = sch.get_block(name="inverse")
l4, l5, l6, l7, l8, l9 = sch.get_loops(block=b3)
sch.unroll(loop=l4)
sch.unroll(loop=l5)
v10, v11 = sch.sample_perfect_tile(
n=2,
loop=l6,
max_innermost_factor=64,
decision=[1, 9],
)
l12, l13 = sch.split(loop=l6, factors=[v10, v11])
v14, v15 = sch.sample_perfect_tile(
n=2,
loop=l7,
max_innermost_factor=64,
decision=[2, 64],
)
l16, l17 = sch.split(loop=l7, factors=[v14, v15])
sch.unroll(loop=l8)
sch.unroll(loop=l9)
sch.reorder(l12, l16, l13, l17, l4, l5, l8, l9)
# pylint: enable=invalid-name
sch = Schedule(mod=conv2d_winograd_cpu)
inline(sch)
data_pack(sch)
input_tile_data_pad(sch)
bgemm(sch)
inverse(sch)
return sch.mod
def test_conv2d_winograd_cpu():
mod = conv2d_winograd_cpu
mod = IRModule({"main": mod})
context = TuneContext(
mod=mod,
target=Target("llvm"),
task_name="Custom Search Space Task",
sch_rules=DefaultLLVM._sch_rules(), # pylint: disable=protected-access
)
post_order_apply = PostOrderApply()
post_order_apply.initialize_with_tune_context(context)
(sch,) = post_order_apply.generate_design_space(mod)
decisions = dict(
zip(
[i for i in sch.trace.insts[:-4] if i.kind.name.startswith("Sample")],
[
# data_pack
[9, 1],
[32, 4],
# input_tile
4,
# data_pad
-2,
# inverse
[1, 9],
[2, 64],
# bgemm
[1, 2, 3, 1],
[1, 1, 1, 6],
[1, 1, 1, 9],
[2, 1, 16, 4],
[16, 8],
],
)
)
trace = Trace(sch.trace.insts[:-4], decisions=decisions)
sch = Schedule(mod=mod)
trace.apply_to_schedule(sch, remove_postproc=False)
answer = sch.mod
expected = _get_mod()
tvm.ir.assert_structural_equal(answer, expected)
if __name__ == "__main__":
test_conv2d_winograd_cpu()
|
the-stack_106_25510 | """
Simulation to examine the P(reject) as the number of test locations
increases.
"""
__author__ = 'wittawat'
import kgof
import kgof.data as data
import kgof.glo as glo
import kgof.density as density
import kgof.goftest as gof
import kgof.util as util
import kgof.kernel as kernel
# need independent_jobs package
# https://github.com/karlnapf/independent-jobs
# The independent_jobs and kgof have to be in the global search path (.bashrc)
import independent_jobs as inj
from independent_jobs.jobs.IndependentJob import IndependentJob
from independent_jobs.results.SingleResult import SingleResult
from independent_jobs.aggregators.SingleResultAggregator import SingleResultAggregator
from independent_jobs.engines.BatchClusterParameters import BatchClusterParameters
from independent_jobs.engines.SerialComputationEngine import SerialComputationEngine
from independent_jobs.engines.SlurmComputationEngine import SlurmComputationEngine
from independent_jobs.tools.Log import logger
import logging
import math
#import numpy as np
import autograd.numpy as np
import os
import sys
import time
"""
All the job functions return a dictionary with the following keys:
- goftest: test object. (may or may not return)
- test_result: the result from calling perform_test(te).
- time_secs: run time in seconds
"""
def job_fssdq_med(p, data_source, tr, te, r, J, null_sim=None):
"""
FSSD test with a Gaussian kernel, where the test locations are randomized,
and the Gaussian width is set with the median heuristic. Use full sample.
No training/testing splits.
p: an UnnormalizedDensity
data_source: a DataSource
tr, te: Data
r: trial number (positive integer)
"""
if null_sim is None:
null_sim = gof.FSSDH0SimCovObs(n_simulate=2000, seed=r)
# full data
data = tr + te
X = data.data()
with util.ContextTimer() as t:
# median heuristic
med = util.meddistance(X, subsample=1000)
k = kernel.KGauss(med**2)
V = util.fit_gaussian_draw(X, J, seed=r+1)
fssd_med = gof.FSSD(p, k, V, null_sim=null_sim, alpha=alpha)
fssd_med_result = fssd_med.perform_test(data)
return { 'test_result': fssd_med_result, 'time_secs': t.secs}
def job_fssdq_opt(p, data_source, tr, te, r, J, null_sim=None):
"""
FSSD with optimization on tr. Test on te. Use a Gaussian kernel.
"""
if null_sim is None:
null_sim = gof.FSSDH0SimCovObs(n_simulate=2000, seed=r)
Xtr = tr.data()
with util.ContextTimer() as t:
# Use grid search to initialize the gwidth
n_gwidth_cand = 5
gwidth_factors = 2.0**np.linspace(-3, 3, n_gwidth_cand)
med2 = util.meddistance(Xtr, 1000)**2
k = kernel.KGauss(med2*2)
# fit a Gaussian to the data and draw to initialize V0
V0 = util.fit_gaussian_draw(Xtr, J, seed=r+1, reg=1e-6)
list_gwidth = np.hstack( ( (med2)*gwidth_factors ) )
besti, objs = gof.GaussFSSD.grid_search_gwidth(p, tr, V0, list_gwidth)
gwidth = list_gwidth[besti]
assert util.is_real_num(gwidth), 'gwidth not real. Was %s'%str(gwidth)
assert gwidth > 0, 'gwidth not positive. Was %.3g'%gwidth
logging.info('After grid search, gwidth=%.3g'%gwidth)
ops = {
'reg': 1e-2,
'max_iter': 50,
'tol_fun': 1e-4,
'disp': True,
'locs_bounds_frac': 10.0,
'gwidth_lb': 1e-1,
'gwidth_ub': 1e3,
}
V_opt, gwidth_opt, info = gof.GaussFSSD.optimize_locs_widths(p, tr,
gwidth, V0, **ops)
# Use the optimized parameters to construct a test
k_opt = kernel.KGauss(gwidth_opt)
fssd_opt = gof.FSSD(p, k_opt, V_opt, null_sim=null_sim, alpha=alpha)
fssd_opt_result = fssd_opt.perform_test(te)
return {'test_result': fssd_opt_result, 'time_secs': t.secs,
'goftest': fssd_opt, 'opt_info': info,
}
def job_fssdp_opt(p, data_source, tr, te, r, J):
"""
The suffix p means that p is sampled to get a sample for computing the
covariance matrix under H0.
"""
null_sim = gof.FSSDH0SimCovDraw(n_draw=2000, n_simulate=2000, seed=r)
return job_fssdq_opt(p, data_source, tr, te, r, J, null_sim=null_sim)
# Define our custom Job, which inherits from base class IndependentJob
class Ex3Job(IndependentJob):
def __init__(self, aggregator, p, data_source,
prob_label, rep, job_func, n_locs):
#walltime = 60*59*24
walltime = 60*59
memory = int(tr_proportion*sample_size*1e-2) + 50
IndependentJob.__init__(self, aggregator, walltime=walltime,
memory=memory)
# p: an UnnormalizedDensity
self.p = p
self.data_source = data_source
self.prob_label = prob_label
self.rep = rep
self.job_func = job_func
self.n_locs = n_locs
# we need to define the abstract compute method. It has to return an instance
# of JobResult base class
def compute(self):
p = self.p
data_source = self.data_source
r = self.rep
n_locs = self.n_locs
job_func = self.job_func
# sample_size is a global variable
data = data_source.sample(sample_size, seed=r)
with util.ContextTimer() as t:
tr, te = data.split_tr_te(tr_proportion=tr_proportion, seed=r+21 )
prob_label = self.prob_label
logger.info("computing. %s. prob=%s, r=%d,\
J=%d"%(job_func.__name__, prob_label, r, n_locs))
job_result = job_func(p, data_source, tr, te, r, n_locs)
# create ScalarResult instance
result = SingleResult(job_result)
# submit the result to my own aggregator
self.aggregator.submit_result(result)
func_name = job_func.__name__
logger.info("done. ex2: %s, prob=%s, r=%d, J=%d. Took: %.3g s "%(func_name,
prob_label, r, n_locs, t.secs))
# save result
fname = '%s-%s-n%d_r%d_J%d_a%.3f_trp%.2f.p' \
%(prob_label, func_name, sample_size, r, n_locs, alpha,
tr_proportion)
glo.ex_save_result(ex, job_result, prob_label, fname)
# This import is needed so that pickle knows about the class Ex3Job.
# pickle is used when collecting the results from the submitted jobs.
from kgof.ex.ex3_vary_nlocs import Ex3Job
from kgof.ex.ex3_vary_nlocs import job_fssdq_med
from kgof.ex.ex3_vary_nlocs import job_fssdq_opt
from kgof.ex.ex3_vary_nlocs import job_fssdp_opt
#--- experimental setting -----
ex = 3
# sample size = n (the training and test sizes are n/2)
sample_size = 500
# number of test locations / test frequencies J
alpha = 0.05
tr_proportion = 0.5
# repetitions for each parameter setting
reps = 300
# list of number of test locations/frequencies
#Js = [5, 10, 15, 20, 25]
#Js = range(2, 6+1)
#Js = [2**x for x in range(5)]
Js = [2, 8, 32, 96, 384 ]
#Js = [2, 8, 32]
method_job_funcs = [ job_fssdq_med, job_fssdq_opt,
#job_fssdp_opt,
]
# If is_rerun==False, do not rerun the experiment if a result file for the current
# setting already exists.
is_rerun = False
#---------------------------
def gaussbern_rbm_tuple(var, dx=50, dh=10, n=sample_size):
"""
Get a tuple of Gaussian-Bernoulli RBM problems.
We follow the parameter settings as described in section 6 of Liu et al.,
2016.
- var: Gaussian noise variance for perturbing B.
- dx: observed dimension
- dh: latent dimension
Return p, a DataSource
"""
with util.NumpySeedContext(seed=1000):
B = np.random.randint(0, 2, (dx, dh))*2 - 1.0
b = np.random.randn(dx)
c = np.random.randn(dh)
p = density.GaussBernRBM(B, b, c)
B_perturb = B + np.random.randn(dx, dh)*np.sqrt(var)
gb_rbm = data.DSGaussBernRBM(B_perturb, b, c, burnin=50)
return p, gb_rbm
def get_pqsource(prob_label):
"""
Return (p, ds), a tuple of
- p: a Density representing the distribution p
- ds: a DataSource, each corresponding to one parameter setting.
The DataSource generates sample from q.
"""
prob2tuples = {
# H0 is true. vary d. P = Q = N(0, I)
'sg5': (density.IsotropicNormal(np.zeros(5), 1),
data.DSIsotropicNormal(np.zeros(5), 1) ),
# P = N(0, I), Q = N( (0.2,..0), I)
'gmd5': (density.IsotropicNormal(np.zeros(5), 1),
data.DSIsotropicNormal(np.hstack((0.2, np.zeros(4))), 1) ),
'gmd1': (density.IsotropicNormal(np.zeros(1), 1),
data.DSIsotropicNormal(np.ones(1)*0.2, 1) ),
# P = N(0, I), Q = N( (1,..0), I)
'gmd100': (density.IsotropicNormal(np.zeros(100), 1),
data.DSIsotropicNormal(np.hstack((1, np.zeros(99))), 1) ),
# Gaussian variance difference problem. Only the variance
# of the first dimenion differs. d varies.
'gvd5': (density.Normal(np.zeros(5), np.eye(5) ),
data.DSNormal(np.zeros(5), np.diag(np.hstack((2, np.ones(4)))) )),
'gvd10': (density.Normal(np.zeros(10), np.eye(10) ),
data.DSNormal(np.zeros(10), np.diag(np.hstack((2, np.ones(9)))) )),
# Gaussian Bernoulli RBM. dx=50, dh=10. H0 is true
'gbrbm_dx50_dh10_v0': gaussbern_rbm_tuple(0,
dx=50, dh=10, n=sample_size),
# Gaussian Bernoulli RBM. dx=5, dh=3. H0 is true
'gbrbm_dx5_dh3_v0': gaussbern_rbm_tuple(0,
dx=5, dh=3, n=sample_size),
# Gaussian Bernoulli RBM. dx=50, dh=10.
'gbrbm_dx50_dh10_v1em3': gaussbern_rbm_tuple(1e-3,
dx=50, dh=10, n=sample_size),
# Gaussian Bernoulli RBM. dx=5, dh=3. Perturb with noise = 1e-2.
'gbrbm_dx5_dh3_v5em3': gaussbern_rbm_tuple(5e-3,
dx=5, dh=3, n=sample_size),
# Gaussian mixture of two components. Uniform mixture weights.
# p = 0.5*N(0, 1) + 0.5*N(3, 0.01)
# q = 0.5*N(-3, 0.01) + 0.5*N(0, 1)
'gmm_d1': (
density.IsoGaussianMixture(np.array([[0], [3.0]]), np.array([1, 0.01]) ),
data.DSIsoGaussianMixture(np.array([[-3.0], [0]]), np.array([0.01, 1]) )
),
# p = N(0, 1)
# q = 0.1*N([-10, 0,..0], 0.001) + 0.9*N([0,0,..0], 1)
'g_vs_gmm_d5': (
density.IsotropicNormal(np.zeros(5), 1),
data.DSIsoGaussianMixture(
np.vstack(( np.hstack((0.0, np.zeros(4))), np.zeros(5) )),
np.array([0.0001, 1]), pmix=[0.1, 0.9] )
),
'g_vs_gmm_d2': (
density.IsotropicNormal(np.zeros(2), 1),
data.DSIsoGaussianMixture(
np.vstack(( np.hstack((0.0, np.zeros(1))), np.zeros(2) )),
np.array([0.01, 1]), pmix=[0.1, 0.9] )
),
'g_vs_gmm_d1': (
density.IsotropicNormal(np.zeros(1), 1),
data.DSIsoGaussianMixture(np.array([[0.0], [0]]),
np.array([0.01, 1]), pmix=[0.1, 0.9] )
),
}
if prob_label not in prob2tuples:
raise ValueError('Unknown problem label. Need to be one of %s'%str(list(prob2tuples.keys())) )
return prob2tuples[prob_label]
def run_problem(prob_label):
"""Run the experiment"""
p, ds = get_pqsource(prob_label)
# /////// submit jobs //////////
# create folder name string
#result_folder = glo.result_folder()
from kgof.config import expr_configs
tmp_dir = expr_configs['scratch_path']
foldername = os.path.join(tmp_dir, 'kgof_slurm', 'e%d'%ex)
logger.info("Setting engine folder to %s" % foldername)
# create parameter instance that is needed for any batch computation engine
logger.info("Creating batch parameter instance")
batch_parameters = BatchClusterParameters(
foldername=foldername, job_name_base="e%d_"%ex, parameter_prefix="")
# Use the following line if Slurm queue is not used.
#engine = SerialComputationEngine()
engine = SlurmComputationEngine(batch_parameters)
n_methods = len(method_job_funcs)
# repetitions x len(Js) x #methods
aggregators = np.empty((reps, len(Js), n_methods ), dtype=object)
for r in range(reps):
for ji, J in enumerate(Js):
for mi, f in enumerate(method_job_funcs):
# name used to save the result
func_name = f.__name__
fname = '%s-%s-n%d_r%d_J%d_a%.3f_trp%.2f.p' \
%(prob_label, func_name, sample_size, r, J, alpha,
tr_proportion)
if not is_rerun and glo.ex_file_exists(ex, prob_label, fname):
logger.info('%s exists. Load and return.'%fname)
job_result = glo.ex_load_result(ex, prob_label, fname)
sra = SingleResultAggregator()
sra.submit_result(SingleResult(job_result))
aggregators[r, ji, mi] = sra
else:
# result not exists or rerun
# p: an UnnormalizedDensity object
job = Ex3Job(SingleResultAggregator(), p, ds, prob_label,
r, f, J)
agg = engine.submit_job(job)
aggregators[r, ji, mi] = agg
# let the engine finish its business
logger.info("Wait for all call in engine")
engine.wait_for_all()
# ////// collect the results ///////////
logger.info("Collecting results")
job_results = np.empty((reps, len(Js), n_methods), dtype=object)
for r in range(reps):
for ji, J in enumerate(Js):
for mi, f in enumerate(method_job_funcs):
logger.info("Collecting result (%s, r=%d, J=%rd)" %
(f.__name__, r, J))
# let the aggregator finalize things
aggregators[r, ji, mi].finalize()
# aggregators[i].get_final_result() returns a SingleResult instance,
# which we need to extract the actual result
job_result = aggregators[r, ji, mi].get_final_result().result
job_results[r, ji, mi] = job_result
#func_names = [f.__name__ for f in method_job_funcs]
#func2labels = exglobal.get_func2label_map()
#method_labels = [func2labels[f] for f in func_names if f in func2labels]
# save results
results = {'job_results': job_results, 'data_source': ds,
'alpha': alpha, 'repeats': reps, 'Js': Js,
'p': p,
'tr_proportion': tr_proportion,
'method_job_funcs': method_job_funcs, 'prob_label': prob_label,
'sample_size': sample_size,
}
# class name
fname = 'ex%d-%s-me%d_n%d_rs%d_Jmi%d_Jma%d_a%.3f_trp%.2f.p' \
%(ex, prob_label, n_methods, sample_size, reps, min(Js),
max(Js), alpha, tr_proportion)
glo.ex_save_result(ex, results, fname)
logger.info('Saved aggregated results to %s'%fname)
def main():
if len(sys.argv) != 2:
print(('Usage: %s problem_label'%sys.argv[0]))
sys.exit(1)
prob_label = sys.argv[1]
run_problem(prob_label)
if __name__ == '__main__':
main()
|
the-stack_106_25511 | #!/usr/bin/env python
import sys
import numpy as np
from frovedis.exrpc.server import *
from frovedis.matrix.dense import FrovedisBlockcyclicMatrix
from frovedis.matrix.wrapper import PBLAS
# initializing the Frovedis server
argvs = sys.argv
argc = len(argvs)
if (argc < 2):
print ('Please give frovedis_server calling command as the first argument \n(e.g. "mpirun -np 2 /opt/nec/frovedis/ve/bin/frovedis_server")')
quit()
FrovedisServer.initialize(argvs[1])
# sample numpy matrices creation
m1 = np.matrix([[1],[2],[3],[4]], dtype=np.float64) # 4x1
m2 = np.matrix([[5],[6],[7],[8]], dtype=np.float64) # 4x1
m3 = np.matrix([[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]],
dtype=np.float64) # 4x4: eye(I)
m4 = np.matrix([[1,2,3,4],[5,6,7,8],[8,7,6,5],[4,3,2,1]],
dtype=np.float64) # 4x4
# Creating Frovedis server side blockcyclic matrics from numpy matrices
bcm1 = FrovedisBlockcyclicMatrix(m1) # blockcyclic vector (x)
bcm2 = FrovedisBlockcyclicMatrix(m2) # blockcyclic vector (y)
bcm3 = FrovedisBlockcyclicMatrix(m3) # blockcyclic matrix (m)
bcm4 = FrovedisBlockcyclicMatrix(m4) # blockcyclic matrix (n)
# --- swap demo ---
PBLAS.swap(bcm1,bcm2)
print ("after swap (x <=> y):")
bcm1.get_rowmajor_view()
bcm2.get_rowmajor_view()
# --- scal demo ---
PBLAS.scal(bcm1,2)
print ("after scal (x = 2x):")
bcm1.get_rowmajor_view()
# --- axpy demo ---
PBLAS.axpy(bcm1,bcm2,2)
print ("after axpy (y = 2x + y):")
bcm1.get_rowmajor_view()
bcm2.get_rowmajor_view()
# --- copy demo ---
PBLAS.copy(bcm1,bcm2)
print ("after copy (y = x):")
bcm1.get_rowmajor_view()
bcm2.get_rowmajor_view()
# --- dot demo ---
r1 = PBLAS.dot(bcm1,bcm2) # dot product on transformed blockcyclic vectors
print ("x.y = " + str(r1))
# short-cut version (blockcyclic matrix creation and deletion will happen automatically)
r2 = PBLAS.dot(m1,m2) # dot product on original numpy data
print ("x.y = " + str(r2))
# --- nrm2 demo ---
r = PBLAS.nrm2(bcm1)
print ("norm(x) = " + str(r))
r = PBLAS.nrm2(bcm2)
print ("norm(y) = " + str(r))
# --- gemv (matrix-vector multiplication) demo ---
print ("m*x: ")
gemv_ret = PBLAS.gemv(bcm3,bcm1)
gemv_ret.get_rowmajor_view()
gemv_ret.release()
# --- ger (vector-vector multiplication) demo ---
print ("xT*y: ")
ger_ret = PBLAS.ger(bcm1,bcm2) # (4x4) = (4x1) * (1x4)
ger_ret.get_rowmajor_view()
ger_ret.release()
# --- gemm (matrix-matrix multiplication) demo ---
print ("m*n: ")
gemm_ret = PBLAS.gemm(bcm3,bcm4)
gemm_ret.get_rowmajor_view()
gemm_ret.release()
# --- geadd (matrix-matrix addition) demo ---
print ("n = m + n: ")
PBLAS.geadd(bcm3,bcm4)
bcm4.get_rowmajor_view()
# Releasing Frovedis side blockcyclic matrices
bcm1.release()
bcm2.release()
bcm3.release()
bcm4.release()
# Shutting down the Frovedis server
FrovedisServer.shut_down()
|
the-stack_106_25513 |
import socket
import ure
def http_get(url):
_, _, host, path = url.split('/', 3)
print(path)
addr = socket.getaddrinfo(host, 80)[0][-1]
s = socket.socket()
s.connect(addr)
s.send(bytes('GET /%s HTTP/1.0\r\nHost: %s\r\n\r\n' % (path, host), 'utf8'))
while True:
data = s.recv(100)
if data:
# print(str(data, 'utf8'), end='')
result = str(data)
print(result)
            result = ure.search('<em>(.*)</em>', result)
            if result != None:
                print(result.group(0))
else:
break
s.close()
http_get('https://tianqi.moji.com/weather/china/shanghai/shanghai')
'''
result = ure.match('<em>(.*)</em>', r'<em>29\xc2\xb0</em>')
print(result.group(0))
''' |
the-stack_106_25514 | """WordOps Swap Creation"""
import os
import psutil
from wo.core.aptget import WOAptGet
from wo.core.fileutils import WOFileUtils
from wo.core.logging import Log
from wo.core.shellexec import WOShellExec
class WOSwap():
"""Manage Swap"""
def __init__():
"""Initialize """
pass
def add(self):
"""Swap addition with WordOps"""
# Get System RAM and SWAP details
wo_ram = psutil.virtual_memory().total / (1024 * 1024)
wo_swap = psutil.swap_memory().total / (1024 * 1024)
if wo_ram < 512:
if wo_swap < 1000:
Log.info(self, "Adding SWAP file, please wait...")
# Install dphys-swapfile
WOAptGet.update(self)
WOAptGet.install(self, ["dphys-swapfile"])
# Stop service
WOShellExec.cmd_exec(self, "service dphys-swapfile stop")
# Remove Default swap created
WOShellExec.cmd_exec(self, "/sbin/dphys-swapfile uninstall")
# Modify Swap configuration
if os.path.isfile("/etc/dphys-swapfile"):
WOFileUtils.searchreplace(self, "/etc/dphys-swapfile",
"#CONF_SWAPFILE=/var/swap",
"CONF_SWAPFILE=/wo-swapfile")
WOFileUtils.searchreplace(self, "/etc/dphys-swapfile",
"#CONF_MAXSWAP=2048",
"CONF_MAXSWAP=1024")
WOFileUtils.searchreplace(self, "/etc/dphys-swapfile",
"#CONF_SWAPSIZE=",
"CONF_SWAPSIZE=1024")
else:
with open("/etc/dphys-swapfile", 'w') as conffile:
conffile.write("CONF_SWAPFILE=/wo-swapfile\n"
"CONF_SWAPSIZE=1024\n"
"CONF_MAXSWAP=1024\n")
# Create swap file
WOShellExec.cmd_exec(self, "service dphys-swapfile start")
|
the-stack_106_25515 | '''
For test of the trained model
'''
import os
import time
import sys
import shutil
import random
from time import strftime
from argparse import ArgumentParser
import numpy as np
import torch
import torch.utils.data
import torch.nn.functional as F
torch.multiprocessing.set_sharing_strategy('file_system')
from PIL import Image
from subprocess import call
from data_dynamic import PartNetPartDataset
import utils
import render_using_blender as render_utils
from quaternion import qrot
import ipdb
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, '../utils'))
def test(conf):
data_features = ['part_pcs', 'part_poses', 'part_valids', 'shape_id', 'part_ids', 'contact_points', 'sym', 'pairs', 'match_ids']
# data_features = ['part_pcs', 'part_poses', 'part_valids', 'shape_id', 'part_ids', 'match_ids', 'contact_points']
# data_features = ['part_pcs', 'part_poses', 'part_valids', 'shape_id', 'part_ids', 'match_ids', 'pairs']
val_dataset = PartNetPartDataset(conf.category, conf.data_dir, conf.val_data_fn, data_features, \
max_num_part=20, level=conf.level)
#utils.printout(conf.flog, str(val_dataset))
val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=conf.batch_size, shuffle=False,
pin_memory=True, \
num_workers=0, drop_last=True,
collate_fn=utils.collate_feats_with_none,
worker_init_fn=utils.worker_init_fn)
model_def = utils.get_model_module(conf.model_version)
network = model_def.Network(conf)
network.load_state_dict(torch.load(conf.model_dir))
#network = torch.load(conf.model_dir)
#ipdb.set_trace()
# utils.printout(conf.flog, '\n' + str(network) + '\n')
models = [network]
model_names = ['network']
# create optimizers
network_opt = torch.optim.Adam(network.parameters(), lr=conf.lr, weight_decay=conf.weight_decay)
optimizers = [network_opt]
optimizer_names = ['network_opt']
# learning rate scheduler
network_lr_scheduler = torch.optim.lr_scheduler.StepLR(network_opt, step_size=conf.lr_decay_every,
gamma=conf.lr_decay_by)
# send parameters to device
for m in models:
m.to(conf.device)
for o in optimizers:
utils.optimizer_to_device(o, conf.device)
# start training
start_time = time.time()
#last_checkpoint_step = None
last_val_console_log_step = None
val_num_batch = len(val_dataloader)
# train for every epoch
#for in range(conf.epochs):
# if not conf.no_console_log:
# utils.printout(conf.flog, f'training run {conf.exp_name}')
# utils.printout(conf.flog, header)
val_batches = enumerate(val_dataloader, 0)
val_fraction_done = 0.0
val_batch_ind = -1
sum_part_cd_loss = 0
sum_shape_cd_loss = 0
sum_contact_point_loss = 0
total_acc_num = 0
sum_resample_shape_cd_loss = 0
total_valid_num = 0
total_max_count = 0
total_total_num = 0
# validate one batch
while val_batch_ind + 1 < val_num_batch:
val_batch_ind, val_batch = next(val_batches)
val_fraction_done = (val_batch_ind + 1) / val_num_batch
if len(val_batch)==0:
continue
#val_step = (epoch + val_fraction_done) * train_num_batch - 1
# log_console = not conf.no_console_log and (last_val_console_log_step is None or \
# val_step - last_val_console_log_step >= conf.console_log_interval)
# if log_console:
# last_val_console_log_step = val_step
# set models to evaluation mode
for m in models:
m.eval()
#ipdb.set_trace()
with torch.no_grad():
# forward pass (including logging)
part_cd_loss, shape_cd_loss, contact_point_loss, acc_num, valid_num, max_count, total_num = forward(batch=val_batch, data_features=data_features, network=network, conf=conf, is_val=True, \
batch_ind=val_batch_ind, num_batch=val_num_batch,
start_time=start_time, \
log_console=1, log_tb=not conf.no_tb_log, tb_writer=None,
lr=network_opt.param_groups[0]['lr'])
sum_part_cd_loss += part_cd_loss
sum_shape_cd_loss += shape_cd_loss
sum_contact_point_loss += contact_point_loss
total_acc_num += acc_num
total_valid_num += valid_num
total_max_count += max_count
total_total_num += total_num
total_max_count = total_max_count.float()
total_total_num = float(total_total_num)
total_shape_loss = sum_shape_cd_loss / val_num_batch
total_part_loss = sum_part_cd_loss / val_num_batch
total_contact_loss = sum_contact_point_loss / val_num_batch
total_acc = total_acc_num / total_valid_num
total_contact = total_max_count / total_total_num
print('total_shape_loss:',total_shape_loss.item())
print('total_part_loss:',total_part_loss.item())
print('total_contact_loss:', total_contact_loss.item())
print('total_acc:',100 * total_acc.item())
print('total_contact', total_contact)
print(total_max_count, total_total_num)
def forward(batch, data_features, network, conf, \
is_val=False, step=None, epoch=None, batch_ind=0, num_batch=1, start_time=0, \
log_console=False, log_tb=False, tb_writer=None, lr=None):
# prepare input
input_part_pcs = torch.cat(batch[data_features.index('part_pcs')], dim=0).to(conf.device) # B x P x N x 3
input_part_valids = torch.cat(batch[data_features.index('part_valids')], dim=0).to(conf.device) # B x P
input_part_pairs = torch.cat(batch[data_features.index('pairs')], dim=0).to(conf.device)
batch_size = input_part_pcs.shape[0]
num_part = input_part_pcs.shape[1]
num_point = input_part_pcs.shape[2]
part_ids = torch.cat(batch[data_features.index('part_ids')], dim=0).to(conf.device) # B x P
match_ids=batch[data_features.index('match_ids')]
gt_part_poses = torch.cat(batch[data_features.index('part_poses')], dim=0).to(conf.device) # B x P x (3 + 4)
contact_points = torch.cat(batch[data_features.index("contact_points")], dim=0).to(conf.device)
# input_part_pairs = torch.squeeze(contact_points[:, :, :, :1], dim=3)
# cope with the sym_info
sym_info = torch.cat(batch[data_features.index("sym")], dim=0) # B x P x 3
# get instance label
instance_label = torch.zeros(batch_size, num_part, num_part).to(conf.device)
same_class_list = []
for i in range(batch_size):
num_class = [ 0 for i in range(160) ]
cur_same_class_list = [[] for i in range(160)]
for j in range(num_part):
cur_class = int(part_ids[i][j])
if j < input_part_valids[i].sum():
cur_same_class_list[cur_class].append(j)
if cur_class == 0: continue
cur_instance = int(num_class[cur_class])
instance_label[i][j][cur_instance] = 1
num_class[int(part_ids[i][j])] += 1
for i in range(cur_same_class_list.count([])):
cur_same_class_list.remove([])
same_class_list.append(cur_same_class_list)
repeat_times = 10
array_trans_l2_loss_per_data = []
array_rot_l2_loss_per_data = []
array_rot_cd_loss_per_data = []
array_total_cd_loss_per_data = []
array_shape_cd_loss_per_data = []
array_contact_point_loss_per_data = []
array_acc = []
array_pred_part_poses = []
for repeat_ind in range(repeat_times):
# forward through the network
total_pred_part_poses = network(conf, input_part_pairs.float(), input_part_valids.float(),
input_part_pcs.float(), instance_label, same_class_list) # B x P x P, B x P, B x P x N x 3
# for iter_ind in range(conf.iter):
pred_part_poses = total_pred_part_poses[conf.iter - 1]
# pred_part_poses = gt_part_poses
array_pred_part_poses.append(pred_part_poses)
# matching loss
for ind in range(len(batch[0])):
cur_match_ids = match_ids[ind]
for i in range(1, 10):
need_to_match_part = []
for j in range(conf.max_num_part):
if cur_match_ids[j] == i:
need_to_match_part.append(j)
if len(need_to_match_part) == 0: break
cur_input_pts = input_part_pcs[ind, need_to_match_part]
cur_pred_poses = pred_part_poses[ind, need_to_match_part]
cur_pred_centers = cur_pred_poses[:, :3]
cur_pred_quats = cur_pred_poses[:, 3:]
cur_gt_part_poses = gt_part_poses[ind, need_to_match_part]
cur_gt_centers = cur_gt_part_poses[:, :3]
cur_gt_quats = cur_gt_part_poses[:, 3:]
matched_pred_ids, matched_gt_ids = network.linear_assignment(cur_input_pts, cur_pred_centers,
cur_pred_quats, cur_gt_centers,
cur_gt_quats)
match_pred_part_poses = pred_part_poses[ind, need_to_match_part][matched_pred_ids]
pred_part_poses[ind, need_to_match_part] = match_pred_part_poses
match_gt_part_poses = gt_part_poses[ind, need_to_match_part][matched_gt_ids]
gt_part_poses[ind, need_to_match_part] = match_gt_part_poses
# prepare gt
input_part_pcs = input_part_pcs[:, :, :1000, :]
# for each type of loss, compute losses per data
trans_l2_loss_per_data = network.get_trans_l2_loss(pred_part_poses[:, :, :3], gt_part_poses[:, :, :3],
input_part_valids) # B
rot_l2_loss_per_data = network.get_rot_l2_loss(input_part_pcs, pred_part_poses[:, :, 3:],
gt_part_poses[:, :, 3:], input_part_valids) # B
rot_cd_loss_per_data = network.get_rot_cd_loss(input_part_pcs, pred_part_poses[:, :, 3:],
gt_part_poses[:, :, 3:], input_part_valids, conf.device) # B
# # for each type of loss, compute avg loss per batch
# trans_l2_loss = trans_l2_loss_per_data.mean()
# rot_l2_loss = rot_l2_loss_per_data.mean()
# rot_cd_loss = rot_cd_loss_per_data.mean()
# # compute total loss
# if iter_ind == 0:
# total_loss = trans_l2_loss * conf.loss_weight_trans_l2 + \
# rot_l2_loss * conf.loss_weight_rot_l2 + \
# rot_cd_loss * conf.loss_weight_rot_cd
# total_trans_l2_loss = trans_l2_loss
# total_rot_l2_loss = rot_l2_loss
# total_rot_cd_loss = rot_cd_loss
# else:
# total_loss += trans_l2_loss * conf.loss_weight_trans_l2 + \
# rot_l2_loss * conf.loss_weight_rot_l2 + \
# rot_cd_loss * conf.loss_weight_rot_cd
# total_trans_l2_loss += trans_l2_loss
# total_rot_l2_loss += rot_l2_loss
# total_rot_cd_loss += rot_cd_loss
# prepare gt
input_part_pcs = input_part_pcs[:, :, :1000, :]
# if iter_ind == 2:
total_cd_loss_per_data, acc = network.get_total_cd_loss(input_part_pcs, pred_part_poses[:, :, 3:],
gt_part_poses[:, :, 3:],
input_part_valids, pred_part_poses[:, :, :3],
gt_part_poses[:, :, :3], conf.device) # B)
# total_cd_loss = total_cd_loss_per_data.mean()
shape_cd_loss_per_data = network.get_shape_cd_loss(input_part_pcs, pred_part_poses[:, :, 3:],
gt_part_poses[:, :, 3:],
input_part_valids, pred_part_poses[:, :, :3],
gt_part_poses[:, :, :3], conf.device)
# shape_cd_loss = shape_cd_loss_per_data.mean()
contact_point_loss_per_data, count, total_num = network.get_contact_point_loss(pred_part_poses[:, :, :3],
pred_part_poses[:, :, 3:], contact_points, sym_info)
array_trans_l2_loss_per_data.append(trans_l2_loss_per_data)
array_rot_l2_loss_per_data.append(rot_l2_loss_per_data)
array_rot_cd_loss_per_data.append(rot_cd_loss_per_data)
array_total_cd_loss_per_data.append(total_cd_loss_per_data)
array_shape_cd_loss_per_data.append(shape_cd_loss_per_data)
array_contact_point_loss_per_data.append(contact_point_loss_per_data)
# B x P -> B
acc = torch.tensor(acc)
acc = acc.sum(-1).float() # B
valid_number = input_part_valids.sum(-1).float().cpu() # B
acc_rate = acc / valid_number
array_acc.append(acc_rate)
count = torch.tensor(count)
if repeat_ind == 0:
res_total_cd = total_cd_loss_per_data
res_shape_cd = shape_cd_loss_per_data
res_contact_point = contact_point_loss_per_data
res_acc = acc
res_count = count
else:
res_total_cd = res_total_cd.min(total_cd_loss_per_data)
res_shape_cd = res_shape_cd.min(shape_cd_loss_per_data)
res_contact_point = res_contact_point.min(contact_point_loss_per_data)
res_acc = res_acc.max(acc) # B
res_count = res_count.max(count)
shape_cd_loss = res_shape_cd.mean()
total_cd_loss = res_total_cd.mean()
contact_point_loss = res_contact_point.mean()
acc_num = res_acc.sum() # how many parts are right in total in a certain batch
valid_num = input_part_valids.sum() # how many parts in total in a certain batch
# display information
data_split = 'train'
if is_val:
data_split = 'val'
with torch.no_grad():
# gen visu
is_val = False
if is_val and (not conf.no_visu):
visu_dir = os.path.join(conf.exp_dir, 'val_visu')
out_dir = os.path.join(visu_dir, 'test_196')
input_part_pcs_dir = os.path.join(out_dir, 'input_part_pcs')
gt_assembly_dir = os.path.join(out_dir, 'gt_assembly')
pred_assembly_dir = os.path.join(out_dir, 'pred_assembly')
info_dir = os.path.join(out_dir, 'info')
if batch_ind == 0:
# create folders
os.mkdir(out_dir)
os.mkdir(input_part_pcs_dir)
os.mkdir(gt_assembly_dir)
os.mkdir(pred_assembly_dir)
os.mkdir(info_dir)
if batch_ind < conf.num_batch_every_visu:
#utils.printout(conf.flog, 'Visualizing ...')
for repeat_ind in range(repeat_times):
pred_center = array_pred_part_poses[repeat_ind][:, :, :3]
gt_center = gt_part_poses[:, :, :3]
# compute pred_pts and gt_pts
# import ipdb; ipdb.set_trace()
pred_pts = qrot(array_pred_part_poses[repeat_ind][:, :, 3:].unsqueeze(2).repeat(1, 1, num_point, 1),
input_part_pcs) + pred_center.unsqueeze(2).repeat(1, 1, num_point, 1)
gt_pts = qrot(gt_part_poses[:, :, 3:].unsqueeze(2).repeat(1, 1, num_point, 1),
input_part_pcs) + gt_center.unsqueeze(2).repeat(1, 1, num_point, 1)
for i in range(batch_size):
fn = 'data-%03d-%03d.png' % (batch_ind * batch_size + i, repeat_ind)
cur_input_part_cnt = input_part_valids[i].sum().item()
# print(cur_input_part_cnt)
cur_input_part_cnt = int(cur_input_part_cnt)
cur_input_part_pcs = input_part_pcs[i, :cur_input_part_cnt]
cur_gt_part_poses = gt_part_poses[i, :cur_input_part_cnt]
cur_pred_part_poses = array_pred_part_poses[repeat_ind][i, :cur_input_part_cnt]
pred_part_pcs = qrot(cur_pred_part_poses[:, 3:].unsqueeze(1).repeat(1, num_point, 1),
cur_input_part_pcs) + \
cur_pred_part_poses[:, :3].unsqueeze(1).repeat(1, num_point, 1)
gt_part_pcs = qrot(cur_gt_part_poses[:, 3:].unsqueeze(1).repeat(1, num_point, 1),
cur_input_part_pcs) + \
cur_gt_part_poses[:, :3].unsqueeze(1).repeat(1, num_point, 1)
part_pcs_to_visu = cur_input_part_pcs.cpu().detach().numpy()
render_utils.render_part_pts(os.path.join(BASE_DIR, input_part_pcs_dir, fn), part_pcs_to_visu,
blender_fn='object_centered.blend')
part_pcs_to_visu = pred_part_pcs.cpu().detach().numpy()
render_utils.render_part_pts(os.path.join(BASE_DIR, pred_assembly_dir, fn), part_pcs_to_visu,
blender_fn='object_centered.blend')
part_pcs_to_visu = gt_part_pcs.cpu().detach().numpy()
render_utils.render_part_pts(os.path.join(BASE_DIR, gt_assembly_dir, fn), part_pcs_to_visu,
blender_fn='object_centered.blend')
with open(os.path.join(info_dir, fn.replace('.png', '.txt')), 'w') as fout:
fout.write('shape_id: %s\n' % batch[data_features.index('shape_id')][i])
fout.write('num_part: %d\n' % cur_input_part_cnt)
fout.write('trans_l2_loss: %f\n' % array_trans_l2_loss_per_data[repeat_ind][i].item())
fout.write('rot_l2_loss: %f\n' % array_rot_l2_loss_per_data[repeat_ind][i].item())
fout.write('rot_cd_loss: %f\n' % array_rot_cd_loss_per_data[repeat_ind][i].item())
fout.write('total_cd_loss: %f\n' % array_total_cd_loss_per_data[repeat_ind][i].item())
fout.write('shape_cd_loss: %f\n' % array_shape_cd_loss_per_data[repeat_ind][i].item())
fout.write('contact_point_loss: %f\n' % array_contact_point_loss_per_data[repeat_ind][i].item())
fout.write('part_accuracy: %f\n' % array_acc[repeat_ind][i].item())
# if batch_ind == conf.num_batch_every_visu - 1:
# # visu html
# utils.printout(conf.flog, 'Generating html visualization ...')
# sublist = 'input_part_pcs,gt_assembly,pred_assembly,info'
# cmd = 'cd %s && python %s . 10 htmls %s %s > /dev/null' % (out_dir, os.path.join(BASE_DIR, '../utils/gen_html_hierarchy_local.py'), sublist, sublist)
# call(cmd, shell=True)
# utils.printout(conf.flog, 'DONE')
return total_cd_loss, shape_cd_loss, contact_point_loss, acc_num, valid_num, res_count, total_num
if __name__ == '__main__':
### get parameters
parser = ArgumentParser()
# main parameters (required)
parser.add_argument('--exp_suffix', type=str, help='exp suffix')
parser.add_argument('--model_version', type=str, help='model def file')
parser.add_argument('--category', type=str, help='model def file')
parser.add_argument('--train_data_fn', type=str, help='training data file that indexs all data tuples')
parser.add_argument('--val_data_fn', type=str, help='validation data file that indexs all data tuples')
# main parameters (optional)
parser.add_argument('--device', type=str, default='cuda:0', help='cpu or cuda:x for using cuda on GPU number x')
parser.add_argument('--seed', type=int, default=3124256514,
help='random seed (for reproducibility) [specify -1 means to generate a random one]')
# parser.add_argument('--seed', type=int, default=-1, help='random seed (for reproducibility) [specify -1 means to generate a random one]')
parser.add_argument('--log_dir', type=str, default='logs', help='exp logs directory')
parser.add_argument('--data_dir', type=str, default='../../prepare_data', help='data directory')
parser.add_argument('--overwrite', action='store_true', default=False,
help='overwrite if exp_dir exists [default: False]')
# network settings
parser.add_argument('--feat_len', type=int, default=256)
parser.add_argument('--max_num_part', type=int, default=20)
# loss weights
parser.add_argument('--loss_weight_trans_l2', type=float, default=1.0, help='loss weight')
parser.add_argument('--loss_weight_rot_l2', type=float, default=1.0, help='loss weight')
parser.add_argument('--loss_weight_rot_cd', type=float, default=10.0, help='loss weight')
# training parameters
parser.add_argument('--epochs', type=int, default=1000)
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--num_workers', type=int, default=5)
parser.add_argument('--lr', type=float, default=.001)
parser.add_argument('--weight_decay', type=float, default=1e-5)
parser.add_argument('--lr_decay_by', type=float, default=0.9)
parser.add_argument('--lr_decay_every', type=float, default=5000)
    parser.add_argument('--iter', default = 5, help = 'number of refinement iterations')
    parser.add_argument('--iter_to_test', default = 4, help = 'iteration index to test')
# logging
parser.add_argument('--no_tb_log', action='store_true', default=False)
parser.add_argument('--no_console_log', action='store_true', default=False)
parser.add_argument('--console_log_interval', type=int, default=10,
help='number of optimization steps beween console log prints')
parser.add_argument('--checkpoint_interval', type=int, default=10000,
help='number of optimization steps beween checkpoints')
# visu
parser.add_argument('--num_batch_every_visu', type=int, default=1, help='num batch every visu')
parser.add_argument('--num_epoch_every_visu', type=int, default=1, help='num epoch every visu')
parser.add_argument('--no_visu', action='store_true', default=False, help='no visu? [default: False]')
# data
parser.add_argument('--level', default='3', help='level of dataset')
#model path
parser.add_argument('--model_dir', type=str, help='the path of the model')
# parse args
conf = parser.parse_args()
conf.exp_name = f'exp-{conf.category}-{conf.model_version}-level{conf.level}{conf.exp_suffix}'
# conf.exp_name = f'exp-{conf.category}-{conf.model_version}-{conf.train_data_fn.split(".")[0]}-{conf.exp_suffix}'
conf.exp_dir = os.path.join(conf.log_dir, conf.exp_name)
#flog = open(os.path.join(conf.exp_dir, 'train_log.txt'), 'w')
#conf.flog = flog
print("conf", conf)
### start training
test(conf)
|
the-stack_106_25518 | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from cloudbaseinit.openstack.common import context as req_context
from cloudbaseinit.openstack.common.gettextutils import _LE
from cloudbaseinit.openstack.common import log as logging
from cloudbaseinit.openstack.common import rpc
LOG = logging.getLogger(__name__)
notification_topic_opt = cfg.ListOpt(
'notification_topics', default=['notifications', ],
help='AMQP topic used for OpenStack notifications')
CONF = cfg.CONF
CONF.register_opt(notification_topic_opt)
def notify(context, message):
"""Sends a notification via RPC."""
if not context:
context = req_context.get_admin_context()
priority = message.get('priority',
CONF.default_notification_level)
priority = priority.lower()
for topic in CONF.notification_topics:
topic = '%s.%s' % (topic, priority)
try:
rpc.notify(context, topic, message)
except Exception:
LOG.exception(_LE("Could not send notification to %(topic)s. "
"Payload=%(message)s"),
{"topic": topic, "message": message})
|
the-stack_106_25520 | import json
from typing import Union
import numpy as np
# Modified from
# https://gist.github.com/jannismain/e96666ca4f059c3e5bc28abb711b5c92#file-compactjsonencoder-py
# to handle more classes
class CompactJSONEncoder(json.JSONEncoder):
"""A JSON Encoder that puts small containers on single lines."""
CONTAINER_TYPES = (list, tuple, dict)
"""Container datatypes include primitives or other containers."""
MAX_ITEMS = 6
"""Maximum number of items in container that might be put on single line."""
INDENTATION_CHAR = " "
def __init__(self, max_width = 80, precise = False, *args, **kwargs):
self.max_width = max_width
self.precise = precise
super().__init__(*args, **kwargs)
self.indentation_level = 0
def encode(self, o):
"""Encode JSON object *o* with respect to single line lists."""
if isinstance(o, (list, tuple)):
if self._put_on_single_line(o):
return "[" + ", ".join(self.encode(el) for el in o) + "]"
else:
self.indentation_level += 1
output = [self.indent_str + self.encode(el) for el in o]
self.indentation_level -= 1
return "[\n" + ",\n".join(output) + "\n" + self.indent_str + "]"
elif isinstance(o, dict):
if o:
if self._put_on_single_line(o):
return "{ " + ", ".join(f"{self.encode(k)}: {self.encode(el)}" for k, el in o.items()) + " }"
else:
self.indentation_level += 1
output = [self.indent_str + f"{json.dumps(k)}: {self.encode(v)}" for k, v in o.items()]
self.indentation_level -= 1
return "{\n" + ",\n".join(output) + "\n" + self.indent_str + "}"
else:
return "{}"
        elif isinstance(o, float):  # use compact general ("g") formatting for floats, where appropriate
if self.precise:
return format(o, ".12g")
else:
return format(o, "g")
elif isinstance(o, str): # escape newlines
o = o.replace("\n", "\\n")
return f'"{o}"'
elif isinstance(o, np.int32):
return json.dumps(int(o))
elif isinstance(o, np.bool_):
return json.dumps(bool(o))
elif isinstance(o, np.ndarray):
return self.encode(list(o))
else:
return json.dumps(o)
def _put_on_single_line(self, o):
return self._primitives_only(o) and len(o) <= self.MAX_ITEMS and len(str(o)) - 2 <= self.max_width
def _primitives_only(self, o: Union[list, tuple, dict]):
if isinstance(o, (list, tuple)):
return not any(isinstance(el, self.CONTAINER_TYPES) for el in o)
elif isinstance(o, dict):
return not any(isinstance(el, self.CONTAINER_TYPES) for el in o.values())
@property
def indent_str(self) -> str:
return self.INDENTATION_CHAR*(self.indentation_level*self.indent)
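# Minimal usage sketch (assumes the standard json API; pass an integer indent so indent_str works):
#   json.dumps({"small": [1, 2, 3], "nested": {"a": 1}}, cls=CompactJSONEncoder, indent=2)
# Small containers come out on a single line; larger ones fall back to indented output.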
|
the-stack_106_25522 | """TCP class packets"""
import struct
import textwrap
class TCP(object):
"""Class representing a tcp packet"""
def __init__(self, packet):
"""pass in the tcp packet to be parsed"""
__packet = struct.unpack("!HH2I2H2H", packet[:20])
self.src_port = __packet[0]
self.dest_port = __packet[1]
self.seq_number = __packet[2]
self.ack_number = __packet[3]
self.flags = {
"NS ": (__packet[4] & 0x80) >> 0x07,
"CWR": (__packet[4] & 0x100) >> 0x08,
"ECE": (__packet[4] & 0x200) >> 0x09,
"URG": (__packet[4] & 0x400) >> 0x0a,
"ACK": (__packet[4] & 0x800) >> 0x0b,
"PSH": (__packet[4] & 0x1000) >> 0x0c,
"RST": (__packet[4] & 0x2000) >> 0x0d,
"SYN": (__packet[4] & 0x4000) >> 0x0e,
"FIN": (__packet[4] & 0x8000) >> 0x0f
}
self.window_size = __packet[5]
self.checksum = __packet[6]
self.urg = __packet[7]
self.data = packet[20:]
def print_header(self):
"""prints TCP header information"""
print("\tSource port: {: <5d} Destination port: {: <5d}\n".format(self.src_port, self.dest_port))
dataType = "TCP"
line_break = 0
for key, value in self.flags.items():
line_break = line_break + 1
print("\t", "{:<4s}: {: <1d}".format(key, value), end=' ')
            if line_break == 4:
print()
if self.src_port == 80 or self.dest_port == 80:
dataType = "HTTP"
print(
"\n\n\t",
"Seq number: {} ACK number: {} Checksum: {} Data Type: {}\n".format(self.seq_number, self.ack_number, self.checksum, dataType)
)
def print_data(self):
"""prints TCP data information"""
data_strs = textwrap.wrap(str(self.data), width=80)
for line in data_strs:
print("\t\t", "{}".format(line))
print("{:-<90s}\n".format("-"))
def write_header(self, filename):
"""writes TCP header information"""
dataType = "TCP"
if self.src_port == 80 or self.dest_port == 80:
dataType = "HTTP"
_info = "\tSource port: {: <5d} Destination port: {: <5d}\n".format(self.src_port, self.dest_port)
_info2 ="\n\tSeq number: {} ACK number: {} Checksum: {} Data Type: {}\n".format(self.seq_number, self.ack_number, self.checksum, dataType)
_flags = " "
line_break = 0
for key, value in self.flags.items():
line_break = line_break + 1
_flags = _flags + "\t{:<4s}: {: <1d}".format(key, value)
            if line_break == 4:
_flags = _flags + "\n"
with open(filename, 'a', encoding='utf-8') as f:
f.write(_info)
f.write(_flags)
f.write(_info2)
def write_data(self, filename):
"""writes TCP data information"""
data_strs = textwrap.wrap(str(self.data), width=80)
_data = "\n"
for line in data_strs:
_data = _data + "\t\t{}\n".format(line)
with open(filename, 'a', encoding='utf-8') as f:
f.write(_data)
f.write("{:-<90s}\n".format("-"))
|
the-stack_106_25523 | #!/usr/bin/env python
# ========================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hadoop.io.SequenceFile import CompressionType
from hadoop.io import Text
from hadoop.io import SequenceFile
def writeData(writer):
key = Text()
value = Text()
key.set('Key')
value.set('Value')
writer.append(key, value)
if __name__ == '__main__':
writer = SequenceFile.createWriter('test.seq', Text, Text)
writeData(writer)
writer.close()
|
the-stack_106_25527 | import os
import xlrd
import random
import collections
import sys
import copy
import json
from datetime import date
import datetime
import argparse
import numpy as np
import time
import requests
import json
import pandas as pd
# 🏢 Load the analyzer package first; fall back to a flat import if the package layout is absent.
try:
    import package.analyzer_system as ana_sys
except ImportError:
    import analyzer_system as ana_sys
"""
🎹 🎹 v2 version:
    - support S1, E1, H1, W1 etc. symbol formats
    - support package-level print-out of results
    - for example, when we meet W1, W2, W3 we can just check the first char: [0] == "W"
# 🌟🌟 v2.1 version:
# 1. prevent checking S1 inside rtp_agent.run().
# 2. the S1 count logic is implemented outside this lib.
"""
####################### Readme ###########################
# slot_bet_rate_agent() -> slot_bet_rate_class()
##########################################################
# Random generator, Python version (compare to the golang version):
# This first approach is not great
# --------------------------------
# Pick a loop count from randint(1, 5)
# Accumulate seed += randint(1, 100000) several times
# Feed that accumulated number into random.seed()
# Then draw the random number, e.g. randint(1, 131)
#----------------------
# Try another approach
# Pick a loop count from randint(1, 5)
# Loop that many times, random_number = randint(1, 131)
# Keep the last random number drawn
# Jira-Ticket: DAT-147
def jason_random_generator_v1(low_number, high_number):
# Pick how many draws to make; only the last draw is returned
"""
get_times = random.randint(1,5)
my_seed = 0
for _ in range(get_times):
my_seed += random.randint(1,100000)
random.seed(my_seed)
"""
get_times = random.randint(1,5)
for _ in range(get_times):
my_random = random.randint(low_number,high_number)
return my_random
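
# A minimal sanity-check sketch (standard library only, not called anywhere):
# the helper above draws randint(low, high) between 1 and 5 times and keeps the last
# draw, so its distribution should match a single random.randint(low, high) call.
def _demo_jason_random_generator_v1(samples=100000):
    counts = collections.Counter(jason_random_generator_v1(1, 6) for _ in range(samples))
    baseline = collections.Counter(random.randint(1, 6) for _ in range(samples))
    print("generator:", sorted(counts.items()))
    print("randint  :", sorted(baseline.items()))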
# become_which_target = "W" for example
def slot_overwrite_list_by_target(input_list_r, overwrite_target, become_which_target):
for col_index, col_list in enumerate(input_list_r):
for row_index, _ in enumerate(col_list):
if input_list_r[col_index][row_index] == overwrite_target:
del input_list_r[col_index][row_index]
input_list_r[col_index].insert(row_index,become_which_target)
return input_list_r
def slot_overwrite_list_by_target_with_lock_list(input_list_r,lock_list, become_which_target):
return_update_list_r = []
return_update_list_r = slot_shallow_copy_to_origin_value_by_list_r(input_list_r)
for col_index, col_list in enumerate(input_list_r):
for row_index, _ in enumerate(col_list):
if lock_list[col_index][row_index] == 1:
return_update_list_r[col_index][row_index] = become_which_target
return return_update_list_r
# For example: where the grid shows "W", return that position as 1, otherwise 0
def slot_obtenir_lock_list_by_target(input_list_r,check_target):
# [Attention]: lock_list = input_list_r.copy() is only a shallow copy, so mutating the nested
############# row lists through lock_list would still affect input_list_r.
# Copy input_list_r's shape manually instead.
lock_list = []
for index_one_list in input_list_r:
tem_index_one_list = []
for value in index_one_list:
tem_index_one_list.append(value)
lock_list.append(tem_index_one_list)
#lock_list = input_list_r.copy()
for col_index, col_list in enumerate(lock_list):
for row_index, _ in enumerate(col_list):
if lock_list[col_index][row_index] == check_target:
get_the_target = 1
del lock_list[col_index][row_index]
lock_list[col_index].insert(row_index, get_the_target)
else:
didnt_get_the_target = 0
del lock_list[col_index][row_index]
lock_list[col_index].insert(row_index, didnt_get_the_target)
return lock_list
def slot_shallow_copy_to_zero_by_list_r(input_list_r):
return_list = []
for index_one_list in input_list_r:
tem_index_one_list = []
for _ in index_one_list:
value = 0
tem_index_one_list.append(value)
return_list.append(tem_index_one_list)
return return_list
def slot_shallow_copy_to_origin_value_by_list_r(input_list_r):
return_list = []
for index_one_list in input_list_r:
tem_index_one_list = []
for value in index_one_list:
tem_index_one_list.append(value)
return_list.append(tem_index_one_list)
return return_list
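
# Illustrative usage sketch (not called by the analyzer): how the helpers above combine.
# First record where a target symbol sits (a lock list of 0/1), then rewrite those
# locked positions with another symbol; the board values here are made up.
def _demo_lock_and_overwrite():
    board = [["W", "H1", "H2"], ["L1", "W", "L2"], ["H3", "H3", "W"]]
    lock = slot_obtenir_lock_list_by_target(board, "W")                 # -> [[1,0,0],[0,1,0],[0,0,1]]
    rewritten = slot_overwrite_list_by_target_with_lock_list(board, lock, "H1")
    print("lock list :", lock)
    print("rewritten :", rewritten)                                     # every "W" position becomes "H1"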
# Input: a roll table; output: per-symbol 3-line / 4-line / 5-line probabilities used to pre-calculate RTP.
class slot_line_setting_analysis():
def __init__(self,input_number_of_cols, input_number_rows_in_each_col_list):
self.name = "[ana_slot:slot_line_setting_analysis]: "
self.number_of_row_list = input_number_rows_in_each_col_list
self.number_of_cols = input_number_of_cols
self.max_len = 0
def ajouter_mg_roll_table(self,input_mg_roll_list_list):
# Deep copy so the "--" padding below does not mutate the caller's inner lists
mg_roll_list_list = copy.deepcopy(input_mg_roll_list_list)
# Save roll_table_final_index_pos [ 25, 19, 22, 26, 29]
self.roll_table_final_index_pos = [ len(any_list) for any_list in mg_roll_list_list]
# Obtenir all_equal_list_list
all_equal_size_list = []
# Obtenir max_len
len_list = [ len(_list) for _list in mg_roll_list_list]
max_len = max(len_list)
self.max_len = max_len # pour calculate_linable_pro_distribuion()
# Faire all_equal_list_list
for any_list in mg_roll_list_list:
if len(any_list) < max_len:
any_list.extend(["--"]*(max_len-len(any_list)))
# Append all_equal_list_list
all_equal_size_list.append(any_list)
# Data { R1, R2, ... RN }
data = { "R_%d"%(v+1): any_list for v, any_list in enumerate(mg_roll_list_list)}
#data = { "R_%d"%(v+1): any_list for v, any_list in enumerate(all_equal_size_list)}
# Data_Index = [ 1, 2, 3, ....... max_len ]
data_index = [ v+1 for v in range(max_len)]
# Save col_index_name: [R_1, R_2, R_3 , ....]
self.mg_roll_table_col_name_list = ["R_%d"%(v+1) for v , _ in enumerate(mg_roll_list_list)]
# Créer Panda DataFrame avec data_index
self.mg_roll_table_df = pd.DataFrame(data,index=data_index)
def les_information(self):
msg = self.name + "\n"
msg += "self.number_of_row_list = %s\n" % self.number_of_row_list
msg += "self.number_of_cols = %d\n" % self.number_of_cols
msg += "self.mg_roll_table_df = \n\n%s\n" % self.mg_roll_table_df
return msg
# Setting from Excel > Main() > Cette Agent.
def calculate_linable_probability_distribution___set_lineable_list(self,input_list):
self.line_able_list = copy.deepcopy(input_list)
def calculate_linable_probability_distribution(self):
msg = self.name + "\n"
# Import line_able_obj_list
# - ['G1', 'S1', 'S2', 'C1', 'C2', 'C3', 'I1', 'I2', 'I3', 'I4', 'W', 'SS']
line_able_list = copy.deepcopy(self.line_able_list)
print("可連線物品:\n%s\n"%line_able_list)
# Create the probability DataFrame, initialised to 0.0
# - Count_for_R_1 Count_for_R_2 Count_for_R_3 Count_for_R_4 Count_for_R_5
#
data_empty_list = [[0.0 for _ in range(self.number_of_cols)] for _ in range(len(line_able_list))]
data_col_norm = ['Count_for_R_%d'%(v+1) for v in range(self.number_of_cols)]
probability_df = pd.DataFrame(data_empty_list,columns=data_col_norm,index=line_able_list)
print(probability_df)
# Add an ID column:
__df = self.mg_roll_table_df.copy()
id_list = [ v+1 for v in range(self.max_len)]
__df['ID'] = id_list
print("Base on the datafrmae:\n",__df)
# Obtenir Symbol Cnts tout le Col. R_1, R_2, .....
col_start = 1
col_end = self.number_of_cols + 1
print("Check Max_Index In Each Roll ")
print(self.roll_table_final_index_pos)
for any_index_of_col in range(col_start,col_end):
col_norm = "R_%d"%any_index_of_col
count_norm = "Count_for_R_%d"%any_index_of_col
tem_df = __df.groupby(col_norm)['ID'].nunique()
tem_df = tem_df.div(self.roll_table_final_index_pos[any_index_of_col-1])
for each_symbol in tem_df.index:
if each_symbol != "--":
# [Count_for_R_1][C1] = tem_df[C1]
probability_df[count_norm][each_symbol] = tem_df[each_symbol]
elif each_symbol == "--":
pass
else:
print("Error: Get the unexpected index %s"%each_symbol)
# Afficher the probability table
print("各個Symbol出現在各個輪的機率")
print(probability_df)
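
# Simplified sketch of the probability computation above (pandas only, not called anywhere):
# for each reel, P(symbol) = (number of stops holding that symbol) / (reel length),
# obtained with the same groupby / nunique / div pattern on an ID column.
def _demo_reel_symbol_probability():
    reel = ["H1", "H1", "L1", "W", "L1", "L1"]                     # a made-up 6-stop reel
    df = pd.DataFrame({"R_1": reel, "ID": range(1, len(reel) + 1)})
    prob = df.groupby("R_1")["ID"].nunique().div(len(reel))        # H1: 2/6, L1: 3/6, W: 1/6
    print(prob)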
# New Slot_bet_rate_class for 94_Line_Design_Project.
class slot_bet_rate_class_v1():
def __init__(self,obj_name):
self.name = "[slot_bet_rate_class_v1]: use pandas for kernel code"
self.bet_rate_df = pd.DataFrame()
def ajouter_obj_par_bet_rate_list(self,obj_symbol,obj_bet_rate_list):
# for 1, 2, 3, 4, 5 line bet : [0,0,3,4,5]
df_data_list = obj_bet_rate_list
df_index = obj_symbol # 'SS', 'J', 'W' for example
columns_list = ["Bet_%d"%(v+1) for v in range(len(df_data_list))] # [Bet_1, Bet_2 ... Bet_n]
_tem_df = pd.DataFrame([df_data_list],columns=columns_list,index=[df_index])
self.bet_rate_df = self.bet_rate_df.append(_tem_df)
def les_information(self):
msg = "[slot_bet_rate_class_v1]: implement with pandas:\n"
msg += "\n%s\n"%self.bet_rate_df
return msg
def obtenir_bet_rate_par_obj_et_bet_index(self,obj_symbol,obj_bet_rate_index):
col_name = "Bet_%d"%obj_bet_rate_index # Bet_1 or Bet_2 or Bet_3 , or Bet_6
value = self.bet_rate_df[col_name][obj_symbol]
return value
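
# Usage sketch for slot_bet_rate_class_v1 (illustrative pay values; assumes a pandas
# version where DataFrame.append is still available, i.e. pandas < 2.0):
# register one pay-table row per symbol, then look up a rate by symbol and line count.
def _demo_bet_rate_class_v1():
    pay_table = slot_bet_rate_class_v1("demo_pay_table")
    pay_table.ajouter_obj_par_bet_rate_list("H1", [0, 0, 5, 20, 100])   # Bet_1 .. Bet_5
    pay_table.ajouter_obj_par_bet_rate_list("W1", [0, 2, 10, 50, 500])
    print(pay_table.les_information())
    print(pay_table.obtenir_bet_rate_par_obj_et_bet_index("H1", 5))     # -> 100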
# Mon parent: slot_bet_rate_agent()
# support only 3, 4, 5 lines.
# it has some issues with earlier games (Pandas, Way_Transformer: a 6-line bet rate does not exist here)
class slot_bet_rate_class():
def __init__(self,obj_name):
self.obj_name = obj_name
self.obj_3lines_bet_rate = 0.0
self.obj_4lines_bet_rate = 0.0
self.obj_5lines_bet_rate = 0.0
def set_345lines_bet_rate(self,trois,quatre,cinq):
self.obj_3lines_bet_rate = trois
self.obj_4lines_bet_rate = quatre
self.obj_5lines_bet_rate = cinq
def les_formation(self):
msg = "[slot_bet_rate_class]['%s']:\n"%self.obj_name
msg += "3lines_bet_rate = %s \n"%self.obj_3lines_bet_rate
msg += "4lines_bet_rate = %s \n"%self.obj_4lines_bet_rate
msg += "5lines_bet_rate = %s \n"%self.obj_5lines_bet_rate
return msg
def obtenir_bet_rate(self,trios_quatre_cinq):
if trios_quatre_cinq == 3: return self.obj_3lines_bet_rate
if trios_quatre_cinq == 4: return self.obj_4lines_bet_rate
if trios_quatre_cinq == 5: return self.obj_5lines_bet_rate
print("[slot_bet_rate_class]: Error ! , no match lines ")
return 543
# Mon enfant: slot_bet_rate_class()
class slot_bet_rate_agent():
def __init__(self):
self.agent_name = ""
self.bet_rate_class_number = 0
self.bet_rate_class_list = []
self.bet_rate_class_dictionary = {}
self.bet_rate_class_inverse_dictionary = {}
def initial_by_excel(self,input_excel_name, input_table_name, read_length):
# Set agent_norm:
self.agent_name = input_table_name
# Set numbers:
self.bet_rate_class_number = read_length
# Ouverte workbook
workbook = xlrd.open_workbook(input_excel_name)
# Ouverte sheet
sheet = workbook.sheet_by_name(input_table_name)
# Set bet_rate_class_list: col_index = 6
print("[目前取用連線物品固定11, 後續撥空參考原始滾輪設定改成可彈性自動判斷]: read_length = %d"%read_length)
for row in range(1,1+read_length):
# Créer un nouveau class avec obj_norm
trois = float(sheet.cell_value(row, 9))
quatre = float(sheet.cell_value(row,10))
cinq = float(sheet.cell_value(row,11))
tem_class = slot_bet_rate_class(sheet.cell_value(row,6))
tem_class.set_345lines_bet_rate(trois,quatre,cinq)
self.bet_rate_class_list.append(tem_class)
# Set dictionary: col_index = 6
self.bet_rate_class_dictionary = {index: sheet.cell_value(index+1,6) for index in range(read_length) }
self.bet_rate_class_inverse_dictionary = { v: k for k, v in self.bet_rate_class_dictionary.items()}
def obtenir_bet_rate(self,obj_name,trois_quatre_cinq):
for any_class in self.bet_rate_class_list:
if any_class.obj_name == obj_name:
bet_rate = any_class.obtenir_bet_rate(trois_quatre_cinq)
return bet_rate
def les_formation(self):
msg = "[slot_bet_rate_agent]: call les_formation...\n"
for any_class in self.bet_rate_class_list:
msg += any_class.les_formation()
return msg
# Generates each run's board (displays every run based on the roll-table or the control-weight-table)
class slot_table_agent():
# For example: 5 x 3 (5 columns, each column has 3 rows of symbols)
# self.number_of_cols = 5
# self.number_of_row_list = [3,3,3,3,3]
def __init__(self,input_number_of_cols,input_number_rows_in_each_col_list):
self.number_of_row_list = input_number_rows_in_each_col_list
self.number_of_cols = input_number_of_cols
def ajouter_mg_roll_table(self,mg_roll_list_list):
__mg_roll_list_list = copy.deepcopy(mg_roll_list_list)
# Save roll_table_final_index_pos [ 25, 19, 22, 26, 29]
self.roll_table_final_index_pos = [ len(any_list) for any_list in __mg_roll_list_list]
# Obtenir all_equal_list_list
all_equal_size_list = []
# Obtenir max_len
len_list = [ len(_list) for _list in __mg_roll_list_list]
max_len = max(len_list)
# Faire all_equal_list_list
for any_list in __mg_roll_list_list:
if len(any_list) < max_len:
any_list.extend(["--"]*(max_len-len(any_list)))
# Append all_equal_list_list
all_equal_size_list.append(any_list)
# Data { R1, R2, ... RN }
data = { "R_%d"%(v+1): any_list for v, any_list in enumerate(all_equal_size_list)}
# Data_Index = [ 1, 2, 3, ....... max_len ]
data_index = [ v+1 for v in range(max_len)]
# Save col_index_name: [R_1, R_2, R_3 , ....]
self.mg_roll_table_col_name_list = ["R_%d"%(v+1) for v , _ in enumerate(all_equal_size_list)]
# Créer Panda DataFrame avec data_index
self.mg_roll_table_df = pd.DataFrame(data,index=data_index)
# [obtenir_mg_roll_table_par_id]: how it works:
# start_run = 0
# end_run = 50
# run_id = 0, 1, 2, 3, ..... 47, 48, 49.
# return list_r
def obtenir_mg_roll_table_par_id(self,run_id):
# Initial start_id_list: for id = 0 [ 1, 1, 1, 1, 1]
# for id = 1 [ 4, 4, 4, 4, 4] if roll_list = [3, 3,3,3,3]
start_id_list = [ 1 + (run_id * self.number_of_row_list[index]) for index in range(len(self.number_of_row_list))]
return_list_r = []
# Loop
for index, any_row_length in enumerate(self.number_of_row_list):
tem_list = []
for incre_value in range(any_row_length):
# Obtenir each tem_id
tem_id = start_id_list[index] + incre_value
# Calculate the value:
tem_value = (tem_id % self.roll_table_final_index_pos[index])
if tem_value == 0:
final_id = self.roll_table_final_index_pos[index]
elif tem_value < self.roll_table_final_index_pos[index]:
final_id = tem_value
else:
print("[Error]: Unexpected tem_value with self.roll_table_final_index_pos[index]= (%d, %d)"%(tem_value,self.roll_table_final_index_pos[index]))
tem_symbol = self.mg_roll_table_df[self.mg_roll_table_col_name_list[index]][final_id]
tem_list.append(tem_symbol)
# Append
return_list_r.append(tem_list)
return return_list_r
## [Jean]: 2020-04-08 implemented to pick a naturally random start index
# Afficher:
# self.number_of_row_list = [3, 3, 3, 3, 3]
# self.roll_table_final_index_pos = [131, 129, 149, 139, 137]
# Random Range:
# Initial start_id_list: for id = 0 [ 1, 1, 1, 1, 1]
# for id = 1 [ 4, 4, 4, 4, 4] if roll_list = [3, 3,3,3,3]
# for 131 , the id range = (1 , 131) , random.randint(1,131)
# for 129 , the id range = (1 , 129) , random.randint(1,129)
def obtenir_mg_roll_table_par_natural_random(self,run_id):
return_list_r = []
start_id_list = []
for any_col in range(len(self.number_of_row_list)):
##### random_pick_id = random.randint(1,self.roll_table_final_index_pos[any_col])
random_pick_id = jason_random_generator_v1(1,self.roll_table_final_index_pos[any_col])
start_id_list.append(random_pick_id)
#print("start_id_list = ",start_id_list)
# Loop
for index, any_row_length in enumerate(self.number_of_row_list):
tem_list = []
for incre_value in range(any_row_length):
# Obtenir each tem_id
tem_id = start_id_list[index] + incre_value
# Calculate the value:
tem_value = (tem_id % self.roll_table_final_index_pos[index])
if tem_value == 0:
final_id = self.roll_table_final_index_pos[index]
elif tem_value < self.roll_table_final_index_pos[index]:
final_id = tem_value
else:
print("[Error]: Unexpected tem_value with self.roll_table_final_index_pos[index]= (%d, %d)"%(tem_value,self.roll_table_final_index_pos[index]))
tem_symbol = self.mg_roll_table_df[self.mg_roll_table_col_name_list[index]][final_id]
tem_list.append(tem_symbol)
# Append
return_list_r.append(tem_list)
return return_list_r
# Use https://github.com/wolfmib/ja_random_generator_services_via_api_golang
# Run main.go
# Referenced ja_python_test_api.py
def obtenir_mg_roll_table_par_natural_random_via_golang_services(self,run_id):
return_list_r = []
start_id_list = []
for any_col in range(len(self.number_of_row_list)):
##### random_pick_id = random.randint(1,self.roll_table_final_index_pos[any_col])
##### random_pick_id = jason_random_generator_v1(1,self.roll_table_final_index_pos[any_col])
my_url = "http://localhost:12345/get_random/"
low = "1"
high = str(self.roll_table_final_index_pos[any_col])
my_url = my_url + low + "/" + high
response = requests.get(my_url)
random_pick_id = response.json()["value"]
#############################
start_id_list.append(random_pick_id)
#print("start_id_list = ",start_id_list)
# Loop
for index, any_row_length in enumerate(self.number_of_row_list):
tem_list = []
for incre_value in range(any_row_length):
# Obtenir each tem_id
tem_id = start_id_list[index] + incre_value
# Calculate the value:
tem_value = (tem_id % self.roll_table_final_index_pos[index])
if tem_value == 0:
final_id = self.roll_table_final_index_pos[index]
elif tem_value < self.roll_table_final_index_pos[index]:
final_id = tem_value
else:
print("[Error]: Unexpected tem_value with self.roll_table_final_index_pos[index]= (%d, %d)"%(tem_value,self.roll_table_final_index_pos[index]))
tem_symbol = self.mg_roll_table_df[self.mg_roll_table_col_name_list[index]][final_id]
tem_list.append(tem_symbol)
# Append
return_list_r.append(tem_list)
return return_list_r
# Run first 10 runs: return msg
# Use self.mg_roll_table_df
# R_1: 1: G1, 2:I2, 3:I2, ........ 22:C2, 23:C3, 24:I1, 25:C1
# ----------------------------------------------------------------
# for id_run = 0 with roll_len = 3
# pick: 1 + 0*3 = 1 pick_list = [1, 2, 3] = [G1, I2, I2]
#
# for id_run = 7 with roll_len = 3
# pick: 1 + 7*3 = 22, pick_list = [ 22, 23, 24 ] = [C2, C3, I1]
#
# for id_run = 8 with roll_len = 3
# pick: 1 + 8*3 = 25, pick_list = [ 25, 1 , 2 ] = [C1, G1, I2]
# -----------------------------------------------------------------
# for case 3: (25, 26, 27) convert to (25, 1, 2)
# How ? only if the number > 24
# --------
# 25 % 25 = 0 -> 25
# 26 % 25 = 1
# 26 % 25 = 2
# -------------------------
#
# ----------------------------------------------------------------
# for id_run = 16 with roll_len = 3
# pick: 1 + 16* 3 = 49 pick_list = [49, 50, 51] -> [ 24 ,25, 1]
def show_mg_roll_table_head(self):
# [Jason]: start_id = [1,1,1,1,1] depend on the row_list [3,3,3,3,3]
start_id_list = [ 1 for _ in range(len(self.number_of_row_list))]
# Implement ici apres
# print("[Debug, 25, 19, 22, 26, 29] = %s"%self.roll_table_final_index_pos)
# self.roll_table_final_index_pos = [25, 19, 22 , 26, 29]
# make first_10_list_r_list
first_10_list_r_list = []
msg = ""
for any_pd_index in range(10):
print(start_id_list)
# Loop each roll
tem_list_r = []
for index, any_row in enumerate(self.number_of_row_list):
tem_msg = "For R_%d's roll with run_id = [%d]:\n"%((index+1),any_pd_index)
# Initial tem_list_r
tem_list = []
# Loop each symbol in each roll
for incre_value in range(any_row):
# Obtenir each tem_id
tem_id = start_id_list[index] + incre_value
# Calculate the value , tem_id % the roll_length
tem_value = (tem_id % self.roll_table_final_index_pos[index] )
if tem_value == 0:
final_id = self.roll_table_final_index_pos[index]
elif tem_value < self.roll_table_final_index_pos[index]:
final_id = tem_value
else:
print("[Error]: Unexpected tem_value with self.roll_table_final_index_pos[index]= (%d, %d)"%(tem_value,self.roll_table_final_index_pos[index]))
tem_msg += "%d , "%final_id
# Append each symbol by final_id
tem_symbol = self.mg_roll_table_df[self.mg_roll_table_col_name_list[index]][final_id]
#print(tem_symbol)
tem_list.append(tem_symbol)
msg += tem_msg + "\n"
tem_list_r.append(tem_list)
# Append each tem_list_r run by run
first_10_list_r_list.append(tem_list_r)
# Go to next run_id
start_id_list = [start_id_list[i] + self.number_of_row_list[i] for i in range(len(start_id_list))]
#print(first_10_list_r_list)
#Afficher
afficher_msg = ""
for any_list_r in first_10_list_r_list:
afficher_msg += self.afficher_table_par_list_r(any_list_r)
afficher_msg += "\n"
print(afficher_msg)
#print(msg)
def les_information(self):
msg = "slot_table_agent avec pandas:\n"
msg += "self.number_of_row_list = %s\n"%self.number_of_row_list
msg += "self.number_of_cols = %d\n"%self.number_of_cols
msg += "self.mg_roll_table_df = \n\n%s\n"%self.mg_roll_table_df
return msg
def afficher_table_par_list_r(self,list_r):
# Obtenir all_equal_size:
all_equal_size_list_r = []
max_len = max(self.number_of_row_list)
for any_list in list_r:
if len(any_list) < max_len:
any_list.extend(["--"]*(max_len-len(any_list)))
# Append all_equal_size_list
all_equal_size_list_r.append(any_list)
msg ="[slot_table_agent]: 盤面表現\n"
data = { "R_%d"%(v+1): any_list for v, any_list in enumerate(all_equal_size_list_r)}
# Créer Panda Dataframe
df = pd.DataFrame(data)
# Afficher Panda Data
msg += "%s\n"%df
return msg
# Previous Code , implement par pandas format to get good display:
#def afficher_table_par_list_r(self, list_r):
# tmp = []
# msg = ""
# msg +="[slot_table_agent]: 盤面表現\n"
# n = max(self.number_of_row_list)
# for _list in list_r:
# m = len(_list)
# tmp.extend(_list)
# if m != n:
# tmp.extend([""]*(n-m))
# for i in range(n):
# tem_str = json.dumps(tmp[i::n])
# msg += tem_str
# msg += "\n"
# return msg
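
# Usage sketch for slot_table_agent with a made-up 3x3 board (not called anywhere):
# the agent walks each reel strip with wrap-around, so run 0 shows stops 1..3 of every
# reel, run 1 shows stops 4..6, and indexes past the end of a strip wrap back to 1.
def _demo_slot_table_agent():
    rolls = [["H1", "L1", "W", "L2"], ["L1", "L1", "H2", "W", "L3"], ["W", "H1", "L1"]]
    agent = slot_table_agent(3, [3, 3, 3])
    agent.ajouter_mg_roll_table(rolls)
    board = agent.obtenir_mg_roll_table_par_id(0)        # first window of every reel
    print(agent.afficher_table_par_list_r(board))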
# Calculate RTP (take a list_r and run the RTP calculation)
class slot_calculer_rtp_agent():
def __init__(self,input_obj_name):
self.norm = input_obj_name
self.wild_list = []
self.linable_list = []
self.line_setting_list = [] # For line only
self.col_lens_by_line_setting_list = 0 # Obtenir after calling set_line_setting_list()
self.apply_type = ""
self.apply_version = ""
self.partner_agent_index = 0
self.partner_agent_list = []
self.partner_agent_dictionary ={}
self.partner_agent_inverse_dictionary = []
# set_type = 'line' or 'way'
def set_type(self,input_type):
self.apply_type = input_type
# set_version = 'v1' or 'v2' or 'name'
def set_version(self,input_version):
self.apply_version = input_version
def set_wild_list(self,input_wild_list):
self.wild_list = input_wild_list
def set_linable_list(self,input_lineable_list):
self.linable_list = input_lineable_list
def set_line_setting_list(self,input_line_setting_list):
self.line_setting_list = input_line_setting_list
if self.line_setting_list:
self.col_lens_by_line_setting_list = len(self.line_setting_list[0])
else:
print("[Error]: input_empty input_line_setting_list = %s"%input_line_setting_list)
def les_information(self):
msg ="[%s]\n"%self.norm
msg +="self.wild_list = %s \n"%self.wild_list
msg +="self.linable_list = %s \n"%self.linable_list
msg +="self.apply_type = %s \n"%self.apply_type
msg +="self.apply_version = %s \n"%self.apply_version
msg +="self.partner_agent_dictionary = %s \n"%self.partner_agent_dictionary
msg +="self.partner_agent_inverse_dict = %s \n"%self.partner_agent_inverse_dictionary
msg +="self.partner_agent_list = %s \n"%self.partner_agent_list
msg +="self.col_lens_by_line_setting_list = %d \n"%self.col_lens_by_line_setting_list
# Afficher 25 line settings if set:
msg +="Self.line_setting_list: 0=row_0, 1=row_1, etc.. \n"
for any_list in self.line_setting_list:
msg += "\t\t\t\t %s\n"%any_list
return msg
# ############### Line Function List #######################################
# - add_partner
# - call_partner_by_name
# - calculer_rtp_by_line_setting_v1
def add_partner(self,input_name,input_class):
temp_class = input_class
self.partner_agent_list.append(temp_class)
self.partner_agent_dictionary.update({self.partner_agent_index: input_name })
self.partner_agent_inverse_dictionary = { v: k for k, v in self.partner_agent_dictionary.items()}
self.partner_agent_index +=1
def call_partner_by_name(self,input_name, input_attr_name, option_input_list=False):
# Example: get_msg = bet_rate_agent.les_information()
# input_name = 'bet_rate_agent'
# input_attr_name = 'les_information'
# tem_attr = getattr(input_name,input_attr_name)
# get_msg = tem_attr()
if input_name in self.partner_agent_inverse_dictionary:
tem_attr = getattr(self.partner_agent_list[self.partner_agent_inverse_dictionary[input_name]],input_attr_name)
else:
    print("[Error]: Didn't add this partner class : %s in current partner list = %s"%(input_name,self.partner_agent_inverse_dictionary))
    return None
if option_input_list:
pass # implement the input list later
else:
return tem_attr()
def calculer_rtp_by_line_setting_v1(self,run_id,list_r,bet_rate_agent,optional_static_agent=False,optional_pd=None):
debug_msg = "[calculer_rtp_by_line_setting_v1]:\n"
system_msg = ""
gain = 0
# Loop Each Line: line_index from 0 - 24, in excel format is 1-25
# Test
test_return_g1_cnt = 0
# Loop Each Obj
for obj in self.linable_list:
for line_index , each_line_list in enumerate(self.line_setting_list):
# Initial set function
check_obj = 0
count = 0
current_line = 0
# Loop Each col_index
for col_index in range(self.col_lens_by_line_setting_list):
# count > line_cnts
count += 1
current_symbol = list_r[col_index][each_line_list[col_index]]
# current_symbol != target_obj, break! and not in wild_list
if current_symbol != obj and current_symbol not in self.wild_list:
break
elif current_symbol in self.wild_list:
if obj not in self.wild_list:
current_line = count
else: # if the target_obj is 'W' itself...faire other way
check_obj = obj
current_line = count
elif current_symbol == obj:
check_obj = obj
current_line = count
if current_line >=3 and check_obj !=0:
# Obtain the gain: bet_amount = 1 / number_of_lines
bet_amount = 1 / len(self.line_setting_list)
bet_rate = bet_rate_agent.obtenir_bet_rate_par_obj_et_bet_index(check_obj,current_line)
# Obtenir_bet_rate_par_obj_et_bet_index('G1',3) means: bet rate with G1_3_lines
current_gain = bet_amount * bet_rate
gain += current_gain
# Obtenir the win_line: 1~25 , since line_index is 0~24
win_line_index = line_index + 1
# Enregistrer des données a Pandas
# Target: {'Run_ID': int(0),'Win_Obj':'S1',"Bet_Rate_3": 5,"Win_Line":3}
if optional_pd is not None:
run_id_col = "Run_ID"
run_id_value = int(run_id)
win_obj_col = "Win_Obj"
win_obj_value = check_obj
bet_rate_index_col = "Bet_Rate_%d"%current_line
bet_rate_index_value = bet_rate
win_line_col = "Win_Line"
win_line_value = int(win_line_index)
gain_col = "Gain"
gain_value = current_gain
list_r_col = "List_R"
list_r_value = copy.deepcopy(list_r)
group_line_col ="Group_Win_Line"
group_line_value = int(current_line)
input_dict = {group_line_col:group_line_value,run_id_col:run_id_value,win_obj_col:win_obj_value,bet_rate_index_col:bet_rate_index_value,win_line_col:win_line_value,gain_col:gain_value,list_r_col:list_r_value}
optional_pd = optional_pd.append(input_dict,ignore_index=True)
# Change to int
my_int_col_list = [run_id_col,win_line_col,group_line_col]
optional_pd[ my_int_col_list] = optional_pd[my_int_col_list].astype(int)
# Set the list_r column in the last column
front_or_end_list = [group_line_col,run_id_col, win_obj_col,win_line_col,gain_col,list_r_col]
optional_pd = optional_pd[[run_id_col,group_line_col, win_obj_col, win_line_col, gain_col] +[c for c in optional_pd if c not in front_or_end_list] + [list_r_col]]
else:
pass
# Afficher les information
system_msg += "第%3d條線, Symbol: %3s, %3d連線, \n"%(win_line_index,check_obj,current_line)
# Créer les information pour Obj_Win_3
if current_line == 3:
if optional_static_agent:
# Hard Code here. the format
# obj_3, obj_4, obj_5 etc..
# G1_3, S1_3, W_4, SS_3
key_code = "%s_%d" % (check_obj, current_line)
optional_static_agent.increase_cnts(key_code)
else:
pass
#return 0, system_msg, debug_msg
if optional_pd is not None:
return gain, system_msg, debug_msg, optional_pd
else:
return gain, system_msg, debug_msg
# 🐳 🎁 this line calculator supports obj runs >= 2, used in game_806_progressive_jackpot
# 🎹: version 2: supports W1, W2 etc.
# 🦌: fix the bug for the case
#        0  L3  L4  L5  H5  H2
#        1  L2  H3  L2  L5  L2
#        2  W1  L2  H5  L2  L5
#
#     line 15, Symbol: L2, 5 in a row, was reported,
#     but (2,2,1,2) (W1-L2-L2-L2), line 7, 4 symbols, was not printed,
#     so a dedicated wild-line calculation had to be added
# 🎹, 🧪: version 2 also records pytest_data so test-data coverage can be checked
# 🌟🌟 v2.1 version:
# 1. prevent checking S1 inside rtp_agent.run().
# 2. the S1 count logic is implemented outside this lib.
def calculer_rtp_by_line_setting_v2_for_slot_3(self,run_id,list_r,bet_rate_agent,optional_static_agent=False,optional_pd=None):
debug_msg = "[calculer_rtp_by_line_setting_v1]:\n"
system_msg = ""
gain = 0
# 🎹,🧪 : Initial winning_data_list
pytest_data_list = []
# 🦉 : evaluate the line first, because we need to check whether this line hits a wild-2 or an A-3 style case
# 🎹 : remove wild group: W1, W2 etc..
def __remove_wild_group(input_list):
return_list = []
for any_obj in input_list:
if any_obj[0] not in ["W"]:
return_list.append(any_obj)
return return_list
"""
print("[jackpot_anlyzer_slot_v2]: manualy input the list_r ")
print("[[W1,H1,H2],[W1,H1,H2],[W1,H1,H2],[W1,H1,H2],[W1,H1,H2]]")
input()
list_r = [["W1","H1","H2"],["W1","H1","H2"],["W1","H1","H2"],["W1","H1","H2"],["W1","H1","H2"]]
"""
# 🦉: let's loop non_wild_list first.
non_wild_list = __remove_wild_group(self.linable_list)
# Loop Each Obj with non-wild list.
current_gain = 0.0
for obj in non_wild_list:
if obj == "S1":
pass
else:
for line_index , each_line_list in enumerate(self.line_setting_list):
# Initial set function
check_obj = 0
count = 0
current_line = 0
# 🦉: Reset the gain after compare
current_obj_gain = 0.0
current_wild_gain = 0.0
# Loop Each col_index
for col_index in range(self.col_lens_by_line_setting_list):
# count > line_cnts
count += 1
current_symbol = list_r[col_index][each_line_list[col_index]]
# current_symbol != target_obj, break! and not in wild_list
if current_symbol != obj and current_symbol not in self.wild_list:
#system_msg += "[Debug_01]: line_index: %d , col_index: %d , current_symbol: %s , self.wild_list = %s\n"%(line_index,col_index,current_symbol,self.wild_list)
break
elif current_symbol in self.wild_list:
if obj not in self.wild_list:
current_line = count
else: # if the target_obj is 'W' itself...faire other way
check_obj = obj
current_line = count
elif current_symbol == obj:
check_obj = obj
current_line = count
# 🐳 🎁 : v0: count the obj (except wild), the bet_win_line is >=3
# v1: implement code to support >=2, made by douge.
# debug: print("[Debug][01]: check_obj: %s, current_line: %d, current_wild_gain: %.4f current_obj_gain: %.4f "%(check_obj,current_line,current_wild_gain,current_obj_gain))
if current_line >=2 and check_obj !=0:
# Obtain the gain: bet_amount = 1 / number_of_lines
bet_amount = 1 / len(self.line_setting_list)
bet_rate = bet_rate_agent.obtenir_bet_rate_par_obj_et_bet_index(check_obj,current_line)
# Obtenir_bet_rate_par_obj_et_bet_index('G1',3) means: bet rate with G1_3_lines
current_obj_gain = bet_amount * bet_rate
# debug: print("[Debug][01-enter-obj]: check_obj: %s, current_line: %d, current_wild_gain: %.4f current_obj_gain: %.4f "%(check_obj,current_line,current_wild_gain,current_obj_gain))
# 🦉: Stop here! let's compare the wining_obj's gain is greater the wild_obj in this line or not
# 🎹: v2 version, compare if the symbol is W1 or W2 by checking [0]
wild_count = 0
wild_current_line = 0
for __col_index in range(self.col_lens_by_line_setting_list):
wild_count += 1
current_symbol = list_r[__col_index][each_line_list[__col_index]]
# 🎹:
if current_symbol[0] != "W":
break
# 🎹:
elif current_symbol[0] in ["W"]:
wild_current_line = wild_count
if wild_current_line >=2:
# debug: print("[Debug][02-wild]: check_obj: %s, current_line: %d, current_wild_gain: %.4f current_obj_gain: %.4f "%(check_obj,current_line,current_wild_gain,current_obj_gain))
# 🎹: v2 version. "W" changed to "W1" to present the wild scores
wild_bet_rate = bet_rate_agent.obtenir_bet_rate_par_obj_et_bet_index("W1",wild_current_line)
current_wild_gain = bet_amount * wild_bet_rate
else:
# debug: print("[Debug][02-else]: check_obj: %s, current_line: %d, current_wild_gain: %.4f current_obj_gain: %.4f "%(check_obj,current_line,current_wild_gain,current_obj_gain))
# otherwise, set the wild gain to 0.0
current_wild_gain = 0.0
# Compare with wild vs obj gain !
if current_obj_gain > current_wild_gain:
current_gain = current_obj_gain
# debug: print("[Debug][03-obj>wild]: check_obj: %s, current_line: %d, current_wild_gain: %.4f current_obj_gain: %.4f "%(check_obj,current_line,current_wild_gain,current_obj_gain))
elif current_obj_gain < current_wild_gain:
# 🦉: switch the saved win information to wild, because wild_gain is bigger
current_gain = current_wild_gain
# 🎹: v2 version. "W" changed to "W1" to present the wild winning counts
check_obj = "W1"
current_line = wild_current_line
bet_rate = wild_bet_rate
# debug: print("[Debug][03-obj<wild]: check_obj: %s, current_line: %d, current_wild_gain: %.4f current_obj_gain: %.4f "%(check_obj,current_line,current_wild_gain,current_obj_gain))
elif current_obj_gain == current_wild_gain and current_obj_gain == 0.0:
current_gain = 0.0 # both obj_gain=0.0 and wild_gain=0.0 is common....
elif current_obj_gain == current_wild_gain:
print("[Error]: obj_gain ~ wild_gain")
assert current_obj_gain == current_wild_gain
# debug: print("[Debug][01]: check_obj: %s, current_line: %d, current_wild_gain: %.4f current_obj_gain: %.4f "%(check_obj,current_line,current_wild_gain,current_obj_gain))
gain += current_gain
############################ print("\n\n[slotv2_1.py]L line 986:")
############################ print("check_obj ",check_obj)
############################ print("current_line ",current_line)
############################ print("current_gain ",current_gain)
############################ print("bet_rate ",bet_rate)
############################ input("Press Enter \n")
# 🎹,🧪 : save current data_list
tem_winning_data_dict = {}
tem_winning_data_dict["win_obj"] = check_obj
tem_winning_data_dict["win_line"] = current_line # win_3, win_4, win_5 etc..
tem_winning_data_dict["win_list_r"] = list_r
tem_winning_data_dict["win_gain" ] = round(current_gain,10)
tem_winning_data_dict["win_line_index"] = line_index # 0,1,2,3,4,5 .... 19 , 20 lines for example.
tem_winning_data_dict["win_bet_rate"] = round(bet_rate,10)
pytest_data_list.append(tem_winning_data_dict)
# Obtenir the win_line: 1~25 , since line_index is 0~24
win_line_index = line_index + 1
# Enregistrer des données a Pandas
# Target: {'Run_ID': int(0),'Win_Obj':'S1',"Bet_Rate_3": 5,"Win_Line":3}
#🙃🙃🙃🙃 print("😅: curr gain = ", current_gain)
#🙃🙃🙃🙃 input()
if optional_pd is not None:
#🙃🙃🙃🙃 print("😅: enter optional pd")
#🙃🙃🙃🙃 input()
run_id_col = "Run_ID"
run_id_value = int(run_id)
win_obj_col = "Win_Obj"
win_obj_value = check_obj
bet_rate_index_col = "Bet_Rate_%d"%current_line
bet_rate_index_value = bet_rate
win_line_col = "Win_Line"
win_line_value = int(win_line_index)
gain_col = "Gain"
gain_value = current_gain
list_r_col = "List_R"
list_r_value = copy.deepcopy(list_r)
group_line_col ="Group_Win_Line"
group_line_value = int(current_line)
input_dict = {group_line_col:group_line_value,run_id_col:run_id_value,win_obj_col:win_obj_value,bet_rate_index_col:bet_rate_index_value,win_line_col:win_line_value,gain_col:gain_value,list_r_col:list_r_value}
optional_pd = optional_pd.append(input_dict,ignore_index=True)
# Change to int
my_int_col_list = [run_id_col,win_line_col,group_line_col]
optional_pd[ my_int_col_list] = optional_pd[my_int_col_list].astype(int)
# Set the list_r column in the last column
front_or_end_list = [group_line_col,run_id_col, win_obj_col,win_line_col,gain_col,list_r_col]
optional_pd = optional_pd[[run_id_col,group_line_col, win_obj_col, win_line_col, gain_col] +[c for c in optional_pd if c not in front_or_end_list] + [list_r_col]]
# 🙃🙃🙃🙃🙃🙃🙃 print("Run is = %d Current optional_pd: "%run_id)
# 🙃🙃🙃🙃🙃🙃🙃 print(optional_pd)
# 🙃🙃🙃🙃🙃🙃🙃 input("Press Enter :\n")
else:
pass
# Afficher les information
#system_msg += "[%s]: 五連線: gain = %2.4f , 線數 = %d, 賠率 = %2.4f\n" % (obj, gain, line_cinq,self.partner_agent_list[0].obtenir_bet_rate(obj, 5))
system_msg += "[%4d]: 第%3d條線, Symbol: %3s, %3d連線, bet_rate = %d , gain = %.4f\n"%(run_id,win_line_index,check_obj,current_line, bet_rate,current_gain)
system_msg += " : list_r\n%s\n-----------\n\n"%list_r
# Créer les information pour Obj_Win_3
# 🖨
if current_line >= 3:
if optional_static_agent:
# Hard Code here. the format
# obj_3, obj_4, obj_5 etc..
# G1_3, S1_3, W_4, SS_3
key_code = "%s_%d" % (check_obj, current_line)
optional_static_agent.increase_cnts(key_code)
else:
pass
# 🖨
if current_line == 2 and check_obj == "W1":
try:
key_code = "%s_%d" % (check_obj, current_line)
optional_static_agent.increase_cnts(key_code)
except:
print("[🖨]No wild-2 in this job-card")
# calculate the wild
# 🎹: v2 version and special fix the Wild self-winning logic issue
# re-check: looping over every non-wild obj already compares against the wild line for w-w-w-L1, w-w-w-H1 etc., which is correct,
# but a pure w-w-w-w-w 5-line would never be counted that way... fixed below.
for line_index , each_line_list in enumerate(self.line_setting_list):
count = 0
current_line = 0
current_wild_gain = 0.0
for col_index in range(self.col_lens_by_line_setting_list):
# count > line_cnts
count += 1
current_symbol = list_r[col_index][each_line_list[col_index]]
if current_symbol not in ["W1"]:
break
else:
current_line = count
# 🎹: v2: special fix for the wild self-winning logic (see the note above this loop);
# 🎹: only the pure w-w-w-w-w (wild 5-line) case needs to be counted here.
if current_line >= 5:
# Obtain the gain: bet_amount = 1 / number_of_lines
bet_amount = 1 / len(self.line_setting_list)
bet_rate = bet_rate_agent.obtenir_bet_rate_par_obj_et_bet_index("W1",current_line)
# Obtenir_bet_rate_par_obj_et_bet_index('G1',3) means: bet rate with G1_3_lines
current_wild_gain = bet_amount * bet_rate
# 💰: add wild_gain
gain += current_wild_gain
# 🐼
win_line_index = line_index + 1
system_msg += "[%4d]: 第%3d條線, Symbol: %3s, %3d連線, bet_rate = %d , gain = %.4f\n"%(run_id,win_line_index,"W1",current_line, bet_rate,current_wild_gain)
# Enregistrer des données a Pandas
# Target: {'Run_ID': int(0),'Win_Obj':'S1',"Bet_Rate_3": 5,"Win_Line":3}
if optional_pd is not None:
run_id_col = "Run_ID"
run_id_value = int(run_id)
win_obj_col = "Win_Obj"
win_obj_value = "W1"
bet_rate_index_col = "Bet_Rate_%d"%current_line
bet_rate_index_value = bet_rate
win_line_col = "Win_Line"
win_line_value = int(win_line_index)
gain_col = "Gain"
gain_value = current_wild_gain
list_r_col = "List_R"
list_r_value = copy.deepcopy(list_r)
group_line_col ="Group_Win_Line"
group_line_value = int(current_line)
input_dict = {group_line_col:group_line_value,run_id_col:run_id_value,win_obj_col:win_obj_value,bet_rate_index_col:bet_rate_index_value,win_line_col:win_line_value,gain_col:gain_value,list_r_col:list_r_value}
optional_pd = optional_pd.append(input_dict,ignore_index=True)
# Change to int
my_int_col_list = [run_id_col,win_line_col,group_line_col]
optional_pd[ my_int_col_list] = optional_pd[my_int_col_list].astype(int)
# Set the list_r column in the last column
front_or_end_list = [group_line_col,run_id_col, win_obj_col,win_line_col,gain_col,list_r_col]
optional_pd = optional_pd[[run_id_col,group_line_col, win_obj_col, win_line_col, gain_col] +[c for c in optional_pd if c not in front_or_end_list] + [list_r_col]]
else:
pass
#return 0, system_msg, debug_msg
# curr_gain , sys_msg, _ , pytest_data_list , self.main_pd
if optional_pd is not None:
return gain, system_msg, debug_msg, pytest_data_list , optional_pd
else:
return gain, system_msg, debug_msg, pytest_data_list , optional_pd
# ################ About calculer_rtp #######################################
# Use self.apply_type and self.apply_version to choose which function to call.
# The shared logic between way and line can be factored out later.
# Implement this later, since there are two partner agents: static_agent and bet_rate_agent,
# and it is not yet clear how to pass these two agents into calculer_rtp cleanly.
def calculer_rtp(self,input_list_r):
# Case 1: way
if self.apply_type == "way":
if self.apply_version == "v1":
RTP,debug_msg, system_msg = self.calculer_rtp_by_way_v1(input_list_r)
return RTP, debug_msg, system_msg
else:
print("[Error]: No Match Version for %s"%self.apply_version)
# Case 2: line
elif self.apply_type == "line":
if self.apply_version == "line_setting_v1":
return self.calculer_rtp_by_line_setting_v1(input_list_r) # Return: RTP, Debug_Msg, System_MSG
# Case 3: no line, no way, error!
else:
print("[Error]: No Match slot_type for %s"%self.apply_type)
# --------------------------------------------------------------------------------------------------------------------------------------------------
# ########################## Way Functions List #######################################################################
##### calculer_rtp_by_way_v1 for Panda -> needs further work later
##### calculer_rtp_by_way_v2 for Way_Transformer -> needs further work later
##### ----------------------------------------------------------
#### These functions use add_bet_rate_agent() -> to be replaced in a future version
#### These functions use self.partner_agent_list[0]; self.partner_list["function_name"] should be used instead to avoid calling an unknown function.
# [Jason]: don't use this, bad naming; use add_partner() instead
def add_bet_rate_agent(self,input_name,input_class):
temp_class = input_class
self.partner_agent_list.append(temp_class)
self.partner_agent_dictionary.update({self.partner_agent_index: input_name })
self.partner_agent_inverse_dictionary = { v: k for k, v in self.partner_agent_dictionary.items()}
self.partner_agent_index +=1
# calculer_rtp_by_way_v1:
### __judge_wild_line_ou_non()
##### __deep_loop_judge_wild()
##### ----------------------------------------------------------
#### This function use add_bet_rate_agent() -> implement in the future version
#### This function use self.partner_agent_partner_agent_list[0] , I shall use self.partner_list["function_name"] to avoid call un-known function.
def calculer_rtp_by_way_v1(self,list_r):
#print("list_r inside calculer_rtp_by_way_v1 is %s" %list_r)
debug_msg = "[calculer_rtp_by_way_v1]: \n"
system_msg = ""
RTP = 0
# Compute the symbol count including wilds
# Then: symbol count = count including wilds - wild count
for obj in self.linable_list:
# Deep Loop Checking with obj
# Check 3 lines
input_list_r = copy.deepcopy(list_r)
line_trois, tem_debug_msg, tem_sys_msg = self.__judge_wild_line_ou_non(input_list_r,3,obj)
debug_msg += tem_debug_msg + "\n"
#system_msg += tem_sys_msg + "\n"
# Check 4 lines
input_list_r = copy.deepcopy(list_r)
line_quatre, tem_debug_msg, tem_sys_msg = self.__judge_wild_line_ou_non(input_list_r,4,obj)
debug_msg += tem_debug_msg + "\n"
#system_msg += tem_sys_msg + "\n"
# Check 5 lines
input_list_r = copy.deepcopy(list_r)
line_cinq, tem_debug_msg, tem_sys_msg = self.__judge_wild_line_ou_non(input_list_r,5,obj)
debug_msg += tem_debug_msg + "\n"
#system_msg += tem_sys_msg + "\n"
gain = 0
if line_cinq > 0:
gain = line_cinq * self.partner_agent_list[0].obtenir_bet_rate(obj,5)
debug_msg += "[%s]: 5 lines: gain = %2.4f , line_number = %d, rate = %2.4f\n" % (obj, gain, line_cinq, \
self.partner_agent_list[0].obtenir_bet_rate(obj, 5))
system_msg += "[%s]: 五連線: gain = %2.4f , 線數 = %d, 賠率 = %2.4f\n" % (obj, gain, line_cinq, \
self.partner_agent_list[0].obtenir_bet_rate(obj, 5))
elif line_quatre > 0:
gain = line_quatre * self.partner_agent_list[0].obtenir_bet_rate(obj, 4)
debug_msg += "[%s]: 4 lines: gain = %2.4f , line_number = %d, rate = %2.4f\n" % (obj, gain, line_quatre,\
self.partner_agent_list[0].obtenir_bet_rate(obj, 4))
system_msg += "[%s]: 四連線: gain = %2.4f , 線數 = %d, 賠率 = %2.4f\n" % (obj, gain, line_quatre, \
self.partner_agent_list[0].obtenir_bet_rate(obj, 4))
elif line_trois > 0:
gain = line_trois * self.partner_agent_list[0].obtenir_bet_rate(obj, 3)
debug_msg += "[%s]: 3 lines: gain = %2.4f , line_number = %d, rate = %2.4f\n" % (obj, gain, line_trois, \
self.partner_agent_list[0].obtenir_bet_rate(obj, 3))
system_msg += "[%s]: 三連線: gain = %2.4f , 線數 = %d, 賠率 = %2.4f\n" % (obj, gain, line_trois,\
self.partner_agent_list[0].obtenir_bet_rate(obj, 3))
RTP += gain
return RTP,debug_msg, system_msg
# calculer_rtp_by_way_v2: [Way]: Apply to Way
### __judge_wild_line_ou_non_v1(default_col_size=3 , optional_col_size_list=False)
##### if optional_col_size_list:
##### __deep_loop_judge_wild_v1_par_col_size_list(col_size_list)
##### else:
##### __deep_loop_judge_wild(default_col_size)
def calculer_rtp_by_way_v2(self, list_r):
#print("list_r inside calculer_rtp_by_way_v1 is %s" %list_r)
debug_msg = "[calculer_rtp_by_way_v1]: \n"
system_msg = ""
RTP = 0
# Faire col_size_list pour __deep_loop
input_col_size_list = [len(tem_list) for tem_list in list_r]
print("list_r = ",list_r)
print("input_col_size_list = ",input_col_size_list)
# Compute the symbol count including wilds
# Then: symbol count = count including wilds - wild count
for obj in self.linable_list:
# Deep Loop Checking with obj
# Check 3 lines
input_list_r = copy.deepcopy(list_r)
line_trois, tem_debug_msg, tem_sys_msg = self.__judge_wild_line_ou_non_v1(input_list_r, 3, obj,optional_col_size_list=input_col_size_list)
debug_msg += tem_debug_msg + "\n"
#system_msg += tem_sys_msg + "\n"
# Check 4 lines
input_list_r = copy.deepcopy(list_r)
line_quatre, tem_debug_msg, tem_sys_msg = self.__judge_wild_line_ou_non_v1(input_list_r, 4, obj,optional_col_size_list=input_col_size_list)
debug_msg += tem_debug_msg + "\n"
#system_msg += tem_sys_msg + "\n"
# Check 5 lines
input_list_r = copy.deepcopy(list_r)
line_cinq, tem_debug_msg, tem_sys_msg = self.__judge_wild_line_ou_non_v1(input_list_r, 5, obj,optional_col_size_list=input_col_size_list)
debug_msg += tem_debug_msg + "\n"
#system_msg += tem_sys_msg + "\n"
# Check 6 lines
input_list_r = copy.deepcopy(list_r)
line_six, tem_debug_msg, tem_sys_msg = self.__judge_wild_line_ou_non_v1(input_list_r, 6, obj,optional_col_size_list=input_col_size_list)
debug_msg += tem_debug_msg + "\n"
#system_msg += tem_sys_msg + "\n"
gain = 0
if line_six > 0:
gain = line_six * self.partner_agent_list[0].obtenir_bet_rate(obj, 6)
debug_msg += "[%s]: 6 lines: gain = %2.4f , line_number = %d, rate = %2.4f\n" % (obj, gain, line_six,self.partner_agent_list[0].obtenir_bet_rate(obj, 6))
system_msg += "[%s]: 六連線: gain = %2.4f , 線數 = %d, 賠率 = %2.4f\n" % (obj, gain, line_six,self.partner_agent_list[0].obtenir_bet_rate(obj, 6))
elif line_cinq > 0:
gain = line_cinq * self.partner_agent_list[0].obtenir_bet_rate(obj, 5)
debug_msg += "[%s]: 5 lines: gain = %2.4f , line_number = %d, rate = %2.4f\n" % (obj, gain, line_cinq,self.partner_agent_list[0].obtenir_bet_rate(obj, 5))
system_msg += "[%s]: 五連線: gain = %2.4f , 線數 = %d, 賠率 = %2.4f\n" % (obj, gain, line_cinq,self.partner_agent_list[0].obtenir_bet_rate(obj, 5))
elif line_quatre > 0:
gain = line_quatre * \
self.partner_agent_list[0].obtenir_bet_rate(obj, 4)
debug_msg += "[%s]: 4 lines: gain = %2.4f , line_number = %d, rate = %2.4f\n" % (obj, gain, line_quatre,
self.partner_agent_list[0].obtenir_bet_rate(obj, 4))
system_msg += "[%s]: 四連線: gain = %2.4f , 線數 = %d, 賠率 = %2.4f\n" % (obj, gain, line_quatre,
self.partner_agent_list[0].obtenir_bet_rate(obj, 4))
elif line_trois > 0:
gain = line_trois * \
self.partner_agent_list[0].obtenir_bet_rate(obj, 3)
debug_msg += "[%s]: 3 lines: gain = %2.4f , line_number = %d, rate = %2.4f\n" % (obj, gain, line_trois,
self.partner_agent_list[0].obtenir_bet_rate(obj, 3))
system_msg += "[%s]: 三連線: gain = %2.4f , 線數 = %d, 賠率 = %2.4f\n" % (obj, gain, line_trois,
self.partner_agent_list[0].obtenir_bet_rate(obj, 3))
RTP += gain
return RTP, debug_msg, system_msg
def __judge_wild_line_ou_non(self,input_list_r, input_numero_de_line, input_symbol):
system_msg = ""
debug_msg = "__judge_wild_line_ou_non(input_list_r,input_numero_de_line, input_symbol): \n"
# Check si le symbol dans le range ou non:
judge_list_r = [each_col for index, each_col in enumerate(input_list_r) if index < input_numero_de_line]
debug_msg += "judge_list_r= " + "%s\n"%judge_list_r
# Chercher the symbol cnts dans tout le col.
# Par example: pour list_r = [['B', 'A', 'T'], ['J', 'SE', 'K'], ['Q', 'B', 'K']]
# judge_list_r_cnts_symbol_list for 'B' = [ 1 , 0 , 1 ]
judge_list_r_cnts_symbol_list = []
check_total_symbol_cnts = 0
for any_list_r in judge_list_r:
# Obtenir symbol_cnts:
tem_cnts_symbol_list = collections.Counter(any_list_r)
tem_cnts_symbol = tem_cnts_symbol_list[input_symbol]
# Ajouter symbol_cnts_total:
check_total_symbol_cnts += tem_cnts_symbol
judge_list_r_cnts_symbol_list.append(tem_cnts_symbol)
debug_msg += "detect '%s' , judge_list_r_cnts_symbol_list = "%input_symbol + "%s\n"%judge_list_r_cnts_symbol_list
# If the symbol is not present in this region, return 0 (no line counts for it)
if check_total_symbol_cnts == 0:
return 0, debug_msg,system_msg # no any line_cnts for this symbol
else:
# Deep loop
input_limit = input_numero_de_line - 1 #input_numero_de_line = 3, input_limit = 2
input_layer = -1 # Start from 0,...etc..
tem_list = [0 for _ in range(input_numero_de_line)] #input_numero_de_line = 3, [0,0,0]
each_col_size = 3
check_all_kind_of_set_list = []
__debug_msg_list = []
# Run
check_all_kind_of_set_list = self.__deep_loop_judge_wild(input_symbol,judge_list_r,input_limit,input_layer,each_col_size,tem_list,check_all_kind_of_set_list,__debug_msg_list)
for any_debug_msg in __debug_msg_list:
debug_msg += any_debug_msg + "\n"
#line_cnts
debug_msg += "check_all_kind_of_set_list = %s"%check_all_kind_of_set_list
line_cnts = len(check_all_kind_of_set_list)
return line_cnts, debug_msg, system_msg
# New version v1:
## add col_size input setting (Default)
## add support optional_col_size_list for different col_size in different col..
def __judge_wild_line_ou_non_v1(self,input_list_r, input_numero_de_line, input_symbol,default_col_size=3,optional_col_size_list=False):
system_msg = ""
debug_msg = "__judge_wild_line_ou_non(input_list_r,input_numero_de_line, input_symbol): \n"
# Check si le symbol dans le range ou non:
judge_list_r = [each_col for index, each_col in enumerate(input_list_r) if index < input_numero_de_line]
debug_msg += "judge_list_r= " + "%s\n"%judge_list_r
# Chercher the symbol cnts dans tout le col.
# Par example: pour list_r = [['B', 'A', 'T'], ['J', 'SE', 'K'], ['Q', 'B', 'K']]
# judge_list_r_cnts_symbol_list for 'B' = [ 1 , 0 , 1 ]
judge_list_r_cnts_symbol_list = []
check_total_symbol_cnts = 0
for any_list_r in judge_list_r:
# Obtenir symbol_cnts:
tem_cnts_symbol_list = collections.Counter(any_list_r)
tem_cnts_symbol = tem_cnts_symbol_list[input_symbol]
# Ajouter symbol_cnts_total:
check_total_symbol_cnts += tem_cnts_symbol
judge_list_r_cnts_symbol_list.append(tem_cnts_symbol)
debug_msg += "detect '%s' , judge_list_r_cnts_symbol_list = "%input_symbol + "%s\n"%judge_list_r_cnts_symbol_list
# If the symbol is not present in this region, return 0 (no line counts for it)
if check_total_symbol_cnts == 0:
return 0, debug_msg,system_msg # no any line_cnts for this symbol
else:
# Deep loop
input_limit = input_numero_de_line - 1 #input_numero_de_line = 3, input_limit = 2
input_layer = -1 # Start from 0,...etc..
tem_list = [0 for _ in range(input_numero_de_line)] #input_numero_de_line = 3, [0,0,0]
check_all_kind_of_set_list = []
__debug_msg_list = []
# optional_col_size is set, used col_size_list for each col
if optional_col_size_list:
debug_msg += "[Info]: Apply different col_size for each col with __deep_loop_judge_wild_v1_par_col_size_list(). Please make sure, you got the setting correctly.\n"
# Run v1
check_all_kind_of_set_list = self.__deep_loop_judge_wild_v1_par_col_size_list(input_symbol,judge_list_r,input_limit,input_layer,optional_col_size_list,tem_list,check_all_kind_of_set_list,__debug_msg_list)
# optional_col_size is not set, use default fixed col size
else:
each_col_size = default_col_size
# Run v0
check_all_kind_of_set_list = self.__deep_loop_judge_wild(input_symbol,judge_list_r,input_limit,input_layer,each_col_size,tem_list,check_all_kind_of_set_list,__debug_msg_list)
for any_debug_msg in __debug_msg_list:
debug_msg += any_debug_msg + "\n"
#line_cnts
debug_msg += "check_all_kind_of_set_list = %s"%check_all_kind_of_set_list
line_cnts = len(check_all_kind_of_set_list)
return line_cnts, debug_msg, system_msg
#----------- How to use the DFS ----------------#
### index: 0,1,2
# input_limit = 2
# input_layer = -1
### tem_list: (need to tune for 3: [0,0,0] 4: [0,0,0,0])
### each_col_size = 3 (fixed size for each col )
#check_all_kind_of_set_list = []
#tem_list = [0,0,0]
#each_col_size = 3
### Run:
#check_all_kind_of_set_list = __deep_loop_judge_wild(judge_list,input_limit,input_layer,3,tem_list,check_all_kind_of_set_list)
def __deep_loop_judge_wild(self,input_symbol,judge_list, layer_limit, input_layer, each_col_size, tem_list, check_all_kind_of_set_list,__debug_msg_list):
input_layer += 1
# Arriver deep bottom
if input_layer > layer_limit:
# Copy new instance
new_tem_list = copy.deepcopy(tem_list)
# Judge each set et updated check_all_kind_of_set_list
self.__judge_each_set_est_line_ou_non(input_symbol,judge_list, new_tem_list,check_all_kind_of_set_list, __debug_msg_list)
return check_all_kind_of_set_list
# Entrer deep
for any_value in range(each_col_size):
tem_list[input_layer] = any_value
check_all_kind_of_set_list = self.__deep_loop_judge_wild(input_symbol,judge_list, layer_limit, input_layer, each_col_size, tem_list, check_all_kind_of_set_list,__debug_msg_list)
return check_all_kind_of_set_list
# newer version: allows a different col_size per column via 'col_size_list'; used by the Way_Transformer project.
def __deep_loop_judge_wild_v1_par_col_size_list(self,input_symbol,judge_list, layer_limit, input_layer, col_size_list, tem_list, check_all_kind_of_set_list,__debug_msg_list):
input_layer += 1
# Arriver deep bottom
if input_layer > layer_limit:
# Copy new instance
new_tem_list = copy.deepcopy(tem_list)
# Judge each set et updated check_all_kind_of_set_list
self.__judge_each_set_est_line_ou_non(input_symbol,judge_list, new_tem_list,check_all_kind_of_set_list, __debug_msg_list)
return check_all_kind_of_set_list
# Entrer deep, et loop par different col_size
for any_value in range(col_size_list[input_layer]):
tem_list[input_layer] = any_value
check_all_kind_of_set_list = self.__deep_loop_judge_wild_v1_par_col_size_list(input_symbol,judge_list, layer_limit, input_layer, col_size_list, tem_list, check_all_kind_of_set_list,__debug_msg_list)
return check_all_kind_of_set_list
# Check each set like ['B','A','B'] etc..
def __judge_each_set_est_line_ou_non(self,input_symbol,input_judge_list, input_new_tem_list,__get_line_list,__debug_msg_list):
each_line_set = []
for any_layer, any_layer_index in enumerate(input_new_tem_list):
tem_symbol = input_judge_list[any_layer][any_layer_index]
each_line_set.append(tem_symbol)
if input_symbol not in each_line_set:
#print("No, with line_set = %s" % each_line_set)
return
else:
# Check if any obj is ok
for any_obj in each_line_set:
if any_obj == input_symbol or any_obj == 'W':
pass
else:
#print("No, with line_set = %s" % each_line_set)
return
# Pass all if
debug_msg = "Yes, with line_set = %s" % each_line_set
__debug_msg_list.append(debug_msg)
__get_line_list.append(input_new_tem_list)
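
# Simplified, standalone sketch of the line-evaluation idea used by the class above
# (the real methods additionally compare the wild line against the symbol line and
# convert the run length into a pay via the bet-rate table). Names here are illustrative.
def _demo_count_line_run(list_r, line_setting, target, wild_list=("W", "W1")):
    # e.g. _demo_count_line_run([["W","H1"],["H1","L1"],["L2","H1"]], [0,0,0], "H1") -> 2
    run_length = 0
    for col_index, row_index in enumerate(line_setting):
        symbol = list_r[col_index][row_index]
        if symbol == target or symbol in wild_list:
            run_length += 1
        else:
            break
    return run_length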
# #####################################################################################################################################################
if __name__ == "__main__":
# Test_00: slot_table_agent: implement later ..
"""
# Les setting:
list_r = [ ['A','B','C'], \
['B1','B2','B3'], \
['C1','C2','C3'], \
['D1','D2','D3'], \
['E','E','E']
]
input_cols = 5
input_row_list = [3, 3, 3, 3, 3]
# Initial Agent
msg = "Test list_r = %s\n"%list_r
msg += "Input row list = %s\n"%input_row_list
table_agent = slot_table_agent(input_cols,input_row_list)
msg += table_agent.afficher_table_par_list_r(list_r)
print(msg)
# Test_00: slot_tabler_agent: Test different size of row
# Les setting:
list_r = [ ['A','B','C'], \
['B1','B2','B3','B4','B5'], \
['C1','C2','C3'], \
['D1','D2','D3'], \
['E','E','E','E']
]
input_cols = 5
input_row_list = [3, 5, 3, 3, 4]
# Initial Agent
msg = "Test list_r = %s\n"%list_r
msg += "Input row list = %s\n"%input_row_list
table_agent = slot_table_agent(input_cols,input_row_list)
msg += table_agent.afficher_table_par_list_r(list_r)
print(msg)
# Test_01: different rolling with "test_case_01_different_row_size.xlsx"
# Obtenir les args:
get_main_args = ana_sys.analyzer_input_args()
# Lire le fichier par défaut: TEST Case Default files.
input_excelname = "test_case_01_different_row_size.xlsx"
# Lire réglage:
wb = xlrd.open_workbook(input_excelname)
setting_sheet = wb.sheet_by_name("Setting")
mg_roll_table_sheet = wb.sheet_by_name("MG_Roll_Table")
# Créer output_agent
# # Créer output_agent
##############################################
output_agent = ana_sys.analyzer_output_agent()
##############################################
output_agent.set_debug_mode(get_main_args.debug_mode)
output_agent.set_show_info(get_main_args.show_info)
output_agent.set_output_folder(get_main_args.output_folder)
input_default_file_name = "output_" + ana_sys.analyzer_get_tem_file_name_format_by_time()
input_summary_file_name = "Final_Summary_" + ana_sys.analyzer_get_tem_file_name_format_by_time()
output_agent.set_default_output_file_name(input_default_file_name)
output_info = output_agent.obtenir_info()
print(output_info)
# Lire les Setting:
excel_agent = ana_sys.read_excel_agent()
msg = excel_agent.show_les_key_mots_de_gauche_a_droite(setting_sheet)
print(msg)
setting_mode , _ = excel_agent.obtenir_value_par_norm_dans_tout_le_col(setting_sheet,"Mode:")
setting_numbers_of_col , _ = excel_agent.obtenir_value_par_norm_dans_tout_le_col(setting_sheet,"Column:",optional_format='int')
setting_scatter_symbol , _ = excel_agent.obtenir_value_par_norm_dans_tout_le_col(setting_sheet,"Scatter:")
setting_wild_symbol , _ = excel_agent.obtenir_value_par_norm_dans_tout_le_col(setting_sheet,"Wild:")
setting_scatter_numbers, _ = excel_agent.obtenir_value_par_norm_dans_tout_le_col(setting_sheet,"ScatterNumberEnterFG:",optional_format='int')
setting_nubmer_of_row_list, _ = excel_agent.obtenir_value_par_norm_dans_tout_le_col_avec_auto_search_value_list_lens(setting_sheet,"Row:",optional_format='int')
msg = "[系統]: 基本設定如下:\n"
msg += "模式: %s\n"%setting_mode
msg += "Col數量: %d\n"%setting_numbers_of_col
msg += "Scatter 圖標: %s\n"%setting_scatter_symbol
msg += "Wild 圖標: %s\n"%setting_wild_symbol
msg += "幾個%s to FG: %d\n"%(setting_scatter_symbol,setting_scatter_numbers)
msg += "Row number list: %s\n"%(setting_nubmer_of_row_list)
    # Read the MG Roll Table sheet:
mg_roll_r1_list , _ = excel_agent.obtenir_value_par_norm_dans_tout_le_col_avec_auto_search_value_list_lens(mg_roll_table_sheet,"R1:")
mg_roll_r2_list , _ = excel_agent.obtenir_value_par_norm_dans_tout_le_col_avec_auto_search_value_list_lens(mg_roll_table_sheet,"R2:")
mg_roll_r3_list , _ = excel_agent.obtenir_value_par_norm_dans_tout_le_col_avec_auto_search_value_list_lens(mg_roll_table_sheet,"R3:")
mg_roll_r4_list , _ = excel_agent.obtenir_value_par_norm_dans_tout_le_col_avec_auto_search_value_list_lens(mg_roll_table_sheet,"R4:")
mg_roll_r5_list , _ = excel_agent.obtenir_value_par_norm_dans_tout_le_col_avec_auto_search_value_list_lens(mg_roll_table_sheet,"R5:")
msg += "MG第1輪: %s\n"%mg_roll_r1_list
msg += "MG第2輪: %s\n"%mg_roll_r2_list
msg += "MG第3輪: %s\n"%mg_roll_r3_list
msg += "MG第4輪: %s\n"%mg_roll_r4_list
msg += "MG第5輪: %s\n"%mg_roll_r5_list
    # Display the settings:
print(msg)
    # Build mg_roll_list_list
mg_roll_list_list = []
mg_roll_list_list.append(mg_roll_r1_list)
mg_roll_list_list.append(mg_roll_r2_list)
mg_roll_list_list.append(mg_roll_r3_list)
mg_roll_list_list.append(mg_roll_r4_list)
mg_roll_list_list.append(mg_roll_r5_list)
    # Attach mg_roll_list_list to the table agent
slot_table_agent = slot_table_agent(setting_numbers_of_col,setting_nubmer_of_row_list)
slot_table_agent.ajouter_mg_roll_table(mg_roll_list_list)
mg_roll_msg = slot_table_agent.les_information()
print(mg_roll_msg)
# Show first 5 runs:
slot_table_agent.show_mg_roll_table_head()
# Start the run
# ------- run_setting -------------------
start_run = 0
end_run = 10
# ------ output_setting ----------------
set_output_flag = True
output_agent.set_output_flag_by_excel(set_output_flag)
###################################################################
#for any_run in range(start_run,end_run):
for each_run in range(start_run,end_run):
msg = "[%5d]轉:\n---------------\n"%each_run
curr_list_r = slot_table_agent.obtenir_mg_roll_table_par_id(each_run)
msg += slot_table_agent.afficher_table_par_list_r(curr_list_r)
output_agent.output_agent(msg)
############### TEST 02- With Line_Setting_List ################
print("############### TEST 02- With Line_Setting_List ################\n")
setting_linable_obj_list = ['G1', 'S1', 'S2', 'C1', 'C2', 'C3', 'I1', 'I2', 'I3', 'I4', 'W', 'SS']
setting_wildable_obj_list = ['W']
setting_game_type = 'line'
    # 1st, 2nd, and 3rd line settings.
mg_line_setting_list = [ [0, 0, 0, 0, 0],\
[1, 1, 1, 1, 1],\
[2, 2, 2, 2, 2] ]
rtp_agent = slot_calculer_rtp_agent('line_design_rtp_agent')
rtp_agent.set_linable_list(setting_linable_obj_list)
rtp_agent.set_wild_list(setting_wildable_obj_list)
rtp_agent.set_type(setting_game_type)
rtp_agent.set_version('line_setting_v1')
rtp_agent.set_line_setting_list(mg_line_setting_list)
#rtp_agent.add_bet_rate_agent('bet_rate_agent', main_slot_bet_rate_agent)
rtp_agent_info = rtp_agent.les_information()
output_agent.output_agent(rtp_agent_info)
# Test run
list_r = [['G1', 'I2', 'I2'], ['G1', 'S1', 'S2'], ['G1', 'I1', 'G1'], ['I4', 'I1', 'W'], ['SS', 'S1', 'S2']]
print(list_r)
_, msg , _ = rtp_agent.calculer_rtp(list_r)
print(msg)
# Test Run with wild
list_r = [['G1', 'W', 'I2'], ['G1', 'W', 'S2'], ['G1', 'W', 'G1'], ['I4', 'I1', 'W'], ['SS', 'S1', 'S2']]
print(list_r)
_, msg, _ = rtp_agent.calculer_rtp(list_r)
print(msg)
""" |
the-stack_106_25532 | '''
Derived SceneviewerWidget capable of editing node coordinate positions and derivatives.
'''
from enum import Enum
from PySide2 import QtCore
from opencmiss.maths.vectorops import add, cross, div, magnitude, mult, sub
from opencmiss.utils.zinc.general import ChangeManager
from opencmiss.zincwidgets.sceneviewerwidget import SceneviewerWidget
from opencmiss.zinc.field import Field
from opencmiss.zinc.graphics import Graphics
from opencmiss.zinc.scenecoordinatesystem import SCENECOORDINATESYSTEM_LOCAL, SCENECOORDINATESYSTEM_WINDOW_PIXEL_TOP_LEFT
from opencmiss.zinc.result import RESULT_OK
class NodeEditorSceneviewerWidget(SceneviewerWidget):
'''
classdocs
'''
class AlignMode(Enum):
NONE = 0
ROTATION = 1
SCALE = 2
TRANSLATION = 3
def __init__(self, parent=None):
'''
Constructor
'''
super(NodeEditorSceneviewerWidget, self).__init__(parent)
self._model = None
self._alignKeyPressed = False
self._alignMode = self.AlignMode.NONE
self._editNode = None
self._editGraphics = None
self._lastMousePos = None
def projectLocal(self, x, y, z, localScene):
"""
Project the given point in local coordinates into window pixel coordinates
with the origin at the window's top left pixel.
Note the z pixel coordinate is a depth which is mapped so that -1 is
on the far clipping plane, and +1 is on the near clipping plane.
:param localScene: Scene within hierarchy of sceneviewer scene to project local transformation to.
"""
in_coords = [x, y, z]
result, out_coords = self._sceneviewer.transformCoordinates(SCENECOORDINATESYSTEM_LOCAL, SCENECOORDINATESYSTEM_WINDOW_PIXEL_TOP_LEFT, localScene, in_coords)
if result == RESULT_OK:
return out_coords # [out_coords[0] / out_coords[3], out_coords[1] / out_coords[3], out_coords[2] / out_coords[3]]
return None
def unprojectLocal(self, x, y, z, localScene):
"""
Unproject the given point in window pixel coordinates where the origin is
at the window's top left pixel into local coordinates.
Note the z pixel coordinate is a depth which is mapped so that -1 is
on the far clipping plane, and +1 is on the near clipping plane.
:param localScene: Scene within hierarchy of sceneviewer scene to project local transformation to.
"""
in_coords = [x, y, z]
result, out_coords = self._sceneviewer.transformCoordinates(SCENECOORDINATESYSTEM_WINDOW_PIXEL_TOP_LEFT, SCENECOORDINATESYSTEM_LOCAL, localScene, in_coords)
if result == RESULT_OK:
return out_coords # [out_coords[0] / out_coords[3], out_coords[1] / out_coords[3], out_coords[2] / out_coords[3]]
return None
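    # Usage sketch (hypothetical values): projectLocal(x, y, z, scene) returns
    # window coordinates whose third component is a depth in [-1, +1], and
    # mouseMoveEvent below reuses that depth so unprojectLocal can map the 2D
    # mouse position back onto the plane of the point being dragged, e.g.
    #     win = widget.projectLocal(1.0, 2.0, 3.0, scene)
    #     loc = widget.unprojectLocal(mouse_x, -mouse_y, win[2], scene)
    # where widget, scene, mouse_x and mouse_y are placeholders, not names from
    # this module.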
def setGeneratorModel(self, model):
self._model = model
def getNearestNodeAndGraphics(self, x, y):
'''
:return: Node, Graphics OR None, None if none found.
'''
scenefiltermodule = self._context.getScenefiltermodule()
with ChangeManager(scenefiltermodule):
oldSelectionfilter = self.getSelectionfilter()
self.setSelectionfilter(scenefiltermodule.createScenefilterFieldDomainType(Field.DOMAIN_TYPE_NODES))
# print('pick',x,y,self._selectTol, 'DpiX', self.physicalDpiX(), self.logicalDpiX(), 'DpiY', self.physicalDpiY(), self.logicalDpiY())
# print(' width', self.width(), 'widthMM',self.widthMM(),'dpi',25.4*self.width()/self.widthMM(),
# 'height', self.height(), 'heightMM',self.heightMM(),'dpi',25.4*self.height()/self.heightMM())
# app = QtCore.QCoreApplication.instance()
# desktop = app.desktop()
# dpmm = self.width()/self.widthMM()
# print('dpmm',dpmm,'physicalDpiX',desktop.physicalDpiX(),'screenGeometry',desktop.screenGeometry(self))
tol = self._selectTol # *0.1*dpmm
# print('tol',tol)
self._scenepicker.setSceneviewerRectangle(self._sceneviewer, SCENECOORDINATESYSTEM_WINDOW_PIXEL_TOP_LEFT,
x - tol, y - tol, x + tol, y + tol)
node = self._scenepicker.getNearestNode()
if node.isValid():
graphics = self._scenepicker.getNearestNodeGraphics()
else:
node = None
graphics = None
self.setSelectionfilter(oldSelectionfilter)
return node, graphics
def selectNode(self, node):
nodeset = node.getNodeset()
fieldmodule = nodeset.getFieldmodule()
with ChangeManager(fieldmodule):
selectionGroup = self.getOrCreateSelectionGroup()
selectionGroup.clear()
nodegroup = selectionGroup.getFieldNodeGroup(nodeset)
if not nodegroup.isValid():
nodegroup = selectionGroup.createFieldNodeGroup(nodeset)
nodesetGroup = nodegroup.getNodesetGroup()
result = nodesetGroup.addNode(node)
def keyPressEvent(self, event):
if (event.key() == QtCore.Qt.Key_A) and (event.isAutoRepeat() == False):
self._alignKeyPressed = True
event.setAccepted(True)
else:
super(NodeEditorSceneviewerWidget, self).keyPressEvent(event)
def keyReleaseEvent(self, event):
if (event.key() == QtCore.Qt.Key_A) and (event.isAutoRepeat() == False):
self._alignKeyPressed = False
event.setAccepted(True)
else:
super(NodeEditorSceneviewerWidget, self).keyReleaseEvent(event)
def mousePressEvent(self, event):
if (self._alignMode == self.AlignMode.NONE) and not self._editNode:
button = event.button()
if self._selectionKeyPressed:
if button == QtCore.Qt.LeftButton:
node, graphics = self.getNearestNodeAndGraphics(event.x(), event.y())
if node and (graphics.getType() == Graphics.TYPE_POINTS) and (graphics.getFieldDomainType() == Field.DOMAIN_TYPE_NODES):
# print('NodeEditorSceneviewerWidget.mousePressEvent node:', node.getIdentifier())
self.selectNode(node)
self._editNode = node
self._editGraphics = graphics
self._lastMousePos = [event.x(), event.y()]
event.accept()
return
if self._model and self._alignKeyPressed:
# shift-Left button becomes middle button, to support Mac
if (button == QtCore.Qt.MiddleButton) or ((button == QtCore.Qt.LeftButton) and (event.modifiers() & QtCore.Qt.SHIFT)):
self._alignMode = self.AlignMode.TRANSLATION
elif button == QtCore.Qt.LeftButton:
self._alignMode = self.AlignMode.ROTATION
elif button == QtCore.Qt.RightButton:
self._alignMode = self.AlignMode.SCALE
if self._alignMode != self.AlignMode.NONE:
self._editNode = None
self._editGraphics = None
self._lastMousePos = [event.x(), event.y()]
event.accept()
return
self._lastMousePos = None
super(NodeEditorSceneviewerWidget, self).mousePressEvent(event)
def mouseMoveEvent(self, event):
if self._editNode:
mousePos = [event.x(), event.y()]
nodeset = self._editNode.getNodeset()
fieldmodule = nodeset.getFieldmodule()
with ChangeManager(fieldmodule):
meshEditsNodeset = self._model.getOrCreateMeshEditsNodesetGroup(nodeset)
meshEditsNodeset.addNode(self._editNode)
editCoordinateField = coordinateField = self._editGraphics.getCoordinateField()
localScene = self._editGraphics.getScene() # need set local scene to get correct transformation
if coordinateField.getCoordinateSystemType() != Field.COORDINATE_SYSTEM_TYPE_RECTANGULAR_CARTESIAN:
editCoordinateField = fieldmodule.createFieldCoordinateTransformation(coordinateField)
editCoordinateField.setCoordinateSystemType(Field.COORDINATE_SYSTEM_TYPE_RECTANGULAR_CARTESIAN)
fieldcache = fieldmodule.createFieldcache()
fieldcache.setNode(self._editNode)
componentsCount = coordinateField.getNumberOfComponents()
result, initialCoordinates = editCoordinateField.evaluateReal(fieldcache, componentsCount)
if result == RESULT_OK:
for c in range(componentsCount, 3):
initialCoordinates.append(0.0)
pointattr = self._editGraphics.getGraphicspointattributes()
editVectorField = vectorField = pointattr.getOrientationScaleField()
pointBaseSize = pointattr.getBaseSize(3)[1][0]
pointScaleFactor = pointattr.getScaleFactors(3)[1][0]
if editVectorField.isValid() and (vectorField.getNumberOfComponents() == componentsCount) \
and (pointBaseSize == 0.0) and (pointScaleFactor != 0.0):
if vectorField.getCoordinateSystemType() != Field.COORDINATE_SYSTEM_TYPE_RECTANGULAR_CARTESIAN:
editVectorField = fieldmodule.createFieldCoordinateTransformation(vectorField, coordinateField)
editVectorField.setCoordinateSystemType(Field.COORDINATE_SYSTEM_TYPE_RECTANGULAR_CARTESIAN)
result, initialVector = editVectorField.evaluateReal(fieldcache, componentsCount)
for c in range(componentsCount, 3):
initialVector.append(0.0)
initialTipCoordinates = [(initialCoordinates[c] + initialVector[c] * pointScaleFactor) for c in range(3)]
windowCoordinates = self.projectLocal(initialTipCoordinates[0], initialTipCoordinates[1], initialTipCoordinates[2], localScene)
finalTipCoordinates = self.unprojectLocal(mousePos[0], -mousePos[1], windowCoordinates[2], localScene)
finalVector = [(finalTipCoordinates[c] - initialCoordinates[c]) / pointScaleFactor for c in range(3)]
result = editVectorField.assignReal(fieldcache, finalVector)
else:
windowCoordinates = self.projectLocal(initialCoordinates[0], initialCoordinates[1], initialCoordinates[2], localScene)
xa = self.unprojectLocal(self._lastMousePos[0], -self._lastMousePos[1], windowCoordinates[2], localScene)
xb = self.unprojectLocal(mousePos[0], -mousePos[1], windowCoordinates[2], localScene)
finalCoordinates = [(initialCoordinates[c] + xb[c] - xa[c]) for c in range(3)]
result = editCoordinateField.assignReal(fieldcache, finalCoordinates)
del editVectorField
del editCoordinateField
del fieldcache
self._lastMousePos = mousePos
event.accept()
return
if self._alignMode != self.AlignMode.NONE:
mousePos = [event.x(), event.y()]
delta = [mousePos[0] - self._lastMousePos[0], mousePos[1] - self._lastMousePos[1]]
result, eye = self._sceneviewer.getEyePosition()
result, lookat = self._sceneviewer.getLookatPosition()
result, up = self._sceneviewer.getUpVector()
lookatToEye = sub(eye, lookat)
eyeDistance = magnitude(lookatToEye)
front = div(lookatToEye, eyeDistance)
right = cross(up, front)
if self._alignMode == self.AlignMode.ROTATION:
mag = magnitude(delta)
prop = div(delta, mag)
axis = add(mult(up, prop[0]), mult(right, prop[1]))
angle = mag * 0.002
# print('delta', delta, 'axis', axis, 'angle', angle)
self._model.interactionRotate(axis, angle)
elif self._alignMode == self.AlignMode.SCALE:
factor = 1.0 + delta[1] * 0.0005
if factor < 0.9:
factor = 0.9
self._model.interactionScale(factor)
elif self._alignMode == self.AlignMode.TRANSLATION:
result, l, r, b, t, near, far = self._sceneviewer.getViewingVolume()
viewportWidth = self.width()
viewportHeight = self.height()
if viewportWidth > viewportHeight:
eyeScale = (t - b) / viewportHeight
else:
eyeScale = (r - l) / viewportWidth
offset = add(mult(right, eyeScale * delta[0]), mult(up, -eyeScale * delta[1]))
self._model.interactionTranslate(offset)
self._lastMousePos = mousePos
event.accept()
return
else:
super(NodeEditorSceneviewerWidget, self).mouseMoveEvent(event)
def mouseReleaseEvent(self, event):
self._lastMousePos = None
if self._editNode:
if event.button() == QtCore.Qt.LeftButton:
self._editNode = None
self._editCoordinateField = None
self._editVectorField = None
event.accept()
return
elif self._alignMode != self.AlignMode.NONE:
self._model.interactionEnd()
self._alignMode = self.AlignMode.NONE
event.accept()
return
super(NodeEditorSceneviewerWidget, self).mouseReleaseEvent(event)
|
the-stack_106_25534 | # -*- coding: utf-8 -*-
from app import app
from flask import *
from app.models.Email import Email
from app.models.banco.Usuario import Usuario
from app.models.form.login_usuario import LoginForm
from app.models.form.cadastro_usuario import CadastroForm
from app.models.form.editar_usuario import EditarForm
from flask_login import login_user, login_required, logout_user, current_user
from hashlib import md5
usuario_bp = Blueprint('usuario', __name__, url_prefix='/usuario')
@usuario_bp.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
email = form.email.data
senha = md5(form.senha.data.encode())
usuario = Usuario.query.filter_by(email = email).first()
if usuario:
if usuario.senha == senha.hexdigest():
login_user(usuario)
else:
flash(u'Senha inválida!', 'danger')
else:
flash(u'Usuário inválido!', 'danger')
return redirect('/produto')
@usuario_bp.route('/cadastro', methods=['GET', 'POST'])
def cadastro():
form = CadastroForm()
if form.validate_on_submit():
nome = form.nome.data
email = form.email.data
senha = md5((form.senha.data).encode())
conf_senha = md5((form.conf_senha.data).encode())
endereco = form.endereco.data
cpf = form.cpf.data
data_nasc = form.data_nasc.data
if senha.hexdigest() == conf_senha.hexdigest():
novo_usuario = Usuario(nome = nome, email = email, senha = senha.hexdigest(), endereco = endereco, cpf = cpf, data_nasc = data_nasc)
cadastro_usuario(novo_usuario)
login_user(novo_usuario)
else:
flash(u'Ocorreu um problema ao tentar cadastrar usuário, as senhas não coincidem!', 'danger')
return redirect('/produto')
@usuario_bp.route('/funcionario/cadastro', methods=['GET', 'POST'])
@login_required
def cadastro_funcionario():
if current_user.cargo == 'administrador':
form = CadastroForm()
if request.method == 'POST':
nome = request.form['nome']
email = request.form['email']
senha = md5((request.form['senha']).encode())
conf_senha = md5((request.form['conf_senha']).encode())
endereco = request.form['endereco']
cpf = request.form['cpf']
data_nasc = request.form['data_nasc']
cargo = 'funcionario'
if senha.hexdigest() == conf_senha.hexdigest():
novo_usuario = Usuario(nome = nome, email = email, senha = senha.hexdigest(), endereco = endereco, cpf = cpf, data_nasc = data_nasc, cargo = cargo)
cadastro_usuario(novo_usuario)
return redirect("/produto")
else:
flash(u'Ocorreu um problema ao tentar cadastrar funcionário, as senhas não coincidem!', 'danger')
return render_template('adicionarfuncionario.html', form=form, titulo='Adicionar Funcionario')
else:
return redirect('/produto')
def cadastro_usuario(usuario):
usuario_foi_cadastrado = Usuario.salvar(usuario)
if usuario_foi_cadastrado:
flash(u'Usuário cadastrado com sucesso!', 'success')
if Email.send_verificacao_email(usuario.email):
flash(u'Email de verificação enviado com sucesso!', 'success')
else:
flash(u'Falha ao enviar email de verificação, tente novamente em outro momento!', 'danger')
else:
flash(u'Ocorreu um problema ao tentar cadastrar usuário, tente novamente!', 'danger')
return redirect('/produto')
@usuario_bp.route('/funcionario/listar', methods=['GET'])
@login_required
def listar():
if current_user.cargo == 'administrador':
funcionarios = Usuario.query.filter_by(cargo='funcionario')
return render_template('buscas/funcionarios.html', funcionarios = funcionarios)
else:
flash(u'Você não tem permissão para acessar esta rota!', 'danger')
return redirect('/produto')
@usuario_bp.route('/editar', methods=['GET', 'POST'])
@login_required
def editar_usuario():
form = EditarForm()
form.nome.data = current_user.nome
form.email.data = current_user.email
form.endereco.data = current_user.endereco
form.cpf.data = current_user.cpf
form.data_nasc.data = current_user.data_nasc
if request.method == 'POST':
usuario = Usuario.query.get(current_user.id)
usuario.nome = request.form['nome']
usuario.email = request.form['email']
usuario.endereco = request.form['endereco']
usuario.cpf = request.form['cpf']
usuario.data_nasc = request.form['data_nasc']
senha = request.form['senha']
conf_senha = request.form['conf_senha']
if senha.strip() and conf_senha.strip():
senha_md5 = md5(senha.encode())
conf_senha_md5 = md5(conf_senha.encode())
if senha_md5.hexdigest() == conf_senha_md5.hexdigest():
usuario.senha = senha_md5.hexdigest()
else:
flash(u'Ocorreu um problema ao tentar alterar funcionário, as senhas não coincidem!', 'danger')
usuario_foi_salvo = Usuario.salvar(usuario)
if usuario_foi_salvo:
flash(u'Usuario alterado com sucesso!', 'success')
return redirect('/produto')
else:
flash(
u'Ocorreu um problema ao tentar alterar informacoes, tente novamente!', 'danger')
return render_template('adicionarfuncionario.html', form=form, titulo='Editar')
return render_template('adicionarfuncionario.html', form = form, titulo='Editar')
@usuario_bp.route('/editar/<id>', methods=['GET', 'POST'])
def editar_funcionario(id = False):
form = EditarForm()
usuario = Usuario.query.get(id)
if current_user.cargo == 'administrador':
if usuario:
form.nome.data = usuario.nome
form.email.data = usuario.email
form.endereco.data = usuario.endereco
form.cpf.data = usuario.cpf
form.data_nasc.data = usuario.data_nasc
if request.method == 'POST':
usuario.nome = request.form['nome']
usuario.email = request.form['email']
usuario.endereco = request.form['endereco']
usuario.cpf = request.form['cpf']
usuario.data_nasc = request.form['data_nasc']
senha = request.form['senha']
conf_senha = request.form['conf_senha']
if senha.strip() and conf_senha.strip():
senha_md5 = md5(senha.encode())
conf_senha_md5 = md5(conf_senha.encode())
if senha_md5.hexdigest() == conf_senha_md5.hexdigest():
usuario.senha = senha_md5.hexdigest()
else:
flash(u'Ocorreu um problema ao tentar alterar funcionário, as senhas não coincidem!', 'danger')
usuario_foi_salvo = Usuario.salvar(usuario)
if usuario_foi_salvo:
flash(u'Funcionário alterado com sucesso!', 'success')
return redirect('/produto')
else:
flash(u'Ocorreu um problema ao tentar alterar informacoes, tente novamente!', 'danger')
return render_template('adicionarfuncionario.html', form=form, titulo='Editar')
else:
flash(u'Ocorreu um problema ao tentar buscar o usuário, tente novamente!', 'danger')
            return redirect('/usuario/funcionario/listar')
return render_template('adicionarfuncionario.html', form = form, titulo='Editar')
else:
flash(u'Você não tem permissão para acessar esta rota!', 'danger')
return redirect('/produto')
@usuario_bp.route('/deletarconta')
@login_required
def excluir_conta(id = False):
id_usuario = current_user.id
if Usuario.excluir(id_usuario):
logout_user()
flash(u'Sua conta foi excluida com sucesso!', 'success')
else:
flash(u'Falha ao excluir sua conta!', 'danger')
return redirect('/produto')
@usuario_bp.route('/deletarconta/<id>')
@login_required
def excluir_conta_outro_user(id = False):
if id and current_user.cargo == 'administrador':
if Usuario.excluir(id):
flash(u'A conta foi excluida com sucesso!', 'success')
else:
flash(u'Erro ao excluir conta!', 'danger')
return redirect('/usuario/funcionario/listar')
else:
flash(u'Você não tem permissão para excluir contas de terceiros!', 'danger')
return redirect('/produto')
@usuario_bp.route('/logout')
@login_required
def logout():
logout_user()
return redirect('/produto')
|
the-stack_106_25535 | # Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client
from datetime import date
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
client = Client(account_sid, auth_token)
# calls is a list of call resources matching the status and date filters below
calls = client.calls.list(
status="in-progress",
start_time_after=date(2009, 7, 4),
start_time_before=date(2009, 7, 6)
)
for call in calls:
print(call.to)
|
the-stack_106_25536 | """
Copyright (c) 2020 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import cocotb
from cocotb.triggers import Event
from cocotb.log import SimLog
import mmap
from collections import deque
from .version import __version__
from .constants import *
from .axi_channels import *
from .utils import hexdump, hexdump_str
class AxiRamWrite(object):
def __init__(self, entity, name, clock, reset=None, size=1024, mem=None):
self.log = SimLog("cocotb.%s.%s" % (entity._name, name))
self.log.info("AXI RAM model")
self.log.info("cocotbext-axi version %s", __version__)
self.log.info("Copyright (c) 2020 Alex Forencich")
self.log.info("https://github.com/alexforencich/cocotbext-axi")
if type(mem) is mmap.mmap:
self.mem = mem
else:
self.mem = mmap.mmap(-1, size)
self.size = len(self.mem)
self.reset = reset
self.aw_channel = AxiAWSink(entity, name, clock, reset)
self.w_channel = AxiWSink(entity, name, clock, reset)
self.b_channel = AxiBSource(entity, name, clock, reset)
self.in_flight_operations = 0
self.width = len(self.w_channel.bus.wdata)
self.byte_size = 8
self.byte_width = self.width // self.byte_size
self.strb_mask = 2**self.byte_width-1
assert self.byte_width == len(self.w_channel.bus.wstrb)
assert self.byte_width * self.byte_size == self.width
assert len(self.b_channel.bus.bid) == len(self.aw_channel.bus.awid)
cocotb.fork(self._process_write())
def read_mem(self, address, length):
self.mem.seek(address)
return self.mem.read(length)
def write_mem(self, address, data):
self.mem.seek(address)
self.mem.write(bytes(data))
def hexdump(self, address, length, prefix=""):
hexdump(self.mem, address, length, prefix=prefix)
def hexdump_str(self, address, length, prefix=""):
return hexdump_str(self.mem, address, length, prefix=prefix)
async def _process_write(self):
while True:
await self.aw_channel.wait()
aw = self.aw_channel.recv()
awid = int(aw.awid)
addr = int(aw.awaddr)
length = int(aw.awlen)
size = int(aw.awsize)
burst = int(aw.awburst)
prot = AxiProt(int(aw.awprot))
self.log.info(f"Write burst awid: {awid:#x} awaddr: {addr:#010x} awlen: {length} awsize: {size} awprot: {prot}")
num_bytes = 2**size
assert 0 < num_bytes <= self.byte_width
aligned_addr = (addr // num_bytes) * num_bytes
length += 1
transfer_size = num_bytes*length
if burst == AxiBurstType.WRAP:
lower_wrap_boundary = (addr // transfer_size) * transfer_size
upper_wrap_boundary = lower_wrap_boundary + transfer_size
if burst == AxiBurstType.INCR:
# check 4k boundary crossing
assert 0x1000-(aligned_addr&0xfff) >= transfer_size
cur_addr = aligned_addr
for n in range(length):
cur_word_addr = (cur_addr // self.byte_width) * self.byte_width
await self.w_channel.wait()
w = self.w_channel.recv()
data = int(w.wdata)
strb = int(w.wstrb)
last = int(w.wlast)
# todo latency
self.mem.seek(cur_word_addr % self.size)
data = data.to_bytes(self.byte_width, 'little')
self.log.debug(f"Write word awid: {awid:#x} addr: {cur_addr:#010x} wstrb: {strb:#04x} data: {' '.join((f'{c:02x}' for c in data))}")
for i in range(self.byte_width):
if strb & (1 << i):
self.mem.write(data[i:i+1])
else:
self.mem.seek(1, 1)
assert last == (n == length-1)
if burst != AxiBurstType.FIXED:
cur_addr += num_bytes
if burst == AxiBurstType.WRAP:
if cur_addr == upper_wrap_boundary:
cur_addr = lower_wrap_boundary
b = self.b_channel._transaction_obj()
b.bid = awid
b.bresp = AxiResp.OKAY
self.b_channel.send(b)
class AxiRamRead(object):
def __init__(self, entity, name, clock, reset=None, size=1024, mem=None):
self.log = SimLog("cocotb.%s.%s" % (entity._name, name))
if type(mem) is mmap.mmap:
self.mem = mem
else:
self.mem = mmap.mmap(-1, size)
self.size = len(self.mem)
self.reset = reset
self.ar_channel = AxiARSink(entity, name, clock, reset)
self.r_channel = AxiRSource(entity, name, clock, reset)
self.int_read_resp_command_queue = deque()
self.int_read_resp_command_sync = Event()
self.in_flight_operations = 0
self.width = len(self.r_channel.bus.rdata)
self.byte_size = 8
self.byte_width = self.width // self.byte_size
assert self.byte_width * self.byte_size == self.width
assert len(self.r_channel.bus.rid) == len(self.ar_channel.bus.arid)
cocotb.fork(self._process_read())
def read_mem(self, address, length):
self.mem.seek(address)
return self.mem.read(length)
def write_mem(self, address, data):
self.mem.seek(address)
self.mem.write(bytes(data))
def hexdump(self, address, length, prefix=""):
hexdump(self.mem, address, length, prefix=prefix)
def hexdump_str(self, address, length, prefix=""):
return hexdump_str(self.mem, address, length, prefix=prefix)
async def _process_read(self):
while True:
await self.ar_channel.wait()
ar = self.ar_channel.recv()
arid = int(ar.arid)
addr = int(ar.araddr)
length = int(ar.arlen)
size = int(ar.arsize)
burst = int(ar.arburst)
prot = AxiProt(ar.arprot)
self.log.info(f"Read burst arid: {arid:#x} araddr: {addr:#010x} arlen: {length} arsize: {size} arprot: {prot}")
num_bytes = 2**size
assert 0 < num_bytes <= self.byte_width
aligned_addr = (addr // num_bytes) * num_bytes
length += 1
transfer_size = num_bytes*length
if burst == AxiBurstType.WRAP:
lower_wrap_boundary = (addr // transfer_size) * transfer_size
upper_wrap_boundary = lower_wrap_boundary + transfer_size
if burst == AxiBurstType.INCR:
# check 4k boundary crossing
assert 0x1000-(aligned_addr&0xfff) >= transfer_size
cur_addr = aligned_addr
for n in range(length):
cur_word_addr = (cur_addr // self.byte_width) * self.byte_width
self.mem.seek(cur_word_addr % self.size)
data = self.mem.read(self.byte_width)
r = self.r_channel._transaction_obj()
r.rid = arid
r.rdata = int.from_bytes(data, 'little')
r.rlast = n == length-1
r.rresp = AxiResp.OKAY
self.r_channel.send(r)
self.log.debug(f"Read word arid: {arid:#x} addr: {cur_addr:#010x} data: {' '.join((f'{c:02x}' for c in data))}")
if burst != AxiBurstType.FIXED:
cur_addr += num_bytes
if burst == AxiBurstType.WRAP:
if cur_addr == upper_wrap_boundary:
cur_addr = lower_wrap_boundary
class AxiRam(object):
def __init__(self, entity, name, clock, reset=None, size=1024, mem=None):
self.write_if = None
self.read_if = None
if type(mem) is mmap.mmap:
self.mem = mem
else:
self.mem = mmap.mmap(-1, size)
self.size = len(self.mem)
self.write_if = AxiRamWrite(entity, name, clock, reset, mem=self.mem)
self.read_if = AxiRamRead(entity, name, clock, reset, mem=self.mem)
def read_mem(self, address, length):
self.mem.seek(address)
return self.mem.read(length)
def write_mem(self, address, data):
self.mem.seek(address)
self.mem.write(bytes(data))
def hexdump(self, address, length, prefix=""):
hexdump(self.mem, address, length, prefix=prefix)
def hexdump_str(self, address, length, prefix=""):
return hexdump_str(self.mem, address, length, prefix=prefix)
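# Usage sketch (hypothetical testbench names, not part of this module): in a
# cocotb test the RAM model is typically bound to the DUT's AXI port and the
# backing memory is preloaded or inspected through the *_mem helpers, e.g.
#     axi_ram = AxiRam(dut, "s_axi", dut.clk, dut.rst, size=2**16)
#     axi_ram.write_mem(0x0000, b'\x11\x22\x33\x44')
#     data = axi_ram.read_mem(0x0000, 4)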
|
the-stack_106_25537 | """Test the Google mixin class."""
import unittest
from turnovertools import google
from turnovertools import mediaobjects as mobs
# pylint: disable=W0212
class TestGoogleMixin(unittest.TestCase):
"""Create various mobs classes mixed in with Google and confirm
that their attributes work properly."""
def setUp(self):
class GoogleSourceClip(google.Google, mobs.SourceClip):
"""Test class"""
self.test_object = GoogleSourceClip.dummy()
def test_google_hasattrs(self):
"""Creates a SourceClip child class using the Google mixin and
checks that it has all the attributes expected in both parent
classes."""
mob = self.test_object
for attr in mobs.SourceClip.standard_attrs():
self.assertTrue(hasattr(mob, attr))
for attr in google.Google._provides_attrs:
self.assertTrue(hasattr(mob, attr))
def test_google_provides_attrs(self):
mob = self.test_object
parents_provide = list()
parents_provide.extend(mobs.SourceClip.standard_attrs())
parents_provide.extend(google.Google._provides_attrs)
test_class_provides = dict.fromkeys(parents_provide, False)
for attr in mob.standard_attrs():
test_class_provides[attr] = True
for attr in test_class_provides:
self.assertTrue(test_class_provides[attr])
|
the-stack_106_25538 | from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
import json
from django.shortcuts import render, redirect
from django.contrib import messages
from django.http import HttpResponse, Http404, JsonResponse, HttpResponseRedirect
import random
from io import StringIO, BytesIO
import base64
from annotationweb.settings import BASE_DIR
from common.metaimage import *
import numpy as np
from annotationweb.models import Task, ImageAnnotation, Label
from common.utility import get_image_as_http_response
import common.task
from annotationweb.models import KeyFrameAnnotation
from spline_segmentation.models import ControlPoint
from django.db import transaction
def segment_next_image(request, task_id):
return segment_image(request, task_id, None)
def add_default_labels(task_id):
    # Check whether the task has the expected labels set up;
    # if not, add them to the database.
task = Task.objects.get(pk=task_id)
labels = (('Endocardium', (0, 255, 0)), ('Epicardium', (0, 0, 255)), ('Left atrium', (255, 0, 0)))
if len(task.label.all()) != 3:
# Remove old ones
for label in task.label.all():
task.label.remove(label)
print('Adding labels to task')
for label in labels:
try:
# Check if already exist
label_obj = Label.objects.get(name=label[0])
except Label.DoesNotExist:
label_obj = Label()
label_obj.name = label[0]
label_obj.color_red = label[1][0]
label_obj.color_green = label[1][1]
label_obj.color_blue = label[1][2]
label_obj.save()
task.label.add(label_obj)
task.save()
def segment_image(request, task_id, image_id):
add_default_labels(task_id)
try:
context = common.task.setup_task_context(request, task_id, Task.CARDIAC_SEGMENTATION, image_id)
image_id = context['image'].id # Because image_id can initially be None
context['javascript_files'] = ['cardiac/segmentation.js']
# Check if image is already segmented, if so get data and pass to template
try:
annotations = KeyFrameAnnotation.objects.filter(image_annotation__task_id=task_id,
image_annotation__image_id=image_id)
control_points = ControlPoint.objects.filter(image__in=annotations).order_by('index')
context['control_points'] = control_points
context['target_frames'] = annotations
except KeyFrameAnnotation.DoesNotExist:
pass
return render(request, 'cardiac/segment_image.html', context)
except common.task.NoMoreImages:
messages.info(request, 'This task is finished, no more images to segment.')
return redirect('index')
except RuntimeError as e:
messages.error(request, str(e))
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
def save_segmentation(request):
error_messages = ''
motion_mode_line = int(round(float(request.POST['motion_mode_line'])))
control_points = json.loads(request.POST['control_points'])
target_frame_types = json.loads(request.POST['target_frame_types'])
print(control_points)
objects = ('Endocardium', 'Epicardium', 'Left atrium')
rejected = request.POST['rejected'] == 'true'
if not rejected:
for frame_nr in control_points.keys():
for i in range(len(objects)):
if str(i) in control_points[frame_nr] and \
len(control_points[frame_nr][str(i)]['control_points']) < 1:
error_messages += objects[i] + ' annotation missing in frame ' + str(frame_nr) + '<br>'
if len(error_messages):
response = {
'success': 'false',
'message': error_messages,
}
else:
try:
# Use atomic transaction here so if something crashes the annotations are restored..
with transaction.atomic():
annotations = common.task.save_annotation(request)
# Save segmentation
# Save control points
for annotation in annotations:
frame_nr = str(annotation.frame_nr)
# Set frame metadata
annotation.frame_metadata = target_frame_types[frame_nr]
annotation.save()
for object in control_points[frame_nr]:
nr_of_control_points = len(control_points[frame_nr][object]['control_points'])
if nr_of_control_points < 3:
continue
for point in range(nr_of_control_points):
control_point = ControlPoint()
control_point.image = annotation
control_point.x = float(control_points[frame_nr][object]['control_points'][point]['x'])
control_point.y = float(control_points[frame_nr][object]['control_points'][point]['y'])
control_point.index = point
control_point.object = int(object)
# TODO modify this line to have proper label:
control_point.label = Label.objects.get(id=int(control_points[frame_nr][object]['label']['id']))
control_point.uncertain = bool(
control_points[frame_nr][object]['control_points'][point]['uncertain'])
control_point.save()
response = {
'success': 'true',
'message': 'Annotation saved',
}
except Exception as e:
response = {
'success': 'false',
'message': str(e),
}
return JsonResponse(response)
def show_segmentation(request, task_id, image_id):
pass
|
the-stack_106_25540 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.cloud.compute_v1.types import compute
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-compute",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
_API_CORE_VERSION = google.api_core.__version__
class UrlMapsTransport(abc.ABC):
"""Abstract transport class for UrlMaps."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
)
DEFAULT_HOST: str = "compute.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes or self.AUTH_SCOPES
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): These two class methods are in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-api-core
# and google-auth are increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(
cls, host: str, scopes: Optional[Sequence[str]]
) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
# TODO: Remove this function once google-api-core >= 1.26.0 is required
@classmethod
def _get_self_signed_jwt_kwargs(
cls, host: str, scopes: Optional[Sequence[str]]
) -> Dict[str, Union[Optional[Sequence[str]], str]]:
"""Returns kwargs to pass to grpc_helpers.create_channel depending on the google-api-core version"""
self_signed_jwt_kwargs: Dict[str, Union[Optional[Sequence[str]], str]] = {}
if _API_CORE_VERSION and (
packaging.version.parse(_API_CORE_VERSION)
>= packaging.version.parse("1.26.0")
):
self_signed_jwt_kwargs["default_scopes"] = cls.AUTH_SCOPES
self_signed_jwt_kwargs["scopes"] = scopes
self_signed_jwt_kwargs["default_host"] = cls.DEFAULT_HOST
else:
self_signed_jwt_kwargs["scopes"] = scopes or cls.AUTH_SCOPES
return self_signed_jwt_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.aggregated_list: gapic_v1.method.wrap_method(
self.aggregated_list, default_timeout=None, client_info=client_info,
),
self.delete: gapic_v1.method.wrap_method(
self.delete, default_timeout=None, client_info=client_info,
),
self.get: gapic_v1.method.wrap_method(
self.get, default_timeout=None, client_info=client_info,
),
self.insert: gapic_v1.method.wrap_method(
self.insert, default_timeout=None, client_info=client_info,
),
self.invalidate_cache: gapic_v1.method.wrap_method(
self.invalidate_cache, default_timeout=None, client_info=client_info,
),
self.list: gapic_v1.method.wrap_method(
self.list, default_timeout=None, client_info=client_info,
),
self.patch: gapic_v1.method.wrap_method(
self.patch, default_timeout=None, client_info=client_info,
),
self.update: gapic_v1.method.wrap_method(
self.update, default_timeout=None, client_info=client_info,
),
self.validate: gapic_v1.method.wrap_method(
self.validate, default_timeout=None, client_info=client_info,
),
}
@property
def aggregated_list(
self,
) -> Callable[
[compute.AggregatedListUrlMapsRequest],
Union[compute.UrlMapsAggregatedList, Awaitable[compute.UrlMapsAggregatedList]],
]:
raise NotImplementedError()
@property
def delete(
self,
) -> Callable[
[compute.DeleteUrlMapRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def get(
self,
) -> Callable[
[compute.GetUrlMapRequest], Union[compute.UrlMap, Awaitable[compute.UrlMap]]
]:
raise NotImplementedError()
@property
def insert(
self,
) -> Callable[
[compute.InsertUrlMapRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def invalidate_cache(
self,
) -> Callable[
[compute.InvalidateCacheUrlMapRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def list(
self,
) -> Callable[
[compute.ListUrlMapsRequest],
Union[compute.UrlMapList, Awaitable[compute.UrlMapList]],
]:
raise NotImplementedError()
@property
def patch(
self,
) -> Callable[
[compute.PatchUrlMapRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def update(
self,
) -> Callable[
[compute.UpdateUrlMapRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def validate(
self,
) -> Callable[
[compute.ValidateUrlMapRequest],
Union[
compute.UrlMapsValidateResponse, Awaitable[compute.UrlMapsValidateResponse]
],
]:
raise NotImplementedError()
__all__ = ("UrlMapsTransport",)
|
the-stack_106_25542 | '''
I used these to work through bugs and identify data's format
'''
def is_symmetric_sparse_arr(arr):
'''
arr is the coord array of the sparse array tuple
'''
for row in arr:
a,b = row[0], row[1]
if not [b,a] in arr:
return False
else:
return True
def has_diagonal(sparse_tuple):
indices = sparse_tuple[0]
values = sparse_tuple[1]
shape = sparse_tuple[2]
N = shape[0]
ans_1 = sum([r[0] == r[1] for r in indices])
return ans_1 == N
def only_ones(sparse_tuple):
indices = sparse_tuple[0]
values = sparse_tuple[1]
shape = sparse_tuple[2]
largest = np.max(values)
smallest = np.min(values)
return largest == smallest and largest == 1
def get_save_paths(save_dir):
save_path_model = save_dir + "model.ckpt"
save_path_training_vars = save_dir + "vars.npy"
return save_path_model, save_path_training_vars
from numpy.lib.npyio import save
from tensorflow.python.framework.c_api_util import tf_output
from model import GCN_LPA
import numpy as np
def load_training_vars(save_path_training_vars):
# Load the training variables from a file
training_vars = np.load(save_path_training_vars, allow_pickle = True)
# For some reason, it's wrapped in an array
training_vars = training_vars[()]
return training_vars
def load_train_result(args, data, save_dir):
# Reset tensorflow to default
#tf.reset_default_graph()
features, labels, adj, train_mask, val_mask, test_mask = [data[i] for i in range(6)]
model = GCN_LPA(args, features, labels, adj)
_, save_path_training_vars = get_save_paths(save_dir)
training_vars = load_training_vars(save_path_training_vars)
return training_vars, model
import tensorflow as tf
def evaluate_model(data, model, save_dir):
'''
The GCNLPA only.
'''
features, labels, adj, train_mask, val_mask, test_mask = [data[i] for i in range(6)]
# Recover the model outputs
saver = tf.train.Saver()
with tf.Session() as sess:
# Restore variables from disk.
save_path_model, _ = get_save_paths(save_dir)
saver.restore(sess, save_path_model)
# For some reason, the way the model is built, you need to give a mask
# The return values is the same regardless of model.label_mask
output = sess.run(model.outputs, feed_dict={model.label_mask: test_mask, model.dropout: 0})
return output |
the-stack_106_25544 | import argparse
import os
from os.path import join
import pandas as pd
import yaml
from constants import h5_internal_paths
from constants.dataset_tables import ModelsTableHeader, DatasetTableHeader
from file_actions.writers.h5 import \
assemble_tomo_from_subtomos
from networks.utils import build_prediction_output_dir
parser = argparse.ArgumentParser()
parser.add_argument("-yaml_file", "--yaml_file", help="yaml_file", type=str)
parser.add_argument("-tomos_set", "--tomos_set",
help="tomos set name to be used for training", type=int)
args = parser.parse_args()
yaml_file = args.yaml_file
config = yaml.safe_load(open(yaml_file))
tomos_set = args.tomos_set
tomo_list = config['tomos_sets'][tomos_set]['test_list']
# unet_hyperparameters = config['unet_hyperparameters']
output_dir = config["pred_output_dir"]
models_table = os.path.join(output_dir, "models")
models_table = os.path.join(models_table, "models.csv")
model_name = config["model_path"][:-4]
segmentation_label = model_name
class_number = config['prediction']['class_number']
ModelsHeader = ModelsTableHeader()
models_df = pd.read_csv(models_table,
dtype={ModelsHeader.model_name: str,
ModelsHeader.segmentation_names: str})
model_df = models_df[models_df[ModelsHeader.model_name] == model_name]
print(model_df)
assert model_df.shape[0] == 1
overlap = model_df.iloc[0][ModelsHeader.overlap]
box_shape = int(model_df.iloc[0][ModelsHeader.box_size])
box_shape = [box_shape, box_shape, box_shape]
semantic_names = model_df.iloc[0]['semantic_classes'].split(',')
semantic_class = semantic_names[class_number]
dataset_table = config['dataset_table']
test_partition = config["prediction"]['partition_name']
DTHeader = DatasetTableHeader(partition_name=test_partition)
df = pd.read_csv(dataset_table)
df[DTHeader.tomo_name] = df[DTHeader.tomo_name].astype(str)
output_dir = os.path.join(output_dir, "predictions")
output_dir = os.path.join(output_dir, model_name)
for tomo_name in tomo_list:
tomo_output_dir = os.path.join(output_dir, tomo_name)
tomo_output_dir = os.path.join(tomo_output_dir, semantic_class)
os.makedirs(tomo_output_dir, exist_ok=True)
output_path = os.path.join(tomo_output_dir, "prediction.mrc")
if os.path.isfile(output_path):
print("The prediction file exists")
else:
tomo_df = df[df[DTHeader.tomo_name] == tomo_name]
x_dim = int(tomo_df.iloc[0][DTHeader.x_dim])
y_dim = int(tomo_df.iloc[0][DTHeader.y_dim])
z_dim = int(tomo_df.iloc[0][DTHeader.z_dim])
output_shape = (z_dim, y_dim, x_dim)
data_partition = tomo_df.iloc[0][DTHeader.partition_name]
subtomos_internal_path = join(
h5_internal_paths.PREDICTED_SEGMENTATION_SUBTOMOGRAMS,
segmentation_label)
assemble_tomo_from_subtomos(
output_path=output_path,
partition_file_path=data_partition,
output_shape=output_shape,
subtomo_shape=box_shape,
subtomos_internal_path=subtomos_internal_path,
class_number=class_number, overlap=overlap,
reconstruction_type="prediction")
|
the-stack_106_25545 | # -*- coding: utf-8 -*-
'''
The static grains, these are the core, or built in grains.
When grains are loaded they are not loaded in the same way that modules are
loaded, grain functions are detected and executed, the functions MUST
return a dict which will be applied to the main grains dict. This module
will always be executed first, so that any grains loaded here in the core
module can be overwritten just by returning dict keys with the same value
as those returned here
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import socket
import sys
import re
import platform
import logging
import locale
import uuid
import zlib
from errno import EACCES, EPERM
import datetime
import warnings
# pylint: disable=import-error
try:
import dateutil.tz
_DATEUTIL_TZ = True
except ImportError:
_DATEUTIL_TZ = False
__proxyenabled__ = ['*']
__FQDN__ = None
# Extend the default list of supported distros. This will be used for the
# /etc/DISTRO-release checking that is part of linux_distribution()
from platform import _supported_dists
_supported_dists += ('arch', 'mageia', 'meego', 'vmware', 'bluewhite64',
'slamd64', 'ovs', 'system', 'mint', 'oracle', 'void')
# linux_distribution deprecated in py3.7
try:
from platform import linux_distribution as _deprecated_linux_distribution
def linux_distribution(**kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return _deprecated_linux_distribution(**kwargs)
except ImportError:
from distro import linux_distribution
# Import salt libs
import salt.exceptions
import salt.log
import salt.utils.args
import salt.utils.dns
import salt.utils.files
import salt.utils.network
import salt.utils.path
import salt.utils.pkg.rpm
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.versions
from salt.ext import six
from salt.ext.six.moves import range
if salt.utils.platform.is_windows():
import salt.utils.win_osinfo
# Solve the Chicken and egg problem where grains need to run before any
# of the modules are loaded and are generally available for any usage.
import salt.modules.cmdmod
import salt.modules.smbios
__salt__ = {
'cmd.run': salt.modules.cmdmod._run_quiet,
'cmd.retcode': salt.modules.cmdmod._retcode_quiet,
'cmd.run_all': salt.modules.cmdmod._run_all_quiet,
'smbios.records': salt.modules.smbios.records,
'smbios.get': salt.modules.smbios.get,
}
log = logging.getLogger(__name__)
HAS_WMI = False
if salt.utils.platform.is_windows():
# attempt to import the python wmi module
# the Windows minion uses WMI for some of its grains
try:
import wmi # pylint: disable=import-error
import salt.utils.winapi
import win32api
import salt.utils.win_reg
HAS_WMI = True
except ImportError:
log.exception(
'Unable to import Python wmi module, some core grains '
'will be missing'
)
HAS_UNAME = True
if not hasattr(os, 'uname'):
HAS_UNAME = False
_INTERFACES = {}
def _windows_cpudata():
'''
Return some CPU information on Windows minions
'''
# Provides:
# num_cpus
# cpu_model
grains = {}
if 'NUMBER_OF_PROCESSORS' in os.environ:
# Cast to int so that the logic isn't broken when used as a
# conditional in templating. Also follows _linux_cpudata()
try:
grains['num_cpus'] = int(os.environ['NUMBER_OF_PROCESSORS'])
except ValueError:
grains['num_cpus'] = 1
grains['cpu_model'] = salt.utils.win_reg.read_value(
hive="HKEY_LOCAL_MACHINE",
key="HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0",
vname="ProcessorNameString").get('vdata')
return grains
def _linux_cpudata():
'''
Return some CPU information for Linux minions
'''
# Provides:
# num_cpus
# cpu_model
# cpu_flags
grains = {}
cpuinfo = '/proc/cpuinfo'
# Parse over the cpuinfo file
if os.path.isfile(cpuinfo):
with salt.utils.files.fopen(cpuinfo, 'r') as _fp:
grains['num_cpus'] = 0
for line in _fp:
comps = line.split(':')
if not len(comps) > 1:
continue
key = comps[0].strip()
val = comps[1].strip()
if key == 'processor':
grains['num_cpus'] += 1
elif key == 'model name':
grains['cpu_model'] = val
elif key == 'flags':
grains['cpu_flags'] = val.split()
elif key == 'Features':
grains['cpu_flags'] = val.split()
# ARM support - /proc/cpuinfo
#
# Processor : ARMv6-compatible processor rev 7 (v6l)
# BogoMIPS : 697.95
# Features : swp half thumb fastmult vfp edsp java tls
# CPU implementer : 0x41
# CPU architecture: 7
# CPU variant : 0x0
# CPU part : 0xb76
# CPU revision : 7
#
# Hardware : BCM2708
# Revision : 0002
# Serial : 00000000
elif key == 'Processor':
grains['cpu_model'] = val.split('-')[0]
grains['num_cpus'] = 1
if 'num_cpus' not in grains:
grains['num_cpus'] = 0
if 'cpu_model' not in grains:
grains['cpu_model'] = 'Unknown'
if 'cpu_flags' not in grains:
grains['cpu_flags'] = []
return grains
def _linux_gpu_data():
'''
num_gpus: int
gpus:
- vendor: nvidia|amd|ati|...
model: string
'''
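    # Sketch of the returned structure (hypothetical values):
    #     {'num_gpus': 1,
    #      'gpus': [{'vendor': 'nvidia', 'model': 'GeForce GTX 1080'}]}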
if __opts__.get('enable_lspci', True) is False:
return {}
if __opts__.get('enable_gpu_grains', True) is False:
return {}
lspci = salt.utils.path.which('lspci')
if not lspci:
log.debug(
'The `lspci` binary is not available on the system. GPU grains '
'will not be available.'
)
return {}
# dominant gpu vendors to search for (MUST be lowercase for matching below)
known_vendors = ['nvidia', 'amd', 'ati', 'intel', 'cirrus logic', 'vmware', 'matrox', 'aspeed']
gpu_classes = ('vga compatible controller', '3d controller')
devs = []
try:
lspci_out = __salt__['cmd.run']('{0} -vmm'.format(lspci))
cur_dev = {}
error = False
# Add a blank element to the lspci_out.splitlines() list,
# otherwise the last device is not evaluated as a cur_dev and ignored.
lspci_list = lspci_out.splitlines()
lspci_list.append('')
for line in lspci_list:
# check for record-separating empty lines
if line == '':
if cur_dev.get('Class', '').lower() in gpu_classes:
devs.append(cur_dev)
cur_dev = {}
continue
if re.match(r'^\w+:\s+.*', line):
key, val = line.split(':', 1)
cur_dev[key.strip()] = val.strip()
else:
error = True
log.debug('Unexpected lspci output: \'%s\'', line)
if error:
log.warning(
'Error loading grains, unexpected linux_gpu_data output, '
'check that you have a valid shell configured and '
'permissions to run lspci command'
)
except OSError:
pass
gpus = []
for gpu in devs:
vendor_strings = re.split('[^A-Za-z0-9]', gpu['Vendor'].lower())
# default vendor to 'unknown', overwrite if we match a known one
vendor = 'unknown'
for name in known_vendors:
# search for an 'expected' vendor name in the list of strings
if name in vendor_strings:
vendor = name
break
gpus.append({'vendor': vendor, 'model': gpu['Device']})
grains = {}
grains['num_gpus'] = len(gpus)
grains['gpus'] = gpus
return grains
def _netbsd_gpu_data():
'''
num_gpus: int
gpus:
- vendor: nvidia|amd|ati|...
model: string
'''
known_vendors = ['nvidia', 'amd', 'ati', 'intel', 'cirrus logic', 'vmware', 'matrox', 'aspeed']
gpus = []
try:
pcictl_out = __salt__['cmd.run']('pcictl pci0 list')
for line in pcictl_out.splitlines():
for vendor in known_vendors:
vendor_match = re.match(
r'[0-9:]+ ({0}) (.+) \(VGA .+\)'.format(vendor),
line,
re.IGNORECASE
)
if vendor_match:
gpus.append({'vendor': vendor_match.group(1), 'model': vendor_match.group(2)})
except OSError:
pass
grains = {}
grains['num_gpus'] = len(gpus)
grains['gpus'] = gpus
return grains
def _osx_gpudata():
'''
num_gpus: int
gpus:
- vendor: nvidia|amd|ati|...
model: string
'''
gpus = []
try:
pcictl_out = __salt__['cmd.run']('system_profiler SPDisplaysDataType')
for line in pcictl_out.splitlines():
fieldname, _, fieldval = line.partition(': ')
if fieldname.strip() == "Chipset Model":
vendor, _, model = fieldval.partition(' ')
vendor = vendor.lower()
gpus.append({'vendor': vendor, 'model': model})
except OSError:
pass
grains = {}
grains['num_gpus'] = len(gpus)
grains['gpus'] = gpus
return grains
def _bsd_cpudata(osdata):
'''
Return CPU information for BSD-like systems
'''
# Provides:
# cpuarch
# num_cpus
# cpu_model
# cpu_flags
sysctl = salt.utils.path.which('sysctl')
arch = salt.utils.path.which('arch')
cmds = {}
if sysctl:
cmds.update({
'num_cpus': '{0} -n hw.ncpu'.format(sysctl),
'cpuarch': '{0} -n hw.machine'.format(sysctl),
'cpu_model': '{0} -n hw.model'.format(sysctl),
})
if arch and osdata['kernel'] == 'OpenBSD':
cmds['cpuarch'] = '{0} -s'.format(arch)
if osdata['kernel'] == 'Darwin':
cmds['cpu_model'] = '{0} -n machdep.cpu.brand_string'.format(sysctl)
cmds['cpu_flags'] = '{0} -n machdep.cpu.features'.format(sysctl)
grains = dict([(k, __salt__['cmd.run'](v)) for k, v in six.iteritems(cmds)])
if 'cpu_flags' in grains and isinstance(grains['cpu_flags'], six.string_types):
grains['cpu_flags'] = grains['cpu_flags'].split(' ')
if osdata['kernel'] == 'NetBSD':
grains['cpu_flags'] = []
for line in __salt__['cmd.run']('cpuctl identify 0').splitlines():
cpu_match = re.match(r'cpu[0-9]:\ features[0-9]?\ .+<(.+)>', line)
if cpu_match:
flag = cpu_match.group(1).split(',')
grains['cpu_flags'].extend(flag)
if osdata['kernel'] == 'FreeBSD' and os.path.isfile('/var/run/dmesg.boot'):
grains['cpu_flags'] = []
        # TODO: this at least needs to be tested on BSDs other than FreeBSD
with salt.utils.files.fopen('/var/run/dmesg.boot', 'r') as _fp:
cpu_here = False
for line in _fp:
if line.startswith('CPU: '):
cpu_here = True # starts CPU descr
continue
if cpu_here:
if not line.startswith(' '):
break # game over
if 'Features' in line:
start = line.find('<')
end = line.find('>')
if start > 0 and end > 0:
flag = line[start + 1:end].split(',')
grains['cpu_flags'].extend(flag)
try:
grains['num_cpus'] = int(grains['num_cpus'])
except ValueError:
grains['num_cpus'] = 1
return grains
def _sunos_cpudata():
'''
Return the CPU information for Solaris-like systems
'''
# Provides:
# cpuarch
# num_cpus
# cpu_model
# cpu_flags
grains = {}
grains['cpu_flags'] = []
grains['cpuarch'] = __salt__['cmd.run']('isainfo -k')
psrinfo = '/usr/sbin/psrinfo 2>/dev/null'
grains['num_cpus'] = len(__salt__['cmd.run'](psrinfo, python_shell=True).splitlines())
kstat_info = 'kstat -p cpu_info:*:*:brand'
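    # `kstat -p` prints tab-separated "module:instance:name:statistic  value"
    # pairs, e.g. (illustrative):
    #   cpu_info:0:cpu_info0:brand      Intel(r) Xeon(r) CPU E5-2680 v2 @ 2.80GHz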
for line in __salt__['cmd.run'](kstat_info).splitlines():
match = re.match(r'(\w+:\d+:\w+\d+:\w+)\s+(.+)', line)
if match:
grains['cpu_model'] = match.group(2)
isainfo = 'isainfo -n -v'
for line in __salt__['cmd.run'](isainfo).splitlines():
match = re.match(r'^\s+(.+)', line)
if match:
cpu_flags = match.group(1).split()
grains['cpu_flags'].extend(cpu_flags)
return grains
def _aix_cpudata():
'''
Return CPU information for AIX systems
'''
# Provides:
# cpuarch
# num_cpus
# cpu_model
# cpu_flags
grains = {}
cmd = salt.utils.path.which('prtconf')
if cmd:
data = __salt__['cmd.run']('{0}'.format(cmd)) + os.linesep
for dest, regstring in (('cpuarch', r'(?im)^\s*Processor\s+Type:\s+(\S+)'),
('cpu_flags', r'(?im)^\s*Processor\s+Version:\s+(\S+)'),
('cpu_model', r'(?im)^\s*Processor\s+Implementation\s+Mode:\s+(.*)'),
('num_cpus', r'(?im)^\s*Number\s+Of\s+Processors:\s+(\S+)')):
for regex in [re.compile(r) for r in [regstring]]:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains[dest] = res.group(1).strip().replace("'", '')
else:
log.error('The \'prtconf\' binary was not found in $PATH.')
return grains
def _linux_memdata():
'''
Return the memory information for Linux-like systems
'''
grains = {'mem_total': 0, 'swap_total': 0}
meminfo = '/proc/meminfo'
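    # /proc/meminfo reports values in kibibytes, e.g. (illustrative):
    #   MemTotal:       16314480 kB
    #   SwapTotal:       2097148 kB
    # hence the // 1024 below to convert to MiB.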
if os.path.isfile(meminfo):
with salt.utils.files.fopen(meminfo, 'r') as ifile:
for line in ifile:
comps = line.rstrip('\n').split(':')
if not len(comps) > 1:
continue
if comps[0].strip() == 'MemTotal':
# Use floor division to force output to be an integer
grains['mem_total'] = int(comps[1].split()[0]) // 1024
if comps[0].strip() == 'SwapTotal':
# Use floor division to force output to be an integer
grains['swap_total'] = int(comps[1].split()[0]) // 1024
return grains
def _osx_memdata():
'''
    Return the memory information for macOS systems
'''
grains = {'mem_total': 0, 'swap_total': 0}
sysctl = salt.utils.path.which('sysctl')
if sysctl:
mem = __salt__['cmd.run']('{0} -n hw.memsize'.format(sysctl))
swap_total = __salt__['cmd.run']('{0} -n vm.swapusage'.format(sysctl)).split()[2].replace(',', '.')
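        # vm.swapusage looks like "total = 1024.00M  used = 0.00M ..."
        # (illustrative); split()[2] grabs the total, and the comma/period
        # swap copes with locales that use a decimal comma.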
if swap_total.endswith('K'):
_power = 2**10
elif swap_total.endswith('M'):
_power = 2**20
elif swap_total.endswith('G'):
_power = 2**30
swap_total = float(swap_total[:-1]) * _power
grains['mem_total'] = int(mem) // 1024 // 1024
grains['swap_total'] = int(swap_total) // 1024 // 1024
return grains
def _bsd_memdata(osdata):
'''
Return the memory information for BSD-like systems
'''
grains = {'mem_total': 0, 'swap_total': 0}
sysctl = salt.utils.path.which('sysctl')
if sysctl:
mem = __salt__['cmd.run']('{0} -n hw.physmem'.format(sysctl))
if osdata['kernel'] == 'NetBSD' and mem.startswith('-'):
mem = __salt__['cmd.run']('{0} -n hw.physmem64'.format(sysctl))
grains['mem_total'] = int(mem) // 1024 // 1024
if osdata['kernel'] in ['OpenBSD', 'NetBSD']:
swapctl = salt.utils.path.which('swapctl')
swap_data = __salt__['cmd.run']('{0} -sk'.format(swapctl))
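            # `swapctl -sk` reports something like
            # "total: 1048540 1K-blocks allocated, 0 used, 1048540 available"
            # (illustrative), so the second field is the total in KiB.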
if swap_data == 'no swap devices configured':
swap_total = 0
else:
swap_total = swap_data.split(' ')[1]
else:
swap_total = __salt__['cmd.run']('{0} -n vm.swap_total'.format(sysctl))
grains['swap_total'] = int(swap_total) // 1024 // 1024
return grains
def _sunos_memdata():
'''
Return the memory information for SunOS-like systems
'''
grains = {'mem_total': 0, 'swap_total': 0}
prtconf = '/usr/sbin/prtconf 2>/dev/null'
for line in __salt__['cmd.run'](prtconf, python_shell=True).splitlines():
comps = line.split(' ')
if comps[0].strip() == 'Memory' and comps[1].strip() == 'size:':
grains['mem_total'] = int(comps[2].strip())
swap_cmd = salt.utils.path.which('swap')
swap_data = __salt__['cmd.run']('{0} -s'.format(swap_cmd)).split()
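    # `swap -s` prints e.g. "total: ... = 123456k used, 654321k available"
    # (illustrative); the used and available figures (KiB) are summed and
    # converted to MiB.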
try:
swap_avail = int(swap_data[-2][:-1])
swap_used = int(swap_data[-4][:-1])
swap_total = (swap_avail + swap_used) // 1024
except ValueError:
swap_total = None
grains['swap_total'] = swap_total
return grains
def _aix_memdata():
'''
Return the memory information for AIX systems
'''
grains = {'mem_total': 0, 'swap_total': 0}
prtconf = salt.utils.path.which('prtconf')
if prtconf:
for line in __salt__['cmd.run'](prtconf, python_shell=True).splitlines():
comps = [x for x in line.strip().split(' ') if x]
if len(comps) > 2 and 'Memory' in comps[0] and 'Size' in comps[1]:
grains['mem_total'] = int(comps[2])
break
else:
log.error('The \'prtconf\' binary was not found in $PATH.')
swap_cmd = salt.utils.path.which('swap')
if swap_cmd:
swap_data = __salt__['cmd.run']('{0} -s'.format(swap_cmd)).split()
try:
swap_total = (int(swap_data[-2]) + int(swap_data[-6])) * 4
except ValueError:
swap_total = None
grains['swap_total'] = swap_total
else:
log.error('The \'swap\' binary was not found in $PATH.')
return grains
def _windows_memdata():
'''
Return the memory information for Windows systems
'''
grains = {'mem_total': 0}
# get the Total Physical memory as reported by msinfo32
tot_bytes = win32api.GlobalMemoryStatusEx()['TotalPhys']
    # return memory info in mebibytes (MiB)
grains['mem_total'] = int(tot_bytes / (1024 ** 2))
return grains
def _memdata(osdata):
'''
Gather information about the system memory
'''
# Provides:
# mem_total
# swap_total, for supported systems.
grains = {'mem_total': 0}
if osdata['kernel'] == 'Linux':
grains.update(_linux_memdata())
elif osdata['kernel'] in ('FreeBSD', 'OpenBSD', 'NetBSD'):
grains.update(_bsd_memdata(osdata))
elif osdata['kernel'] == 'Darwin':
grains.update(_osx_memdata())
elif osdata['kernel'] == 'SunOS':
grains.update(_sunos_memdata())
elif osdata['kernel'] == 'AIX':
grains.update(_aix_memdata())
elif osdata['kernel'] == 'Windows' and HAS_WMI:
grains.update(_windows_memdata())
return grains
def _aix_get_machine_id():
'''
Parse the output of lsattr -El sys0 for os_uuid
'''
grains = {}
cmd = salt.utils.path.which('lsattr')
if cmd:
data = __salt__['cmd.run']('{0} -El sys0'.format(cmd)) + os.linesep
uuid_regexes = [re.compile(r'(?im)^\s*os_uuid\s+(\S+)\s+(.*)')]
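        # A matching `lsattr -El sys0` line looks roughly like
        # (values illustrative):
        #   os_uuid 01234567-89ab-cdef-0123-456789abcdef ...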
for regex in uuid_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains['machine_id'] = res.group(1).strip()
break
else:
log.error('The \'lsattr\' binary was not found in $PATH.')
return grains
def _windows_virtual(osdata):
'''
Returns what type of virtual hardware is under the hood, kvm or physical
'''
# Provides:
# virtual
# virtual_subtype
grains = dict()
if osdata['kernel'] != 'Windows':
return grains
grains['virtual'] = 'physical'
# It is possible that the 'manufacturer' and/or 'productname' grains
# exist but have a value of None.
manufacturer = osdata.get('manufacturer', '')
if manufacturer is None:
manufacturer = ''
productname = osdata.get('productname', '')
if productname is None:
productname = ''
if 'QEMU' in manufacturer:
# FIXME: Make this detect between kvm or qemu
grains['virtual'] = 'kvm'
if 'Bochs' in manufacturer:
grains['virtual'] = 'kvm'
# Product Name: (oVirt) www.ovirt.org
# Red Hat Community virtualization Project based on kvm
elif 'oVirt' in productname:
grains['virtual'] = 'kvm'
grains['virtual_subtype'] = 'oVirt'
# Red Hat Enterprise Virtualization
elif 'RHEV Hypervisor' in productname:
grains['virtual'] = 'kvm'
grains['virtual_subtype'] = 'rhev'
# Product Name: VirtualBox
elif 'VirtualBox' in productname:
grains['virtual'] = 'VirtualBox'
# Product Name: VMware Virtual Platform
elif 'VMware Virtual Platform' in productname:
grains['virtual'] = 'VMware'
# Manufacturer: Microsoft Corporation
# Product Name: Virtual Machine
elif 'Microsoft' in manufacturer and \
'Virtual Machine' in productname:
grains['virtual'] = 'VirtualPC'
# Manufacturer: Parallels Software International Inc.
elif 'Parallels Software' in manufacturer:
grains['virtual'] = 'Parallels'
# Apache CloudStack
elif 'CloudStack KVM Hypervisor' in productname:
grains['virtual'] = 'kvm'
grains['virtual_subtype'] = 'cloudstack'
return grains
def _virtual(osdata):
'''
Returns what type of virtual hardware is under the hood, kvm or physical
'''
    # This is going to be a monster. If you are running a VM, please test
    # this grain and submit patches!
# Provides:
# virtual
# virtual_subtype
grains = {'virtual': 'physical'}
# Skip the below loop on platforms which have none of the desired cmds
# This is a temporary measure until we can write proper virtual hardware
# detection.
skip_cmds = ('AIX',)
# list of commands to be executed to determine the 'virtual' grain
_cmds = ['systemd-detect-virt', 'virt-what', 'dmidecode']
# test first for virt-what, which covers most of the desired functionality
# on most platforms
if not salt.utils.platform.is_windows() and osdata['kernel'] not in skip_cmds:
if salt.utils.path.which('virt-what'):
_cmds = ['virt-what']
# Check if enable_lspci is True or False
if __opts__.get('enable_lspci', True) is True:
            # if /proc/bus/pci does not exist, lspci will fail
if os.path.exists('/proc/bus/pci'):
_cmds += ['lspci']
# Add additional last resort commands
if osdata['kernel'] in skip_cmds:
_cmds = ()
# Quick backout for BrandZ (Solaris LX Branded zones)
# Don't waste time trying other commands to detect the virtual grain
if HAS_UNAME and osdata['kernel'] == 'Linux' and 'BrandZ virtual linux' in os.uname():
grains['virtual'] = 'zone'
return grains
failed_commands = set()
for command in _cmds:
args = []
if osdata['kernel'] == 'Darwin':
command = 'system_profiler'
args = ['SPDisplaysDataType']
elif osdata['kernel'] == 'SunOS':
virtinfo = salt.utils.path.which('virtinfo')
if virtinfo:
try:
ret = __salt__['cmd.run_all']('{0} -a'.format(virtinfo))
except salt.exceptions.CommandExecutionError:
if salt.log.is_logging_configured():
failed_commands.add(virtinfo)
else:
if ret['stdout'].endswith('not supported'):
command = 'prtdiag'
else:
command = 'virtinfo'
else:
command = 'prtdiag'
cmd = salt.utils.path.which(command)
if not cmd:
continue
cmd = '{0} {1}'.format(cmd, ' '.join(args))
try:
ret = __salt__['cmd.run_all'](cmd)
if ret['retcode'] > 0:
if salt.log.is_logging_configured():
# systemd-detect-virt always returns > 0 on non-virtualized
# systems
# prtdiag only works in the global zone, skip if it fails
if salt.utils.platform.is_windows() or 'systemd-detect-virt' in cmd or 'prtdiag' in cmd:
continue
failed_commands.add(command)
continue
except salt.exceptions.CommandExecutionError:
if salt.log.is_logging_configured():
if salt.utils.platform.is_windows():
continue
failed_commands.add(command)
continue
output = ret['stdout']
if command == "system_profiler":
macoutput = output.lower()
if '0x1ab8' in macoutput:
grains['virtual'] = 'Parallels'
if 'parallels' in macoutput:
grains['virtual'] = 'Parallels'
if 'vmware' in macoutput:
grains['virtual'] = 'VMware'
if '0x15ad' in macoutput:
grains['virtual'] = 'VMware'
if 'virtualbox' in macoutput:
grains['virtual'] = 'VirtualBox'
# Break out of the loop so the next log message is not issued
break
elif command == 'systemd-detect-virt':
if output in ('qemu', 'kvm', 'oracle', 'xen', 'bochs', 'chroot', 'uml', 'systemd-nspawn'):
grains['virtual'] = output
break
elif 'vmware' in output:
grains['virtual'] = 'VMware'
break
elif 'microsoft' in output:
grains['virtual'] = 'VirtualPC'
break
elif 'lxc' in output:
grains['virtual'] = 'LXC'
break
elif 'systemd-nspawn' in output:
grains['virtual'] = 'LXC'
break
elif command == 'virt-what':
try:
output = output.splitlines()[-1]
except IndexError:
pass
if output in ('kvm', 'qemu', 'uml', 'xen', 'lxc'):
grains['virtual'] = output
break
elif 'vmware' in output:
grains['virtual'] = 'VMware'
break
elif 'parallels' in output:
grains['virtual'] = 'Parallels'
break
elif 'hyperv' in output:
grains['virtual'] = 'HyperV'
break
elif command == 'dmidecode':
# Product Name: VirtualBox
if 'Vendor: QEMU' in output:
# FIXME: Make this detect between kvm or qemu
grains['virtual'] = 'kvm'
if 'Manufacturer: QEMU' in output:
grains['virtual'] = 'kvm'
if 'Vendor: Bochs' in output:
grains['virtual'] = 'kvm'
if 'Manufacturer: Bochs' in output:
grains['virtual'] = 'kvm'
if 'BHYVE' in output:
grains['virtual'] = 'bhyve'
# Product Name: (oVirt) www.ovirt.org
# Red Hat Community virtualization Project based on kvm
elif 'Manufacturer: oVirt' in output:
grains['virtual'] = 'kvm'
grains['virtual_subtype'] = 'ovirt'
# Red Hat Enterprise Virtualization
elif 'Product Name: RHEV Hypervisor' in output:
grains['virtual'] = 'kvm'
grains['virtual_subtype'] = 'rhev'
elif 'VirtualBox' in output:
grains['virtual'] = 'VirtualBox'
# Product Name: VMware Virtual Platform
elif 'VMware' in output:
grains['virtual'] = 'VMware'
# Manufacturer: Microsoft Corporation
# Product Name: Virtual Machine
elif ': Microsoft' in output and 'Virtual Machine' in output:
grains['virtual'] = 'VirtualPC'
# Manufacturer: Parallels Software International Inc.
elif 'Parallels Software' in output:
grains['virtual'] = 'Parallels'
elif 'Manufacturer: Google' in output:
grains['virtual'] = 'kvm'
# Proxmox KVM
elif 'Vendor: SeaBIOS' in output:
grains['virtual'] = 'kvm'
# Break out of the loop, lspci parsing is not necessary
break
elif command == 'lspci':
# dmidecode not available or the user does not have the necessary
# permissions
model = output.lower()
if 'vmware' in model:
grains['virtual'] = 'VMware'
# 00:04.0 System peripheral: InnoTek Systemberatung GmbH
# VirtualBox Guest Service
elif 'virtualbox' in model:
grains['virtual'] = 'VirtualBox'
elif 'qemu' in model:
grains['virtual'] = 'kvm'
elif 'virtio' in model:
grains['virtual'] = 'kvm'
# Break out of the loop so the next log message is not issued
break
elif command == 'prtdiag':
model = output.lower().split("\n")[0]
if 'vmware' in model:
grains['virtual'] = 'VMware'
elif 'virtualbox' in model:
grains['virtual'] = 'VirtualBox'
elif 'qemu' in model:
grains['virtual'] = 'kvm'
elif 'joyent smartdc hvm' in model:
grains['virtual'] = 'kvm'
break
elif command == 'virtinfo':
grains['virtual'] = 'LDOM'
break
choices = ('Linux', 'HP-UX')
isdir = os.path.isdir
sysctl = salt.utils.path.which('sysctl')
if osdata['kernel'] in choices:
if os.path.isdir('/proc'):
try:
self_root = os.stat('/')
init_root = os.stat('/proc/1/root/.')
if self_root != init_root:
grains['virtual_subtype'] = 'chroot'
except (IOError, OSError):
pass
if isdir('/proc/vz'):
if os.path.isfile('/proc/vz/version'):
grains['virtual'] = 'openvzhn'
elif os.path.isfile('/proc/vz/veinfo'):
grains['virtual'] = 'openvzve'
# a posteriori, it's expected for these to have failed:
failed_commands.discard('lspci')
failed_commands.discard('dmidecode')
# Provide additional detection for OpenVZ
if os.path.isfile('/proc/self/status'):
with salt.utils.files.fopen('/proc/self/status') as status_file:
vz_re = re.compile(r'^envID:\s+(\d+)$')
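                # envID is 0 on the OpenVZ hardware node itself and non-zero
                # inside a container (VE), which is what the branches below
                # distinguish.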
for line in status_file:
vz_match = vz_re.match(line.rstrip('\n'))
if vz_match and int(vz_match.groups()[0]) != 0:
grains['virtual'] = 'openvzve'
elif vz_match and int(vz_match.groups()[0]) == 0:
grains['virtual'] = 'openvzhn'
if isdir('/proc/sys/xen') or \
isdir('/sys/bus/xen') or isdir('/proc/xen'):
if os.path.isfile('/proc/xen/xsd_kva'):
# Tested on CentOS 5.3 / 2.6.18-194.26.1.el5xen
# Tested on CentOS 5.4 / 2.6.18-164.15.1.el5xen
grains['virtual_subtype'] = 'Xen Dom0'
else:
if osdata.get('productname', '') == 'HVM domU':
# Requires dmidecode!
grains['virtual_subtype'] = 'Xen HVM DomU'
elif os.path.isfile('/proc/xen/capabilities') and \
os.access('/proc/xen/capabilities', os.R_OK):
with salt.utils.files.fopen('/proc/xen/capabilities') as fhr:
if 'control_d' not in fhr.read():
# Tested on CentOS 5.5 / 2.6.18-194.3.1.el5xen
grains['virtual_subtype'] = 'Xen PV DomU'
else:
# Shouldn't get to this, but just in case
grains['virtual_subtype'] = 'Xen Dom0'
# Tested on Fedora 10 / 2.6.27.30-170.2.82 with xen
# Tested on Fedora 15 / 2.6.41.4-1 without running xen
elif isdir('/sys/bus/xen'):
if 'xen:' in __salt__['cmd.run']('dmesg').lower():
grains['virtual_subtype'] = 'Xen PV DomU'
elif os.path.isfile('/sys/bus/xen/drivers/xenconsole'):
# An actual DomU will have the xenconsole driver
grains['virtual_subtype'] = 'Xen PV DomU'
# If a Dom0 or DomU was detected, obviously this is xen
if 'dom' in grains.get('virtual_subtype', '').lower():
grains['virtual'] = 'xen'
    # Check container type after hypervisors, to avoid overwriting the grain
    # for containers running inside a virtual environment.
if os.path.isfile('/proc/1/cgroup'):
try:
with salt.utils.files.fopen('/proc/1/cgroup', 'r') as fhr:
fhr_contents = fhr.read()
if ':/lxc/' in fhr_contents:
grains['virtual_subtype'] = 'LXC'
elif ':/kubepods/' in fhr_contents:
grains['virtual_subtype'] = 'kubernetes'
elif ':/libpod_parent/' in fhr_contents:
grains['virtual_subtype'] = 'libpod'
else:
if any(x in fhr_contents
for x in (':/system.slice/docker', ':/docker/',
':/docker-ce/')):
grains['virtual_subtype'] = 'Docker'
except IOError:
pass
if os.path.isfile('/proc/cpuinfo'):
with salt.utils.files.fopen('/proc/cpuinfo', 'r') as fhr:
if 'QEMU Virtual CPU' in fhr.read():
grains['virtual'] = 'kvm'
if os.path.isfile('/sys/devices/virtual/dmi/id/product_name'):
try:
with salt.utils.files.fopen('/sys/devices/virtual/dmi/id/product_name', 'r') as fhr:
output = salt.utils.stringutils.to_unicode(fhr.read(), errors='replace')
if 'VirtualBox' in output:
grains['virtual'] = 'VirtualBox'
elif 'RHEV Hypervisor' in output:
grains['virtual'] = 'kvm'
grains['virtual_subtype'] = 'rhev'
elif 'oVirt Node' in output:
grains['virtual'] = 'kvm'
grains['virtual_subtype'] = 'ovirt'
elif 'Google' in output:
grains['virtual'] = 'gce'
elif 'BHYVE' in output:
grains['virtual'] = 'bhyve'
except IOError:
pass
elif osdata['kernel'] == 'FreeBSD':
kenv = salt.utils.path.which('kenv')
if kenv:
product = __salt__['cmd.run'](
'{0} smbios.system.product'.format(kenv)
)
maker = __salt__['cmd.run'](
'{0} smbios.system.maker'.format(kenv)
)
if product.startswith('VMware'):
grains['virtual'] = 'VMware'
if product.startswith('VirtualBox'):
grains['virtual'] = 'VirtualBox'
if maker.startswith('Xen'):
grains['virtual_subtype'] = '{0} {1}'.format(maker, product)
grains['virtual'] = 'xen'
if maker.startswith('Microsoft') and product.startswith('Virtual'):
grains['virtual'] = 'VirtualPC'
if maker.startswith('OpenStack'):
grains['virtual'] = 'OpenStack'
if maker.startswith('Bochs'):
grains['virtual'] = 'kvm'
if sysctl:
hv_vendor = __salt__['cmd.run']('{0} hw.hv_vendor'.format(sysctl))
model = __salt__['cmd.run']('{0} hw.model'.format(sysctl))
jail = __salt__['cmd.run'](
'{0} -n security.jail.jailed'.format(sysctl)
)
if 'bhyve' in hv_vendor:
grains['virtual'] = 'bhyve'
if jail == '1':
grains['virtual_subtype'] = 'jail'
if 'QEMU Virtual CPU' in model:
grains['virtual'] = 'kvm'
elif osdata['kernel'] == 'OpenBSD':
if 'manufacturer' in osdata:
if osdata['manufacturer'] in ['QEMU', 'Red Hat', 'Joyent']:
grains['virtual'] = 'kvm'
if osdata['manufacturer'] == 'OpenBSD':
grains['virtual'] = 'vmm'
elif osdata['kernel'] == 'SunOS':
if grains['virtual'] == 'LDOM':
roles = []
for role in ('control', 'io', 'root', 'service'):
subtype_cmd = '{0} -c current get -H -o value {1}-role'.format(cmd, role)
ret = __salt__['cmd.run_all']('{0}'.format(subtype_cmd))
if ret['stdout'] == 'true':
roles.append(role)
if roles:
grains['virtual_subtype'] = roles
else:
# Check if it's a "regular" zone. (i.e. Solaris 10/11 zone)
zonename = salt.utils.path.which('zonename')
if zonename:
zone = __salt__['cmd.run']('{0}'.format(zonename))
if zone != 'global':
grains['virtual'] = 'zone'
# Check if it's a branded zone (i.e. Solaris 8/9 zone)
if isdir('/.SUNWnative'):
grains['virtual'] = 'zone'
elif osdata['kernel'] == 'NetBSD':
if sysctl:
if 'QEMU Virtual CPU' in __salt__['cmd.run'](
'{0} -n machdep.cpu_brand'.format(sysctl)):
grains['virtual'] = 'kvm'
elif 'invalid' not in __salt__['cmd.run'](
'{0} -n machdep.xen.suspend'.format(sysctl)):
grains['virtual'] = 'Xen PV DomU'
elif 'VMware' in __salt__['cmd.run'](
'{0} -n machdep.dmi.system-vendor'.format(sysctl)):
grains['virtual'] = 'VMware'
# NetBSD has Xen dom0 support
elif __salt__['cmd.run'](
'{0} -n machdep.idle-mechanism'.format(sysctl)) == 'xen':
if os.path.isfile('/var/run/xenconsoled.pid'):
grains['virtual_subtype'] = 'Xen Dom0'
for command in failed_commands:
log.info(
"Although '%s' was found in path, the current user "
'cannot execute it. Grains output might not be '
'accurate.', command
)
return grains
def _virtual_hv(osdata):
'''
Returns detailed hypervisor information from sysfs
Currently this seems to be used only by Xen
'''
grains = {}
# Bail early if we're not running on Xen
try:
if 'xen' not in osdata['virtual']:
return grains
except KeyError:
return grains
# Try to get the exact hypervisor version from sysfs
try:
version = {}
for fn in ('major', 'minor', 'extra'):
with salt.utils.files.fopen('/sys/hypervisor/version/{}'.format(fn), 'r') as fhr:
version[fn] = salt.utils.stringutils.to_unicode(fhr.read().strip())
grains['virtual_hv_version'] = '{}.{}{}'.format(version['major'], version['minor'], version['extra'])
grains['virtual_hv_version_info'] = [version['major'], version['minor'], version['extra']]
except (IOError, OSError, KeyError):
pass
# Try to read and decode the supported feature set of the hypervisor
# Based on https://github.com/brendangregg/Misc/blob/master/xen/xen-features.py
# Table data from include/xen/interface/features.h
xen_feature_table = {0: 'writable_page_tables',
1: 'writable_descriptor_tables',
2: 'auto_translated_physmap',
3: 'supervisor_mode_kernel',
4: 'pae_pgdir_above_4gb',
5: 'mmu_pt_update_preserve_ad',
7: 'gnttab_map_avail_bits',
8: 'hvm_callback_vector',
9: 'hvm_safe_pvclock',
10: 'hvm_pirqs',
11: 'dom0',
12: 'grant_map_identity',
13: 'memory_op_vnode_supported',
14: 'ARM_SMCCC_supported'}
try:
with salt.utils.files.fopen('/sys/hypervisor/properties/features', 'r') as fhr:
features = salt.utils.stringutils.to_unicode(fhr.read().strip())
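        # The features file holds a hex-encoded bitmask; each set bit maps to
        # an entry in xen_feature_table above.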
enabled_features = []
for bit, feat in six.iteritems(xen_feature_table):
if int(features, 16) & (1 << bit):
enabled_features.append(feat)
grains['virtual_hv_features'] = features
grains['virtual_hv_features_list'] = enabled_features
except (IOError, OSError, KeyError):
pass
return grains
def _ps(osdata):
'''
Return the ps grain
'''
grains = {}
bsd_choices = ('FreeBSD', 'NetBSD', 'OpenBSD', 'MacOS')
if osdata['os'] in bsd_choices:
grains['ps'] = 'ps auxwww'
elif osdata['os_family'] == 'Solaris':
grains['ps'] = '/usr/ucb/ps auxwww'
elif osdata['os'] == 'Windows':
grains['ps'] = 'tasklist.exe'
elif osdata.get('virtual', '') == 'openvzhn':
grains['ps'] = (
'ps -fH -p $(grep -l \"^envID:[[:space:]]*0\\$\" '
'/proc/[0-9]*/status | sed -e \"s=/proc/\\([0-9]*\\)/.*=\\1=\") '
'| awk \'{ $7=\"\"; print }\''
)
elif osdata['os_family'] == 'AIX':
grains['ps'] = '/usr/bin/ps auxww'
elif osdata['os_family'] == 'NILinuxRT':
grains['ps'] = 'ps -o user,pid,ppid,tty,time,comm'
else:
grains['ps'] = 'ps -efHww'
return grains
def _clean_value(key, val):
'''
Clean out well-known bogus values.
If it isn't clean (for example has value 'None'), return None.
Otherwise, return the original value.
NOTE: This logic also exists in the smbios module. This function is
for use when not using smbios to retrieve the value.
'''
if (val is None or not val or
re.match('none', val, flags=re.IGNORECASE)):
return None
elif 'uuid' in key:
# Try each version (1-5) of RFC4122 to check if it's actually a UUID
        for uuidver in range(1, 6):
try:
uuid.UUID(val, version=uuidver)
return val
except ValueError:
continue
log.trace('HW %s value %s is an invalid UUID', key, val.replace('\n', ' '))
return None
elif re.search('serial|part|version', key):
        # 'To be filled by O.E.M.' etc.
# 'Not applicable' etc.
# 'Not specified' etc.
# 0000000, 1234567 etc.
# begone!
if (re.match(r'^[0]+$', val) or
re.match(r'[0]?1234567[8]?[9]?[0]?', val) or
re.search(r'sernum|part[_-]?number|specified|filled|applicable', val, flags=re.IGNORECASE)):
return None
elif re.search('asset|manufacturer', key):
# AssetTag0. Manufacturer04. Begone.
if re.search(r'manufacturer|to be filled|available|asset|^no(ne|t)', val, flags=re.IGNORECASE):
return None
else:
# map unspecified, undefined, unknown & whatever to None
if (re.search(r'to be filled', val, flags=re.IGNORECASE) or
re.search(r'un(known|specified)|no(t|ne)? (asset|provided|defined|available|present|specified)',
val, flags=re.IGNORECASE)):
return None
return val
def _windows_platform_data():
'''
Use the platform module for as much as we can.
'''
# Provides:
# kernelrelease
# kernelversion
# osversion
# osrelease
# osservicepack
# osmanufacturer
# manufacturer
# productname
# biosversion
# serialnumber
# osfullname
# timezone
# windowsdomain
# windowsdomaintype
# motherboard.productname
# motherboard.serialnumber
# virtual
if not HAS_WMI:
return {}
with salt.utils.winapi.Com():
wmi_c = wmi.WMI()
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394102%28v=vs.85%29.aspx
systeminfo = wmi_c.Win32_ComputerSystem()[0]
# https://msdn.microsoft.com/en-us/library/aa394239(v=vs.85).aspx
osinfo = wmi_c.Win32_OperatingSystem()[0]
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394077(v=vs.85).aspx
biosinfo = wmi_c.Win32_BIOS()[0]
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394498(v=vs.85).aspx
timeinfo = wmi_c.Win32_TimeZone()[0]
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394072(v=vs.85).aspx
motherboard = {'product': None,
'serial': None}
try:
motherboardinfo = wmi_c.Win32_BaseBoard()[0]
motherboard['product'] = motherboardinfo.Product
motherboard['serial'] = motherboardinfo.SerialNumber
except IndexError:
log.debug('Motherboard info not available on this system')
os_release = platform.release()
kernel_version = platform.version()
info = salt.utils.win_osinfo.get_os_version_info()
net_info = salt.utils.win_osinfo.get_join_info()
service_pack = None
if info['ServicePackMajor'] > 0:
service_pack = ''.join(['SP', six.text_type(info['ServicePackMajor'])])
# This creates the osrelease grain based on the Windows Operating
# System Product Name. As long as Microsoft maintains a similar format
# this should be future proof
version = 'Unknown'
release = ''
if 'Server' in osinfo.Caption:
for item in osinfo.Caption.split(' '):
# If it's all digits, then it's version
if re.match(r'\d+', item):
version = item
# If it starts with R and then numbers, it's the release
# ie: R2
if re.match(r'^R\d+$', item):
release = item
os_release = '{0}Server{1}'.format(version, release)
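        # e.g. a Caption of "Microsoft Windows Server 2012 R2 Standard"
        # (illustrative) yields an osrelease of "2012ServerR2"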
else:
for item in osinfo.Caption.split(' '):
# If it's a number, decimal number, Thin or Vista, then it's the
# version
if re.match(r'^(\d+(\.\d+)?)|Thin|Vista$', item):
version = item
os_release = version
grains = {
'kernelrelease': _clean_value('kernelrelease', osinfo.Version),
'kernelversion': _clean_value('kernelversion', kernel_version),
'osversion': _clean_value('osversion', osinfo.Version),
'osrelease': _clean_value('osrelease', os_release),
'osservicepack': _clean_value('osservicepack', service_pack),
'osmanufacturer': _clean_value('osmanufacturer', osinfo.Manufacturer),
'manufacturer': _clean_value('manufacturer', systeminfo.Manufacturer),
'productname': _clean_value('productname', systeminfo.Model),
# bios name had a bunch of whitespace appended to it in my testing
# 'PhoenixBIOS 4.0 Release 6.0 '
'biosversion': _clean_value('biosversion', biosinfo.Name.strip()),
'serialnumber': _clean_value('serialnumber', biosinfo.SerialNumber),
'osfullname': _clean_value('osfullname', osinfo.Caption),
'timezone': _clean_value('timezone', timeinfo.Description),
'windowsdomain': _clean_value('windowsdomain', net_info['Domain']),
'windowsdomaintype': _clean_value('windowsdomaintype', net_info['DomainType']),
'motherboard': {
'productname': _clean_value('motherboard.productname', motherboard['product']),
'serialnumber': _clean_value('motherboard.serialnumber', motherboard['serial']),
}
}
# test for virtualized environments
# I only had VMware available so the rest are unvalidated
if 'VRTUAL' in biosinfo.Version: # (not a typo)
grains['virtual'] = 'HyperV'
elif 'A M I' in biosinfo.Version:
grains['virtual'] = 'VirtualPC'
elif 'VMware' in systeminfo.Model:
grains['virtual'] = 'VMware'
elif 'VirtualBox' in systeminfo.Model:
grains['virtual'] = 'VirtualBox'
elif 'Xen' in biosinfo.Version:
grains['virtual'] = 'Xen'
if 'HVM domU' in systeminfo.Model:
grains['virtual_subtype'] = 'HVM domU'
elif 'OpenStack' in systeminfo.Model:
grains['virtual'] = 'OpenStack'
return grains
def _osx_platform_data():
'''
Additional data for macOS systems
Returns: A dictionary containing values for the following:
- model_name
- boot_rom_version
- smc_version
- system_serialnumber
'''
cmd = 'system_profiler SPHardwareDataType'
hardware = __salt__['cmd.run'](cmd)
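    # system_profiler output is made of "Field Name: value" lines, e.g.
    # (illustrative):
    #   Model Name: MacBook Pro
    #   Boot ROM Version: 428.0.0.0.0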
grains = {}
for line in hardware.splitlines():
field_name, _, field_val = line.partition(': ')
if field_name.strip() == "Model Name":
key = 'model_name'
grains[key] = _clean_value(key, field_val)
if field_name.strip() == "Boot ROM Version":
key = 'boot_rom_version'
grains[key] = _clean_value(key, field_val)
if field_name.strip() == "SMC Version (system)":
key = 'smc_version'
grains[key] = _clean_value(key, field_val)
if field_name.strip() == "Serial Number (system)":
key = 'system_serialnumber'
grains[key] = _clean_value(key, field_val)
return grains
def id_():
'''
Return the id
'''
return {'id': __opts__.get('id', '')}
_REPLACE_LINUX_RE = re.compile(r'\W(?:gnu/)?linux', re.IGNORECASE)
# This maps (at most) the first ten characters (no spaces, lowercased) of
# 'osfullname' to the 'os' grain that Salt traditionally uses.
# Please see os_data() and _supported_dists.
# If your system is not detecting properly it likely needs an entry here.
_OS_NAME_MAP = {
'redhatente': 'RedHat',
'gentoobase': 'Gentoo',
'archarm': 'Arch ARM',
'arch': 'Arch',
'debian': 'Debian',
'raspbian': 'Raspbian',
'fedoraremi': 'Fedora',
'chapeau': 'Chapeau',
'korora': 'Korora',
'amazonami': 'Amazon',
'alt': 'ALT',
'enterprise': 'OEL',
'oracleserv': 'OEL',
'cloudserve': 'CloudLinux',
'cloudlinux': 'CloudLinux',
'pidora': 'Fedora',
'scientific': 'ScientificLinux',
'synology': 'Synology',
'nilrt': 'NILinuxRT',
'poky': 'Poky',
'manjaro': 'Manjaro',
'manjarolin': 'Manjaro',
'univention': 'Univention',
'antergos': 'Antergos',
'sles': 'SUSE',
'void': 'Void',
'slesexpand': 'RES',
'linuxmint': 'Mint',
'neon': 'KDE neon',
}
# Map the 'os' grain to the 'os_family' grain
# These should always be capitalized entries as the lookup comes
# post-_OS_NAME_MAP. If your system is having trouble with detection, please
# make sure that the 'os' grain is capitalized and working correctly first.
_OS_FAMILY_MAP = {
'Ubuntu': 'Debian',
'Fedora': 'RedHat',
'Chapeau': 'RedHat',
'Korora': 'RedHat',
'FedBerry': 'RedHat',
'CentOS': 'RedHat',
'GoOSe': 'RedHat',
'Scientific': 'RedHat',
'Amazon': 'RedHat',
'CloudLinux': 'RedHat',
'OVS': 'RedHat',
'OEL': 'RedHat',
'XCP': 'RedHat',
'XCP-ng': 'RedHat',
'XenServer': 'RedHat',
'RES': 'RedHat',
'Sangoma': 'RedHat',
'Mandrake': 'Mandriva',
'ESXi': 'VMware',
'Mint': 'Debian',
'VMwareESX': 'VMware',
'Bluewhite64': 'Bluewhite',
'Slamd64': 'Slackware',
'SLES': 'Suse',
    'SUSE Enterprise Server': 'Suse',
'SLED': 'Suse',
'openSUSE': 'Suse',
'SUSE': 'Suse',
'openSUSE Leap': 'Suse',
'openSUSE Tumbleweed': 'Suse',
'SLES_SAP': 'Suse',
'Solaris': 'Solaris',
'SmartOS': 'Solaris',
'OmniOS': 'Solaris',
'OpenIndiana Development': 'Solaris',
'OpenIndiana': 'Solaris',
'OpenSolaris Development': 'Solaris',
'OpenSolaris': 'Solaris',
'Oracle Solaris': 'Solaris',
'Arch ARM': 'Arch',
'Manjaro': 'Arch',
'Antergos': 'Arch',
'ALT': 'RedHat',
'Trisquel': 'Debian',
'GCEL': 'Debian',
'Linaro': 'Debian',
'elementary OS': 'Debian',
'elementary': 'Debian',
'Univention': 'Debian',
'ScientificLinux': 'RedHat',
'Raspbian': 'Debian',
'Devuan': 'Debian',
'antiX': 'Debian',
'Kali': 'Debian',
'neon': 'Debian',
'Cumulus': 'Debian',
'Deepin': 'Debian',
'NILinuxRT': 'NILinuxRT',
'KDE neon': 'Debian',
'Void': 'Void',
'IDMS': 'Debian',
'Funtoo': 'Gentoo',
'AIX': 'AIX',
'TurnKey': 'Debian',
}
# Matches any possible format:
# DISTRIB_ID="Ubuntu"
# DISTRIB_ID='Mageia'
# DISTRIB_ID=Fedora
# DISTRIB_RELEASE='10.10'
# DISTRIB_CODENAME='squeeze'
# DISTRIB_DESCRIPTION='Ubuntu 10.10'
_LSB_REGEX = re.compile((
'^(DISTRIB_(?:ID|RELEASE|CODENAME|DESCRIPTION))=(?:\'|")?'
'([\\w\\s\\.\\-_]+)(?:\'|")?'
))
def _linux_bin_exists(binary):
'''
Does a binary exist in linux (depends on which, type, or whereis)
'''
for search_cmd in ('which', 'type -ap'):
try:
return __salt__['cmd.retcode'](
'{0} {1}'.format(search_cmd, binary)
) == 0
except salt.exceptions.CommandExecutionError:
pass
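    # Fall back to `whereis -b`, which prints only "name:" when nothing is
    # found and "name: /path/to/name ..." otherwise, hence the token count
    # check below.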
try:
return len(__salt__['cmd.run_all'](
'whereis -b {0}'.format(binary)
)['stdout'].split()) > 1
except salt.exceptions.CommandExecutionError:
return False
def _get_interfaces():
'''
Provide a dict of the connected interfaces and their ip addresses
'''
global _INTERFACES
if not _INTERFACES:
_INTERFACES = salt.utils.network.interfaces()
return _INTERFACES
def _parse_lsb_release():
ret = {}
try:
log.trace('Attempting to parse /etc/lsb-release')
with salt.utils.files.fopen('/etc/lsb-release') as ifile:
for line in ifile:
try:
key, value = _LSB_REGEX.match(line.rstrip('\n')).groups()[:2]
except AttributeError:
pass
else:
# Adds lsb_distrib_{id,release,codename,description}
ret['lsb_{0}'.format(key.lower())] = value.rstrip()
except (IOError, OSError) as exc:
log.trace('Failed to parse /etc/lsb-release: %s', exc)
return ret
def _parse_os_release(*os_release_files):
'''
Parse os-release and return a parameter dictionary
See http://www.freedesktop.org/software/systemd/man/os-release.html
for specification of the file format.
'''
ret = {}
for filename in os_release_files:
try:
with salt.utils.files.fopen(filename) as ifile:
regex = re.compile('^([\\w]+)=(?:\'|")?(.*?)(?:\'|")?$')
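                # Typical os-release lines (illustrative):
                #   NAME="Ubuntu"
                #   VERSION_ID="18.04"
                #   PRETTY_NAME="Ubuntu 18.04.2 LTS"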
for line in ifile:
match = regex.match(line.strip())
if match:
# Shell special characters ("$", quotes, backslash,
# backtick) are escaped with backslashes
ret[match.group(1)] = re.sub(
r'\\([$"\'\\`])', r'\1', match.group(2)
)
break
except (IOError, OSError):
pass
return ret
def _parse_cpe_name(cpe):
'''
Parse CPE_NAME data from the os-release
Info: https://csrc.nist.gov/projects/security-content-automation-protocol/scap-specifications/cpe
:param cpe:
:return:
'''
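    # The two accepted CPE formats (values illustrative):
    #   URI binding:      cpe:/o:opensuse:leap:15.0
    #   2.3 formatted:    cpe:2.3:o:opensuse:leap:15.0:*:*:*:*:*:*:*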
part = {
'o': 'operating system',
'h': 'hardware',
'a': 'application',
}
ret = {}
cpe = (cpe or '').split(':')
if len(cpe) > 4 and cpe[0] == 'cpe':
if cpe[1].startswith('/'): # WFN to URI
ret['vendor'], ret['product'], ret['version'] = cpe[2:5]
ret['phase'] = cpe[5] if len(cpe) > 5 else None
ret['part'] = part.get(cpe[1][1:])
elif len(cpe) == 13 and cpe[1] == '2.3': # WFN to a string
ret['vendor'], ret['product'], ret['version'], ret['phase'] = [x if x != '*' else None for x in cpe[3:7]]
ret['part'] = part.get(cpe[2])
return ret
def os_data():
'''
Return grains pertaining to the operating system
'''
grains = {
'num_gpus': 0,
'gpus': [],
}
# Windows Server 2008 64-bit
# ('Windows', 'MINIONNAME', '2008ServerR2', '6.1.7601', 'AMD64',
    #  'Intel64 Family 6 Model 23 Stepping 6, GenuineIntel')
# Ubuntu 10.04
# ('Linux', 'MINIONNAME', '2.6.32-38-server',
# '#83-Ubuntu SMP Wed Jan 4 11:26:59 UTC 2012', 'x86_64', '')
# pylint: disable=unpacking-non-sequence
(grains['kernel'], grains['nodename'],
grains['kernelrelease'], grains['kernelversion'], grains['cpuarch'], _) = platform.uname()
# pylint: enable=unpacking-non-sequence
if salt.utils.platform.is_proxy():
grains['kernel'] = 'proxy'
grains['kernelrelease'] = 'proxy'
grains['kernelversion'] = 'proxy'
grains['osrelease'] = 'proxy'
grains['os'] = 'proxy'
grains['os_family'] = 'proxy'
grains['osfullname'] = 'proxy'
elif salt.utils.platform.is_windows():
grains['os'] = 'Windows'
grains['os_family'] = 'Windows'
grains.update(_memdata(grains))
grains.update(_windows_platform_data())
grains.update(_windows_cpudata())
grains.update(_windows_virtual(grains))
grains.update(_ps(grains))
if 'Server' in grains['osrelease']:
osrelease_info = grains['osrelease'].split('Server', 1)
osrelease_info[1] = osrelease_info[1].lstrip('R')
else:
osrelease_info = grains['osrelease'].split('.')
for idx, value in enumerate(osrelease_info):
if not value.isdigit():
continue
osrelease_info[idx] = int(value)
grains['osrelease_info'] = tuple(osrelease_info)
grains['osfinger'] = '{os}-{ver}'.format(
os=grains['os'],
ver=grains['osrelease'])
grains['init'] = 'Windows'
return grains
elif salt.utils.platform.is_linux():
# Add SELinux grain, if you have it
if _linux_bin_exists('selinuxenabled'):
log.trace('Adding selinux grains')
grains['selinux'] = {}
grains['selinux']['enabled'] = __salt__['cmd.retcode'](
'selinuxenabled'
) == 0
if _linux_bin_exists('getenforce'):
grains['selinux']['enforced'] = __salt__['cmd.run'](
'getenforce'
).strip()
# Add systemd grain, if you have it
if _linux_bin_exists('systemctl') and _linux_bin_exists('localectl'):
log.trace('Adding systemd grains')
grains['systemd'] = {}
systemd_info = __salt__['cmd.run'](
'systemctl --version'
).splitlines()
grains['systemd']['version'] = systemd_info[0].split()[1]
grains['systemd']['features'] = systemd_info[1]
# Add init grain
grains['init'] = 'unknown'
log.trace('Adding init grain')
try:
os.stat('/run/systemd/system')
grains['init'] = 'systemd'
except (OSError, IOError):
try:
with salt.utils.files.fopen('/proc/1/cmdline') as fhr:
init_cmdline = fhr.read().replace('\x00', ' ').split()
except (IOError, OSError):
pass
else:
try:
init_bin = salt.utils.path.which(init_cmdline[0])
except IndexError:
                        # Empty init_cmdline
init_bin = None
log.warning('Unable to fetch data from /proc/1/cmdline')
if init_bin is not None and init_bin.endswith('bin/init'):
supported_inits = (b'upstart', b'sysvinit', b'systemd')
edge_len = max(len(x) for x in supported_inits) - 1
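                    # Keep the trailing edge of each chunk so a marker that
                    # straddles two reads is still detected.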
try:
buf_size = __opts__['file_buffer_size']
except KeyError:
# Default to the value of file_buffer_size for the minion
buf_size = 262144
try:
with salt.utils.files.fopen(init_bin, 'rb') as fp_:
edge = b''
buf = fp_.read(buf_size).lower()
while buf:
buf = edge + buf
for item in supported_inits:
if item in buf:
if six.PY3:
item = item.decode('utf-8')
grains['init'] = item
buf = b''
break
edge = buf[-edge_len:]
buf = fp_.read(buf_size).lower()
except (IOError, OSError) as exc:
log.error(
'Unable to read from init_bin (%s): %s',
init_bin, exc
)
elif salt.utils.path.which('supervisord') in init_cmdline:
grains['init'] = 'supervisord'
elif salt.utils.path.which('dumb-init') in init_cmdline:
# https://github.com/Yelp/dumb-init
grains['init'] = 'dumb-init'
elif salt.utils.path.which('tini') in init_cmdline:
# https://github.com/krallin/tini
grains['init'] = 'tini'
elif init_cmdline == ['runit']:
grains['init'] = 'runit'
elif '/sbin/my_init' in init_cmdline:
# Phusion Base docker container use runit for srv mgmt, but
# my_init as pid1
grains['init'] = 'runit'
else:
log.debug(
'Could not determine init system from command line: (%s)',
' '.join(init_cmdline)
)
# Add lsb grains on any distro with lsb-release. Note that this import
# can fail on systems with lsb-release installed if the system package
# does not install the python package for the python interpreter used by
# Salt (i.e. python2 or python3)
try:
log.trace('Getting lsb_release distro information')
import lsb_release # pylint: disable=import-error
release = lsb_release.get_distro_information()
for key, value in six.iteritems(release):
key = key.lower()
lsb_param = 'lsb_{0}{1}'.format(
'' if key.startswith('distrib_') else 'distrib_',
key
)
grains[lsb_param] = value
        # Catch a NameError to work around possible breakage in lsb_release
# See https://github.com/saltstack/salt/issues/37867
except (ImportError, NameError):
# if the python library isn't available, try to parse
# /etc/lsb-release using regex
log.trace('lsb_release python bindings not available')
grains.update(_parse_lsb_release())
if grains.get('lsb_distrib_description', '').lower().startswith('antergos'):
# Antergos incorrectly configures their /etc/lsb-release,
# setting the DISTRIB_ID to "Arch". This causes the "os" grain
# to be incorrectly set to "Arch".
grains['osfullname'] = 'Antergos Linux'
elif 'lsb_distrib_id' not in grains:
log.trace(
'Failed to get lsb_distrib_id, trying to parse os-release'
)
os_release = _parse_os_release('/etc/os-release', '/usr/lib/os-release')
if os_release:
if 'NAME' in os_release:
grains['lsb_distrib_id'] = os_release['NAME'].strip()
if 'VERSION_ID' in os_release:
grains['lsb_distrib_release'] = os_release['VERSION_ID']
if 'VERSION_CODENAME' in os_release:
grains['lsb_distrib_codename'] = os_release['VERSION_CODENAME']
elif 'PRETTY_NAME' in os_release:
codename = os_release['PRETTY_NAME']
# https://github.com/saltstack/salt/issues/44108
if os_release['ID'] == 'debian':
codename_match = re.search(r'\((\w+)\)$', codename)
if codename_match:
codename = codename_match.group(1)
grains['lsb_distrib_codename'] = codename
if 'CPE_NAME' in os_release:
cpe = _parse_cpe_name(os_release['CPE_NAME'])
if not cpe:
log.error('Broken CPE_NAME format in /etc/os-release!')
elif cpe.get('vendor', '').lower() in ['suse', 'opensuse']:
grains['os'] = "SUSE"
# openSUSE `osfullname` grain normalization
if os_release.get("NAME") == "openSUSE Leap":
grains['osfullname'] = "Leap"
elif os_release.get("VERSION") == "Tumbleweed":
grains['osfullname'] = os_release["VERSION"]
# Override VERSION_ID, if CPE_NAME around
if cpe.get('version') and cpe.get('vendor') == 'opensuse': # Keep VERSION_ID for SLES
grains['lsb_distrib_release'] = cpe['version']
elif os.path.isfile('/etc/SuSE-release'):
log.trace('Parsing distrib info from /etc/SuSE-release')
grains['lsb_distrib_id'] = 'SUSE'
version = ''
patch = ''
with salt.utils.files.fopen('/etc/SuSE-release') as fhr:
for line in fhr:
if 'enterprise' in line.lower():
grains['lsb_distrib_id'] = 'SLES'
grains['lsb_distrib_codename'] = re.sub(r'\(.+\)', '', line).strip()
elif 'version' in line.lower():
version = re.sub(r'[^0-9]', '', line)
elif 'patchlevel' in line.lower():
patch = re.sub(r'[^0-9]', '', line)
grains['lsb_distrib_release'] = version
if patch:
grains['lsb_distrib_release'] += '.' + patch
patchstr = 'SP' + patch
if grains['lsb_distrib_codename'] and patchstr not in grains['lsb_distrib_codename']:
grains['lsb_distrib_codename'] += ' ' + patchstr
if not grains.get('lsb_distrib_codename'):
grains['lsb_distrib_codename'] = 'n.a'
elif os.path.isfile('/etc/altlinux-release'):
log.trace('Parsing distrib info from /etc/altlinux-release')
# ALT Linux
grains['lsb_distrib_id'] = 'altlinux'
with salt.utils.files.fopen('/etc/altlinux-release') as ifile:
# This file is symlinked to from:
# /etc/fedora-release
# /etc/redhat-release
# /etc/system-release
for line in ifile:
# ALT Linux Sisyphus (unstable)
comps = line.split()
if comps[0] == 'ALT':
grains['lsb_distrib_release'] = comps[2]
grains['lsb_distrib_codename'] = \
comps[3].replace('(', '').replace(')', '')
elif os.path.isfile('/etc/centos-release'):
log.trace('Parsing distrib info from /etc/centos-release')
# Maybe CentOS Linux; could also be SUSE Expanded Support.
# SUSE ES has both, centos-release and redhat-release.
if os.path.isfile('/etc/redhat-release'):
with salt.utils.files.fopen('/etc/redhat-release') as ifile:
for line in ifile:
if "red hat enterprise linux server" in line.lower():
# This is a SUSE Expanded Support Rhel installation
grains['lsb_distrib_id'] = 'RedHat'
break
grains.setdefault('lsb_distrib_id', 'CentOS')
with salt.utils.files.fopen('/etc/centos-release') as ifile:
for line in ifile:
# Need to pull out the version and codename
# in the case of custom content in /etc/centos-release
find_release = re.compile(r'\d+\.\d+')
find_codename = re.compile(r'(?<=\()(.*?)(?=\))')
release = find_release.search(line)
codename = find_codename.search(line)
if release is not None:
grains['lsb_distrib_release'] = release.group()
if codename is not None:
grains['lsb_distrib_codename'] = codename.group()
elif os.path.isfile('/etc.defaults/VERSION') \
and os.path.isfile('/etc.defaults/synoinfo.conf'):
grains['osfullname'] = 'Synology'
log.trace(
                'Parsing Synology distrib info from /etc.defaults/VERSION'
)
with salt.utils.files.fopen('/etc.defaults/VERSION', 'r') as fp_:
synoinfo = {}
for line in fp_:
try:
key, val = line.rstrip('\n').split('=')
except ValueError:
continue
if key in ('majorversion', 'minorversion',
'buildnumber'):
synoinfo[key] = val.strip('"')
if len(synoinfo) != 3:
log.warning(
'Unable to determine Synology version info. '
'Please report this, as it is likely a bug.'
)
else:
grains['osrelease'] = (
'{majorversion}.{minorversion}-{buildnumber}'
.format(**synoinfo)
)
# Use the already intelligent platform module to get distro info
# (though apparently it's not intelligent enough to strip quotes)
log.trace(
'Getting OS name, release, and codename from '
'distro.linux_distribution()'
)
(osname, osrelease, oscodename) = \
[x.strip('"').strip("'") for x in
linux_distribution(supported_dists=_supported_dists)]
# Try to assign these three names based on the lsb info, they tend to
# be more accurate than what python gets from /etc/DISTRO-release.
# It's worth noting that Ubuntu has patched their Python distribution
# so that linux_distribution() does the /etc/lsb-release parsing, but
        # we do it anyway here for the sake of full portability.
if 'osfullname' not in grains:
# If NI Linux RT distribution, set the grains['osfullname'] to 'nilrt'
if grains.get('lsb_distrib_id', '').lower().startswith('nilrt'):
grains['osfullname'] = 'nilrt'
else:
grains['osfullname'] = grains.get('lsb_distrib_id', osname).strip()
if 'osrelease' not in grains:
            # NOTE: This is a workaround for a CentOS 7 os-release bug
            # https://bugs.centos.org/view.php?id=8359
            # /etc/os-release contains no minor distro release number, so we
            # fall back to parsing the /etc/centos-release file instead.
            # The commit introducing this comment should be reverted once the
            # upstream bug fix is released.
if 'CentOS Linux 7' in grains.get('lsb_distrib_codename', ''):
grains.pop('lsb_distrib_release', None)
grains['osrelease'] = grains.get('lsb_distrib_release', osrelease).strip()
grains['oscodename'] = grains.get('lsb_distrib_codename', '').strip() or oscodename
if 'Red Hat' in grains['oscodename']:
grains['oscodename'] = oscodename
distroname = _REPLACE_LINUX_RE.sub('', grains['osfullname']).strip()
# return the first ten characters with no spaces, lowercased
shortname = distroname.replace(' ', '').lower()[:10]
# this maps the long names from the /etc/DISTRO-release files to the
# traditional short names that Salt has used.
if 'os' not in grains:
grains['os'] = _OS_NAME_MAP.get(shortname, distroname)
grains.update(_linux_cpudata())
grains.update(_linux_gpu_data())
elif grains['kernel'] == 'SunOS':
if salt.utils.platform.is_smartos():
# See https://github.com/joyent/smartos-live/issues/224
if HAS_UNAME:
uname_v = os.uname()[3] # format: joyent_20161101T004406Z
else:
uname_v = os.name
uname_v = uname_v[uname_v.index('_')+1:]
grains['os'] = grains['osfullname'] = 'SmartOS'
# store a parsed version of YYYY.MM.DD as osrelease
grains['osrelease'] = ".".join([
uname_v.split('T')[0][0:4],
uname_v.split('T')[0][4:6],
uname_v.split('T')[0][6:8],
])
            # store an untouched copy of the timestamp in osrelease_stamp
grains['osrelease_stamp'] = uname_v
elif os.path.isfile('/etc/release'):
with salt.utils.files.fopen('/etc/release', 'r') as fp_:
rel_data = fp_.read()
try:
release_re = re.compile(
r'((?:Open|Oracle )?Solaris|OpenIndiana|OmniOS) (Development)?'
r'\s*(\d+\.?\d*|v\d+)\s?[A-Z]*\s?(r\d+|\d+\/\d+|oi_\S+|snv_\S+)?'
)
osname, development, osmajorrelease, osminorrelease = release_re.search(rel_data).groups()
except AttributeError:
# Set a blank osrelease grain and fallback to 'Solaris'
# as the 'os' grain.
grains['os'] = grains['osfullname'] = 'Solaris'
grains['osrelease'] = ''
else:
if development is not None:
osname = ' '.join((osname, development))
if HAS_UNAME:
uname_v = os.uname()[3]
else:
uname_v = os.name
grains['os'] = grains['osfullname'] = osname
if osname in ['Oracle Solaris'] and uname_v.startswith(osmajorrelease):
                    # Oracle Solaris 11 and up have the minor version in uname
grains['osrelease'] = uname_v
elif osname in ['OmniOS']:
# OmniOS
osrelease = []
osrelease.append(osmajorrelease[1:])
osrelease.append(osminorrelease[1:])
grains['osrelease'] = ".".join(osrelease)
grains['osrelease_stamp'] = uname_v
else:
# Sun Solaris 10 and earlier/comparable
osrelease = []
osrelease.append(osmajorrelease)
if osminorrelease:
osrelease.append(osminorrelease)
grains['osrelease'] = ".".join(osrelease)
grains['osrelease_stamp'] = uname_v
grains.update(_sunos_cpudata())
elif grains['kernel'] == 'VMkernel':
grains['os'] = 'ESXi'
elif grains['kernel'] == 'Darwin':
osrelease = __salt__['cmd.run']('sw_vers -productVersion')
osname = __salt__['cmd.run']('sw_vers -productName')
osbuild = __salt__['cmd.run']('sw_vers -buildVersion')
grains['os'] = 'MacOS'
grains['os_family'] = 'MacOS'
grains['osfullname'] = "{0} {1}".format(osname, osrelease)
grains['osrelease'] = osrelease
grains['osbuild'] = osbuild
grains['init'] = 'launchd'
grains.update(_bsd_cpudata(grains))
grains.update(_osx_gpudata())
grains.update(_osx_platform_data())
elif grains['kernel'] == 'AIX':
osrelease = __salt__['cmd.run']('oslevel')
osrelease_techlevel = __salt__['cmd.run']('oslevel -r')
osname = __salt__['cmd.run']('uname')
grains['os'] = 'AIX'
grains['osfullname'] = osname
grains['osrelease'] = osrelease
grains['osrelease_techlevel'] = osrelease_techlevel
grains.update(_aix_cpudata())
else:
grains['os'] = grains['kernel']
if grains['kernel'] == 'FreeBSD':
try:
grains['osrelease'] = __salt__['cmd.run']('freebsd-version -u').split('-')[0]
except salt.exceptions.CommandExecutionError:
            # freebsd-version was introduced in 10.0.
            # derive osrelease from kernelrelease prior to that
grains['osrelease'] = grains['kernelrelease'].split('-')[0]
grains.update(_bsd_cpudata(grains))
if grains['kernel'] in ('OpenBSD', 'NetBSD'):
grains.update(_bsd_cpudata(grains))
grains['osrelease'] = grains['kernelrelease'].split('-')[0]
if grains['kernel'] == 'NetBSD':
grains.update(_netbsd_gpu_data())
if not grains['os']:
grains['os'] = 'Unknown {0}'.format(grains['kernel'])
grains['os_family'] = 'Unknown'
else:
# this assigns family names based on the os name
# family defaults to the os name if not found
grains['os_family'] = _OS_FAMILY_MAP.get(grains['os'],
grains['os'])
# Build the osarch grain. This grain will be used for platform-specific
# considerations such as package management. Fall back to the CPU
# architecture.
if grains.get('os_family') == 'Debian':
osarch = __salt__['cmd.run']('dpkg --print-architecture').strip()
elif grains.get('os_family') in ['RedHat', 'Suse']:
osarch = salt.utils.pkg.rpm.get_osarch()
elif grains.get('os_family') in ('NILinuxRT', 'Poky'):
archinfo = {}
for line in __salt__['cmd.run']('opkg print-architecture').splitlines():
if line.startswith('arch'):
_, arch, priority = line.split()
archinfo[arch.strip()] = int(priority.strip())
# Return osarch in priority order (higher to lower)
osarch = sorted(archinfo, key=archinfo.get, reverse=True)
else:
osarch = grains['cpuarch']
grains['osarch'] = osarch
grains.update(_memdata(grains))
# Get the hardware and bios data
grains.update(_hw_data(grains))
# Load the virtual machine info
grains.update(_virtual(grains))
grains.update(_virtual_hv(grains))
grains.update(_ps(grains))
if grains.get('osrelease', ''):
osrelease_info = grains['osrelease'].split('.')
for idx, value in enumerate(osrelease_info):
if not value.isdigit():
continue
osrelease_info[idx] = int(value)
grains['osrelease_info'] = tuple(osrelease_info)
try:
grains['osmajorrelease'] = int(grains['osrelease_info'][0])
except (IndexError, TypeError, ValueError):
log.debug(
'Unable to derive osmajorrelease from osrelease_info \'%s\'. '
'The osmajorrelease grain will not be set.',
grains['osrelease_info']
)
os_name = grains['os' if grains.get('os') in (
'Debian', 'FreeBSD', 'OpenBSD', 'NetBSD', 'Mac', 'Raspbian') else 'osfullname']
grains['osfinger'] = '{0}-{1}'.format(
os_name, grains['osrelease'] if os_name in ('Ubuntu',) else grains['osrelease_info'][0])
return grains
def locale_info():
'''
Provides
defaultlanguage
defaultencoding
'''
grains = {}
grains['locale_info'] = {}
if salt.utils.platform.is_proxy():
return grains
try:
(
grains['locale_info']['defaultlanguage'],
grains['locale_info']['defaultencoding']
) = locale.getdefaultlocale()
except Exception:
        # locale.getdefaultlocale can raise a ValueError. Catch anything else
        # it might do, per #2205
grains['locale_info']['defaultlanguage'] = 'unknown'
grains['locale_info']['defaultencoding'] = 'unknown'
grains['locale_info']['detectedencoding'] = __salt_system_encoding__
if _DATEUTIL_TZ:
grains['locale_info']['timezone'] = datetime.datetime.now(dateutil.tz.tzlocal()).tzname()
return grains
def hostname():
'''
Return fqdn, hostname, domainname
'''
# This is going to need some work
# Provides:
# fqdn
# host
# localhost
# domain
global __FQDN__
grains = {}
if salt.utils.platform.is_proxy():
return grains
grains['localhost'] = socket.gethostname()
if __FQDN__ is None:
__FQDN__ = salt.utils.network.get_fqhostname()
# On some distros (notably FreeBSD) if there is no hostname set
# salt.utils.network.get_fqhostname() will return None.
# In this case we punt and log a message at error level, but force the
# hostname and domain to be localhost.localdomain
# Otherwise we would stacktrace below
if __FQDN__ is None: # still!
log.error('Having trouble getting a hostname. Does this machine have its hostname and domain set properly?')
__FQDN__ = 'localhost.localdomain'
grains['fqdn'] = __FQDN__
(grains['host'], grains['domain']) = grains['fqdn'].partition('.')[::2]
return grains
def append_domain():
'''
Return append_domain if set
'''
grain = {}
if salt.utils.platform.is_proxy():
return grain
if 'append_domain' in __opts__:
grain['append_domain'] = __opts__['append_domain']
return grain
def fqdns():
'''
Return all known FQDNs for the system by enumerating all interfaces and
then trying to reverse resolve them (excluding 'lo' interface).
'''
# Provides:
# fqdns
grains = {}
fqdns = set()
addresses = salt.utils.network.ip_addrs(include_loopback=False, interface_data=_get_interfaces())
addresses.extend(salt.utils.network.ip_addrs6(include_loopback=False, interface_data=_get_interfaces()))
err_message = 'Exception during resolving address: %s'
for ip in addresses:
try:
name, aliaslist, addresslist = socket.gethostbyaddr(ip)
fqdns.update([socket.getfqdn(name)] + [als for als in aliaslist if salt.utils.network.is_fqdn(als)])
except socket.herror as err:
if err.errno == 0:
# No FQDN for this IP address, so we don't need to know this all the time.
log.debug("Unable to resolve address %s: %s", ip, err)
else:
log.error(err_message, err)
except (socket.error, socket.gaierror, socket.timeout) as err:
log.error(err_message, err)
return {"fqdns": sorted(list(fqdns))}
def ip_fqdn():
'''
Return ip address and FQDN grains
'''
if salt.utils.platform.is_proxy():
return {}
ret = {}
ret['ipv4'] = salt.utils.network.ip_addrs(include_loopback=True)
ret['ipv6'] = salt.utils.network.ip_addrs6(include_loopback=True)
_fqdn = hostname()['fqdn']
for socket_type, ipv_num in ((socket.AF_INET, '4'), (socket.AF_INET6, '6')):
key = 'fqdn_ip' + ipv_num
if not ret['ipv' + ipv_num]:
ret[key] = []
else:
try:
start_time = datetime.datetime.utcnow()
info = socket.getaddrinfo(_fqdn, None, socket_type)
ret[key] = list(set(item[4][0] for item in info))
except socket.error:
timediff = datetime.datetime.utcnow() - start_time
if timediff.seconds > 5 and __opts__['__role'] == 'master':
log.warning(
'Unable to find IPv%s record for "%s" causing a %s '
'second timeout when rendering grains. Set the dns or '
'/etc/hosts for IPv%s to clear this.',
ipv_num, _fqdn, timediff, ipv_num
)
ret[key] = []
return ret
def ip_interfaces():
'''
Provide a dict of the connected interfaces and their ip addresses
The addresses will be passed as a list for each interface
'''
# Provides:
# ip_interfaces
if salt.utils.platform.is_proxy():
return {}
ret = {}
ifaces = _get_interfaces()
for face in ifaces:
iface_ips = []
for inet in ifaces[face].get('inet', []):
if 'address' in inet:
iface_ips.append(inet['address'])
for inet in ifaces[face].get('inet6', []):
if 'address' in inet:
iface_ips.append(inet['address'])
for secondary in ifaces[face].get('secondary', []):
if 'address' in secondary:
iface_ips.append(secondary['address'])
ret[face] = iface_ips
return {'ip_interfaces': ret}
def ip4_interfaces():
'''
Provide a dict of the connected interfaces and their ip4 addresses
The addresses will be passed as a list for each interface
'''
# Provides:
# ip_interfaces
if salt.utils.platform.is_proxy():
return {}
ret = {}
ifaces = _get_interfaces()
for face in ifaces:
iface_ips = []
for inet in ifaces[face].get('inet', []):
if 'address' in inet:
iface_ips.append(inet['address'])
for secondary in ifaces[face].get('secondary', []):
if 'address' in secondary:
iface_ips.append(secondary['address'])
ret[face] = iface_ips
return {'ip4_interfaces': ret}
def ip6_interfaces():
'''
Provide a dict of the connected interfaces and their ip6 addresses
The addresses will be passed as a list for each interface
'''
# Provides:
# ip_interfaces
if salt.utils.platform.is_proxy():
return {}
ret = {}
ifaces = _get_interfaces()
for face in ifaces:
iface_ips = []
for inet in ifaces[face].get('inet6', []):
if 'address' in inet:
iface_ips.append(inet['address'])
for secondary in ifaces[face].get('secondary', []):
if 'address' in secondary:
iface_ips.append(secondary['address'])
ret[face] = iface_ips
return {'ip6_interfaces': ret}
def hwaddr_interfaces():
'''
Provide a dict of the connected interfaces and their
hw addresses (Mac Address)
'''
# Provides:
# hwaddr_interfaces
ret = {}
ifaces = _get_interfaces()
for face in ifaces:
if 'hwaddr' in ifaces[face]:
ret[face] = ifaces[face]['hwaddr']
return {'hwaddr_interfaces': ret}
def dns():
'''
Parse the resolver configuration file
.. versionadded:: 2016.3.0
'''
# Provides:
# dns
if salt.utils.platform.is_windows() or 'proxyminion' in __opts__:
return {}
resolv = salt.utils.dns.parse_resolv()
for key in ('nameservers', 'ip4_nameservers', 'ip6_nameservers',
'sortlist'):
if key in resolv:
resolv[key] = [six.text_type(i) for i in resolv[key]]
return {'dns': resolv} if resolv else {}
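# Illustrative sketch (not in the original module): with an /etc/resolv.conf of
#
#   nameserver 8.8.8.8
#   search example.com
#
# salt.utils.dns.parse_resolv() would typically yield a grain along the lines of
#
#   {'dns': {'nameservers': ['8.8.8.8'], 'ip4_nameservers': ['8.8.8.8'],
#            'ip6_nameservers': [], 'search': ['example.com'], ...}}
#
# The exact set of keys comes from the parser; only the four keys listed in the
# loop above are coerced to text.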
def get_machine_id():
'''
Provide the machine-id for machine/virtualization combination
'''
# Provides:
# machine-id
if platform.system() == 'AIX':
return _aix_get_machine_id()
locations = ['/etc/machine-id', '/var/lib/dbus/machine-id']
existing_locations = [loc for loc in locations if os.path.exists(loc)]
if not existing_locations:
return {}
else:
with salt.utils.files.fopen(existing_locations[0]) as machineid:
return {'machine_id': machineid.read().strip()}
def cwd():
'''
Current working directory
'''
return {'cwd': os.getcwd()}
def path():
'''
Return the path
'''
# Provides:
# path
return {'path': os.environ.get('PATH', '').strip()}
def pythonversion():
'''
Return the Python version
'''
# Provides:
# pythonversion
return {'pythonversion': list(sys.version_info)}
def pythonpath():
'''
Return the Python path
'''
# Provides:
# pythonpath
return {'pythonpath': sys.path}
def pythonexecutable():
'''
Return the python executable in use
'''
# Provides:
# pythonexecutable
return {'pythonexecutable': sys.executable}
def saltpath():
'''
Return the path of the salt module
'''
# Provides:
# saltpath
salt_path = os.path.abspath(os.path.join(__file__, os.path.pardir))
return {'saltpath': os.path.dirname(salt_path)}
def saltversion():
'''
Return the version of salt
'''
# Provides:
# saltversion
from salt.version import __version__
return {'saltversion': __version__}
def zmqversion():
'''
Return the zeromq version
'''
# Provides:
# zmqversion
try:
import zmq
return {'zmqversion': zmq.zmq_version()} # pylint: disable=no-member
except ImportError:
return {}
def saltversioninfo():
'''
Return the version_info of salt
.. versionadded:: 0.17.0
'''
# Provides:
# saltversioninfo
from salt.version import __version_info__
return {'saltversioninfo': list(__version_info__)}
def _hw_data(osdata):
'''
Get system specific hardware data from dmidecode
Provides
biosversion
productname
manufacturer
serialnumber
biosreleasedate
uuid
.. versionadded:: 0.9.5
'''
if salt.utils.platform.is_proxy():
return {}
grains = {}
if osdata['kernel'] == 'Linux' and os.path.exists('/sys/class/dmi/id'):
# On many Linux distributions basic firmware information is available via sysfs
# requires CONFIG_DMIID to be enabled in the Linux kernel configuration
sysfs_firmware_info = {
'biosversion': 'bios_version',
'productname': 'product_name',
'manufacturer': 'sys_vendor',
'biosreleasedate': 'bios_date',
'uuid': 'product_uuid',
'serialnumber': 'product_serial'
}
for key, fw_file in sysfs_firmware_info.items():
contents_file = os.path.join('/sys/class/dmi/id', fw_file)
if os.path.exists(contents_file):
try:
with salt.utils.files.fopen(contents_file, 'r') as ifile:
grains[key] = salt.utils.stringutils.to_unicode(ifile.read().strip(), errors='replace')
if key == 'uuid':
grains['uuid'] = grains['uuid'].lower()
except (IOError, OSError) as err:
# PermissionError is new to Python 3, but corresponds to the EACESS and
# EPERM error numbers. Use those instead here for PY2 compatibility.
if err.errno == EACCES or err.errno == EPERM:
# Skip the grain if non-root user has no access to the file.
pass
elif salt.utils.path.which_bin(['dmidecode', 'smbios']) is not None and not (
salt.utils.platform.is_smartos() or
( # SunOS on SPARC - 'smbios: failed to load SMBIOS: System does not export an SMBIOS table'
osdata['kernel'] == 'SunOS' and
osdata['cpuarch'].startswith('sparc')
)):
# On SmartOS (possibly SunOS also) smbios only works in the global zone
# smbios is also not compatible with linux's smbios (smbios -s = print summarized)
grains = {
'biosversion': __salt__['smbios.get']('bios-version'),
'productname': __salt__['smbios.get']('system-product-name'),
'manufacturer': __salt__['smbios.get']('system-manufacturer'),
'biosreleasedate': __salt__['smbios.get']('bios-release-date'),
'uuid': __salt__['smbios.get']('system-uuid')
}
grains = dict([(key, val) for key, val in grains.items() if val is not None])
uuid = __salt__['smbios.get']('system-uuid')
if uuid is not None:
grains['uuid'] = uuid.lower()
for serial in ('system-serial-number', 'chassis-serial-number', 'baseboard-serial-number'):
serial = __salt__['smbios.get'](serial)
if serial is not None:
grains['serialnumber'] = serial
break
elif salt.utils.path.which_bin(['fw_printenv']) is not None:
# ARM Linux devices expose UBOOT env variables via fw_printenv
hwdata = {
'manufacturer': 'manufacturer',
'serialnumber': 'serial#',
'productname': 'DeviceDesc',
}
for grain_name, cmd_key in six.iteritems(hwdata):
result = __salt__['cmd.run_all']('fw_printenv {0}'.format(cmd_key))
if result['retcode'] == 0:
uboot_keyval = result['stdout'].split('=')
grains[grain_name] = _clean_value(grain_name, uboot_keyval[1])
elif osdata['kernel'] == 'FreeBSD':
# On FreeBSD /bin/kenv (already in base system)
# can be used instead of dmidecode
kenv = salt.utils.path.which('kenv')
if kenv:
# In theory, it will be easier to add new fields to this later
fbsd_hwdata = {
'biosversion': 'smbios.bios.version',
'manufacturer': 'smbios.system.maker',
'serialnumber': 'smbios.system.serial',
'productname': 'smbios.system.product',
'biosreleasedate': 'smbios.bios.reldate',
'uuid': 'smbios.system.uuid',
}
for key, val in six.iteritems(fbsd_hwdata):
value = __salt__['cmd.run']('{0} {1}'.format(kenv, val))
grains[key] = _clean_value(key, value)
elif osdata['kernel'] == 'OpenBSD':
sysctl = salt.utils.path.which('sysctl')
hwdata = {'biosversion': 'hw.version',
'manufacturer': 'hw.vendor',
'productname': 'hw.product',
'serialnumber': 'hw.serialno',
'uuid': 'hw.uuid'}
for key, oid in six.iteritems(hwdata):
value = __salt__['cmd.run']('{0} -n {1}'.format(sysctl, oid))
if not value.endswith(' value is not available'):
grains[key] = _clean_value(key, value)
elif osdata['kernel'] == 'NetBSD':
sysctl = salt.utils.path.which('sysctl')
nbsd_hwdata = {
'biosversion': 'machdep.dmi.board-version',
'manufacturer': 'machdep.dmi.system-vendor',
'serialnumber': 'machdep.dmi.system-serial',
'productname': 'machdep.dmi.system-product',
'biosreleasedate': 'machdep.dmi.bios-date',
'uuid': 'machdep.dmi.system-uuid',
}
for key, oid in six.iteritems(nbsd_hwdata):
result = __salt__['cmd.run_all']('{0} -n {1}'.format(sysctl, oid))
if result['retcode'] == 0:
grains[key] = _clean_value(key, result['stdout'])
elif osdata['kernel'] == 'Darwin':
grains['manufacturer'] = 'Apple Inc.'
sysctl = salt.utils.path.which('sysctl')
hwdata = {'productname': 'hw.model'}
for key, oid in hwdata.items():
value = __salt__['cmd.run']('{0} -b {1}'.format(sysctl, oid))
if not value.endswith(' is invalid'):
grains[key] = _clean_value(key, value)
elif osdata['kernel'] == 'SunOS' and osdata['cpuarch'].startswith('sparc'):
# Depending on the hardware model, commands can report different bits
# of information. With that said, consolidate the output from various
# commands and attempt various lookups.
data = ""
for (cmd, args) in (('/usr/sbin/prtdiag', '-v'), ('/usr/sbin/prtconf', '-vp'), ('/usr/sbin/virtinfo', '-a')):
if salt.utils.path.which(cmd): # Also verifies that cmd is executable
data += __salt__['cmd.run']('{0} {1}'.format(cmd, args))
data += '\n'
manufacture_regexes = [
re.compile(r) for r in [
r'(?im)^\s*System\s+Configuration:\s*(.*)(?=sun)', # prtdiag
]
]
product_regexes = [
re.compile(r) for r in [
r'(?im)^\s*System\s+Configuration:\s*.*?sun\d\S+[^\S\r\n]*(.*)', # prtdiag
r'(?im)^[^\S\r\n]*banner-name:[^\S\r\n]*(.*)', # prtconf
r'(?im)^[^\S\r\n]*product-name:[^\S\r\n]*(.*)', # prtconf
]
]
sn_regexes = [
re.compile(r) for r in [
r'(?im)Chassis\s+Serial\s+Number\n-+\n(\S+)', # prtdiag
r'(?i)Chassis\s+Serial#:\s*(\S+)', # virtinfo
r'(?i)chassis-sn:\s*(\S+)', # prtconf
]
]
obp_regexes = [
re.compile(r) for r in [
r'(?im)System\s+PROM\s+revisions.*\nVersion\n-+\nOBP\s+(\S+)\s+(\S+)', # prtdiag
r'(?im)version:\s*\'OBP\s+(\S+)\s+(\S+)', # prtconf
]
]
fw_regexes = [
re.compile(r) for r in [
r'(?i)Sun\s+System\s+Firmware\s+(\S+)\s+(\S+)', # prtdiag
]
]
uuid_regexes = [
re.compile(r) for r in [
r'(?i)Domain\s+UUID:\s+(\S+)', # virtinfo
]
]
for regex in sn_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains['serialnumber'] = res.group(1).strip().replace("'", "")
break
for regex in obp_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
obp_rev, obp_date = res.groups()[0:2] # Limit the number in case we found the data in multiple places
grains['biosversion'] = obp_rev.strip().replace("'", "")
grains['biosreleasedate'] = obp_date.strip().replace("'", "")
for regex in fw_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
fw_rev, fw_date = res.groups()[0:2]
grains['systemfirmware'] = fw_rev.strip().replace("'", "")
grains['systemfirmwaredate'] = fw_date.strip().replace("'", "")
break
for regex in uuid_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains['uuid'] = res.group(1).strip().replace("'", "")
break
for regex in manufacture_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains['manufacture'] = res.group(1).strip().replace("'", "")
break
for regex in product_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
t_productname = res.group(1).strip().replace("'", "")
if t_productname:
grains['product'] = t_productname
grains['productname'] = t_productname
break
elif osdata['kernel'] == 'AIX':
cmd = salt.utils.path.which('prtconf')
if cmd:
data = __salt__['cmd.run']('{0}'.format(cmd)) + os.linesep
for dest, regstring in (('serialnumber', r'(?im)^\s*Machine\s+Serial\s+Number:\s+(\S+)'),
('systemfirmware', r'(?im)^\s*Firmware\s+Version:\s+(.*)')):
for regex in [re.compile(r) for r in [regstring]]:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains[dest] = res.group(1).strip().replace("'", '')
product_regexes = [re.compile(r'(?im)^\s*System\s+Model:\s+(\S+)')]
for regex in product_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains['manufacturer'], grains['productname'] = res.group(1).strip().replace("'", "").split(",")
break
else:
log.error('The \'prtconf\' binary was not found in $PATH.')
return grains
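# Illustrative sketch (not in the original module): on a typical Linux host with
# CONFIG_DMIID enabled, the sysfs branch above reads plain-text files such as
#
#   /sys/class/dmi/id/bios_version   -> grains['biosversion']
#   /sys/class/dmi/id/sys_vendor     -> grains['manufacturer']
#   /sys/class/dmi/id/product_uuid   -> grains['uuid'] (lower-cased)
#
# so the resulting grains might look like
#   {'biosversion': '1.10.2', 'manufacturer': 'Dell Inc.', 'uuid': '4c4c4544-...'}
# with values that are obviously hardware-specific.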
def _get_hash_by_shell():
'''
    Shell out to Python 3 to compute a reliable (non-randomized) hash.
:return:
'''
id_ = __opts__.get('id', '')
id_hash = None
py_ver = sys.version_info[:2]
if py_ver >= (3, 3):
# Python 3.3 enabled hash randomization, so we need to shell out to get
# a reliable hash.
id_hash = __salt__['cmd.run']([sys.executable, '-c', 'print(hash("{0}"))'.format(id_)],
env={'PYTHONHASHSEED': '0'})
try:
id_hash = int(id_hash)
except (TypeError, ValueError):
log.debug('Failed to hash the ID to get the server_id grain. Result of hash command: %s', id_hash)
id_hash = None
if id_hash is None:
# Python < 3.3 or error encountered above
id_hash = hash(id_)
return abs(id_hash % (2 ** 31))
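# Illustrative sketch (not in the original module): the shell-out above is
# equivalent to running something like
#
#   PYTHONHASHSEED=0 python3 -c 'print(hash("my-minion-id"))'
#
# which pins hash randomization so the same minion id always hashes to the same
# integer across restarts; the result is then folded into the 0..2**31-1 range.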
def get_server_id():
'''
Provides an integer based on the FQDN of a machine.
Useful as server-id in MySQL replication or anywhere else you'll need an ID
like this.
'''
# Provides:
# server_id
if salt.utils.platform.is_proxy():
server_id = {}
else:
use_crc = __opts__.get('server_id_use_crc')
if bool(use_crc):
id_hash = getattr(zlib, use_crc, zlib.adler32)(__opts__.get('id', '').encode()) & 0xffffffff
else:
            log.debug('This server_id is computed by neither Adler32 nor CRC32. '
                      'Please use the "server_id_use_crc" option and define the '
                      'algorithm you prefer (default "Adler32"). Starting with '
                      'Sodium, the server_id will be computed with Adler32 by default.')
id_hash = _get_hash_by_shell()
server_id = {'server_id': id_hash}
return server_id
def get_master():
'''
Provides the minion with the name of its master.
This is useful in states to target other services running on the master.
'''
# Provides:
# master
return {'master': __opts__.get('master', '')}
def default_gateway():
'''
Populates grains which describe whether a server has a default gateway
configured or not. Uses `ip -4 route show` and `ip -6 route show` and greps
for a `default` at the beginning of any line. Assuming the standard
`default via <ip>` format for default gateways, it will also parse out the
ip address of the default gateway, and put it in ip4_gw or ip6_gw.
If the `ip` command is unavailable, no grains will be populated.
Currently does not support multiple default gateways. The grains will be
set to the first default gateway found.
List of grains:
ip4_gw: True # ip/True/False if default ipv4 gateway
ip6_gw: True # ip/True/False if default ipv6 gateway
ip_gw: True # True if either of the above is True, False otherwise
'''
grains = {}
ip_bin = salt.utils.path.which('ip')
if not ip_bin:
return {}
grains['ip_gw'] = False
grains['ip4_gw'] = False
grains['ip6_gw'] = False
for ip_version in ('4', '6'):
try:
out = __salt__['cmd.run']([ip_bin, '-' + ip_version, 'route', 'show'])
for line in out.splitlines():
if line.startswith('default'):
grains['ip_gw'] = True
grains['ip{0}_gw'.format(ip_version)] = True
try:
via, gw_ip = line.split()[1:3]
except ValueError:
pass
else:
if via == 'via':
grains['ip{0}_gw'.format(ip_version)] = gw_ip
break
except Exception:
continue
return grains
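# Illustrative sketch (not in the original module): given an `ip -4 route show`
# output whose first line is
#
#   default via 192.168.1.1 dev eth0 proto dhcp metric 100
#
# the loop above would set
#
#   {'ip_gw': True, 'ip4_gw': '192.168.1.1', 'ip6_gw': False}
#
# (ip6_gw stays False unless the IPv6 routing table also has a default route).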
def kernelparams():
'''
Return the kernel boot parameters
'''
if salt.utils.platform.is_windows():
# TODO: add grains using `bcdedit /enum {current}`
return {}
else:
try:
with salt.utils.files.fopen('/proc/cmdline', 'r') as fhr:
cmdline = fhr.read()
grains = {'kernelparams': []}
for data in [item.split('=') for item in salt.utils.args.shlex_split(cmdline)]:
value = None
if len(data) == 2:
value = data[1].strip('"')
grains['kernelparams'] += [(data[0], value)]
except IOError as exc:
grains = {}
log.debug('Failed to read /proc/cmdline: %s', exc)
return grains
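# Illustrative sketch (not in the original module): a /proc/cmdline such as
#
#   BOOT_IMAGE=/vmlinuz-5.4.0 root=/dev/sda1 ro quiet splash
#
# would be parsed by the loop above into
#
#   {'kernelparams': [('BOOT_IMAGE', '/vmlinuz-5.4.0'), ('root', '/dev/sda1'),
#                     ('ro', None), ('quiet', None), ('splash', None)]}
#
# Flags without an '=value' part keep a value of None.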
|
the-stack_106_25546 | import uuid
from common.logger import get_logger
from payments.domain.order import Order
from payments.domain.payment import Payment
from payments.domain.paypal_payment import PaypalPayment
logger = get_logger(__name__)
class OrderFactory:
@staticmethod
def create_order_from_repository_order(order):
payments = []
for payment_info in order.payments:
if ("payment_method" in payment_info.payment_details) and \
(payment_info.payment_details["payment_method"] == "paypal"):
payment = PaypalPayment(
payment_id=payment_info.payment_id,
amount=payment_info.amount["amount"],
currency=payment_info.amount["currency"],
created_at=payment_info.created_at,
payment_status=payment_info.payment_status,
payment_details=payment_info.payment_details
)
else:
payment = Payment(
payment_id=payment_info.payment_id,
amount=payment_info.amount["amount"],
currency=payment_info.amount["currency"],
created_at=payment_info.created_at,
payment_status=payment_info.payment_status,
payment_details=payment_info.payment_details
)
payments.append(payment)
order = Order(
order_id=order.id,
amount=order.amount["amount"],
currency=order.amount["currency"],
item_details=order.item_details,
username=order.username,
payments=payments
)
return order
@staticmethod
def create_order(amount, currency, item_details, username):
order_id = str(uuid.uuid1())
order = Order(order_id=order_id, amount=amount, currency=currency,
item_details=item_details, username=username, payments=[])
logger.info(f"Order created with {order_id}")
return order
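    # Illustrative usage sketch (not part of the original file); the argument
    # values below are made up:
    #
    #   order = OrderFactory.create_order(
    #       amount=100, currency="USD",
    #       item_details={"item": "AGI", "quantity": 1},
    #       username="alice")
    #
    # The returned Order carries a freshly generated uuid1-based order_id and
    # an empty payments list.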
@staticmethod
def get_order_details(orders):
order_details = []
for order_item in orders:
order = OrderFactory.create_order_details_from_repository(order_item)
order_details.append(order)
return {"orders": order_details}
@staticmethod
def create_order_details_from_repository(order_item):
order = {
"order_id": order_item.id,
"price": {
"amount": order_item.amount["amount"],
"currency": order_item.amount["currency"]
},
"username": order_item.username,
"created_at": order_item.created_at.strftime("%Y-%m-%d %H:%M:%S"),
"item_details": order_item.item_details,
"payments": []
}
for payment_item in order_item.payments:
payment = {
"payment_id": payment_item.payment_id,
"price": {
"amount": payment_item.amount["amount"],
"currency": payment_item.amount["currency"]
},
"payment_details": payment_item.payment_details,
"payment_status": payment_item.payment_status,
"created_at": payment_item.created_at.strftime("%Y-%m-%d %H:%M:%S")
}
order["payments"].append(payment)
return order
|
the-stack_106_25548 | import struct
import numpy as np
def _read_big_endian_int(s):
return struct.unpack(">l", s[:4])[0]
def _vector_convert(n):
# convert n to a 10-dimensional array a with only a[n] = 1.0
a = np.zeros((10, 1))
a[n] = 1.0
return a
def load(image_path, label_path):
"""Returns a tuple (data) of tuples (image, label), where image is a numpy
array containing the image data and label is a 10-dimensional numpy
array containing the expected activation (answer)"""
    with open(image_path, 'rb') as imgf, open(label_path, 'rb') as labelf:
imgf.seek(4) # num imgs held at byte 4
labelf.seek(4) # num labels held at byte 4
num_imgs = _read_big_endian_int(imgf.read(4))
num_labels = _read_big_endian_int(labelf.read(4))
# get img size from width stored at byte 8 and height stored at byte 12
img_size = _read_big_endian_int(imgf.read(4)) * _read_big_endian_int(
imgf.read(4))
img_bytes = imgf.read()
label_bytes = labelf.read()
# read data into tuple of imgs
        imgs = tuple(np.frombuffer(
            img_bytes[i*img_size:(i+1)*img_size],
            dtype='B').reshape(img_size, 1)/255 for i in range(num_imgs))
labels = tuple(
_vector_convert(label_bytes[i]) for i in range(num_labels))
return [(img, label) for img, label in zip(imgs, labels)]
def load_all():
training_data = load(
"samples/train-images.idx3-ubyte", "samples/train-labels.idx1-ubyte")
test_data = load(
"samples/t10k-images.idx3-ubyte", "samples/t10k-labels.idx1-ubyte")
# use 10k training examples as evaluation data
eval_data = training_data[50000:]
training_data = training_data[:50000]
return (training_data, eval_data, test_data)
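# Illustrative usage sketch (not part of the original file), assuming the four
# standard MNIST files are present under samples/:
#
#   training_data, eval_data, test_data = load_all()
#   img, label = training_data[0]
#   # img.shape   -> (784, 1), values scaled into [0, 1]
#   # label.shape -> (10, 1), one-hot encoded digit
#   # len(training_data), len(eval_data), len(test_data) -> 50000, 10000, 10000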
|
the-stack_106_25551 | # -*- encoding: utf-8 -*-
# Author: hushukai
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import backend as K
from .utils import pad_to_fixed_size_tf, remove_pad_tf
def nms(split_positions, scores, score_thresh=0.7, distance_thresh=16, max_outputs=50):
"""Non-Maximum-Suppression"""
indices = tf.where(scores >= score_thresh)[:, 0]
scores = tf.gather(scores, indices)
split_positions = tf.gather(split_positions, indices)
    # Compute an adaptive distance_thresh
if distance_thresh <= 1:
distance_thresh_ratio = distance_thresh
split_num = tf.cast(tf.shape(split_positions)[0], tf.float32)
split_cent = tf.reduce_mean(split_positions, axis=1)
split_minimum = tf.reduce_min(split_cent)
split_maximum = tf.reduce_max(split_cent)
distance_thresh = distance_thresh_ratio * (split_maximum - split_minimum) / (split_num - 1.)
ordered_indices = tf.argsort(scores)[::-1]
ordered_scores = tf.gather(scores, ordered_indices)
ordered_positions = tf.gather(split_positions, ordered_indices)
nms_scores = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
nms_positions = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
def loop_condition(j, ordered_scores, *args):
return tf.shape(ordered_scores)[0] > 0
def loop_body(j, ordered_scores, ordered_positions, nms_scores, nms_positions):
curr_score = ordered_scores[0]
curr_positions = ordered_positions[0]
nms_scores = nms_scores.write(j, curr_score)
nms_positions = nms_positions.write(j, curr_positions)
distances = tf.reduce_mean(ordered_positions[1:], axis=1) - tf.reduce_mean(curr_positions, keepdims=True)
_indices = tf.where(tf.abs(distances) > distance_thresh)[:, 0] + 1
ordered_scores = tf.gather(ordered_scores, _indices)
ordered_positions = tf.gather(ordered_positions, _indices)
return j + 1, ordered_scores, ordered_positions, nms_scores, nms_positions
_, _, _, nms_scores, nms_positions = tf.while_loop(cond=loop_condition, body=loop_body,
loop_vars=[0, ordered_scores, ordered_positions, nms_scores, nms_positions])
nms_scores = nms_scores.stack()
nms_positions = nms_positions.stack()
nms_scores = pad_to_fixed_size_tf(nms_scores[:, tf.newaxis], max_outputs)
nms_positions = pad_to_fixed_size_tf(nms_positions, max_outputs)
return [nms_positions, nms_scores]
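# Illustrative sketch (not part of the original file): `split_positions` is an
# (N, 2) float tensor of candidate split segments and `scores` an (N,) tensor
# of confidences. The function keeps the highest-scoring candidates that are at
# least `distance_thresh` pixels apart (or a ratio-derived threshold when
# distance_thresh <= 1) and pads both outputs to `max_outputs` rows via
# pad_to_fixed_size_tf, e.g.:
#
#   positions, scores = nms(split_positions, scores,
#                           score_thresh=0.7, distance_thresh=16, max_outputs=50)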
class ExtractSplitPosition(layers.Layer):
def __init__(self, feat_stride=16, cls_score_thresh=0.7, distance_thresh=16, nms_max_outputs=50, **kwargs):
self.feat_stride = feat_stride
self.cls_score_thresh = cls_score_thresh
self.distance_thresh = distance_thresh
self.nms_max_outputs = nms_max_outputs
super(ExtractSplitPosition, self).__init__(**kwargs)
def call(self, inputs, **kwargs):
pred_cls_logit, pred_delta, img_width, real_images_width = inputs
batch_size = tf.shape(pred_cls_logit)[0]
feat_width = img_width // self.feat_stride
interval_center = (tf.range(0., feat_width) + 0.5) * self.feat_stride
interval_center = tf.tile(interval_center[:, tf.newaxis], multiples=[1, 2])
interval_center = interval_center[tf.newaxis, ...] # shape (1, feat_width, 2)
pred_split_positions = pred_delta * self.feat_stride + interval_center
pred_scores = K.sigmoid(pred_cls_logit)
max_width = real_images_width[:, tf.newaxis, tf.newaxis] - 1.
pred_split_positions = tf.where(pred_split_positions < 0., 0., pred_split_positions)
pred_split_positions = tf.where(pred_split_positions > max_width, max_width, pred_split_positions)
        # Non-maximum suppression
options = {"score_thresh": self.cls_score_thresh,
"distance_thresh": self.distance_thresh,
"max_outputs": self.nms_max_outputs}
nms_split_positions, nms_scores = tf.map_fn(fn=lambda x: nms(*x, **options),
elems=[pred_split_positions, pred_scores],
dtype=[tf.float32, tf.float32])
# In order to compute accuracy
nms_center = tf.reduce_mean(nms_split_positions[..., :2], axis=2)
x_interval_num = tf.floor(nms_center / self.feat_stride)
nms_indices = tf.where(nms_split_positions[..., 2] == 1.)
x_interval_num = tf.gather_nd(x_interval_num, nms_indices)
batch_indices = nms_indices[:, 0]
x_interval_num = tf.cast(x_interval_num, tf.int64)
target_indices = tf.stack([batch_indices, x_interval_num], axis=1)
pre_nms_cls = tf.ones_like(target_indices[:, 0], dtype=tf.float32)
nms_cls_ids = tf.scatter_nd(indices=target_indices, updates=pre_nms_cls, shape=[batch_size, feat_width]) # 0, 1
return nms_split_positions, nms_scores, nms_cls_ids
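# Illustrative usage sketch (not part of the original file): the layer is meant
# to be wired into a Keras model, taking the raw head outputs plus image widths
# and returning NMS-filtered split positions, their scores and per-interval
# class ids. Shapes below are assumptions based on the call() signature:
#
#   layer = ExtractSplitPosition(feat_stride=16, cls_score_thresh=0.7,
#                                distance_thresh=16, nms_max_outputs=50)
#   nms_split_positions, nms_scores, nms_cls_ids = layer(
#       [pred_cls_logit, pred_delta, img_width, real_images_width])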
if __name__ == '__main__':
print("Done !")
|
the-stack_106_25552 | """Boyuan URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.views.static import serve
import Bo_yuan
from Bo_yuan import views, api
# from Boyuan.settings import STATIC_ROOT
from Boyuan import settings
from Boyuan.settings import MEDIA_ROOT
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api/', include('Bo_yuan.urls', namespace='api')),
    # Back-office administration
url(r'^$', views.rindex),
    # Home page
url(r'^index', views.index, name='index'),
    # Advertisement management
    # Home page banner management
url(r'^ad/banner$', views.banner, name='banner'),
    # Ad slot management
url(r'^ad/advertising$', views.advertising, name='advertising'),
    # Banner creation
url(r'^ad/banner-create$', views.banner_create, name='banner_create'),
    # Used car management
    # Brand management
url(r'^car/brand$', views.brand, name='brand'),
    # Car model management
url(r'^car/car-type$', views.car_type, name='car_type'),
url(r'^car/create-type$', views.create_type, name='create_type'),
    # Level management
url(r'^car/car-serie$', views.car_serie, name='car_serie'),
    # Car source (inventory) management
url(r'^car/cars$', views.cars, name='cars'),
url(r'^car/car-yuan$', views.car_yuan, name='car_yuan'),
    # Vehicle review management
url(r'^car-check$', views.car_check, name='car_check'),
    # Preview
# url(r'^car-look$', views.car_look, name='car_look'),
    # Vehicle review: approve/reject
url(r'^car-operation$', views.car_operation, name='car_operation'),
    # Reject
# url(r'^audit-failed$', views.audit_failed, name='audit_failed'),
    # Preview
# url(r'^check-look$', views.audit_success, name='check_ok'),
    # Set level
# url(r'^change-type$', views.change_type, name='change-type'),
    # Appointment management
    # Car-selling appointment management
url(r'^appointment/sell-appointment$', views.sell_appointment, name='sell_appointment'),
    # Member car-buying appointment management
url(r'^appointment/vip-appointment$', views.vip_appointment, name='vip_appointment'),
    # Member management
url(r'^vip$', views.vip, name='vip'),
url(r'^vip-details$', views.vip_details, name='vip_details'),
    # Single-page management
    # Corporate culture
url(r'^corporate-culture$', views.corporate_culture, name='corporate_culture'),
    # Module management
    # Vehicle condition inspection
url(r'^module/vehicle-inspection$', views.vehicle_inspection, name='vehicle_inspection'),
    # Car maintenance
url(r'^module/car-maintenance$', views.car_maintenance, name='car_maintenance'),
    # Insurance supermarket
url(r'^module/insurance-products$', views.insurance_products, name='insurance_products'),
    # Car washing service
url(r'^module/fcwt$', views.fcwt, name='fcwt'),
    # System settings
url(r'^login$', views.login),
url(r'^logout$', views.logout, name='logout'),
url(r'^change-passwd$', views.change_passwd, name='change_passwd'),
    # # Brands
# url(r'^brands/', views.brands),
    # # Car models
# url(r'^cars_type/', views.brands),
    # # Levels
# url(r'^brands/', views.brands),
    # # Car source management (all vehicles)
# url(r'^cars/', views.cars),
    # # Members (total members)
# url(r'^vips/', views.vips),
    # # New today
# url(r'^daus/', views.daus),
#
    # # Appointment to view a car
# url(r'^appointment/', views.appointment),
    # # Appointment to sell a car
# url(r'^sell_appointment/', views.sell_appointment),
    # Publish
# url(r'^issue/', api.issue),
    # # Sell car
# url(r'^sellcar/', views.sellcar),
#
# url(r'^search_user_car/', views.search_user_car),
# url(r'^search_car/', views.search_car),
# url(r'^create_brands/', views.create_brands),
# static
# url(r'^static/(?P<path>.*)$', serve, {'document_root': STATIC_ROOT}),
# media
url(r'^media/(?P<path>.*)$', serve, {'document_root': MEDIA_ROOT}),
# url(r'^(?P<path>.*.txt)$', serve, {'document_root': settings.BASE_DIR}),
]
# Error handlers
handler404 = Bo_yuan.views.page_not_found
handler500 = Bo_yuan.views.server_error
|
the-stack_106_25553 | try:
import aiohttp.web
except ImportError:
print("The dashboard requires aiohttp to run.")
import sys
sys.exit(1)
import argparse
import copy
import datetime
import errno
import json
import logging
import os
import platform
import threading
import time
import traceback
import yaml
import uuid
import grpc
from google.protobuf.json_format import MessageToDict
import ray
import ray.ray_constants as ray_constants
from ray.core.generated import node_manager_pb2
from ray.core.generated import node_manager_pb2_grpc
from ray.core.generated import reporter_pb2
from ray.core.generated import reporter_pb2_grpc
from ray.core.generated import core_worker_pb2
from ray.core.generated import core_worker_pb2_grpc
from ray.dashboard.interface import BaseDashboardController
from ray.dashboard.interface import BaseDashboardRouteHandler
from ray.dashboard.memory import construct_memory_table, MemoryTable, \
GroupByType, SortingType
from ray.dashboard.metrics_exporter.client import Exporter
from ray.dashboard.metrics_exporter.client import MetricsExportClient
from ray.dashboard.node_stats import NodeStats
from ray.dashboard.util import to_unix_time, measures_to_dict, format_resource
from ray.metrics_agent import PrometheusServiceDiscoveryWriter
try:
from ray.tune import Analysis
from tensorboard import program
except ImportError:
Analysis = None
# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray provides a default configuration at
# entry/init points.
logger = logging.getLogger(__name__)
async def json_response(is_dev, result=None, error=None,
ts=None) -> aiohttp.web.Response:
if ts is None:
ts = datetime.datetime.utcnow()
headers = None
if is_dev:
headers = {"Access-Control-Allow-Origin": "*"}
return aiohttp.web.json_response(
{
"result": result,
"timestamp": to_unix_time(ts),
"error": error,
},
headers=headers)
class DashboardController(BaseDashboardController):
def __init__(self, redis_address, redis_password):
self.node_stats = NodeStats(redis_address, redis_password)
self.raylet_stats = RayletStats(
redis_address, redis_password=redis_password)
if Analysis is not None:
self.tune_stats = TuneCollector(2.0)
self.memory_table = MemoryTable([])
def _construct_raylet_info(self):
D = self.raylet_stats.get_raylet_stats()
workers_info_by_node = {
data["nodeId"]: data.get("workersStats")
for data in D.values()
}
infeasible_tasks = sum(
(data.get("infeasibleTasks", []) for data in D.values()), [])
# ready_tasks are used to render tasks that are not schedulable
# due to resource limitations.
# (e.g., Actor requires 2 GPUs but there is only 1 gpu available).
ready_tasks = sum((data.get("readyTasks", []) for data in D.values()),
[])
actors = self.node_stats.get_actors(workers_info_by_node,
infeasible_tasks, ready_tasks)
for address, data in D.items():
# process view data
measures_dicts = {}
for view_data in data["viewData"]:
view_name = view_data["viewName"]
if view_name in ("local_available_resource",
"local_total_resource",
"object_manager_stats"):
measures_dicts[view_name] = measures_to_dict(
view_data["measures"])
# process resources info
extra_info_strings = []
prefix = "ResourceName:"
for resource_name, total_resource in measures_dicts[
"local_total_resource"].items():
available_resource = measures_dicts[
"local_available_resource"].get(resource_name, .0)
resource_name = resource_name[len(prefix):]
extra_info_strings.append("{}: {} / {}".format(
resource_name,
format_resource(resource_name,
total_resource - available_resource),
format_resource(resource_name, total_resource)))
data["extraInfo"] = ", ".join(extra_info_strings) + "\n"
if os.environ.get("RAY_DASHBOARD_DEBUG"):
# process object store info
extra_info_strings = []
prefix = "ValueType:"
for stats_name in [
"used_object_store_memory", "num_local_objects"
]:
stats_value = measures_dicts["object_manager_stats"].get(
prefix + stats_name, .0)
extra_info_strings.append("{}: {}".format(
stats_name, stats_value))
data["extraInfo"] += ", ".join(extra_info_strings)
# process actor info
actors_str = json.dumps(actors, indent=2, sort_keys=True)
lines = actors_str.split("\n")
max_line_length = max(map(len, lines))
to_print = []
for line in lines:
to_print.append(line + (max_line_length - len(line)) * " ")
data["extraInfo"] += "\n" + "\n".join(to_print)
return {"nodes": D, "actors": actors}
def get_ray_config(self):
try:
config_path = os.path.expanduser("~/ray_bootstrap_config.yaml")
with open(config_path) as f:
cfg = yaml.safe_load(f)
except Exception:
error = "No config"
return error, None
D = {
"min_workers": cfg["min_workers"],
"max_workers": cfg["max_workers"],
"initial_workers": cfg["initial_workers"],
"autoscaling_mode": cfg["autoscaling_mode"],
"idle_timeout_minutes": cfg["idle_timeout_minutes"],
}
try:
D["head_type"] = cfg["head_node"]["InstanceType"]
except KeyError:
D["head_type"] = "unknown"
try:
D["worker_type"] = cfg["worker_nodes"]["InstanceType"]
except KeyError:
D["worker_type"] = "unknown"
return None, D
def get_node_info(self):
return self.node_stats.get_node_stats()
def get_raylet_info(self):
return self._construct_raylet_info()
def get_memory_table_info(self,
group_by=GroupByType.NODE_ADDRESS,
sort_by=SortingType.OBJECT_SIZE) -> MemoryTable:
# Collecting memory info adds big overhead to the cluster.
# This must be collected only when it is necessary.
self.raylet_stats.include_memory_info = True
D = self.raylet_stats.get_raylet_stats()
workers_info_by_node = {
data["nodeId"]: data.get("workersStats")
for data in D.values()
}
self.memory_table = construct_memory_table(
workers_info_by_node, group_by=group_by, sort_by=sort_by)
return self.memory_table
def stop_collecting_memory_table_info(self):
self.raylet_stats.include_memory_info = False
def tune_info(self):
if Analysis is not None:
D = self.tune_stats.get_stats()
else:
D = {}
return D
def tune_availability(self):
if Analysis is not None:
D = self.tune_stats.get_availability()
else:
D = {"available": False, "trials_available": False}
return D
def set_tune_experiment(self, experiment):
if Analysis is not None:
return self.tune_stats.set_experiment(experiment)
return "Tune Not Enabled", None
def enable_tune_tensorboard(self):
if Analysis is not None:
self.tune_stats.enable_tensorboard()
def launch_profiling(self, node_id, pid, duration):
profiling_id = self.raylet_stats.launch_profiling(
node_id=node_id, pid=pid, duration=duration)
return profiling_id
def check_profiling_status(self, profiling_id):
return self.raylet_stats.check_profiling_status(profiling_id)
def get_profiling_info(self, profiling_id):
return self.raylet_stats.get_profiling_info(profiling_id)
def kill_actor(self, actor_id, ip_address, port):
return self.raylet_stats.kill_actor(actor_id, ip_address, port)
def get_logs(self, hostname, pid):
return self.node_stats.get_logs(hostname, pid)
def get_errors(self, hostname, pid):
return self.node_stats.get_errors(hostname, pid)
def start_collecting_metrics(self):
self.node_stats.start()
self.raylet_stats.start()
if Analysis is not None:
self.tune_stats.start()
class DashboardRouteHandler(BaseDashboardRouteHandler):
def __init__(self, dashboard_controller: DashboardController,
is_dev=False):
self.dashboard_controller = dashboard_controller
self.is_dev = is_dev
def forbidden(self) -> aiohttp.web.Response:
return aiohttp.web.Response(status=403, text="403 Forbidden")
async def get_forbidden(self, _) -> aiohttp.web.Response:
return self.forbidden()
async def get_index(self, req) -> aiohttp.web.Response:
return aiohttp.web.FileResponse(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"client/build/index.html"))
async def get_favicon(self, req) -> aiohttp.web.Response:
return aiohttp.web.FileResponse(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"client/build/favicon.ico"))
async def ray_config(self, req) -> aiohttp.web.Response:
error, result = self.dashboard_controller.get_ray_config()
if error:
return await json_response(self.is_dev, error=error)
return await json_response(self.is_dev, result=result)
async def node_info(self, req) -> aiohttp.web.Response:
now = datetime.datetime.utcnow()
D = self.dashboard_controller.get_node_info()
return await json_response(self.is_dev, result=D, ts=now)
async def raylet_info(self, req) -> aiohttp.web.Response:
result = self.dashboard_controller.get_raylet_info()
return await json_response(self.is_dev, result=result)
async def memory_table_info(self, req) -> aiohttp.web.Response:
group_by = req.query.get("group_by")
sort_by = req.query.get("sort_by")
kwargs = {}
try:
if group_by:
kwargs["group_by"] = GroupByType(group_by)
if sort_by:
kwargs["sort_by"] = SortingType(sort_by)
except ValueError as e:
return aiohttp.web.HTTPBadRequest(reason=str(e))
memory_table = self.dashboard_controller.get_memory_table_info(
**kwargs)
return await json_response(self.is_dev, result=memory_table.__dict__())
async def stop_collecting_memory_table_info(self,
req) -> aiohttp.web.Response:
self.dashboard_controller.stop_collecting_memory_table_info()
return await json_response(self.is_dev, result={})
async def tune_info(self, req) -> aiohttp.web.Response:
result = self.dashboard_controller.tune_info()
return await json_response(self.is_dev, result=result)
async def tune_availability(self, req) -> aiohttp.web.Response:
result = self.dashboard_controller.tune_availability()
return await json_response(self.is_dev, result=result)
async def set_tune_experiment(self, req) -> aiohttp.web.Response:
data = await req.json()
error, result = self.dashboard_controller.set_tune_experiment(
data["experiment"])
if error:
return await json_response(self.is_dev, error=error)
return await json_response(self.is_dev, result=result)
async def enable_tune_tensorboard(self, req) -> aiohttp.web.Response:
self.dashboard_controller.enable_tune_tensorboard()
return await json_response(self.is_dev, result={})
async def launch_profiling(self, req) -> aiohttp.web.Response:
node_id = req.query.get("node_id")
pid = int(req.query.get("pid"))
duration = int(req.query.get("duration"))
profiling_id = self.dashboard_controller.launch_profiling(
node_id, pid, duration)
return await json_response(self.is_dev, result=str(profiling_id))
async def check_profiling_status(self, req) -> aiohttp.web.Response:
profiling_id = req.query.get("profiling_id")
status = self.dashboard_controller.check_profiling_status(profiling_id)
return await json_response(self.is_dev, result=status)
async def get_profiling_info(self, req) -> aiohttp.web.Response:
profiling_id = req.query.get("profiling_id")
profiling_info = self.dashboard_controller.get_profiling_info(
profiling_id)
return aiohttp.web.json_response(profiling_info)
async def kill_actor(self, req) -> aiohttp.web.Response:
actor_id = req.query.get("actor_id")
ip_address = req.query.get("ip_address")
port = req.query.get("port")
return await json_response(
self.is_dev,
self.dashboard_controller.kill_actor(actor_id, ip_address, port))
async def logs(self, req) -> aiohttp.web.Response:
hostname = req.query.get("hostname")
pid = req.query.get("pid")
result = self.dashboard_controller.get_logs(hostname, pid)
return await json_response(self.is_dev, result=result)
async def errors(self, req) -> aiohttp.web.Response:
hostname = req.query.get("hostname")
pid = req.query.get("pid")
result = self.dashboard_controller.get_errors(hostname, pid)
return await json_response(self.is_dev, result=result)
class MetricsExportHandler:
def __init__(self,
dashboard_controller: DashboardController,
metrics_export_client: MetricsExportClient,
dashboard_id,
is_dev=False):
assert metrics_export_client is not None
self.metrics_export_client = metrics_export_client
self.dashboard_controller = dashboard_controller
self.is_dev = is_dev
async def enable_export_metrics(self, req) -> aiohttp.web.Response:
if self.metrics_export_client.enabled:
return await json_response(
self.is_dev, result={"url": None}, error="Already enabled")
succeed, error = self.metrics_export_client.start_exporting_metrics()
error_msg = "Failed to enable it. Error: {}".format(error)
if not succeed:
return await json_response(
self.is_dev, result={"url": None}, error=error_msg)
url = self.metrics_export_client.dashboard_url
return await json_response(self.is_dev, result={"url": url})
async def get_dashboard_address(self, req) -> aiohttp.web.Response:
if not self.metrics_export_client.enabled:
return await json_response(
self.is_dev,
result={"url": None},
error="Metrics exporting is not enabled.")
url = self.metrics_export_client.dashboard_url
return await json_response(self.is_dev, result={"url": url})
async def redirect_to_dashboard(self, req) -> aiohttp.web.Response:
if not self.metrics_export_client.enabled:
return await json_response(
self.is_dev,
result={"url": None},
error="You should enable metrics export to use this endpoint.")
raise aiohttp.web.HTTPFound(self.metrics_export_client.dashboard_url)
def setup_metrics_export_routes(app: aiohttp.web.Application,
handler: MetricsExportHandler):
"""Routes that require dynamically changing class attributes."""
app.router.add_get("/api/metrics/enable", handler.enable_export_metrics)
app.router.add_get("/api/metrics/url", handler.get_dashboard_address)
app.router.add_get("/metrics/redirect", handler.redirect_to_dashboard)
def setup_static_dir(app):
build_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "client/build")
if not os.path.isdir(build_dir):
raise OSError(
errno.ENOENT, "Dashboard build directory not found. If installing "
"from source, please follow the additional steps "
"required to build the dashboard"
"(cd python/ray/dashboard/client "
"&& npm ci "
"&& npm run build)", build_dir)
static_dir = os.path.join(build_dir, "static")
app.router.add_static("/static", static_dir)
return build_dir
def setup_speedscope_dir(app, build_dir):
speedscope_dir = os.path.join(build_dir, "speedscope-1.5.3")
app.router.add_static("/speedscope", speedscope_dir)
def setup_dashboard_route(app: aiohttp.web.Application,
handler: BaseDashboardRouteHandler,
index=None,
favicon=None,
ray_config=None,
node_info=None,
raylet_info=None,
tune_info=None,
tune_availability=None,
launch_profiling=None,
check_profiling_status=None,
get_profiling_info=None,
kill_actor=None,
logs=None,
errors=None,
memory_table=None,
stop_memory_table=None):
def add_get_route(route, handler_func):
if route is not None:
app.router.add_get(route, handler_func)
add_get_route(index, handler.get_index)
add_get_route(favicon, handler.get_favicon)
add_get_route(ray_config, handler.ray_config)
add_get_route(node_info, handler.node_info)
add_get_route(raylet_info, handler.raylet_info)
add_get_route(tune_info, handler.tune_info)
add_get_route(tune_availability, handler.tune_availability)
add_get_route(launch_profiling, handler.launch_profiling)
add_get_route(check_profiling_status, handler.check_profiling_status)
add_get_route(get_profiling_info, handler.get_profiling_info)
add_get_route(kill_actor, handler.kill_actor)
add_get_route(logs, handler.logs)
add_get_route(errors, handler.errors)
add_get_route(memory_table, handler.memory_table_info)
add_get_route(stop_memory_table, handler.stop_collecting_memory_table_info)
class Dashboard:
"""A dashboard process for monitoring Ray nodes.
This dashboard is made up of a REST API which collates data published by
Reporter processes on nodes into a json structure, and a webserver
which polls said API for display purposes.
Args:
host(str): Host address of dashboard aiohttp server.
port(str): Port number of dashboard aiohttp server.
redis_address(str): GCS address of a Ray cluster
temp_dir (str): The temporary directory used for log files and
information for this Ray session.
        redis_password(str): Redis password to access GCS
metrics_export_address(str): The address users host their dashboard.
"""
def __init__(self,
host,
port,
redis_address,
temp_dir,
redis_password=None,
metrics_export_address=None):
self.host = host
self.port = port
self.redis_client = ray.services.create_redis_client(
redis_address, password=redis_password)
self.temp_dir = temp_dir
self.dashboard_id = str(uuid.uuid4())
self.dashboard_controller = DashboardController(
redis_address, redis_password)
self.service_discovery = PrometheusServiceDiscoveryWriter(
redis_address, redis_password, temp_dir)
# Setting the environment variable RAY_DASHBOARD_DEV=1 disables some
# security checks in the dashboard server to ease development while
# using the React dev server. Specifically, when this option is set, we
# allow cross-origin requests to be made.
self.is_dev = os.environ.get("RAY_DASHBOARD_DEV") == "1"
self.app = aiohttp.web.Application()
route_handler = DashboardRouteHandler(
self.dashboard_controller, is_dev=self.is_dev)
# Setup Metrics exporting service if necessary.
self.metrics_export_address = metrics_export_address
if self.metrics_export_address:
self._setup_metrics_export()
# Setup Dashboard Routes
build_dir = setup_static_dir(self.app)
setup_speedscope_dir(self.app, build_dir)
setup_dashboard_route(
self.app,
route_handler,
index="/",
favicon="/favicon.ico",
ray_config="/api/ray_config",
node_info="/api/node_info",
raylet_info="/api/raylet_info",
tune_info="/api/tune_info",
tune_availability="/api/tune_availability",
launch_profiling="/api/launch_profiling",
check_profiling_status="/api/check_profiling_status",
get_profiling_info="/api/get_profiling_info",
kill_actor="/api/kill_actor",
logs="/api/logs",
errors="/api/errors",
memory_table="/api/memory_table",
stop_memory_table="/api/stop_memory_table")
self.app.router.add_get("/{_}", route_handler.get_forbidden)
self.app.router.add_post("/api/set_tune_experiment",
route_handler.set_tune_experiment)
self.app.router.add_post("/api/enable_tune_tensorboard",
route_handler.enable_tune_tensorboard)
def _setup_metrics_export(self):
exporter = Exporter(self.dashboard_id, self.metrics_export_address,
self.dashboard_controller)
self.metrics_export_client = MetricsExportClient(
self.metrics_export_address, self.dashboard_controller,
self.dashboard_id, exporter)
# Setup endpoints
metrics_export_handler = MetricsExportHandler(
self.dashboard_controller,
self.metrics_export_client,
self.dashboard_id,
is_dev=self.is_dev)
setup_metrics_export_routes(self.app, metrics_export_handler)
def _start_exporting_metrics(self):
result, error = self.metrics_export_client.start_exporting_metrics()
if not result and error:
url = ray.services.get_webui_url_from_redis(self.redis_client)
error += (" Please reenable the metrics export by going to "
"the url: {}/api/metrics/enable".format(url))
ray.utils.push_error_to_driver_through_redis(
self.redis_client, "metrics export failed", error)
def log_dashboard_url(self):
url = ray.services.get_webui_url_from_redis(self.redis_client)
if url is None:
raise ValueError("WebUI URL is not present in GCS.")
with open(os.path.join(self.temp_dir, "dashboard_url"), "w") as f:
f.write(url)
logger.info("Dashboard running on {}".format(url))
def run(self):
self.log_dashboard_url()
self.dashboard_controller.start_collecting_metrics()
self.service_discovery.start()
if self.metrics_export_address:
self._start_exporting_metrics()
aiohttp.web.run_app(self.app, host=self.host, port=self.port)
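# Illustrative sketch (not part of the original file): outside of `ray start`,
# the dashboard could in principle be driven directly like this; the values are
# placeholders for an already-running Ray/Redis setup:
#
#   dashboard = Dashboard(
#       host="127.0.0.1",
#       port=8265,
#       redis_address="127.0.0.1:6379",
#       temp_dir="/tmp/ray",
#       redis_password=None)
#   dashboard.run()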
class RayletStats(threading.Thread):
def __init__(self, redis_address, redis_password=None):
self.nodes_lock = threading.Lock()
self.nodes = []
self.stubs = {}
self.reporter_stubs = {}
self.redis_client = ray.services.create_redis_client(
redis_address, password=redis_password)
self._raylet_stats_lock = threading.Lock()
self._raylet_stats = {}
self._profiling_stats = {}
self._update_nodes()
self.include_memory_info = False
super().__init__()
def _update_nodes(self):
with self.nodes_lock:
self.nodes = ray.nodes()
node_ids = [node["NodeID"] for node in self.nodes]
# First remove node connections of disconnected nodes.
for node_id in self.stubs.keys():
if node_id not in node_ids:
stub = self.stubs.pop(node_id)
stub.close()
reporter_stub = self.reporter_stubs.pop(node_id)
reporter_stub.close()
# Now add node connections of new nodes.
for node in self.nodes:
node_id = node["NodeID"]
if node_id not in self.stubs:
node_ip = node["NodeManagerAddress"]
channel = grpc.insecure_channel("{}:{}".format(
node_ip, node["NodeManagerPort"]))
stub = node_manager_pb2_grpc.NodeManagerServiceStub(
channel)
self.stubs[node_id] = stub
# Block wait until the reporter for the node starts.
while True:
reporter_port = self.redis_client.get(
"REPORTER_PORT:{}".format(node_ip))
if reporter_port:
break
reporter_channel = grpc.insecure_channel("{}:{}".format(
node_ip, int(reporter_port)))
reporter_stub = reporter_pb2_grpc.ReporterServiceStub(
reporter_channel)
self.reporter_stubs[node_id] = reporter_stub
assert len(self.stubs) == len(
self.reporter_stubs), (self.stubs.keys(),
self.reporter_stubs.keys())
def get_raylet_stats(self):
with self._raylet_stats_lock:
return copy.deepcopy(self._raylet_stats)
def launch_profiling(self, node_id, pid, duration):
profiling_id = str(uuid.uuid4())
def _callback(reply_future):
reply = reply_future.result()
with self._raylet_stats_lock:
self._profiling_stats[profiling_id] = reply
reporter_stub = self.reporter_stubs[node_id]
reply_future = reporter_stub.GetProfilingStats.future(
reporter_pb2.GetProfilingStatsRequest(pid=pid, duration=duration))
reply_future.add_done_callback(_callback)
return profiling_id
def check_profiling_status(self, profiling_id):
with self._raylet_stats_lock:
is_present = profiling_id in self._profiling_stats
if not is_present:
return {"status": "pending"}
reply = self._profiling_stats[profiling_id]
if reply.std_err:
return {"status": "error", "error": reply.std_err}
else:
return {"status": "finished"}
def get_profiling_info(self, profiling_id):
with self._raylet_stats_lock:
profiling_stats = self._profiling_stats.get(profiling_id)
assert profiling_stats, "profiling not finished"
return json.loads(profiling_stats.profiling_stats)
def kill_actor(self, actor_id, ip_address, port):
channel = grpc.insecure_channel("{}:{}".format(ip_address, int(port)))
stub = core_worker_pb2_grpc.CoreWorkerServiceStub(channel)
def _callback(reply_future):
_ = reply_future.result()
reply_future = stub.KillActor.future(
core_worker_pb2.KillActorRequest(
intended_actor_id=ray.utils.hex_to_binary(actor_id)))
reply_future.add_done_callback(_callback)
return {}
def run(self):
counter = 0
while True:
time.sleep(1.0)
replies = {}
try:
for node in self.nodes:
node_id = node["NodeID"]
stub = self.stubs[node_id]
reply = stub.GetNodeStats(
node_manager_pb2.GetNodeStatsRequest(
include_memory_info=self.include_memory_info),
timeout=2)
reply_dict = MessageToDict(reply)
reply_dict["nodeId"] = node_id
replies[node["NodeManagerAddress"]] = reply_dict
with self._raylet_stats_lock:
for address, reply_dict in replies.items():
self._raylet_stats[address] = reply_dict
except Exception:
logger.exception(traceback.format_exc())
finally:
counter += 1
# From time to time, check if new nodes have joined the cluster
# and update self.nodes
if counter % 10:
self._update_nodes()
class TuneCollector(threading.Thread):
"""Initialize collector worker thread.
    Args:
        logdir (str): Directory path where the status information of
            jobs and trials is stored.
        reload_interval (float): Interval (in seconds) between reloads of
            data from the logs.
"""
def __init__(self, reload_interval):
self._logdir = None
self._trial_records = {}
self._data_lock = threading.Lock()
self._reload_interval = reload_interval
self._trials_available = False
self._tensor_board_dir = ""
self._enable_tensor_board = False
self._errors = {}
super().__init__()
def get_stats(self):
with self._data_lock:
tensor_board_info = {
"tensorboard_current": self._logdir == self._tensor_board_dir,
"tensorboard_enabled": self._tensor_board_dir != ""
}
return {
"trial_records": copy.deepcopy(self._trial_records),
"errors": copy.deepcopy(self._errors),
"tensorboard": tensor_board_info
}
def set_experiment(self, experiment):
with self._data_lock:
if os.path.isdir(os.path.expanduser(experiment)):
self._logdir = os.path.expanduser(experiment)
return None, {"experiment": self._logdir}
else:
return "Not a Valid Directory", None
def enable_tensorboard(self):
with self._data_lock:
if not self._tensor_board_dir:
tb = program.TensorBoard()
tb.configure(argv=[None, "--logdir", str(self._logdir)])
tb.launch()
self._tensor_board_dir = self._logdir
def get_availability(self):
with self._data_lock:
return {
"available": True,
"trials_available": self._trials_available
}
def run(self):
while True:
with self._data_lock:
self.collect()
time.sleep(self._reload_interval)
def collect_errors(self, df):
sub_dirs = os.listdir(self._logdir)
trial_names = filter(
lambda d: os.path.isdir(os.path.join(self._logdir, d)), sub_dirs)
for trial in trial_names:
error_path = os.path.join(self._logdir, trial, "error.txt")
if os.path.isfile(error_path):
self._trials_available = True
with open(error_path) as f:
text = f.read()
self._errors[str(trial)] = {
"text": text,
"job_id": os.path.basename(self._logdir),
"trial_id": "No Trial ID"
}
other_data = df[df["logdir"].str.contains(trial)]
if len(other_data) > 0:
trial_id = other_data["trial_id"].values[0]
self._errors[str(trial)]["trial_id"] = str(trial_id)
if str(trial_id) in self._trial_records.keys():
self._trial_records[str(trial_id)]["error"] = text
self._trial_records[str(trial_id)][
"status"] = "ERROR"
def collect(self):
"""
Collects and cleans data on the running Tune experiment from the
Tune logs so that users can see this information in the front-end
client
"""
self._trial_records = {}
self._errors = {}
if not self._logdir:
return
# search through all the sub_directories in log directory
analysis = Analysis(str(self._logdir))
df = analysis.dataframe()
if len(df) == 0 or "trial_id" not in df.columns:
return
self._trials_available = True
# make sure that data will convert to JSON without error
df["trial_id_key"] = df["trial_id"].astype(str)
df = df.fillna(0)
trial_ids = df["trial_id"]
for i, value in df["trial_id"].iteritems():
if type(value) != str and type(value) != int:
trial_ids[i] = int(value)
df["trial_id"] = trial_ids
# convert df to python dict
df = df.set_index("trial_id_key")
trial_data = df.to_dict(orient="index")
# clean data and update class attribute
if len(trial_data) > 0:
trial_data = self.clean_trials(trial_data)
self._trial_records.update(trial_data)
self.collect_errors(df)
def clean_trials(self, trial_details):
first_trial = trial_details[list(trial_details.keys())[0]]
config_keys = []
float_keys = []
metric_keys = []
# list of static attributes for trial
default_names = [
"logdir", "time_this_iter_s", "done", "episodes_total",
"training_iteration", "timestamp", "timesteps_total",
"experiment_id", "date", "timestamp", "time_total_s", "pid",
"hostname", "node_ip", "time_since_restore",
"timesteps_since_restore", "iterations_since_restore",
"experiment_tag", "trial_id"
]
# filter attributes into floats, metrics, and config variables
for key, value in first_trial.items():
if isinstance(value, float):
float_keys.append(key)
if str(key).startswith("config/"):
config_keys.append(key)
elif key not in default_names:
metric_keys.append(key)
# clean data into a form that front-end client can handle
for trial, details in trial_details.items():
ts = os.path.getctime(details["logdir"])
formatted_time = datetime.datetime.fromtimestamp(ts).strftime(
"%Y-%m-%d %H:%M:%S")
details["start_time"] = formatted_time
details["params"] = {}
details["metrics"] = {}
# round all floats
for key in float_keys:
details[key] = round(details[key], 12)
# group together config attributes
for key in config_keys:
new_name = key[7:]
details["params"][new_name] = details[key]
details.pop(key)
# group together metric attributes
for key in metric_keys:
details["metrics"][key] = details[key]
details.pop(key)
if details["done"]:
details["status"] = "TERMINATED"
else:
details["status"] = "RUNNING"
details.pop("done")
details["job_id"] = os.path.basename(self._logdir)
details["error"] = "No Error"
return trial_details
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=("Parse Redis server for the "
"dashboard to connect to."))
parser.add_argument(
"--host",
required=True,
type=str,
help="The host to use for the HTTP server.")
parser.add_argument(
"--port",
required=True,
type=int,
help="The port to use for the HTTP server.")
parser.add_argument(
"--redis-address",
required=True,
type=str,
help="The address to use for Redis.")
parser.add_argument(
"--redis-password",
required=False,
type=str,
default=None,
help="the password to use for Redis")
parser.add_argument(
"--logging-level",
required=False,
type=str,
default=ray_constants.LOGGER_LEVEL,
choices=ray_constants.LOGGER_LEVEL_CHOICES,
help=ray_constants.LOGGER_LEVEL_HELP)
parser.add_argument(
"--logging-format",
required=False,
type=str,
default=ray_constants.LOGGER_FORMAT,
help=ray_constants.LOGGER_FORMAT_HELP)
parser.add_argument(
"--temp-dir",
required=False,
type=str,
default=None,
help="Specify the path of the temporary directory use by Ray process.")
args = parser.parse_args()
ray.utils.setup_logger(args.logging_level, args.logging_format)
# TODO(sang): Add a URL validation.
metrics_export_address = os.environ.get("METRICS_EXPORT_ADDRESS")
try:
dashboard = Dashboard(
args.host,
args.port,
args.redis_address,
args.temp_dir,
redis_password=args.redis_password,
metrics_export_address=metrics_export_address)
dashboard.run()
except Exception as e:
# Something went wrong, so push an error to all drivers.
redis_client = ray.services.create_redis_client(
args.redis_address, password=args.redis_password)
traceback_str = ray.utils.format_error_message(traceback.format_exc())
message = ("The dashboard on node {} failed with the following "
"error:\n{}".format(platform.node(), traceback_str))
ray.utils.push_error_to_driver_through_redis(
redis_client, ray_constants.DASHBOARD_DIED_ERROR, message)
if isinstance(e, OSError) and e.errno == errno.ENOENT:
logger.warning(message)
else:
raise e
|
the-stack_106_25555 | import pygame.font
class Button():
def __init__(self, ai_settings, screen, msg):
self.screen = screen
self.screen_rect = screen.get_rect()
self.width, self.height = 200, 50
self.button_color = (0, 255, 0)
self.text_color = (0, 0, 0)
self.font = pygame.font.SysFont(None, 48)
self.rect = pygame.Rect(0, 0, self.width, self.height)
self.rect.center = self.screen_rect.center
self.prep_msg(msg)
def prep_msg(self, msg):
self.msg_image = self.font.render(msg, True, self.text_color, self.button_color)
self.msg_image_rect = self.msg_image.get_rect()
self.msg_image_rect.center = self.rect.center
def draw_button(self):
self.screen.fill(self.button_color, self.rect)
self.screen.blit(self.msg_image, self.msg_image_rect) |
the-stack_106_25556 | import asyncio
import dataclasses
import json
import logging
import sys
from enum import Enum
from typing import Any, Dict, List, Optional, Set, Union, AsyncGenerator
from galaxy.api.consts import Feature, OSCompatibility
from galaxy.api.jsonrpc import ApplicationError, Connection
from galaxy.api.types import (
Achievement, Authentication, Game, GameLibrarySettings, GameTime, LocalGame, NextStep, UserInfo, UserPresence,
Subscription, SubscriptionGame
)
from galaxy.task_manager import TaskManager
from galaxy.api.importer import Importer, CollectionImporter
logger = logging.getLogger(__name__)
class JSONEncoder(json.JSONEncoder):
def default(self, o): # pylint: disable=method-hidden
if dataclasses.is_dataclass(o):
# filter None values
def dict_factory(elements):
return {k: v for k, v in elements if v is not None}
return dataclasses.asdict(o, dict_factory=dict_factory)
if isinstance(o, Enum):
return o.value
return super().default(o)
class Plugin:
"""Use and override methods of this class to create a new platform integration."""
def __init__(self, platform, version, reader, writer, handshake_token):
logger.info("Creating plugin for platform %s, version %s", platform.value, version)
self._platform = platform
self._version = version
self._features: Set[Feature] = set()
self._active = True
self._reader, self._writer = reader, writer
self._handshake_token = handshake_token
encoder = JSONEncoder()
self._connection = Connection(self._reader, self._writer, encoder)
self._persistent_cache = dict()
self._internal_task_manager = TaskManager("plugin internal")
self._external_task_manager = TaskManager("plugin external")
self._achievements_importer = Importer(
self._external_task_manager,
"achievements",
self.get_unlocked_achievements,
self.prepare_achievements_context,
self._game_achievements_import_success,
self._game_achievements_import_failure,
self._achievements_import_finished,
self.achievements_import_complete
)
self._game_time_importer = Importer(
self._external_task_manager,
"game times",
self.get_game_time,
self.prepare_game_times_context,
self._game_time_import_success,
self._game_time_import_failure,
self._game_times_import_finished,
self.game_times_import_complete
)
self._game_library_settings_importer = Importer(
self._external_task_manager,
"game library settings",
self.get_game_library_settings,
self.prepare_game_library_settings_context,
self._game_library_settings_import_success,
self._game_library_settings_import_failure,
self._game_library_settings_import_finished,
self.game_library_settings_import_complete
)
self._os_compatibility_importer = Importer(
self._external_task_manager,
"os compatibility",
self.get_os_compatibility,
self.prepare_os_compatibility_context,
self._os_compatibility_import_success,
self._os_compatibility_import_failure,
self._os_compatibility_import_finished,
self.os_compatibility_import_complete
)
self._user_presence_importer = Importer(
self._external_task_manager,
"users presence",
self.get_user_presence,
self.prepare_user_presence_context,
self._user_presence_import_success,
self._user_presence_import_failure,
self._user_presence_import_finished,
self.user_presence_import_complete
)
self._local_size_importer = Importer(
self._external_task_manager,
"local size",
self.get_local_size,
self.prepare_local_size_context,
self._local_size_import_success,
self._local_size_import_failure,
self._local_size_import_finished,
self.local_size_import_complete
)
self._subscription_games_importer = CollectionImporter(
self._subscriptions_games_partial_import_finished,
self._external_task_manager,
"subscription games",
self.get_subscription_games,
self.prepare_subscription_games_context,
self._subscription_games_import_success,
self._subscription_games_import_failure,
self._subscription_games_import_finished,
self.subscription_games_import_complete
)
# internal
self._register_method("shutdown", self._shutdown, internal=True)
self._register_method("get_capabilities", self._get_capabilities, internal=True, immediate=True)
self._register_method(
"initialize_cache",
self._initialize_cache,
internal=True,
immediate=True,
sensitive_params="data"
)
self._register_method("ping", self._ping, internal=True, immediate=True)
# implemented by developer
self._register_method(
"init_authentication",
self.authenticate,
sensitive_params=["stored_credentials"]
)
self._register_method(
"pass_login_credentials",
self.pass_login_credentials,
sensitive_params=["cookies", "credentials"]
)
self._register_method(
"import_owned_games",
self.get_owned_games,
result_name="owned_games"
)
self._detect_feature(Feature.ImportOwnedGames, ["get_owned_games"])
self._register_method("start_achievements_import", self._start_achievements_import)
self._detect_feature(Feature.ImportAchievements, ["get_unlocked_achievements"])
self._register_method("import_local_games", self.get_local_games, result_name="local_games")
self._detect_feature(Feature.ImportInstalledGames, ["get_local_games"])
self._register_notification("launch_game", self.launch_game)
self._detect_feature(Feature.LaunchGame, ["launch_game"])
self._register_notification("install_game", self.install_game)
self._detect_feature(Feature.InstallGame, ["install_game"])
self._register_notification("uninstall_game", self.uninstall_game)
self._detect_feature(Feature.UninstallGame, ["uninstall_game"])
self._register_notification("shutdown_platform_client", self.shutdown_platform_client)
self._detect_feature(Feature.ShutdownPlatformClient, ["shutdown_platform_client"])
self._register_notification("launch_platform_client", self.launch_platform_client)
self._detect_feature(Feature.LaunchPlatformClient, ["launch_platform_client"])
self._register_method("import_friends", self.get_friends, result_name="friend_info_list")
self._detect_feature(Feature.ImportFriends, ["get_friends"])
self._register_method("start_game_times_import", self._start_game_times_import)
self._detect_feature(Feature.ImportGameTime, ["get_game_time"])
self._register_method("start_game_library_settings_import", self._start_game_library_settings_import)
self._detect_feature(Feature.ImportGameLibrarySettings, ["get_game_library_settings"])
self._register_method("start_os_compatibility_import", self._start_os_compatibility_import)
self._detect_feature(Feature.ImportOSCompatibility, ["get_os_compatibility"])
self._register_method("start_user_presence_import", self._start_user_presence_import)
self._detect_feature(Feature.ImportUserPresence, ["get_user_presence"])
self._register_method("start_local_size_import", self._start_local_size_import)
self._detect_feature(Feature.ImportLocalSize, ["get_local_size"])
self._register_method("import_subscriptions", self.get_subscriptions, result_name="subscriptions")
self._detect_feature(Feature.ImportSubscriptions, ["get_subscriptions"])
self._register_method("start_subscription_games_import", self._start_subscription_games_import)
self._detect_feature(Feature.ImportSubscriptionGames, ["get_subscription_games"])
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
self.close()
await self.wait_closed()
@property
def features(self) -> List[Feature]:
return list(self._features)
@property
def persistent_cache(self) -> Dict[str, str]:
"""The cache is only available after the :meth:`~.handshake_complete()` is called.
"""
return self._persistent_cache
def _implements(self, methods: List[str]) -> bool:
for method in methods:
if method not in self.__class__.__dict__:
return False
return True
def _detect_feature(self, feature: Feature, methods: List[str]):
if self._implements(methods):
self._features.add(feature)
def _register_method(self, name, handler, result_name=None, internal=False, immediate=False,
sensitive_params=False):
def wrap_result(result):
if result_name:
result = {
result_name: result
}
return result
if immediate:
def method(*args, **kwargs):
result = handler(*args, **kwargs)
return wrap_result(result)
self._connection.register_method(name, method, True, sensitive_params)
else:
async def method(*args, **kwargs):
if not internal:
handler_ = self._wrap_external_method(handler, name)
else:
handler_ = handler
result = await handler_(*args, **kwargs)
return wrap_result(result)
self._connection.register_method(name, method, False, sensitive_params)
def _register_notification(self, name, handler, internal=False, immediate=False, sensitive_params=False):
if not internal and not immediate:
handler = self._wrap_external_method(handler, name)
self._connection.register_notification(name, handler, immediate, sensitive_params)
def _wrap_external_method(self, handler, name: str):
async def wrapper(*args, **kwargs):
return await self._external_task_manager.create_task(handler(*args, **kwargs), name, False)
return wrapper
async def run(self):
"""Plugin's main coroutine."""
await self._connection.run()
logger.debug("Plugin run loop finished")
def close(self) -> None:
if not self._active:
return
logger.info("Closing plugin")
self._connection.close()
self._external_task_manager.cancel()
async def shutdown():
try:
await asyncio.wait_for(self.shutdown(), 30)
except asyncio.TimeoutError:
logging.warning("Plugin shutdown timed out")
self._internal_task_manager.create_task(shutdown(), "shutdown")
self._active = False
async def wait_closed(self) -> None:
logger.debug("Waiting for plugin to close")
await self._external_task_manager.wait()
await self._internal_task_manager.wait()
await self._connection.wait_closed()
logger.debug("Plugin closed")
def create_task(self, coro, description):
"""Wrapper around asyncio.create_task - takes care of canceling tasks on shutdown"""
return self._external_task_manager.create_task(coro, description)
async def _pass_control(self):
while self._active:
try:
self.tick()
except Exception:
logger.exception("Unexpected exception raised in plugin tick")
await asyncio.sleep(1)
async def _shutdown(self):
logger.info("Shutting down")
self.close()
await self._external_task_manager.wait()
await self._internal_task_manager.wait()
def _get_capabilities(self):
return {
"platform_name": self._platform,
"features": self.features,
"token": self._handshake_token
}
def _initialize_cache(self, data: Dict):
self._persistent_cache = data
try:
self.handshake_complete()
except Exception:
logger.exception("Unhandled exception during `handshake_complete` step")
self._internal_task_manager.create_task(self._pass_control(), "tick")
@staticmethod
def _ping():
pass
# notifications
def store_credentials(self, credentials: Dict[str, Any]) -> None:
"""Notify the client to store authentication credentials.
Credentials are passed on the next authenticate call.
:param credentials: credentials that client will store; they are stored locally on a user pc
Example use case of store_credentials:
.. code-block:: python
:linenos:
async def pass_login_credentials(self, step, credentials, cookies):
if self.got_everything(credentials,cookies):
user_data = await self.parse_credentials(credentials,cookies)
else:
next_params = self.get_next_params(credentials,cookies)
next_cookies = self.get_next_cookies(credentials,cookies)
return NextStep("web_session", next_params, cookies=next_cookies)
self.store_credentials(user_data['credentials'])
return Authentication(user_data['userId'], user_data['username'])
"""
# temporary solution for persistent_cache vs credentials issue
self.persistent_cache["credentials"] = credentials # type: ignore
self._connection.send_notification("store_credentials", credentials, sensitive_params=True)
def add_game(self, game: Game) -> None:
"""Notify the client to add game to the list of owned games
of the currently authenticated user.
:param game: Game to add to the list of owned games
Example use case of add_game:
.. code-block:: python
:linenos:
async def check_for_new_games(self):
games = await self.get_owned_games()
for game in games:
if game not in self.owned_games_cache:
self.owned_games_cache.append(game)
self.add_game(game)
"""
params = {"owned_game": game}
self._connection.send_notification("owned_game_added", params)
def remove_game(self, game_id: str) -> None:
"""Notify the client to remove game from the list of owned games
of the currently authenticated user.
:param game_id: the id of the game to remove from the list of owned games
Example use case of remove_game:
.. code-block:: python
:linenos:
async def check_for_removed_games(self):
games = await self.get_owned_games()
for game in self.owned_games_cache:
if game not in games:
self.owned_games_cache.remove(game)
self.remove_game(game.game_id)
"""
params = {"game_id": game_id}
self._connection.send_notification("owned_game_removed", params)
def update_game(self, game: Game) -> None:
"""Notify the client to update the status of a game
owned by the currently authenticated user.
:param game: Game to update
"""
params = {"owned_game": game}
self._connection.send_notification("owned_game_updated", params)
def unlock_achievement(self, game_id: str, achievement: Achievement) -> None:
"""Notify the client to unlock an achievement for a specific game.
:param game_id: the id of the game for which to unlock an achievement.
:param achievement: achievement to unlock.
"""
params = {
"game_id": game_id,
"achievement": achievement
}
self._connection.send_notification("achievement_unlocked", params)
def _game_achievements_import_success(self, game_id: str, achievements: List[Achievement]) -> None:
params = {
"game_id": game_id,
"unlocked_achievements": achievements
}
self._connection.send_notification("game_achievements_import_success", params)
def _game_achievements_import_failure(self, game_id: str, error: ApplicationError) -> None:
params = {
"game_id": game_id,
"error": error.json()
}
self._connection.send_notification("game_achievements_import_failure", params)
def _achievements_import_finished(self) -> None:
self._connection.send_notification("achievements_import_finished", None)
def update_local_game_status(self, local_game: LocalGame) -> None:
"""Notify the client to update the status of a local game.
:param local_game: the LocalGame to update
Example use case triggered by the :meth:`.tick` method:
.. code-block:: python
:linenos:
:emphasize-lines: 5
async def _check_statuses(self):
for game in await self._get_local_games():
if game.status == self._cached_game_statuses.get(game.id):
continue
self.update_local_game_status(LocalGame(game.id, game.status))
self._cached_games_statuses[game.id] = game.status
await asyncio.sleep(5) # interval
def tick(self):
if self._check_statuses_task is None or self._check_statuses_task.done():
self._check_statuses_task = asyncio.create_task(self._check_statuses())
"""
params = {"local_game": local_game}
self._connection.send_notification("local_game_status_changed", params)
def add_friend(self, user: UserInfo) -> None:
"""Notify the client to add a user to friends list of the currently authenticated user.
:param user: UserInfo of a user that the client will add to friends list
"""
params = {"friend_info": user}
self._connection.send_notification("friend_added", params)
def remove_friend(self, user_id: str) -> None:
"""Notify the client to remove a user from friends list of the currently authenticated user.
:param user_id: id of the user to remove from friends list
"""
params = {"user_id": user_id}
self._connection.send_notification("friend_removed", params)
def update_friend_info(self, user: UserInfo) -> None:
"""Notify the client about the updated friend information.
:param user: UserInfo of a friend whose info was updated
"""
self._connection.send_notification("friend_updated", params={"friend_info": user})
def update_game_time(self, game_time: GameTime) -> None:
"""Notify the client to update game time for a game.
:param game_time: game time to update
"""
params = {"game_time": game_time}
self._connection.send_notification("game_time_updated", params)
def update_user_presence(self, user_id: str, user_presence: UserPresence) -> None:
"""Notify the client about the updated user presence information.
:param user_id: the id of the user whose presence information is updated
:param user_presence: presence information of the specified user
"""
self._connection.send_notification(
"user_presence_updated",
{
"user_id": user_id,
"presence": user_presence
}
)
def _game_time_import_success(self, game_id: str, game_time: GameTime) -> None:
params = {"game_time": game_time}
self._connection.send_notification("game_time_import_success", params)
def _game_time_import_failure(self, game_id: str, error: ApplicationError) -> None:
params = {
"game_id": game_id,
"error": error.json()
}
self._connection.send_notification("game_time_import_failure", params)
def _game_times_import_finished(self) -> None:
self._connection.send_notification("game_times_import_finished", None)
def _game_library_settings_import_success(self, game_id: str, game_library_settings: GameLibrarySettings) -> None:
params = {"game_library_settings": game_library_settings}
self._connection.send_notification("game_library_settings_import_success", params)
def _game_library_settings_import_failure(self, game_id: str, error: ApplicationError) -> None:
params = {
"game_id": game_id,
"error": error.json()
}
self._connection.send_notification("game_library_settings_import_failure", params)
def _game_library_settings_import_finished(self) -> None:
self._connection.send_notification("game_library_settings_import_finished", None)
def _os_compatibility_import_success(self, game_id: str, os_compatibility: Optional[OSCompatibility]) -> None:
self._connection.send_notification(
"os_compatibility_import_success",
{
"game_id": game_id,
"os_compatibility": os_compatibility
}
)
def _os_compatibility_import_failure(self, game_id: str, error: ApplicationError) -> None:
self._connection.send_notification(
"os_compatibility_import_failure",
{
"game_id": game_id,
"error": error.json()
}
)
def _os_compatibility_import_finished(self) -> None:
self._connection.send_notification("os_compatibility_import_finished", None)
def _user_presence_import_success(self, user_id: str, user_presence: UserPresence) -> None:
self._connection.send_notification(
"user_presence_import_success",
{
"user_id": user_id,
"presence": user_presence
}
)
def _user_presence_import_failure(self, user_id: str, error: ApplicationError) -> None:
self._connection.send_notification(
"user_presence_import_failure",
{
"user_id": user_id,
"error": error.json()
}
)
def _user_presence_import_finished(self) -> None:
self._connection.send_notification("user_presence_import_finished", None)
def _local_size_import_success(self, game_id: str, size: Optional[int]) -> None:
self._connection.send_notification(
"local_size_import_success",
{
"game_id": game_id,
"local_size": size
}
)
def _local_size_import_failure(self, game_id: str, error: ApplicationError) -> None:
self._connection.send_notification(
"local_size_import_failure",
{
"game_id": game_id,
"error": error.json()
}
)
def _local_size_import_finished(self) -> None:
self._connection.send_notification("local_size_import_finished", None)
def _subscription_games_import_success(self, subscription_name: str,
subscription_games: Optional[List[SubscriptionGame]]) -> None:
self._connection.send_notification(
"subscription_games_import_success",
{
"subscription_name": subscription_name,
"subscription_games": subscription_games
}
)
def _subscription_games_import_failure(self, subscription_name: str, error: ApplicationError) -> None:
self._connection.send_notification(
"subscription_games_import_failure",
{
"subscription_name": subscription_name,
"error": error.json()
}
)
def _subscriptions_games_partial_import_finished(self, subscription_name: str) -> None:
self._connection.send_notification(
"subscription_games_partial_import_finished",
{
"subscription_name": subscription_name
}
)
def _subscription_games_import_finished(self) -> None:
self._connection.send_notification("subscription_games_import_finished", None)
def lost_authentication(self) -> None:
"""Notify the client that integration has lost authentication for the
current user and is unable to perform actions which would require it.
"""
self._connection.send_notification("authentication_lost", None)
def push_cache(self) -> None:
"""Push local copy of the persistent cache to the GOG Galaxy Client replacing existing one.
"""
self._connection.send_notification(
"push_cache",
params={"data": self._persistent_cache},
sensitive_params="data"
)
async def refresh_credentials(self, params: Dict[str, Any], sensitive_params) -> Dict[str, Any]:
return await self._connection.send_request("refresh_credentials", params, sensitive_params)
# handlers
def handshake_complete(self) -> None:
"""This method is called right after the handshake with the GOG Galaxy Client is complete and
before any other operations are called by the GOG Galaxy Client.
Persistent cache is available when this method is called.
Override it if you need to do additional plugin initializations.
This method is called internally."""
def tick(self) -> None:
"""This method is called periodically.
Override it to implement periodical non-blocking tasks.
This method is called internally.
Example of possible override of the method:
.. code-block:: python
:linenos:
def tick(self):
if not self.checking_for_new_games:
asyncio.create_task(self.check_for_new_games())
if not self.checking_for_removed_games:
asyncio.create_task(self.check_for_removed_games())
if not self.updating_game_statuses:
asyncio.create_task(self.update_game_statuses())
"""
async def shutdown(self) -> None:
"""This method is called on integration shutdown.
Override it to implement tear down.
This method is called by the GOG Galaxy Client."""
# methods
async def authenticate(self, stored_credentials: Optional[Dict] = None) -> Union[NextStep, Authentication]:
"""Override this method to handle user authentication.
This method should either return :class:`~galaxy.api.types.Authentication` if the authentication is finished
or :class:`~galaxy.api.types.NextStep` if it requires going to another url.
This method is called by the GOG Galaxy Client.
:param stored_credentials: If the client received any credentials to store locally
in the previous session they will be passed here as a parameter.
Example of possible override of the method:
.. code-block:: python
:linenos:
async def authenticate(self, stored_credentials=None):
if not stored_credentials:
return NextStep("web_session", PARAMS, cookies=COOKIES)
else:
try:
user_data = self._authenticate(stored_credentials)
except AccessDenied:
raise InvalidCredentials()
return Authentication(user_data['userId'], user_data['username'])
"""
raise NotImplementedError()
async def pass_login_credentials(self, step: str, credentials: Dict[str, str], cookies: List[Dict[str, str]]) \
-> Union[NextStep, Authentication]:
"""This method is called if we return :class:`~galaxy.api.types.NextStep` from :meth:`.authenticate`
or :meth:`.pass_login_credentials`.
This method's parameters provide the data extracted from the web page navigation that previous NextStep finished on.
This method should either return :class:`~galaxy.api.types.Authentication` if the authentication is finished
or :class:`~galaxy.api.types.NextStep` if it requires going to another cef url.
This method is called by the GOG Galaxy Client.
:param step: deprecated.
        :param credentials: end_uri on which the previous NextStep finished.
:param cookies: cookies extracted from the end_uri site.
Example of possible override of the method:
.. code-block:: python
:linenos:
async def pass_login_credentials(self, step, credentials, cookies):
if self.got_everything(credentials,cookies):
user_data = await self.parse_credentials(credentials,cookies)
else:
next_params = self.get_next_params(credentials,cookies)
next_cookies = self.get_next_cookies(credentials,cookies)
return NextStep("web_session", next_params, cookies=next_cookies)
self.store_credentials(user_data['credentials'])
return Authentication(user_data['userId'], user_data['username'])
"""
raise NotImplementedError()
async def get_owned_games(self) -> List[Game]:
"""Override this method to return owned games for currently logged in user.
This method is called by the GOG Galaxy Client.
Example of possible override of the method:
.. code-block:: python
:linenos:
async def get_owned_games(self):
if not self.authenticated():
raise AuthenticationRequired()
games = self.retrieve_owned_games()
return games
"""
raise NotImplementedError()
async def _start_achievements_import(self, game_ids: List[str]) -> None:
await self._achievements_importer.start(game_ids)
async def prepare_achievements_context(self, game_ids: List[str]) -> Any:
"""Override this method to prepare context for get_unlocked_achievements.
This allows for optimizations like batch requests to platform API.
Default implementation returns None.
:param game_ids: the ids of the games for which achievements are imported
:return: context
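        Example of possible override of the method (a minimal sketch; the
        batched backend call is an assumed helper, not part of this API):
        .. code-block:: python
            :linenos:
            async def prepare_achievements_context(self, game_ids):
                # one batched request instead of one request per game
                return await self._fetch_achievements_for_games(game_ids)  # assumed helper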
"""
return None
async def get_unlocked_achievements(self, game_id: str, context: Any) -> List[Achievement]:
"""Override this method to return list of unlocked achievements
for the game identified by the provided game_id.
This method is called by import task initialized by GOG Galaxy Client.
:param game_id: the id of the game for which the achievements are returned
:param context: the value returned from :meth:`prepare_achievements_context`
:return: list of Achievement objects
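        Example of possible override of the method (a minimal sketch; the
        backend helper and its payload keys are assumptions):
        .. code-block:: python
            :linenos:
            async def get_unlocked_achievements(self, game_id, context):
                items = await self._fetch_achievements(game_id)  # assumed helper
                return [
                    Achievement(achievement_id=item['id'], unlock_time=item['unlocked_at'])
                    for item in items
                ]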
"""
raise NotImplementedError()
def achievements_import_complete(self):
"""Override this method to handle operations after achievements import is finished
(like updating cache).
"""
async def get_local_games(self) -> List[LocalGame]:
"""Override this method to return the list of
games present locally on the users pc.
This method is called by the GOG Galaxy Client.
Example of possible override of the method:
.. code-block:: python
:linenos:
async def get_local_games(self):
local_games = []
for game in self.games_present_on_user_pc:
local_game = LocalGame()
local_game.game_id = game.id
local_game.local_game_state = game.get_installation_status()
local_games.append(local_game)
return local_games
"""
raise NotImplementedError()
async def launch_game(self, game_id: str) -> None:
"""Override this method to launch the game
identified by the provided game_id.
This method is called by the GOG Galaxy Client.
:param str game_id: the id of the game to launch
Example of possible override of the method:
.. code-block:: python
:linenos:
async def launch_game(self, game_id):
await self.open_uri(f"start client://launchgame/{game_id}")
"""
raise NotImplementedError()
async def install_game(self, game_id: str) -> None:
"""Override this method to install the game
identified by the provided game_id.
This method is called by the GOG Galaxy Client.
:param str game_id: the id of the game to install
Example of possible override of the method:
.. code-block:: python
:linenos:
async def install_game(self, game_id):
await self.open_uri(f"start client://installgame/{game_id}")
"""
raise NotImplementedError()
async def uninstall_game(self, game_id: str) -> None:
"""Override this method to uninstall the game
identified by the provided game_id.
This method is called by the GOG Galaxy Client.
:param str game_id: the id of the game to uninstall
Example of possible override of the method:
.. code-block:: python
:linenos:
async def uninstall_game(self, game_id):
await self.open_uri(f"start client://uninstallgame/{game_id}")
"""
raise NotImplementedError()
async def shutdown_platform_client(self) -> None:
"""Override this method to gracefully terminate platform client.
This method is called by the GOG Galaxy Client."""
raise NotImplementedError()
async def launch_platform_client(self) -> None:
"""Override this method to launch platform client. Preferably minimized to tray.
This method is called by the GOG Galaxy Client."""
raise NotImplementedError()
async def get_friends(self) -> List[UserInfo]:
"""Override this method to return the friends list
of the currently authenticated user.
This method is called by the GOG Galaxy Client.
Example of possible override of the method:
.. code-block:: python
:linenos:
async def get_friends(self):
if not self._http_client.is_authenticated():
raise AuthenticationRequired()
friends = self.retrieve_friends()
return friends
"""
raise NotImplementedError()
async def _start_game_times_import(self, game_ids: List[str]) -> None:
await self._game_time_importer.start(game_ids)
async def prepare_game_times_context(self, game_ids: List[str]) -> Any:
"""Override this method to prepare context for get_game_time.
This allows for optimizations like batch requests to platform API.
Default implementation returns None.
:param game_ids: the ids of the games for which game time are imported
:return: context
"""
return None
async def get_game_time(self, game_id: str, context: Any) -> GameTime:
"""Override this method to return the game time for the game
identified by the provided game_id.
This method is called by import task initialized by GOG Galaxy Client.
:param game_id: the id of the game for which the game time is returned
:param context: the value returned from :meth:`prepare_game_times_context`
:return: GameTime object
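        Example of possible override of the method (a minimal sketch; the
        play-stats helper and its keys are assumptions):
        .. code-block:: python
            :linenos:
            async def get_game_time(self, game_id, context):
                stats = await self._fetch_play_stats(game_id)  # assumed helper
                # minutes played and a unix timestamp of the last session
                return GameTime(game_id, stats['minutes'], stats['last_played'])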
"""
raise NotImplementedError()
def game_times_import_complete(self) -> None:
"""Override this method to handle operations after game times import is finished
(like updating cache).
"""
async def _start_game_library_settings_import(self, game_ids: List[str]) -> None:
await self._game_library_settings_importer.start(game_ids)
async def prepare_game_library_settings_context(self, game_ids: List[str]) -> Any:
"""Override this method to prepare context for get_game_library_settings.
This allows for optimizations like batch requests to platform API.
Default implementation returns None.
:param game_ids: the ids of the games for which game library settings are imported
:return: context
"""
return None
async def get_game_library_settings(self, game_id: str, context: Any) -> GameLibrarySettings:
"""Override this method to return the game library settings for the game
identified by the provided game_id.
This method is called by import task initialized by GOG Galaxy Client.
:param game_id: the id of the game for which the game library settings are imported
:param context: the value returned from :meth:`prepare_game_library_settings_context`
:return: GameLibrarySettings object
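        Example of possible override of the method (a minimal sketch; the tag
        cache is an assumed attribute kept by the plugin):
        .. code-block:: python
            :linenos:
            async def get_game_library_settings(self, game_id, context):
                tags = self._cached_tags.get(game_id, [])  # assumed cache
                return GameLibrarySettings(game_id, tags=tags, hidden=False)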
"""
raise NotImplementedError()
def game_library_settings_import_complete(self) -> None:
"""Override this method to handle operations after game library settings import is finished
(like updating cache).
"""
async def _start_os_compatibility_import(self, game_ids: List[str]) -> None:
await self._os_compatibility_importer.start(game_ids)
async def prepare_os_compatibility_context(self, game_ids: List[str]) -> Any:
"""Override this method to prepare context for get_os_compatibility.
This allows for optimizations like batch requests to platform API.
Default implementation returns None.
:param game_ids: the ids of the games for which game os compatibility is imported
:return: context
"""
return None
async def get_os_compatibility(self, game_id: str, context: Any) -> Optional[OSCompatibility]:
"""Override this method to return the OS compatibility for the game with the provided game_id.
This method is called by import task initialized by GOG Galaxy Client.
:param game_id: the id of the game for which the game os compatibility is imported
:param context: the value returned from :meth:`prepare_os_compatibility_context`
        :return: OSCompatibility flags indicating compatible OSs, or None if compatibility is not known
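        Example of possible override of the method (a minimal sketch; the
        platform metadata helper is an assumption):
        .. code-block:: python
            :linenos:
            async def get_os_compatibility(self, game_id, context):
                platforms = await self._fetch_supported_platforms(game_id)  # assumed helper
                compatibility = OSCompatibility(0)
                if 'windows' in platforms:
                    compatibility |= OSCompatibility.Windows
                if 'mac' in platforms:
                    compatibility |= OSCompatibility.MacOS
                return compatibility or None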
"""
raise NotImplementedError()
def os_compatibility_import_complete(self) -> None:
"""Override this method to handle operations after OS compatibility import is finished (like updating cache)."""
async def _start_user_presence_import(self, user_id_list: List[str]) -> None:
await self._user_presence_importer.start(user_id_list)
async def prepare_user_presence_context(self, user_id_list: List[str]) -> Any:
"""Override this method to prepare context for :meth:`get_user_presence`.
This allows for optimizations like batch requests to platform API.
Default implementation returns None.
:param user_id_list: the ids of the users for whom presence information is imported
:return: context
"""
return None
async def get_user_presence(self, user_id: str, context: Any) -> UserPresence:
"""Override this method to return presence information for the user with the provided user_id.
This method is called by import task initialized by GOG Galaxy Client.
:param user_id: the id of the user for whom presence information is imported
:param context: the value returned from :meth:`prepare_user_presence_context`
:return: UserPresence presence information of the provided user
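        Example of possible override of the method (a minimal sketch; the
        presence helper and its keys are assumptions, and ``PresenceState``
        would need to be imported from ``galaxy.api.consts``):
        .. code-block:: python
            :linenos:
            async def get_user_presence(self, user_id, context):
                info = await self._fetch_presence(user_id)  # assumed helper
                state = PresenceState.Online if info['online'] else PresenceState.Offline
                return UserPresence(presence_state=state, game_id=info.get('game_id'))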
"""
raise NotImplementedError()
def user_presence_import_complete(self) -> None:
"""Override this method to handle operations after presence import is finished (like updating cache)."""
async def _start_local_size_import(self, game_ids: List[str]) -> None:
await self._local_size_importer.start(game_ids)
async def prepare_local_size_context(self, game_ids: List[str]) -> Any:
"""Override this method to prepare context for :meth:`get_local_size`
Default implementation returns None.
:param game_ids: the ids of the games for which information about size is imported
:return: context
"""
return None
async def get_local_size(self, game_id: str, context: Any) -> Optional[int]:
"""Override this method to return installed game size.
.. note::
It is preferable to avoid iterating over local game files when overriding this method.
If possible, please use a more efficient way of game size retrieval.
:param game_id: the id of the installed game
:param context: the value returned from :meth:`prepare_local_size_context`
:return: the size of the game on a user-owned storage device (in bytes) or `None` if the size cannot be determined
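        Example of possible override of the method (a minimal sketch that reads
        a size recorded in an install manifest instead of walking game files;
        the manifest helper is an assumption):
        .. code-block:: python
            :linenos:
            async def get_local_size(self, game_id, context):
                manifest = self._read_install_manifest(game_id)  # assumed helper
                if manifest is None:
                    return None
                return manifest.get('size_on_disk')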
"""
raise NotImplementedError()
def local_size_import_complete(self) -> None:
"""Override this method to handle operations after local game size import is finished (like updating cache)."""
async def get_subscriptions(self) -> List[Subscription]:
"""Override this method to return a list of
Subscriptions available on platform.
This method is called by the GOG Galaxy Client.
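        Example of possible override of the method (a minimal sketch; the
        backend helper and its payload keys are assumptions):
        .. code-block:: python
            :linenos:
            async def get_subscriptions(self):
                subs = await self._fetch_subscriptions()  # assumed helper
                return [
                    Subscription(subscription_name=s['name'], owned=s['owned'])
                    for s in subs
                ]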
"""
raise NotImplementedError()
async def _start_subscription_games_import(self, subscription_names: List[str]) -> None:
await self._subscription_games_importer.start(subscription_names)
async def prepare_subscription_games_context(self, subscription_names: List[str]) -> Any:
"""Override this method to prepare context for :meth:`get_subscription_games`
Default implementation returns None.
:param subscription_names: the names of the subscriptions' for which subscriptions games are imported
:return: context
"""
return None
async def get_subscription_games(self, subscription_name: str, context: Any) -> AsyncGenerator[
List[SubscriptionGame], None]:
"""Override this method to provide SubscriptionGames for a given subscription.
This method should `yield` a list of SubscriptionGames -> yield [sub_games]
This method will only be used if :meth:`get_subscriptions` has been implemented.
:param context: the value returned from :meth:`prepare_subscription_games_context`
        :return: a generator object that yields SubscriptionGames
.. code-block:: python
:linenos:
            async def get_subscription_games(subscription_name: str, context: Any):
                page = 0
                while True:
                    games_page = await self._get_subscriptions_from_backend(subscription_name, page)
                    if not games_page:
                        return
                    yield [SubGame(game['game_id'], game['game_title']) for game in games_page]
                    page += 1
"""
raise NotImplementedError()
def subscription_games_import_complete(self) -> None:
"""Override this method to handle operations after
subscription games import is finished (like updating cache).
"""
def create_and_run_plugin(plugin_class, argv):
"""Call this method as an entry point for the implemented integration.
:param plugin_class: your plugin class.
:param argv: command line arguments with which the script was started.
Example of possible use of the method:
.. code-block:: python
:linenos:
def main():
create_and_run_plugin(PlatformPlugin, sys.argv)
if __name__ == "__main__":
main()
"""
if len(argv) < 3:
logger.critical("Not enough parameters, required: token, port")
sys.exit(1)
token = argv[1]
try:
port = int(argv[2])
except ValueError:
logger.critical("Failed to parse port value: %s", argv[2])
sys.exit(2)
if not (1 <= port <= 65535):
logger.critical("Port value out of range (1, 65535)")
sys.exit(3)
if not issubclass(plugin_class, Plugin):
logger.critical("plugin_class must be subclass of Plugin")
sys.exit(4)
async def coroutine():
reader, writer = await asyncio.open_connection("127.0.0.1", port)
try:
extra_info = writer.get_extra_info("sockname")
logger.info("Using local address: %s:%u", *extra_info)
async with plugin_class(reader, writer, token) as plugin:
await plugin.run()
finally:
writer.close()
await writer.wait_closed()
try:
if sys.platform == "win32":
asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
asyncio.run(coroutine())
except Exception:
logger.exception("Error while running plugin")
sys.exit(5)
|
the-stack_106_25558 | #!/usr/bin/env python
#
# @author Jorge Santos
# License: 3-Clause BSD
import actionlib
import copy
import rospy
import nav_msgs.srv as nav_srvs
import mbf_msgs.msg as mbf_msgs
import move_base_msgs.msg as mb_msgs
from dynamic_reconfigure.client import Client
from dynamic_reconfigure.server import Server
from geometry_msgs.msg import PoseStamped
from move_base.cfg import MoveBaseConfig
"""
move_base legacy relay node:
Relays old move_base actions to the new mbf move_base action, similar but with richer result and feedback.
We also relay the simple goal topic published by RViz, the make_plan service and dynamic reconfiguration
calls (note that some parameters have changed names; see http://wiki.ros.org/move_base_flex for details)
"""
# keep configured base local and global planners to send to MBF
bgp = None
blp = None
def simple_goal_cb(msg):
mbf_mb_ac.send_goal(mbf_msgs.MoveBaseGoal(target_pose=msg, planner=bgp, controller=blp))
rospy.logdebug("Relaying move_base_simple/goal pose to mbf")
def mb_execute_cb(msg):
mbf_mb_ac.send_goal(mbf_msgs.MoveBaseGoal(target_pose=msg.target_pose, planner=bgp, controller=blp),
feedback_cb=mbf_feedback_cb)
rospy.logdebug("Relaying legacy move_base goal to mbf")
mbf_mb_ac.wait_for_result()
status = mbf_mb_ac.get_state()
result = mbf_mb_ac.get_result()
rospy.logdebug("MBF execution completed with result [%d]: %s", result.outcome, result.message)
if result.outcome == mbf_msgs.MoveBaseResult.SUCCESS:
mb_as.set_succeeded(mb_msgs.MoveBaseResult(), "Goal reached.")
else:
mb_as.set_aborted(mb_msgs.MoveBaseResult(), result.message)
def make_plan_cb(request):
mbf_gp_ac.send_goal(mbf_msgs.GetPathGoal(start_pose=request.start, target_pose=request.goal,
use_start_pose=bool(request.start.header.frame_id),
planner=bgp, tolerance=request.tolerance))
rospy.logdebug("Relaying legacy make_plan service to mbf get_path action server")
mbf_gp_ac.wait_for_result()
status = mbf_gp_ac.get_state()
result = mbf_gp_ac.get_result()
rospy.logdebug("MBF get_path execution completed with result [%d]: %s", result.outcome, result.message)
if result.outcome == mbf_msgs.GetPathResult.SUCCESS:
return nav_srvs.GetPlanResponse(plan=result.path)
def mbf_feedback_cb(feedback):
mb_as.publish_feedback(mb_msgs.MoveBaseFeedback(base_position=feedback.current_pose))
# Global flag (added by wangbin) used to skip the first reconfigure callback, whose parameters can be overwritten elsewhere before the relay is ready
global first_entry
first_entry = True
def mb_reconf_cb(config, level):
rospy.logdebug("Relaying legacy move_base reconfigure request to mbf")
if not hasattr(mb_reconf_cb, "default_config"):
mb_reconf_cb.default_config = copy.deepcopy(config)
if config.get('restore_defaults'):
config = mb_reconf_cb.default_config
mbf_config = copy.deepcopy(config)
print("++++++++++++++++++ Move Base Legacy Relay - reconfigure +++++++++++++++++++++++++++++++")
print(mbf_config)
global first_entry
if first_entry:
print("++++++++++++++++++ Move Base Legacy Relay - reconfigure - First Time SKIP +++++++++++++")
first_entry = False
return config
print("++++++++++++++++++ Move Base Legacy Relay - reconfigure - NOT First Time Continue +++++++++")
# Map move_base legacy parameters to new mbf ones, and drop those not supported
# mbf doesn't allow changing plugins dynamically, but we can provide them in the
# action goal, so we keep both base_local_planner and base_global_planner
if 'base_local_planner' in mbf_config:
global blp
blp = mbf_config.pop('base_local_planner')
if 'controller_frequency' in mbf_config:
mbf_config['controller_frequency'] = mbf_config.pop('controller_frequency')
if 'controller_patience' in mbf_config:
mbf_config['controller_patience'] = mbf_config.pop('controller_patience')
if 'max_controller_retries' in mbf_config:
mbf_config['controller_max_retries'] = mbf_config.pop('max_controller_retries')
if 'base_global_planner' in mbf_config:
global bgp
bgp = mbf_config.pop('base_global_planner')
if 'planner_frequency' in mbf_config:
mbf_config['planner_frequency'] = mbf_config.pop('planner_frequency')
if 'planner_patience' in mbf_config:
mbf_config['planner_patience'] = mbf_config.pop('planner_patience')
if 'max_planning_retries' in mbf_config:
mbf_config['planner_max_retries'] = mbf_config.pop('max_planning_retries')
if 'recovery_behavior_enabled' in mbf_config:
mbf_config['recovery_enabled'] = mbf_config.pop('recovery_behavior_enabled')
if 'conservative_reset_dist' in mbf_config:
mbf_config.pop('conservative_reset_dist') # no mbf equivalent for this!
if 'clearing_rotation_allowed' in mbf_config:
mbf_config.pop('clearing_rotation_allowed') # no mbf equivalent for this!
if 'make_plan_add_unreachable_goal' in mbf_config:
mbf_config.pop('make_plan_add_unreachable_goal') # no mbf equivalent for this!
if 'make_plan_clear_costmap' in mbf_config:
mbf_config.pop('make_plan_clear_costmap') # no mbf equivalent for this!
mbf_drc.update_configuration(mbf_config)
return config
if __name__ == '__main__':
rospy.init_node("move_base")
# TODO what happens with malformed target goal??? FAILURE or INVALID_POSE
# txt must be: "Aborting on goal because it was sent with an invalid quaternion"
# move_base_flex get_path and move_base action clients
mbf_mb_ac = actionlib.SimpleActionClient("move_base_flex/move_base", mbf_msgs.MoveBaseAction)
mbf_gp_ac = actionlib.SimpleActionClient("move_base_flex/get_path", mbf_msgs.GetPathAction)
mbf_mb_ac.wait_for_server(rospy.Duration(20))
mbf_gp_ac.wait_for_server(rospy.Duration(10))
# move_base_flex dynamic reconfigure client
mbf_drc = Client("move_base_flex", timeout=10)
# move_base simple topic and action server
mb_sg = rospy.Subscriber('move_base_simple/goal', PoseStamped, simple_goal_cb)
mb_as = actionlib.SimpleActionServer('move_base', mb_msgs.MoveBaseAction, mb_execute_cb, auto_start=False)
mb_as.start()
# move_base make_plan service
mb_mps = rospy.Service('~make_plan', nav_srvs.GetPlan, make_plan_cb)
# move_base dynamic reconfigure server
mb_drs = Server(MoveBaseConfig, mb_reconf_cb)
rospy.spin()
|
the-stack_106_25559 | """Write the output to a CSV file."""
from collections import defaultdict
import pandas as pd
from ..pylib import util
def csv_writer(args, rows):
"""Output the data."""
rows = sorted(rows, key=lambda r: (r["flora_id"], r["family"], r["taxon"]))
for row in rows:
row["raw_traits"] = [e._.data for e in row["doc"].ents]
del row["doc"]
build_columns(row)
df = pd.DataFrame(rows)
df["raw_traits"] = None
df.to_csv(args.csv_file, index=False)
def build_columns(row):
"""Expand values into separate columns."""
extras = set(""" sex location group """.split())
skips = extras | {"start", "end"}
columns = defaultdict(list)
for trait in row["raw_traits"]:
if trait["trait"] in ("part", "subpart"):
continue
if "part" not in trait:
continue
if "subpart" in trait:
label = f'{trait["part"]}_{trait["subpart"]}_{trait["trait"]}'
else:
label = f'{trait["part"]}_{trait["trait"]}'
trait = {
k: v for k, v in trait.items() if k not in ("part", "subpart", "trait")
}
header = sorted(v for k, v in trait.items() if k in extras)
header = ".".join([label] + header)
value = {k: v for k, v in trait.items() if k not in skips}
columns[header].append(value)
for header, value_list in columns.items():
keys = set()
all_strings = True
for data in value_list:
for key, value in data.items():
keys.add(key)
all_strings &= isinstance(value, str)
if len(keys) == 1 and all_strings:
value = {v[k] for v in value_list for k in v.keys()}
row[header] = ", ".join(sorted(value))
elif header.endswith("_size"):
extract_sizes(row, header, value_list)
else:
extract_traits(row, header, value_list)
return row
def extract_traits(row, header, value_list):
"""Extract non-size & non-value list traits."""
for i, extract in enumerate(value_list, 1):
for field, value in extract.items():
key = f"{header}.{i}.{field}"
row[key] = value
def extract_sizes(row, header, value_list):
"""Normalize size traits."""
for i, extract in enumerate(value_list, 1):
length_units = extract.get("length_units", extract.get("width_units"))
width_units = extract.get("width_units", extract.get("length_units"))
for field, value in extract.items():
key = f"{header}.{i}.{field}"
parts = field.split("_")
if parts[0] in ("trait", "part", "subpart"):
continue
if len(parts) > 1 and parts[1] == "units":
row[key] = value
elif parts[0] == "length":
row[key] = util.convert(value, length_units)
elif parts[0] == "width":
row[key] = util.convert(value, width_units)
elif parts[0].endswith("units"):
units = f"{parts[0]}_units"
row[key] = util.convert(value, extract.get(units))
|
the-stack_106_25561 | """
Support to interact with a Music Player Daemon.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.mpd/
"""
from datetime import timedelta
import logging
import os
import voluptuous as vol
from homeassistant.components.media_player import (
MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST, PLATFORM_SCHEMA,
SUPPORT_CLEAR_PLAYLIST, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK,
SUPPORT_SELECT_SOURCE, SUPPORT_SHUFFLE_SET, SUPPORT_STOP, SUPPORT_TURN_OFF,
SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP, MediaPlayerDevice)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT, STATE_OFF, STATE_PAUSED,
STATE_PLAYING)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
REQUIREMENTS = ['python-mpd2==1.0.0']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'MPD'
DEFAULT_PORT = 6600
PLAYLIST_UPDATE_INTERVAL = timedelta(seconds=120)
SUPPORT_MPD = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_STEP | \
SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_VOLUME_MUTE | \
SUPPORT_PLAY_MEDIA | SUPPORT_PLAY | SUPPORT_SELECT_SOURCE | \
SUPPORT_CLEAR_PLAYLIST | SUPPORT_SHUFFLE_SET | SUPPORT_SEEK | \
SUPPORT_STOP | SUPPORT_TURN_OFF | SUPPORT_TURN_ON
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the MPD platform."""
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
name = config.get(CONF_NAME)
password = config.get(CONF_PASSWORD)
device = MpdDevice(host, port, password, name)
add_devices([device], True)
class MpdDevice(MediaPlayerDevice):
"""Representation of a MPD server."""
# pylint: disable=no-member
def __init__(self, server, port, password, name):
"""Initialize the MPD device."""
import mpd
self.server = server
self.port = port
self._name = name
self.password = password
self._status = None
self._currentsong = None
self._playlists = []
self._currentplaylist = None
self._is_connected = False
self._muted = False
self._muted_volume = 0
# set up MPD client
self._client = mpd.MPDClient()
self._client.timeout = 5
self._client.idletimeout = None
def _connect(self):
"""Connect to MPD."""
import mpd
try:
self._client.connect(self.server, self.port)
if self.password is not None:
self._client.password(self.password)
except mpd.ConnectionError:
return
self._is_connected = True
def _disconnect(self):
"""Disconnect from MPD."""
import mpd
try:
self._client.disconnect()
except mpd.ConnectionError:
pass
self._is_connected = False
self._status = None
def _fetch_status(self):
"""Fetch status from MPD."""
self._status = self._client.status()
self._currentsong = self._client.currentsong()
self._update_playlists()
@property
def available(self):
"""Return true if MPD is available and connected."""
return self._is_connected
def update(self):
"""Get the latest data and update the state."""
import mpd
try:
if not self._is_connected:
self._connect()
self._fetch_status()
except (mpd.ConnectionError, OSError, BrokenPipeError, ValueError):
# Cleanly disconnect in case connection is not in valid state
self._disconnect()
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the media state."""
if self._status is None:
return STATE_OFF
elif self._status['state'] == 'play':
return STATE_PLAYING
elif self._status['state'] == 'pause':
return STATE_PAUSED
elif self._status['state'] == 'stop':
return STATE_OFF
return STATE_OFF
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def media_content_id(self):
"""Return the content ID of current playing media."""
return self._currentsong.get('file')
@property
def media_content_type(self):
"""Return the content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Return the duration of current playing media in seconds."""
# Time does not exist for streams
return self._currentsong.get('time')
@property
def media_title(self):
"""Return the title of current playing media."""
name = self._currentsong.get('name', None)
title = self._currentsong.get('title', None)
file_name = self._currentsong.get('file', None)
if name is None and title is None:
if file_name is None:
return "None"
return os.path.basename(file_name)
elif name is None:
return title
elif title is None:
return name
return '{}: {}'.format(name, title)
@property
def media_artist(self):
"""Return the artist of current playing media (Music track only)."""
return self._currentsong.get('artist')
@property
def media_album_name(self):
"""Return the album of current playing media (Music track only)."""
return self._currentsong.get('album')
@property
def volume_level(self):
"""Return the volume level."""
return int(self._status['volume'])/100
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_MPD
@property
def source(self):
"""Name of the current input source."""
return self._currentplaylist
@property
def source_list(self):
"""Return the list of available input sources."""
return self._playlists
def select_source(self, source):
"""Choose a different available playlist and play it."""
self.play_media(MEDIA_TYPE_PLAYLIST, source)
@Throttle(PLAYLIST_UPDATE_INTERVAL)
def _update_playlists(self, **kwargs):
"""Update available MPD playlists."""
self._playlists = []
for playlist_data in self._client.listplaylists():
self._playlists.append(playlist_data['playlist'])
def set_volume_level(self, volume):
"""Set volume of media player."""
self._client.setvol(int(volume * 100))
def volume_up(self):
"""Service to send the MPD the command for volume up."""
        current_volume = int(self._status['volume'])
        if current_volume < 100:
            # Clamp to MPD's valid 0-100 volume range
            self._client.setvol(min(current_volume + 5, 100))
    def volume_down(self):
        """Service to send the MPD the command for volume down."""
        current_volume = int(self._status['volume'])
        if current_volume > 0:
            self._client.setvol(max(current_volume - 5, 0))
def media_play(self):
"""Service to send the MPD the command for play/pause."""
self._client.pause(0)
def media_pause(self):
"""Service to send the MPD the command for play/pause."""
self._client.pause(1)
def media_stop(self):
"""Service to send the MPD the command for stop."""
self._client.stop()
def media_next_track(self):
"""Service to send the MPD the command for next track."""
self._client.next()
def media_previous_track(self):
"""Service to send the MPD the command for previous track."""
self._client.previous()
def mute_volume(self, mute):
"""Mute. Emulated with set_volume_level."""
if mute is True:
self._muted_volume = self.volume_level
self.set_volume_level(0)
elif mute is False:
self.set_volume_level(self._muted_volume)
self._muted = mute
def play_media(self, media_type, media_id, **kwargs):
"""Send the media player the command for playing a playlist."""
        _LOGGER.debug("Playing playlist: %s", media_id)
if media_type == MEDIA_TYPE_PLAYLIST:
if media_id in self._playlists:
self._currentplaylist = media_id
else:
self._currentplaylist = None
                _LOGGER.warning("Unknown playlist name %s", media_id)
self._client.clear()
self._client.load(media_id)
self._client.play()
else:
self._client.clear()
self._client.add(media_id)
self._client.play()
@property
def shuffle(self):
"""Boolean if shuffle is enabled."""
return bool(self._status['random'])
def set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
self._client.random(int(shuffle))
def turn_off(self):
"""Service to send the MPD the command to stop playing."""
self._client.stop()
def turn_on(self):
"""Service to send the MPD the command to start playing."""
self._client.play()
self._update_playlists(no_throttle=True)
def clear_playlist(self):
"""Clear players playlist."""
self._client.clear()
def media_seek(self, position):
"""Send seek command."""
self._client.seekcur(position)
|
the-stack_106_25563 | import copy
from django.conf.urls import url, include
from django.shortcuts import HttpResponse, render, redirect
from django.urls import reverse
from supermatt.utils.pager import PageInfo
from supermatt.utils.filter_code import FilterList
class BaseSupermatt(object):
    '''
    This class can fetch all of the registered model's data.
    '''
list_display = '__all__'
action_list = []
add_or_edit_model_form = None
filter_list = []
def __init__(self, model_class, site):
        # The model class for the current request; the class itself is passed in as a parameter
self.model_class = model_class
self.site = site
self.request = None
self.app_label = self.model_class._meta.app_label
self.model_name = self.model_class._meta.model_name
def get_add_or_edit_model_form(self):
if self.add_or_edit_model_form:
return self.add_or_edit_model_form
else:
from django.forms import ModelForm
            # Objects are created by classes, and classes are created by type().
            # The provided fields are found through the object, equivalent to:
# class MyModelForm(ModelForm):
# class Meta:
# model = self.model_class
# fields = '__all__'
_Meta = type('Meta', (object,), {'model':self.model_class, 'fields':'__all__'})
MyModelForm = type('MyModelForm', (ModelForm, ), {"Meta":_Meta})
return MyModelForm
@property
def urls(self):
info = self.model_class._meta.app_label, self.model_class._meta.model_name
urlpatterns = [
url(r'^$', self.changelist_view, name='%s_%s_changelist' % info),
url(r'^add/$', self.add_view, name='%s_%s_add' % info),
url(r'^(.+)/delete/$', self.delete_view, name='%s_%s_delete' % info),
url(r'^(.+)/change/$', self.change_view, name='%s_%s_change' % info),
]
return urlpatterns
def changelist_view(self, request):
        '''
        List (change-list) view.
        :param request:
        :return:
        '''
        # kept for later use
        self.request = request
        # Build the URL for the "Add" button on the page, appending the current
        # request.GET parameters so we can return to this exact page afterwards.
        # QueryDict
        from django.http.request import QueryDict
        # u = request.GET.urlencode()
        param_dict = QueryDict(mutable=True)  # mutable=True so elements can be modified
if request.GET:
param_dict['_changlistfilter'] = request.GET.urlencode()
print(request.GET.urlencode())
base_add_url = "{2}:{0}_{1}_add".format(self.app_label, self.model_name, self.site.name_space)
add_url = reverse(base_add_url) + '?' + param_dict.urlencode()
test_url = reverse(base_add_url) + '?' + request.GET.urlencode()
        # The data is ready; now build the page.
        # --- pagination start ---
condition = {}
base_page_url = reverse("{2}:{0}_{1}_changelist".format(self.app_label, self.model_name, self.site.name_space))
print('page', base_page_url)
        # QueryDict instance
page_param_dict = copy.deepcopy(request.GET)
page_param_dict._mutable = True
all_count = self.model_class.objects.filter(**condition).count()
page_obj = PageInfo(request.GET.get('page'), 3, all_count, base_page_url, page_param_dict)
result_list = self.model_class.objects.filter(**condition)[page_obj.start:page_obj.stop]
        # --- pagination end ---
        # ########### Action handling ###########
        # GET: render the action dropdown
# POST:
action_list = []
for i in self.action_list:
tpl = {'name':i.__name__, 'text':i.text}
action_list.append(tpl)
if request.method == "POST":
            # 1. Read the chosen action from the <select name="action"> field
func_name_str = request.POST.get('action')
ret = getattr(self, func_name_str)(request)
action_page_url = reverse("{2}:{0}_{1}_changelist".format(self.app_label, self.model_name, self.site.name_space))
if ret:
action_page_url = "{0}?{1}".format(action_page_url, request.GET.urlencode())
return redirect(action_page_url)
print('actionlist', action_list)
        # ########### Combined search / filters ###########
        # filter_list = self.filter_list
        # The following runs when the user visits the list page.
        # Start with an empty list that will hold the FilterList instances
        # handed to the template for rendering.
        filter_list = []
        # Loop over this object's own filter_list (user-defined),
        # which contains FilterOption objects.
        for option in self.filter_list:
            # If the option wraps a function
            if option.is_func:
                # Call it; the final return value must be a FilterList object
                data_list = option.field_or_func(self, option, request)
            else:
                # Otherwise it is a field name, e.g. 'username', 'ug', 'role'
                from django.db.models.fields.related import ForeignKey, ManyToManyField
                # Look up the field object on the model by its name
                field = self.model_class._meta.get_field(option.field_or_func)
                # Check whether it is a FK/M2M field.
                # If so, the queryset passed to FilterList is the related table's data;
                # otherwise it is the model's own data.
if isinstance(field, ForeignKey):
data_list = FilterList(option, field.rel.model.objects.all(), request)
elif isinstance(field, ManyToManyField):
print('2', field.rel.model) # role
data_list = FilterList(option, field.rel.model.objects.all(), request)
else:
data_list = FilterList(option, field.model.objects.all(), request)
filter_list.append(data_list)
context = {
'result_list':result_list,
'list_display':self.list_display,
'BaseSupermattObj':self,
'add_url':add_url,
'page_str':page_obj.pager(),
'action_list':action_list,
'filter_list':filter_list,
}
return render(request, 'change_list.html', context)
def add_view(self, request):
        '''
        Add (create) view.
        :param request:
        :return:
        '''
        # print(request.GET.get('_changlistfilter'))  # for later use
        # print('addddd', request.GET)  # for later use
if request.method == 'GET':
model_form_obj = self.get_add_or_edit_model_form()()
else:
model_form_obj = self.get_add_or_edit_model_form()(data=request.POST, files=request.FILES)
if model_form_obj.is_valid():
                # obj is the newly saved record
                obj = model_form_obj.save()
                # If opened as a popup, return the result to the calling window
                popid = request.GET.get('popup')
                if popid:
                    # Pass back the newly created record
return render(request, 'popup_response.html',
{'data_dict':{
'text':str(obj),
'pk':obj.pk,
'popid':popid}
})
else:
                    # On success, redirect back to the list page
                    # su/app01/userinfo + request.GET.get
base_list_url = reverse("{2}:{0}_{1}_changelist".format(self.app_label, self.model_name, self.site.name_space))
list_url = '{0}?{1}'.format(base_list_url, request.GET.get('_changlistfilter'))
return redirect(list_url)
context = {
'form': model_form_obj,
}
return render(request, 'add.html', context)
def delete_view(self, request, pk):
        # Fetch the record by pk and delete it,
        # then build the URL to redirect back to the previous list page.
if request.method == 'GET':
self.model_class.objects.filter(pk=pk).first().delete()
base_list_url = reverse("{2}:{0}_{1}_changelist".format(self.app_label, self.model_name, self.site.name_space))
list_url = '{0}?{1}'.format(base_list_url, request.GET.get('_changlistfilter'))
return redirect(list_url)
def change_view(self, request, pk):
        # 1. Get the parameters passed along via _changlistfilter
        # request.GET.get("_changlistfilter")
        # 2. Fetch the record and show it with its current values selected in the ModelForm
        # get_add_or_edit_model_form
        obj = self.model_class.objects.filter(pk=pk).first()
        if request.method == 'GET':
            if not obj:
                return HttpResponse('This id does not exist')
            # instance=obj pre-selects the current values as defaults
            model_form_obj = self.get_add_or_edit_model_form()(instance=obj)
            # 3. Render the page
        else:
            # Updating requires passing instance=
model_form_obj = self.get_add_or_edit_model_form()(data=request.POST, files=request.FILES, instance=obj)
if model_form_obj.is_valid():
model_form_obj.save()
base_list_url = reverse("{2}:{0}_{1}_changelist".format(self.app_label, self.model_name, self.site.name_space))
list_url = '{0}?{1}'.format(base_list_url, request.GET.get('_changlistfilter'))
return redirect(list_url)
context = {
'form':model_form_obj
}
return render(request, 'edit.html', context)
class SuperMattSite(object):
def __init__(self):
        '''
        Constructor.
        '''
self._registry = {}
self.name_space = 'supermatt'
self.app_name = 'supermatt'
    def register(self, model_class, m=BaseSupermatt):
self._registry[model_class] = m(model_class, self)
        '''
        self._registry ends up looking like:
        {
            UserInfo class: BaseSupermatt(UserInfo class, SuperMattSite instance)  # SuperMattUserInfo
            Role class: BaseSupermatt(Role class, SuperMattSite instance)
            XX class: BaseSupermatt(XX class, SuperMattSite instance)
        }
        '''
def login(self, request):
return HttpResponse('login')
def get_urls(self):
ret = [
url(r'^login/$', self.login, name='login'),
]
for model_class, supermatt_obj in self._registry.items():
# print(model_class._meta.app_label, model_class._meta.model_name, supermatt_obj)
            # Get the app label and model name from model_class
# http://127.0.0.1:8000/su/app01/role
app_label = model_class._meta.app_label
model_name = model_class._meta.model_name
ret.append(url(r'^%s/%s/' % (app_label, model_name), include(supermatt_obj.urls)))
return ret
@property
def urls(self):
        return self.get_urls(), self.app_name, self.name_space
site = SuperMattSite() |
the-stack_106_25565 | # -*- coding: utf-8 -*-
"""
Created on Fri May 10 03:44:30 2019
@author: Shani
"""
import os, sys
from PIL import Image
# open an image file (.bmp,.jpg,.png,.gif) you have in the working folder
imageFile = '3599.jpeg'
im1 = Image.open(imageFile)
# adjust width and height to your needs
width = 50
height = 50
# use one of these filter options to resize the image
im2 = im1.resize((width, height), Image.NEAREST) # use nearest neighbour
# best down-sizing filter
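# e.g. other Pillow filters that could be used here instead of NEAREST
# (illustrative alternatives, same call shape):
# im2 = im1.resize((width, height), Image.BILINEAR)   # linear interpolation
# im2 = im1.resize((width, height), Image.BICUBIC)    # cubic spline interpolation
# im2 = im1.resize((width, height), Image.ANTIALIAS)  # the "best down-sizing filter" above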
im2.save(imageFile)
|
the-stack_106_25566 | import json
import os
import re
import shutil
from argparse import ArgumentParser
import cv2
from circuit_recognizer.annotations import Annotation
from circuit_recognizer.utils import get_annotation_source_image, get_image_path
def get_source_dims(anno):
image = get_annotation_source_image(anno)
return image.shape[1], image.shape[0]
def load(img_path, labels_f, components, source=None):
if source is None:
source = img_path
image = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
return loads(image, labels_f.read(), components, source)
def from_line(line, width, height, components, source=None):
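    # Parses one YOLO-style label line: "<class-index> <cx> <cy> <w> <h> [confidence]",
    # where the box coordinates are normalized to [0, 1] relative to the image size.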
chunks = line.split(" ")
if len(chunks) == 5:
chunks.append(None)
label_index, cx, cy, box_w, box_h, confidence = chunks
label = components[int(label_index)]
cx = float(cx) * width
box_w = float(box_w) * width
cy = float(cy) * height
box_h = float(box_h) * height
x = cx - box_w / 2
y = cy - box_h / 2
right = x + box_w
bottom = y + box_h
return Annotation.from_component(label, ((x, y), (right, bottom)), source)
def loads(image, data_str, components, source=None):
width, height = image.shape[1], image.shape[0]
lines = (l for l in data_str.split("\n") if l.strip() != "")
return [from_line(line, width, height, components, source) for line in lines]
def dumps(annotations, components):
lines = []
width, height = get_source_dims(annotations[0])
for anno in annotations:
left, top, box_w, box_h = anno.bounding_box()
x = left + box_w / 2
y = top + box_h / 2
x, y, box_w, box_h = x / width, y / height, box_w / width, box_h / height
label_index = components.index(anno.label())
lines.append(f"{label_index} {x} {y} {box_w} {box_h}")
return "\n".join(lines)
def dump(annotations, f, components):
text = dumps(annotations, components)
f.write(text)
|
the-stack_106_25568 | #!/usr/bin/env python3
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Common libdot util code."""
import argparse
import base64
import hashlib
import importlib.machinery
import io
import logging
import logging.handlers
import os
from pathlib import Path
import subprocess
import sys
import time
import types
from typing import Dict, List, Optional, Union
import urllib.error
import urllib.request
# Require recent Python 3 versions as a sanity check.
# NB: We cannot require newer versions than CrOS itself supports.
assert (sys.version_info.major, sys.version_info.minor) >= (3, 6), (
'Python 3.6 or newer is required; found %s' % (sys.version,))
BIN_DIR = Path(__file__).resolve().parent
DIR = BIN_DIR.parent
LIBAPPS_DIR = DIR.parent
class ColoredFormatter(logging.Formatter):
"""Colorize warning/error messages automatically."""
_COLOR_MAPPING = {
'WARNING': '\033[1;33m',
'ERROR': '\033[1;31m'
}
_RESET = '\033[m'
def __init__(self, *args, **kwargs):
"""Initialize!"""
self._use_colors = 'NOCOLOR' not in os.environ
super().__init__(*args, **kwargs)
def format(self, record):
"""Formats |record| with color."""
msg = super().format(record)
color = self._COLOR_MAPPING.get(record.levelname)
if self._use_colors and color:
msg = '%s%s%s' % (color, msg, self._RESET)
return msg
def setup_logging(debug=False, quiet=0):
"""Setup the logging module."""
fmt = '%(asctime)s: %(levelname)-7s: '
if debug:
fmt += '%(filename)s:%(funcName)s: '
fmt += '%(message)s'
# 'Sat, 05 Oct 2013 18:58:50 -0400 (EST)'
datefmt = '%a, %d %b %Y %H:%M:%S %z'
tzname = time.strftime('%Z', time.localtime())
if tzname and ' ' not in tzname and len(tzname) <= 5:
# If the name is verbose, don't include it. Some systems like to use
# "Eastern Daylight Time" which is much too chatty.
datefmt += f' ({tzname})'
if debug:
level = logging.DEBUG
elif quiet <= 0:
level = logging.INFO
elif quiet <= 1:
level = logging.WARNING
elif quiet <= 2:
level = logging.ERROR
elif quiet <= 3:
level = logging.CRITICAL
formatter = ColoredFormatter(fmt, datefmt)
handler = logging.StreamHandler(stream=sys.stdout)
handler.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(handler)
logger.setLevel(level)
class ArgumentParser(argparse.ArgumentParser):
"""Custom parser to hold a consistent set of options & runtime env."""
def __init__(self, short_options=True, **kwargs):
"""Initialize!"""
super().__init__(**kwargs)
self.add_common_arguments(short_options=short_options)
def parse_args(self, args=None, namespace=None):
"""Parse all the |args| and save the results to |namespace|."""
# This will call our parse_known_args below, so don't use setup_logging.
namespace = argparse.ArgumentParser.parse_args(
self, args=args, namespace=namespace)
return namespace
def parse_known_args(self, args=None, namespace=None):
"""Parse all the |args| and save the results to |namespace|."""
namespace, unknown_args = argparse.ArgumentParser.parse_known_args(
self, args=args, namespace=namespace)
setup_logging(debug=namespace.debug, quiet=namespace.quiet)
return (namespace, unknown_args)
def add_common_arguments(self, short_options=True):
"""Add our custom/consistent set of command line flags."""
getopts = lambda *args: args if short_options else args[1:]
self.add_argument(*getopts('-d', '--debug'), action='store_true',
help='Run with debug output.')
self.add_argument(*getopts('-q', '--quiet'), action='count', default=0,
help='Use once to hide info messages, twice to hide '
'warnings, and thrice to hide errors.')
def touch(path):
"""Touch (and truncate) |path|."""
open(path, 'wb').close()
def unlink(path):
"""Remove |path| and ignore errors if it doesn't exist."""
try:
os.unlink(path)
except FileNotFoundError:
pass
def symlink(target, path):
"""Always symlink |path| to a relativized |target|."""
unlink(path)
path = os.path.realpath(path)
target = os.path.relpath(os.path.realpath(target), os.path.dirname(path))
logging.info('Symlinking %s -> %s', path, target)
os.symlink(target, path)
def cmdstr(cmd):
"""Return a string for the |cmd| list w/reasonable quoting."""
if isinstance(cmd, str):
return cmd
quoted = []
for arg in cmd:
if isinstance(arg, Path):
arg = str(arg)
if ' ' in arg:
arg = '"%s"' % (arg,)
quoted.append(arg)
return ' '.join(quoted)
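# e.g. cmdstr(['tar', '-xf', 'my archive.tar']) -> 'tar -xf "my archive.tar"'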
def run(cmd: List[str],
cmd_prefix: List[str] = None,
log_prefix: List[str] = None,
check: bool = True,
cwd: str = None,
extra_env: Dict[str, str] = None,
**kwargs):
"""Run |cmd| inside of |cwd| and exit if it fails.
Args:
cmd: The command to run.
cmd_prefix: (Unlogged) prefix for the command to run. Useful for passing
interpreters like `java` or `python` but omitting from default output.
log_prefix: Prefix for logging the command, but not running. Useful for
wrapper scripts that get executed directly and use |cmd_prefix|.
check: Whether to exit if |cmd| execution fails.
cwd: The working directory to run |cmd| inside of.
extra_env: Extra environment settings to set before running.
Returns:
A subprocess.CompletedProcess instance.
"""
# Python 3.6 doesn't support capture_output.
if sys.version_info < (3, 7):
capture_output = kwargs.pop('capture_output', None)
if capture_output:
assert 'stdout' not in kwargs and 'stderr' not in kwargs
kwargs['stdout'] = subprocess.PIPE
kwargs['stderr'] = subprocess.PIPE
# The |env| setting specifies the entire environment, so we need to manually
# merge our |extra_env| settings into it before passing it along.
if extra_env is not None:
env = kwargs.pop('env', os.environ)
env = env.copy()
env.update(extra_env)
kwargs['env'] = env
if not log_prefix:
log_prefix = []
log_cmd = log_prefix + cmd
if not cmd_prefix:
cmd_prefix = []
real_cmd = cmd_prefix + cmd
if cwd is None:
cwd = os.getcwd()
logging.info('Running: %s\n (cwd = %s)', cmdstr(log_cmd), cwd)
if cmd_prefix:
logging.debug('Real full command: %s', cmdstr(real_cmd))
result = subprocess.run(real_cmd, cwd=cwd, check=False, **kwargs)
if check and result.returncode:
logging.error('Running %s failed!', log_cmd[0])
if result.stdout is not None:
logging.error('stdout:\n%s', result.stdout)
if result.stderr is not None:
logging.error('stderr:\n%s', result.stderr)
sys.exit(result.returncode)
return result
def sha256(path: Union[Path, str]) -> str:
"""Return sha256 hex digest of |path|."""
# The file shouldn't be too big to load into memory, so be lazy.
with open(path, 'rb') as fp:
data = fp.read()
m = hashlib.sha256()
m.update(data)
return m.hexdigest()
def unpack(archive: Union[Path, str],
cwd: Optional[Path] = None,
files: Optional[List[Union[Path, str]]] = ()):
"""Unpack |archive| into |cwd|."""
archive = Path(archive)
if cwd is None:
cwd = Path.cwd()
if files:
files = ['--'] + list(files)
else:
files = []
# Try to make symlink usage easier in Windows.
extra_env = {
'MSYS': 'winsymlinks:nativestrict',
}
logging.info('Unpacking %s', archive.name)
# We use relpath here to help out tar on platforms where it doesn't like
# paths with colons in them (e.g. Windows). We have to construct the full
# before running through relpath as relative archives will implicitly be
# checked against os.getcwd rather than the explicit cwd.
src = os.path.relpath(cwd / archive, cwd)
run(['tar', '--no-same-owner', '-xf', src] + files, cwd=cwd,
extra_env=extra_env)
def pack(archive: Union[Path, str],
paths: List[Union[Path, str]],
cwd: Optional[Path] = None,
exclude: Optional[List[Union[Path, str]]] = ()):
"""Create an |archive| with |paths| in |cwd|.
The output will use XZ compression.
"""
archive = Path(archive)
if cwd is None:
cwd = Path.cwd()
if archive.suffix == '.xz':
archive = archive.with_suffix('')
# Make sure all the paths have sane permissions.
def walk(path):
if path.is_symlink():
return
elif path.is_dir():
# All dirs should be 755.
mode = path.stat().st_mode & 0o777
if mode != 0o755:
path.chmod(0o755)
for subpath in path.glob('*'):
walk(subpath)
elif path.is_file():
# All scripts should be 755 while other files should be 644.
mode = path.stat().st_mode & 0o777
if mode in (0o755, 0o644):
return
if mode & 0o111:
path.chmod(0o755)
else:
path.chmod(0o644)
else:
raise ValueError(f'{path}: unknown file type')
logging.info('Forcing sane permissions on inputs')
for path in paths:
walk(cwd / path)
logging.info('Creating %s tarball', archive.name)
# We use relpath here to help out tar on platforms where it doesn't like
# paths with colons in them (e.g. Windows). We have to construct the full
# before running through relpath as relative archives will implicitly be
# checked against os.getcwd rather than the explicit cwd.
tar = os.path.relpath(cwd / archive, cwd)
run(['tar', '--owner=0', '--group=0', '-cf', tar] +
[f'--exclude={x}' for x in exclude] + ['--'] + paths, cwd=cwd)
logging.info('Compressing tarball')
run(['xz', '-f', '-T0', '-9', tar], cwd=cwd)
def fetch_data(uri: str, output=None, verbose: bool = False, b64: bool = False):
"""Fetch |uri| and write the results to |output| (or return BytesIO)."""
# This is the timeout used on each blocking operation, not the entire
# life of the connection. So it's used for initial urlopen and for each
# read attempt (which may be partial reads). 5 minutes should be fine.
TIMEOUT = 5 * 60
if output is None:
output = io.BytesIO()
try:
with urllib.request.urlopen(uri, timeout=TIMEOUT) as infp:
mb = 0
length = infp.length
while True:
data = infp.read(1024 * 1024)
if not data:
break
# Show a simple progress bar if the user is interactive.
if verbose:
mb += 1
print('~%i MiB downloaded' % (mb,), end='')
if length:
percent = mb * 1024 * 1024 * 100 / length
print(' (%.2f%%)' % (percent,), end='')
print('\r', end='', flush=True)
if b64:
data = base64.b64decode(data)
output.write(data)
except urllib.error.HTTPError as e:
logging.error('%s: %s', uri, e)
sys.exit(1)
return output
def fetch(uri, output, b64=False):
"""Download |uri| and save it to |output|."""
output = os.path.abspath(output)
distdir, name = os.path.split(output)
if os.path.exists(output):
logging.info('Using existing download: %s', name)
return
logging.info('Downloading %s to %s', uri, output)
os.makedirs(distdir, exist_ok=True)
# Use kokoro build cache or Gentoo distdir if available.
for envvar in ('KOKORO_GFILE_DIR', 'DISTDIR'):
cache_dir = os.getenv(envvar)
if cache_dir:
cache_file = os.path.join(cache_dir, name)
if os.path.exists(cache_file):
logging.info(' Cache hit via %s', envvar)
symlink(cache_file, output)
return
# Don't be verbose if running on CI systems.
verbose = os.isatty(sys.stdout.fileno())
# We use urllib rather than wget or curl to avoid external utils & libs.
# This seems to be good enough for our needs.
tmpfile = output + '.tmp'
for _ in range(0, 5):
try:
with open(tmpfile, 'wb') as outfp:
fetch_data(uri, outfp, verbose=verbose, b64=b64)
break
except ConnectionError as e:
time.sleep(1)
logging.warning('Download failed; retrying: %s', e)
else:
        logging.error('Unable to download; giving up')
unlink(tmpfile)
sys.exit(1)
# Clear the progress bar.
if verbose:
print(' ' * 80, end='\r')
os.rename(tmpfile, output)
def node_and_npm_setup():
"""Download our copies of node & npm to our tree and updates env ($PATH)."""
# We have to update modules first as it'll nuke the dir node lives under.
node.modules_update()
node.update()
def load_module(name, path):
"""Load a module from the filesystem.
Args:
name: The name of the new module to import.
path: The full path to the file to import.
"""
loader = importlib.machinery.SourceFileLoader(name, path)
module = types.ModuleType(loader.name)
loader.exec_module(module)
return module
class HelperProgram:
"""Wrapper around local programs that get reused by other projects.
This allows people to do inprocess execution rather than having to fork+exec
another Python instance.
This allows us to avoid filesystem symlinks (which aren't portable), and to
avoid naming programs with .py extensions, and to avoid clashes between
projects that use the same program name (e.g. "import lint" would confuse
libdot/bin/lint & nassh/bin/lint), and to avoid merging all libdot helpers
into the single libdot.py module.
"""
_BIN_DIR = BIN_DIR
def __init__(self, name, path=None):
"""Initialize.
Args:
name: The base name of the program to import.
path: The full path to the file. It defaults to libdot/bin/|name|.
"""
self._name = name
if path is None:
path = os.path.join(self._BIN_DIR, name)
self._path = path
self._module_cache = None
@property
def _module(self):
"""Load & cache the program module."""
if self._module_cache is None:
self._module_cache = load_module(self._name, self._path)
return self._module_cache
def __getattr__(self, name):
"""Dynamic forwarder to module members."""
return getattr(self._module, name)
# Wrappers around libdot/bin/ programs for other tools to access directly.
closure_compiler = HelperProgram('closure-compiler')
concat = HelperProgram('concat')
cpplint = HelperProgram('cpplint')
eslint = HelperProgram('eslint')
headless_chrome = HelperProgram('headless-chrome')
jsonlint = HelperProgram('jsonlint')
lint = HelperProgram('lint')
load_tests = HelperProgram('load_tests')
mdlint = HelperProgram('mdlint')
minify_translations = HelperProgram('minify-translations')
node = HelperProgram('node')
npm = HelperProgram('npm')
pylint = HelperProgram('pylint')
|
the-stack_106_25570 | import asyncio
import itertools
import logging
import threading
# pylint: disable=invalid-name
# pylint: disable=global-statement
try:
# Python 3.8 or newer has a suitable process watcher
asyncio.ThreadedChildWatcher
except AttributeError:
# backport the Python 3.8 threaded child watcher
import os
import warnings
# Python 3.7 preferred API
_get_running_loop = getattr(asyncio, "get_running_loop", asyncio.get_event_loop)
class _Py38ThreadedChildWatcher(asyncio.AbstractChildWatcher):
def __init__(self):
self._pid_counter = itertools.count(0)
self._threads = {}
def is_active(self):
return True
def close(self):
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __del__(self, _warn=warnings.warn):
threads = [t for t in list(self._threads.values()) if t.is_alive()]
if threads:
_warn(
f"{self.__class__} has registered but not finished child processes",
ResourceWarning,
source=self,
)
def add_child_handler(self, pid, callback, *args):
loop = _get_running_loop()
thread = threading.Thread(
target=self._do_waitpid,
name=f"waitpid-{next(self._pid_counter)}",
args=(loop, pid, callback, args),
daemon=True,
)
self._threads[pid] = thread
thread.start()
def remove_child_handler(self, pid):
# asyncio never calls remove_child_handler() !!!
# The method is no-op but is implemented because
# abstract base class requires it
return True
def attach_loop(self, loop):
pass
def _do_waitpid(self, loop, expected_pid, callback, args):
assert expected_pid > 0
try:
pid, status = os.waitpid(expected_pid, 0)
except ChildProcessError:
# The child process is already reaped
# (may happen if waitpid() is called elsewhere).
pid = expected_pid
returncode = 255
logger.warning(
"Unknown child process pid %d, will report returncode 255", pid
)
else:
if os.WIFSIGNALED(status):
returncode = -os.WTERMSIG(status)
elif os.WIFEXITED(status):
returncode = os.WEXITSTATUS(status)
else:
returncode = status
if loop.get_debug():
logger.debug(
"process %s exited with returncode %s", expected_pid, returncode
)
if loop.is_closed():
logger.warning("Loop %r that handles pid %r is closed", loop, pid)
else:
loop.call_soon_threadsafe(callback, pid, returncode, *args)
self._threads.pop(expected_pid)
# add the watcher to the loop policy
asyncio.get_event_loop_policy().set_child_watcher(_Py38ThreadedChildWatcher())
__all__ = ["EventLoopThread", "get_event_loop", "stop_event_loop", "run_coroutine"]
logger = logging.getLogger(__name__)
class EventLoopThread(threading.Thread):
loop = None
_count = itertools.count(0)
def __init__(self):
name = f"{type(self).__name__}-{next(self._count)}"
super().__init__(name=name, daemon=True)
def __repr__(self):
loop, r, c, d = self.loop, False, True, False
if loop is not None:
r, c, d = loop.is_running(), loop.is_closed(), loop.get_debug()
return (
f"<{type(self).__name__} {self.name} id={self.ident} "
f"running={r} closed={c} debug={d}>"
)
def run(self):
self.loop = loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
loop.run_forever()
finally:
try:
shutdown_asyncgens = loop.shutdown_asyncgens()
except AttributeError:
pass
else:
loop.run_until_complete(shutdown_asyncgens)
loop.close()
asyncio.set_event_loop(None)
def stop(self):
loop, self.loop = self.loop, None
if loop is None:
return
loop.call_soon_threadsafe(loop.stop)
self.join()
_lock = threading.Lock()
_loop_thread = None
def get_event_loop():
global _loop_thread
with _lock:
if _loop_thread is None:
_loop_thread = EventLoopThread()
_loop_thread.start()
return _loop_thread.loop
def stop_event_loop():
global _loop_thread
with _lock:
if _loop_thread is not None:
_loop_thread.stop()
_loop_thread = None
def run_coroutine(coro):
return asyncio.run_coroutine_threadsafe(coro, get_event_loop())
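# Minimal usage sketch (assumes a coroutine defined by the caller):
#
#   async def compute():
#       await asyncio.sleep(0.1)
#       return 42
#
#   fut = run_coroutine(compute())   # concurrent.futures.Future
#   print(fut.result(timeout=5))     # -> 42
#   stop_event_loop()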
|
the-stack_106_25572 | # coding: utf-8
"""
InfluxDB OSS API Service.
The InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB. Access the InfluxDB API using the `/api/v2/` endpoint. # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
from influxdb_client.domain.statement import Statement
class ExpressionStatement(Statement):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'expression': 'Expression'
}
attribute_map = {
'type': 'type',
'expression': 'expression'
}
def __init__(self, type=None, expression=None): # noqa: E501,D401,D403
"""ExpressionStatement - a model defined in OpenAPI.""" # noqa: E501
Statement.__init__(self) # noqa: E501
self._type = None
self._expression = None
self.discriminator = None
if type is not None:
self.type = type
if expression is not None:
self.expression = expression
@property
def type(self):
"""Get the type of this ExpressionStatement.
Type of AST node
:return: The type of this ExpressionStatement.
:rtype: str
""" # noqa: E501
return self._type
@type.setter
def type(self, type):
"""Set the type of this ExpressionStatement.
Type of AST node
:param type: The type of this ExpressionStatement.
:type: str
""" # noqa: E501
self._type = type
@property
def expression(self):
"""Get the expression of this ExpressionStatement.
:return: The expression of this ExpressionStatement.
:rtype: Expression
""" # noqa: E501
return self._expression
@expression.setter
def expression(self, expression):
"""Set the expression of this ExpressionStatement.
:param expression: The expression of this ExpressionStatement.
:type: Expression
""" # noqa: E501
self._expression = expression
def to_dict(self):
"""Return the model properties as a dict."""
result = {}
for attr, _ in self.openapi_types.items():
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Return the string representation of the model."""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`."""
return self.to_str()
def __eq__(self, other):
"""Return true if both objects are equal."""
if not isinstance(other, ExpressionStatement):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Return true if both objects are not equal."""
return not self == other
|
the-stack_106_25577 | #!/Users/apple/Desktop/ForestMIR/venv/bin/python3.9
'''Convert a jams file into one or more lab files.'''
import argparse
import collections
import sys
import os
import json
import pandas as pd
import jams
def get_output_name(output_prefix, namespace, index):
'''Get the output name (prefix)
Parameters
----------
output_prefix : str
The path prefix of the target filename
namespace : str
The namespace of the annotation in question
index : int
The index number of this annotation within the namespace
Returns
-------
output_name : str
"output_prefix__namespace__index"
'''
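    # e.g. get_output_name('out/song', 'chord', 2) -> 'out/song__chord__02'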
return '{:s}__{:s}__{:02d}'.format(output_prefix, namespace, index)
def get_comments(jam, ann):
'''Get the metadata from a jam and an annotation, combined as a string.
Parameters
----------
jam : JAMS
The jams object
ann : Annotation
An annotation object
Returns
-------
comments : str
The jam.file_metadata and ann.annotation_metadata, combined and serialized
'''
jam_comments = jam.file_metadata.__json__
ann_comments = ann.annotation_metadata.__json__
return json.dumps({'metadata': jam_comments,
'annotation metadata': ann_comments},
indent=2)
def lab_dump(ann, comment, filename, sep, comment_char):
'''Save an annotation as a lab/csv.
Parameters
----------
ann : Annotation
The annotation object
comment : str
The comment string header
filename : str
The output filename
sep : str
The separator string for output
comment_char : str
The character used to denote comments
'''
intervals, values = ann.to_interval_values()
frame = pd.DataFrame(columns=['Time', 'End Time', 'Label'],
data={'Time': intervals[:, 0],
'End Time': intervals[:, 1],
'Label': values})
with open(filename, 'w') as fdesc:
for line in comment.split('\n'):
fdesc.write('{:s} {:s}\n'.format(comment_char, line))
frame.to_csv(path_or_buf=fdesc, index=False, sep=sep)
def convert_jams(jams_file, output_prefix, csv=False, comment_char='#', namespaces=None):
'''Convert jams to labs.
Parameters
----------
jams_file : str
The path on disk to the jams file in question
output_prefix : str
The file path prefix of the outputs
csv : bool
Whether to output in csv (True) or lab (False) format
comment_char : str
The character used to denote comments
namespaces : list-like
The set of namespace patterns to match for output
'''
if namespaces is None:
raise ValueError('No namespaces provided. Try ".*" for all namespaces.')
jam = jams.load(jams_file)
# Get all the annotations
# Filter down to the unique ones
# For each annotation
# generate the comment string
# generate the output filename
# dump to csv
# Make a counter object for each namespace type
counter = collections.Counter()
annotations = []
for query in namespaces:
annotations.extend(jam.search(namespace=query))
if csv:
suffix = 'csv'
sep = ','
else:
suffix = 'lab'
sep = '\t'
for ann in annotations:
index = counter[ann.namespace]
counter[ann.namespace] += 1
filename = os.path.extsep.join([get_output_name(output_prefix,
ann.namespace,
index),
suffix])
comment = get_comments(jam, ann)
# Dump to disk
lab_dump(ann, comment, filename, sep, comment_char)
def parse_arguments(args):
'''Parse arguments from the command line'''
parser = argparse.ArgumentParser(description='Convert JAMS to .lab files')
parser.add_argument('-c',
'--comma-separated',
dest='csv',
action='store_true',
default=False,
help='Output in .csv instead of .lab')
parser.add_argument('--comment', dest='comment_char', type=str, default='#',
help='Comment character')
parser.add_argument('-n',
'--namespace',
dest='namespaces',
nargs='+',
default=['.*'],
help='One or more namespaces to output. Default is all.')
parser.add_argument('jams_file',
help='Path to the input jams file')
parser.add_argument('output_prefix', help='Prefix for output files')
return vars(parser.parse_args(args))
if __name__ == '__main__':
convert_jams(**parse_arguments(sys.argv[1:]))
|
the-stack_106_25578 | """
Incremental update pdf file
New in version 2
- attach multi-object to end of the pdf file. base on 'portion_of_rewrite_objects' in config.py
"""
__version__ = '0.2'
__author__ = 'Morteza'
from config import iu_config
import sys
import PyPDF2
import pdf_object_preprocess as poc
import random
import datetime
import math
def read_pdf_file(host_id):
with open(iu_config['raw_host_directory'] + host_id + '.pdf', 'br') as f:
data = f.read()
return data
def write_pdf_file(host_id, description, new_pdf_file):
with open(iu_config['new_host_directory'] + host_id + '/'
+ host_id + description + '.pdf', 'bw') as f:
f.write(new_pdf_file)
def get_last_object_id(host_id):
with open(iu_config['raw_host_directory'] + host_id + '.pdf', 'br') as f:
read_pdf = PyPDF2.PdfFileReader(f)
last_object_id = read_pdf.trailer['/Size'] - 1 # size xref - 1
return last_object_id
def get_one_object():
""" provide one pdf data object whether an existing object in corpus or
an online new generated object from learnt model
this function is not complete yet!
"""
object_file_path = '../trainset/pdf_object_trainset_100_to_500_percent33.txt'
seq = poc.load_from_file(object_file_path)
obj_list = poc.get_list_of_object(seq, is_sort=False)
random_object_index = random.randint(50, len(obj_list) - 1)
obj = obj_list[random_object_index]
return obj
def incremental_update(single_object_update, host_id, sequential_number):
""" shape the incremental update """
data = read_pdf_file(host_id)
last_object_id = str(get_last_object_id(host_id))
rewrite_object_content = get_one_object()
if single_object_update:
if iu_config['update_policy'] == 'random':
rewrite_object_id = str(random.randint(1, int(last_object_id)))
        elif iu_config['update_policy'] == 'bottom_up':
rewrite_object_id = last_object_id
data = attach_new_object(data, last_object_id,
rewrite_object_content, rewrite_object_id)
# set name for new pdf files like:
# host1_sou_85_6_20180307_114117
dt = datetime.datetime.now().strftime('_%Y%m%d_%H%M%S')
name_description = '_sou_' + str(sequential_number) + '_' + str(rewrite_object_id) + dt
write_pdf_file(host_id, name_description, data)
print('save new pdf file successfully')
else:
        number_of_rewrite_objects = math.ceil(iu_config['portion_of_rewrite_objects'] * int(last_object_id))
        # print(host_id, number_of_rewrite_objects)
        rewrite_object_ids = ''
        for i in range(number_of_rewrite_objects):
rewrite_object_content = get_one_object()
if iu_config['update_policy'] == 'random':
rewrite_object_id = str(random.randint(1, int(last_object_id)))
            elif iu_config['update_policy'] == 'bottom_up':
                rewrite_object_id = str(int(last_object_id) - i)
rewrite_object_ids += rewrite_object_id
data = attach_new_object(data, last_object_id,
rewrite_object_content, rewrite_object_id)
# set name for new pdf files like:
# host1_sou_85_6_20180307_114117
dt = datetime.datetime.now().strftime('_%Y%m%d_%H%M%S')
name_description = '_mou_' + str(sequential_number) + '_' + str(rewrite_object_ids) + dt
write_pdf_file(host_id, name_description, data)
print('save new pdf file successfully')
def attach_new_object(data, last_object_id, rewrite_object_content, rewrite_object_id):
""" incremental update pdf file """
# find last trailer in a pdf file
trailer_index = 0
while data.find(b'trailer', trailer_index + 7) != -1:
trailer_index = data.find(b'trailer', trailer_index + 7)
print('trailer_index', trailer_index)
trailer_index_dic_endof = data.find(b'>>', trailer_index)
print('trailer_index_dic_endof', trailer_index_dic_endof)
trailer_content = data[trailer_index: trailer_index_dic_endof + 2]
print('trailer_content', trailer_content)
# find last startxref offset in a pdf file
startxref_index = trailer_index
while data.find(b'startxref', startxref_index + 9) != -1:
startxref_index = data.find(b'startxref', startxref_index + 9)
# print('index ===', index_startxref)
index_eof = data.find(b'%%EOF', startxref_index)
# print('index 2===', index_eof)
    if data[startxref_index + 9] in b'\r\n':
        # print('yes', data[index_startxref+9])
        startxref_index += 10
    if data[index_eof - 1] in b'\r\n':
        index_eof -= 1
startxref_offset = int(data[startxref_index: index_eof])
print('startxref_offset', startxref_offset)
# print(type(trailer_content))
# remove all /Prev 1234 from trailer if exist
# trailer_content = trailer_content.replace(b'/Prev', b'')
# trailer_content = re.sub(r'/Prev \d+', b'', str(trailer_content))
index_prev = trailer_content.find(b'/Prev')
if index_prev != -1:
index_curr = 0
# print('##', trailer_content[index_prev+5], index_prev)
# check whether a byte is ascii number or space
eliminate_content = trailer_content[index_prev+5+index_curr]
print('eliminate content', eliminate_content)
while (48 <= eliminate_content <= 57) or (eliminate_content == 32):
print('###', trailer_content[index_prev+5+index_curr])
index_curr += 1
eliminate_content = trailer_content[index_prev + 5 + index_curr]
trailer_content = trailer_content[:index_prev] + trailer_content[index_prev+5+index_curr:]
trailer_content_new = trailer_content[:-2] + b' /Prev ' \
+ bytes(str(startxref_offset), 'ascii') + b' \n>>'
print('trailer_content_new', trailer_content_new)
print('len_rewrite_object_content', len(rewrite_object_content))
startxref_offset_new = len(data) + 1 + len(rewrite_object_id) + 3 + len(rewrite_object_content) # if we attach just one obj
print('startxref_offset_new', startxref_offset_new)
attach_content = bytes(str(rewrite_object_id + ' 0 ' + rewrite_object_content + '\nxref\n0 1\n0000000000 65535 f\n' + \
rewrite_object_id + ' 1\n' + str(len(data)).zfill(10) + ' 00000 n\n'), 'ascii') + \
trailer_content_new + b'\nstartxref\n' + \
bytes(str(startxref_offset_new), 'ascii') + b'\n%%EOF\n'
# print('attach_content\n', attach_content)
new_pdf_file = data + attach_content
return new_pdf_file
def main(argv):
host_id = 'host2'
for i in range(0, 10):
incremental_update(iu_config['single_object_update'], host_id, i)
print('*** end ***')
if __name__ == '__main__':
main(sys.argv)
|
the-stack_106_25579 | from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
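        # 9216 = 64 channels * 12 * 12: the two 3x3 convs shrink 28x28 MNIST inputs
        # to 24x24, and the 2x2 max-pool in forward() halves that to 12x12.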
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
if args.dry_run:
break
def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=14, metavar='N',
help='number of epochs to train (default: 14)')
parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
help='learning rate (default: 1.0)')
parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
help='Learning rate step gamma (default: 0.7)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--dry-run', action='store_true', default=False,
help='quickly check a single pass')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=False,
help='For Saving the current Model')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
train_kwargs = {'batch_size': args.batch_size}
test_kwargs = {'batch_size': args.test_batch_size}
if use_cuda:
cuda_kwargs = {'num_workers': 1,
'pin_memory': True,
'shuffle': True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
dataset1 = datasets.MNIST('../data', train=True, download=True,
transform=transform)
dataset2 = datasets.MNIST('../data', train=False,
transform=transform)
train_loader = torch.utils.data.DataLoader(dataset1,**train_kwargs)
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
model = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch)
test(model, device, test_loader)
scheduler.step()
if args.save_model:
torch.save(model.state_dict(), "mnist_cnn.pt")
if __name__ == '__main__':
main()
|
the-stack_106_25581 | #!/usr/bin/env python
import numpy as np
#import sys
#import warnings
#
#if not sys.warnoptions:
# warnings.simplefilter("ignore")
__all__ = ["Orbit"]
class Orbit:
def __init__(self, roa=None, ror=None, i_pl=None, aor=None):
if roa is not None and aor is None:
self.roa = roa
self.aor = 1 / self.roa
elif roa is None and aor is not None:
self.aor = aor
self.roa = 1 / self.aor
elif roa is not None and aor is not None:
raise ValueError(
"only one of `roa` or `aor` may be given, not both."
)
else:
self.roa = None
self.aor = None
self.ror = ror
self.i_pl = None if i_pl is None else np.deg2rad(i_pl)
def __str__(self):
return f"Orbit: a/R = {self.aor}, r/R = {self.ror}, i_pl = {np.rad2deg(self.i_pl)} deg"
def get_planet_position(self, x):
xp = np.sin(2 * np.pi * x) / self.roa
yp = -np.cos(2 * np.pi * x) * np.cos(self.i_pl) / self.roa
return xp, yp
def get_planet_mu(self, x):
xp, yp = self.get_planet_position(x)
mu = self._xy_to_mu(xp, yp)
return mu
def _xy_to_mu(self, x, y):
delta = np.sqrt(x**2 + y**2)
mu = np.sqrt(1 - delta**2)
return mu
#
def _transform_to_orthogonal(self, x, y, l):
# l = np.deg2rad(l)
xn = x * np.cos(l) - y * np.sin(l)
yn = x * np.sin(l) + y * np.cos(l)
zn = np.sqrt(1 - xn**2 - yn**2)
return xn, yn, zn
def _rotate_around_x(self, x, y, z, i_star):
# i_star = np.deg2rad(i_star)
beta = 0.5*np.pi - i_star
xrot = x
yrot = z * np.sin(beta) + y * np.cos(beta)
zrot = z * np.cos(beta) - y * np.sin(beta)
return xrot, yrot, zrot
def get_planet_position_orthogonal(self, x, l):
xp, yp = self.get_planet_position(x)
xn, yn, zn = self._transform_to_orthogonal(xp, yp, l)
return xn, yn, zn
def get_planet_position_rotated(self, x, l, i_star):
xn, yn, zn = self.get_planet_position_orthogonal(x, l)
xrot, yrot, zrot = self._rotate_around_x(xn, yn, zn, i_star)
return xrot, yrot, zrot
def get_latitudes(self, x, l, i_star):
""" returns the latitudes relative to the equator transited by the
planet """
yrot = self.get_planet_position_rotated(x, l, i_star)[1]
return np.rad2deg(np.arcsin(yrot))
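# Illustrative usage (values are hypothetical):
#   orb = Orbit(aor=10.0, ror=0.1, i_pl=88.0)
#   xp, yp = orb.get_planet_position(0.0)  # sky-plane position at orbital phase x=0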
|
the-stack_106_25584 | from __future__ import absolute_import
import abc
from copy import deepcopy
import time
from enum import Enum
import six
from simpleflow.base import Submittable
from simpleflow.history import History
from . import futures
from .activity import Activity
if False:
from typing import Optional, Any, Dict, Union, Type # NOQA
def get_actual_value(value):
"""
Unwrap the result of a Future or return the value.
"""
if isinstance(value, futures.Future):
return futures.get_result_or_raise(value)
return value
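# e.g. get_actual_value(some_future) unwraps the future's result (or raises its
# exception), while a plain value such as get_actual_value(3) is returned unchanged.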
@six.add_metaclass(abc.ABCMeta)
class Task(Submittable):
"""A Task represents a work that can be scheduled for execution.
"""
@abc.abstractproperty
def name(self):
raise NotImplementedError()
@staticmethod
def resolve_args(*args):
return [get_actual_value(arg) for arg in args]
@staticmethod
def resolve_kwargs(**kwargs):
return {key: get_actual_value(val) for
key, val in kwargs.items()}
class ActivityTask(Task):
"""
Activity task.
:type activity: Activity
:type idempotent: Optional[bool]
:type id: str
"""
def __init__(self, activity, *args, **kwargs):
if not isinstance(activity, Activity):
raise TypeError('Wrong value for `activity`, got {} instead'.format(type(activity)))
# Keep original arguments for use in subclasses
# For instance this helps casting a generic class to a simpleflow.swf.task,
# see simpleflow.swf.task.ActivityTask.from_generic_task() factory
self._args = deepcopy(args)
self._kwargs = deepcopy(kwargs)
self.activity = activity
self.idempotent = activity.idempotent
self.context = kwargs.pop("context", None)
self.args = self.resolve_args(*args)
self.kwargs = self.resolve_kwargs(**kwargs)
self.id = None
@property
def name(self):
return 'activity-{}'.format(self.activity.name)
def __repr__(self):
return '{}(activity={}, args={}, kwargs={}, id={})'.format(
self.__class__.__name__,
self.activity,
self.args,
self.kwargs,
self.id)
def execute(self):
method = self.activity.callable
if getattr(method, 'add_context_in_kwargs', False):
self.kwargs["context"] = self.context
if hasattr(method, 'execute'):
task = method(*self.args, **self.kwargs)
task.context = self.context
result = task.execute()
if hasattr(task, 'post_execute'):
task.post_execute()
return result
else:
# NB: the following line attaches some *state* to the callable, so it
# can be used directly for advanced usage. This works well because we
# don't do multithreading, but if we ever do, DANGER!
method.context = self.context
return method(*self.args, **self.kwargs)
def propagate_attribute(self, attr, val):
"""
Propagate to the activity.
"""
setattr(self.activity, attr, val)
class WorkflowTask(Task):
"""
Child workflow.
:type executor: type(simpleflow.executor.Executor)
:type workflow: type(simpleflow.workflow.Workflow)
:type id: str
"""
def __init__(self, executor, workflow, *args, **kwargs):
# Keep original arguments for use in subclasses
# For instance this helps casting a generic class to a simpleflow.swf.task,
# see simpleflow.swf.task.WorkflowTask.from_generic_task() factory
self._args = deepcopy(args)
self._kwargs = deepcopy(kwargs)
self.executor = executor
self.workflow = workflow
self.idempotent = getattr(workflow, 'idempotent', False)
get_workflow_id = getattr(workflow, 'get_workflow_id', None)
self.args = self.resolve_args(*args)
self.kwargs = self.resolve_kwargs(**kwargs)
if get_workflow_id:
self.id = get_workflow_id(workflow, *self.args, **self.kwargs)
else:
self.id = None
@property
def name(self):
return 'workflow-{}'.format(self.workflow.name)
def __repr__(self):
return '{}(workflow={}, args={}, kwargs={}, id={})'.format(
self.__class__.__name__,
self.workflow.__module__ + '.' + self.workflow.__name__,
self.args,
self.kwargs,
self.id)
def execute(self):
workflow = self.workflow(self.executor)
return workflow.run(*self.args, **self.kwargs)
def propagate_attribute(self, attr, val):
"""
Propagate to the workflow.
"""
setattr(self.workflow, attr, val)
class SignalTask(Task):
"""
Signal.
"""
def __init__(self, name, *args, **kwargs):
self._name = name
self.args = self.resolve_args(*args)
self.kwargs = self.resolve_kwargs(**kwargs)
@property
def name(self):
"""
:return:
:rtype: str
"""
return self._name
def execute(self):
pass
class MarkerTask(Task):
def __init__(self, name, details):
"""
:param name: Marker name
:param details: Serializable marker details
"""
self._name = name
self.args = self.resolve_args(details)
self.kwargs = {}
@property
def name(self):
"""
:return:
:rtype: str
"""
return self._name
@property
def details(self):
return self.args[0]
def execute(self):
pass
class TimerTask(Task):
"""
Timer.
"""
def __init__(self, timer_id, timeout, control=None):
self.timer_id = timer_id
self.timeout = timeout
self.control = control
self.args = ()
self.kwargs = {}
@property
def name(self):
return self.timer_id
@property
def id(self):
return self.timer_id
def __repr__(self):
return '<{} timer_id="{}" timeout={}>'.format(self.__class__.__name__, self.timer_id, self.timeout)
def execute(self):
# Local execution
time.sleep(self.timeout)
class CancelTimerTask(Task):
"""
Timer cancellation.
"""
def __init__(self, timer_id):
self.timer_id = timer_id
self.args = ()
self.kwargs = {}
@property
def name(self):
return self.timer_id
@property
def id(self):
return self.timer_id
def __repr__(self):
return '<{} timer_id="{}">'.format(self.__class__.__name__, self.timer_id)
def execute(self):
# Local execution: no-op
return
class TaskFailureContext(object):
"""
Some context for a task/workflow failure.
"""
class Decision(Enum):
none = 0
abort = 1
ignore = 2
retry_now = 3
retry_later = 4
cancel = 5
handled = 6
def __init__(self,
a_task, # type: Union[ActivityTask, WorkflowTask]
event, # type: Dict[str, Any]
future, # type: futures.Future
exception_class, # type: Type[Exception]
history=None, # type: Optional[History]
):
self.a_task = a_task
self.event = event
self.future = future
self.exception_class = exception_class
self.history = history
self.decision = TaskFailureContext.Decision.none
self.retry_wait_timeout = None
self._task_error = None
def __repr__(self):
return '<TaskFailureContext' \
' task type={type}' \
' task.id={id}' \
' task.name={name}' \
' event={event}' \
' future={future}' \
' task_error={task_error}' \
' current_started_decision_id={started_decision_id}' \
' last_completed_decision_id={completed_decision_id}' \
' decision={decision}' \
' retry_wait_timeout={retry_wait_timeout}' \
'>' \
.format(
type=type(self.a_task),
id=getattr(self.a_task, 'id', None),
name=getattr(self.a_task, 'name', None),
event=self.event,
future=self.future,
task_error=self.task_error,
started_decision_id=self.current_started_decision_id,
completed_decision_id=self.last_completed_decision_id,
decision=self.decision,
retry_wait_timeout=self.retry_wait_timeout,
)
@property
def retry_count(self):
return self.event.get('retry')
@property
def task_name(self):
if hasattr(self.a_task, 'payload'):
return self.a_task.payload.name
if hasattr(self.a_task, 'name'):
return self.a_task.name
return None
@property
def exception(self):
return self.future.exception
@property
def current_started_decision_id(self):
return self.history.started_decision_id if self.history else None
@property
def last_completed_decision_id(self):
return self.history.completed_decision_id if self.history else None
@property
def task_error(self):
if self._task_error is None:
from simpleflow.exceptions import TaskFailed
from simpleflow.utils import json_loads_or_raw
self._task_error = () # falsy value different from None
if isinstance(self.exception, TaskFailed) and self.exception.details:
details = json_loads_or_raw(self.exception.details)
if isinstance(details, dict) and 'error' in details:
self._task_error = details['error']
return self._task_error
@property
def id(self):
# type: () -> Optional[int]
event = self.event
return History.get_event_id(event)
|
the-stack_106_25585 | # Copyright 2020 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
import numpy as np
import tensorflow as tf
from fastestimator.op.numpyop import LambdaOp
from fastestimator.test.unittest_util import is_equal
class TestLambdaOp(unittest.TestCase):
def test_single_input(self):
op = LambdaOp(fn=np.sum)
data = op.forward(data=[[1, 2, 3]], state={})
self.assertEqual(data, 6)
def test_multi_input(self):
op = LambdaOp(fn=np.reshape)
data = op.forward(data=[np.array([1, 2, 3, 4]), (2, 2)], state={})
self.assertTrue(is_equal(data, np.array([[1, 2], [3, 4]])))
def test_batch_forward(self):
op = LambdaOp(fn=np.sum)
data = tf.convert_to_tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
result = op.forward_batch(data=[data], state={})
self.assertEqual(result, 45)
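# A small illustrative sketch (not one of the unit tests): LambdaOp accepts any
# callable, e.g. a lambda over two inputs, and is invoked the same way as above.
def _lambda_op_sketch():
    op = LambdaOp(fn=lambda x, y: x + y)
    # Elementwise sum of the two arrays -> array([11, 22])
    return op.forward(data=[np.array([1, 2]), np.array([10, 20])], state={})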
|
the-stack_106_25586 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "pytmc/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601-compliant
        # datestamp. However we prefer "%ci" (which expands to an
        # "ISO-8601-like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
|
the-stack_106_25587 | # Copyright (c) 2009-2010 Six Apart Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Six Apart Ltd. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
The batch HTTP client provides a convenience interface around an
`httplib2.Http` instance for combining multiple requests into one MIME-encoded
batch request, dispatching the subresponses to the requests' associated
callbacks.
"""
import email
try:
from email.feedparser import FeedParser
from email.header import Header
except ImportError:
from email.Parser import FeedParser
from email.Header import Header
from httplib import HTTPException
import logging
import mimetools
import new
from StringIO import StringIO
from urlparse import urljoin, urlparse, urlunparse
import weakref
import httplib2
from batchhttp.multipart import MultipartHTTPMessage, HTTPRequestMessage
log = logging.getLogger(__name__)
class BatchError(Exception):
"""An Exception raised when the `BatchClient` cannot open, add, or
complete a batch request."""
pass
class NonBatchResponseError(BatchError):
"""An exception raised when the `BatchClient` receives a response
with an HTTP status code other than 207."""
def __init__(self, status, reason):
self.status = status
self.reason = reason
message = ('Received non-batch response: %d %s'
% (self.status, self.reason))
BatchError.__init__(self, message)
class WeaklyBoundMethod(object):
"""A bound method that only weakly holds the instance to which it's bound.
A `WeaklyBoundMethod` instance is similar to a regular `instancemethod`,
but if all other references to the method's object are released, the
method "dies" and can no longer be invoked.
This implementation is inspired by Peter Parente's article and example
implementation at http://mindtrove.info/articles/python-weak-references/ .
"""
def __init__(self, method):
"""Configures this `WeaklyBoundMethod` to be otherwise equivalent to
the `method` parameter, an `instancemethod`."""
self.instance = weakref.ref(method.im_self)
self.function = method.im_func
self.methclass = method.im_class
def alive(self):
"""Returns whether this `WeaklyBoundMethod` instance still holds its
referent.
If all other strong references to the instance to which this method is
bound are released, the instance will be collected and this method
will return `False`.
"""
if self.instance() is None:
return False
return True
def __call__(self, *args, **kwargs):
"""Invokes this `WeaklyBoundMethod` instance.
If there still exist strong references to the instance to which this
`WeaklyBoundMethod` is bound, the bound method will be called with all
the given parameters.
If there are no more strong references to this method's referent and
it has been collected, a `ReferenceError` is raised instead.
You can use its `alive()` method to determine if the
`WeaklyBoundMethod` instance still has its referent without invoking
the bound function.
"""
instance = self.instance()
if instance is None:
raise ReferenceError('Instance to which method was weakly bound has been collected')
method = new.instancemethod(self.function, instance, self.methclass)
return method(*args, **kwargs)
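# A minimal sketch of the behaviour described above: once the only strong reference
# to the instance is dropped, the weakly bound method reports that it is dead and
# calling it would raise ReferenceError.
def _weakly_bound_method_demo():
    class Greeter(object):
        def greet(self):
            return 'hi'
    greeter = Greeter()
    weak_greet = WeaklyBoundMethod(greeter.greet)
    assert weak_greet.alive() and weak_greet() == 'hi'
    del greeter  # drop the only strong reference so the instance can be collected
    return weak_greet.alive()  # False once the Greeter instance has been collected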
class WeakCallback(object):
"""A callback that is held through a weak reference.
Using `WeakCallback` to hold an `instancemethod` will probably not do what
you mean, as `instancemethod` instances are created on demand when you use
the instance's attribute of that name. To hold an `instancemethod` while
weakly referring to its instance, use `WeaklyBoundMethod` instead.
"""
def __init__(self, callback):
"""Configures this `WeakCallback` instance to weakly refer to callable
`callback`."""
self.callback = weakref.ref(callback)
def alive(self):
"""Returns whether the callable referent of this `WeakCallback`
instance is still held.
If all other strong references to the instance to which this method is
bound are released, the instance will be collected and this method
will return `False`.
"""
if self.callback() is None:
return False
return True
def __call__(self, *args, **kwargs):
"""Invokes the referent of this `WeakCallback` instance with the given
parameters.
If the target `callable` object of this `WeakCallback` instance no
longer exists, a `ReferenceError` is raised.
"""
callback = self.callback()
if callback is None:
raise ReferenceError('Callback to which this callback was weakly bound has been collected')
return callback(*args, **kwargs)
class Request(object):
"""A subrequest of a batched HTTP request.
A batch request comprises one or more `Request` instances. Once the batch
request is performed, the subresponses and their contents are dispatched
to the callbacks of the associated `Request` instances.
In order to reduce unnecessary subrequests that may be come into being
before the batch request is completed, `Request` instances hold weak
references to their callbacks. These weak references are instances of
either `WeaklyBoundMethod` (if the requested callback is an
`instancemethod`) or `WeakCallback` (for all other callables).
If the callback ceases to be referenced by any other code between the
creation of the `Request` instance and the completion of the batch request
through a `BatchClient.complete_request()` call, the subrequest will be
omitted from the batch and the callback will not be called.
"""
def __init__(self, reqinfo, callback):
"""Initializes the `Request` instance with the given request and
subresponse callback.
Parameter `reqinfo` is the HTTP request to perform, specified as a
mapping of keyword arguments suitable for passing to an
`httplib2.Http.request()` call.
Parameter `callback` is the callable object to which to supply the
subresponse once the batch request is performed. No strong reference
to `callback` is kept by the `Request` instance, so unless it (or its
bound instance, if it's an `instancemethod`) continues to be
referenced elsewhere, the subrequest will be omitted from the batch
request and `callback` will not be called with a subresponse.
Callbacks should expect three positional parameters:
* the URL of the original subrequest
* an `httplib2.Response` representing the subresponse and its headers
* the textual body of the subresponse
"""
self.reqinfo = reqinfo
if hasattr(callback, 'im_self'): # instancemethod
self.callback = WeaklyBoundMethod(callback)
else:
self.callback = WeakCallback(callback)
def alive(self):
"""Returns whether this `Request` instance's callback still exists."""
return self.callback.alive()
def _update_headers_from_cache(self, http):
objreq = self.reqinfo
if http.cache is not None or http.authorizations:
class StopCharade(Exception):
pass
class CaptureConnections(object):
def __contains__(self, key):
return True
def __getitem__(self, key):
return self.http
class CaptureHTTPConnection(object):
def connect(self):
pass
def request(self, method, request_uri, body, headers):
self.url = request_uri
self.body = body
self.headers = headers
raise StopCharade()
def close(self):
pass
real_connections = http.connections
conns = CaptureConnections()
conn = CaptureHTTPConnection()
conns.http = conn
http.connections = conns
try:
try:
http.request(**objreq)
except StopCharade:
return conn.headers, conn.body
finally:
# Put the real connections back.
http.connections = real_connections
# We didn't finish our request, or there was no cache, so return what
# we were given.
return objreq.get('headers', {}), objreq.get('body')
def _update_response_from_cache(self, http, response, realbody):
if http.cache is not None or http.authorizations:
class HandoffConnections(object):
def __contains__(self, key):
return True
def __getitem__(self, key):
return self.http
class HandoffHTTPConnection(object):
def connect(self):
pass
def request(self, method, request_uri, body, headers):
pass
def read(self):
return httplib2._decompressContent(response, realbody)
def getresponse(self):
return self
def close(self):
pass
def __getattr__(self, key):
return getattr(response, key)
real_connections = http.connections
fc = HandoffConnections()
fc.http = HandoffHTTPConnection()
http.connections = fc
unset = object()
real_follow_redirects = getattr(http, 'follow_redirects', unset)
http.follow_redirects = False
try:
response, realbody = http.request(**self.reqinfo)
finally:
http.connections = real_connections
if real_follow_redirects is unset:
del http.follow_redirects
else:
http.follow_redirects = real_follow_redirects
# Fix up the status code, since httplib2 writes the 304 through
# to the cache, but we want to treat it like a 200.
if response.status == 304:
response.status = 200
return response, realbody
def as_message(self, http, id):
"""Converts this `Request` instance into a
`batchhttp.multipart.HTTPRequestMessage` suitable for adding to a
`batchhttp.multipart.MultipartHTTPMessage` instance.
If this `Request` instance's callback no longer exists, a
`ReferenceError` is raised.
"""
if not self.callback.alive():
raise ReferenceError("No callback to return request's response to")
headers, body = self._update_headers_from_cache(http)
objreq = self.reqinfo
url = objreq['uri']
method = objreq.get('method', 'GET')
parts = urlparse(url)
host = parts[1]
# Use whole URL in request line per HTTP/1.1 5.1.2 (proxy behavior).
requesttext = "%s %s HTTP/1.1\r\n" % (method, url)
headers['host'] = host
# Prevent compression as it's unlikely to survive batching.
headers['accept-encoding'] = 'identity'
for header, value in headers.iteritems():
requesttext += "%s: %s\r\n" % (header, value)
requesttext += '\r\n'
requesttext += body or ''
requesttext = requesttext.encode('ascii')
submsg = HTTPRequestMessage(requesttext, id)
return submsg
def decode_response(self, http, part):
"""Decodes and dispatches the given subresponse to this `Request`
instance's callback.
Parameter `http` is the `httplib2.Http` instance to use for retrieving
unmodified content from cache, updating with new authorization
headers, etc. Parameter `part` is the `email.message.Message`
containing the subresponse content to decode.
If this `Request` instance's callback no longer exists, a
`ReferenceError` is raised instead of decoding anything. If the
subresponse cannot be decoded properly, a `BatchError` is raised.
"""
if not self.callback.alive():
raise ReferenceError("No callback to return response to")
# Parse the part body into a status line and a Message.
messagetext = part.get_payload(decode=True)
messagefile = StringIO(messagetext)
status_line = messagefile.readline()
message = email.message_from_file(messagefile)
if status_line.startswith('HTTP/'):
status_code = status_line.split(' ')[1]
else:
status_code = status_line.split(' ')[0]
message['status'] = int(status_code)
httpresponse = httplib2.Response(message)
# httplib2.Response doesn't lower case header keys itself, so a
# Response from an email Message is inconsistent with one from an
# httplib.HTTPResponse. Enforce lower case here.
for k, v in httpresponse.items():
del httpresponse[k]
httpresponse[k.lower()] = v
body = message.get_payload()
if body is None:
raise BatchError('Could not decode subrequest body from MIME payload')
httpresponse, body = self._update_response_from_cache(http, httpresponse, body)
if body is None:
raise BatchError('Could not decode subrequest body through httplib2')
self.callback(self.reqinfo['uri'], httpresponse, body)
class BatchRequest(object):
"""A collection of HTTP responses that should be performed in a batch as
one response."""
def __init__(self, headers=None):
self.requests = list()
self.headers = headers
def __len__(self):
"""Returns the number of subrequests there are to perform.
This count *does not include* subrequests that will not be performed
due to the garbage collection of their callbacks. Callbacks that have
already expired don't count.
"""
return len([r for r in self.requests if r.alive()])
def add(self, reqinfo, callback):
"""Adds a new `Request` instance to this `BatchRequest` instance.
Parameters `reqinfo` and `callback` should be an HTTP request info
mapping and a callable object, suitable for using to construct a new
`Request` instance.
"""
r = Request(reqinfo, callback)
self.requests.append(r)
def process(self, http, endpoint):
"""Performs a batch request.
Parameter `http` is an `httplib2.Http` instance to use when building
subrequests and decoding subresponses as well as performing the actual
batch HTTP request.
Parameter `endpoint` is a URL specifying where the batch processor is.
The batch request will be made to the ``/batch-processor`` resource at
the root of the site named in `endpoint`.
If this `BatchRequest` instance contains no `Request` instances that
can deliver their subresponses, no batch request will occur.
"""
headers, body = self.construct(http)
if self.headers and headers:
headers.update(self.headers)
if headers and body:
batch_url = urljoin(endpoint, '/batch-processor')
response, content = http.request(batch_url, body=body, method="POST", headers=headers)
self.handle_response(http, response, content)
def construct(self, http):
"""Builds a batch HTTP request from the `BatchRequest` instance's
constituent subrequests.
The batch request is returned as a tuple containing a mapping of HTTP
headers and the text of the request body.
"""
if not len(self):
log.debug('No requests were made for the batch')
return None, None
msg = MultipartHTTPMessage()
request_id = 1
for request in self.requests:
try:
submsg = request.as_message(http, request_id)
except ReferenceError:
pass
else:
msg.attach(submsg)
request_id += 1
# Do this ahead of getting headers, since the boundary is not
# assigned until we bake the multipart message:
content = msg.as_string(write_headers=False)
hdrs = msg.items()
headers = {}
for hdr in hdrs:
headers[hdr[0]] = hdr[1]
        # let's prefer gzip encoding on the batch response
headers['accept-encoding'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
return headers, content
def handle_response(self, http, response, content):
"""Dispatches the subresponses contained in the given batch HTTP
response to the associated callbacks.
Parameter `http` is the `httplib2.Http` instance to use for retrieving
unmodified subresponse bodies, updating authorization headers, etc.
Parameters `response` and `content` are the `httplib2.Response`
instance representing the batch HTTP response information and its
associated text content respectively.
If the response is not a successful ``207 Multi-Status`` HTTP
response, or the batch response content cannot be decoded into its
constituent subresponses, a `BatchError` is raised.
"""
# was the response okay?
if response.status != 207:
log.debug('Received non-batch response %d %s with content:\n%s'
% (response.status, response.reason, content))
raise NonBatchResponseError(response.status, response.reason)
# parse content into pieces
# Prevent the application/http-response sub-parts from turning into
# Messages, as the HTTP status line will confuse the parser and
# we'll just get a text/plain Message with our response for the
# payload anyway.
class HttpAverseParser(FeedParser):
def _parse_headers(self, lines):
FeedParser._parse_headers(self, lines)
if self._cur.get_content_type() == 'application/http-response':
self._set_headersonly()
p = HttpAverseParser()
headers = ""
for hdr in response:
headers += "%s: %s\n" % (hdr, Header(response[hdr]).encode(), )
p.feed(headers)
p.feed("\n")
p.feed(content)
message = p.close()
if not message.is_multipart():
log.debug('RESPONSE: ' + str(response))
log.debug('CONTENT: ' + content)
raise BatchError('Response was not a MIME multipart response set')
response = {}
messages = message.get_payload()
for part in messages:
if part.get_content_type() != 'application/http-response':
raise BatchError('Batch response included a part that was not an HTTP response message')
try:
request_id = int(part['Multipart-Request-ID'])
except KeyError:
raise BatchError('Batch response included a part with no Multipart-Request-ID header')
except ValueError:
raise BatchError('Batch response included a part with an invalid Multipart-Request-ID header')
request = self.requests[request_id-1]
try:
request.decode_response(http, part)
except ReferenceError:
                # We shouldn't have lost any references to request objects
                # since the request was made, but just in case.
pass
class BatchClient(httplib2.Http):
"""Sort of an HTTP client for performing a batch HTTP request."""
def __init__(self, endpoint=None, **kwargs):
"""Configures the `BatchClient` instance to use the given batch
processor endpoint.
Parameter `endpoint` is the base URL at which to find the batch
processor to which to submit the batch request. The batch processor
should be the resource ``/batch-processor`` at the root of the site
specified in `endpoint`.
"""
self.endpoint = endpoint
super(BatchClient, self).__init__(**kwargs)
def batch_request(self, headers=None):
"""Opens a batch request.
If a batch request is already open, a `BatchError` is raised.
In Python 2.5 or later, you can use this method with the ``with``
statement::
            >>> with client.batch_request():
            ...     client.batch({'uri': uri}, callback=handle_result)
The batch request is then completed automatically at the end of the
``with`` block.
You may use the headers parameter to supply additional headers that are
specific to this batch request. This may be useful if your batch
processor uses headers for authentication purposes, for example.
"""
import traceback
if hasattr(self, 'batchrequest'):
# hey, we already have a request. this is invalid...
log.debug('Batch request previously opened at:\n'
+ ''.join(traceback.format_list(self._opened)))
log.debug('New now at:\n' + ''.join(traceback.format_stack()))
raise BatchError("There's already an open batch request")
self.batchrequest = BatchRequest(headers=headers)
self._opened = traceback.extract_stack()
# Return ourself so we can enter a "with" context.
return self
def complete_batch(self):
"""Closes a batch request, submitting it and dispatching the
subresponses.
If no batch request is open, a `BatchError` is raised.
"""
if not hasattr(self, 'batchrequest'):
raise BatchError("There's no open batch request to complete")
if self.endpoint is None:
raise BatchError("There's no batch processor endpoint to which to send a batch request")
try:
log.debug('Making batch request for %d items' % len(self.batchrequest))
self.batchrequest.process(self, self.endpoint)
finally:
del self.batchrequest
def clear_batch(self):
"""Closes a batch request without performing it."""
try:
del self.batchrequest
except AttributeError:
# well it's already cleared then isn't it
pass
def batch(self, reqinfo, callback):
"""Adds the given subrequest to the batch request.
Parameter `reqinfo` is the HTTP request to perform, specified as a
mapping of keyword arguments suitable for passing to an
`httplib2.Http.request()` call.
Parameter `callback` is the callable object to which to supply the
subresponse once the batch request is performed. No strong reference
to `callback` is kept by the `Request` instance, so unless it (or its
bound instance, if it's an `instancemethod`) continues to be
referenced elsewhere, the subrequest will be omitted from the batch
request and `callback` will not be called with a subresponse.
If no batch request is open, a `BatchError` is raised.
"""
if not hasattr(self, 'batchrequest'):
raise BatchError("There's no open batch request to add an object to")
self.batchrequest.add(reqinfo, callback)
def request(self, uri, method="GET", body=None, headers=None, redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):
req_log = logging.getLogger('.'.join((__name__, 'request')))
if req_log.isEnabledFor(logging.DEBUG):
if headers is None:
headeritems = ()
else:
headeritems = headers.items()
req_log.debug('Making request:\n%s %s\n%s\n\n%s', method, uri,
'\n'.join([
'%s: %s' % (k, v) for k, v in headeritems
]), body or '')
response, content = super(BatchClient, self).request(uri, method, body, headers, redirections, connection_type)
resp_log = logging.getLogger('.'.join((__name__, 'response')))
if resp_log.isEnabledFor(logging.DEBUG):
resp_log.debug('Got response:\n%s\n\n%s',
'\n'.join([
'%s: %s' % (k, v) for k, v in response.items()
]), content)
return response, content
def __enter__(self):
return self.batchrequest
def __exit__(self, *exc_info):
if None not in exc_info:
# Exception! Let's forget the whole thing.
self.clear_batch()
else:
# Finished the context. Try to complete the request.
self.complete_batch()
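# A minimal usage sketch with a hypothetical endpoint and URLs: subrequests queued
# inside the batch_request() context are dispatched to their callbacks when the
# block completes; keep a strong reference to each callback until then.
def _batch_client_sketch():
    client = BatchClient(endpoint='http://example.com/')
    results = []
    def on_result(url, response, body):
        results.append((url, response.status, body))
    with client.batch_request():
        client.batch({'uri': 'http://example.com/widgets.json'}, callback=on_result)
        client.batch({'uri': 'http://example.com/gadgets.json'}, callback=on_result)
    return results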
|
the-stack_106_25588 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import os
import re
import traceback
from zulint.printer import print_err, colors
from typing import cast, Any, Callable, Dict, List, Optional, Tuple, Iterable
Rule = Dict[str, Any]
RuleList = List[Dict[str, Any]]
LineTup = Tuple[int, str, str, str]
FILES_WITH_LEGACY_SUBJECT = {
# This basically requires a big DB migration:
'zerver/lib/topic.py',
# This is for backward compatibility.
'zerver/tests/test_legacy_subject.py',
# Other migration-related changes require extreme care.
'zerver/lib/fix_unreads.py',
'zerver/tests/test_migrations.py',
# These use subject in the email sense, and will
# probably always be exempt:
'zerver/lib/email_mirror.py',
'zerver/lib/error_notify.py',
'zerver/lib/feedback.py',
'zerver/lib/send_email.py',
'zerver/tests/test_new_users.py',
# These are tied more to our API than our DB model.
'zerver/lib/api_test_helpers.py',
# TRY TO FIX THESE! If you can't fix them, try to
# add comments here and/or in the file itself about
# why sweeping subject is tricky.
'zerver/lib/stream_topic.py',
# This has lots of query data embedded, so it's hard
# to fix everything until we migrate the DB to "topic".
'zerver/tests/test_narrow.py',
}
def get_line_info_from_file(fn: str) -> List[LineTup]:
line_tups = []
for i, line in enumerate(open(fn)):
line_newline_stripped = line.strip('\n')
line_fully_stripped = line_newline_stripped.strip()
if line_fully_stripped.endswith(' # nolint'):
continue
tup = (i, line, line_newline_stripped, line_fully_stripped)
line_tups.append(tup)
return line_tups
def get_rules_applying_to_fn(fn: str, rules: RuleList) -> RuleList:
rules_to_apply = []
for rule in rules:
excluded = False
for item in rule.get('exclude', set()):
if fn.startswith(item):
excluded = True
break
if excluded:
continue
if rule.get("include_only"):
found = False
for item in rule.get("include_only", set()):
if item in fn:
found = True
if not found:
continue
rules_to_apply.append(rule)
return rules_to_apply
def check_file_for_pattern(fn: str,
line_tups: List[LineTup],
identifier: str,
color: Optional[Iterable[str]],
rule: Rule) -> bool:
'''
DO NOT MODIFY THIS FUNCTION WITHOUT PROFILING.
This function gets called ~40k times, once per file per regex.
Inside it's doing a regex check for every line in the file, so
it's important to do things like pre-compiling regexes.
DO NOT INLINE THIS FUNCTION.
We need to see it show up in profiles, and the function call
overhead will never be a bottleneck.
'''
exclude_lines = {
line for
(exclude_fn, line) in rule.get('exclude_line', set())
if exclude_fn == fn
}
pattern = re.compile(rule['pattern'])
strip_rule = rule.get('strip') # type: Optional[str]
ok = True
for (i, line, line_newline_stripped, line_fully_stripped) in line_tups:
if line_fully_stripped in exclude_lines:
exclude_lines.remove(line_fully_stripped)
continue
try:
line_to_check = line_fully_stripped
if strip_rule is not None:
if strip_rule == '\n':
line_to_check = line_newline_stripped
else:
raise Exception("Invalid strip rule")
if pattern.search(line_to_check):
if rule.get("exclude_pattern"):
if re.search(rule['exclude_pattern'], line_to_check):
continue
print_err(identifier, color, '{} at {} line {}:'.format(
rule['description'], fn, i+1))
print_err(identifier, color, line)
ok = False
except Exception:
print("Exception with %s at %s line %s" % (rule['pattern'], fn, i+1))
traceback.print_exc()
if exclude_lines:
print('Please remove exclusions for file %s: %s' % (fn, exclude_lines))
return ok
def custom_check_file(fn: str,
identifier: str,
rules: RuleList,
color: Optional[Iterable[str]],
max_length: Optional[int]=None) -> bool:
failed = False
line_tups = get_line_info_from_file(fn=fn)
rules_to_apply = get_rules_applying_to_fn(fn=fn, rules=rules)
for rule in rules_to_apply:
ok = check_file_for_pattern(
fn=fn,
line_tups=line_tups,
identifier=identifier,
color=color,
rule=rule,
)
if not ok:
failed = True
# TODO: Move the below into more of a framework.
firstline = None
lastLine = None
if line_tups:
firstline = line_tups[0][3] # line_fully_stripped for the first line.
lastLine = line_tups[-1][1]
if max_length is not None:
ok = check_file_for_long_lines(
fn=fn,
max_length=max_length,
line_tups=line_tups,
)
if not ok:
failed = True
if firstline:
if os.path.splitext(fn)[1] and 'zerver/' in fn:
shebang_rules = [{'pattern': '^#!',
'description': "zerver library code shouldn't have a shebang line."}]
else:
shebang_rules = [{'pattern': '#!/usr/bin/python',
'description': "Use `#!/usr/bin/env python3` instead of `#!/usr/bin/python`"},
{'pattern': '#!/usr/bin/env python$',
'description': "Use `#!/usr/bin/env python3` instead of `#!/usr/bin/env python`."}]
for rule in shebang_rules:
if re.search(rule['pattern'], firstline):
print_err(identifier, color,
'{} at {} line 1:'.format(rule['description'], fn))
print_err(identifier, color, firstline)
failed = True
if lastLine and ('\n' not in lastLine):
print("No newline at the end of file. Fix with `sed -i '$a\\' %s`" % (fn,))
failed = True
return failed
def check_file_for_long_lines(fn: str,
max_length: int,
line_tups: List[LineTup]) -> bool:
ok = True
for (i, line, line_newline_stripped, line_fully_stripped) in line_tups:
if isinstance(line, bytes):
line_length = len(line.decode("utf-8"))
else:
line_length = len(line)
if (line_length > max_length and
'# type' not in line and 'test' not in fn and 'example' not in fn and
# Don't throw errors for markdown format URLs
not re.search(r"^\[[ A-Za-z0-9_:,&()-]*\]: http.*", line) and
# Don't throw errors for URLs in code comments
not re.search(r"[#].*http.*", line) and
not re.search(r"`\{\{ api_url \}\}[^`]+`", line) and
"# ignorelongline" not in line and 'migrations' not in fn):
print("Line too long (%s) at %s line %s: %s" % (len(line), fn, i+1, line_newline_stripped))
ok = False
return ok
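# A small illustrative sketch (hypothetical path and rule) of how the helpers above
# compose: custom_check_file() reads the file once, applies every applicable rule
# via check_file_for_pattern(), and returns True when something failed.
def _custom_check_sketch():
    demo_rules = [
        {'pattern': 'print[(]',
         'description': 'Use logging rather than print in this hypothetical codebase'},
    ]  # type: RuleList
    return custom_check_file(
        fn='zerver/example.py',  # hypothetical file path
        identifier='py',
        rules=demo_rules,
        color=next(colors),
        max_length=110,
    )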
def build_custom_checkers(by_lang):
# type: (Dict[str, List[str]]) -> Tuple[Callable[[], bool], Callable[[], bool]]
# By default, a rule applies to all files within the extension for which it is specified (e.g. all .py files)
# There are three operators we can use to manually include or exclude files from linting for a rule:
# 'exclude': 'set([<path>, ...])' - if <path> is a filename, excludes that file.
# if <path> is a directory, excludes all files directly below the directory <path>.
# 'exclude_line': 'set([(<path>, <line>), ...])' - excludes all lines matching <line> in the file <path> from linting.
# 'include_only': 'set([<path>, ...])' - includes only those files where <path> is a substring of the filepath.
trailing_whitespace_rule = {
'pattern': r'\s+$',
'strip': '\n',
'description': 'Fix trailing whitespace'
}
whitespace_rules = [
# This linter should be first since bash_rules depends on it.
trailing_whitespace_rule,
{'pattern': 'http://zulip.readthedocs.io',
'description': 'Use HTTPS when linking to ReadTheDocs',
},
{'pattern': '\t',
'strip': '\n',
'exclude': set(['tools/travis/success-http-headers.txt']),
'description': 'Fix tab-based whitespace'},
] # type: RuleList
comma_whitespace_rule = [
{'pattern': ', {2,}[^#/ ]',
'exclude': set(['zerver/tests', 'frontend_tests/node_tests']),
'description': "Remove multiple whitespaces after ','",
'good_lines': ['foo(1, 2, 3)', 'foo = bar # some inline comment'],
         'bad_lines': ['foo(1,  2,  3)', 'foo(1,    2,    3)']},
] # type: RuleList
markdown_whitespace_rules = list([rule for rule in whitespace_rules if rule['pattern'] != r'\s+$']) + [
# Two spaces trailing a line with other content is okay--it's a markdown line break.
# This rule finds one space trailing a non-space, three or more trailing spaces, and
# spaces on an empty line.
{'pattern': r'((?<!\s)\s$)|(\s\s\s+$)|(^\s+$)',
'strip': '\n',
'description': 'Fix trailing whitespace'},
{'pattern': '^#+[A-Za-z0-9]',
'strip': '\n',
'description': 'Missing space after # in heading',
'good_lines': ['### some heading', '# another heading'],
'bad_lines': ['###some heading', '#another heading']},
] # type: RuleList
js_rules = cast(RuleList, [
{'pattern': r'[^_]function\(',
'description': 'The keyword "function" should be followed by a space'},
{'pattern': r'.*blueslip.warning\(.*',
'description': 'The module blueslip has no function warning, try using blueslip.warn'},
{'pattern': '[)]{$',
'description': 'Missing space between ) and {'},
{'pattern': r'i18n\.t\([^)]+[^,\{\)]$',
'description': 'i18n string should not be a multiline string'},
{'pattern': r'''i18n\.t\(['"].+?['"]\s*\+''',
'description': 'Do not concatenate arguments within i18n.t()'},
{'pattern': r'i18n\.t\(.+\).*\+',
'description': 'Do not concatenate i18n strings'},
{'pattern': r'\+.*i18n\.t\(.+\)',
'description': 'Do not concatenate i18n strings'},
{'pattern': '[.]includes[(]',
'exclude': ['frontend_tests/'],
'description': '.includes() is incompatible with Internet Explorer. Use .indexOf() !== -1 instead.'},
{'pattern': '[.]html[(]',
'exclude_pattern': '[.]html[(]("|\'|templates|html|message.content|sub.rendered_description|i18n.t|rendered_|$|[)]|error_text|widget_elem|[$]error|[$][(]"<p>"[)])',
'exclude': ['static/js/portico', 'static/js/lightbox.js', 'static/js/ui_report.js',
'static/js/confirm_dialog.js',
'frontend_tests/'],
'description': 'Setting HTML content with jQuery .html() can lead to XSS security bugs. Consider .text() or using rendered_foo as a variable name if content comes from handlebars and thus is already sanitized.'},
{'pattern': '["\']json/',
'description': 'Relative URL for JSON route not supported by i18n'},
# This rule is constructed with + to avoid triggering on itself
{'pattern': " =" + '[^ =>~"]',
'description': 'Missing whitespace after "="'},
{'pattern': '^[ ]*//[A-Za-z0-9]',
'description': 'Missing space after // in comment'},
{'pattern': 'if[(]',
'description': 'Missing space between if and ('},
{'pattern': 'else{$',
'description': 'Missing space between else and {'},
{'pattern': '^else {$',
'description': 'Write JS else statements on same line as }'},
{'pattern': '^else if',
'description': 'Write JS else statements on same line as }'},
{'pattern': r'const\s',
'exclude': set(['frontend_tests/zjsunit',
'frontend_tests/node_tests',
'static/js/portico',
'tools/']),
'description': 'Avoid ES6 constructs until we upgrade our pipeline.'},
{'pattern': 'console[.][a-z]',
'exclude': set(['static/js/blueslip.js',
'frontend_tests/zjsunit',
'frontend_tests/casper_lib/common.js',
'frontend_tests/node_tests',
'static/js/debug.js',
'tools/setup/generate-custom-icon-webfont']),
'description': 'console.log and similar should not be used in webapp'},
{'pattern': r'''[.]text\(["'][a-zA-Z]''',
'description': 'Strings passed to $().text should be wrapped in i18n.t() for internationalization',
'exclude': set(['frontend_tests/node_tests/'])},
{'pattern': r'''compose_error\(["']''',
'description': 'Argument to compose_error should be a literal string enclosed '
'by i18n.t()'},
{'pattern': r'ui.report_success\(',
'description': 'Deprecated function, use ui_report.success.'},
{'pattern': r'''report.success\(["']''',
'description': 'Argument to report_success should be a literal string enclosed '
'by i18n.t()'},
{'pattern': r'ui.report_error\(',
'description': 'Deprecated function, use ui_report.error.'},
{'pattern': r'''report.error\(["']''',
'description': 'Argument to report_error should be a literal string enclosed '
'by i18n.t()'},
{'pattern': r'\$\(document\)\.ready\(',
'description': "`Use $(f) rather than `$(document).ready(f)`",
'good_lines': ['$(function () {foo();}'],
'bad_lines': ['$(document).ready(function () {foo();}']},
{'pattern': '[$][.](get|post|patch|delete|ajax)[(]',
'description': "Use channel module for AJAX calls",
'exclude': set([
# Internal modules can do direct network calls
'static/js/blueslip.js',
'static/js/channel.js',
# External modules that don't include channel.js
'static/js/stats/',
'static/js/portico/',
'static/js/billing/',
]),
'good_lines': ['channel.get(...)'],
'bad_lines': ['$.get()', '$.post()', '$.ajax()']},
{'pattern': 'style ?=',
'description': "Avoid using the `style=` attribute; we prefer styling in CSS files",
'exclude': set([
'frontend_tests/node_tests/copy_and_paste.js',
'frontend_tests/node_tests/upload.js',
'frontend_tests/node_tests/templates.js',
'static/js/upload.js',
'static/js/stream_color.js',
]),
'good_lines': ['#my-style {color: blue;}'],
'bad_lines': ['<p style="color: blue;">Foo</p>', 'style = "color: blue;"']},
]) + whitespace_rules + comma_whitespace_rule
python_rules = cast(RuleList, [
{'pattern': 'subject|SUBJECT',
'exclude_pattern': 'subject to the|email|outbox',
'description': 'avoid subject as a var',
'good_lines': ['topic_name'],
'bad_lines': ['subject="foo"', ' MAX_SUBJECT_LEN'],
'exclude': FILES_WITH_LEGACY_SUBJECT,
'include_only': set([
'zerver/data_import/',
'zerver/lib/',
'zerver/tests/',
'zerver/views/'])},
{'pattern': '^(?!#)@login_required',
'description': '@login_required is unsupported; use @zulip_login_required',
'good_lines': ['@zulip_login_required', '# foo @login_required'],
'bad_lines': ['@login_required', ' @login_required']},
{'pattern': '^user_profile[.]save[(][)]',
'description': 'Always pass update_fields when saving user_profile objects',
'exclude_line': set([
('zerver/lib/actions.py', "user_profile.save() # Can't use update_fields because of how the foreign key works."),
]),
'exclude': set(['zerver/tests', 'zerver/lib/create_user.py']),
'good_lines': ['user_profile.save(update_fields=["pointer"])'],
'bad_lines': ['user_profile.save()']},
{'pattern': r'^[^"]*"[^"]*"%\(',
'description': 'Missing space around "%"',
'good_lines': ['"%s" % ("foo")', '"%s" % (foo)'],
'bad_lines': ['"%s"%("foo")', '"%s"%(foo)']},
{'pattern': r"^[^']*'[^']*'%\(",
'description': 'Missing space around "%"',
'good_lines': ["'%s' % ('foo')", "'%s' % (foo)"],
'bad_lines': ["'%s'%('foo')", "'%s'%(foo)"]},
{'pattern': 'self: Any',
'description': 'you can omit Any annotation for self',
'good_lines': ['def foo (self):'],
'bad_lines': ['def foo(self: Any):']},
# This rule is constructed with + to avoid triggering on itself
{'pattern': " =" + '[^ =>~"]',
'description': 'Missing whitespace after "="',
'good_lines': ['a = b', '5 == 6'],
'bad_lines': ['a =b', 'asdf =42']},
{'pattern': r'":\w[^"]*$',
'description': 'Missing whitespace after ":"',
'good_lines': ['"foo": bar', '"some:string:with:colons"'],
'bad_lines': ['"foo":bar', '"foo":1']},
{'pattern': r"':\w[^']*$",
'description': 'Missing whitespace after ":"',
'good_lines': ["'foo': bar", "'some:string:with:colons'"],
'bad_lines': ["'foo':bar", "'foo':1"]},
{'pattern': r"^\s+#\w",
'strip': '\n',
'exclude': set(['tools/droplets/create.py']),
'description': 'Missing whitespace after "#"',
'good_lines': ['a = b # some operation', '1+2 # 3 is the result'],
'bad_lines': [' #some operation', ' #not valid!!!']},
{'pattern': "assertEquals[(]",
'description': 'Use assertEqual, not assertEquals (which is deprecated).',
'good_lines': ['assertEqual(1, 2)'],
'bad_lines': ['assertEquals(1, 2)']},
{'pattern': "== None",
'description': 'Use `is None` to check whether something is None',
'good_lines': ['if foo is None'],
'bad_lines': ['foo == None']},
{'pattern': "type:[(]",
'description': 'Missing whitespace after ":" in type annotation',
'good_lines': ['# type: (Any, Any)', 'colon:separated:string:containing:type:as:keyword'],
'bad_lines': ['# type:(Any, Any)']},
{'pattern': "type: ignore$",
'exclude': set(['tools/tests',
'zerver/lib/test_runner.py',
'zerver/tests']),
'description': '"type: ignore" should always end with "# type: ignore # explanation for why"',
'good_lines': ['foo = bar # type: ignore # explanation'],
'bad_lines': ['foo = bar # type: ignore']},
{'pattern': "# type [(]",
'description': 'Missing : after type in type annotation',
'good_lines': ['foo = 42 # type: int', '# type: (str, int) -> None'],
'bad_lines': ['# type (str, int) -> None']},
{'pattern': "#type",
'description': 'Missing whitespace after "#" in type annotation',
'good_lines': ['foo = 42 # type: int'],
'bad_lines': ['foo = 42 #type: int']},
{'pattern': r'\b(if|else|while)[(]',
'description': 'Put a space between statements like if, else, etc. and (.',
'good_lines': ['if (1 == 2):', 'while (foo == bar):'],
'bad_lines': ['if(1 == 2):', 'while(foo == bar):']},
{'pattern': ", [)]",
'description': 'Unnecessary whitespace between "," and ")"',
'good_lines': ['foo = (1, 2, 3,)', 'foo(bar, 42)'],
'bad_lines': ['foo = (1, 2, 3, )']},
{'pattern': "% [(]",
'description': 'Unnecessary whitespace between "%" and "("',
'good_lines': ['"foo %s bar" % ("baz",)'],
'bad_lines': ['"foo %s bar" % ("baz",)']},
# This next check could have false positives, but it seems pretty
# rare; if we find any, they can be added to the exclude list for
# this rule.
{'pattern': r''' % [a-zA-Z0-9_."']*\)?$''',
'exclude_line': set([
('tools/tests/test_template_parser.py', '{% foo'),
]),
'description': 'Used % comprehension without a tuple',
'good_lines': ['"foo %s bar" % ("baz",)'],
'bad_lines': ['"foo %s bar" % "baz"']},
{'pattern': r'''.*%s.* % \([a-zA-Z0-9_."']*\)$''',
'description': 'Used % comprehension without a tuple',
'good_lines': ['"foo %s bar" % ("baz",)"'],
'bad_lines': ['"foo %s bar" % ("baz")']},
{'pattern': 'sudo',
'include_only': set(['scripts/']),
'exclude': set(['scripts/lib/setup_venv.py']),
'exclude_line': set([
('scripts/lib/zulip_tools.py', '# We need sudo here, since the path will be under /srv/ in the'),
('scripts/lib/zulip_tools.py', 'subprocess.check_call(["sudo", "/bin/bash", "-c",'),
('scripts/lib/zulip_tools.py', 'subprocess.check_call(["sudo", "rm", "-rf", directory])'),
]),
'description': 'Most scripts are intended to run on systems without sudo.',
'good_lines': ['subprocess.check_call(["ls"])'],
'bad_lines': ['subprocess.check_call(["sudo", "ls"])']},
{'pattern': 'django.utils.translation',
'include_only': set(['test/']),
'description': 'Test strings should not be tagged for translation',
'good_lines': [''],
'bad_lines': ['django.utils.translation']},
{'pattern': 'userid',
'description': 'We prefer user_id over userid.',
'good_lines': ['id = alice.user_id'],
'bad_lines': ['id = alice.userid']},
{'pattern': r'json_success\({}\)',
'description': 'Use json_success() to return nothing',
'good_lines': ['return json_success()'],
'bad_lines': ['return json_success({})']},
{'pattern': r'\Wjson_error\(_\(?\w+\)',
'exclude': set(['zerver/tests']),
'description': 'Argument to json_error should be a literal string enclosed by _()',
'good_lines': ['return json_error(_("string"))'],
'bad_lines': ['return json_error(_variable)', 'return json_error(_(variable))']},
{'pattern': r'''\Wjson_error\(['"].+[),]$''',
'exclude': set(['zerver/tests']),
'exclude_line': set([
# We don't want this string tagged for translation.
('zerver/views/compatibility.py', 'return json_error("Client is too old")'),
]),
'description': 'Argument to json_error should a literal string enclosed by _()'},
# To avoid JsonableError(_variable) and JsonableError(_(variable))
{'pattern': r'\WJsonableError\(_\(?\w.+\)',
'exclude': set(['zerver/tests']),
'description': 'Argument to JsonableError should be a literal string enclosed by _()'},
{'pattern': r'''\WJsonableError\(["'].+\)''',
'exclude': set(['zerver/tests']),
'description': 'Argument to JsonableError should be a literal string enclosed by _()'},
{'pattern': r'''([a-zA-Z0-9_]+)=REQ\(['"]\1['"]''',
'description': 'REQ\'s first argument already defaults to parameter name'},
{'pattern': r'self\.client\.(get|post|patch|put|delete)',
'description': \
'''Do not call self.client directly for put/patch/post/get.
See WRAPPER_COMMENT in test_helpers.py for details.
'''},
# Directly fetching Message objects in e.g. views code is often a security bug.
{'pattern': '[^r]Message.objects.get',
'exclude': set(["zerver/tests",
"zerver/lib/onboarding.py",
"zilencer/management/commands/add_mock_conversation.py",
"zerver/worker/queue_processors.py"]),
'description': 'Please use access_message() to fetch Message objects',
},
{'pattern': 'Stream.objects.get',
'include_only': set(["zerver/views/"]),
'description': 'Please use access_stream_by_*() to fetch Stream objects',
},
{'pattern': 'get_stream[(]',
'include_only': set(["zerver/views/", "zerver/lib/actions.py"]),
'exclude_line': set([
# This one in check_message is kinda terrible, since it's
# how most instances are written, but better to exclude something than nothing
('zerver/lib/actions.py', 'stream = get_stream(stream_name, realm)'),
('zerver/lib/actions.py', 'get_stream(admin_realm_signup_notifications_stream, admin_realm)'),
# Here we need get_stream to access streams you've since unsubscribed from.
('zerver/views/messages.py', 'stream = get_stream(operand, self.user_profile.realm)'),
# Use stream_id to exclude mutes.
('zerver/views/messages.py', 'stream_id = get_stream(stream_name, user_profile.realm).id'),
]),
'description': 'Please use access_stream_by_*() to fetch Stream objects',
},
{'pattern': 'Stream.objects.filter',
'include_only': set(["zerver/views/"]),
'description': 'Please use access_stream_by_*() to fetch Stream objects',
},
{'pattern': '^from (zerver|analytics|confirmation)',
'include_only': set(["/migrations/"]),
'exclude': set([
'zerver/migrations/0032_verify_all_medium_avatar_images.py',
'zerver/migrations/0060_move_avatars_to_be_uid_based.py',
'zerver/migrations/0104_fix_unreads.py',
'pgroonga/migrations/0002_html_escape_subject.py',
]),
'description': "Don't import models or other code in migrations; see docs/subsystems/schema-migrations.md",
},
{'pattern': 'datetime[.](now|utcnow)',
'include_only': set(["zerver/", "analytics/"]),
'description': "Don't use datetime in backend code.\n"
"See https://zulip.readthedocs.io/en/latest/contributing/code-style.html#naive-datetime-objects",
},
{'pattern': r'render_to_response\(',
'description': "Use render() instead of render_to_response().",
},
{'pattern': 'from os.path',
'description': "Don't use from when importing from the standard library",
},
{'pattern': 'import os.path',
'description': "Use import os instead of import os.path",
},
{'pattern': r'(logging|logger)\.warn\W',
'description': "Logger.warn is a deprecated alias for Logger.warning; Use 'warning' instead of 'warn'.",
'good_lines': ["logging.warning('I am a warning.')", "logger.warning('warning')"],
'bad_lines': ["logging.warn('I am a warning.')", "logger.warn('warning')"]},
{'pattern': r'\.pk',
'exclude_pattern': '[.]_meta[.]pk',
'description': "Use `id` instead of `pk`.",
'good_lines': ['if my_django_model.id == 42', 'self.user_profile._meta.pk'],
'bad_lines': ['if my_django_model.pk == 42']},
{'pattern': r'^[ ]*# type: \(',
'exclude': set([
# These directories, especially scripts/ and puppet/,
# have tools that need to run before a Zulip environment
# is provisioned; in some of those, the `typing` module
# might not be available yet, so care is required.
'scripts/',
'tools/',
'puppet/',
# Zerver files that we should just clean.
'zerver/tests',
'zerver/lib/api_test_helpers.py',
'zerver/lib/request.py',
'zerver/views/streams.py',
# thumbor is (currently) python2 only
'zthumbor/',
]),
'description': 'Comment-style function type annotation. Use Python3 style annotations instead.',
},
{'pattern': r' = models[.].*null=True.*\) # type: (?!Optional)',
'include_only': {"zerver/models.py"},
'description': 'Model variable with null=true not annotated as Optional.',
'good_lines': ['desc = models.TextField(null=True) # type: Optional[Text]',
'stream = models.ForeignKey(Stream, null=True, on_delete=CASCADE) # type: Optional[Stream]',
'desc = models.TextField() # type: Text',
'stream = models.ForeignKey(Stream, on_delete=CASCADE) # type: Stream'],
'bad_lines': ['desc = models.CharField(null=True) # type: Text',
'stream = models.ForeignKey(Stream, null=True, on_delete=CASCADE) # type: Stream'],
},
{'pattern': r' = models[.](?!NullBoolean).*\) # type: Optional', # Optional tag, except NullBoolean(Field)
'exclude_pattern': 'null=True',
'include_only': {"zerver/models.py"},
'description': 'Model variable annotated with Optional but variable does not have null=true.',
'good_lines': ['desc = models.TextField(null=True) # type: Optional[Text]',
'stream = models.ForeignKey(Stream, null=True, on_delete=CASCADE) # type: Optional[Stream]',
'desc = models.TextField() # type: Text',
'stream = models.ForeignKey(Stream, on_delete=CASCADE) # type: Stream'],
'bad_lines': ['desc = models.TextField() # type: Optional[Text]',
'stream = models.ForeignKey(Stream, on_delete=CASCADE) # type: Optional[Stream]'],
},
{'pattern': r'[\s([]Text([^\s\w]|$)',
'exclude': set([
# We are likely to want to keep these dirs Python 2+3 compatible,
# since the plan includes extracting them to a separate project eventually.
'tools/lib',
'tools/zulint',
# TODO: Update our migrations from Text->str.
'zerver/migrations/',
# thumbor is (currently) python2 only
'zthumbor/',
]),
'description': "Now that we're a Python 3 only codebase, we don't need to use typing.Text. Please use str instead.",
},
]) + whitespace_rules + comma_whitespace_rule
bash_rules = cast(RuleList, [
{'pattern': '#!.*sh [-xe]',
'description': 'Fix shebang line with proper call to /usr/bin/env for Bash path, change -x|-e switches'
' to set -x|set -e'},
{'pattern': 'sudo',
'description': 'Most scripts are intended to work on systems without sudo',
'include_only': set(['scripts/']),
'exclude': set([
'scripts/lib/install',
'scripts/lib/create-zulip-admin',
'scripts/setup/terminate-psql-sessions',
'scripts/setup/configure-rabbitmq'
]), },
]) + whitespace_rules[0:1]
css_rules = cast(RuleList, [
{'pattern': r'calc\([^+]+\+[^+]+\)',
'description': "Avoid using calc with '+' operator. See #8403 : in CSS.",
'good_lines': ["width: calc(20% - -14px);"],
'bad_lines': ["width: calc(20% + 14px);"]},
{'pattern': r'^[^:]*:\S[^:]*;$',
'description': "Missing whitespace after : in CSS",
'good_lines': ["background-color: white;", "text-size: 16px;"],
'bad_lines': ["background-color:white;", "text-size:16px;"]},
{'pattern': '[a-z]{',
'description': "Missing whitespace before '{' in CSS.",
'good_lines': ["input {", "body {"],
'bad_lines': ["input{", "body{"]},
{'pattern': 'https://',
'description': "Zulip CSS should have no dependencies on external resources",
'good_lines': ['background: url(/static/images/landing-page/pycon.jpg);'],
'bad_lines': ['background: url(https://example.com/image.png);']},
{'pattern': '^[ ][ ][a-zA-Z0-9]',
'description': "Incorrect 2-space indentation in CSS",
'strip': '\n',
'good_lines': [" color: white;", "color: white;"],
'bad_lines': [" color: white;"]},
{'pattern': r'{\w',
'description': "Missing whitespace after '{' in CSS (should be newline).",
'good_lines': ["{\n"],
'bad_lines': ["{color: LightGoldenRodYellow;"]},
{'pattern': ' thin[ ;]',
'description': "thin CSS attribute is under-specified, please use 1px.",
'good_lines': ["border-width: 1px;"],
'bad_lines': ["border-width: thin;", "border-width: thin solid black;"]},
{'pattern': ' medium[ ;]',
'description': "medium CSS attribute is under-specified, please use pixels.",
'good_lines': ["border-width: 3px;"],
'bad_lines': ["border-width: medium;", "border: medium solid black;"]},
{'pattern': ' thick[ ;]',
'description': "thick CSS attribute is under-specified, please use pixels.",
'good_lines': ["border-width: 5px;"],
'bad_lines': ["border-width: thick;", "border: thick solid black;"]},
]) + whitespace_rules + comma_whitespace_rule
prose_style_rules = cast(RuleList, [
{'pattern': r'[^\/\#\-"]([jJ]avascript)', # exclude usage in hrefs/divs
'description': "javascript should be spelled JavaScript"},
{'pattern': r'''[^\/\-\."'\_\=\>]([gG]ithub)[^\.\-\_"\<]''', # exclude usage in hrefs/divs
'description': "github should be spelled GitHub"},
{'pattern': '[oO]rganisation', # exclude usage in hrefs/divs
'description': "Organization is spelled with a z",
'exclude_line': [('docs/translating/french.md', '* organization - **organisation**')]},
{'pattern': '!!! warning',
'description': "!!! warning is invalid; it's spelled '!!! warn'"},
{'pattern': 'Terms of service',
'description': "The S in Terms of Service is capitalized"},
{'pattern': '[^-_]botserver(?!rc)|bot server',
'description': "Use Botserver instead of botserver or bot server."},
]) + comma_whitespace_rule
html_rules = whitespace_rules + prose_style_rules + [
{'pattern': r'placeholder="[^{#](?:(?!\.com).)+$',
'description': "`placeholder` value should be translatable.",
'exclude_line': [('templates/zerver/register.html', 'placeholder="acme"'),
('templates/zerver/register.html', 'placeholder="Acme or Aκμή"')],
'good_lines': ['<input class="stream-list-filter" type="text" placeholder="{{ _(\'Search streams\') }}" />'],
'bad_lines': ['<input placeholder="foo">']},
{'pattern': "placeholder='[^{]",
'description': "`placeholder` value should be translatable.",
'good_lines': ['<input class="stream-list-filter" type="text" placeholder="{{ _(\'Search streams\') }}" />'],
'bad_lines': ["<input placeholder='foo'>"]},
{'pattern': "aria-label='[^{]",
'description': "`aria-label` value should be translatable.",
'good_lines': ['<button type="button" class="close close-alert-word-status" aria-label="{{t \'Close\' }}">'],
'bad_lines': ["<button aria-label='foo'></button>"]},
{'pattern': 'aria-label="[^{]',
'description': "`aria-label` value should be translatable.",
'good_lines': ['<button type="button" class="close close-alert-word-status" aria-label="{{t \'Close\' }}">'],
'bad_lines': ['<button aria-label="foo"></button>']},
{'pattern': 'script src="http',
'description': "Don't directly load dependencies from CDNs. See docs/subsystems/front-end-build-process.md",
'exclude': set(["templates/corporate/billing.html", "templates/zerver/hello.html",
"templates/corporate/upgrade.html"]),
'good_lines': ["{{ render_bundle('landing-page') }}"],
'bad_lines': ['<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.2.1/jquery.min.js"></script>']},
{'pattern': "title='[^{]",
'description': "`title` value should be translatable.",
'good_lines': ['<link rel="author" title="{{ _(\'About these documents\') }}" />'],
'bad_lines': ["<p title='foo'></p>"]},
{'pattern': r'title="[^{\:]',
'exclude_line': set([
('templates/zerver/app/markdown_help.html',
'<td><img alt=":heart:" class="emoji" src="/static/generated/emoji/images/emoji/heart.png" title=":heart:" /></td>')
]),
'exclude': set(["templates/zerver/emails"]),
'description': "`title` value should be translatable."},
{'pattern': r'''\Walt=["'][^{"']''',
'description': "alt argument should be enclosed by _() or it should be an empty string.",
'exclude': set(['static/templates/settings/display-settings.handlebars',
'templates/zerver/app/keyboard_shortcuts.html',
'templates/zerver/app/markdown_help.html']),
     'good_lines': ['<img src="{{source_url}}" alt="{{ _(name) }}" />', '<img alt="" />'],
'bad_lines': ['<img alt="Foo Image" />']},
{'pattern': r'''\Walt=["']{{ ?["']''',
'description': "alt argument should be enclosed by _().",
'good_lines': ['<img src="{{source_url}}" alt="{{ _(name) }}" />'],
'bad_lines': ['<img alt="{{ " />']},
{'pattern': r'\bon\w+ ?=',
'description': "Don't use inline event handlers (onclick=, etc. attributes) in HTML. Instead,"
"attach a jQuery event handler ($('#foo').on('click', function () {...})) when "
"the DOM is ready (inside a $(function () {...}) block).",
'exclude': set(['templates/zerver/dev_login.html']),
'good_lines': ["($('#foo').on('click', function () {}"],
'bad_lines': ["<button id='foo' onclick='myFunction()'>Foo</button>", "<input onchange='myFunction()'>"]},
{'pattern': 'style ?=',
'description': "Avoid using the `style=` attribute; we prefer styling in CSS files",
'exclude_pattern': r'.*style ?=["' + "'" + '](display: ?none|background: {{|color: {{|background-color: {{).*',
'exclude': set([
# KaTeX output uses style attribute
'templates/zerver/app/markdown_help.html',
# 5xx page doesn't have external CSS
'static/html/5xx.html',
# Group PMs color is dynamically calculated
'static/templates/group_pms.handlebars',
# exclude_pattern above handles color, but have other issues:
'static/templates/draft.handlebars',
'static/templates/subscription.handlebars',
'static/templates/single_message.handlebars',
# Old-style email templates need to use inline style
# attributes; it should be possible to clean these up
# when we convert these templates to use premailer.
'templates/zerver/emails/missed_message.html',
'templates/zerver/emails/email_base_messages.html',
# Email log templates; should clean up.
'templates/zerver/email.html',
'templates/zerver/email_log.html',
# Probably just needs to be changed to display: none so the exclude works
'templates/zerver/app/navbar.html',
# Needs the width cleaned up; display: none is fine
'static/templates/settings/account-settings.handlebars',
# background image property is dynamically generated
'static/templates/user_profile_modal.handlebars',
# Inline styling for an svg; could be moved to CSS files?
'templates/zerver/landing_nav.html',
'templates/zerver/billing_nav.html',
'templates/zerver/app/home.html',
'templates/zerver/features.html',
'templates/zerver/portico-header.html',
'templates/corporate/billing.html',
# Miscellaneous violations to be cleaned up
'static/templates/user_info_popover_title.handlebars',
'static/templates/subscription_invites_warning_modal.handlebars',
'templates/zerver/reset_confirm.html',
'templates/zerver/config_error.html',
'templates/zerver/dev_env_email_access_details.html',
'templates/zerver/confirm_continue_registration.html',
'templates/zerver/register.html',
'templates/zerver/accounts_send_confirm.html',
'templates/zerver/integrations/index.html',
'templates/zerver/documentation_main.html',
'templates/analytics/realm_summary_table.html',
'templates/corporate/zephyr.html',
'templates/corporate/zephyr-mirror.html',
]),
'good_lines': ['#my-style {color: blue;}', 'style="display: none"', "style='display: none"],
'bad_lines': ['<p style="color: blue;">Foo</p>', 'style = "color: blue;"']},
] # type: RuleList
handlebars_rules = html_rules + [
{'pattern': "[<]script",
'description': "Do not use inline <script> tags here; put JavaScript in static/js instead."},
{'pattern': '{{ t ("|\')',
'description': 'There should be no spaces before the "t" in a translation tag.'},
{'pattern': r"{{t '.*' }}[\.\?!]",
'description': "Period should be part of the translatable string."},
{'pattern': r'{{t ".*" }}[\.\?!]',
'description': "Period should be part of the translatable string."},
{'pattern': r"{{/tr}}[\.\?!]",
'description': "Period should be part of the translatable string."},
{'pattern': '{{t ("|\') ',
'description': 'Translatable strings should not have leading spaces.'},
{'pattern': "{{t '[^']+ ' }}",
'description': 'Translatable strings should not have trailing spaces.'},
{'pattern': '{{t "[^"]+ " }}',
'description': 'Translatable strings should not have trailing spaces.'},
]
jinja2_rules = html_rules + [
{'pattern': r"{% endtrans %}[\.\?!]",
'description': "Period should be part of the translatable string."},
{'pattern': r"{{ _(.+) }}[\.\?!]",
'description': "Period should be part of the translatable string."},
]
json_rules = [
# Here, we don't use `whitespace_rules`, because the tab-based
# whitespace rule flags a lot of third-party JSON fixtures
# under zerver/webhooks that we want preserved verbatim. So
# we just include the trailing whitespace rule and a modified
# version of the tab-based whitespace rule (we can't just use
# exclude in whitespace_rules, since we only want to ignore
# JSON files with tab-based whitespace, not webhook code).
trailing_whitespace_rule,
{'pattern': '\t',
'strip': '\n',
'exclude': set(['zerver/webhooks/']),
'description': 'Fix tab-based whitespace'},
{'pattern': r'":["\[\{]',
'exclude': set(['zerver/webhooks/', 'zerver/tests/fixtures/']),
'description': 'Require space after : in JSON'},
] # type: RuleList
markdown_rules = markdown_whitespace_rules + prose_style_rules + [
{'pattern': r'\[(?P<url>[^\]]+)\]\((?P=url)\)',
'description': 'Linkified markdown URLs should use cleaner <http://example.com> syntax.'},
{'pattern': 'https://zulip.readthedocs.io/en/latest/[a-zA-Z0-9]',
'exclude': ['docs/overview/contributing.md', 'docs/overview/readme.md', 'docs/README.md'],
'include_only': set(['docs/']),
'description': "Use relative links (../foo/bar.html) to other documents in docs/",
},
{'pattern': "su zulip -c [^']",
'include_only': set(['docs/']),
'description': "Always quote arguments using `su zulip -c '` to avoid confusion about how su works.",
},
{'pattern': r'\][(][^#h]',
'include_only': set(['README.md', 'CONTRIBUTING.md']),
'description': "Use absolute links from docs served by GitHub",
},
]
help_markdown_rules = markdown_rules + [
{'pattern': '[a-z][.][A-Z]',
'description': "Likely missing space after end of sentence"},
{'pattern': r'\b[rR]ealm[s]?\b',
'good_lines': ['Organization', 'deactivate_realm', 'realm_filter'],
'bad_lines': ['Users are in a realm', 'Realm is the best model'],
'description': "Realms are referred to as Organizations in user-facing docs."},
]
txt_rules = whitespace_rules
def check_custom_checks_py():
# type: () -> bool
failed = False
color = next(colors)
for fn in by_lang['py']:
if 'custom_check.py' in fn:
continue
if custom_check_file(fn, 'py', python_rules, color, max_length=110):
failed = True
return failed
def check_custom_checks_nonpy():
# type: () -> bool
failed = False
color = next(colors)
for fn in by_lang['js']:
if custom_check_file(fn, 'js', js_rules, color):
failed = True
color = next(colors)
for fn in by_lang['sh']:
if custom_check_file(fn, 'sh', bash_rules, color):
failed = True
color = next(colors)
for fn in by_lang['css']:
if custom_check_file(fn, 'css', css_rules, color):
failed = True
color = next(colors)
for fn in by_lang['handlebars']:
if custom_check_file(fn, 'handlebars', handlebars_rules, color):
failed = True
color = next(colors)
for fn in by_lang['html']:
if custom_check_file(fn, 'html', jinja2_rules, color):
failed = True
color = next(colors)
for fn in by_lang['json']:
if custom_check_file(fn, 'json', json_rules, color):
failed = True
color = next(colors)
markdown_docs_length_exclude = {
# Has some example Vagrant output that's very long
"docs/development/setup-vagrant.md",
# Have wide output in code blocks
"docs/subsystems/logging.md",
"docs/subsystems/migration-renumbering.md",
# Have curl commands with JSON that would be messy to wrap
"zerver/webhooks/helloworld/doc.md",
"zerver/webhooks/trello/doc.md",
# Has a very long configuration line
"templates/zerver/integrations/perforce.md",
# Has some example code that could perhaps be wrapped
"templates/zerver/api/incoming-webhooks-walkthrough.md",
# This macro has a long indented URL
"templates/zerver/help/include/git-webhook-url-with-branches-indented.md",
# These two are the same file and have some too-long lines for GitHub badges
"README.md",
"docs/overview/readme.md",
}
for fn in by_lang['md']:
max_length = None
if fn not in markdown_docs_length_exclude:
max_length = 120
rules = markdown_rules
if fn.startswith("templates/zerver/help"):
rules = help_markdown_rules
if custom_check_file(fn, 'md', rules, color, max_length=max_length):
failed = True
color = next(colors)
for fn in by_lang['txt'] + by_lang['text']:
if custom_check_file(fn, 'txt', txt_rules, color):
failed = True
color = next(colors)
for fn in by_lang['rst']:
if custom_check_file(fn, 'rst', txt_rules, color):
failed = True
color = next(colors)
for fn in by_lang['yaml']:
if custom_check_file(fn, 'yaml', txt_rules, color):
failed = True
return failed
return (check_custom_checks_py, check_custom_checks_nonpy)
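# Hedged usage sketch (illustrative only): the two closures returned above are meant
# to be invoked by the lint driver, assuming the enclosing builder is exposed as
# build_custom_checkers(by_lang):
#
#   check_py, check_nonpy = build_custom_checkers(by_lang)
#   linter_failed = check_py() or check_nonpy()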
|
the-stack_106_25590 | ##################################################################################################
#
# TracSynth.py - GW quality trace element analysis & Monte Carlo modeling setup
#
# (1) clean up (account for non-detects, etc.)
# (2) limit to useful analytes (e.g., sufficient detections)
# (3) compute and list correlation matrix
# (4) compute covariance matrix and posit correlated random parameter values (e.g., for PHREEQC)
#
##################################################################################################
from numpy import *
from pandas import *
import matplotlib.pyplot as plt
import subprocess
import os
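# A minimal, hedged sketch of the core of step (4) in the header: correlated synthetic
# samples are drawn from a multivariate normal fitted to the log-transformed
# observations and converted back to linear units.  This helper is illustrative only;
# its name and arguments are assumptions and it is not called elsewhere in the script.
def _sketch_correlated_draws(log_obs, n=1000):
    """log_obs: DataFrame of log10 concentrations, one column per analyte."""
    import numpy as np
    import pandas as pd
    draws = np.random.multivariate_normal(log_obs.mean().values, log_obs.cov().values, n)
    return 10 ** pd.DataFrame(draws, columns=log_obs.columns)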
def Tally(results, sorbed, analytes):
# tally adsorbed masses per components in analytes list
for colName in list(results):
results.rename(columns={colName: colName.replace('m_', '')}, inplace=True)
sorbedResults = results[sorbed].copy()
for i, chem in enumerate(analytes):
results[chem + '-sorbed'] = 0.
for sorb in sorbed:
if chem in sorb: results[chem + '-sorbed'] = results[chem + '-sorbed'] + results[sorb]
# drop individual adsorbed species from results
results.drop(sorbed, axis=1, inplace=True)
return results
def ReadSorbed():
# read text file with list of adsorbed species
lineInput = []
sorbed = []
inputFile = open('surface_species.txt','r')
for line in inputFile: lineInput.append(line.split()[0])
inputFile.close()
for i in range(len(lineInput)): sorbed.append(lineInput[i])
print('Read surface species listing.')
return sorbed
def ReadWQ(numSampleMin):
# read water quality data and return data frame
wq = read_csv('water_quality.csv')
analytes = list(wq)
analytes.remove('Well')
analytes.remove('Date')
wq['Date'] = to_datetime(wq['Date'])
ndCount = zeros(len(analytes), int)
for j, chem in enumerate(analytes):
conc = list(wq[chem].astype('str'))
for i in range(len(conc)):
# delete R, J, and U flags (comments in analytical data)
if conc[i].find('R')!=-1:
conc[i] = conc[i].replace('R', '')
conc[i] = conc[i].strip()
if conc[i].find('J-')!=-1:
conc[i] = conc[i].replace('J-', '')
conc[i] = conc[i].strip()
if conc[i].find('J')!=-1:
conc[i] = conc[i].replace('J', '')
conc[i] = conc[i].strip()
if conc[i].find('U')!=-1:
conc[i] = conc[i].replace('U', '')
conc[i] = conc[i].strip()
            # convert non-detect values ('<') to 0.5 x the detection limit
if conc[i].find('<')!=-1:
conc[i] = conc[i].replace('<', '')
conc[i] = conc[i].strip()
conc[i] = 0.5 * float(conc[i])
ndCount[j] += 1
wq[chem] = array(conc).astype('float')
if chem != 'pH': wq[chem] = log10(wq[chem])
wq.dropna(axis=0, how='all', subset=analytes, inplace=True)
# remove analytes with too many non-detects from consideration
samples = wq.count()
samples.drop(labels=['Well', 'Date'], inplace=True)
samples = samples-ndCount
samples = samples[samples>numSampleMin]
validAnalytes = list(samples.keys())
names = validAnalytes
header = ['Well', 'Date']
header.extend(validAnalytes)
print('Read and processed water quality data.')
return wq[header], names
def CorrelPlots(corrData, corrSynth, names, analytes, indx):
# generate correlation vs correlation plots (synthetic versus observed)
for i, chem in enumerate(analytes):
plt.figure(indx)
x = array(corrData[chem])
y = array(corrSynth[chem])
plt.scatter(x, y, s=15, facecolors='blue', edgecolors='blue')
for j, name in enumerate(names):
plt.annotate(name, (x[j]+0.02, y[j]))
plt.title(chem)
plt.xlabel('Correlation (observations)')
plt.ylabel('Correlation (synthetic)')
plt.show()
def Scatter(data, synth, xSpecies, ySpeciesSet):
# generate selected scatter plots
for i, chem in enumerate(ySpeciesSet):
plt.figure(i)
plt.scatter(data[xSpecies], data[chem], s=25, facecolors='black', edgecolors='black', label = 'Data')
plt.scatter(synth[xSpecies], synth[chem], s=3, facecolors='none', edgecolors='red', label = 'Synthetic')
plt.xscale('log')
plt.xlabel(xSpecies + ' (mol/L)')
if chem != 'pH':
plt.ylabel(chem + ' (mol/L)')
plt.yscale('log')
else:
plt.ylabel(chem)
plt.legend(loc=2)
plt.show()
def WriteInput(synthetic, names, equilParam, equilMin, analytes, special, sorbed, phases):
# write out PHREEQC input file
N = len(synthetic)
phrqFile = open('phrqInput.txt','w')
phrqFile.writelines(['TITLE GW Speciation Model', '\n'])
# solution spread keyword block
phrqFile.writelines(['', '\n'])
phrqFile.writelines(['SOLUTION_SPREAD', '\n'])
phrqFile.writelines(['\t', '-units', '\t', 'ug/l', '\n'])
header = '\t'
equils = '\t'
for i, chem in enumerate(names):
header += chem + '\t'
for j, param in enumerate(equilParam):
if chem == param: equils += equilMin[j] + ' 0' + '\t' # fix concentrations via phase equilibrium
else: equils += '\t'
phrqFile.writelines([header, '\n'])
phrqFile.writelines([equils, '\n'])
for i in range(N):
row = '\t'
for j in range(len(names)): row += str(synthetic[names[j]].iloc[i]) + '\t'
phrqFile.writelines([row, '\n'])
# surface blocks
for i in range(N):
phrqFile.writelines(['', '\n'])
phrqFile.writelines(['SURFACE ' + str(i+1), '\n'])
phrqFile.writelines(['\t', '-equilibrate with solution', '\t', str(i+1), '\n'])
phrqFile.writelines(['\t', 'Hfo_w', '\t', '0.005', '\t', '600', '\t', '1', '\n'])
phrqFile.writelines(['\t', 'Hfo_s', '\t', '0.00005', '\n'])
# equilibrate with phases across all solutions; use run_cells to distribute
phrqFile.writelines(['', '\n'])
phrqFile.writelines(['EQUILIBRIUM_PHASES', '\t', str(1) + '-' + str(N), '\n'])
for phase in phases:
phrqFile.writelines([phase, '\t', str(0.), '\t', str(0.), '\n'])
phrqFile.writelines(['', '\n'])
phrqFile.writelines(['RUN_CELLS', '\n'])
phrqFile.writelines(['\t', '-cells', '\t', str(1) + '-' + str(N), '\n'])
# selected output block
phrqFile.writelines(['', '\n'])
phrqFile.writelines(['SELECTED_OUTPUT', '\n'])
phrqFile.writelines(['\t', '-file', '\t', 'selected_output.txt', '\n'])
phrqFile.writelines(['\t', '-reset', '\t', 'false', '\n'])
phrqFile.writelines(['\t', '-state', '\t', 'true', '\n'])
phrqFile.writelines(['\t', '-solution', '\t', 'true', '\n'])
phrqFile.writelines(['\t', '-pH', '\t', 'true', '\n'])
phrqFile.writelines(['\t', '-pe', '\t', 'true', '\n'])
phrqFile.writelines(['\t', '-percent_error', '\t', 'true', '\n'])
analytesString = ['\t', '-totals']
for chem in special:
analytesString.append('\t')
analytesString.append(chem)
for chem in analytes:
analytesString.append('\t')
analytesString.append(chem)
analytesString.append('\n')
phrqFile.writelines(analytesString)
sorbedString = ['\t', '-molalities']
for sorb in sorbed:
sorbedString.append('\t')
sorbedString.append(sorb)
sorbedString.append('\n')
phrqFile.writelines(sorbedString)
# note that tracked saturation indices do not impact model and may not be needed; hard-wired here for expediency
phrqFile.writelines(['\t', '-saturation_indices', '\t', 'Calcite CO2(g) Hydroxylapatite FCO3Apatite', '\n'])
phrqFile.writelines(['', '\n'])
phrqFile.writelines(['END', '\n'])
phrqFile.close()
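# Rough, illustrative shape of the PHREEQC input produced by WriteInput() above
# (columns and values below are placeholders, not real output):
#
#   SOLUTION_SPREAD
#       -units  ug/l
#       pH      As      Fe      ...
#       7.1     12.3    450     ...
#
#   SURFACE 1
#       -equilibrate with solution  1
#       Hfo_w   0.005   600     1
#       Hfo_s   0.00005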
### main script ###
def TraceSynth(N):
numSampleMin = 5 # minimum number of detections to include analyte
wq, names = ReadWQ(numSampleMin) # process water quality observational data
# species that will be processed as output
components = read_csv('components.csv')
analytes = list(components['Analyte'])
MW = list(components['MW'])
compsDict = dict(zip(analytes, MW))
    # additional parameters to be included in the output
sorbed = ReadSorbed()
sorbedElements = ['P', 'S', 'As', 'Ca', 'Cr', 'Pb', 'Mg', 'V'] # track total amounts adsorbed
special = ['As(3)', 'As(5)']
diffTrack = ['As', 'P', 'pH'] # track updated concentrations following mineral equilibration reactions
# phases for equilibrium phase blocks
phases = ['Hydroxylapatite']
# note correlations between parameters in data
corrWQ = DataFrame(wq.corr())
corrWQ.to_csv('data_corr.csv')
    # exclude analytes that are not sufficiently co-reported with other analytes
exclude = ['Mn', 'TPH-g', 'TPH-r', 'TPH-d'] # hard-wired list of analytes to be dropped from analysis
wq.drop(exclude, axis=1, inplace=True)
for x in exclude: names.remove(x) # names = set of useable parameter observations
# generate synthetic data (from log ug/L concentrations)
mean0 = wq.mean().values
cov0 = wq.cov().values
r = random.multivariate_normal(mean0, cov0, N)
    synthetic = DataFrame(r)
synthetic.columns = names
synthetic['pe'] = 4.0
names.append('pe')
# convert synthetic data to linear units (ug/L)
for chem in names:
if (chem != 'pH') and (chem != 'pe'):
wq[chem] = 10**wq[chem]
synthetic[chem] = 10**synthetic[chem]
# remove posited solutions that exceed concentration caps
clipComp = ['Cl'] # components list and associated maximum allowed concentration
clipMax = [3000.*1000.]
for i, chem in enumerate(clipComp):
synthetic = synthetic[synthetic[chem]<=clipMax[i]].copy()
# run modeled solutions, surfaces, and equilibrium phases in a single batch
WriteInput(synthetic, names, ['pe'], ['Ferrihydrite'], analytes, special, sorbed, phases) # write PHREEQC input file
subprocess.call('PHREEQC phrqInput.txt output.txt minteq.v4.dat', shell=True) # run PHREEQC
# process model output; drop results with very poor charge imbalances (e.g., greater than 40%)
selected = read_csv('selected_output.txt', delim_whitespace=True)
selected.drop_duplicates(inplace=True)
results0 = selected[(selected['state']=='i_surf') & (abs(selected['pct_err'])<=40)].copy()
results0.drop('state', axis=1, inplace=True)
results0 = Tally(results0, sorbed, analytes)
resultsM = selected[(selected['state']=='react') & (abs(selected['pct_err'])<=40)].copy()
resultsM.drop('state', axis=1, inplace=True)
results0.to_csv('synthetic_results.csv', index=False)
# process output for graphical representations
analytes.append('pH')
syntheticFiltered = results0[analytes] # filtered to include specific analytes of interest (with molecular weights)
for chem in analytes: # convert water quality data to mol/L to facilitate comparison
if chem!='pH': wq[chem] = 1e-6 * wq[chem] / compsDict.get(chem)
# show example observed & synthetic data scatter plots
yShifts = zeros(len(analytes), float)
Scatter(wq, syntheticFiltered, 'As', analytes)
# write screened observational data to file
wq.to_csv('wq_set.csv', index=False)
# convert back to log space
synthSubset = results0[analytes].copy()
for chem in analytes:
if chem != 'pH':
wq[chem] = log10(wq[chem])
synthSubset[chem] = log10(synthSubset[chem])
# display correlation-to-correlation plots
corrData = DataFrame(wq.corr())
corrSynth = DataFrame(synthSubset.corr())
indx = len(analytes)
CorrelPlots(corrData, corrSynth, analytes, ['Fe'], indx)
    # merge in post-precipitation results; display selected parameter distributions as graphs
keep = ['soln']
keep.extend(diffTrack)
resultsM = resultsM[keep]
for chem in diffTrack: resultsM.rename(columns={chem: chem + '_m'}, inplace=True)
resultsAll = results0.merge(resultsM, on='soln', how='inner')
resultsAll.to_csv('results_all.csv', index=False)
resultsAll['diffP'] = resultsAll['P'] - resultsAll['P_m']
resultsAll['rAs'] = (resultsAll['As'] - resultsAll['As_m']) / resultsAll['As']
resultsAll['rP'] = (resultsAll['P'] - resultsAll['P_m']) / resultsAll['P']
# scatter plot comparing initial arsenic concentrations versus those corresponding to P removal
plt.figure(indx)
resultsAll.plot.scatter(x='As', y='As_m', s=resultsAll['diffP'] * 100000)
plt.xlabel('Arsenic (observed)')
plt.ylabel('Arsenic (corrected)')
plt.xscale('log')
plt.yscale('log')
plt.show()
# histogram of arsenic concentration reductions (from adsorption)
plt.figure(indx+2)
hist = resultsAll['rAs'].hist(bins=20)
plt.title('Arsenic Concentration Changes')
plt.xlabel('Concentration Reduction (fraction)')
plt.ylabel('N')
plt.show()
# histogram of phosphate concentration reductions (from hydroxylapatite precipitation)
plt.figure(indx+3)
hist = resultsAll['rP'].hist(bins=20)
plt.title('Phosphate Concentration Changes')
plt.xlabel('Concentration Reduction (fraction)')
plt.ylabel('N')
plt.show()
print('N = ', len(resultsAll))
print('Done.')
### run script ###
N = 750 # requested size of synthetic population; may drop because of implied charge imbalances
TraceSynth(N)
|
the-stack_106_25592 | """Support for MQTT discovery."""
from __future__ import annotations
import asyncio
from collections import deque
import functools
import json
import logging
import re
import time
from homeassistant.const import CONF_DEVICE, CONF_PLATFORM
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import FlowResultType
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.loader import async_get_mqtt
from .. import mqtt
from .abbreviations import ABBREVIATIONS, DEVICE_ABBREVIATIONS
from .const import (
ATTR_DISCOVERY_HASH,
ATTR_DISCOVERY_PAYLOAD,
ATTR_DISCOVERY_TOPIC,
CONF_AVAILABILITY,
CONF_TOPIC,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
TOPIC_MATCHER = re.compile(
r"(?P<component>\w+)/(?:(?P<node_id>[a-zA-Z0-9_-]+)/)"
r"?(?P<object_id>[a-zA-Z0-9_-]+)/config"
)
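# Illustrative examples of topic tails matched by TOPIC_MATCHER (the discovery prefix
# has already been stripped by the caller); the sample topics are assumptions only:
#   "sensor/bedroom/temperature/config"  -> component="sensor", node_id="bedroom", object_id="temperature"
#   "binary_sensor/garage_door/config"   -> component="binary_sensor", node_id=None, object_id="garage_door"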
SUPPORTED_COMPONENTS = [
"alarm_control_panel",
"binary_sensor",
"button",
"camera",
"climate",
"cover",
"device_automation",
"device_tracker",
"fan",
"humidifier",
"light",
"lock",
"number",
"scene",
"siren",
"select",
"sensor",
"switch",
"tag",
"vacuum",
]
ALREADY_DISCOVERED = "mqtt_discovered_components"
PENDING_DISCOVERED = "mqtt_pending_components"
DATA_CONFIG_FLOW_LOCK = "mqtt_discovery_config_flow_lock"
DISCOVERY_UNSUBSCRIBE = "mqtt_discovery_unsubscribe"
INTEGRATION_UNSUBSCRIBE = "mqtt_integration_discovery_unsubscribe"
MQTT_DISCOVERY_UPDATED = "mqtt_discovery_updated_{}"
MQTT_DISCOVERY_NEW = "mqtt_discovery_new_{}_{}"
MQTT_DISCOVERY_DONE = "mqtt_discovery_done_{}"
LAST_DISCOVERY = "mqtt_last_discovery"
TOPIC_BASE = "~"
class MQTTConfig(dict):
"""Dummy class to allow adding attributes."""
discovery_data: dict
def clear_discovery_hash(hass: HomeAssistant, discovery_hash: tuple) -> None:
"""Clear entry in ALREADY_DISCOVERED list."""
del hass.data[ALREADY_DISCOVERED][discovery_hash]
def set_discovery_hash(hass: HomeAssistant, discovery_hash: tuple):
"""Clear entry in ALREADY_DISCOVERED list."""
hass.data[ALREADY_DISCOVERED][discovery_hash] = {}
async def async_start( # noqa: C901
hass: HomeAssistant, discovery_topic, config_entry=None
) -> None:
"""Start MQTT Discovery."""
mqtt_integrations = {}
async def async_discovery_message_received(msg):
"""Process the received message."""
hass.data[LAST_DISCOVERY] = time.time()
payload = msg.payload
topic = msg.topic
topic_trimmed = topic.replace(f"{discovery_topic}/", "", 1)
if not (match := TOPIC_MATCHER.match(topic_trimmed)):
if topic_trimmed.endswith("config"):
_LOGGER.warning(
"Received message on illegal discovery topic '%s'", topic
)
return
component, node_id, object_id = match.groups()
if component not in SUPPORTED_COMPONENTS:
_LOGGER.warning("Integration %s is not supported", component)
return
if payload:
try:
payload = json.loads(payload)
except ValueError:
_LOGGER.warning("Unable to parse JSON %s: '%s'", object_id, payload)
return
payload = MQTTConfig(payload)
for key in list(payload):
abbreviated_key = key
key = ABBREVIATIONS.get(key, key)
payload[key] = payload.pop(abbreviated_key)
if CONF_DEVICE in payload:
device = payload[CONF_DEVICE]
for key in list(device):
abbreviated_key = key
key = DEVICE_ABBREVIATIONS.get(key, key)
device[key] = device.pop(abbreviated_key)
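            # Illustrative example of the "~" (topic base) expansion handled below: a payload of
            #   {"~": "homeassistant/light/kitchen", "state_topic": "~/state"}
            # ends up with state_topic == "homeassistant/light/kitchen/state".
            # (The sample topics are assumptions, for illustration only.)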
if TOPIC_BASE in payload:
base = payload.pop(TOPIC_BASE)
for key, value in payload.items():
if isinstance(value, str) and value:
if value[0] == TOPIC_BASE and key.endswith("topic"):
payload[key] = f"{base}{value[1:]}"
if value[-1] == TOPIC_BASE and key.endswith("topic"):
payload[key] = f"{value[:-1]}{base}"
if payload.get(CONF_AVAILABILITY):
for availability_conf in cv.ensure_list(payload[CONF_AVAILABILITY]):
if not isinstance(availability_conf, dict):
continue
if topic := availability_conf.get(CONF_TOPIC):
if topic[0] == TOPIC_BASE:
availability_conf[CONF_TOPIC] = f"{base}{topic[1:]}"
if topic[-1] == TOPIC_BASE:
availability_conf[CONF_TOPIC] = f"{topic[:-1]}{base}"
# If present, the node_id will be included in the discovered object id
discovery_id = " ".join((node_id, object_id)) if node_id else object_id
discovery_hash = (component, discovery_id)
if payload:
# Attach MQTT topic to the payload, used for debug prints
setattr(payload, "__configuration_source__", f"MQTT (topic: '{topic}')")
discovery_data = {
ATTR_DISCOVERY_HASH: discovery_hash,
ATTR_DISCOVERY_PAYLOAD: payload,
ATTR_DISCOVERY_TOPIC: topic,
}
setattr(payload, "discovery_data", discovery_data)
payload[CONF_PLATFORM] = "mqtt"
if discovery_hash in hass.data[PENDING_DISCOVERED]:
pending = hass.data[PENDING_DISCOVERED][discovery_hash]["pending"]
pending.appendleft(payload)
_LOGGER.info(
"Component has already been discovered: %s %s, queuing update",
component,
discovery_id,
)
return
await async_process_discovery_payload(component, discovery_id, payload)
async def async_process_discovery_payload(component, discovery_id, payload):
"""Process the payload of a new discovery."""
_LOGGER.debug("Process discovery payload %s", payload)
discovery_hash = (component, discovery_id)
if discovery_hash in hass.data[ALREADY_DISCOVERED] or payload:
async def discovery_done(_):
pending = hass.data[PENDING_DISCOVERED][discovery_hash]["pending"]
_LOGGER.debug("Pending discovery for %s: %s", discovery_hash, pending)
if not pending:
hass.data[PENDING_DISCOVERED][discovery_hash]["unsub"]()
hass.data[PENDING_DISCOVERED].pop(discovery_hash)
else:
payload = pending.pop()
await async_process_discovery_payload(
component, discovery_id, payload
)
if discovery_hash not in hass.data[PENDING_DISCOVERED]:
hass.data[PENDING_DISCOVERED][discovery_hash] = {
"unsub": async_dispatcher_connect(
hass,
MQTT_DISCOVERY_DONE.format(discovery_hash),
discovery_done,
),
"pending": deque([]),
}
if discovery_hash in hass.data[ALREADY_DISCOVERED]:
# Dispatch update
_LOGGER.info(
"Component has already been discovered: %s %s, sending update",
component,
discovery_id,
)
async_dispatcher_send(
hass, MQTT_DISCOVERY_UPDATED.format(discovery_hash), payload
)
elif payload:
# Add component
_LOGGER.info("Found new component: %s %s", component, discovery_id)
hass.data[ALREADY_DISCOVERED][discovery_hash] = None
async_dispatcher_send(
hass, MQTT_DISCOVERY_NEW.format(component, "mqtt"), payload
)
else:
# Unhandled discovery message
async_dispatcher_send(
hass, MQTT_DISCOVERY_DONE.format(discovery_hash), None
)
hass.data[DATA_CONFIG_FLOW_LOCK] = asyncio.Lock()
hass.data[ALREADY_DISCOVERED] = {}
hass.data[PENDING_DISCOVERED] = {}
discovery_topics = [
f"{discovery_topic}/+/+/config",
f"{discovery_topic}/+/+/+/config",
]
hass.data[DISCOVERY_UNSUBSCRIBE] = await asyncio.gather(
*(
mqtt.async_subscribe(hass, topic, async_discovery_message_received, 0)
for topic in discovery_topics
)
)
hass.data[LAST_DISCOVERY] = time.time()
mqtt_integrations = await async_get_mqtt(hass)
hass.data[INTEGRATION_UNSUBSCRIBE] = {}
for (integration, topics) in mqtt_integrations.items():
async def async_integration_message_received(integration, msg):
"""Process the received message."""
key = f"{integration}_{msg.subscribed_topic}"
# Lock to prevent initiating many parallel config flows.
# Note: The lock is not intended to prevent a race, only for performance
async with hass.data[DATA_CONFIG_FLOW_LOCK]:
# Already unsubscribed
if key not in hass.data[INTEGRATION_UNSUBSCRIBE]:
return
data = mqtt.MqttServiceInfo(
topic=msg.topic,
payload=msg.payload,
qos=msg.qos,
retain=msg.retain,
subscribed_topic=msg.subscribed_topic,
timestamp=msg.timestamp,
)
result = await hass.config_entries.flow.async_init(
integration, context={"source": DOMAIN}, data=data
)
if (
result
and result["type"] == FlowResultType.ABORT
and result["reason"]
in ("already_configured", "single_instance_allowed")
):
unsub = hass.data[INTEGRATION_UNSUBSCRIBE].pop(key, None)
if unsub is None:
return
unsub()
for topic in topics:
key = f"{integration}_{topic}"
hass.data[INTEGRATION_UNSUBSCRIBE][key] = await mqtt.async_subscribe(
hass,
topic,
functools.partial(async_integration_message_received, integration),
0,
)
async def async_stop(hass: HomeAssistant) -> None:
"""Stop MQTT Discovery."""
if DISCOVERY_UNSUBSCRIBE in hass.data:
for unsub in hass.data[DISCOVERY_UNSUBSCRIBE]:
unsub()
hass.data[DISCOVERY_UNSUBSCRIBE] = []
if INTEGRATION_UNSUBSCRIBE in hass.data:
for key, unsub in list(hass.data[INTEGRATION_UNSUBSCRIBE].items()):
unsub()
hass.data[INTEGRATION_UNSUBSCRIBE].pop(key)
|
the-stack_106_25594 | import requests
import json
import time
import random
import sys
import getopt
# Some default parameters
sckey = ""  # SCKEY for "Server酱" (ServerChan) push notifications; leave empty if not needed
prefix = "御坂"
suffix = "号"
zfill_n = 0
chk_range = "1,20001"  # inclusive range (closed interval)
filename_out = "lists.txt"
# The settings below normally do not need to be changed
url = "https://passport.bilibili.com/web/generic/check/nickname"
hea = {
"Accept": "application/json, text/plain, */*",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/80.0.3987.132 Safari/537.36",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-CN,zh;q=0.9"
}
help_info = '''
-r [--range] 编号检测范围(闭区间,英文逗号分隔)
例如: --range=1,200 表示从 1 检测到 200
-p [--prefix] 名称前缀
-s [--suffix] 名称后缀
-z [--zfill] 将编号补齐的位数
例如: --zfill=5 会将 1 补齐为 00001
-k [--key] 用于 "server酱" 推送的sckey (push token)
-f [--filename] 用于设置保存结果的文件名 默认为 lists.txt
'''
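# Hedged usage example (the script filename and the argument values are illustrative
# assumptions only):
#   python check_nickname.py --range=1,200 --prefix=御坂 --suffix=号 --zfill=0 --key=SCUxxxxxxxx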
def options():
"""用于处理传入参数"""
print("")
global chk_range, prefix, suffix, zfill_n, sckey, filename_out
opts, args = getopt.getopt(sys.argv[1:], '-h-r:-p:-s:-z:-k:-f:',
['help', 'range=', 'prefix=', 'suffix=', 'zfill=', 'key=', 'filename='])
    if len(opts) < 1:  # fall back to the default parameters when no recognised command-line arguments are given
print("[*] 未检测到传入参数,采用默认格式,如 御坂2233号\n")
return 0
for opt_name, opt_value in opts:
if opt_name in ('-h', '--help'):
print("[+] Help info :\n" + help_info)
exit()
if opt_name in ('-r', '--range'):
print("[+] 范围: ", opt_value)
chk_range = str(opt_value)
continue
if opt_name in ('-p', '--prefix'):
print("[+] 前缀: ", opt_value)
prefix = str(opt_value)
continue
if opt_name in ('-s', '--suffix'):
print("[+] 后缀: ", opt_value)
suffix = str(opt_value)
continue
if opt_name in ('-z', '--zfill'):
print("[+] 补齐位数: ", opt_value)
zfill_n = int(opt_value)
continue
if opt_name in ('-k', '--key'):
sckey = str(opt_value)
print("[+] Sckey: ", sckey[:12] + "*" * (len(sckey) - 6 - 12) + sckey[(len(sckey) - 6):])
continue
if opt_name in ('-f', '--filename'):
filename_out = str(opt_value)
print("[+] 输出文件名: ", filename_out)
continue
print("")
def send_wxmsg(_sckey, _title="misaka", _context="正文"):
url_postmsg = "https://sc.ftqq.com/%s.send" % _sckey
_context = _context + "\n\n" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
data = {
"text": "%s" % _title,
"desp": "%s" % _context
}
try:
res = requests.post(url=url_postmsg, data=data)
msg_back = json.loads(res.text)
if msg_back["errmsg"] == "success":
print("消息推送成功!")
else:
print("发送可能失败 返回值:%s" % (msg_back["errmsg"]))
except Exception:
print("消息发送错误")
def check():
retry_n = 0
error_status = 0
while retry_n <= 3:
if retry_n != 0:
print("\nRetry:%d times:%d :" % (i, retry_n))
try:
res = requests.get(url=url, params=par, headers=hea, timeout=10)
except (requests.exceptions.ConnectTimeout, requests.exceptions.ReadTimeout):
print(i, "timeout")
error_status = 1
retry_n += 1
time.sleep(2)
except Exception as err_info:
print("%d Error!\n" % i, err_info)
error_status = 2
retry_n += 1
time.sleep(5)
else:
error_status = 0
result = json.loads(res.text)
if result["code"] == 0:
print(i, "available")
lst_available.append(i)
break
elif result["code"] == 2001 or result["code"] == 40014:
print(i, "unavailable")
lst_unavailable.append(i)
break
else:
print(i, "unknown")
error_status = 3
retry_n += 1
time.sleep(3)
    # After the retry loop finishes, record the final error status in the matching list
if error_status == 1:
timeout.append(i)
elif error_status == 2:
errs.append(i)
elif error_status == 3:
lst_unknown.append(i)
if (i % 20 == 0) or (i + 1 == i_end):
        # write results to the file after every 20 checks
write_result()
def write_result():
f.seek(0)
f.write("Available: " + str(lst_available) + "\nUnavailable: " + str(lst_unavailable) + "\nTimeout: " + str(
timeout) + "\nError: " + str(errs) + "\nUnknown: " + str(
lst_unknown) + "\n\nAvailable_count = %d\nUnavailable_count = %d\n" % (
len(lst_available), len(lst_unavailable)))
f.flush()
def sleep():
if i == 0:
return 0
if i % 100 == 0:
sleep_time_1 = random.randint(5, 15)
print("\n达到整百,随机暂停 %d s\n" % sleep_time_1)
time.sleep(sleep_time_1)
elif i % 10 == 0:
        time.sleep(random.uniform(0.2, 0.6))  # random float between 0.2 and 0.6
else:
time.sleep(0.02)
def loop(_start_n, _end_n):
global i, par
for i in range(_start_n, _end_n):
par = {"nickName": "%s" % (prefix + str(i) + suffix)} # request.get 参数
check()
sleep()
def loop_zfill(_start_n, _end_n):
global i, par
for i in range(_start_n, _end_n):
par = {"nickName": "%s" % (prefix + str(i).zfill(zfill_n) + suffix)} # request.get 参数
check()
sleep()
if __name__ == "__main__":
# Some vars
start_time = time.time()
options()
    i = 1  # initialise the check number
lst_available = []
lst_unavailable = []
lst_unknown = []
timeout = []
errs = []
par = {}
chk_range = chk_range.split(",")
i_start = int(chk_range[0])
i_end = int(chk_range[1]) + 1
# run
f = open("./" + filename_out, "w+", encoding="utf-8")
try:
        if zfill_n == 0:  # choose the loop variant depending on whether zero-padding is requested
loop(i_start, i_end)
else:
loop_zfill(i_start, i_end)
except KeyboardInterrupt:
print("\nRaised Control-C. Cancled!\n")
except Exception as err_info_1:
print("\nError!\n", err_info_1)
else:
total_time = time.time() - start_time
print("\n\nFinished\nTotal time: %f s\n" % total_time)
if not sckey == "":
print("Server酱推送中...")
send_wxmsg(_sckey=sckey, _title="Misaka-ID", _context="Finished.\n\nTotal time: %f s" % total_time)
f.write("\n\nType: " + prefix + r"%d" + suffix + " --zfill=%d\n" % zfill_n)
f.close()
|
the-stack_106_25595 | import atexit
from Adafruit_MotorHAT import Adafruit_MotorHAT
class Motor(object):
"""Used to update speed of the Jetbot motors.
Args:
driver: An `Adafruit_MotorHAT` instance used to control the motor.
channel: Motor channel. Left is channel 1 and right is channel 2.
alpha: Motor configuration parameter.
beta: Motor configuration parameter.
"""
def __init__(self, driver, channel, alpha=1.0, beta=0.0, *args, **kwargs):
self._driver = driver
self._motor = self._driver.getMotor(channel)
self.alpha = alpha
self.beta = beta
self.value = 0
atexit.register(self._release) # Release at exit
def update_value(self, value):
"""Sets motor value between [-1, 1]."""
self.value = value
mapped_value = int(255.0 * (self.alpha * value + self.beta))
speed = min(max(abs(mapped_value), 0), 255)
self._motor.setSpeed(speed)
if mapped_value < 0:
self._motor.run(Adafruit_MotorHAT.FORWARD)
else:
self._motor.run(Adafruit_MotorHAT.BACKWARD)
def _release(self):
"""Stops motor by releasing control."""
self._motor.run(Adafruit_MotorHAT.RELEASE)
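
# Hedged usage sketch (not part of this module): assumes an Adafruit_MotorHAT on the
# default I2C address and the channel convention from the docstring above
# (1 = left, 2 = right).
#
#   driver = Adafruit_MotorHAT()
#   left_motor = Motor(driver, channel=1)
#   left_motor.update_value(0.5)   # setSpeed(127); runs BACKWARD per the mapping above
#   left_motor.update_value(0.0)   # setSpeed(0)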
|
the-stack_106_25596 | from django.db import models
from .behaviors.models import Timestampable, Taggable, Versionable
from django.utils.translation import ugettext_lazy as _
# Create your models here.
class PopoloDateTimeField(models.DateTimeField):
"""Converting datetime to popolo."""
def get_popolo_value(self, value):
        return value.strftime('%Y-%m-%d')
class Person(Timestampable, models.Model):
"""Model for all people that are somehow connected to the parlament."""
name = models.CharField(_('name'),
max_length=128,
help_text=_('A person\'s preferred full name'))
name_parser = models.CharField(max_length=500,
help_text='Name for parser.',
blank=True, null=True)
classification = models.CharField(_('classification'),
max_length=128,
help_text='Classification for sorting purposes.',
blank=True,
null=True)
family_name = models.CharField(_('family name'),
max_length=128,
blank=True, null=True,
help_text=_('One or more family names'))
given_name = models.CharField(_('given name'),
max_length=128,
blank=True, null=True,
help_text=_('One or more primary given names'))
additional_name = models.CharField(_('additional name'),
max_length=128,
blank=True, null=True,
help_text=_('One or more secondary given names'))
honorific_prefix = models.CharField(_('honorific prefix'),
max_length=128,
blank=True, null=True,
help_text=_('One or more honorifics preceding a person\'s name'))
honorific_suffix = models.CharField(_('honorific suffix'),
max_length=128,
blank=True, null=True,
help_text=_('One or more honorifics following a person\'s name'))
patronymic_name = models.CharField(_('patronymic name'),
max_length=128,
blank=True, null=True,
help_text=_('One or more patronymic names'))
sort_name = models.CharField(_('sort name'),
max_length=128,
blank=True, null=True,
help_text=_('A name to use in an lexicographically ordered list'))
previous_occupation = models.TextField(_('previous occupation'),
blank=True, null=True,
help_text=_('The person\'s previous occupation'))
education = models.TextField(_('education'),
blank=True, null=True,
help_text=_('The person\'s education'))
education_level = models.TextField(_('education level'),
blank=True, null=True,
help_text=_('The person\'s education level'))
mandates = models.IntegerField(_('mandates'),
blank=True, null=True,
help_text=_('Person\'s number of mandates, including the current one'))
email = models.EmailField(_('email'),
blank=True, null=True,
help_text=_('A preferred email address'))
gender = models.CharField(_('gender'),
max_length=128,
blank=True, null=True,
help_text=_('A gender'))
birth_date = PopoloDateTimeField(_('date of birth'),
blank=True,
null=True,
help_text=_('A date of birth'))
death_date = PopoloDateTimeField(_('date of death'),
blank=True,
null=True,
help_text=_('A date of death'))
summary = models.CharField(_('summary'),
max_length=512,
blank=True, null=True,
help_text=_('A one-line account of a person\'s life'))
biography = models.TextField(_('biography'),
blank=True, null=True,
help_text=_('An extended account of a person\'s life'))
image = models.URLField(_('image'),
blank=True, null=True,
help_text=_('A URL of a head shot'))
gov_url = models.ForeignKey('Link',
blank=True, null=True,
help_text='URL to gov website profile',
related_name='gov_link')
gov_id = models.CharField(_('gov_id'),
max_length=255,
blank=True, null=True,
help_text='gov website id for the scraper')
gov_picture_url = models.URLField(_('gov image url'),
blank=True, null=True,
help_text=_('URL to gov website pic'))
voters = models.IntegerField(_('voters'),
blank=True,
null=True,
help_text='number of votes cast for this person in their district')
active = models.BooleanField(_('active'),
default=True,
help_text='a generic active or not toggle')
image = models.ImageField(upload_to='images/', height_field=None, width_field=None, max_length=1000, null=True, blank=True)
def __str__(self):
return self.name + " " + str(self.id)
class Organization(Timestampable, models.Model):
"""A group with a common purpose or reason
for existence that goes beyond the set of people belonging to it.
"""
name = models.TextField(_('name'),
help_text=_('A primary name, e.g. a legally recognized name'))
name_parser = models.CharField(max_length=500,
help_text='Name for parser.',
blank=True, null=True)
# array of items referencing "http://popoloproject.com/schemas/other_name.json#"
acronym = models.CharField(_('acronym'),
blank=True,
null=True,
max_length=128,
help_text=_('Organization acronym'))
gov_id = models.TextField(_('Gov website ID'),
blank=True, null=True,
help_text=_('Gov website ID'))
classification = models.CharField(_('classification'),
max_length=128,
blank=True, null=True,
help_text=('An organization category, e.g. committee'))
# reference to "http://popoloproject.com/schemas/organization.json#"
parent = models.ForeignKey('Organization',
blank=True, null=True,
related_name='children',
help_text=_('The organization that contains this organization'))
dissolution_date = PopoloDateTimeField(blank=True, null=True,
help_text=_('A date of dissolution'))
founding_date = PopoloDateTimeField(blank=True, null=True,
help_text=_('A date of founding'))
# array of items referencing "http://popoloproject.com/schemas/contact_detail.json#"
description = models.TextField(blank=True, null=True,
help_text='Organization description')
is_coalition = models.IntegerField(blank=True, null=True,
help_text='1 if coalition, -1 if not, 0 if it does not apply')
url_name = 'organization-detail'
def __str__(self):
return self.name + " " + str(self.id)
class Speech(Versionable, Timestampable, models.Model):
"""Speeches that happened in parlament."""
speaker = models.ForeignKey('Person',
help_text='Person making the speech')
party = models.ForeignKey('Organization',
help_text='The party of the person making the speech',
null=True,
blank=True)
content = models.TextField(help_text='Words spoken')
video_id = models.TextField(help_text='Video id', blank=True, null=True)
order = models.IntegerField(blank=True, null=True,
help_text='Order of speech')
session = models.ForeignKey('Session',
blank=True, null=True,
help_text='Speech session',
related_name='speeches')
start_time = PopoloDateTimeField(blank=True, null=True,
help_text='Start time')
end_time = PopoloDateTimeField(blank=True, null=True,
help_text='End time')
start_time_stamp = models.BigIntegerField(blank=True, null=True,
help_text='Start time stamp')
end_time_stamp = models.BigIntegerField(blank=True, null=True,
help_text='End time stamp')
def __str__(self):
return self.speaker.name + " " + str(self.id)
class Session(Timestampable, models.Model):
"""Sessions that happened in parliament."""
mandate = models.ForeignKey('Mandate',
blank=True, null=True,
help_text='The mandate of this milestone.')
name = models.CharField(max_length=255,
blank=True, null=True,
help_text='Session name')
gov_id = models.CharField(max_length=255,
blank=True, null=True,
help_text='Gov website ID.')
start_time = PopoloDateTimeField(blank=True, null=True,
help_text='Start time')
end_time = PopoloDateTimeField(blank=True, null=True,
help_text='End time')
organization = models.ForeignKey('Organization',
blank=True, null=True,
related_name='session',
help_text='The organization in session')
classification = models.CharField(max_length=128,
blank=True, null=True,
help_text='Session classification')
in_review = models.BooleanField(default=False,
help_text='Is session in review?')
def __str__(self):
return self.name + ", " + self.organization.name
class Link(Timestampable, models.Model):
"""
A URL
    # max_length increased to account for lengthy Camera's URLs
"""
url = models.URLField(_('url'),
max_length=350,
help_text=_('A URL'))
note = models.CharField(_('note'),
max_length=256,
blank=True, null=True,
help_text=_('A note, e.g. \'Wikipedia page\''),)
name = models.TextField(blank=True, null=True)
date = models.DateField(blank=True, null=True)
session = models.ForeignKey('Session', blank=True, null=True)
organization = models.ForeignKey('Organization',
blank=True,
null=True,
help_text='The organization of this link.',
related_name='links')
person = models.ForeignKey('Person',
blank=True, null=True,
help_text='The person of this link.')
def __str__(self):
return self.url
class Mandate(models.Model):
"""Mandate"""
description = models.TextField(blank=True,
null=True)
def __str__(self):
return self.description |
the-stack_106_25597 | from argparse import ArgumentParser
from functools import wraps
import os
import numpy as np
import pandas as pd
if __name__ == "__main__":
__register = list()
parser = ArgumentParser()
parser.add_argument("folder", type=str, help="Where to save the data")
parser.add_argument("--delimiter", type=str, help="csv delimiter", default=",")
args = parser.parse_args()
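    # Hedged usage example (the script name below is an assumption):
    #   python make_synthetic_inputs.py ./data --delimiter=","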
def save_csv(fname, df, fmt, header=True, index=False):
return df.to_csv(
os.path.join(args.folder, fname),
sep=args.delimiter,
float_format=fmt,
header=header,
index=index,
)
def save(exp_name):
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
"""This enforces saving the cols and index in consistent order"""
mobility, initial_values, rt = func(*args, **kwargs)
region_names = initial_values.columns
save_csv(
f"{exp_name}-mobility.csv",
mobility[region_names].loc[region_names],
fmt="%d",
header=False,
)
save_csv(
f"{exp_name}-initial-values.csv",
initial_values[region_names],
fmt="%d",
header=False,
)
save_csv(
f"{exp_name}-rt.csv", rt[region_names], fmt="%.5f", header=False
)
return
__register.append(inner)
return inner
return wrapper
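    # For example (illustrative only): with args.folder == "out" and exp_name == "basic",
    # the wrapped function writes out/basic-mobility.csv, out/basic-initial-values.csv
    # and out/basic-rt.csv, with region columns kept in a consistent order.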
@save(exp_name="basic")
def basic_example():
region_names = "A B C D E".split()
mobility = pd.DataFrame(
np.ones((5, 5)), index=region_names, columns=region_names
)
s = [400, 1000, 1000, 1000, 1000]
e = [500, 0, 0, 0, 0]
i = [100, 0, 0, 0, 0]
r = [0, 0, 0, 0, 0]
initial_values = pd.DataFrame([s, e, i, r], columns=region_names)
rt = pd.DataFrame(np.full((1000, 5), 2.8), columns=region_names)
return mobility, initial_values, rt
@save(exp_name="iid")
def iid():
region_names = "A B C D E".split()
mobility = pd.DataFrame(
np.zeros((5, 5)), index=region_names, columns=region_names
)
s = [800, 800, 800, 800, 800]
e = [100, 100, 100, 100, 100]
i = [100, 100, 100, 100, 100]
r = [0, 0, 0, 0, 0]
initial_values = pd.DataFrame([s, e, i, r], columns=region_names)
rt = pd.DataFrame(np.full((10000, 5), 2.8), columns=region_names)
return mobility, initial_values, rt
@save(exp_name="identical-uniform-movement")
def identical_uniform_movement():
region_names = "A B C D E".split()
mobility = pd.DataFrame(
np.ones((5, 5)), index=region_names, columns=region_names
)
s = [800, 800, 800, 800, 800]
e = [100, 100, 100, 100, 100]
i = [100, 100, 100, 100, 100]
r = [0, 0, 0, 0, 0]
initial_values = pd.DataFrame([s, e, i, r], columns=region_names)
rt = pd.DataFrame(np.full((10000, 5), 2.8), columns=region_names)
return mobility, initial_values, rt
@save(exp_name="single")
def single_region():
region_names = ["A"]
mobility = pd.DataFrame(
np.ones((1, 1)), index=region_names, columns=region_names
)
s = [800]
e = [100]
i = [100]
r = [0]
initial_values = pd.DataFrame([s, e, i, r], columns=region_names)
rt = pd.DataFrame(np.full((10000, 1), 2.8), columns=region_names)
return mobility, initial_values, rt
@save(exp_name="gostic-single")
def gostic_single_region():
"""
In the gostic paper it seems that they have some continuous decrease in
R, rather than these abrupt changes --- is this due to fewer people
being susceptible?
There is a comment in their code simulation.R that describes a variable called
CONTINUOUS, but I can't find it being used :s
"""
region_names = ["A"]
mobility = pd.DataFrame(
np.ones((1, 1)), index=region_names, columns=region_names
)
s = [2e6 - 60]
e = [0]
i = [60]
r = [0]
initial_values = pd.DataFrame([s, e, i, r], columns=region_names)
rt = pd.DataFrame(
np.interp(
np.arange(300),
[0, 60, 67, 90, 97, 300],
[2.0, 2.0, 0.8, 0.8, 1.15, 1.15],
),
columns=region_names,
)
return mobility, initial_values, rt
@save(exp_name="metapop")
def metapop():
region_names = [
"city",
"town-a",
"town-b",
"village-a",
"village-b",
"village-c",
]
m = np.array(
[
[0, 4000, 3500, 1500, 500, 100],
[0, 0, 5000, 1000, 100, 10],
[0, 0, 0, 100, 100, 10],
[0, 0, 0, 0, 100, 50],
[0, 0, 0, 0, 0, 50],
[0, 0, 0, 0, 0, 0],
]
)
m = np.triu(m)
mobility = pd.DataFrame(m + m.T, index=region_names, columns=region_names,)
s = [2e6 - 60, 200000, 150000, 60000, 10000, 1000]
e = [0, 0, 0, 0, 0, 0]
i = [60, 0, 0, 0, 0, 0]
r = [0, 0, 0, 0, 0, 0]
initial_values = pd.DataFrame([s, e, i, r], columns=region_names)
city_rt = np.interp(
np.arange(300), [0, 60, 67, 90, 97, 300], [2.0, 2.0, 0.8, 0.8, 1.15, 1.15],
)
town_rt = city_rt * 0.75
village_rt = city_rt * 0.5
rts = np.array(
[
city_rt,
town_rt,
town_rt,
village_rt,
village_rt,
np.full(city_rt.shape, 0.9),
]
).T
rt = pd.DataFrame(rts, columns=region_names,)
return mobility, initial_values, rt
[f() for f in __register]
# yw town and village thing
# put in sensible parameters for stuff from reading paper
|
the-stack_106_25598 | # coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2017,2018
import inspect
import logging
import os
import pkg_resources
import sys
import streamsx
from pkgutil import extend_path
_TRACE = logging.getLogger('streamsx.runtime')
def _add_to_sys_path(dir_):
if _TRACE.isEnabledFor(logging.DEBUG):
_TRACE.debug('Potential addition to sys.path: %s EXISTS %s', dir_, str(os.path.isdir(dir_)))
if os.path.isdir(dir_) and dir_ not in sys.path and os.listdir(dir_):
_TRACE.debug('Inserting as first entry to sys.path: %s', dir_)
sys.path.insert(0, dir_)
pkg_resources.working_set.add_entry(dir_)
# In case a streamsx module (e.g. streamsx.bm)
# is included in the additional code
if os.path.isdir(os.path.join(dir_, 'streamsx')):
streamsx.__path__ = extend_path(streamsx.__path__, streamsx.__name__)
return True
return False
def _setup_operator(tk_dir):
pydir = os.path.join(tk_dir, 'opt', 'python')
changed = _add_to_sys_path(os.path.join(pydir, 'modules'))
changed = _add_to_sys_path(os.path.join(pydir, 'packages')) or changed
if changed and _TRACE.isEnabledFor(logging.INFO):
_TRACE.info('Updated sys.path: %s', str(sys.path))
class _Setup(object):
_DONE = False
@staticmethod
def _setup(out_dir):
if _Setup._DONE:
return
_Setup._DONE = True
bundle_site_dir = _Setup._add_output_packages(out_dir)
_Setup._trace_packages(bundle_site_dir)
@staticmethod
def _add_output_packages(out_dir):
py_dir = _Setup._pip_base_dir(out_dir)
vdir = 'python' + str(sys.version_info.major) + '.' + str(sys.version_info.minor)
site_pkg = os.path.join(py_dir, 'lib', vdir, 'site-packages')
_add_to_sys_path(site_pkg)
return site_pkg
@staticmethod
def _pip_base_dir(out_dir):
"""Base Python directory for pip within the output directory"""
return os.path.join(out_dir, 'etc', 'streamsx.topology', 'python')
@staticmethod
def _trace_packages(bundle_site_dir):
if not _TRACE.isEnabledFor(logging.INFO):
return
_TRACE.info('sys.path: %s', str(sys.path))
dists = list(pkg_resources.working_set)
dists.sort(key=lambda d : d.key)
_TRACE.info('*** Python packages ***')
for pkg_ in dists:
_TRACE.info(repr(pkg_))
_TRACE.info('*** End Python packages ***')
def _setup(out_dir):
_Setup._setup(out_dir)
# Application logic runs within an operator within a
# statement context, effectively a with statement.
# This means that for logic that has __enter__ and __exit__ methods
# (technically its type has the methods)
#
# This allows the logic to create and dispose of objects that
# cannot be pickled such as open files, custom metrics etc.
#
# __enter__ is called:
# a) when the operator starts up before tuple processing
# b) when the operator resets, to inital state or from a checkpoint
# enter is called on the new instance.
#
# __exit__ is called:
# a) when the operator shuts down
# b) when an exception occurs in tuple processing
# c) when the operator resets (on the current instance)
#
# Note: __exit__ is only called if __enter__ was called previously
# Note: in b) if __exit__ returns a true value then the exception is suppressed
#
# Two attributes are set in the object being wrapped to manage context:
#
# _streamsx_ec_context : Boolean indicating if the object has context methods
# _streamsx_ec_entered : Has __enter__ been called on this instance.
#
# Note that the top-level Python object seen by the C++ primitive operator
# maintains these attributes and is responsible for calling __enter__
# on any wrapped logic.
#
# For topology:
# The top-level object is an instance of
# streamsx.topology.runtime._FunctionCallable wrapping the
# application logic.
#
# For SPL primitives:
# Source operator uses streamsx.spl.runtime._SourceIterator
# that wraps the iterable maintains a reference to the iterator.
#
# Otherwise the top-level object is an instance of sub-class
# of the application logic.
#
# The C++ operator calls
#
# _call_enter() - to enter the object into the context
# _call_exit() - to exit the object from the context.
#
# These methods are responsible for seeing if the underlying
# application logic's methods are called.
def _has_context_methods(cls):
return hasattr(cls, '__enter__') and hasattr(cls, '__exit__')
def _call_enter(obj, opc):
if obj._streamsx_ec_context or obj._streamsx_ec_cls:
obj._streamsx_ec_opc = opc
if obj._streamsx_ec_context:
obj.__enter__()
obj._streamsx_ec_entered = True
def _call_exit(obj, exc_info=None):
if obj._streamsx_ec_context and obj._streamsx_ec_entered:
try:
if exc_info is None:
ev = obj.__exit__(None,None,None)
else:
exc_type = exc_info[0]
exc_value = exc_info[1] if len(exc_info) >=2 else None
traceback = exc_info[2] if len(exc_info) >=3 else None
ev = obj.__exit__(exc_type, exc_value, traceback)
if ev and exc_type is not None:
# Remain in the context
return ev
obj._streamsx_ec_entered = False
obj._streamsx_ec_opc = None
return ev
except:
obj._streamsx_ec_entered = False
obj._streamsx_ec_opc = None
raise
obj._streamsx_ec_opc = None
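# A minimal sketch of the lifecycle described above, with a hypothetical logic
# class (the names below are examples, not part of the runtime API):
#
#     class MyLogic(object):
#         def __enter__(self):
#             self._fh = open('state.txt', 'w')     # resource that cannot be pickled
#         def __exit__(self, exc_type, exc_value, traceback):
#             self._fh.close()
#             return False                          # do not suppress exceptions
#
#     logic = MyLogic()
#     logic._streamsx_ec_cls = True
#     logic._streamsx_ec_context = _has_context_methods(type(logic))
#     logic._streamsx_ec_entered = False
#     _call_enter(logic, opc)     # opc: operator context supplied by the C++ operator
#     ...                         # tuple processing
#     _call_exit(logic)           # or _call_exit(logic, sys.exc_info()) on an error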
# A _WrappedInstance is used to wrap the functional logic
# passed into a function like map when declaring the graph.
# The wrapping occurs at topology declaration time and the
# instance of _WrappedInstance becomes the "users" logic
# that is passed in as the functional operator's parameter.
#
# If no_context is true then it's guaranteed that
# callable_ does not have __enter__, __exit__ methods
class _WrapOpLogic(object):
def __init__(self, callable_, no_context=None):
self._callable = callable_
is_cls = not inspect.isfunction(callable_)
is_cls = is_cls and not inspect.isbuiltin(callable_)
is_cls = is_cls and not inspect.isclass(callable_)
self._streamsx_ec_cls = is_cls
if is_cls and not no_context:
if hasattr(callable_, '_streamsx_ec_context'):
self._streamsx_ec_context = callable_._streamsx_ec_context
else:
self._streamsx_ec_context = streamsx._streams._runtime._has_context_methods(type(callable_))
else:
self._streamsx_ec_context = False
self._streamsx_ec_entered = False
def __enter__(self):
if self._streamsx_ec_context or self._streamsx_ec_cls:
self._callable._streamsx_ec_opc = self._streamsx_ec_opc
if self._streamsx_ec_context:
self._callable.__enter__()
def __exit__(self, exc_type, exc_value, traceback):
if self._streamsx_ec_context:
ev = self._callable.__exit__(exc_type, exc_value, traceback)
if not ev:
self._streamsx_ec_opc = None
return ev
|
the-stack_106_25599 | #!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'royalbritishlegion_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
try:
specifiers.append(s[percent+1])
except:
print('Failed to get specifier')
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# If both numeric format specifiers and "others" are used, assume we're dealing
# with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.html#arg)
# only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing
# any kind of escaping that would be necessary for strprintf. Without this, this function
# would wrongly detect '%)' as a printf format specifier.
if numeric:
other = []
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
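# For illustration, the two detection paths described in the comments above
# (the message strings are hypothetical):
#
#     >>> split_format_specifiers(find_format_specifiers('Copied %d of %s'))
#     (set(), ['d', 's'])                # strprintf-style: order is significant
#     >>> split_format_specifiers(find_format_specifiers('(percentage: %1%)'))
#     ({'1'}, [])                        # Qt-style: the stray '%)' is dropped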
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
#assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
        parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood, only 'utf-8' is
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
# fetch_all_translations()
postprocess_translations()
|
the-stack_106_25600 | #! /usr/bin/env python
# encoding: utf-8
import os
import TaskGen,Task,Utils
from TaskGen import taskgen,before,extension
nasm_str='${NASM} ${NASM_FLAGS} ${NASM_INCLUDES} ${SRC} -o ${TGT}'
EXT_NASM=['.s','.S','.asm','.ASM','.spp','.SPP']
def apply_nasm_vars(self):
if hasattr(self,'nasm_flags'):
for flag in self.to_list(self.nasm_flags):
self.env.append_value('NASM_FLAGS',flag)
if hasattr(self,'includes'):
for inc in self.to_list(self.includes):
node=self.path.find_dir(inc)
if not node:
raise Utils.WafError('cannot find the dir'+inc)
self.env.append_value('NASM_INCLUDES','-I%s'%node.srcpath(self.env))
self.env.append_value('NASM_INCLUDES','-I%s'%node.bldpath(self.env))
def nasm_file(self,node):
try:obj_ext=self.obj_ext
except AttributeError:obj_ext='_%d.o'%self.idx
task=self.create_task('nasm',node,node.change_ext(obj_ext))
self.compiled_tasks.append(task)
self.meths.append('apply_nasm_vars')
Task.simple_task_type('nasm',nasm_str,color='BLUE',ext_out='.o',shell=False)
def detect(conf):
nasm=conf.find_program(['nasm','yasm'],var='NASM',mandatory=True)
before('apply_link')(apply_nasm_vars)
extension(EXT_NASM)(nasm_file)
|
the-stack_106_25602 | import functools
import logging
import torch
import math
import numpy as np
logger = logging.getLogger(__name__)
def get_device_of(tensor):
"""This function returns the device of the tensor
refer to https://github.com/allenai/allennlp/blob/master/allennlp/nn/util.py
Arguments:
tensor {tensor} -- tensor
Returns:
int -- device
"""
if not tensor.is_cuda:
return -1
else:
return tensor.get_device()
def get_range_vector(size, device):
"""This function returns a range vector with the desired size, starting at 0
the CUDA implementation is meant to avoid copy data from CPU to GPU
refer to https://github.com/allenai/allennlp/blob/master/allennlp/nn/util.py
Arguments:
size {int} -- the size of range
device {int} -- device
Returns:
torch.Tensor -- range vector
"""
if device > -1:
return torch.cuda.LongTensor(size, device=device).fill_(1).cumsum(0) - 1
else:
return torch.arange(0, size, dtype=torch.long)
def flatten_and_batch_shift_indices(indices, sequence_length):
"""This function returns a vector that correctly indexes into the flattened target,
the sequence length of the target must be provided to compute the appropriate offsets.
refer to https://github.com/allenai/allennlp/blob/master/allennlp/nn/util.py
Arguments:
indices {tensor} -- index tensor
sequence_length {int} -- sequence length
Returns:
tensor -- offset index tensor
"""
# Shape: (batch_size)
if torch.max(indices) >= sequence_length or torch.min(indices) < 0:
raise RuntimeError("All elements in indices should be in range (0, {})".format(sequence_length - 1))
offsets = get_range_vector(indices.size(0), get_device_of(indices)) * sequence_length
for _ in range(len(indices.size()) - 1):
offsets = offsets.unsqueeze(1)
# Shape: (batch_size, d_1, ..., d_n)
offset_indices = indices + offsets
# Shape: (batch_size * d_1 * ... * d_n)
offset_indices = offset_indices.view(-1)
return offset_indices
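# Worked example (hypothetical values):
#
#     >>> indices = torch.tensor([[1, 2], [0, 1]])   # (batch_size=2, d_1=2)
#     >>> flatten_and_batch_shift_indices(indices, sequence_length=3)
#     tensor([1, 2, 3, 4])   # the second row is shifted by 1 * sequence_length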
def batched_index_select(target, indices, flattened_indices=None):
"""This function returns selected values in the target with respect to the provided indices,
which have size ``(batch_size, d_1, ..., d_n, embedding_size)``
refer to https://github.com/allenai/allennlp/blob/master/allennlp/nn/util.py
Arguments:
target {torch.Tensor} -- target tensor
indices {torch.LongTensor} -- index tensor
Keyword Arguments:
flattened_indices {Optional[torch.LongTensor]} -- flattened index tensor (default: {None})
Returns:
torch.Tensor -- selected tensor
"""
if flattened_indices is None:
# Shape: (batch_size * d_1 * ... * d_n)
flattened_indices = flatten_and_batch_shift_indices(indices, target.size(1))
# Shape: (batch_size * sequence_length, embedding_size)
flattened_target = target.view(-1, target.size(-1))
# Shape: (batch_size * d_1 * ... * d_n, embedding_size)
flattened_selected = flattened_target.index_select(0, flattened_indices)
selected_shape = list(indices.size()) + [target.size(-1)]
# Shape: (batch_size, d_1, ..., d_n, embedding_size)
selected_targets = flattened_selected.view(*selected_shape)
return selected_targets
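# Continuing the hypothetical example above:
#
#     >>> target = torch.randn(2, 3, 5)              # (batch, seq_len, embedding)
#     >>> indices = torch.tensor([[1, 2], [0, 1]])
#     >>> batched_index_select(target, indices).shape
#     torch.Size([2, 2, 5])   # entry [b, j] equals target[b, indices[b, j]]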
def get_padding_vector(size, dtype, device):
"""This function initializes padding unit
Arguments:
size {int} -- padding unit size
dtype {torch.dtype} -- dtype
device {int} -- device = -1 if cpu, device >= 0 if gpu
Returns:
tensor -- padding tensor
"""
pad = torch.zeros(size, dtype=dtype)
if device > -1:
pad = pad.cuda(device=device, non_blocking=True)
return pad
def array2tensor(array, dtype, device):
"""This function transforms numpy array to tensor
Arguments:
array {numpy.array} -- numpy array
dtype {torch.dtype} -- torch dtype
device {int} -- device = -1 if cpu, device >= 0 if gpu
Returns:
tensor -- tensor
"""
tensor = torch.as_tensor(array, dtype=dtype)
if device > -1:
tensor = tensor.cuda(device=device, non_blocking=True)
return tensor
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
refer to: https://github.com/huggingface/pytorch-transformers/blob/master/pytorch_transformers/modeling_bert.py
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
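# Sketch of the tanh-based approximation quoted in the docstring above; the two
# variants agree closely for inputs of moderate magnitude:
#
#     def gelu_tanh_approx(x):
#         return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi)
#                                            * (x + 0.044715 * torch.pow(x, 3))))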
def pad_vecs(vecs, padding_size, dtype, device):
"""This function pads vectors for batch
Arguments:
vecs {list} -- vector list
padding_size {int} -- padding dims
dtype {torch.dtype} -- dtype
device {int} -- device = -1 if cpu, device >= 0 if gpu
Returns:
tensor -- padded vectors
"""
max_length = max(len(vec) for vec in vecs)
if max_length == 0:
pad_vecs = torch.cat([get_padding_vector((1, padding_size), dtype, device).unsqueeze(0) for _ in vecs], 0)
return pad_vecs # , mask_vecs
pad_vecs = []
# mask_vecs = []
for vec in vecs:
pad_vec = torch.cat(vec + [get_padding_vector((1, padding_size), dtype, device)] * (max_length - len(vec)),
0).unsqueeze(0)
assert pad_vec.size() == (1, max_length, padding_size), "the size of pad vector is not correct"
pad_vecs.append(pad_vec)
return torch.cat(pad_vecs, 0)
def get_bilstm_minus(batch_seq_encoder_repr, span_list, seq_lens):
"""This function gets span representation using bilstm minus
Arguments:
batch_seq_encoder_repr {list} -- batch sequence encoder representation
span_list {list} -- span list
seq_lens {list} -- sequence length list
Returns:
tensor -- span representation vector
"""
assert len(batch_seq_encoder_repr) == len(
span_list), "the length of batch seq encoder repr is not equal to span list's length"
assert len(span_list) == len(seq_lens), "the length of span list is not equal to batch seq lens's length"
hidden_size = batch_seq_encoder_repr.size(-1)
span_vecs = []
for seq_encoder_repr, (s, e), seq_len in zip(batch_seq_encoder_repr, span_list, seq_lens):
rnn_output = seq_encoder_repr[:seq_len]
forward_rnn_output, backward_rnn_output = rnn_output.split(hidden_size // 2, 1)
forward_span_vec = get_forward_segment(forward_rnn_output, s, e, get_device_of(forward_rnn_output))
backward_span_vec = get_backward_segment(backward_rnn_output, s, e, get_device_of(backward_rnn_output))
span_vec = torch.cat([forward_span_vec, backward_span_vec], 0).unsqueeze(0)
span_vecs.append(span_vec)
return torch.cat(span_vecs, 0)
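# The "LSTM-minus" span feature built above, written out for one span (s, e)
# with h_f / h_b the forward / backward halves of the BiLSTM output
# (see get_forward_segment / get_backward_segment below):
#
#     span_forward  = h_f[e - 1] - h_f[s - 1]   # just h_f[e - 1] when s == 0
#     span_backward = h_b[s] - h_b[e]           # just h_b[s] when e == seq_len
#     span_repr     = torch.cat([span_forward, span_backward], 0)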
def get_forward_segment(forward_rnn_output, s, e, device):
"""This function gets span representaion in forward rnn
Arguments:
forward_rnn_output {tensor} -- forward rnn output
s {int} -- span start
e {int} -- span end
device {int} -- device
Returns:
        tensor -- span representation vector
"""
_, hidden_size = forward_rnn_output.size()
if s >= e:
vec = torch.zeros(hidden_size, dtype=forward_rnn_output.dtype)
if device > -1:
vec = vec.cuda(device=device, non_blocking=True)
return vec
if s == 0:
return forward_rnn_output[e - 1]
return forward_rnn_output[e - 1] - forward_rnn_output[s - 1]
def get_backward_segment(backward_rnn_output, s, e, device):
"""This function gets span representaion in backward rnn
Arguments:
forward_rnn_output {tensor} -- backward rnn output
s {int} -- span start
e {int} -- span end
device {int} -- device
Returns:
        tensor -- span representation vector
"""
seq_len, hidden_size = backward_rnn_output.size()
if s >= e:
vec = torch.zeros(hidden_size, dtype=backward_rnn_output.dtype)
if device > -1:
vec = vec.cuda(device=device, non_blocking=True)
return vec
if e == seq_len:
return backward_rnn_output[s]
return backward_rnn_output[s] - backward_rnn_output[e]
def get_dist_vecs(span_list, max_sent_len, device):
"""This function gets distance embedding
Arguments:
span_list {list} -- span list
Returns:
tensor -- distance embedding vector
"""
dist_vecs = []
for s, e in span_list:
assert s <= e, "span start is greater than end"
vec = torch.Tensor(np.eye(max_sent_len)[e - s])
if device > -1:
vec = vec.cuda(device=device, non_blocking=True)
dist_vecs.append(vec)
return torch.stack(dist_vecs)
def get_conv_vecs(batch_token_repr, span_list, span_batch_size, conv_layer):
"""This function gets span vector representation through convolution layer
Arguments:
batch_token_repr {list} -- batch token representation
span_list {list} -- span list
        span_batch_size {int} -- span convolution batch size
conv_layer {nn.Module} -- convolution layer
Returns:
tensor -- conv vectors
"""
assert len(batch_token_repr) == len(span_list), "the length of batch token repr is not equal to span list's length"
span_vecs = []
for token_repr, (s, e) in zip(batch_token_repr, span_list):
if s == e:
span_vecs.append([])
continue
span_vecs.append(list(token_repr[s:e].split(1)))
span_conv_vecs = []
for id in range(0, len(span_vecs), span_batch_size):
span_pad_vecs = pad_vecs(span_vecs[id:id + span_batch_size], conv_layer.get_input_dims(),
batch_token_repr[0].dtype, get_device_of(batch_token_repr[0]))
span_conv_vecs.append(conv_layer(span_pad_vecs))
return torch.cat(span_conv_vecs, dim=0)
def get_n_trainable_parameters(model):
"""This function calculates the number of trainable parameters
of the model
Arguments:
model {nn.Module} -- model
Returns:
int -- the number of trainable parameters of the model
"""
cnt = 0
for param in list(model.parameters()):
if param.requires_grad:
cnt += functools.reduce(lambda x, y: x * y, list(param.size()), 1)
return cnt
def load_weight_from_pretrained_model(model, pretrained_state_dict, prefix=""):
"""load_weight_from_pretrained_model This function loads weight from pretrained model.
Arguments:
model {nn.Module} -- model
pretrained_state_dict {dict} -- state dict of pretrained model
Keyword Arguments:
prefix {str} -- prefix for pretrained model (default: {""})
"""
model_state_dict = model.state_dict()
filtered_state_dict = {}
for k, v in model_state_dict.items():
if 'decoder' in k:
continue
k = k.split('.')
for candi_name in ['.'.join(k), '.'.join(k[1:]), '.'.join(k[2:])]:
if candi_name in pretrained_state_dict and v.size() == pretrained_state_dict[candi_name].size():
filtered_state_dict['.'.join(k)] = pretrained_state_dict[candi_name]
break
candi_name = prefix + candi_name
if candi_name in pretrained_state_dict and v.size() == pretrained_state_dict[candi_name].size():
filtered_state_dict['.'.join(k)] = pretrained_state_dict[candi_name]
break
logger.info("Load weights parameters:")
for name in filtered_state_dict:
logger.info(name)
model_state_dict.update(filtered_state_dict)
model.load_state_dict(model_state_dict)
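# Hypothetical usage (the checkpoint path and prefix below are placeholders):
#
#     pretrained_state_dict = torch.load('pretrained.bin', map_location='cpu')
#     load_weight_from_pretrained_model(model, pretrained_state_dict, prefix='bert.')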
def clone_weights(first_module, second_module):
"""This function clones(ties) weights from first module to second module
refers to: https://huggingface.co/transformers/v1.2.0/_modules/pytorch_transformers/modeling_utils.html#PreTrainedModel
Arguments:
first_module {nn.Module} -- first module
second_module {nn.Module} -- second module
"""
first_module.weight = second_module.weight
if hasattr(first_module, 'bias') and first_module.bias is not None:
first_module.bias.data = torch.nn.functional.pad(first_module.bias.data,
(0, first_module.weight.shape[0] - first_module.bias.shape[0]),
'constant', 0)
|
the-stack_106_25603 | import logging
from typing import List, Optional, TextIO
import numpy as np
import numpy.typing as npt
from ..cli import run_with_file_argument
from ..io_utils import get_lines
logger = logging.getLogger(__name__)
def read_input(input: TextIO) -> npt.NDArray[int]:
return np.array([list(map(int, line)) for line in get_lines(input)], dtype=int)
STEPS = 100
PADDING = 1
MAX_ENERGY = 9
def main(input: TextIO) -> str:
octopusses = read_input(input)
padded_octopusses = np.pad(octopusses, [(PADDING, PADDING), (PADDING, PADDING)])
octopusses = padded_octopusses[PADDING:-PADDING, PADDING:-PADDING]
logger.info("Before any steps:\n%s", octopusses)
flashes = 0
for step in range(STEPS):
# increase power level of all octopusses by 1
octopusses += 1
# a mask of octopusses that did not yet flash this step
can_still_flash = np.ones_like(octopusses)
while True:
# find the octopusses that need to flash
flashing: npt.NDArray[bool] = octopusses > MAX_ENERGY
# the ones that we will flash are the ones that have the power and have
# not yet flashed this step
flash: npt.NDArray[bool] = flashing & can_still_flash
indices = np.argwhere(flash)
if not len(indices):
break # no more octopusses left to flash
# Increase the power of all neighbouring octopusses
for y, x in indices:
padded_octopusses[
y - 1 + PADDING : y + 2 + PADDING, x - 1 + PADDING : x + 2 + PADDING
] += 1
# Mark the ones that flashed and remove them from the mask
can_still_flash[y, x] = False
# Count number of flashes
flashes += 1
# Zero any octopusses that flashed
octopusses[octopusses > MAX_ENERGY] = 0
logger.info("Step %d:\n%s", step + 1, octopusses)
return f"{flashes}"
if __name__ == "__main__":
run_with_file_argument(main)
|
the-stack_106_25604 | """
Legalese
--------
Copyright (c) 2015, 2016 Genome Research Ltd.
Author: Colin Nolan <[email protected]>
This file is part of HGI's common Python library
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
import os
import shutil
import unittest
from tempfile import mkdtemp, mkstemp
from unittest.mock import MagicMock, call
from hgicommon.data_source.dynamic_from_file import register, unregister
from hgicommon.data_source.dynamic_from_file import registration_event_listenable_map
from hgicommon.models import RegistrationEvent
from hgicommon.tests.data_source._stubs import StubRegisteringDataSource
class TestRegister(unittest.TestCase):
"""
Tests for `register` and `unregister`.
"""
def tearDown(self):
listeners = registration_event_listenable_map[int].get_listeners()
for listener in listeners:
registration_event_listenable_map[int].remove_listener(listener)
def test_register(self):
listener_1 = MagicMock()
registration_event_listenable_map[int].add_listener(listener_1)
listener_2 = MagicMock()
registration_event_listenable_map[int].add_listener(listener_2)
register(123)
update_1 = RegistrationEvent(123, RegistrationEvent.Type.REGISTERED)
listener_1.assert_called_once_with(update_1)
listener_1.reset_mock()
listener_2.assert_called_once_with(update_1)
listener_2.reset_mock()
def test_unregister(self):
listener_1 = MagicMock()
registration_event_listenable_map[int].add_listener(listener_1)
listener_2 = MagicMock()
registration_event_listenable_map[int].add_listener(listener_2)
unregister(123)
update_1 = RegistrationEvent(123, RegistrationEvent.Type.UNREGISTERED)
listener_1.assert_called_once_with(update_1)
listener_1.reset_mock()
listener_2.assert_called_once_with(update_1)
listener_2.reset_mock()
def test_register_can_be_unsubscribed(self):
listener_1 = MagicMock()
registration_event_listenable_map[int].add_listener(listener_1)
listener_2 = MagicMock()
registration_event_listenable_map[int].add_listener(listener_2)
register(123)
update_1 = RegistrationEvent(123, RegistrationEvent.Type.REGISTERED)
registration_event_listenable_map[int].remove_listener(listener_2)
register(456)
unregister(456)
listener_2.assert_called_once_with(update_1)
class TestRegisteringDataSource(unittest.TestCase):
"""
Tests for `RegisteringDataSource`.
"""
def setUp(self):
self.temp_directory = mkdtemp(suffix=TestRegisteringDataSource.__name__)
self.source = StubRegisteringDataSource(self.temp_directory, int)
self.source.is_data_file = MagicMock(return_value=True)
def tearDown(self):
self.source.stop()
shutil.rmtree(self.temp_directory)
listenable = registration_event_listenable_map[int]
for listener in listenable.get_listeners():
listenable.remove_listener(listener)
def test_extract_data_from_file(self):
listener = MagicMock()
registration_event_listenable_map[int].add_listener(listener)
rule_file_location = self._create_data_file_in_temp_directory()
with open(rule_file_location, 'w') as file:
file.write("from hgicommon.data_source import register\n"
"register(123)\n"
"register(456)")
loaded = self.source.extract_data_from_file(rule_file_location)
listener.assert_has_calls([
call(RegistrationEvent(123, RegistrationEvent.Type.REGISTERED)),
call(RegistrationEvent(456, RegistrationEvent.Type.REGISTERED))
])
self.assertEqual(loaded, [123, 456])
def test_extract_data_from_file_with_corrupted_file(self):
rule_file_location = self._create_data_file_in_temp_directory()
with open(rule_file_location, 'w') as file:
file.write("~")
logging.root.setLevel(level=logging.ERROR)
self.assertRaises(Exception, self.source.extract_data_from_file, rule_file_location)
def test_extract_data_from_file_with_wrong_file_extension(self):
rule_file_location = self._create_data_file_in_temp_directory()
new_rule_file_location = rule_file_location + "c"
os.rename(rule_file_location, new_rule_file_location)
logging.root.setLevel(level=logging.ERROR)
self.assertRaises(Exception, self.source.extract_data_from_file, new_rule_file_location)
def _create_data_file_in_temp_directory(self) -> str:
"""
Creates a data file in the temp directory used by this test.
:return: the file path of the created file
"""
temp_file_location = mkstemp()[1]
rule_file_location = "%s.py" % temp_file_location
os.rename(temp_file_location, rule_file_location)
return rule_file_location
if __name__ == "__main__":
unittest.main()
|
the-stack_106_25607 | from twython import Twython
import json
import html
import os
import re
from auth_keys import *
from datetime import datetime, timedelta
from pytz import timezone
MONTHS = [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"June",
"July",
"Aug",
"Sept",
"Oct",
"Nov",
"Dec"
]
EMOJI_REGEX = re.compile('\\\\U000\w+')
class TwitterSearch():
def __init__(self):
twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
twitter = Twython(APP_KEY, APP_SECRET)
self.twitter = twitter
self.search_results = ""
def count(self, search, geo=False, result_type='', count=50, mentions=False):
        num_tweets = 0
tz = timezone('EST')
day = datetime.now() - timedelta(days=1)
tzoffset = tz.utcoffset(day)
day = day.replace(tzinfo=tz)
#day = day.strftime("%Y-%m-%d")
query = '{} exclude:retweets exclude:replies'.format(search)
if geo:
search_results = self.twitter.search(q=query, count=count, lang="en", is_quote_status=False, geocode=geo, result_type=result_type, until=day)
else:
search_results = self.twitter.search(q=query, count=count, lang="en", is_quote_status=False, until=day)
for tweet in search_results['statuses']:
date = tweet["created_at"]
date = datetime.strptime(date, '%a %b %d %H:%M:%S %z %Y')
tzoffset = tz.utcoffset(date)
date = date + tzoffset
if date >= day:
                num_tweets = num_tweets + 1
        return num_tweets
    def colours(self, search, geo=False, result_type='', count=50, mentions=False):
#day = day.strftime("%Y-%m-%d")
colours = {}
query = '{} exclude:retweets exclude:replies'.format(search)
if geo:
search_results = self.twitter.search(q=query, count=count, lang="en", is_quote_status=False, geocode=geo, result_type=result_type)
else:
search_results = self.twitter.search(q=query, count=count, lang="en", is_quote_status=False)
for tweet in search_results['statuses']:
try:
colours[tweet["user"]["profile_sidebar_border_color"]] = colours[tweet["user"]["profile_sidebar_border_color"]] + 1
except:
colours[tweet["user"]["profile_sidebar_border_color"]] = 1
return colours
def run_search(self, search, geo=False, result_type='', count=50, mentions=False):
query = '{} exclude:retweets exclude:replies'.format(search)
if geo:
search_results = self.twitter.search(q=query, count=count, lang="en", is_quote_status=False, geocode=geo, result_type=result_type)
else:
search_results = self.twitter.search(q=query, count=count, lang="en", is_quote_status=False)
self.search_results = search_results
list_data = []
for tweet in search_results['statuses']:
date = tweet["created_at"]
tz = timezone('EST')
date = datetime.strptime(date, '%a %b %d %H:%M:%S %z %Y')
tzoffset = tz.utcoffset(date)
date = date + tzoffset
data = {
"name": tweet["user"]["screen_name"],
"text": tweet["text"],
"created_at": date,
"favorite_count": tweet["favorite_count"],
"retweet_count": tweet["retweet_count"],
"location": tweet["user"]["location"],
"date": date.strftime("%b %d"),
"time": date.strftime("%I:%M %p"),
"colour": tweet["user"]["profile_sidebar_border_color"]
}
list_data.append(data)
return list_data
def get_emojis(self):
data = []
for tweet in self.search_results['statuses']:
emojis = EMOJI_REGEX.findall(str(tweet["text"].encode("unicode_escape")))
if emojis:
for emoji in emojis:
data.append(emoji.encode('utf-8').decode("utf-8"))
return data
|
the-stack_106_25609 | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 20 21:07:28 2019
@author: Chadwick Boulay
@author: Anahita Malvea
This must be run from the ../.. directory (parent/parent)
"""
import csv
from pathlib import Path
from data.utils import download_from_web
if __name__ == "__main__":
working_dir = Path.cwd() / 'data' / 'kjm_ecog'
# Create a local folder to store the data
local_dir = working_dir / 'download'
if not local_dir.is_dir():
local_dir.mkdir()
# Download the data from the server to the local folder
base_url = "https://stacks.stanford.edu/file/druid:zk881ps0522/"
studies_file = working_dir / 'studies.csv'
with open(studies_file) as csvfile:
datasetreader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
for study in datasetreader:
fname = study['name'] + '.zip'
remote_fname = base_url + fname
md5 = study['md5'] if study['md5'] else None
download_from_web(remote_fname, working_dir / 'download' / fname, md5=md5)
# Download other files
others_file = working_dir / 'other_files.csv'
with open(others_file) as csvfile:
datasetreader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
for row in datasetreader:
fname = row['name']
remote_fname = base_url + fname
download_from_web(remote_fname, working_dir / 'download' / fname)
|
the-stack_106_25612 |
#### REST FRAMEWORK #####
from rest_framework import status
from rest_framework import generics
from rest_framework import permissions
from rest_framework.response import Response
##### SERIALIZERS #####
from users.serializers import BuyerOrderSerializer
from users.serializers import SellerProfileSerializer
from items.serializers import ItemSerializer
##### MODELS #####
from django.contrib.auth.models import User
from users.models import BuyerOrder
from users.models import SellerProfile
from items.models import Item
################################ Orders ###############################
class PlaceOrderAPI(generics.GenericAPIView):
permission_classes = [permissions.IsAuthenticated,]
def get_user(self):
return self.request.user
def get_item(self):
return Item.objects.get(id = self.request.data['item_id'])
def post(self, request, *args, **kwargs):
order_serializer = BuyerOrderSerializer(data = request.data )
order_serializer.is_valid(raise_exception=True)
user = self.get_user()
item = self.get_item()
order = order_serializer.save( user = user,
item = item )
        ## Update the stock when the order is placed
item.stock = item.stock - 1
item.save()
res = {
"status": 'success',
"code": status.HTTP_200_OK,
"message": 'Ordered successfully',
"order" : BuyerOrderSerializer(order).data
}
return Response(res , status=status.HTTP_200_OK)
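# Example request sketch: only ``item_id`` is grounded in the code above (it is
# read from request.data in get_item); the URL and any additional fields depend
# on the project's routing and on BuyerOrderSerializer, so they are assumptions.
#
#     POST /api/orders/place/
#     Authorization: Token <auth-token>
#     {"item_id": 42}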
class GetOrdersAPI(generics.GenericAPIView):
permission_classes = [permissions.IsAuthenticated,]
def get_user(self):
return self.request.user
def get(self, request, *args, **kwargs):
user = self.get_user()
ordered_items = list()
#orders_ = self.model.objects.all()
orders = BuyerOrder.objects.filter(user = user)
for i, obj in enumerate(orders) :
order_info = obj.item
# Every Item has info about the users(field) how posted it
# That user info can be used to get sellerprofile
seller_info = SellerProfile.objects.get(user = obj.item.user)
ordered_items.append( {"placed" : obj.placed,
"Order Satus" : obj.order_sat,
"Info" : ItemSerializer(order_info).data,
"seller" : SellerProfileSerializer(seller_info).data
}
)
res = {
"status": 'success',
"code": status.HTTP_200_OK,
"message": 'Fteched successfully',
"ordered_items" : ordered_items,
}
return Response(res , status=status.HTTP_200_OK)
|
the-stack_106_25613 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from tkinter import *
def add():
a = Toplevel()
a.geometry('120x130')
a.resizable(0, 0)
Label(a, text="x1").grid(row=0, column=0)
ent1 = Entry(a, width=5)
ent1.grid(row=0, column=1)
Label(a, text="x2").grid(row=1, column=0)
ent2 = Entry(a, width=5)
ent2.grid(row=1, column=1)
Label(a, text="y1").grid(row=0, column=2)
ent3 = Entry(a, width=5)
ent3.grid(row=0, column=3)
Label(a, text="y2").grid(row=1, column=2)
ent4 = Entry(a, width=5)
ent4.grid(row=1, column=3)
var = IntVar()
var.set(1)
    r1 = Radiobutton(a, text="Rectangle", value=1, variable=var)
r1.grid(row=3, column=0, columnspan=4)
    r2 = Radiobutton(a, text="Oval", value=0, variable=var)
r2.grid(row=4, column=0, columnspan=4)
def Paint():
x1 = int(ent1.get())
y1 = int(ent3.get())
x2 = int(ent2.get())
y2 = int(ent4.get())
        if var.get() == 0:
c.create_oval(x1, y1, x2, y2, width=3)
elif var.get() == 1:
c.create_rectangle(x1, y1, x2, y2, width=3)
a.destroy()
    but = Button(a, text='Draw', command=Paint)
but.grid(row=5, column=0, columnspan=4)
root = Tk()
c = Canvas(width=300, height=300, bg='white')
c.grid(row=0, column=0)
Button(bg='lightgrey', text='Add shape', command=add).grid(row=2, column=0)
root.mainloop() |
the-stack_106_25614 | # Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import os, sys, re, json
from collections import defaultdict
from datetime import datetime
from decimal import Decimal
import traceback
import urllib
import urllib.request, urllib.parse, urllib.error
import queue
import threading
import hmac
from struct import Struct
import webbrowser
import stat
from typing import NamedTuple
import inspect
from .i18n import _
def inv_dict(d):
return {v: k for k, v in d.items()}
base_units = {'QTUM':8, 'mQTUM':5, 'uQTUM':2}
fee_levels = [_('Within 25 blocks'), _('Within 10 blocks'), _('Within 5 blocks'), _('Within 2 blocks'), _('In the next block')]
unpack_int32_from = Struct('<i').unpack_from
unpack_int64_from = Struct('<q').unpack_from
unpack_uint16_from = Struct('<H').unpack_from
unpack_uint32_from = Struct('<I').unpack_from
unpack_uint64_from = Struct('<Q').unpack_from
def normalize_version(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
class NotEnoughFunds(Exception): pass
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
class FileImportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to import from file.") + "\n" + self.message
class FileExportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to export to file.") + "\n" + self.message
class TimeoutException(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
if not self.message:
return _("Operation timed out.")
return self.message
class WalletFileException(Exception): pass
class QtumException(Exception): pass
# Throw this exception to unwind the stack like when an error occurs.
# However unlike other exceptions the user won't be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
class Fiat(object):
def __new__(cls, value, ccy):
self = super(Fiat, cls).__new__(cls)
self.ccy = ccy
self.value = value
return self
def __repr__(self):
return 'Fiat(%s)' % self.__str__()
def __str__(self):
if self.value.is_nan():
return _('No Data')
else:
return "{:.2f}".format(self.value) + ' ' + self.ccy
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from .transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
if isinstance(obj, set):
return list(obj)
return super(MyEncoder, self).default(obj)
class PrintError(object):
'''A handy base class'''
verbosity_filter = ''
def diagnostic_name(self):
return self.__class__.__name__
def print_error(self, *msg):
if self.verbosity_filter in verbosity or verbosity == '*':
print_error("[%s]" % self.diagnostic_name(), *msg)
def print_stderr(self, *msg):
print_stderr("[%s]" % self.diagnostic_name(), *msg)
def print_msg(self, *msg):
print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
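# Possible usage (the class names are placeholders): schedule a DebugMem job on
# any DaemonThread subclass to log live instance counts of the given classes
# about every `interval` seconds:
#
#     daemon.add_jobs([DebugMem([Wallet, Network], interval=30)])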
class DaemonThread(threading.Thread, PrintError):
""" daemon thread that terminates cleanly """
verbosity_filter = 'd'
def __init__(self):
threading.Thread.__init__(self)
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.print_error("jnius detach")
self.print_error("stopped")
verbosity = '*'
def set_verbosity(b):
global verbosity
verbosity = b
def print_error(*args):
if not verbosity: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
"""Return True if the two strings are equal, False otherwise."""
return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
# decorator that prints execution time
def profiler(func):
def get_func_name(args):
arg_names_from_sig = inspect.getfullargspec(func).args
# prepend class name if there is one (and if we can find it)
if len(arg_names_from_sig) > 0 and len(args) > 0 \
and arg_names_from_sig[0] in ('self', 'cls', 'klass'):
classname = args[0].__class__.__name__
else:
classname = ''
name = '{}.{}'.format(classname, func.__name__) if classname else func.__name__
return name
def do_profile(args, kw_args):
name = get_func_name(args)
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", name, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(args, kw_args)
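# Usage sketch (the class and method names are hypothetical):
#
#     class Wallet:
#         @profiler
#         def load_transactions(self):
#             ...
#
# Each call then prints a line such as "[profiler] Wallet.load_transactions 0.1234".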
def android_ext_dir():
import jnius
env = jnius.autoclass('android.os.Environment')
return env.getExternalStorageDirectory().getPath()
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_dir():
d = android_ext_dir() + '/org.qtum.qtum_electrum'
if not os.path.exists(d):
os.mkdir(d)
return d
def android_check_data_dir():
""" if needed, move old directory to sandbox """
ext_dir = android_ext_dir()
data_dir = android_data_dir()
old_electrum_dir = ext_dir + '/qtum_electrum'
if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
import shutil
new_headers_path = android_headers_dir() + '/blockchain_headers'
old_headers_path = old_electrum_dir + '/blockchain_headers'
if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
print_error("Moving headers file to", new_headers_path)
shutil.move(old_headers_path, new_headers_path)
print_error("Moving data to", data_dir)
shutil.move(old_electrum_dir, data_dir)
return data_dir
def get_headers_dir(config):
return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path
def assert_datadir_available(config_path):
path = config_path
if os.path.exists(path):
return
else:
raise FileNotFoundError(
'Electrum datadir does not exist. Was it deleted while running?' + '\n' +
'Should be at {}'.format(path))
def assert_file_in_datadir_available(path, config_path):
if os.path.exists(path):
return
else:
assert_datadir_available(config_path)
raise FileNotFoundError(
'Cannot find file but datadir is there.' + '\n' +
'Should be at {}'.format(path))
def assert_bytes(*args):
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
"""
porting helper, assert args type
"""
for x in args:
assert isinstance(x, str)
def to_string(x, enc):
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
"""
cast string to bytes() like object, but for python2 support it's bytearray copy
"""
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex
hfu = binascii.hexlify
def bh2u(x):
"""
str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
    '01020a'
:param x: bytes
:rtype: str
"""
return hfu(x).decode('ascii')
def user_dir():
if 'ANDROID_DATA' in os.environ:
return android_check_data_dir()
elif os.name == 'posix':
return os.path.join(os.environ["HOME"], ".qtum-electrum")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "Qtum-Electrum")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "Qtum-Electrum")
else:
#raise Exception("No home directory found in environment variables.")
return
def format_satoshis_plain(x, decimal_point = 8):
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
def format_satoshis(x, num_zeros=0, decimal_point=8, precision=None, is_diff=False, whitespaces=False):
from locale import localeconv
if x is None:
return 'unknown'
if precision is None:
precision = decimal_point
decimal_format = ".0" + str(precision) if precision > 0 else ""
if is_diff:
decimal_format = '+' + decimal_format
result = ("{:" + decimal_format + "f}").format(x / pow (10, decimal_point)).rstrip('0')
integer_part, fract_part = result.split(".")
dp = localeconv()['decimal_point']
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + dp + fract_part
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
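# Sample outputs, assuming a locale whose decimal point is '.':
#
#     >>> format_satoshis(1234500, num_zeros=2)
#     '0.012345'
#     >>> format_satoshis(0, num_zeros=2)
#     '0.00'
#     >>> format_satoshis(5000, is_diff=True)
#     '+0.00005'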
FEERATE_PRECISION = 1 # num fractional decimal places for sat/byte fee rates
_feerate_quanta = Decimal(10) ** (-FEERATE_PRECISION)
def format_fee_satoshis(fee, num_zeros=0):
return format_satoshis(fee, num_zeros, 0, precision=FEERATE_PRECISION)
def timestamp_to_datetime(timestamp):
if timestamp is None:
return None
try:
return datetime.fromtimestamp(timestamp)
except:
return None
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43220:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
def block_explorer_info():
from . import constants
from .qtum import testnet_block_explorers, mainnet_block_explorers
if constants.net.TESTNET:
return testnet_block_explorers
else:
return mainnet_block_explorers
def block_explorer(config):
    return config.get('block_explorer', 'explorer.qtum.org')
def block_explorer_tuple(config):
return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config, params):
"""
:param config:
:type params: dict
:return: str
"""
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
if params.get('token'):
if 'qtum.org' in be_tuple[0]:
return "{}/token/{}?a={}".format(be_tuple[0], params.get('token'), params.get('addr'))
url_parts = [be_tuple[0], ]
for k, v in params.items():
kind_str = be_tuple[1].get(k)
if not kind_str:
continue
url_parts.append(kind_str)
url_parts.append(v)
return "/".join(url_parts)
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
from . import bitcoin
from .bitcoin import COIN
if ':' not in uri:
if not bitcoin.is_address(uri):
raise Exception("Not a qtum address")
return {'address': uri}
u = urllib.parse.urlparse(uri)
if u.scheme != 'qtum':
raise Exception("Not a qtum URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query)
else:
pq = urllib.parse.parse_qs(u.query)
for k, v in pq.items():
if len(v)!=1:
raise Exception('Duplicate Key', k)
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise Exception("Invalid qtum address:" + address)
out['address'] = address
if 'amount' in out:
am = out['amount']
        m = re.match(r'([0-9.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
    if 'message' in out:
        out['memo'] = out['message']
if 'time' in out:
out['time'] = int(out['time'])
if 'exp' in out:
out['exp'] = int(out['exp'])
if 'sig' in out:
out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
def get_payment_request_thread():
from . import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = pr.get_payment_request(r)
if on_pr:
on_pr(request)
t = threading.Thread(target=get_payment_request_thread)
        t.daemon = True
t.start()
return out
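# Usage sketch for parse_URI(); the caller must supply a valid qtum address.
# 'amount=1.5' is converted to satoshis, so out['amount'] == 150000000, and the
# 'message' field is mirrored into out['memo'].
def _parse_uri_example(addr):
    return parse_URI('qtum:{}?amount=1.5&message=donation'.format(addr))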
def create_URI(addr, amount, message):
from . import bitcoin
if not bitcoin.is_address(addr):
return ""
query = []
if amount:
query.append('amount=%s'%format_satoshis_plain(amount))
if message:
query.append('message=%s'%urllib.parse.quote(message))
p = urllib.parse.ParseResult(scheme='qtum', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return urllib.parse.urlunparse(p)
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
import builtins
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
# TODO: check \r\n pattern
n = message.find(b'\n')
if n==-1:
return None, message
try:
j = json.loads(message[0:n].decode('utf8'))
except:
j = None
return j, message[n+1:]
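# Sanity-check sketch for parse_json(): one complete newline-terminated JSON
# object followed by a partial tail that is kept for the next call.
def _parse_json_example():
    obj, rest = parse_json(b'{"id": 1}\n{"id":')
    # obj == {'id': 1}; rest == b'{"id":'
    return obj, rest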
class timeout(Exception):
pass
import socket
import errno
import json
import ssl
import time
class SocketPipe:
def __init__(self, socket):
self.socket = socket
self.message = b''
self.set_timeout(0.1)
self.recv_time = time.time()
def set_timeout(self, t):
self.socket.settimeout(t)
def idle_time(self):
return time.time() - self.recv_time
def get(self):
while True:
response, self.message = parse_json(self.message)
if response is not None:
return response
try:
data = self.socket.recv(1024)
except socket.timeout:
raise timeout
except ssl.SSLError:
raise timeout
except socket.error as err:
                if err.errno == 60:  # ETIMEDOUT (macOS / BSD)
                    raise timeout
                elif err.errno in [11, 35, 10035]:  # EAGAIN / EWOULDBLOCK
# print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
time.sleep(0.2)
raise timeout
else:
print_error("pipe: socket error", err)
data = b''
except:
traceback.print_exc(file=sys.stderr)
data = b''
if not data: # Connection closed remotely
return None
self.message += data
self.recv_time = time.time()
def send(self, request):
out = json.dumps(request) + '\n'
out = out.encode('utf8')
self._send(out)
def send_all(self, requests):
out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
self._send(out)
def _send(self, out):
while out:
try:
sent = self.socket.send(out)
out = out[sent:]
except ssl.SSLError as e:
print_error("SSLError:", e)
time.sleep(0.1)
continue
class QueuePipe:
def __init__(self, send_queue=None, get_queue=None):
self.send_queue = send_queue if send_queue else queue.Queue()
self.get_queue = get_queue if get_queue else queue.Queue()
self.set_timeout(0.1)
def get(self):
try:
return self.get_queue.get(timeout=self.timeout)
except queue.Empty:
raise timeout
def get_all(self):
responses = []
while True:
try:
r = self.get_queue.get_nowait()
responses.append(r)
except queue.Empty:
break
return responses
def set_timeout(self, t):
self.timeout = t
def send(self, request):
self.send_queue.put(request)
def send_all(self, requests):
for request in requests:
self.send(request)
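# Usage sketch (illustrative only): a QueuePipe pair wired back-to-back gives an
# in-process duplex channel for JSON-style request/response dicts, with no real
# socket involved.
def _queue_pipe_example():
    import queue as _queue
    a_to_b, b_to_a = _queue.Queue(), _queue.Queue()
    side_a = QueuePipe(send_queue=a_to_b, get_queue=b_to_a)
    side_b = QueuePipe(send_queue=b_to_a, get_queue=a_to_b)
    side_a.send({'method': 'server.ping'})
    return side_b.get()  # -> {'method': 'server.ping'}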
def versiontuple(v):
return tuple(map(int, (v.split("."))))
def import_meta(path, validater, load_meta):
try:
with open(path, 'r', encoding='utf-8') as f:
d = validater(json.loads(f.read()))
load_meta(d)
# backwards compatibility for JSONDecodeError
except ValueError:
traceback.print_exc(file=sys.stderr)
raise FileImportFailed(_("Invalid JSON code."))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
raise FileImportFailed(e)
def export_meta(meta, file_name):
try:
with open(file_name, 'w+', encoding='utf-8') as f:
json.dump(meta, f, indent=4, sort_keys=True)
except (IOError, os.error) as e:
traceback.print_exc(file=sys.stderr)
raise FileExportFailed(e)
def open_browser(url, new=0, autoraise=True):
for name in webbrowser._tryorder:
if name == 'MacOSX':
continue
browser = webbrowser.get(name)
if browser.open(url, new, autoraise):
return True
return False
def make_dir(path, allow_symlink=True):
"""Make directory if it does not yet exist."""
if not os.path.exists(path):
if not allow_symlink and os.path.islink(path):
raise Exception('Dangling link: ' + path)
os.mkdir(path)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
TxMinedStatus = NamedTuple("TxMinedStatus", [("height", int),
("conf", int),
("timestamp", int),
("header_hash", str)])
VerifiedTxInfo = NamedTuple("VerifiedTxInfo", [("height", int),
("timestamp", int),
("txpos", int),
("header_hash", str)]) |
the-stack_106_25615 | """
Blur image using GaussianBlur operator
======================================
"""
import torch
import kornia
import cv2
import numpy as np
import matplotlib.pyplot as plt
# read the image with OpenCV
img: np.ndarray = cv2.imread('./data/lena.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# convert to torch tensor
data: torch.Tensor = kornia.image_to_tensor(img)  # BxCxHxW
# create the operator
gauss = kornia.filters.GaussianBlur2d((11, 11), (10.5, 10.5))
# blur the image
x_blur: torch.Tensor = gauss(data.float())
# convert back to numpy
img_blur: np.ndarray = kornia.tensor_to_image(x_blur.byte())
# Create the plot
fig, axs = plt.subplots(1, 2, figsize=(16, 10))
axs = axs.ravel()
axs[0].axis('off')
axs[0].set_title('image source')
axs[0].imshow(img)
axs[1].axis('off')
axs[1].set_title('image blurred')
axs[1].imshow(img_blur)
|
the-stack_106_25617 | import logging
from typing import (
Any,
Awaitable,
Callable,
Dict,
List,
Optional,
Union,
Tuple,
)
from opentrons.calibration_storage import get, modify, helpers, delete
from opentrons.calibration_storage.types import (
TipLengthCalNotFound,
PipetteOffsetByPipetteMount,
)
from opentrons.config import feature_flags as ff
from opentrons.hardware_control import (
ThreadManager,
CriticalPoint,
Pipette,
robot_calibration as robot_cal,
)
from opentrons.protocol_api import labware
from opentrons.protocols.geometry.deck import Deck
from opentrons.types import Mount, Point, Location
from robot_server.service.errors import RobotServerError
from robot_server.service.session.models.command_definitions import CalibrationCommand
from robot_server.robot.calibration import util
from robot_server.robot.calibration.constants import (
TIP_RACK_LOOKUP_BY_MAX_VOL,
SHORT_TRASH_DECK,
STANDARD_DECK,
POINT_ONE_ID,
MOVE_TO_DECK_SAFETY_BUFFER,
MOVE_TO_TIP_RACK_SAFETY_BUFFER,
CAL_BLOCK_SETUP_BY_MOUNT,
JOG_TO_DECK_SLOT,
)
from ..errors import CalibrationError
from ..helper_classes import RequiredLabware, AttachedPipette, SupportedCommands
from .constants import (
PipetteOffsetCalibrationState as POCState,
PipetteOffsetWithTipLengthCalibrationState as POWTState,
TIP_RACK_SLOT,
)
from .state_machine import (
PipetteOffsetCalibrationStateMachine,
PipetteOffsetWithTipLengthStateMachine,
)
from opentrons_shared_data.labware.dev_types import LabwareDefinition
MODULE_LOG = logging.getLogger(__name__)
"""
A collection of functions that allow a consumer to prepare and update
calibration data associated with the position of a specific physical
pipette attached to the gantry, in relation to the deck
"""
# TODO: BC 2020-07-08: type all command logic here with actual Model type
COMMAND_HANDLER = Callable[..., Awaitable]
COMMAND_MAP = Dict[str, COMMAND_HANDLER]
PipetteOffsetStateMachine = Union[
PipetteOffsetCalibrationStateMachine, PipetteOffsetWithTipLengthStateMachine
]
PipetteOffsetState = Union[POWTState, POCState]
class PipetteOffsetCalibrationUserFlow:
def __init__(
self,
hardware: ThreadManager,
mount: Mount = Mount.RIGHT,
recalibrate_tip_length: bool = False,
has_calibration_block: bool = False,
tip_rack_def: Optional[LabwareDefinition] = None,
):
self._hardware = hardware
self._mount = mount
self._hw_pipette = self._hardware._attached_instruments[mount]
if not self._hw_pipette:
raise RobotServerError(
definition=CalibrationError.NO_PIPETTE_ON_MOUNT, mount=mount
)
deck_load_name = SHORT_TRASH_DECK if ff.short_fixed_trash() else STANDARD_DECK
self._deck = Deck(load_name=deck_load_name)
self._saved_offset_this_session = False
point_one_pos = self._deck.get_calibration_position(POINT_ONE_ID).position
self._cal_ref_point = Point(*point_one_pos)
self._tip_origin_pt: Optional[Point] = None
        self._nozzle_height_at_reference: Optional[float] = None
        self._z_height_reference: Optional[float] = None
self._using_default_tiprack = False
existing_offset_calibration = self._get_stored_pipette_offset_cal()
self._load_tip_rack(tip_rack_def, existing_offset_calibration)
existing_tip_length_calibration = self._get_stored_tip_length_cal()
perform_tip_length = (
recalibrate_tip_length or not existing_tip_length_calibration
)
if perform_tip_length and has_calibration_block:
self._load_calibration_block()
self._has_calibration_block = has_calibration_block
else:
self._has_calibration_block = False
self._has_calibrated_tip_length: bool = (
self._get_stored_tip_length_cal() is not None or self._using_default_tiprack
)
self._sm = self._determine_state_machine(perform_tip_length)
self._current_state = self._sm.state.sessionStarted
self._should_perform_tip_length = perform_tip_length
self._command_map: COMMAND_MAP = {
CalibrationCommand.load_labware: self.load_labware,
CalibrationCommand.move_to_reference_point: self.move_to_reference_point,
CalibrationCommand.jog: self.jog,
CalibrationCommand.pick_up_tip: self.pick_up_tip,
CalibrationCommand.invalidate_tip: self.invalidate_tip,
CalibrationCommand.save_offset: self.save_offset,
CalibrationCommand.move_to_tip_rack: self.move_to_tip_rack,
CalibrationCommand.move_to_deck: self.move_to_deck,
CalibrationCommand.move_to_point_one: self.move_to_point_one,
CalibrationCommand.set_has_calibration_block: self.set_has_calibration_block, # noqa: E501
CalibrationCommand.exit: self.exit_session,
CalibrationCommand.invalidate_last_action: self.invalidate_last_action,
}
self._hw_pipette.update_pipette_offset(
robot_cal.load_pipette_offset(pip_id=None, mount=self._mount)
)
self._default_tipracks = util.get_default_tipracks(
self.hw_pipette.config.default_tipracks
)
self._supported_commands = SupportedCommands(namespace="calibration")
self._supported_commands.loadLabware = True
@property
def deck(self) -> Deck:
return self._deck
@property
def mount(self) -> Mount:
return self._mount
@property
def hardware(self) -> ThreadManager:
return self._hardware
@property
def hw_pipette(self) -> Pipette:
return self._hw_pipette
@property
def current_state(self) -> PipetteOffsetState:
# Currently, mypy can't interpret enum
# values being saved as variables. Although
# using python's built-in typing methods
# correctly reveals that this is an enum,
# mypy believes it is a string.
return self._sm.current_state
@property
def has_calibrated_tip_length(self) -> bool:
return self._has_calibrated_tip_length
@property
def should_perform_tip_length(self) -> bool:
return self._should_perform_tip_length
@should_perform_tip_length.setter
def should_perform_tip_length(self, value: bool):
self._should_perform_tip_length = value
def get_pipette(self) -> AttachedPipette:
# TODO(mc, 2020-09-17): s/tip_length/tipLength
return AttachedPipette(
model=self._hw_pipette.model,
name=self._hw_pipette.name,
tipLength=self._hw_pipette.config.tip_length,
mount=str(self._mount),
serial=self._hw_pipette.pipette_id,
defaultTipracks=self._default_tipracks,
)
def get_required_labware(self) -> List[RequiredLabware]:
slots = self._deck.get_non_fixture_slots()
lw_by_slot = {s: self._deck[s] for s in slots if self._deck[s]}
return [
RequiredLabware.from_lw(lw, s) # type: ignore
for s, lw in lw_by_slot.items()
]
async def set_has_calibration_block(self, hasBlock: bool):
if self._has_calibration_block and not hasBlock:
self._remove_calibration_block()
elif hasBlock and not self._has_calibration_block:
self._load_calibration_block()
self._has_calibration_block = hasBlock
def _get_tip_rack_lw(self) -> labware.Labware:
pip_vol = self._hw_pipette.config.max_volume
lw_load_name = TIP_RACK_LOOKUP_BY_MAX_VOL[str(pip_vol)].load_name
return labware.load(lw_load_name, self._deck.position_for(TIP_RACK_SLOT))
async def handle_command(self, name: Any, data: Dict[Any, Any]):
"""
Handle a client command
:param name: Name of the command
:param data: Data supplied in command
:return: None
"""
# Here we need to get the current state in the state machine's
# state enum because otherwise mypy will confuse which type
# the state is.
state = getattr(self._sm.state, self._sm.current_state)
next_state = self._sm.get_next_state(state, name)
handler = self._command_map.get(name)
if handler is not None:
await handler(**data)
self._sm.set_state(next_state)
MODULE_LOG.debug(
f"PipetteOffsetCalUserFlow handled command {name}, transitioned"
f"from {self._sm.current_state} to {next_state}"
)
@property
def critical_point_override(self) -> Optional[CriticalPoint]:
return (
CriticalPoint.FRONT_NOZZLE
if self._hw_pipette.config.channels == 8
else None
)
async def get_current_point(self, critical_point: Optional[CriticalPoint]) -> Point:
return await self._hardware.gantry_position(self._mount, critical_point)
async def load_labware(
self,
tiprackDefinition: Optional[LabwareDefinition] = None,
):
self._supported_commands.loadLabware = False
if tiprackDefinition:
verified_definition = labware.verify_definition(tiprackDefinition)
existing_offset_calibration = self._get_stored_pipette_offset_cal()
self._load_tip_rack(verified_definition, existing_offset_calibration)
async def jog(self, vector):
await self._hardware.move_rel(mount=self._mount, delta=Point(*vector))
@property
def tip_origin(self) -> Point:
if self._tip_origin_pt:
return self._tip_origin_pt
else:
return (
self._tip_rack.wells()[0].top().point + MOVE_TO_TIP_RACK_SAFETY_BUFFER
)
@tip_origin.setter
def tip_origin(self, new_val: Point):
self._tip_origin_pt = new_val
def reset_tip_origin(self):
self._tip_origin_pt = None
@property
def supported_commands(self) -> List[str]:
return self._supported_commands.supported()
async def move_to_tip_rack(self):
if (
self._sm.current_state == self._sm.state.labwareLoaded
and not self.has_calibrated_tip_length
and not self.should_perform_tip_length
):
self._flag_unmet_transition_req(
command_handler="move_to_tip_rack",
unmet_condition="not performing tip length calibration",
)
await self._move(Location(self.tip_origin, None))
@staticmethod
def _determine_state_machine(perform_tip_length: bool) -> PipetteOffsetStateMachine:
if perform_tip_length:
return PipetteOffsetWithTipLengthStateMachine()
else:
return PipetteOffsetCalibrationStateMachine()
def _get_stored_tip_length_cal(self) -> Optional[float]:
try:
return get.load_tip_length_calibration(
self._hw_pipette.pipette_id,
self._tip_rack._implementation.get_definition(),
).tip_length
except TipLengthCalNotFound:
return None
def _get_stored_pipette_offset_cal(self) -> Optional[PipetteOffsetByPipetteMount]:
return get.get_pipette_offset(self._hw_pipette.pipette_id, self._mount)
def _get_tip_length(self) -> float:
stored_tip_length_cal = self._get_stored_tip_length_cal()
if stored_tip_length_cal is None or self._should_perform_tip_length:
tip_overlap = self._hw_pipette.config.tip_overlap.get(self._tip_rack.uri, 0)
tip_length = self._tip_rack.tip_length
return tip_length - tip_overlap
else:
return stored_tip_length_cal
def _load_calibration_block(self):
cb_setup = CAL_BLOCK_SETUP_BY_MOUNT[self._mount]
self._deck[cb_setup.slot] = labware.load(
cb_setup.load_name, self._deck.position_for(cb_setup.slot)
)
def _remove_calibration_block(self):
cb_setup = CAL_BLOCK_SETUP_BY_MOUNT[self._mount]
del self._deck[cb_setup.slot]
@staticmethod
def _get_tr_lw(
tip_rack_def: Optional[LabwareDefinition],
existing_calibration: Optional[PipetteOffsetByPipetteMount],
volume: float,
position: Location,
) -> Tuple[bool, labware.Labware]:
"""Find the right tiprack to use. Specifically,
- If it's specified from above, use that
- If it's not, and we have a calibration, use that
- If we don't, use the default
"""
if tip_rack_def:
return False, labware.load_from_definition(tip_rack_def, position)
if existing_calibration and existing_calibration.uri:
try:
details = helpers.details_from_uri(existing_calibration.uri)
return True, labware.load(
load_name=details.load_name,
namespace=details.namespace,
version=details.version,
parent=position,
)
except (IndexError, ValueError, FileNotFoundError):
pass
tr_load_name = TIP_RACK_LOOKUP_BY_MAX_VOL[str(volume)].load_name
return True, labware.load(tr_load_name, position)
def _load_tip_rack(
self,
tip_rack_def: Optional[LabwareDefinition],
existing_calibration: Optional[PipetteOffsetByPipetteMount],
):
"""
load onto the deck the default opentrons tip rack labware for this
pipette and return the tip rack labware. If tip_rack_def is supplied,
load specific tip rack from def onto the deck and return the labware.
"""
self._using_default_tiprack, self._tip_rack = self._get_tr_lw(
tip_rack_def,
existing_calibration,
self._hw_pipette.config.max_volume,
self._deck.position_for(TIP_RACK_SLOT),
)
if self._deck[TIP_RACK_SLOT]:
del self._deck[TIP_RACK_SLOT]
self._deck[TIP_RACK_SLOT] = self._tip_rack
def _flag_unmet_transition_req(self, command_handler: str, unmet_condition: str):
raise RobotServerError(
definition=CalibrationError.UNMET_STATE_TRANSITION_REQ,
handler=command_handler,
state=self._sm.current_state,
condition=unmet_condition,
)
async def move_to_deck(self):
current_state = self._sm.current_state
if (
not self.has_calibrated_tip_length
and current_state == self._sm.state.inspectingTip
):
self._flag_unmet_transition_req(
command_handler="move_to_deck",
unmet_condition="tip length calibration data exists",
)
if (
self.should_perform_tip_length
and isinstance(self._sm.state, POWTState)
and current_state == self._sm.state.tipLengthComplete
and self._saved_offset_this_session
):
self._flag_unmet_transition_req(
command_handler="move_to_deck",
unmet_condition="offset not saved this session",
)
deck_pt = self._deck.get_slot_center(JOG_TO_DECK_SLOT)
ydim = self._deck.get_slot_definition(JOG_TO_DECK_SLOT)["boundingBox"][
"yDimension"
]
new_pt = deck_pt + Point(0, -1 * ydim / 2, 0) + MOVE_TO_DECK_SAFETY_BUFFER
to_loc = Location(new_pt, None)
await self._move(to_loc)
self._should_perform_tip_length = False
async def move_to_point_one(self):
assert (
self._z_height_reference is not None
), "saveOffset has not been called yet"
target_loc = Location(self._cal_ref_point, None)
target = target_loc.move(point=Point(0, 0, self._z_height_reference))
await self._move(target)
async def save_offset(self):
cur_pt = await self.get_current_point(critical_point=None)
current_state = self._sm.current_state
if current_state == self._sm.state.joggingToDeck:
self._z_height_reference = cur_pt.z
elif current_state == self._sm.state.savingPointOne:
if self._hw_pipette.config.channels > 1:
cur_pt = await self.get_current_point(
critical_point=CriticalPoint.FRONT_NOZZLE
)
tiprack_hash = helpers.hash_labware_def(
self._tip_rack._implementation.get_definition()
)
offset = self._cal_ref_point - cur_pt
modify.save_pipette_calibration(
offset=offset,
mount=self._mount,
pip_id=self._hw_pipette.pipette_id,
tiprack_hash=tiprack_hash,
tiprack_uri=self._tip_rack.uri,
)
self._saved_offset_this_session = True
elif (
isinstance(current_state, POWTState)
and current_state == POWTState.measuringNozzleOffset
):
self._nozzle_height_at_reference = cur_pt.z
elif (
isinstance(current_state, POWTState)
and current_state == POWTState.measuringTipOffset
):
assert self._hw_pipette.has_tip
assert self._nozzle_height_at_reference is not None
# set critical point explicitly to nozzle
noz_pt = await self.get_current_point(critical_point=CriticalPoint.NOZZLE)
util.save_tip_length_calibration(
pipette_id=self._hw_pipette.pipette_id,
tip_length_offset=noz_pt.z - self._nozzle_height_at_reference,
tip_rack=self._tip_rack,
)
delete.delete_pipette_offset_file(self._hw_pipette.pipette_id, self.mount)
new_tip_length = self._get_stored_tip_length_cal()
self._has_calibrated_tip_length = new_tip_length is not None
# load the new tip length for the rest of the session
self._hw_pipette.current_tip_length = new_tip_length
await self.hardware.retract(self._mount, 20)
async def move_to_reference_point(self):
if not self.should_perform_tip_length and self._sm.current_state in (
self._sm.state.labwareLoaded,
self._sm.state.inspectingTip,
):
self._flag_unmet_transition_req(
command_handler="move_to_reference_point",
unmet_condition="performing additional tip length calibration",
)
cal_block_target_well: Optional[labware.Well] = None
if self._has_calibration_block:
cb_setup = CAL_BLOCK_SETUP_BY_MOUNT[self._mount]
calblock: labware.Labware = self._deck[cb_setup.slot] # type: ignore
cal_block_target_well = calblock.wells_by_name()[cb_setup.well]
ref_loc = util.get_reference_location(
deck=self._deck, cal_block_target_well=cal_block_target_well
)
await self._move(ref_loc)
async def invalidate_last_action(self):
if self._sm.current_state == POWTState.measuringNozzleOffset:
await self._hardware.home()
await self._hardware.gantry_position(self.mount, refresh=True)
await self.move_to_reference_point()
elif self._sm.current_state == self._sm.state.preparingPipette:
self.reset_tip_origin()
await self._hardware.home()
await self._hardware.gantry_position(self.mount, refresh=True)
await self.move_to_tip_rack()
else:
await self.hardware.home()
await self._hardware.gantry_position(self.mount, refresh=True)
trash = self._deck.get_fixed_trash()
assert trash, "Bad deck setup"
await util.move(self, trash["A1"].top(), CriticalPoint.XY_CENTER)
await self.hardware.drop_tip(self.mount)
await self.move_to_tip_rack()
async def pick_up_tip(self):
await util.pick_up_tip(self, tip_length=self._get_tip_length())
async def invalidate_tip(self):
await util.invalidate_tip(self)
async def return_tip(self):
await util.return_tip(self, tip_length=self._get_tip_length())
async def _move(self, to_loc: Location):
await util.move(self, to_loc)
async def exit_session(self):
if self.hw_pipette.has_tip:
await self.move_to_tip_rack()
await self.return_tip()
# reload new pipette offset data by resetting instrument
self._hardware.reset_instrument(self._mount)
await self._hardware.home()
|
the-stack_106_25618 | """
Copyright (C) 2005-2015 Splunk Inc. All Rights Reserved.
log utility for TA
"""
import logging
import logging.handlers as handlers
import os.path as op
from tab_splunktalib.splunk_platform import make_splunkhome_path
import tab_splunktalib.common.util as cutil
from tab_splunktalib.common.pattern import singleton
def log_enter_exit(logger):
"""
Log decorator to log function enter and exit
"""
def log_decorator(func):
def wrapper(*args, **kwargs):
logger.debug("{} entered.".format(func.__name__))
result = func(*args, **kwargs)
logger.debug("{} exited.".format(func.__name__))
return result
return wrapper
return log_decorator
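# Usage sketch for the decorator above: entry and exit of the wrapped function
# are logged at DEBUG level on the supplied logger (names are illustrative).
def _log_enter_exit_example():
    demo_logger = logging.getLogger("demo")

    @log_enter_exit(demo_logger)
    def collect(count):
        return list(range(count))

    return collect(3)  # logs "collect entered." / "collect exited."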
@singleton
class Logs(object):
def __init__(self, namespace=None, default_level=logging.INFO):
self._loggers = {}
self._default_level = default_level
if namespace is None:
namespace = cutil.get_appname_from_path(op.abspath(__file__))
if namespace:
namespace = namespace.lower()
self._namespace = namespace
def get_logger(self, name, level=None, maxBytes=25000000, backupCount=5):
"""
Set up a default logger.
:param name: The log file name.
:param level: The logging level.
:param maxBytes: The maximum log file size before rollover.
:param backupCount: The number of log files to retain.
"""
# Strip ".py" from the log file name if auto-generated by a script.
if level is None:
level = self._default_level
name = self._get_log_name(name)
if name in self._loggers:
return self._loggers[name]
logfile = make_splunkhome_path(["var", "log", "splunk", name])
logger = logging.getLogger(name)
        handler_exists = any(h.baseFilename == logfile for h in logger.handlers)
if not handler_exists:
file_handler = handlers.RotatingFileHandler(
logfile,
mode="a",
maxBytes=maxBytes,
backupCount=backupCount)
formatter = logging.Formatter(
"%(asctime)s %(levelname)s pid=%(process)d tid=%(threadName)s "
"file=%(filename)s:%(funcName)s:%(lineno)d | %(message)s")
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.setLevel(level)
logger.propagate = False
self._loggers[name] = logger
return logger
def set_level(self, level, name=None):
"""
Change the log level of the logging
:param level: the level of the logging to be setLevel
:param name: the name of the logging to set, in case it is not set,
all the loggers will be affected
"""
if name is not None:
name = self._get_log_name(name)
logger = self._loggers.get(name)
if logger is not None:
logger.setLevel(level)
else:
self._default_level = level
            for logger in self._loggers.values():
logger.setLevel(level)
def _get_log_name(self, name):
if name.endswith(".py"):
name = name.replace(".py", "")
if self._namespace:
name = "{}_{}.log".format(self._namespace, name)
else:
name = "{}.log".format(name)
return name
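# Hypothetical usage of the Logs singleton above. This only works inside a
# Splunk environment, where make_splunkhome_path() resolves to a writable
# $SPLUNK_HOME/var/log/splunk directory; the names used here are illustrative.
def _logs_usage_example():
    logs = Logs(namespace="my_ta")
    logger = logs.get_logger("modinput", level=logging.DEBUG)
    logger.info("data collection started")
    return logger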
|
the-stack_106_25619 | import os, sys, math, gc, time
import numpy as np
from sklearn.model_selection import train_test_split
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Dropout, ReLU
BASE_PATH = os.path.dirname(os.path.realpath(__file__))
MODULES_PATH = os.path.join(BASE_PATH, "..")
TF_PATH = os.path.join(BASE_PATH, "..", "..", "tf_al")
sys.path.append(MODULES_PATH)
sys.path.append(TF_PATH)
from tf_al import Config, Dataset, ExperimentSuitMetrics, ExperimentSuit, AcquisitionFunction
from tf_al.wrapper import McDropout
# from tf_al_mp.wrapper import MomentPropagation
from models import fchollet_cnn, setup_growth, disable_tf_logs
from utils import setup_logger
verbose = True
# Synthetic dataset
inputs = np.random.randn(100, 10)
targets = np.random.randn(100)
x_test = np.random.randn(50, 10)
y_test = np.random.randn(50)
dataset = Dataset(inputs, targets, test=(x_test, y_test), init_size=5)
# Model
setup_growth()
num_classes = 1
batch_size = 900
sample_size = 25
base_model = Sequential([
Dense(num_classes, activation="relu"),
Dropout(.25),
Dense(num_classes*2),
Dense(1),
ReLU()
])
config = Config(
fit={"epochs": 200, "batch_size": batch_size},
query={"sample_size": sample_size},
eval={"batch_size": batch_size, "sample_size": sample_size}
)
mc_model = McDropout(base_model, config=config, classification=False, verbose=verbose)
optimizer = "adam"
loss = "mean_squared_error"
mc_model.compile(optimizer=optimizer, loss=loss)
# Active Learning
BASE_PATH = os.path.dirname(os.path.realpath(__file__))
METRICS_PATH = os.path.join(BASE_PATH, "..", "..", "metrics")
metrics_handler = ExperimentSuitMetrics(os.path.join(METRICS_PATH, "debug_regression"))
step_size = 10
query_fns = [
AcquisitionFunction("random", batch_size=batch_size, verbose=verbose),
AcquisitionFunction("max_entropy", batch_size=batch_size, verbose=verbose)
]
experiments = ExperimentSuit(
mc_model,
query_fns,
dataset,
step_size=step_size,
no_save_state=True,
metrics_handler=metrics_handler,
verbose=verbose
)
experiments.start() |
the-stack_106_25620 | # Copyright 2014 Datera
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from oslo.config import cfg
import requests
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import log as logging
from cinder.openstack.common import units
from cinder.volume.drivers.san import san
LOG = logging.getLogger(__name__)
d_opts = [
cfg.StrOpt('datera_api_token',
default=None,
help='Datera API token.'),
cfg.StrOpt('datera_api_port',
default='7717',
help='Datera API port.'),
cfg.StrOpt('datera_api_version',
default='1',
help='Datera API version.'),
cfg.StrOpt('datera_num_replicas',
default='3',
help='Number of replicas to create of an inode.')
]
CONF = cfg.CONF
CONF.import_opt('driver_client_cert_key', 'cinder.volume.driver')
CONF.import_opt('driver_client_cert', 'cinder.volume.driver')
CONF.register_opts(d_opts)
class DateraDriver(san.SanISCSIDriver):
"""The OpenStack Datera Driver
Version history:
1.0 - Initial driver
"""
VERSION = '1.0'
def __init__(self, *args, **kwargs):
super(DateraDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(d_opts)
self.num_replicas = self.configuration.datera_num_replicas
self.cluster_stats = {}
def create_volume(self, volume):
"""Create a logical volume."""
params = {
'name': volume['display_name'] or volume['id'],
'size': str(volume['size'] * units.Gi),
'uuid': volume['id'],
'numReplicas': self.num_replicas
}
self._issue_api_request('volumes', 'post', body=params)
def create_cloned_volume(self, volume, src_vref):
data = {
'name': volume['display_name'] or volume['id'],
'uuid': volume['id'],
'clone_uuid': src_vref['id'],
'numReplicas': self.num_replicas
}
self._issue_api_request('volumes', 'post', body=data)
def delete_volume(self, volume):
try:
self._issue_api_request('volumes', 'delete', volume['id'])
except exception.NotFound:
msg = _("Tried to delete volume %s, but it was not found in the "
"Datera cluster. Continuing with delete.")
LOG.info(msg, volume['id'])
def _do_export(self, context, volume):
"""Gets the associated account, retrieves CHAP info and updates."""
if volume['provider_location']:
return {'provider_location': volume['provider_location']}
export = self._issue_api_request(
'volumes', action='export', method='post',
body={'ctype': 'TC_BLOCK_ISCSI'}, resource=volume['id'])
# NOTE(thingee): Refer to the Datera test for a stub of what this looks
# like. We're just going to pull the first IP that the Datera cluster
# makes available for the portal.
iscsi_portal = export['_ipColl'][0] + ':3260'
iqn = export['targetIds'].itervalues().next()['ids'][0]['id']
provider_location = '%s %s %s' % (iscsi_portal, iqn, 1)
model_update = {'provider_location': provider_location}
return model_update
def ensure_export(self, context, volume):
return self._do_export(context, volume)
def create_export(self, context, volume):
return self._do_export(context, volume)
def detach_volume(self, context, volume):
try:
self._issue_api_request('volumes', 'delete', resource=volume['id'],
action='export')
except exception.NotFound:
msg = _("Tried to delete export for volume %s, but it was not "
"found in the Datera cluster. Continuing with volume "
"detach")
LOG.info(msg, volume['id'])
def delete_snapshot(self, snapshot):
try:
self._issue_api_request('snapshots', 'delete', snapshot['id'])
except exception.NotFound:
msg = _("Tried to delete snapshot %s, but was not found in Datera "
"cluster. Continuing with delete.")
LOG.info(msg, snapshot['id'])
def create_snapshot(self, snapshot):
data = {
'uuid': snapshot['id'],
'parentUUID': snapshot['volume_id']
}
self._issue_api_request('snapshots', 'post', body=data)
def create_volume_from_snapshot(self, volume, snapshot):
data = {
'name': volume['display_name'] or volume['id'],
'uuid': volume['id'],
'snapshot_uuid': snapshot['id'],
'numReplicas': self.num_replicas
}
self._issue_api_request('volumes', 'post', body=data)
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, run update first.
The name is a bit misleading as
the majority of the data here is cluster
data.
"""
if refresh:
try:
self._update_cluster_stats()
except exception.DateraAPIException:
LOG.error('Failed to get updated stats from Datera cluster.')
return self.cluster_stats
def extend_volume(self, volume, new_size):
data = {
'size': str(new_size * units.Gi)
}
self._issue_api_request('volumes', 'put', body=data,
resource=volume['id'])
def _update_cluster_stats(self):
LOG.debug("Updating cluster stats info.")
results = self._issue_api_request('cluster')
if 'uuid' not in results:
LOG.error(_('Failed to get updated stats from Datera Cluster.'))
backend_name = self.configuration.safe_get('volume_backend_name')
stats = {
'volume_backend_name': backend_name or 'Datera',
'vendor_name': 'Datera',
'driver_version': self.VERSION,
'storage_protocol': 'iSCSI',
'total_capacity_gb': int(results['totalRawSpace']),
'free_capacity_gb': int(results['availableSpace']),
'reserved_percentage': 0,
}
self.cluster_stats = stats
def _issue_api_request(self, resource_type, method='get', resource=None,
body=None, action=None):
"""All API requests to Datera cluster go through this method.
:param resource_type: the type of the resource
:param method: the request verb
:param resource: the identifier of the resource
:param body: a dict with options for the action_type
:param action: the action to perform
:returns: a dict of the response from the Datera cluster
"""
host = self.configuration.san_ip
port = self.configuration.datera_api_port
api_token = self.configuration.datera_api_token
api_version = self.configuration.datera_api_version
payload = json.dumps(body, ensure_ascii=False)
payload.encode('utf-8')
header = {'Content-Type': 'application/json; charset=utf-8'}
if api_token:
header['Auth-Token'] = api_token
LOG.debug("Payload for Datera API call: %s", payload)
client_cert = self.configuration.driver_client_cert
client_cert_key = self.configuration.driver_client_cert_key
protocol = 'http'
cert_data = None
if client_cert:
protocol = 'https'
cert_data = (client_cert, client_cert_key)
connection_string = '%s://%s:%s/v%s/%s' % (protocol, host, port,
api_version, resource_type)
if resource is not None:
connection_string += '/%s' % resource
if action is not None:
connection_string += '/%s' % action
LOG.debug("Endpoint for Datera API call: %s", connection_string)
try:
response = getattr(requests, method)(connection_string,
data=payload, headers=header,
verify=False, cert=cert_data)
except requests.exceptions.RequestException as ex:
msg = _('Failed to make a request to Datera cluster endpoint due '
'to the following reason: %s') % ex.message
LOG.error(msg)
raise exception.DateraAPIException(msg)
data = response.json()
LOG.debug("Results of Datera API call: %s", data)
if not response.ok:
if response.status_code == 404:
raise exception.NotFound(data['message'])
else:
msg = _('Request to Datera cluster returned bad status:'
' %(status)s | %(reason)s') % {
'status': response.status_code,
'reason': response.reason}
LOG.error(msg)
raise exception.DateraAPIException(msg)
return data
|
the-stack_106_25621 | import os
class DictParseError(Exception): pass
def open_dict(dict_file):
dict_data = []
with open(dict_file, 'r') as df:
for line in df:
dict_data.append(line)
return dict_data
def parse_dict(data):
temp_dict = {}
    for line in data:
        if not line.strip():  # skip blank lines
            continue
        d1 = line.split(':', 1)
filename = d1[0].strip().lower()
keywords = d1[1].strip().lower()
realpath = os.path.join("flags", filename+'.png')
fullpath = os.path.join(os.getcwd(),realpath)
if not os.path.exists(fullpath):
raise DictParseError(f'"{filename}" does not refer to an actual file.')
        if not keywords:  # empty keyword list
raise DictParseError(f'Keyword list for "{filename}" is blank.')
keyword_list = keywords.split()
temp_dict[filename] = keyword_list
return temp_dict
def build_dict(dict_file):
temp = parse_dict(open_dict(dict_file))
final = {}
for key, val in temp.items():
for item in val:
final[item] = key
return final
if __name__ == '__main__':
print('TESTING...')
final = build_dict('config.txt')
for key, val in final.items():
print(f'{key}: {val}')
print('\nTest run complete!')
q = input('Press any key to exit.')
|
the-stack_106_25625 | import os
import platform
def get_data_dir() -> str:
system = platform.system()
if system == "Windows":
return os.getenv('APPDATA') + "/scbw"
else:
return os.path.expanduser("~") + "/.scbw"
VERSION = "1.0.4"
SCBW_BASE_DIR = get_data_dir()
SC_GAME_DIR = f"{SCBW_BASE_DIR}/games"
SC_BWAPI_DATA_BWTA_DIR = f"{SCBW_BASE_DIR}/bwapi-data/BWTA"
SC_BWAPI_DATA_BWTA2_DIR = f"{SCBW_BASE_DIR}/bwapi-data/BWTA2"
SC_BOT_DIR = f"{SCBW_BASE_DIR}/bots"
SC_MAP_DIR = f"{SCBW_BASE_DIR}/maps"
SC_IMAGE = "starcraft:game-" + VERSION
SC_PARENT_IMAGE = "ggaic/starcraft:java-" + VERSION
SC_JAVA_IMAGE = "starcraft:java"
SC_BINARY_LINK = "http://files.theabyss.ru/sc/starcraft.zip"
|
the-stack_106_25626 | # Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from onnx import TensorProto
from onnx import helper as oh
from finn.custom_op.registry import getCustomOp
from finn.transformation import Transformation
from finn.util.basic import get_by_name
import numpy as np
class InsertTLastMarker(Transformation):
"""Ensure that the graph is started/terminated with a TLastMarker node, inserting
one if necessary. Use constructor args to determine type of TLastMarker to be inserted.
More information available on the TLastMarker documentation.
"""
def __init__(self, both=False, external=True, dynamic=True):
super().__init__()
self.dyniters = dynamic
self.external = external
self.both = both
def apply(self, model):
# TODO only makes sense for a pure fpgadataflow graph -- check!
graph_out_name = model.graph.output[0].name
final_node = model.find_producer(graph_out_name)
graph_modified = False
if final_node.op_type != "TLastMarker" and not (
final_node.op_type == "IODMA"
and get_by_name(final_node.attribute, "direction").s.decode("UTF-8")
== "out"
):
custom_op = getCustomOp(final_node)
num_iters = int(custom_op.get_number_output_values())
stream_width = int(custom_op.get_outstream_width())
out_shape = model.get_tensor_shape(graph_out_name)
out_dtype = model.get_tensor_datatype(graph_out_name)
elem_width = out_dtype.bitwidth()
# make new buffer
final_node_out = oh.make_tensor_value_info(
model.make_new_valueinfo_name(), TensorProto.FLOAT, out_shape
)
model.graph.value_info.append(final_node_out)
model.set_tensor_datatype(final_node_out.name, out_dtype)
# reroute final node output to final_node_out_name
final_node.output[0] = final_node_out.name
tlast_node = oh.make_node(
"TLastMarker",
[final_node_out.name],
[graph_out_name],
NumIters=num_iters,
StreamWidth=stream_width,
ElemWidth=elem_width,
DynIters=(1 if self.dyniters else 0),
Direction="out",
Protocol=("external" if self.external else "internal"),
domain="finn",
backend="fpgadataflow",
)
model.graph.node.append(tlast_node)
graph_modified = True
# if both is True, also insert marker on input
if self.both:
graph_in_name = model.graph.input[0].name
first_node = model.find_consumer(graph_in_name)
if first_node.op_type != "TLastMarker" and not (
first_node.op_type == "IODMA"
and get_by_name(first_node.attribute, "direction").s.decode("UTF-8")
== "in"
):
custom_op = getCustomOp(first_node)
num_iters = np.prod(custom_op.get_folded_input_shape()[1:-1])
stream_width = int(custom_op.get_instream_width())
in_shape = model.get_tensor_shape(graph_in_name)
in_dtype = model.get_tensor_datatype(graph_in_name)
elem_width = in_dtype.bitwidth()
# make new buffer
first_node_in = oh.make_tensor_value_info(
model.make_new_valueinfo_name(), TensorProto.FLOAT, in_shape
)
model.graph.value_info.append(first_node_in)
model.set_tensor_datatype(first_node_in.name, in_dtype)
# reroute final node output to first_node_in_name
first_node.input[0] = first_node_in.name
tlast_node = oh.make_node(
"TLastMarker",
[graph_in_name],
[first_node_in.name],
NumIters=num_iters,
StreamWidth=stream_width,
ElemWidth=elem_width,
DynIters=(1 if self.dyniters else 0),
Direction="in",
Protocol=("external" if self.external else "internal"),
domain="finn",
backend="fpgadataflow",
)
model.graph.node.insert(0, tlast_node)
graph_modified = True
return (model, graph_modified)
|
the-stack_106_25627 | import math, os, time
from math import ceil, floor
import xtils
# CIFA10 --------------------
# batch_nums = math.ceil(data_info['train_size']/bsize_train)
train_size = 50000
batch_size = 128
batch_size_val = 64
batch_nums = math.ceil(train_size / batch_size)
BN = batch_nums # =>> Unit #5005
cfgar = {
# experiment config
'exp_version': 'exp.xxx',
'train_val_test': (True, True, True),
# device config
'gpu_ids': [0, 1, 2, 3, 4, 5, 6, 7, 8][0:4],
# model config
'arch_name': 'actres',
'arch_kwargs': {},
'resume': None,
'resume_config': True,
'resume_optimizer': True,
'mgpus_to_sxpu': ['m2s', 's2m', 'none', 'auto'][3],
# data config
'dataset': 'cifar10',
'data_info': {'train_size': train_size, 'val_size': 10000, 'test_size': 5000},
'data_root': xtils.get_data_root(data='cifar10'),
'data_augment': {'train': '1crop-flip', 'val': 'no-aug'},
'data_kwargs': {},
'data_workers': 4,
# path config
'current_time': '',
'ckpt_suffix': '', # when save a ckpt, u can add a special mark to its filename.
'ckpt_base_dir': xtils.get_base_dir(k='ckpt'),
'ckpt_dir': 'auto-setting',
'log_base_dir': xtils.get_base_dir(k='log'),
'log_dir': 'auto-setting',
# iter config
'start_iter': 0,
'max_iters': [350 * BN, 90 * BN, 60 * BN, 40 * BN, 120 * BN][0],
'start_epoch': 0,
'max_epochs': 0,
'bsize_train': batch_size,
'bsize_val': batch_size_val,
'batch_nums': batch_nums,
    'Unit': {'epoch': batch_nums, 'iter': 1}[['epoch', 'iter'][0]],  # adjust lr/bs per epoch or per iteration?
'BN': BN,
# lr config
'optim_type': ['Adam', 'SGD'][1],
'optim_custom': False,
'lr_start': {'Adam': 0.01, 'SGD': 0.1}['SGD'],
'lr_decay_policy': ['regular', 'appoint', 'original', 'trace_prec'][1],
'lr_decay_appoint': ((260 * BN, 1 / 10), (300 * BN, 1 / 10), (340 * BN, 1 / 10)), # large
'momentum': 0.9,
'weight_decay': [0.0001, 0.0005, 0.00017, 0.0006][-1],
'nesterov': False,
'rmsprop_alpha': '',
'rmsprop_centered': '',
'decay_fly': {'flymode': ['nofly', 'stepall'][0]},
# frequency config
# # Note: if val_freq: (0, plot_freq)
# # the loss-curve/prec-curve of train and val can have same x-axis point.
'best_prec': {'train_prec1': 0, 'train_prec5': 0, 'val_prec1': 0, 'val_prec5': 0,
'best_start': 3, 'best_ok': False},
'print_frequency': BN // 100,
'plot_frequency': BN // 100, # 5005/100=50
'val_frequency': (0 * BN, BN // 1),
'test_frequency': (999 * BN, BN // 1),
'save_frequency': (0 * BN, BN // 1),
# forzen config
'mode_custom': False,
'train_which': [],
'eval_which': [],
'xfc_which': -1,
# time config
'valid_total_time': 0,
'test_total_time': 0,
'exp_tic': time.time(),
'exclude_keys': ('exclude_keys', 'gpu_ids', 'device', 'resume'),
} |
the-stack_106_25628 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : 2019-11-25 23:58:55
# @Author : Racter Liu (racterub) ([email protected])
# @Link : https://racterub.io
# @License : MIT
import csv
f = open("./Dengue_Daily_EN.csv")
rows = csv.DictReader(f)
#1
scope = ["Taipei City", "New Taipei City"]
for row in rows:
if row["County_living"] in scope:
print("Date_Onset: {}, Sex: {}, Age_Group: {}, County_living: {}, Township_living: {}".format(row["Date_Onset"], row["Sex"], row["Age_Group"], row["County_living"], row["Township_living"]))
#2
def dataSplitter(data):
return data.split("/")
def createList(n):
    # n rows (one per year) x 12 columns (one per month), all counters start at 0
    return [[0] * 12 for _ in range(n)]
casesDate = createList(22) #Create lists to store result
f = open("./Dengue_Daily_EN.csv")
rows = csv.DictReader(f)
for row in rows:
year, month, day = dataSplitter(row["Date_Onset"])
casesDate[int(year)-1998][int(month)-1] += 1
for i in range(len(casesDate)):
for k in range(len(casesDate[0])):
print("{}/{} cases: {}".format(i + 1998, k+1, casesDate[i][k]))
|
the-stack_106_25631 | """Download handlers for http and https schemes"""
from time import time
from cStringIO import StringIO
from urlparse import urldefrag
from zope.interface import implements
from twisted.internet import defer, reactor, protocol
from twisted.web.http_headers import Headers as TxHeaders
from twisted.web.iweb import IBodyProducer
from twisted.internet.error import TimeoutError
from twisted.web.http import PotentialDataLoss
from scrapy.xlib.tx import Agent, ProxyAgent, ResponseDone, \
HTTPConnectionPool, TCP4ClientEndpoint
from scrapy.http import Headers
from scrapy.responsetypes import responsetypes
from scrapy.core.downloader.webclient import _parse
from scrapy.utils.misc import load_object
class HTTP11DownloadHandler(object):
def __init__(self, settings):
self._pool = HTTPConnectionPool(reactor, persistent=True)
self._pool.maxPersistentPerHost = settings.getint('CONCURRENT_REQUESTS_PER_DOMAIN')
self._pool._factory.noisy = False
self._contextFactoryClass = load_object(settings['DOWNLOADER_CLIENTCONTEXTFACTORY'])
self._contextFactory = self._contextFactoryClass()
def download_request(self, request, spider):
"""Return a deferred for the HTTP download"""
agent = ScrapyAgent(contextFactory=self._contextFactory, pool=self._pool)
return agent.download_request(request)
def close(self):
return self._pool.closeCachedConnections()
class ScrapyAgent(object):
_Agent = Agent
_ProxyAgent = ProxyAgent
def __init__(self, contextFactory=None, connectTimeout=10, bindAddress=None, pool=None):
self._contextFactory = contextFactory
self._connectTimeout = connectTimeout
self._bindAddress = bindAddress
self._pool = pool
def _get_agent(self, request, timeout):
bindaddress = request.meta.get('bindaddress') or self._bindAddress
proxy = request.meta.get('proxy')
if proxy:
scheme, _, host, port, _ = _parse(proxy)
endpoint = TCP4ClientEndpoint(reactor, host, port, timeout=timeout,
bindAddress=bindaddress)
return self._ProxyAgent(endpoint)
return self._Agent(reactor, contextFactory=self._contextFactory,
connectTimeout=timeout, bindAddress=bindaddress, pool=self._pool)
def download_request(self, request):
timeout = request.meta.get('download_timeout') or self._connectTimeout
agent = self._get_agent(request, timeout)
# request details
url = urldefrag(request.url)[0]
method = request.method
headers = TxHeaders(request.headers)
bodyproducer = _RequestBodyProducer(request.body) if request.body else None
start_time = time()
d = agent.request(method, url, headers, bodyproducer)
# set download latency
d.addCallback(self._cb_latency, request, start_time)
# response body is ready to be consumed
d.addCallback(self._cb_bodyready, request)
d.addCallback(self._cb_bodydone, request, url)
# check download timeout
self._timeout_cl = reactor.callLater(timeout, d.cancel)
d.addBoth(self._cb_timeout, request, url, timeout)
return d
def _cb_timeout(self, result, request, url, timeout):
if self._timeout_cl.active():
self._timeout_cl.cancel()
return result
raise TimeoutError("Getting %s took longer than %s seconds." % (url, timeout))
def _cb_latency(self, result, request, start_time):
request.meta['download_latency'] = time() - start_time
return result
def _cb_bodyready(self, txresponse, request):
# deliverBody hangs for responses without body
if txresponse.length == 0:
return txresponse, '', None
def _cancel(_):
txresponse._transport._producer.loseConnection()
d = defer.Deferred(_cancel)
txresponse.deliverBody(_ResponseReader(d, txresponse, request))
return d
def _cb_bodydone(self, result, request, url):
txresponse, body, flags = result
status = int(txresponse.code)
headers = Headers(txresponse.headers.getAllRawHeaders())
respcls = responsetypes.from_args(headers=headers, url=url)
return respcls(url=url, status=status, headers=headers, body=body, flags=flags)
class _RequestBodyProducer(object):
implements(IBodyProducer)
def __init__(self, body):
self.body = body
self.length = len(body)
def startProducing(self, consumer):
consumer.write(self.body)
return defer.succeed(None)
def pauseProducing(self):
pass
def stopProducing(self):
pass
class _ResponseReader(protocol.Protocol):
def __init__(self, finished, txresponse, request):
self._finished = finished
self._txresponse = txresponse
self._request = request
self._bodybuf = StringIO()
def dataReceived(self, bodyBytes):
self._bodybuf.write(bodyBytes)
def connectionLost(self, reason):
if self._finished.called:
return
body = self._bodybuf.getvalue()
if reason.check(ResponseDone):
self._finished.callback((self._txresponse, body, None))
elif reason.check(PotentialDataLoss):
self._finished.callback((self._txresponse, body, ['partial']))
else:
self._finished.errback(reason)
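# Usage sketch (illustrative): the engine calls the handler from inside the
# Twisted reactor; download_request returns a Deferred that fires with the
# decoded scrapy Response for the given request.
def _download_handler_example(settings, request, spider):
    handler = HTTP11DownloadHandler(settings)
    return handler.download_request(request, spider)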
|
the-stack_106_25633 | import os
import time
import yaml
import math
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib.backends.backend_agg import FigureCanvasAgg
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from datetime import datetime, timedelta
from argparse import ArgumentParser
from collections import defaultdict
import torch
import torch.nn as nn
from torch import optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
import oft
from oft import OftNet, KittiObjectDataset, MetricDict, huber_loss, ObjectEncoder
def train(args, dataloader, model, encoder, optimizer, summary, epoch):
print('\n==> Training on {} minibatches'.format(len(dataloader)))
model.train()
epoch_loss = oft.MetricDict()
t = time.time()
for i, (_, image, calib, objects, grid) in enumerate(dataloader):
# Move tensors to GPU
if len(args.gpu) > 0:
image, calib, grid = image.cuda(), calib.cuda(), grid.cuda()
# Run network forwards
pred_encoded = model(image, calib, grid)
# Encode ground truth objects
gt_encoded = encoder.encode_batch(objects, grid)
# Compute losses
loss, loss_dict = compute_loss(
pred_encoded, gt_encoded, args.loss_weights)
if float(loss) != float(loss):
raise RuntimeError('Loss diverged :(')
epoch_loss += loss_dict
# Optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Print summary
if i % args.print_iter == 0 and i != 0:
batch_time = (time.time() - t) / (1 if i == 0 else args.print_iter)
eta = ((args.epochs - epoch + 1) * len(dataloader) - i) * batch_time
s = '[{:4d}/{:4d}] batch_time: {:.2f}s eta: {:s} loss: '.format(
i, len(dataloader), batch_time,
str(timedelta(seconds=int(eta))))
for k, v in loss_dict.items():
s += '{}: {:.2e} '.format(k, v)
print(s)
t = time.time()
# Visualize predictions
if i % args.vis_iter == 0:
# Visualize image
summary.add_image('train/image', visualize_image(image), epoch)
# Visualize scores
summary.add_figure('train/score',
visualize_score(pred_encoded[0], gt_encoded[0], grid), epoch)
# TODO decode and save results
# Print epoch summary and save results
print('==> Training epoch complete')
for key, value in epoch_loss.mean.items():
print('{:8s}: {:.4e}'.format(key, value))
summary.add_scalar('train/loss/{}'.format(key), value, epoch)
def validate(args, dataloader, model, encoder, summary, epoch):
print('\n==> Validating on {} minibatches\n'.format(len(dataloader)))
model.eval()
epoch_loss = MetricDict()
for i, (_, image, calib, objects, grid) in enumerate(dataloader):
# Move tensors to GPU
if len(args.gpu) > 0:
image, calib, grid = image.cuda(), calib.cuda(), grid.cuda()
with torch.no_grad():
# Run network forwards
pred_encoded = model(image, calib, grid)
# Encode ground truth objects
gt_encoded = encoder.encode_batch(objects, grid)
# Compute losses
_, loss_dict = compute_loss(
pred_encoded, gt_encoded, args.loss_weights)
epoch_loss += loss_dict
# Visualize predictions
if i % args.vis_iter == 0:
# Visualize image
summary.add_image('val/image', visualize_image(image), epoch)
# Visualize scores
summary.add_figure('val/score',
visualize_score(pred_encoded[0], gt_encoded[0], grid), epoch)
# TODO decode and save results
# TODO evaluate
print('\n==> Validation epoch complete')
for key, value in epoch_loss.mean.items():
print('{:8s}: {:.4e}'.format(key, value))
summary.add_scalar('val/loss/{}'.format(key), value, epoch)
def compute_loss(pred_encoded, gt_encoded, loss_weights=[1., 1., 1., 1.]):
# Expand tuples
score, pos_offsets, dim_offsets, ang_offsets = pred_encoded
heatmaps, gt_pos_offsets, gt_dim_offsets, gt_ang_offsets, mask = gt_encoded
score_weight, pos_weight, dim_weight, ang_weight = loss_weights
# Compute losses
score_loss = huber_loss(score, heatmaps)
pos_loss = huber_loss(pos_offsets, gt_pos_offsets, mask.unsqueeze(2))
dim_loss = huber_loss(dim_offsets, gt_dim_offsets, mask.unsqueeze(2))
ang_loss = huber_loss(ang_offsets, gt_ang_offsets, mask.unsqueeze(2))
# Combine loss
total_loss = score_loss * score_weight + pos_loss * pos_weight \
+ dim_loss * dim_weight + ang_loss * ang_weight
# Store scalar losses in a dictionary
loss_dict = {
'score' : float(score_loss), 'position' : float(pos_loss),
'dimension' : float(dim_loss), 'angle' : float(ang_loss),
'total' : float(total_loss)
}
return total_loss, loss_dict
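# Minimal sketch exercising compute_loss() with zero tensors. The shapes are
# illustrative assumptions (batch 1, 1 class, 3 offset channels, 8x8 BEV grid);
# real shapes come from ObjectEncoder.encode_batch and the network head.
def _compute_loss_example():
    score = torch.zeros(1, 1, 8, 8)
    offsets = torch.zeros(1, 1, 3, 8, 8)
    mask = torch.zeros(1, 1, 8, 8)
    pred = (score, offsets, offsets.clone(), offsets.clone())
    gt = (score.clone(), offsets.clone(), offsets.clone(), offsets.clone(), mask)
    return compute_loss(pred, gt, loss_weights=[1., 1., 1., 1.])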
def visualize_image(image):
return image[0].cpu().detach()
def visualize_score(scores, heatmaps, grid):
# Visualize score
fig_score = plt.figure(num='score', figsize=(8, 6))
fig_score.clear()
oft.vis_score(scores[0, 0], grid[0], ax=plt.subplot(121))
oft.vis_score(heatmaps[0, 0], grid[0], ax=plt.subplot(122))
return fig_score
def parse_args():
parser = ArgumentParser()
# Data options
parser.add_argument('--root', type=str, default='data/kitti',
help='root directory of the KITTI dataset')
parser.add_argument('--grid-size', type=float, nargs=2, default=(80., 80.),
help='width and depth of validation grid, in meters')
parser.add_argument('--train-grid-size', type=int, nargs=2,
default=(120, 120),
help='width and depth of training grid, in pixels')
parser.add_argument('--grid-jitter', type=float, nargs=3,
default=[.25, .5, .25],
help='magn. of random noise applied to grid coords')
parser.add_argument('--train-image-size', type=int, nargs=2,
                        default=[1080, 360],
help='size of random image crops during training')
parser.add_argument('--yoffset', type=float, default=1.74,
help='vertical offset of the grid from the camera axis')
# Model options
    parser.add_argument('--grid-height', type=float, default=4.,
                        help='height of the grid volume, in meters')
parser.add_argument('-r', '--grid-res', type=float, default=0.5,
help='size of grid cells, in meters')
parser.add_argument('--frontend', type=str, default='resnet18',
choices=['resnet18', 'resnet34'],
help='name of frontend ResNet architecture')
parser.add_argument('--topdown', type=int, default=8,
help='number of residual blocks in topdown network')
# Optimization options
parser.add_argument('-l', '--lr', type=float, default=1e-7,
help='learning rate')
parser.add_argument('--momentum', type=float, default=0.9,
help='momentum for SGD')
parser.add_argument('--weight-decay', type=float, default=1e-4,
help='weight decay')
parser.add_argument('--lr-decay', type=float, default=0.99,
help='factor to decay learning rate by every epoch')
parser.add_argument('--loss-weights', type=float, nargs=4,
default=[1., 1., 1., 1.],
help="loss weighting factors for score, position,"\
" dimension and angle loss respectively")
# Training options
parser.add_argument('-e', '--epochs', type=int, default=600,
help='number of epochs to train for')
parser.add_argument('-b', '--batch-size', type=int, default=1,
help='mini-batch size for training')
# Experiment options
parser.add_argument('name', type=str, default='test',
help='name of experiment')
parser.add_argument('-s', '--savedir', type=str,
default='experiments',
help='directory to save experiments to')
parser.add_argument('-g', '--gpu', type=int, nargs='*', default=[0],
help='ids of gpus to train on. Leave empty to use cpu')
parser.add_argument('-w', '--workers', type=int, default=4,
help='number of worker threads to use for data loading')
parser.add_argument('--val-interval', type=int, default=10,
help='number of epochs between validation runs')
parser.add_argument('--print-iter', type=int, default=10,
help='print loss summary every N iterations')
parser.add_argument('--vis-iter', type=int, default=50,
help='display visualizations every N iterations')
return parser.parse_args()
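# A typical invocation might look like the following (the script name "train.py" and the
# experiment name are placeholders):
#   python train.py my-experiment --gpu 0 -b 4 -l 1e-5 --epochs 100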
def _make_experiment(args):
print('\n' + '#' * 80)
print(datetime.now().strftime('%A %-d %B %Y %H:%M'))
print('Creating experiment \'{}\' in directory:\n {}'.format(
args.name, args.savedir))
print('#' * 80)
print('\nConfig:')
for key in sorted(args.__dict__):
print(' {:12s} {}'.format(key + ':', args.__dict__[key]))
print('#' * 80)
# Create a new directory for the experiment
savedir = os.path.join(args.savedir, args.name)
os.makedirs(savedir, exist_ok=True)
# Create tensorboard summary writer
summary = SummaryWriter(savedir)
# Save configuration to file
with open(os.path.join(savedir, 'config.yml'), 'w') as fp:
yaml.safe_dump(args.__dict__, fp)
# Write config as a text summary
summary.add_text('config', '\n'.join(
'{:12s} {}'.format(k, v) for k, v in sorted(args.__dict__.items())))
summary.file_writer.flush()
return summary
def save_checkpoint(args, epoch, model, optimizer, scheduler):
ckpt = {
'epoch' : epoch,
'model' : model.state_dict(),
'optim' : optimizer.state_dict(),
'scheduler' : scheduler.state_dict(),
}
ckpt_file = os.path.join(
args.savedir, args.name, 'checkpoint-{:04d}.pth.gz'.format(epoch))
print('==> Saving checkpoint \'{}\''.format(ckpt_file))
torch.save(ckpt, ckpt_file)
def main():
# Parse command line arguments
args = parse_args()
# Create experiment
summary = _make_experiment(args)
# Create datasets
train_data = KittiObjectDataset(
args.root, 'train', args.grid_size, args.grid_res, args.yoffset)
val_data = KittiObjectDataset(
args.root, 'val', args.grid_size, args.grid_res, args.yoffset)
# Apply data augmentation
# train_data = oft.AugmentedObjectDataset(
# train_data, args.train_image_size, args.train_grid_size,
# jitter=args.grid_jitter)
# Create dataloaders
train_loader = DataLoader(train_data, args.batch_size, shuffle=True,
num_workers=args.workers, collate_fn=oft.utils.collate)
val_loader = DataLoader(val_data, args.batch_size, shuffle=False,
num_workers=args.workers,collate_fn=oft.utils.collate)
# Build model
model = OftNet(num_classes=1, frontend=args.frontend,
topdown_layers=args.topdown, grid_res=args.grid_res,
grid_height=args.grid_height)
if len(args.gpu) > 0:
torch.cuda.set_device(args.gpu[0])
model = nn.DataParallel(model, args.gpu).cuda()
# Create encoder
encoder = ObjectEncoder()
# Setup optimizer
    optimizer = optim.SGD(
        model.parameters(), args.lr, momentum=args.momentum,
        weight_decay=args.weight_decay)
scheduler = optim.lr_scheduler.ExponentialLR(optimizer, args.lr_decay)
for epoch in range(1, args.epochs+1):
print('\n=== Beginning epoch {} of {} ==='.format(epoch, args.epochs))
# Update and log learning rate
scheduler.step(epoch-1)
summary.add_scalar('lr', optimizer.param_groups[0]['lr'], epoch)
# Train model
train(args, train_loader, model, encoder, optimizer, summary, epoch)
# Run validation every N epochs
if epoch % args.val_interval == 0:
validate(args, val_loader, model, encoder, summary, epoch)
# Save model checkpoint
save_checkpoint(args, epoch, model, optimizer, scheduler)
if __name__ == '__main__':
main()
|
the-stack_106_25634 | from story.utils import *
import warnings
warnings.filterwarnings("ignore")
import os
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from generator.gpt2.src import sample, encoder, model
import json
class GPT2Generator:
def __init__(self, generate_num=120, temperature=0.4, top_k=None, top_p=0.9, penalty=.2):
self.generate_num = generate_num
self.temp = temperature
self.top_k = top_k
self.top_p = top_p
self.penalty = penalty
self.model_name = "model_v5"
self.model_dir = "generator/gpt2/models"
self.checkpoint_path = os.path.join(self.model_dir, self.model_name)
models_dir = os.path.expanduser(os.path.expandvars(self.model_dir))
self.batch_size = 1
self.samples = 1
self.enc = encoder.get_encoder(self.model_name, models_dir)
hparams = model.default_hparams()
with open(os.path.join(models_dir, self.model_name, 'hparams.json')) as f:
hparams.override_from_dict(json.load(f))
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
self.sess = tf.compat.v1.Session(config=config)
        self.context = tf.compat.v1.placeholder(tf.int32, [self.batch_size, None])
# np.random.seed(seed)
# tf.set_random_seed(seed)
self.output = sample.sample_sequence(
hparams=hparams, length=self.generate_num,
context=self.context,
batch_size=self.batch_size,
temperature=temperature, top_k=top_k, top_p=top_p, penalty=penalty
)
self.top_output = sample.sample_sequence(
hparams=hparams, length=self.generate_num,
context=self.context,
batch_size=self.batch_size,
temperature=0.1, top_k=top_k, top_p=top_p
)
        saver = tf.compat.v1.train.Saver()
ckpt = tf.train.latest_checkpoint(os.path.join(models_dir, self.model_name))
saver.restore(self.sess, ckpt)
def generate(self, prompt, use_top: bool = False):
context_tokens = self.enc.encode(prompt)
out = self.sess.run(self.top_output if use_top else self.output, feed_dict={
self.context: [context_tokens]
})[0, len(context_tokens):]
return self.enc.decode(out)
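# A minimal usage sketch (it assumes the "model_v5" checkpoint exists under
# generator/gpt2/models and that this module is imported from the project root):
#
#   generator = GPT2Generator(temperature=0.4)
#   continuation = generator.generate("You enter the dark cave.")
#   print(continuation)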
|
the-stack_106_25635 | import torch
from torch.autograd import gradcheck
from nitorch.spatial import grid_grad, grid_pull, grid_push, grid_count
from nitorch.spatial import identity_grid, BoundType, InterpolationType
import pytest
# global parameters
dtype = torch.double # data type (double advised to check gradients)
shape1 = 3 # size along each dimension
extrapolate = True
# parameters
bounds = set(BoundType.__members__.values())
orders = set(InterpolationType.__members__.values())
devices = [('cpu', 1)]
if torch.backends.openmp.is_available() or torch.backends.mkl.is_available():
print('parallel backend available')
devices.append(('cpu', 10))
if torch.cuda.is_available():
print('cuda backend available')
devices.append('cuda')
dims = [1, 2, 3]
def make_data(shape, device, dtype):
id = identity_grid(shape, dtype=dtype, device=device)
id = id[None, ...] # add batch dimension
disp = torch.randn(id.shape, device=device, dtype=dtype)
grid = id + disp
vol = torch.randn((1, 1) + shape, device=device, dtype=dtype)
return vol, grid
def init_device(device):
if isinstance(device, (list, tuple)):
device, param = device
else:
param = 1 if device == 'cpu' else 0
if device == 'cuda':
torch.cuda.set_device(param)
torch.cuda.init()
try:
torch.cuda.empty_cache()
except RuntimeError:
pass
device = '{}:{}'.format(device, param)
else:
assert device == 'cpu'
torch.set_num_threads(param)
return torch.device(device)
# FIXME: grid_grad checks are failing
# @pytest.mark.parametrize("device", devices)
# @pytest.mark.parametrize("dim", dims)
# @pytest.mark.parametrize("bound", bounds)
# @pytest.mark.parametrize("interpolation", orders)
# def test_gradcheck_grid_grad(device, dim, bound, interpolation):
# print(f'grid_grad_{dim}d({interpolation}, {bound}) on {device}')
# device = init_device(device)
# shape = (shape1,) * dim
# vol, grid = make_data(shape, device, dtype)
# vol.requires_grad = True
# grid.requires_grad = True
# assert gradcheck(grid_grad, (vol, grid, interpolation, bound, extrapolate),
# rtol=1., raise_exception=True)
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("dim", dims)
@pytest.mark.parametrize("bound", bounds)
@pytest.mark.parametrize("interpolation", orders)
def test_gradcheck_grid_pull(device, dim, bound, interpolation):
print(f'grid_pull_{dim}d({interpolation}, {bound}) on {device}')
device = init_device(device)
shape = (shape1,) * dim
vol, grid = make_data(shape, device, dtype)
vol.requires_grad = True
grid.requires_grad = True
assert gradcheck(grid_pull, (vol, grid, interpolation, bound, extrapolate),
rtol=1., raise_exception=True)
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("dim", dims)
@pytest.mark.parametrize("bound", bounds)
@pytest.mark.parametrize("interpolation", orders)
def test_gradcheck_grid_push(device, dim, bound, interpolation):
print(f'grid_push_{dim}d({interpolation}, {bound}) on {device}')
device = init_device(device)
shape = (shape1,) * dim
vol, grid = make_data(shape, device, dtype)
vol.requires_grad = True
grid.requires_grad = True
assert gradcheck(grid_push, (vol, grid, shape, interpolation, bound, extrapolate),
rtol=1., raise_exception=True)
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("dim", dims)
@pytest.mark.parametrize("bound", bounds)
@pytest.mark.parametrize("interpolation", orders)
def test_gradcheck_grid_count(device, dim, bound, interpolation):
print(f'grid_count_{dim}d({interpolation}, {bound}) on {device}')
device = init_device(device)
shape = (shape1,) * dim
_, grid = make_data(shape, device, dtype)
grid.requires_grad = True
assert gradcheck(grid_count, (grid, shape, interpolation, bound, extrapolate),
rtol=1., raise_exception=True)
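# The parametrized gradchecks above can be run selectively with pytest, for example
# (the file name used here is only an assumption about how this module is saved):
#   pytest test_spatial_gradcheck.py -k "grid_pull and cpu"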
|
the-stack_106_25637 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator 2.3.33.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DeleteValuesDetailsApiModelHistoryUpdateRequestApiModel(Model):
"""Request node history update.
:param node_id: Node to update
:type node_id: str
:param browse_path: An optional path from NodeId instance to
the actual node.
:type browse_path: list[str]
:param details:
:type details: ~azure-iiot-opc-history.models.DeleteValuesDetailsApiModel
:param header:
:type header: ~azure-iiot-opc-history.models.RequestHeaderApiModel
"""
_validation = {
'details': {'required': True},
}
_attribute_map = {
'node_id': {'key': 'nodeId', 'type': 'str'},
'browse_path': {'key': 'browsePath', 'type': '[str]'},
'details': {'key': 'details', 'type': 'DeleteValuesDetailsApiModel'},
'header': {'key': 'header', 'type': 'RequestHeaderApiModel'},
}
def __init__(self, details, node_id=None, browse_path=None, header=None):
super(DeleteValuesDetailsApiModelHistoryUpdateRequestApiModel, self).__init__()
self.node_id = node_id
self.browse_path = browse_path
self.details = details
self.header = header
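# A minimal construction sketch (values are hypothetical; DeleteValuesDetailsApiModel
# comes from the same generated client and its required fields are not shown here):
#
#   details = DeleteValuesDetailsApiModel(...)
#   request = DeleteValuesDetailsApiModelHistoryUpdateRequestApiModel(
#       details=details,
#       node_id='ns=2;s=Demo.Dynamic.Scalar.Double')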
|
the-stack_106_25640 | # Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import iso8601
import mock
import netaddr
from nova import exception
from nova.objects import fixed_ip
from nova.openstack.common import timeutils
from nova.tests import fake_instance
from nova.tests.objects import test_network
from nova.tests.objects import test_objects
fake_fixed_ip = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'id': 123,
'address': '192.168.1.100',
'network_id': None,
'virtual_interface_id': None,
'instance_uuid': None,
'allocated': False,
'leased': False,
'reserved': False,
'host': None,
}
class _TestFixedIPObject(object):
def _compare(self, obj, db_obj):
for field in obj.fields:
            if field == 'virtual_interface':
continue
if field in fixed_ip.FIXED_IP_OPTIONAL_ATTRS:
if obj.obj_attr_is_set(field) and db_obj[field] is not None:
obj_val = obj[field].uuid
db_val = db_obj[field]['uuid']
else:
continue
else:
obj_val = obj[field]
db_val = db_obj[field]
if isinstance(obj_val, netaddr.IPAddress):
obj_val = str(obj_val)
self.assertEqual(db_val, obj_val)
@mock.patch('nova.db.fixed_ip_get')
def test_get_by_id(self, get):
get.return_value = fake_fixed_ip
fixedip = fixed_ip.FixedIP.get_by_id(self.context, 123)
get.assert_called_once_with(self.context, 123, get_network=False)
self._compare(fixedip, fake_fixed_ip)
@mock.patch('nova.db.fixed_ip_get')
@mock.patch('nova.db.network_get')
def test_get_by_id_with_extras(self, network_get, fixed_get):
db_fixed = dict(fake_fixed_ip,
network=test_network.fake_network)
fixed_get.return_value = db_fixed
fixedip = fixed_ip.FixedIP.get_by_id(self.context, 123,
expected_attrs=['network'])
fixed_get.assert_called_once_with(self.context, 123, get_network=True)
self._compare(fixedip, db_fixed)
self.assertEqual(db_fixed['network']['uuid'], fixedip.network.uuid)
self.assertFalse(network_get.called)
@mock.patch('nova.db.fixed_ip_get_by_address')
def test_get_by_address(self, get):
get.return_value = fake_fixed_ip
fixedip = fixed_ip.FixedIP.get_by_address(self.context, '1.2.3.4')
get.assert_called_once_with(self.context, '1.2.3.4',
columns_to_join=[])
self._compare(fixedip, fake_fixed_ip)
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.instance_get')
def test_get_by_address_with_extras(self, instance_get, network_get,
fixed_get):
db_fixed = dict(fake_fixed_ip, network=test_network.fake_network,
instance=fake_instance.fake_db_instance())
fixed_get.return_value = db_fixed
fixedip = fixed_ip.FixedIP.get_by_address(self.context, '1.2.3.4',
expected_attrs=['network',
'instance'])
fixed_get.assert_called_once_with(self.context, '1.2.3.4',
columns_to_join=['network',
'instance'])
self._compare(fixedip, db_fixed)
self.assertEqual(db_fixed['network']['uuid'], fixedip.network.uuid)
self.assertEqual(db_fixed['instance']['uuid'], fixedip.instance.uuid)
self.assertFalse(network_get.called)
self.assertFalse(instance_get.called)
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.instance_get')
def test_get_by_address_with_extras_deleted_instance(self, instance_get,
network_get,
fixed_get):
db_fixed = dict(fake_fixed_ip, network=test_network.fake_network,
instance=None)
fixed_get.return_value = db_fixed
fixedip = fixed_ip.FixedIP.get_by_address(self.context, '1.2.3.4',
expected_attrs=['network',
'instance'])
fixed_get.assert_called_once_with(self.context, '1.2.3.4',
columns_to_join=['network',
'instance'])
self._compare(fixedip, db_fixed)
self.assertEqual(db_fixed['network']['uuid'], fixedip.network.uuid)
self.assertIsNone(fixedip.instance)
self.assertFalse(network_get.called)
self.assertFalse(instance_get.called)
@mock.patch('nova.db.fixed_ip_get_by_floating_address')
def test_get_by_floating_ip(self, get):
get.return_value = fake_fixed_ip
fixedip = fixed_ip.FixedIP.get_by_floating_address(self.context,
'1.2.3.4')
get.assert_called_once_with(self.context, '1.2.3.4')
self._compare(fixedip, fake_fixed_ip)
@mock.patch('nova.db.fixed_ip_get_by_network_host')
def test_get_by_network_and_host(self, get):
get.return_value = fake_fixed_ip
fixedip = fixed_ip.FixedIP.get_by_network_and_host(self.context,
123, 'host')
get.assert_called_once_with(self.context, 123, 'host')
self._compare(fixedip, fake_fixed_ip)
@mock.patch('nova.db.fixed_ip_associate')
def test_associate(self, associate):
associate.return_value = fake_fixed_ip
fixedip = fixed_ip.FixedIP.associate(self.context, '1.2.3.4',
'fake-uuid')
associate.assert_called_with(self.context, '1.2.3.4', 'fake-uuid',
network_id=None, reserved=False)
self._compare(fixedip, fake_fixed_ip)
@mock.patch('nova.db.fixed_ip_associate_pool')
def test_associate_pool(self, associate):
associate.return_value = fake_fixed_ip
fixedip = fixed_ip.FixedIP.associate_pool(self.context, 123,
'fake-uuid', 'host')
associate.assert_called_with(self.context, 123,
instance_uuid='fake-uuid',
host='host')
self._compare(fixedip, fake_fixed_ip)
@mock.patch('nova.db.fixed_ip_disassociate')
def test_disassociate_by_address(self, disassociate):
fixed_ip.FixedIP.disassociate_by_address(self.context, '1.2.3.4')
disassociate.assert_called_with(self.context, '1.2.3.4')
@mock.patch('nova.db.fixed_ip_disassociate_all_by_timeout')
def test_disassociate_all_by_timeout(self, disassociate):
now = timeutils.utcnow()
now_tz = timeutils.parse_isotime(
timeutils.isotime(now)).replace(
tzinfo=iso8601.iso8601.Utc())
disassociate.return_value = 123
result = fixed_ip.FixedIP.disassociate_all_by_timeout(self.context,
'host', now)
self.assertEqual(123, result)
# NOTE(danms): be pedantic about timezone stuff
args, kwargs = disassociate.call_args_list[0]
self.assertEqual(now_tz, args[2])
self.assertEqual((self.context, 'host'), args[:2])
self.assertEqual({}, kwargs)
@mock.patch('nova.db.fixed_ip_create')
def test_create(self, create):
create.return_value = fake_fixed_ip
fixedip = fixed_ip.FixedIP(address='1.2.3.4')
fixedip.create(self.context)
create.assert_called_once_with(
self.context, {'address': '1.2.3.4'})
self._compare(fixedip, fake_fixed_ip)
@mock.patch('nova.db.fixed_ip_update')
def test_save(self, update):
update.return_value = fake_fixed_ip
fixedip = fixed_ip.FixedIP(context=self.context, address='1.2.3.4',
instance_uuid='fake-uuid')
self.assertRaises(exception.ObjectActionError, fixedip.save)
fixedip.obj_reset_changes(['address'])
fixedip.save()
update.assert_called_once_with(self.context, '1.2.3.4',
{'instance_uuid': 'fake-uuid'})
@mock.patch('nova.db.fixed_ip_disassociate')
def test_disassociate(self, disassociate):
fixedip = fixed_ip.FixedIP(context=self.context, address='1.2.3.4',
instance_uuid='fake-uuid')
fixedip.obj_reset_changes()
fixedip.disassociate()
disassociate.assert_called_once_with(self.context, '1.2.3.4')
self.assertIsNone(fixedip.instance_uuid)
@mock.patch('nova.db.fixed_ip_get_all')
def test_get_all(self, get_all):
get_all.return_value = [fake_fixed_ip]
fixedips = fixed_ip.FixedIPList.get_all(self.context)
self.assertEqual(1, len(fixedips))
get_all.assert_called_once_with(self.context)
self._compare(fixedips[0], fake_fixed_ip)
@mock.patch('nova.db.fixed_ip_get_by_instance')
def test_get_by_instance(self, get):
get.return_value = [fake_fixed_ip]
fixedips = fixed_ip.FixedIPList.get_by_instance_uuid(self.context,
'fake-uuid')
self.assertEqual(1, len(fixedips))
get.assert_called_once_with(self.context, 'fake-uuid')
self._compare(fixedips[0], fake_fixed_ip)
@mock.patch('nova.db.fixed_ip_get_by_host')
def test_get_by_host(self, get):
get.return_value = [fake_fixed_ip]
fixedips = fixed_ip.FixedIPList.get_by_host(self.context, 'host')
self.assertEqual(1, len(fixedips))
get.assert_called_once_with(self.context, 'host')
self._compare(fixedips[0], fake_fixed_ip)
@mock.patch('nova.db.fixed_ips_by_virtual_interface')
def test_get_by_virtual_interface_id(self, get):
get.return_value = [fake_fixed_ip]
fixedips = fixed_ip.FixedIPList.get_by_virtual_interface_id(
self.context, 123)
self.assertEqual(1, len(fixedips))
get.assert_called_once_with(self.context, 123)
self._compare(fixedips[0], fake_fixed_ip)
@mock.patch('nova.db.fixed_ip_bulk_create')
def test_bulk_create(self, bulk):
fixed_ips = [fixed_ip.FixedIP(address='192.168.1.1'),
fixed_ip.FixedIP(address='192.168.1.2')]
fixed_ip.FixedIPList.bulk_create(self.context, fixed_ips)
bulk.assert_called_once_with(self.context,
[{'address': '192.168.1.1'},
{'address': '192.168.1.2'}])
@mock.patch('nova.db.network_get_associated_fixed_ips')
def test_get_by_network(self, get):
info = {'address': '1.2.3.4',
'instance_uuid': 'fake-uuid',
'network_id': 0,
'vif_id': 1,
'vif_address': 'de:ad:be:ee:f0:00',
'instance_hostname': 'fake-host',
'instance_updated': datetime.datetime(1955, 11, 5),
'instance_created': datetime.datetime(1955, 11, 5),
'allocated': True,
'leased': True,
}
get.return_value = [info]
fixed_ips = fixed_ip.FixedIPList.get_by_network(
self.context, {'id': 0}, host='fake-host')
get.assert_called_once_with(self.context, 0, host='fake-host')
self.assertEqual(1, len(fixed_ips))
fip = fixed_ips[0]
self.assertEqual('1.2.3.4', str(fip.address))
self.assertEqual('fake-uuid', fip.instance_uuid)
self.assertEqual(0, fip.network_id)
self.assertEqual(1, fip.virtual_interface_id)
self.assertTrue(fip.allocated)
self.assertTrue(fip.leased)
self.assertEqual('fake-uuid', fip.instance.uuid)
self.assertEqual('fake-host', fip.instance.hostname)
self.assertIsInstance(fip.instance.created_at, datetime.datetime)
self.assertIsInstance(fip.instance.updated_at, datetime.datetime)
self.assertEqual(1, fip.virtual_interface.id)
self.assertEqual(info['vif_address'], fip.virtual_interface.address)
class TestFixedIPObject(test_objects._LocalTest,
_TestFixedIPObject):
pass
class TestRemoteFixedIPObject(test_objects._RemoteTest,
_TestFixedIPObject):
pass
|
the-stack_106_25641 | #using pil libraries and pandas
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import pandas as pd
# load the recipient list either from excel ...
# df = pd.ExcelFile('list2.xlsx').parse('Sheet1')
# ... or by using csv (used here)
df = pd.read_csv("list2.csv")
def make_certificate(name): #function for making a certificate with the name drawn on it
    img = Image.open('certificate.jpg')
    draw = ImageDraw.Draw(img)
    selectFont = ImageFont.truetype('LHANDW.ttf', size=64)
    width, height = img.size
    w, h = draw.textsize(name, selectFont)
    draw.text(((width - w) / 2, (height - h) / 2), name, '#7daef7', selectFont)
    img.save('{}.pdf'.format(name))
    img.save('{}.png'.format(name))
for index, row in df.iterrows(): #for loop for making 'n' number of names
    print(index, row['Name'], row['email'])
    make_certificate(row['Name']) #function call for each recipient
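# expected input format (an assumption based on the column accesses above):
# list2.csv should contain at least the columns "Name" and "email", e.g.
#   Name,email
#   Ada Lovelace,[email protected]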
|
the-stack_106_25642 | import xml.etree.ElementTree as ET
from os import getcwd
sets=['train','val','test']
classes = ["trunk"]
def convert_annotation(image_id, list_file):
in_file = open('data/VOC/Annotations/%s.xml'%(image_id))
tree=ET.parse(in_file)
root = tree.getroot()
for obj in root.iter('object'):
difficult = obj.find('difficult').text
cls = obj.find('name').text
if cls not in classes or int(difficult)==1:
continue
cls_id = classes.index(cls)
xmlbox = obj.find('bndbox')
b = (int(xmlbox.find('xmin').text), int(xmlbox.find('ymin').text), int(xmlbox.find('xmax').text), int(xmlbox.find('ymax').text))
list_file.write(" " + ",".join([str(a) for a in b]) + ',' + str(cls_id))
wd = getcwd()
for image_set in sets:
image_ids = open('data/VOC/ImageSets/Main/%s.txt'%(image_set)).read().strip().split()
list_file = open('%s.txt'%(image_set), 'w')
for image_id in image_ids:
list_file.write('%s/data/VOC/JPEGImages/%s.jpg'%(wd, image_id))
convert_annotation(image_id, list_file)
list_file.write('\n')
list_file.close()
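# Each line written to train.txt / val.txt / test.txt has the form (one image per line,
# with one box group appended per kept object by convert_annotation):
#   <cwd>/data/VOC/JPEGImages/<image_id>.jpg xmin,ymin,xmax,ymax,class_id ...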
|
the-stack_106_25643 | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from bigdl.dllib.keras.engine import ZooKerasLayer
if sys.version >= '3':
long = int
unicode = str
class TimeDistributed(ZooKerasLayer):
"""
TimeDistributed wrapper.
Apply a layer to every temporal slice of an input.
The input should be at least 3D.
The dimension of index one will be considered as the temporal dimension.
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
name: String to specify the name of the wrapper. Default is None.
# Arguments
layer: A layer instance.
input_shape: A shape tuple, not including batch.
name: String to set the name of the wrapper.
If not specified, its name will by default to be a generated string.
>>> from bigdl.dllib.keras.layers import Dense
>>> timedistributed = TimeDistributed(Dense(8), input_shape=(10, 12))
creating: createZooKerasDense
creating: createZooKerasTimeDistributed
"""
def __init__(self, layer, input_shape=None, **kwargs):
super(TimeDistributed, self).__init__(None,
layer,
list(input_shape) if input_shape else None,
**kwargs)
class Bidirectional(ZooKerasLayer):
"""
Bidirectional wrapper for RNNs.
Bidirectional currently requires RNNs to return the full sequence, i.e. return_sequences = True.
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
Example of creating a bidirectional LSTM:
Bidirectiona(LSTM(12, return_sequences=True), merge_mode="sum", input_shape=(32, 32))
# Arguments
layer: An instance of a recurrent layer.
merge_mode: Mode by which outputs of the forward and backward RNNs will be combined.
Must be one of: 'sum', 'mul', 'concat', 'ave'. Default is 'concat'.
input_shape: A shape tuple, not including batch.
name: String to set the name of the wrapper.
If not specified, its name will by default to be a generated string.
>>> from bigdl.dllib.keras.layers import LSTM
>>> bidiretional = Bidirectional(LSTM(10, return_sequences=True), input_shape=(12, 16))
creating: createZooKerasLSTM
creating: createZooKerasBidirectional
"""
def __init__(self, layer, merge_mode="concat", input_shape=None, **kwargs):
super(Bidirectional, self).__init__(None,
layer,
merge_mode,
list(input_shape) if input_shape else None,
**kwargs)
class KerasLayerWrapper(ZooKerasLayer):
"""
Wrap a torch style layer to keras style layer.
This layer can be built multiple times.
This layer will return a keras compatible layer
# Arguments
torch_layer: a torch style layer.
input_shape: A shape tuple, not including batch.
i.e If the input data is (2, 3, 4) and 2 is the batch size, you should input: (3, 4) here.
>>> from bigdl.dllib.keras.layers import KerasLayerWrapper
>>> from bigdl.dllib.nn.layer import Linear
>>> linear = Linear(100, 10, with_bias=True)
creating: createLinear
>>> kerasLayer = KerasLayerWrapper(linear, input_shape=(100, ))
creating: createZooKerasKerasLayerWrapper
"""
def __init__(self, torch_layer, input_shape=None, **kwargs):
super(KerasLayerWrapper, self).__init__(None,
torch_layer,
list(input_shape) if input_shape else None,
**kwargs)
|
the-stack_106_25644 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_config import cfg
DashboardGroup = [
cfg.StrOpt('dashboard_url',
default='http://localhost/dashboard/',
help='Where the dashboard can be found'),
cfg.StrOpt('help_url',
default='https://docs.openstack.org/',
help='Dashboard help page url'),
]
IdentityGroup = [
cfg.StrOpt('username',
default='demo',
help='Username to use for non-admin API requests.'),
cfg.StrOpt('password',
default='secretadmin',
help='API key to use when authenticating.',
secret=True),
cfg.StrOpt('domain',
default=None,
help='Domain name to use if required for login'),
cfg.StrOpt('home_project',
default='demo',
help='Project to keep all objects belonging to a regular user.'
),
cfg.StrOpt('admin_username',
default='admin',
help='Administrative Username to use for admin API requests.'),
cfg.StrOpt('admin_password',
default='secretadmin',
help='API key to use when authenticating as admin.',
secret=True),
cfg.StrOpt('admin_home_project',
default='admin',
help='Project to keep all objects belonging to an admin user.'),
cfg.StrOpt('default_keystone_role',
default='member',
help='Name of default role every user gets in his new project.'),
cfg.StrOpt('default_keystone_admin_role',
default='admin',
help=('Name of the role that grants admin rights to a user in '
'his project')),
cfg.IntOpt('unique_last_password_count',
# The default value is chosen to match the value of
# [security_compliance] unique_last_password_count in DevStack
# as the first target of the integration tests is the gate.
# Note that the default value of unique_last_password_count
# in keystone may differ, so you might need
# to change this parameter.
default=2,
help=('The number of passwords for a user that must be unique '
'before an old password can be used. '
'This should match the keystone configuration option '
'"[security_compliance] unique_last_password_count".')),
]
ImageGroup = [
cfg.StrOpt('panel_type',
default='angular',
help='type/version of images panel'),
cfg.StrOpt('http_image',
default='http://download.cirros-cloud.net/0.3.1/'
'cirros-0.3.1-x86_64-uec.tar.gz',
help='http accessible image'),
cfg.ListOpt('images_list',
default=['cirros-0.3.5-x86_64-disk'],
help='default list of images')
]
NetworkGroup = [
cfg.StrOpt('network_cidr',
default='10.100.0.0/16',
help='The cidr block to allocate tenant ipv4 subnets from'),
cfg.StrOpt(
'external_network',
# Devstack default external network is 'public' but it
# can be changed as per available external network.
default='public',
help='The external network for a router creation.'),
]
AvailableServiceGroup = [
cfg.BoolOpt('neutron',
default=True,
help='Whether neutron is expected to be available'),
]
SeleniumGroup = [
cfg.FloatOpt(
'message_implicit_wait',
default=0.1,
help='Timeout in seconds to wait for message confirmation modal'),
cfg.IntOpt(
'implicit_wait',
default=10,
help=('Implicit timeout to wait until element become available, '
'It is used for every find_element, find_elements call.')),
cfg.IntOpt(
'explicit_wait',
default=90,
help=('Explicit timeout is used for long lasting operations, '
'Methods using explicit timeout are usually prefixed with '
'"wait"')),
cfg.IntOpt(
'page_timeout',
default=60,
help='Timeout in seconds to wait for a page to become available'),
cfg.StrOpt(
'screenshots_directory',
default='test_reports',
help='Output directory for screenshots'),
cfg.BoolOpt(
'maximize_browser',
default=True,
help='Maximize the browser window at the start of each test or not'),
]
FlavorsGroup = [
cfg.StrOpt('panel_type',
default='legacy',
help='type/version of flavors panel'),
]
ScenarioGroup = [
cfg.StrOpt('ssh_user',
default='cirros',
help='ssh username for image file'),
]
InstancesGroup = [
cfg.StrOpt('available_zone',
default='nova',
help='Availability zone to be selected for launch instances'),
cfg.StrOpt('image_name',
default='cirros-0.5.1-x86_64-disk (15.6 MB)',
help='Boot Source to be selected for launch Instances'),
cfg.StrOpt('flavor',
default='m1.tiny',
help='Flavor to be selected for launch instances'),
]
VolumeGroup = [
cfg.StrOpt('volume_type',
default='lvmdriver-1',
help='Default volume type'),
cfg.StrOpt('volume_size',
default='1',
help='Default volume size ')
]
PluginGroup = [
cfg.BoolOpt(
'is_plugin',
        default=False,
help='Set to true if this is a plugin'),
cfg.MultiStrOpt(
'plugin_page_path',
default='',
help='Additional path to look for plugin page content'),
cfg.MultiStrOpt(
'plugin_page_structure',
default='',
help=('JSON string to define the page structure for the plugin')),
]
def _get_config_files():
conf_dir = os.path.join(
os.path.abspath(os.path.dirname(os.path.dirname(__file__))),
'integration_tests')
conf_file = os.environ.get('HORIZON_INTEGRATION_TESTS_CONFIG_FILE',
'%s/horizon.conf' % conf_dir)
local_config = os.environ.get('HORIZON_INTEGRATION_TESTS_LOCAL_CONFIG',
'%s/local-horizon.conf' % conf_dir)
config_files = [conf_file, local_config]
return [f for f in config_files if os.path.isfile(f)]
def get_config():
cfg.CONF([], project='horizon', default_config_files=_get_config_files())
cfg.CONF.register_opts(DashboardGroup, group="dashboard")
cfg.CONF.register_opts(IdentityGroup, group="identity")
cfg.CONF.register_opts(NetworkGroup, group="network")
cfg.CONF.register_opts(AvailableServiceGroup, group="service_available")
cfg.CONF.register_opts(SeleniumGroup, group="selenium")
cfg.CONF.register_opts(FlavorsGroup, group="flavors")
cfg.CONF.register_opts(ImageGroup, group="image")
cfg.CONF.register_opts(ScenarioGroup, group="scenario")
cfg.CONF.register_opts(InstancesGroup, group="launch_instances")
cfg.CONF.register_opts(PluginGroup, group="plugin")
cfg.CONF.register_opts(VolumeGroup, group="volume")
return cfg.CONF
def list_opts():
return [
("dashboard", DashboardGroup),
("selenium", SeleniumGroup),
("flavors", FlavorsGroup),
("image", ImageGroup),
("identity", IdentityGroup),
("network", NetworkGroup),
("service_available", AvailableServiceGroup),
("scenario", ScenarioGroup),
("launch_instances", InstancesGroup),
("plugin", PluginGroup),
("volume", VolumeGroup),
]
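# The config file locations above can be overridden at run time through the
# environment, for example (paths and test runner are illustrative only):
#   HORIZON_INTEGRATION_TESTS_CONFIG_FILE=/tmp/horizon.conf python -m pytest ...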
|
the-stack_106_25645 | import sys
import re
from subprocess import call
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as lines
import matplotlib.transforms as mtransforms
import matplotlib.text as mtext
import matplotlib.patches as patches
from matplotlib.patches import Polygon
# Utility class to implement enum equivalent
class enum(set):
def __getattr__(self, name):
if name in self:
return name
raise AttributeError
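# Example: states = enum(["A", "B"]) gives states.A == "A", while accessing a name
# that is not in the set raises AttributeError.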
# Internal states used by the parsers
parseState = enum(["INIT",
"ERROR",
"IN_TOP_NODE",
"IN_GAPS",
"IN_NODES",
"IN_VIRTUAL_NODES",
"IN_EDGES",
"IN_CUT_VERTICES",
"IN_BLOCK_BEGIN",
"IN_BLOCK_CUT_VERTICES",
"IN_BLOCK_ORDINARY_VERTICES",
"IN_BLOCK_EDGES",
"IN_BLOCK_END",
"IN_INCIDENCES",
"IN_FACES",
"IN_AREA",
"END"])
# Position of edge labels: Center, Clockwise side, and Counter-clockwise side
edgeLabelPos = enum(["CENTER", "CW", "CCW"])
# Base parser class.
class parser(object):
IN_DELIM = ' '
OUT_DELIM = ' '
def __init__(self, filePath):
self.error = None
self.state = parseState.INIT
with open(filePath, "r") as ins:
lineno = 0
for line in ins:
line = line.strip()
lineno = lineno + 1
comment = self.isComment(line)
if comment:
continue
changed, self.error = self.updateState(line)
if changed:
continue
if self.error:
self.emitError(lineno, line)
break
self.error = self.parseLine(line)
if self.error:
self.emitError(lineno, self.error)
break
def isComment(self, line):
if re.match(r'^#.*', line):
return True
if re.match(r'^\s*$', line):
return True
return False
    def updateState(self, line):
        # To be overridden by subclasses; returns (state_changed, error).
        return False, 'updateState not implemented'
    def parseLine(self, line):
        # To be overridden by subclasses; returns an error string or None.
        return 'parseLine not implemented'
def emitError(self, lineno, mess):
        sys.stderr.write('Syntax error on line ' + str(lineno))
sys.stderr.write(parser.OUT_DELIM)
sys.stderr.write('[' + mess + ']\n')
# Represents an edge with label dimensions
class labelEdge(object):
@classmethod
def fromString(cls, line):
inst = cls()
inst.error = None
fields = line.split(parser.IN_DELIM)
if len(fields) != 11:
inst.error = 'wrong edge syntax'
else:
inst.n1 = int (fields[ 0])
inst.n2 = int (fields[ 1])
inst.p1 = cls.parsePos(fields[ 2])
inst.w1 = float (fields[ 3])
inst.h1 = float (fields[ 4])
inst.pm = cls.parsePos(fields[ 5])
inst.wm = float (fields[ 6])
inst.hm = float (fields[ 7])
inst.p2 = cls.parsePos(fields[ 8])
inst.w2 = float (fields[ 9])
inst.h2 = float (fields[10])
return inst
@classmethod
def parsePos(cls, field):
if field == 'CENTER':
return edgeLabelPos.CENTER
elif field == 'CW':
return edgeLabelPos.CW
elif field == 'CCW':
return edgeLabelPos.CCW
else:
            # unknown token: return None so the caller can treat the position as missing
            return None
@classmethod
def fromNodes(cls, n1, n2):
inst = cls()
inst.error = None
inst.n1 = n1
inst.n2 = n2
return inst
@classmethod
def fromAnother(cls, e):
inst = cls()
inst.error = e.error
inst.n1 = e.n1
inst.n2 = e.n2
inst.p1 = e.p1
inst.w1 = e.w1
inst.h1 = e.h1
inst.pm = e.pm
inst.wm = e.wm
inst.hm = e.hm
inst.p2 = e.p2
inst.w2 = e.w2
inst.h2 = e.h2
return inst
def __init__(self):
self.error = None
self.n1 = -1
self.n2 = -1
self.p1 = 'CENTER'
self.w1 = 0.0
self.h1 = 0.0
self.pm = 'CENTER'
self.wm = 0.0
self.hm = 0.0
self.p2 = 'CENTER'
self.w2 = 0.0
self.h2 = 0.0
def __str__(self):
out_str = ''
if self.error:
out_str = out_str + 'ERROR: [' + self.error + ']\n'
else:
out_str = out_str + str(self.n1) + parser.OUT_DELIM
out_str = out_str + str(self.n2) + parser.OUT_DELIM
out_str = out_str + str(self.p1) + parser.OUT_DELIM
out_str = out_str + str(self.w1) + parser.OUT_DELIM
out_str = out_str + str(self.h1) + parser.OUT_DELIM
out_str = out_str + str(self.pm) + parser.OUT_DELIM
out_str = out_str + str(self.wm) + parser.OUT_DELIM
out_str = out_str + str(self.hm) + parser.OUT_DELIM
out_str = out_str + str(self.p2) + parser.OUT_DELIM
out_str = out_str + str(self.w2) + parser.OUT_DELIM
out_str = out_str + str(self.h2) + '\n'
return out_str
def setLabel1(self, e):
self.p1 = e.p1
self.w1 = e.w1
self.h1 = e.h1
def setLabelM(self, e):
self.pm = e.pm
self.wm = e.wm
self.hm = e.hm
def setLabel2(self, e):
self.p2 = e.p2
self.w2 = e.w2
self.h2 = e.h2
# Represents a node with label dimensions
class labelNode:
@classmethod
def fromString(cls, line):
inst = cls()
inst.error = None
fields = line.split(parser.IN_DELIM)
if len(fields) != 3:
inst.error = 'wrong node syntax'
else:
inst.n = int(fields[ 0])
inst.w = int(fields[ 1])
inst.h = int(fields[ 2])
inst.virtual = False
return inst
def __init__(self):
self.n = -1
self.w = 0.0
self.h = 0.0
def __str__(self):
out_str = ''
if self.error:
out_str = out_str + 'ERROR: [' + self.error + ']\n'
else:
out_str = out_str + str(self.n) + parser.OUT_DELIM
out_str = out_str + str(self.w) + parser.OUT_DELIM
out_str = out_str + str(self.h) + '\n'
return out_str
# Parses the original input graph and create internal representation of it.
class originalInputParser(parser):
def __init__(self, filePath):
self.topNode = None
self.gaps = None
self.nodes = {}
self.edges = {}
self.nodeNumMax = -1
super(originalInputParser, self).__init__(filePath)
def updateState(self, line):
if self.state == parseState.INIT:
if line == 'TOP NODE':
self.state = parseState.IN_TOP_NODE
return True, None
else:
return False, None
elif self.state == parseState.IN_TOP_NODE:
if line == 'GAPS':
self.state = parseState.IN_GAPS
return True, None
else:
return False, None
elif self.state == parseState.IN_GAPS:
if line == 'NODES':
self.state = parseState.IN_NODES
return True, None
else:
return False, None
elif self.state == parseState.IN_NODES:
if line == 'EDGES':
self.state = parseState.IN_EDGES
return True, None
else:
return False, None
elif self.state == parseState.IN_EDGES:
return False, None
else:
return False, 'wrong state'
def parseLine(self, line):
if self.state == parseState.INIT:
return 'wrong state'
elif self.state == parseState.IN_TOP_NODE:
fields = line.split(parser.IN_DELIM)
if self.topNode:
return 'Duped top node'
elif len(fields) != 1:
return 'Wrong top node syntax'
else:
self.topNode = int(fields[0])
return None
elif self.state == parseState.IN_GAPS:
fields = line.split(parser.IN_DELIM)
if self.gaps:
return 'Duped gaps'
elif len(fields) != 2:
return 'Wrong gaps syntax'
else:
self.gaps = (float(fields[0]), float(fields[1]))
return None
elif self.state == parseState.IN_NODES:
n = labelNode.fromString(line)
if n.error:
return n.error
else:
self.nodes[n.n] = n
if self.nodeNumMax < n.n:
self.nodeNumMax = n.n
return None
elif self.state == parseState.IN_EDGES:
e = labelEdge.fromString(line)
if e.error:
return e.error
else:
self.edges[(e.n1, e.n2)] = e
return None
else:
            return 'wrong state'
def emitForDecomposition(self, filePath):
with open(filePath, "w") as outs:
outs.write('NODES\n')
for n in self.nodes:
outs.write(str(self.nodes[n].n) + '\n')
outs.write('\n')
outs.write('EDGES\n')
for e in self.edges:
outs.write(str(e[0]) + ' ' + str(e[1]) + '\n')
outs.write('\n')
def __str__(self):
out_str = ''
if self.error:
out_str = out_str + 'ERROR: [' + self.error + ']\n'
return out_str
if self.topNode:
out_str = out_str + 'TOP NODE\n'
out_str = out_str + str(self.topNode) + '\n'
out_str = out_str + '\n'
if self.gaps:
out_str = out_str + 'GAPS\n'
out_str = out_str + str(self.gaps[0]) + parser.OUT_DELIM
out_str = out_str + str(self.gaps[1]) + '\n'
out_str = out_str + '\n'
out_str = out_str + 'NODES\n'
for n in self.nodes:
out_str = out_str + str(self.nodes[n])
out_str = out_str + '\n'
out_str = out_str + 'EDGES\n'
for e in self.edges:
out_str = out_str + str(self.edges[e])
return out_str
class bcTreeParser(parser):
class cutVertex:
def __init__(self, line):
self.error = None
fields = line.split(parser.IN_DELIM)
if len(fields) < 4:
self.error = 'wrong cut vertex syntax'
else:
self.cvIndex = int(fields[ 0])
self.nodeNum = int(fields[ 1])
self.blockIndices = [ int(c) for c in fields[2:] ]
def __str__(self):
out_str = ''
if self.error:
out_str = out_str + 'ERROR: [' + self.error + ']\n'
else:
out_str = out_str + str(self.cvIndex) + parser.OUT_DELIM
out_str = out_str + str(self.nodeNum)
for n in self.blockIndices:
out_str = out_str + parser.OUT_DELIM
out_str = out_str + str(n)
out_str = out_str + '\n'
return out_str
class block:
def __init__(self):
self.error = None
self.blockIndex = -1
self.cvNodeNums = {} # Key:NodeNum, Val:CutVertex Index
self.ordinaryNodeNums = []
self.edges = set()
def addCV(self, nodeNum, cvIndex):
self.cvNodeNums[nodeNum] = cvIndex
def addOrdinaryNum(self, n):
self.ordinaryNodeNums.append(n)
def addEdge(self, n1, n2):
self.edges.add((n1, n2))
def __str__(self):
out_str = 'BLOCK_BEGIN\n'
out_str = out_str + str(self.blockIndex) + '\n'
out_str = out_str + 'BLOCK_CUT_VERTICES\n'
for c in self.cvNodeNums:
out_str = out_str + str(c) + ' ' + str(self.cvNodeNums[c]) + '\n'
out_str = out_str + 'BLOCK_ORDINARY_VERTICES\n'
for o in self.ordinaryNodeNums:
out_str = out_str + str(o) + '\n'
out_str = out_str + 'BLOCK_EDGES\n'
for e in self.edges:
out_str = out_str + str(e[0]) + ' ' + str(e[1]) + '\n'
out_str = out_str + 'BLOCK_END\n'
return out_str
def emitForPlanarization(self, filePath, virtualNodeStart):
with open(filePath, "w") as outs:
outs.write('NODES\n')
for n in self.cvNodeNums:
outs.write(str(n) + '\n')
for n in self.ordinaryNodeNums:
outs.write(str(n) + '\n')
outs.write('\n')
outs.write('VIRTUAL NODE START\n')
outs.write(str(virtualNodeStart) + '\n')
outs.write('EDGES\n')
for e in self.edges:
outs.write(str(e[0]) + ' ' + str(e[1]) + '\n')
outs.write('\n')
def __init__(self, filePath):
self.cutVertices = {}
self.blocks = {}
self.planarizedBlocks = {}
self.blockIndex = 1
super(bcTreeParser, self).__init__(filePath)
def updateState(self, line):
if self.state == parseState.INIT:
if line == 'CUT_VERTICES':
self.state = parseState.IN_CUT_VERTICES
return True, None
else:
return False, None
elif self.state == parseState.IN_CUT_VERTICES:
if line == 'BLOCK_BEGIN':
self.blockBeingParsed = self.block()
self.state = parseState.IN_BLOCK_BEGIN
return True, None
else:
return False, None
elif self.state == parseState.IN_BLOCK_BEGIN:
if line == 'BLOCK_CUT_VERTICES':
self.state = parseState.IN_BLOCK_CUT_VERTICES
return True, None
else:
return False, None
elif self.state == parseState.IN_BLOCK_CUT_VERTICES:
if line == 'BLOCK_ORDINARY_VERTICES':
self.state = parseState.IN_BLOCK_ORDINARY_VERTICES
return True, None
else:
return False, None
elif self.state == parseState.IN_BLOCK_ORDINARY_VERTICES:
if line == 'BLOCK_EDGES':
self.state = parseState.IN_BLOCK_EDGES
return True, None
else:
return False, None
elif self.state == parseState.IN_BLOCK_EDGES:
if line == 'BLOCK_END':
self.blocks[self.blockIndex] = self.blockBeingParsed
self.blockIndex = self.blockIndex + 1
self.state = parseState.IN_BLOCK_END
return True, None
else:
return False, None
elif self.state == parseState.IN_BLOCK_END:
if line == 'BLOCK_BEGIN':
self.blockBeingParsed = self.block()
self.state = parseState.IN_BLOCK_BEGIN
return True, None
else:
return False, None
else:
return False, 'wrong state'
def parseLine(self, line):
if self.state == parseState.INIT:
return 'wrong state'
elif self.state == parseState.IN_CUT_VERTICES:
c = self.cutVertex(line)
if c.error:
return c.error
else:
self.cutVertices[c.nodeNum] = c
return None
elif self.state == parseState.IN_BLOCK_BEGIN:
fields = line.split(parser.IN_DELIM)
if len(fields) != 1:
return 'Wrong top block num syntax'
else:
self.blockBeingParsed.blockIndex = int(fields[0])
return None
elif self.state == parseState.IN_BLOCK_CUT_VERTICES:
fields = line.split(parser.IN_DELIM)
if len(fields) != 2:
return 'Wrong top block cut vertex syntax'
else:
# NodeNum CVIndex(unused)
self.blockBeingParsed.addCV( int(fields[0]), int(fields[1]))
return None
elif self.state == parseState.IN_BLOCK_ORDINARY_VERTICES:
fields = line.split(parser.IN_DELIM)
if len(fields) != 1:
return 'Wrong top block ordinary vertex syntax'
else:
self.blockBeingParsed.addOrdinaryNum(int(fields[0]))
return None
elif self.state == parseState.IN_BLOCK_EDGES:
fields = line.split(parser.IN_DELIM)
if len(fields) != 2:
return 'Wrong top block edge syntax'
else:
self.blockBeingParsed.addEdge(int(fields[0]), int(fields[1]))
return None
else:
            return 'wrong state'
def __str__(self):
out_str = ''
if self.error:
out_str = out_str + 'ERROR: [' + self.error + ']\n'
return out_str
out_str = out_str + 'CUT_VERTICES\n'
for c in self.cutVertices:
out_str = out_str + str(self.cutVertices[c])
for b in self.blocks:
out_str = out_str + str(self.blocks[b])
return out_str
class planarizedParser(parser):
# Represents a chain of edges derived from a single original edge.
# A chain is made as a result of planarization operation with virtual
# nodes.
class edgeChain(object):
def __init__(self, line):
self.error = None
fields = line.split(parser.IN_DELIM)
if len(fields) < 2:
self.error = 'wrong edge syntax'
else:
self.n1 = int(fields[ 0])
self.n2 = int(fields[-1])
self.virtualNodes = [ int(c) for c in fields[1:-1] ]
def __str__(self):
out_str = ''
if self.error:
out_str = out_str + 'ERROR: [' + self.error + ']\n'
else:
out_str = out_str + str(self.n1)
for v in self.virtualNodes:
out_str = out_str + parser.OUT_DELIM + str(v)
out_str = out_str + parser.OUT_DELIM + str(self.n2)
return out_str
def emitForEmbedding(self):
out_str = ''
if self.error:
out_str = out_str + 'ERROR: [' + self.error + ']\n'
else:
if len(self.virtualNodes) == 0:
out_str = out_str + str(self.n1) + parser.OUT_DELIM
out_str = out_str + str(self.n2) + '\n'
else:
out_str = out_str + str(self.n1) + parser.OUT_DELIM
out_str = out_str + str(self.virtualNodes[0]) + '\n'
if len(self.virtualNodes) >= 2:
for i in range (0, len(self.virtualNodes)-1):
out_str = out_str + str(self.virtualNodes[i])
out_str = out_str + parser.OUT_DELIM
out_str = out_str + str(self.virtualNodes[i+1]) + '\n'
out_str = out_str + str(self.virtualNodes[-1]) + parser.OUT_DELIM
out_str = out_str + str(self.n2) + '\n'
return out_str
def flip(self):
tmp = self.n1
self.n1 = self.n2
self.n2 = tmp
self.virtualNodes.reverse()
def generateSplitEdges(self, orgE):
if len(self.virtualNodes) == 0:
return [ labelEdge.fromAnother(orgE) ]
elif len(self.virtualNodes) == 1:
e1 = labelEdge.fromNodes(self.n1, self.virtualNodes[0])
e2 = labelEdge.fromNodes(self.virtualNodes[0], self.n2)
e1.setLabel1(orgE)
e1.setLabelM(orgE)
e2.setLabel2(orgE)
return [e1, e2]
else:
eList = []
e1 = labelEdge.fromNodes(self.n1, self.virtualNodes[0])
e1.setLabel1(orgE)
eList.append(e1)
            halfP = len(self.virtualNodes)//2  # integer index of the middle split edge
for i in range(0,len(self.virtualNodes)-1):
e = labelEdge.fromNodes(self.virtualNodes[i],self.virtualNodes[i+1])
if i == halfP:
e.setLabelM(orgE)
eList.append(e)
el = labelEdge.fromNodes(self.virtualNodes[-1], self.n2)
el.setLabel2(orgE)
eList.append(el)
return eList
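    # For example, a chain 3 -> [10, 11] -> 5 yields split edges (3,10), (10,11) and
    # (11,5); the original edge's end labels stay on the outer segments and its middle
    # label is attached to the segment nearest the middle of the chain.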
def __init__(self, filePath, orgEdges):
self.nodes = set()
self.virtualNodes = set()
self.originalEdges = orgEdges # original Edges with label info
self.edgeChains = {} # edge chains with key (n1,n2)
self.splitEdges = {} # split edges with label info distributed
self.nodeNumMax = -1 # holds the highest node num + 1
super(planarizedParser, self).__init__(filePath)
def updateState(self, line):
if self.state == parseState.INIT:
if line == 'NODES':
self.state = parseState.IN_NODES
return True, None
else:
return False, None
elif self.state == parseState.IN_NODES:
if line == 'VIRTUAL_NODES':
self.state = parseState.IN_VIRTUAL_NODES
return True, None
elif line == 'EDGES':
self.state = parseState.IN_EDGES
return True, None
else:
return False, None
elif self.state == parseState.IN_VIRTUAL_NODES:
if line == 'EDGES':
self.state = parseState.IN_EDGES
return True, None
else:
return False, None
elif self.state == parseState.IN_EDGES:
return False, None
else:
return False, 'wrong state'
def parseLine(self, line):
if self.state == parseState.INIT:
return 'wrong state'
elif self.state == parseState.IN_NODES:
fields = line.split(parser.IN_DELIM)
if len(fields) != 1:
return 'Wrong node syntax'
else:
val = int(fields[0])
if self.nodeNumMax < val:
self.nodeNumMax = val
self.nodes.add(val)
return None
elif self.state == parseState.IN_VIRTUAL_NODES:
fields = line.split(parser.IN_DELIM)
if len(fields) != 1:
return 'Wrong node syntax'
else:
val = int(fields[0])
if self.nodeNumMax < val:
self.nodeNumMax = val
self.virtualNodes.add(val)
return None
elif self.state == parseState.IN_EDGES:
e = self.edgeChain(line)
if e.error:
return e.error
else:
if (e.n1, e.n2) not in self.originalEdges:
e.flip()
self.edgeChains[(e.n1, e.n2)] = e
ses = e.generateSplitEdges(self.originalEdges[(e.n1, e.n2)])
for se in ses:
self.splitEdges[(se.n1, se.n2)] = se
return None
else:
            return 'wrong state'
def __str__(self):
out_str = ''
out_str = out_str + 'NODES\n'
for n in self.nodes:
out_str = out_str + str(n) + '\n'
out_str = out_str + 'VIRTUAL_NODES\n'
for n in self.virtualNodes:
out_str = out_str + str(n) + '\n'
out_str = out_str + 'EDGES\n'
for e in self.edgeChains:
out_str = out_str + str(self.edges[e]) + '\n'
return out_str
def emitForEmbedding(self, filePath):
with open(filePath, "w") as outs:
outs.write('NODES\n')
for n in self.nodes:
outs.write(str(n) + '\n')
for n in self.virtualNodes:
outs.write(str(n) + '\n')
outs.write('\n')
outs.write('EDGES\n')
for e in self.edgeChains:
outs.write(self.edgeChains[e].emitForEmbedding())
class embeddingParser(parser):
class incidence(object):
def __init__(self, line):
self.error = None
fields = line.split(parser.IN_DELIM)
self.nodeNum = int(fields[ 0])
self.incidence = [ int(c) for c in fields[1:] ]
def __str__(self):
out_str = ''
if self.error:
out_str = out_str + 'ERROR: [' + self.error + ']\n'
else:
out_str = out_str + str(self.nodeNum)
for n in self.incidence:
out_str = out_str + parser.OUT_DELIM + str(n)
return out_str
class face(object):
def __init__(self, line, index):
self.error = None
fields = line.split(parser.IN_DELIM)
self.index = index
self.incidentNodes = [ int(c) for c in fields[1:] ]
def __str__(self):
out_str = ''
if self.error:
out_str = out_str + 'ERROR: [' + self.error + ']\n'
else:
out_str = out_str + str(self.index)
for n in self.incidentNodes:
out_str = out_str + parser.OUT_DELIM + str(n)
return out_str
def __init__(self, filePath, faceIndexStart):
self.incidences = {}
self.faces = {}
self.faceIndex = faceIndexStart
super(embeddingParser, self).__init__(filePath)
def updateState(self, line):
if self.state == parseState.INIT:
if line == 'INCIDENCES':
self.state = parseState.IN_INCIDENCES
return True, None
else:
return False, None
elif self.state == parseState.IN_INCIDENCES:
if line == 'FACES':
self.state = parseState.IN_FACES
return True, None
else:
return False, None
elif self.state == parseState.IN_FACES:
return False, None
else:
return False, 'wrong state'
def parseLine(self, line):
if self.state == parseState.INIT:
return 'wrong state'
elif self.state == parseState.IN_INCIDENCES:
i = self.incidence(line)
if i.error:
return i.error
else:
self.incidences[i.nodeNum] = i
return None
elif self.state == parseState.IN_FACES:
f = self.face(line, self.faceIndex)
self.faceIndex = self.faceIndex + 1
if f.error:
return f.error
else:
self.faces[f.index] = f
return None
else:
            return 'wrong state'
def __str__(self):
out_str = ''
out_str = out_str + 'INCIDENCES\n'
for n in self.incidences:
out_str = out_str + str(self.incidences[n]) + '\n'
out_str = out_str + 'FACES\n'
for f in self.faces:
out_str = out_str + str(self.faces[f]) + '\n'
return out_str
# Finds a biggest face incident to the given cut vertex.
def biggestFace(self, cv):
maxIdx = -1
maxSize = -1
for fk in self.faces:
f= self.faces[fk]
if cv in f.incidentNodes:
if len(f.incidentNodes) > maxSize:
maxIdx = fk
maxSize = len(f.incidentNodes)
if maxIdx == -1:
# No face incident to cv found.
return (-1, -1, -1)
fMax = self.faces[maxIdx]
for cvPos in range(0, len(fMax.incidentNodes)):
if cv == fMax.incidentNodes[cvPos]:
break;
n1Pos = cvPos + 1
if n1Pos >= len(fMax.incidentNodes):
n1Pos = 0
n2Pos = cvPos - 1
if n2Pos < 0:
n2Pos = len(fMax.incidentNodes)-1
return (maxIdx, fMax.incidentNodes[n1Pos], fMax.incidentNodes[n2Pos])
class visRepParser(parser):
class node(object):
def __init__(self, line):
self.error = None
fields = line.split(parser.IN_DELIM)
self.n = int( fields[0])
self.xLeft = float(fields[1])
self.xRight = float(fields[2])
self.y = float(fields[3])
def __str__(self):
out_str = ''
out_str = out_str + str(self.n) + ' ' + str(self.xLeft) + ' '
out_str = out_str + str(self.xRight) + ' ' + str(self.y)
return out_str
class edge(object):
def __init__(self, line):
self.error = None
fields = line.split(parser.IN_DELIM)
self.n1 = int( fields[0])
self.n2 = int( fields[1])
self.x = float(fields[2])
self.y1 = float(fields[3])
self.y2 = float(fields[4])
self.fl = fields[5]
def __str__(self):
out_str = ''
out_str = out_str + str(self.n1) + ' ' + str(self.n2) + ' '
out_str = out_str + str(self.x) + ' ' + str(self.y1) + ' '
out_str = out_str + str(self.y2) + ' ' + self.fl
return out_str
def __init__(self, filePath):
self.nodes = {}
self.edges = {}
self.width = 0.0
self.height = 0.0
super(visRepParser, self).__init__(filePath)
def updateState(self, line):
if self.state == parseState.INIT:
if line == 'AREA':
self.state = parseState.IN_AREA
return True, None
else:
return False, None
elif self.state == parseState.IN_AREA:
if line == 'NODES':
self.state = parseState.IN_NODES
return True, None
else:
return False, None
elif self.state == parseState.IN_NODES:
if line == 'EDGES':
self.state = parseState.IN_EDGES
return True, None
else:
return False, None
elif self.state == parseState.IN_EDGES:
return False, None
else:
return False, 'wrong state'
def parseLine(self, line):
if self.state == parseState.INIT:
return 'wrong state'
elif self.state == parseState.IN_AREA:
fields = line.split(parser.IN_DELIM)
if len(fields) != 2:
return 'Wrong area syntax'
else:
self.width = float(fields[0])
self.height = float(fields[1])
return None
elif self.state == parseState.IN_NODES:
n = self.node(line)
if n.error:
return n.error
else:
self.nodes[n.n] = n
return None
elif self.state == parseState.IN_EDGES:
e = self.edge(line)
if e.error:
return e.error
else:
self.edges[(e.n1, e.n2)] = e
return None
else:
            return 'wrong state'
def __str__(self):
out_str = ''
out_str = out_str + 'AREA\n'
out_str = out_str + str(self.width) + ' ' + str(self.height) + '\n'
out_str = out_str + '\n'
out_str = out_str + 'NODES\n'
for n in self.nodes:
out_str = out_str + str(self.nodes[n]) + '\n'
out_str = out_str + '\n'
out_str = out_str + 'EDGES\n'
for e in self.edges:
out_str = out_str + str(self.edges[e]) + '\n'
return out_str
def emitVisRepInput(filePath, pOrg, pDecomp, planarizedBlocks, incidences, embeddings):
hGap = pOrg.gaps[0]
vGap = pOrg.gaps[1]
with open(filePath, "w") as outs:
outs.write('GAPS\n')
outs.write(str(hGap) + ' ' + str(vGap) + '\n')
outs.write('\n')
outs.write('NODES\n')
for n in pOrg.nodes:
outs.write(str(pOrg.nodes[n]))
for bk in planarizedBlocks:
for vn in planarizedBlocks[bk].virtualNodes:
outs.write(str(vn) + ' ' + str(hGap*2.0) + ' ' + str(vGap*2.0) + '\n')
outs.write('EDGES\n')
for bk in planarizedBlocks:
block = planarizedBlocks[bk]
for e in block.splitEdges:
outs.write(str(block.splitEdges[e]))
outs.write('INCIDENCES\n')
for i in incidences:
outs.write(str(i))
[outs.write(' ' + str(n)) for n in incidences[i]]
outs.write('\n')
outs.write('BLOCKS\n')
for blockIndex in pDecomp.blocks:
block = pDecomp.blocks[blockIndex]
outs.write(str(blockIndex))
for n in block.cvNodeNums:
outs.write(' ' + str(n))
for n in block.ordinaryNodeNums:
outs.write(' ' + str(n))
outs.write('\n')
outs.write('FACES\n')
for blockIndex in embeddings:
emb = embeddings[blockIndex]
for f in emb.faces:
outs.write(str(emb.faces[f]) + '\n')
outs.write('UNIFICATION GROUPS\n')
ugIdx = 1
for cvIndex in pDecomp.cutVertices:
cvInfo = pDecomp.cutVertices[cvIndex]
outs.write(str(ugIdx) + ' ' + str(cvInfo.nodeNum))
for blockIndex in cvInfo.blockIndices:
faceIdx, nCW, nCCW = embeddings[blockIndex].biggestFace(cvInfo.nodeNum)
outs.write(' ' + str(blockIndex) + ' ' + str(faceIdx) + ' ' + str(nCW) + ' ' + str(nCCW))
outs.write('\n')
ugIdx = ugIdx + 1
outs.write('ROOT\n')
maxBlockIndex = 0
maxSum = 0
for blockIndex in pDecomp.blocks:
block = pDecomp.blocks[blockIndex]
inCVS = False
for c in block.cvNodeNums:
if pOrg.topNode == c:
inCVS = True
break
if pOrg.topNode in block.cvNodeNums or pOrg.topNode in block.ordinaryNodeNums:
sum = len(block.cvNodeNums) + len(block.ordinaryNodeNums) + len(block.edges)
if maxSum < sum:
maxSum = sum
maxBlockIndex = blockIndex
faceIdx, nCW, nCCW = embeddings[maxBlockIndex].biggestFace(pOrg.topNode)
outs.write(str(maxBlockIndex) + ' ' + str(pOrg.topNode) + ' ' + str(faceIdx) + '\n')
class renderNode:
def __init__(self, n, w, h, xLeft, xRight, y, virtual):
self.num = n
self.w = w
self.h = h
self.xLeft = xLeft
self.xRight = xRight
self.y = y
self.virtual = virtual
self.degree = 0
self.xAccum = 0.0
def __str__(self):
return "Node: " + str(self.num) + "\tW: " + str(self.w) + "\tH: " + \
str(self.h) + "\txLeft: " + str(self.xLeft) + "\txRight: " + \
str(self.xRight) + "\ty: " + str(self.y) + "\tvirtual: " +\
str(self.virtual)
def xMid(self):
mid = self.xAccum / float(self.degree)
if (mid - (self.w/2.0)) < self.xLeft:
return self.xLeft + (self.w/2.0)
elif (mid + (self.w/2.0)) > self.xRight:
return self.xRight - (self.w/2.0)
else:
return mid
def drawLabel(self, ax):
ax.add_patch(
patches.Rectangle(
(self.xMid()-self.w/2.0, self.y - self.h/2.0),
self.w,
self.h,
edgecolor="skyblue",
facecolor="skyblue",
zorder=1
)
)
ax.text(self.textX(), self.textY(), self.text(), fontsize=10, zorder=2)
def lineX(self):
xMid = self.xMid()
wh = self.w/2.0
return [(xMid-wh)/1.0, (xMid+wh)/1.0, (xMid+wh)/1.0, (xMid-wh)/1.0, (xMid-wh)/1.0]
def lineY(self):
hh = self.h/2.0
return [(self.y+hh)/1.0, (self.y+hh)/1.0, (self.y-hh)/1.0, (self.y-hh)/1.0, (self.y+hh)/1.0 ]
def textX(self):
return self.xMid()
def textY(self):
return self.y
def text(self):
return str(self.num)
class renderEdge:
def __init__(self, n1, n2, t1, w1, h1, tm, wm, hm, t2, w2, h2, x, y1, y2, fl):
self.n1 = n1
self.n2 = n2
self.t1 = t1
self.w1 = w1
self.h1 = h1
self.tm = tm
self.wm = wm
self.hm = hm
self.t2 = t2
self.w2 = w2
self.h2 = h2
self.x = x
self.y1 = y1
self.y2 = y2
self.f = fl
def __str__(self):
return "Edge: {" + str(self.n1) + "," + str(self.n2) + "}\t"\
+ str(self.t1) + "\tW: " + str(self.w1) + "\tH: " + str(self.h1) + "\t"\
+ str(self.tm) + "\tW: " + str(self.wm) + "\tH: " + str(self.hm) + "\t"\
+ str(self.t2) + "\tW: " + str(self.w2) + "\tH: " + str(self.h2) + "\t"\
+ "x: " + str(self.x) + "\ty1: " + str(self.y1) + "\ty2: " + str(self.y2) + "\t"\
+ self.f
def drawLabel1(self, ax, n1):
ax.add_patch(
patches.Rectangle(
(self.labelX1()[3], self.labelY1(n1)[3]),
self.w1,
self.h1,
edgecolor="None",
facecolor="pink",
zorder=1
)
)
ax.text(self.textX1(), self.textY1(n1), self.text1(), fontsize=10,zorder=2)
def drawLabelm(self, ax, n1,n2):
ax.add_patch(
patches.Rectangle(
(self.labelXm()[3], self.labelYm(n1,n2)[3]),
self.wm,
self.hm,
edgecolor="None",
facecolor="pink",
zorder=1
)
)
ax.text(self.textXm(), self.textYm(n1,n2), self.textm(), fontsize=10,zorder=2)
def drawLabel2(self, ax, n2):
ax.add_patch(
patches.Rectangle(
(self.labelX2()[3], self.labelY2(n2)[3]),
self.w2,
self.h2,
edgecolor="None",
facecolor="pink",
zorder=1
)
)
ax.text(self.textX2(), self.textY2(n2), self.text2(), fontsize=10,zorder=2)
def drawLine(self, ax, n1, n2):
line = lines.Line2D(self.lineX(n1,n2), self.lineY(n1,n2), color='black', linewidth=2.0, zorder=0)
ax.add_line(line)
def labelY1(self, n1):
if self.y1 > self.y2:
ry = n1.y - n1.h/2.0;
h = self.h1
return [ry, ry, ry-h, ry-h, ry]
else:
ry = n1.y + n1.h/2.0;
h = self.h1
return [ry+h, ry+h, ry, ry, ry+h]
def labelY2(self, n2):
if self.y1 < self.y2:
ry = n2.y - n2.h/2.0;
h = self.h2
return [ry, ry, ry-h, ry-h, ry]
else:
ry = n2.y + n2.h/2.0;
h = self.h2
return [ry+h, ry+h, ry, ry, ry+h]
def labelYm(self, n1, n2):
if self.y1 > self.y2:
cy = ((n1.y - n1.h/2.0 - self.h1) + (n2.y + n2.h/2.0 + self.h2))/2.0
else:
cy = ((n1.y + n1.h/2.0 + self.h1) + (n2.y - n2.h/2.0 - self.h2))/2.0
hh = self.hm /2.0
return [cy + hh, cy + hh, cy - hh, cy - hh, cy + hh]
def labelX1(self):
return self.labelX(self.w1, self.t1, self.f)
def labelXm(self):
return self.labelX(self.wm, self.tm, self.f)
def labelX2(self):
return self.labelX(self.w2, self.t2, self.f)
def labelX(self, w, t, f):
hw = w/2.0
x = self.x
if t == 'CENTER':
return [x - hw, x + hw, x + hw, x - hw, x - hw]
if f == 'NOTFLIPPED':
if (t == 'CCW' and self.y1 > self.y2) or\
(t == 'CW' and self.y1 <= self.y2):
return [x, x + w, x + w, x, x]
else:
return [x - w, x, x, x - w, x - w]
else:
if (t == 'CCW' and self.y1 > self.y2) or\
(t == 'CW' and self.y1 <= self.y2):
return [x - w, x, x, x - w, x - w]
else:
return [x, x + w, x + w, x, x]
def lineX(self, n1, n2):
return [n1.xMid(), self.x, self.x, n2.xMid()]
def lineY(self, n1, n2):
if n1.y > n2.y:
return [n1.y, n1.y - n1.h/2.0, n2.y + n2.h/2.0, n2.y]
else:
return [n1.y, n1.y + n1.h/2.0, n2.y - n2.h/2.0, n2.y]
def textX1(self):
return self.textX(self.w1, self.t1, self.f)
def textXm(self):
return self.textX(self.wm, self.tm, self.f)
def textX2(self):
return self.textX(self.w2, self.t2, self.f)
def textX(self, w, t, f):
hw = w/2.0
x = self.x
if t == 'CENTER':
return x
if f == 'NOTFLIPPED':
if (t == 'CCW' and self.y1 <= self.y2) or (t == 'CW' and self.y1 > self.y2):
return x - hw
else:
return x + hw
else:
if (t == 'CCW' and self.y1 <= self.y2) or (t == 'CW' and self.y1 > self.y2):
return x + hw
else:
return x - hw
def textY1(self, n1):
if self.y1 > self.y2:
ry = n1.y - n1.h/2.0;
h = self.h1/2.0
return ry-h
else:
ry = n1.y + n1.h/2.0;
h = self.h1/2.0
return ry+h
def textY2(self, n2):
if self.y2 > self.y1:
ry = n2.y - n2.h/2.0;
h = self.h2/2.0
return ry-h
else:
ry = n2.y + n2.h/2.0;
h = self.h2/2.0
return ry+h
def textYm(self, n1,n2):
if self.y1 > self.y2:
return ((n1.y - n1.h/2.0 - self.h1) + (n2.y + n2.h/2.0 + self.h2))/2.0
else:
return ((n1.y + n1.h/2.0 + self.h1) + (n2.y - n2.h/2.0 - self.h2))/2.0
def text1(self):
return "{" + str(self.n1) + "," + str(self.n2) + "} Side 1"
def text2(self):
return "{" + str(self.n1) + "," + str(self.n2) + "} Side 2"
def textm(self):
return "{" + str(self.n1) + "," + str(self.n2) + "} Mid"
def constructGeomInfo(pOrg, planarizedBlocks, pVisRep):
hGap = pOrg.gaps[0]
vGap = pOrg.gaps[1]
rNodes = {}
rEdges = {}
for nodeNum in pVisRep.nodes:
nVisRep = pVisRep.nodes[nodeNum]
if nodeNum in pOrg.nodes:
nOrg = pOrg.nodes[nodeNum]
rNodes[nodeNum] = renderNode(nodeNum, nOrg.w, nOrg.h, nVisRep.xLeft, nVisRep.xRight, nVisRep.y, False)
else:
# virtual node
rNodes[nodeNum] = renderNode(nodeNum, hGap*2.0, vGap*2.0, nVisRep.xLeft, nVisRep.xRight, nVisRep.y, True)
for blockIndex in planarizedBlocks:
sEdges = planarizedBlocks[blockIndex].splitEdges
for e in sEdges:
es = sEdges[e]
ev = pVisRep.edges[e]
rEdges[e] = renderEdge(e[0], e[1], es.p1, es.w1, es.h1, es.pm, es.wm, es.hm, es.p2, es.w2, es.h2, ev.x, ev.y1, ev.y2, ev.fl)
n1 = rNodes[e[0]]
n2 = rNodes[e[1]]
n1.xAccum = n1.xAccum + ev.x
n2.xAccum = n2.xAccum + ev.x
n1.degree = n1.degree + 1
n2.degree = n2.degree + 1
return rNodes, rEdges
def draw(w, h, rNodes, rEdges):
fig, ax = plt.subplots()
ax.set_xlim([-10,w+10])
ax.set_ylim([-10,h+10])
for e in rEdges:
edge = rEdges[e]
n1 = rNodes[e[0]]
n2 = rNodes[e[1]]
edge.drawLine(ax,n1,n2)
if edge.w1 > 0.0 and edge.h1 > 0.0:
edge.drawLabel1(ax,n1)
if edge.wm > 0.0 and edge.hm > 0.0:
edge.drawLabelm(ax,n1,n2)
if edge.w2 > 0.0 and edge.h2 > 0.0:
edge.drawLabel2(ax,n2)
for n in rNodes:
node = rNodes[n]
if not node.virtual:
node.drawLabel(ax);
plt.show()
def main(argv):
work_dir = './sample_graph_drawer_tmp/'
call(['mkdir', '-p', work_dir])
decompInputFileName = work_dir + 'decomp_input.txt'
decompOutputFileName = work_dir + 'decomp_output.txt'
pOrg = originalInputParser(argv[1])
pOrg.emitForDecomposition(decompInputFileName)
call(['decomposer', decompInputFileName, decompOutputFileName])
nodeMax = pOrg.nodeNumMax
faceIndexStart = 1
pDecomp = bcTreeParser(decompOutputFileName)
planarizedBlocks = {} # Key is the block number
embeddings = {} # Key is the block number
incidences = {} # Key is the node number
for blockIndex in pDecomp.blocks:
block = pDecomp.blocks[blockIndex]
planarizationInputFileName = work_dir + 'unplanarized_' + str(blockIndex) + '.txt'
planarizationOutputFileName = work_dir + 'planarized_' + str(blockIndex) + '.txt'
embeddingInputFileName = work_dir + 'embedding_input_' + str(blockIndex) + '.txt'
embeddingOutputFileName = work_dir + 'embedding_output_' + str(blockIndex) + '.txt'
block.emitForPlanarization(planarizationInputFileName, nodeMax+1)
call(['planarizer', planarizationInputFileName, planarizationOutputFileName])
pPlanarized = planarizedParser(planarizationOutputFileName, pOrg.edges);
pPlanarized.emitForEmbedding(embeddingInputFileName)
call(['biconnected_embedding_finder',
embeddingInputFileName,
embeddingOutputFileName])
pEmbedding = embeddingParser(embeddingOutputFileName, faceIndexStart)
faceIndexStart = pEmbedding.faceIndex
planarizedBlocks[blockIndex] = pPlanarized
embeddings[blockIndex] = pEmbedding
for nodeNum in pEmbedding.incidences.keys():
if nodeNum not in incidences:
incidences[nodeNum] = []
incidences[nodeNum] = incidences[nodeNum] + pEmbedding.incidences[nodeNum].incidence
if nodeMax < pPlanarized.nodeNumMax:
nodeMax = pPlanarized.nodeNumMax
visRepInputFileName = work_dir + 'vis_rep_input.txt'
visRepOutputFileName = work_dir + 'vis_rep_output.txt'
emitVisRepInput(visRepInputFileName, pOrg, pDecomp, planarizedBlocks, incidences, embeddings)
call(['vis_rep_finder', visRepInputFileName, visRepOutputFileName])
pVisRep = visRepParser(visRepOutputFileName)
rNodes, rEdges = constructGeomInfo(pOrg, planarizedBlocks, pVisRep)
call(['rm', '-fr', work_dir])
draw(pVisRep.width, pVisRep.height, rNodes, rEdges)
if __name__ == "__main__":
main(sys.argv)
|
the-stack_106_25647 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: junos_facts
version_added: "2.1"
author: "Nathaniel Case (@qalthos)"
short_description: Collect facts from remote devices running Juniper Junos
description:
- Collects fact information from a remote device running the Junos
operating system. By default, the module will collect basic fact
information from the device to be included with the hostvars.
Additional fact information can be collected based on the
configured set of arguments.
extends_documentation_fragment: junos
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a list of
values to include a larger subset. Values can also be used
with an initial C(M(!)) to specify that a specific subset should
        not be collected. To maintain backward compatibility, old style facts
        can be retrieved by explicitly adding C(ofacts) to the value; this requires
        junos-eznc to be installed as a prerequisite. Valid values of gather_subset
        are default, hardware, config, interfaces, ofacts. If C(ofacts) is present in the
        list it fetches the old style facts (fact keys without 'ansible_' prefix); it requires the
        junos-eznc library to be installed on the control node, and the device login credentials
        must be given in the C(provider) option.
required: false
default: ['!config', '!ofacts']
version_added: "2.3"
config_format:
description:
- The I(config_format) argument specifies the format of the configuration
when serializing output from the device. This argument is applicable
only when C(config) value is present in I(gather_subset).
The I(config_format) should be supported by the junos version running on
device. This value is not applicable while fetching old style facts that is
when C(ofacts) value is present in value if I(gather_subset) value.
required: false
default: 'text'
choices: ['xml', 'text', 'set', 'json']
version_added: "2.3"
requirements:
- ncclient (>=v0.5.2)
notes:
- Ensure I(config_format) used to retrieve configuration from device
is supported by junos version running on device.
  - With I(config_format = json), configuration in the results will be a dictionary (and not a JSON string)
- This module requires the netconf system service be enabled on
the remote device being managed.
- Tested against vSRX JUNOS version 15.1X49-D15.4, vqfx-10000 JUNOS Version 15.1X53-D60.4.
- Recommended connection is C(netconf). See L(the Junos OS Platform Options,../network/user_guide/platform_junos.html).
- This module also works with C(local) connections for legacy playbooks.
  - Fetching old style facts requires the junos-eznc library to be installed on the control node and the device login credentials
    must be given in the provider option.
"""
EXAMPLES = """
- name: collect default set of facts
junos_facts:
- name: collect default set of facts and configuration
junos_facts:
gather_subset: config
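# Illustrative example (an assumption, not taken from the original module
# documentation): restrict collection to the documented hardware and
# interfaces subsets by passing a list to gather_subset.
- name: collect only hardware and interface facts
  junos_facts:
    gather_subset:
      - hardware
      - interfaces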
"""
RETURN = """
ansible_facts:
description: Returns the facts collect from the device
returned: always
type: dict
"""
import platform
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.netconf import exec_rpc
from ansible.module_utils.network.junos.junos import junos_argument_spec, get_param, tostring
from ansible.module_utils.network.junos.junos import get_configuration, get_capabilities
from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems
try:
from lxml.etree import Element, SubElement
except ImportError:
from xml.etree.ElementTree import Element, SubElement
try:
from jnpr.junos import Device
from jnpr.junos.exception import ConnectError
HAS_PYEZ = True
except ImportError:
HAS_PYEZ = False
USE_PERSISTENT_CONNECTION = True
class FactsBase(object):
def __init__(self, module):
self.module = module
self.facts = dict()
def populate(self):
raise NotImplementedError
def cli(self, command):
reply = command(self.module, command)
output = reply.find('.//output')
if not output:
self.module.fail_json(msg='failed to retrieve facts for command %s' % command)
return str(output.text).strip()
def rpc(self, rpc):
return exec_rpc(self.module, tostring(Element(rpc)))
def get_text(self, ele, tag):
try:
return str(ele.find(tag).text).strip()
except AttributeError:
pass
class Default(FactsBase):
def populate(self):
self.facts.update(self.platform_facts())
reply = self.rpc('get-chassis-inventory')
data = reply.find('.//chassis-inventory/chassis')
self.facts['serialnum'] = self.get_text(data, 'serial-number')
def platform_facts(self):
platform_facts = {}
resp = get_capabilities(self.module)
device_info = resp['device_info']
platform_facts['system'] = device_info['network_os']
for item in ('model', 'image', 'version', 'platform', 'hostname'):
val = device_info.get('network_os_%s' % item)
if val:
platform_facts[item] = val
platform_facts['api'] = resp['network_api']
platform_facts['python_version'] = platform.python_version()
return platform_facts
class Config(FactsBase):
def populate(self):
config_format = self.module.params['config_format']
reply = get_configuration(self.module, format=config_format)
if config_format == 'xml':
config = tostring(reply.find('configuration')).strip()
elif config_format == 'text':
config = self.get_text(reply, 'configuration-text')
elif config_format == 'json':
config = self.module.from_json(reply.text.strip())
elif config_format == 'set':
config = self.get_text(reply, 'configuration-set')
self.facts['config'] = config
class Hardware(FactsBase):
def populate(self):
reply = self.rpc('get-system-memory-information')
data = reply.find('.//system-memory-information/system-memory-summary-information')
self.facts.update({
'memfree_mb': int(self.get_text(data, 'system-memory-free')),
'memtotal_mb': int(self.get_text(data, 'system-memory-total'))
})
reply = self.rpc('get-system-storage')
data = reply.find('.//system-storage-information')
filesystems = list()
for obj in data:
filesystems.append(self.get_text(obj, 'filesystem-name'))
self.facts['filesystems'] = filesystems
reply = self.rpc('get-route-engine-information')
data = reply.find('.//route-engine-information')
routing_engines = dict()
for obj in data:
slot = self.get_text(obj, 'slot')
routing_engines.update({slot: {}})
routing_engines[slot].update({'slot': slot})
for child in obj:
if child.text != "\n":
routing_engines[slot].update({child.tag.replace("-", "_"): child.text})
self.facts['routing_engines'] = routing_engines
if len(data) > 1:
self.facts['has_2RE'] = True
else:
self.facts['has_2RE'] = False
reply = self.rpc('get-chassis-inventory')
data = reply.findall('.//chassis-module')
modules = list()
for obj in data:
mod = dict()
for child in obj:
if child.text != "\n":
mod.update({child.tag.replace("-", "_"): child.text})
modules.append(mod)
self.facts['modules'] = modules
class Interfaces(FactsBase):
def populate(self):
ele = Element('get-interface-information')
SubElement(ele, 'detail')
reply = exec_rpc(self.module, tostring(ele))
interfaces = {}
for item in reply[0]:
name = self.get_text(item, 'name')
obj = {
'oper-status': self.get_text(item, 'oper-status'),
'admin-status': self.get_text(item, 'admin-status'),
'speed': self.get_text(item, 'speed'),
'macaddress': self.get_text(item, 'hardware-physical-address'),
'mtu': self.get_text(item, 'mtu'),
'type': self.get_text(item, 'if-type'),
}
interfaces[name] = obj
self.facts['interfaces'] = interfaces
class OFacts(FactsBase):
def _connect(self, module):
host = get_param(module, 'host')
kwargs = {
'port': get_param(module, 'port') or 830,
'user': get_param(module, 'username')
}
if get_param(module, 'password'):
kwargs['passwd'] = get_param(module, 'password')
if get_param(module, 'ssh_keyfile'):
kwargs['ssh_private_key_file'] = get_param(module, 'ssh_keyfile')
kwargs['gather_facts'] = False
try:
device = Device(host, **kwargs)
device.open()
device.timeout = get_param(module, 'timeout') or 10
except ConnectError as exc:
module.fail_json('unable to connect to %s: %s' % (host, to_native(exc)))
return device
def populate(self):
device = self._connect(self.module)
facts = dict(device.facts)
if '2RE' in facts:
facts['has_2RE'] = facts['2RE']
del facts['2RE']
facts['version_info'] = dict(facts['version_info'])
if 'junos_info' in facts:
for key, value in facts['junos_info'].items():
if 'object' in value:
value['object'] = dict(value['object'])
return facts
FACT_SUBSETS = dict(
default=Default,
hardware=Hardware,
config=Config,
interfaces=Interfaces,
ofacts=OFacts
)
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
def main():
""" Main entry point for AnsibleModule
"""
argument_spec = dict(
gather_subset=dict(default=['!config', '!ofacts'], type='list'),
config_format=dict(default='text', choices=['xml', 'text', 'set', 'json']),
)
argument_spec.update(junos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
gather_subset = module.params['gather_subset']
runable_subsets = set()
exclude_subsets = set()
for subset in gather_subset:
if subset == 'all':
runable_subsets.update(VALID_SUBSETS)
continue
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
exclude_subsets.update(VALID_SUBSETS)
continue
exclude = True
else:
exclude = False
if subset not in VALID_SUBSETS:
module.fail_json(msg='Subset must be one of [%s], got %s' %
(', '.join(sorted([subset for subset in
VALID_SUBSETS])), subset))
if exclude:
exclude_subsets.add(subset)
else:
runable_subsets.add(subset)
if not runable_subsets:
runable_subsets.update(VALID_SUBSETS)
runable_subsets.difference_update(exclude_subsets)
runable_subsets.add('default')
# handle fetching old style facts separately
runable_subsets.discard('ofacts')
facts = dict()
facts['gather_subset'] = list(runable_subsets)
instances = list()
ansible_facts = dict()
# fetch old style facts only when explicitly mentioned in gather_subset option
if 'ofacts' in gather_subset:
if HAS_PYEZ:
ansible_facts.update(OFacts(module).populate())
else:
warnings += ['junos-eznc is required to gather old style facts but does not appear to be installed. '
'It can be installed using `pip install junos-eznc`']
facts['gather_subset'].append('ofacts')
for key in runable_subsets:
instances.append(FACT_SUBSETS[key](module))
for inst in instances:
inst.populate()
facts.update(inst.facts)
for key, value in iteritems(facts):
key = 'ansible_net_%s' % key
ansible_facts[key] = value
module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
if __name__ == '__main__':
main()
|
the-stack_106_25648 | #!/usr/bin/env python
#
# Public Domain 2014-2018 MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
class BaseDataSet(object):
"""
BaseDataSet is an abstract base class for other *DataSet classes.
An object of this type should not be created directly. These classes
represent test data sets that can be used to populate tables and
to check the contents of existing tables.
"""
def __init__(self, testcase, uri, rows, **kwargs):
self.testcase = testcase
self.uri = uri
self.rows = rows
self.key_format = kwargs.get('key_format', 'S')
self.value_format = kwargs.get('value_format', 'S')
self.config = kwargs.get('config', '')
self.projection = kwargs.get('projection', '')
def create(self):
self.testcase.session.create(self.uri, 'key_format=' + self.key_format
+ ',value_format=' + self.value_format
+ ',' + self.config)
def fill(self):
c = self.testcase.session.open_cursor(self.uri, None)
for i in xrange(1, self.rows + 1):
c[self.key(i)] = self.value(i)
c.close()
def postfill(self):
pass
@classmethod
def is_lsm(cls):
return False
def populate(self):
self.testcase.pr('populate: ' + self.uri + ' with '
+ str(self.rows) + ' rows')
self.create()
self.fill()
self.postfill()
# Create a key for a Simple or Complex data set.
@staticmethod
def key_by_format(i, key_format):
if key_format == 'i' or key_format == 'r':
return i
elif key_format == 'S' or key_format == 'u':
return str('%015d' % i)
else:
raise AssertionError(
'key: object has unexpected format: ' + key_format)
# Create a value for a Simple data set.
@staticmethod
def value_by_format(i, value_format):
if value_format == 'i' or value_format == 'r':
return i
elif value_format == 'S' or value_format == 'u':
return str(i) + ': abcdefghijklmnopqrstuvwxyz'
elif value_format == '8t':
value = (
0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xaa, 0xab,
0xac, 0xad, 0xae, 0xaf, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
0xb7, 0xb8, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf)
return value[i % len(value)]
else:
raise AssertionError(
'value: object has unexpected format: ' + value_format)
# Create a key for this data set. Simple and Complex data sets have
# the same key space.
def key(self, i):
return BaseDataSet.key_by_format(i, self.key_format)
def check(self):
self.testcase.pr('check: ' + self.uri)
cursor = self.testcase.session.open_cursor(
self.uri + self.projection, None, None)
self.check_cursor(cursor)
cursor.close()
class SimpleDataSet(BaseDataSet):
"""
SimpleDataSet creates a table with a single key and value that is
populated with predefined data, up to the requested number of rows.
key_format and value_format may be set in the constructor to
override the simple string defaults.
"""
def __init__(self, testcase, uri, rows, **kwargs):
super(SimpleDataSet, self).__init__(testcase, uri, rows, **kwargs)
# A value suitable for checking the value returned by a cursor.
def comparable_value(self, i):
return BaseDataSet.value_by_format(i, self.value_format)
# A value suitable for assigning to a cursor.
def value(self, i):
return BaseDataSet.value_by_format(i, self.value_format)
def check_cursor(self, cursor):
i = 0
for key, val in cursor:
i += 1
self.testcase.assertEqual(key, self.key(i))
if cursor.value_format == '8t' and val == 0: # deleted
continue
self.testcase.assertEqual(val, self.value(i))
self.testcase.assertEqual(i, self.rows)
class SimpleLSMDataSet(SimpleDataSet):
"""
SimpleLSMDataSet is identical to SimpleDataSet, but using LSM files
via the type=lsm configuration.
"""
def __init__(self, testcase, uri, rows, **kwargs):
kwargs['config'] = kwargs.get('config', '') + ',type=lsm'
super(SimpleLSMDataSet, self).__init__(
testcase, uri, rows, **kwargs)
@classmethod
def is_lsm(cls):
return True
class SimpleIndexDataSet(SimpleDataSet):
"""
SimpleIndexDataSet is identical to SimpleDataSet, adding one index
that maps the value to the key.
"""
def __init__(self, testcase, uri, rows, **kwargs):
self.indexname = 'index:' + uri.split(":")[1] + ':index1'
self.origconfig = kwargs.get('config', '')
kwargs['config'] = self.origconfig + ',columns=(key0,value0)'
super(SimpleIndexDataSet, self).__init__(
testcase, uri, rows, **kwargs)
def create(self):
super(SimpleIndexDataSet, self).create()
self.testcase.session.create(self.indexname, 'columns=(value0,key0),' +
self.origconfig)
def check(self):
BaseDataSet.check(self)
# Check values in the index.
idxcursor = self.testcase.session.open_cursor(self.indexname)
for i in xrange(1, self.rows + 1):
k = self.key(i)
v = self.value(i)
ik = (v, k) # The index key is columns=(v,k).
self.testcase.assertEqual(v, idxcursor[ik])
idxcursor.close()
class SimpleIndexLSMDataSet(SimpleIndexDataSet):
"""
SimpleIndexLSMDataSet is identical to SimpleIndexDataSet, but
using LSM files.
"""
def __init__(self, testcase, uri, rows, **kwargs):
kwargs['config'] = kwargs.get('config', '') + ',type=lsm'
super(SimpleIndexLSMDataSet, self).__init__(
testcase, uri, rows, **kwargs)
@classmethod
def is_lsm(cls):
return True
class ComplexDataSet(BaseDataSet):
"""
ComplexDataSet populates a table with a mixed set of indices
and column groups. Some indices are created before the
table is populated, some after.
"""
def __init__(self, testcase, uri, rows, **kwargs):
self.indexlist = [
['indx1', 'column2'],
['indx2', 'column3'],
['indx3', 'column4'],
['indx4', 'column2,column4'],
['indx5', 'column3,column5'],
['indx6', 'column3,column5,column4']]
self.cglist = [
['cgroup1', 'column2'],
['cgroup2', 'column3'],
['cgroup3', 'column4'],
['cgroup4', 'column2,column3'],
['cgroup5', 'column3,column4'],
['cgroup6', 'column2,column4,column5']]
self.cgconfig = kwargs.pop('cgconfig', '')
config = kwargs.get('config', '')
config += ',columns=(record,column2,column3,column4,column5),' + \
'colgroups=(cgroup1,cgroup2,cgroup3,cgroup4,cgroup5,cgroup6)'
kwargs['config'] = config
kwargs['value_format'] = 'SiSS'
super(ComplexDataSet, self).__init__(testcase, uri, rows, **kwargs)
def create(self):
config = 'key_format=' + self.key_format + \
',value_format=' + self.value_format + ',' + self.config
session = self.testcase.session
##self.testcase.tty('URI=' + self.uri + 'CONFIG=' + config)
session.create(self.uri, config)
tablepart = self.uri.split(":")[1] + ':'
for cg in self.cglist:
session.create('colgroup:' + tablepart + cg[0],
',columns=(' + cg[1] + '),' + self.cgconfig)
for index in self.indexlist[0:4]:
session.create('index:' + tablepart + index[0],
',columns=(' + index[1] + '),' + self.config)
def postfill(self):
# add some indices after filling the table
tablepart = self.uri.split(":")[1] + ':'
session = self.testcase.session
for index in self.indexlist[4:]:
session.create('index:' + tablepart + index[0],
',columns=(' + index[1] + ')')
def colgroup_count(self):
return len(self.cglist)
def colgroup_name(self, i):
return 'colgroup:' + self.uri.split(":")[1] + ':' + self.cglist[i][0]
def index_count(self):
return len(self.indexlist)
def index_name(self, i):
return 'index:' + self.uri.split(":")[1] + ':' + self.indexlist[i][0]
# A value suitable for checking the value returned by a cursor, as
# cursor.get_value() returns a list.
def comparable_value(self, i):
return [str(i) + ': abcdefghijklmnopqrstuvwxyz'[0:i%26],
i,
str(i) + ': abcdefghijklmnopqrstuvwxyz'[0:i%23],
str(i) + ': abcdefghijklmnopqrstuvwxyz'[0:i%18]]
# A value suitable for assigning to a cursor, as cursor.set_value() expects
# a tuple when it is used with a single argument and the value is composite.
def value(self, i):
return tuple(self.comparable_value(i))
def check_cursor(self, cursor):
i = 0
for key, s1, i2, s3, s4 in cursor:
i += 1
self.testcase.assertEqual(key, self.key(i))
v = self.value(i)
self.testcase.assertEqual(s1, v[0])
self.testcase.assertEqual(i2, v[1])
self.testcase.assertEqual(s3, v[2])
self.testcase.assertEqual(s4, v[3])
self.testcase.assertEqual(i, self.rows)
class ComplexLSMDataSet(ComplexDataSet):
"""
ComplexLSMDataSet is identical to ComplexDataSet, but using LSM files.
"""
def __init__(self, testcase, uri, rows, **kwargs):
kwargs['cgconfig'] = kwargs.get('cgconfig', '') + ',type=lsm'
super(ComplexLSMDataSet, self).__init__(
testcase, uri, rows, **kwargs)
@classmethod
def is_lsm(cls):
return True
class ProjectionDataSet(SimpleDataSet):
"""
ProjectionDataSet creates a table with predefined data identical to
SimpleDataSet (single key and value), but when checking it, uses
a cursor with a projection.
"""
def __init__(self, testcase, uri, rows, **kwargs):
kwargs['config'] = kwargs.get('config', '') + ',columns=(k,v0)'
kwargs['projection'] = '(v0,v0,v0)'
super(ProjectionDataSet, self).__init__(testcase, uri, rows, **kwargs)
# A value suitable for checking the value returned by a cursor.
def comparable_value(self, i):
v0 = self.value(i)
return [v0, v0, v0]
def check_cursor(self, cursor):
i = 0
for key, got0, got1, got2 in cursor:
i += 1
self.testcase.assertEqual(key, self.key(i))
if cursor.value_format == '8t' and got0 == 0: # deleted
continue
self.testcase.assertEqual([got0, got1, got2],
self.comparable_value(i))
self.testcase.assertEqual(i, self.rows)
class ProjectionIndexDataSet(BaseDataSet):
"""
ProjectionIndexDataSet creates a table with three values and
an index. Checks are made against a projection of the main table
and a projection of the index.
"""
def __init__(self, testcase, uri, rows, **kwargs):
self.origconfig = kwargs.get('config', '')
self.indexname = 'index:' + uri.split(":")[1] + ':index0'
kwargs['config'] = self.origconfig + ',columns=(k,v0,v1,v2)'
kwargs['value_format'] = kwargs.get('value_format', 'SiS')
kwargs['projection'] = '(v1,v2,v0)'
super(ProjectionIndexDataSet, self).__init__(
testcase, uri, rows, **kwargs)
def value(self, i):
return ('v0:' + str(i), i*i, 'v2:' + str(i))
# Suitable for checking the value returned by a cursor using a projection.
def comparable_value(self, i):
return [i*i, 'v2:' + str(i), 'v0:' + str(i)]
def create(self):
super(ProjectionIndexDataSet, self).create()
self.testcase.session.create(self.indexname, 'columns=(v2,v1),' +
self.origconfig)
def check_cursor(self, cursor):
i = 0
for key, got0, got1, got2 in cursor:
i += 1
self.testcase.assertEqual(key, self.key(i))
if cursor.value_format == '8t' and got0 == 0: # deleted
continue
self.testcase.assertEqual([got0, got1, got2],
self.comparable_value(i))
self.testcase.assertEqual(i, self.rows)
def check_index_cursor(self, cursor):
for i in xrange(1, self.rows + 1):
k = self.key(i)
v = self.value(i)
ik = (v[2], v[1]) # The index key is (v2,v2)
expect = [v[1],k,v[2],v[0]]
self.testcase.assertEqual(expect, cursor[ik])
def check(self):
BaseDataSet.check(self)
# Check values in the index.
idxcursor = self.testcase.session.open_cursor(
self.indexname + '(v1,k,v2,v0)')
self.check_index_cursor(idxcursor)
idxcursor.close()
def index_count(self):
return 1
def index_name(self, i):
return self.indexname
# create a key based on a cursor as a shortcut to creating a SimpleDataSet
def simple_key(cursor, i):
return BaseDataSet.key_by_format(i, cursor.key_format)
# create a value based on a cursor as a shortcut to creating a SimpleDataSet
def simple_value(cursor, i):
return BaseDataSet.value_by_format(i, cursor.value_format)
# create a key based on a cursor as a shortcut to creating a ComplexDataSet
def complex_key(cursor, i):
return BaseDataSet.key_by_format(i, cursor.key_format)
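# Illustrative usage sketch (not part of the original file). The 'testcase'
# argument and the table URI are hypothetical placeholders; in a real
# WiredTiger Python test the testcase is a wttest test case with an open
# self.session, which the data set classes use internally.
def _example_usage(testcase):
    # Build, populate and verify a 100-row table keyed and valued by strings.
    ds = SimpleDataSet(testcase, 'table:example', 100,
        key_format='S', value_format='S')
    ds.populate()   # create the table and fill it with the predefined rows
    ds.check()      # re-open a cursor and verify every row against the data set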
|
the-stack_106_25655 | # -*- coding:utf-8 -*-
import sys
sys.path.append("../moebot")
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if __name__ == '__main__':
test()
|
the-stack_106_25656 | # var = 1
# while var == 1:
# num = int(input("Enter a number: "))
# print("你输入的数字是: ", num)
# print("Good bye!")
# class MyNumbers:
# def __iter__(self):
# self.a = 1
# return self
# def __next__(self):
# x = self.a
# self.a += 1
# return x
# myClass = MyNumbers()
# myiter = iter(myClass)
# print(next(myiter))
# print(next(myiter))
# print(next(myiter))
# print(next(myiter))
# print(next(myiter))
# import sys
# def fibonacci(n):
# a, b, counter = 0, 1, 0
# while True:
# if(counter > n):
# return
# yield a
# a, b = b, a + b
# counter += 1
# f = fibonacci(10)
# while True:
# try:
# print(next(f), end=" ")
# except StopIteration:
# sys.exit()
num = 1
def fun1():
global num
print(num)
num = 123
print(num)
fun1()
print(num)
def outer():
num = 10
def inner():
        nonlocal num # declared with the nonlocal keyword
num = 100
print(num)
inner()
print(num)
outer()
import study2
study2.print_func("bbigcd") |
the-stack_106_25657 | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import numpy as np
import tensorflow as tf
import logging
from tensorflow.python.estimator.export.export import build_raw_serving_input_receiver_fn
from tensorflow.python.keras.layers import LSTM, Dense
from tensorflow.python.estimator.model_fn import ModeKeys as Modes
# https://github.com/tensorflow/tensorflow/issues/15868
# Module: test_s3_checkpoint_save_timeout
# Purpose: Train with random data and produce a large enough graph file, which should cause a
# request time out when saving to S3 on the default C++ SDK S3 request timeout configuration.
# This test script is meant to test if the patch, in the github issue above, and s3 request
# timeout environment variable were applied properly.
def model_fn(features, labels, mode, params):
hidden_dim = params.get('hidden_dim', 512)
classes = params.get('classes', 2)
learning_rate = params.get('learning_rate', 0.001)
embedding_dropout = params.get('embedding_dropout', 0.5)
drop = (mode == Modes.TRAIN)
word_seq = features['inputs']
with tf.variable_scope("embedding"):
emb_parts = _partitioned_embeddings(params)
word_vectors = tf.nn.embedding_lookup(emb_parts, word_seq, name='word_vectors', partition_strategy='mod')
z = tf.layers.dropout(word_vectors, rate=embedding_dropout, training=drop)
l = LSTM(hidden_dim)(z)
logits = Dense(classes, activation="sigmoid")(l)
if mode in (Modes.PREDICT, Modes.EVAL):
predicted_indices = tf.argmax(input=logits, axis=1)
probabilities = tf.nn.softmax(logits, name='softmax_tensor')
if mode in (Modes.TRAIN, Modes.EVAL):
global_step = tf.train.get_or_create_global_step()
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels)
loss = tf.reduce_mean(cross_entropy)
tf.summary.scalar('OptimizeLoss', loss)
if mode == Modes.PREDICT:
predictions = {
'classes': predicted_indices,
'probabilities': probabilities
}
export_outputs = {
'serving_default': tf.estimator.export.PredictOutput(predictions)
}
return tf.estimator.EstimatorSpec(
mode, predictions=predictions, export_outputs=export_outputs)
if mode == Modes.TRAIN:
logging.info(params)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss, global_step=global_step)
logging.info('returned estimator spec')
return tf.estimator.EstimatorSpec(mode,
loss=loss,
train_op=train_op)
if mode == Modes.EVAL:
actual_index = tf.argmax(input=labels, axis=1)
ones = tf.ones(tf.shape(actual_index), tf.int64)
actual_endings = tf.equal(ones, actual_index)
predicted_endings = tf.equal(ones, predicted_indices)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(actual_index, predicted_indices),
'precision': tf.metrics.precision(actual_endings, predicted_endings),
'recall': tf.metrics.recall(actual_endings, predicted_endings)
}
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=eval_metric_ops)
def _partitioned_embeddings(params):
logging.info('initialize embedding layer')
partitions = params.get('partitions', 10)
embedding_dim = params.get('embedding_dim', 500)
max_vocab_size = params.get('max_vocab_size', 134367)
emb = np.random.rand(max_vocab_size, embedding_dim)
end_pad = partitions - ((emb.shape[0] + 1) % partitions)
padded = np.lib.pad(emb, ((1,end_pad), (0,0)), 'constant', constant_values=(0.0, 0.0)).astype(np.float32)
logging.info('read in embeddings')
constants = []
for i in range(partitions):
constants.append(tf.constant(padded[i::partitions]))
logging.info('create partitioned constants')
return constants
def serving_input_fn(params):
inputs = tf.placeholder(tf.int32, shape=[None, 7])
tensors = {'inputs': inputs}
return build_raw_serving_input_receiver_fn(tensors)()
def train_input_fn(training_dir, params):
return _input_fn(params)()
def eval_input_fn(training_dir, params):
return _input_fn(params)()
def _input_fn(params,shuffle=False):
window_size = params.get('windows_size', 7)
batch_size = params.get('batch_size', 128)
logging.info('window size = {}'.format(window_size))
max_int = params.get('max_vocab_size', 134367) - 1
word_ids = np.random.random_integers(0, high=max_int, size=(batch_size * 10, window_size)).astype(np.int32)
x = {'inputs': word_ids}
classes = np.random.random_integers(0, high=1, size=batch_size * 10).tolist()
labels = []
for i in range(len(classes)):
labels.append([classes[i], abs(classes[i] - 1)])
y_list = np.array(labels, dtype=np.float32)
logging.info(y_list.shape)
return tf.estimator.inputs.numpy_input_fn(
x=x,
y=y_list,
batch_size=batch_size,
num_epochs=None,
shuffle=shuffle)
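# Illustrative wiring sketch (not part of the original test script). In the
# legacy SageMaker TensorFlow "framework mode" the container discovers
# model_fn, train_input_fn and serving_input_fn by name; the function below
# only shows how the same pieces could be driven manually with the plain
# tf.estimator API. The model_dir/export paths and step count are hypothetical.
def _run_locally(model_dir='/tmp/s3_timeout_test'):
    params = {'batch_size': 128}
    estimator = tf.estimator.Estimator(model_fn=model_fn, model_dir=model_dir, params=params)
    # _input_fn(params) already returns a numpy_input_fn callable, so it can be
    # handed to train() directly.
    estimator.train(input_fn=_input_fn(params), steps=10)
    # export_savedmodel expects a zero-argument callable returning a
    # ServingInputReceiver, so wrap serving_input_fn in a lambda.
    estimator.export_savedmodel(model_dir + '/export', lambda: serving_input_fn(params))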
|
the-stack_106_25659 | # -*- coding: utf-8 -*-
'''
Pillar data from vCenter or an ESXi host
.. versionadded:: 2017.7.0
:depends: - pyVmomi
This external pillar can pull attributes from objects in vCenter or an ESXi host and provide those attributes
as pillar data to minions. This can allow for pillar based targeting of minions on ESXi host, Datastore, VM
configuration, etc. This setup requires only the salt master have access to the vCenter server/ESXi hosts.
The pillar will return an empty dict if the 'os' or 'virtual' grain is not 'VMWare', 'ESXi', or 'VMWare ESXi'.
Defaults
========
The external pillar will search for Virtual Machines with the VM name matching the minion id.
Data will be returned into the 'vmware' pillar key.
The external pillar has a default set of properties to return for both VirtualMachine and HostSystem types.
Configuring the VMWare pillar
=============================
The required minimal configuration in the salt master ext_pillar setup:
.. code-block:: yaml
ext_pillar:
- vmware:
host: <vcenter/esx host>
username: <user to connect with>
password: <password>
Optionally, the following keyword arguments can be passed to the ext_pillar for customized configuration:
pillar_key
Optionally set the pillar key to return the data into. Default is ``vmware``.
protocol
Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
using the default protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
using the default port. Default port is ``443``.
property_name
Property name to match the minion id against. Defaults to ``name``.
property_types
Optionally specify a list of pyVmomi vim types to search for the minion id in 'property_name'.
Default is ``['VirtualMachine']``.
For example, to search both vim.VirtualMachine and vim.HostSystem object types:
.. code-block:: yaml
ext_pillar:
- vmware:
host: myesx
username: root
password: complex_password
property_types:
- VirtualMachine
- HostSystem
Additionally, the list of property types can be dicts, the item of the dict being a list specifying
the attribute to return for that vim object type.
The pillar will attempt to recurse the attribute and return all child attributes.
To explicitly specify deeper attributes without attempting to recurse an attribute, convert the list
item to a dict with the item of the dict being the child attributes to return. Follow this pattern
to return attributes as deep within the object as necessary.
.. note::
Be careful when specifying custom attributes! Many attributes have objects as attributes which
have the parent object as an attribute and which will cause the pillar to fail due to the attempt
to convert all sub-objects recursively (i.e. infinite attribute loops). Specifying only the
sub-attributes you would like returned will keep the infinite recursion from occurring.
A maximum recursion exception will occur in this case and the pillar will not return as desired.
.. code-block:: yaml
ext_pillar:
- vmware:
host: myvcenter
username: my_user
password: my_pass
replace_default_attributes: True
property_types:
- VirtualMachine:
- config:
- bootOptions:
- bootDelay
- bootRetryDelay
- HostSystem:
- datastore:
- name
    The above ext_pillar example would return a pillar like the following for a VirtualMachine object whose
    name matched the minion id:
.. code-block:: yaml
vmware:
config:
bootOptions:
bootDelay: 1000
bootRetryDelay: 1000
If you were to retrieve these virtual machine attributes via pyVmomi directly, this would be the same as
.. code-block:: python
vmObject.config.bootOptions.bootDelay
vmObject.config.bootOptionis.bootRetryDelay
    The above ext_pillar example would return a pillar like the following for a HostSystem object whose name
    matched the minion id:
.. code-block:: yaml
vmware:
datastore:
- name: Datastore1
- name: Datastore2
The 'datastore' property of a HostSystem object is a list of datastores, thus a list is returned.
replace_default_attributes
If custom attributes are specified by the property_types parameter, replace_default_attributes determines
if those will be added to default attributes (False) or replace the default attributes completely (True).
The default setting is 'False'.
.. note::
vCenter "Custom Attributes" (i.e. Annotations) will always be returned if it exists on the object as
part of the pillar regardless of this setting.
'''
from __future__ import absolute_import
# Import python libs
import logging
# Import salt libs
import salt.utils.dictupdate as dictupdate
import salt.utils.vmware
# Import 3rd-party libs
from salt.ext import six
try:
from pyVmomi import vim
from pyVim.connect import Disconnect
HAS_LIBS = True
except ImportError:
HAS_LIBS = False
__virtualname__ = 'vmware'
# Set up logging
log = logging.getLogger(__name__)
def __virtual__():
'''
    Only return if the pyVmomi library is installed
'''
return __virtualname__ if HAS_LIBS else False
def ext_pillar(minion_id,
pillar, # pylint: disable=W0613
**kwargs):
'''
Check vmware/vcenter for all data
'''
vmware_pillar = {}
host = None
username = None
password = None
property_types = []
property_name = 'name'
protocol = None
port = None
pillar_key = 'vmware'
replace_default_attributes = False
type_specific_pillar_attributes = {
'VirtualMachine': [
{
'config':
[
'version',
'guestId',
'files',
'tools',
'flags',
'memoryHotAddEnabled',
'cpuHotAddEnabled',
'cpuHotRemoveEnabled',
'datastoreUrl',
'swapPlacement',
'bootOptions',
'scheduledHardwareUpgradeInfo',
'memoryAllocation',
'cpuAllocation',
]
},
{
'summary':
[
{
'runtime':
[
{
'host':
[
'name',
{'parent': 'name'},
]
},
'bootTime',
]
},
{
'guest':
[
'toolsStatus',
'toolsVersionStatus',
'toolsVersionStatus2',
'toolsRunningStatus',
]
},
{
'config':
[
'cpuReservation',
'memoryReservation',
]
},
{
'storage':
[
'committed',
'uncommitted',
'unshared',
]
},
{'dasVmProtection': ['dasProtected']},
]
},
{
'storage':
[
{
'perDatastoreUsage':
[
{
'datastore': 'name'
},
'committed',
'uncommitted',
'unshared',
]
}
]
},
],
'HostSystem': [
{
'datastore':
[
'name',
'overallStatus',
{
'summary':
[
'url',
'freeSpace',
'maxFileSize',
'maxVirtualDiskCapacity',
'maxPhysicalRDMFileSize',
'maxVirtualRDMFileSize',
{
'vmfs':
[
'capacity',
'blockSizeMb',
'maxBlocks',
'majorVersion',
'version',
'uuid',
{
'extent':
[
'diskName',
'partition',
]
},
'vmfsUpgradeable',
'ssd',
'local',
],
},
],
},
{'vm': 'name'}
]
},
{
'vm':
[
'name',
'overallStatus',
{
'summary':
[
{'runtime': 'powerState'},
]
},
]
},
]
}
pillar_attributes = [
{
'summary':
[
'overallStatus'
]
},
{
'network':
[
'name',
{'config': {'distributedVirtualSwitch': 'name'}},
]
},
{
'datastore':
[
'name',
]
},
{
'parent':
[
'name'
]
},
]
if 'pillar_key' in kwargs:
pillar_key = kwargs['pillar_key']
vmware_pillar[pillar_key] = {}
if 'host' not in kwargs:
log.error('VMWare external pillar configured but host is not specified in ext_pillar configuration.')
return vmware_pillar
else:
host = kwargs['host']
log.debug('vmware_pillar -- host = {0}'.format(host))
if 'username' not in kwargs:
log.error('VMWare external pillar requested but username is not specified in ext_pillar configuration.')
return vmware_pillar
else:
username = kwargs['username']
log.debug('vmware_pillar -- username = {0}'.format(username))
if 'password' not in kwargs:
log.error('VMWare external pillar requested but password is not specified in ext_pillar configuration.')
return vmware_pillar
else:
password = kwargs['password']
log.debug('vmware_pillar -- password = {0}'.format(password))
if 'replace_default_attributes' in kwargs:
replace_default_attributes = kwargs['replace_default_attributes']
if replace_default_attributes:
pillar_attributes = []
type_specific_pillar_attributes = {}
if 'property_types' in kwargs:
for prop_type in kwargs['property_types']:
if isinstance(prop_type, dict):
property_types.append(getattr(vim, prop_type.keys()[0]))
if isinstance(prop_type[prop_type.keys()[0]], list):
pillar_attributes = pillar_attributes + prop_type[prop_type.keys()[0]]
else:
log.warning('A property_type dict was specified, but its value is not a list')
else:
property_types.append(getattr(vim, prop_type))
else:
property_types = [vim.VirtualMachine]
log.debug('vmware_pillar -- property_types = {0}'.format(property_types))
if 'property_name' in kwargs:
property_name = kwargs['property_name']
else:
property_name = 'name'
log.debug('vmware_pillar -- property_name = {0}'.format(property_name))
if 'protocol' in kwargs:
protocol = kwargs['protocol']
log.debug('vmware_pillar -- protocol = {0}'.format(protocol))
if 'port' in kwargs:
port = kwargs['port']
log.debug('vmware_pillar -- port = {0}'.format(port))
virtualgrain = None
osgrain = None
if 'virtual' in __grains__:
virtualgrain = __grains__['virtual'].lower()
if 'os' in __grains__:
osgrain = __grains__['os'].lower()
if virtualgrain == 'vmware' or osgrain == 'vmware esxi' or osgrain == 'esxi':
vmware_pillar[pillar_key] = {}
try:
_conn = salt.utils.vmware.get_service_instance(host,
username,
password,
protocol,
port)
if _conn:
data = None
for prop_type in property_types:
data = salt.utils.vmware.get_mor_by_property(_conn,
prop_type,
minion_id,
property_name=property_name)
if data:
type_name = type(data).__name__.replace('vim.', '')
if hasattr(data, 'availableField'):
vmware_pillar[pillar_key]['annotations'] = {}
for availableField in data.availableField:
for customValue in data.customValue:
if availableField.key == customValue.key:
vmware_pillar[pillar_key]['annotations'][availableField.name] = customValue.value
type_specific_pillar_attribute = []
if type_name in type_specific_pillar_attributes:
type_specific_pillar_attribute = type_specific_pillar_attributes[type_name]
vmware_pillar[pillar_key] = dictupdate.update(vmware_pillar[pillar_key],
_crawl_attribute(data,
pillar_attributes +
type_specific_pillar_attribute))
break
# explicitly disconnect from vCenter when we are done, connections linger idle otherwise
Disconnect(_conn)
else:
log.error(
'Unable to obtain a connection with {0}, please verify your vmware ext_pillar configuration'.format(
host))
except RuntimeError:
log.error(('A runtime error occurred in the vmware_pillar, '
'this is likely caused by an infinite recursion in '
'a requested attribute. Verify your requested attributes '
'and reconfigure the pillar.'))
return vmware_pillar
else:
return {}
def _recurse_config_to_dict(t_data):
'''
helper function to recurse through a vim object and attempt to return all child objects
'''
if not isinstance(t_data, type(None)):
if isinstance(t_data, list):
t_list = []
for i in t_data:
t_list.append(_recurse_config_to_dict(i))
return t_list
elif isinstance(t_data, dict):
t_dict = {}
for k, v in six.iteritems(t_data):
t_dict[k] = _recurse_config_to_dict(v)
return t_dict
else:
if hasattr(t_data, '__dict__'):
return _recurse_config_to_dict(t_data.__dict__)
else:
return _serializer(t_data)
def _crawl_attribute(this_data, this_attr):
'''
helper function to crawl an attribute specified for retrieval
'''
if isinstance(this_data, list):
t_list = []
for d in this_data:
t_list.append(_crawl_attribute(d, this_attr))
return t_list
else:
if isinstance(this_attr, dict):
t_dict = {}
for k in this_attr:
if hasattr(this_data, k):
t_dict[k] = _crawl_attribute(getattr(this_data, k, None), this_attr[k])
return t_dict
elif isinstance(this_attr, list):
this_dict = {}
for l in this_attr:
this_dict = dictupdate.update(this_dict, _crawl_attribute(this_data, l))
return this_dict
else:
return {this_attr: _recurse_config_to_dict(getattr(this_data, this_attr, None))}
def _serializer(obj):
'''
helper function to serialize some objects for prettier return
'''
import datetime
if isinstance(obj, datetime.datetime):
if obj.utcoffset() is not None:
obj = obj - obj.utcoffset()
return obj.__str__()
return obj
|
the-stack_106_25660 | from copy import deepcopy
from easydict import EasyDict
space_invaders_impala_config = dict(
exp_name='space_invaders_impala_seed0',
env=dict(
collector_env_num=8,
evaluator_env_num=4,
n_evaluator_episode=8,
stop_value=10000000000,
env_id='SpaceInvadersNoFrameskip-v4',
frame_stack=4,
manager=dict(shared_memory=False, )
),
policy=dict(
cuda=True,
# (int) the trajectory length to calculate v-trace target
unroll_len=32,
random_collect_size=500,
model=dict(
obs_shape=[4, 84, 84],
action_shape=6,
encoder_hidden_size_list=[128, 128, 256, 512],
critic_head_hidden_size=512,
critic_head_layer_num=3,
actor_head_hidden_size=512,
actor_head_layer_num=3,
),
learn=dict(
# (int) collect n_sample data, train model update_per_collect times
# here we follow impala serial pipeline
            update_per_collect=3, # update_per_collect should be in [1, 10]
# (int) the number of data for a train iteration
batch_size=128,
grad_clip_type='clip_norm',
clip_value=5,
learning_rate=0.0003,
# (float) loss weight of the value network, the weight of policy network is set to 1
value_weight=0.5,
# (float) loss weight of the entropy regularization, the weight of policy network is set to 1
entropy_weight=0.01,
            # (float) discount factor for future reward, defaults in [0, 1]
discount_factor=0.99,
# (float) additional discounting parameter
lambda_=0.95,
# (float) clip ratio of importance weights
rho_clip_ratio=1.0,
# (float) clip ratio of importance weights
c_clip_ratio=1.0,
# (float) clip ratio of importance sampling
rho_pg_clip_ratio=1.0,
),
collect=dict(
# (int) collect n_sample data, train model n_iteration times
n_sample=16,
collector=dict(collect_print_freq=1000, ),
),
eval=dict(evaluator=dict(eval_freq=5000, )),
other=dict(replay_buffer=dict(
replay_buffer_size=10000,
), ),
),
)
main_config = EasyDict(space_invaders_impala_config)
space_invaders_impala_create_config = dict(
env=dict(
type='atari',
import_names=['dizoo.atari.envs.atari_env'],
),
env_manager=dict(type='subprocess'),
policy=dict(type='impala'),
replay_buffer=dict(type='naive'),
)
create_config = EasyDict(space_invaders_impala_create_config)
if __name__ == '__main__':
from ding.entry import serial_pipeline
serial_pipeline((main_config, create_config), seed=0)
|
the-stack_106_25662 | import tempfile
from os import path
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from PIL import Image
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
RECIPE_URL = reverse('recipe:recipe-list')
def image_upload_url(recipe_id):
"""
Return url for recipe image.
"""
return reverse('recipe:recipe-upload-image', args=[recipe_id])
def detail_url(recipe_id):
"""
Return recipe detail URL.
"""
return reverse('recipe:recipe-detail', args=[recipe_id])
def sample_tag(user, name='Casserole'):
"""
Create and return a sample tag.
"""
return Tag.objects.create(user=user, name=name)
def sample_ingredient(user, name='Butter'):
"""
    Create and return a sample ingredient.
"""
return Ingredient.objects.create(user=user, name=name)
def sample_recipe(user, **params):
"""
Create and return a sample recipe.
"""
# Default parameters of the recipe
defaults = {
'title': 'Sample Recipe',
'time_minutes': 30,
'price': 5.00
}
# Update the default dict
defaults.update(params)
# Create the recipe
return Recipe.objects.create(user=user, **defaults)
class PublicRecipeApiTests(TestCase):
"""
Test unauthenticated recipe API access.
"""
def setUp(self):
self.client = APIClient()
def test_auth_required(self):
"""
Test that authentication is required.
"""
# Make a GET Request
res = self.client.get(RECIPE_URL)
# Assertion
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTests(TestCase):
"""
Test authenticated recipe API access.
"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'[email protected]',
'santa4521!'
)
self.client.force_authenticate(self.user)
def test_retrieve_recipes(self):
"""
Test retrieving a list of recipes.
"""
# Create recipes
sample_recipe(user=self.user)
sample_recipe(user=self.user, title='Pizza')
# Make a GET Request
res = self.client.get(RECIPE_URL)
# Get the recipes and serialize the data
recipes = Recipe.objects.all().order_by('-id')
serializer = RecipeSerializer(recipes, many=True)
# Assertions
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_recipes_limited_to_user(self):
"""
Test retrieving recipes for user.
"""
# Create second user
user2 = get_user_model().objects.create_user(
'[email protected]',
'santa4521!_2'
)
# Create recipes
sample_recipe(user=user2)
sample_recipe(user=self.user)
# Make a GET Request
res = self.client.get(RECIPE_URL)
# Get the recipe for the authenticated user
# and serialize the data
recipes = Recipe.objects.filter(user=self.user)
serializer = RecipeSerializer(recipes, many=True)
# Assertions
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data, serializer.data)
def test_view_recipe_detail(self):
"""
Test viewing a recipe detail.
"""
# Add a sample recipe with a tag and ingredient
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
recipe.ingredients.add(sample_ingredient(user=self.user))
# Generate the url & make a GET Request
url = detail_url(recipe.id)
res = self.client.get(url)
# Serialize the data
serializer = RecipeDetailSerializer(recipe)
# Assertion
self.assertEqual(res.data, serializer.data)
def test_create_basic_recipe(self):
"""
Test creating a recipe.
"""
payload = {
'title': 'Curry Chicken',
'time_minutes': 45,
'price': 16.00
}
# Make a POST Request
res = self.client.post(RECIPE_URL, payload)
# Assertions
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
for key in payload.keys():
self.assertEqual(payload[key], getattr(recipe, key))
def test_create_recipe_with_tags(self):
"""
Test creating a recipe with tags.
"""
# Create tags
tag1 = sample_tag(user=self.user, name='indian')
tag2 = sample_tag(user=self.user, name='dinner')
# Create payload and make a POST Request
payload = {
'title': 'curry chicken',
'tags': [tag1.id, tag2.id],
'time_minutes': 55,
'price': 21.99
}
res = self.client.post(RECIPE_URL, payload)
# Assertions
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
tags = recipe.tags.all()
self.assertEqual(tags.count(), 2)
self.assertIn(tag1, tags)
self.assertIn(tag2, tags)
def test_create_recipe_with_ingredients(self):
"""
Test creating recipe with ingredients.
"""
# Create ingredients
        ingredient1 = sample_ingredient(user=self.user, name='blueberry')
        ingredient2 = sample_ingredient(user=self.user, name='sugar')
# Create payload and make a POST Request
payload = {
'title': 'Blueberry Pie',
'ingredients': [ingredient1.id, ingredient2.id],
'time_minutes': 35,
'price': 12
}
res = self.client.post(RECIPE_URL, payload)
# Assertions
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
ingredients = recipe.ingredients.all()
self.assertEqual(ingredients.count(), 2)
self.assertIn(ingredient1, ingredients)
self.assertIn(ingredient2, ingredients)
def test_partial_update_recipe(self):
"""
Test updating a recipe with PATCH.
"""
# Create sample data
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
new_tag = sample_tag(user=self.user, name="Italian")
# Payload
payload = {
'title': 'Pizza',
'tags': [new_tag.id]
}
# Get URL and make a PATCH request
url = detail_url(recipe.id)
self.client.patch(url, payload)
# Refresh the database
recipe.refresh_from_db()
# Assertions
self.assertEqual(recipe.title, payload['title'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 1)
self.assertIn(new_tag, tags)
def test_full_updated_recipe(self):
"""
Test updating a recipe with PUT.
"""
# Create sample data
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
# Payload
payload = {
'title': 'Chicken Pasta',
'time_minutes': 25,
'price': 12.00
}
# Get URL and make a PUT request
url = detail_url(recipe.id)
self.client.put(url, payload)
# Refresh the database
recipe.refresh_from_db()
# Assertions
self.assertEqual(recipe.title, payload['title'])
self.assertEqual(recipe.time_minutes, payload['time_minutes'])
self.assertEqual(recipe.price, payload['price'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 0)
class RecipeImageUploadTests(TestCase):
"""
Testing recipe image uploads.
"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'[email protected]',
'santa4521!'
)
self.client.force_authenticate(self.user)
self.recipe = sample_recipe(user=self.user)
def tearDown(self):
self.recipe.image.delete()
def test_upload_image_to_recipe(self):
"""
Test uploading an image to recipe.
"""
url = image_upload_url(self.recipe.id)
with tempfile.NamedTemporaryFile(suffix='.jpg') as f:
img = Image.new('RGB', (20, 20))
img.save(f, format='JPEG')
f.seek(0)
res = self.client.post(url, {'image': f}, format='multipart')
# Assertions
self.recipe.refresh_from_db()
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertIn('image', res.data)
self.assertTrue(path.exists(self.recipe.image.path))
def test_upload_image_bad_request(self):
"""
        Test uploading an invalid image.
"""
url = image_upload_url(self.recipe.id)
res = self.client.post(url, {'image': 'notimage'}, format='multipart')
# Assertions
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_filter_recipes_by_tags(self):
"""
Test returning recipes with specific tags.
"""
recipe1 = sample_recipe(user=self.user, title='Sicilian Pizza')
recipe2 = sample_recipe(user=self.user, title='Ravioli')
recipe3 = sample_recipe(user=self.user, title='Caesar Salad')
tag1 = sample_tag(user=self.user, name='Italian')
tag2 = sample_tag(user=self.user, name='Pasta')
recipe1.tags.add(tag1)
recipe2.tags.add(tag2)
res = self.client.get(RECIPE_URL, {'tags': f'{tag1.id}, {tag2.id}'})
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
        serializer3 = RecipeSerializer(recipe3)
# Assertions
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
        self.assertNotIn(serializer3.data, res.data)
def test_filter_recipes_by_ingredients(self):
"""
Test returning recipes with specific ingredients.
"""
        recipe1 = sample_recipe(user=self.user, title='Chicken over rice')
recipe2 = sample_recipe(user=self.user, title='Oxtail and rice')
recipe3 = sample_recipe(user=self.user, title='Steak and bread')
ingredient1 = sample_ingredient(user=self.user, name='chicken')
ingredient2 = sample_ingredient(user=self.user, name='rice')
recipe1.ingredients.add(ingredient1)
recipe2.ingredients.add(ingredient2)
res = self.client.get(
RECIPE_URL,
{'ingredients': f'{ingredient1.id}, {ingredient2.id}'}
)
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
        serializer3 = RecipeSerializer(recipe3)
# Assertions
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
        self.assertNotIn(serializer3.data, res.data)
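# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test module): the two filter
# tests above assume that the recipe view narrows its queryset by the
# comma-separated tag / ingredient ids passed as query parameters. A minimal
# get_queryset honouring that contract could look like the class below; the
# name RecipeViewSet, the helper _params_to_ints and the omission of the
# authentication/permission classes are assumptions made for this sketch,
# not the project's actual view code.
from rest_framework import viewsets
class RecipeViewSet(viewsets.ModelViewSet):
    """Hypothetical viewset shown only to document the filtering contract."""
    serializer_class = RecipeSerializer
    queryset = Recipe.objects.all()
    def _params_to_ints(self, qs):
        """Convert a comma-separated string of ids into a list of integers."""
        return [int(str_id) for str_id in qs.split(',')]
    def get_queryset(self):
        """Return recipes for the authenticated user, optionally filtered."""
        tags = self.request.query_params.get('tags')
        ingredients = self.request.query_params.get('ingredients')
        queryset = self.queryset
        if tags:
            queryset = queryset.filter(tags__id__in=self._params_to_ints(tags))
        if ingredients:
            queryset = queryset.filter(
                ingredients__id__in=self._params_to_ints(ingredients))
        return queryset.filter(user=self.request.user)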
|