ext (string, 9 classes) | sha (string, 40 chars) | content (string, 3–1.04M chars)
---|---|---|
py | b40d68671d3588c08ee985a5958c869264b1138e | """
dynamodb.py
AWS Dynamo DB helper methods - currently extending boto3
Note:
Credentials are required to communicate with AWS.
aws cli profile can be passed in using --profile, or
the following ENVIRONMENT VARIABLES can be set before
running this script:
AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY
"""
import logging
import argparse
import sys
import boto3
import botocore
def get_all_tables_in_region(region, profile=None):
if not region:
logging.error("You must supply a region")
return []
session = boto3.session.Session(profile_name=profile, region_name=region)
dynamodb = session.client('dynamodb')
all_tables = []
try:
current_set = dynamodb.list_tables()
all_tables.extend(current_set['TableNames'])
while 'LastEvaluatedTableName' in current_set:
start_name = current_set['LastEvaluatedTableName']
current_set = dynamodb.list_tables(ExclusiveStartTableName=start_name)
all_tables.extend(current_set['TableNames'])
except botocore.exceptions.ClientError as e:
logging.error('Unexpected error: %s' % e)
raise e
return all_tables
def get_all_tables(region_list, profile=None):
result = {}
for region in region_list:
logging.debug("Checking region: " + region)
result[region] = get_all_tables_in_region(region, profile)
return result
def table_exists_in_region(region, table_name, profile=None, suppress_warning=False):
if not region:
logging.error("You must supply a region")
return False
session = boto3.session.Session(profile_name=profile, region_name=region)
dynamodb = session.client('dynamodb')
result = False
try:
response = dynamodb.describe_table(TableName=table_name)
if 'Table' in response:
result = True
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'ResourceNotFoundException':
if not suppress_warning:
logging.warn("The given table %s does not exist in region %s" % (table_name, region))
else:
logging.error('Unexpected error: %s' % e)
raise e
return result
def table_exists(region_list, table_name, profile=None, suppress_warning=False):
result = {}
for region in region_list:
logging.debug("Checking region: " + region)
result[region] = table_exists_in_region(region, table_name, profile, suppress_warning)
return result
def get_all_items_in_table_in_region(region, table_name, profile=None):
if not region:
logging.error("You must supply a region")
return []
all_items = []
# TODO: Implement this
logging.error("Not yet implemented")
return all_items
def get_all_items_in_table(region_list, table_name, profile=None):
result = {}
for region in region_list:
logging.debug("Checking region: " + region)
result[region] = get_all_items_in_table_in_region(region, table_name, profile)
return result
def get_item_from_table_in_region(region, table_name, key, profile=None):
if not region:
logging.error("You must supply a region")
return False
session = boto3.session.Session(profile_name=profile, region_name=region)
dynamodb = session.client('dynamodb')
result = None
try:
response = dynamodb.get_item(TableName=table_name, Key=key)
if 'ResponseMetadata' in response:
if 'HTTPStatusCode' in response['ResponseMetadata']:
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
if 'Item' in response:
result = response['Item']
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'ValidationException':
logging.error("The provided key element(s) do not match the schema")
else:
logging.error('Unexpected error: %s' % e)
raise e
return result
def get_item_from_table(region_list, table_name,
item_partition_key_value, item_partition_key_name, item_partition_key_type='S',
item_sort_key_value=None, item_sort_key_name=None, item_sort_key_type='S',
profile=None):
result = {}
# Build the key dict
key = {}
key[item_partition_key_name] = {item_partition_key_type: item_partition_key_value}
if item_sort_key_value:
key[item_sort_key_name] = {item_sort_key_type: item_sort_key_value}
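# For illustration (hypothetical attribute names and values): with
# item_partition_key_name='id', item_partition_key_value='item-123' and a sort key
# 'created' of type 'S', the key dict passed to get_item would be:
#   {'id': {'S': 'item-123'}, 'created': {'S': '2020-01-01'}}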
for region in region_list:
logging.debug("Checking region: " + region)
if table_exists_in_region(region, table_name, profile):
result[region] = get_item_from_table_in_region(region, table_name, key, profile)
else:
result[region] = None
return result
def put_item_in_table_in_region(region, table_name, key, dryrun=False, profile=None):
if not region:
logging.error("You must supply a region")
return False
result = False
# TODO: Implement this
logging.error("Not yet implemented")
return result
def put_item_in_table(region_list, table_name,
item_partition_key_value, item_partition_key_name, item_partition_key_type='S',
item_sort_key_value=None, item_sort_key_name=None, item_sort_key_type='S',
dryrun=False, profile=None):
result = {}
# Build the key dict
key = {}
key[item_partition_key_name] = {item_partition_key_type: item_partition_key_value}
if item_sort_key_value:
key[item_sort_key_name] = {item_sort_key_type: item_sort_key_value}
for region in region_list:
logging.debug("Checking region: " + region)
if table_exists_in_region(region, table_name, profile):
result[region] = put_item_in_table_in_region(region, table_name, key, dryrun, profile)
else:
result[region] = False
return result
def update_item_in_table_in_region(region, table_name, key, dryrun=False, profile=None):
if not region:
logging.error("You must supply a region")
return False
result = False
# TODO: Implement this
logging.error("Not yet implemented")
return result
def update_item_in_table(region_list, table_name,
item_partition_key_value, item_partition_key_name, item_partition_key_type='S',
item_sort_key_value=None, item_sort_key_name=None, item_sort_key_type='S',
dryrun=False, profile=None):
result = {}
# Build the key dict
key = {}
key[item_partition_key_name] = {item_partition_key_type: item_partition_key_value}
if item_sort_key_value:
key[item_sort_key_name] = {item_sort_key_type: item_sort_key_value}
for region in region_list:
logging.debug("Checking region: " + region)
if table_exists_in_region(region, table_name, profile):
result[region] = update_item_in_table_in_region(region, table_name, key, dryrun, profile)
else:
result[region] = False
return result
def create_item_in_table_in_region(region, table_name, key, dryrun=False, profile=None):
if not region:
logging.error("You must supply a region")
return False
result = False
# TODO: Implement this
logging.error("Not yet implemented")
return result
def create_item_in_table(region_list, table_name,
item_partition_key_value, item_partition_key_name, item_partition_key_type='S',
item_sort_key_value=None, item_sort_key_name=None, item_sort_key_type='S',
dryrun=False, profile=None):
result = {}
# Build the key dict
key = {}
key[item_partition_key_name] = {item_partition_key_type: item_partition_key_value}
if item_sort_key_value:
key[item_sort_key_name] = {item_sort_key_type: item_sort_key_value}
for region in region_list:
logging.debug("Checking region: " + region)
if table_exists_in_region(region, table_name, profile):
result[region] = create_item_in_table_in_region(region, table_name, key, dryrun, profile)
else:
result[region] = False
return result
def delete_item_in_table_in_region(region, table_name, key, dryrun=False, profile=None):
if not region:
logging.error("You must supply a region")
return False
result = False
# TODO: Implement this
logging.error("Not yet implemented")
return result
def delete_item_in_table(region_list, table_name,
item_partition_key_value, item_partition_key_name, item_partition_key_type='S',
item_sort_key_value=None, item_sort_key_name=None, item_sort_key_type='S',
dryrun=False, profile=None):
result = {}
# Build the key dict
key = {}
key[item_partition_key_name] = {item_partition_key_type: item_partition_key_value}
if item_sort_key_value:
key[item_sort_key_name] = {item_sort_key_type: item_sort_key_value}
for region in region_list:
logging.debug("Checking region: " + region)
if table_exists_in_region(region, table_name, profile):
result[region] = delete_item_in_table_in_region(region, table_name, key, dryrun, profile)
else:
result[region] = False
return result
def item_exists_in_region(region, table_name, key, profile=None):
if get_item_from_table_in_region(region, table_name, key, profile):
return True
else:
return False
def item_exists(region_list, table_name, item_partition_key_value, item_partition_key_name, item_partition_key_type='S',
item_sort_key_value=None, item_sort_key_name=None, item_sort_key_type='S', profile=None):
result = {}
# Build the key dict
key = {}
key[item_partition_key_name] = {item_partition_key_type: item_partition_key_value}
if item_sort_key_value:
key[item_sort_key_name] = {item_sort_key_type: item_sort_key_value}
for region in region_list:
logging.debug("Checking region: " + region)
if table_exists_in_region(region, table_name, profile):
result[region] = item_exists_in_region(region, table_name, key, profile)
else:
result[region] = False
return result
def create_table_in_region(region, table_name, dryrun=False, profile=None):
if not region:
logging.error("You must supply a region")
return False
result = False
# TODO: Implement this
logging.error("Not yet implemented")
return result
def create_table(region_list, table_name, dryrun=False, profile=None):
result = {}
for region in region_list:
logging.debug("Checking region: " + region)
if not table_exists_in_region(region, table_name, profile, suppress_warning=True):
result[region] = create_table_in_region(region, table_name, dryrun, profile)
else:
logging.warn("Table %s already exists in region %s" % (table_name, region))
return result
def delete_table_in_region(region, table_name, dryrun=False, profile=None):
if not region:
logging.error("You must supply a region")
return False
result = False
# TODO: Implement this
logging.error("Not yet implemented")
return result
def delete_table(region_list, table_name, dryrun=False, profile=None):
result = {}
for region in region_list:
logging.debug("Checking region: " + region)
if table_exists_in_region(region, table_name, profile):
result[region] = delete_table_in_region(region, table_name, dryrun, profile)
else:
result[region] = False
return result
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='dynamodb.py')
me_cmd_group = parser.add_mutually_exclusive_group(required=True)
me_cmd_group.add_argument("--get", help="Perform a Get on an item", action="store_true")
# me_cmd_group.add_argument("--put", help="Perform a Put on an item", action="store_true")
# me_cmd_group.add_argument("--update", help="Perform an Update on an item", action="store_true")
# me_cmd_group.add_argument("--list-all", help="List all items in the given table and regions", action="store_true", dest="list_all_items")
# me_cmd_group.add_argument("--delete", help="Perform a Delete on an item/table", action="store_true")
me_cmd_group.add_argument("--exists", help="Check if an item/table exists", action="store_true")
# me_cmd_group.add_argument("--create", help="Create a table", action="store_true")
me_cmd_group.add_argument("--list-all-tables", help="List all tables in the given regions", action="store_true", dest="list_all_tables")
parser.add_argument("--table", help="Table name", dest='table', required=False)
parser.add_argument("--item", help="Item name", dest='item', required=False)
parser.add_argument("--pkey", help="Partition key", dest='pkey', required=False)
parser.add_argument("--pkey-type", help="Partition key type", dest='pkey_type', default='S', required=False)
parser.add_argument("--skey", help="Sort key", dest='skey', required=False)
parser.add_argument("--skey-type", help="Sort key type", dest='skey_type', default='S', required=False)
parser.add_argument("--skey-value", help="Sort key value", dest='skey_value', required=False)
parser.add_argument("--regions", help="AWS Region(s) involved (space separated)", dest='regions', nargs='+', required=True)
parser.add_argument("--profile",
help="The name of an aws cli profile to use.", dest='profile', required=False)
parser.add_argument("--verbose", help="Turn on DEBUG logging", action='store_true', required=False)
parser.add_argument("--dryrun", help="Do a dryrun - no changes will be performed", dest='dryrun',
action='store_true', default=False,
required=False)
args = parser.parse_args()
log_level = logging.INFO
if args.verbose:
print("Verbose logging selected")
log_level = logging.DEBUG
# Turn down logging for botocore
logging.getLogger("botocore").setLevel(logging.CRITICAL)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('dynamodb.log')
fh.setLevel(logging.DEBUG)
# create console handler using level set in log_level
ch = logging.StreamHandler()
ch.setLevel(log_level)
console_formatter = logging.Formatter('%(levelname)8s: %(message)s')
ch.setFormatter(console_formatter)
file_formatter = logging.Formatter('%(asctime)s - %(levelname)8s: %(message)s')
fh.setFormatter(file_formatter)
# Add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
logging.debug("INIT")
if not args.list_all_tables and not args.table:
logger.error("Must supply a table")
sys.exit(1)
if args.item:
if not args.pkey:
logger.error("Must supply a partition key")
sys.exit(1)
if args.skey and not args.skey_value:
logger.error("Sort key name present, but no sort key value provided")
sys.exit(1)
if args.skey_value and not args.skey:
logging.warn("Sort key value present, but no sort key provided - will be ignored")
# Item operations
if args.get:
if not args.item:
logger.error("Must supply an item to get")
sys.exit(1)
result = get_item_from_table(args.regions, args.table,
args.item, args.pkey, args.pkey_type,
args.skey_value, args.skey, args.skey_type,
args.profile)
for region in result:
print(region + ': ' + ('\n' + str(result[region]) if result[region] else "Not Present"))
# if args.put:
# # TODO: Implement this above in put_item_in_region
# result = put_item_in_table(args.regions, args.table,
# args.item, args.pkey, args.pkey_type,
# args.skey_value, args.skey, args.skey_type,
# args.dryrun, args.profile)
# for region in result:
# print(region + ': ' + ('\n' + 'Success' if result[region] else "Failed"))
#
# if args.update:
# # TODO: Implement this above in update_item_in_region
# result = update_item_in_table(args.regions, args.table,
# args.item, args.pkey, args.pkey_type,
# args.skey_value, args.skey, args.skey_type,
# args.dryrun, args.profile)
# for region in result:
# print(region + ': ' + ('\n' + 'Success' if result[region] else "Failed"))
#
# # Item/Table operations
# if args.delete:
# if args.item:
# # Item delete
# # TODO: Implement this above in create_item_in_table_in_region
# result = delete_item_in_table(args.regions, args.table,
# args.item, args.pkey, args.pkey_type,
# args.skey_value, args.skey, args.skey_type,
# args.dryrun, args.profile)
# else:
# # Table delete
# # TODO: Implement this above in create_table_in_region
# result = delete_table(args.regions, args.table, args.dryrun, args.profile)
# for region in result:
# print(region + ': ' + ('\n' + 'Success' if result[region] else "Failed"))
if args.exists:
if args.item:
# Item exists
result = item_exists(args.regions, args.table,
args.item, args.pkey, args.pkey_type,
args.skey_value, args.skey, args.skey_type,
args.profile)
else:
# Table exists
if args.pkey or args.skey or args.skey_value:
logging.warn("Ignoring extraneous information provided")
result = table_exists(args.regions, args.table, args.profile, suppress_warning=True)
for region in result:
print(region + ': ' + str(result[region]))
# Table operations
# if args.list_all_items:
# # TODO: Implement this above in create_table_in_region
# result = get_all_items_in_table(args.regions, args.table, args.profile)
# for region in result:
# logger.info(region + ' - ' + str(len(result[region])) + ' table' + ('' if len(result[region]) == 1 else 's') + ' present')
# if len(result[region]) > 0:
# for table in result[region]:
# logger.info(' ' + table)
#
# if args.create:
# if args.item:
# logger.warn("--item present with --create operation; Please use --put to create an item in a table")
# else:
# # Table create
# # TODO: Implement this above in create_table_in_region
# result = create_table(args.regions, args.table, args.dryrun, args.profile)
# for region in result:
# print(region + ': ' + ('\n' + 'Success' if result[region] else "Failed"))
if args.list_all_tables:
result = get_all_tables(args.regions, args.profile)
for region in result:
logger.info(region + ' - ' + str(len(result[region])) + ' table' + ('' if len(result[region]) == 1 else 's') + ' present')
if len(result[region]) > 0:
for table in result[region]:
logger.info(' ' + table)
|
py | b40d6870872e2b5b4d7d2e75c0dee2dbd8b88248 | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.desk.form import assign_to
import frappe.cache_manager
from frappe import _
class AssignmentRule(Document):
def validate(self):
assignment_days = self.get_assignment_days()
if not len(set(assignment_days)) == len(assignment_days):
repeated_days = get_repeated(assignment_days)
frappe.throw(_("Assignment Day {0} has been repeated.".format(frappe.bold(repeated_days))))
def on_update(self): # pylint: disable=no-self-use
frappe.cache_manager.clear_doctype_map('Assignment Rule', self.name)
def after_rename(self): # pylint: disable=no-self-use
frappe.cache_manager.clear_doctype_map('Assignment Rule', self.name)
def apply_unassign(self, doc, assignments):
if (self.unassign_condition and
self.name in [d.assignment_rule for d in assignments]):
return self.clear_assignment(doc)
return False
def apply_close(self, doc, assignments):
if (self.close_assignments and
self.name in [d.assignment_rule for d in assignments]):
return self.close_assignments(doc)
return False
def apply_assign(self, doc):
if self.safe_eval('assign_condition', doc):
self.do_assignment(doc)
return True
def do_assignment(self, doc):
# clear existing assignment, to reassign
assign_to.clear(doc.get('doctype'), doc.get('name'))
user = self.get_user()
assign_to.add(dict(
assign_to = user,
doctype = doc.get('doctype'),
name = doc.get('name'),
description = frappe.render_template(self.description, doc),
assignment_rule = self.name,
notify = True
))
# set for reference in round robin
self.db_set('last_user', user)
def clear_assignment(self, doc):
'''Clear assignments'''
if self.safe_eval('unassign_condition', doc):
return assign_to.clear(doc.get('doctype'), doc.get('name'))
def close_assignments(self, doc):
'''Close assignments'''
if self.safe_eval('close_condition', doc):
return assign_to.close_all_assignments(doc.get('doctype'), doc.get('name'))
def get_user(self):
'''
Get the next user for assignment
'''
if self.rule == 'Round Robin':
return self.get_user_round_robin()
elif self.rule == 'Load Balancing':
return self.get_user_load_balancing()
def get_user_round_robin(self):
'''
Get next user based on round robin
'''
# first time, or last in list, pick the first
if not self.last_user or self.last_user == self.users[-1].user:
return self.users[0].user
# find out the next user in the list
for i, d in enumerate(self.users):
if self.last_user == d.user:
return self.users[i+1].user
# bad last user, assign to the first one
return self.users[0].user
def get_user_load_balancing(self):
'''Assign to the user with least number of open assignments'''
counts = []
for d in self.users:
counts.append(dict(
user = d.user,
count = frappe.db.count('ToDo', dict(
reference_type = self.document_type,
owner = d.user,
status = "Open"))
))
# sort by dict value
sorted_counts = sorted(counts, key = lambda k: k['count'])
# pick the first user
return sorted_counts[0].get('user')
def safe_eval(self, fieldname, doc):
try:
if self.get(fieldname):
return frappe.safe_eval(self.get(fieldname), None, doc)
except Exception as e:
# when assignment fails, don't block the document as it may be
# a part of the email pulling
frappe.msgprint(frappe._('Auto assignment failed: {0}').format(str(e)), indicator = 'orange')
return False
def get_assignment_days(self):
return [d.day for d in self.get('assignment_days', [])]
def is_rule_not_applicable_today(self):
today = frappe.flags.assignment_day or frappe.utils.get_weekday()
assignment_days = self.get_assignment_days()
if assignment_days and not today in assignment_days:
return True
return False
def get_assignments(doc):
return frappe.get_all('ToDo', fields = ['name', 'assignment_rule'], filters = dict(
reference_type = doc.get('doctype'),
reference_name = doc.get('name'),
status = ('!=', 'Cancelled')
), limit = 5)
@frappe.whitelist()
def bulk_apply(doctype, docnames):
import json
docnames = json.loads(docnames)
background = len(docnames) > 5
for name in docnames:
if background:
frappe.enqueue('frappe.automation.doctype.assignment_rule.assignment_rule.apply', doc=None, doctype=doctype, name=name)
else:
apply(None, doctype=doctype, name=name)
def reopen_closed_assignment(doc):
todo = frappe.db.exists('ToDo', dict(
reference_type = doc.doctype,
reference_name = doc.name,
status = 'Closed'
))
if not todo:
return False
todo = frappe.get_doc("ToDo", todo)
todo.status = 'Open'
todo.save(ignore_permissions=True)
return True
def apply(doc, method=None, doctype=None, name=None):
if frappe.flags.in_patch or frappe.flags.in_install or frappe.flags.in_setup_wizard:
return
if not doc and doctype and name:
doc = frappe.get_doc(doctype, name)
assignment_rules = frappe.cache_manager.get_doctype_map('Assignment Rule', doc.doctype, dict(
document_type = doc.doctype, disabled = 0), order_by = 'priority desc')
assignment_rule_docs = []
# multiple auto assigns
for d in assignment_rules:
assignment_rule_docs.append(frappe.get_doc('Assignment Rule', d.get('name')))
if not assignment_rule_docs:
return
doc = doc.as_dict()
assignments = get_assignments(doc)
clear = True # are all assignments cleared
new_apply = False # are new assignments applied
if assignments:
# first unassign
# use case, there are separate groups to be assigned for say L1 and L2,
# so when the value switches from L1 to L2, L1 team must be unassigned, then L2 can be assigned.
clear = False
for assignment_rule in assignment_rule_docs:
if assignment_rule.is_rule_not_applicable_today():
continue
clear = assignment_rule.apply_unassign(doc, assignments)
if clear:
break
# apply rule only if there are no existing assignments
if clear:
for assignment_rule in assignment_rule_docs:
if assignment_rule.is_rule_not_applicable_today():
continue
new_apply = assignment_rule.apply_assign(doc)
if new_apply:
break
# apply close rule only if assignments exists
assignments = get_assignments(doc)
if assignments:
for assignment_rule in assignment_rule_docs:
if assignment_rule.is_rule_not_applicable_today():
continue
if not new_apply:
if not assignment_rule.safe_eval('close_condition', doc):
reopen = reopen_closed_assignment(doc)
if reopen:
break
close = assignment_rule.apply_close(doc, assignments)
if close:
break
def get_assignment_rules():
return [d.document_type for d in frappe.db.get_all('Assignment Rule', fields=['document_type'], filters=dict(disabled = 0))]
def get_repeated(values):
unique_list = []
diff = []
for value in values:
if value not in unique_list:
unique_list.append(str(value))
else:
if value not in diff:
diff.append(str(value))
return " ".join(diff)
|
py | b40d69e5c4b5e30d27d1a31196340bf5d236eaea | # encoding=utf8
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append(".")
sys.path.append("..")
sys.path.append("Paddle_pSp")
dataset_paths = {
'celeba_train': '',
'celeba_test': 'datasets/CelebA_test/',
'celeba_train_sketch': '',
'celeba_test_sketch': '',
'celeba_train_segmentation': '',
'celeba_test_segmentation': '',
'ffhq': 'datasets/FFHQ/',
}
# 'celeba_test': 'CelebA/img_align_celeba/',
model_paths = {
'stylegan_ffhq': 'pretrained_models/stylegan2-ffhq-config-f.pdparams',
'ir_se50': 'pretrained_models/model_ir_se50.pdparams',
'circular_face': 'pretrained_models/CurricularFace_Backbone.pdparams',
'alexnet': 'pretrained_models/alexnet.pdparams',
'lin_alex0.1': 'pretrained_models/lin_alex.pdparams',
'mtcnn_pnet': 'models/mtcnn/mtcnn_paddle/src/weights/pnet.npy',
'mtcnn_rnet': 'models/mtcnn/mtcnn_paddle/src/weights/rnet.npy',
'mtcnn_onet': 'models/mtcnn/mtcnn_paddle/src/weights/onet.npy',
'shape_predictor': 'shape_predictor_68_face_landmarks.dat',
'moco': 'pretrained_models/moco_v2_800ep_pretrain.pth.tar'
}
|
py | b40d6ac15ec3937809e83377bee1578671ef7bb7 | # USAGE
# python Stu_track.py --video video/iphonecase.mov
# import the necessary packages
import numpy as np
import argparse
import time
import cv2
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
help = "path to the (optional) video file")
args = vars(ap.parse_args())
# Define the lower and upper color boundaries
# Set a threshold range for "blue"
blueLower = np.array([100, 67, 0], dtype = "uint8")
blueUpper = np.array([255, 128, 50], dtype = "uint8")
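# Note: OpenCV frames are in BGR channel order, so these bounds are (B, G, R)
# triples; pixels between blueLower and blueUpper are treated as "blue".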
# Load the video capture
camera = cv2.VideoCapture(args["video"])
while True:
# Grab the current frame
(grabbed , frame) = camera.read()
# Check whether we have reached the end of the video
if not grabbed:
break
# Determine which pixels fall within the blue boundaries,
# then threshold and blur the image
blue = cv2.inRange(frame , blueLower , blueUpper)
blue = cv2.GaussianBlur(blue , ( 3 ,3) , 0)
# Find contours in the image
(_, cnts, _) = cv2.findContours(blue.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
# Check whether any contours were found
if len(cnts) > 0 :
# Sort the contours and take the largest one - we assume it corresponds to the area of the phone
cnt = sorted(cnts,key=cv2.contourArea , reverse= True)[0]
# Compute the (rotated) bounding box around the contour,
# then draw it
rect = np.int32(cv2.boxPoints(cv2.minAreaRect(cnt)))
cv2.drawContours(frame , [rect] , -1 , ( 0,255,0) , 2)
cv2.imshow(" Tracking", frame)
cv2.imshow(" Binary",blue)
# If your machine is fast, it may display the frames in what
# appears to be "fast forward", since more than 32 frames per
# second are being shown - a simple hack is to sleep a little
# between frames; however, if your computer is slow, you may
# want to comment out this line
time.sleep(0.025)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
camera.release()
cv2.destroyAllWindows()
|
py | b40d6ad55287738b75991792d23f3c20a15c6f4b | import keras
import numpy as np
from keras.models import Model
from keras.layers import *
import keras.backend as K
from keras_augmented import AddCentered, MeanCentered
def freeze_model(model):
model.trainable = False
for layer in model.layers:
layer.trainable = False
def apply_conv(x, dilatation_rate, filters, padding="valid"):
tanh_out = Conv1D(filters, 3,
dilation_rate=dilatation_rate,
activation="tanh",
padding=padding)(x)
sigm_out = Conv1D(filters, 3,
dilation_rate=dilatation_rate,
activation="sigmoid",
padding=padding)(x)
return Multiply()([tanh_out, sigm_out])
def residual_block(x, dilatation_rate, filters, padding="valid"):
out_act = apply_conv(x, dilatation_rate, filters, padding=padding)
res_x = Conv1D(filters, 1, padding=padding)(out_act)
if padding == "valid":
layer = AddCentered()
elif padding == "same":
layer = Average()
else:
print(padding)
assert False
res_x = layer([x, res_x])
skip_x = Conv1D(filters, 1, padding=padding)(out_act)
return res_x, skip_x
def wavenet(input_tensor, filters, nb_blocks=3, conv_per_block=8, padding="valid"):
x = Conv1D(filters, 1, activation="relu")(input_tensor)
skip_connections = []
for i in range(nb_blocks):
for j in range(conv_per_block):
x, skip_x = residual_block(x, 2 ** j, filters, padding=padding)
skip_connections.append(skip_x)
if padding == "valid":
layer = MeanCentered()
elif padding == "same":
layer = Average()
else:
print(padding)
assert False
out = layer(skip_connections)
out = Activation("relu")(out)
out = Conv1D(filters, 3, activation="relu", padding=padding)(out)
out = Conv1D(1, 1, activation="linear")(out)
return out
def get_generator(filters, nb_blocks=3, conv_per_block=8, padding="valid"):
print("last conv is dilated by a factor of", 2 ** (conv_per_block - 1))
print("receptive field:", nb_blocks * (2 ** conv_per_block))
input_mix = Input(shape=(None, 1), name='input_mix_gen')
input_latent = Input(shape=(None, 1), name='input_latent_gen')
full_input = Concatenate(axis=-1)([input_mix, input_latent])
out = wavenet(full_input, filters, nb_blocks, conv_per_block, padding) # Shape is (BS, timesteps, 1)
model = Model([input_mix, input_latent], out)
print(model.layers[-1])
# We find the padding induced.
dummy_array = np.zeros((1, 10000, 1))
output_size = model.predict([dummy_array, dummy_array]).shape[1]
model.padding = (dummy_array.shape[1] - output_size) // 2
assert output_size % 2 == 0
print("Padding found:", model.padding)
return model
def get_discriminator(filters, nb_blocks=3, conv_per_block=8, padding="valid", padding_gen=0):
print("last conv is dilated by a factor of", 2 ** (conv_per_block - 1))
print("receptive field:", nb_blocks * (2 ** conv_per_block))
input_mix = Input(shape=(None, 1), name='input_mix_disc')
input_voice = Input(shape=(None, 1), name='input_voice_disc')
# It's possible that input_voice has a smaller size than input_mix.
if padding_gen == 0:
input_voice_pad = input_voice
else:
input_voice_pad = ZeroPadding1D(padding_gen)(input_voice)
full_input = Concatenate(axis=-1)([input_mix, input_voice_pad])
out = wavenet(full_input, filters, nb_blocks, conv_per_block, padding)
out = GlobalAveragePooling1D()(out)
out = Activation("sigmoid")(out) # Shape is (BS, 1)
return Model([input_mix, input_voice], out)
def get_gan(filters, nb_blocks, conv_per_block, padding_generator="valid", padding_discriminator="valid"):
generator = get_generator(filters, nb_blocks, conv_per_block, padding=padding_generator)
discriminator = get_discriminator(filters, nb_blocks, conv_per_block,
padding=padding_discriminator,
padding_gen=generator.padding)
# We need two inputs
input_mixed = Input(shape=(None, 1), name='input_mixed_gan')
input_latent = Input(shape=(None, 1), name='input_latent_gan')
predicted_voice = generator([input_mixed, input_latent])
disciminator_decision = discriminator([input_mixed, predicted_voice])
gan = Model([input_mixed, input_latent], disciminator_decision)
return gan, generator, discriminator
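# Minimal smoke test (an illustrative sketch, not part of the original module).
# The hyperparameters below are small arbitrary values so the models build quickly;
# "same" padding keeps the generator output the same length as its input.
if __name__ == "__main__":
    gan, generator, discriminator = get_gan(
        8, nb_blocks=1, conv_per_block=3,
        padding_generator="same", padding_discriminator="same")
    dummy = np.zeros((1, 1024, 1))
    print("generator output shape:", generator.predict([dummy, dummy]).shape)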
|
py | b40d6bc53164fdb1aeb4ad72a1cb53a20cc7235a | import os
import cv2
import numpy as np
import xml.etree.ElementTree as ET
from PIL import Image, ImageDraw, ImageFont
def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20):
# Check whether the image is an OpenCV (numpy ndarray) image
if (isinstance(img, np.ndarray)):
img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
# Create an object that can draw on the given image
draw = ImageDraw.Draw(img)
# Font settings
fontStyle = ImageFont.truetype(
"simsun.ttc", textSize, encoding="utf-8"
)
# Draw the text
draw.text((left, top), text, textColor, font=fontStyle)
# Convert back to OpenCV (BGR) format
return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
def drawRect(im, bbox_lists, score=True):
c = (0, 0, 255)
for bb in bbox_lists:
cls_name = bb['name']
txt = '{}'.format(cls_name)
if score:
cls_score = float(str(bb['score']))
txt = '{}_{:.5f}'.format(cls_name, cls_score)
bbox = np.array(bb['bbox'], dtype=np.int32)
xmin, ymin, xmax, ymax = bbox
assert xmax > xmin
assert ymax > ymin
cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), c, 2)
font = cv2.FONT_HERSHEY_SIMPLEX
cat_size = cv2.getTextSize(txt, font, 0.5, 2)[0]
cv2.rectangle(im, (bbox[0], bbox[1] - cat_size[1] - 2),
(bbox[0] + cat_size[0], bbox[1] - 2),
c, -1)
im = cv2ImgAddText(im, txt, bbox[0]+2, bbox[1]-cat_size[1] -2, (0, 0, 0), 10)
return im
def parse_rec(filename, score=False):
""" Parse a PASCAL VOC xml file """
tree = ET.parse(filename)
objects = []
for obj in tree.findall('object'):
obj_struct = {}
if score:
obj_struct['score'] = obj.find('score').text
obj_struct['name'] = obj.find('name').text
bbox = obj.find('bndbox')
xmin = int(float(bbox.find('xmin').text))
ymin = int(float(bbox.find('ymin').text))
xmax = int(float(bbox.find('xmax').text))
ymax = int(float(bbox.find('ymax').text))
# assert xmin >= xmax
# assert ymin >= ymax
if (xmin >= xmax) or (ymin >= ymax):
print("Warning {} , bbox: xmin_{}, ymin_{}, xmax_{}, ymax_{}".format(
os.path.basename(filename), xmin, ymin, xmax, ymax
))
obj_struct['bbox'] = [xmin, ymin, xmax, ymax]
objects.append(obj_struct)
return objects
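# Each entry returned by parse_rec looks like the following (illustrative values;
# the 'score' key is only present when score=True):
#   {'name': 'car', 'score': '0.98', 'bbox': [48, 240, 195, 371]}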
def pathExit(path):
if isinstance(path, list):
for ipath in path:
if not os.path.exists(ipath):
os.makedirs(ipath)
else:
if not os.path.exists(path):
print("create new folder: {}".format(path))
os.makedirs(path) |
py | b40d6d63ce8e031823fa3ac051ffc5bfc9bc0bfe | import logging
import os.path
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-22s %(levelname)-8s %(message)s', datefmt='%m-%d %H:%M')
def create(namespaces_list, namespace, parent):
if namespace not in namespaces_list:
namespaces_list.append(dict(name=namespace, parent=parent, vars=[]))
def add(namespaces_list, namespace, var):
for i in namespaces_list:
if i['name'] == namespace:
i['vars'].append(var)
def get(namespaces_list, namespace, var):
namespace_found, var_found, parent = find_var(namespaces_list, namespace, var)
if not namespace_found:
print('None')
else:
if var_found:
print(namespace)
else:
# if the variable was not found in the given namespace, search its parent
if not parent:
# not found in the given namespace and it has no parent (it is 'global')
print('None')
else:
while parent and not var_found:
pp = parent
namespace_found, var_found, parent = find_var(namespaces_list, pp, var)
if namespace_found is None:
print('None')
else:
if var_found:
print(pp)
else:
if parent is None:
print('None')
def find_var(namespaces_list, namespace, var):
namespace_found = False
var_found = False
for i in namespaces_list:
if i['name'] == namespace:
namespace_found = True
parent = i.get('parent')
if len(i['vars']) > 0:
for j in i['vars']:
if j == var:
var_found = True
return namespace_found, var_found, parent
return namespace_found, var_found, parent
def emulator():
n = int(input())
namespaces_list = [dict(name='global', parent=None, vars=[]), ]
for i in range(n):
func, namespace, arg = map(str, input().split())
if func == 'create':
create(namespaces_list, namespace, arg)
logger.info(f'namespaces list: {namespaces_list}')
if func == 'add':
add(namespaces_list, namespace, arg)
logger.info(f'namespaces list: {namespaces_list}')
if func == 'get':
get(namespaces_list, namespace, arg)
logger.info(f'namespaces list: {namespaces_list}')
if __name__ == '__main__':
emulator()
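# Example session (illustrative). Input on the left, printed output on the right:
#   4
#   create foo global
#   add foo a
#   get foo a      -> foo
#   get foo b      -> None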
|
py | b40d6f0d434d748d27756dd2423122d4b341d366 | """This file implements the gym environment of minitaur.
"""
import math
import random
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
from gym import spaces
import numpy as np
from pybullet_envs.minitaur.envs import minitaur_gym_env
import pybullet_data
GOAL_DISTANCE_THRESHOLD = 0.8
GOAL_REWARD = 1000.0
REWARD_SCALING = 1e-3
INIT_BALL_ANGLE = math.pi / 3
INIT_BALL_DISTANCE = 5.0
ACTION_EPS = 0.01
class MinitaurBallGymEnv(minitaur_gym_env.MinitaurGymEnv):
"""The gym environment for the minitaur and a ball.
It simulates a minitaur (a quadruped robot) and a ball. The state space
includes the angle and distance of the ball relative to minitaur's base.
The action space is a steering command. The reward function is based
on how far the ball is relative to the minitaur's base.
"""
def __init__(self,
urdf_root=pybullet_data.getDataPath(),
self_collision_enabled=True,
pd_control_enabled=False,
leg_model_enabled=True,
on_rack=False,
render=False):
"""Initialize the minitaur and ball gym environment.
Args:
urdf_root: The path to the urdf data folder.
self_collision_enabled: Whether to enable self collision in the sim.
pd_control_enabled: Whether to use PD controller for each motor.
leg_model_enabled: Whether to use a leg motor to reparameterize the action
space.
on_rack: Whether to place the minitaur on rack. This is only used to debug
the walking gait. In this mode, the minitaur's base is hanged midair so
that its walking gait is clearer to visualize.
render: Whether to render the simulation.
"""
super(MinitaurBallGymEnv, self).__init__(urdf_root=urdf_root,
self_collision_enabled=self_collision_enabled,
pd_control_enabled=pd_control_enabled,
leg_model_enabled=leg_model_enabled,
on_rack=on_rack,
render=render)
self._cam_dist = 2.0
self._cam_yaw = -70
self._cam_pitch = -30
self.action_space = spaces.Box(np.array([-1]), np.array([1]))
self.observation_space = spaces.Box(np.array([-math.pi, 0]), np.array([math.pi, 100]))
def reset(self):
self._ball_id = 0
super(MinitaurBallGymEnv, self).reset()
self._init_ball_theta = random.uniform(-INIT_BALL_ANGLE, INIT_BALL_ANGLE)
self._init_ball_distance = INIT_BALL_DISTANCE
self._ball_pos = [
self._init_ball_distance * math.cos(self._init_ball_theta),
self._init_ball_distance * math.sin(self._init_ball_theta), 1
]
self._ball_id = self._pybullet_client.loadURDF(
"%s/sphere_with_restitution.urdf" % self._urdf_root, self._ball_pos)
return self._get_observation()
def _get_observation(self):
world_translation_minitaur, world_rotation_minitaur = (
self._pybullet_client.getBasePositionAndOrientation(self.minitaur.quadruped))
world_translation_ball, world_rotation_ball = (
self._pybullet_client.getBasePositionAndOrientation(self._ball_id))
minitaur_translation_world, minitaur_rotation_world = (self._pybullet_client.invertTransform(
world_translation_minitaur, world_rotation_minitaur))
minitaur_translation_ball, _ = (self._pybullet_client.multiplyTransforms(
minitaur_translation_world, minitaur_rotation_world, world_translation_ball,
world_rotation_ball))
distance = math.sqrt(minitaur_translation_ball[0]**2 + minitaur_translation_ball[1]**2)
angle = math.atan2(minitaur_translation_ball[0], minitaur_translation_ball[1])
self._observation = [angle - math.pi / 2, distance]
return self._observation
def _transform_action_to_motor_command(self, action):
if self._leg_model_enabled:
for i, action_component in enumerate(action):
if not (-self._action_bound - ACTION_EPS <= action_component <=
self._action_bound + ACTION_EPS):
raise ValueError("{}th action {} out of bounds.".format(i, action_component))
action = self._apply_steering_to_locomotion(action)
action = self.minitaur.ConvertFromLegModel(action)
return action
def _apply_steering_to_locomotion(self, action):
# A hardcoded feedforward walking controller based on sine functions.
amplitude_swing = 0.5
amplitude_extension = 0.5
speed = 200
steering_amplitude = 0.5 * action[0]
t = self.minitaur.GetTimeSinceReset()
a1 = math.sin(t * speed) * (amplitude_swing + steering_amplitude)
a2 = math.sin(t * speed + math.pi) * (amplitude_swing - steering_amplitude)
a3 = math.sin(t * speed) * amplitude_extension
a4 = math.sin(t * speed + math.pi) * amplitude_extension
action = [a1, a2, a2, a1, a3, a4, a4, a3]
return action
def _distance_to_ball(self):
world_translation_minitaur, _ = (self._pybullet_client.getBasePositionAndOrientation(
self.minitaur.quadruped))
world_translation_ball, _ = (self._pybullet_client.getBasePositionAndOrientation(
self._ball_id))
distance = math.sqrt((world_translation_ball[0] - world_translation_minitaur[0])**2 +
(world_translation_ball[1] - world_translation_minitaur[1])**2)
return distance
def _goal_state(self):
return self._observation[1] < GOAL_DISTANCE_THRESHOLD
def _reward(self):
reward = -self._observation[1]
if self._goal_state():
reward += GOAL_REWARD
return reward * REWARD_SCALING
def _termination(self):
if self._goal_state():
return True
return False
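# Usage sketch (illustrative; assumes pybullet, pybullet_data and the minitaur
# URDF assets are available):
#   env = MinitaurBallGymEnv(render=False)
#   observation = env.reset()          # [relative ball angle, distance]
#   for _ in range(100):
#       observation, reward, done, _ = env.step([0.0])   # steering in [-1, 1]
#       if done:
#           break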
|
py | b40d703451024f46b1588f13f47358ab116a78d6 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v8.resources',
marshal='google.ads.googleads.v8',
manifest={
'HotelPerformanceView',
},
)
class HotelPerformanceView(proto.Message):
r"""A hotel performance view.
Attributes:
resource_name (str):
Output only. The resource name of the hotel performance
view. Hotel performance view resource names have the form:
``customers/{customer_id}/hotelPerformanceView``
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
__all__ = tuple(sorted(__protobuf__.manifest))
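# Illustrative usage (a sketch; the customer id is hypothetical, keyword
# construction is provided by the proto-plus runtime imported above):
#   view = HotelPerformanceView(resource_name="customers/1234567890/hotelPerformanceView")
#   print(view.resource_name)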
|
py | b40d71682b44e131fda9bd2ce0257ab77ca57343 | """
Copyright (c) 2016-2019 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
from bs4 import BeautifulSoup as Soup
from bs4.element import Tag, NavigableString
class RichMediaRenderer(object):
def __init__(self, callback):
self._client = callback
def _default_output(self):
pass
def _concat_result(self, first, second):
pass
def render(self, client_context, message):
if message:
message = "<content>%s</content>"%message
soup = Soup(message, "lxml-xml")
if soup.children:
output = self._default_output()
for outer_child in soup.children:
if outer_child.children:
for child in outer_child.children:
if isinstance(child, Tag):
result = self.parse_tag(client_context, child)
if result is not None:
output = self._concat_result(output, result)
elif isinstance(child, NavigableString):
result = self.parse_text(client_context, child)
if result is not None:
output = self._concat_result(output, result)
return output
return self.parse_text(client_context, message)
return None
def parse_tag(self, client_context, tag):
if tag.name == 'button':
return self.parse_button(client_context, tag)
elif tag.name == 'link':
return self.parse_link(client_context, tag)
elif tag.name == 'image':
return self.parse_image(client_context, tag)
elif tag.name == 'video':
return self.parse_video(client_context, tag)
elif tag.name == 'card':
return self.parse_card(client_context, tag)
elif tag.name == 'carousel':
return self.parse_carousel(client_context, tag)
elif tag.name == 'reply':
return self.parse_reply(client_context, tag)
elif tag.name == 'delay':
return self.parse_delay(client_context, tag)
elif tag.name == 'split':
return self.parse_split(client_context, tag)
elif tag.name == 'list':
return self.parse_list(client_context, tag)
elif tag.name == 'olist':
return self.parse_olist(client_context, tag)
elif tag.name == 'location':
return self.parse_location(client_context, tag)
elif tag.name == 'tts':
return self.parse_tts(client_context, tag)
else:
return self.parse_xml(client_context, tag)
def parse_text(self, client_context, text):
return self.handle_text(client_context, {"type": "text", "text": text})
def parse_xml(self, client_context, tag):
text = str(tag)
return self.handle_text(client_context, {"type": "text", "text": text})
def extract_button_info(self, tag):
text = None
url = None
postback = None
for child in tag.children:
if child.name is None:
pass
elif child.name == 'text':
text = child.text
elif child.name == 'url':
url = child.text
elif child.name == 'postback':
postback = child.text
else:
print("Unknown button tag %s" % child.name)
return {"type": "button", "text": text, "url": url, "postback": postback}
def parse_button(self, client_context, tag):
button = self.extract_button_info(tag)
if button['url'] is not None:
return self.handle_url_button(client_context, button)
else:
return self.handle_postback_button(client_context, button)
def extract_link_info(self, tag):
text = None
url = None
for child in tag.children:
if child.name is None:
pass
elif child.name == 'text':
text = child.text
elif child.name == 'url':
url = child.text
else:
print("Unknown link tag %s" % child.name)
return {"type": "link", "text": text, "url": url}
def parse_link(self, client_context, tag):
link = self.extract_link_info(tag)
return self.handle_link(client_context, link)
def parse_image(self, client_context, tag):
return self.handle_image(client_context, {"type": "image", "url": tag.text.strip()})
def parse_video(self, client_context, tag):
return self.handle_video(client_context, {"type": "video", "url": tag.text.strip()})
def extract_card_info(self, tag):
image = None
title = None
subtitle = None
buttons = []
for child in tag.children:
if child.name is None:
pass
elif child.name == 'image':
image = child.text
elif child.name == 'title':
title = child.text
elif child.name == 'subtitle':
subtitle = child.text
elif child.name == 'button':
button = self.extract_button_info(child)
buttons.append(button)
else:
print("Unknown card tag [%s]" % child.name)
return {"type": "card", "image": image, "title": title, "subtitle": subtitle, "buttons": buttons}
def parse_card(self, client_context, tag):
card = self.extract_card_info(tag)
return self.handle_card(client_context, card)
def extract_carousel_info(self, tag):
cards = []
for child in tag.children:
if child.name is None:
pass
elif child.name == 'card':
card = self.extract_card_info(child)
cards.append(card)
else:
print("Unknown carousel tag %s" % child.name)
return {"type": "carousel", "cards": cards}
def parse_carousel(self, client_context, tag):
carousel = self.extract_carousel_info(tag)
return self.handle_carousel(client_context, carousel)
def extract_reply_info(self, tag):
text = None
postback = None
for child in tag.children:
if child.name is None:
pass
elif child.name == 'text':
text = child.text.strip()
elif child.name == 'postback':
postback = child.text.strip()
else:
print("Unknown reply tag %s" % child.name)
return {"type": "reply", "text": text, "postback": postback}
def parse_reply(self, client_context, tag):
reply = self.extract_reply_info(tag)
return self.handle_reply(client_context, reply)
def extract_delay_info(self, tag):
seconds = None
for child in tag.children:
if child.name is None:
pass
elif child.name == 'seconds':
seconds = child.text.strip()
return {"type": "delay", "seconds": seconds}
def parse_delay(self, client_context, tag):
delay = self.extract_delay_info(tag)
return self.handle_delay(client_context, delay)
def parse_split(self, client_context, tag):
return self.handle_split(client_context, {"type": "split"})
def extract_item_info(self, tag):
if tag.name == 'button':
return self.extract_reply_info(tag)
elif tag.name == 'link':
return self.extract_link_info(tag)
elif tag.name == 'image':
return {"type": "image", "url": tag.text}
elif tag.name == 'video':
return {"type": "video", "url": tag.text}
elif tag.name == 'card':
return self.extract_card_info(tag)
elif tag.name == 'carousel':
return self.extract_carousel_info(tag)
elif tag.name == 'reply':
return self.extract_reply_info(tag)
elif tag.name == 'delay':
return self.extract_delay_info(tag)
elif tag.name == 'split':
# Not allowed
pass
elif tag.name == 'list':
return self.extract_list_info(tag)
elif tag.name == 'olist':
return self.extract_list_info(tag)
elif tag.name == 'location':
# Not allowed
pass
else:
if isinstance(tag, Tag):
text = tag.text
elif isinstance(tag, NavigableString):
text = tag
return {"type": "text", "text": text}
def extract_list_info(self, tag):
items = []
for child in tag.children:
if child.name is None:
pass
elif child.name == 'item':
for childs_child in child.children:
if isinstance(childs_child, Tag):
items.append(self.extract_item_info(childs_child))
elif isinstance(childs_child, NavigableString):
childs_child_text = childs_child.strip()
if childs_child_text:
items.append({'type': 'text', 'text': childs_child_text})
else:
print("Unknown list tag %s" % child.name)
return {'type': 'list', 'items': items}
def parse_list(self, client_context, tag):
list = self.extract_list_info(tag)
return self.handle_list(client_context, list)
def parse_olist(self, client_context, tag):
list = self.extract_list_info(tag)
return self.handle_ordered_list(client_context, list)
def parse_location(self, client_context, tag):
return self.handle_location(client_context, {"type": "location"})
def parse_tts(self, client_context, tag):
return self.handle_tts(client_context, {"type": "tts", "text": tag.text})
######################################################################################################
# You need to implement all of these and decide how to display the various rich media elements
#
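# A minimal text-only subclass might look like this (an illustrative sketch,
# not part of the original class):
#
#   class ConsoleRenderer(RichMediaRenderer):
#       def _default_output(self):
#           return ""
#       def _concat_result(self, first, second):
#           return first + second
#       def handle_text(self, client_context, text):
#           return text["text"]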
def handle_text(self, client_context, text):
return None
def handle_url_button(self, client_context, button):
return None
def handle_postback_button(self, client_context, button):
return None
def handle_link(self, client_context, link):
return None
def handle_image(self, client_context, image):
return None
def handle_video(self, client_context, image):
return None
def handle_card(self, client_context, card):
return None
def handle_carousel(self, client_context, carousel):
return None
def handle_reply(self, client_context, reply):
return None
def handle_delay(self, client_context, delay):
return None
def handle_split(self, client_contex, split):
return None
def handle_list(self, client_context, list):
return None
def handle_ordered_list(self, client_context, list):
return None
def handle_location(self, client_context, location):
return None
def handle_tts(self, client_context, location):
return None
|
py | b40d718117bc51aa9bfef7dfa547053105f0af30 | from typing import (
Dict, MutableMapping, Optional
)
from dbt.contracts.graph.parsed import ParsedMacro
from dbt.exceptions import raise_duplicate_macro_name, raise_compiler_error
from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME
from dbt.clients.jinja import MacroGenerator
MacroNamespace = Dict[str, ParsedMacro]
# This class builds the MacroResolver by adding macros
# to various categories for finding macros in the right order,
# so that higher precedence macros are found first.
# This functionality is also provided by the MacroNamespace,
# but the intention is to eventually replace that class.
# This enables us to get the macro unique_id without
# processing every macro in the project.
# Note: the root project macros override everything in the
# dbt internal projects. External projects (dependencies) will
# use their own macros first, then pull from the root project
# followed by dbt internal projects.
class MacroResolver:
def __init__(
self,
macros: MutableMapping[str, ParsedMacro],
root_project_name: str,
internal_package_names,
) -> None:
self.root_project_name = root_project_name
self.macros = macros
# internal packages comes from get_adapter_package_names
self.internal_package_names = internal_package_names
# To be filled in from macros.
self.internal_packages: Dict[str, MacroNamespace] = {}
self.packages: Dict[str, MacroNamespace] = {}
self.root_package_macros: MacroNamespace = {}
# add the macros to internal_packages, packages, and root packages
self.add_macros()
self._build_internal_packages_namespace()
self._build_macros_by_name()
def _build_internal_packages_namespace(self):
# Iterate in reverse-order and overwrite: the packages that are first
# in the list are the ones we want to "win".
self.internal_packages_namespace: MacroNamespace = {}
for pkg in reversed(self.internal_package_names):
if pkg in self.internal_packages:
# Turn the internal packages into a flat namespace
self.internal_packages_namespace.update(
self.internal_packages[pkg])
# search order:
# local_namespace (package of particular node), not including
# the internal packages or the root package
# This means that within an extra package, it uses its own macros
# root package namespace
# non-internal packages (that aren't local or root)
# dbt internal packages
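# For example (with hypothetical names): if a macro called 'my_macro' is defined
# both in an internal dbt package and in the root project, the root project's
# version ends up in macros_by_name, because root package macros are written
# last in _build_macros_by_name below and overwrite earlier entries.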
def _build_macros_by_name(self):
macros_by_name = {}
# all internal packages (already in the right order)
for macro in self.internal_packages_namespace.values():
macros_by_name[macro.name] = macro
# non-internal packages
for fnamespace in self.packages.values():
for macro in fnamespace.values():
macros_by_name[macro.name] = macro
# root package macros
for macro in self.root_package_macros.values():
macros_by_name[macro.name] = macro
self.macros_by_name = macros_by_name
def _add_macro_to(
self,
package_namespaces: Dict[str, MacroNamespace],
macro: ParsedMacro,
):
if macro.package_name in package_namespaces:
namespace = package_namespaces[macro.package_name]
else:
namespace = {}
package_namespaces[macro.package_name] = namespace
if macro.name in namespace:
raise_duplicate_macro_name(
macro, macro, macro.package_name
)
package_namespaces[macro.package_name][macro.name] = macro
def add_macro(self, macro: ParsedMacro):
macro_name: str = macro.name
# internal macros (from plugins) will be processed separately from
# project macros, so store them in a different place
if macro.package_name in self.internal_package_names:
self._add_macro_to(self.internal_packages, macro)
else:
# if it's not an internal package
self._add_macro_to(self.packages, macro)
# add to root_package_macros if it's in the root package
if macro.package_name == self.root_project_name:
self.root_package_macros[macro_name] = macro
def add_macros(self):
for macro in self.macros.values():
self.add_macro(macro)
def get_macro(self, local_package, macro_name):
local_package_macros = {}
if (local_package not in self.internal_package_names and
local_package in self.packages):
local_package_macros = self.packages[local_package]
# First: search the local packages for this macro
if macro_name in local_package_macros:
return local_package_macros[macro_name]
# Now look up in the standard search order
if macro_name in self.macros_by_name:
return self.macros_by_name[macro_name]
return None
def get_macro_id(self, local_package, macro_name):
macro = self.get_macro(local_package, macro_name)
if macro is None:
return None
else:
return macro.unique_id
# Currently this is just used by test processing in the schema
# parser (in connection with the MacroResolver). Future work
# will extend the use of these classes to other parsing areas.
# One of the features of this class compared to the MacroNamespace
# is that you can limit the number of macros provided to the
# context dictionary in the 'to_dict' manifest method.
class TestMacroNamespace:
def __init__(
self, macro_resolver, ctx, node, thread_ctx, depends_on_macros
):
self.macro_resolver = macro_resolver
self.ctx = ctx
self.node = node # can be none
self.thread_ctx = thread_ctx
self.local_namespace = {}
self.project_namespace = {}
if depends_on_macros:
dep_macros = []
self.recursively_get_depends_on_macros(depends_on_macros, dep_macros)
for macro_unique_id in dep_macros:
if macro_unique_id in self.macro_resolver.macros:
# Split up the macro unique_id to get the project_name
(_, project_name, macro_name) = macro_unique_id.split('.')
# Save the plain macro_name in the local_namespace
macro = self.macro_resolver.macros[macro_unique_id]
macro_gen = MacroGenerator(
macro, self.ctx, self.node, self.thread_ctx,
)
self.local_namespace[macro_name] = macro_gen
# We also need the two part macro name
if project_name not in self.project_namespace:
self.project_namespace[project_name] = {}
self.project_namespace[project_name][macro_name] = macro_gen
def recursively_get_depends_on_macros(self, depends_on_macros, dep_macros):
for macro_unique_id in depends_on_macros:
dep_macros.append(macro_unique_id)
if macro_unique_id in self.macro_resolver.macros:
macro = self.macro_resolver.macros[macro_unique_id]
if macro.depends_on.macros:
self.recursively_get_depends_on_macros(macro.depends_on.macros, dep_macros)
def get_from_package(
self, package_name: Optional[str], name: str
) -> Optional[MacroGenerator]:
macro = None
if package_name is None:
macro = self.macro_resolver.macros_by_name.get(name)
elif package_name == GLOBAL_PROJECT_NAME:
macro = self.macro_resolver.internal_packages_namespace.get(name)
elif package_name in self.macro_resolver.packages:
macro = self.macro_resolver.packages[package_name].get(name)
else:
raise_compiler_error(
f"Could not find package '{package_name}'"
)
if not macro:
return None
macro_func = MacroGenerator(
macro, self.ctx, self.node, self.thread_ctx
)
return macro_func
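# Illustrative sketch (not part of the original module): how the lookup
# precedence defined above plays out. `resolver` is a hypothetical, already
# populated MacroResolver; the package and macro names are made up.
#
#   # get_macro first searches the calling project's own (non-internal) package
#   resolver.get_macro('my_project', 'generate_alias_name')
#   # ...and only then falls back to macros_by_name. Because the root project's
#   # macros are written into macros_by_name last, they win on name collisions.
#   resolver.get_macro('some_dependency', 'generate_alias_name')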
|
py | b40d722b8dd6902e644be575309b58d2ece3393f | from mo.cli import main
main()
|
py | b40d72ca66a12f62e4cad18fb8dda7c881fe8c9b | """
File: graphgui.py
Author: Delano Lourenco
Repo: https://github.com/3ddelano/graph-visualizer-python
License: MIT
"""
import os
from time import time, sleep
import tkinter.filedialog as filedialog
import tkinter as tk
from .graph_data.graph import Graph
from .constants import START_NODE_COLOR, END_NODE_COLOR, SCREEN_BG_COLOR
from .animations.path_animation import PathAnimation
from .animations.bfs_animation import BFSAnimation
from .animations.dfs_animation import DFSAnimation
from .interfaces.drawable import Drawable
from .edgegui import EdgeGUI
from .nodegui import NodeGUI
VERSION = "v.1.2"
DOUBLE_CLICK_TIME = 0.3
NODELIST_FILEPATH = "nodelist.csv"
EDGELIST_FILEPATH = "edgelist.csv"
ANIMATION_FRAME_DURATION = 9
class GraphGUI(Graph, Drawable):
def __init__(self, canvas):
# Selected node
self.selected_node = None
self.dragging_node = None
# Selected edge
self.selected_edge = None
self.dragging_edge = None
# [nodeA_pos_x, nodeA_pos_y, nodeB_pos_x, nodeB_pos_y, drag_start_x, drag_start_y]
self.dragging_edge_offsets = [0, 0, 0, 0, 0, 0]
# Double click time
self.last_leftclick_time = time()
self.last_rightclick_time = time()
self.animation = None
self.frames = 0
self.help_visible = False
self.node_id_visible = False
# Path finding UI
self.path_algorithm_name = tk.StringVar(value="dijkstra")
self.start_node = None
self.goal_node = None
self.path = []
super().__init__()
self.init_ui(canvas)
def deselect_nodes(self):
if self.selected_node:
self.selected_node.is_selected = False
self.selected_node = None
if self.dragging_node:
self.dragging_node.is_dragging = False
self.dragging_node = None
def deselect_edges(self):
if self.selected_edge:
self.selected_edge.is_selected = False
self.selected_edge = None
if self.dragging_edge:
self.dragging_edge.is_dragging = False
self.dragging_edge = None
def deselect_path(self):
for i in range(len(self.path)):
node = self.get_node_by_id(self.path[i][0])
edge = self.path[i][1]
node.is_selected = False
if edge:
edge.is_selected = False
edge.nodeA.is_selected = False
edge.nodeB.is_selected = False
if self.start_node and self.goal_node:
self.start_node = None
self.goal_node = None
self.path = []
def get_clicked_node(self, x, y):
for node in self.nodes:
if node.is_clicked(x, y):
return node
return None
def get_clicked_edge(self, x, y):
for edge in self.edges:
if edge.is_clicked(x, y):
return edge
return None
def on_right_click(self, x, y):
self.deselect_path()
clicked_edge = self.get_clicked_edge(x, y)
if not clicked_edge:
# Empty area was right clicked
self.deselect_edges()
self.last_rightclick_time = time()
return
# Got a right click on an edge
self.deselect_nodes()
is_double_click = False
time_since_last_rightclick = time() - self.last_rightclick_time
if time_since_last_rightclick <= DOUBLE_CLICK_TIME:
# Got a double click on an edge
is_double_click = True
if is_double_click:
# It was a double click
            # If an edge was previously selected, deselect it
if self.selected_edge:
self.selected_edge.is_selected = False
self.selected_edge = None
# Start dragging this edge
clicked_edge.is_dragging = True
self.dragging_edge = clicked_edge
self.dragging_edge_offsets = [
clicked_edge.nodeA.pos[0],
clicked_edge.nodeA.pos[1],
clicked_edge.nodeB.pos[0],
clicked_edge.nodeB.pos[1],
x,
y,
]
else:
# It was a single right click
if self.dragging_edge:
                # An edge was being dragged; stop the drag
self.dragging_edge.is_dragging = False
self.dragging_edge = None
elif self.selected_edge:
# There was already a selected edge
if clicked_edge == self.selected_edge:
# The same edge was selected again
# Deselect it
self.selected_edge.is_selected = False
self.selected_edge = None
else:
# A different edge was selected
# Deselect the selected edge and select the clicked edge
self.selected_edge.is_selected = False
self.selected_edge = clicked_edge
self.selected_edge.is_selected = True
else:
                # There was no selected edge
                # Store the edge as the selected edge
self.selected_edge = clicked_edge
clicked_edge.is_selected = True
self.last_rightclick_time = time()
def on_left_click(self, x, y):
self.deselect_path()
clicked_node = self.get_clicked_node(x, y)
self.deselect_edges()
if clicked_node:
# A node was left clicked
time_since_last_leftclick = time() - self.last_leftclick_time
if time_since_last_leftclick <= DOUBLE_CLICK_TIME:
# Got a double left click
# Start dragging this node
clicked_node.is_dragging = True
self.dragging_node = clicked_node
# If a node was selected deselect it
if self.selected_node:
self.selected_node.is_selected = False
self.selected_node = None
else:
# It was a single click
if self.dragging_node:
                    # A node was being dragged; stop the drag
self.dragging_node.is_dragging = False
self.dragging_node = None
elif self.selected_node:
# There was already a selected node
if clicked_node == self.selected_node:
# The same node was clicked again
# Deselect it
self.selected_node.is_selected = False
self.selected_node = None
else:
# A different node was clicked
                        # Create an edge between the two nodes if there isn't one
edge_exists = self.get_edge_between_nodes(
self.selected_node, clicked_node
)
if not edge_exists:
edge = EdgeGUI(self.selected_node, clicked_node)
self.add_edge(edge)
# Deselect the selected node and select the clicked node
self.selected_node.is_selected = False
self.selected_node = clicked_node
self.selected_node.is_selected = True
else:
# There was no selected node
# Store the node as selected node
self.selected_node = clicked_node
clicked_node.is_selected = True
else:
# Empty area was clicked
node = NodeGUI(x, y)
self.add_node(node)
if self.selected_node:
# A node is already selected
# Draw an edge from selected node to new clicked node
edge = EdgeGUI(self.selected_node, node)
self.add_edge(edge)
# Deselect the selected node
self.selected_node.is_selected = False
# Select the new node
self.selected_node = node
self.selected_node.is_selected = True
else:
# There was no selected node
# Mark the new node as the selected one
node.is_selected = True
self.selected_node = node
self.last_leftclick_time = time()
def ondrag(self, x, y):
if self.dragging_node:
# Node is being dragged
self.dragging_node.pos[0] = x
self.dragging_node.pos[1] = y
elif self.dragging_edge:
# Edge is being dragged
nodeA = self.dragging_edge.nodeA
nodeB = self.dragging_edge.nodeB
offsets = self.dragging_edge_offsets
nodeA.pos[0] = offsets[0] + x - offsets[4]
nodeA.pos[1] = offsets[1] + y - offsets[5]
nodeB.pos[0] = offsets[2] + x - offsets[4]
nodeB.pos[1] = offsets[3] + y - offsets[5]
def on_delete(self):
if self.selected_node:
# Delete the node
node = self.selected_node
node.is_selected = False
self.delete_node(node)
# Delete connected edges
connected_edges = self.get_adjacent_edges(node)
for edge in connected_edges:
self.delete_edge(edge)
self.selected_node = None
elif self.selected_edge:
# Delete the edge
edge = self.selected_edge
edge.is_selected = False
self.delete_edge(edge)
self.selected_edge = None
def draw(self, tur, canvas):
# Check if animation ended
if self.animation and self.animation.is_ended():
self.animation = None
self.deselect_path()
sleep(1)
# Animate the animation if any
animation_nodes = []
animation_edges = []
if self.animation:
if self.frames % ANIMATION_FRAME_DURATION == 0:
                # Take an animation step
self.animation.one_step()
# Get the drawn nodes and edges from animation
animation_nodes = self.animation.get_drawn_nodes()
animation_edges = self.animation.get_drawn_edges()
# Draw all edges
for edge in self.edges:
if not isinstance(edge, EdgeGUI):
continue
animation_drew_edge = False
for edge_data in animation_edges:
if edge == edge_data["edge"]:
edge.draw(tur, color=edge_data["color"])
animation_drew_edge = True
break
if not animation_drew_edge:
edge.draw(tur)
edge.draw_weight(canvas)
# Draw all nodes
for node in self.nodes:
if not isinstance(node, NodeGUI):
continue
animation_drew_node = False
for node_data in animation_nodes:
if node == node_data["node"]:
node.draw(tur, color=node_data["color"])
animation_drew_node = True
break
if not animation_drew_node:
if node == self.start_node:
node.draw(tur, color=START_NODE_COLOR)
elif node == self.goal_node:
node.draw(tur, color=END_NODE_COLOR)
else:
node.draw(tur)
if self.node_id_visible:
node.draw_id(canvas)
self.frames += 1
# Show help text
self.draw_help(canvas)
def draw_help(self, canvas):
main_lines = ["H key - Toggle help text"]
lines = [
"Single Left Click - Add node / Select Node",
"Single Right Click - Select Edge",
"Double Left Click - Move Node",
"Double Right Click - Move Edge",
"",
"D key - Delete Node/Edge",
"W key - Update Edge Weight",
"S key - Save Data",
"L key - Load Data",
"B key - Start BFS at selected node",
"N key - Start DFS at selected node",
"F key - Toggle node Id visibility",
"",
"github.com/3ddelano/graph-visualizer-python",
]
font_size = 10
font = f"Arial {font_size} normal"
draw_y = 50
for line in main_lines:
# Draw the text
canvas.create_text(
0, draw_y, text=line, font=font, fill="white", anchor="w"
)
draw_y += font_size + 10
if not self.help_visible:
return
for line in lines:
# Draw the text
canvas.create_text(
0, draw_y, text=line, font=font, fill="white", anchor="w"
)
draw_y += font_size + 10
def on_bfs_start(self):
# Check if a node is selected
if not self.selected_node:
print("No node is selected for BFS")
tk.messagebox.showerror("Error", "No node is selected for BFS")
return
# Start bfs from the selected node
print("Starting BFS at node id=", self.selected_node.id)
self.animation = BFSAnimation(self)
self.animation.set_start_node(self.selected_node)
self.deselect_nodes()
tk.messagebox.showinfo("BFS result", self.animation.get_result_string())
def on_dfs_start(self):
# Check if a node is selected
if not self.selected_node:
print("No node is selected for DFS")
tk.messagebox.showerror("Error", "No node is selected for DFS")
return
# Start dfs from the selected node
print("Starting DFS at node id=", self.selected_node.id)
self.animation = DFSAnimation(self)
self.animation.set_start_node(self.selected_node)
self.deselect_nodes()
tk.messagebox.showinfo("DFS result", self.animation.get_result_string())
def on_help_toggle(self):
self.help_visible = not self.help_visible
def on_nodeid_toggle(self):
self.node_id_visible = not self.node_id_visible
def on_update_weight(self):
if not self.selected_edge:
print("No edge is selected to set weight")
tk.messagebox.showerror(
"Set Weight Error", "No edge is selected to set weight"
)
return
default_weight = round(
self.get_euclidean_distance(
self.selected_edge.nodeA, self.selected_edge.nodeB
),
2,
)
new_weight = tk.simpledialog.askstring(
title="Set Edge Weight",
prompt="Enter the new weight for the edge",
initialvalue=str(default_weight),
)
if new_weight is None:
return
try:
new_weight = float(new_weight)
self.update_edge_weight(self.selected_edge, new_weight)
except Exception as e:
print("Invalid weight provided to update edge weight")
tk.messagebox.showerror(
"Update Weight Error",
"Invalid weight. Weight should be a valid number.",
)
return
def on_save(self):
save_folder = filedialog.askdirectory(mustexist=True)
if save_folder == "":
# User cancelled the save
return
success = self.save_to_files(
os.path.join(save_folder, NODELIST_FILEPATH),
os.path.join(save_folder, EDGELIST_FILEPATH),
)
if success:
tk.messagebox.showinfo(
"Saving Graph", "Graph saved to nodelist.csv and edgelist.csv"
)
def on_load(self):
load_folder = filedialog.askdirectory(mustexist=True)
if load_folder == "":
            # User cancelled the load
return
node_path = os.path.join(load_folder, NODELIST_FILEPATH)
edge_path = os.path.join(load_folder, EDGELIST_FILEPATH)
if not os.path.exists(node_path):
tk.messagebox.showerror(
"Loading Graph Error", "nodelist.csv file not found"
)
return
if not os.path.exists(edge_path):
tk.messagebox.showerror(
"Loading Graph Error", "edgelist.csv file not found"
)
return
self.deselect_nodes()
self.deselect_edges()
self.deselect_path()
self.load_from_files(node_path, edge_path)
self.convert_graph_to_gui()
def on_set_start_node(
self,
):
if not self.selected_node:
print("No node is selected")
tk.messagebox.showerror("Set Start Node Error", "No node is selected")
return
self.start_node = self.selected_node
self.deselect_nodes()
tk.messagebox.showinfo("Set Start Node", "Start node set successfully")
def on_set_end_node(
self,
):
if not self.selected_node:
print("No node is selected")
tk.messagebox.showerror("Set Goal Node Error", "No node is selected")
return
self.goal_node = self.selected_node
self.deselect_nodes()
tk.messagebox.showinfo("Set Goal Node", "Goal node set successfully")
def on_find_path(self):
if self.animation:
self.animation = None
# Ensure that start and goal nodes are set
if not self.start_node:
tk.messagebox.showerror("Find Path Error", "Start node not set")
return
if not self.goal_node:
tk.messagebox.showerror("Find Path Error", "Goal node not set")
return
temp_start_node = self.start_node
temp_end_node = self.goal_node
self.deselect_path()
self.start_node = temp_start_node
self.goal_node = temp_end_node
# Array of node ids to be used for the path
self.path = []
self.path = self.find_shortest_path(
self.path_algorithm_name.get(), self.start_node, self.goal_node
)
if len(self.path) < 2:
tk.messagebox.showerror("Find Path Error", "No path found")
return
for i in range(len(self.path)):
node = self.get_node_by_id(self.path[i][0])
edge = self.path[i][1]
node.is_selected = True
if edge:
edge.is_selected = True
def on_anim_find_path(self):
# Ensure that start and goal nodes are set
if not self.start_node:
tk.messagebox.showerror("Find Path Error", "Start node not set")
return
if not self.goal_node:
tk.messagebox.showerror("Find Path Error", "Goal node not set")
return
temp_start_node = self.start_node
temp_end_node = self.goal_node
self.deselect_path()
self.start_node = temp_start_node
self.goal_node = temp_end_node
animate_data = self.animate_shortest_path(
self.path_algorithm_name.get(), self.start_node, self.goal_node
)
path = animate_data["final_path"]
if not path or (path and len(path) < 2):
tk.messagebox.showerror("Animate Path Error", "No path found")
return
edges = animate_data["visited_edges"]
print(f"Starting {self.path_algorithm_name.get()} path animation")
tk.messagebox.showinfo(
"Path Finding Statistics",
f"Number of nodes visited: {len(animate_data['visited_nodes'])}",
)
self.animation = PathAnimation(
self, self.start_node, self.goal_node, path, edges
)
def convert_graph_to_gui(self):
self.nodes = [NodeGUI(node.id, node.pos[0], node.pos[1]) for node in self.nodes]
self.edges = [
EdgeGUI(
edge.id,
self.get_node_by_id(edge.nodeA.id),
self.get_node_by_id(edge.nodeB.id),
edge.weight,
)
for edge in self.edges
]
def init_ui(self, canvas):
frame = tk.Frame(canvas.master.master)
frame.config(bg=SCREEN_BG_COLOR)
frame.place(x=10, y=10)
pad_x = 1
tk.Button(frame, text="Help Text", command=self.on_help_toggle).pack(
padx=pad_x, side=tk.LEFT
)
tk.Button(frame, text="Node Id", command=self.on_nodeid_toggle).pack(
padx=pad_x, side=tk.LEFT
)
tk.Button(frame, text="Load", command=self.on_load).pack(
padx=pad_x, side=tk.LEFT
)
tk.Button(frame, text="Save", command=self.on_save).pack(
padx=pad_x, side=tk.LEFT
)
tk.Button(frame, text="Delete", command=self.on_delete).pack(
padx=pad_x, side=tk.LEFT
)
tk.Button(frame, text="Set Weight", command=self.on_update_weight).pack(
padx=pad_x, side=tk.LEFT
)
tk.Button(frame, text="BFS Anim", command=self.on_bfs_start).pack(
padx=pad_x, side=tk.LEFT
)
tk.Button(frame, text="DFS Anim", command=self.on_dfs_start).pack(
padx=pad_x, side=tk.LEFT
)
tk.Button(frame, text="Set Start Node", command=self.on_set_start_node).pack(
padx=pad_x, side=tk.LEFT
)
tk.Button(frame, text="Set Goal Node", command=self.on_set_end_node).pack(
padx=pad_x, side=tk.LEFT
)
tk.Button(frame, text="Find Path", command=self.on_find_path).pack(
padx=pad_x, side=tk.LEFT
)
tk.Button(frame, text="Anim Find Path", command=self.on_anim_find_path).pack(
padx=pad_x, side=tk.LEFT
)
# Create radio buttons
tk.Radiobutton(
frame,
text="BFS",
variable=self.path_algorithm_name,
value="bfs",
).pack(side=tk.LEFT, padx=(5, 0))
tk.Radiobutton(
frame,
text="DFS",
variable=self.path_algorithm_name,
value="dfs",
).pack(side=tk.LEFT)
tk.Radiobutton(
frame,
text="Dijkstra",
variable=self.path_algorithm_name,
value="dijkstra",
).pack(side=tk.LEFT)
tk.Radiobutton(
frame,
text="A*",
variable=self.path_algorithm_name,
value="astar",
).pack(side=tk.LEFT)
|
py | b40d73a95c7c2a6d2c915debd3aa08eeeec6a16e | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
lut = vtk.vtkLookupTable()
lut.SetHueRange(0.6, 0)
lut.SetSaturationRange(1.0, 0)
lut.SetValueRange(0.5, 1.0)
# Read the data: a height field results
demReader = vtk.vtkDEMReader()
demReader.SetFileName(VTK_DATA_ROOT + "/Data/SainteHelens.dem")
demReader.Update()
lo = demReader.GetOutput().GetScalarRange()[0]
hi = demReader.GetOutput().GetScalarRange()[1]
surface = vtk.vtkImageDataGeometryFilter()
surface.SetInputConnection(demReader.GetOutputPort())
warp = vtk.vtkWarpScalar()
warp.SetInputConnection(surface.GetOutputPort())
warp.SetScaleFactor(1)
warp.UseNormalOn()
warp.SetNormal(0, 0, 1)
normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(warp.GetOutputPort())
normals.SetFeatureAngle(60)
normals.SplittingOff()
demMapper = vtk.vtkPolyDataMapper()
demMapper.SetInputConnection(normals.GetOutputPort())
demMapper.SetScalarRange(lo, hi)
demMapper.SetLookupTable(lut)
demActor = vtk.vtkLODActor()
demActor.SetMapper(demMapper)
# Create some paths
pts = vtk.vtkPoints()
pts.InsertNextPoint(562669, 5.1198e+006, 1992.77)
pts.InsertNextPoint(562801, 5.11618e+006, 2534.97)
pts.InsertNextPoint(562913, 5.11157e+006, 1911.1)
pts.InsertNextPoint(559849, 5.11083e+006, 1681.34)
pts.InsertNextPoint(562471, 5.11633e+006, 2593.57)
pts.InsertNextPoint(563223, 5.11616e+006, 2598.31)
pts.InsertNextPoint(566579, 5.11127e+006, 1697.83)
pts.InsertNextPoint(569000, 5.11127e+006, 1697.83)
lines = vtk.vtkCellArray()
lines.InsertNextCell(3)
lines.InsertCellPoint(0)
lines.InsertCellPoint(1)
lines.InsertCellPoint(2)
lines.InsertNextCell(5)
lines.InsertCellPoint(3)
lines.InsertCellPoint(4)
lines.InsertCellPoint(5)
lines.InsertCellPoint(6)
lines.InsertCellPoint(7)
terrainPaths = vtk.vtkPolyData()
terrainPaths.SetPoints(pts)
terrainPaths.SetLines(lines)
projectedPaths = vtk.vtkProjectedTerrainPath()
projectedPaths.SetInputData(terrainPaths)
projectedPaths.SetSourceConnection(demReader.GetOutputPort())
projectedPaths.SetHeightOffset(25)
projectedPaths.SetHeightTolerance(5)
projectedPaths.SetProjectionModeToNonOccluded()
projectedPaths.SetProjectionModeToHug()
pathMapper = vtk.vtkPolyDataMapper()
pathMapper.SetInputConnection(projectedPaths.GetOutputPort())
paths = vtk.vtkActor()
paths.SetMapper(pathMapper)
paths.GetProperty().SetColor(1, 0, 0)
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(demActor)
ren1.AddActor(paths)
ren1.SetBackground(.1, .2, .4)
iren.SetDesiredUpdateRate(5)
ren1.GetActiveCamera().SetViewUp(0, 0, 1)
ren1.GetActiveCamera().SetPosition(-99900, -21354, 131801)
ren1.GetActiveCamera().SetFocalPoint(41461, 41461, 2815)
ren1.ResetCamera()
ren1.GetActiveCamera().Dolly(1.2)
ren1.ResetCameraClippingRange()
renWin.Render()
iren.Initialize()
#iren.Start()
|
py | b40d744f2be8a849b34090085b9ca847c2967f04 | # Python solution for "Is it even?" codewars question.
# Level: 8 kyu
# Tags: Fundamentals and Numbers.
# Author: Jack Brokenshire
# Date: 29/03/2020
import unittest
def is_even(n):
"""
Determines if the given integer or float input is even.
    :param n: a float or integer value.
    :return: True if the number is even, otherwise False.
"""
return n % 2 == 0
class TestIsEven(unittest.TestCase):
"""Class to test "is_even" function"""
def test_is_even(self):
self.assertEqual(is_even(0), True)
self.assertEqual(is_even(0.5), False)
self.assertEqual(is_even(1), False)
self.assertEqual(is_even(2), True)
self.assertEqual(is_even(-4), True)
self.assertEqual(is_even(15), False)
self.assertEqual(is_even(20), True)
self.assertEqual(is_even(220), True)
self.assertEqual(is_even(222222221), False)
self.assertEqual(is_even(500000000), True)
if __name__ == "__main__":
unittest.main()
|
py | b40d76af0f21adceacdffbc523c02146fde51313 | from flee import flee
from flee.datamanager import handle_refugee_data
import numpy as np
import flee.postprocessing.analysis as a
"""
Generation 1 code. Incorporates only distance, travel always takes one day.
"""
def test_1_agent():
print("Testing basic data handling and simulation kernel.")
flee.SimulationSettings.MinMoveSpeed = 5000.0
flee.SimulationSettings.MaxMoveSpeed = 5000.0
flee.SimulationSettings.MaxWalkSpeed = 42.0
end_time = 10
e = flee.Ecosystem()
l1 = e.addLocation("A", movechance=0.3)
l2 = e.addLocation("B", movechance=0.0)
l3 = e.addLocation("C", movechance=0.0)
l4 = e.addLocation("D", movechance=0.0)
e.linkUp("A", "B", "100.0")
e.linkUp("A", "C", "100.0")
e.linkUp("A", "D", "100.0")
new_refs = 1
# Insert refugee agents
for i in range(0, new_refs):
e.addAgent(location=l1)
for t in range(0, end_time):
# Propagate the model by one time step.
e.evolve()
print("Our single agent is at", e.agents[0].location.name)
print(t, l1.numAgents + l2.numAgents + l3.numAgents + l4.numAgents,
l1.numAgents, l2.numAgents, l3.numAgents, l4.numAgents)
assert t == 9
assert l1.numAgents + l2.numAgents + l3.numAgents + l4.numAgents == 1
print("Test successful!")
if __name__ == "__main__":
test_1_agent()
|
py | b40d78cb986eb46b38951c09f749592dd4b6770c | from django.urls import path
from . import views
urlpatterns = [
path('<int:pk>', views.FeedView.as_view({'get': 'retrieve'})),
path('', views.FeedView.as_view({'get': 'list'})),
]
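# Illustrative alternative (an assumption, since FeedView appears to be a
# ViewSet given the as_view({'get': ...}) action maps above): roughly the same
# list/detail routes could be generated with a router instead of wiring the
# action mapping by hand. Note the router also adds trailing slashes and named
# routes, so it is not a byte-for-byte equivalent.
#
#   from rest_framework.routers import DefaultRouter
#   router = DefaultRouter()
#   router.register('', views.FeedView, basename='feed')
#   urlpatterns = router.urls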
|
py | b40d7904e33e4738325f68a91680c65f8dbc2d89 | from __future__ import annotations
import numbers
from typing import TYPE_CHECKING
import numpy as np
from pandas._libs import (
lib,
missing as libmissing,
)
from pandas._typing import (
Dtype,
DtypeObj,
type_t,
)
from pandas.core.dtypes.common import (
is_list_like,
is_numeric_dtype,
)
from pandas.core.dtypes.dtypes import register_extension_dtype
from pandas.core.dtypes.missing import isna
from pandas.core import ops
from pandas.core.arrays.masked import (
BaseMaskedArray,
BaseMaskedDtype,
)
if TYPE_CHECKING:
import pyarrow
@register_extension_dtype
class BooleanDtype(BaseMaskedDtype):
"""
Extension dtype for boolean data.
.. versionadded:: 1.0.0
.. warning::
BooleanDtype is considered experimental. The implementation and
parts of the API may change without warning.
Attributes
----------
None
Methods
-------
None
Examples
--------
>>> pd.BooleanDtype()
BooleanDtype
"""
name = "boolean"
# https://github.com/python/mypy/issues/4125
# error: Signature of "type" incompatible with supertype "BaseMaskedDtype"
@property
def type(self) -> type: # type: ignore[override]
return np.bool_
@property
def kind(self) -> str:
return "b"
@property
def numpy_dtype(self) -> np.dtype:
return np.dtype("bool")
@classmethod
def construct_array_type(cls) -> type_t[BooleanArray]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
return BooleanArray
def __repr__(self) -> str:
return "BooleanDtype"
@property
def _is_boolean(self) -> bool:
return True
@property
def _is_numeric(self) -> bool:
return True
def __from_arrow__(
self, array: pyarrow.Array | pyarrow.ChunkedArray
) -> BooleanArray:
"""
Construct BooleanArray from pyarrow Array/ChunkedArray.
"""
import pyarrow
if array.type != pyarrow.bool_():
raise TypeError(f"Expected array of boolean type, got {array.type} instead")
if isinstance(array, pyarrow.Array):
chunks = [array]
else:
# pyarrow.ChunkedArray
chunks = array.chunks
results = []
for arr in chunks:
buflist = arr.buffers()
data = pyarrow.BooleanArray.from_buffers(
arr.type, len(arr), [None, buflist[1]], offset=arr.offset
).to_numpy(zero_copy_only=False)
if arr.null_count != 0:
mask = pyarrow.BooleanArray.from_buffers(
arr.type, len(arr), [None, buflist[0]], offset=arr.offset
).to_numpy(zero_copy_only=False)
mask = ~mask
else:
mask = np.zeros(len(arr), dtype=bool)
bool_arr = BooleanArray(data, mask)
results.append(bool_arr)
if not results:
return BooleanArray(
np.array([], dtype=np.bool_), np.array([], dtype=np.bool_)
)
else:
return BooleanArray._concat_same_type(results)
def coerce_to_array(
values, mask=None, copy: bool = False
) -> tuple[np.ndarray, np.ndarray]:
"""
Coerce the input values array to numpy arrays with a mask.
Parameters
----------
values : 1D list-like
mask : bool 1D array, optional
copy : bool, default False
if True, copy the input
Returns
-------
tuple of (values, mask)
"""
if isinstance(values, BooleanArray):
if mask is not None:
raise ValueError("cannot pass mask for BooleanArray input")
values, mask = values._data, values._mask
if copy:
values = values.copy()
mask = mask.copy()
return values, mask
mask_values = None
if isinstance(values, np.ndarray) and values.dtype == np.bool_:
if copy:
values = values.copy()
elif isinstance(values, np.ndarray) and is_numeric_dtype(values.dtype):
mask_values = isna(values)
values_bool = np.zeros(len(values), dtype=bool)
values_bool[~mask_values] = values[~mask_values].astype(bool)
if not np.all(
values_bool[~mask_values].astype(values.dtype) == values[~mask_values]
):
raise TypeError("Need to pass bool-like values")
values = values_bool
else:
values_object = np.asarray(values, dtype=object)
inferred_dtype = lib.infer_dtype(values_object, skipna=True)
integer_like = ("floating", "integer", "mixed-integer-float")
if inferred_dtype not in ("boolean", "empty") + integer_like:
raise TypeError("Need to pass bool-like values")
mask_values = isna(values_object)
values = np.zeros(len(values), dtype=bool)
values[~mask_values] = values_object[~mask_values].astype(bool)
        # if the values were integer-like, validate they were actually 0/1's
if (inferred_dtype in integer_like) and not (
np.all(
values[~mask_values].astype(float)
== values_object[~mask_values].astype(float)
)
):
raise TypeError("Need to pass bool-like values")
if mask is None and mask_values is None:
mask = np.zeros(values.shape, dtype=bool)
elif mask is None:
mask = mask_values
else:
if isinstance(mask, np.ndarray) and mask.dtype == np.bool_:
if mask_values is not None:
mask = mask | mask_values
else:
if copy:
mask = mask.copy()
else:
mask = np.array(mask, dtype=bool)
if mask_values is not None:
mask = mask | mask_values
if values.shape != mask.shape:
raise ValueError("values.shape and mask.shape must match")
return values, mask
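# Illustrative sketch (not part of the pandas source): what coerce_to_array
# returns for a plain Python list containing a missing value.
#
#   values, mask = coerce_to_array([True, False, None])
#   # values -> array([ True, False, False])  (the missing slot is filled with False)
#   # mask   -> array([False, False,  True])  (True marks the missing position)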
class BooleanArray(BaseMaskedArray):
"""
Array of boolean (True/False) data with missing values.
This is a pandas Extension array for boolean data, under the hood
represented by 2 numpy arrays: a boolean array with the data and
a boolean array with the mask (True indicating missing).
BooleanArray implements Kleene logic (sometimes called three-value
logic) for logical operations. See :ref:`boolean.kleene` for more.
    To construct a BooleanArray from generic array-like input, use
:func:`pandas.array` specifying ``dtype="boolean"`` (see examples
below).
.. versionadded:: 1.0.0
.. warning::
BooleanArray is considered experimental. The implementation and
parts of the API may change without warning.
Parameters
----------
values : numpy.ndarray
A 1-d boolean-dtype array with the data.
mask : numpy.ndarray
A 1-d boolean-dtype array indicating missing values (True
indicates missing).
copy : bool, default False
Whether to copy the `values` and `mask` arrays.
Attributes
----------
None
Methods
-------
None
Returns
-------
BooleanArray
Examples
--------
    Create a BooleanArray with :func:`pandas.array`:
>>> pd.array([True, False, None], dtype="boolean")
<BooleanArray>
[True, False, <NA>]
Length: 3, dtype: boolean
"""
# The value used to fill '_data' to avoid upcasting
_internal_fill_value = False
# Fill values used for any/all
_truthy_value = True
_falsey_value = False
_TRUE_VALUES = {"True", "TRUE", "true", "1", "1.0"}
_FALSE_VALUES = {"False", "FALSE", "false", "0", "0.0"}
def __init__(
self, values: np.ndarray, mask: np.ndarray, copy: bool = False
) -> None:
if not (isinstance(values, np.ndarray) and values.dtype == np.bool_):
raise TypeError(
"values should be boolean numpy array. Use "
"the 'pd.array' function instead"
)
self._dtype = BooleanDtype()
super().__init__(values, mask, copy=copy)
@property
def dtype(self) -> BooleanDtype:
return self._dtype
@classmethod
def _from_sequence_of_strings(
cls,
strings: list[str],
*,
dtype: Dtype | None = None,
copy: bool = False,
true_values: list[str] | None = None,
false_values: list[str] | None = None,
) -> BooleanArray:
true_values_union = cls._TRUE_VALUES.union(true_values or [])
false_values_union = cls._FALSE_VALUES.union(false_values or [])
def map_string(s):
if isna(s):
return s
elif s in true_values_union:
return True
elif s in false_values_union:
return False
else:
raise ValueError(f"{s} cannot be cast to bool")
scalars = [map_string(x) for x in strings]
return cls._from_sequence(scalars, dtype=dtype, copy=copy)
_HANDLED_TYPES = (np.ndarray, numbers.Number, bool, np.bool_)
@classmethod
def _coerce_to_array(
cls, value, *, dtype: DtypeObj, copy: bool = False
) -> tuple[np.ndarray, np.ndarray]:
if dtype:
assert dtype == "boolean"
return coerce_to_array(value, copy=copy)
def _logical_method(self, other, op):
assert op.__name__ in {"or_", "ror_", "and_", "rand_", "xor", "rxor"}
other_is_scalar = lib.is_scalar(other)
mask = None
if isinstance(other, BooleanArray):
other, mask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other, dtype="bool")
if other.ndim > 1:
raise NotImplementedError("can only perform ops with 1-d structures")
other, mask = coerce_to_array(other, copy=False)
elif isinstance(other, np.bool_):
other = other.item()
if other_is_scalar and other is not libmissing.NA and not lib.is_bool(other):
raise TypeError(
"'other' should be pandas.NA or a bool. "
f"Got {type(other).__name__} instead."
)
if not other_is_scalar and len(self) != len(other):
raise ValueError("Lengths must match")
if op.__name__ in {"or_", "ror_"}:
result, mask = ops.kleene_or(self._data, other, self._mask, mask)
elif op.__name__ in {"and_", "rand_"}:
result, mask = ops.kleene_and(self._data, other, self._mask, mask)
else:
# i.e. xor, rxor
result, mask = ops.kleene_xor(self._data, other, self._mask, mask)
# i.e. BooleanArray
return self._maybe_mask_result(result, mask)
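# Illustrative sketch (not part of the pandas source): the Kleene semantics
# that _logical_method delegates to ops.kleene_or / kleene_and / kleene_xor.
#
#   a = pd.array([True, False, None], dtype="boolean")
#   b = pd.array([None, None, None], dtype="boolean")
#   a | b   # -> [True, <NA>, <NA>]   (True | NA is True, False | NA is NA)
#   a & b   # -> [<NA>, False, <NA>]  (True & NA is NA, False & NA is False)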
|
py | b40d795bd6fbf1f25123202b125ef6657ba69c06 | # Generated by Django 2.0.4 on 2018-04-27 09:13
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('usergroups', '0006_auto_20170519_0944'),
]
operations = [
migrations.AlterUniqueTogether(
name='vote',
unique_together=set(),
),
migrations.RemoveField(
model_name='vote',
name='application',
),
migrations.RemoveField(
model_name='vote',
name='user',
),
migrations.RemoveField(
model_name='vote',
name='usergroup',
),
migrations.RemoveField(
model_name='voteaudit',
name='application',
),
migrations.RemoveField(
model_name='voteaudit',
name='user',
),
migrations.RemoveField(
model_name='voteaudit',
name='usergroup',
),
migrations.DeleteModel(
name='Vote',
),
migrations.DeleteModel(
name='VoteAudit',
),
]
|
py | b40d7a504f0a755ea2514c891e4f64df7eb5deb7 | import math
import torch.nn as nn
import torch.nn.functional as F
class EqualizedConv3d(nn.Conv3d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1,
gain=math.sqrt(2), bias=True):
super().__init__( in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias)
fan_in = nn.init._calculate_correct_fan(self.weight, "fan_in")
std = gain / math.sqrt(fan_in)
self.scale = std
def reset_parameters(self):
nn.init.normal_(self.weight)
nn.init.zeros_(self.bias)
def forward(self, input):
w = self.scale * self.weight
return F.conv3d(input, w, self.bias, self.stride, self.padding, self.dilation, self.groups)
class EqualizedConv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1,
gain=math.sqrt(2), bias=True):
super().__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias)
fan_in = nn.init._calculate_correct_fan(self.weight, "fan_in")
std = gain / math.sqrt(fan_in)
self.scale = std
def reset_parameters(self):
nn.init.normal_(self.weight)
nn.init.zeros_(self.bias)
def forward(self, input):
w = self.scale * self.weight
return F.conv2d(input, w, self.bias, self.stride, self.padding, self.dilation, self.groups)
class EqualizedLinear(nn.Linear):
def __init__(self, in_features, out_features, gain=math.sqrt(2), bias=True):
super().__init__(in_features, out_features, bias=bias)
fan_in = nn.init._calculate_correct_fan(self.weight, "fan_in")
std = gain / math.sqrt(fan_in)
self.scale = std
def reset_parameters(self):
nn.init.normal_(self.weight)
nn.init.zeros_(self.bias)
def forward(self, input):
w = self.scale * self.weight
return F.linear(input, w, self.bias)
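# Illustrative usage sketch (not part of the original module; the shapes below
# are made up). Each Equalized* layer keeps its raw weights drawn from N(0, 1)
# via reset_parameters() and multiplies them by scale = gain / sqrt(fan_in) on
# every forward pass, so the effective weights match a He-style initialization
# while the optimizer updates unit-variance parameters.
#
#   import torch
#   layer = EqualizedLinear(in_features=512, out_features=256)
#   x = torch.randn(8, 512)
#   y = layer(x)      # F.linear runs on layer.scale * layer.weight
#   print(y.shape)    # torch.Size([8, 256])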
class EqualizedLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * math.sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualizedLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
def equalized_lr(module, name='weight'):
EqualizedLR.apply(module, name)
return module |
py | b40d7ae284efa40316d2f62fa1711daaa03f48d1 | #!/usr/bin/env python
# encoding: utf-8
"""
@version: v1.0
@author: Xia Xianba
@license: Apache Licence
@contact: [email protected]
@site: https://weibo.com/xiaxianba/
@software: PyCharm
@file: maxdown.py
@time: 2018/6/29 18:58
"""
# To compute the maximum drawdown, first be clear about its definition; the explanation below comes from Baidu.
# Maximum drawdown: pushing forward from any historical point in the selected period, the largest percentage decline when the product's net value falls to its lowest point. The test data was provided by a netizen.
def get_maxdown(list_net):
maxdown = 0.0
if type(list_net) != list:
return 0
    for index in range(1, len(list_net)):
        max_net = max(list_net[:index])
        drawdown = float(max_net - list_net[index]) / float(max_net)
        if drawdown > maxdown:
            maxdown = drawdown
return maxdown
if __name__ == "__main__":
list_test = [100, 200, 50, 300, 150, 100, 200]
max_down = get_maxdown(list_test)
    print(max_down)
# With the test data above, the final result is 0.75, i.e. 3/4.
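# A possible O(n) single-pass alternative (illustrative sketch, not part of the
# original script): track the running peak instead of recomputing max() over a
# growing slice for every index. It also returns 0.75 for the test data above.
def get_maxdown_fast(list_net):
    if not isinstance(list_net, list) or not list_net:
        return 0
    maxdown = 0.0
    peak = list_net[0]
    for net in list_net[1:]:
        if net > peak:
            peak = net
        else:
            drawdown = float(peak - net) / float(peak)
            if drawdown > maxdown:
                maxdown = drawdown
    return maxdown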
|
py | b40d7cfc9998dc2f387f7bb0ac0bcf508d7c137a | from mead.tasks import *
from mead.utils import *
from baseline.utils import export, import_user_module
__all__ = []
exporter = export(__all__)
@exporter
class Exporter(object):
def __init__(self, task):
super(Exporter, self).__init__()
self.task = task
def run(self, model_file, embeddings, output_dir, model_version, **kwargs):
pass
def create_exporter(task, exporter_type):
if exporter_type == 'default':
return task.ExporterType(task)
else:
mod = import_user_module("exporter", exporter_type)
return mod.create_exporter(task, exporter_type)
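# Illustrative sketch (an assumption based only on the call above, not on any
# documented mead plugin API): a module loaded via
# import_user_module("exporter", exporter_type) is expected to expose its own
# create_exporter(task, exporter_type) factory, e.g. in a hypothetical
# exporter_myformat.py:
#
#   class MyExporter(Exporter):
#       def run(self, model_file, embeddings, output_dir, model_version, **kwargs):
#           pass  # write the exported artifacts here
#
#   def create_exporter(task, exporter_type):
#       return MyExporter(task)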
|
py | b40d7e2eec4b665a61f25b954cb28dcc4c639bc4 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# @Date : 2016-10-13 14:10:48
# @Author : Yunyu2019 ([email protected])
# @Link :
# @descp : The document description
import os
import time
import json
import codecs
import scrapy
import urlparse
import requests
from urllib import urlencode
from scrapy.selector import Selector
from zhihu.items import FollowItem
from scrapy.http import Request,FormRequest
from scrapy.spiders import CrawlSpider,Rule
from scrapy.linkextractors import LinkExtractor
from zhihu.settings import ZHIHU,LOG_FILE,DATAS,DEFAULT_REQUEST_HEADERS
class Follow(CrawlSpider):
"""docstring for Follow"""
name='follow'
allowed_domains=["www.zhihu.com"]
start_urls=["https://www.zhihu.com/people/excited-vczh/followees"]
rules = (
Rule(LinkExtractor(allow=("/people/excited-vczh/followees",)), callback='parse_followee',follow=False),
)
def __init__(self,*args,**kwargs):
super(Follow, self).__init__(*args, **kwargs)
source_root=os.path.dirname(LOG_FILE)
self.headers=DEFAULT_REQUEST_HEADERS
self.data_file='{0}/{1}'.format(source_root,DATAS['followees'])
self.cookie_file='{0}/{1}'.format(source_root,DATAS['cookies'])
self.captcha_file='{0}/{1}'.format(source_root,'captcha.gif')
self.captcha=False
self.xsrf=None
def start_requests(self):
return [Request('https://www.zhihu.com/#signin',meta = {'cookiejar' : 1}, callback = self.post_login)]
def post_login(self,response):
print 'Preparing login'
hxs=Selector(response)
xsrf=hxs.xpath('//input[@name="_xsrf"]/@value')[0].extract()
self.xsrf=xsrf
formdata={'_xsrf': xsrf,'email':ZHIHU['email'],'password': ZHIHU['password'],'remember_me':'true'}
if self.captcha:
signform=hxs.xpath('//div[@class="view view-signin"]')
captcha_type=signform.css('.captcha-module::attr(data-type)').extract()[0]
self.getCaptcha(captcha_type)
hint=''
if captcha_type=='cn':
formdata['captcha_type']=captcha_type
hint=",the value like {\"img_size\":[200,44],\"input_points\":[[17.375,24],[161.375,20]]}"
            msg='please input the captcha{0}\n'.format(hint)
captcha=raw_input(msg)
formdata['captcha']=json.dumps(captcha)
return [FormRequest(ZHIHU['login_url'],formdata = formdata,method='POST',callback = self.after_login,dont_filter=True)]
def getCaptcha(self,type):
r=int(1000*time.time())
captcha_url='{0}/captcha.gif?r={1}&type=login&lang={2}'.format(self.allowed_domains[0],r,type)
headers=self.headers
headers['User-Agent']='Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/601.6.17 (KHTML, like Gecko) Version/9.1.1 Safari/601.6.17'
req=requests.get(captcha_url,headers=headers)
captcha=req.content
with open(self.captcha_file,'wb') as fp:
fp.write(captcha)
def after_login(self,response):
res=json.loads(response.body_as_unicode())
if 'errorcode' in res:
errcode=res['errorcode']
if errcode==1991829:
print 'captcha required'
self.captcha=True
self.start_requests()
else:
print res['msg']
else:
print res['msg']
cookie=response.headers.getlist('Set-Cookie')
cookies=dict()
map(lambda x:cookies.setdefault(x.split('=',1)[0],x.split('=',1)[1]),cookie)
cookies['_xsrf']=self.xsrf
with codecs.open(self.cookie_file,'a',encoding='utf-8') as fp:
line=json.dumps(dict(cookies),ensure_ascii=True)
fp.write(line)
for url in self.start_urls :
yield self.make_requests_from_url(url)
def catchError(self,response):
pass
def parse_followee(self,response):
user_url=response.url
parse_urls=urlparse.urlparse(user_url)
print 'spider follows begin:{0}'.format(parse_urls.path)
hxs=Selector(response)
section=hxs.xpath('//span[@class="zm-profile-section-name"]/text()').re('(\d+)')
nums=int(section[0])
token=hxs.xpath('//input[@name="_xsrf"]/@value').extract()[0]
warp=hxs.xpath('//div[@class="zh-general-list clearfix"]')
params_init=warp.xpath('@data-init').extract()[0]
params=json.loads(params_init)
hash_id=params['params']['hash_id']
nodename=params['nodename']
ls=warp.xpath('div[@class="zm-profile-card zm-profile-section-item zg-clear no-hovercard"]')
with codecs.open(self.data_file,'a',encoding="utf-8") as fp:
for v in ls:
item=FollowItem()
a=v.xpath('a[@class="zm-item-link-avatar"]')
name=a.xpath('@title').extract()[0]
view_url=a.xpath('@href').extract()[0]
avatar=a.xpath('img[@class="zm-item-img-avatar"]/@src').extract()
descp=v.css('.bio::text').extract()
descp=descp[0] if descp else ''
avatar=avatar[0] if avatar else ''
points=v.css('.zg-link-gray-normal::text').re('(\d+)')
item['name']=name
item['avatar']=avatar
item['view_url']=view_url
item['descp']=descp
item['data']=points
line=json.dumps(dict(item),ensure_ascii=True)
fp.write('{0}\n'.format(line))
        print 'collected profile successfully, the count of followees is: {0}'.format(nums)
pages=int(nums//20+1)
url='https://{0}/node/{1}'.format(self.allowed_domains[0],nodename)
ajax_parses=urlparse.urlparse(url)
self.headers['Referer']=user_url
self.headers['X-Requested-With']='XMLHttpRequest'
self.headers['X-Xsrftoken']=token
self.headers['User-Agent']='Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'
self.headers['Content-Type']='application/x-www-form-urlencoded; charset=UTF-8'
for i in range(1,pages):
offset=i*20
params={"offset":offset,"order_by":"created","hash_id":hash_id}
datas={
'method':'next',
'params':json.dumps(params)
}
print 'start ajax spider {0},offset: {1}'.format(ajax_parses.path,offset)
yield Request(url,method="POST",headers=self.headers,body=urlencode(datas),callback=self.parse_followees,meta={'offset':offset})
def parse_followees(self,response):
offset=response.meta['offset']
cont=json.loads(response.body_as_unicode())
if cont['msg']:
print 'ajax-followees success,offset:{0}'.format(offset)
with codecs.open(self.data_file,'a',encoding='utf-8') as fp:
for i in cont['msg']:
item=FollowItem()
hxs=Selector(text=i)
a=hxs.xpath('//a[@class="zm-item-link-avatar"]')
view_url=a.xpath('./@href').extract()[0]
name=a.xpath('./@title').extract()[0]
avatar=a.xpath('./img[@class="zm-item-img-avatar"]/@src').extract()[0]
descp=hxs.xpath('//span[@class="bio"]/text()').extract()
points=hxs.xpath('//a[@class="zg-link-gray-normal"]/text()').re('(\d+)')
item['view_url']=view_url
item['name']=name
item['avatar']=avatar
item['descp']=descp[0] if descp else ''
item['data']=points
line=json.dumps(dict(item),ensure_ascii=True)
fp.write('{0}\n'.format(line))
else:
            print 'ajax-followees failed, offset:{0}'.format(offset)
|
py | b40d7f890af8b50998842fbffa883526fb9a7127 | # 获取抖音直播的真实流媒体地址,默认最高画质。
# 如果知道该直播间如“6779127643792280332”形式的room_id,则直接传入room_id。
# 如果不知道room_id,可以使用手机上打开直播间后,选择“分享--复制链接”,传入如“https://v.douyin.com/qyRqMp/”形式的分享链接。
#
import requests
import re
class DouYin:
def __init__(self, rid):
self.rid = rid
def get_real_url(self):
headers = {
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, '
'like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1',
}
try:
if 'v.douyin.com' in self.rid:
room_id = re.findall(r'(\d{19})', requests.get(url=self.rid).url)[0]
else:
room_id = self.rid
room_url = 'https://webcast.amemv.com/webcast/reflow/{}'.format(room_id)
response = requests.get(url=room_url, headers= headers).text
rtmp_pull_url = re.search(r'"rtmp_pull_url":"(.*?flv)"', response).group(1)
hls_pull_url = re.search(r'"hls_pull_url":"(.*?m3u8)"', response).group(1)
real_url = [rtmp_pull_url, hls_pull_url]
except:
            raise Exception('The live room does not exist, is not currently live, or the argument is invalid')
return real_url
def get_real_url(rid):
try:
dy = DouYin(rid)
return dy.get_real_url()
except Exception as e:
print('Exception:', e)
return False
if __name__ == '__main__':
    r = input('Enter the Douyin live room_id or share link:\n')
print(get_real_url(r))
|
py | b40d7ff55e191a724e7f9c3bb1fd789c461ae8c3 | from datetime import datetime
from bson import ObjectId
from django.core.exceptions import ObjectDoesNotExist
from django.db import IntegrityError
# AJK TODO before this is run, collapse migrations
print("start:", datetime.now())
# Add the parent directory to the path in order to enable imports from sister directories
from os.path import abspath as _abspath
from sys import path as _path
_one_folder_up = _abspath(__file__).rsplit('/', 2)[0]
_path.insert(1, _one_folder_up)
from cronutils.error_handler import ErrorHandler, null_error_handler
import json
# Load the Django settings
from config import load_django
# Import Mongolia models
from db.data_access_models import ChunksRegistry as MChunkSet, ChunkRegistry as MChunk
from db.profiling import Uploads as MUploadSet
from db.study_models import (
StudyDeviceSettings as MSettings, StudyDeviceSettingsCollection as MSettingsSet,
Studies as MStudySet, Survey as MSurvey, Surveys as MSurveySet
)
from db.user_models import Admins as MAdminSet, Users as MUserSet
# Import Django models
from database.models import (
Researcher as DAdmin, DeviceSettings as DSettings, Participant as DUser,
Study as DStudy, Survey as DSurvey, ChunkRegistry as DChunk, UploadTracking as DUpload
)
class NoSuchDatabaseObject(Exception): pass
class MissingRequiredForeignKey(Exception): pass
class ObjectCreationException(Exception): pass
# Utils
def create_dummy_survey(object_id, study_object_id):
# print orphaned_surveys
# print object_id
# print object_id in orphaned_surveys
# from time import sleep
# sleep(1)
# if DSurvey.objects.filter(object_id=str(object_id)).exists():
# raise Exception("wtf")
try:
d_study = DStudy.objects.get(object_id=str(study_object_id))
except ObjectDoesNotExist:
d_study = create_dummy_study(study_object_id)
s = DSurvey(
survey_type=DSurvey.DUMMY_SURVEY,
object_id=str(object_id),
deleted=True,
study=d_study
# use these fields' defaults
# content=json.dumps(m_survey['content']),
# settings=json.dumps(m_survey['settings']),
# timings=json.dumps(m_survey['timings'])
)
s.save()
survey_id_dict[object_id] = {"pk": s.id}
return s
def create_dummy_study(object_id):
s = DStudy(
name="Old Unnamed Study %s" % object_id,
encryption_key="0"*32,
object_id=str(object_id),
deleted=True,
)
s.save()
study_id_dict[object_id] = {"pk": s.id}
return s
def duplicate_chunk_path_severity(chunk_path):
""" Compare contents of all chunks matching this path, blow up if any values are different.
(They should all be identical.) """
collisions = {key: set() for key in MChunk.DEFAULTS.keys()}
for key in MChunk.DEFAULTS.iterkeys():
for chunk in MChunkSet(chunk_path=chunk_path):
collisions[key].add(chunk[key])
for key, collision in collisions.iteritems():
if len(collision) > 1:
raise Exception('actually a collision:\n%s' % collisions)
# Migration functions
def migrate_studies():
d_study_list = []
for m_study in MStudySet.iterator():
with error_handler:
# Create a Django Study object modeled off the Mongolia Study
study_name = m_study['name']
d_study = DStudy(
name=study_name,
encryption_key=m_study['encryption_key'],
object_id=m_study['_id'],
deleted=m_study['deleted'],
is_test=m_study['is_test'],
)
# Validate the new Study object and add it to the bulk create list
d_study.full_clean()
d_study_list.append(d_study)
# Get lists of Mongolia Surveys, Admins and StudyDeviceSettings attached to this Study
m_survey_list = m_study['surveys']
m_admin_list = m_study['admins']
m_device_settings = m_study['device_settings']
study_referents[study_name] = {
'survey_list': m_survey_list,
'admin_list': m_admin_list,
'device_settings': m_device_settings,
}
# Bulk create the Django Studies
DStudy.objects.bulk_create(d_study_list)
# Create a reference from Mongolia Study IDs to Django Studies that doesn't require
# any future database calls.
for m_study in MStudySet.iterator():
with error_handler:
m_study_id = m_study['_id']
d_study_id = DStudy.objects.filter(name=m_study['name']).values('pk', 'deleted').get()
study_id_dict[m_study_id] = d_study_id
def remap_study_relationships():
for study_name, object_ids in study_referents.iteritems():
with error_handler:
m_admin_id_list = object_ids['admin_list']
m_survey_id_list = object_ids['survey_list']
m_settings_id = object_ids['device_settings']
d_study = DStudy.objects.get(name=study_name)
for m_survey_id in m_survey_id_list:
with error_handler:
m_survey = MSurvey(m_survey_id)
if not m_survey:
msg = 'Survey {} referenced by Study but does not exist.'.format(m_survey_id)
print(msg)
# raise NoSuchDatabaseObject(msg)
d_study_survey_dict[m_survey_id] = d_study
for m_admin_id in m_admin_id_list:
# Add the Study-Researcher pair to the list of pairs
d_study_admin_list.append((d_study.pk, m_admin_id))
m_settings = MSettings(m_settings_id)
if not m_settings:
msg = 'DeviceSettings {} referenced by Study but does not exist.'.format(m_settings_id)
print(msg)
# raise NoSuchDatabaseObject(msg)
d_study_settings_dict[m_settings_id] = d_study
def migrate_surveys():
d_survey_list = []
# Build all Surveys
for m_survey in MSurveySet.iterator():
with error_handler:
try:
d_study = d_study_survey_dict[m_survey['_id']]
except KeyError: # This MSurvey has no corresponding Study
print('Survey {} does not connect to any Study.'.format(m_survey['_id']))
orphaned_surveys[m_survey['_id']] = m_survey
continue
d_survey = DSurvey(
content=json.dumps(m_survey['content']),
survey_type=m_survey['survey_type'],
settings=json.dumps(m_survey['settings']),
timings=json.dumps(m_survey['timings']),
object_id=m_survey['_id'],
study_id=d_study.pk,
deleted=d_study.deleted,
)
# Validate the Survey and add it to the bulk_create list
d_survey.full_clean()
d_survey_list.append(d_survey)
# Bulk_create the list of Researchers
DSurvey.objects.bulk_create(d_survey_list)
# Create a mapping from Surveys' Mongolia ids to their Django primary keys
for m_survey in MSurveySet.iterator():
with error_handler:
m_survey_id = m_survey['_id']
try:
d_survey_id = DSurvey.objects.filter(object_id=m_survey['_id']).values('pk').get()
except DSurvey.DoesNotExist:
print('Survey {} was not created.'.format(m_survey_id))
continue
survey_id_dict[m_survey_id] = d_survey_id
def migrate_settings():
d_settings_list = []
# Build a new DeviceSettings object
for m_settings in MSettingsSet.iterator():
with error_handler:
try:
d_study = d_study_settings_dict[m_settings['_id']]
except KeyError: # This MSettings has no corresponding Study
print('DeviceSettings {} is not connected to any Study.'.format(m_settings['_id']))
continue
d_settings = DSettings(
accelerometer=m_settings['accelerometer'],
gps=m_settings['gps'],
calls=m_settings['calls'],
texts=m_settings['texts'],
wifi=m_settings['wifi'],
bluetooth=m_settings['bluetooth'],
power_state=m_settings['power_state'],
proximity=m_settings['proximity'],
gyro=m_settings['gyro'],
magnetometer=m_settings['magnetometer'],
devicemotion=m_settings['devicemotion'],
reachability=m_settings['reachability'],
allow_upload_over_cellular_data=m_settings['allow_upload_over_cellular_data'],
accelerometer_off_duration_seconds=m_settings['accelerometer_off_duration_seconds'],
accelerometer_on_duration_seconds=m_settings['accelerometer_on_duration_seconds'],
bluetooth_on_duration_seconds=m_settings['bluetooth_on_duration_seconds'],
bluetooth_total_duration_seconds=m_settings['bluetooth_total_duration_seconds'],
bluetooth_global_offset_seconds=m_settings['bluetooth_global_offset_seconds'],
check_for_new_surveys_frequency_seconds=m_settings['check_for_new_surveys_frequency_seconds'],
create_new_data_files_frequency_seconds=m_settings['create_new_data_files_frequency_seconds'],
gps_off_duration_seconds=m_settings['gps_off_duration_seconds'],
gps_on_duration_seconds=m_settings['gps_on_duration_seconds'],
seconds_before_auto_logout=m_settings['seconds_before_auto_logout'],
upload_data_files_frequency_seconds=m_settings['upload_data_files_frequency_seconds'],
voice_recording_max_time_length_seconds=m_settings['voice_recording_max_time_length_seconds'],
wifi_log_frequency_seconds=m_settings['wifi_log_frequency_seconds'],
gyro_off_duration_seconds=m_settings['gyro_off_duration_seconds'],
gyro_on_duration_seconds=m_settings['gyro_on_duration_seconds'],
magnetometer_off_duration_seconds=m_settings['magnetometer_off_duration_seconds'],
magnetometer_on_duration_seconds=m_settings['magnetometer_on_duration_seconds'],
devicemotion_off_duration_seconds=m_settings['devicemotion_off_duration_seconds'],
devicemotion_on_duration_seconds=m_settings['devicemotion_on_duration_seconds'],
about_page_text=m_settings['about_page_text'],
call_clinician_button_text=m_settings['call_clinician_button_text'],
consent_form_text=m_settings['consent_form_text'],
survey_submit_success_toast_text=m_settings['survey_submit_success_toast_text'],
consent_sections=json.dumps(m_settings['consent_sections']),
study_id=d_study.pk,
deleted=d_study.deleted,
)
d_settings_list.append(d_settings)
# Bulk_create the objects built above
DSettings.objects.bulk_create(d_settings_list)
def migrate_admins():
d_admin_list = []
# Build all Researchers
for m_admin in MAdminSet.iterator():
with error_handler:
d_admin = DAdmin(
username=m_admin['_id'],
admin=m_admin['system_admin'],
access_key_id=m_admin['access_key_id'] or None, # access_key_id is unique and therefore nullable
access_key_secret=m_admin['access_key_secret'] or '',
access_key_secret_salt=m_admin['access_key_secret_salt'] or '',
password=m_admin['password'] or 'NoPassword',
salt=m_admin['salt'],
deleted=False,
)
# Validate the Researcher and add it to the bulk_create list
d_admin.full_clean()
d_admin_list.append(d_admin)
# Bulk_create the list of Researchers
DAdmin.objects.bulk_create(d_admin_list)
# Now that the Researchers have primary keys, fill in the Study-Researcher ManyToMany relationship
# Create a mapping from Researcher's username to primary key
admin_username_to_pk_dict = dict(DAdmin.objects.values_list('username', 'pk'))
d_study_admin_relation_list = []
for study_id, admin_username in d_study_admin_list:
with error_handler:
try:
admin_id = admin_username_to_pk_dict[admin_username]
except KeyError:
# study_name = DStudy.objects.get(pk=study_id).name
print('Admin {} is referenced by a Study but does not exist.'.format(admin_username))
continue
# Populate a list of database objects in the Study-Researcher relationship table
new_relation = DAdmin.studies.through(study_id=study_id, researcher_id=admin_id)
d_study_admin_relation_list.append(new_relation)
# Bulk_create the Study-Researcher relationship objects
with error_handler:
DAdmin.studies.through.objects.bulk_create(d_study_admin_relation_list)
def migrate_users():
m_user_list = MUserSet.iterator()
d_user_list = []
for m_user in m_user_list:
with error_handler:
# Get information about the Participant's Study
m_study_id = m_user['study_id']
try:
d_study_info = study_id_dict[m_study_id]
except KeyError:
print('Study {} is referenced by a User but does not exist.'.format(m_study_id))
continue
# Django convention is to use the empty string rather than None in CharFields
device_id = m_user['device_id'] or ''
os_type = m_user['os_type'] or ''
# Build a new Django Participant
d_user = DUser(
patient_id=m_user['_id'],
device_id=device_id,
os_type=os_type,
study_id=d_study_info['pk'],
password=m_user['password'],
salt=m_user['salt'],
deleted=d_study_info['deleted'],
)
# Validate the Participant and add it to the bulk_create list
d_user.full_clean()
d_user_list.append(d_user)
# Bulk_create the Participants
DUser.objects.bulk_create(d_user_list)
for m_user in MUserSet.iterator():
with error_handler:
m_user_id = m_user['_id']
try:
d_user_id = DUser.objects.filter(patient_id=m_user['_id']).values('pk').get()
except DUser.DoesNotExist:
msg = 'User {} was not created.'.format(m_user_id)
print(msg)
# raise ObjectCreationException(msg)
user_id_dict[m_user_id] = d_user_id
def migrate_chunk_registries():
# Calculate the number of chunks that will be used to go through all of MChunkSet()
d_chunk_list = []
num_registries_handled = 0
num_bulk_creates = 0
for m_chunk in MChunkSet.iterator():
with error_handler:
try:
d_study_info = study_id_dict[m_chunk.study_id]
except KeyError:
msg = 'Study {} referenced in chunk but does not exist, creating it.'.format(m_chunk['study_id'])
print(msg)
create_dummy_study(m_chunk.study_id)
# raise NoSuchDatabaseObject(msg)
try:
d_user_info = user_id_dict[m_chunk.user_id]
except KeyError:
msg = 'User {} referenced in chunk but does not exist.'.format(m_chunk['user_id'])
print(msg)
continue
# raise NoSuchDatabaseObject(msg)
# some chunks have survey_ids that are string representations of objectids, fix.
# (and sometimes this can be an empty string, handle that too.)
if m_chunk.survey_id and isinstance(m_chunk.survey_id, (str, unicode)):
m_chunk.survey_id = ObjectId(m_chunk.survey_id)
if not m_chunk.survey_id:
d_survey_pk = None
elif m_chunk.survey_id in survey_id_dict:
d_survey_pk = survey_id_dict[m_chunk.survey_id]['pk']
else:
print('Survey {} referenced in chunk but does not exist, creating it.'.format(m_chunk.survey_id))
new_survey = create_dummy_survey(m_chunk.survey_id, m_chunk.study_id)
d_survey_pk = new_survey.pk
d_chunk = DChunk(
is_chunkable=m_chunk.is_chunkable,
chunk_path=m_chunk.chunk_path,
chunk_hash=m_chunk.chunk_hash or '',
data_type=m_chunk.data_type,
time_bin=m_chunk.time_bin,
study_id=d_study_info['pk'],
participant_id=d_user_info['pk'],
survey_id=d_survey_pk,
deleted=d_study_info['deleted'],
)
# d_chunk.full_clean() # Don't bother full cleaning, it is slow and unnecessary here.
d_chunk_list.append(d_chunk)
num_registries_handled += 1
if num_registries_handled % CHUNK_SIZE == 0:
                # Bulk create every CHUNK_SIZE (10,000) chunks; print progress every 10 batches
num_bulk_creates += 1
if num_bulk_creates % 10 == 0:
print(num_bulk_creates * CHUNK_SIZE)
# there are a lot of unique chunk path issues
try:
DChunk.objects.bulk_create(d_chunk_list)
except IntegrityError as e:
                    # This shouldn't happen, because chunk_path has unique=False at the time of the
                    # migration, and only gets unique=True later in a separate Django migration.
if "UNIQUE" in e.message:
for d_chunk in d_chunk_list:
if DChunk.objects.filter(chunk_path=d_chunk.chunk_path).exists():
try:
print("duplicate path:",)
duplicate_chunk_path_severity(d_chunk.chunk_path)
print("...nevermind.")
except Exception as e2:
print(d_chunk.chunk_path)
print(e2.message)
# raise e2
else:
d_chunk.save()
else:
raise e
finally:
d_chunk_list = []
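    # Bulk create any remaining database objects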
DChunk.objects.bulk_create(d_chunk_list)
def migrate_upload_trackers():
d_upload_list = []
num_uploads_handled = 0
num_bulk_creates = 0
for m_upload in MUploadSet.iterator():
with error_handler:
try:
d_user_info = user_id_dict[m_upload.user_id]
except KeyError:
msg = 'User {} referenced in upload tracker but does not exist.'.format(m_upload['user_id'])
print(msg)
continue
d_upload = DUpload(
file_path=m_upload.file_path,
file_size=m_upload.file_size or 0,
timestamp=m_upload.timestamp,
participant_id=d_user_info['pk'],
deleted=False,
)
d_upload_list.append(d_upload)
num_uploads_handled += 1
if num_uploads_handled % CHUNK_SIZE == 0:
                # Bulk create every CHUNK_SIZE (10,000) uploads; print progress every 10 batches
num_bulk_creates += 1
if num_bulk_creates % 10 == 0:
print(num_bulk_creates * CHUNK_SIZE)
DUpload.objects.bulk_create(d_upload_list)
d_upload_list = []
# Bulk create any remaining database objects
DUpload.objects.bulk_create(d_upload_list)
def run_all_migrations():
print "migrate_studies..."
migrate_studies()
print "remap_study_relationships..."
remap_study_relationships()
print "migrate_surveys..."
migrate_surveys()
print "migrate_settings..."
migrate_settings()
print "migrate_admins..."
migrate_admins()
print "migrate_users..."
migrate_users()
print "migrate_chunk_registries..."
migrate_chunk_registries()
print "migrate_upload_trackers..."
migrate_upload_trackers()
if __name__ == '__main__':
study_referents = {}
study_id_dict = {}
user_id_dict = {}
survey_id_dict = {}
orphaned_surveys = {}
d_study_admin_list = [] # A list of study-researcher pairs
d_study_survey_dict = {} # A mapping of surveys to their associated studies
d_study_settings_dict = {} # A mapping of device settings to their associated studies
CHUNK_SIZE = 10000
# error_handler = ErrorHandler()
error_handler = null_error_handler()
print(MStudySet.count(), MSurveySet.count(), MSettingsSet.count(),
MAdminSet.count(), MUserSet.count(), MChunkSet.count(), MUploadSet.count())
with error_handler:
run_all_migrations()
print(DStudy.objects.count(), DSurvey.objects.count(), DSettings.objects.count(),
DAdmin.objects.count(), DUser.objects.count(), DChunk.objects.count(), DUpload.objects.count())
print("end:", datetime.now())
error_handler.raise_errors()
|
py | b40d802172d95e076b14abcade35a9b5de161c77 | # coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: [email protected]
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
from __future__ import absolute_import
import unittest
import appcenter_sdk
from ErrorDownload.clsErrorDownload import ErrorDownload # noqa: E501
from appcenter_sdk.rest import ApiException
class TestErrorDownload(unittest.TestCase):
"""ErrorDownload unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testErrorDownload(self):
"""Test ErrorDownload"""
# FIXME: construct object with mandatory attributes with example values
# model = appcenter_sdk.models.clsErrorDownload.ErrorDownload() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | b40d81149a689d67e92537ad833dc0e7ee1e791e | '''3) Write an algorithm to calculate and display the sum:
S = 2 + 2^2 + 2^3 + 2^4 + 2^5 + ... + 2^10 '''
soma = 0
for i in range(1,11): soma += 2**i
print(f"O valor da soma e {soma}")
|
py | b40d8157a9eef294930bad8dcd26e1031fd4f634 | import torch
from mmcls.models.heads import (ClsHead, MultiLabelClsHead,
MultiLabelLinearClsHead)
def test_cls_head():
# test ClsHead with cal_acc=True
head = ClsHead()
fake_cls_score = torch.rand(4, 3)
fake_gt_label = torch.randint(0, 2, (4, ))
losses = head.loss(fake_cls_score, fake_gt_label)
assert losses['loss'].item() > 0
# test ClsHead with cal_acc=False
head = ClsHead(cal_acc=False)
fake_cls_score = torch.rand(4, 3)
fake_gt_label = torch.randint(0, 2, (4, ))
losses = head.loss(fake_cls_score, fake_gt_label)
assert losses['loss'].item() > 0
def test_multilabel_head():
head = MultiLabelClsHead()
fake_cls_score = torch.rand(4, 3)
fake_gt_label = torch.randint(0, 2, (4, 3))
losses = head.loss(fake_cls_score, fake_gt_label)
assert losses['loss'].item() > 0
def test_multilabel_linear_head():
head = MultiLabelLinearClsHead(3, 5)
fake_cls_score = torch.rand(4, 3)
fake_gt_label = torch.randint(0, 2, (4, 3))
head.init_weights()
losses = head.loss(fake_cls_score, fake_gt_label)
assert losses['loss'].item() > 0
|
py | b40d8187e1aff9dd57f25b4da6029f9524127d6d | from __future__ import annotations
from .ast import Context
from .lexer import Lexer
from .parser import Parser, ParserState
from .preprocesser import PreProcesser, MINECRAFT_KEYWORDS
MINECRAFT = {
"EXEC": "(align|anchored|as|at|facing|if|in|positioned|rotated|unless|store)(?!\w)",
"COMMENT": r"(/\*([^*]|[\r\n]|(\*+([^*/]|[\r\n])))*\*+/)|(//.*)",
"MCCMD": "(" + "|".join(MINECRAFT_KEYWORDS) + r").*(?!\n)",
"CREATE": r"create(?!\w)",
"SCORE": r"score(?!\w)",
}
KEYWORDS = {
"VAR": r"var(?!\w)",
"FUNC": r"func(?!\w)",
"FOR": r"for(?!\w)",
"WHILE": r"while(?!\w)",
}
OPERATORS = {
"PLUSEQ": r"\+\=",
"MINUSEQ": r"\-\=",
"MULTEQ": r"\*\=",
"DIVEQ": r"\/\=",
"PLUS": r"\+",
"MINUS": r"\-",
"MULT": r"\*",
"DIV": r"\/",
"<<": r"\<\<",
">>": r"\>\>",
"><": r"\>\<",
"==": r"\=\=",
">=": r"\>\=",
"<=": r"\<\=",
"!=": r"\!\=",
"<": r"\<",
">": r"\>",
"!": r"\!",
"AND": r"\&\&",
"OR": r"\|\|",
"=": r"\=",
}
LITERALS = {
"FLOAT": r"\d[.]\d+",
"INTEGER": r"\d+",
"STRING": r'\"[^\"]*\"',
"BOOLEAN": r"(?<!\w)(true|false)(?!\w)"
}
PUNCTUATORS = {
"(": r"\(",
")": r"\)",
"{": r"\{",
"}": r"\}",
",": r"\,",
":": r"\:",
";": r"\;",
}
IDENTIFIERS = {
"IDENTIFIER": r"[_a-zA-Z][_a-zA-Z0-9]{0,31}",
}
TOKENTYPES = MINECRAFT | KEYWORDS | OPERATORS | LITERALS | PUNCTUATORS | IDENTIFIERS
##################################################
# Compiler
##################################################
class Compiler:
@staticmethod
def compile(inFile: str, outFile: str):
with open(inFile, 'r') as file:
source = file.read()
if len(source) > 0:
processor = PreProcesser(inFile)
source = processor.include(source)
source = processor.add_endings(source)
# lexer
lexer = Lexer(TOKENTYPES, r'[ \n\t\r\f\v]+')
tokens = lexer.lex(source)
# parser
state = ParserState()
parser = Parser(list(TOKENTYPES), [
('right', ['PLUS', 'MINUS']),
('left', ['MULT', 'DIV']),
('left', ['AND', 'OR', ]),
], inFile, source)
# ast
ast = parser.parse(tokens, state)
context = Context(inFile, outFile, source)
result = ast.interpret(context)
if result.error:
return result.error
commands = result.value
with open(outFile, "w") as file:
file.write(commands.toStr())
return 0
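# A minimal usage sketch (illustrative only; the file names and extensions below are
# hypothetical and depend on how the rest of this project invokes the compiler):
#
#     error = Compiler.compile("example.mcs", "example.mcfunction")
#     if error:
#         print(error)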
|
py | b40d83b4e413e6d71343235ed59f97776bc64fe0 | # coding: utf-8
"""
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import operator
import platform
import collections
import plistlib
import email.parser
import errno
import tempfile
import textwrap
import itertools
import inspect
import ntpath
import posixpath
from pkgutil import get_importer
try:
import _imp
except ImportError:
# Python 3.2 compatibility
import imp as _imp
try:
FileExistsError
except NameError:
FileExistsError = OSError
from pip._vendor import six
from pip._vendor.six.moves import urllib, map, filter
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
try:
import importlib.machinery as importlib_machinery
# access attribute to force import under delayed import mechanisms.
importlib_machinery.__name__
except ImportError:
importlib_machinery = None
from . import py31compat
from pip._vendor import appdirs
from pip._vendor import packaging
__import__("pip._vendor.packaging.version")
__import__("pip._vendor.packaging.specifiers")
__import__("pip._vendor.packaging.requirements")
__import__("pip._vendor.packaging.markers")
__metaclass__ = type
if (3, 0) < sys.version_info < (3, 5):
raise RuntimeError("Python 3.5 or later is required")
if six.PY2:
# Those builtin exceptions are only defined in Python 3
PermissionError = None
NotADirectoryError = None
# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
add_activation_listener = None
resources_stream = None
cleanup_resources = None
resource_dir = None
resource_stream = None
set_extraction_path = None
resource_isdir = None
resource_string = None
iter_entry_points = None
resource_listdir = None
resource_filename = None
resource_exists = None
_distribution_finders = None
_namespace_handlers = None
_namespace_packages = None
class PEP440Warning(RuntimeWarning):
"""
Used when there is an issue with a version or specifier not complying with
PEP 440.
"""
def parse_version(v):
try:
return packaging.version.Version(v)
except packaging.version.InvalidVersion:
return packaging.version.LegacyVersion(v)
_state_vars = {}
def _declare_state(vartype, **kw):
globals().update(kw)
_state_vars.update(dict.fromkeys(kw, vartype))
def __getstate__():
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g["_sget_" + v](g[k])
return state
def __setstate__(state):
g = globals()
for k, v in state.items():
g["_sset_" + _state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of Mac OS X that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of Mac OS X that we are *running*. To allow usage of packages that
explicitly require a newer version of Mac OS X, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = "macosx-%s-%s" % (".".join(_macosx_vers()[:2]), m.group(3))
except ValueError:
# not Mac OS X
pass
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
"require",
"run_script",
"get_provider",
"get_distribution",
"load_entry_point",
"get_entry_map",
"get_entry_info",
"iter_entry_points",
"resource_string",
"resource_stream",
"resource_filename",
"resource_listdir",
"resource_exists",
"resource_isdir",
# Environmental control
"declare_namespace",
"working_set",
"add_activation_listener",
"find_distributions",
"set_extraction_path",
"cleanup_resources",
"get_default_cache",
# Primary implementation classes
"Environment",
"WorkingSet",
"ResourceManager",
"Distribution",
"Requirement",
"EntryPoint",
# Exceptions
"ResolutionError",
"VersionConflict",
"DistributionNotFound",
"UnknownExtra",
"ExtractionError",
# Warnings
"PEP440Warning",
# Parsing functions and string utilities
"parse_requirements",
"parse_version",
"safe_name",
"safe_version",
"get_platform",
"compatible_platforms",
"yield_lines",
"split_sections",
"safe_extra",
"to_filename",
"invalid_marker",
"evaluate_marker",
# filesystem utilities
"ensure_directory",
"normalize_path",
# Distribution "precedence" constants
"EGG_DIST",
"BINARY_DIST",
"SOURCE_DIST",
"CHECKOUT_DIST",
"DEVELOP_DIST",
# "Provider" interfaces, implementations, and registration/lookup APIs
"IMetadataProvider",
"IResourceProvider",
"FileMetadata",
"PathMetadata",
"EggMetadata",
"EmptyProvider",
"empty_provider",
"NullProvider",
"EggProvider",
"DefaultProvider",
"ZipProvider",
"register_finder",
"register_namespace_handler",
"register_loader_type",
"fixup_namespace_packages",
"get_importer",
# Warnings
"PkgResourcesDeprecationWarning",
# Deprecated/backward compatibility only
"run_main",
"AvailableDistributions",
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__ + repr(self.args)
class VersionConflict(ResolutionError):
"""
An already-installed version conflicts with the requested version.
Should be initialized with the installed Distribution and the requested
Requirement.
"""
_template = "{self.dist} is installed but {self.req} is required"
@property
def dist(self):
return self.args[0]
@property
def req(self):
return self.args[1]
def report(self):
return self._template.format(**locals())
def with_context(self, required_by):
"""
If required_by is non-empty, return a version of self that is a
ContextualVersionConflict.
"""
if not required_by:
return self
args = self.args + (required_by,)
return ContextualVersionConflict(*args)
class ContextualVersionConflict(VersionConflict):
"""
A VersionConflict that accepts a third parameter, the set of the
requirements that required the installed Distribution.
"""
_template = VersionConflict._template + " by {self.required_by}"
@property
def required_by(self):
return self.args[2]
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
_template = (
"The '{self.req}' distribution was not found "
"and is required by {self.requirers_str}"
)
@property
def req(self):
return self.args[0]
@property
def requirers(self):
return self.args[1]
@property
def requirers_str(self):
if not self.requirers:
return "the application"
return ", ".join(self.requirers)
def report(self):
return self._template.format(**locals())
def __str__(self):
return self.report()
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = "{}.{}".format(*sys.version_info)
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq, Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, "__loader__", None)
return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
if not _cache:
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == "":
plist = "/System/Library/CoreServices/SystemVersion.plist"
if os.path.exists(plist):
if hasattr(plistlib, "readPlist"):
plist_content = plistlib.readPlist(plist)
if "ProductVersion" in plist_content:
version = plist_content["ProductVersion"]
_cache.append(version.split("."))
return _cache[0]
def _macosx_arch(machine):
return {"PowerPC": "ppc", "Power_Macintosh": "ppc"}.get(machine, machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and Mac OS X.
"""
from sysconfig import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith("macosx-"):
try:
version = _macosx_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (
int(version[0]),
int(version[1]),
_macosx_arch(machine),
)
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided == required:
# easy case
return True
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if (
dversion == 7
and macosversion >= "10.3"
or dversion == 8
and macosversion >= "10.4"
):
return True
# egg isn't macosx or legacy darwin
return False
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
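# Illustrative examples for compatible_platforms() (the platform strings are hypothetical):
#
#     compatible_platforms('macosx-10.3-ppc', 'macosx-10.5-ppc')  # True: older build on a newer OS
#     compatible_platforms('macosx-10.5-ppc', 'macosx-10.3-ppc')  # False: build needs a newer OS
#     compatible_platforms(None, 'linux-x86_64')                  # True: None matches anything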
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns["__name__"]
ns.clear()
ns["__name__"] = name
require(dist_spec)[0].run_script(script_name, ns)
# backward compatibility
run_main = run_script
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist, six.string_types):
dist = Requirement.parse(dist)
if isinstance(dist, Requirement):
dist = get_provider(dist)
if not isinstance(dist, Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
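# Illustrative use of the distribution/entry point helpers above (the distribution and
# entry point names are hypothetical):
#
#     dist = get_distribution('example-dist')
#     ep = get_entry_info('example-dist', 'console_scripts', 'example-tool')
#     main = load_entry_point('example-dist', 'console_scripts', 'example-tool')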
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet:
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
@classmethod
def _build_master(cls):
"""
Prepare the master working set.
"""
ws = cls()
try:
from __main__ import __requires__
except ImportError:
# The main program does not list any requirements
return ws
# ensure the requirements are met
try:
ws.require(__requires__)
except VersionConflict:
return cls._build_from_requirements(__requires__)
return ws
@classmethod
def _build_from_requirements(cls, req_spec):
"""
Build a working set from a requirement spec. Rewrites sys.path.
"""
# try it without defaults already on sys.path
# by starting with an empty path
ws = cls([])
reqs = parse_requirements(req_spec)
dists = ws.resolve(reqs, Environment())
for dist in dists:
ws.add(dist)
# add any missing entries from sys.path
for entry in sys.path:
if entry not in ws.entries:
ws.add_entry(entry)
# then copy back to sys.path
sys.path[:] = ws.entries
return ws
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry, True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self, dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
# XXX add more info
raise VersionConflict(dist, req)
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
return (
entry
for dist in self
for entry in dist.get_entry_map(group).values()
if name is None or name == entry.name
)
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns["__name__"]
ns.clear()
ns["__name__"] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
if item not in self.entry_keys:
# workaround a cache issue
continue
for key in self.entry_keys[item]:
if key not in seen:
seen[key] = 1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True, replace=False):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set, unless `replace=True`.
If it's added, any callbacks registered with the ``subscribe()`` method
will be called.
"""
if insert:
dist.insert_on(self.entries, entry, replace=replace)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry, [])
keys2 = self.entry_keys.setdefault(dist.location, [])
if not replace and dist.key in self.by_key:
# ignore hidden distros
return
self.by_key[dist.key] = dist
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(
self,
requirements,
env=None,
installer=None,
replace_conflicting=False,
extras=None,
):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
Unless `replace_conflicting=True`, raises a VersionConflict exception
if
any requirements are found on the path that have the correct name but
the wrong version. Otherwise, if an `installer` is supplied it will be
invoked to obtain the correct version of the requirement and activate
it.
`extras` is a list of the extras to be used with these requirements.
This is important because extra requirements may look like `my_req;
extra = "my_extra"`, which would otherwise be interpreted as a purely
optional requirement. Instead, we want to be able to assert that these
requirements are truly required.
"""
# set up the stack
requirements = list(requirements)[::-1]
# set of processed requirements
processed = {}
# key -> dist
best = {}
to_activate = []
req_extras = _ReqExtras()
# Mapping of requirement to set of distributions that required it;
# useful for reporting info about conflicts.
required_by = collections.defaultdict(set)
while requirements:
# process dependencies breadth-first
req = requirements.pop(0)
if req in processed:
# Ignore cyclic or redundant dependencies
continue
if not req_extras.markers_pass(req, extras):
continue
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None or (dist not in req and replace_conflicting):
ws = self
if env is None:
if dist is None:
env = Environment(self.entries)
else:
# Use an empty environment and workingset to avoid
# any further conflicts with the conflicting
# distribution
env = Environment([])
ws = WorkingSet([])
dist = best[req.key] = env.best_match(
req, ws, installer, replace_conflicting=replace_conflicting
)
if dist is None:
requirers = required_by.get(req, None)
raise DistributionNotFound(req, requirers)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
dependent_req = required_by[req]
raise VersionConflict(dist, req).with_context(dependent_req)
# push the new requirements onto the stack
new_requirements = dist.requires(req.extras)[::-1]
requirements.extend(new_requirements)
# Register the new requirements needed by req
for new_requirement in new_requirements:
required_by[new_requirement].add(req.project_name)
req_extras[new_requirement] = req.extras
processed[req] = True
# return list of distros to activate
return to_activate
def find_plugins(self, plugin_env, full_env=None, installer=None, fallback=True):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
# add plugins+libs to sys.path
map(working_set.add, distributions)
# display errors
print('Could not load', errors)
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
directories. The `full_env`, if supplied, should be an ``Environment``
contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
# scan project names in alphabetic order
plugin_projects.sort()
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
# put all our entries in shadow_set
list(map(shadow_set.add, self))
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError as v:
# save error info
error_info[dist] = v
if fallback:
# try the next older version of project
continue
else:
# give up on this project, keep going
break
else:
list(map(shadow_set.add, resolvees))
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
def subscribe(self, callback, existing=True):
"""Invoke `callback` for all distributions
If `existing=True` (default),
call on all existing ones, as well.
"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
if not existing:
return
for dist in self:
callback(dist)
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (
self.entries[:],
self.entry_keys.copy(),
self.by_key.copy(),
self.callbacks[:],
)
def __setstate__(self, e_k_b_c):
entries, keys, by_key, callbacks = e_k_b_c
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.callbacks = callbacks[:]
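# Illustrative use of a WorkingSet (the requirement string and entry point group are
# hypothetical):
#
#     ws = WorkingSet()                    # snapshot of sys.path
#     ws.require('example-dist>=1.0')      # activate a distribution and its dependencies
#     for ep in ws.iter_entry_points('example.plugins'):
#         print(ep.name)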
class _ReqExtras(dict):
"""
Map each requirement to the extras that demanded it.
"""
def markers_pass(self, req, extras=None):
"""
Evaluate markers for req against each extra that
demanded it.
Return False if the req has a marker and fails
evaluation. Otherwise, return True.
"""
extra_evals = (
req.marker.evaluate({"extra": extra})
for extra in self.get(req, ()) + (extras or (None,))
)
return not req.marker or any(extra_evals)
class Environment:
"""Searchable snapshot of distributions on a search path"""
def __init__(
self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR
):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.6'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
py_compat = (
self.python is None
or dist.py_version is None
or dist.py_version == self.python
)
return py_compat and compatible_platforms(dist.platform, self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self, project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
Uses case-insensitive `project_name` comparison, assuming all the
project's distributions use their project's name converted to all
lowercase as their key.
"""
distribution_key = project_name.lower()
return self._distmap.get(distribution_key, [])
def add(self, dist):
"""Add `dist` if we ``can_add()`` it and it has not already been added"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key, [])
if dist not in dists:
dists.append(dist)
dists.sort(key=operator.attrgetter("hashcmp"), reverse=True)
def best_match(self, req, working_set, installer=None, replace_conflicting=False):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
try:
dist = working_set.find(req)
except VersionConflict:
if not replace_conflicting:
raise
dist = None
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
# try to download/install
return self.obtain(req, installer)
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
Obtain a distro that matches requirement (e.g. via download). In the
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]:
yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other, Distribution):
self.add(other)
elif isinstance(other, Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
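# Illustrative use of Environment (the directory and project names are hypothetical):
#
#     env = Environment(['plugins'])       # scan ./plugins for distributions
#     newest_first = env['example-dist']   # newest-to-oldest list for one project
#     combined = env + Environment()       # merge with a snapshot of sys.path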
# XXX backward compatibility
AvailableDistributions = Environment
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(resource_name)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(resource_name)
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
tmpl = textwrap.dedent(
"""
Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s)
to the Python egg cache:
{old_exc}
The Python egg cache directory is currently set to:
{cache_path}
Perhaps your account does not have write access to this directory?
You can change the cache directory by setting the PYTHON_EGG_CACHE
environment variable to point to an accessible directory.
"""
).lstrip()
err = ExtractionError(tmpl.format(**locals()))
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name + "-tmp", *names)
try:
_bypass_ensure_directory(target_path)
except Exception:
self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == "nt" and not path.startswith(os.environ["windir"]):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = (
"%s is writable by group/others and vulnerable to attack "
"when "
"used with get_resource_filename. Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)." % path
)
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == "posix":
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError("Can't change extraction path, files already extracted")
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
def get_default_cache():
"""
Return the ``PYTHON_EGG_CACHE`` environment variable
or a platform-relevant user cache dir for an app
named "Python-Eggs".
"""
return os.environ.get("PYTHON_EGG_CACHE") or appdirs.user_cache_dir(
appname="Python-Eggs"
)
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub("[^A-Za-z0-9.]+", "-", name)
def safe_version(version):
"""
Convert an arbitrary string to a standard version string
"""
try:
# normalize the version
return str(packaging.version.Version(version))
except packaging.version.InvalidVersion:
version = version.replace(" ", ".")
return re.sub("[^A-Za-z0-9.]+", "-", version)
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
Any runs of non-alphanumeric characters are replaced with a single '_',
and the result is always lowercased.
"""
return re.sub("[^A-Za-z0-9.-]+", "_", extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace("-", "_")
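# Illustrative examples for the helpers above (the names are hypothetical):
#
#     safe_name('hello_world plugin')     # -> 'hello-world-plugin'
#     to_filename('hello-world-plugin')   # -> 'hello_world_plugin'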
def invalid_marker(text):
"""
Validate text as a PEP 508 environment marker; return an exception
if invalid or False otherwise.
"""
try:
evaluate_marker(text)
except SyntaxError as e:
e.filename = None
e.lineno = None
return e
return False
def evaluate_marker(text, extra=None):
"""
Evaluate a PEP 508 environment marker.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
This implementation uses the 'pyparsing' module.
"""
try:
marker = packaging.markers.Marker(text)
return marker.evaluate()
except packaging.markers.InvalidMarker as e:
raise SyntaxError(e)
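# Illustrative examples for the marker helpers above (the marker strings are hypothetical;
# evaluate_marker's result depends on the running interpreter):
#
#     evaluate_marker('python_version >= "2.7"')    # -> True on any supported interpreter
#     bool(invalid_marker('os_name == "posix"'))    # -> False (the marker is valid)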
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, "__loader__", None)
self.module_path = os.path.dirname(getattr(module, "__file__", ""))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return io.BytesIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def _get_metadata_path(self, name):
return self._fn(self.egg_info, name)
def has_metadata(self, name):
if not self.egg_info:
return self.egg_info
path = self._get_metadata_path(name)
return self._has(path)
def get_metadata(self, name):
if not self.egg_info:
return ""
path = self._get_metadata_path(name)
value = self._get(path)
if six.PY2:
return value
try:
return value.decode("utf-8")
except UnicodeDecodeError as exc:
# Include the path in the error message to simplify
# troubleshooting, and without changing the exception type.
exc.reason += " in {} file at path: {}".format(name, path)
raise
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self, resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self, name):
return self.egg_info and self._isdir(self._fn(self.egg_info, name))
def resource_listdir(self, resource_name):
return self._listdir(self._fn(self.module_path, resource_name))
def metadata_listdir(self, name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info, name))
return []
def run_script(self, script_name, namespace):
script = "scripts/" + script_name
if not self.has_metadata(script):
raise ResolutionError(
"Script {script!r} not found in metadata at {self.egg_info!r}".format(
**locals()
),
)
script_text = self.get_metadata(script).replace("\r\n", "\n")
script_text = script_text.replace("\r", "\n")
script_filename = self._fn(self.egg_info, script)
namespace["__file__"] = script_filename
if os.path.exists(script_filename):
source = open(script_filename).read()
code = compile(source, script_filename, "exec")
exec(code, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text),
0,
script_text.split("\n"),
script_filename,
)
script_code = compile(script_text, script_filename, "exec")
exec(script_code, namespace, namespace)
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
self._validate_resource_path(resource_name)
if resource_name:
return os.path.join(base, *resource_name.split("/"))
return base
@staticmethod
def _validate_resource_path(path):
"""
Validate the resource paths according to the docs.
https://setuptools.readthedocs.io/en/latest/pkg_resources.html#basic-resource-access
>>> warned = getfixture('recwarn')
>>> warnings.simplefilter('always')
>>> vrp = NullProvider._validate_resource_path
>>> vrp('foo/bar.txt')
>>> bool(warned)
False
>>> vrp('../foo/bar.txt')
>>> bool(warned)
True
>>> warned.clear()
>>> vrp('/foo/bar.txt')
>>> bool(warned)
True
>>> vrp('foo/../../bar.txt')
>>> bool(warned)
True
>>> warned.clear()
>>> vrp('foo/f../bar.txt')
>>> bool(warned)
False
Windows path separators are straight-up disallowed.
>>> vrp(r'\\foo/bar.txt')
Traceback (most recent call last):
...
ValueError: Use of .. or absolute path in a resource path \
is not allowed.
>>> vrp(r'C:\\foo/bar.txt')
Traceback (most recent call last):
...
ValueError: Use of .. or absolute path in a resource path \
is not allowed.
Blank values are allowed
>>> vrp('')
>>> bool(warned)
False
Non-string values are not.
>>> vrp(None)
Traceback (most recent call last):
...
AttributeError: ...
"""
invalid = (
os.path.pardir in path.split(posixpath.sep)
or posixpath.isabs(path)
or ntpath.isabs(path)
)
if not invalid:
return
msg = "Use of .. or absolute path in a resource path is not allowed."
# Aggressively disallow Windows absolute paths
if ntpath.isabs(path) and not posixpath.isabs(path):
raise ValueError(msg)
# for compatibility, warn; in future
# raise ValueError(msg)
warnings.warn(
msg[:-1] + " and will raise exceptions in a future release.",
DeprecationWarning,
stacklevel=4,
)
def _get(self, path):
if hasattr(self.loader, "get_data"):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self, module):
NullProvider.__init__(self, module)
self._setup_prefix()
def _setup_prefix(self):
# we assume here that our metadata may be nested inside a "basket"
# of multiple eggs; that's why we use module_path instead of .archive
path = self.module_path
old = None
while path != old:
if _is_egg_path(path):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, "EGG-INFO")
self.egg_root = path
break
old = path
path, base = os.path.split(path)
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self, path):
return os.path.isdir(path)
def _listdir(self, path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), "rb")
def _get(self, path):
with open(path, "rb") as stream:
return stream.read()
@classmethod
def _register(cls):
loader_names = (
"SourceFileLoader",
"SourcelessFileLoader",
)
for name in loader_names:
loader_cls = getattr(importlib_machinery, name, type(None))
register_loader_type(loader_cls, cls)
DefaultProvider._register()
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
module_path = None
_isdir = _has = lambda self, path: False
def _get(self, path):
return ""
def _listdir(self, path):
return []
def __init__(self):
pass
empty_provider = EmptyProvider()
class ZipManifests(dict):
"""
zip manifest builder
"""
@classmethod
def build(cls, path):
"""
Build a dictionary similar to the zipimport directory
caches, except instead of tuples, store ZipInfo objects.
Use a platform-specific path separator (os.sep) for the path keys
for compatibility with pypy on Windows.
"""
with zipfile.ZipFile(path) as zfile:
items = (
(
name.replace("/", os.sep),
zfile.getinfo(name),
)
for name in zfile.namelist()
)
return dict(items)
load = build
class MemoizedZipManifests(ZipManifests):
"""
Memoized zipfile manifests.
"""
manifest_mod = collections.namedtuple("manifest_mod", "manifest mtime")
def load(self, path):
"""
Load a manifest at path or return a suitable manifest already loaded.
"""
path = os.path.normpath(path)
mtime = os.stat(path).st_mtime
if path not in self or self[path].mtime != mtime:
manifest = self.build(path)
self[path] = self.manifest_mod(manifest, mtime)
return self[path].manifest
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
_zip_manifests = MemoizedZipManifests()
def __init__(self, module):
EggProvider.__init__(self, module)
self.zip_pre = self.loader.archive + os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
fspath = fspath.rstrip(os.sep)
if fspath == self.loader.archive:
return ""
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre) :]
raise AssertionError("%s is not a subpath of %s" % (fspath, self.zip_pre))
def _parts(self, zip_path):
# Convert a zipfile subpath into an egg-relative path part list.
# pseudo-fs path
fspath = self.zip_pre + zip_path
if fspath.startswith(self.egg_root + os.sep):
return fspath[len(self.egg_root) + 1 :].split(os.sep)
raise AssertionError("%s is not a subpath of %s" % (fspath, self.egg_root))
@property
def zipinfo(self):
return self._zip_manifests.load(self.loader.archive)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if "/".join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
# ymdhms+wday, yday, dst
date_time = zip_stat.date_time + (0, 0, -1)
# 1980 offset already done
timestamp = time.mktime(date_time)
return timestamp, size
def _extract_resource(self, manager, zip_path):
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(manager, os.path.join(zip_path, name))
# return the extracted directory name
return os.path.dirname(last)
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise IOError(
'"os.rename" and "os.unlink" are not supported ' "on this platform"
)
try:
real_path = manager.get_cache_path(self.egg_name, self._parts(zip_path))
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(
".$extract",
dir=os.path.dirname(real_path),
)
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp, timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
# Windows, del old file and retry
elif os.name == "nt":
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
# report a user-friendly error
manager.extraction_error()
return real_path
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size != size or stat.st_mtime != timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
with open(file_path, "rb") as f:
file_contents = f.read()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ("native_libs.txt", "eager_resources.txt"):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self, fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self, fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.egg_root, resource_name))
def _resource_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.module_path, resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self, path):
self.path = path
def _get_metadata_path(self, name):
return self.path
def has_metadata(self, name):
return name == "PKG-INFO" and os.path.isfile(self.path)
def get_metadata(self, name):
if name != "PKG-INFO":
raise KeyError("No metadata except PKG-INFO is available")
with io.open(self.path, encoding="utf-8", errors="replace") as f:
metadata = f.read()
self._warn_on_replacement(metadata)
return metadata
def _warn_on_replacement(self, metadata):
# Python 2.7 compat for: replacement_char = '�'
replacement_char = b"\xef\xbf\xbd".decode("utf-8")
if replacement_char in metadata:
tmpl = "{self.path} could not be properly decoded in UTF-8"
msg = tmpl.format(**locals())
warnings.warn(msg)
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
dist = Distribution(basedir, project_name=dist_name, metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zip_pre = importer.archive + os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
_declare_state("dict", _distribution_finders={})
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
def find_eggs_in_zip(importer, path_item, only=False):
"""
Find eggs in zip files; possibly multiple nested eggs.
"""
if importer.archive.endswith(".whl"):
# wheels are not supported with this finder
# they don't have PKG-INFO metadata, and won't ever contain eggs
return
metadata = EggMetadata(importer)
if metadata.has_metadata("PKG-INFO"):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
# don't yield nested distros
return
for subitem in metadata.resource_listdir(""):
if _is_egg_path(subitem):
subpath = os.path.join(path_item, subitem)
dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath)
for dist in dists:
yield dist
elif subitem.lower().endswith(".dist-info"):
subpath = os.path.join(path_item, subitem)
submeta = EggMetadata(zipimport.zipimporter(subpath))
submeta.egg_info = subpath
yield Distribution.from_location(path_item, subitem, submeta)
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object, find_nothing)
def _by_version_descending(names):
"""
Given a list of filenames, return them in descending order
by version number.
>>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
>>> _by_version_descending(names)
['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
"""
def _by_version(name):
"""
Parse each component of the filename
"""
name, ext = os.path.splitext(name)
parts = itertools.chain(name.split("-"), [ext])
return [packaging.version.parse(part) for part in parts]
return sorted(names, key=_by_version, reverse=True)
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if _is_unpacked_egg(path_item):
yield Distribution.from_filename(
path_item,
metadata=PathMetadata(path_item, os.path.join(path_item, "EGG-INFO")),
)
return
entries = safe_listdir(path_item)
# for performance, before sorting by version,
# screen entries for only those that will yield
# distributions
filtered = (entry for entry in entries if dist_factory(path_item, entry, only))
# scan for .egg and .egg-info in directory
path_item_entries = _by_version_descending(filtered)
for entry in path_item_entries:
fullpath = os.path.join(path_item, entry)
factory = dist_factory(path_item, entry, only)
for dist in factory(fullpath):
yield dist
def dist_factory(path_item, entry, only):
"""
Return a dist_factory for a path_item and entry
"""
lower = entry.lower()
is_meta = any(map(lower.endswith, (".egg-info", ".dist-info")))
return (
distributions_from_metadata
if is_meta
else find_distributions
if not only and _is_egg_path(entry)
else resolve_egg_link
if not only and lower.endswith(".egg-link")
else NoDists()
)
class NoDists:
"""
>>> bool(NoDists())
False
>>> list(NoDists()('anything'))
[]
"""
def __bool__(self):
return False
if six.PY2:
__nonzero__ = __bool__
def __call__(self, fullpath):
return iter(())
def safe_listdir(path):
"""
Attempt to list contents of path, but suppress some exceptions.
"""
try:
return os.listdir(path)
except (PermissionError, NotADirectoryError):
pass
except OSError as e:
# Ignore the directory if does not exist, not a directory or
# permission denied
ignorable = (
e.errno in (errno.ENOTDIR, errno.EACCES, errno.ENOENT)
# Python 2 on Windows needs to be handled this way :(
or getattr(e, "winerror", None) == 267
)
if not ignorable:
raise
return ()
def distributions_from_metadata(path):
root = os.path.dirname(path)
if os.path.isdir(path):
if len(os.listdir(path)) == 0:
# empty metadata dir; skip
return
metadata = PathMetadata(root, path)
else:
metadata = FileMetadata(path)
entry = os.path.basename(path)
yield Distribution.from_location(
root,
entry,
metadata,
precedence=DEVELOP_DIST,
)
def non_empty_lines(path):
"""
Yield non-empty lines from file at path
"""
with open(path) as f:
for line in f:
line = line.strip()
if line:
yield line
def resolve_egg_link(path):
"""
Given a path to an .egg-link, resolve distributions
present in the referenced path.
"""
referenced_paths = non_empty_lines(path)
resolved_paths = (
os.path.join(os.path.dirname(path), ref) for ref in referenced_paths
)
dist_groups = map(find_distributions, resolved_paths)
return next(dist_groups, ())
register_finder(pkgutil.ImpImporter, find_on_path)
if hasattr(importlib_machinery, "FileFinder"):
register_finder(importlib_machinery.FileFinder, find_on_path)
_declare_state("dict", _namespace_handlers={})
_declare_state("dict", _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer, path_entry, moduleName, module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
# capture warnings due to #1111
with warnings.catch_warnings():
warnings.simplefilter("ignore")
loader = importer.find_module(packageName)
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = types.ModuleType(packageName)
module.__path__ = []
_set_parent_ns(packageName)
elif not hasattr(module, "__path__"):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer, path_item, packageName, module)
if subpath is not None:
path = module.__path__
path.append(subpath)
loader.load_module(packageName)
_rebuild_mod_path(path, packageName, module)
return subpath
def _rebuild_mod_path(orig_path, package_name, module):
"""
Rebuild module.__path__ ensuring that all entries are ordered
corresponding to their sys.path order
"""
sys_path = [_normalize_cached(p) for p in sys.path]
def safe_sys_path_index(entry):
"""
Workaround for #520 and #513.
"""
try:
return sys_path.index(entry)
except ValueError:
return float("inf")
def position_in_sys_path(path):
"""
Return the ordinal of the path based on its position in sys.path
"""
path_parts = path.split(os.sep)
module_parts = package_name.count(".") + 1
parts = path_parts[:-module_parts]
return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
new_path = sorted(orig_path, key=position_in_sys_path)
new_path = [_normalize_cached(p) for p in new_path]
if isinstance(module.__path__, list):
module.__path__[:] = new_path
else:
module.__path__ = new_path
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
_imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path = sys.path
parent, _, _ = packageName.rpartition(".")
if parent:
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError:
raise TypeError("Not a package:", parent)
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent or None, []).append(packageName)
_namespace_packages.setdefault(packageName, [])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
_imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
_imp.acquire_lock()
try:
for package in _namespace_packages.get(parent, ()):
subpath = _handle_ns(package, path_item)
if subpath:
fixup_namespace_packages(subpath, package)
finally:
_imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split(".")[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item) == normalized:
break
else:
# Only return the path if it's not already there
return subpath
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
if hasattr(importlib_machinery, "FileFinder"):
register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(os.path.normpath(_cygwin_patch(filename))))
def _cygwin_patch(filename): # pragma: nocover
"""
Contrary to POSIX 2008, on Cygwin, getcwd (3) contains
symlink components. Using
os.path.abspath() works around this limitation. A fix in os.getcwd()
    would probably be better, in Cygwin even more so, except
that this seems to be by design...
"""
return os.path.abspath(filename) if sys.platform == "cygwin" else filename
def _normalize_cached(filename, _cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _is_egg_path(path):
"""
Determine if given path appears to be an egg.
"""
return path.lower().endswith(".egg")
def _is_unpacked_egg(path):
"""
Determine if given path appears to be an unpacked egg.
"""
return _is_egg_path(path) and os.path.isfile(
os.path.join(path, "EGG-INFO", "PKG-INFO")
)
def _set_parent_ns(packageName):
parts = packageName.split(".")
name = parts.pop()
if parts:
parent = ".".join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
"""Yield non-empty/non-comment lines of a string or sequence"""
if isinstance(strs, six.string_types):
for s in strs.splitlines():
s = s.strip()
# skip blank lines/comments
if s and not s.startswith("#"):
yield s
else:
for ss in strs:
for s in yield_lines(ss):
yield s
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
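# Illustrative sketch (editorial addition, not part of the upstream module):
# what the EGG_NAME pattern extracts from a typical egg basename. The project
# name and platform tag below are hypothetical.
def _egg_name_example():
    match = EGG_NAME("FooBar-1.2.3-py3.8-linux_x86_64")
    # group order mirrors how Distribution.from_location reads the match
    return match.group("name", "ver", "pyver", "plat")
    # e.g. ('FooBar', '1.2.3', '3.8', 'linux_x86_64')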
class EntryPoint:
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = tuple(extras)
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ":" + ".".join(self.attrs)
if self.extras:
s += " [%s]" % ",".join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, *args, **kwargs):
"""
Require packages for this EntryPoint, then resolve it.
"""
if not require or args or kwargs:
warnings.warn(
"Parameters to load are deprecated. Call .resolve and "
".require separately.",
PkgResourcesDeprecationWarning,
stacklevel=2,
)
if require:
self.require(*args, **kwargs)
return self.resolve()
def resolve(self):
"""
Resolve the entry point from its module and attrs.
"""
module = __import__(self.module_name, fromlist=["__name__"], level=0)
try:
return functools.reduce(getattr, self.attrs, module)
except AttributeError as exc:
raise ImportError(str(exc))
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
# Get the requirements for this entry point with all its extras and
# then resolve them. We have to pass `extras` along when resolving so
# that the working set knows what extras we want. Otherwise, for
# dist-info distributions, the working set will assume that the
# requirements for that extra are purely optional and skip over them.
reqs = self.dist.requires(self.extras)
items = working_set.resolve(reqs, env, installer, extras=self.extras)
list(map(working_set.add, items))
pattern = re.compile(
r"\s*"
r"(?P<name>.+?)\s*"
r"=\s*"
r"(?P<module>[\w.]+)\s*"
r"(:\s*(?P<attr>[\w.]+))?\s*"
r"(?P<extras>\[.*\])?\s*$"
)
@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1, extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
m = cls.pattern.match(src)
if not m:
msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
raise ValueError(msg, src)
res = m.groupdict()
extras = cls._parse_extras(res["extras"])
attrs = res["attr"].split(".") if res["attr"] else ()
return cls(res["name"], res["module"], attrs, extras, dist)
@classmethod
def _parse_extras(cls, extras_spec):
if not extras_spec:
return ()
req = Requirement.parse("x" + extras_spec)
if req.specs:
raise ValueError()
return req.extras
@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name] = ep
return this
@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data, dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
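# Illustrative sketch (editorial addition, not part of the upstream module):
# how EntryPoint.parse interprets the "name = module:attrs [extras]" syntax.
# The module path and extra name below are hypothetical.
def _entry_point_parse_example():
    ep = EntryPoint.parse("serve = mypkg.cli:main [ssl]")
    # parse() only validates the format; nothing is imported here
    assert ep.name == "serve"
    assert ep.module_name == "mypkg.cli"
    assert ep.attrs == ("main",)
    assert ep.extras == ("ssl",)
    return ep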
def _remove_md5_fragment(location):
if not location:
return ""
parsed = urllib.parse.urlparse(location)
if parsed[-1].startswith("md5="):
return urllib.parse.urlunparse(parsed[:-1] + ("",))
return location
def _version_from_file(lines):
"""
Given an iterable of lines from a Metadata file, return
the value of the Version field, if present, or None otherwise.
"""
def is_version_line(line):
return line.lower().startswith("version:")
version_lines = filter(is_version_line, lines)
line = next(iter(version_lines), "")
_, _, value = line.partition(":")
return safe_version(value.strip()) or None
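# Illustrative sketch (editorial addition): _version_from_file scans raw
# metadata lines for the first "Version:" header; the values are hypothetical.
def _version_from_file_example():
    lines = ["Metadata-Version: 2.1", "Name: demo", "Version: 1.4.2"]
    return _version_from_file(lines)  # e.g. '1.4.2'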
class Distribution:
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = "PKG-INFO"
def __init__(
self,
location=None,
metadata=None,
project_name=None,
version=None,
py_version=PY_MAJOR,
platform=None,
precedence=EGG_DIST,
):
self.project_name = safe_name(project_name or "Unknown")
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(cls, location, basename, metadata=None, **kw):
project_name, version, py_version, platform = [None] * 4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
cls = _distributionImpl[ext.lower()]
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
"name", "ver", "pyver", "plat"
)
return cls(
location,
metadata,
project_name=project_name,
version=version,
py_version=py_version,
platform=platform,
**kw
)._reload_version()
def _reload_version(self):
return self
@property
def hashcmp(self):
return (
self.parsed_version,
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version or "",
self.platform or "",
)
def __hash__(self):
return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
if not hasattr(self, "_parsed_version"):
self._parsed_version = parse_version(self.version)
return self._parsed_version
def _warn_legacy_version(self):
LV = packaging.version.LegacyVersion
is_legacy = isinstance(self._parsed_version, LV)
if not is_legacy:
return
# While an empty version is technically a legacy version and
# is not a valid PEP 440 version, it's also unlikely to
# actually come from someone and instead it is more likely that
# it comes from setuptools attempting to parse a filename and
# including it in the list. So for that we'll gate this warning
# on if the version is anything at all or not.
if not self.version:
return
tmpl = (
textwrap.dedent(
"""
'{project_name} ({version})' is being parsed as a legacy,
non PEP 440,
version. You may find odd behavior and sort order.
In particular it will be sorted as less than 0.0. It
is recommended to migrate to PEP 440 compatible
versions.
"""
)
.strip()
.replace("\n", " ")
)
warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
@property
def version(self):
try:
return self._version
except AttributeError:
version = self._get_version()
if version is None:
path = self._get_metadata_path_for_display(self.PKG_INFO)
msg = ("Missing 'Version:' header and/or {} file at path: {}").format(
self.PKG_INFO, path
)
raise ValueError(msg, self)
return version
@property
def _dep_map(self):
"""
A map of extra to its list of (direct) requirements
for this distribution, including the null extra.
"""
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._filter_extras(self._build_dep_map())
return self.__dep_map
@staticmethod
def _filter_extras(dm):
"""
Given a mapping of extras to dependencies, strip off
environment markers and filter out any dependencies
not matching the markers.
"""
for extra in list(filter(None, dm)):
new_extra = extra
reqs = dm.pop(extra)
new_extra, _, marker = extra.partition(":")
fails_marker = marker and (
invalid_marker(marker) or not evaluate_marker(marker)
)
if fails_marker:
reqs = []
new_extra = safe_extra(new_extra) or None
dm.setdefault(new_extra, []).extend(reqs)
return dm
def _build_dep_map(self):
dm = {}
for name in "requires.txt", "depends.txt":
for extra, reqs in split_sections(self._get_metadata(name)):
dm.setdefault(extra, []).extend(parse_requirements(reqs))
return dm
def requires(self, extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None, ()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra("%s has no such extra feature %r" % (self, ext))
return deps
def _get_metadata_path_for_display(self, name):
"""
Return the path to the given metadata file, if available.
"""
try:
# We need to access _get_metadata_path() on the provider object
# directly rather than through this class's __getattr__()
# since _get_metadata_path() is marked private.
path = self._provider._get_metadata_path(name)
# Handle exceptions e.g. in case the distribution's metadata
# provider doesn't support _get_metadata_path().
except Exception:
return "[could not detect]"
return path
def _get_metadata(self, name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def _get_version(self):
lines = self._get_metadata(self.PKG_INFO)
version = _version_from_file(lines)
return version
def activate(self, path=None, replace=False):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None:
path = sys.path
self.insert_on(path, replace=replace)
if path is sys.path:
fixup_namespace_packages(self.location)
for pkg in self._get_metadata("namespace_packages.txt"):
if pkg in sys.modules:
declare_namespace(pkg)
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name),
to_filename(self.version),
self.py_version or PY_MAJOR,
)
if self.platform:
filename += "-" + self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self, self.location)
else:
return str(self)
def __str__(self):
try:
version = getattr(self, "version", None)
except ValueError:
version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name, version)
def __getattr__(self, attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith("_"):
raise AttributeError(attr)
return getattr(self._provider, attr)
def __dir__(self):
return list(
set(super(Distribution, self).__dir__())
| set(attr for attr in self._provider.__dir__() if not attr.startswith("_"))
)
if not hasattr(object, "__dir__"):
# python 2.7 not supported
del __dir__
@classmethod
def from_filename(cls, filename, metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata, **kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
if isinstance(self.parsed_version, packaging.version.Version):
spec = "%s==%s" % (self.project_name, self.parsed_version)
else:
spec = "%s===%s" % (self.project_name, self.parsed_version)
return Requirement.parse(spec)
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group, name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group, name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata("entry_points.txt"), self
)
if group is not None:
return ep_map.get(group, {})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
def insert_on(self, path, loc=None, replace=False):
"""Ensure self.location is on path
If replace=False (default):
- If location is already in path anywhere, do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent.
- Else: add to the end of path.
If replace=True:
- If location is already on path anywhere (not eggs)
or higher priority than its parent (eggs)
do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent,
removing any lower-priority entries.
- Else: add it to the front of path.
"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath = [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item == nloc:
if replace:
break
else:
# don't modify path (even removing duplicates) if
# found and not replace
return
elif item == bdir and self.precedence == EGG_DIST:
# if it's an .egg, give it precedence over its directory
# UNLESS it's already been added to sys.path and replace=False
if (not replace) and nloc in npath[p:]:
return
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
if replace:
path.insert(0, loc)
else:
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while True:
try:
np = npath.index(nloc, p + 1)
except ValueError:
break
else:
del npath[np], path[np]
# ha!
p = np
return
def check_version_conflict(self):
if self.key == "setuptools":
# ignore the inevitable setuptools self-conflicts :(
return
nsp = dict.fromkeys(self._get_metadata("namespace_packages.txt"))
loc = normalize_path(self.location)
for modname in self._get_metadata("top_level.txt"):
if (
modname not in sys.modules
or modname in nsp
or modname in _namespace_packages
):
continue
if modname in ("pkg_resources", "setuptools", "site"):
continue
fn = getattr(sys.modules[modname], "__file__", None)
if fn and (
normalize_path(fn).startswith(loc) or fn.startswith(self.location)
):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for " + repr(self))
return False
return True
def clone(self, **kw):
"""Copy this distribution, substituting in any changed keyword args"""
names = "project_name version py_version platform location precedence"
for attr in names.split():
kw.setdefault(attr, getattr(self, attr, None))
kw.setdefault("metadata", self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
class EggInfoDistribution(Distribution):
def _reload_version(self):
"""
        Packages installed by distutils (e.g. numpy or scipy) use an old
        safe_version, so their version numbers can get mangled when
        converted to filenames (e.g., 1.11.0.dev0+2329eae to
        1.11.0.dev0_2329eae). Such distributions will not be parsed
        properly downstream by Distribution and safe_version, so take an
        extra step and try to get the version number from the metadata
        file itself instead of the filename.
"""
md_version = self._get_version()
if md_version:
self._version = md_version
return self
class DistInfoDistribution(Distribution):
"""
Wrap an actual or potential sys.path entry
w/metadata, .dist-info style.
"""
PKG_INFO = "METADATA"
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
metadata = self.get_metadata(self.PKG_INFO)
self._pkg_info = email.parser.Parser().parsestr(metadata)
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all("Requires-Dist") or []:
reqs.extend(parse_requirements(req))
def reqs_for_extra(extra):
for req in reqs:
if not req.marker or req.marker.evaluate({"extra": extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all("Provides-Extra") or []:
s_extra = safe_extra(extra.strip())
dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm
_distributionImpl = {
".egg": Distribution,
".egg-info": EggInfoDistribution,
".dist-info": DistInfoDistribution,
}
def issue_warning(*args, **kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
warnings.warn(stacklevel=level + 1, *args, **kw)
class RequirementParseError(ValueError):
def __str__(self):
return " ".join(self.args)
def parse_requirements(strs):
"""Yield ``Requirement`` objects for each specification in `strs`
`strs` must be a string, or a (possibly-nested) iterable thereof.
"""
# create a steppable iterator, so we can handle \-continuations
lines = iter(yield_lines(strs))
for line in lines:
# Drop comments -- a hash without a space may be in a URL.
if " #" in line:
line = line[: line.find(" #")]
# If there is a line continuation, drop it, and append the next line.
if line.endswith("\\"):
line = line[:-2].strip()
try:
line += next(lines)
except StopIteration:
return
yield Requirement(line)
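# Illustrative sketch (editorial addition, not part of the upstream module):
# parse_requirements drops " #" comments and handles backslash continuations.
# The package names are hypothetical.
def _parse_requirements_example():
    text = "foo>=1.0  # pinned loosely\nbar[extra]==2.1\n"
    return [str(req) for req in parse_requirements(text)]
    # e.g. ['foo>=1.0', 'bar[extra]==2.1']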
class Requirement(packaging.requirements.Requirement):
def __init__(self, requirement_string):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
try:
super(Requirement, self).__init__(requirement_string)
except packaging.requirements.InvalidRequirement as e:
raise RequirementParseError(str(e))
self.unsafe_name = self.name
project_name = safe_name(self.name)
self.project_name, self.key = project_name, project_name.lower()
self.specs = [(spec.operator, spec.version) for spec in self.specifier]
self.extras = tuple(map(safe_extra, self.extras))
self.hashCmp = (
self.key,
self.url,
self.specifier,
frozenset(self.extras),
str(self.marker) if self.marker else None,
)
self.__hash = hash(self.hashCmp)
def __eq__(self, other):
return isinstance(other, Requirement) and self.hashCmp == other.hashCmp
def __ne__(self, other):
return not self == other
def __contains__(self, item):
if isinstance(item, Distribution):
if item.key != self.key:
return False
item = item.version
# Allow prereleases always in order to match the previous behavior of
# this method. In the future this should be smarter and follow PEP 440
# more accurately.
return self.specifier.contains(item, prereleases=True)
def __hash__(self):
return self.__hash
def __repr__(self):
return "Requirement.parse(%r)" % str(self)
@staticmethod
def parse(s):
(req,) = parse_requirements(s)
return req
def _always_object(classes):
"""
Ensure object appears in the mro even
for old-style classes.
"""
if object not in classes:
return classes + (object,)
return classes
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
types = _always_object(inspect.getmro(getattr(ob, "__class__", type(ob))))
for t in types:
if t in registry:
return registry[t]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
py31compat.makedirs(dirname, exist_ok=True)
def _bypass_ensure_directory(path):
"""Sandbox-bypassing version of ensure_directory()"""
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(path)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
try:
mkdir(dirname, 0o755)
except FileExistsError:
pass
def split_sections(s):
"""Split a string or iterable thereof into (section, content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
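# Illustrative sketch (editorial addition): split_sections turns INI-like text
# into (section, lines) pairs; lines before any "[header]" land under None.
def _split_sections_example():
    sample = "alpha\n[extras]\nbeta\ngamma\n"
    return list(split_sections(sample))
    # e.g. [(None, ['alpha']), ('extras', ['beta', 'gamma'])]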
def _mkstemp(*args, **kw):
old_open = os.open
try:
# temporarily bypass sandboxing
os.open = os_open
return tempfile.mkstemp(*args, **kw)
finally:
# and then put it back
os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
f(*args, **kwargs)
return f
@_call_aside
def _initialize(g=globals()):
"Set up global resource manager (deliberately not state-saved)"
manager = ResourceManager()
g["_manager"] = manager
g.update(
(name, getattr(manager, name))
for name in dir(manager)
if not name.startswith("_")
)
@_call_aside
def _initialize_master_working_set():
"""
Prepare the master working set and make the ``require()``
API available.
This function has explicit effects on the global state
of pkg_resources. It is intended to be invoked once at
the initialization of this module.
Invocation by other packages is unsupported and done
at their own risk.
"""
working_set = WorkingSet._build_master()
_declare_state("object", working_set=working_set)
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
# backward compatibility
run_main = run_script
# Activate all distributions already on sys.path with replace=False and
# ensure that all distributions added to the working set in the future
# (e.g. by calling ``require()``) will get activated as well,
# with higher priority (replace=True).
tuple(dist.activate(replace=False) for dist in working_set)
add_activation_listener(
lambda dist: dist.activate(replace=True),
existing=False,
)
working_set.entries = []
# match order
list(map(working_set.add_entry, sys.path))
globals().update(locals())
class PkgResourcesDeprecationWarning(Warning):
"""
Base class for warning about deprecations in ``pkg_resources``
This class is not derived from ``DeprecationWarning``, and as such is
visible by default.
"""
|
py | b40d845be5aee352bdc3281721145a6876837e80 | #!/usr/bin/env python
import os
import sys
import json
import argparse
filepath = "inventory.json"
class StaticInventory(object):
def cli_args(self):
parser = argparse.ArgumentParser()
parser.add_argument('--host')
parser.add_argument('--list', action='store_true')
self.cli_args = parser.parse_args()
def default_res(self):
return {
"_meta": {
"hostvars": {}
}
}
    def __init__(self):
        self.cli_args()
        self.inventory = self.default_res()
        if self.cli_args.list:
            # --list: return the full inventory read from the static JSON file
            with open(filepath, "r") as f:
                self.inventory = json.load(f)
        if self.cli_args.host:
            # --host: no per-host variables are stored, so return an empty structure
            self.inventory = self.default_res()
        print(json.dumps(self.inventory, indent=2))
StaticInventory()
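# Example invocation as an Ansible dynamic inventory (editorial addition;
# assumes inventory.json sits in the working directory, and the script and
# host names are hypothetical):
#   ./inventory_script.py --list
#   ./inventory_script.py --host some-host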
|
py | b40d85f6daece006cbf3ebdcf03df360191a5031 | # MIT License
#
# Copyright (C) IBM Corporation 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Scanning operations
"""
import numpy as np
class ScanningOps:
"""
Specific operations done during scanning
"""
@staticmethod
def optimize_in_single_dimension(pvalues, a_max, image_to_node, score_function):
"""
Optimizes over all subsets of nodes for a given subset of images or over all subsets of images for a given
subset of nodes.
:param pvalues: pvalue ranges
:type pvalues: `ndarray`
:param a_max: alpha max. determines the significance level threshold
:type a_max: `float`
:param image_to_node: informs what direction to optimize in
:type image_to_node: `bool`
:param score_function: scoring function
:type score_function: `.ScoringFunction`
:return: (best_score_so_far, subset, best_alpha)
:rtype: `float`, `np.array`, `float`
"""
alpha_thresholds = np.unique(pvalues[:, :, 1])
# alpha_thresholds = alpha_thresholds[0::5] #take every 5th for speed purposes
# where does a_max fall in check
last_alpha_index = np.searchsorted(alpha_thresholds, a_max)
# resize check for only ones smaller than a_max
alpha_thresholds = alpha_thresholds[0:last_alpha_index]
step_for_50 = len(alpha_thresholds) / 50
alpha_thresholds = alpha_thresholds[0::int(step_for_50) + 1]
# add on the max value to check as well as it may not have been part of unique
alpha_thresholds = np.append(alpha_thresholds, a_max)
# alpha_thresholds = np.arange(a_max/50, a_max, a_max/50)
if image_to_node:
number_of_elements = pvalues.shape[1] # searching over j columns
size_of_given = pvalues.shape[0] # for fixed this many images
unsort_priority = np.zeros(
(pvalues.shape[1], alpha_thresholds.shape[0])) # number of columns
else:
number_of_elements = pvalues.shape[0] # searching over i rows
size_of_given = pvalues.shape[1] # for this many fixed nodes
unsort_priority = np.zeros(
(pvalues.shape[0], alpha_thresholds.shape[0])) # number of rows
for elem_indx in range(0, number_of_elements):
# sort all the range maxes
if image_to_node:
# collect ranges over images(rows)
arg_sort_max = np.argsort(pvalues[:, elem_indx, 1])
# arg_sort_min = np.argsort(pvalues[:,e,0]) #collect ranges over images(rows)
completely_included = np.searchsorted(
pvalues[:, elem_indx, 1][arg_sort_max], alpha_thresholds, side='right')
else:
# collect ranges over nodes(columns)
arg_sort_max = np.argsort(pvalues[elem_indx, :, 1])
# arg_sort_min = np.argsort(pvalues[elem_indx,:,0])
completely_included = np.searchsorted(
pvalues[elem_indx, :, 1][arg_sort_max], alpha_thresholds, side='right')
# should be num elements by num thresh
unsort_priority[elem_indx, :] = completely_included
# want to sort for a fixed thresh (across?)
arg_sort_priority = np.argsort(-unsort_priority, axis=0)
best_score_so_far = -10000
best_alpha = -2
alpha_count = 0
for alpha_threshold in alpha_thresholds:
# score each threshold by itself, cumulating priority,
# cumulating count, alpha stays same.
alpha_v = np.ones(number_of_elements) * alpha_threshold
n_alpha_v = np.cumsum(unsort_priority[:, alpha_count][arg_sort_priority][:, alpha_count])
count_increments_this = np.ones(number_of_elements) * size_of_given
n_v = np.cumsum(count_increments_this)
vector_of_scores = score_function(n_alpha_v, n_v, alpha_v)
best_score_for_this_alpha_idx = np.argmax(vector_of_scores)
best_score_for_this_alpha = vector_of_scores[best_score_for_this_alpha_idx]
if best_score_for_this_alpha > best_score_so_far:
best_score_so_far = best_score_for_this_alpha
best_size = best_score_for_this_alpha_idx + 1 # not sure 1 is needed?
best_alpha = alpha_threshold
best_alpha_count = alpha_count
alpha_count = alpha_count + 1
# after the alpha for loop we now have best score, best alpha, size of best subset,
# and alpha counter use these with the priority argsort to reconstruct the best subset
unsort = arg_sort_priority[:, best_alpha_count]
subset = np.zeros(best_size).astype(int)
for loc in range(0, best_size):
subset[loc] = unsort[loc]
return best_score_so_far, subset, best_alpha
@staticmethod
def single_restart(pvalues, a_max, indices_of_seeds, image_to_node, score_function):
"""
        Controls the alternation between the images->nodes and nodes->images
        optimization steps, starting from the seed indices in the direction
        given by `image_to_node`.
:param pvalues: pvalue ranges
:type pvalues: `ndarray`
:param a_max: alpha max. determines the significance level threshold
:type a_max: `float`
:param indices_of_seeds: indices of initial sets of images or nodes to perform optimization
:type indices_of_seeds: `np.array`
:param image_to_node: informs what direction to optimize in
:type image_to_node: `bool`
:param score_function: scoring function
:type score_function: `.ScoringFunction`
:return: (best_score_so_far, best_sub_of_images, best_sub_of_nodes, best_alpha)
:rtype: `float`, `np.array`, `np.array`, `float`
"""
best_score_so_far = -100000
count = 0
while True:
# These can be moved outside the while loop as only executed first time through??
if count == 0: # first time through, we need something initialized depending on direction.
if image_to_node:
sub_of_images = indices_of_seeds
else:
sub_of_nodes = indices_of_seeds
if image_to_node: # passed pvalues are only those belonging to fixed images, update nodes in return
# only sending sub of images
score_from_optimization, sub_of_nodes, optimal_alpha = ScanningOps.optimize_in_single_dimension(
pvalues[sub_of_images, :, :], a_max, image_to_node, score_function)
else: # passed pvalues are only those belonging to fixed nodes, update images in return
# only sending sub of nodes
score_from_optimization, sub_of_images, optimal_alpha = ScanningOps.optimize_in_single_dimension(
pvalues[:, sub_of_nodes, :], a_max, image_to_node, score_function)
if score_from_optimization > best_score_so_far: # haven't converged yet
# update
best_score_so_far = score_from_optimization
best_sub_of_nodes = sub_of_nodes
best_sub_of_images = sub_of_images
best_alpha = optimal_alpha
image_to_node = not image_to_node # switch direction!
count = count + 1 # for printing and
else: # converged! Don't update from most recent optimization, return current best
return best_score_so_far, best_sub_of_images, best_sub_of_nodes, best_alpha
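# Illustrative usage sketch (editorial addition, not part of the upstream
# module): calling optimize_in_single_dimension with a toy p-value tensor of
# shape (n_images, n_nodes, 2) and a simple scoring function. The scoring
# function below is a stand-in; its (n_alpha, n, alpha) signature mirrors how
# score_function is invoked above.
def _scanning_example():
    rng = np.random.default_rng(0)
    # sorted along the last axis so each entry is a (min, max) p-value range
    pvalues = np.sort(rng.uniform(size=(4, 3, 2)), axis=2)
    def toy_score(n_alpha, n, alpha):
        # higher when more p-values fall below alpha than expected by chance
        return n_alpha - n * alpha
    return ScanningOps.optimize_in_single_dimension(
        pvalues, a_max=0.5, image_to_node=True, score_function=toy_score)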
|
py | b40d861dad3a6393ebceebcdeb36be6986885e77 | # Copyright (c) 2021, Frappe Technologies and Contributors
# See license.txt
# import frappe
import unittest
class TestAdhesionPagos360(unittest.TestCase):
pass
|
py | b40d862caca4fbfce0aaa45bedfea9222a96c9e5 | """Some extra functionality."""
from .appendix import flatten, get_event_log, remove_item
__all__ = ["remove_item", "get_event_log", "flatten"]
|
py | b40d87165396915b53c2f21f2888249d7dc07b91 | import tensorflow as tf
from root_module.parameters_module import GlobalParams, ConfidenceNetworkParams, TargetNetworkParams, Directory
from root_module.implementation_module import RepLayer, ConfidenceNetwork, TargetNetwork
class L2LWS:
def __init__(self, global_params, cnf_params, tar_params, dir_obj, cnf_dir_obj, tar_dir_obj):
self.global_params = global_params
self.cnf_params = cnf_params
self.tar_params = tar_params
self.dir_obj = dir_obj
self.cnf_dir = cnf_dir_obj
self.tar_dir = tar_dir_obj
self.init_pipeline()
def init_pipeline(self):
self.create_placeholders()
self.extract_word_embedding()
self.init_representation_layer()
self.init_target_network()
self.init_confidence_network()
self.run_confidence_network()
self.run_target_network()
def create_placeholders(self):
with tf.variable_scope('placeholders'):
self.labeled_text = tf.placeholder(dtype=tf.int32,
shape=[None, self.global_params.MAX_LEN],
name='labeled_txt_placeholder')
self.unlabeled_text = tf.placeholder(dtype=tf.int32,
shape=[None, self.global_params.MAX_LEN],
name='unlabeled_txt_placeholder')
self.gold_label = tf.placeholder(dtype=tf.int32,
shape=[None],
name='gold_label_placeholder')
self.weak_label_labeled = tf.placeholder(dtype=tf.float32,
shape=[None, self.global_params.num_classes],
name='weak_labeled_placeholder')
self.weak_label_unlabeled = tf.placeholder(dtype=tf.float32,
shape=[None, self.global_params.num_classes],
name='weak_unlabeled_placeholder')
def extract_word_embedding(self):
with tf.variable_scope('emb_lookup'):
# Vocabulary size should be the size of the vocab in the dictionary we created
# Creates a new matrix if one is not already there. It is initialized with
# glorot_uniform_initializer
self.word_emb_matrix = tf.get_variable(name = "word_embedding_matrix",
shape=[self.global_params.vocab_size, self.global_params.EMB_DIM],
dtype=tf.float32,
regularizer=tf.contrib.layers.l2_regularizer(0.0),
trainable=self.global_params.is_word_trainable)
labeled_padded_input = tf.pad(self.labeled_text, paddings=[[0, 0], [5, 0]])
self.labeled_word_emb = tf.nn.embedding_lookup(params=self.word_emb_matrix,
ids=labeled_padded_input,
name='labeled_word_emb',
validate_indices=True)
unlabeled_padded_input = tf.pad(self.unlabeled_text, paddings=[[0, 0], [5, 0]])
self.unlabeled_word_emb = tf.nn.embedding_lookup(params=self.word_emb_matrix,
ids=unlabeled_padded_input,
name='unlabeled_word_emb',
validate_indices=True)
print('Extracted word embedding')
def init_representation_layer(self):
with tf.variable_scope('rep_layer'):
self.rep_layer = RepLayer()
def init_confidence_network(self):
with tf.variable_scope('cnf_net'):
self.cnf_network = ConfidenceNetwork(self.global_params.num_classes,
self.cnf_params.optimizer,
self.cnf_params.max_grad_norm,
self.cnf_params.REG_CONSTANT,
self.global_params.log)
def run_confidence_network(self):
with tf.variable_scope('rep_layer'):
repc = self.cnf_rep = self.rep_layer.create_representation(self.labeled_word_emb, self.cnf_params)
with tf.variable_scope('cnf_net'):
if self.cnf_params.mode == 'TR':
run_cnf = self.cnf_network.train(repc,
num_layers=self.cnf_params.num_hidden_layer,
true_label=self.gold_label,
weak_label=self.weak_label_labeled)
else:
run_cnf, _ = self.cnf_network.aggregate_loss(repc,
num_layers=self.cnf_params.num_hidden_layer,
true_label=self.gold_label,
weak_label=self.weak_label_labeled)
self.cnf_train_op = run_cnf
def init_target_network(self):
with tf.variable_scope('tar_net'):
self.tar_network = TargetNetwork(self.tar_params.optimizer,
self.tar_params.max_grad_norm,
self.tar_params.REG_CONSTANT,
self.global_params.log)
def run_target_network(self):
with tf.variable_scope('rep_layer', reuse=True):
rept = self.tar_rep = self.rep_layer.create_representation(self.unlabeled_word_emb, self.global_params)
with tf.variable_scope('cnf_net', reuse=True):
logits, confidence, _ = self.cnf_network.compute_confidence(rept, self.cnf_params.num_hidden_layer)
# confidence = tf.ones(shape=[self.cnf_params.batch_size, 1], dtype=tf.float32)
with tf.variable_scope('tar_net'):
if self.tar_params.mode == 'TR':
run_tar = self.tar_network.train(rept,
num_layers=self.tar_params.num_hidden_layer,
num_classes=self.global_params.num_classes,
confidence=confidence,
weak_label=self.weak_label_unlabeled)
else:
run_tar = self.tar_network.aggregate_loss(rept,
num_layers=self.tar_params.num_hidden_layer,
num_classes=self.global_params.num_classes,
confidence=confidence,
weak_label=self.weak_label_unlabeled)
self.tar_train_op = run_tar
# def main():
# L2LWS(GlobalParams(), ConfidenceNetworkParams(), TargetNetworkParams(), Directory('TR'))
#
#
# if __name__ == '__main__':
# main()
|
py | b40d8991247e5e1141c1dcce1c6b4b4e534c196f | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'administration.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
py | b40d8a1c73d80588a063f27c6819043d543f14d7 | import os
import sys
import logging
import paddle
import argparse
import functools
import paddle.fluid as fluid
sys.path.append("..")
import imagenet_reader as reader
import models
sys.path.append("../../")
from utility import add_arguments, print_arguments
from paddle.fluid.contrib.slim import Compressor
logging.basicConfig(format='%(asctime)s-%(levelname)s: %(message)s')
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.INFO)
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('batch_size', int, 64*4, "Minibatch size.")
add_arg('use_gpu', bool, True, "Whether to use GPU or not.")
add_arg('model', str, None, "The target model.")
add_arg('pretrained_model', str, None, "Whether to use pretrained model.")
add_arg('config_file', str, None, "The config file for compression with yaml format.")
# yapf: enable
model_list = [m for m in dir(models) if "__" not in m]
def compress(args):
    class_dim = 1000
    image_shape = "3,224,224"
image_shape = [int(m) for m in image_shape.split(",")]
assert args.model in model_list, "{} is not in lists: {}".format(args.model, model_list)
image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
# model definition
model = models.__dict__[args.model]()
out = model.net(input=image, class_dim=class_dim)
cost = fluid.layers.cross_entropy(input=out, label=label)
avg_cost = fluid.layers.mean(x=cost)
acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
val_program = fluid.default_main_program().clone()
# for param in fluid.default_main_program().global_block().all_parameters():
# print param.name, param.shape
# return
opt = fluid.optimizer.Momentum(
momentum=0.9,
learning_rate=fluid.layers.piecewise_decay(
boundaries=[5000 * 30, 5000 * 60, 5000 * 90],
values=[0.1, 0.01, 0.001, 0.0001]),
regularization=fluid.regularizer.L2Decay(4e-5))
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
if args.pretrained_model:
def if_exist(var):
return os.path.exists(os.path.join(args.pretrained_model, var.name))
fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)
val_reader = paddle.batch(reader.val(), batch_size=args.batch_size)
val_feed_list = [('image', image.name), ('label', label.name)]
val_fetch_list = [('acc_top1', acc_top1.name), ('acc_top5', acc_top5.name)]
train_reader = paddle.batch(
reader.train(), batch_size=args.batch_size, drop_last=True)
train_feed_list = [('image', image.name), ('label', label.name)]
train_fetch_list = [('loss', avg_cost.name)]
com_pass = Compressor(
place,
fluid.global_scope(),
fluid.default_main_program(),
train_reader=train_reader,
train_feed_list=train_feed_list,
train_fetch_list=train_fetch_list,
eval_program=val_program,
eval_reader=val_reader,
eval_feed_list=val_feed_list,
eval_fetch_list=val_fetch_list,
save_eval_model=True,
prune_infer_model=[[image.name], [out.name]],
train_optimizer=opt)
com_pass.config(args.config_file)
com_pass.run()
def main():
args = parser.parse_args()
print_arguments(args)
compress(args)
if __name__ == '__main__':
main()
|
py | b40d8afadc8f66d768df525a5ed6b40ad3730f9f | #!/usr/bin/python
# coding: utf-8
r"""STL module of aocxchange"""
from __future__ import print_function
import logging
import OCC.StlAPI
import OCC.TopoDS
import aocxchange.exceptions
import aocxchange.extensions
import aocxchange.utils
import aocxchange.checks
logger = logging.getLogger(__name__)
class StlImporter(object):
r"""STL importer
Parameters
----------
filename : str
"""
def __init__(self, filename):
logger.info("StlImporter instantiated with filename : %s" % filename)
aocxchange.checks.check_importer_filename(filename, aocxchange.extensions.stl_extensions)
self._filename = filename
self._shape = None
logger.info("Reading file ....")
self.read_file()
def read_file(self):
r"""Read the STL file and stores the result in a TopoDS_Shape"""
stl_reader = OCC.StlAPI.StlAPI_Reader()
shape = OCC.TopoDS.TopoDS_Shape()
stl_reader.Read(shape, self._filename)
self._shape = shape
@property
def shape(self):
r"""Shape"""
if self._shape.IsNull():
raise AssertionError("Error: the shape is NULL")
else:
return self._shape
class StlExporter(object):
""" A TopoDS_Shape to STL exporter. Default mode is ASCII
Parameters
----------
filename : str
ascii_mode : bool
(default is False)
"""
def __init__(self, filename=None, ascii_mode=False):
logger.info("StlExporter instantiated with filename : %s" % filename)
logger.info("StlExporter ascii : %s" % str(ascii_mode))
aocxchange.checks.check_exporter_filename(filename, aocxchange.extensions.stl_extensions)
aocxchange.checks.check_overwrite(filename)
self._shape = None # only one shape can be exported
self._ascii_mode = ascii_mode
self._filename = filename
def set_shape(self, a_shape):
"""
only a single shape can be exported...
Parameters
----------
a_shape
"""
aocxchange.checks.check_shape(a_shape) # raises an exception if the shape is not valid
self._shape = a_shape
def write_file(self):
r"""Write file"""
stl_writer = OCC.StlAPI.StlAPI_Writer()
stl_writer.Write(self._shape, self._filename, self._ascii_mode)
logger.info("Wrote STL file")
|
py | b40d8b61af89259bd3d5722ca8ecdce3625582ba | import asyncio
import json
import discord
from discord import app_commands
from discord.ext import commands
from utils.ids import GuildIDs, TGArenaChannelIDs, TGMatchmakingRoleIDs
class Matchmaking(commands.Cog):
"""Contains the Unranked portion of our matchmaking system."""
def __init__(self, bot):
self.bot = bot
def store_ping(self, ctx: commands.Context, mm_type: str, timestamp: float):
"""Saves a Matchmaking Ping of any unranked type in the according file."""
with open(rf"./json/{mm_type}.json", "r", encoding="utf-8") as f:
user_pings = json.load(f)
user_pings[f"{ctx.author.id}"] = {}
user_pings[f"{ctx.author.id}"] = {"channel": ctx.channel.id, "time": timestamp}
with open(rf"./json/{mm_type}.json", "w", encoding="utf-8") as f:
json.dump(user_pings, f, indent=4)
def delete_ping(self, ctx: commands.Context, mm_type: str):
"""Deletes a Matchmaking Ping of any unranked type from the according file."""
with open(rf"./json/{mm_type}.json", "r", encoding="utf-8") as f:
user_pings = json.load(f)
try:
del user_pings[f"{ctx.message.author.id}"]
except KeyError:
logger = self.bot.get_logger("bot.mm")
logger.warning(
f"Tried to delete a {mm_type} ping by {str(ctx.message.author)} but the ping was already deleted."
)
with open(rf"./json/{mm_type}.json", "w", encoding="utf-8") as f:
json.dump(user_pings, f, indent=4)
def get_recent_pings(self, mm_type: str, timestamp: float) -> str:
"""Gets a list with every Ping saved.
As long as the ping is not older than 30 Minutes.
"""
with open(rf"./json/{mm_type}.json", "r", encoding="utf-8") as f:
user_pings = json.load(f)
list_of_searches = []
for ping in user_pings:
ping_channel = user_pings[f"{ping}"]["channel"]
ping_timestamp = user_pings[f"{ping}"]["time"]
difference = timestamp - ping_timestamp
minutes = round(difference / 60)
if minutes < 31:
list_of_searches.append(
f"<@!{ping}>, in <#{ping_channel}>, {minutes} minutes ago\n"
)
list_of_searches.reverse()
return "".join(list_of_searches) or "Looks like no one has pinged recently :("
@commands.hybrid_command(
aliases=["matchmaking", "matchmakingsingles", "mmsingles", "Singles"]
)
@commands.cooldown(1, 600, commands.BucketType.user)
@app_commands.guilds(*GuildIDs.ALL_GUILDS)
async def singles(self, ctx: commands.Context):
"""Used for 1v1 Matchmaking with competitive rules."""
timestamp = discord.utils.utcnow().timestamp()
singles_role = discord.utils.get(
ctx.guild.roles, id=TGMatchmakingRoleIDs.SINGLES_ROLE
)
if ctx.message.channel.id in TGArenaChannelIDs.PUBLIC_ARENAS:
self.store_ping(ctx, "singles", timestamp)
searches = self.get_recent_pings("singles", timestamp)
embed = discord.Embed(
title="Singles pings in the last 30 Minutes:",
description=searches,
colour=discord.Colour.dark_red(),
)
            # Role mentions don't ping in an interaction response, so if the user invoked the slash command
            # version of this command, we first need to acknowledge the interaction in some way and then send
            # a follow-up message into the channel.
            # The same applies to the other matchmaking types below and to ranked matchmaking.
if ctx.interaction:
await ctx.send("Processing request...", ephemeral=True)
mm_message = await ctx.channel.send(
f"{ctx.author.mention} is looking for {singles_role.mention} games!",
embed=embed,
)
mm_thread = await mm_message.create_thread(
name=f"Singles Arena of {ctx.author.name}", auto_archive_duration=60
)
await mm_thread.add_user(ctx.author)
await mm_thread.send(
f"Hi there, {ctx.author.mention}! Please use this thread for communicating with your opponent."
)
await asyncio.sleep(1800)
self.delete_ping(ctx, "singles")
elif ctx.message.channel.id in TGArenaChannelIDs.PRIVATE_ARENAS:
searches = self.get_recent_pings("singles", timestamp)
embed = discord.Embed(
title="Singles pings in the last 30 Minutes:",
description=searches,
colour=discord.Colour.dark_red(),
)
if ctx.interaction:
await ctx.send("Processing request...", ephemeral=True)
await ctx.channel.send(
f"{ctx.author.mention} is looking for {singles_role.mention} games!\n"
"Here are the most recent Singles pings in our open arenas:",
embed=embed,
)
else:
await ctx.send(
"Please only use this command in our arena channels!", ephemeral=True
)
ctx.command.reset_cooldown(ctx)
@singles.error
async def singles_error(self, ctx, error):
if not isinstance(error, commands.CommandOnCooldown):
raise error
# Triggers when you're on cooldown, lists out the recent pings.
# Same for every other type below.
timestamp = discord.utils.utcnow().timestamp()
if (
ctx.message.channel.id in TGArenaChannelIDs.PUBLIC_ARENAS
or ctx.message.channel.id in TGArenaChannelIDs.PRIVATE_ARENAS
):
searches = self.get_recent_pings("singles", timestamp)
embed = discord.Embed(
title="Singles pings in the last 30 Minutes:",
description=searches,
colour=discord.Colour.dark_red(),
)
await ctx.send(
f"{ctx.author.mention}, you are on cooldown for another {round((error.retry_after)/60)} minutes to use this command. \n"
"In the meantime, here are the most recent Singles pings in our open arenas:",
embed=embed,
)
else:
await ctx.send(
"Please only use this command in our arena channels!", ephemeral=True
)
@commands.hybrid_command(aliases=["matchmakingdoubles", "mmdoubles", "Doubles"])
@commands.cooldown(1, 600, commands.BucketType.user)
@app_commands.guilds(*GuildIDs.ALL_GUILDS)
async def doubles(self, ctx: commands.Context):
"""Used for 2v2 Matchmaking."""
timestamp = discord.utils.utcnow().timestamp()
doubles_role = discord.utils.get(
ctx.guild.roles, id=TGMatchmakingRoleIDs.DOUBLES_ROLE
)
if ctx.message.channel.id in TGArenaChannelIDs.PUBLIC_ARENAS:
self.store_ping(ctx, "doubles", timestamp)
searches = self.get_recent_pings("doubles", timestamp)
embed = discord.Embed(
title="Doubles pings in the last 30 Minutes:",
description=searches,
colour=discord.Colour.dark_blue(),
)
if ctx.interaction:
await ctx.send("Processing request...", ephemeral=True)
mm_message = await ctx.channel.send(
f"{ctx.author.mention} is looking for {doubles_role.mention} games!",
embed=embed,
)
mm_thread = await mm_message.create_thread(
name=f"Doubles Arena of {ctx.author.name}", auto_archive_duration=60
)
await mm_thread.add_user(ctx.author)
await mm_thread.send(
f"Hi there, {ctx.author.mention}! Please use this thread for communicating with your opponents."
)
await asyncio.sleep(1800)
self.delete_ping(ctx, "doubles")
elif ctx.message.channel.id in TGArenaChannelIDs.PRIVATE_ARENAS:
searches = self.get_recent_pings("doubles", timestamp)
embed = discord.Embed(
title="Doubles pings in the last 30 Minutes:",
description=searches,
colour=discord.Colour.dark_blue(),
)
if ctx.interaction:
await ctx.send("Processing request...", ephemeral=True)
await ctx.channel.send(
f"{ctx.author.mention} is looking for {doubles_role.mention} games!\n"
"Here are the most recent Doubles pings in our open arenas:",
embed=embed,
)
else:
await ctx.send(
"Please only use this command in our arena channels!", ephemeral=True
)
ctx.command.reset_cooldown(ctx)
@doubles.error
async def doubles_error(self, ctx, error):
if not isinstance(error, commands.CommandOnCooldown):
raise error
timestamp = discord.utils.utcnow().timestamp()
if (
ctx.message.channel.id in TGArenaChannelIDs.PUBLIC_ARENAS
or ctx.message.channel.id in TGArenaChannelIDs.PRIVATE_ARENAS
):
searches = self.get_recent_pings("doubles", timestamp)
embed = discord.Embed(
title="Doubles pings in the last 30 Minutes:",
description=searches,
colour=discord.Colour.dark_blue(),
)
await ctx.send(
f"{ctx.author.mention}, you are on cooldown for another {round((error.retry_after)/60)} minutes to use this command. \n"
"In the meantime, here are the most recent Doubles pings in our open arenas:",
embed=embed,
)
else:
await ctx.send(
"Please only use this command in our arena channels!", ephemeral=True
)
@commands.hybrid_command(aliases=["matchmakingfunnies", "mmfunnies", "Funnies"])
@commands.cooldown(1, 600, commands.BucketType.user)
@app_commands.guilds(*GuildIDs.ALL_GUILDS)
@app_commands.describe(
message="Optional message, for example the ruleset you want to use."
)
async def funnies(self, ctx: commands.Context, *, message: str = None):
"""Used for 1v1 Matchmaking with non-competitive rules."""
timestamp = discord.utils.utcnow().timestamp()
funnies_role = discord.utils.get(
ctx.guild.roles, id=TGMatchmakingRoleIDs.FUNNIES_ROLE
)
if message:
message = f"`{discord.utils.remove_markdown(message[:100])}`"
else:
message = "\u200b"
if ctx.message.channel.id in TGArenaChannelIDs.PUBLIC_ARENAS:
self.store_ping(ctx, "funnies", timestamp)
searches = self.get_recent_pings("funnies", timestamp)
embed = discord.Embed(
title="Funnies pings in the last 30 Minutes:",
description=searches,
colour=discord.Colour.green(),
)
if ctx.interaction:
await ctx.send("Processing request...", ephemeral=True)
mm_message = await ctx.channel.send(
f"{ctx.author.mention} is looking for {funnies_role.mention} games: {message}",
embed=embed,
)
mm_thread = await mm_message.create_thread(
name=f"Funnies Arena of {ctx.author.name}", auto_archive_duration=60
)
await mm_thread.add_user(ctx.author)
await mm_thread.send(
f"Hi there, {ctx.author.mention}! Please use this thread for communicating with your opponent."
)
await asyncio.sleep(1800)
self.delete_ping(ctx, "funnies")
elif ctx.message.channel.id in TGArenaChannelIDs.PRIVATE_ARENAS:
searches = self.get_recent_pings("funnies", timestamp)
embed = discord.Embed(
title="Funnies pings in the last 30 Minutes:",
description=searches,
colour=discord.Colour.green(),
)
if ctx.interaction:
await ctx.send("Processing request...", ephemeral=True)
await ctx.channel.send(
f"{ctx.author.mention} is looking for {funnies_role.mention} games: {message}\n"
"Here are the most recent Funnies pings in our open arenas:",
embed=embed,
)
else:
await ctx.send(
"Please only use this command in our arena channels!", ephemeral=True
)
ctx.command.reset_cooldown(ctx)
@funnies.error
async def funnies_error(self, ctx, error):
if not isinstance(error, commands.CommandOnCooldown):
raise error
timestamp = discord.utils.utcnow().timestamp()
if (
ctx.message.channel.id in TGArenaChannelIDs.PUBLIC_ARENAS
or ctx.message.channel.id in TGArenaChannelIDs.PRIVATE_ARENAS
):
searches = self.get_recent_pings("funnies", timestamp)
embed = discord.Embed(
title="Funnies pings in the last 30 Minutes:",
description=searches,
colour=discord.Colour.green(),
)
await ctx.send(
f"{ctx.author.mention}, you are on cooldown for another {round((error.retry_after)/60)} minutes to use this command. \n"
"In the meantime, here are the most recent Funnies pings in our open arenas:",
embed=embed,
)
else:
await ctx.send(
"Please only use this command in our arena channels!", ephemeral=True
)
async def setup(bot):
await bot.add_cog(Matchmaking(bot))
print("Matchmaking cog loaded")
|
py | b40d8cd874cac321f8beccc739143f99ca6aadf2 | """adding filters field
Revision ID: 422da2d0234
Revises: 17dcb75f3fe
Create Date: 2015-05-18 23:03:15.809549
"""
# revision identifiers, used by Alembic.
revision = '422da2d0234'
down_revision = '17dcb75f3fe'
import sqlalchemy as sa
from alembic import op
def upgrade():
op.add_column('feed', sa.Column('filters', sa.PickleType(), nullable=True))
def downgrade():
op.drop_column('feed', 'filters')
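# Typical Alembic CLI usage for this revision (assumes a configured alembic.ini):
#
#     alembic upgrade 422da2d0234
#     alembic downgrade 17dcb75f3fe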
|
py | b40d8d026b6fa6b054f96e281c93f71e46229dad | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Donny You ([email protected])
import collections.abc
import random
import math
import cv2
import matplotlib
import numpy as np
from PIL import Image, ImageFilter, ImageOps
from utils.tools.logger import Logger as Log
class RandomPad(object):
""" Padding the Image to proper size.
Args:
stride: the stride of the network.
pad_value: the value that pad to the image border.
img: Image object as input.
Returns::
img: Image object.
"""
def __init__(self, up_scale_range=None, ratio=0.5, mean=(104, 117, 123)):
        # validate the configured up-scale range
assert isinstance(up_scale_range, (list, tuple))
self.up_scale_range = up_scale_range
self.ratio = ratio
self.mean = tuple(mean)
def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
assert isinstance(img, (Image.Image, list))
assert labelmap is None or isinstance(labelmap, Image.Image)
assert maskmap is None or isinstance(maskmap, Image.Image)
if random.random() > self.ratio:
return img, labelmap, maskmap, kpts, bboxes, labels, polygons
width, height = img.size
ws = random.uniform(self.up_scale_range[0], self.up_scale_range[1])
hs = ws
for _ in range(50):
scale = random.uniform(self.up_scale_range[0], self.up_scale_range[1])
min_ratio = max(0.5, 1. / scale / scale)
max_ratio = min(2, scale * scale)
ratio = math.sqrt(random.uniform(min_ratio, max_ratio))
ws = scale * ratio
hs = scale / ratio
if ws >= 1 and hs >= 1:
break
w = int(ws * width)
h = int(hs * height)
pad_width = random.randint(0, w - width)
pad_height = random.randint(0, h - height)
left_pad = random.randint(0, pad_width) # pad_left
up_pad = random.randint(0, pad_height) # pad_up
right_pad = pad_width - left_pad # pad_right
down_pad = pad_height - up_pad # pad_down
if not isinstance(img, list):
img = ImageOps.expand(img, (left_pad, up_pad, right_pad, down_pad), fill=self.mean)
else:
img = [ImageOps.expand(item, (left_pad, up_pad, right_pad, down_pad), fill=self.mean) for item in img]
if labelmap is not None:
labelmap = ImageOps.expand(labelmap, (left_pad, up_pad, right_pad, down_pad), fill=255)
if maskmap is not None:
maskmap = ImageOps.expand(maskmap, (left_pad, up_pad, right_pad, down_pad), fill=1)
if polygons is not None:
for object_id in range(len(polygons)):
for polygon_id in range(len(polygons[object_id])):
polygons[object_id][polygon_id][0::2] += left_pad
polygons[object_id][polygon_id][1::2] += up_pad
if kpts is not None and kpts.size > 0:
kpts[:, :, 0] += left_pad
kpts[:, :, 1] += up_pad
if bboxes is not None and bboxes.size > 0:
bboxes[:, 0::2] += left_pad
bboxes[:, 1::2] += up_pad
return img, labelmap, maskmap, kpts, bboxes, labels, polygons
class Padding(object):
""" Padding the Image to proper size.
Args:
stride: the stride of the network.
pad_value: the value that pad to the image border.
img: Image object as input.
Returns::
img: Image object.
"""
def __init__(self, pad=None, ratio=0.5, mean=(104, 117, 123), allow_outside_center=True):
self.pad = pad
self.ratio = ratio
self.mean = tuple(mean)
self.allow_outside_center = allow_outside_center
def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
assert isinstance(img, (Image.Image, list))
assert labelmap is None or isinstance(labelmap, Image.Image)
assert maskmap is None or isinstance(maskmap, Image.Image)
if random.random() > self.ratio:
return img, labelmap, maskmap, kpts, bboxes, labels, polygons
width, height = img.size
left_pad, up_pad, right_pad, down_pad = self.pad
target_size = [width + left_pad + right_pad, height + up_pad + down_pad]
offset_left = -left_pad
offset_up = -up_pad
if kpts is not None and kpts.size > 0:
kpts[:, :, 0] -= offset_left
kpts[:, :, 1] -= offset_up
mask = np.logical_or.reduce((kpts[:, :, 0] >= target_size[0], kpts[:, :, 0] < 0,
kpts[:, :, 1] >= target_size[1], kpts[:, :, 1] < 0))
kpts[mask == 1, 2] = -1
if bboxes is not None and bboxes.size > 0:
if self.allow_outside_center:
mask = np.ones(bboxes.shape[0], dtype=bool)
else:
crop_bb = np.array([offset_left, offset_up, offset_left + target_size[0], offset_up + target_size[1]])
center = (bboxes[:, :2] + bboxes[:, 2:]) / 2
mask = np.logical_and(crop_bb[:2] <= center, center < crop_bb[2:]).all(axis=1)
bboxes[:, 0::2] -= offset_left
bboxes[:, 1::2] -= offset_up
bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, target_size[0] - 1)
bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, target_size[1] - 1)
mask = np.logical_and(mask, (bboxes[:, :2] < bboxes[:, 2:]).all(axis=1))
bboxes = bboxes[mask]
if labels is not None:
labels = labels[mask]
if polygons is not None:
new_polygons = list()
for object_id in range(len(polygons)):
if mask[object_id] == 1:
for polygon_id in range(len(polygons[object_id])):
polygons[object_id][polygon_id][0::2] -= offset_left
polygons[object_id][polygon_id][1::2] -= offset_up
polygons[object_id][polygon_id][0::2] = np.clip(polygons[object_id][polygon_id][0::2],
0, target_size[0] - 1)
polygons[object_id][polygon_id][1::2] = np.clip(polygons[object_id][polygon_id][1::2],
0, target_size[1] - 1)
new_polygons.append(polygons[object_id])
polygons = new_polygons
if not isinstance(img, list):
img = ImageOps.expand(img, border=tuple(self.pad), fill=tuple(self.mean))
else:
img = [ImageOps.expand(item, border=tuple(self.pad), fill=tuple(self.mean)) for item in img]
if maskmap is not None:
maskmap = ImageOps.expand(maskmap, border=tuple(self.pad), fill=1)
if labelmap is not None:
labelmap = ImageOps.expand(labelmap, border=tuple(self.pad), fill=255)
return img, labelmap, maskmap, kpts, bboxes, labels, polygons
class RandomHFlip(object):
def __init__(self, swap_pair=None, ratio=0.5):
self.swap_pair = swap_pair
self.ratio = ratio
def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
assert isinstance(img, (Image.Image, list))
assert labelmap is None or isinstance(labelmap, Image.Image)
assert maskmap is None or isinstance(maskmap, Image.Image)
if random.random() > self.ratio:
return img, labelmap, maskmap, kpts, bboxes, labels, polygons
width, height = img.size
if not isinstance(img, list):
img = img.transpose(Image.FLIP_LEFT_RIGHT)
else:
img = [item.transpose(Image.FLIP_LEFT_RIGHT) for item in img]
if labelmap is not None:
labelmap = labelmap.transpose(Image.FLIP_LEFT_RIGHT)
if maskmap is not None:
maskmap = maskmap.transpose(Image.FLIP_LEFT_RIGHT)
if polygons is not None:
for object_id in range(len(polygons)):
for polygon_id in range(len(polygons[object_id])):
polygons[object_id][polygon_id][0::2] = width - 1 - polygons[object_id][polygon_id][0::2]
if bboxes is not None and bboxes.size > 0:
xmin = width - 1 - bboxes[:, 2]
xmax = width - 1 - bboxes[:, 0]
bboxes[:, 0] = xmin
bboxes[:, 2] = xmax
if kpts is not None and kpts.size > 0:
kpts[:, :, 0] = width - 1 - kpts[:, :, 0]
for pair in self.swap_pair:
temp_point = np.copy(kpts[:, pair[0] - 1])
kpts[:, pair[0] - 1] = kpts[:, pair[1] - 1]
kpts[:, pair[1] - 1] = temp_point
return img, labelmap, maskmap, kpts, bboxes, labels, polygons
class RandomSaturation(object):
def __init__(self, lower=0.5, upper=1.5, ratio=0.5):
self.lower = lower
self.upper = upper
self.ratio = ratio
assert self.upper >= self.lower, "saturation upper must be >= lower."
assert self.lower >= 0, "saturation lower must be non-negative."
def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
assert isinstance(img, Image.Image)
assert labelmap is None or isinstance(labelmap, Image.Image)
assert maskmap is None or isinstance(maskmap, Image.Image)
if random.random() > self.ratio:
return img, labelmap, maskmap, kpts, bboxes, labels, polygons
img = np.array(img).astype(np.float32)
img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
img[:, :, 1] *= random.uniform(self.lower, self.upper)
img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
img = np.clip(img, 0, 255)
return Image.fromarray(img.astype(np.uint8)), labelmap, maskmap, kpts, bboxes, labels, polygons
class RandomHue(object):
def __init__(self, delta=18, ratio=0.5):
assert 0 <= delta <= 360
self.delta = delta
self.ratio = ratio
def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
assert isinstance(img, Image.Image)
assert labelmap is None or isinstance(labelmap, Image.Image)
assert maskmap is None or isinstance(maskmap, Image.Image)
if random.random() > self.ratio:
return img, labelmap, maskmap, kpts, bboxes, labels, polygons
img = np.array(img).astype(np.float32)
img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
img[:, :, 0] += random.uniform(-self.delta, self.delta)
img[:, :, 0][img[:, :, 0] > 360] -= 360
img[:, :, 0][img[:, :, 0] < 0] += 360
img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
img = np.clip(img, 0, 255)
return Image.fromarray(img.astype(np.uint8)), labelmap, maskmap, kpts, bboxes, labels, polygons
class RandomPerm(object):
def __init__(self, ratio=0.5):
self.ratio = ratio
self.perms = ((0, 1, 2), (0, 2, 1),
(1, 0, 2), (1, 2, 0),
(2, 0, 1), (2, 1, 0))
def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
assert isinstance(img, Image.Image)
assert labelmap is None or isinstance(labelmap, Image.Image)
assert maskmap is None or isinstance(maskmap, Image.Image)
if random.random() > self.ratio:
return img, labelmap, maskmap, kpts, bboxes, labels, polygons
swap = self.perms[random.randint(0, len(self.perms)-1)]
img = np.array(img)
img = img[:, :, swap]
return Image.fromarray(img.astype(np.uint8)), labelmap, maskmap, kpts, bboxes, labels, polygons
class RandomContrast(object):
def __init__(self, lower=0.5, upper=1.5, ratio=0.5):
self.lower = lower
self.upper = upper
self.ratio = ratio
assert self.upper >= self.lower, "contrast upper must be >= lower."
assert self.lower >= 0, "contrast lower must be non-negative."
def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
assert isinstance(img, Image.Image)
assert labelmap is None or isinstance(labelmap, Image.Image)
assert maskmap is None or isinstance(maskmap, Image.Image)
if random.random() > self.ratio:
return img, labelmap, maskmap, kpts, bboxes, labels, polygons
img = np.array(img).astype(np.float32)
img *= random.uniform(self.lower, self.upper)
img = np.clip(img, 0, 255)
return Image.fromarray(img.astype(np.uint8)), labelmap, maskmap, kpts, bboxes, labels, polygons
class RandomBrightness(object):
def __init__(self, shift_value=30, ratio=0.5):
self.shift_value = shift_value
self.ratio = ratio
def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
assert isinstance(img, Image.Image)
assert labelmap is None or isinstance(labelmap, Image.Image)
assert maskmap is None or isinstance(maskmap, Image.Image)
if random.random() > self.ratio:
return img, labelmap, maskmap, kpts, bboxes, labels, polygons
shift = np.random.uniform(-self.shift_value, self.shift_value, size=1)
image = np.array(img).astype(np.float32)
image[:, :, :] += shift
image = np.around(image)
image = np.clip(image, 0, 255)
image = image.astype(np.uint8)
image = Image.fromarray(image)
return image, labelmap, maskmap, kpts, bboxes, labels, polygons
class RandomGaussBlur(object):
def __init__(self, max_blur=4, ratio=0.5):
self.max_blur = max_blur
self.ratio = ratio
def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
assert isinstance(img, Image.Image)
assert labelmap is None or isinstance(labelmap, Image.Image)
assert maskmap is None or isinstance(maskmap, Image.Image)
if random.random() > self.ratio:
return img, labelmap, maskmap, kpts, bboxes, labels, polygons
blur_value = np.random.uniform(0, self.max_blur)
img = img.filter(ImageFilter.GaussianBlur(radius=blur_value))
return img, labelmap, maskmap, kpts, bboxes, labels, polygons
class RandomHSV(object):
"""
Args:
h_range (float tuple): random ratio of the hue channel,
new_h range from h_range[0]*old_h to h_range[1]*old_h.
s_range (float tuple): random ratio of the saturation channel,
new_s range from s_range[0]*old_s to s_range[1]*old_s.
v_range (int tuple): random bias of the value channel,
new_v range from old_v-v_range to old_v+v_range.
Notice:
h range: 0-1
s range: 0-1
v range: 0-255
"""
def __init__(self, h_range, s_range, v_range, ratio=0.5):
assert isinstance(h_range, (list, tuple)) and \
isinstance(s_range, (list, tuple)) and \
isinstance(v_range, (list, tuple))
self.h_range = h_range
self.s_range = s_range
self.v_range = v_range
self.ratio = ratio
def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
assert isinstance(img, Image.Image)
assert labelmap is None or isinstance(labelmap, Image.Image)
assert maskmap is None or isinstance(maskmap, Image.Image)
if random.random() > self.ratio:
return img, labelmap, maskmap, kpts, bboxes, labels, polygons
img = np.array(img)
img_hsv = matplotlib.colors.rgb_to_hsv(img)
img_h, img_s, img_v = img_hsv[:, :, 0], img_hsv[:, :, 1], img_hsv[:, :, 2]
h_random = np.random.uniform(min(self.h_range), max(self.h_range))
s_random = np.random.uniform(min(self.s_range), max(self.s_range))
v_random = np.random.uniform(min(self.v_range), max(self.v_range))
img_h = np.clip(img_h * h_random, 0, 1)
img_s = np.clip(img_s * s_random, 0, 1)
img_v = np.clip(img_v * v_random, 0, 255)
img_hsv = np.stack([img_h, img_s, img_v], axis=2)
img_new = matplotlib.colors.hsv_to_rgb(img_hsv)
return Image.fromarray(img_new.astype(np.uint8)), labelmap, maskmap, kpts, bboxes, labels, polygons
class RandomResizedCrop(object):
"""Crop the given PIL Image to random size and aspect ratio.
A crop of random size (default: of 0.08 to 1.0) of the original size and a random
aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: expected output size of each edge
scale: range of size of the origin size cropped
ratio: range of aspect ratio of the origin aspect ratio cropped
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, scale_range=(0.08, 1.0), aspect_range=(3. / 4., 4. / 3.)):
self.size = size
self.scale = scale_range
self.ratio = aspect_range
@staticmethod
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
for attempt in range(10):
area = img.size[0] * img.size[1]
target_area = random.uniform(*scale) * area
aspect_ratio = random.uniform(*ratio)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if random.random() < 0.5:
w, h = h, w
if w <= img.size[0] and h <= img.size[1]:
i = random.randint(0, img.size[1] - h)
j = random.randint(0, img.size[0] - w)
return i, j, h, w
# Fallback
w = min(img.size[0], img.size[1])
i = (img.size[1] - w) // 2
j = (img.size[0] - w) // 2
return i, j, w, w
def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
"""
Args:
img (PIL Image): Image to be cropped and resized.
Returns:
PIL Image: Randomly cropped and resized image.
"""
assert labelmap is None and maskmap is None and kpts is None and bboxes is None and labels is None
i, j, h, w = self.get_params(img, self.scale, self.ratio)
img = img.crop((j, i, j + w, i + h))
img = img.resize(self.size, Image.BILINEAR)
return img, labelmap, maskmap, kpts, bboxes, labels, polygons
class RandomResize(object):
"""Resize the given numpy.ndarray to random size and aspect ratio.
Args:
scale_min: the min scale to resize.
scale_max: the max scale to resize.
"""
def __init__(self, scale_range=(0.75, 1.25), aspect_range=(0.9, 1.1),
target_size=None, resize_bound=None, method='random', ratio=0.5):
self.scale_range = scale_range
self.aspect_range = aspect_range
self.resize_bound = resize_bound
self.method = method
self.ratio = ratio
if target_size is not None:
if isinstance(target_size, int):
self.input_size = (target_size, target_size)
elif isinstance(target_size, (list, tuple)) and len(target_size) == 2:
self.input_size = target_size
else:
raise TypeError('Got inappropriate size arg: {}'.format(target_size))
else:
self.input_size = None
def get_scale(self, img_size, bboxes):
if self.method == 'random':
scale_ratio = random.uniform(self.scale_range[0], self.scale_range[1])
return scale_ratio
elif self.method == 'focus':
if self.input_size is not None and bboxes is not None and len(bboxes) > 0:
bboxes = np.array(bboxes)
border = bboxes[:, 2:] - bboxes[:, 0:2]
scale = 0.6 / max(max(border[:, 0]) / self.input_size[0], max(border[:, 1]) / self.input_size[1])
scale_ratio = random.uniform(self.scale_range[0], self.scale_range[1]) * scale
return scale_ratio
else:
scale_ratio = random.uniform(self.scale_range[0], self.scale_range[1])
return scale_ratio
elif self.method == 'bound':
scale1 = self.resize_bound[0] / min(img_size)
scale2 = self.resize_bound[1] / max(img_size)
scale = min(scale1, scale2)
return scale
else:
Log.error('Resize method {} is invalid.'.format(self.method))
exit(1)
def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
"""
Args:
img (Image): Image to be resized.
maskmap (Image): Mask to be resized.
kpt (list): keypoints to be resized.
center: (list): center points to be resized.
Returns:
Image: Randomly resize image.
Image: Randomly resize maskmap.
list: Randomly resize keypoints.
list: Randomly resize center points.
"""
assert isinstance(img, (Image.Image, list))
assert labelmap is None or isinstance(labelmap, Image.Image)
assert maskmap is None or isinstance(maskmap, Image.Image)
width, height = img.size
if random.random() < self.ratio:
scale_ratio = self.get_scale([width, height], bboxes)
aspect_ratio = random.uniform(*self.aspect_range)
w_scale_ratio = math.sqrt(aspect_ratio) * scale_ratio
h_scale_ratio = math.sqrt(1.0 / aspect_ratio) * scale_ratio
else:
w_scale_ratio, h_scale_ratio = 1.0, 1.0
if kpts is not None and kpts.size > 0:
kpts[:, :, 0] *= w_scale_ratio
kpts[:, :, 1] *= h_scale_ratio
if bboxes is not None and bboxes.size > 0:
bboxes[:, 0::2] *= w_scale_ratio
bboxes[:, 1::2] *= h_scale_ratio
if polygons is not None:
for object_id in range(len(polygons)):
for polygon_id in range(len(polygons[object_id])):
polygons[object_id][polygon_id][0::2] *= w_scale_ratio
polygons[object_id][polygon_id][1::2] *= h_scale_ratio
converted_size = (int(width * w_scale_ratio), int(height * h_scale_ratio))
if not isinstance(img, list):
img = img.resize(converted_size, Image.BILINEAR)
else:
img = [item.resize(converted_size, Image.BILINEAR) for item in img]
if labelmap is not None:
labelmap = labelmap.resize(converted_size, Image.NEAREST)
if maskmap is not None:
maskmap = maskmap.resize(converted_size, Image.NEAREST)
return img, labelmap, maskmap, kpts, bboxes, labels, polygons
class RandomRotate(object):
"""Rotate the input numpy.ndarray and points to the given degree.
Args:
degree (number): Desired rotate degree.
"""
def __init__(self, max_degree, ratio=0.5, mean=(104, 117, 123)):
assert isinstance(max_degree, int)
self.max_degree = max_degree
self.ratio = ratio
self.mean = tuple(mean)
def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
"""
Args:
img (Image): Image to be rotated.
maskmap (Image): Mask to be rotated.
kpt (np.array): Keypoints to be rotated.
center (list): Center points to be rotated.
Returns:
Image: Rotated image.
list: Rotated key points.
"""
assert isinstance(img, (Image.Image, list))
assert labelmap is None or isinstance(labelmap, Image.Image)
assert maskmap is None or isinstance(maskmap, Image.Image)
if random.random() < self.ratio:
rotate_degree = random.uniform(-self.max_degree, self.max_degree)
else:
return img, labelmap, maskmap, kpts, bboxes, labels, polygons
        img_list = img if isinstance(img, list) else None
        img = np.array(img_list[0] if img_list is not None else img)
        height, width, _ = img.shape
        img_center = (width / 2.0, height / 2.0)
rotate_mat = cv2.getRotationMatrix2D(img_center, rotate_degree, 1.0)
cos_val = np.abs(rotate_mat[0, 0])
sin_val = np.abs(rotate_mat[0, 1])
new_width = int(height * sin_val + width * cos_val)
new_height = int(height * cos_val + width * sin_val)
rotate_mat[0, 2] += (new_width / 2.) - img_center[0]
rotate_mat[1, 2] += (new_height / 2.) - img_center[1]
        if img_list is None:
            img = cv2.warpAffine(img, rotate_mat, (new_width, new_height), borderValue=self.mean)
            img = Image.fromarray(img.astype(np.uint8))
        else:
            # Apply the same rotation to every image in the list.
            img = [Image.fromarray(cv2.warpAffine(np.array(item), rotate_mat, (new_width, new_height),
                                                  borderValue=self.mean).astype(np.uint8)) for item in img_list]
if labelmap is not None:
labelmap = np.array(labelmap)
labelmap = cv2.warpAffine(labelmap, rotate_mat, (new_width, new_height),
borderValue=(255, 255, 255), flags=cv2.INTER_NEAREST)
labelmap = Image.fromarray(labelmap.astype(np.uint8))
if maskmap is not None:
maskmap = np.array(maskmap)
maskmap = cv2.warpAffine(maskmap, rotate_mat, (new_width, new_height),
borderValue=(1, 1, 1), flags=cv2.INTER_NEAREST)
maskmap = Image.fromarray(maskmap.astype(np.uint8))
if polygons is not None:
for object_id in range(len(polygons)):
for polygon_id in range(len(polygons[object_id])):
for i in range(len(polygons[object_id][polygon_id]) // 2):
x = polygons[object_id][polygon_id][i * 2]
y = polygons[object_id][polygon_id][i * 2 + 1]
p = np.array([x, y, 1])
p = rotate_mat.dot(p)
polygons[object_id][polygon_id][i * 2] = p[0]
polygons[object_id][polygon_id][i * 2 + 1] = p[1]
if kpts is not None and kpts.size > 0:
num_objects = len(kpts)
num_keypoints = len(kpts[0])
for i in range(num_objects):
for j in range(num_keypoints):
x = kpts[i][j][0]
y = kpts[i][j][1]
p = np.array([x, y, 1])
p = rotate_mat.dot(p)
kpts[i][j][0] = p[0]
kpts[i][j][1] = p[1]
        # Note: rotating the axis-aligned boxes corner-by-corner and re-fitting them only approximates
        # the true rotated boxes, so it is not exact for object detection tasks.
if bboxes is not None and bboxes.size > 0:
for i in range(len(bboxes)):
bbox_temp = [bboxes[i][0], bboxes[i][1], bboxes[i][2], bboxes[i][1],
bboxes[i][0], bboxes[i][3], bboxes[i][2], bboxes[i][3]]
for node in range(4):
x = bbox_temp[node * 2]
y = bbox_temp[node * 2 + 1]
p = np.array([x, y, 1])
p = rotate_mat.dot(p)
bbox_temp[node * 2] = p[0]
bbox_temp[node * 2 + 1] = p[1]
bboxes[i] = [min(bbox_temp[0], bbox_temp[2], bbox_temp[4], bbox_temp[6]),
min(bbox_temp[1], bbox_temp[3], bbox_temp[5], bbox_temp[7]),
max(bbox_temp[0], bbox_temp[2], bbox_temp[4], bbox_temp[6]),
max(bbox_temp[1], bbox_temp[3], bbox_temp[5], bbox_temp[7])]
return img, labelmap, maskmap, kpts, bboxes, labels, polygons
class RandomCrop(object):
"""Crop the given numpy.ndarray and at a random location.
Args:
size (int or tuple): Desired output size of the crop.(w, h)
"""
def __init__(self, crop_size, ratio=0.5, method='focus', grid=None, allow_outside_center=True):
self.ratio = ratio
self.method = method
self.grid = grid
self.allow_outside_center = allow_outside_center
if isinstance(crop_size, float):
self.size = (crop_size, crop_size)
        elif isinstance(crop_size, collections.abc.Iterable) and len(crop_size) == 2:
self.size = crop_size
else:
raise TypeError('Got inappropriate size arg: {}'.format(crop_size))
def get_lefttop(self, crop_size, img_size):
if self.method == 'center':
return [(img_size[0] - crop_size[0]) // 2, (img_size[1] - crop_size[1]) // 2]
elif self.method == 'random':
x = random.randint(0, img_size[0] - crop_size[0])
y = random.randint(0, img_size[1] - crop_size[1])
return [x, y]
elif self.method == 'grid':
grid_x = random.randint(0, self.grid[0] - 1)
grid_y = random.randint(0, self.grid[1] - 1)
x = grid_x * ((img_size[0] - crop_size[0]) // (self.grid[0] - 1))
y = grid_y * ((img_size[1] - crop_size[1]) // (self.grid[1] - 1))
return [x, y]
else:
Log.error('Crop method {} is invalid.'.format(self.method))
exit(1)
def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
"""
Args:
img (Image): Image to be cropped.
maskmap (Image): Mask to be cropped.
kpts (np.array): keypoints to be cropped.
bboxes (np.array): bounding boxes.
Returns:
Image: Cropped image.
Image: Cropped maskmap.
np.array: Cropped keypoints.
np.ndarray: Cropped center points.
"""
assert isinstance(img, (Image.Image, list))
assert labelmap is None or isinstance(labelmap, Image.Image)
assert maskmap is None or isinstance(maskmap, Image.Image)
if random.random() > self.ratio:
return img, labelmap, maskmap, kpts, bboxes, labels, polygons
target_size = (min(self.size[0], img.size[0]), min(self.size[1], img.size[1]))
offset_left, offset_up = self.get_lefttop(target_size, img.size)
# img = ImageHelper.draw_box(img, bboxes[index])
if kpts is not None and kpts.size > 0:
kpts[:, :, 0] -= offset_left
kpts[:, :, 1] -= offset_up
if bboxes is not None and bboxes.size > 0:
if self.allow_outside_center:
mask = np.ones(bboxes.shape[0], dtype=bool)
else:
crop_bb = np.array([offset_left, offset_up, offset_left + target_size[0], offset_up + target_size[1]])
center = (bboxes[:, :2] + bboxes[:, 2:]) / 2
mask = np.logical_and(crop_bb[:2] <= center, center < crop_bb[2:]).all(axis=1)
bboxes[:, 0::2] -= offset_left
bboxes[:, 1::2] -= offset_up
bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, target_size[0] - 1)
bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, target_size[1] - 1)
mask = np.logical_and(mask, (bboxes[:, :2] < bboxes[:, 2:]).all(axis=1))
bboxes = bboxes[mask]
if labels is not None:
labels = labels[mask]
if polygons is not None:
new_polygons = list()
for object_id in range(len(polygons)):
if mask[object_id] == 1:
for polygon_id in range(len(polygons[object_id])):
polygons[object_id][polygon_id][0::2] -= offset_left
polygons[object_id][polygon_id][1::2] -= offset_up
polygons[object_id][polygon_id][0::2] = np.clip(polygons[object_id][polygon_id][0::2],
0, target_size[0] - 1)
polygons[object_id][polygon_id][1::2] = np.clip(polygons[object_id][polygon_id][1::2],
0, target_size[1] - 1)
new_polygons.append(polygons[object_id])
polygons = new_polygons
if not isinstance(img, list):
img = img.crop((offset_left, offset_up, offset_left + target_size[0], offset_up + target_size[1]))
else:
img = [item.crop((offset_left, offset_up,
offset_left + target_size[0], offset_up + target_size[1])) for item in img]
if maskmap is not None:
maskmap = maskmap.crop((offset_left, offset_up, offset_left + target_size[0], offset_up + target_size[1]))
if labelmap is not None:
labelmap = labelmap.crop((offset_left, offset_up, offset_left + target_size[0], offset_up + target_size[1]))
return img, labelmap, maskmap, kpts, bboxes, labels, polygons
class RandomFocusCrop(object):
"""Crop the given numpy.ndarray and at a random location.
Args:
size (int or tuple): Desired output size of the crop.(w, h)
"""
def __init__(self, crop_size, ratio=0.5, center_jitter=None, mean=(104, 117, 123), allow_outside_center=True):
self.ratio = ratio
self.center_jitter = center_jitter
self.mean = mean
self.allow_outside_center = allow_outside_center
if isinstance(crop_size, float):
self.size = (crop_size, crop_size)
        elif isinstance(crop_size, collections.abc.Iterable) and len(crop_size) == 2:
self.size = crop_size
else:
raise TypeError('Got inappropriate size arg: {}'.format(crop_size))
def get_center(self, img_size, bboxes):
max_center = [img_size[0] // 2, img_size[1] // 2]
if bboxes is None or len(bboxes) == 0:
if img_size[0] > self.size[0]:
x = random.randint(self.size[0] // 2, img_size[0] - self.size[0] // 2)
else:
x = img_size[0] // 2
if img_size[1] > self.size[1]:
y = random.randint(self.size[1] // 2, img_size[1] - self.size[1] // 2)
else:
y = img_size[1] // 2
return [x, y], -1
else:
border = bboxes[:, 2:] - bboxes[:, 0:2]
area = border[:, 0] * border[:, 1]
max_index = np.argmax(area)
max_center = [(bboxes[max_index][0] + bboxes[max_index][2]) / 2,
(bboxes[max_index][1] + bboxes[max_index][3]) / 2]
if self.center_jitter is not None:
jitter = random.randint(-self.center_jitter, self.center_jitter)
max_center[0] += jitter
jitter = random.randint(-self.center_jitter, self.center_jitter)
max_center[1] += jitter
return max_center, max_index
def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
"""
Args:
img (Image): Image to be cropped.
maskmap (Image): Mask to be cropped.
kpts (np.array): keypoints to be cropped.
bboxes (np.array): bounding boxes.
Returns:
Image: Cropped image.
Image: Cropped maskmap.
list: Cropped keypoints.
list: Cropped center points.
"""
assert isinstance(img, Image.Image)
assert labelmap is None or isinstance(labelmap, Image.Image)
assert maskmap is None or isinstance(maskmap, Image.Image)
if random.random() > self.ratio:
return img, labelmap, maskmap, kpts, bboxes, labels, polygons
center, index = self.get_center(img.size, bboxes)
# img = ImageHelper.draw_box(img, bboxes[index])
offset_left = int(center[0] - self.size[0] // 2)
offset_up = int(center[1] - self.size[1] // 2)
if kpts is not None and kpts.size > 0:
kpts[:, :, 0] -= offset_left
kpts[:, :, 1] -= offset_up
mask = np.logical_or.reduce((kpts[:, :, 0] >= self.size[0], kpts[:, :, 0] < 0,
kpts[:, :, 1] >= self.size[1], kpts[:, :, 1] < 0))
kpts[mask == 1, 2] = -1
if bboxes is not None and bboxes.size > 0:
if self.allow_outside_center:
mask = np.ones(bboxes.shape[0], dtype=bool)
else:
crop_bb = np.array([offset_left, offset_up, offset_left + self.size[0], offset_up + self.size[1]])
center = (bboxes[:, :2] + bboxes[:, 2:]) / 2
mask = np.logical_and(crop_bb[:2] <= center, center < crop_bb[2:]).all(axis=1)
bboxes[:, 0::2] -= offset_left
bboxes[:, 1::2] -= offset_up
bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, self.size[0] - 1)
bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, self.size[1] - 1)
mask = np.logical_and(mask, (bboxes[:, :2] < bboxes[:, 2:]).all(axis=1))
bboxes = bboxes[mask]
if labels is not None:
labels = labels[mask]
if polygons is not None:
new_polygons = list()
for object_id in range(len(polygons)):
if mask[object_id] == 1:
for polygon_id in range(len(polygons[object_id])):
polygons[object_id][polygon_id][0::2] -= offset_left
polygons[object_id][polygon_id][1::2] -= offset_up
polygons[object_id][polygon_id][0::2] = np.clip(polygons[object_id][polygon_id][0::2],
0, self.size[0] - 1)
polygons[object_id][polygon_id][1::2] = np.clip(polygons[object_id][polygon_id][1::2],
0, self.size[1] - 1)
new_polygons.append(polygons[object_id])
polygons = new_polygons
w, h = img.size
img = ImageOps.expand(img,
border=(-offset_left, -offset_up,
self.size[0] + offset_left - w, self.size[1] + offset_up - h),
fill=tuple(self.mean))
img = img.crop((0, 0, self.size[0], self.size[1]))
if maskmap is not None:
maskmap = ImageOps.expand(maskmap,
border=(-offset_left, -offset_up,
self.size[0] + offset_left - w, self.size[1] + offset_up - h), fill=1)
maskmap = maskmap.crop((0, 0, self.size[0], self.size[1]))
if labelmap is not None:
labelmap = ImageOps.expand(labelmap, border=(-offset_left, -offset_up,
self.size[0] + offset_left - w,
self.size[1] + offset_up - h), fill=255)
labelmap = labelmap.crop((0, 0, self.size[0], self.size[1]))
return img, labelmap, maskmap, kpts, bboxes, labels, polygons
class RandomDetCrop(object):
"""Crop
Arguments:
img (Image): the image being input during training
boxes (Tensor): the original bounding boxes in pt form
labels (Tensor): the class labels for each bbox
mode (float tuple): the min and max jaccard overlaps
Return:
(img, boxes, classes)
img (Image): the cropped image
boxes (Tensor): the adjusted bounding boxes in pt form
labels (Tensor): the class labels for each bbox
"""
def __init__(self, ratio=0.5, mean=(104, 117, 123)):
self.ratio = ratio
self.mean = mean
self.sample_options = (
# using entire original input image
None,
            # sample a patch such that the MIN jaccard overlap with the objects is 0.1, 0.3, 0.5, 0.7 or 0.9
(0.1, None),
(0.3, None),
(0.5, None),
(0.7, None),
(0.9, None),
# randomly sample a patch
(None, None),
)
@staticmethod
def intersect(box_a, box_b):
max_xy = np.minimum(box_a[:, 2:], box_b[2:])
min_xy = np.maximum(box_a[:, :2], box_b[:2])
inter = np.clip((max_xy - min_xy), a_min=0, a_max=np.inf)
return inter[:, 0] * inter[:, 1]
@staticmethod
def jaccard_numpy(box_a, box_b):
"""Compute the jaccard overlap of two sets of boxes. The jaccard overlap
is simply the intersection over union of two boxes.
E.g.:
A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
Args:
box_a: Multiple bounding boxes, Shape: [num_boxes,4]
box_b: Single bounding box, Shape: [4]
Return:
            jaccard overlap: Shape: [box_a.shape[0]]
"""
inter = RandomDetCrop.intersect(box_a, box_b)
area_a = ((box_a[:, 2] - box_a[:, 0]) *
(box_a[:, 3] - box_a[:, 1])) # [A,B]
area_b = ((box_b[2] - box_b[0]) *
(box_b[3] - box_b[1])) # [A,B]
union = area_a + area_b - inter
return inter / union # [A,B]
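    # Worked example for jaccard_numpy (illustrative numbers only):
    #   box_a = np.array([[0., 0., 10., 10.]]), box_b = np.array([5., 5., 15., 15.])
    #   intersection = 5 * 5 = 25, union = 100 + 100 - 25 = 175, IoU = 25 / 175 ≈ 0.143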
def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
assert labelmap is None and maskmap is None and kpts is None and polygons is None
assert bboxes is not None and labels is not None
if random.random() > self.ratio:
return img, labelmap, maskmap, kpts, bboxes, labels, polygons
width, height = img.size
while True:
# randomly choose a mode
mode = random.choice(self.sample_options)
if mode is None or bboxes.size == 0:
return img, labelmap, maskmap, kpts, bboxes, labels, polygons
min_iou, max_iou = mode
if min_iou is None:
min_iou = float('-inf')
if max_iou is None:
max_iou = float('inf')
            # max trials (50)
for _ in range(50):
scale = random.uniform(0.3, 1.)
min_ratio = max(0.5, scale * scale)
max_ratio = min(2.0, 1. / scale / scale)
ratio = math.sqrt(random.uniform(min_ratio, max_ratio))
w = int(scale * ratio * width)
h = int((scale / ratio) * height)
left = random.randint(0, width - w)
top = random.randint(0, height - h)
# convert to integer rect x1,y1,x2,y2
rect = np.array([int(left), int(top), int(left+w), int(top+h)])
# calculate IoU (jaccard overlap) b/t the cropped and gt boxes
overlap = self.jaccard_numpy(bboxes, rect)
# is min and max overlap constraint satisfied? if not try again
if overlap.min() < min_iou or max_iou < overlap.max():
continue
# keep overlap with gt box IF center in sampled patch
centers = (bboxes[:, :2] + bboxes[:, 2:]) / 2.0
                # keep gt boxes whose centers lie to the right of and below the patch's top-left corner
m1 = (rect[0] < centers[:, 0]) * (rect[1] < centers[:, 1])
                # keep gt boxes whose centers lie to the left of and above the patch's bottom-right corner
m2 = (rect[2] > centers[:, 0]) * (rect[3] > centers[:, 1])
                # keep only the boxes whose centers fall inside the sampled patch (both m1 and m2 hold)
mask = m1 * m2
# have any valid boxes? try again if not
if not mask.any():
continue
# take only matching gt boxes
current_boxes = bboxes[mask, :].copy()
# cut the crop from the image
current_img = img.crop((left, top, left + w, top + h))
# take only matching gt labels
current_labels = labels[mask]
# should we use the box left and top corner or the crop's
current_boxes[:, :2] = np.maximum(current_boxes[:, :2], rect[:2])
                # adjust to crop (by subtracting the crop's left/top offset)
current_boxes[:, :2] -= rect[:2]
current_boxes[:, 2:] = np.minimum(current_boxes[:, 2:], rect[2:])
                # adjust to crop (by subtracting the crop's left/top offset)
current_boxes[:, 2:] -= rect[:2]
return current_img, labelmap, maskmap, kpts, current_boxes, current_labels, polygons
class Resize(object):
def __init__(self, target_size=None, min_side_length=None, max_side_length=None):
self.target_size = target_size
self.min_side_length = min_side_length
self.max_side_length = max_side_length
def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
assert isinstance(img, (Image.Image, list))
assert labelmap is None or isinstance(labelmap, Image.Image)
assert maskmap is None or isinstance(maskmap, Image.Image)
width, height = img.size
if self.target_size is not None:
target_size = self.target_size
w_scale_ratio = self.target_size[0] / width
h_scale_ratio = self.target_size[1] / height
elif self.min_side_length is not None:
scale_ratio = self.min_side_length / min(width, height)
w_scale_ratio, h_scale_ratio = scale_ratio, scale_ratio
target_size = [int(round(width * w_scale_ratio)), int(round(height * h_scale_ratio))]
else:
scale_ratio = self.max_side_length / max(width, height)
w_scale_ratio, h_scale_ratio = scale_ratio, scale_ratio
target_size = [int(round(width * w_scale_ratio)), int(round(height * h_scale_ratio))]
if kpts is not None and kpts.size > 0:
kpts[:, :, 0] *= w_scale_ratio
kpts[:, :, 1] *= h_scale_ratio
if bboxes is not None and bboxes.size > 0:
bboxes[:, 0::2] *= w_scale_ratio
bboxes[:, 1::2] *= h_scale_ratio
if polygons is not None:
for object_id in range(len(polygons)):
for polygon_id in range(len(polygons[object_id])):
polygons[object_id][polygon_id][0::2] *= w_scale_ratio
polygons[object_id][polygon_id][1::2] *= h_scale_ratio
if not isinstance(img, list):
img = img.resize(target_size, Image.BILINEAR)
else:
img = [item.resize(target_size, Image.BILINEAR) for item in img]
if labelmap is not None:
labelmap = labelmap.resize(target_size, Image.NEAREST)
if maskmap is not None:
maskmap = maskmap.resize(target_size, Image.NEAREST)
return img, labelmap, maskmap, kpts, bboxes, labels, polygons
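# A hedged example of applying one of the transforms above directly (the image path and
# parameter values are illustrative assumptions):
#
#     img = Image.open("example.jpg")
#     flip = RandomHFlip(swap_pair=[], ratio=1.0)
#     img, *_ = flip(img)
#     img, *_ = Resize(target_size=(512, 512))(img)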
PIL_AUGMENTATIONS_DICT = {
'random_saturation': RandomSaturation,
'random_hue': RandomHue,
'random_perm': RandomPerm,
'random_contrast': RandomContrast,
'random_brightness': RandomBrightness,
'random_gauss_blur': RandomGaussBlur,
'random_hsv': RandomHSV,
'random_pad': RandomPad,
'padding': Padding,
'random_hflip': RandomHFlip,
'random_resize': RandomResize,
'random_crop': RandomCrop,
'random_focus_crop': RandomFocusCrop,
'random_det_crop': RandomDetCrop,
'random_resized_crop': RandomResizedCrop,
'random_rotate': RandomRotate,
'resize': Resize
}
class PILAugCompose(object):
"""Composes several transforms together.
Args:
transforms (list of ``Transform`` objects): list of transforms to compose.
Example:
        >>> PILAugCompose(configer, split='train')
"""
def __init__(self, configer, split='train'):
self.configer = configer
self.transforms = dict()
self.split = split
aug_trans = self.configer.get(split, 'aug_trans')
shuffle_train_trans = []
if 'shuffle_trans_seq' in aug_trans:
if isinstance(aug_trans['shuffle_trans_seq'][0], list):
train_trans_seq_list = aug_trans['shuffle_trans_seq']
for train_trans_seq in train_trans_seq_list:
shuffle_train_trans += train_trans_seq
else:
shuffle_train_trans = aug_trans['shuffle_trans_seq']
for trans in aug_trans['trans_seq'] + shuffle_train_trans:
self.transforms[trans] = PIL_AUGMENTATIONS_DICT[trans](**aug_trans[trans])
def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
aug_trans = self.configer.get(self.split, 'aug_trans')
shuffle_trans_seq = []
if 'shuffle_trans_seq' in aug_trans:
if isinstance(aug_trans['shuffle_trans_seq'][0], list):
shuffle_trans_seq_list = aug_trans['shuffle_trans_seq']
                shuffle_trans_seq = random.choice(shuffle_trans_seq_list)
else:
shuffle_trans_seq = aug_trans['shuffle_trans_seq']
random.shuffle(shuffle_trans_seq)
for trans_key in (shuffle_trans_seq + aug_trans['trans_seq']):
(img, labelmap, maskmap, kpts,
bboxes, labels, polygons) = self.transforms[trans_key](img, labelmap, maskmap,
kpts, bboxes, labels, polygons)
out_list = [img]
for elem in [labelmap, maskmap, kpts, bboxes, labels, polygons]:
if elem is not None:
out_list.append(elem)
return out_list if len(out_list) > 1 else out_list[0]
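# A hedged sketch of the 'aug_trans' configuration this composer expects from the configer
# (keys and values below are illustrative assumptions, not a real config file):
#
#     {
#         "trans_seq": ["random_hflip", "resize"],
#         "shuffle_trans_seq": ["random_brightness", "random_contrast"],
#         "random_hflip": {"swap_pair": [], "ratio": 0.5},
#         "resize": {"target_size": [512, 512]},
#         "random_brightness": {"shift_value": 30, "ratio": 0.5},
#         "random_contrast": {"lower": 0.5, "upper": 1.5, "ratio": 0.5}
#     }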
|
py | b40d8f10d3df7d7c38d983068a060c1de19a3ce5 | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic list exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in list2.py.
# A. match_ends
# Given a list of strings, return the count of the number of
# strings where the string length is 2 or more and the first
# and last chars of the string are the same.
# Note: python does not have a ++ operator, but += works.
def match_ends(words):
# +++your code here+++
res = 0
for word in words:
if len(word) >= 2 and word[0] == word[-1]:
res += 1
return res
# B. front_x
# Given a list of strings, return a list with the strings
# in sorted order, except group all the strings that begin with 'x' first.
# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by making 2 lists and sorting each of them
# before combining them.
def front_x(words):
# +++your code here+++
listx = []
listy = []
for word in words:
if word[0] == 'x':
listx.append(word)
else:
listy.append(word)
return sorted(listx) + sorted(listy)
# C. sort_last
# Given a list of non-empty tuples, return a list sorted in increasing
# order by the last element in each tuple.
# e.g. [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields
# [(2, 2), (1, 3), (3, 4, 5), (1, 7)]
# Hint: use a custom key= function to extract the last element from each tuple.
def sort_last(tuples):
# +++your code here+++
  # Sort by the last element of each tuple, as the problem statement asks.
  def last(t):
    return t[-1]
  return sorted(tuples, key=last)
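# An equivalent one-liner using a lambda as the key function:
#   return sorted(tuples, key=lambda t: t[-1])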
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
print 'match_ends'
test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
print
print 'front_x'
test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
print
print 'sort_last'
test(sort_last([(1, 3), (3, 2), (2, 1)]),
[(2, 1), (3, 2), (1, 3)])
test(sort_last([(2, 3), (1, 2), (3, 1)]),
[(3, 1), (1, 2), (2, 3)])
test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),
[(2, 2), (1, 3), (3, 4, 5), (1, 7)])
if __name__ == '__main__':
main()
|
py | b40d8f9851d2041bd0e212481a308e1bd2e4e56d | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class DeleteImagePipelineRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DeleteImagePipeline','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ImagePipelineId(self):
return self.get_query_params().get('ImagePipelineId')
def set_ImagePipelineId(self,ImagePipelineId):
self.add_query_param('ImagePipelineId',ImagePipelineId)
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_TemplateTags(self):
return self.get_query_params().get('TemplateTag')
def set_TemplateTags(self, TemplateTags):
for depth1 in range(len(TemplateTags)):
if TemplateTags[depth1].get('Key') is not None:
self.add_query_param('TemplateTag.' + str(depth1 + 1) + '.Key', TemplateTags[depth1].get('Key'))
if TemplateTags[depth1].get('Value') is not None:
self.add_query_param('TemplateTag.' + str(depth1 + 1) + '.Value', TemplateTags[depth1].get('Value'))
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId) |
py | b40d8f99d862a31b564dd0b021cc151f30598c15 | from abc import ABC, abstractmethod
from dataclasses import dataclass
from ..misc.enums import SolverType
class Solver:
@dataclass
class Generic(ABC):
"""
Abstract class for Solver Options
Methods
-------
set_convergence_tolerance(self,tol: float):
Set some convergence tolerance
set_constraint_tolerance(self, tol: float):
Set some constraint tolerance
"""
type: SolverType = SolverType.NONE
@abstractmethod
def set_convergence_tolerance(self, tol: float):
"""
            This function sets the convergence tolerance
Parameters
----------
tol: float
                Global convergence tolerance value
"""
@abstractmethod
def set_constraint_tolerance(self, tol: float):
"""
            This function sets the constraint tolerance.
Parameters
----------
tol: float
Global constraint tolerance value
"""
@abstractmethod
def set_maximum_iterations(self, num: int):
"""
            This function sets the maximum number of iterations.
Parameters
----------
num: int
Number of iterations
"""
@abstractmethod
def as_dict(self, solver) -> dict:
"""
            This function returns the options dict used to launch the optimization
Parameters
----------
solver: SolverInterface
                Ipopt or Acados interface
"""
@abstractmethod
def set_print_level(self, num: int):
"""
            This function sets the output verbosity level.
Parameters
----------
num: int
print_level
"""
@dataclass
class IPOPT(Generic):
"""
Class for Solver Options of IPOPT
Attributes
----------
show_online_optim: bool
If the plot should be shown while optimizing. It will slow down the optimization a bit
solver_options: Generic
Any options to change the behavior of the solver. To know which options are available, you can refer to the
manual of the corresponding solver
show_options: dict
The graphs option to pass to PlotOcp
_tol: float
Desired convergence tolerance (relative)
_dual_inf_tol: float
Desired threshold for the dual infeasibility
_constr_viol_tol: float
Desired threshold for the constraint and variable bound violation.
_compl_inf_tol: float
Desired threshold for the complementarity conditions.
_acceptable_tol: float
Acceptable convergence tolerance (relative).
_acceptable_dual_inf_tol: float
Acceptance threshold for the dual infeasibility
_acceptable_constr_viol_tol: float
Acceptance threshold for the constraint violation.
_acceptable_compl_inf_tol: float
"Acceptance" threshold for the complementarity conditions.
_max_iter: int
Maximum number of iterations.
_hessian_approximation: str
Indicates what Hessian information is to be used.
_limited_memory_max_history: int
Maximum size of the history for the limited quasi-Newton Hessian approximation.
_linear_solver: str
Linear solver used for step computations.
_mu_init: float
Initial value for the barrier parameter.
        _warm_start_init_point: str
Warm-start for initial point
_warm_start_mult_bound_push: float
same as mult_bound_push for the regular initializer
_warm_start_slack_bound_push: float
same as slack_bound_push for the regular initializer
_warm_start_slack_bound_frac: float
same as slack_bound_frac for the regular initializer
_warm_start_bound_frac: float
same as bound_frac for the regular initializer
_bound_push: float
Desired minimum absolute distance from the initial point to bound.
_bound_frac: float
Desired minimum relative distance from the initial point to bound.
        _print_level: int
Output verbosity level. Sets the default verbosity level for console output.
The larger this value the more detailed is the output.
The valid range for this integer option is 0 ≤ print_level ≤ 12 and its default value is 5.
_c_compile: bool
            True if you want to compile the code in C.
"""
type: SolverType = SolverType.IPOPT
show_online_optim: bool = False
show_options: dict = None
_tol: float = 1e-6 # default in ipopt 1e-8
_dual_inf_tol: float = 1.0
_constr_viol_tol: float = 0.0001
_compl_inf_tol: float = 0.0001
_acceptable_tol: float = 1e-6
_acceptable_dual_inf_tol: float = 1e10
_acceptable_constr_viol_tol: float = 1e-2
_acceptable_compl_inf_tol: float = 1e-2
_max_iter: int = 1000
_hessian_approximation: str = "exact" # "exact", "limited-memory"
_limited_memory_max_history: int = 50
_linear_solver: str = "mumps" # "ma57", "ma86", "mumps"
_mu_init: float = 0.1
_warm_start_init_point: str = "no"
_warm_start_mult_bound_push: float = 0.001
_warm_start_slack_bound_push: float = 0.001
_warm_start_bound_push: float = 0.001
_warm_start_slack_bound_frac: float = 0.001
_warm_start_bound_frac: float = 0.001
_bound_push: float = 0.01
_bound_frac: float = 0.01
_print_level: int = 5
_c_compile: bool = False
@property
def tol(self):
return self._tol
@property
def dual_inf_tol(self):
            return self._dual_inf_tol
@property
def constr_viol_tol(self):
            return self._constr_viol_tol
@property
def compl_inf_tol(self):
return self._compl_inf_tol
@property
def acceptable_tol(self):
return self._acceptable_tol
@property
def acceptable_dual_inf_tol(self):
return self._acceptable_dual_inf_tol
@property
def acceptable_constr_viol_tol(self):
return self._acceptable_constr_viol_tol
@property
def acceptable_compl_inf_tol(self):
return self._acceptable_compl_inf_tol
@property
def max_iter(self):
return self._max_iter
@property
def hessian_approximation(self):
return self._hessian_approximation
@property
def limited_memory_max_history(self):
return self._limited_memory_max_history
@property
def linear_solver(self):
return self._linear_solver
@property
def mu_init(self):
return self._mu_init
@property
def warm_start_init_point(self):
return self._warm_start_init_point
@property
def warm_start_mult_bound_push(self):
return self._warm_start_mult_bound_push
@property
def warm_start_slack_bound_push(self):
return self._warm_start_slack_bound_push
@property
def warm_start_bound_push(self):
return self._warm_start_bound_push
@property
def warm_start_slack_bound_frac(self):
            return self._warm_start_slack_bound_frac
@property
def warm_start_bound_frac(self):
return self._warm_start_bound_frac
@property
def bound_push(self):
return self._bound_push
@property
def bound_frac(self):
return self._bound_frac
@property
def print_level(self):
return self._print_level
@property
def c_compile(self):
return self._c_compile
def set_tol(self, val: float):
self._tol = val
def set_dual_inf_tol(self, val: float):
            self._dual_inf_tol = val
def set_constr_viol_tol(self, val: float):
            self._constr_viol_tol = val
def set_compl_inf_tol(self, val: float):
self._compl_inf_tol = val
def set_acceptable_tol(self, val: float):
self._acceptable_tol = val
def set_acceptable_dual_inf_tol(self, val: float):
self._acceptable_dual_inf_tol = val
def set_acceptable_constr_viol_tol(self, val: float):
self._acceptable_constr_viol_tol = val
def set_acceptable_compl_inf_tol(self, val: float):
self._acceptable_compl_inf_tol = val
def set_maximum_iterations(self, num):
self._max_iter = num
def set_hessian_approximation(self, val: str):
self._hessian_approximation = val
def set_limited_memory_max_history(self, num: int):
self._limited_memory_max_history = num
def set_linear_solver(self, val: str):
self._linear_solver = val
def set_mu_init(self, val: float):
self._mu_init = val
def set_warm_start_init_point(self, val: str):
self._warm_start_init_point = val
def set_warm_start_mult_bound_push(self, val: float):
self._warm_start_mult_bound_push = val
def set_warm_start_slack_bound_push(self, val: float):
self._warm_start_slack_bound_push = val
def set_warm_start_bound_push(self, val: float):
self._warm_start_bound_push = val
def set_warm_start_slack_bound_frac(self, val: float):
self._warm_start_slack_bound_frac = val
def set_warm_start_bound_frac(self, val: float):
self._warm_start_bound_frac = val
def set_bound_push(self, val: float):
self._bound_push = val
def set_bound_frac(self, val: float):
self._bound_frac = val
def set_print_level(self, num: int):
self._print_level = num
def set_c_compile(self, val: bool):
self._c_compile = val
def set_convergence_tolerance(self, val: float):
self._tol = val
self._compl_inf_tol = val
self._acceptable_tol = val
self._acceptable_compl_inf_tol = val
def set_constraint_tolerance(self, val: float):
self._constr_viol_tol = val
self._acceptable_constr_viol_tol = val
def set_warm_start_options(self, val: float = 1e-10):
"""
This function set global warm start options
Parameters
----------
val: float
warm start value
"""
self._warm_start_init_point = "yes"
self._mu_init = val
self._warm_start_mult_bound_push = val
self._warm_start_slack_bound_push = val
self._warm_start_bound_push = val
self._warm_start_slack_bound_frac = val
self._warm_start_bound_frac = val
def set_initialization_options(self, val: float):
"""
This function set global initialization options
Parameters
----------
val: float
warm start value
"""
self._bound_push = val
self._bound_frac = val
def as_dict(self, solver):
solver_options = self.__dict__
options = {}
non_python_options = ["_c_compile", "type", "show_online_optim", "show_options"]
for key in solver_options:
if key not in non_python_options:
ipopt_key = "ipopt." + key[1:]
options[ipopt_key] = solver_options[key]
return {**options, **solver.options_common}
@dataclass
class ACADOS(Generic):
"""
Class for Solver Options of ACADOS
Methods
----------
get_tolerance_keys
return the keys of the optimizer tolerance
Attributes
----------
_qp_solver: str
QP solver to be used in the NLP solver. String in (‘PARTIAL_CONDENSING_HPIPM’, ‘FULL_CONDENSING_QPOASES’,
‘FULL_CONDENSING_HPIPM’, ‘PARTIAL_CONDENSING_QPDUNES’, ‘PARTIAL_CONDENSING_OSQP’).
Default: ‘PARTIAL_CONDENSING_HPIPM’
_hessian_approx: str
Hessian approximation.
_integrator_type: str
Integrator type.
_nlp_solver_type: str
            NLP solver type.
_nlp_solver_tol_comp: float
NLP solver complementarity tolerance
_nlp_solver_tol_eq: float
NLP solver equality tolerance
_nlp_solver_tol_ineq: float
NLP solver inequality tolerance
_nlp_solver_tol_stat: float
NLP solver stationarity tolerance. Type: float > 0 Default: 1e-6
_nlp_solver_max_iter: int
NLP solver maximum number of iterations.
_sim_method_newton_iter: int
Number of Newton iterations in simulation method. Type: int > 0 Default: 3
_sim_method_num_stages: int
Number of stages in the integrator. Type: int > 0 or ndarray of ints > 0 of shape (N,). Default: 4
_sim_method_num_steps: int
Number of steps in the integrator. Type: int > 0 or ndarray of ints > 0 of shape (N,). Default: 1
_print_level: int
Verbosity of printing.
_cost_type: int
type of cost functions for cost.cost_type and cost.cost_type_e
_constr_type: int
type of constraint functions for constraints.constr_type and constraints.constr_type_e
_acados_dir: str
If Acados is installed using the acados_install.sh file, you probably can leave this unset
_has_tolerance_changed: bool
            True if the tolerance has been modified; useful for moving horizon estimation
        _only_first_options_has_changed: bool
            True if non-editable options have been modified in the options.
"""
type: SolverType = SolverType.ACADOS
_qp_solver: str = "PARTIAL_CONDENSING_HPIPM" # FULL_CONDENSING_QPOASES
_hessian_approx: str = "GAUSS_NEWTON"
_integrator_type: str = "IRK"
_nlp_solver_type: str = "SQP"
_nlp_solver_tol_comp: float = 1e-06
_nlp_solver_tol_eq: float = 1e-06
_nlp_solver_tol_ineq: float = 1e-06
_nlp_solver_tol_stat: float = 1e-06
_nlp_solver_max_iter: int = 200
_sim_method_newton_iter: int = 5
_sim_method_num_stages: int = 4
_sim_method_num_steps: int = 1
_print_level: int = 1
_cost_type: str = "NONLINEAR_LS"
_constr_type: str = "BGH"
_acados_dir: str = ""
_has_tolerance_changed: bool = False
_only_first_options_has_changed: bool = False
@property
def qp_solver(self):
return self._qp_solver
def set_qp_solver(self, val: str):
self._qp_solver = val
self.set_only_first_options_has_changed(True)
@property
def hessian_approx(self):
return self._hessian_approx
def set_hessian_approx(self, val: str):
self._hessian_approx = val
self.set_only_first_options_has_changed(True)
@property
def integrator_type(self):
return self._integrator_type
def set_integrator_type(self, val: str):
self._integrator_type = val
self.set_only_first_options_has_changed(True)
@property
def nlp_solver_type(self):
return self._nlp_solver_type
def set_nlp_solver_type(self, val: str):
self._nlp_solver_type = val
self.set_only_first_options_has_changed(True)
@property
def sim_method_newton_iter(self):
return self._sim_method_newton_iter
def set_sim_method_newton_iter(self, val: int):
self._sim_method_newton_iter = val
self.set_only_first_options_has_changed(True)
@property
def sim_method_num_stages(self):
return self._sim_method_num_stages
def set_sim_method_num_stages(self, val: int):
self._sim_method_num_stages = val
self.set_only_first_options_has_changed(True)
@property
def sim_method_num_steps(self):
return self._sim_method_num_steps
def set_sim_method_num_steps(self, val: int):
self._sim_method_num_steps = val
self.set_only_first_options_has_changed(True)
@property
def cost_type(self):
return self._cost_type
def set_cost_type(self, val: str):
self._cost_type = val
@property
def constr_type(self):
return self._constr_type
def set_constr_type(self, val: str):
self._constr_type = val
@property
def acados_dir(self):
return self._acados_dir
def set_acados_dir(self, val: str):
self._acados_dir = val
@property
def nlp_solver_tol_comp(self):
return self._nlp_solver_tol_comp
def set_nlp_solver_tol_comp(self, val: float):
self._nlp_solver_tol_comp = val
self._has_tolerance_changed = True
@property
def nlp_solver_tol_eq(self):
return self._nlp_solver_tol_eq
def set_nlp_solver_tol_eq(self, val: float):
self._nlp_solver_tol_eq = val
self.set_has_tolerance_changed(True)
@property
def nlp_solver_tol_ineq(self):
return self._nlp_solver_tol_ineq
def set_nlp_solver_tol_ineq(self, val: float):
self._nlp_solver_tol_ineq = val
self.set_has_tolerance_changed(True)
@property
def nlp_solver_tol_stat(self):
return self._nlp_solver_tol_stat
def set_nlp_solver_tol_stat(self, val: float):
self._nlp_solver_tol_stat = val
self.set_has_tolerance_changed(True)
def set_convergence_tolerance(self, val: float):
self.set_nlp_solver_tol_eq(val)
self.set_nlp_solver_tol_ineq(val)
self.set_nlp_solver_tol_comp(val)
self.set_nlp_solver_tol_stat(val)
self.set_has_tolerance_changed(True)
def set_constraint_tolerance(self, val: float):
self.set_nlp_solver_tol_eq(val)
self.set_nlp_solver_tol_ineq(val)
self.set_has_tolerance_changed(True)
@property
def has_tolerance_changed(self):
return self._has_tolerance_changed
def set_has_tolerance_changed(self, val: bool):
self._has_tolerance_changed = val
@property
def only_first_options_has_changed(self):
return self._only_first_options_has_changed
def set_only_first_options_has_changed(self, val: bool):
self._only_first_options_has_changed = val
@property
def nlp_solver_max_iter(self):
return self._nlp_solver_max_iter
def set_maximum_iterations(self, num):
self._nlp_solver_max_iter = num
self.set_only_first_options_has_changed(True)
def as_dict(self, solver):
options = {}
for key in self.__annotations__.keys():
if (
key == "_acados_dir"
or key == "_cost_type"
or key == "_constr_type"
or key == "_has_tolerance_changed"
or key == "_only_first_options_has_changed"
or key == "type"
):
continue
if key[0] == "_":
options[key[1:]] = self.__getattribute__(key)
else:
options[key] = self.__getattribute__(key)
return options
@property
def print_level(self):
return self._print_level
def set_print_level(self, num: int):
self._print_level = num
self.set_only_first_options_has_changed(True)
@staticmethod
def get_tolerance_keys():
return [
"_nlp_solver_tol_comp",
"_nlp_solver_tol_eq",
"_nlp_solver_tol_ineq",
"_nlp_solver_tol_stat",
]
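# --- Usage sketch (illustrative, not part of the library) ---
# Configures the option containers defined above with arbitrary example values.
# The guard keeps this inert on import; the object that eventually consumes the
# options (an optimal control program's solve call) is not shown here.
if __name__ == "__main__":
    ipopt_opts = Solver.IPOPT()
    ipopt_opts.set_convergence_tolerance(1e-8)  # also tightens the "acceptable" variants
    ipopt_opts.set_constraint_tolerance(1e-6)
    ipopt_opts.set_maximum_iterations(500)
    ipopt_opts.set_linear_solver("mumps")
    ipopt_opts.set_print_level(3)

    acados_opts = Solver.ACADOS()
    acados_opts.set_convergence_tolerance(1e-6)  # sets the four nlp_solver_tol_* values
    acados_opts.set_maximum_iterations(100)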
|
py | b40d90f027d9eef5bc3ca512f62299436293756f | # Generated by Django 3.1.3 on 2021-07-30 08:21
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('is_employer', models.BooleanField(default=False)),
('is_client', models.BooleanField(default=False)),
('phone_number', models.IntegerField(default=0)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='NewsLetterRecipients',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('email', models.EmailField(max_length=254)),
],
),
migrations.CreateModel(
name='Client',
fields=[
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='jobsapp.user')),
],
),
migrations.CreateModel(
name='Employer',
fields=[
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='jobsapp.user')),
],
),
migrations.CreateModel(
name='Company',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, null=True)),
('position', models.CharField(max_length=100, null=True)),
('description', models.CharField(max_length=500, null=True)),
('salary', models.IntegerField(null=True)),
('experience', models.IntegerField(null=True)),
('Location', models.CharField(max_length=100, null=True)),
('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Candidates',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, null=True)),
('email', models.CharField(max_length=60, null=True)),
('phone_number', models.IntegerField(default=0, null=True)),
('birth_date', models.DateField(null=True)),
('gender', models.CharField(choices=[('Male', 'male'), ('Female', 'female'), ('Prefer not to say', 'prefer not to say')], max_length=50, null=True)),
('resume', models.FileField(null=True, upload_to='')),
('company', models.ManyToManyField(blank=True, to='jobsapp.Company')),
],
),
]
|
py | b40d90f5e3a430f97736ece06e712c0c92c1f545 | """
This example is meant to be paired with LabJack's other examples:
https://labjack.com/support/software/examples/ljm/python
This example demonstrates reading a single analog input (AIN1)
from a LabJack in a loop, sampling at a fixed interval (1000 ms
between samples by default). Each sample is printed and broadcast
over UDP; the .csv logging from the original example is commented out.
Docs for datetime: https://docs.python.org/3/library/datetime.html
A few relevant stackoverflow examples:
https://stackoverflow.com/questions/3316882/how-do-i-get-a-string-format-of-the-current-date-time-in-python
Docs for os (how to get the CWD):
https://docs.python.org/3/library/os.html
Docs for os.path (how to join paths):
https://docs.python.org/3/library/os.path.html#module-os.path
"""
from labjack import ljm
import datetime
import os
import sys
import time
import socket
# Labjack Connection - DATA IN
# Open the LabJack serial number 470025307 on IP connection.
handle = ljm.openS("T7", "ETHERNET", "470025307") # T7 device, IP connection, SN
# Get the connection metadata
info = ljm.getHandleInfo(handle)
print("\nOpened a LabJack with Device type: %i, Connection type: %i,\n"
"Serial number: %i, IP address: %s, Port: %i,\nMax bytes per MB: %i" %
(info[0], info[1], info[2], ljm.numberToIP(info[3]), info[4], info[5]))
# UDP Socket - DATA OUT
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
# Setup and call eReadName to read from AIN1 on the LabJack.
name = "AIN1"
rate = 1000 # in ms
rateUS = rate*1000
# Get the current time to build a time-stamp.
appStartTime = datetime.datetime.now()
startTimeStr = appStartTime.isoformat(timespec='milliseconds')
timeStr = appStartTime.isoformat(timespec='milliseconds')
print(timeStr)
# # Get the current working directory
# cwd = os.getcwd()
# # Build a file-name and the file path.
# fileNameColon = timeStr + "-%s-Example.csv" % name
# c1 = 13
# c2 = 16
# newDelimiter = '.'
# fileName = fileNameColon[0:c1] + newDelimiter + fileNameColon[c1+1:c2] + newDelimiter + fileNameColon[c2+1: ]
# filePath = os.path.join(cwd, fileName)
# print(fileName + filePath)
# with open(fileName, 'w', newline='') as file:
# writer = csv.writer(file)
# # Open the file & write a header-line
# f = open(filePath, 'w')
# f.writerow("Time Stamp, Duration/Jitter (ms), %s\n" % name)
# f.write("hello there")
# Print some program-initialization information
print("The time is: %s" % startTimeStr)
# Prepare final variables for program execution
intervalHandle = 0
ljm.startInterval(intervalHandle, rateUS)
numSkippedIntervals = 0
lastTick = ljm.getHostTick()
duration = 0
while True:
try:
numSkippedIntervals = ljm.waitForNextInterval(intervalHandle)
curTick = ljm.getHostTick()
duration = (curTick-lastTick)/1000
curTime = datetime.datetime.now()
curTimeStr = curTime.isoformat(timespec='milliseconds')
        # Read the analog input (AIN1)
result = ljm.eReadName(handle, name)
# f.write("%s, %0.1f, %0.3f\r\n" % (curTimeStr, duration, result))
#
print(f"{curTimeStr}, {duration}, {result}")
message = "%s, %0.1f, %0.3f\r\n" % (curTimeStr, duration, result)
#message = f"{curTimeStr}, {duration}, {result}"
print("Sending UDP")
sock.sendto(bytes(message, "utf-8"), ("255.255.255.255", 30325))
print("Sending Done")
# Set lastTick equal to curTick
lastTick = curTick
except KeyboardInterrupt:
break
except Exception:
import sys
print(sys.exc_info()[1])
break
print("\nFinished!")
# Get the final time
appEndTime = datetime.datetime.now()
endTimeStr = appEndTime.isoformat(timespec='milliseconds')
print("The final time is: %s" % endTimeStr)
# Close interval and device handles
ljm.cleanInterval(intervalHandle)
ljm.close(handle)
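# --- Companion receiver sketch (illustrative; never called by this script) ---
# A minimal way to consume the UDP broadcast above from another host on the
# same subnet. The port (30325) matches the sendto() call; the buffer size and
# decoding are assumptions.
def receive_broadcast_samples(port=30325):
    rx = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    rx.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    rx.bind(("", port))  # listen on all interfaces
    while True:
        data, addr = rx.recvfrom(1024)
        print("%s: %s" % (addr[0], data.decode("utf-8").strip()))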
|
py | b40d917c5e970c6d22de7b4fa09bbcc276d7b533 | import pandas as pd
import matplotlib
class MACD(object):
"""
TREND INDICATOR
Moving Average Convergence/Divergence
close_series: pd.Series: close data with index as date
"""
def __init__(self, close_series: pd.Series,
slow_ema: int = 26,
fast_ema: int = 12,
signal_span: int = 9,
date_series: pd.Series = None):
self._slow_ema = slow_ema
self._fast_ema = fast_ema
self._signal_span = signal_span
self._close_series = close_series
self._date_series = date_series
self.calc()
def calc(self):
self.df = pd.DataFrame()
self.df[f'{self._slow_ema} ema'] = self._close_series.ewm(span=self._slow_ema, adjust=False).mean()
self.df[f'{self._fast_ema} ema'] = self._close_series.ewm(span=self._fast_ema, adjust=False).mean()
self.df['MACD'] = (self.df[f'{self._fast_ema} ema'] - self.df[f'{self._slow_ema} ema'])
self.df['Signal'] = self.df['MACD'].ewm(span=self._signal_span, adjust=False).mean()
self.df['Crossover'] = self.df['MACD'] - self.df['Signal']
self.df['YCrossover'] = self.df.Crossover.shift() #yesterday crossover
self.df['MACD_indicator'] = 0
self.df.loc[(self.df.Crossover < 0) & (self.df.YCrossover > 0), 'MACD_indicator'] = 1 # Sell, cross line going negative
self.df.loc[(self.df.Crossover > 0) & (self.df.YCrossover < 0), 'MACD_indicator'] = 2 # Buy, cross line going positive
def plot(self, ax):
# MACD buy sell indicators
        # Work on the frame holding the computed indicator columns; if a
        # separate date series was provided, use it as the plotting index.
        df = self.df
        if self._date_series is not None:
            df = self.df.set_index(self._date_series)
for index, row in df[df.MACD_indicator == 2].iterrows():
ax.text(index, row.MACD, 'B', color='g')
for index, row in df[df.MACD_indicator == 1].iterrows():
ax.text(index, row.MACD, 'S', color='r')
# MACD bars
df["MACD Crossover diff"] = df.Crossover.diff(1)
df["MACD bar color"] = 'r'
df.loc[df["MACD Crossover diff"] > 0, "MACD bar color"] = 'g'
ax.bar(df.index, df.Crossover, width=1, color=df["MACD bar color"])
ax.axhline(y=0, color='gray', linestyle='-.')
df.plot(y=['MACD', 'Signal', 'Crossover'], ax=ax)
ax.legend(loc='center left')
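# --- Usage sketch (illustrative, not part of the module) ---
# Builds the indicator from a synthetic close-price series (made-up data) and
# renders it on a matplotlib axis.
if __name__ == "__main__":
    import numpy as np
    import matplotlib.pyplot as plt

    closes = pd.Series(100.0 + np.random.randn(250).cumsum())
    macd = MACD(closes)
    print(macd.df[['MACD', 'Signal', 'MACD_indicator']].tail())

    fig, ax = plt.subplots()
    macd.plot(ax)
    plt.show()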
|
py | b40d91aadb3aa67f8b08ca1152fc8a7431d8b2d8 | import pytest
from ampel.secret.DictSecretProvider import NamedSecret
from ampel.ztf.t3.skyportal.SkyPortalClient import SkyPortalClient
def test_validate_url():
"""URL path may not be set"""
with pytest.raises(ValueError):
SkyPortalClient.validate(
dict(base_url="http://foo.bar/", token=NamedSecret(label="foo", value="seekrit"))
)
SkyPortalClient.validate(
dict(base_url="http://foo.bar", token=NamedSecret(label="foo", value="seekrit"))
)
|
py | b40d91d62a7a5257f583910b0f9d118f26221095 | #!/usr/bin/env python
#
# Copyright 2013 The Flutter Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Merges the debug symbols and uploads them to cipd.
"""
import argparse
import collections
import json
import os
import platform
import shutil
import subprocess
import sys
import tarfile
def IsLinux():
return platform.system() == 'Linux'
def GetPackagingDir(out_dir):
return os.path.abspath(os.path.join(out_dir, os.pardir))
def CreateCIPDDefinition(target_arch, out_dir):
dir_name = os.path.basename(os.path.normpath(out_dir))
return """
package: flutter/fuchsia-debug-symbols-%s
description: Flutter and Dart runner debug symbols for Fuchsia. Target architecture %s.
install_mode: copy
data:
- dir: %s
""" % (target_arch, target_arch, dir_name)
# CIPD CLI needs the definition and data directory to be relative to each other.
def WriteCIPDDefinition(target_arch, out_dir):
_packaging_dir = GetPackagingDir(out_dir)
yaml_file = os.path.join(_packaging_dir, 'debug_symbols.cipd.yaml')
with open(yaml_file, 'w') as f:
cipd_def = CreateCIPDDefinition(target_arch, out_dir)
f.write(cipd_def)
return yaml_file
def ProcessCIPDPackage(upload, cipd_yaml, engine_version, out_dir, target_arch):
_packaging_dir = GetPackagingDir(out_dir)
if upload and IsLinux():
command = [
'cipd', 'create', '-pkg-def', cipd_yaml, '-ref', 'latest', '-tag',
'git_revision:%s' % engine_version
]
else:
command = [
'cipd', 'pkg-build', '-pkg-def', cipd_yaml, '-out',
os.path.join(_packaging_dir,
'fuchsia-debug-symbols-%s.cipd' % target_arch)
]
# Retry up to three times. We've seen CIPD fail on verification in some
# instances. Normally verification takes slightly more than 1 minute when
# it succeeds.
num_tries = 3
for tries in range(num_tries):
try:
subprocess.check_call(command, cwd=_packaging_dir)
break
except subprocess.CalledProcessError:
      print('Failed %s times' % (tries + 1))
if tries == num_tries - 1:
raise
# Recursively hardlinks contents from one directory to another,
# skipping over collisions.
def HardlinkContents(dirA, dirB):
for src_dir, _, filenames in os.walk(dirA):
for filename in filenames:
src = os.path.join(src_dir, filename)
dest_dir = os.path.join(dirB, os.path.relpath(src_dir, dirA))
try:
os.makedirs(dest_dir)
except:
pass
dest = os.path.join(dest_dir, filename)
if os.path.exists(dest):
# The last two path components provide a content address for a .build-id entry.
tokens = os.path.split(dest)
name = os.path.join(tokens[-2], tokens[-1])
print('%s already exists in destination; skipping linking' % name)
continue
os.link(src, dest)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--symbol-dirs',
required=True,
nargs='+',
help='Space separated list of directories that contain the debug symbols.'
)
parser.add_argument(
'--out-dir',
required=True,
action='store',
dest='out_dir',
help='Output directory where the executables will be placed.')
parser.add_argument(
'--target-arch', type=str, choices=['x64', 'arm64'], required=True)
parser.add_argument(
'--engine-version',
required=True,
help='Specifies the flutter engine SHA.')
parser.add_argument('--upload', default=False, action='store_true')
args = parser.parse_args()
symbol_dirs = args.symbol_dirs
for symbol_dir in symbol_dirs:
assert os.path.exists(symbol_dir) and os.path.isdir(symbol_dir)
out_dir = args.out_dir
if os.path.exists(out_dir):
    print('Directory: %s is not empty, deleting it.' % out_dir)
shutil.rmtree(out_dir)
os.makedirs(out_dir)
for symbol_dir in symbol_dirs:
HardlinkContents(symbol_dir, out_dir)
arch = args.target_arch
cipd_def = WriteCIPDDefinition(arch, out_dir)
ProcessCIPDPackage(args.upload, cipd_def, args.engine_version, out_dir, arch)
return 0
if __name__ == '__main__':
sys.exit(main())
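# --- Example invocation (illustrative; the script name, paths and SHA below
# are made up, only the flags mirror the argparse definitions in main()) ---
#
#   python <this_script>.py \
#       --symbol-dirs out/fuchsia_sym_a out/fuchsia_sym_b \
#       --out-dir out/merged_debug_symbols \
#       --target-arch x64 \
#       --engine-version 0123456789abcdef \
#       --upload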
|
py | b40d92ff0186a30f04400bcf6a731b8f4e9c979a | import pprint
import re
import requests
import upnpclient
from philips_hue.models import Bridge
def discover_hue(**kwargs):
cloud = kwargs.get("cloud", False)
upnp = kwargs.get("upnp", True)
bridges = Bridge.select()
bridge_addresses = []
for bridge in bridges:
bridge_addresses.append(bridge.ip)
if cloud:
devices_resp = requests.get("https://discovery.meethue.com/")
if devices_resp.status_code == 200:
devices = devices_resp.json()
for device in devices:
ip_address = device.get("internalipaddress")
urlbase = "http://%s:80" % ip_address
debug_address = "%s/debug/clip.html" % urlbase
if ip_address not in bridge_addresses:
new_bridge = Bridge.create(
name="Philips Hue Bridge",
ip=ip_address,
serial_number=device.get("id"),
url=urlbase,
debug=debug_address,
device_id=device.get("id"),
)
bridge_addresses.append(ip_address)
if upnp:
devices = upnpclient.discover()
for device in devices:
if "Philips hue" in device.friendly_name:
urlbase = device.location.replace("/description.xml", "")
ip_address = re.search(
r"(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})", urlbase
).group(1)
debug_address = "%s/debug/clip.html" % urlbase
if ip_address not in bridge_addresses:
new_bridge = Bridge.create(
name=device.friendly_name,
ip=ip_address,
serial_number=device.serial_number,
url=urlbase,
debug=debug_address,
device_id=device.serial_number,
)
bridge_addresses.append(ip_address)
bridges = Bridge.select()
return bridges
bridges = discover_hue(cloud=True, upnp=False)
for bridge in bridges:
bridge.connect("test")
for light in bridge.lights():
if light.name == "Floor lamp":
print(light.name)
light.toggle()
if light.is_on:
for x in range(0, 255):
light.brightness(x)
|
py | b40d935276361749b717d4295714f1bf3cabfb12 | __all__ = [
'OMFReader',
]
import omf
import omfvista
import vtk
from .. import _helpers
from ..base import ReaderBaseBase
class OMFReader(ReaderBaseBase):
"""Handles reading an OMF Project
"""
__displayname__ = 'OMF Project Reader'
__category__ = 'reader'
extensions = 'omf'
description = 'PVGeo: Open Mining Format Project'
def __init__(self):
ReaderBaseBase.__init__(self,
nOutputPorts=1, outputType='vtkMultiBlockDataSet')
# Properties:
self._dataselection = vtk.vtkDataArraySelection()
self._dataselection.AddObserver("ModifiedEvent", _helpers.create_modified_callback(self))
self.__names = []
self.__data = dict()
self.__project = None
def Modified(self, read_again=False):
"""Ensure default is overridden to be false so array selector can call.
"""
ReaderBaseBase.Modified(self, read_again=read_again)
def modified(self, read_again=False):
"""Ensure default is overridden to be false so array selector can call.
"""
return self.Modified(read_again=read_again)
def get_file_name(self):
"""Super class has file names as a list but we will only handle a single
        project file. This provides a convenient way of making sure we only
access that single file.
A user could still access the list of file names using ``get_file_names()``.
"""
return ReaderBaseBase.get_file_names(self, idx=0)
#### Methods for performing the read ####
def _read_up_front(self):
"""Internal functiona to read all data at the start"""
# Read all elements
reader = omf.OMFReader(self.get_file_name())
self.__project = reader.get_project()
self.__names = [e.name for e in self.__project.elements]
for n in self.__names:
self._dataselection.AddArray(n)
self.need_to_read(flag=False)
return 1
def _get_raw_data(self):
"""Converts OMF data to VTK data objects."""
# Now iterate over the elements and add converted data to the data dict:
data = dict()
for e in self.__project.elements:
if self._dataselection.ArrayIsEnabled(e.name):
if e.name not in self.__data:
self.__data[e.name] = omfvista.wrap(e)
data[e.name] = self.__data[e.name]
return data
#### pipeline methods ####
def RequestData(self, request, inInfo, outInfo):
"""Used by pipeline to get data for current timestep and populate the output data object.
"""
# Get output:
#output = self.GetOutputData(outInfo, 0)
output = vtk.vtkMultiBlockDataSet.GetData(outInfo, 0)
        # Perform the read
if self.need_to_read():
self._read_up_front()
data = self._get_raw_data()
        # Set number of blocks based on user choice in the selection
output.SetNumberOfBlocks(self._dataselection.GetNumberOfArraysEnabled())
blk = 0
        # iterate over data set to produce output based on user's selection
keys = data.keys()
for name in keys:
output.SetBlock(blk, data[name])
output.GetMetaData(blk).Set(vtk.vtkCompositeDataSet.NAME(), name)
blk += 1
return 1
#### Getters / Setters ####
def GetDataSelection(self):
"""Return the current user selection of data elements"""
if self.need_to_read():
self._read_up_front()
return self._dataselection
|
py | b40d937a7ceda96edf9c750ee9a6c4736d743b1d | import torch
from torch import nn
import numpy as np
from envs.past.grid.safety_gridworld import PitWorld
from models.ours.grid_model import OneHotCostAllocator
env = PitWorld(size = 14,
max_step = 200,
per_step_penalty = -1.0,
goal_reward = 1000.0,
obstace_density = 0.3,
constraint_cost = 10.0,
random_action_prob = 0.005,
one_hot_features=True,
rand_goal=True,)
state = env.reset()
print(np.concatenate((state, state)).shape)
cost = np.array([10.0, 10.0, 10.0]).transpose()
state = np.array([state, state, state])
t_state = torch.Tensor(state)
from models.ours.grid_model import OneHotCostAllocator, OneHotValueNetwork
device = torch.device("cuda" if (torch.cuda.is_available()) else "cpu")
s = env.reset()
shape = (s.shape[0]+1, )
C = OneHotCostAllocator(num_inputs=shape).to(device=device)
V = OneHotValueNetwork(num_inputs=shape).to(device=device)
#print(C(state, cost))
#print(V(t_state))
s = torch.FloatTensor(s).to(device)
A = torch.FloatTensor([100]).to(device=device)
X = torch.cat((s,A))
w = C(X)
print(w)
B = w[0]*A
C = w[1]*A
print(B.backward())
print(A, B, C)
|
py | b40d93c812ad3b6e7c30f2dc69f663cd7d6971e7 | from copy import deepcopy
from .base import API
from .routing import Router
class CommandsAPI(API):
__router, route = Router.new()
@route('verdict')
def _perform(self, payload, **kwargs):
"""
        Command allowing a simple CTR query
        for a verdict on a bunch of observables
"""
response = self._post(
'/iroh/iroh-inspect/inspect',
json={'content': str(payload)},
**kwargs
)
response = self._post(
'/iroh/iroh-enrich/deliberate/observables',
json=response,
**kwargs
)
verdicts = build_array_for_verdicts(response)
return {"response": response, "verdicts": verdicts}
@route('targets')
def _perform(self, payload, **kwargs):
"""
        Command allowing a simple CTR query for targets
for a bunch of observables
"""
response = self._post(
'/iroh/iroh-inspect/inspect',
json={'content': str(payload)},
**kwargs
)
response = self._post(
'/iroh/iroh-enrich/observe/observables',
json=response,
**kwargs
)
result = build_array_for_targets(response)
return {"response": response, "targets": result}
def build_array_for_verdicts(verdict_dict):
verdicts = []
# According to the official documentation, `disposition_name` is
# optional, so simply infer it from required `disposition`.
disposition_map = {
1: 'Clean',
2: 'Malicious',
3: 'Suspicious',
4: 'Common',
5: 'Unknown',
}
for module in verdict_dict.get('data', []):
module_name = module['module']
module_type_id = module['module_type_id']
module_instance_id = module['module_instance_id']
for doc in module.get('data', {}) \
.get('verdicts', {}) \
.get('docs', []):
verdicts.append({
'observable_value': doc['observable']['value'],
'observable_type': doc['observable']['type'],
'expiration': doc['valid_time'].get('end_time', ''),
'module': module_name,
'module_type_id': module_type_id,
'module_instance_id': module_instance_id,
'disposition_name': disposition_map[doc['disposition']],
})
return verdicts
def build_array_for_targets(targets_dict):
result = []
for module in targets_dict.get('data', []):
module_name = module['module']
module_type_id = module['module_type_id']
module_instance_id = module['module_instance_id']
targets = []
for doc in module.get('data', {}) \
.get('sightings', {}) \
.get('docs', []):
for target in doc.get('targets', []):
element = deepcopy(target)
element.pop('observed_time', None)
if element not in targets:
targets.append(element)
result.append({
'module': module_name,
'module_type_id': module_type_id,
'module_instance_id': module_instance_id,
'targets': targets
})
return result
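# --- Shape sketch (illustrative, not part of the module) ---
# A minimal, fabricated response envelope showing the structure that
# build_array_for_verdicts() walks; every identifier below is made up.
if __name__ == "__main__":
    sample = {
        "data": [
            {
                "module": "Example Module",
                "module_type_id": "mt-1",
                "module_instance_id": "mi-1",
                "data": {
                    "verdicts": {
                        "docs": [
                            {
                                "observable": {"type": "sha256", "value": "abc123"},
                                "valid_time": {"end_time": "2525-01-01T00:00:00.000Z"},
                                "disposition": 2,
                            }
                        ]
                    }
                },
            }
        ]
    }
    # Expected: one entry with disposition_name == 'Malicious'
    print(build_array_for_verdicts(sample))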
|
py | b40d940366da086a158ce4279855bfce53c9e714 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os.path as op
from argparse import Namespace
from fairseq.data import Dictionary, encoders
from fairseq.data.audio.speech_to_text_dataset import (
S2TDataConfig,
SpeechToTextDataset,
SpeechToTextDatasetCreator,
get_features_or_waveform,
)
from fairseq.tasks import LegacyFairseqTask, register_task
logger = logging.getLogger(__name__)
@register_task("speech_to_text")
class SpeechToTextTask(LegacyFairseqTask):
@staticmethod
def add_args(parser):
parser.add_argument("data", help="manifest root path")
parser.add_argument(
"--config-yaml",
type=str,
default="config.yaml",
help="Configuration YAML filename (under manifest root)",
)
parser.add_argument(
"--max-source-positions",
default=6000,
type=int,
metavar="N",
help="max number of tokens in the source sequence",
)
parser.add_argument(
"--max-target-positions",
default=1024,
type=int,
metavar="N",
help="max number of tokens in the target sequence",
)
def __init__(self, args, tgt_dict):
super().__init__(args)
self.tgt_dict = tgt_dict
self.data_cfg = S2TDataConfig(op.join(args.data, args.config_yaml))
@classmethod
def setup_task(cls, args, **kwargs):
data_cfg = S2TDataConfig(op.join(args.data, args.config_yaml))
dict_path = op.join(args.data, data_cfg.vocab_filename)
if not op.isfile(dict_path):
raise FileNotFoundError(f"Dict not found: {dict_path}")
tgt_dict = Dictionary.load(dict_path)
logger.info(
f"dictionary size ({data_cfg.vocab_filename}): " f"{len(tgt_dict):,}"
)
if getattr(args, "train_subset", None) is not None:
if not all(s.startswith("train") for s in args.train_subset.split(",")):
raise ValueError('Train splits should be named like "train*".')
return cls(args, tgt_dict)
def build_criterion(self, args):
from fairseq import criterions
if self.data_cfg.prepend_tgt_lang_tag and args.ignore_prefix_size != 1:
raise ValueError(
'Please set "--ignore-prefix-size 1" since '
"target language ID token is prepended as BOS."
)
return criterions.build_criterion(args, self)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
is_train_split = split.startswith("train")
pre_tokenizer = self.build_tokenizer(self.args)
bpe_tokenizer = self.build_bpe(self.args)
self.datasets[split] = SpeechToTextDatasetCreator.from_tsv(
self.args.data,
self.data_cfg,
split,
self.tgt_dict,
pre_tokenizer,
bpe_tokenizer,
is_train_split=is_train_split,
epoch=epoch,
seed=self.args.seed,
)
@property
def target_dictionary(self):
return self.tgt_dict
@property
def source_dictionary(self):
return None
def max_positions(self):
return self.args.max_source_positions, self.args.max_target_positions
def build_model(self, args):
args.input_feat_per_channel = self.data_cfg.input_feat_per_channel
args.input_channels = self.data_cfg.input_channels
return super(SpeechToTextTask, self).build_model(args)
def build_generator(
self,
models,
args,
seq_gen_cls=None,
extra_gen_cls_kwargs=None,
):
if self.data_cfg.prepend_tgt_lang_tag and args.prefix_size != 1:
raise ValueError(
'Please set "--prefix-size 1" since '
"target language ID token is prepended as BOS."
)
lang_token_ids = {
i
for s, i in self.tgt_dict.indices.items()
if SpeechToTextDataset.is_lang_tag(s)
}
extra_gen_cls_kwargs = {"symbols_to_strip_from_output": lang_token_ids}
return super().build_generator(
models, args, seq_gen_cls=None, extra_gen_cls_kwargs=extra_gen_cls_kwargs
)
def build_tokenizer(self, args):
logger.info(f"pre-tokenizer: {self.data_cfg.pre_tokenizer}")
return encoders.build_tokenizer(Namespace(**self.data_cfg.pre_tokenizer))
def build_bpe(self, args):
logger.info(f"tokenizer: {self.data_cfg.bpe_tokenizer}")
return encoders.build_bpe(Namespace(**self.data_cfg.bpe_tokenizer))
def get_interactive_tokens_and_lengths(self, lines, encode_fn):
n_frames = [get_features_or_waveform(p).shape[0] for p in lines]
return lines, n_frames
def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs):
return SpeechToTextDataset(
"interactive", False, self.data_cfg, src_tokens, src_lengths
)
|
py | b40d94ca7e4d45ec82ca64b9fac91a313bde2591 | # -*- coding: utf-8 -*-
from openprocurement.tender.core.utils import optendersresource
from openprocurement.tender.openua.views.award import TenderUaAwardResource
from openprocurement.tender.openeu.views.award import TenderAwardResource as TenderEUAwardResource
from openprocurement.tender.competitivedialogue.constants import STAGE_2_EU_TYPE, STAGE_2_UA_TYPE
@optendersresource(
name="{}:Tender Awards".format(STAGE_2_EU_TYPE),
collection_path="/tenders/{tender_id}/awards",
path="/tenders/{tender_id}/awards/{award_id}",
description="Competitive Dialogue Stage 2 EU awards",
procurementMethodType=STAGE_2_EU_TYPE,
)
class CompetitiveDialogueStage2EUAwardResource(TenderEUAwardResource):
""" Competitive Dialogue Stage 2 EU award resource """
@optendersresource(
name="{}:Tender Awards".format(STAGE_2_UA_TYPE),
collection_path="/tenders/{tender_id}/awards",
path="/tenders/{tender_id}/awards/{award_id}",
description="Competitive Dialogue Stage 2 UA awards",
procurementMethodType=STAGE_2_UA_TYPE,
)
class CompetitiveDialogueStage2UAAwardResource(TenderUaAwardResource):
""" Competitive Dialogue Stage 2 UA award resource """
|
py | b40d94ccafef3bac88a1e79d32fe346d8d6f43a1 | import torch
import torch.nn as nn
import torch.nn.functional as f
import numpy as np
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1. / np.sqrt(fan_in)
return (-lim, lim)
class Network(nn.Module):
def __init__(self, input_dim, hidden_in_dim, hidden_out_dim, output_dim, actor=False):
super(Network, self).__init__()
"""self.input_norm = nn.BatchNorm1d(input_dim)
self.input_norm.weight.data.fill_(1)
self.input_norm.bias.data.fill_(0)"""
self.fc1 = nn.Linear(input_dim,hidden_in_dim)
self.fc2 = nn.Linear(hidden_in_dim,hidden_out_dim)
self.fc3 = nn.Linear(hidden_out_dim,output_dim)
        self.nonlin = f.leaky_relu  # leaky_relu lives in torch.nn.functional; f.relu also works
self.actor = actor
#self.reset_parameters()
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-1e-3, 1e-3)
def forward(self, x):
if self.actor:
# return a vector of the force
h1 = self.nonlin(self.fc1(x))
h2 = self.nonlin(self.fc2(h1))
h3 = (self.fc3(h2))
norm = torch.norm(h3)
# h3 is a 2D vector (a force that is applied to the agent)
# we bound the norm of the vector to be between 0 and 10
return 10.0*(f.tanh(norm))*h3/norm if norm > 0 else 10*h3
else:
# critic network simply outputs a number
h1 = self.nonlin(self.fc1(x))
h2 = self.nonlin(self.fc2(h1))
h3 = (self.fc3(h2))
return h3
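# --- Usage sketch (illustrative, not part of the module) ---
# Instantiates an actor and a critic with made-up layer sizes and runs a single
# forward pass on random input; in an actor-critic setup the critic would see
# the state concatenated with the action.
if __name__ == "__main__":
    state_size, action_size = 24, 2
    actor = Network(state_size, 256, 128, action_size, actor=True)
    critic = Network(state_size + action_size, 256, 128, 1)

    state = torch.rand(state_size)
    action = actor(state)                       # 2-D force with norm bounded to [0, 10]
    q_value = critic(torch.cat((state, action)))
    print(action, q_value)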
|
py | b40d95bf5b11bb9c5c32411778bfba7c6f578961 | # Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_context import context as o_context
from oslo_context import fixture as o_fixture
from oslo_utils.fixture import uuidsentinel as uuids
from nova import context
from nova import exception
from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
class ContextTestCase(test.NoDBTestCase):
# NOTE(danms): Avoid any cells setup by claiming we will
# do things ourselves.
USES_DB_SELF = True
def setUp(self):
super(ContextTestCase, self).setUp()
self.useFixture(o_fixture.ClearRequestContext())
def test_request_context_elevated(self):
user_ctxt = context.RequestContext('111',
'222',
is_admin=False)
self.assertFalse(user_ctxt.is_admin)
admin_ctxt = user_ctxt.elevated()
self.assertTrue(admin_ctxt.is_admin)
self.assertIn('admin', admin_ctxt.roles)
self.assertFalse(user_ctxt.is_admin)
self.assertNotIn('admin', user_ctxt.roles)
def test_request_context_sets_is_admin(self):
ctxt = context.RequestContext('111',
'222',
roles=['admin', 'weasel'])
self.assertTrue(ctxt.is_admin)
def test_request_context_sets_is_admin_by_role(self):
ctxt = context.RequestContext('111',
'222',
roles=['administrator'])
self.assertTrue(ctxt.is_admin)
def test_request_context_sets_is_admin_upcase(self):
ctxt = context.RequestContext('111',
'222',
roles=['Admin', 'weasel'])
self.assertTrue(ctxt.is_admin)
def test_request_context_read_deleted(self):
ctxt = context.RequestContext('111',
'222',
read_deleted='yes')
self.assertEqual('yes', ctxt.read_deleted)
ctxt.read_deleted = 'no'
self.assertEqual('no', ctxt.read_deleted)
def test_request_context_read_deleted_invalid(self):
self.assertRaises(ValueError,
context.RequestContext,
'111',
'222',
read_deleted=True)
ctxt = context.RequestContext('111', '222')
self.assertRaises(ValueError,
setattr,
ctxt,
'read_deleted',
True)
def test_service_catalog_default(self):
ctxt = context.RequestContext('111', '222')
self.assertEqual([], ctxt.service_catalog)
ctxt = context.RequestContext('111', '222',
service_catalog=[])
self.assertEqual([], ctxt.service_catalog)
ctxt = context.RequestContext('111', '222',
service_catalog=None)
self.assertEqual([], ctxt.service_catalog)
def test_service_catalog_filter(self):
service_catalog = [
{u'type': u'compute', u'name': u'nova'},
{u'type': u's3', u'name': u's3'},
{u'type': u'image', u'name': u'glance'},
{u'type': u'volumev3', u'name': u'cinderv3'},
{u'type': u'network', u'name': u'neutron'},
{u'type': u'ec2', u'name': u'ec2'},
{u'type': u'object-store', u'name': u'swift'},
{u'type': u'identity', u'name': u'keystone'},
{u'type': u'block-storage', u'name': u'cinder'},
{u'type': None, u'name': u'S_withouttype'},
{u'type': u'vo', u'name': u'S_partofvolume'}]
volume_catalog = [{u'type': u'image', u'name': u'glance'},
{u'type': u'volumev3', u'name': u'cinderv3'},
{u'type': u'network', u'name': u'neutron'},
{u'type': u'block-storage', u'name': u'cinder'}]
ctxt = context.RequestContext('111', '222',
service_catalog=service_catalog)
self.assertEqual(volume_catalog, ctxt.service_catalog)
def test_to_dict_from_dict_no_log(self):
warns = []
def stub_warn(msg, *a, **kw):
if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]):
a = a[0]
warns.append(str(msg) % a)
self.stub_out('nova.context.LOG.warning', stub_warn)
ctxt = context.RequestContext('111',
'222',
roles=['admin', 'weasel'])
context.RequestContext.from_dict(ctxt.to_dict())
self.assertEqual(0, len(warns), warns)
def test_store_when_no_overwrite(self):
# If no context exists we store one even if overwrite is false
# (since we are not overwriting anything).
ctx = context.RequestContext('111',
'222',
overwrite=False)
self.assertIs(o_context.get_current(), ctx)
def test_no_overwrite(self):
# If there is already a context in the cache a new one will
# not overwrite it if overwrite=False.
ctx1 = context.RequestContext('111',
'222',
overwrite=True)
context.RequestContext('333',
'444',
overwrite=False)
self.assertIs(o_context.get_current(), ctx1)
def test_get_context_no_overwrite(self):
# If there is already a context in the cache creating another context
# should not overwrite it.
ctx1 = context.RequestContext('111',
'222',
overwrite=True)
context.get_context()
self.assertIs(ctx1, o_context.get_current())
def test_admin_no_overwrite(self):
# If there is already a context in the cache creating an admin
# context will not overwrite it.
ctx1 = context.RequestContext('111',
'222',
overwrite=True)
context.get_admin_context()
self.assertIs(o_context.get_current(), ctx1)
def test_convert_from_rc_to_dict(self):
ctx = context.RequestContext(
111, 222, request_id='req-679033b7-1755-4929-bf85-eb3bfaef7e0b',
timestamp='2015-03-02T22:31:56.641629')
values2 = ctx.to_dict()
expected_values = {'auth_token': None,
'domain': None,
'is_admin': False,
'is_admin_project': True,
'project_id': 222,
'project_domain': None,
'project_name': None,
'quota_class': None,
'read_deleted': 'no',
'read_only': False,
'remote_address': None,
'request_id':
'req-679033b7-1755-4929-bf85-eb3bfaef7e0b',
'resource_uuid': None,
'roles': [],
'service_catalog': [],
'show_deleted': False,
'tenant': 222,
'timestamp': '2015-03-02T22:31:56.641629',
'user': 111,
'user_domain': None,
'user_id': 111,
'user_identity': '111 222 - - -',
'user_name': None}
for k, v in expected_values.items():
self.assertIn(k, values2)
self.assertEqual(values2[k], v)
@mock.patch.object(context.policy, 'authorize')
def test_can(self, mock_authorize):
mock_authorize.return_value = True
ctxt = context.RequestContext('111', '222')
result = ctxt.can(mock.sentinel.rule)
self.assertTrue(result)
mock_authorize.assert_called_once_with(
ctxt, mock.sentinel.rule,
{'project_id': ctxt.project_id, 'user_id': ctxt.user_id})
@mock.patch.object(context.policy, 'authorize')
def test_can_fatal(self, mock_authorize):
mock_authorize.side_effect = exception.Forbidden
ctxt = context.RequestContext('111', '222')
self.assertRaises(exception.Forbidden,
ctxt.can, mock.sentinel.rule)
@mock.patch.object(context.policy, 'authorize')
def test_can_non_fatal(self, mock_authorize):
mock_authorize.side_effect = exception.Forbidden
ctxt = context.RequestContext('111', '222')
result = ctxt.can(mock.sentinel.rule, mock.sentinel.target,
fatal=False)
self.assertFalse(result)
mock_authorize.assert_called_once_with(ctxt, mock.sentinel.rule,
mock.sentinel.target)
@mock.patch('nova.rpc.create_transport')
@mock.patch('nova.db.api.create_context_manager')
def test_target_cell(self, mock_create_ctxt_mgr, mock_rpc):
mock_create_ctxt_mgr.return_value = mock.sentinel.cdb
mock_rpc.return_value = mock.sentinel.cmq
ctxt = context.RequestContext('111',
'222',
roles=['admin', 'weasel'])
# Verify the existing db_connection, if any, is restored
ctxt.db_connection = mock.sentinel.db_conn
ctxt.mq_connection = mock.sentinel.mq_conn
mapping = objects.CellMapping(database_connection='fake://',
transport_url='fake://',
uuid=uuids.cell)
with context.target_cell(ctxt, mapping) as cctxt:
self.assertEqual(cctxt.db_connection, mock.sentinel.cdb)
self.assertEqual(cctxt.mq_connection, mock.sentinel.cmq)
self.assertEqual(cctxt.cell_uuid, mapping.uuid)
self.assertEqual(mock.sentinel.db_conn, ctxt.db_connection)
self.assertEqual(mock.sentinel.mq_conn, ctxt.mq_connection)
self.assertIsNone(ctxt.cell_uuid)
# Test again now that we have populated the cache
with context.target_cell(ctxt, mapping) as cctxt:
self.assertEqual(cctxt.db_connection, mock.sentinel.cdb)
self.assertEqual(cctxt.mq_connection, mock.sentinel.cmq)
self.assertEqual(cctxt.cell_uuid, mapping.uuid)
@mock.patch('nova.rpc.create_transport')
@mock.patch('nova.db.api.create_context_manager')
def test_target_cell_unset(self, mock_create_ctxt_mgr, mock_rpc):
"""Tests that passing None as the mapping will temporarily
untarget any previously set cell context.
"""
mock_create_ctxt_mgr.return_value = mock.sentinel.cdb
mock_rpc.return_value = mock.sentinel.cmq
ctxt = context.RequestContext('111',
'222',
roles=['admin', 'weasel'])
ctxt.db_connection = mock.sentinel.db_conn
ctxt.mq_connection = mock.sentinel.mq_conn
with context.target_cell(ctxt, None) as cctxt:
self.assertIsNone(cctxt.db_connection)
self.assertIsNone(cctxt.mq_connection)
self.assertEqual(mock.sentinel.db_conn, ctxt.db_connection)
self.assertEqual(mock.sentinel.mq_conn, ctxt.mq_connection)
@mock.patch('nova.context.set_target_cell')
def test_target_cell_regenerates(self, mock_set):
ctxt = context.RequestContext('fake', 'fake')
# Set a non-tracked property on the context to make sure it
# does not make it to the targeted one (like a copy would do)
ctxt.sentinel = mock.sentinel.parent
with context.target_cell(ctxt, mock.sentinel.cm) as cctxt:
# Should be a different object
self.assertIsNot(cctxt, ctxt)
# Should not have inherited the non-tracked property
self.assertFalse(hasattr(cctxt, 'sentinel'),
'Targeted context was copied from original')
# Set another non-tracked property
cctxt.sentinel = mock.sentinel.child
# Make sure we didn't pollute the original context
self.assertNotEqual(ctxt.sentinel, mock.sentinel.child)
def test_get_context(self):
ctxt = context.get_context()
self.assertIsNone(ctxt.user_id)
self.assertIsNone(ctxt.project_id)
self.assertFalse(ctxt.is_admin)
@mock.patch('nova.rpc.create_transport')
@mock.patch('nova.db.api.create_context_manager')
def test_target_cell_caching(self, mock_create_cm, mock_create_tport):
mock_create_cm.return_value = mock.sentinel.db_conn_obj
mock_create_tport.return_value = mock.sentinel.mq_conn_obj
ctxt = context.get_context()
mapping = objects.CellMapping(database_connection='fake://db',
transport_url='fake://mq',
uuid=uuids.cell)
# First call should create new connection objects.
with context.target_cell(ctxt, mapping) as cctxt:
self.assertEqual(mock.sentinel.db_conn_obj, cctxt.db_connection)
self.assertEqual(mock.sentinel.mq_conn_obj, cctxt.mq_connection)
mock_create_cm.assert_called_once_with('fake://db')
mock_create_tport.assert_called_once_with('fake://mq')
# Second call should use cached objects.
mock_create_cm.reset_mock()
mock_create_tport.reset_mock()
with context.target_cell(ctxt, mapping) as cctxt:
self.assertEqual(mock.sentinel.db_conn_obj, cctxt.db_connection)
self.assertEqual(mock.sentinel.mq_conn_obj, cctxt.mq_connection)
mock_create_cm.assert_not_called()
mock_create_tport.assert_not_called()
def test_is_cell_failure_sentinel(self):
record = context.did_not_respond_sentinel
self.assertTrue(context.is_cell_failure_sentinel(record))
record = TypeError()
self.assertTrue(context.is_cell_failure_sentinel(record))
record = objects.Instance()
self.assertFalse(context.is_cell_failure_sentinel(record))
@mock.patch('nova.context.target_cell')
@mock.patch('nova.objects.InstanceList.get_by_filters')
def test_scatter_gather_cells(self, mock_get_inst, mock_target_cell):
ctxt = context.get_context()
mapping = objects.CellMapping(database_connection='fake://db',
transport_url='fake://mq',
uuid=uuids.cell)
mappings = objects.CellMappingList(objects=[mapping])
# Use a mock manager to assert call order across mocks.
manager = mock.Mock()
manager.attach_mock(mock_get_inst, 'get_inst')
manager.attach_mock(mock_target_cell, 'target_cell')
filters = {'deleted': False}
context.scatter_gather_cells(
ctxt, mappings, 60, objects.InstanceList.get_by_filters, filters,
sort_dir='foo')
# NOTE(melwitt): This only works without the SpawnIsSynchronous fixture
# because when the spawn is treated as synchronous and the thread
# function is called immediately, it will occur inside the target_cell
# context manager scope when it wouldn't with a real spawn.
# Assert that InstanceList.get_by_filters was called before the
# target_cell context manager exited.
get_inst_call = mock.call.get_inst(
mock_target_cell.return_value.__enter__.return_value, filters,
sort_dir='foo')
expected_calls = [get_inst_call,
mock.call.target_cell().__exit__(None, None, None)]
manager.assert_has_calls(expected_calls)
@mock.patch('nova.context.LOG.warning')
@mock.patch('eventlet.timeout.Timeout')
@mock.patch('eventlet.queue.LightQueue.get')
@mock.patch('nova.objects.InstanceList.get_by_filters')
def test_scatter_gather_cells_timeout(self, mock_get_inst,
mock_get_result, mock_timeout,
mock_log_warning):
# This is needed because we're mocking get_by_filters.
self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
ctxt = context.get_context()
mapping0 = objects.CellMapping(database_connection='fake://db0',
transport_url='none:///',
uuid=objects.CellMapping.CELL0_UUID)
mapping1 = objects.CellMapping(database_connection='fake://db1',
transport_url='fake://mq1',
uuid=uuids.cell1)
mappings = objects.CellMappingList(objects=[mapping0, mapping1])
# Simulate cell1 not responding.
mock_get_result.side_effect = [(mapping0.uuid,
mock.sentinel.instances),
exception.CellTimeout()]
results = context.scatter_gather_cells(
ctxt, mappings, 30, objects.InstanceList.get_by_filters)
self.assertEqual(2, len(results))
self.assertIn(mock.sentinel.instances, results.values())
self.assertIn(context.did_not_respond_sentinel, results.values())
mock_timeout.assert_called_once_with(30, exception.CellTimeout)
self.assertTrue(mock_log_warning.called)
@mock.patch('nova.context.LOG.exception')
@mock.patch('nova.objects.InstanceList.get_by_filters')
def test_scatter_gather_cells_exception(self, mock_get_inst,
mock_log_exception):
# This is needed because we're mocking get_by_filters.
self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
ctxt = context.get_context()
mapping0 = objects.CellMapping(database_connection='fake://db0',
transport_url='none:///',
uuid=objects.CellMapping.CELL0_UUID)
mapping1 = objects.CellMapping(database_connection='fake://db1',
transport_url='fake://mq1',
uuid=uuids.cell1)
mappings = objects.CellMappingList(objects=[mapping0, mapping1])
# Simulate cell1 raising an exception.
mock_get_inst.side_effect = [mock.sentinel.instances,
test.TestingException()]
results = context.scatter_gather_cells(
ctxt, mappings, 30, objects.InstanceList.get_by_filters)
self.assertEqual(2, len(results))
self.assertIn(mock.sentinel.instances, results.values())
        self.assertTrue(any(isinstance(val, Exception)
                            for val in results.values()))
self.assertTrue(mock_log_exception.called)
@mock.patch('nova.context.scatter_gather_cells')
@mock.patch('nova.objects.CellMappingList.get_all')
def test_scatter_gather_all_cells(self, mock_get_all, mock_scatter):
ctxt = context.get_context()
mapping0 = objects.CellMapping(database_connection='fake://db0',
transport_url='none:///',
uuid=objects.CellMapping.CELL0_UUID)
mapping1 = objects.CellMapping(database_connection='fake://db1',
transport_url='fake://mq1',
uuid=uuids.cell1)
mock_get_all.return_value = objects.CellMappingList(
objects=[mapping0, mapping1])
filters = {'deleted': False}
context.scatter_gather_all_cells(
ctxt, objects.InstanceList.get_by_filters, filters, sort_dir='foo')
mock_scatter.assert_called_once_with(
ctxt, mock_get_all.return_value, 60,
objects.InstanceList.get_by_filters, filters, sort_dir='foo')
@mock.patch('nova.context.scatter_gather_cells')
@mock.patch('nova.objects.CellMappingList.get_all')
def test_scatter_gather_skip_cell0(self, mock_get_all, mock_scatter):
ctxt = context.get_context()
mapping0 = objects.CellMapping(database_connection='fake://db0',
transport_url='none:///',
uuid=objects.CellMapping.CELL0_UUID)
mapping1 = objects.CellMapping(database_connection='fake://db1',
transport_url='fake://mq1',
uuid=uuids.cell1)
mock_get_all.return_value = objects.CellMappingList(
objects=[mapping0, mapping1])
filters = {'deleted': False}
context.scatter_gather_skip_cell0(
ctxt, objects.InstanceList.get_by_filters, filters, sort_dir='foo')
mock_scatter.assert_called_once_with(
ctxt, [mapping1], 60, objects.InstanceList.get_by_filters, filters,
sort_dir='foo')
@mock.patch('nova.context.scatter_gather_cells')
def test_scatter_gather_single_cell(self, mock_scatter):
ctxt = context.get_context()
mapping0 = objects.CellMapping(database_connection='fake://db0',
transport_url='none:///',
uuid=objects.CellMapping.CELL0_UUID)
filters = {'deleted': False}
context.scatter_gather_single_cell(ctxt, mapping0,
objects.InstanceList.get_by_filters, filters, sort_dir='foo')
mock_scatter.assert_called_once_with(
ctxt, [mapping0], context.CELL_TIMEOUT,
objects.InstanceList.get_by_filters, filters,
sort_dir='foo')
|
py | b40d95d8b39c9ea3db4654bc5e9c94fc5a38dd79 | # coding: utf-8
import glob
import logging
import os
import sys
import socket
import tempfile
import time
import numpy as np
import pickle
import pytest
import ray
import ray.ray_constants as ray_constants
import ray.util.accelerators
import ray.cluster_utils
import ray.test_utils
from ray import resource_spec
import setproctitle
import subprocess
from ray.test_utils import (check_call_ray, RayTestTimeoutException,
wait_for_condition, wait_for_num_actors,
new_scheduler_enabled)
logger = logging.getLogger(__name__)
def attempt_to_load_balance(remote_function,
args,
total_tasks,
num_nodes,
minimum_count,
num_attempts=100):
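    """Submit ``total_tasks`` copies of ``remote_function`` repeatedly and
    check that all ``num_nodes`` nodes executed at least ``minimum_count``
    of them; fail after ``num_attempts`` unsuccessful rounds.
    """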
attempts = 0
while attempts < num_attempts:
locations = ray.get(
[remote_function.remote(*args) for _ in range(total_tasks)])
names = set(locations)
counts = [locations.count(name) for name in names]
logger.info(f"Counts are {counts}.")
if (len(names) == num_nodes
and all(count >= minimum_count for count in counts)):
break
attempts += 1
assert attempts < num_attempts
def test_load_balancing(ray_start_cluster):
# This test ensures that tasks are being assigned to all raylets
# in a roughly equal manner.
cluster = ray_start_cluster
num_nodes = 3
num_cpus = 7
for _ in range(num_nodes):
cluster.add_node(num_cpus=num_cpus)
ray.init(address=cluster.address)
@ray.remote
def f():
time.sleep(0.01)
return ray.worker.global_worker.node.unique_id
attempt_to_load_balance(f, [], 100, num_nodes, 10)
attempt_to_load_balance(f, [], 1000, num_nodes, 100)
def test_local_scheduling_first(ray_start_cluster):
cluster = ray_start_cluster
num_cpus = 8
# Disable worker caching.
cluster.add_node(
num_cpus=num_cpus,
_system_config={
"worker_lease_timeout_milliseconds": 0,
})
cluster.add_node(num_cpus=num_cpus)
ray.init(address=cluster.address)
@ray.remote
def f():
time.sleep(0.01)
return ray.worker.global_worker.node.unique_id
def local():
return ray.get(f.remote()) == ray.worker.global_worker.node.unique_id
# Wait for a worker to get started.
wait_for_condition(local)
# Check that we are scheduling locally while there are resources available.
for i in range(20):
assert local()
@pytest.mark.skipif(new_scheduler_enabled(), reason="flakes more often")
def test_load_balancing_with_dependencies(ray_start_cluster):
# This test ensures that tasks are being assigned to all raylets in a
# roughly equal manner even when the tasks have dependencies.
cluster = ray_start_cluster
num_nodes = 3
for _ in range(num_nodes):
cluster.add_node(num_cpus=1)
ray.init(address=cluster.address)
@ray.remote
def f(x):
time.sleep(0.010)
return ray.worker.global_worker.node.unique_id
# This object will be local to one of the raylets. Make sure
# this doesn't prevent tasks from being scheduled on other raylets.
x = ray.put(np.zeros(1000000))
attempt_to_load_balance(f, [x], 100, num_nodes, 25)
def wait_for_num_objects(num_objects, timeout=10):
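    """Poll ``ray.objects()`` until it reports at least ``num_objects``
    entries, raising ``RayTestTimeoutException`` after ``timeout`` seconds.
    """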
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.objects()) >= num_objects:
return
time.sleep(0.1)
raise RayTestTimeoutException("Timed out while waiting for global state.")
def test_global_state_api(shutdown_only):
ray.init(num_cpus=5, num_gpus=3, resources={"CustomResource": 1})
assert ray.cluster_resources()["CPU"] == 5
assert ray.cluster_resources()["GPU"] == 3
assert ray.cluster_resources()["CustomResource"] == 1
# A driver/worker creates a temporary object during startup. Although the
# temporary object is freed immediately, in a rare case, we can still find
# the object ref in GCS because Raylet removes the object ref from GCS
# asynchronously.
    # Because we can't control when workers create the temporary objects,
    # we can't assert that `ray.objects()` returns an empty dict. Here we
    # just make sure `ray.objects()` succeeds.
assert len(ray.objects()) >= 0
job_id = ray.utils.compute_job_id_from_driver(
ray.WorkerID(ray.worker.global_worker.worker_id))
client_table = ray.nodes()
node_ip_address = ray.worker.global_worker.node_ip_address
assert len(client_table) == 1
assert client_table[0]["NodeManagerAddress"] == node_ip_address
@ray.remote
class Actor:
def __init__(self):
pass
_ = Actor.options(name="test_actor").remote() # noqa: F841
# Wait for actor to be created
wait_for_num_actors(1)
actor_table = ray.actors()
assert len(actor_table) == 1
actor_info, = actor_table.values()
assert actor_info["JobID"] == job_id.hex()
assert actor_info["Name"] == "test_actor"
assert "IPAddress" in actor_info["Address"]
assert "IPAddress" in actor_info["OwnerAddress"]
assert actor_info["Address"]["Port"] != actor_info["OwnerAddress"]["Port"]
job_table = ray.jobs()
assert len(job_table) == 1
assert job_table[0]["JobID"] == job_id.hex()
assert job_table[0]["DriverIPAddress"] == node_ip_address
# TODO(rkn): Pytest actually has tools for capturing stdout and stderr, so we
# should use those, but they seem to conflict with Ray's use of faulthandler.
class CaptureOutputAndError:
"""Capture stdout and stderr of some span.
This can be used as follows.
captured = {}
with CaptureOutputAndError(captured):
# Do stuff.
# Access captured["out"] and captured["err"].
"""
def __init__(self, captured_output_and_error):
import io
self.output_buffer = io.StringIO()
self.error_buffer = io.StringIO()
self.captured_output_and_error = captured_output_and_error
def __enter__(self):
sys.stdout.flush()
sys.stderr.flush()
self.old_stdout = sys.stdout
self.old_stderr = sys.stderr
sys.stdout = self.output_buffer
sys.stderr = self.error_buffer
def __exit__(self, exc_type, exc_value, traceback):
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = self.old_stdout
sys.stderr = self.old_stderr
self.captured_output_and_error["out"] = self.output_buffer.getvalue()
self.captured_output_and_error["err"] = self.error_buffer.getvalue()
def test_logging_to_driver(shutdown_only):
ray.init(num_cpus=1, log_to_driver=True)
@ray.remote
def f():
# It's important to make sure that these print statements occur even
# without calling sys.stdout.flush() and sys.stderr.flush().
for i in range(10):
print(i, end=" ")
print(100 + i, end=" ", file=sys.stderr)
captured = {}
with CaptureOutputAndError(captured):
ray.get(f.remote())
time.sleep(1)
out_lines = captured["out"]
err_lines = captured["err"]
for i in range(10):
assert str(i) in out_lines
for i in range(100, 110):
assert str(i) in err_lines
def test_not_logging_to_driver(shutdown_only):
ray.init(num_cpus=1, log_to_driver=False)
@ray.remote
def f():
for i in range(100):
print(i)
print(100 + i, file=sys.stderr)
sys.stdout.flush()
sys.stderr.flush()
captured = {}
with CaptureOutputAndError(captured):
ray.get(f.remote())
time.sleep(1)
output_lines = captured["out"]
assert len(output_lines) == 0
err_lines = captured["err"]
assert len(err_lines) == 0
def test_workers(shutdown_only):
num_workers = 3
ray.init(num_cpus=num_workers)
@ray.remote
def f():
return id(ray.worker.global_worker), os.getpid()
# Wait until all of the workers have started.
worker_ids = set()
while len(worker_ids) != num_workers:
worker_ids = set(ray.get([f.remote() for _ in range(10)]))
def test_object_ref_properties():
id_bytes = b"00112233445566778899"
object_ref = ray.ObjectRef(id_bytes)
assert object_ref.binary() == id_bytes
object_ref = ray.ObjectRef.nil()
assert object_ref.is_nil()
with pytest.raises(ValueError, match=r".*needs to have length 20.*"):
ray.ObjectRef(id_bytes + b"1234")
with pytest.raises(ValueError, match=r".*needs to have length 20.*"):
ray.ObjectRef(b"0123456789")
object_ref = ray.ObjectRef.from_random()
assert not object_ref.is_nil()
assert object_ref.binary() != id_bytes
id_dumps = pickle.dumps(object_ref)
id_from_dumps = pickle.loads(id_dumps)
assert id_from_dumps == object_ref
@pytest.fixture
def shutdown_only_with_initialization_check():
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
assert not ray.is_initialized()
def test_initialized(shutdown_only_with_initialization_check):
assert not ray.is_initialized()
ray.init(num_cpus=0)
assert ray.is_initialized()
def test_initialized_local_mode(shutdown_only_with_initialization_check):
assert not ray.is_initialized()
ray.init(num_cpus=0, local_mode=True)
assert ray.is_initialized()
def test_wait_reconstruction(shutdown_only):
ray.init(
num_cpus=1,
object_store_memory=int(10**8),
_system_config={"object_pinning_enabled": 0})
@ray.remote
def f():
return np.zeros(6 * 10**7, dtype=np.uint8)
x_id = f.remote()
ray.wait([x_id])
ray.wait([f.remote()])
assert not ray.worker.global_worker.core_worker.object_exists(x_id)
ready_ids, _ = ray.wait([x_id])
assert len(ready_ids) == 1
def test_ray_setproctitle(ray_start_2_cpus):
@ray.remote
class UniqueName:
def __init__(self):
assert setproctitle.getproctitle() == "ray::UniqueName.__init__()"
def f(self):
assert setproctitle.getproctitle() == "ray::UniqueName.f()"
@ray.remote
def unique_1():
assert "unique_1" in setproctitle.getproctitle()
actor = UniqueName.remote()
ray.get(actor.f.remote())
ray.get(unique_1.remote())
def test_ray_task_name_setproctitle(ray_start_2_cpus):
method_task_name = "foo"
@ray.remote
class UniqueName:
def __init__(self):
assert setproctitle.getproctitle() == "ray::UniqueName.__init__()"
def f(self):
assert setproctitle.getproctitle() == f"ray::{method_task_name}"
task_name = "bar"
@ray.remote
def unique_1():
assert task_name in setproctitle.getproctitle()
actor = UniqueName.remote()
ray.get(actor.f.options(name=method_task_name).remote())
ray.get(unique_1.options(name=task_name).remote())
@pytest.mark.skipif(
os.getenv("TRAVIS") is None,
reason="This test should only be run on Travis.")
def test_ray_stack(ray_start_2_cpus):
def unique_name_1():
time.sleep(1000)
@ray.remote
def unique_name_2():
time.sleep(1000)
@ray.remote
def unique_name_3():
unique_name_1()
unique_name_2.remote()
unique_name_3.remote()
success = False
start_time = time.time()
while time.time() - start_time < 30:
# Attempt to parse the "ray stack" call.
output = ray.utils.decode(
check_call_ray(["stack"], capture_stdout=True))
if ("unique_name_1" in output and "unique_name_2" in output
and "unique_name_3" in output):
success = True
break
if not success:
raise Exception("Failed to find necessary information with "
"'ray stack'")
def test_raylet_is_robust_to_random_messages(ray_start_regular):
node_manager_address = None
node_manager_port = None
for client in ray.nodes():
if "NodeManagerAddress" in client:
node_manager_address = client["NodeManagerAddress"]
node_manager_port = client["NodeManagerPort"]
assert node_manager_address
assert node_manager_port
# Try to bring down the node manager:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((node_manager_address, node_manager_port))
s.send(1000 * b"asdf")
@ray.remote
def f():
return 1
assert ray.get(f.remote()) == 1
def test_non_ascii_comment(ray_start_regular):
@ray.remote
def f():
# 日本語 Japanese comment
return 1
assert ray.get(f.remote()) == 1
@pytest.mark.parametrize(
"ray_start_object_store_memory", [150 * 1024 * 1024], indirect=True)
def test_put_pins_object(ray_start_object_store_memory):
obj = np.ones(200 * 1024, dtype=np.uint8)
x_id = ray.put(obj)
x_binary = x_id.binary()
assert (ray.get(ray.ObjectRef(x_binary)) == obj).all()
# x cannot be evicted since x_id pins it
for _ in range(10):
ray.put(np.zeros(10 * 1024 * 1024))
assert (ray.get(x_id) == obj).all()
assert (ray.get(ray.ObjectRef(x_binary)) == obj).all()
    # Deleting x_id removes the pin; x_binary alone does not pin the object,
    # so it can now be evicted.
del x_id
for _ in range(10):
ray.put(np.zeros(10 * 1024 * 1024))
assert not ray.worker.global_worker.core_worker.object_exists(
ray.ObjectRef(x_binary))
def test_decorated_function(ray_start_regular):
def function_invocation_decorator(f):
def new_f(args, kwargs):
# Reverse the arguments.
return f(args[::-1], {"d": 5}), kwargs
return new_f
def f(a, b, c, d=None):
return a, b, c, d
f.__ray_invocation_decorator__ = function_invocation_decorator
f = ray.remote(f)
result_id, kwargs = f.remote(1, 2, 3, d=4)
assert kwargs == {"d": 4}
assert ray.get(result_id) == (3, 2, 1, 5)
def test_get_postprocess(ray_start_regular):
def get_postprocessor(object_refs, values):
return [value for value in values if value > 0]
ray.worker.global_worker._post_get_hooks.append(get_postprocessor)
assert ray.get(
[ray.put(i) for i in [0, 1, 3, 5, -1, -3, 4]]) == [1, 3, 5, 4]
def test_export_after_shutdown(ray_start_regular):
# This test checks that we can use actor and remote function definitions
# across multiple Ray sessions.
@ray.remote
def f():
pass
@ray.remote
class Actor:
def method(self):
pass
ray.get(f.remote())
a = Actor.remote()
ray.get(a.method.remote())
ray.shutdown()
# Start Ray and use the remote function and actor again.
ray.init(num_cpus=1)
ray.get(f.remote())
a = Actor.remote()
ray.get(a.method.remote())
ray.shutdown()
# Start Ray again and make sure that these definitions can be exported from
# workers.
ray.init(num_cpus=2)
@ray.remote
def export_definitions_from_worker(remote_function, actor_class):
ray.get(remote_function.remote())
actor_handle = actor_class.remote()
ray.get(actor_handle.method.remote())
ray.get(export_definitions_from_worker.remote(f, Actor))
def test_ray_start_and_stop():
for i in range(10):
subprocess.check_call(["ray", "start", "--head"])
subprocess.check_call(["ray", "stop"])
def test_invalid_unicode_in_worker_log(shutdown_only):
info = ray.init(num_cpus=1)
logs_dir = os.path.join(info["session_dir"], "logs")
# Wait till first worker log file is created.
while True:
log_file_paths = glob.glob(f"{logs_dir}/worker*.out")
if len(log_file_paths) == 0:
time.sleep(0.2)
else:
break
with open(log_file_paths[0], "wb") as f:
f.write(b"\xe5abc\nline2\nline3\n")
f.write(b"\xe5abc\nline2\nline3\n")
f.write(b"\xe5abc\nline2\nline3\n")
f.flush()
# Wait till the log monitor reads the file.
time.sleep(1.0)
# Make sure that nothing has died.
assert ray._private.services.remaining_processes_alive()
@pytest.mark.skip(reason="This test is too expensive to run.")
def test_move_log_files_to_old(shutdown_only):
info = ray.init(num_cpus=1)
logs_dir = os.path.join(info["session_dir"], "logs")
@ray.remote
class Actor:
def f(self):
print("function f finished")
# First create a temporary actor.
actors = [
Actor.remote() for i in range(ray_constants.LOG_MONITOR_MAX_OPEN_FILES)
]
ray.get([a.f.remote() for a in actors])
# Make sure no log files are in the "old" directory before the actors
# are killed.
assert len(glob.glob(f"{logs_dir}/old/worker*.out")) == 0
# Now kill the actors so the files get moved to logs/old/.
[a.__ray_terminate__.remote() for a in actors]
while True:
log_file_paths = glob.glob(f"{logs_dir}/old/worker*.out")
if len(log_file_paths) > 0:
with open(log_file_paths[0], "r") as f:
assert "function f finished\n" in f.readlines()
break
# Make sure that nothing has died.
assert ray._private.services.remaining_processes_alive()
def test_lease_request_leak(shutdown_only):
ray.init(num_cpus=1, _system_config={"object_timeout_milliseconds": 200})
assert len(ray.objects()) == 0
@ray.remote
def f(x):
time.sleep(0.1)
return
# Submit pairs of tasks. Tasks in a pair can reuse the same worker leased
# from the raylet.
tasks = []
for _ in range(10):
obj_ref = ray.put(1)
for _ in range(2):
tasks.append(f.remote(obj_ref))
del obj_ref
ray.get(tasks)
time.sleep(
1) # Sleep for an amount longer than the reconstruction timeout.
assert len(ray.objects()) == 0, ray.objects()
@pytest.mark.parametrize(
"ray_start_cluster", [{
"num_cpus": 0,
"num_nodes": 1,
"do_init": False,
}],
indirect=True)
def test_ray_address_environment_variable(ray_start_cluster):
address = ray_start_cluster.address
# In this test we use zero CPUs to distinguish between starting a local
# ray cluster and connecting to an existing one.
# Make sure we connect to an existing cluster if
# RAY_ADDRESS is set.
os.environ["RAY_ADDRESS"] = address
ray.init()
assert "CPU" not in ray.state.cluster_resources()
del os.environ["RAY_ADDRESS"]
ray.shutdown()
# Make sure we start a new cluster if RAY_ADDRESS is not set.
ray.init()
assert "CPU" in ray.state.cluster_resources()
ray.shutdown()
def test_ray_resources_environment_variable(ray_start_cluster):
address = ray_start_cluster.address
os.environ[
"RAY_OVERRIDE_RESOURCES"] = "{\"custom1\":1, \"custom2\":2, \"CPU\":3}"
ray.init(address=address, resources={"custom1": 3, "custom3": 3})
cluster_resources = ray.cluster_resources()
print(cluster_resources)
assert cluster_resources["custom1"] == 1
assert cluster_resources["custom2"] == 2
assert cluster_resources["custom3"] == 3
assert cluster_resources["CPU"] == 3
def test_gpu_info_parsing():
info_string = """Model: Tesla V100-SXM2-16GB
IRQ: 107
GPU UUID: GPU-8eaaebb8-bb64-8489-fda2-62256e821983
Video BIOS: 88.00.4f.00.09
Bus Type: PCIe
DMA Size: 47 bits
DMA Mask: 0x7fffffffffff
Bus Location: 0000:00:1e.0
Device Minor: 0
Blacklisted: No
"""
constraints_dict = resource_spec._constraints_from_gpu_info(info_string)
expected_dict = {
f"{ray_constants.RESOURCE_CONSTRAINT_PREFIX}V100": 1,
}
assert constraints_dict == expected_dict
info_string = """Model: Tesla T4
IRQ: 10
GPU UUID: GPU-415fe7a8-f784-6e3d-a958-92ecffacafe2
Video BIOS: 90.04.84.00.06
Bus Type: PCIe
DMA Size: 47 bits
DMA Mask: 0x7fffffffffff
Bus Location: 0000:00:1b.0
Device Minor: 0
Blacklisted: No
"""
constraints_dict = resource_spec._constraints_from_gpu_info(info_string)
expected_dict = {
f"{ray_constants.RESOURCE_CONSTRAINT_PREFIX}T4": 1,
}
assert constraints_dict == expected_dict
assert resource_spec._constraints_from_gpu_info(None) == {}
def test_accelerator_type_api(shutdown_only):
v100 = ray.util.accelerators.NVIDIA_TESLA_V100
resource_name = f"{ray_constants.RESOURCE_CONSTRAINT_PREFIX}{v100}"
ray.init(num_cpus=4, resources={resource_name: 1})
quantity = 1
@ray.remote(accelerator_type=v100)
def decorated_func(quantity):
wait_for_condition(
lambda: ray.available_resources()[resource_name] < quantity)
return True
assert ray.get(decorated_func.remote(quantity))
def via_options_func(quantity):
wait_for_condition(
lambda: ray.available_resources()[resource_name] < quantity)
return True
assert ray.get(
ray.remote(via_options_func).options(
accelerator_type=v100).remote(quantity))
@ray.remote(accelerator_type=v100)
class DecoratedActor:
def __init__(self):
pass
def initialized(self):
pass
class ActorWithOptions:
def __init__(self):
pass
def initialized(self):
pass
decorated_actor = DecoratedActor.remote()
# Avoid a race condition where the actor hasn't been initialized and
# claimed the resources yet.
ray.get(decorated_actor.initialized.remote())
wait_for_condition(
lambda: ray.available_resources()[resource_name] < quantity)
quantity = ray.available_resources()[resource_name]
with_options = ray.remote(ActorWithOptions).options(
accelerator_type=v100).remote()
ray.get(with_options.initialized.remote())
wait_for_condition(
lambda: ray.available_resources()[resource_name] < quantity)
def test_detect_docker_cpus():
# No limits set
with tempfile.NamedTemporaryFile(
"w") as quota_file, tempfile.NamedTemporaryFile(
"w") as period_file, tempfile.NamedTemporaryFile(
"w") as cpuset_file:
quota_file.write("-1")
period_file.write("100000")
cpuset_file.write("0-63")
quota_file.flush()
period_file.flush()
cpuset_file.flush()
assert ray.utils._get_docker_cpus(
cpu_quota_file_name=quota_file.name,
cpu_share_file_name=period_file.name,
cpuset_file_name=cpuset_file.name) == 64
# No cpuset used
with tempfile.NamedTemporaryFile(
"w") as quota_file, tempfile.NamedTemporaryFile(
"w") as period_file, tempfile.NamedTemporaryFile(
"w") as cpuset_file:
quota_file.write("-1")
period_file.write("100000")
cpuset_file.write("0-10,20,50-63")
quota_file.flush()
period_file.flush()
cpuset_file.flush()
assert ray.utils._get_docker_cpus(
cpu_quota_file_name=quota_file.name,
cpu_share_file_name=period_file.name,
cpuset_file_name=cpuset_file.name) == 26
# Quota set
with tempfile.NamedTemporaryFile(
"w") as quota_file, tempfile.NamedTemporaryFile(
"w") as period_file, tempfile.NamedTemporaryFile(
"w") as cpuset_file:
quota_file.write("42")
period_file.write("100")
cpuset_file.write("0-63")
quota_file.flush()
period_file.flush()
cpuset_file.flush()
assert ray.utils._get_docker_cpus(
cpu_quota_file_name=quota_file.name,
cpu_share_file_name=period_file.name,
cpuset_file_name=cpuset_file.name) == 0.42
def test_override_environment_variables_task(ray_start_regular):
@ray.remote
def get_env(key):
return os.environ.get(key)
assert (ray.get(
get_env.options(override_environment_variables={
"a": "b"
}).remote("a")) == "b")
def test_override_environment_variables_actor(ray_start_regular):
@ray.remote
class EnvGetter:
def get(self, key):
return os.environ.get(key)
a = EnvGetter.options(override_environment_variables={
"a": "b",
"c": "d"
}).remote()
assert (ray.get(a.get.remote("a")) == "b")
assert (ray.get(a.get.remote("c")) == "d")
def test_override_environment_variables_nested_task(ray_start_regular):
@ray.remote
def get_env(key):
return os.environ.get(key)
@ray.remote
def get_env_wrapper(key):
return ray.get(get_env.remote(key))
assert (ray.get(
get_env_wrapper.options(override_environment_variables={
"a": "b"
}).remote("a")) == "b")
def test_override_environment_variables_multitenancy(shutdown_only):
ray.init(
job_config=ray.job_config.JobConfig(worker_env={
"foo1": "bar1",
"foo2": "bar2"
}))
@ray.remote
def get_env(key):
return os.environ.get(key)
assert ray.get(get_env.remote("foo1")) == "bar1"
assert ray.get(get_env.remote("foo2")) == "bar2"
assert ray.get(
get_env.options(override_environment_variables={
"foo1": "baz1"
}).remote("foo1")) == "baz1"
assert ray.get(
get_env.options(override_environment_variables={
"foo1": "baz1"
}).remote("foo2")) == "bar2"
def test_override_environment_variables_complex(shutdown_only):
ray.init(
job_config=ray.job_config.JobConfig(worker_env={
"a": "job_a",
"b": "job_b",
"z": "job_z"
}))
@ray.remote
def get_env(key):
return os.environ.get(key)
@ray.remote
class NestedEnvGetter:
def get(self, key):
return os.environ.get(key)
def get_task(self, key):
return ray.get(get_env.remote(key))
@ray.remote
class EnvGetter:
def get(self, key):
return os.environ.get(key)
def get_task(self, key):
return ray.get(get_env.remote(key))
def nested_get(self, key):
aa = NestedEnvGetter.options(override_environment_variables={
"c": "e",
"d": "dd"
}).remote()
return ray.get(aa.get.remote(key))
a = EnvGetter.options(override_environment_variables={
"a": "b",
"c": "d"
}).remote()
assert (ray.get(a.get.remote("a")) == "b")
assert (ray.get(a.get_task.remote("a")) == "b")
assert (ray.get(a.nested_get.remote("a")) == "b")
assert (ray.get(a.nested_get.remote("c")) == "e")
assert (ray.get(a.nested_get.remote("d")) == "dd")
assert (ray.get(
get_env.options(override_environment_variables={
"a": "b"
}).remote("a")) == "b")
assert (ray.get(a.get.remote("z")) == "job_z")
assert (ray.get(a.get_task.remote("z")) == "job_z")
assert (ray.get(a.nested_get.remote("z")) == "job_z")
assert (ray.get(
get_env.options(override_environment_variables={
"a": "b"
}).remote("z")) == "job_z")
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
py | b40d95e999d321c8599ddfcf9194763e4185380b | import lgsvl
from lgsvl.geometry import Vector
from common import SimConnection, CarControl
from common.scene import load_ego, load_npc, spawn_state
class SchoolBus:
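    """Scenario helper: the ego vehicle drives forward while a parked
    SchoolBus NPC signals and then follows its waypoints; any collision
    closes the simulation and raises an exception.
    """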
def __init__(self, ego_speed: float, ego_target: Vector, ego_brake: float,
npc_speed: float, npc_source: Vector, npc_target: Vector,
sim_connection: SimConnection):
self.ego_target = ego_target
self.ego_speed = ego_speed
self.ego_brake = ego_brake
self.npc_speed = npc_speed
self.npc_source = npc_source
self.npc_target = npc_target
self.collisions = []
self.simConnection = sim_connection
def on_collision(self, agent1, agent2, contact):
self.collisions.append([agent1, agent2, contact])
self.simConnection.sim.close()
        raise Exception("{} collided with {}".format(agent1, agent2))
def run(self):
# Setup environment
lgsvl_sim = self.simConnection.connect()
control = lgsvl.NPCControl()
ego_control = lgsvl.VehicleControl()
# Placing the school_bus
school_bus_state = spawn_state(lgsvl_sim)
school_bus_state = CarControl.place_car_on_the_point(state=school_bus_state, sim=lgsvl_sim, point=self.npc_source)
school_bus = load_npc(lgsvl_sim, "SchoolBus", school_bus_state)
# Placing the ego on the starting point
ego_state = spawn_state(lgsvl_sim)
ego_state = CarControl.place_car_from_the_point(dimension="horizontal", distance=-6, state=ego_state)
ego_state = CarControl.drive_ego_car(ego_state, [("vertical", self.ego_speed)])
ego = load_ego(lgsvl_sim, "Lincoln2017MKZ (Apollo 5.0)", ego_state)
# Callback collision function
ego.on_collision(self.on_collision)
school_bus.on_collision(self.on_collision)
# Set waypoints for School Bus
waypoints = []
for point in [self.npc_source, self.npc_target]:
waypoints.append(lgsvl.DriveWaypoint(point, self.npc_speed, school_bus.state.transform.rotation))
try:
# Start the scenario
# The School Bus is parked on the street
control.headlights = 2
control.e_stop = True
school_bus.apply_control(control)
# Let the ego running for 2 seconds
self.simConnection.execute(timeout=2)
# The school bus turns on signal to prepare for the turn
control.headlights = 0 # turn off headlight
control.turn_signal_left = True
school_bus.apply_control(control)
self.simConnection.execute(timeout=2)
# Brake the ego
CarControl.brake_ego(ego=ego, control=ego_control, brake_value=self.ego_brake, sticky=True)
# The school bus starts to turn right
school_bus.follow(waypoints)
self.simConnection.execute(timeout=10)
except Exception:
print("Failed!")
|
py | b40d96519c2561a6d37bd9bf9297e0b851979fa0 | """
Setup for packaging clr into an egg.
"""
from distutils.core import setup, Extension
from distutils.command.build_ext import build_ext
from platform import architecture
import subprocess
import shutil
import sys
import os
from distutils import msvc9compiler
msvc9compiler.VERSION = 11
class PythonNET_BuildExt(build_ext):
def build_extension(self, ext):
"""
Builds the .pyd file using msbuild.
"""
if ext.name != "clr":
return super(PythonNET_BuildExt, self).build_extension(ext)
cc = msvc9compiler.MSVCCompiler()
cc.initialize()
msbuild = cc.find_exe("msbuild.exe")
platform = "x64" if architecture()[0] == "64bit" else "x86"
defines = [
"PYTHON%d%s" % (sys.version_info[:2]),
"UCS2"
]
cmd = [
msbuild,
"pythonnet.sln",
"/p:Configuration=ReleaseWin",
"/p:Platform=%s" % platform,
"/p:DefineConstants=\"%s\"" % ";".join(defines),
"/t:clrmodule",
]
self.announce("Building: %s" % " ".join(cmd))
subprocess.check_call(" ".join(cmd))
dest_file = self.get_ext_fullpath(ext.name)
dest_dir = os.path.dirname(dest_file)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
src_file = os.path.join("src", "clrmodule", "bin", platform, "Release", "clr.pyd")
self.announce("Copying %s to %s" % (src_file, dest_file))
shutil.copyfile(src_file, dest_file)
dest_file = os.path.join(dest_dir, "Python.Runtime.dll")
src_file = os.path.join("src", "runtime", "bin", platform, "Release", "Python.Runtime.dll")
self.announce("Copying %s to %s" % (src_file, dest_file))
shutil.copyfile(src_file, dest_file)
setup(name="pythonnet",
ext_modules=[
Extension("clr", sources=[])
],
cmdclass = {
"build_ext" : PythonNET_BuildExt
}
)
|
py | b40d96bba0a9ff78fa5c7bfb9c808cdf15099da8 | # -*- coding: utf-8 -*-
# Copyright (c) 2016 Will Thames and contributors
# Copyright (c) 2018 Ansible Project
# Modified work Copyright (c) 2019 Warpnet B.V.
import re
from saltlint.linter import SaltLintRule
class YamlHasOctalValueRule(SaltLintRule):
id = '210'
shortdesc = 'Numbers that start with `0` should always be encapsulated in quotation marks'
description = 'Numbers that start with `0` should always be encapsulated in quotation marks'
severity = 'HIGH'
tags = ['formatting']
version_added = 'v0.0.6'
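    # Matches an unquoted value that starts with '0' followed by digits after
    # a colon, up to a YAML comment ('#'), a Jinja comment ('{#') or line end.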
bracket_regex = re.compile(r"(?<=:)\s{0,}0[0-9]{1,}\s{0,}((?={#)|(?=#)|(?=$))")
def match(self, file, line):
return self.bracket_regex.search(line)
|
py | b40d96fe66f9766e7ea7675555cd007ac763afb9 | import numpy as np
import torch
from torch.autograd import Variable
from core.observations.deformable_objects.landmarks.landmark import Landmark
from support.utilities.general_settings import Settings
class SurfaceMesh(Landmark):
"""
3D Triangular mesh.
"""
####################################################################################################################
### Constructor:
####################################################################################################################
def __init__(self):
Landmark.__init__(self)
self.type = 'SurfaceMesh'
# All of these are torch tensor attributes.
self.connectivity = None
self.centers = None
self.normals = None
# Clone.
def clone(self):
clone = SurfaceMesh()
clone.points = np.copy(self.points)
clone.is_modified = self.is_modified
clone.bounding_box = self.bounding_box
clone.norm = self.norm
clone.connectivity = self.connectivity.clone()
clone.centers = self.centers.clone()
clone.normals = self.normals.clone()
return clone
####################################################################################################################
### Public methods:
####################################################################################################################
def update(self):
self.get_centers_and_normals()
Landmark.update(self)
def set_connectivity(self, connectivity):
self.connectivity = torch.from_numpy(connectivity).type(Settings().tensor_integer_type)
self.is_modified = True
def get_centers_and_normals(self, points=None):
"""
Given a new set of points, use the corresponding connectivity available in the polydata
to compute the new normals, all in torch
"""
if points is None:
if self.is_modified:
torch_points_coordinates = Variable(torch.from_numpy(self.points).type(Settings().tensor_scalar_type))
a = torch_points_coordinates[self.connectivity[:, 0]]
b = torch_points_coordinates[self.connectivity[:, 1]]
c = torch_points_coordinates[self.connectivity[:, 2]]
centers = (a+b+c)/3.
self.centers = centers
self.normals = torch.cross(b-a, c-a)/2
else:
a = points[self.connectivity[:, 0]]
b = points[self.connectivity[:, 1]]
c = points[self.connectivity[:, 2]]
centers = (a+b+c)/3.
self.centers = centers
self.normals = torch.cross(b-a, c-a)/2
return self.centers, self.normals
|
py | b40d98197bf2e4cf55c564e7ec0874f1a0cb954c | __author__ = 'Bohdan Mushkevych'
import datetime
import random
import time
import math
from threading import Thread
from amqp import AMQPError
from db.model.raw_data import RawData
from synergy.mq.flopsy import Publisher
from synergy.system.performance_tracker import SimpleTracker
from synergy.system.synergy_process import SynergyProcess
SLEEP_TIME = 0.05
TICK_INTERVAL = 10
class EventStreamGenerator(SynergyProcess):
""" illustration suite worker:
- emulates user activity on the web site """
def __init__(self, process_name):
super(EventStreamGenerator, self).__init__(process_name)
self.main_thread = None
self.publisher = Publisher(process_name)
self.performance_tracker = SimpleTracker(self.logger)
self.previous_tick = time.time()
self.thread_is_running = True
utc_date = datetime.datetime.utcnow()
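        # Encode the current UTC time as a numeric YYYYMMDDhhmmss00 value
        # used as the base for generated session-group identifiers.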
self.number_of_groups = utc_date.year * math.pow(10, 12) + \
utc_date.month * math.pow(10, 10) + \
utc_date.day * math.pow(10, 8) + \
utc_date.hour * math.pow(10, 6) + \
utc_date.minute * math.pow(10, 4) + \
utc_date.second * math.pow(10, 2)
self.logger.info(f'Started {self.process_name}')
def __del__(self):
self.publisher.close()
self.performance_tracker.cancel()
super(EventStreamGenerator, self).__del__()
self.logger.info('Exiting main thread. All auxiliary threads stopped.')
def _generate_key(self):
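        """Compose a (domain_name, timestamp, session_id) key with a random
        domain and a session id taken from the current group bucket."""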
_id = random.randint(0, 100000)
domain_name = 'domain{0}__com'.format(_id)
session_no = self.number_of_groups + random.randint(0, 99)
session_id = 'session_{0}'.format(session_no)
return domain_name, time.time(), session_id
def _run_stream_generation(self):
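        """Publish synthetic RawData documents in a loop, advancing the
        session group every TICK_INTERVAL seconds, until cancelled or the
        message broker becomes unavailable."""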
self.logger.info('Stream Generator: ON. Expected rate: {0}/s, {1}/m, {2}/h, {3}/d'
.format(1 / SLEEP_TIME, 1 / SLEEP_TIME * 60, 1 / SLEEP_TIME * 3600, 1 / SLEEP_TIME * 86400))
self.performance_tracker.start()
random.seed('RANDOM_SEED_OBJECT')
document = RawData()
while self.thread_is_running:
if time.time() - self.previous_tick > TICK_INTERVAL:
# increment group number every TICK_INTERVAL seconds
self.number_of_groups += 100
self.previous_tick = time.time()
try:
document.key = self._generate_key()
document.ip = '{0}.{1}.{2}.{3}'.format(random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255))
document.screen_resolution = (random.randrange(340, 1080, 100), random.randrange(240, 980, 100))
if self.performance_tracker.tracker.success.per_tick % 7 == 0:
document.os = 'OSX'
document.browser = 'Safari-10'
document.language = 'en_us'
document.country = 'usa'
elif self.performance_tracker.tracker.success.per_tick % 5 == 0:
document.os = 'Linux'
document.browser = 'FireFox-40'
document.language = 'en_ca'
document.country = 'canada'
elif self.performance_tracker.tracker.success.per_tick % 3 == 0:
document.os = 'Windows'
document.browser = 'IE-60'
document.language = 'ge_de'
document.country = 'germany'
else:
document.os = 'Android'
document.browser = 'FireMini-20'
document.language = 'es'
document.country = 'eu'
document.is_page_view = True
self.publisher.publish(document.document)
self.performance_tracker.tracker.increment_success()
time.sleep(SLEEP_TIME)
except (AMQPError, IOError) as e:
self.thread_is_running = False
self.performance_tracker.cancel()
self.logger.error(f'AMQPError: {e}')
except Exception as e:
self.performance_tracker.tracker.increment_failure()
self.logger.info(f'safety fuse: {e}')
def start(self, *_):
self.main_thread = Thread(target=self._run_stream_generation)
self.main_thread.start()
def cancel(self):
self.thread_is_running = False
if __name__ == '__main__':
from constants import PROCESS_STREAM_GEN
generator = EventStreamGenerator(PROCESS_STREAM_GEN)
generator.start()
|
py | b40d98926c5fe86078dddd97eb2aa69d720a4311 | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright © 2019, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import argparse
import inspect
import json
import logging
import os
import pkgutil
import warnings
from collections import namedtuple, defaultdict
from importlib import import_module
from pprint import pprint
import six
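# Metadata describing a single CLI argument parsed from a function signature
# and its docstring: (name, type, required, default, doc).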
ArgInfo = namedtuple('ArgInfo', ['name', 'type', 'required', 'default', 'doc'])
def sasctl_command(name, subname=None):
"""Decorator that tags the function as being usable from the command line.
Parameters
----------
name : str
the name of the command that will be shown on the command line.
subname : str
the name of the service that the command will be listed under
Returns
-------
function
Examples
--------
Define a command called 'cmd' not associated with a service
>>> @sasctl_command('cmd')
>>> def func():
...
Define a command called 'cmd' associated with the 'svc' service
>>> @sasctl_command('svc', 'cmd')
>>> def func():
...
    Define a command and allow its name and service to be auto-assigned
>>> @sasctl_command
>>> def func():
...
"""
def decorator(func):
if isinstance(name, six.string_types):
if isinstance(subname, six.string_types):
command_name = subname
service_name = name
else:
command_name = name
service_name = subname
else:
command_name = func.__name__
if any(command_name.startswith(x) for x in ['list_', 'update_', 'get_', 'create_', 'delete_']):
parts = command_name.split('_')
command_name = parts[0]
service_name = parts[-1]
else:
service_name = subname
def parse_args():
"""Retrieve argument metadata from function signature and docstring."""
arg_spec = inspect.getargspec(func)
defaults = list(arg_spec.defaults) if arg_spec.defaults is not None else []
required = [True] * (len(arg_spec.args) - len(defaults)) + [False] * len(defaults)
defaults = [None] * (len(arg_spec.args) - len(defaults)) + defaults
types = []
help_doc = []
doc = inspect.getdoc(func)
            if doc and 'Parameters\n' in doc:
doc_lines = doc[doc.find('Parameters\n'):].splitlines()
doc_lines.pop(0) # First line is "Parameters"
if doc_lines and doc_lines[0].startswith('---'):
doc_lines.pop(0) # Discard ----------- line under "Parameters" heading
while doc_lines:
var = doc_lines.pop(0)
if var.startswith('Returns') or var.strip() == '':
break
if ':' in var:
types.append(var.split(':')[-1].strip())
else:
types.append('str')
if doc_lines and doc_lines[0].startswith(' '):
help_doc.append(doc_lines.pop(0).strip())
else:
help_doc.append('')
else:
types = ['str'] * len(arg_spec.args)
help_doc = [None] * len(arg_spec.args)
return [ArgInfo(n, t, r, d, o) for n, t, r, d, o in
zip(arg_spec.args, types, required, defaults, help_doc)]
func._cli_command = command_name
func._cli_service = service_name
func._cli_arguments = parse_args
return func
if six.callable(name):
# allow direct decoration without arguments
return decorator(name)
return decorator
def _find_services(module='sasctl'):
"""Recursively find all functions in all modules that have been decorated as CLI commands."""
m = __import__(module, fromlist=['']) # returns a module
def find_recurse(module, services):
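        """Recurse through ``module`` and its submodules, registering every
        attribute tagged by @sasctl_command into
        ``services[service][command]``."""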
for obj in dir(module):
obj = getattr(module, obj)
if hasattr(obj, '_cli_command') and hasattr(obj, '_cli_service'):
services[obj._cli_service][obj._cli_command] = obj
elif type(obj).__module__.startswith('sasctl._services'):
for atr in dir(obj):
atr = getattr(obj, atr)
if hasattr(atr, '_cli_command') and hasattr(atr, '_cli_service'):
services[atr._cli_service][atr._cli_command] = atr
# recurse into submodules
submodules = pkgutil.iter_modules(getattr(module, '__path__', []))
for submodule in submodules:
if hasattr(submodule, 'name'):
                # ModuleInfo namedtuple returned by Python 3.6+
submodule = import_module('.' + submodule.name, package=module.__name__)
else:
# Tuple of (module_loader, name, ispkg) returned by older versions
submodule = import_module('.' + submodule[1], package=module.__name__)
services = find_recurse(submodule, services)
return services
services = find_recurse(m, defaultdict(dict))
return services
def _get_func_description(func):
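    """Return the first line of the function's docstring, if any."""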
description = getattr(func, '__doc__', '')
lines = description.split('\n')
if lines:
return lines[0]
def _build_parser(services):
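    """Build the top-level argparse parser with one sub-parser per service
    and one sub-command per registered command."""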
from sasctl import __version__
# TODO: Set command docstring
# Create standard, top-level arguments
parser = argparse.ArgumentParser(prog='sasctl', description='sasctl interacts with a SAS Viya environment.')
parser.add_argument('-k', '--insecure', action='store_true',
help='skip SSL verification')
parser.add_argument('-f', '--format', choices=['json'],
default='json', help='output format')
parser.add_argument('-v', '--verbose', action='count')
parser.add_argument('--version',
action='version',
version='%(prog)s ' + __version__)
subparsers = parser.add_subparsers(title='service', dest='service')
subparsers.required = True
for service, commands in six.iteritems(services):
service_parser = subparsers.add_parser(service)
service_subparser = service_parser.add_subparsers(title='command',
dest='command')
service_subparser.required = True
# Add the command and arguments for each command
for command in commands:
func = services[service][command]
cmd_parser = service_subparser.add_parser(command, help=_get_func_description(func))
for arg in func._cli_arguments():
if arg.name in ('self', 'cls'):
continue
if arg.required:
cmd_parser.add_argument(arg.name, help=arg.doc)
else:
cmd_parser.add_argument('--' + arg.name, required=arg.required, default=arg.default, help=arg.doc)
return parser
def main(args=None):
"""Main entry point when executed as a command line utility."""
from sasctl import Session, current_session
# Find all services and associated commands
services = _find_services()
parser = _build_parser(services)
args = parser.parse_args(args)
if args.verbose is None or args.verbose == 0:
lvl = logging.WARNING
elif args.verbose == 1:
lvl = logging.INFO
else:
lvl = logging.DEBUG
handler = logging.StreamHandler()
handler.setLevel(lvl)
logging.getLogger('sasctl.core').addHandler(handler)
logging.getLogger('sasctl.core').setLevel(lvl)
warnings.simplefilter('ignore')
func = services[args.service][args.command]
kwargs = vars(args).copy()
# Remove args that shouldn't be passed to the underlying command
for k in ['command', 'service', 'insecure', 'verbose', 'format']:
kwargs.pop(k, None)
username = os.environ.get('SASCTL_USER_NAME')
password = os.environ.get('SASCTL_PASSWORD')
server = os.environ.get('SASCTL_SERVER_NAME')
if server is None:
parser.error("Hostname must be specified in the 'SASCTL_SERVER_NAME' environment variable.")
verify_ssl = not args.insecure
try:
# current_session() should never be set when executing from the
# command line but it allows us to provide a pre-created session
# during testing
with current_session() or Session(server, username, password,
verify_ssl=verify_ssl):
result = func(**kwargs)
if isinstance(result, list):
pprint([str(x) for x in result])
elif isinstance(result, dict) and args.format == 'json':
print(json.dumps(result, indent=2))
else:
pprint(result)
except RuntimeError as e:
parser.error(e) |
py | b40d98f0cb679634bc5d3c6c2a252e65f98dd245 | # (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import os
import mock
from neutron.api import extensions as api_extensions
from neutron.api.v2 import attributes
from neutron.common import config
from neutron.common import constants as l3_constants
from neutron import context
from neutron.db import agentschedulers_db
from neutron.db import l3_agentschedulers_db
from neutron.db import servicetype_db as sdb
from neutron import extensions as nextensions
from neutron.extensions import l3 as l3_exception
from neutron import manager
from neutron.plugins.common import constants
from neutron.scheduler import l3_agent_scheduler
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db_plugin
from neutron.tests.unit.extensions import test_l3 as test_l3_plugin
from oslo_utils import uuidutils
import six
import webob.exc
from neutron_vpnaas.db.vpn import vpn_db
from neutron_vpnaas.db.vpn import vpn_models
from neutron_vpnaas.services.vpn import plugin as vpn_plugin
from neutron_vpnaas.tests import base
from neutron_vpnaas import extensions
from neutron_vpnaas.extensions import vpnaas
DB_CORE_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
DB_VPN_PLUGIN_KLASS = "neutron_vpnaas.services.vpn.plugin.VPNPlugin"
ROOTDIR = os.path.normpath(os.path.join(
os.path.dirname(__file__),
'..', '..', '..', '..'))
extensions_path = ':'.join(extensions.__path__ + nextensions.__path__)
class TestVpnCorePlugin(test_l3_plugin.TestL3NatIntPlugin,
l3_agentschedulers_db.L3AgentSchedulerDbMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin):
def __init__(self, configfile=None):
super(TestVpnCorePlugin, self).__init__()
self.router_scheduler = l3_agent_scheduler.ChanceScheduler()
class VPNTestMixin(object):
resource_prefix_map = dict(
(k.replace('_', '-'),
"/vpn")
for k in vpnaas.RESOURCE_ATTRIBUTE_MAP
)
def _create_ikepolicy(self, fmt,
name='ikepolicy1',
auth_algorithm='sha1',
encryption_algorithm='aes-128',
phase1_negotiation_mode='main',
lifetime_units='seconds',
lifetime_value=3600,
ike_version='v1',
pfs='group5',
expected_res_status=None, **kwargs):
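        """Issue a create request for an IKE policy and, when
        ``expected_res_status`` is given, assert the response status code."""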
data = {'ikepolicy': {
'name': name,
'auth_algorithm': auth_algorithm,
'encryption_algorithm': encryption_algorithm,
'phase1_negotiation_mode': phase1_negotiation_mode,
'lifetime': {
'units': lifetime_units,
'value': lifetime_value},
'ike_version': ike_version,
'pfs': pfs,
'tenant_id': self._tenant_id
}}
if kwargs.get('description') is not None:
data['ikepolicy']['description'] = kwargs['description']
ikepolicy_req = self.new_create_request('ikepolicies', data, fmt)
ikepolicy_res = ikepolicy_req.get_response(self.ext_api)
if expected_res_status:
self.assertEqual(ikepolicy_res.status_int, expected_res_status)
return ikepolicy_res
@contextlib.contextmanager
def ikepolicy(self, fmt=None,
name='ikepolicy1',
auth_algorithm='sha1',
encryption_algorithm='aes-128',
phase1_negotiation_mode='main',
lifetime_units='seconds',
lifetime_value=3600,
ike_version='v1',
pfs='group5',
do_delete=True,
**kwargs):
if not fmt:
fmt = self.fmt
res = self._create_ikepolicy(fmt,
name,
auth_algorithm,
encryption_algorithm,
phase1_negotiation_mode,
lifetime_units,
lifetime_value,
ike_version,
pfs,
**kwargs)
if res.status_int >= 400:
raise webob.exc.HTTPClientError(code=res.status_int)
ikepolicy = self.deserialize(fmt or self.fmt, res)
yield ikepolicy
if do_delete:
self._delete('ikepolicies', ikepolicy['ikepolicy']['id'])
def _create_ipsecpolicy(self, fmt,
name='ipsecpolicy1',
auth_algorithm='sha1',
encryption_algorithm='aes-128',
encapsulation_mode='tunnel',
transform_protocol='esp',
lifetime_units='seconds',
lifetime_value=3600,
pfs='group5',
expected_res_status=None,
**kwargs):
data = {'ipsecpolicy': {'name': name,
'auth_algorithm': auth_algorithm,
'encryption_algorithm': encryption_algorithm,
'encapsulation_mode': encapsulation_mode,
'transform_protocol': transform_protocol,
'lifetime': {'units': lifetime_units,
'value': lifetime_value},
'pfs': pfs,
'tenant_id': self._tenant_id}}
if kwargs.get('description') is not None:
data['ipsecpolicy']['description'] = kwargs['description']
ipsecpolicy_req = self.new_create_request('ipsecpolicies', data, fmt)
ipsecpolicy_res = ipsecpolicy_req.get_response(self.ext_api)
if expected_res_status:
self.assertEqual(ipsecpolicy_res.status_int, expected_res_status)
return ipsecpolicy_res
@contextlib.contextmanager
def ipsecpolicy(self, fmt=None,
name='ipsecpolicy1',
auth_algorithm='sha1',
encryption_algorithm='aes-128',
encapsulation_mode='tunnel',
transform_protocol='esp',
lifetime_units='seconds',
lifetime_value=3600,
pfs='group5',
do_delete=True, **kwargs):
if not fmt:
fmt = self.fmt
res = self._create_ipsecpolicy(fmt,
name,
auth_algorithm,
encryption_algorithm,
encapsulation_mode,
transform_protocol,
lifetime_units,
lifetime_value,
pfs,
**kwargs)
if res.status_int >= 400:
raise webob.exc.HTTPClientError(code=res.status_int)
ipsecpolicy = self.deserialize(fmt or self.fmt, res)
yield ipsecpolicy
if do_delete:
self._delete('ipsecpolicies', ipsecpolicy['ipsecpolicy']['id'])
def _create_vpnservice(self, fmt, name,
admin_state_up,
router_id, subnet_id,
expected_res_status=None, **kwargs):
tenant_id = kwargs.get('tenant_id', self._tenant_id)
data = {'vpnservice': {'name': name,
'subnet_id': subnet_id,
'router_id': router_id,
'admin_state_up': admin_state_up,
'tenant_id': tenant_id}}
if kwargs.get('description') is not None:
data['vpnservice']['description'] = kwargs['description']
vpnservice_req = self.new_create_request('vpnservices', data, fmt)
if (kwargs.get('set_context') and
'tenant_id' in kwargs):
# create a specific auth context for this request
vpnservice_req.environ['neutron.context'] = context.Context(
'', kwargs['tenant_id'])
vpnservice_res = vpnservice_req.get_response(self.ext_api)
if expected_res_status:
self.assertEqual(vpnservice_res.status_int, expected_res_status)
return vpnservice_res
@contextlib.contextmanager
def vpnservice(self, fmt=None, name='vpnservice1',
subnet=None,
router=None,
admin_state_up=True,
do_delete=True,
plug_subnet=True,
external_subnet_cidr='192.168.100.0/24',
external_router=True,
**kwargs):
if not fmt:
fmt = self.fmt
with test_db_plugin.optional_ctx(subnet, self.subnet) as tmp_subnet, \
test_db_plugin.optional_ctx(router,
self.router) as tmp_router, \
self.subnet(cidr=external_subnet_cidr) as public_sub:
if external_router:
self._set_net_external(
public_sub['subnet']['network_id'])
self._add_external_gateway_to_router(
tmp_router['router']['id'],
public_sub['subnet']['network_id'])
tmp_router['router']['external_gateway_info'] = {
'network_id': public_sub['subnet']['network_id']}
if plug_subnet:
self._router_interface_action(
'add',
tmp_router['router']['id'],
tmp_subnet['subnet']['id'], None)
res = self._create_vpnservice(fmt,
name,
admin_state_up,
router_id=(tmp_router['router']
['id']),
subnet_id=(tmp_subnet['subnet']
['id']),
**kwargs)
vpnservice = self.deserialize(fmt or self.fmt, res)
if res.status_int < 400:
yield vpnservice
if do_delete and vpnservice.get('vpnservice'):
self._delete('vpnservices',
vpnservice['vpnservice']['id'])
if plug_subnet:
self._router_interface_action(
'remove',
tmp_router['router']['id'],
tmp_subnet['subnet']['id'], None)
if external_router:
external_gateway = tmp_router['router'].get(
'external_gateway_info')
if external_gateway:
network_id = external_gateway['network_id']
self._remove_external_gateway_from_router(
tmp_router['router']['id'], network_id)
if res.status_int >= 400:
raise webob.exc.HTTPClientError(
code=res.status_int, detail=vpnservice)
self._delete('subnets', public_sub['subnet']['id'])
if not subnet:
self._delete('subnets', tmp_subnet['subnet']['id'])
def _create_ipsec_site_connection(self, fmt, name='test',
peer_address='192.168.1.10',
peer_id='192.168.1.10',
peer_cidrs=None,
mtu=1500,
psk='abcdefg',
initiator='bi-directional',
dpd_action='hold',
dpd_interval=30,
dpd_timeout=120,
vpnservice_id='fake_id',
ikepolicy_id='fake_id',
ipsecpolicy_id='fake_id',
admin_state_up=True,
expected_res_status=None, **kwargs):
data = {
'ipsec_site_connection': {'name': name,
'peer_address': peer_address,
'peer_id': peer_id,
'peer_cidrs': peer_cidrs,
'mtu': mtu,
'psk': psk,
'initiator': initiator,
'dpd': {
'action': dpd_action,
'interval': dpd_interval,
'timeout': dpd_timeout,
},
'vpnservice_id': vpnservice_id,
'ikepolicy_id': ikepolicy_id,
'ipsecpolicy_id': ipsecpolicy_id,
'admin_state_up': admin_state_up,
'tenant_id': self._tenant_id}
}
if kwargs.get('description') is not None:
data['ipsec_site_connection'][
'description'] = kwargs['description']
ipsec_site_connection_req = self.new_create_request(
'ipsec-site-connections', data, fmt
)
ipsec_site_connection_res = ipsec_site_connection_req.get_response(
self.ext_api
)
if expected_res_status:
self.assertEqual(
ipsec_site_connection_res.status_int, expected_res_status
)
return ipsec_site_connection_res
@contextlib.contextmanager
def ipsec_site_connection(self, fmt=None, name='ipsec_site_connection1',
peer_address='192.168.1.10',
peer_id='192.168.1.10',
peer_cidrs=None,
mtu=1500,
psk='abcdefg',
initiator='bi-directional',
dpd_action='hold',
dpd_interval=30,
dpd_timeout=120,
vpnservice=None,
ikepolicy=None,
ipsecpolicy=None,
admin_state_up=True, do_delete=True,
**kwargs):
if not fmt:
fmt = self.fmt
with test_db_plugin.optional_ctx(vpnservice, self.vpnservice
) as tmp_vpnservice, \
test_db_plugin.optional_ctx(ikepolicy, self.ikepolicy
) as tmp_ikepolicy, \
test_db_plugin.optional_ctx(ipsecpolicy, self.ipsecpolicy
) as tmp_ipsecpolicy:
vpnservice_id = tmp_vpnservice['vpnservice']['id']
ikepolicy_id = tmp_ikepolicy['ikepolicy']['id']
ipsecpolicy_id = tmp_ipsecpolicy['ipsecpolicy']['id']
res = self._create_ipsec_site_connection(fmt,
name,
peer_address,
peer_id,
peer_cidrs,
mtu,
psk,
initiator,
dpd_action,
dpd_interval,
dpd_timeout,
vpnservice_id,
ikepolicy_id,
ipsecpolicy_id,
admin_state_up,
**kwargs)
if res.status_int >= 400:
raise webob.exc.HTTPClientError(code=res.status_int)
ipsec_site_connection = self.deserialize(
fmt or self.fmt, res
)
yield ipsec_site_connection
if do_delete:
self._delete(
'ipsec-site-connections',
ipsec_site_connection[
'ipsec_site_connection']['id']
)
def _check_ipsec_site_connection(self, ipsec_site_connection, keys, dpd):
self.assertEqual(
keys,
dict((k, v) for k, v
in ipsec_site_connection.items()
if k in keys))
self.assertEqual(
dpd,
dict((k, v) for k, v
in ipsec_site_connection['dpd'].items()
if k in dpd))
def _set_active(self, model, resource_id):
service_plugin = manager.NeutronManager.get_service_plugins()[
constants.VPN]
adminContext = context.get_admin_context()
with adminContext.session.begin(subtransactions=True):
resource_db = service_plugin._get_resource(
adminContext,
model,
resource_id)
resource_db.status = constants.ACTIVE
class VPNPluginDbTestCase(VPNTestMixin,
test_l3_plugin.L3NatTestCaseMixin,
base.NeutronDbPluginV2TestCase):
def setUp(self, core_plugin=None, vpnaas_plugin=DB_VPN_PLUGIN_KLASS,
vpnaas_provider=None):
if not vpnaas_provider:
vpnaas_provider = (
constants.VPN +
':vpnaas:neutron_vpnaas.services.vpn.'
'service_drivers.ipsec.IPsecVPNDriver:default')
bits = vpnaas_provider.split(':')
vpnaas_provider = {
'service_type': bits[0],
'name': bits[1],
'driver': bits[2]
}
if len(bits) == 4:
vpnaas_provider['default'] = True
# override the default service provider
self.service_providers = (
mock.patch.object(sdb.ServiceTypeManager,
'get_service_providers').start())
self.service_providers.return_value = [vpnaas_provider]
# force service type manager to reload configuration:
sdb.ServiceTypeManager._instance = None
service_plugins = {'vpnaas_plugin': vpnaas_plugin}
plugin_str = ('neutron_vpnaas.tests.unit.db.vpn.'
'test_vpn_db.TestVpnCorePlugin')
super(VPNPluginDbTestCase, self).setUp(
plugin_str,
service_plugins=service_plugins
)
self._subnet_id = uuidutils.generate_uuid()
self.core_plugin = TestVpnCorePlugin()
self.plugin = vpn_plugin.VPNPlugin()
ext_mgr = api_extensions.PluginAwareExtensionManager(
extensions_path,
{constants.CORE: self.core_plugin,
constants.VPN: self.plugin}
)
app = config.load_paste_app('extensions_test_app')
self.ext_api = api_extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
class TestVpnaas(VPNPluginDbTestCase):
def setUp(self, **kwargs):
# TODO(armax): this is far from being a unit test case, as it tests
# that multiple parties (core + vpn) are integrated properly and
# should be replaced by API test that do not rely on so much mocking.
# NOTE(armax): make sure that the callbacks needed by this test are
# registered, as they may get wiped out depending by the order in
# which imports, subscriptions and mocks occur.
super(TestVpnaas, self).setUp(**kwargs)
vpn_db.subscribe()
def _check_policy(self, policy, keys, lifetime):
for k, v in keys:
self.assertEqual(policy[k], v)
for k, v in six.iteritems(lifetime):
self.assertEqual(policy['lifetime'][k], v)
def test_create_ikepolicy(self):
"""Test case to create an ikepolicy."""
name = "ikepolicy1"
description = 'ipsec-ikepolicy'
keys = [('name', name),
('description', 'ipsec-ikepolicy'),
('auth_algorithm', 'sha1'),
('encryption_algorithm', 'aes-128'),
('phase1_negotiation_mode', 'main'),
('ike_version', 'v1'),
('pfs', 'group5'),
('tenant_id', self._tenant_id)]
lifetime = {
'units': 'seconds',
'value': 3600}
with self.ikepolicy(name=name, description=description) as ikepolicy:
self._check_policy(ikepolicy['ikepolicy'], keys, lifetime)
def test_delete_ikepolicy(self):
"""Test case to delete an ikepolicy."""
with self.ikepolicy(do_delete=False) as ikepolicy:
req = self.new_delete_request('ikepolicies',
ikepolicy['ikepolicy']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, 204)
def test_show_ikepolicy(self):
"""Test case to show or get an ikepolicy."""
name = "ikepolicy1"
description = 'ipsec-ikepolicy'
keys = [('name', name),
('auth_algorithm', 'sha1'),
('encryption_algorithm', 'aes-128'),
('phase1_negotiation_mode', 'main'),
('ike_version', 'v1'),
('pfs', 'group5'),
('tenant_id', self._tenant_id)]
lifetime = {
'units': 'seconds',
'value': 3600}
with self.ikepolicy(name=name, description=description) as ikepolicy:
req = self.new_show_request('ikepolicies',
ikepolicy['ikepolicy']['id'],
fmt=self.fmt)
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self._check_policy(res['ikepolicy'], keys, lifetime)
def test_list_ikepolicies(self):
"""Test case to list all ikepolicies."""
name = "ikepolicy_list"
keys = [('name', name),
('auth_algorithm', 'sha1'),
('encryption_algorithm', 'aes-128'),
('phase1_negotiation_mode', 'main'),
('ike_version', 'v1'),
('pfs', 'group5'),
('tenant_id', self._tenant_id)]
lifetime = {
'units': 'seconds',
'value': 3600}
with self.ikepolicy(name=name) as ikepolicy:
keys.append(('id', ikepolicy['ikepolicy']['id']))
req = self.new_list_request('ikepolicies')
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(len(res), 1)
for k, v in keys:
self.assertEqual(res['ikepolicies'][0][k], v)
for k, v in six.iteritems(lifetime):
self.assertEqual(res['ikepolicies'][0]['lifetime'][k], v)
def test_list_ikepolicies_with_sort_emulated(self):
"""Test case to list all ikepolicies."""
with self.ikepolicy(name='ikepolicy1') as ikepolicy1, \
self.ikepolicy(name='ikepolicy2') as ikepolicy2, \
self.ikepolicy(name='ikepolicy3') as ikepolicy3:
self._test_list_with_sort('ikepolicy', (ikepolicy3,
ikepolicy2,
ikepolicy1),
[('name', 'desc')],
'ikepolicies')
def test_list_ikepolicies_with_pagination_emulated(self):
"""Test case to list all ikepolicies with pagination."""
with self.ikepolicy(name='ikepolicy1') as ikepolicy1, \
self.ikepolicy(name='ikepolicy2') as ikepolicy2, \
self.ikepolicy(name='ikepolicy3') as ikepolicy3:
self._test_list_with_pagination('ikepolicy',
(ikepolicy1,
ikepolicy2,
ikepolicy3),
('name', 'asc'), 2, 2,
'ikepolicies')
def test_list_ikepolicies_with_pagination_reverse_emulated(self):
"""Test case to list all ikepolicies with reverse pagination."""
with self.ikepolicy(name='ikepolicy1') as ikepolicy1, \
self.ikepolicy(name='ikepolicy2') as ikepolicy2, \
self.ikepolicy(name='ikepolicy3') as ikepolicy3:
self._test_list_with_pagination_reverse('ikepolicy',
(ikepolicy1,
ikepolicy2,
ikepolicy3),
('name', 'asc'), 2, 2,
'ikepolicies')
def test_update_ikepolicy(self):
"""Test case to update an ikepolicy."""
name = "new_ikepolicy1"
keys = [('name', name),
('auth_algorithm', 'sha1'),
('encryption_algorithm', 'aes-128'),
('phase1_negotiation_mode', 'main'),
('ike_version', 'v1'),
('pfs', 'group5'),
('tenant_id', self._tenant_id),
('lifetime', {'units': 'seconds',
'value': 60})]
with self.ikepolicy(name=name) as ikepolicy:
data = {'ikepolicy': {'name': name,
'lifetime': {'units': 'seconds',
'value': 60}}}
req = self.new_update_request("ikepolicies",
data,
ikepolicy['ikepolicy']['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
for k, v in keys:
self.assertEqual(res['ikepolicy'][k], v)
def test_create_ikepolicy_with_invalid_values(self):
"""Test case to test invalid values."""
name = 'ikepolicy1'
self._create_ikepolicy(name=name,
fmt=self.fmt,
auth_algorithm='md5',
expected_res_status=400)
self._create_ikepolicy(name=name,
fmt=self.fmt,
auth_algorithm=200,
expected_res_status=400)
self._create_ikepolicy(name=name,
fmt=self.fmt,
encryption_algorithm='des',
expected_res_status=400)
self._create_ikepolicy(name=name,
fmt=self.fmt,
encryption_algorithm=100,
expected_res_status=400)
self._create_ikepolicy(name=name,
fmt=self.fmt,
phase1_negotiation_mode='aggressive',
expected_res_status=400)
self._create_ikepolicy(name=name,
fmt=self.fmt,
phase1_negotiation_mode=-100,
expected_res_status=400)
self._create_ikepolicy(name=name,
fmt=self.fmt,
ike_version='v6',
expected_res_status=400)
self._create_ikepolicy(name=name,
fmt=self.fmt,
ike_version=500,
expected_res_status=400)
self._create_ikepolicy(name=name,
fmt=self.fmt,
pfs='group1',
expected_res_status=400)
self._create_ikepolicy(name=name,
fmt=self.fmt,
pfs=120,
expected_res_status=400)
self._create_ikepolicy(name=name,
fmt=self.fmt,
lifetime_units='Megabytes',
expected_res_status=400)
self._create_ikepolicy(name=name,
fmt=self.fmt,
lifetime_units=20000,
expected_res_status=400)
self._create_ikepolicy(name=name,
fmt=self.fmt,
lifetime_value=-20,
expected_res_status=400)
self._create_ikepolicy(name=name,
fmt=self.fmt,
lifetime_value='Megabytes',
expected_res_status=400)
def test_create_ipsecpolicy(self):
"""Test case to create an ipsecpolicy."""
name = "ipsecpolicy1"
description = 'my-ipsecpolicy'
keys = [('name', name),
('description', 'my-ipsecpolicy'),
('auth_algorithm', 'sha1'),
('encryption_algorithm', 'aes-128'),
('encapsulation_mode', 'tunnel'),
('transform_protocol', 'esp'),
('pfs', 'group5'),
('tenant_id', self._tenant_id)]
lifetime = {
'units': 'seconds',
'value': 3600}
with self.ipsecpolicy(name=name,
description=description) as ipsecpolicy:
self._check_policy(ipsecpolicy['ipsecpolicy'], keys, lifetime)
def test_delete_ipsecpolicy(self):
"""Test case to delete an ipsecpolicy."""
with self.ipsecpolicy(do_delete=False) as ipsecpolicy:
req = self.new_delete_request('ipsecpolicies',
ipsecpolicy['ipsecpolicy']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, 204)
def test_show_ipsecpolicy(self):
"""Test case to show or get an ipsecpolicy."""
name = "ipsecpolicy1"
keys = [('name', name),
('auth_algorithm', 'sha1'),
('encryption_algorithm', 'aes-128'),
('encapsulation_mode', 'tunnel'),
('transform_protocol', 'esp'),
('pfs', 'group5'),
('tenant_id', self._tenant_id)]
lifetime = {
'units': 'seconds',
'value': 3600}
with self.ipsecpolicy(name=name) as ipsecpolicy:
req = self.new_show_request('ipsecpolicies',
ipsecpolicy['ipsecpolicy']['id'],
fmt=self.fmt)
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self._check_policy(res['ipsecpolicy'], keys, lifetime)
def test_list_ipsecpolicies(self):
"""Test case to list all ipsecpolicies."""
name = "ipsecpolicy_list"
keys = [('name', name),
('auth_algorithm', 'sha1'),
('encryption_algorithm', 'aes-128'),
('encapsulation_mode', 'tunnel'),
('transform_protocol', 'esp'),
('pfs', 'group5'),
('tenant_id', self._tenant_id)]
lifetime = {
'units': 'seconds',
'value': 3600}
with self.ipsecpolicy(name=name) as ipsecpolicy:
keys.append(('id', ipsecpolicy['ipsecpolicy']['id']))
req = self.new_list_request('ipsecpolicies')
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(len(res), 1)
self._check_policy(res['ipsecpolicies'][0], keys, lifetime)
def test_list_ipsecpolicies_with_sort_emulated(self):
"""Test case to list all ipsecpolicies."""
with self.ipsecpolicy(name='ipsecpolicy1') as ipsecpolicy1, \
self.ipsecpolicy(name='ipsecpolicy2') as ipsecpolicy2, \
self.ipsecpolicy(name='ipsecpolicy3') as ipsecpolicy3:
self._test_list_with_sort('ipsecpolicy', (ipsecpolicy3,
ipsecpolicy2,
ipsecpolicy1),
[('name', 'desc')],
'ipsecpolicies')
def test_list_ipsecpolicies_with_pagination_emulated(self):
"""Test case to list all ipsecpolicies with pagination."""
with self.ipsecpolicy(name='ipsecpolicy1') as ipsecpolicy1, \
self.ipsecpolicy(name='ipsecpolicy2') as ipsecpolicy2, \
self.ipsecpolicy(name='ipsecpolicy3') as ipsecpolicy3:
self._test_list_with_pagination('ipsecpolicy',
(ipsecpolicy1,
ipsecpolicy2,
ipsecpolicy3),
('name', 'asc'), 2, 2,
'ipsecpolicies')
def test_list_ipsecpolicies_with_pagination_reverse_emulated(self):
"""Test case to list all ipsecpolicies with reverse pagination."""
with self.ipsecpolicy(name='ipsecpolicy1') as ipsecpolicy1, \
self.ipsecpolicy(name='ipsecpolicy2') as ipsecpolicy2, \
self.ipsecpolicy(name='ipsecpolicy3') as ipsecpolicy3:
self._test_list_with_pagination_reverse('ipsecpolicy',
(ipsecpolicy1,
ipsecpolicy2,
ipsecpolicy3),
('name', 'asc'), 2, 2,
'ipsecpolicies')
def test_update_ipsecpolicy(self):
"""Test case to update an ipsecpolicy."""
name = "new_ipsecpolicy1"
keys = [('name', name),
('auth_algorithm', 'sha1'),
('encryption_algorithm', 'aes-128'),
('encapsulation_mode', 'tunnel'),
('transform_protocol', 'esp'),
('pfs', 'group5'),
('tenant_id', self._tenant_id),
('lifetime', {'units': 'seconds',
'value': 60})]
with self.ipsecpolicy(name=name) as ipsecpolicy:
data = {'ipsecpolicy': {'name': name,
'lifetime': {'units': 'seconds',
'value': 60}}}
req = self.new_update_request("ipsecpolicies",
data,
ipsecpolicy['ipsecpolicy']['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
for k, v in keys:
self.assertEqual(res['ipsecpolicy'][k], v)
def test_update_ipsecpolicy_lifetime(self):
with self.ipsecpolicy() as ipsecpolicy:
data = {'ipsecpolicy': {'lifetime': {'units': 'seconds'}}}
req = self.new_update_request("ipsecpolicies",
data,
ipsecpolicy['ipsecpolicy']['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(res['ipsecpolicy']['lifetime']['units'],
'seconds')
data = {'ipsecpolicy': {'lifetime': {'value': 60}}}
req = self.new_update_request("ipsecpolicies",
data,
ipsecpolicy['ipsecpolicy']['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(res['ipsecpolicy']['lifetime']['value'], 60)
def test_create_ipsecpolicy_with_invalid_values(self):
"""Test case to test invalid values."""
name = 'ipsecpolicy1'
self._create_ipsecpolicy(
fmt=self.fmt,
name=name, auth_algorithm='md5', expected_res_status=400)
self._create_ipsecpolicy(
fmt=self.fmt,
name=name, auth_algorithm=100, expected_res_status=400)
self._create_ipsecpolicy(
fmt=self.fmt,
name=name, encryption_algorithm='des', expected_res_status=400)
self._create_ipsecpolicy(
fmt=self.fmt,
name=name, encryption_algorithm=200, expected_res_status=400)
self._create_ipsecpolicy(
fmt=self.fmt,
name=name, transform_protocol='abcd', expected_res_status=400)
self._create_ipsecpolicy(
fmt=self.fmt,
name=name, transform_protocol=500, expected_res_status=400)
self._create_ipsecpolicy(
fmt=self.fmt,
name=name,
encapsulation_mode='unsupported', expected_res_status=400)
self._create_ipsecpolicy(name=name,
fmt=self.fmt,
encapsulation_mode=100,
expected_res_status=400)
self._create_ipsecpolicy(name=name,
fmt=self.fmt,
pfs='group9', expected_res_status=400)
self._create_ipsecpolicy(
fmt=self.fmt, name=name, pfs=-1, expected_res_status=400)
self._create_ipsecpolicy(
fmt=self.fmt, name=name, lifetime_units='minutes',
expected_res_status=400)
self._create_ipsecpolicy(fmt=self.fmt, name=name, lifetime_units=100,
expected_res_status=400)
self._create_ipsecpolicy(fmt=self.fmt, name=name,
lifetime_value=-800, expected_res_status=400)
self._create_ipsecpolicy(fmt=self.fmt, name=name,
lifetime_value='Megabytes',
expected_res_status=400)
def test_create_vpnservice(self, **extras):
"""Test case to create a vpnservice."""
description = 'my-vpn-service'
expected = {'name': 'vpnservice1',
'description': 'my-vpn-service',
'admin_state_up': True,
'status': 'PENDING_CREATE',
'tenant_id': self._tenant_id, }
expected.update(extras)
with self.subnet(cidr='10.2.0.0/24') as subnet:
with self.router() as router:
expected['router_id'] = router['router']['id']
expected['subnet_id'] = subnet['subnet']['id']
name = expected['name']
with self.vpnservice(name=name,
subnet=subnet,
router=router,
description=description,
**extras) as vpnservice:
self.assertEqual(dict((k, v) for k, v in
vpnservice['vpnservice'].items()
if k in expected),
expected)
def test_delete_router_interface_in_use_by_vpnservice(self):
"""Test delete router interface in use by vpn service."""
with self.subnet(cidr='10.2.0.0/24') as subnet:
with self.router() as router:
with self.vpnservice(subnet=subnet,
router=router):
self._router_interface_action('remove',
router['router']['id'],
subnet['subnet']['id'],
None,
expected_code=webob.exc.
HTTPConflict.code)
def test_delete_external_gateway_interface_in_use_by_vpnservice(self):
"""Test delete external gateway interface in use by vpn service."""
with self.subnet(cidr='10.2.0.0/24') as subnet:
with self.router() as router:
with self.subnet(cidr='11.0.0.0/24') as public_sub:
self._set_net_external(
public_sub['subnet']['network_id'])
self._add_external_gateway_to_router(
router['router']['id'],
public_sub['subnet']['network_id'])
with self.vpnservice(subnet=subnet,
router=router):
self._remove_external_gateway_from_router(
router['router']['id'],
public_sub['subnet']['network_id'],
expected_code=webob.exc.HTTPConflict.code)
def test_router_update_after_ipsec_site_connection(self):
"""Test case to update router after vpn connection."""
rname1 = "router_one"
rname2 = "router_two"
with self.subnet(cidr='10.2.0.0/24') as subnet:
with self.router(name=rname1) as r:
with self.vpnservice(subnet=subnet,
router=r
) as vpnservice:
self.ipsec_site_connection(
name='connection1', vpnservice=vpnservice
)
body = self._show('routers', r['router']['id'])
self.assertEqual(body['router']['name'], rname1)
body = self._update('routers', r['router']['id'],
{'router': {'name': rname2}})
body = self._show('routers', r['router']['id'])
self.assertEqual(body['router']['name'], rname2)
def test_update_vpnservice(self):
"""Test case to update a vpnservice."""
name = 'new_vpnservice1'
keys = [('name', name)]
with self.subnet(cidr='10.2.0.0/24') as subnet, \
self.router() as router:
with self.vpnservice(name=name,
subnet=subnet,
router=router) as vpnservice:
keys.append(('subnet_id',
vpnservice['vpnservice']['subnet_id']))
keys.append(('router_id',
vpnservice['vpnservice']['router_id']))
data = {'vpnservice': {'name': name}}
self._set_active(vpn_models.VPNService,
vpnservice['vpnservice']['id'])
req = self.new_update_request(
'vpnservices',
data,
vpnservice['vpnservice']['id'])
res = self.deserialize(self.fmt,
req.get_response(self.ext_api))
for k, v in keys:
self.assertEqual(res['vpnservice'][k], v)
def test_update_vpnservice_with_invalid_state(self):
"""Test case to update a vpnservice in invalid state ."""
name = 'new_vpnservice1'
keys = [('name', name)]
with self.subnet(cidr='10.2.0.0/24') as subnet, \
self.router() as router:
with self.vpnservice(name=name,
subnet=subnet,
router=router) as vpnservice:
keys.append(('subnet_id',
vpnservice['vpnservice']['subnet_id']))
keys.append(('router_id',
vpnservice['vpnservice']['router_id']))
data = {'vpnservice': {'name': name}}
req = self.new_update_request(
'vpnservices',
data,
vpnservice['vpnservice']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(400, res.status_int)
res = self.deserialize(self.fmt, res)
self.assertIn(vpnservice['vpnservice']['id'],
res['NeutronError']['message'])
def test_delete_vpnservice(self):
"""Test case to delete a vpnservice."""
with self.vpnservice(name='vpnserver',
do_delete=False) as vpnservice:
req = self.new_delete_request('vpnservices',
vpnservice['vpnservice']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, 204)
def test_show_vpnservice(self):
"""Test case to show or get a vpnservice."""
name = "vpnservice1"
keys = [('name', name),
('description', ''),
('admin_state_up', True),
('status', 'PENDING_CREATE')]
with self.vpnservice(name=name) as vpnservice:
req = self.new_show_request('vpnservices',
vpnservice['vpnservice']['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
for k, v in keys:
self.assertEqual(res['vpnservice'][k], v)
def test_list_vpnservices(self):
"""Test case to list all vpnservices."""
name = "vpnservice_list"
keys = [('name', name),
('description', ''),
('admin_state_up', True),
('status', 'PENDING_CREATE')]
with self.vpnservice(name=name) as vpnservice:
keys.append(('subnet_id', vpnservice['vpnservice']['subnet_id']))
keys.append(('router_id', vpnservice['vpnservice']['router_id']))
req = self.new_list_request('vpnservices')
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(len(res), 1)
for k, v in keys:
self.assertEqual(res['vpnservices'][0][k], v)
def test_list_vpnservices_with_sort_emulated(self):
"""Test case to list all vpnservices with sorting."""
with self.subnet() as subnet:
with self.router() as router:
with self.vpnservice(name='vpnservice1',
subnet=subnet,
router=router,
external_subnet_cidr='192.168.10.0/24'
) as vpnservice1, \
self.vpnservice(name='vpnservice2',
subnet=subnet,
router=router,
plug_subnet=False,
external_router=False,
external_subnet_cidr='192.168.11.0/24'
) as vpnservice2, \
self.vpnservice(name='vpnservice3',
subnet=subnet,
router=router,
plug_subnet=False,
external_router=False,
external_subnet_cidr='192.168.13.0/24'
) as vpnservice3:
self._test_list_with_sort('vpnservice', (vpnservice3,
vpnservice2,
vpnservice1),
[('name', 'desc')])
def test_list_vpnservice_with_pagination_emulated(self):
"""Test case to list all vpnservices with pagination."""
with self.subnet() as subnet:
with self.router() as router:
with self.vpnservice(name='vpnservice1',
subnet=subnet,
router=router,
external_subnet_cidr='192.168.10.0/24'
) as vpnservice1, \
self.vpnservice(name='vpnservice2',
subnet=subnet,
router=router,
plug_subnet=False,
external_subnet_cidr='192.168.20.0/24',
external_router=False
) as vpnservice2, \
self.vpnservice(name='vpnservice3',
subnet=subnet,
router=router,
plug_subnet=False,
external_subnet_cidr='192.168.30.0/24',
external_router=False
) as vpnservice3:
self._test_list_with_pagination('vpnservice',
(vpnservice1,
vpnservice2,
vpnservice3),
('name', 'asc'), 2, 2)
def test_list_vpnservice_with_pagination_reverse_emulated(self):
"""Test case to list all vpnservices with reverse pagination."""
with self.subnet() as subnet:
with self.router() as router:
with self.vpnservice(name='vpnservice1',
subnet=subnet,
router=router,
external_subnet_cidr='192.168.10.0/24'
) as vpnservice1, \
self.vpnservice(name='vpnservice2',
subnet=subnet,
router=router,
plug_subnet=False,
external_subnet_cidr='192.168.11.0/24',
external_router=False
) as vpnservice2, \
self.vpnservice(name='vpnservice3',
subnet=subnet,
router=router,
plug_subnet=False,
external_subnet_cidr='192.168.12.0/24',
external_router=False
) as vpnservice3:
self._test_list_with_pagination_reverse('vpnservice',
(vpnservice1,
vpnservice2,
vpnservice3),
('name', 'asc'),
2, 2)
def test_create_ipsec_site_connection_with_invalid_values(self):
"""Test case to create an ipsec_site_connection with invalid values."""
name = 'connection1'
self._create_ipsec_site_connection(
fmt=self.fmt,
name=name, peer_cidrs='myname', expected_res_status=400)
self._create_ipsec_site_connection(
fmt=self.fmt,
name=name, mtu=-100, expected_res_status=400)
self._create_ipsec_site_connection(
fmt=self.fmt,
name=name, dpd_action='unsupported', expected_res_status=400)
self._create_ipsec_site_connection(
fmt=self.fmt,
name=name, dpd_interval=-1, expected_res_status=400)
self._create_ipsec_site_connection(
fmt=self.fmt,
name=name, dpd_timeout=-200, expected_res_status=400)
self._create_ipsec_site_connection(
fmt=self.fmt,
name=name, initiator='unsupported', expected_res_status=400)
def _test_create_ipsec_site_connection(self, key_overrides=None,
setup_overrides=None,
expected_status_int=200):
"""Create ipsec_site_connection and check results."""
params = {'ikename': 'ikepolicy1',
'ipsecname': 'ipsecpolicy1',
'vpnsname': 'vpnservice1',
'subnet_cidr': '10.2.0.0/24',
'subnet_version': 4}
if setup_overrides is not None:
params.update(setup_overrides)
keys = {'name': 'connection1',
'description': 'my-ipsec-connection',
'peer_address': '192.168.1.10',
'peer_id': '192.168.1.10',
'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'],
'initiator': 'bi-directional',
'mtu': 1500,
'tenant_id': self._tenant_id,
'psk': 'abcd',
'status': 'PENDING_CREATE',
'admin_state_up': True}
if key_overrides is not None:
keys.update(key_overrides)
dpd = {'action': 'hold',
'interval': 40,
'timeout': 120}
with self.ikepolicy(name=params['ikename']) as ikepolicy, \
self.ipsecpolicy(name=params['ipsecname']) as ipsecpolicy, \
self.subnet(cidr=params['subnet_cidr'],
ip_version=params['subnet_version']) as subnet, \
self.router() as router:
with self.vpnservice(name=params['vpnsname'], subnet=subnet,
router=router) as vpnservice1:
keys['ikepolicy_id'] = ikepolicy['ikepolicy']['id']
keys['ipsecpolicy_id'] = ipsecpolicy['ipsecpolicy']['id']
keys['vpnservice_id'] = vpnservice1['vpnservice']['id']
try:
with self.ipsec_site_connection(
self.fmt,
keys['name'],
keys['peer_address'],
keys['peer_id'],
keys['peer_cidrs'],
keys['mtu'],
keys['psk'],
keys['initiator'],
dpd['action'],
dpd['interval'],
dpd['timeout'],
vpnservice1,
ikepolicy,
ipsecpolicy,
keys['admin_state_up'],
description=keys['description']
) as ipsec_site_connection:
if expected_status_int != 200:
self.fail("Expected failure on create")
self._check_ipsec_site_connection(
ipsec_site_connection['ipsec_site_connection'],
keys,
dpd)
except webob.exc.HTTPClientError as ce:
self.assertEqual(ce.code, expected_status_int)
self._delete('subnets', subnet['subnet']['id'])
def test_create_ipsec_site_connection(self, **extras):
"""Test case to create an ipsec_site_connection."""
self._test_create_ipsec_site_connection(key_overrides=extras)
def test_delete_ipsec_site_connection(self):
"""Test case to delete a ipsec_site_connection."""
with self.ipsec_site_connection(
do_delete=False) as ipsec_site_connection:
req = self.new_delete_request(
'ipsec-site-connections',
ipsec_site_connection['ipsec_site_connection']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, 204)
def test_update_ipsec_site_connection(self):
"""Test case for valid updates to IPSec site connection."""
dpd = {'action': 'hold',
'interval': 40,
'timeout': 120}
self._test_update_ipsec_site_connection(update={'dpd': dpd})
self._test_update_ipsec_site_connection(update={'mtu': 2000})
ipv6_settings = {
'peer_address': 'fe80::c0a8:10a',
'peer_id': 'fe80::c0a8:10a',
'peer_cidrs': ['fe80::c0a8:200/120', 'fe80::c0a8:300/120'],
'subnet_cidr': 'fe80::a02:0/120',
'subnet_version': 6}
self._test_update_ipsec_site_connection(update={'mtu': 2000},
overrides=ipv6_settings)
def test_update_ipsec_site_connection_with_invalid_state(self):
"""Test updating an ipsec_site_connection in invalid state."""
self._test_update_ipsec_site_connection(
overrides={'make_active': False},
expected_status_int=400)
def test_update_ipsec_site_connection_peer_cidrs(self):
"""Test updating an ipsec_site_connection for peer_cidrs."""
new_peers = {'peer_cidrs': ['192.168.4.0/24',
'192.168.5.0/24']}
self._test_update_ipsec_site_connection(
update=new_peers)
def _test_update_ipsec_site_connection(self,
update={'name': 'new name'},
overrides=None,
expected_status_int=200):
"""Creates and then updates ipsec_site_connection."""
keys = {'name': 'new_ipsec_site_connection',
'ikename': 'ikepolicy1',
'ipsecname': 'ipsecpolicy1',
'vpnsname': 'vpnservice1',
'description': 'my-ipsec-connection',
'peer_address': '192.168.1.10',
'peer_id': '192.168.1.10',
'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'],
'initiator': 'bi-directional',
'mtu': 1500,
'tenant_id': self._tenant_id,
'psk': 'abcd',
'status': 'ACTIVE',
'admin_state_up': True,
'action': 'hold',
'interval': 40,
'timeout': 120,
'subnet_cidr': '10.2.0.0/24',
'subnet_version': 4,
'make_active': True}
if overrides is not None:
keys.update(overrides)
with self.ikepolicy(name=keys['ikename']) as ikepolicy, \
self.ipsecpolicy(name=keys['ipsecname']) as ipsecpolicy, \
self.subnet(cidr=keys['subnet_cidr'],
ip_version=keys['subnet_version']) as subnet, \
self.router() as router:
with self.vpnservice(name=keys['vpnsname'], subnet=subnet,
router=router) as vpnservice1:
ext_gw = router['router']['external_gateway_info']
if ext_gw:
self._create_subnet(self.fmt,
net_id=ext_gw['network_id'],
ip_version=6, cidr='2001:db8::/32')
keys['vpnservice_id'] = vpnservice1['vpnservice']['id']
keys['ikepolicy_id'] = ikepolicy['ikepolicy']['id']
keys['ipsecpolicy_id'] = ipsecpolicy['ipsecpolicy']['id']
with self.ipsec_site_connection(
self.fmt,
keys['name'],
keys['peer_address'],
keys['peer_id'],
keys['peer_cidrs'],
keys['mtu'],
keys['psk'],
keys['initiator'],
keys['action'],
keys['interval'],
keys['timeout'],
vpnservice1,
ikepolicy,
ipsecpolicy,
keys['admin_state_up'],
description=keys['description']
) as ipsec_site_connection:
data = {'ipsec_site_connection': update}
if keys.get('make_active', None):
self._set_active(
vpn_models.IPsecSiteConnection,
(ipsec_site_connection['ipsec_site_connection']
['id']))
req = self.new_update_request(
'ipsec-site-connections',
data,
ipsec_site_connection['ipsec_site_connection']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(expected_status_int, res.status_int)
if expected_status_int == 200:
res_dict = self.deserialize(self.fmt, res)
actual = res_dict['ipsec_site_connection']
for k, v in update.items():
# Sort lists before checking equality
if isinstance(actual[k], list):
self.assertEqual(v, sorted(actual[k]))
else:
self.assertEqual(v, actual[k])
self._delete('networks', subnet['subnet']['network_id'])
def test_show_ipsec_site_connection(self):
"""Test case to show a ipsec_site_connection."""
ikename = "ikepolicy1"
ipsecname = "ipsecpolicy1"
vpnsname = "vpnservice1"
name = "connection1"
description = "my-ipsec-connection"
keys = {'name': name,
'description': "my-ipsec-connection",
'peer_address': '192.168.1.10',
'peer_id': '192.168.1.10',
'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'],
'initiator': 'bi-directional',
'mtu': 1500,
'tenant_id': self._tenant_id,
'psk': 'abcd',
'status': 'PENDING_CREATE',
'admin_state_up': True}
dpd = {'action': 'hold',
'interval': 40,
'timeout': 120}
with self.ikepolicy(name=ikename) as ikepolicy, \
self.ipsecpolicy(name=ipsecname) as ipsecpolicy, \
self.subnet() as subnet, \
self.router() as router:
with self.vpnservice(name=vpnsname, subnet=subnet,
router=router) as vpnservice1:
keys['ikepolicy_id'] = ikepolicy['ikepolicy']['id']
keys['ipsecpolicy_id'] = ipsecpolicy['ipsecpolicy']['id']
keys['vpnservice_id'] = vpnservice1['vpnservice']['id']
with self.ipsec_site_connection(
self.fmt,
name,
keys['peer_address'],
keys['peer_id'],
keys['peer_cidrs'],
keys['mtu'],
keys['psk'],
keys['initiator'],
dpd['action'],
dpd['interval'],
dpd['timeout'],
vpnservice1,
ikepolicy,
ipsecpolicy,
keys['admin_state_up'],
description=description,
) as ipsec_site_connection:
req = self.new_show_request(
'ipsec-site-connections',
ipsec_site_connection[
'ipsec_site_connection']['id'],
fmt=self.fmt
)
res = self.deserialize(
self.fmt,
req.get_response(self.ext_api)
)
self._check_ipsec_site_connection(
res['ipsec_site_connection'],
keys,
dpd)
def test_list_ipsec_site_connections_with_sort_emulated(self):
"""Test case to list all ipsec_site_connections with sort."""
with self.subnet(cidr='10.2.0.0/24') as subnet:
with self.router() as router:
with self.vpnservice(subnet=subnet,
router=router
) as vpnservice:
with self.ipsec_site_connection(name='connection1',
vpnservice=vpnservice
) as conn1, \
self.ipsec_site_connection(name='connection2',
vpnservice=vpnservice
) as conn2, \
self.ipsec_site_connection(name='connection3',
vpnservice=vpnservice
) as conn3:
self._test_list_with_sort('ipsec-site-connection',
(conn3, conn2, conn1),
[('name', 'desc')])
def test_list_ipsec_site_connections_with_pagination_emulated(self):
"""Test case to list all ipsec_site_connections with pagination."""
with self.subnet(cidr='10.2.0.0/24') as subnet:
with self.router() as router:
with self.vpnservice(subnet=subnet,
router=router
) as vpnservice:
with self.ipsec_site_connection(
name='ipsec_site_connection1',
vpnservice=vpnservice) as conn1, \
self.ipsec_site_connection(
name='ipsec_site_connection1',
vpnservice=vpnservice) as conn2, \
self.ipsec_site_connection(
name='ipsec_site_connection1',
vpnservice=vpnservice) as conn3:
self._test_list_with_pagination(
'ipsec-site-connection',
(conn1, conn2, conn3),
('name', 'asc'), 2, 2)
def test_list_ipsec_site_conns_with_pagination_reverse_emulated(self):
"""Test to list all ipsec_site_connections with reverse pagination."""
with self.subnet(cidr='10.2.0.0/24') as subnet:
with self.router() as router:
with self.vpnservice(subnet=subnet,
router=router
) as vpnservice:
with self.ipsec_site_connection(name='connection1',
vpnservice=vpnservice
) as conn1, \
self.ipsec_site_connection(name='connection2',
vpnservice=vpnservice
) as conn2, \
self.ipsec_site_connection(name='connection3',
vpnservice=vpnservice
) as conn3:
self._test_list_with_pagination_reverse(
'ipsec-site-connection',
(conn1, conn2, conn3),
('name', 'asc'), 2, 2
)
def test_create_vpn(self):
"""Test case to create a vpn."""
vpns_name = "vpnservice1"
ike_name = "ikepolicy1"
ipsec_name = "ipsecpolicy1"
name1 = "ipsec_site_connection1"
with self.ikepolicy(name=ike_name) as ikepolicy, \
self.ipsecpolicy(name=ipsec_name) as ipsecpolicy, \
self.vpnservice(name=vpns_name) as vpnservice:
vpnservice_id = vpnservice['vpnservice']['id']
ikepolicy_id = ikepolicy['ikepolicy']['id']
ipsecpolicy_id = ipsecpolicy['ipsecpolicy']['id']
with self.ipsec_site_connection(
self.fmt,
name1,
'192.168.1.10',
'192.168.1.10',
['192.168.2.0/24',
'192.168.3.0/24'],
1500,
'abcdef',
'bi-directional',
'hold',
30,
120,
vpnservice,
ikepolicy,
ipsecpolicy,
True
) as vpnconn1:
vpnservice_req = self.new_show_request(
'vpnservices',
vpnservice_id,
fmt=self.fmt)
vpnservice_updated = self.deserialize(
self.fmt,
vpnservice_req.get_response(self.ext_api)
)
self.assertEqual(
vpnservice_updated['vpnservice']['id'],
vpnconn1['ipsec_site_connection']['vpnservice_id']
)
ikepolicy_req = self.new_show_request('ikepolicies',
ikepolicy_id,
fmt=self.fmt)
ikepolicy_res = self.deserialize(
self.fmt,
ikepolicy_req.get_response(self.ext_api)
)
self.assertEqual(
ikepolicy_res['ikepolicy']['id'],
vpnconn1['ipsec_site_connection']['ikepolicy_id'])
ipsecpolicy_req = self.new_show_request(
'ipsecpolicies',
ipsecpolicy_id,
fmt=self.fmt)
ipsecpolicy_res = self.deserialize(
self.fmt,
ipsecpolicy_req.get_response(self.ext_api)
)
self.assertEqual(
ipsecpolicy_res['ipsecpolicy']['id'],
vpnconn1['ipsec_site_connection']['ipsecpolicy_id']
)
def test_delete_ikepolicy_inuse(self):
"""Test case to delete an ikepolicy, that is in use."""
vpns_name = "vpnservice1"
ike_name = "ikepolicy1"
ipsec_name = "ipsecpolicy1"
name1 = "ipsec_site_connection1"
with self.ikepolicy(name=ike_name) as ikepolicy:
with self.ipsecpolicy(name=ipsec_name) as ipsecpolicy:
with self.vpnservice(name=vpns_name) as vpnservice:
with self.ipsec_site_connection(
self.fmt,
name1,
'192.168.1.10',
'192.168.1.10',
['192.168.2.0/24',
'192.168.3.0/24'],
1500,
'abcdef',
'bi-directional',
'hold',
30,
120,
vpnservice,
ikepolicy,
ipsecpolicy,
True
):
delete_req = self.new_delete_request(
'ikepolicies',
ikepolicy['ikepolicy']['id']
)
delete_res = delete_req.get_response(self.ext_api)
self.assertEqual(409, delete_res.status_int)
def test_delete_ipsecpolicy_inuse(self):
"""Test case to delete an ipsecpolicy, that is in use."""
vpns_name = "vpnservice1"
ike_name = "ikepolicy1"
ipsec_name = "ipsecpolicy1"
name1 = "ipsec_site_connection1"
with self.ikepolicy(name=ike_name) as ikepolicy:
with self.ipsecpolicy(name=ipsec_name) as ipsecpolicy:
with self.vpnservice(name=vpns_name) as vpnservice:
with self.ipsec_site_connection(
self.fmt,
name1,
'192.168.1.10',
'192.168.1.10',
['192.168.2.0/24',
'192.168.3.0/24'],
1500,
'abcdef',
'bi-directional',
'hold',
30,
120,
vpnservice,
ikepolicy,
ipsecpolicy,
True
):
delete_req = self.new_delete_request(
'ipsecpolicies',
ipsecpolicy['ipsecpolicy']['id']
)
delete_res = delete_req.get_response(self.ext_api)
self.assertEqual(409, delete_res.status_int)
def test_router_in_use_by_vpnaas(self):
"""Check that exception raised, if router in use by VPNaaS."""
with self.subnet(cidr='10.2.0.0/24') as subnet, \
self.router() as router:
with self.vpnservice(subnet=subnet,
router=router):
self.assertRaises(l3_exception.RouterInUse,
self.plugin.check_router_in_use,
context.get_admin_context(),
router['router']['id'])
def test_subnet_in_use_by_vpnaas(self):
"""Check that exception raised, if subnet in use by VPNaaS."""
with self.subnet(cidr='10.2.0.0/24') as subnet, \
self.router() as router:
with self.vpnservice(subnet=subnet,
router=router):
self.assertRaises(vpnaas.SubnetInUseByVPNService,
self.plugin.check_subnet_in_use,
context.get_admin_context(),
subnet['subnet']['id'])
def test_check_router_has_no_vpn(self):
with mock.patch.object(
manager.NeutronManager, 'get_service_plugins') as sp:
vpn_plugin = mock.Mock()
sp.return_value = {'VPN': vpn_plugin}
kwargs = {'context': mock.ANY, 'router': {'id': 'foo_id'}}
self.assertTrue(vpn_db.migration_callback(
mock.ANY, mock.ANY, mock.ANY, **kwargs))
vpn_plugin.check_router_in_use.assert_called_once_with(
mock.ANY, 'foo_id')
# Note: Below are new database related tests that only exercise the database
# instead of going through the client API. The intent here is to (eventually)
# convert all the database tests to this method, for faster, more granular
# tests.
# TODO(pcm): Put helpers in another module for sharing
class NeutronResourcesMixin(object):
def create_network(self, overrides=None):
"""Create datatbase entry for network."""
network_info = {'network': {'name': 'my-net',
'tenant_id': self.tenant_id,
'admin_state_up': True,
'shared': False}}
if overrides:
network_info['network'].update(overrides)
return self.core_plugin.create_network(self.context, network_info)
def create_subnet(self, overrides=None):
"""Create database entry for subnet."""
subnet_info = {'subnet': {'name': 'my-subnet',
'tenant_id': self.tenant_id,
'ip_version': 4,
'enable_dhcp': True,
'dns_nameservers': None,
'host_routes': None,
'allocation_pools': None}}
if overrides:
subnet_info['subnet'].update(overrides)
return self.core_plugin.create_subnet(self.context, subnet_info)
def create_router(self, overrides=None, gw=None):
"""Create database entry for router with optional gateway."""
router_info = {
'router': {
'name': 'my-router',
'tenant_id': self.tenant_id,
'admin_state_up': True,
}
}
if overrides:
router_info['router'].update(overrides)
if gw:
gw_info = {
'external_gateway_info': {
'network_id': gw['net_id'],
'external_fixed_ips': [{'subnet_id': gw['subnet_id'],
'ip_address': gw['ip']}],
}
}
router_info['router'].update(gw_info)
return self.l3_plugin.create_router(self.context, router_info)
def create_router_port_for_subnet(self, router, subnet):
"""Creates port on router for subnet specified."""
port = {'port': {
'tenant_id': self.tenant_id,
'network_id': subnet['network_id'],
'fixed_ips': [
{'ip_address': subnet['gateway_ip'],
'subnet_id': subnet['id']}
],
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'admin_state_up': True,
'device_id': router['id'],
'device_owner': l3_constants.DEVICE_OWNER_ROUTER_INTF,
'name': ''
}}
return self.core_plugin.create_port(self.context, port)
def create_basic_topology(self):
"""Setup networks, subnets, and a router for testing VPN."""
public_net = self.create_network(overrides={'name': 'public',
'router:external': True})
private_net = self.create_network(overrides={'name': 'private'})
overrides = {'name': 'private-subnet',
'cidr': '10.2.0.0/24',
'gateway_ip': '10.2.0.1',
'network_id': private_net['id']}
private_subnet = self.create_subnet(overrides=overrides)
overrides = {'name': 'public-subnet',
'cidr': '192.168.100.0/24',
'gateway_ip': '192.168.100.1',
'allocation_pools': [{'start': '192.168.100.2',
'end': '192.168.100.254'}],
'network_id': public_net['id']}
public_subnet = self.create_subnet(overrides=overrides)
gw_info = {'net_id': public_net['id'],
'subnet_id': public_subnet['id'],
'ip': '192.168.100.5'}
router = self.create_router(gw=gw_info)
self.create_router_port_for_subnet(router, private_subnet)
return (private_subnet, router)
class TestVpnDatabase(base.NeutronDbPluginV2TestCase, NeutronResourcesMixin):
def setUp(self):
# Setup the core plugin
self.plugin_str = ('neutron_vpnaas.tests.unit.db.vpn.'
'test_vpn_db.TestVpnCorePlugin')
super(TestVpnDatabase, self).setUp(self.plugin_str)
# Get the plugins
self.core_plugin = manager.NeutronManager.get_plugin()
self.l3_plugin = manager.NeutronManager.get_service_plugins().get(
constants.L3_ROUTER_NAT)
# Create VPN database instance
self.plugin = vpn_db.VPNPluginDb()
self.tenant_id = uuidutils.generate_uuid()
self.context = context.get_admin_context()
def prepare_service_info(self, private_subnet, router):
return {'vpnservice': {'name': 'my-service',
'description': 'new service',
'subnet_id': private_subnet['id'],
'router_id': router['id'],
'admin_state_up': True}}
def test_create_vpnservice(self):
private_subnet, router = self.create_basic_topology()
info = self.prepare_service_info(private_subnet, router)
expected = {'admin_state_up': True,
'external_v4_ip': None,
'external_v6_ip': None,
'status': 'PENDING_CREATE'}
expected.update(info['vpnservice'])
new_service = self.plugin.create_vpnservice(self.context, info)
self.assertDictSupersetOf(expected, new_service)
def test_update_external_tunnel_ips(self):
"""Verify that external tunnel IPs can be set."""
private_subnet, router = self.create_basic_topology()
info = self.prepare_service_info(private_subnet, router)
expected = {'admin_state_up': True,
'external_v4_ip': None,
'external_v6_ip': None,
'status': 'PENDING_CREATE'}
expected.update(info['vpnservice'])
new_service = self.plugin.create_vpnservice(self.context, info)
self.assertDictSupersetOf(expected, new_service)
external_v4_ip = '192.168.100.5'
external_v6_ip = 'fd00:1000::4'
expected.update({'external_v4_ip': external_v4_ip,
'external_v6_ip': external_v6_ip})
mod_service = self.plugin.set_external_tunnel_ips(self.context,
new_service['id'],
v4_ip=external_v4_ip,
v6_ip=external_v6_ip)
self.assertDictSupersetOf(expected, mod_service)
|
py | b40d990993ceacc624bbc059c173f8ff76d7d2fc | from __future__ import absolute_import, division, unicode_literals
from flask.ext.restful import reqparse
from changes.api.base import APIView
from changes.api.serializer.models.testcase import GeneralizedTestCase
from changes.config import db
from changes.constants import Result, Status
from changes.models.build import Build
from changes.models.job import Job
from changes.models.project import Project
from changes.models.source import Source
from changes.models.test import TestCase
SORT_CHOICES = (
'duration',
'name',
)
class ProjectTestIndexAPIView(APIView):
parser = reqparse.RequestParser()
parser.add_argument('min_duration', type=int, location='args')
parser.add_argument('query', type=unicode, location='args')
parser.add_argument('sort', type=unicode, location='args',
choices=SORT_CHOICES, default='duration')
def get(self, project_id):
project = Project.get(project_id)
if not project:
return '', 404
args = self.parser.parse_args()
latest_build = Build.query.join(
Source, Source.id == Build.source_id,
).filter(
Source.patch_id == None, # NOQA
Build.project_id == project.id,
Build.result == Result.passed,
Build.status == Status.finished,
).order_by(
Build.date_created.desc(),
).limit(1).first()
if not latest_build:
return self.respond([])
job_list = db.session.query(Job.id).filter(
Job.build_id == latest_build.id,
)
if not job_list:
return self.respond([])
# use tests from the jobs of the most recent passing build
test_list = TestCase.query.filter(
TestCase.project_id == project.id,
TestCase.job_id.in_(job_list),
)
if args.min_duration:
test_list = test_list.filter(
TestCase.duration >= args.min_duration,
)
if args.query:
test_list = test_list.filter(
TestCase.name.contains(args.query),
)
if args.sort == 'duration':
sort_by = TestCase.duration.desc()
elif args.sort == 'name':
sort_by = TestCase.name.asc()
test_list = test_list.order_by(sort_by)
return self.paginate(test_list, serializers={
TestCase: GeneralizedTestCase(),
})
|
py | b40d99b7fe643b3758273e96600ee032222514b4 | #!/usr/bin/python -*- coding: utf-8 -*-
#
# Merlin - Almost Native Python Machine Learning Library: Gaussian Distribution
#
# Copyright (C) 2014-2015 alvations
# URL:
# For license information, see LICENSE.md
import numpy as np
"""
Class for a univariate Gaussian
p(x) = 1/sqrt(2*pi*sigma^2) * e^(-(x-mu)^2 / (2*sigma^2))
where mu is the Gaussian mean and sigma^2 is the Gaussian variance
"""
class Gaussian:
def __init__(self,mean,variance):
self.mean = mean;
self.variance = variance;
def sample(self,points):
# np.random.normal expects a standard deviation, so convert the stored variance
return np.random.normal(self.mean, np.sqrt(self.variance), points)
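# Illustrative usage sketch (added comment, not part of the original module;
# it assumes the estimate_gaussian helper defined below):
#
#   data = np.random.normal(loc=2.0, scale=1.5, size=1000)
#   g = estimate_gaussian(data)   # g.mean should be close to 2.0, g.variance close to 2.25
#   samples = g.sample(10)        # draw 10 new points from the fitted Gaussian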
def estimate_gaussian(X):
"""
Return a Gaussian whose mean and variance are estimated from the data set X,
assuming the points are drawn from a Gaussian distribution.
"""
mean = np.mean(X,0)
variance = np.var(X,0)
return Gaussian(mean,variance)
|
py | b40d9ab421d5930e77214c78e382473b48f0854e | from datetime import datetime
def timestamp(): # pragma: no cover
return datetime.now().isoformat()
|
py | b40d9b3bc98d4e435e13c8b0aa1ac23b91aa84b6 | import os
import subprocess
def read_binary_list(projectdir):
"""
Get the paths of all binary files (files whose names contain no dot).
"""
binary_paths = []
for root, dirs, files in os.walk(projectdir):
for file_name in files:
file_path = os.path.join(root, file_name)
if "." not in file_name:
binary_paths.append(file_path)
return binary_paths
def write_file(path, content):
if os.path.exists(os.path.dirname(path)) is False:
os.makedirs(os.path.dirname(path))
log_file = open(path, "w")
log_file.write(content)
log_file.close()
def extract_debug_dump_information(python_path ,readelf_file_path, binary_paths, result_dir):
output_dir = os.path.join(result_dir, "output")
error_dir = os.path.join(result_dir, "error")
for binary_file_path in binary_paths:
print("processing file {}, number {} of total {}".format(os.path.basename(binary_file_path),
binary_paths.index(binary_file_path),
len(binary_paths)))
command = "{} {} --debug-dump=decodedline {}".format(python_path, readelf_file_path, binary_file_path)
ret = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8")
if ret.returncode == 0:
write_file(os.path.join(output_dir, os.path.basename(binary_file_path)), ret.stdout)
else:
write_file(os.path.join(error_dir, os.path.basename(binary_file_path)), ret.stderr)
def extract_debug_information(python_path, opt_dir, mapping_path_name):
""" this will generate line number mapping in folder llvm-coreutils\\debug-"""
binary_dir = os.path.join(os.path.join(opt_dir, "coreutils"), "src")
binary_paths = read_binary_list(binary_dir)
readelf_file_path = "readelf"
result_dir = os.path.join(opt_dir, mapping_path_name)
# if os.path.exists(result_dir):
# return
extract_debug_dump_information(python_path, readelf_file_path, binary_paths, result_dir)
def main():
dataset_dir = "/home/llvm-coreutils"
optimizations = ["O0", "O1", "O2", "O3", "Ofast"]
mapping_path_name = "mapping_results"
python_path = ""
for opt_part in optimizations:
# if opt_part != "O1":
# continue
opt_dir = os.path.join(dataset_dir, opt_part)
extract_debug_information(python_path, opt_dir, mapping_path_name)
if __name__ == '__main__':
main()
|
py | b40d9bbd7316e450c0182dce0cda66cdf819bfe8 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: services/node_service.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from ..entity import request_pb2 as entity_dot_request__pb2
from ..entity import response_pb2 as entity_dot_response__pb2
from ..entity import stream_message_pb2 as entity_dot_stream__message__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='services/node_service.proto',
package='grpc',
syntax='proto3',
serialized_options=b'Z\006.;grpc',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1bservices/node_service.proto\x12\x04grpc\x1a\x14\x65ntity/request.proto\x1a\x15\x65ntity/response.proto\x1a\x1b\x65ntity/stream_message.proto2\xfa\x01\n\x0bNodeService\x12+\n\x08Register\x12\r.grpc.Request\x1a\x0e.grpc.Response\"\x00\x12\x30\n\rSendHeartbeat\x12\r.grpc.Request\x1a\x0e.grpc.Response\"\x00\x12\'\n\x04Ping\x12\r.grpc.Request\x1a\x0e.grpc.Response\"\x00\x12\x33\n\tSubscribe\x12\r.grpc.Request\x1a\x13.grpc.StreamMessage\"\x00\x30\x01\x12.\n\x0bUnsubscribe\x12\r.grpc.Request\x1a\x0e.grpc.Response\"\x00\x42\x08Z\x06.;grpcb\x06proto3'
,
dependencies=[entity_dot_request__pb2.DESCRIPTOR,entity_dot_response__pb2.DESCRIPTOR,entity_dot_stream__message__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR._options = None
_NODESERVICE = _descriptor.ServiceDescriptor(
name='NodeService',
full_name='grpc.NodeService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=112,
serialized_end=362,
methods=[
_descriptor.MethodDescriptor(
name='Register',
full_name='grpc.NodeService.Register',
index=0,
containing_service=None,
input_type=entity_dot_request__pb2._REQUEST,
output_type=entity_dot_response__pb2._RESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='SendHeartbeat',
full_name='grpc.NodeService.SendHeartbeat',
index=1,
containing_service=None,
input_type=entity_dot_request__pb2._REQUEST,
output_type=entity_dot_response__pb2._RESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Ping',
full_name='grpc.NodeService.Ping',
index=2,
containing_service=None,
input_type=entity_dot_request__pb2._REQUEST,
output_type=entity_dot_response__pb2._RESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Subscribe',
full_name='grpc.NodeService.Subscribe',
index=3,
containing_service=None,
input_type=entity_dot_request__pb2._REQUEST,
output_type=entity_dot_stream__message__pb2._STREAMMESSAGE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Unsubscribe',
full_name='grpc.NodeService.Unsubscribe',
index=4,
containing_service=None,
input_type=entity_dot_request__pb2._REQUEST,
output_type=entity_dot_response__pb2._RESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_NODESERVICE)
DESCRIPTOR.services_by_name['NodeService'] = _NODESERVICE
# @@protoc_insertion_point(module_scope)
|
py | b40d9be4c1c074a4f01a4581d06c8e03c9c6bdf2 | from output.models.ms_data.datatypes.facets.long.long_min_inclusive004_xsd.long_min_inclusive004 import (
FooType,
Test,
)
__all__ = [
"FooType",
"Test",
]
|
py | b40d9c4d2cc6c7adbcc751d70d70e78dfbf60f4d | import pytest
from stories import story
from stories.exceptions import StoryDefinitionError
def test_story_private_fields():
"""Deny access to the private fields of the story class and object."""
@story
def do(I):
I.one
assert do.__dict__ == {}
def test_deny_empty_stories():
"""We can not define a story which does not have any steps.
This will make it impossible to determine the right executor in the stories
composition.
"""
with pytest.raises(StoryDefinitionError) as exc_info:
class Action:
@story
def do(I):
pass
assert str(exc_info.value) == "Story should have at least one step defined"
def test_deny_repeat_steps():
"""We can not define a story which has duplicating steps."""
with pytest.raises(StoryDefinitionError) as exc_info:
class Action:
@story
def do(I):
I.foo
I.bar
I.foo
assert str(exc_info.value) == "Story has repeated steps: foo"
def test_deny_recursive_stories():
"""Story can not call itself as step.
This should prevent recursion error at the wrapping step.
"""
with pytest.raises(StoryDefinitionError) as exc_info:
class Action:
@story
def do(I):
I.one
I.do
I.two
assert str(exc_info.value) == "Story should not call itself recursively"
def test_story_representation(x):
story = repr(x.Simple().x)
expected = """
Simple.x
one
two
three
""".strip()
assert story == expected
story = repr(x.SubstoryDI(x.Simple().x).y)
expected = """
SubstoryDI.y
start
before
x (Simple.x)
one
two
three
after
""".strip()
assert story == expected
def test_story_class_attribute_representation(x):
story = repr(x.Simple.x)
expected = """
Simple.x
one
two
three
""".strip()
assert story == expected
story = repr(x.SubstoryDI.y)
expected = """
SubstoryDI.y
start
before
x
after
""".strip()
assert story == expected
def test_deny_coroutine_stories(r, x):
"""Story specification can not be a coroutine function."""
r.skip_if_function()
expected = "Story should be a regular function"
with pytest.raises(StoryDefinitionError) as exc_info:
x.define_coroutine_story()
assert str(exc_info.value) == expected
def test_deny_mix_coroutine_with_regular_methods(r, x):
"""If all story steps are functions, we can not use coroutine method in it."""
r.skip_if_function()
class T(x.Child, x.MixedCoroutineMethod):
pass
class J(x.Parent, x.NormalParentMethod):
def __init__(self):
self.x = T().x
# Simple.
expected = """
Coroutines and functions can not be used together in story definition.
This method should be a function: T.three
Story method: T.x
""".strip()
with pytest.raises(StoryDefinitionError) as exc_info:
T().x
assert str(exc_info.value) == expected
# Substory DI.
expected = """
Coroutines and functions can not be used together in story definition.
This method should be a function: T.three
Story method: T.x
""".strip()
with pytest.raises(StoryDefinitionError) as exc_info:
J().a
assert str(exc_info.value) == expected
def test_deny_mix_function_with_coroutine_methods(r, x):
"""If all story steps are functions, we can not use coroutine method in it."""
r.skip_if_function()
class T(x.Child, x.MixedFunctionMethod):
pass
class J(x.Parent, x.NormalParentMethod):
def __init__(self):
self.x = T().x
# Simple.
expected = """
Coroutines and functions can not be used together in story definition.
This method should be a coroutine: T.three
Story method: T.x
""".strip()
with pytest.raises(StoryDefinitionError) as exc_info:
T().x
assert str(exc_info.value) == expected
# Substory DI.
expected = """
Coroutines and functions can not be used together in story definition.
This method should be a coroutine: T.three
Story method: T.x
""".strip()
with pytest.raises(StoryDefinitionError) as exc_info:
J().a
assert str(exc_info.value) == expected
def test_deny_compose_coroutine_with_function_stories(r, x):
"""If child story steps are coroutines, we can not inject this story in a parent
which steps are functions."""
r.skip_if_function()
class T(x.Child, x.NormalMethod):
pass
class J(x.Parent, x.FunctionParentMethod):
def __init__(self):
self.x = T().x
# Substory DI.
expected = """
Coroutine and function stories can not be injected into each other.
Story function method: J.a
Substory coroutine method: T.x
""".strip()
with pytest.raises(StoryDefinitionError) as exc_info:
J().a
assert str(exc_info.value) == expected
def test_deny_compose_function_with_coroutine_stories(r, x):
"""If child story steps are functions, we can not inject this story in a parent
which steps are coroutines."""
r.skip_if_function()
class T(x.Child, x.FunctionMethod):
pass
class J(x.Parent, x.NormalParentMethod):
def __init__(self):
self.x = T().x
# Substory DI.
expected = """
Coroutine and function stories can not be injected into each other.
Story coroutine method: J.a
Substory function method: T.x
""".strip()
with pytest.raises(StoryDefinitionError) as exc_info:
J().a
assert str(exc_info.value) == expected
|
py | b40d9ca5c61b84e36e291d6801c6a4986b4f043e | #!/usr/bin/env python
# This file is part of Androguard.
#
# This is a tool to extract permissions and permission groups from the Android Open Source Project.
# The information about the permissions and permission groups is appended to a file, which is
# later used in the Androguard project.
#
# Author: Yury Zhauniarovich
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# CONSTANTS
PATH_TO_PSCOUT_FOLDER = "/home/yury/TMP/PScout/results/API_09"
API_VERSION = 9
MAPPINGS_MODULE_PATH = "../../androguard/core/api_specific_resources/api_permission_mappings/" # where to append the results
MAPPINGS_MODULE_NAME = "api_permission_mappings"
PSCOUT_METHOD_MAPPING_FILENAME = "allmappings"
PSCOUT_CONTENTPROVIDERFIELDS_MAPPING_FILENAME = "contentproviderfieldpermission"
METHODS_MAPPING_PARAM_NAME = "AOSP_PERMISSIONS_BY_METHODS"
FIELDS_MAPPING_PARAM_NAME = "AOSP_PERMISSIONS_BY_FIELDS"
# IMPORTS
import os, re, codecs
# auxiliary
TYPE_DESCRIPTOR = {
'V': 'void',
'Z': 'boolean',
'B': 'byte',
'S': 'short',
'C': 'char',
'I': 'int',
'J': 'long',
'F': 'float',
'D': 'double',
}
DESCRIPTOR_TYPE = {
'void': 'V',
'boolean': 'Z',
'byte': 'B',
'short': 'S',
'char': 'C',
'int': 'I',
'long': 'J',
'float': 'F',
'double': 'D',
}
def countBrackets(atype):
res = re.findall(r'\[\s*\]', atype)
return len(res)
def transformClassParam(atype):
res = ""
arrDim = countBrackets(atype)
if arrDim > 0:
pos = atype.find('[')
atype = atype[0:pos].strip()
res = '[' * arrDim
if atype in DESCRIPTOR_TYPE:
res += DESCRIPTOR_TYPE[atype]
else:
res += FormatClassToJava(atype)
return res
def FormatClassToJava(i):
"""
    Transform a typical xml format class name into the java descriptor format
:param i: the input class name
:rtype: string
"""
return "L" + i.replace(".", "/") + ";"
def parseMethod(methodString):
ms = methodString.strip()
mParamStartPos = ms.find('(')
mParamEndPos = ms.find(')')
paramString = ms[mParamStartPos + 1:mParamEndPos].strip()
params = [l.strip() for l in paramString.split(',')]
retValue_mName = ms[0:mParamStartPos].strip()
mNameStartPos = retValue_mName.rfind(' ')
returnValue = retValue_mName[0:mNameStartPos].strip()
methodName = retValue_mName[mNameStartPos + 1:].strip()
return methodName, params, returnValue
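# Illustrative example (added for clarity, not part of the original script):
#   parseMethod("android.net.NetworkInfo getNetworkInfo(int)")
#   -> ("getNetworkInfo", ["int"], "android.net.NetworkInfo")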
# end of auxiliary
print("Starting conversion of PScout data: [%s]" % PATH_TO_PSCOUT_FOLDER)
if not os.path.exists(MAPPINGS_MODULE_PATH):
os.makedirs(MAPPINGS_MODULE_PATH)
print("Checking if we already have the file with the version %d..." % API_VERSION)
api_specific_mappings_module_name = "%s_api%s.py" % (MAPPINGS_MODULE_NAME,
API_VERSION)
api_specific_mappings_module_path = os.path.join(
MAPPINGS_MODULE_PATH, api_specific_mappings_module_name)
if os.path.exists(api_specific_mappings_module_path):
print("API specific file for this version already exists!")
print("If you want create a file for newer version, please, delete file: %s" % api_specific_mappings_module_path)
exit(1)
print("Reading method mapping file...")
pscout_method_mapping_filepath = os.path.join(PATH_TO_PSCOUT_FOLDER,
PSCOUT_METHOD_MAPPING_FILENAME)
methods_mapping_file_lines = []
with open(pscout_method_mapping_filepath, 'r') as pscout_file:
methods_mapping_file_lines = pscout_file.readlines()
print("Starting to parse file: [%s]" % pscout_method_mapping_filepath)
perm_name = None
methods_mapping = {}
for line in methods_mapping_file_lines:
line = line.strip()
if line.startswith("Permission:"):
perm_name = line.split("Permission:")[1].strip()
print("PROCESSING PERMISSIONS: %s" % perm_name)
elif line.startswith("<"):
class_method = line[line.find('<') + 1:line.rfind('>')]
sepPos = class_method.find(':')
className = class_method[0:sepPos].strip()
methodStr = class_method[sepPos + 1:].strip()
methodName, params, returnValue = parseMethod(methodStr)
modParStr = ""
for par in params:
if par != "":
modParStr += transformClassParam(par) + ' '
modParStr = modParStr.strip()
method_identificator = "%s-%s-(%s)%s" % (
transformClassParam(className), methodName, modParStr,
transformClassParam(returnValue))
try:
methods_mapping[method_identificator].add(perm_name)
except KeyError:
methods_mapping[method_identificator] = set()
methods_mapping[method_identificator].add(perm_name)
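# Each key built above has the form
#   "<class descriptor>-<method name>-(<param descriptors>)<return descriptor>",
# e.g. "Landroid/net/ConnectivityManager;-getNetworkInfo-(I)Landroid/net/NetworkInfo;"
# (illustrative example), mapped to the set of permissions that guard it.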
print("Reading contentproviderfield mapping file...")
pscout_contentproviderfields_mapping_filepath = os.path.join(
PATH_TO_PSCOUT_FOLDER, PSCOUT_CONTENTPROVIDERFIELDS_MAPPING_FILENAME)
contentproviderfields_mapping_file_lines = []
with open(pscout_contentproviderfields_mapping_filepath, 'r') as pscout_file:
contentproviderfields_mapping_file_lines = pscout_file.readlines()
perm_name = None
fields_mapping = {}
for line in contentproviderfields_mapping_file_lines:
line = line.strip()
if line.startswith("PERMISSION:"):
perm_name = line.split("PERMISSION:")[1].strip()
print("PROCESSING PERMISSIONS: %s" % perm_name)
elif line.startswith("<"):
field_entry = line[line.find('<') + 1:line.rfind('>')]
classNameSepPos = field_entry.find(':')
className = field_entry[0:classNameSepPos].strip()
proto_name_str = field_entry[classNameSepPos + 1:].strip()
proto_name_parts = proto_name_str.split()
proto = proto_name_parts[0].strip()
name = proto_name_parts[1].strip()
field_identificator = "%s-%s-%s" % (transformClassParam(className),
name, transformClassParam(proto))
try:
fields_mapping[field_identificator].add(perm_name)
except KeyError:
fields_mapping[field_identificator] = set()
fields_mapping[field_identificator].add(perm_name)
print("Appending found information to the mappings file...")
with codecs.open(api_specific_mappings_module_path, 'w',
'utf-8') as perm_py_module:
perm_py_module.write('#!/usr/bin/python\n')
perm_py_module.write('# -*- coding: %s -*-\n\n' % 'utf-8')
perm_py_module.write('# This file is a part of Androguard.\n')
perm_py_module.write('#\n')
perm_py_module.write(
'# This file is generated automatically from the data\n')
perm_py_module.write(
'# provided by PScout tool [http://pscout.csl.toronto.edu/]\n')
perm_py_module.write('# using script: %s\n' % os.path.basename(__file__))
perm_py_module.write('#\n')
perm_py_module.write('# Author: Yury Zhauniarovich\n')
perm_py_module.write('#\n')
perm_py_module.write('#\n')
perm_py_module.write(
'# Licensed under the Apache License, Version 2.0 (the "License");\n')
perm_py_module.write(
'# you may not use this file except in compliance with the License.\n')
perm_py_module.write('# You may obtain a copy of the License at\n')
perm_py_module.write('#\n')
perm_py_module.write('# http://www.apache.org/licenses/LICENSE-2.0\n')
perm_py_module.write('#\n')
perm_py_module.write(
'# Unless required by applicable law or agreed to in writing, software\n')
perm_py_module.write(
        '# distributed under the License is distributed on an "AS IS" BASIS,\n')
perm_py_module.write(
'# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n')
perm_py_module.write(
'# See the License for the specific language governing permissions and\n')
    perm_py_module.write('# limitations under the License.\n\n')
perm_py_module.write('#################################################\n')
perm_py_module.write('### API version: %d \n' % API_VERSION)
perm_py_module.write(
'#################################################\n\n\n')
perm_py_module.write("%s = {\n" % METHODS_MAPPING_PARAM_NAME)
for method in list(methods_mapping.keys()):
permissions = methods_mapping.get(method)
perms_string = ", ".join(["'%s'" % prm for prm in permissions])
perm_py_module.write("\t'%s' : [%s],\n" % (method, perms_string))
perm_py_module.write("}\n\n")
perm_py_module.write("%s = {\n" % FIELDS_MAPPING_PARAM_NAME)
for field in list(fields_mapping.keys()):
permissions = fields_mapping.get(field)
perms_string = ", ".join(["'%s'" % prm for prm in permissions])
perm_py_module.write("\t'%s' : [%s],\n" % (field, perms_string))
perm_py_module.write("}\n")
perm_py_module.write("#################################################\n")
print("Done...")
|
py | b40d9d08271dfaa103f96670451274f558b4b78d | # Write a program that shows a countdown
# to the launch of fireworks.
# Exercise devised by: Gustavo Guanabará / Curso Em Vídeo
from time import sleep
def countdown(initial):
if initial >= 0:
print(initial)
initial -= 1
sleep(1)
countdown(initial)
else:
print("Fim da contagem!")
if __name__ == '__main__':
countdown(10) |
py | b40d9db3b9e32b127f7f4326f0abe132a4c87361 | class Buffer:
def __init__(self, n_elements, max_buffer_size, reset_on_query):
self.reset_on_query = reset_on_query
self.max_buffer_size = max_buffer_size
self.buffers = [list() for i in range(0, n_elements)]
def update_buffer(self, datas):
if isinstance(datas[0], list):
for buffer, data in zip(self.buffers, datas):
buffer.extend(data)
else:
for buffer, data in zip(self.buffers, datas):
buffer.append(data)
while len(self.buffers[0]) > self.max_buffer_size:
for buffer in self.buffers:
del buffer[0]
def read_buffer(self, reset=None):
if reset is None:
reset = self.reset_on_query
        res = tuple(self.buffers)
if reset:
for i in range(0, len(self.buffers)):
self.buffers[i] = []
return res
def __len__(self):
return len(self.buffers[0])
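# A minimal usage sketch (illustrative addition, not part of the original module):
# three parallel buffers capped at five entries each, cleared on every read.
if __name__ == "__main__":
    buf = Buffer(n_elements=3, max_buffer_size=5, reset_on_query=True)
    buf.update_buffer([1, 10, 100])  # scalars: one value appended per buffer
    buf.update_buffer([[2, 3], [20, 30], [200, 300]])  # lists: each buffer is extended
    print(len(buf))  # 3 entries per buffer
    first, second, third = buf.read_buffer()  # returns the lists, then resets them
    print(first, second, third)  # [1, 2, 3] [10, 20, 30] [100, 200, 300]
    print(len(buf))  # 0 after the reset-on-query read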
|
py | b40d9e251f8596aa219f10a87940f8126026bf71 | # The following comment should be removed at some point in the future.
# mypy: strict-optional=False
import functools
import logging
import os
import shutil
import sys
import uuid
import zipfile
from typing import Any, Collection, Dict, Iterable, List, Optional, Sequence, Union
from pip._vendor.packaging.markers import Marker
from pip._vendor.packaging.requirements import Requirement
from pip._vendor.packaging.specifiers import SpecifierSet
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.packaging.version import Version
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.pep517.wrappers import Pep517HookCaller
from pip._internal.build_env import BuildEnvironment, NoOpBuildEnvironment
from pip._internal.exceptions import InstallationError, LegacyInstallFailure
from pip._internal.locations import get_scheme
from pip._internal.metadata import (
BaseDistribution,
get_default_environment,
get_directory_distribution,
)
from pip._internal.models.link import Link
from pip._internal.operations.build.metadata import generate_metadata
from pip._internal.operations.build.metadata_editable import generate_editable_metadata
from pip._internal.operations.build.metadata_legacy import (
generate_metadata as generate_metadata_legacy,
)
from pip._internal.operations.install.editable_legacy import (
install_editable as install_editable_legacy,
)
from pip._internal.operations.install.legacy import install as install_legacy
from pip._internal.operations.install.wheel import install_wheel
from pip._internal.pyproject import load_pyproject_toml, make_pyproject_path
from pip._internal.req.req_uninstall import UninstallPathSet
from pip._internal.utils.deprecation import deprecated
from pip._internal.utils.direct_url_helpers import (
direct_url_for_editable,
direct_url_from_link,
)
from pip._internal.utils.hashes import Hashes
from pip._internal.utils.misc import (
ConfiguredPep517HookCaller,
ask_path_exists,
backup_dir,
display_path,
hide_url,
redact_auth_from_url,
)
from pip._internal.utils.packaging import safe_extra
from pip._internal.utils.subprocess import runner_with_spinner_message
from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds
from pip._internal.utils.virtualenv import running_under_virtualenv
from pip._internal.vcs import vcs
logger = logging.getLogger(__name__)
class InstallRequirement:
"""
Represents something that may be installed later on, may have information
about where to fetch the relevant requirement and also contains logic for
installing the said requirement.
"""
def __init__(
self,
req: Optional[Requirement],
comes_from: Optional[Union[str, "InstallRequirement"]],
editable: bool = False,
link: Optional[Link] = None,
markers: Optional[Marker] = None,
use_pep517: Optional[bool] = None,
isolated: bool = False,
install_options: Optional[List[str]] = None,
global_options: Optional[List[str]] = None,
hash_options: Optional[Dict[str, List[str]]] = None,
config_settings: Optional[Dict[str, str]] = None,
constraint: bool = False,
extras: Collection[str] = (),
user_supplied: bool = False,
permit_editable_wheels: bool = False,
) -> None:
assert req is None or isinstance(req, Requirement), req
self.req = req
self.comes_from = comes_from
self.constraint = constraint
self.editable = editable
self.permit_editable_wheels = permit_editable_wheels
self.legacy_install_reason: Optional[int] = None
# source_dir is the local directory where the linked requirement is
# located, or unpacked. In case unpacking is needed, creating and
# populating source_dir is done by the RequirementPreparer. Note this
# is not necessarily the directory where pyproject.toml or setup.py is
# located - that one is obtained via unpacked_source_directory.
self.source_dir: Optional[str] = None
if self.editable:
assert link
if link.is_file:
self.source_dir = os.path.normpath(os.path.abspath(link.file_path))
if link is None and req and req.url:
# PEP 508 URL requirement
link = Link(req.url)
self.link = self.original_link = link
self.original_link_is_in_wheel_cache = False
# Path to any downloaded or already-existing package.
self.local_file_path: Optional[str] = None
if self.link and self.link.is_file:
self.local_file_path = self.link.file_path
if extras:
self.extras = extras
elif req:
self.extras = {safe_extra(extra) for extra in req.extras}
else:
self.extras = set()
if markers is None and req:
markers = req.marker
self.markers = markers
# This holds the Distribution object if this requirement is already installed.
self.satisfied_by: Optional[BaseDistribution] = None
# Whether the installation process should try to uninstall an existing
# distribution before installing this requirement.
self.should_reinstall = False
# Temporary build location
self._temp_build_dir: Optional[TempDirectory] = None
# Set to True after successful installation
self.install_succeeded: Optional[bool] = None
# Supplied options
self.install_options = install_options if install_options else []
self.global_options = global_options if global_options else []
self.hash_options = hash_options if hash_options else {}
self.config_settings = config_settings
# Set to True after successful preparation of this requirement
self.prepared = False
# User supplied requirement are explicitly requested for installation
# by the user via CLI arguments or requirements files, as opposed to,
# e.g. dependencies, extras or constraints.
self.user_supplied = user_supplied
self.isolated = isolated
self.build_env: BuildEnvironment = NoOpBuildEnvironment()
# For PEP 517, the directory where we request the project metadata
# gets stored. We need this to pass to build_wheel, so the backend
# can ensure that the wheel matches the metadata (see the PEP for
# details).
self.metadata_directory: Optional[str] = None
# The static build requirements (from pyproject.toml)
self.pyproject_requires: Optional[List[str]] = None
# Build requirements that we will check are available
self.requirements_to_check: List[str] = []
# The PEP 517 backend we should use to build the project
self.pep517_backend: Optional[Pep517HookCaller] = None
# Are we using PEP 517 for this requirement?
# After pyproject.toml has been loaded, the only valid values are True
# and False. Before loading, None is valid (meaning "use the default").
# Setting an explicit value before loading pyproject.toml is supported,
# but after loading this flag should be treated as read only.
self.use_pep517 = use_pep517
# This requirement needs more preparation before it can be built
self.needs_more_preparation = False
def __str__(self) -> str:
if self.req:
s = str(self.req)
if self.link:
s += " from {}".format(redact_auth_from_url(self.link.url))
elif self.link:
s = redact_auth_from_url(self.link.url)
else:
s = "<InstallRequirement>"
if self.satisfied_by is not None:
s += " in {}".format(display_path(self.satisfied_by.location))
if self.comes_from:
if isinstance(self.comes_from, str):
comes_from: Optional[str] = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += f" (from {comes_from})"
return s
def __repr__(self) -> str:
return "<{} object: {} editable={!r}>".format(
self.__class__.__name__, str(self), self.editable
)
def format_debug(self) -> str:
"""An un-tested helper for getting state, for debugging."""
attributes = vars(self)
names = sorted(attributes)
state = ("{}={!r}".format(attr, attributes[attr]) for attr in sorted(names))
return "<{name} object: {{{state}}}>".format(
name=self.__class__.__name__,
state=", ".join(state),
)
# Things that are valid for all kinds of requirements?
@property
def name(self) -> Optional[str]:
if self.req is None:
return None
return self.req.name
@functools.lru_cache() # use cached_property in python 3.8+
def supports_pyproject_editable(self) -> bool:
if not self.use_pep517:
return False
assert self.pep517_backend
with self.build_env:
runner = runner_with_spinner_message(
"Checking if build backend supports build_editable"
)
with self.pep517_backend.subprocess_runner(runner):
return "build_editable" in self.pep517_backend._supported_features()
@property
def specifier(self) -> SpecifierSet:
return self.req.specifier
@property
def is_pinned(self) -> bool:
"""Return whether I am pinned to an exact version.
For example, some-package==1.2 is pinned; some-package>1.2 is not.
"""
specifiers = self.specifier
return len(specifiers) == 1 and next(iter(specifiers)).operator in {"==", "==="}
def match_markers(self, extras_requested: Optional[Iterable[str]] = None) -> bool:
if not extras_requested:
# Provide an extra to safely evaluate the markers
# without matching any extra
extras_requested = ("",)
if self.markers is not None:
return any(
self.markers.evaluate({"extra": extra}) for extra in extras_requested
)
else:
return True
@property
def has_hash_options(self) -> bool:
"""Return whether any known-good hashes are specified as options.
These activate --require-hashes mode; hashes specified as part of a
URL do not.
"""
return bool(self.hash_options)
def hashes(self, trust_internet: bool = True) -> Hashes:
"""Return a hash-comparer that considers my option- and URL-based
hashes to be known-good.
Hashes in URLs--ones embedded in the requirements file, not ones
downloaded from an index server--are almost peers with ones from
flags. They satisfy --require-hashes (whether it was implicitly or
explicitly activated) but do not activate it. md5 and sha224 are not
allowed in flags, which should nudge people toward good algos. We
always OR all hashes together, even ones from URLs.
:param trust_internet: Whether to trust URL-based (#md5=...) hashes
downloaded from the internet, as by populate_link()
"""
good_hashes = self.hash_options.copy()
link = self.link if trust_internet else self.original_link
if link and link.hash:
good_hashes.setdefault(link.hash_name, []).append(link.hash)
return Hashes(good_hashes)
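    # Illustrative example (not from the original source): with
    # self.hash_options == {"sha256": ["<hash-a>"]} and a link ending in
    # "#sha256=<hash-b>", this returns Hashes({"sha256": ["<hash-a>", "<hash-b>"]}).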
def from_path(self) -> Optional[str]:
"""Format a nice indicator to show where this "comes from" """
if self.req is None:
return None
s = str(self.req)
if self.comes_from:
if isinstance(self.comes_from, str):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += "->" + comes_from
return s
def ensure_build_location(
self, build_dir: str, autodelete: bool, parallel_builds: bool
) -> str:
assert build_dir is not None
if self._temp_build_dir is not None:
assert self._temp_build_dir.path
return self._temp_build_dir.path
if self.req is None:
# Some systems have /tmp as a symlink which confuses custom
# builds (such as numpy). Thus, we ensure that the real path
# is returned.
self._temp_build_dir = TempDirectory(
kind=tempdir_kinds.REQ_BUILD, globally_managed=True
)
return self._temp_build_dir.path
# This is the only remaining place where we manually determine the path
# for the temporary directory. It is only needed for editables where
# it is the value of the --src option.
# When parallel builds are enabled, add a UUID to the build directory
# name so multiple builds do not interfere with each other.
dir_name: str = canonicalize_name(self.name)
if parallel_builds:
dir_name = f"{dir_name}_{uuid.uuid4().hex}"
# FIXME: Is there a better place to create the build_dir? (hg and bzr
# need this)
if not os.path.exists(build_dir):
logger.debug("Creating directory %s", build_dir)
os.makedirs(build_dir)
actual_build_dir = os.path.join(build_dir, dir_name)
# `None` indicates that we respect the globally-configured deletion
# settings, which is what we actually want when auto-deleting.
delete_arg = None if autodelete else False
return TempDirectory(
path=actual_build_dir,
delete=delete_arg,
kind=tempdir_kinds.REQ_BUILD,
globally_managed=True,
).path
def _set_requirement(self) -> None:
"""Set requirement after generating metadata."""
assert self.req is None
assert self.metadata is not None
assert self.source_dir is not None
# Construct a Requirement object from the generated metadata
if isinstance(parse_version(self.metadata["Version"]), Version):
op = "=="
else:
op = "==="
self.req = Requirement(
"".join(
[
self.metadata["Name"],
op,
self.metadata["Version"],
]
)
)
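    # Illustrative example (not from the original source): metadata with
    # Name "example-pkg" and Version "1.0" yields Requirement("example-pkg==1.0");
    # a version that does not parse as a standard Version falls back to "===".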
def warn_on_mismatching_name(self) -> None:
metadata_name = canonicalize_name(self.metadata["Name"])
if canonicalize_name(self.req.name) == metadata_name:
# Everything is fine.
return
# If we're here, there's a mismatch. Log a warning about it.
logger.warning(
"Generating metadata for package %s "
"produced metadata for project name %s. Fix your "
"#egg=%s fragments.",
self.name,
metadata_name,
self.name,
)
self.req = Requirement(metadata_name)
def check_if_exists(self, use_user_site: bool) -> None:
"""Find an installed distribution that satisfies or conflicts
with this requirement, and set self.satisfied_by or
self.should_reinstall appropriately.
"""
if self.req is None:
return
existing_dist = get_default_environment().get_distribution(self.req.name)
if not existing_dist:
return
version_compatible = self.req.specifier.contains(
existing_dist.version,
prereleases=True,
)
if not version_compatible:
self.satisfied_by = None
if use_user_site:
if existing_dist.in_usersite:
self.should_reinstall = True
elif running_under_virtualenv() and existing_dist.in_site_packages:
raise InstallationError(
f"Will not install to the user site because it will "
f"lack sys.path precedence to {existing_dist.raw_name} "
f"in {existing_dist.location}"
)
else:
self.should_reinstall = True
else:
if self.editable:
self.should_reinstall = True
# when installing editables, nothing pre-existing should ever
# satisfy
self.satisfied_by = None
else:
self.satisfied_by = existing_dist
# Things valid for wheels
@property
def is_wheel(self) -> bool:
if not self.link:
return False
return self.link.is_wheel
# Things valid for sdists
@property
def unpacked_source_directory(self) -> str:
return os.path.join(
self.source_dir, self.link and self.link.subdirectory_fragment or ""
)
@property
def setup_py_path(self) -> str:
assert self.source_dir, f"No source dir for {self}"
setup_py = os.path.join(self.unpacked_source_directory, "setup.py")
return setup_py
@property
def setup_cfg_path(self) -> str:
assert self.source_dir, f"No source dir for {self}"
setup_cfg = os.path.join(self.unpacked_source_directory, "setup.cfg")
return setup_cfg
@property
def pyproject_toml_path(self) -> str:
assert self.source_dir, f"No source dir for {self}"
return make_pyproject_path(self.unpacked_source_directory)
def load_pyproject_toml(self) -> None:
"""Load the pyproject.toml file.
After calling this routine, all of the attributes related to PEP 517
processing for this requirement have been set. In particular, the
use_pep517 attribute can be used to determine whether we should
follow the PEP 517 or legacy (setup.py) code path.
"""
pyproject_toml_data = load_pyproject_toml(
self.use_pep517, self.pyproject_toml_path, self.setup_py_path, str(self)
)
if pyproject_toml_data is None:
self.use_pep517 = False
return
self.use_pep517 = True
requires, backend, check, backend_path = pyproject_toml_data
self.requirements_to_check = check
self.pyproject_requires = requires
self.pep517_backend = ConfiguredPep517HookCaller(
self,
self.unpacked_source_directory,
backend,
backend_path=backend_path,
)
def isolated_editable_sanity_check(self) -> None:
"""Check that an editable requirement if valid for use with PEP 517/518.
This verifies that an editable that has a pyproject.toml either supports PEP 660
or as a setup.py or a setup.cfg
"""
if (
self.editable
and self.use_pep517
and not self.supports_pyproject_editable()
and not os.path.isfile(self.setup_py_path)
and not os.path.isfile(self.setup_cfg_path)
):
raise InstallationError(
f"Project {self} has a 'pyproject.toml' and its build "
f"backend is missing the 'build_editable' hook. Since it does not "
f"have a 'setup.py' nor a 'setup.cfg', "
f"it cannot be installed in editable mode. "
f"Consider using a build backend that supports PEP 660."
)
def prepare_metadata(self) -> None:
"""Ensure that project metadata is available.
Under PEP 517 and PEP 660, call the backend hook to prepare the metadata.
Under legacy processing, call setup.py egg-info.
"""
assert self.source_dir
details = self.name or f"from {self.link}"
if self.use_pep517:
assert self.pep517_backend is not None
if (
self.editable
and self.permit_editable_wheels
and self.supports_pyproject_editable()
):
self.metadata_directory = generate_editable_metadata(
build_env=self.build_env,
backend=self.pep517_backend,
details=details,
)
else:
self.metadata_directory = generate_metadata(
build_env=self.build_env,
backend=self.pep517_backend,
details=details,
)
else:
self.metadata_directory = generate_metadata_legacy(
build_env=self.build_env,
setup_py_path=self.setup_py_path,
source_dir=self.unpacked_source_directory,
isolated=self.isolated,
details=details,
)
# Act on the newly generated metadata, based on the name and version.
if not self.name:
self._set_requirement()
else:
self.warn_on_mismatching_name()
self.assert_source_matches_version()
@property
def metadata(self) -> Any:
if not hasattr(self, "_metadata"):
self._metadata = self.get_dist().metadata
return self._metadata
def get_dist(self) -> BaseDistribution:
return get_directory_distribution(self.metadata_directory)
def assert_source_matches_version(self) -> None:
assert self.source_dir
version = self.metadata["version"]
if self.req.specifier and version not in self.req.specifier:
logger.warning(
"Requested %s, but installing version %s",
self,
version,
)
else:
logger.debug(
"Source in %s has version %s, which satisfies requirement %s",
display_path(self.source_dir),
version,
self,
)
# For both source distributions and editables
def ensure_has_source_dir(
self,
parent_dir: str,
autodelete: bool = False,
parallel_builds: bool = False,
) -> None:
"""Ensure that a source_dir is set.
This will create a temporary build dir if the name of the requirement
isn't known yet.
:param parent_dir: The ideal pip parent_dir for the source_dir.
Generally src_dir for editables and build_dir for sdists.
:return: self.source_dir
"""
if self.source_dir is None:
self.source_dir = self.ensure_build_location(
parent_dir,
autodelete=autodelete,
parallel_builds=parallel_builds,
)
# For editable installations
def update_editable(self) -> None:
if not self.link:
logger.debug(
"Cannot update repository at %s; repository location is unknown",
self.source_dir,
)
return
assert self.editable
assert self.source_dir
if self.link.scheme == "file":
# Static paths don't get updated
return
vcs_backend = vcs.get_backend_for_scheme(self.link.scheme)
# Editable requirements are validated in Requirement constructors.
# So here, if it's neither a path nor a valid VCS URL, it's a bug.
assert vcs_backend, f"Unsupported VCS URL {self.link.url}"
hidden_url = hide_url(self.link.url)
vcs_backend.obtain(self.source_dir, url=hidden_url, verbosity=0)
# Top-level Actions
def uninstall(
self, auto_confirm: bool = False, verbose: bool = False
) -> Optional[UninstallPathSet]:
"""
Uninstall the distribution currently satisfying this requirement.
Prompts before removing or modifying files unless
``auto_confirm`` is True.
Refuses to delete or modify files outside of ``sys.prefix`` -
thus uninstallation within a virtual environment can only
modify that virtual environment, even if the virtualenv is
linked to global site-packages.
"""
assert self.req
dist = get_default_environment().get_distribution(self.req.name)
if not dist:
logger.warning("Skipping %s as it is not installed.", self.name)
return None
logger.info("Found existing installation: %s", dist)
uninstalled_pathset = UninstallPathSet.from_dist(dist)
uninstalled_pathset.remove(auto_confirm, verbose)
return uninstalled_pathset
def _get_archive_name(self, path: str, parentdir: str, rootdir: str) -> str:
def _clean_zip_name(name: str, prefix: str) -> str:
assert name.startswith(
prefix + os.path.sep
), f"name {name!r} doesn't start with prefix {prefix!r}"
name = name[len(prefix) + 1 :]
name = name.replace(os.path.sep, "/")
return name
path = os.path.join(parentdir, path)
name = _clean_zip_name(path, rootdir)
return self.name + "/" + name
def archive(self, build_dir: Optional[str]) -> None:
"""Saves archive to provided build_dir.
Used for saving downloaded VCS requirements as part of `pip download`.
"""
assert self.source_dir
if build_dir is None:
return
create_archive = True
archive_name = "{}-{}.zip".format(self.name, self.metadata["version"])
archive_path = os.path.join(build_dir, archive_name)
if os.path.exists(archive_path):
response = ask_path_exists(
"The file {} exists. (i)gnore, (w)ipe, "
"(b)ackup, (a)bort ".format(display_path(archive_path)),
("i", "w", "b", "a"),
)
if response == "i":
create_archive = False
elif response == "w":
logger.warning("Deleting %s", display_path(archive_path))
os.remove(archive_path)
elif response == "b":
dest_file = backup_dir(archive_path)
logger.warning(
"Backing up %s to %s",
display_path(archive_path),
display_path(dest_file),
)
shutil.move(archive_path, dest_file)
elif response == "a":
sys.exit(-1)
if not create_archive:
return
zip_output = zipfile.ZipFile(
archive_path,
"w",
zipfile.ZIP_DEFLATED,
allowZip64=True,
)
with zip_output:
dir = os.path.normcase(os.path.abspath(self.unpacked_source_directory))
for dirpath, dirnames, filenames in os.walk(dir):
for dirname in dirnames:
dir_arcname = self._get_archive_name(
dirname,
parentdir=dirpath,
rootdir=dir,
)
zipdir = zipfile.ZipInfo(dir_arcname + "/")
zipdir.external_attr = 0x1ED << 16 # 0o755
zip_output.writestr(zipdir, "")
for filename in filenames:
file_arcname = self._get_archive_name(
filename,
parentdir=dirpath,
rootdir=dir,
)
filename = os.path.join(dirpath, filename)
zip_output.write(filename, file_arcname)
logger.info("Saved %s", display_path(archive_path))
def install(
self,
install_options: List[str],
global_options: Optional[Sequence[str]] = None,
root: Optional[str] = None,
home: Optional[str] = None,
prefix: Optional[str] = None,
warn_script_location: bool = True,
use_user_site: bool = False,
pycompile: bool = True,
) -> None:
scheme = get_scheme(
self.name,
user=use_user_site,
home=home,
root=root,
isolated=self.isolated,
prefix=prefix,
)
global_options = global_options if global_options is not None else []
if self.editable and not self.is_wheel:
install_editable_legacy(
install_options,
global_options,
prefix=prefix,
home=home,
use_user_site=use_user_site,
name=self.name,
setup_py_path=self.setup_py_path,
isolated=self.isolated,
build_env=self.build_env,
unpacked_source_directory=self.unpacked_source_directory,
)
self.install_succeeded = True
return
if self.is_wheel:
assert self.local_file_path
direct_url = None
if self.editable:
direct_url = direct_url_for_editable(self.unpacked_source_directory)
elif self.original_link:
direct_url = direct_url_from_link(
self.original_link,
self.source_dir,
self.original_link_is_in_wheel_cache,
)
install_wheel(
self.name,
self.local_file_path,
scheme=scheme,
req_description=str(self.req),
pycompile=pycompile,
warn_script_location=warn_script_location,
direct_url=direct_url,
requested=self.user_supplied,
)
self.install_succeeded = True
return
# TODO: Why don't we do this for editable installs?
# Extend the list of global and install options passed on to
# the setup.py call with the ones from the requirements file.
# Options specified in requirements file override those
# specified on the command line, since the last option given
# to setup.py is the one that is used.
global_options = list(global_options) + self.global_options
install_options = list(install_options) + self.install_options
try:
success = install_legacy(
install_options=install_options,
global_options=global_options,
root=root,
home=home,
prefix=prefix,
use_user_site=use_user_site,
pycompile=pycompile,
scheme=scheme,
setup_py_path=self.setup_py_path,
isolated=self.isolated,
req_name=self.name,
build_env=self.build_env,
unpacked_source_directory=self.unpacked_source_directory,
req_description=str(self.req),
)
except LegacyInstallFailure as exc:
self.install_succeeded = False
raise exc
except Exception:
self.install_succeeded = True
raise
self.install_succeeded = success
if success and self.legacy_install_reason == 8368:
deprecated(
reason=(
"{} was installed using the legacy 'setup.py install' "
"method, because a wheel could not be built for it.".format(
self.name
)
),
replacement="to fix the wheel build issue reported above",
gone_in=None,
issue=8368,
)
def check_invalid_constraint_type(req: InstallRequirement) -> str:
# Check for unsupported forms
problem = ""
if not req.name:
problem = "Unnamed requirements are not allowed as constraints"
elif req.editable:
problem = "Editable requirements are not allowed as constraints"
elif req.extras:
problem = "Constraints cannot have extras"
if problem:
deprecated(
reason=(
"Constraints are only allowed to take the form of a package "
"name and a version specifier. Other forms were originally "
"permitted as an accident of the implementation, but were "
"undocumented. The new implementation of the resolver no "
"longer supports these forms."
),
replacement="replacing the constraint with a requirement",
# No plan yet for when the new resolver becomes default
gone_in=None,
issue=8210,
)
return problem
|
py | b40d9fb8d145f8177c74bfd14f5733059a47359e | from .context import mango
from .fakes import fake_account_info, fake_public_key
from decimal import Decimal
def test_constructor():
account_info = fake_account_info()
program_id = fake_public_key()
market = fake_public_key()
owner = fake_public_key()
flags = mango.SerumAccountFlags(mango.Version.V1, True, False, True, False, False, False, False, False)
actual = mango.OpenOrders(account_info, mango.Version.V1, program_id, flags, market,
owner, Decimal(0), Decimal(0), Decimal(0), Decimal(0),
Decimal(0), Decimal(0), [], [], Decimal(0))
assert actual is not None
assert actual.logger is not None
|
py | b40da0a7a2a70732f295d63377a2a5adef07f2b4 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: LocalServerInfo.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='LocalServerInfo.proto',
package='com.eufylife.smarthome.protobuftool',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n\x15LocalServerInfo.proto\x12#com.eufylife.smarthome.protobuftool\"\xe6\r\n\x12LocalServerMessage\x12\x11\n\tmagic_num\x18\x01 \x01(\r\x12\x11\n\tlocalcode\x18\x02 \x01(\t\x12V\n\x01\x61\x18\x03 \x01(\x0b\x32I.com.eufylife.smarthome.protobuftool.LocalServerMessage.PingPacketMessageH\x00\x12U\n\x01\x62\x18\x04 \x01(\x0b\x32H.com.eufylife.smarthome.protobuftool.LocalServerMessage.OtaPacketMessageH\x00\x12T\n\x01\x63\x18\x05 \x01(\x0b\x32G.com.eufylife.smarthome.protobuftool.LocalServerMessage.UserDataMessageH\x00\x12S\n\x01\x64\x18\x06 \x01(\x0b\x32\x46.com.eufylife.smarthome.protobuftool.LocalServerMessage.DevinfoMessageH\x00\x1a\xb2\x01\n\x11PingPacketMessage\x12\x66\n\x04type\x18\x01 \x01(\x0e\x32X.com.eufylife.smarthome.protobuftool.LocalServerMessage.PingPacketMessage.PingPacketType\"5\n\x0ePingPacketType\x12\x10\n\x0cPING_REQUEST\x10\x00\x12\x11\n\rPING_RESPONSE\x10\x01\x1a\xf3\x05\n\x10OtaPacketMessage\x12\x64\n\x04type\x18\x01 \x01(\x0e\x32V.com.eufylife.smarthome.protobuftool.LocalServerMessage.OtaPacketMessage.OtaPacketType\x12\x14\n\x0cotafile_size\x18\x02 \x01(\r\x12o\n\x08ota_data\x18\x03 \x01(\x0b\x32].com.eufylife.smarthome.protobuftool.LocalServerMessage.OtaPacketMessage.OtaUpdateDataMessage\x12\x61\n\x05\x63\x61use\x18\x04 \x01(\x0e\x32R.com.eufylife.smarthome.protobuftool.LocalServerMessage.OtaPacketMessage.FailCause\x1aO\n\x14OtaUpdateDataMessage\x12\x12\n\nadr_offset\x18\x01 \x01(\r\x12\x15\n\rpacket_length\x18\x02 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c\"\xce\x01\n\rOtaPacketType\x12\x1b\n\x17REQUEST_UPDATE_FIRMWARE\x10\x01\x12\x11\n\rPERMIT_UPDATE\x10\x02\x12\x18\n\x14OTA_UPDATE_DATA_SEND\x10\x03\x12\x16\n\x12UPDATE_DATA_VERIFY\x10\x04\x12\x14\n\x10OTA_UPDATE_ABORT\x10\x05\x12\x17\n\x13OTA_COMPLETE_NOTIFY\x10\x06\x12\x14\n\x10OTA_STATUS_FAILD\x10\x07\x12\x16\n\x12OTA_STATUS_SUCCESS\x10\x08\"m\n\tFailCause\x12\x17\n\x13\x45RASE_SECTION_FAILD\x10\x01\x12\x15\n\x11\x44\x41TA_OFFSET_ERROR\x10\x02\x12\x15\n\x11SWITCH_SIGN_FAILD\x10\x03\x12\x19\n\x15\x42RUN_UNFINISHED_ERROR\x10\x04\x1a\xdb\x01\n\x0fUserDataMessage\x12\x62\n\x04type\x18\x01 \x01(\x0e\x32T.com.eufylife.smarthome.protobuftool.LocalServerMessage.UserDataMessage.UserDataType\x12\x10\n\x08usr_data\x18\x02 \x01(\x0c\"R\n\x0cUserDataType\x12\x14\n\x10sendUsrDataToDev\x10\x00\x12\x14\n\x10getDevStatusData\x10\x01\x12\x16\n\x12sendStausDataToApp\x10\x02\x1a\xba\x01\n\x0e\x44\x65vinfoMessage\x12`\n\x04type\x18\x01 \x01(\x0e\x32R.com.eufylife.smarthome.protobuftool.LocalServerMessage.DevinfoMessage.DevInfoType\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"8\n\x0b\x44\x65vInfoType\x12\x13\n\x0fREQUEST_KEYCODE\x10\x01\x12\x14\n\x10RESPONSE_KEYCODE\x10\x02\x42\n\n\x08playload')
)
_LOCALSERVERMESSAGE_PINGPACKETMESSAGE_PINGPACKETTYPE = _descriptor.EnumDescriptor(
name='PingPacketType',
full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.PingPacketMessage.PingPacketType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='PING_REQUEST', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PING_RESPONSE', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=595,
serialized_end=648,
)
_sym_db.RegisterEnumDescriptor(_LOCALSERVERMESSAGE_PINGPACKETMESSAGE_PINGPACKETTYPE)
_LOCALSERVERMESSAGE_OTAPACKETMESSAGE_OTAPACKETTYPE = _descriptor.EnumDescriptor(
name='OtaPacketType',
full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.OtaPacketMessage.OtaPacketType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='REQUEST_UPDATE_FIRMWARE', index=0, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PERMIT_UPDATE', index=1, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OTA_UPDATE_DATA_SEND', index=2, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UPDATE_DATA_VERIFY', index=3, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OTA_UPDATE_ABORT', index=4, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OTA_COMPLETE_NOTIFY', index=5, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OTA_STATUS_FAILD', index=6, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OTA_STATUS_SUCCESS', index=7, number=8,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1089,
serialized_end=1295,
)
_sym_db.RegisterEnumDescriptor(_LOCALSERVERMESSAGE_OTAPACKETMESSAGE_OTAPACKETTYPE)
_LOCALSERVERMESSAGE_OTAPACKETMESSAGE_FAILCAUSE = _descriptor.EnumDescriptor(
name='FailCause',
full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.OtaPacketMessage.FailCause',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ERASE_SECTION_FAILD', index=0, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATA_OFFSET_ERROR', index=1, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SWITCH_SIGN_FAILD', index=2, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BRUN_UNFINISHED_ERROR', index=3, number=4,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1297,
serialized_end=1406,
)
_sym_db.RegisterEnumDescriptor(_LOCALSERVERMESSAGE_OTAPACKETMESSAGE_FAILCAUSE)
_LOCALSERVERMESSAGE_USERDATAMESSAGE_USERDATATYPE = _descriptor.EnumDescriptor(
name='UserDataType',
full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.UserDataMessage.UserDataType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='sendUsrDataToDev', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='getDevStatusData', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='sendStausDataToApp', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1546,
serialized_end=1628,
)
_sym_db.RegisterEnumDescriptor(_LOCALSERVERMESSAGE_USERDATAMESSAGE_USERDATATYPE)
_LOCALSERVERMESSAGE_DEVINFOMESSAGE_DEVINFOTYPE = _descriptor.EnumDescriptor(
name='DevInfoType',
full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.DevinfoMessage.DevInfoType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='REQUEST_KEYCODE', index=0, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RESPONSE_KEYCODE', index=1, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1761,
serialized_end=1817,
)
_sym_db.RegisterEnumDescriptor(_LOCALSERVERMESSAGE_DEVINFOMESSAGE_DEVINFOTYPE)
_LOCALSERVERMESSAGE_PINGPACKETMESSAGE = _descriptor.Descriptor(
name='PingPacketMessage',
full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.PingPacketMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.PingPacketMessage.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_LOCALSERVERMESSAGE_PINGPACKETMESSAGE_PINGPACKETTYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=470,
serialized_end=648,
)
_LOCALSERVERMESSAGE_OTAPACKETMESSAGE_OTAUPDATEDATAMESSAGE = _descriptor.Descriptor(
name='OtaUpdateDataMessage',
full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.OtaPacketMessage.OtaUpdateDataMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='adr_offset', full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.OtaPacketMessage.OtaUpdateDataMessage.adr_offset', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='packet_length', full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.OtaPacketMessage.OtaUpdateDataMessage.packet_length', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.OtaPacketMessage.OtaUpdateDataMessage.data', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1007,
serialized_end=1086,
)
_LOCALSERVERMESSAGE_OTAPACKETMESSAGE = _descriptor.Descriptor(
name='OtaPacketMessage',
full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.OtaPacketMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.OtaPacketMessage.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='otafile_size', full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.OtaPacketMessage.otafile_size', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ota_data', full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.OtaPacketMessage.ota_data', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cause', full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.OtaPacketMessage.cause', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_LOCALSERVERMESSAGE_OTAPACKETMESSAGE_OTAUPDATEDATAMESSAGE, ],
enum_types=[
_LOCALSERVERMESSAGE_OTAPACKETMESSAGE_OTAPACKETTYPE,
_LOCALSERVERMESSAGE_OTAPACKETMESSAGE_FAILCAUSE,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=651,
serialized_end=1406,
)
_LOCALSERVERMESSAGE_USERDATAMESSAGE = _descriptor.Descriptor(
name='UserDataMessage',
full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.UserDataMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.UserDataMessage.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='usr_data', full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.UserDataMessage.usr_data', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_LOCALSERVERMESSAGE_USERDATAMESSAGE_USERDATATYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1409,
serialized_end=1628,
)
_LOCALSERVERMESSAGE_DEVINFOMESSAGE = _descriptor.Descriptor(
name='DevinfoMessage',
full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.DevinfoMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.DevinfoMessage.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.DevinfoMessage.data', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_LOCALSERVERMESSAGE_DEVINFOMESSAGE_DEVINFOTYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1631,
serialized_end=1817,
)
_LOCALSERVERMESSAGE = _descriptor.Descriptor(
name='LocalServerMessage',
full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='magic_num', full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.magic_num', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='localcode', full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.localcode', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='a', full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.a', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='b', full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.b', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='c', full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.c', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='d', full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.d', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_LOCALSERVERMESSAGE_PINGPACKETMESSAGE, _LOCALSERVERMESSAGE_OTAPACKETMESSAGE, _LOCALSERVERMESSAGE_USERDATAMESSAGE, _LOCALSERVERMESSAGE_DEVINFOMESSAGE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='playload', full_name='com.eufylife.smarthome.protobuftool.LocalServerMessage.playload',
index=0, containing_type=None, fields=[]),
],
serialized_start=63,
serialized_end=1829,
)
_LOCALSERVERMESSAGE_PINGPACKETMESSAGE.fields_by_name['type'].enum_type = _LOCALSERVERMESSAGE_PINGPACKETMESSAGE_PINGPACKETTYPE
_LOCALSERVERMESSAGE_PINGPACKETMESSAGE.containing_type = _LOCALSERVERMESSAGE
_LOCALSERVERMESSAGE_PINGPACKETMESSAGE_PINGPACKETTYPE.containing_type = _LOCALSERVERMESSAGE_PINGPACKETMESSAGE
_LOCALSERVERMESSAGE_OTAPACKETMESSAGE_OTAUPDATEDATAMESSAGE.containing_type = _LOCALSERVERMESSAGE_OTAPACKETMESSAGE
_LOCALSERVERMESSAGE_OTAPACKETMESSAGE.fields_by_name['type'].enum_type = _LOCALSERVERMESSAGE_OTAPACKETMESSAGE_OTAPACKETTYPE
_LOCALSERVERMESSAGE_OTAPACKETMESSAGE.fields_by_name['ota_data'].message_type = _LOCALSERVERMESSAGE_OTAPACKETMESSAGE_OTAUPDATEDATAMESSAGE
_LOCALSERVERMESSAGE_OTAPACKETMESSAGE.fields_by_name['cause'].enum_type = _LOCALSERVERMESSAGE_OTAPACKETMESSAGE_FAILCAUSE
_LOCALSERVERMESSAGE_OTAPACKETMESSAGE.containing_type = _LOCALSERVERMESSAGE
_LOCALSERVERMESSAGE_OTAPACKETMESSAGE_OTAPACKETTYPE.containing_type = _LOCALSERVERMESSAGE_OTAPACKETMESSAGE
_LOCALSERVERMESSAGE_OTAPACKETMESSAGE_FAILCAUSE.containing_type = _LOCALSERVERMESSAGE_OTAPACKETMESSAGE
_LOCALSERVERMESSAGE_USERDATAMESSAGE.fields_by_name['type'].enum_type = _LOCALSERVERMESSAGE_USERDATAMESSAGE_USERDATATYPE
_LOCALSERVERMESSAGE_USERDATAMESSAGE.containing_type = _LOCALSERVERMESSAGE
_LOCALSERVERMESSAGE_USERDATAMESSAGE_USERDATATYPE.containing_type = _LOCALSERVERMESSAGE_USERDATAMESSAGE
_LOCALSERVERMESSAGE_DEVINFOMESSAGE.fields_by_name['type'].enum_type = _LOCALSERVERMESSAGE_DEVINFOMESSAGE_DEVINFOTYPE
_LOCALSERVERMESSAGE_DEVINFOMESSAGE.containing_type = _LOCALSERVERMESSAGE
_LOCALSERVERMESSAGE_DEVINFOMESSAGE_DEVINFOTYPE.containing_type = _LOCALSERVERMESSAGE_DEVINFOMESSAGE
_LOCALSERVERMESSAGE.fields_by_name['a'].message_type = _LOCALSERVERMESSAGE_PINGPACKETMESSAGE
_LOCALSERVERMESSAGE.fields_by_name['b'].message_type = _LOCALSERVERMESSAGE_OTAPACKETMESSAGE
_LOCALSERVERMESSAGE.fields_by_name['c'].message_type = _LOCALSERVERMESSAGE_USERDATAMESSAGE
_LOCALSERVERMESSAGE.fields_by_name['d'].message_type = _LOCALSERVERMESSAGE_DEVINFOMESSAGE
_LOCALSERVERMESSAGE.oneofs_by_name['playload'].fields.append(
_LOCALSERVERMESSAGE.fields_by_name['a'])
_LOCALSERVERMESSAGE.fields_by_name['a'].containing_oneof = _LOCALSERVERMESSAGE.oneofs_by_name['playload']
_LOCALSERVERMESSAGE.oneofs_by_name['playload'].fields.append(
_LOCALSERVERMESSAGE.fields_by_name['b'])
_LOCALSERVERMESSAGE.fields_by_name['b'].containing_oneof = _LOCALSERVERMESSAGE.oneofs_by_name['playload']
_LOCALSERVERMESSAGE.oneofs_by_name['playload'].fields.append(
_LOCALSERVERMESSAGE.fields_by_name['c'])
_LOCALSERVERMESSAGE.fields_by_name['c'].containing_oneof = _LOCALSERVERMESSAGE.oneofs_by_name['playload']
_LOCALSERVERMESSAGE.oneofs_by_name['playload'].fields.append(
_LOCALSERVERMESSAGE.fields_by_name['d'])
_LOCALSERVERMESSAGE.fields_by_name['d'].containing_oneof = _LOCALSERVERMESSAGE.oneofs_by_name['playload']
DESCRIPTOR.message_types_by_name['LocalServerMessage'] = _LOCALSERVERMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
LocalServerMessage = _reflection.GeneratedProtocolMessageType('LocalServerMessage', (_message.Message,), dict(
PingPacketMessage = _reflection.GeneratedProtocolMessageType('PingPacketMessage', (_message.Message,), dict(
DESCRIPTOR = _LOCALSERVERMESSAGE_PINGPACKETMESSAGE,
__module__ = 'LocalServerInfo_pb2'
# @@protoc_insertion_point(class_scope:com.eufylife.smarthome.protobuftool.LocalServerMessage.PingPacketMessage)
))
,
OtaPacketMessage = _reflection.GeneratedProtocolMessageType('OtaPacketMessage', (_message.Message,), dict(
OtaUpdateDataMessage = _reflection.GeneratedProtocolMessageType('OtaUpdateDataMessage', (_message.Message,), dict(
DESCRIPTOR = _LOCALSERVERMESSAGE_OTAPACKETMESSAGE_OTAUPDATEDATAMESSAGE,
__module__ = 'LocalServerInfo_pb2'
# @@protoc_insertion_point(class_scope:com.eufylife.smarthome.protobuftool.LocalServerMessage.OtaPacketMessage.OtaUpdateDataMessage)
))
,
DESCRIPTOR = _LOCALSERVERMESSAGE_OTAPACKETMESSAGE,
__module__ = 'LocalServerInfo_pb2'
# @@protoc_insertion_point(class_scope:com.eufylife.smarthome.protobuftool.LocalServerMessage.OtaPacketMessage)
))
,
UserDataMessage = _reflection.GeneratedProtocolMessageType('UserDataMessage', (_message.Message,), dict(
DESCRIPTOR = _LOCALSERVERMESSAGE_USERDATAMESSAGE,
__module__ = 'LocalServerInfo_pb2'
# @@protoc_insertion_point(class_scope:com.eufylife.smarthome.protobuftool.LocalServerMessage.UserDataMessage)
))
,
DevinfoMessage = _reflection.GeneratedProtocolMessageType('DevinfoMessage', (_message.Message,), dict(
DESCRIPTOR = _LOCALSERVERMESSAGE_DEVINFOMESSAGE,
__module__ = 'LocalServerInfo_pb2'
# @@protoc_insertion_point(class_scope:com.eufylife.smarthome.protobuftool.LocalServerMessage.DevinfoMessage)
))
,
DESCRIPTOR = _LOCALSERVERMESSAGE,
__module__ = 'LocalServerInfo_pb2'
# @@protoc_insertion_point(class_scope:com.eufylife.smarthome.protobuftool.LocalServerMessage)
))
_sym_db.RegisterMessage(LocalServerMessage)
_sym_db.RegisterMessage(LocalServerMessage.PingPacketMessage)
_sym_db.RegisterMessage(LocalServerMessage.OtaPacketMessage)
_sym_db.RegisterMessage(LocalServerMessage.OtaPacketMessage.OtaUpdateDataMessage)
_sym_db.RegisterMessage(LocalServerMessage.UserDataMessage)
_sym_db.RegisterMessage(LocalServerMessage.DevinfoMessage)
# @@protoc_insertion_point(module_scope)
|
py | b40da1f19fc03feab72041424f1b6203b4371098 | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common code shared by container based launchers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, List, Text, Union
import jinja2
from tfx import types
from tfx.components.base import executor_spec
from tfx.dsl.component.experimental import executor_specs
from tfx.dsl.component.experimental import placeholders
def resolve_container_template(
container_spec_tmpl: Union[executor_spec.ExecutorContainerSpec,
executor_specs.TemplatedExecutorContainerSpec],
input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any]) -> executor_spec.ExecutorContainerSpec:
"""Resolves Jinja2 template languages from an executor container spec.
Args:
container_spec_tmpl: the container spec template to be resolved.
input_dict: Dictionary of input artifacts consumed by this component.
output_dict: Dictionary of output artifacts produced by this component.
exec_properties: Dictionary of execution properties.
Returns:
A resolved container spec.
"""
context = {
'input_dict': input_dict,
'output_dict': output_dict,
'exec_properties': exec_properties,
}
if isinstance(container_spec_tmpl,
executor_specs.TemplatedExecutorContainerSpec):
return executor_spec.ExecutorContainerSpec(
image=container_spec_tmpl.image,
command=resolve_container_command_line(
container_spec=container_spec_tmpl,
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
),
)
return executor_spec.ExecutorContainerSpec(
image=_render_text(container_spec_tmpl.image, context),
command=_render_items(container_spec_tmpl.command, context),
args=_render_items(container_spec_tmpl.args, context))
def _render_items(items: List[Text], context: Dict[Text, Any]) -> List[Text]:
if not items:
return items
return [_render_text(item, context) for item in items]
def _render_text(text: Text, context: Dict[Text, Any]) -> Text:
return jinja2.Template(text).render(context)
def resolve_container_command_line(
container_spec: executor_specs.TemplatedExecutorContainerSpec,
input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any],
) -> List[Text]:
"""Resolves placeholders in the command line of a container.
Args:
container_spec: ContainerSpec to resolve
input_dict: Dictionary of input artifacts consumed by this component.
output_dict: Dictionary of output artifacts produced by this component.
exec_properties: Dictionary of execution properties.
Returns:
Resolved command line.
"""
def expand_command_line_arg(
cmd_arg: executor_specs.CommandlineArgumentType,
) -> Text:
"""Resolves a single argument."""
if isinstance(cmd_arg, str):
return cmd_arg
elif isinstance(cmd_arg, placeholders.InputValuePlaceholder):
if cmd_arg.input_name in exec_properties:
return exec_properties[cmd_arg.input_name]
else:
artifact = input_dict[cmd_arg.input_name][0]
return str(artifact.value)
elif isinstance(cmd_arg, placeholders.InputUriPlaceholder):
return input_dict[cmd_arg.input_name][0].uri
elif isinstance(cmd_arg, placeholders.OutputUriPlaceholder):
return output_dict[cmd_arg.output_name][0].uri
elif isinstance(cmd_arg, placeholders.ConcatPlaceholder):
resolved_items = [expand_command_line_arg(item) for item in cmd_arg.items]
for item in resolved_items:
if not isinstance(item, (str, Text)):
raise TypeError('Expanded item "{}" has incorrect type "{}"'.format(
item, type(item)))
return ''.join(resolved_items)
else:
raise TypeError(
('Unsupported type of command-line arguments: "{}".'
' Supported types are {}.')
.format(type(cmd_arg), str(executor_specs.CommandlineArgumentType)))
resolved_command_line = []
for cmd_arg in (container_spec.command or []):
resolved_cmd_arg = expand_command_line_arg(cmd_arg)
if not isinstance(resolved_cmd_arg, (str, Text)):
raise TypeError(
'Resolved argument "{}" (type="{}") is not a string.'.format(
resolved_cmd_arg, type(resolved_cmd_arg)))
resolved_command_line.append(resolved_cmd_arg)
return resolved_command_line
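# Illustrative sketch (not part of the original module): how command-line
# placeholder resolution is expected to behave. The spec and the fake artifact
# class below are assumptions for demonstration only; the constructor keyword
# arguments are assumed from the spec/placeholder classes imported above.
def _example_resolve_command_line():  # pragma: no cover
  """Hedged usage sketch for resolve_container_command_line."""
  class _FakeArtifact(object):
    # minimal stand-in for types.Artifact; only `.uri` is read by the resolver
    uri = '/tmp/out'
  spec = executor_specs.TemplatedExecutorContainerSpec(
      image='busybox',
      command=[
          'echo',
          placeholders.InputValuePlaceholder('message'),
          placeholders.ConcatPlaceholder([
              '--output-path=',
              placeholders.OutputUriPlaceholder('out'),
          ]),
      ])
  # Expected result: ['echo', 'hello', '--output-path=/tmp/out']
  return resolve_container_command_line(
      container_spec=spec,
      input_dict={},
      output_dict={'out': [_FakeArtifact()]},
      exec_properties={'message': 'hello'})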
def to_swagger_dict(config: Any) -> Any:
"""Converts a config object to a swagger API dict.
  This utility method recursively converts swagger-codegen generated configs into
  a valid swagger dictionary. It works around a bug
  (https://github.com/swagger-api/swagger-codegen/issues/8948)
  in the swagger-generated code.
  Args:
    config: The config object. It can be a List, a Dict, or a swagger-codegen
      generated object, which has an `attribute_map` attribute.
Returns:
The original object with all Swagger generated object replaced with
dictionary object.
"""
if isinstance(config, list):
return [to_swagger_dict(x) for x in config]
if hasattr(config, 'attribute_map'):
return {
swagger_name: to_swagger_dict(getattr(config, key))
for (key, swagger_name) in config.attribute_map.items()
if getattr(config, key)
}
if isinstance(config, dict):
return {key: to_swagger_dict(value) for key, value in config.items()}
return config
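# Hedged illustration of to_swagger_dict on a codegen-style object. The class
# below is a stand-in for a swagger-codegen generated model, not a real one.
def _example_to_swagger_dict():  # pragma: no cover
  class _FakeSwaggerConfig(object):
    attribute_map = {'job_name': 'jobName', 'worker_count': 'workerCount'}
    job_name = 'train'
    worker_count = 0  # falsy attributes are dropped by to_swagger_dict
  # Expected: [{'jobName': 'train'}, {'plain': 'dict'}]
  return to_swagger_dict([_FakeSwaggerConfig(), {'plain': 'dict'}])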
|
py | b40da39be9461741d0e37472cdd8fffea201163a | from __future__ import unicode_literals
from helium import Device
Device # To avoid warnings
|
py | b40da3c1327165149bea2e48195118f794d137e3 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
sys.path.append("qa/pull-tester/")
from tests_config import *
BOLD = ("","")
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
RPC_TESTS_DIR = SRCDIR + '/qa/rpc-tests/'
#If imported values are not defined then set to zero (or disabled)
if 'ENABLE_WALLET' not in vars():
ENABLE_WALLET=0
if 'ENABLE_BITCOIND' not in vars():
ENABLE_BITCOIND=0
if 'ENABLE_UTILS' not in vars():
ENABLE_UTILS=0
if 'ENABLE_ZMQ' not in vars():
ENABLE_ZMQ=0
# python-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
try:
import zmq
except ImportError:
print("WARNING: \"import zmq\" failed. Setting ENABLE_ZMQ=0. " \
"To run zmq tests, see dependency info in /qa/README.md.")
ENABLE_ZMQ=0
ENABLE_COVERAGE=0
#Create a set to store arguments and create the passon string
opts = set()
passon_args = []
PASSON_REGEX = re.compile("^--")
PARALLEL_REGEX = re.compile('^-parallel=')
print_help = False
run_parallel = 4
for arg in sys.argv[1:]:
if arg == "--help" or arg == "-h" or arg == "-?":
print_help = True
break
if arg == '--coverage':
ENABLE_COVERAGE = 1
elif PASSON_REGEX.match(arg):
passon_args.append(arg)
elif PARALLEL_REGEX.match(arg):
run_parallel = int(arg.split(sep='=', maxsplit=1)[1])
else:
opts.add(arg)
#Set env vars
if "DASHD" not in os.environ:
os.environ["DASHD"] = BUILDDIR + '/src/dashd' + EXEEXT
if EXEEXT == ".exe" and "-win" not in opts:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Win tests currently disabled by default. Use -win option to enable")
sys.exit(0)
if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
print("No rpc tests to run. Wallet, utils, and bitcoind must all be enabled")
sys.exit(0)
# python3-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
try:
import zmq
except ImportError:
print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or "
"to run zmq tests, see dependency info in /qa/README.md.")
# ENABLE_ZMQ=0
raise
testScripts = [
# longest test should go first, to favor running tests in parallel
'dip3-deterministicmns.py', # NOTE: needs dash_hash to pass
'wallet-hd.py',
'walletbackup.py',
# vv Tests less than 5m vv
'p2p-fullblocktest.py', # NOTE: needs dash_hash to pass
'fundrawtransaction.py',
'fundrawtransaction-hd.py',
'p2p-autoinstantsend.py',
'autoix-mempool.py',
# vv Tests less than 2m vv
'p2p-instantsend.py',
'wallet.py',
'wallet-accounts.py',
'wallet-dump.py',
'listtransactions.py',
'multikeysporks.py',
# vv Tests less than 60s vv
'sendheaders.py', # NOTE: needs dash_hash to pass
'zapwallettxes.py',
'importmulti.py',
'mempool_limit.py',
'merkle_blocks.py',
'receivedby.py',
'abandonconflict.py',
'bip68-112-113-p2p.py',
'rawtransactions.py',
'reindex.py',
# vv Tests less than 30s vv
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'httpbasics.py',
'multi_rpc.py',
'proxy_test.py',
'signrawtransactions.py',
'nodehandling.py',
'addressindex.py',
'timestampindex.py',
'spentindex.py',
'decodescript.py',
'blockchain.py',
'disablewallet.py',
'keypool.py',
'keypool-hd.py',
'p2p-mempool.py',
'prioritise_transaction.py',
'invalidblockrequest.py', # NOTE: needs dash_hash to pass
'invalidtxrequest.py', # NOTE: needs dash_hash to pass
'p2p-versionbits-warning.py',
'preciousblock.py',
'importprunedfunds.py',
'signmessages.py',
'nulldummy.py',
'import-rescan.py',
'rpcnamedargs.py',
'listsinceblock.py',
'p2p-leaktests.py',
'p2p-compactblocks.py',
'sporks.py',
]
if ENABLE_ZMQ:
testScripts.append('zmq_test.py')
testScriptsExt = [
# 'pruning.py', # Prune mode is incompatible with -txindex.
# vv Tests less than 20m vv
'smartfees.py',
# vv Tests less than 5m vv
'maxuploadtarget.py',
'mempool_packages.py',
# vv Tests less than 2m vv
'bip68-sequence.py',
'getblocktemplate_longpoll.py', # FIXME: "socket.error: [Errno 54] Connection reset by peer" on my Mac, same as https://github.com/bitcoin/bitcoin/issues/6651
'p2p-timeouts.py',
# vv Tests less than 60s vv
'bip9-softforks.py',
'rpcbind_test.py',
# vv Tests less than 30s vv
'bip65-cltv.py',
'bip65-cltv-p2p.py', # NOTE: needs dash_hash to pass
'bipdersig-p2p.py', # NOTE: needs dash_hash to pass
'bipdersig.py',
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
'forknotify.py',
'invalidateblock.py',
'maxblocksinflight.py',
'p2p-acceptblock.py', # NOTE: needs dash_hash to pass
]
def runtests():
test_list = []
if '-extended' in opts:
test_list = testScripts + testScriptsExt
elif len(opts) == 0 or (len(opts) == 1 and "-win" in opts):
test_list = testScripts
else:
for t in testScripts + testScriptsExt:
if t in opts or re.sub(".py$", "", t) in opts:
test_list.append(t)
if print_help:
# Only print help of the first script and exit
subprocess.check_call((RPC_TESTS_DIR + test_list[0]).split() + ['-h'])
sys.exit(0)
coverage = None
if ENABLE_COVERAGE:
coverage = RPCCoverage()
print("Initializing coverage directory at %s\n" % coverage.dir)
flags = ["--srcdir=%s/src" % BUILDDIR] + passon_args
flags.append("--cachedir=%s/qa/cache" % BUILDDIR)
if coverage:
flags.append(coverage.flag)
if len(test_list) > 1 and run_parallel > 1:
# Populate cache
subprocess.check_output([RPC_TESTS_DIR + 'create_cache.py'] + flags)
#Run Tests
max_len_name = len(max(test_list, key=len))
time_sum = 0
time0 = time.time()
job_queue = RPCTestHandler(run_parallel, test_list, flags)
results = BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "PASSED", "DURATION") + BOLD[0]
all_passed = True
for _ in range(len(test_list)):
(name, stdout, stderr, passed, duration) = job_queue.get_next()
all_passed = all_passed and passed
time_sum += duration
print('\n' + BOLD[1] + name + BOLD[0] + ":")
print('' if passed else stdout + '\n', end='')
print('' if stderr == '' else 'stderr:\n' + stderr + '\n', end='')
results += "%s | %s | %s s\n" % (name.ljust(max_len_name), str(passed).ljust(6), duration)
print("Pass: %s%s%s, Duration: %s s\n" % (BOLD[1], passed, BOLD[0], duration))
results += BOLD[1] + "\n%s | %s | %s s (accumulated)" % ("ALL".ljust(max_len_name), str(all_passed).ljust(6), time_sum) + BOLD[0]
print(results)
print("\nRuntime: %s s" % (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
print("Cleaning up coverage data")
coverage.cleanup()
sys.exit(not all_passed)
class RPCTestHandler:
"""
    Trigger the test scripts passed in via the list.
"""
def __init__(self, num_tests_parallel, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.test_list = test_list
self.flags = flags
self.num_running = 0
# In case there is a graveyard of zombie bitcoinds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
t = self.test_list.pop(0)
port_seed = ["--portseed={}".format(len(self.test_list) + self.portseed_offset)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
self.jobs.append((t,
time.time(),
subprocess.Popen((RPC_TESTS_DIR + t).split() + self.flags + port_seed,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
while True:
# Return first proc that finishes
time.sleep(.5)
for j in self.jobs:
(name, time0, proc, log_out, log_err) = j
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
log_out.close(), log_err.close()
passed = stderr == "" and proc.returncode == 0
self.num_running -= 1
self.jobs.remove(j)
return name, stdout, stderr, passed, int(time.time() - time0)
print('.', end='', flush=True)
class RPCCoverage(object):
"""
Coverage reporting utilities for pull-tester.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: qa/rpc-tests/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `qa/rpc-tests/test-framework/coverage.py`
REFERENCE_FILENAME = 'rpc_interface.txt'
COVERAGE_FILE_PREFIX = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(COVERAGE_FILE_PREFIX):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
runtests()
|
py | b40da4ad61c96a4f3efc0b1504094981d4ccb0d5 | import hashlib
import logging
import time
from cacheops import CacheMiss, cache
from django.conf import settings
from django_rq import job
from pyVim import connect
from pyVmomi import vim
from netbox_vcenter.models import ClusterVCenter
logger = logging.getLogger('netbox_vcenter')
def get_virtual_machines(vcenter: ClusterVCenter):
if not vcenter:
return None
logger.debug("Checking for VMs on {}".format(vcenter.server))
try:
cache_key = get_cache_key(vcenter)
vms = cache.get(cache_key)
if vms != 'FAILED':
logger.debug("Found cached VMs on {}".format(vcenter.server))
return vms
except CacheMiss:
# Get the VMs in the background worker, it will fill the cache
logger.info("Initiating background task to retrieve VMs from {}".format(vcenter.server))
refresh_virtual_machines.delay(vcenter=vcenter)
return None
def get_nic_vlan(content, dvs_cache, portgroup_cache, vm, dev):
dev_backing = dev.backing
vlan_id = None
if hasattr(dev_backing, 'port'):
port_group_key = dev.backing.port.portgroupKey
dvs_uuid = dev.backing.port.switchUuid
if dvs_uuid in dvs_cache:
dvs = dvs_cache[dvs_uuid]
else:
try:
dvs = content.dvSwitchManager.QueryDvsByUuid(dvs_uuid)
dvs_cache[dvs_uuid] = dvs
except Exception:
dvs = None
if dvs:
pg_obj = dvs.LookupDvPortGroup(port_group_key)
vlan_id = str(pg_obj.config.defaultPortConfig.vlan.vlanId)
else:
portgroup = dev.backing.network.name
vm_host = vm.runtime.host
if vm_host in portgroup_cache:
pgs = portgroup_cache[vm_host]
else:
pgs = vm_host.config.network.portgroup
portgroup_cache[vm_host] = pgs
for p in pgs:
if portgroup in p.key:
vlan_id = str(p.spec.vlanId)
return vlan_id
def get_objects_of_type(content, obj_type):
view_mgr = content.viewManager.CreateContainerView(content.rootFolder,
[obj_type],
True)
try:
return list(view_mgr.view)
finally:
view_mgr.Destroy()
def get_cache_key(vcenter: ClusterVCenter):
raw_key = f'{vcenter.server}\t{vcenter.username}\t{vcenter.password}'
    key = hashlib.sha256(raw_key.encode('utf-8')).hexdigest()[-16:]  # last 16 hex characters
return key
@job
def refresh_virtual_machines(vcenter: ClusterVCenter, force=False):
config = settings.PLUGINS_CONFIG['netbox_vcenter']
vcenter_cache_key = get_cache_key(vcenter)
# Check whether this server has failed recently and shouldn't be retried yet
try:
cached_data = cache.get(vcenter_cache_key)
if not force and cached_data == 'FAILED':
logger.info("Skipping vCenter update; server {} failed recently".format(vcenter.server))
return
if not force:
logger.info("Skipping vCenter update; server {} already in cache".format(vcenter.server))
return cached_data
except CacheMiss:
pass
service_instance = None
try:
logger.debug("Fetching VMs from {}".format(vcenter.server))
# Connect to the vCenter server
if vcenter.validate_certificate:
service_instance = connect.Connect(vcenter.server, user=vcenter.username, pwd=vcenter.password)
else:
service_instance = connect.ConnectNoSSL(vcenter.server, user=vcenter.username, pwd=vcenter.password)
content = service_instance.RetrieveContent()
vms = get_objects_of_type(content, vim.VirtualMachine)
all_stats = {
'timestamp': time.time(),
'vms': {}
}
dvs_cache = {}
portgroup_cache = {}
for vm in vms:
vm_stats = {
            'powered_on': None,
'vcpus': None,
'memory': None,
'disk': None,
'nics': [],
}
try:
if vm.runtime.powerState:
vm_stats['powered_on'] = vm.runtime.powerState == 'poweredOn'
if vm.config.hardware.numCPU:
vm_stats['vcpus'] = vm.config.hardware.numCPU
if vm.config.hardware.memoryMB:
vm_stats['memory'] = vm.config.hardware.memoryMB
disk_devices = [device for device in vm.config.hardware.device
if isinstance(device, vim.vm.device.VirtualDisk)]
if disk_devices:
# Sum and convert from KB to GB
total_capacity = 0
for device in disk_devices:
total_capacity += device.capacityInKB
vm_stats['disk'] = round(total_capacity / 1048576)
for dev in vm.config.hardware.device:
if isinstance(dev, vim.vm.device.VirtualEthernetCard):
vlan = get_nic_vlan(content, dvs_cache, portgroup_cache, vm, dev)
vm_stats['nics'].append({
'label': dev.deviceInfo.label,
'mac_address': dev.macAddress,
'vlan': vlan,
})
except Exception:
logger.exception("Error while fetching virtual machine {} from {}".format(vm.name, vcenter.server))
continue
# Collect all stats for returning
all_stats['vms'][vm.name] = vm_stats
# Cache a list of all VMs
cache.set(vcenter_cache_key, all_stats, config['CACHE_TIMEOUT'])
return all_stats
except Exception:
# Set a cookie in the cache so we don't keep retrying
logger.exception("Error while fetching virtual machines from {}. "
"Disabling checks for 5 minutes.".format(vcenter.server))
cache.set(vcenter_cache_key, 'FAILED', config['CACHE_FAILURE_TIMEOUT'])
finally:
if service_instance:
connect.Disconnect(service_instance)
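# Minimal usage sketch (hedged): assumes an existing ClusterVCenter row and a
# running django_rq worker to execute the queued job; names are illustrative.
#
#   vcenter = ClusterVCenter.objects.first()
#   vms = get_virtual_machines(vcenter)
#   if vms is None:
#       # cold cache: a refresh_virtual_machines job was enqueued; poll again later
#       pass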
|
py | b40da899663f4d6cbf097afdd646ecbd31d078a9 | """
----------------------------------------------------------------------
Copyright 2014 Smartsheet, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
----------------------------------------------------------------------
"""
from utils import config
import requests
import logging
# debugging
import pdb
theConfig = config.Config()
# read app config
appConfig = theConfig.getConfigFromFile("app.json")
logger = theConfig.getLogger(appConfig)
#class Sources:
# def __init__(self):
# return self
class RestGETCon:
def __init__(self, sourceConfig):
# authorize api connection
        self.apiConfig = sourceConfig
"""
Example REST GET Configuration ( to be set in the settings/sources.json file )
{
"sourceId": "markitOnDemandAPI",
"connectorClassName": "RestGETCon",
"apiUrl": "http://dev.markitondemand.com/Api/v2/Quote/json?symbol={}",
"isArray": false,
"isStrict": false
}
list required fields other than 'sourceId' and 'connectorClassName' from sourceConfig entry
'sourceId' and 'connectorClassName' are required for every source, and are already being checked
"""
requiredFields = "apiUrl,isArray"
self.apiConfig = theConfig.validateSourceConfig(sourceConfig, logger, requiredFields)
return None
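    # Usage sketch (hedged): the configuration values mirror the documented
    # example above; the lookup symbol is an arbitrary placeholder. See
    # findSourceMatch below for how the matching record is built.
    #
    #   con = RestGETCon({'sourceId': 'markitOnDemandAPI',
    #                     'connectorClassName': 'RestGETCon',
    #                     'apiUrl': 'http://dev.markitondemand.com/Api/v2/Quote/json?symbol={}',
    #                     'isArray': False})
    #   record = con.findSourceMatch('MSFT', 'Symbol')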
def findSourceMatch(self, lookupVal, lookupKey):
matchingRecord = {}
# query API
resp = requests.get(self.apiConfig['apiUrl'].format(lookupVal))
respJSON = resp.json()
#build matchingRecord array
if self.apiConfig['isArray']:
for key,val in respJSON[0].items():
matchingRecord[key] = val
else:
for key,val in respJSON.items():
matchingRecord[key] = val
return matchingRecord |
py | b40da8e283c71da438e124a58cb7cb2f576008b5 |
class LocationImportError(Exception):
pass
|
py | b40da91caf04d153726b78d1e3029e86f5cdae88 | from data.nomes_desord import nomes
from time import time
comps = 0
passadas = 0
trocas = 0
def bubble_sort(lista):
global comps, passadas, trocas
comps = 0
passadas = 0
trocas = 0
while True:
passadas +=1
trocou = False
for i in range (len(lista)-1):
comps +=1
if lista [i+1] < lista[i]:
lista[i+1], lista[i] = lista[i], lista[i + 1]
trocas +=1
trocou = True
if not trocou:
break
a = [2, 84, 66, 4, 14, 9, 95,57, 34, 10, 5, 8]
b = [88, 44, 33, 0, 99, 55, 77, 22, 11, 66]
c = [99, 88, 77, 66, 55, 44, 33, 22, 11, 0]
#muda = nomes
nomes2 = nomes
#print(muda)
inicio = time()
bubble_sort(nomes2)
final = time()
#print(muda)
print(f'Comparisons: {comps}\nPasses: {passadas}\nSwaps: {trocas}\nTime in minutes: {(final - inicio)/60}')
|
py | b40da987d3d73ce28714983f29ea6b5e1f56c98b | import sys
sys.path.insert(0, '../')
from DatasetManager.chorale_dataset import ChoraleDataset
from DatasetManager.dataset_manager import DatasetManager
from DatasetManager.metadata import FermataMetadata, TickMetadata, KeyMetadata
import click
import random
from DeepBach.helpers import ensure_dir
import os, shutil
from tqdm import tqdm
import numpy as np
from collections import defaultdict
@click.command()
@click.option('--include_transpositions', is_flag=True,
help='whether to include transpositions (for dataset creation, or for pointing to the right folder at generation time)')
def main(include_transpositions):
get_pairs(folders=[1,2], num_comparisons=10)
def get_pairs(folders=None, eval_dir='../generations/paired_evaluation/iters_1_2', num_comparisons=10):
"""
Arguments:
        folders: list of generation folder ids (model iterations) being compared
        eval_dir: directory in which the paired evaluation files are written
        num_comparisons: number of chorale pairs to sample
"""
answer = {}
for iter_id, chorale_id in enumerate(np.random.choice(range(50), size=num_comparisons, replace=False)):
pair_dir = os.path.join(eval_dir, f'{iter_id}')
ensure_dir(pair_dir)
labels = ['a','b']
np.random.shuffle(labels)
for i, folder in enumerate(folders):
input_chorale = f'../generations/21/{folder}/c{chorale_id}.mid'
output_folder = f'{pair_dir}/{labels[i]}_{chorale_id}.mid'
if folder == 2:
answer[iter_id] = labels[i]
shutil.copy(input_chorale, output_folder)
print(answer)
if __name__ == '__main__':
main()
|
py | b40daba48689bfc0c40e695e749caf228a3e7f90 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import shutil
import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import PassVersionChecker
from paddle.fluid.core import AnalysisConfig
class TensorRTSubgraphPassElementwiseBroadcastTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data1 = fluid.data(
name="data1", shape=[-1, 3, 64, 64], dtype="float32")
data2 = fluid.data(
name="data2", shape=[-1, 3, 64, 1], dtype="float32")
eltwise_out = self.append_eltwise(data1, data2)
out = fluid.layers.batch_norm(eltwise_out, is_test=True)
self.feeds = {
"data1": np.random.random([1, 3, 64, 64]).astype("float32"),
"data2": np.random.random([1, 3, 64, 1]).astype("float32"),
}
self.enable_trt = True
self.trt_parameters = TensorRTSubgraphPassElementwiseBroadcastTest.TensorRTParam(
1 << 30, 32, 0, AnalysisConfig.Precision.Float32, True, False)
self.fetch_list = [out]
def append_eltwise(self, data1, data2):
return fluid.layers.elementwise_add(x=data1, y=data2, axis=0)
def test_check_output(self):
if os.path.exists(self.path + "_opt_cache"):
shutil.rmtree(self.path + "_opt_cache")
if core.is_compiled_with_cuda():
use_gpu = True
self.check_output_with_option(use_gpu)
self.assertTrue(
PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
class TensorRTSubgraphPassElementwiseBroadcastTest1(
TensorRTSubgraphPassElementwiseBroadcastTest):
def append_eltwise(self, data1, data2):
return fluid.layers.elementwise_sub(x=data1, y=data2, axis=0)
class TensorRTSubgraphPassElementwiseBroadcastTest2(
TensorRTSubgraphPassElementwiseBroadcastTest):
def append_eltwise(self, data1, data2):
return fluid.layers.elementwise_mul(x=data1, y=data2, axis=0)
class TensorRTSubgraphPassElementwiseBroadcastTest3(
TensorRTSubgraphPassElementwiseBroadcastTest):
def append_eltwise(self, data1, data2):
return fluid.layers.elementwise_div(x=data1, y=data2, axis=0)
if __name__ == "__main__":
unittest.main()
|
py | b40dad7a8683b29c746b8b468e09bc555b7821f9 | from __future__ import print_function
import numpy as np
import pickle
import glob,os
import gc
import astropy.constants as ac
import astropy.units as au
import pyathena as pa
from .utils import compare_files
from .set_units import *
from .plot_tools.plot_projection import plot_projection
from .plot_tools.plot_slices import slice2 as plot_slice
from .plot_tools.plot_slice_proj import plot_slice_proj as plot_slice_proj
from .plot_tools.set_aux import set_aux
from .cooling import coolftn
coolftn = coolftn()
unit = set_units(muH=1.4271)
to_Myr = unit['time'].to('Myr').value
to_Pok = (unit['pressure']/ac.k_B).cgs.value
to_microG = unit['magnetic_field'].value
to_surf = (unit['density']*unit['length']).to('Msun/pc^2').value
data_axis={'x':2,'y':1,'z':0}
domain_axis={'x':0,'y':1,'z':2}
proj_axis={'z':('x','y'),'y':('x','z'),'x':('y','z')}
def get_scalars(ds):
scal_fields=[]
for f in ds.field_list:
if f.startswith('specific_scalar'):
scal_fields.append(f)
return scal_fields
def create_surface_density(ds,surf_fname):
'''
specific function to create pickle file containing surface density
'''
time = ds.domain['time']*to_Myr
dx=ds.domain['dx']
surf_data={}
le=ds.domain['left_edge']
re=ds.domain['right_edge']
pdata=ds.read_all_data('density')
proj = pdata.mean(axis=0)
proj *= ds.domain['Lx'][2]*to_surf
bounds = np.array([le[0],re[0],le[1],re[1]])
surf_data={'time':time,'data':proj,'bounds':bounds}
pickle.dump(surf_data,open(surf_fname,'wb'),pickle.HIGHEST_PROTOCOL)
def create_projections(ds, fname, fields, weight_fields=dict(), aux=None,
force_recal=False, verbose=False):
"""
    Generic function to create a pickle file containing projections of fields along all axes
Parameters
----------
ds: AthenaDataset
fname: string
Name of pickle file to save data
fields: list of strings
List of field names to be projected
weight_fields: dictionary
Dictionary for weight fields to be multiplied to the projected field
aux: dictionary
aux dictionary
force_recal: bool
If True, override existing pickles
verbose: bool
If True, print verbose messages
"""
time = ds.domain['time']*to_Myr
dx = ds.domain['dx']
le = ds.domain['left_edge']
re = ds.domain['right_edge']
if aux is None:
aux = set_aux()
for f in fields:
fp = f + '_proj'
if fp in aux and 'weight_field' in aux[fp]:
weight_fields[f] = aux[fp]['weight_field']
import copy
field_to_proj = copy.copy(fields)
# Check if pickle exists and remove existing fields from field_to_proj
if os.path.isfile(fname) and not force_recal:
proj_data = pickle.load(open(fname, 'rb'))
existing_fields = proj_data['z'].keys()
for f in existing_fields:
if f in field_to_proj:
#print('Pickle for {} exists.'.format(f))
field_to_proj.remove(f)
else:
proj_data = {}
proj_data['time'] = time
for i, axis in enumerate(['x', 'y', 'z']):
bounds = np.array([le[domain_axis[proj_axis[axis][0]]],
re[domain_axis[proj_axis[axis][0]]],
le[domain_axis[proj_axis[axis][1]]],
re[domain_axis[proj_axis[axis][1]]]])
proj_data[axis] = {}
proj_data[axis + 'extent'] = bounds
if verbose:
print('making projections...', end='')
for f in field_to_proj:
if verbose:
print('{}...'.format(f),end='')
pdata = ds.read_all_data(f)
if isinstance(weight_fields,dict) and f in weight_fields:
if weight_fields[f] == 'cell_volume':
wdata = ds.domain['dx'].prod()*np.ones(pdata.shape)
else:
                wdata = ds.read_all_data(weight_fields[f])
pdata *= wdata
for i, axis in enumerate(['x', 'y', 'z']):
dl_cgs = (ds.domain['dx'][domain_axis[axis]]*unit['length'].cgs).value
proj = pdata.sum(axis=data_axis[axis])*dl_cgs
if isinstance(weight_fields, dict) and f in weight_fields:
wproj = wdata.sum(axis=data_axis[axis])*dl_cgs
proj /= wproj
bounds = np.array([le[domain_axis[proj_axis[axis][0]]],
re[domain_axis[proj_axis[axis][0]]],
le[domain_axis[proj_axis[axis][1]]],
re[domain_axis[proj_axis[axis][1]]]])
try:
proj *= aux[f]['proj_mul']
except KeyError:
#print('proj field {}: multiplication factor not available in aux.'.format(f))
pass
proj_data[axis][f] = proj
try:
del pdata
del wdata
except UnboundLocalError:
pass
if verbose:
print('')
pickle.dump(proj_data, open(fname, 'wb'), pickle.HIGHEST_PROTOCOL)
return proj_data
def create_slices(ds, fname, fields, force_recal=False, factors={}, verbose=False):
'''
generic function to create pickle file containing slices of fields
fields: list of field names to be sliced
factors: multiplication factors for unit conversion
'''
time = ds.domain['time']*to_Myr
dx = ds.domain['dx']
c = ds.domain['center']
le = ds.domain['left_edge']
re = ds.domain['right_edge']
cidx = pa.cc_idx(ds.domain,ds.domain['center']).astype('int')
import copy
field_to_slice = copy.copy(fields)
if os.path.isfile(fname) and not force_recal:
slc_data = pickle.load(open(fname,'rb'))
existing_fields = slc_data['z'].keys()
for f in existing_fields:
if f in field_to_slice:
#print('Pickle for {} exists'.format(f))
field_to_slice.remove(f)
else:
slc_data={}
slc_data['time']=time
for i,axis in enumerate(['x','y','z']):
bounds = np.array([le[domain_axis[proj_axis[axis][0]]],
re[domain_axis[proj_axis[axis][0]]],
le[domain_axis[proj_axis[axis][1]]],
re[domain_axis[proj_axis[axis][1]]]])
slc_data[axis] = {}
slc_data[axis+'extent'] = bounds
if verbose:
print('making slices...', end='')
for f in field_to_slice:
if verbose:
print('{}...'.format(f), end='')
if f == 'temperature':
if 'xn' in ds.derived_field_list:
pdata = ds.read_all_data('temperature')
else:
pdata = ds.read_all_data('T1')
elif f == 'magnetic_field_strength':
pdata = ds.read_all_data('magnetic_field')
elif f == 'ram_pok_z':
pdata = ds.read_all_data('kinetic_energy3')*2.0
elif f == 'pok':
pdata = ds.read_all_data('pressure')
elif f == 'velocity_z':
pdata = ds.read_all_data('velocity3')
elif f == 'mag_pok':
pdata = ds.read_all_data('magnetic_pressure')
elif f == 'nH':
pdata = ds.read_all_data('density')
else:
pdata = ds.read_all_data(f)
for i, axis in enumerate(['x','y','z']):
if f == 'temperature' and not 'xn' in ds.derived_field_list:
slc = coolftn.get_temp(pdata.take(cidx[i],axis=2-i))
elif f == 'magnetic_field_strength':
slc = np.sqrt((pdata.take(cidx[i],axis=2-i)**2).sum(axis=-1))
else:
slc = pdata.take(cidx[i],axis=2-i)
if f in factors:
slc_data[axis][f] = slc*factors[f]
else:
slc_data[axis][f] = slc
try:
del pdata
except UnboundLocalError:
pass
if verbose:
print('')
pickle.dump(slc_data, open(fname, 'wb'), pickle.HIGHEST_PROTOCOL)
return slc_data
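# Hedged usage sketch for create_slices/create_projections above; the vtk file
# name and the field choices are illustrative placeholders:
#
#   ds = pa.AthenaDataSet('R8_8pc.0300.vtk')
#   slc = create_slices(ds, 'R8_8pc.0300.slice.p', ['nH', 'temperature'],
#                       factors={'pok': to_Pok}, verbose=True)
#   prj = create_projections(ds, 'R8_8pc.0300.proj.p', ['rho'], aux=set_aux(), verbose=True)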
def create_all_pickles_mhd(force_recal=False, force_redraw=False, verbose=True, **kwargs):
"""
Original create_all_pickles used to extract and draw gas surface density and slices
"""
dir = kwargs['base_directory']+kwargs['directory']
fname=glob.glob(dir+'id0/'+kwargs['id']+'.????.vtk')
fname.sort()
if kwargs['range'] != '':
sp=kwargs['range'].split(',')
start = eval(sp[0])
end = eval(sp[1])
fskip = eval(sp[2])
else:
start = 0
end = len(fname)
fskip = 1
fname=fname[start:end:fskip]
#ngrids=len(glob.glob(dir+'id*/'+kwargs['id']+'*'+fname[0][-8:]))
ds=pa.AthenaDataSet(fname[0])
mhd='magnetic_field' in ds.field_list
cooling='pressure' in ds.field_list
Omega=kwargs['rotation']
rotation=kwargs['rotation'] != 0.
if verbose:
print("MHD:", mhd)
print("cooling:", cooling)
print("rotation:", rotation, Omega)
slc_fields=['nH','pok','temperature','velocity_z','ram_pok_z']
fields_to_draw=['star_particles','nH','temperature','pok','velocity_z']
if mhd:
slc_fields.append('magnetic_field_strength')
slc_fields.append('mag_pok')
fields_to_draw.append('magnetic_field_strength')
mul_factors={'pok':to_Pok,'magnetic_field_strength':to_microG,'mag_pok':to_Pok,'ram_pok_z':to_Pok}
scal_fields=get_scalars(ds)
slc_fields+=scal_fields
if not os.path.isdir(dir+'slice/'): os.mkdir(dir+'slice/')
if not os.path.isdir(dir+'surf/'): os.mkdir(dir+'surf/')
for i,f in enumerate(fname):
slcfname=dir+'slice/'+kwargs['id']+f[-9:-4]+'.slice.p'
surfname=dir+'surf/'+kwargs['id']+f[-9:-4]+'.surf.p'
tasks={'slice':(not compare_files(f,slcfname)) or force_recal,
'surf':(not compare_files(f,surfname)) or force_recal,
}
do_task=(tasks['slice'] or tasks['surf'])
if verbose:
print('file number: {} -- Tasks to be done ['.format(i),end='')
for k in tasks: print('{}:{} '.format(k,tasks[k]),end='')
print(']')
if do_task:
ds = pa.AthenaDataSet(f)
            if tasks['surf']: create_surface_density(ds, surfname)  # applies the Lx[2]*to_surf conversion internally
if tasks['slice']: create_slices(ds,slcfname,slc_fields,factors=mul_factors,force_recal=force_recal)
aux=set_aux(kwargs['id'])
for i,f in enumerate(fname):
slcfname=dir+'slice/'+kwargs['id']+f[-9:-4]+'.slice.p'
surfname=dir+'surf/'+kwargs['id']+f[-9:-4]+'.surf.p'
starpardir='id0/'
if os.path.isdir(dir+'starpar/'): starpardir='starpar/'
starfname=dir+starpardir+kwargs['id']+f[-9:-4]+'.starpar.vtk'
tasks={'slice':(not compare_files(f,slcfname+'ng')) or force_redraw,
'surf':(not compare_files(f,surfname+'ng')) or force_redraw,
}
do_task=(tasks['slice'] and tasks['surf'])
if verbose:
print('file number: {} -- Tasks to be done ['.format(i),end='')
for k in tasks: print('{}:{} '.format(k,tasks[k]),end='')
print(']')
if tasks['surf']:
plot_projection(surfname,starfname,runaway=True,aux=aux['surface_density'])
if tasks['slice']:
plot_slice(slcfname,starfname,fields_to_draw,aux=aux)
def create_all_pickles(
datadir, problem_id,
nums=None,
fields_slc=['nH', 'nHI', 'temperature', 'xn', 'ne', 'nesq', 'Erad0', 'Erad1'],
fields_proj=['rho', 'xn', 'nesq'],
fields_draw=['star_particles', 'rho_proj', 'xn_proj', 'nesq_proj',
'nH', 'temperature', 'xn', 'Erad0', 'Erad1'],
force_recal=False, force_redraw=False, no_save=False, savdir=None,
verbose=True, **plt_args):
"""
Function to pickle slices and projections from AthenaDataset and draw snapshots.
Set force_recal to True if additional fields need to be extracted.
Parameters
----------
datadir: string
Base data directory
problem_id: string
Prefix for vtk files
    nums: list of integers
List of vtk output numbers. Search all vtk files in the directory if None.
fields_slc: list of strings
List of field names to be sliced
fields_proj: list of strings
List of field names to be projected
fields_draw: list of strings
List of field names to be drawn
force_recal: bool
If True, override existing pickles.
force_redraw: bool
If True, override existing figures.
no_save: bool
If True, returns a list of matplotlib figure objects instead of
saving them.
savdir: str
Directory to save snapshot. If None, saves in snapshot subdirectory under datadir.
Default value is None.
verbose: bool
Print verbose message
Returns
-------
fig: figures
Returns lists of figure if no_save is True.
"""
aux = set_aux(problem_id)
_plt_args = dict(zoom=1.0)
_plt_args.update(**plt_args)
fglob = os.path.join(datadir, problem_id + '.????.vtk')
fname = sorted(glob.glob(fglob))
if not fname:
fglob = os.path.join(datadir, 'vtk', problem_id + '.????.vtk')
fname = glob.glob(fglob)
if not fname:
fglob = os.path.join(datadir, 'vtk', 'id0', problem_id + '.????.vtk')
fname = glob.glob(fglob)
if not fname:
fglob = os.path.join(datadir, 'id0', problem_id + '.????.vtk')
fname = glob.glob(fglob)
fname.sort()
    if not fname:
        raise FileNotFoundError('No vtk files are found in {0:s}'.format(datadir))
if nums is None:
nums = [int(f[-8:-4]) for f in fname]
if nums[0] == 0: # remove the zeroth snapshot
start = 1
del nums[0]
else:
start = 0
end = len(fname)
fskip = 1
fname = fname[start:end:fskip]
else:
nums = np.atleast_1d(nums)
fname = [fname[i] for i in nums]
#ngrids = len(glob.glob(datadir+'id*/' + id + '*' + fname[0][-8:]))
ds = pa.AthenaDataSet(fname[0])
mhd = 'magnetic_field' in ds.field_list
cooling = 'pressure' in ds.field_list
print('[Create_pickle_all_rad]')
print('- basedir:', datadir)
print('- problem id:', problem_id)
print('- vtk file num:', end=' ')
for i in nums:
print(i,end=' ')
print('')
print('slc: {0:s}'.format(' '.join(fields_slc)))
print('proj: {0:s}'.format(' '.join(fields_proj)))
print('draw: {0:s}'.format(' '.join(fields_draw)))
if mhd:
        fields_slc.append('magnetic_field_strength')
        fields_slc.append('mag_pok')
        fields_draw.append('magnetic_field_strength')
mul_factors = {'pok':to_Pok,
'magnetic_field_strength':to_microG,
'mag_pok':to_Pok,
'ram_pok_z':to_Pok}
if not os.path.isdir(os.path.join(datadir, 'slice')):
os.mkdir(os.path.join(datadir, 'slice'))
if not os.path.isdir(os.path.join(datadir, 'proj')):
os.mkdir(os.path.join(datadir, 'proj'))
print('\n*** Extract slices and projections ***')
print('- num: ',end='')
for i, f in enumerate(fname):
print('{}'.format(int(f.split('.')[-2])), end=' ')
fname_slc = os.path.join(datadir, 'slice', problem_id + f[-9:-4] + '.slice.p')
fname_proj = os.path.join(datadir, 'proj', problem_id + f[-9:-4] + '.proj.p')
tasks = dict(slc=(not compare_files(f,fname_slc)) or force_recal,
proj=(not compare_files(f,fname_proj)) or force_recal)
do_task = (tasks['slc'] or tasks['proj'])
if do_task:
ds = pa.AthenaDataSet(f)
if tasks['slc']:
_ = create_slices(ds, fname_slc, fields_slc, factors=mul_factors,
force_recal=force_recal, verbose=verbose)
if tasks['proj']:
_ = create_projections(ds, fname_proj, fields_proj, aux=aux,
force_recal=force_recal, verbose=verbose)
del ds
gc.collect()
#print(fname)
print('')
print('*** Draw snapshots (zoom {0:.1f}) ***'.format(_plt_args['zoom']))
print('num: ',end='')
if no_save:
force_redraw = True
figs = []
if savdir is None:
savdir = os.path.join(datadir, 'snapshots')
if not os.path.isdir(savdir):
os.mkdir(savdir)
print('savdir:', savdir)
for i,f in enumerate(fname):
num = f.split('.')[-2]
print('{}'.format(int(num)), end=' ')
fname_slc = os.path.join(datadir, 'slice',
problem_id + f[-9:-4] + '.slice.p')
fname_proj = os.path.join(datadir, 'proj',
problem_id + f[-9:-4] + '.proj.p')
starpardir = 'id0'
if os.path.isdir(os.path.join(datadir, 'starpar')):
starpardir = 'starpar'
fname_sp = os.path.join(datadir, starpardir,
problem_id + f[-9:-4] + '.starpar.vtk')
        if _plt_args['zoom'] == 1.0:
            savname = os.path.join(savdir, problem_id + '.' + num + '.slc_proj.png')
else:
# append zoom factor
savname = os.path.join(savdir, problem_id + '.' + num + '.slc_proj-' + \
'zoom{0:02d}'.format(int(10.0*_plt_args['zoom'])) + '.png')
tasks = dict(slc_proj=(not compare_files(f, savname)) or force_redraw,
proj=(not compare_files(f, fname_proj+'ng')) or force_redraw)
do_task = (tasks['slc_proj'] and tasks['proj'])
if tasks['proj']:
plot_projection(fname_proj, fname_sp, 'rho', runaway=True,
aux=aux['rho_proj'])
if tasks['slc_proj']:
if no_save:
savname = None
fig = plot_slice_proj(fname_slc, fname_proj, fname_sp, fields_draw,
savname, aux=aux, **_plt_args)
figs.append(fig)
else:
plot_slice_proj(fname_slc, fname_proj, fname_sp, fields_draw,
savname, aux=aux, **_plt_args)
print('')
print('*** Done! ***')
if no_save:
return tuple(figs)
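# Hedged end-to-end example (paths, problem_id, and snapshot numbers are
# illustrative placeholders, not values from any particular simulation):
#
#   create_all_pickles('/data/R8_8pc_rad/', 'R8_8pc_rad', nums=[100, 200],
#                      force_redraw=True, zoom=2.0)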
|
py | b40dae82d27709382b4d0fecf76a9603ee5853b6 | import urllib.parse
from sp_api.base import Client, sp_endpoint, fill_query_params, ApiResponse
class Catalog(Client):
"""
:link: https://github.com/amzn/selling-partner-api-docs/blob/main/references/catalog-items-api/catalogItemsV0.md
"""
@sp_endpoint('/catalog/v0/items/{}')
def get_item(self, asin: str, **kwargs) -> ApiResponse:
"""
get_item(self, asin: str, **kwargs) -> ApiResponse
Returns a specified item and its attributes.
**Usage Plan:**
====================================== ==============
Rate (requests per second) Burst
====================================== ==============
1 1
====================================== ==============
For more information, see "Usage Plans and Rate Limits" in the Selling Partner API documentation.
Args:
asin: str
key MarketplaceId: str
**kwargs:
Returns:
GetCatalogItemResponse:
"""
return self._request(fill_query_params(kwargs.pop('path'), asin), params=kwargs)
@sp_endpoint('/catalog/v0/items')
def list_items(self, **kwargs) -> ApiResponse:
"""
list_items(self, **kwargs) -> ApiResponse
Returns a list of items and their attributes, based on a search query or item identifiers that you specify. When based on a search query, provide the Query parameter and optionally, the QueryContextId parameter. When based on item identifiers, provide a single appropriate parameter based on the identifier type, and specify the associated item value. MarketplaceId is always required.
**Usage Plan:**
====================================== ==============
Rate (requests per second) Burst
====================================== ==============
1 1
====================================== ==============
For more information, see "Usage Plans and Rate Limits" in the Selling Partner API documentation.
Args:
key MarketplaceId: str
key Query: str
key QueryContextId: str
key SellerSKU: str
key UPC: str
key EAN: str
key ISBN: str
key JAN: str
Returns:
ListCatalogItemsResponse:
"""
if 'Query' in kwargs:
kwargs.update({'Query': urllib.parse.quote_plus(kwargs.pop('Query'))})
return self._request(kwargs.pop('path'), params=kwargs)
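    # Hedged usage sketch: credentials/marketplace setup follows the generic
    # sp_api Client pattern and is not shown in full here; the ASIN and the
    # MarketplaceId value are placeholders.
    #
    #   catalog = Catalog(credentials=my_credentials_dict)
    #   item = catalog.get_item('B00EXAMPLE', MarketplaceId='ATVPDKIKX0DER')
    #   hits = catalog.list_items(MarketplaceId='ATVPDKIKX0DER', Query='red shoes')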
|
py | b40daf02ebfe4e7de34dc1c86c95bc18dfd063b6 | from pypy.conftest import gettestobjspace
class AppTestSieve:
def setup_class(cls):
cls.space = gettestobjspace(usemodules=('_demo',))
def test_sieve(self):
import _demo
lst = _demo.sieve(100)
assert lst == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41,
43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97]
|
py | b40db04088eb82e2fc71a3f6bbaeae9d62fec030 | import argparse
import json
import datetime
import logging
import time
from IPython.core.display import display
from ipywidgets import widgets
from graph_notebook.authentication.iam_credentials_provider.credentials_factory import credentials_provider_factory
from graph_notebook.authentication.iam_credentials_provider.credentials_provider import Credentials
from graph_notebook.configuration.generate_config import Configuration, AuthModeEnum
from graph_notebook.magics.parsing import str_to_namespace_var
from graph_notebook.ml.sagemaker import start_export, get_export_status, start_processing_job, get_processing_status, \
start_training, get_training_status, start_create_endpoint, get_endpoint_status, EXPORT_SERVICE_NAME
logger = logging.getLogger("neptune_ml_magic_handler")
DEFAULT_WAIT_INTERVAL = 60
DEFAULT_WAIT_TIMEOUT = 3600
def generate_neptune_ml_parser():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(help='sub-command help', dest='which')
# Begin Export subparsers
parser_export = subparsers.add_parser('export', help='')
export_sub_parsers = parser_export.add_subparsers(help='', dest='which_sub')
export_start_parser = export_sub_parsers.add_parser('start', help='start a new exporter job')
export_start_parser.add_argument('--export-url', type=str,
help='api gateway endpoint to call the exporter such as foo.execute-api.us-east-1.amazonaws.com/v1')
export_start_parser.add_argument('--export-iam', action='store_true',
help='flag for whether to sign requests to the export url with SigV4')
export_start_parser.add_argument('--export-no-ssl', action='store_true',
help='toggle ssl off when connecting to exporter')
export_start_parser.add_argument('--wait', action='store_true', help='wait for the exporter to finish running')
export_start_parser.add_argument('--wait-interval', default=DEFAULT_WAIT_INTERVAL, type=int,
help=f'time in seconds between export status check. default: {DEFAULT_WAIT_INTERVAL}')
export_start_parser.add_argument('--wait-timeout', default=DEFAULT_WAIT_TIMEOUT, type=int,
help=f'time in seconds to wait for a given export job to complete before returning most recent status. default: {DEFAULT_WAIT_TIMEOUT}')
export_start_parser.add_argument('--store-to', default='', dest='store_to',
help='store result to this variable. If --wait is specified, will store the final status.')
export_status_parser = export_sub_parsers.add_parser('status', help='obtain status of exporter job')
export_status_parser.add_argument('--job-id', type=str, help='job id to check the status of')
export_status_parser.add_argument('--export-url', type=str,
help='api gateway endpoint to call the exporter such as foo.execute-api.us-east-1.amazonaws.com/v1')
export_status_parser.add_argument('--export-iam', action='store_true',
help='flag for whether to sign requests to the export url with SigV4')
export_status_parser.add_argument('--export-no-ssl', action='store_true',
help='toggle ssl off when connecting to exporter')
export_status_parser.add_argument('--store-to', default='', dest='store_to',
help='store result to this variable')
export_status_parser.add_argument('--wait', action='store_true', help='wait for the exporter to finish running')
export_status_parser.add_argument('--wait-interval', default=DEFAULT_WAIT_INTERVAL, type=int,
help=f'time in seconds between export status check. default: {DEFAULT_WAIT_INTERVAL}')
export_status_parser.add_argument('--wait-timeout', default=DEFAULT_WAIT_TIMEOUT, type=int,
help=f'time in seconds to wait for a given export job to complete before returning most recent status. default: {DEFAULT_WAIT_TIMEOUT}')
# Begin dataprocessing subparsers
parser_dataprocessing = subparsers.add_parser('dataprocessing', help='')
dataprocessing_subparsers = parser_dataprocessing.add_subparsers(help='dataprocessing sub-command',
dest='which_sub')
dataprocessing_start_parser = dataprocessing_subparsers.add_parser('start', help='start a new dataprocessing job')
    dataprocessing_start_parser.add_argument('--job-id', type=str,
                                             help='the unique identifier for this processing job')
    dataprocessing_start_parser.add_argument('--s3-input-uri', type=str, help='input data location in s3')
    dataprocessing_start_parser.add_argument('--s3-processed-uri', type=str, help='processed data location in s3')
dataprocessing_start_parser.add_argument('--config-file-name', type=str, default='')
dataprocessing_start_parser.add_argument('--store-to', type=str, default='',
help='store result to this variable')
dataprocessing_start_parser.add_argument('--wait', action='store_true',
help='wait for the exporter to finish running')
dataprocessing_start_parser.add_argument('--wait-interval', default=DEFAULT_WAIT_INTERVAL, type=int,
help='wait interval between checks for export status')
dataprocessing_start_parser.add_argument('--wait-timeout', default=DEFAULT_WAIT_TIMEOUT, type=int,
help='timeout while waiting for export job to complete')
dataprocessing_status_parser = dataprocessing_subparsers.add_parser('status',
help='obtain the status of an existing dataprocessing job')
dataprocessing_status_parser.add_argument('--job-id', type=str)
dataprocessing_status_parser.add_argument('--store-to', type=str, default='',
help='store result to this variable')
dataprocessing_status_parser.add_argument('--wait', action='store_true',
help='wait for the exporter to finish running')
dataprocessing_status_parser.add_argument('--wait-interval', default=DEFAULT_WAIT_INTERVAL, type=int,
help='wait interval between checks for export status')
dataprocessing_status_parser.add_argument('--wait-timeout', default=DEFAULT_WAIT_TIMEOUT, type=int,
help='timeout while waiting for export job to complete')
# Begin training subparsers
parser_training = subparsers.add_parser('training', help='training command help')
training_subparsers = parser_training.add_subparsers(help='training sub-command help',
dest='which_sub')
training_start_parser = training_subparsers.add_parser('start', help='start a new training job')
training_start_parser.add_argument('--job-id', type=str, default='')
training_start_parser.add_argument('--data-processing-id', type=str, default='')
training_start_parser.add_argument('--s3-output-uri', type=str, default='')
training_start_parser.add_argument('--instance-type', type=str, default='')
training_start_parser.add_argument('--store-to', type=str, default='', help='store result to this variable')
training_start_parser.add_argument('--wait', action='store_true',
help='wait for the exporter to finish running')
training_start_parser.add_argument('--wait-interval', default=DEFAULT_WAIT_INTERVAL, type=int,
help='wait interval between checks for export status')
training_start_parser.add_argument('--wait-timeout', default=DEFAULT_WAIT_TIMEOUT, type=int,
help='timeout while waiting for export job to complete')
training_status_parser = training_subparsers.add_parser('status',
help='obtain the status of an existing training job')
training_status_parser.add_argument('--job-id', type=str)
training_status_parser.add_argument('--store-to', type=str, default='', help='store result to this variable')
training_status_parser.add_argument('--wait', action='store_true',
help='wait for the exporter to finish running')
training_status_parser.add_argument('--wait-interval', default=DEFAULT_WAIT_INTERVAL, type=int,
help='wait interval between checks for export status')
training_status_parser.add_argument('--wait-timeout', default=DEFAULT_WAIT_TIMEOUT, type=int,
help='timeout while waiting for export job to complete')
# Begin endpoint subparsers
parser_endpoint = subparsers.add_parser('endpoint', help='endpoint command help')
endpoint_subparsers = parser_endpoint.add_subparsers(help='endpoint sub-command help',
dest='which_sub')
endpoint_start_parser = endpoint_subparsers.add_parser('create', help='create a new endpoint')
endpoint_start_parser.add_argument('--job-id', type=str, default='')
endpoint_start_parser.add_argument('--model-job-id', type=str, default='')
endpoint_start_parser.add_argument('--instance-type', type=str, default='ml.r5.xlarge')
endpoint_start_parser.add_argument('--store-to', type=str, default='', help='store result to this variable')
endpoint_start_parser.add_argument('--wait', action='store_true',
help='wait for the exporter to finish running')
endpoint_start_parser.add_argument('--wait-interval', default=DEFAULT_WAIT_INTERVAL, type=int,
help='wait interval between checks for export status')
endpoint_start_parser.add_argument('--wait-timeout', default=DEFAULT_WAIT_TIMEOUT, type=int,
help='timeout while waiting for export job to complete')
endpoint_status_parser = endpoint_subparsers.add_parser('status',
help='obtain the status of an existing endpoint creation job')
endpoint_status_parser.add_argument('--job-id', type=str, default='')
endpoint_status_parser.add_argument('--store-to', type=str, default='', help='store result to this variable')
endpoint_status_parser.add_argument('--wait', action='store_true',
help='wait for the exporter to finish running')
endpoint_status_parser.add_argument('--wait-interval', default=DEFAULT_WAIT_INTERVAL, type=int,
help='wait interval between checks for export status')
endpoint_status_parser.add_argument('--wait-timeout', default=DEFAULT_WAIT_TIMEOUT, type=int,
help='timeout while waiting for export job to complete')
return parser
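# Hedged sketch of driving the generated parser directly (the magic line string
# below is illustrative only):
#
#   parser = generate_neptune_ml_parser()
#   args = parser.parse_args(
#       'export start --export-url foo.execute-api.us-east-1.amazonaws.com/v1 --wait'.split())
#   # args.which == 'export', args.which_sub == 'start', args.wait is True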
def neptune_ml_export_start(params, export_url: str, export_ssl: bool = True, creds: Credentials = None):
if type(params) is str:
params = json.loads(params)
job = start_export(export_url, params, export_ssl, creds)
return job
def wait_for_export(export_url: str, job_id: str, output: widgets.Output,
export_ssl: bool = True, wait_interval: int = DEFAULT_WAIT_INTERVAL,
wait_timeout: int = DEFAULT_WAIT_TIMEOUT, creds: Credentials = None):
job_id_output = widgets.Output()
update_widget_output = widgets.Output()
with output:
display(job_id_output, update_widget_output)
with job_id_output:
print(f'Wait called on export job {job_id}')
with update_widget_output:
beginning_time = datetime.datetime.utcnow()
while datetime.datetime.utcnow() - beginning_time < (datetime.timedelta(seconds=wait_timeout)):
update_widget_output.clear_output()
print('Checking for latest status...')
export_status = get_export_status(export_url, export_ssl, job_id, creds)
if export_status['status'] in ['succeeded', 'failed']:
print('Export is finished')
return export_status
else:
print(f'Status is {export_status["status"]}')
print(f'Waiting for {wait_interval} before checking again...')
time.sleep(wait_interval)
def neptune_ml_export(args: argparse.Namespace, config: Configuration, output: widgets.Output, cell: str):
auth_mode = AuthModeEnum.IAM if args.export_iam else AuthModeEnum.DEFAULT
creds = None
if auth_mode == AuthModeEnum.IAM:
creds = credentials_provider_factory(config.iam_credentials_provider_type).get_iam_credentials()
export_ssl = not args.export_no_ssl
if args.which_sub == 'start':
if cell == '':
return 'Cell body must have json payload or reference notebook variable using syntax ${payload_var}'
export_job = neptune_ml_export_start(cell, args.export_url, export_ssl, creds)
if args.wait:
return wait_for_export(args.export_url, export_job['jobId'],
output, export_ssl, args.wait_interval, args.wait_timeout, creds)
else:
return export_job
elif args.which_sub == 'status':
if args.wait:
status = wait_for_export(args.export_url, args.job_id, output, export_ssl, args.wait_interval,
args.wait_timeout, creds)
else:
status = get_export_status(args.export_url, export_ssl, args.job_id, creds)
return status
def wait_for_dataprocessing(job_id: str, config: Configuration, request_param_generator, output: widgets.Output,
wait_interval: int = DEFAULT_WAIT_INTERVAL, wait_timeout: int = DEFAULT_WAIT_TIMEOUT):
job_id_output = widgets.Output()
update_status_output = widgets.Output()
with output:
display(job_id_output, update_status_output)
with job_id_output:
print(f'Wait called on dataprocessing job {job_id}')
with update_status_output:
beginning_time = datetime.datetime.utcnow()
while datetime.datetime.utcnow() - beginning_time < (datetime.timedelta(seconds=wait_timeout)):
update_status_output.clear_output()
status = get_processing_status(config.host, str(config.port), config.ssl, request_param_generator, job_id)
if status['status'] in ['Completed', 'Failed']:
print('Data processing is finished')
return status
else:
print(f'Status is {status["status"]}')
                print(f'Waiting for {wait_interval} seconds before checking again...')
time.sleep(wait_interval)
def neptune_ml_dataprocessing(args: argparse.Namespace, request_param_generator, output: widgets.Output,
config: Configuration, params: dict = None):
if args.which_sub == 'start':
if params is None or params == '' or params == {}:
params = {
'inputDataS3Location': args.s3_input_uri,
'processedDataS3Location': args.s3_processed_uri,
'id': args.job_id,
'configFileName': args.config_file_name
}
processing_job = start_processing_job(config.host, str(config.port), config.ssl,
request_param_generator, params)
job_id = params['id']
if args.wait:
return wait_for_dataprocessing(job_id, config, request_param_generator,
output, args.wait_interval, args.wait_timeout)
else:
return processing_job
elif args.which_sub == 'status':
if args.wait:
return wait_for_dataprocessing(args.job_id, config, request_param_generator, output, args.wait_interval,
args.wait_timeout)
else:
return get_processing_status(config.host, str(config.port), config.ssl, request_param_generator,
args.job_id)
else:
return f'Sub parser "{args.which} {args.which_sub}" was not recognized'
def wait_for_training(job_id: str, config: Configuration, request_param_generator, output: widgets.Output,
wait_interval: int = DEFAULT_WAIT_INTERVAL, wait_timeout: int = DEFAULT_WAIT_TIMEOUT):
job_id_output = widgets.Output()
update_status_output = widgets.Output()
with output:
display(job_id_output, update_status_output)
with job_id_output:
print(f'Wait called on training job {job_id}')
with update_status_output:
beginning_time = datetime.datetime.utcnow()
while datetime.datetime.utcnow() - beginning_time < (datetime.timedelta(seconds=wait_timeout)):
update_status_output.clear_output()
status = get_training_status(config.host, str(config.port), config.ssl, request_param_generator, job_id)
if status['status'] in ['Completed', 'Failed']:
print('Training is finished')
return status
else:
print(f'Status is {status["status"]}')
                print(f'Waiting for {wait_interval} seconds before checking again...')
time.sleep(wait_interval)
def neptune_ml_training(args: argparse.Namespace, request_param_generator, config: Configuration,
output: widgets.Output, params):
if args.which_sub == 'start':
if params is None or params == '' or params == {}:
params = {
"id": args.job_id,
"dataProcessingJobId": args.data_processing_id,
"trainingInstanceType": args.instance_type,
"trainModelS3Location": args.s3_output_uri
}
training_job = start_training(config.host, str(config.port), config.ssl, request_param_generator, params)
if args.wait:
return wait_for_training(training_job['id'], config, request_param_generator, output, args.wait_interval,
args.wait_timeout)
else:
return training_job
elif args.which_sub == 'status':
if args.wait:
return wait_for_training(args.job_id, config, request_param_generator, output, args.wait_interval,
args.wait_timeout)
else:
return get_training_status(config.host, str(config.port), config.ssl, request_param_generator,
args.job_id)
else:
return f'Sub parser "{args.which} {args.which_sub}" was not recognized'
def wait_for_endpoint(job_id: str, config: Configuration, request_param_generator, output: widgets.Output,
wait_interval: int = DEFAULT_WAIT_INTERVAL, wait_timeout: int = DEFAULT_WAIT_TIMEOUT):
job_id_output = widgets.Output()
update_status_output = widgets.Output()
with output:
display(job_id_output, update_status_output)
with job_id_output:
print(f'Wait called on endpoint creation job {job_id}')
with update_status_output:
beginning_time = datetime.datetime.utcnow()
while datetime.datetime.utcnow() - beginning_time < (datetime.timedelta(seconds=wait_timeout)):
update_status_output.clear_output()
status = get_endpoint_status(config.host, str(config.port), config.ssl, request_param_generator, job_id)
if status['status'] in ['InService', 'Failed']:
print('Endpoint creation is finished')
return status
else:
print(f'Status is {status["status"]}')
                print(f'Waiting for {wait_interval} seconds before checking again...')
time.sleep(wait_interval)
def neptune_ml_endpoint(args: argparse.Namespace, request_param_generator,
config: Configuration, output: widgets.Output, params):
if args.which_sub == 'create':
if params is None or params == '' or params == {}:
params = {
"id": args.job_id,
"mlModelTrainingJobId": args.model_job_id,
'instanceType': args.instance_type
}
create_endpoint_job = start_create_endpoint(config.host, str(config.port), config.ssl,
request_param_generator, params)
if args.wait:
return wait_for_endpoint(create_endpoint_job['id'], config, request_param_generator, output,
args.wait_interval, args.wait_timeout)
else:
return create_endpoint_job
elif args.which_sub == 'status':
if args.wait:
return wait_for_endpoint(args.job_id, config, request_param_generator, output,
args.wait_interval, args.wait_timeout)
else:
return get_endpoint_status(config.host, str(config.port), config.ssl, request_param_generator, args.job_id)
else:
return f'Sub parser "{args.which} {args.which_sub}" was not recognized'
def neptune_ml_magic_handler(args, request_param_generator, config: Configuration, output: widgets.Output,
cell: str = '', local_ns: dict = None) -> any:
if local_ns is None:
local_ns = {}
cell = str_to_namespace_var(cell, local_ns)
if args.which == 'export':
return neptune_ml_export(args, config, output, cell)
elif args.which == 'dataprocessing':
return neptune_ml_dataprocessing(args, request_param_generator, output, config, cell)
elif args.which == 'training':
return neptune_ml_training(args, request_param_generator, config, output, cell)
elif args.which == 'endpoint':
return neptune_ml_endpoint(args, request_param_generator, config, output, cell)
else:
        return f'Sub parser "{args.which}" was not recognized'
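# Illustrative sketch only (not part of the original module): how a parsed
# namespace is routed through neptune_ml_magic_handler. The namespace is built
# by hand here to mirror the arguments defined by the parser above; in real use
# the parser supplies it, and request_param_generator / config / output come
# from the notebook's magics setup.
#
# args = argparse.Namespace(which='dataprocessing', which_sub='status',
#                           job_id='my-dataprocessing-job', wait=False)
# result = neptune_ml_magic_handler(args, request_param_generator, config,
#                                   widgets.Output())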
|
py | b40db11cb05c9fdcad4f0c60733cfc9527da2b2c | # ------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License")
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
from torchvision.datasets.vision import VisionDataset
import torchvision
import torch
import numpy as np
import json
import cv2
import random
import PIL
import torchvision.transforms as T
import torchvision.transforms.functional as F
from util.box_ops import box_xyxy_to_cxcywh
from PIL import Image
coco_classes_originID = {
"person": 1,
"bicycle": 2,
"car": 3,
"motorcycle": 4,
"airplane": 5,
"bus": 6,
"train": 7,
"truck": 8,
"boat": 9,
"traffic light": 10,
"fire hydrant": 11,
"stop sign": 13,
"parking meter": 14,
"bench": 15,
"bird": 16,
"cat": 17,
"dog": 18,
"horse": 19,
"sheep": 20,
"cow": 21,
"elephant": 22,
"bear": 23,
"zebra": 24,
"giraffe": 25,
"backpack": 27,
"umbrella": 28,
"handbag": 31,
"tie": 32,
"suitcase": 33,
"frisbee": 34,
"skis": 35,
"snowboard": 36,
"sports ball": 37,
"kite": 38,
"baseball bat": 39,
"baseball glove": 40,
"skateboard": 41,
"surfboard": 42,
"tennis racket": 43,
"bottle": 44,
"wine glass": 46,
"cup": 47,
"fork": 48,
"knife": 49,
"spoon": 50,
"bowl": 51,
"banana": 52,
"apple": 53,
"sandwich": 54,
"orange": 55,
"broccoli": 56,
"carrot": 57,
"hot dog": 58,
"pizza": 59,
"donut": 60,
"cake": 61,
"chair": 62,
"couch": 63,
"potted plant": 64,
"bed": 65,
"dining table": 67,
"toilet": 70,
"tv": 72,
"laptop": 73,
"mouse": 74,
"remote": 75,
"keyboard": 76,
"cell phone": 77,
"microwave": 78,
"oven": 79,
"toaster": 80,
"sink": 81,
"refrigerator": 82,
"book": 84,
"clock": 85,
"vase": 86,
"scissors": 87,
"teddy bear": 88,
"hair drier": 89,
"toothbrush": 90,
}
coco_instance_ID_to_name = {
1: "person",
2: "bicycle",
3: "car",
4: "motorcycle",
5: "airplane",
6: "bus",
7: "train",
8: "truck",
9: "boat",
10: "traffic light",
11: "fire hydrant",
13: "stop sign",
14: "parking meter",
15: "bench",
16: "bird",
17: "cat",
18: "dog",
19: "horse",
20: "sheep",
21: "cow",
22: "elephant",
23: "bear",
24: "zebra",
25: "giraffe",
27: "backpack",
28: "umbrella",
31: "handbag",
32: "tie",
33: "suitcase",
34: "frisbee",
35: "skis",
36: "snowboard",
37: "sports ball",
38: "kite",
39: "baseball bat",
40: "baseball glove",
41: "skateboard",
42: "surfboard",
43: "tennis racket",
44: "bottle",
46: "wine glass",
47: "cup",
48: "fork",
49: "knife",
50: "spoon",
51: "bowl",
52: "banana",
53: "apple",
54: "sandwich",
55: "orange",
56: "broccoli",
57: "carrot",
58: "hot dog",
59: "pizza",
60: "donut",
61: "cake",
62: "chair",
63: "couch",
64: "potted plant",
65: "bed",
67: "dining table",
70: "toilet",
72: "tv",
73: "laptop",
74: "mouse",
75: "remote",
76: "keyboard",
77: "cell phone",
78: "microwave",
79: "oven",
80: "toaster",
81: "sink",
82: "refrigerator",
84: "book",
85: "clock",
86: "vase",
87: "scissors",
88: "teddy bear",
89: "hair drier",
90: "toothbrush",
}
hoi_interaction_names = json.loads(
open('/data/DATA/VCOCO/vcoco_verb_names.json', 'r').readlines()[0])['verb_names']
def convert_xywh2x1y1x2y2(box, shape, flip):
ih, iw = shape[:2]
x, y, w, h = box
if flip == 1:
x1_org = x
x2_org = x + w - 1
x2 = iw - 1 - x1_org
x1 = iw - 1 - x2_org
else:
x1 = x
x2 = x + w - 1
x1 = max(x1, 0)
x2 = min(x2, iw-1)
y1 = max(y, 0)
y2 = min(y + h - 1, ih-1)
return [x1, y1, x2, y2]
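# Worked example (added for clarity, not in the original source): on a 10-pixel
# wide image (iw=10), the box x=2, y=3, w=4, h=2 covers columns 2..5. With
# flip=1 the mirrored span is columns 4..7, which is what the x1/x2 swap above
# computes:
#   convert_xywh2x1y1x2y2([2, 3, 4, 2], (8, 10), flip=1) == [4, 3, 7, 4]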
def get_det_annotation_from_odgt(item, shape, flip, gt_size_min=1):
total_boxes, gt_boxes, ignored_boxes = [], [], []
for annot in item['gtboxes']:
box = convert_xywh2x1y1x2y2(annot['box'], shape, flip)
x1, y1, x2, y2 = box
        # Fall back to -1 for tags outside the COCO label set so the lookup
        # cannot raise KeyError; total_boxes must keep one entry per gtbox
        # because it is indexed by position in the hoi annotations later.
        cls_id = coco_classes_originID.get(annot['tag'], -1)
        total_boxes.append([x1, y1, x2, y2, cls_id, ])
        if cls_id == -1:
            continue
if annot.get('extra', {}).get('ignore', 0) == 1:
ignored_boxes.append(box)
continue
if (x2 - x1 + 1) * (y2 - y1 + 1) < gt_size_min ** 2:
ignored_boxes.append(box)
continue
if x2 <= x1 or y2 <= y1:
ignored_boxes.append(box)
continue
gt_boxes.append([x1, y1, x2, y2, cls_id, ])
return gt_boxes, ignored_boxes, total_boxes
def get_interaction_box(human_box, object_box, hoi_id):
hx1, hy1, hx2, hy2, hid = human_box
ox1, oy1, ox2, oy2, oid = object_box
# hcx, hcy = (hx1 + hx2) / 2, (hy1 + hy2) / 2
# ocx, ocy = (ox1 + ox2) / 2, (oy1 + oy2) / 2
# dx = (hcx - ocx) / 5
# dy = (hcy - ocy) / 5
# xx1, yy1, xx2, yy2 = list(map(int, [ox1 + dx, oy1 + dy, ox2 + dx, oy2 + dy]))
xx1, yy1, xx2, yy2 = min(hx1, ox1), min(hy1, oy1), max(hx2, ox2), max(hy2, oy2)
return [xx1, yy1, xx2, yy2, hoi_id]
def xyxy_to_cxcywh(box):
x0, y0, x1, y1, cid = box
return [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0), cid]
def get_hoi_annotation_from_odgt(item, total_boxes, scale):
human_boxes, object_boxes, action_boxes = [], [], []
human_labels, object_labels, action_labels = [], [], []
img_hh, img_ww = item['height'], item['width']
for hoi in item.get('hoi', []):
x1, y1, x2, y2, cls_id = list(map(int, total_boxes[hoi['subject_id']]))
human_box = x1 // scale, y1 // scale, x2 // scale, y2 // scale, cls_id
if cls_id == -1 or x1 >= x2 or y1 >= y2:
continue
x1, y1, x2, y2, cls_id = list(map(int, total_boxes[hoi['object_id']]))
object_box = x1 // scale, y1 // scale, x2 // scale, y2 // scale, cls_id
if cls_id == -1 or x1 >= x2 or y1 >= y2:
continue
hoi_id = hoi_interaction_names.index(hoi['interaction'])
hoi_box = get_interaction_box(human_box=human_box, object_box=object_box, hoi_id=hoi_id)
human_boxes.append(human_box[0:4])
object_boxes.append(object_box[0:4])
action_boxes.append(hoi_box[0:4])
human_labels.append(human_box[4])
object_labels.append(object_box[4])
action_labels.append(hoi_box[4])
return dict(
human_boxes=torch.from_numpy(np.array(human_boxes).astype(np.float32)),
human_labels=torch.from_numpy(np.array(human_labels)),
object_boxes=torch.from_numpy(np.array(object_boxes).astype(np.float32)),
object_labels=torch.from_numpy(np.array(object_labels)),
action_boxes=torch.from_numpy(np.array(action_boxes).astype(np.float32)),
action_labels=torch.from_numpy(np.array(action_labels)),
image_id=item['file_name'],
org_size=torch.as_tensor([int(img_hh), int(img_ww)]),
)
def parse_one_gt_line(gt_line, scale=1):
item = json.loads(gt_line)
img_name = item['file_name']
img_shape = item['height'], item['width']
gt_boxes, ignored_boxes, total_boxes = get_det_annotation_from_odgt(item, img_shape, flip=0)
interaction_boxes = get_hoi_annotation_from_odgt(item, total_boxes, scale)
return dict(image_id=img_name, annotations=interaction_boxes)
def hflip(image, target):
flipped_image = F.hflip(image)
w, h = image.size
target = target.copy()
if "human_boxes" in target:
boxes = target["human_boxes"]
boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0])
target["human_boxes"] = boxes
if "object_boxes" in target:
boxes = target["object_boxes"]
boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0])
target["object_boxes"] = boxes
if "action_boxes" in target:
boxes = target["action_boxes"]
boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0])
target["action_boxes"] = boxes
return flipped_image, target
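# Worked example (added for clarity, not in the original source): the column
# reorder [2, 1, 0, 3] swaps x1 and x2 before mirroring, so a box
# [x1, y1, x2, y2] on an image of width w becomes [w - x2, y1, w - x1, y2].
# With w=100, the box [10, 20, 30, 40] flips to [70, 20, 90, 40].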
class RandomHorizontalFlip(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, img, target):
if random.random() < self.p:
return hflip(img, target)
return img, target
class RandomAdjustImage(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, img, target):
if random.random() < self.p:
img = F.adjust_brightness(img, random.choice([0.8, 0.9, 1.0, 1.1, 1.2]))
if random.random() < self.p:
img = F.adjust_contrast(img, random.choice([0.8, 0.9, 1.0, 1.1, 1.2]))
return img, target
class RandomSelect(object):
"""
Randomly selects between transforms1 and transforms2,
with probability p for transforms1 and (1 - p) for transforms2
"""
def __init__(self, transforms1, transforms2, p=0.5):
self.transforms1 = transforms1
self.transforms2 = transforms2
self.p = p
def __call__(self, img, target):
if random.random() < self.p:
return self.transforms1(img, target)
return self.transforms2(img, target)
def resize(image, target, size, max_size=None):
def get_size_with_aspect_ratio(image_size, size, max_size=None):
w, h = image_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(round(max_size * min_original_size / max_original_size))
if (w <= h and w == size) or (h <= w and h == size):
return h, w
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return oh, ow
rescale_size = get_size_with_aspect_ratio(image_size=image.size, size=size, max_size=max_size)
rescaled_image = F.resize(image, rescale_size)
if target is None:
return rescaled_image, None
ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size))
ratio_width, ratio_height = ratios
target = target.copy()
if "human_boxes" in target:
boxes = target["human_boxes"]
scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
target["human_boxes"] = scaled_boxes
if "object_boxes" in target:
boxes = target["object_boxes"]
scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
target["object_boxes"] = scaled_boxes
if "action_boxes" in target:
boxes = target["action_boxes"]
scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
target["action_boxes"] = scaled_boxes
return rescaled_image, target
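# Worked example (added for clarity, not in the original source): for an
# 800x600 image (w=800, h=600) with size=480 and max_size=1333, the shorter
# side is scaled to 480 while keeping the aspect ratio, giving
# (oh, ow) = (480, 640). If that scaling would push the longer side past
# max_size, `size` is first reduced so the longer side lands on max_size.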
class RandomResize(object):
def __init__(self, sizes, max_size=None):
assert isinstance(sizes, (list, tuple))
self.sizes = sizes
self.max_size = max_size
def __call__(self, img, target=None):
size = random.choice(self.sizes)
return resize(img, target, size, self.max_size)
def crop(image, org_target, region):
cropped_image = F.crop(image, *region)
target = org_target.copy()
i, j, h, w = region
fields = ["human_labels", "object_labels", "action_labels"]
if "human_boxes" in target:
boxes = target["human_boxes"]
max_size = torch.as_tensor([w, h], dtype=torch.float32)
cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
cropped_boxes = cropped_boxes.clamp(min=0)
target["human_boxes"] = cropped_boxes.reshape(-1, 4)
fields.append("human_boxes")
if "object_boxes" in target:
boxes = target["object_boxes"]
max_size = torch.as_tensor([w, h], dtype=torch.float32)
cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
cropped_boxes = cropped_boxes.clamp(min=0)
target["object_boxes"] = cropped_boxes.reshape(-1, 4)
fields.append("object_boxes")
if "action_boxes" in target:
boxes = target["action_boxes"]
max_size = torch.as_tensor([w, h], dtype=torch.float32)
cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
cropped_boxes = cropped_boxes.clamp(min=0)
target["action_boxes"] = cropped_boxes.reshape(-1, 4)
fields.append("action_boxes")
    # remove elements for which the boxes have zero area
if "human_boxes" in target and "object_boxes" in target:
cropped_boxes = target['human_boxes'].reshape(-1, 2, 2)
keep1 = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
cropped_boxes = target['object_boxes'].reshape(-1, 2, 2)
keep2 = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
keep = keep1 * keep2
if keep.any().sum() == 0:
return image, org_target
for field in fields:
target[field] = target[field][keep]
return cropped_image, target
class RandomSizeCrop(object):
def __init__(self, min_size: int, max_size: int):
self.min_size = min_size
self.max_size = max_size
def __call__(self, img: PIL.Image.Image, target: dict):
w = random.randint(self.min_size, min(img.width, self.max_size))
h = random.randint(self.min_size, min(img.height, self.max_size))
region = T.RandomCrop.get_params(img, (h, w))
return crop(img, target, region)
class ToTensor(object):
def __call__(self, img, target):
return torchvision.transforms.functional.to_tensor(img), target
class Normalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, image, target):
image = torchvision.transforms.functional.normalize(image, mean=self.mean, std=self.std)
if target is None:
return image, None
target = target.copy()
h, w = image.shape[-2:]
if "human_boxes" in target:
boxes = target["human_boxes"]
boxes = box_xyxy_to_cxcywh(boxes)
boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32)
target["human_boxes"] = boxes
if "object_boxes" in target:
boxes = target["object_boxes"]
boxes = box_xyxy_to_cxcywh(boxes)
boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32)
target["object_boxes"] = boxes
if "action_boxes" in target:
boxes = target["action_boxes"]
boxes = box_xyxy_to_cxcywh(boxes)
boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32)
target["action_boxes"] = boxes
return image, target
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
for t in self.transforms:
image, target = t(image, target)
return image, target
def make_hico_transforms(image_set, test_scale=-1):
scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800]
normalize = Compose([
ToTensor(),
Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
if image_set == 'train':
return Compose([
RandomHorizontalFlip(),
RandomAdjustImage(),
RandomSelect(
RandomResize(scales, max_size=1333),
Compose([
RandomResize([400, 500, 600]),
RandomSizeCrop(384, 600),
RandomResize(scales, max_size=1333),
])
),
normalize,
])
if image_set == 'test':
if test_scale == -1:
return Compose([
normalize,
])
assert 400 <= test_scale <= 800, test_scale
return Compose([
RandomResize([test_scale], max_size=1333),
normalize,
])
raise ValueError(f'unknown {image_set}')
class HoiDetection(VisionDataset):
def __init__(self, root, annFile, transform=None, target_transform=None, transforms=None):
super(HoiDetection, self).__init__(root, transforms, transform, target_transform)
        with open(annFile, 'r') as f:
            self.annotations = [parse_one_gt_line(line.strip()) for line in f]
self.transforms = transforms
def __getitem__(self, index):
ann = self.annotations[index]
img_name = ann['image_id']
target = ann['annotations']
if 'train2014' in img_name:
img_path = './data/vcoco/images/train2014/%s' % img_name
elif 'val2014' in img_name:
img_path = './data/vcoco/images/val2014/%s' % img_name
else:
raise NotImplementedError()
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
img = Image.fromarray(img[:, :, ::-1]).convert('RGB')
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
def __len__(self):
return len(self.annotations)
def build(image_set, test_scale=-1):
assert image_set in ['train', 'test'], image_set
if image_set == 'train':
annotation_file = './data/vcoco/vcoco_trainval_retag_hoitr.odgt'
else:
annotation_file = './data/vcoco/vcoco_test_retag_hoitr.odgt'
dataset = HoiDetection(root='./data/vcoco', annFile=annotation_file,
transforms=make_hico_transforms(image_set, test_scale))
return dataset
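# Illustrative sketch only (not part of the original source): building the
# train split and reading one sample, assuming the odgt annotation files and
# the ./data/vcoco image folders referenced above are in place.
#
# dataset = build('train')
# img, target = dataset[0]
# print(img.shape, target['human_boxes'].shape)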
|
py | b40db243914e0e0286df5d7d3166aae3f1eeac7e | """A dataframe concatenation function for PySpark."""
from collections import abc
import functools
from typing import (
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Union,
)
import warnings
import pandas as pd
from pyspark.sql import (
DataFrame as SparkDF,
functions as F,
)
from ons_utils.generic import list_convert
Key = Sequence[Union[str, Sequence[str]]]
# The order of these is important, big ---> small.
SPARK_NUMBER_TYPES = (
'decimal(10,0)',
'double',
'float',
'bigint',
'int',
'smallint',
'tinyint',
)
def concat(
frames: Union[Iterable[SparkDF], Mapping[Key, SparkDF]],
keys: Optional[Key] = None,
names: Optional[Union[str, Sequence[str]]] = None,
) -> SparkDF:
"""
Concatenate pyspark DataFrames with additional key columns.
Will attempt to cast column data types where schemas are mismatched
and fill empty columns with Nulls:
* upcasts to largest number data type present (for that column)
* casts to string if there is at least one dtype of 'string' for a
given column
Parameters
----------
frames : a sequence or mapping of SparkDF
If a mapping is passed, then the sorted keys will be used as the
`keys` argument, unless it is passed, in which case the values
will be selected.
keys : a sequence of str or str sequences, optional
The keys to differentiate child dataframes in the concatenated
dataframe. Each key can have multiple parts but each key should
have an equal number of parts. The length of `names` should be
equal to the number of parts. Keys must be passed if `frames` is
a sequence.
names : str or list of str, optional
The name or names to give each new key column. Must match the
size of each key.
Returns
-------
SparkDF
A single DataFrame combining the given frames with a
``unionByName()`` call. The resulting DataFrame has new columns
for each given name, that contains the keys which identify the
child frames.
Notes
-----
This code is mostly adapted from :func:`pandas.concat`.
"""
if isinstance(frames, (SparkDF, str)):
raise TypeError(
"first argument must be an iterable of pyspark DataFrames,"
f" you passed an object of type '{type(frames)}'"
)
if len(frames) == 0:
raise ValueError("No objects to concatenate")
if isinstance(frames, abc.Sequence):
if keys and (len(frames) != len(keys)):
raise ValueError(
"keys must be same length as frames"
" when frames is a list or tuple"
)
if isinstance(frames, abc.Mapping):
if names is None:
raise ValueError(
"when the first argument is a mapping,"
" the names argument must be given"
)
if keys is None:
keys = list(frames.keys())
# If keys are passed with a mapping, then the mapping is subset
# using the keys. This also ensures the order is correct.
frames = [frames[k] for k in keys]
else:
frames = list(frames)
for frame in frames:
if not isinstance(frame, SparkDF):
raise TypeError(
f"cannot concatenate object of type '{type(frame)}'; "
"only pyspark.sql.DataFrame objs are valid"
)
schemas_df = _get_schemas_df(frames, keys, names)
schemas_are_equal = _compare_schemas(schemas_df)
# Allows dataframes with inconsistent schemas to be concatenated by
# filling empty columns with Nulls and casting some column data
# types where appropriate.
#
# Potentially remove when Spark 3.1.0 available.
if not schemas_are_equal:
frames = [
_ensure_consistent_schema(frame, schemas_df)
for frame in frames
]
# Potentially update with commented line when Spark 3.1.0 available.
# union = functools.partial(SparkDF.unionByName, allowMissingColumns=True)
union = SparkDF.unionByName
# If no keys or names are given then simply union the DataFrames.
if not names and not keys:
return functools.reduce(union, frames)
# Convert names and keys elements to a list if not already, so they
# can be iterated over in the next step.
names = list_convert(names)
keys = [list_convert(key) for key in keys]
if not all([len(key) == len(names) for key in keys]):
raise ValueError(
"the length of each key must equal the length of names"
)
if not all([len(key) == len(keys[0]) for key in keys]):
raise ValueError(
"all keys must be of equal length"
)
frames_to_concat = []
# Loop through each frame, and add each part in the keys to a new
# column defined by name.
for parts, frame in zip(keys, frames):
for name, part in reversed(tuple(zip(names, parts))):
frame = frame.select(F.lit(part).alias(name), '*')
frames_to_concat.append(frame)
return functools.reduce(union, frames_to_concat)
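# Illustrative sketch only (not part of the original module): concatenating two
# frames while tagging each with a key column. The `spark` session and the toy
# column values are assumed for the example.
#
# df_2020 = spark.createDataFrame([(1, 'a')], ['id', 'val'])
# df_2021 = spark.createDataFrame([(2, 'b')], ['id', 'val'])
# combined = concat([df_2020, df_2021], keys=['2020', '2021'], names='year')
# # `combined` has columns: year, id, val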
def _ensure_consistent_schema(
frame: SparkDF,
schemas_df: pd.DataFrame,
) -> SparkDF:
"""Ensure the dataframe is consistent with the schema.
If there are column data type mismatches, (more than one data type
for a column name in the column schemas) then will try to convert
the data type if possible:
* if they are all number data types, then picks the largest number
type present
* if one of the types is string, then ensures it casts the column to
string type
Also fills any missing columns with Null values, ensuring correct
dtype.
Parameters
----------
frame : SparkDF
    schemas_df : pandas DataFrame
        A dataframe of column schemas for all dataframes set to be
        concatenated, with column names along the index, one column per
        input frame and dtypes as the values (see :func:`_get_schemas_df`).
Returns
-------
SparkDF
Input dataframe with consistent schema.
"""
final_schema = _get_final_schema(schemas_df)
missing_fields = [f for f in final_schema if f not in frame.dtypes]
for column, dtype in missing_fields:
# If current frame missing the column in the schema, then
# set values to Null.
vals = (
F.lit(None) if column not in frame.columns
else F.col(column)
)
# Cast the values with the correct dtype.
frame = frame.withColumn(column, vals.cast(dtype))
return frame
def _get_final_schema(
schemas_df: pd.DataFrame
) -> Sequence[Tuple[str, str]]:
"""Get the final schema by coercing the types."""
# For a given column, if one of the types is string coerce all types
# to string.
schemas_df = schemas_df.mask(
schemas_df.eq('string').any(axis=1),
'string',
)
# For a given column, if all types are number types coerce all types
# to the largest spark number type present.
number_types = (
schemas_df
.fillna('int')
.isin(SPARK_NUMBER_TYPES)
.all(axis=1)
)
largest_num_types = schemas_df[number_types].apply(
lambda row: _get_largest_number_dtype(row.to_list()),
axis=1,
)
schemas_df = schemas_df.mask(number_types, largest_num_types, axis=0)
if not _check_equal_schemas(schemas_df).all():
raise TypeError(
"Spark column data type mismatch, can't auto-convert between"
f" types. \n\n{str(schemas_df[~_check_equal_schemas(schemas_df)])}"
)
# Return the final schema.
return [
(name, dtype)
# Only need the first two columns.
for name, dtype, *_ in schemas_df.reset_index().to_numpy()
]
def _get_largest_number_dtype(dtypes: Sequence[str]) -> str:
"""Return the largest Spark number data type in the input."""
return next((
dtype for dtype in SPARK_NUMBER_TYPES
if dtype in dtypes
))
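# Worked example (added for clarity, not in the original module): if a column
# `price` is 'int' in one frame and 'double' in another, the number branch in
# _get_final_schema keeps 'double' (the larger type in SPARK_NUMBER_TYPES).
# If a third frame carried `price` as 'string', the string branch would win and
# every frame would cast `price` to string before the union.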
def _compare_schemas(schemas_df: pd.DataFrame) -> bool:
"""Return True if schemas are equal, else throw warning.
If unequal, throws a warning that displays the schemas for all the
unequal columns.
Parameters
----------
schemas_df : pandas DataFrame
A dataframe of schemas with columns along the index, dataframe
name across the columns and the dtypes as the values. Create
with :func:`_get_schemas_df`.
Returns
-------
bool
True if column schemas are equal, else False.
"""
equal_schemas = _check_equal_schemas(schemas_df)
# Fill types across missing columns. We only want to raise a warning
# if the types are different.
    schemas_df_filled = schemas_df.bfill(axis=1).ffill(axis=1)
equal_ignoring_missing_cols = _check_equal_schemas(schemas_df_filled)
if not equal_ignoring_missing_cols.all():
warnings.warn(
"column dtypes in the schemas are not equal, attempting to coerce"
f"\n\n{str(schemas_df.loc[~equal_schemas])}",
UnequalSchemaWarning,
)
return False
elif not equal_schemas.all():
return False
else:
return True
def _check_equal_schemas(df: pd.DataFrame) -> pd.DataFrame:
"""Checks that the first schema matches the rest."""
return df.apply(lambda col: col.eq(df.iloc[:, 0])).all(axis=1)
def _get_schemas_df(
frames: Sequence[pd.DataFrame],
keys: Optional[Key] = None,
names: Optional[Union[str, Sequence[str]]] = None,
) -> pd.DataFrame:
"""Return dataframe of column schemas for given frames."""
schemas_df = pd.DataFrame()
for df in frames:
col_names, dtypes = zip(*df.dtypes)
schema = pd.Series(dtypes, index=col_names)
schemas_df = pd.concat([schemas_df, schema], axis=1)
if keys:
keys = [list_convert(key) for key in keys]
names = list_convert(names) if names else names
schemas_df.columns = pd.MultiIndex.from_tuples(keys, names=names)
else:
schemas_df.columns = [f'dtype_{i+1}' for i in range(len(frames))]
return schemas_df
class UnequalSchemaWarning(Warning):
pass
|
py | b40db6bf9a103e02adf2ff471a98ee16eb595f27 | import os
from .. import config
from ..messages import *
from . import (
manifest,
updateBackRefs,
updateBiblio,
updateCanIUse,
updateCrossRefs,
updateLanguages,
updateLinkDefaults,
updateMdn,
updateTestSuites,
updateWpt,
)
def update(
anchors=False,
backrefs=False,
biblio=False,
caniuse=False,
linkDefaults=False,
mdn=False,
testSuites=False,
languages=False,
wpt=False,
path=None,
dryRun=False,
force=False,
):
if path is None:
path = config.scriptPath("spec-data")
# Update via manifest by default, falling back to a full update only if failed or forced.
if not force:
success = manifest.updateByManifest(path=path, dryRun=dryRun)
if not success:
say("Falling back to a manual update...")
force = True
if force:
# fmt: off
# If all are False, update everything
if anchors == backrefs == biblio == caniuse == linkDefaults == mdn == testSuites == languages == wpt == False: # noqa: E712
anchors = backrefs = biblio = caniuse = linkDefaults = mdn = testSuites = languages = wpt = True # noqa: E222
touchedPaths = {
"anchors": updateCrossRefs.update(path=path, dryRun=dryRun) if anchors else None,
"backrefs": updateBackRefs.update(path=path, dryRun=dryRun) if backrefs else None,
"biblio": updateBiblio.update(path=path, dryRun=dryRun) if biblio else None,
"caniuse": updateCanIUse.update(path=path, dryRun=dryRun) if caniuse else None,
"mdn": updateMdn.update(path=path, dryRun=dryRun) if mdn else None,
"linkDefaults": updateLinkDefaults.update(path=path, dryRun=dryRun) if linkDefaults else None,
"testSuites": updateTestSuites.update(path=path, dryRun=dryRun) if testSuites else None,
"languages": updateLanguages.update(path=path, dryRun=dryRun) if languages else None,
"wpt": updateWpt.update(path=path, dryRun=dryRun) if wpt else None,
}
# fmt: on
cleanupFiles(path, touchedPaths=touchedPaths, dryRun=dryRun)
manifest.createManifest(path=path, dryRun=dryRun)
def fixupDataFiles():
"""
    Checks whether the readonly/ version is more recent than your current mutable data files.
This happens if I changed the datafile format and shipped updated files as a result;
using the legacy files with the new code is quite bad!
"""
try:
with open(localPath("version.txt")) as fh:
localVersion = int(fh.read())
except OSError:
localVersion = None
try:
with open(remotePath("version.txt")) as fh:
remoteVersion = int(fh.read())
except OSError as err:
warn("Couldn't check the datafile version. Bikeshed may be unstable.\n{0}", err)
return
if localVersion == remoteVersion:
# Cool
return
# If versions don't match, either the remote versions have been updated
# (and we should switch you to them, because formats may have changed),
# or you're using a historical version of Bikeshed (ditto).
try:
for filename in os.listdir(remotePath()):
copyanything(remotePath(filename), localPath(filename))
except Exception as err:
warn(
"Couldn't update datafiles from cache. Bikeshed may be unstable.\n{0}", err
)
return
def updateReadonlyDataFiles():
"""
Like fixupDataFiles(), but in the opposite direction --
copies all my current mutable data files into the readonly directory.
This is a debugging tool to help me quickly update the built-in data files,
and will not be called as part of normal operation.
"""
try:
for filename in os.listdir(localPath()):
if filename.startswith("readonly"):
continue
copyanything(localPath(filename), remotePath(filename))
except Exception as err:
warn("Error copying over the datafiles:\n{0}", err)
return
def cleanupFiles(root, touchedPaths, dryRun=False):
if dryRun:
return
paths = set()
deletableFiles = []
deletableFolders = []
if touchedPaths["anchors"] is not None:
deletableFiles.extend(["specs.json", "methods.json", "fors.json"])
deletableFolders.extend(["headings", "anchors"])
paths.update(touchedPaths["anchors"])
if touchedPaths["biblio"] is not None:
deletableFiles.extend(["biblio-keys.json", "biblio-numeric-suffixes.json"])
deletableFolders.extend(["biblio"])
paths.update(touchedPaths["biblio"])
if touchedPaths["caniuse"] is not None:
deletableFiles.extend(["caniuse.json"])
deletableFolders.extend(["caniuse"])
paths.update(touchedPaths["caniuse"])
if touchedPaths["mdn"] is not None:
deletableFolders.extend(["mdn"])
paths.update(touchedPaths["mdn"])
say("Cleaning up old data files...")
oldPaths = []
for absPath, relPath in getDatafilePaths(root):
if "/" not in relPath and relPath not in deletableFiles:
continue
if "/" in relPath and relPath.partition("/")[0] not in deletableFolders:
continue
if absPath not in paths:
os.remove(absPath)
oldPaths.append(relPath)
if oldPaths:
say("Success! Deleted {} old files.".format(len(oldPaths)))
else:
say("Success! Nothing to delete.")
def copyanything(src, dst):
import errno
import shutil
try:
shutil.rmtree(dst, ignore_errors=True)
shutil.copytree(src, dst)
except OSError as exc:
if exc.errno in [errno.ENOTDIR, errno.EINVAL]:
shutil.copy(src, dst)
else:
raise
def localPath(*segs):
return config.scriptPath("spec-data", *segs)
def remotePath(*segs):
return config.scriptPath("spec-data", "readonly", *segs)
def getDatafilePaths(basePath):
for root, _, files in os.walk(basePath):
for filename in files:
filePath = os.path.join(root, filename)
yield filePath, os.path.relpath(filePath, basePath)
|
py | b40db6de29e9275fc65bf3b7f222fe35e6107abe | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class DocumentAttributeHasStickers(TLObject): # type: ignore
"""This object is a constructor of the base type :obj:`~pyrogram.raw.base.DocumentAttribute`.
Details:
- Layer: ``123``
- ID: ``0x9801d2f7``
**No parameters required.**
"""
__slots__: List[str] = []
ID = 0x9801d2f7
QUALNAME = "types.DocumentAttributeHasStickers"
def __init__(self) -> None:
pass
@staticmethod
def read(data: BytesIO, *args: Any) -> "DocumentAttributeHasStickers":
# No flags
return DocumentAttributeHasStickers()
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
# No flags
return data.getvalue()
|
py | b40db73e3f230a8573661d7e5b9d399618b17ba1 | #
# Generated with LinearCurrentCoefficientBlueprint
from dmt.blueprint import Blueprint
from dmt.dimension import Dimension
from dmt.attribute import Attribute
from dmt.enum_attribute import EnumAttribute
from dmt.blueprint_attribute import BlueprintAttribute
from sima.sima.blueprints.moao import MOAOBlueprint
class LinearCurrentCoefficientBlueprint(MOAOBlueprint):
""""""
def __init__(self, name="LinearCurrentCoefficient", package_path="sima/hydro", description=""):
super().__init__(name,package_path,description)
self.attributes.append(Attribute("name","string","",default=""))
self.attributes.append(Attribute("description","string","",default=""))
self.attributes.append(Attribute("_id","string","",default=""))
self.attributes.append(BlueprintAttribute("scriptableValues","sima/sima/ScriptableValue","",True,Dimension("*")))
self.attributes.append(EnumAttribute("symmetry","sima/hydro/DirectionSymmetry",""))
self.attributes.append(BlueprintAttribute("items","sima/hydro/LinearCurrentCoefficientItem","",True,Dimension("*"))) |
py | b40db7cc8f2fe2d955efd1fd78605ac11ca3f050 | # -*- coding: utf-8 -*-
# Copyright (C) 2012 Mag. Christian Tanzer. All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. [email protected]
# ****************************************************************************
# This package is part of the package CNDB.
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# CNDB.GTW.__init__
#
# Purpose
# RESTful web services for CNDB
#
# Revision Dates
# 6-Jun-2012 (CT) Creation
# ««revision-date»»···
#--
from _TFL.Package_Namespace import Package_Namespace
from _CNDB import CNDB
GTW = Package_Namespace ()
CNDB._Export ("GTW")
del Package_Namespace
### __END__ CNDB.GTW.__init__
|
py | b40db8497dc27202ac0209f63f745f1ee03d0da2 | from typing import List
import click
from lhotse.bin.modes import prepare
from lhotse.recipes import prepare_eval2000
from lhotse.utils import Pathlike
@prepare.command(context_settings=dict(show_default=True))
@click.argument("corpus-dir", type=click.Path(exists=True, file_okay=False))
@click.argument("output-dir", type=click.Path())
@click.option(
"--absolute-paths",
default=False,
help="Whether to return absolute or relative (to the corpus dir) paths for recordings.",
)
def eval2000(corpus_dir: Pathlike, output_dir: Pathlike, absolute_paths: bool):
"""
The Eval2000 corpus preparation.
\b
This is conversational telephone speech collected as 2-channel, 8kHz-sampled data.
    The LDC catalog number is LDC2002S09 for the audio and LDC2002T43 for the transcripts.
This data is not available for free - your institution needs to have an LDC subscription.
"""
prepare_eval2000(
corpus_dir=corpus_dir, output_dir=output_dir, absolute_paths=absolute_paths
)
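# Illustrative usage sketch (not part of the original module), assuming the LDC
# release has been extracted under the given corpus directory:
#
# lhotse prepare eval2000 /corpora/LDC2002S09 data/manifests/eval2000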
|
py | b40db88d3c9b2850508aa1b17a7dfcc20f07481d | from functools import reduce
import hashlib as hl
import json
import pickle
import requests
# Import the hash_block helper from our hash_util.py file. Omit the ".py" in the import
from utility.hash_util import hash_block
from utility.verification import Verification
from block import Block
from transaction import Transaction
from wallet import Wallet
# The reward we give to miners (for creating a new block)
MINING_REWARD = 10
print(__name__)
class Blockchain:
"""The Blockchain class in the project_blockchain folder manages the
chain of blocks ( = project SPRINTS in Agile / WEEKS in WF) as well as
open transactions ( = project TASK) and the
node on which it's running.
Attributes:
:chain: The list of blocks (sprints)
:open_transactions (private): The list of open transactions / tasks
        :public_key: The public key of the connected node (which runs the blockchain).
"""
def __init__(self, public_key, node_id):
"""The constructor of the Blockchain (Project) class."""
# Our starting block for the blockchain
genesis_block = Block(0, '', [], [], 0)
# Initializing our (empty) blockchain list
self.chain = [genesis_block]
# Unhandled transactions
self.__open_transactions = []
self.public_key = public_key
self.__peer_nodes = set()
self.node_id = node_id
self.resolve_conflicts = False
self.load_data()
# This turns the chain attribute into a property with a getter (the method
# below) and a setter (@chain.setter)
@property
def chain(self):
return self.__chain[:]
# The setter for the chain property
@chain.setter
def chain(self, val):
self.__chain = val
def get_open_transactions(self):
"""Returns a copy of the open transactions list."""
return self.__open_transactions[:]
def load_data(self):
"""Initialize blockchain + open transactions data from a file."""
try:
with open('blockchain-{}.txt'.format(self.node_id), mode='r') as f:
# file_content = pickle.loads(f.read())
file_content = f.readlines()
# blockchain = file_content['chain']
# open_transactions = file_content['ot']
blockchain = json.loads(file_content[0][:-1])
# We need to convert the loaded data because Transactions
# should use OrderedDict
updated_blockchain = []
for block in blockchain:
converted_tx = [Transaction(
tx['sender'],
tx['recipient'],
tx['signature'],
tx['amount']) for tx in block['transactions']]
updated_block = Block(
block['index'],
block['previous_hash'],
converted_tx,
block['proof'],
block['timestamp'])
updated_blockchain.append(updated_block)
self.chain = updated_blockchain
open_transactions = json.loads(file_content[1][:-1])
# We need to convert the loaded data because Transactions
# should use OrderedDict
updated_transactions = []
for tx in open_transactions:
updated_transaction = Transaction(
tx['sender'],
tx['recipient'],
tx['signature'],
tx['amount'])
updated_transactions.append(updated_transaction)
self.__open_transactions = updated_transactions
peer_nodes = json.loads(file_content[2])
self.__peer_nodes = set(peer_nodes)
except (IOError, IndexError):
pass
finally:
print('Cleanup!')
def save_data(self):
"""Save blockchain + open transactions snapshot to a file."""
try:
with open('blockchain-{}.txt'.format(self.node_id), mode='w') as f:
saveable_chain = [
block.__dict__ for block in
[
Block(block_el.index,
block_el.previous_hash,
[tx.__dict__ for tx in block_el.transactions],
block_el.proof,
block_el.timestamp) for block_el in self.__chain
]
]
f.write(json.dumps(saveable_chain))
f.write('\n')
saveable_tx = [tx.__dict__ for tx in self.__open_transactions]
f.write(json.dumps(saveable_tx))
f.write('\n')
f.write(json.dumps(list(self.__peer_nodes)))
# save_data = {
# 'chain': blockchain,
# 'ot': open_transactions
# }
# f.write(pickle.dumps(save_data))
except IOError:
print('Saving failed!')
def proof_of_work(self):
"""Generate a proof of work for the open transactions, the hash of the
previous block and a random number (which is guessed until it fits)."""
last_block = self.__chain[-1]
last_hash = hash_block(last_block)
proof = 0
# Try different PoW numbers and return the first valid one
while not Verification.valid_proof(
self.__open_transactions,
last_hash, proof
):
proof += 1
return proof
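    # Conceptual note (added for clarity, not in the original source): the loop
    # above simply counts `proof` upwards. Verification.valid_proof, defined in
    # the utility package and not shown here, recomputes a hash over the open
    # transactions, the previous block's hash and the candidate proof, and the
    # first proof whose hash satisfies the difficulty condition is returned.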
def get_balance(self, sender=None):
"""Calculate and return the balance for a participant.
"""
if sender is None:
if self.public_key is None:
return None
participant = self.public_key
else:
participant = sender
# Fetch a list of all sent coin amounts for the given person (empty
# lists are returned if the person was NOT the sender)
# This fetches sent amounts of transactions that were already included
# in blocks of the blockchain
tx_sender = [[tx.amount for tx in block.transactions
if tx.sender == participant] for block in self.__chain]
# Fetch a list of all sent coin amounts for the given person (empty
# lists are returned if the person was NOT the sender)
# This fetches sent amounts of open transactions (to avoid double
# spending)
open_tx_sender = [
tx.amount for tx in self.__open_transactions
if tx.sender == participant
]
tx_sender.append(open_tx_sender)
print(tx_sender)
amount_sent = reduce(lambda tx_sum, tx_amt: tx_sum + sum(tx_amt)
if len(tx_amt) > 0 else tx_sum + 0, tx_sender, 0)
# This fetches received coin amounts of transactions that were already
# included in blocks of the blockchain
# We ignore open transactions here because you shouldn't be able to
# spend coins before the transaction was confirmed + included in a
# block
tx_recipient = [
[
tx.amount for tx in block.transactions
if tx.recipient == participant
] for block in self.__chain
]
amount_received = reduce(
lambda tx_sum, tx_amt: tx_sum + sum(tx_amt)
if len(tx_amt) > 0 else tx_sum + 0,
tx_recipient,
0
)
# Return the total balance
return amount_received - amount_sent
def get_last_blockchain_value(self):
""" Returns the last value of the current blockchain. """
if len(self.__chain) < 1:
return None
return self.__chain[-1]
# This function accepts two arguments.
# One required one (transaction_amount) and one optional one
# (last_transaction)
# The optional one is optional because it has a default value => [1]
def add_transaction(self,
recipient,
sender,
signature,
amount=1.0,
is_receiving=False):
""" Append a new value as well as the last blockchain value to the blockchain.
Arguments:
:sender: The sender of the coins.
:recipient: The recipient of the coins.
:amount: The amount of coins sent with the transaction
(default = 1.0)
"""
# transaction = {
# 'sender': sender,
# 'recipient': recipient,
# 'amount': amount
# }
# if self.public_key == None:
# return False
transaction = Transaction(sender, recipient, signature, amount)
if Verification.verify_transaction(transaction, self.get_balance):
self.__open_transactions.append(transaction)
self.save_data()
if not is_receiving:
for node in self.__peer_nodes:
url = 'http://{}/broadcast-transaction'.format(node)
try:
response = requests.post(url,
json={
'sender': sender,
'recipient': recipient,
'amount': amount,
'signature': signature
})
if (response.status_code == 400 or
response.status_code == 500):
print('Transaction declined, needs resolving')
return False
except requests.exceptions.ConnectionError:
continue
return True
return False
def mine_block(self):
"""Create a new block and add open transactions to it."""
# Fetch the currently last block of the blockchain
if self.public_key is None:
return None
last_block = self.__chain[-1]
# Hash the last block (=> to be able to compare it to the stored hash
# value)
hashed_block = hash_block(last_block)
proof = self.proof_of_work()
# Miners should be rewarded, so let's create a reward transaction
# reward_transaction = {
# 'sender': 'MINING',
# 'recipient': owner,
# 'amount': MINING_REWARD
# }
reward_transaction = Transaction(
'MINING', self.public_key, '', MINING_REWARD)
# Copy transaction instead of manipulating the original
# open_transactions list
# This ensures that if for some reason the mining should fail,
# we don't have the reward transaction stored in the open transactions
copied_transactions = self.__open_transactions[:]
for tx in copied_transactions:
if not Wallet.verify_transaction(tx):
return None
copied_transactions.append(reward_transaction)
block = Block(len(self.__chain), hashed_block,
copied_transactions, proof)
self.__chain.append(block)
self.__open_transactions = []
self.save_data()
for node in self.__peer_nodes:
url = 'http://{}/broadcast-block'.format(node)
converted_block = block.__dict__.copy()
converted_block['transactions'] = [
tx.__dict__ for tx in converted_block['transactions']]
try:
response = requests.post(url, json={'block': converted_block})
if response.status_code == 400 or response.status_code == 500:
print('Block declined, needs resolving')
if response.status_code == 409:
self.resolve_conflicts = True
except requests.exceptions.ConnectionError:
continue
return block
def add_block(self, block):
"""Add a block which was received via broadcasting to the localb
lockchain."""
# Create a list of transaction objects
transactions = [Transaction(
tx['sender'],
tx['recipient'],
tx['signature'],
tx['amount']) for tx in block['transactions']]
# Validate the proof of work of the block and store the result (True
# or False) in a variable
proof_is_valid = Verification.valid_proof(
transactions[:-1], block['previous_hash'], block['proof'])
# Check if previous_hash stored in the block is equal to the local
# blockchain's last block's hash and store the result in a block
hashes_match = hash_block(self.chain[-1]) == block['previous_hash']
if not proof_is_valid or not hashes_match:
return False
# Create a Block object
converted_block = Block(
block['index'],
block['previous_hash'],
transactions,
block['proof'],
block['timestamp'])
self.__chain.append(converted_block)
stored_transactions = self.__open_transactions[:]
# Check which open transactions were included in the received block
# and remove them
# This could be improved by giving each transaction an ID that would
# uniquely identify it
for itx in block['transactions']:
for opentx in stored_transactions:
if (opentx.sender == itx['sender'] and
opentx.recipient == itx['recipient'] and
opentx.amount == itx['amount'] and
opentx.signature == itx['signature']):
try:
self.__open_transactions.remove(opentx)
except ValueError:
print('Item was already removed')
self.save_data()
return True
def resolve(self):
"""Checks all peer nodes' blockchains and replaces the local one with
longer valid ones."""
# Initialize the winner chain with the local chain
winner_chain = self.chain
replace = False
for node in self.__peer_nodes:
url = 'http://{}/chain'.format(node)
try:
# Send a request and store the response
response = requests.get(url)
# Retrieve the JSON data as a dictionary
node_chain = response.json()
# Convert the dictionary list to a list of block AND
# transaction objects
node_chain = [
Block(block['index'],
block['previous_hash'],
[
Transaction(
tx['sender'],
tx['recipient'],
tx['signature'],
tx['amount']) for tx in block['transactions']
],
block['proof'],
block['timestamp']) for block in node_chain
]
node_chain_length = len(node_chain)
local_chain_length = len(winner_chain)
# Store the received chain as the current winner chain if it's
# longer AND valid
if (node_chain_length > local_chain_length and
Verification.verify_chain(node_chain)):
winner_chain = node_chain
replace = True
except requests.exceptions.ConnectionError:
continue
self.resolve_conflicts = False
# Replace the local chain with the winner chain
self.chain = winner_chain
if replace:
self.__open_transactions = []
self.save_data()
return replace
def add_peer_node(self, node):
"""Adds a new node to the peer node set.
Arguments:
:node: The node URL which should be added.
"""
self.__peer_nodes.add(node)
self.save_data()
def remove_peer_node(self, node):
"""Removes a node from the peer node set.
Arguments:
:node: The node URL which should be removed.
"""
self.__peer_nodes.discard(node)
self.save_data()
def get_peer_nodes(self):
"""Return a list of all connected peer nodes."""
return list(self.__peer_nodes)
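# Illustrative sketch only (not part of the original module): minimal local use
# of the Blockchain class. The Wallet API (create_keys / sign_transaction) is
# assumed from the companion wallet module and may differ in detail.
#
# wallet = Wallet()
# wallet.create_keys()
# blockchain = Blockchain(wallet.public_key, node_id=5000)
# signature = wallet.sign_transaction(wallet.public_key, 'recipient-key', 1.5)
# blockchain.add_transaction('recipient-key', wallet.public_key, signature, 1.5)
# block = blockchain.mine_block()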
|
py | b40dba833577bbf673466015aacf508455e0443d | """
Canada-specific Form helpers
"""
from django.newforms import ValidationError
from django.newforms.fields import Field, RegexField, Select, EMPTY_VALUES
from django.newforms.util import smart_unicode
from django.utils.translation import gettext, ugettext
import re
phone_digits_re = re.compile(r'^(?:1-?)?(\d{3})[-\.]?(\d{3})[-\.]?(\d{4})$')
sin_re = re.compile(r"^(\d{3})-(\d{3})-(\d{3})$")
class CAPostalCodeField(RegexField):
"""Canadian postal code field."""
default_error_messages = {
'invalid': gettext(u'Enter a postal code in the format XXX XXX.'),
}
def __init__(self, *args, **kwargs):
super(CAPostalCodeField, self).__init__(r'^[ABCEGHJKLMNPRSTVXYZ]\d[A-Z] \d[A-Z]\d$',
max_length=None, min_length=None, *args, **kwargs)
class CAPhoneNumberField(Field):
"""Canadian phone number field."""
default_error_messages = {
'invalid': u'Phone numbers must be in XXX-XXX-XXXX format.',
}
def clean(self, value):
"""Validate a phone number.
"""
super(CAPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
        value = re.sub(r'(\(|\)|\s+)', '', smart_unicode(value))
m = phone_digits_re.search(value)
if m:
return u'%s-%s-%s' % (m.group(1), m.group(2), m.group(3))
raise ValidationError(self.error_messages['invalid'])
class CAProvinceField(Field):
"""
A form field that validates its input is a Canadian province name or abbreviation.
    It normalizes the input to the standard two-letter postal service
abbreviation for the given province.
"""
default_error_messages = {
'invalid': u'Enter a Canadian province or territory.',
}
def clean(self, value):
from ca_provinces import PROVINCES_NORMALIZED
super(CAProvinceField, self).clean(value)
if value in EMPTY_VALUES:
return u''
try:
value = value.strip().lower()
except AttributeError:
pass
else:
try:
return PROVINCES_NORMALIZED[value.strip().lower()].decode('ascii')
except KeyError:
pass
raise ValidationError(self.error_messages['invalid'])
class CAProvinceSelect(Select):
"""
A Select widget that uses a list of Canadian provinces and
territories as its choices.
"""
def __init__(self, attrs=None):
from ca_provinces import PROVINCE_CHOICES # relative import
super(CAProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
class CASocialInsuranceNumberField(Field):
"""
A Canadian Social Insurance Number (SIN).
Checks the following rules to determine whether the number is valid:
* Conforms to the XXX-XXX-XXX format.
* Passes the check digit process "Luhn Algorithm"
See: http://en.wikipedia.org/wiki/Social_Insurance_Number
"""
default_error_messages = {
'invalid': ugettext('Enter a valid Canadian Social Insurance number in XXX-XXX-XXX format.'),
}
def clean(self, value):
super(CASocialInsuranceNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
match = re.match(sin_re, value)
if not match:
raise ValidationError(self.error_messages['invalid'])
number = u'%s-%s-%s' % (match.group(1), match.group(2), match.group(3))
check_number = u'%s%s%s' % (match.group(1), match.group(2), match.group(3))
if not self.luhn_checksum_is_valid(check_number):
raise ValidationError(self.error_messages['invalid'])
return number
def luhn_checksum_is_valid(self, number):
"""
Checks to make sure that the SIN passes a luhn mod-10 checksum
See: http://en.wikipedia.org/wiki/Luhn_algorithm
"""
sum = 0
num_digits = len(number)
oddeven = num_digits & 1
for count in range(0, num_digits):
digit = int(number[count])
if not (( count & 1 ) ^ oddeven ):
digit = digit * 2
if digit > 9:
digit = digit - 9
sum = sum + digit
return ( (sum % 10) == 0 )
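# Worked example (added for clarity, not in the original source): for the
# candidate number 046-454-286 the loop doubles every second digit
# (4, 4, 4, 8 -> 8, 8, 8, 16 -> 7 after subtracting 9), giving
# 0 + 8 + 6 + 8 + 5 + 8 + 2 + 7 + 6 = 50; since 50 % 10 == 0, the checksum
# passes and the SIN is accepted.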
|