the-stack_0_17047
"""
Copyright 2017 Pani Networks Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#
# Functions for the calculation of a full Romana topology from the simplified
# user configuration.
#
from copy import copy
def calculate_num_groups(conf, num_networks=None):
"""
Calculates how many prefix groups we can have per AWS zone. Takes into
account that we need a route for each prefix group and we can't have more
    than 48 routes in total.
"""
num_zones = len(conf['aws']['zones'])
num_nets = len(conf['networks']) if num_networks is None else \
num_networks
num_groups = 32
while num_groups * num_zones * num_nets > 48:
if num_groups == 1:
            raise Exception("Too many networks and/or zones, reaching "
                            "the 50-route limit for AWS.")
num_groups //= 2
return num_groups
def _build_aws_topology(conf):
"""
    Build a topology for an AWS VPC deployment.
"""
    # - If there is just one zone, we need only one group, since it's a flat network.
    # - If there is more than one zone, we want many groups per zone, but the
    #   total number of groups must stay within the AWS route limit (50, ideally fewer).
    # - We only have one topology when running in a VPC.
t = {
"networks" : [n['name'] for n in conf['networks']],
"map" : []
}
num_zones = len(conf['aws']['zones'])
if num_zones == 1:
t["map"].append({
"name" : conf['aws']['zones'][0],
"groups" : []
})
else:
num_groups = calculate_num_groups(conf)
for zone in conf['aws']['zones']:
m = {
"name" : zone,
"assignment" : {"failure-domain" : zone},
"groups" : []
}
for i in range(num_groups):
m["groups"].append({
"name" : "%s-%02d" % (zone, i),
"groups" : []
})
t["map"].append(m)
return t
def _build_dc_topology(conf):
"""
Build a topology for a routed data center network.
"""
t = {
"networks" : [n['name'] for n in conf['networks']],
}
top_level_group_label = None
cd = conf['datacenter']
if cd['flat_network']:
if cd['prefix_per_host']:
num_groups = cd['num_hosts']
top_level_group_label = "host-%d"
else:
num_groups = 1
else:
num_groups = cd['num_racks']
top_level_group_label = "rack-%d"
m = []
for i in range(num_groups):
g = {"groups" : []}
if top_level_group_label:
g["name"] = top_level_group_label % i
if not cd['flat_network']:
g["assignment"] = {"rack" : g["name"]}
m.append(g)
if not cd['flat_network']:
if cd['prefix_per_host']:
for top_level_group in m:
for i in range(cd['num_hosts_per_rack']):
g = {
"name" : "host-%d" % i,
"groups" : []
}
top_level_group["groups"].append(g)
t["map"] = m
return t
def build_topology(conf):
"""
From the user provided configuration, calculate the full topology config.
"""
topo = {"networks": [], "topologies" : []}
for n in conf['networks']:
net = copy(n)
# If block mask wasn't defined, we add a default value for it
if "block_mask" not in net:
net["block_mask"] = 29
topo["networks"].append(net)
if conf.get('aws'):
t = _build_aws_topology(conf)
else:
t = _build_dc_topology(conf)
topo["topologies"].append(t)
return topo
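# Usage sketch (illustrative only): the config layout below is inferred from how the
# functions above index `conf` and may not match the real Romana user-config schema exactly.
if __name__ == "__main__":
    sample_conf = {
        "networks": [{"name": "net-a"}, {"name": "net-b"}],
        "aws": {"zones": ["us-west-2a", "us-west-2b", "us-west-2c"]},
    }
    # 3 zones * 2 networks: groups per zone halve from 32 until the route budget fits.
    print(calculate_num_groups(sample_conf))                               # -> 8
    print(build_topology(sample_conf)["topologies"][0]["map"][0]["name"])  # -> us-west-2a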
the-stack_0_17052
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 5 12:26:18 2020
@author: dmattox
"""
# import numpy as np
import collections
import numpy as np
from anytree import NodeMixin, RenderTree
def binary2decimal(binary):
# Accepts binary number as a string, returns decimal number as integer
out = 0
binary = binary[::-1] # Reverse direction of binary string
for i, b in enumerate(binary):
out += int(b) * (2 ** i)
return out
def decimal2binary(decimal):
# Accepts decimal number as integer, returns binary number as a string
out = ''
while decimal > 0:
out += str(decimal % 2) # bit equal to remainder of dividing by 2
decimal //= 2 # update value to iteger quotient
return out[::-1] # Reverse order of binary string and return it
def validParenth(gly):
# Returns true if all parentheses and brackets are paired and closed
stack = []
comp = {')': '(',
']': '['}
for p in gly:
if p in ['(',')','[',']','{','}']:
if p in comp.values():
stack.insert(0,p)
else:
if not stack:
return False
elif stack.pop(0) != comp[p]:
return False
if stack:
return False
else:
return True
def getSug(iupacGly):
mono = ''
for l in iupacGly[::-1]: # loop through string backwards and pop off first complete monosaccharide
if l not in ['[', ']', '(', ')']:
mono += l
else:
break
if mono == '':
        raise SyntaxError('ERROR: linkage punctuation found before any sugars in the supplied IUPAC glycan string')
mono = mono[::-1] # correct order for sugar iupac code
return(iupacGly[:(-1*len(mono))], mono)
def getLink(iupacGly):
link = ''
    for l in iupacGly[::-1]: # loop through string backwards and pop off the link defined within the last closed parentheses
if l != '(':
link += l
else:
link += l
break
link = link[::-1] # correct order for sugar iupac code
return(iupacGly[:(-1*len(link))], link[1:-1]) # Return iupac string without link, as well as the link without the parentheses
def getBranch(iupacGly):
# Pop an entire bracketed branch from the iupac string
branch = ''
nested = []
for l in iupacGly[::-1]:
if l == '[':
if nested == []:
branch += l
break
else:
branch += l
nested.pop(0)
elif l == ']' and branch != '': # If it hits a nested branch (BESIDES THE FIRST BRACKET)
branch += l
nested.append(l)
else:
branch += l
branch = branch[::-1] # Reverse back to correct order
return(iupacGly[:(-1*len(branch))], branch[1:-1]) # Return iupac string without branch, as well as the branch without the parentheses
# def decode(self, maxB, maxC, monoDict, anoDict = {'u': 0, 'a': 1, 'b': 2}):
# """
# Parameters (parameters used to generate encoding)
# ----------
# maxB : int
# Max value contained in a subway line in the root nodes of all glycans being considered (maximum number of observed branches).
# maxC : int
# Highest carbon number observed to participate in a glycosidic bond (from all glycans being considered).
# monoDict : dict
# Dictionary linking all observed monosaccharides to a corresponding integer (integers based on monosaccharide frequency rank).
# anoDict : dict, optional
# Dictionary encoding anomeric conformation information as integers. The default is {'u': 0, 'a': 1, 'b': 2}.
# Returns
# -------
# IUPAC string from encoding
# """
# pass
class GlyNode(NodeMixin):
# Class to hold nodes (rows) of tree representations
def __init__(self, base, ind, link = 'u0-0', parent = None):
super(GlyNode, self).__init__()
self.base = base
# self.depth = depth # Depth is an existing property of parent class
self.ind = ind
link = link.replace('(','') # clean extra parentheses
link = link.replace(')','')
self.anomeric = link[0] if link[0] in ['u','a','b'] else 'u' # state of anomeric carbon if indicated, otherwise assume unknown
self.parent = parent # parent GlyNode
self.name = '%s:%d*%d' % (self.base, self.depth, self.ind)
        link = link[1:] if link[0] in ['u','a','b'] else link[:] # drop anomeric character if present
link = link.split('-') # Split linkage into child Carbon connection [0] and parent carbon connection [1]
        self.Clinks = collections.Counter() # Holds the attachments at different carbon positions (based on formal carbon numbering)
if self.parent is None:
self.parentLink = 0
else:
self.Clinks[link[0]] = 2
self.parent.Clinks[link[1]] = 1
self.parentLink = link[1]
# self.parent.children.append(self) # Children handled by anytree parent class
# self.children = [] # handled by anytree parent class
self.subway = [] # Holds the indices of the subway lines that pass through the node
def __repr__(self):
return self.name
def __str__(self):
return self.name
def treePrint(self):
for pre, fill, node in RenderTree(self):
print("%s%s" % (pre, node.name))
def drawSubway(self, sbwyStp):
# Called by SugarBase.drawSbwyMap
self.subway.append(sbwyStp) # Add subway line for the terminal node
if self.parent is not None:
self.parent.drawSubway(sbwyStp) # Recursively pass the subway line up the tree towards the root node, adding the information to each node along the way
class SugarBase:
    # Class to hold entries in the SugarBase v2 database
def __init__(self, sbID, iupac, link, species, immunogenic):
self.sbID = sbID
self.iupac = iupac
self.link = link
self.species = [s.strip() for s in species.split(',')]
self.taxonomy = []
self.immunogenic = immunogenic
self.id = int(sbID.replace('SBID', ''))
self.tree = {}
# self.buildTree()
self.encoding = None
def __repr__(self):
return self.sbID
def __str__(self):
return self.sbID
def __int__(self):
return(self.id)
def print(self):
print('SugarBase ID:\n\t', self.id)
print('N/O Linked:\n\t', self.link)
if self.species[0] == '':
print('Species of origin:\n\tUnknown')
elif len(self.species) <= 2:
print('Species of origin: [', len(self.species),']\n\t', self.species)
else:
print('Species of origin: [', len(self.species),']\n\t', self.species[:3], '... (first 3, try print(SugarBase.species) to see the rest )')
print('Immunogenicity:\n\t', self.immunogenic)
print('IUPAC glycan:\n\t', self.iupac)
def treePrint(self):
if self.tree == {}:
print('ERROR: sugar tree not yet constructed')
else:
self.tree[(0,0)].treePrint()
def treeDepthCnt(self, depth):
# Returns the number of nodes in the tree at the given depth
cnt = 0
for k in self.tree.keys():
if k[0] == depth:
cnt += 1
return cnt
def buildTree(self):
if self.tree != {}:
print('WARNING: Tree already constructed, not rebuilding it')
elif (validParenth(self.iupac) == False):
            raise SyntaxError('Unmatched parentheses or brackets detected in supplied IUPAC glycan string')
else:
gly = self.iupac
# add start token
par = (0,0)
self.tree[par] = GlyNode(base = 'START', ind = par[1])
# Process the root node
gly, base = getSug(gly)
chi = (1,0)
self.tree[chi] = GlyNode(base = base, ind = par[1], parent = self.tree[par])
par = chi
            if gly: # only queue the remainder; if the glycan is a single monosaccharide the queue stays empty and the while loop below is skipped
branchQueue = [[gly,par]]
else:
branchQueue = []
while branchQueue:
if branchQueue[0][0][-1] != ')' and branchQueue[0][0][-1] != ']':
print('ERROR: no linkage or branch found for glycan ', self.sbID)
break
if branchQueue[0][0][-1] == ']':
par = branchQueue[0][1]
childLst = [] # Branching, at least 2 children from current parent node
while branchQueue[0][0][-1] == ']':
branchQueue[0][0], branch = getBranch(branchQueue[0][0])
childLst.append(branch)
childLst.append(branchQueue[0][0]) # add "main" branch to the list of branches as well
branchQueue.pop(0) # and remove it from the original queue
childLst.sort(key = lambda x: int(x[-2]), reverse = True) # sort all of the branches from the parent node by descending parentlink carbon number
for branch in childLst:
                        branchQueue.insert(0,[branch, par]) # Add branches to the branch queue such that the lower numbered branches are on the top of the queue
                    chi = par # Since no monosaccharides are removed, set chi to par to preserve the true parent
if branchQueue[0][0][-1] == ')':
par = branchQueue[0][1]
chi = (par[0]+1, self.treeDepthCnt(par[0]+1)) # depth & index of child
branchQueue[0][0], link = getLink(branchQueue[0][0])
branchQueue[0][0], base = getSug(branchQueue[0][0])
self.tree[chi] = GlyNode(base, ind=chi[1], link=link, parent = self.tree[par])
if branchQueue[0][0] == '':
branchQueue.pop(0) # If a branch has been fully processed, remove it from the queue
else:
branchQueue[0][1] = chi # otherwise, update the parent for the remainder of the branch
# Add stop tokens to terminal monosaccharides
termNodes = []
for k,v in self.tree.items():
if v.children == ():
termNodes.append(k)
termNodes.sort(key= lambda x: x[1])
termNodes.sort(key= lambda x: x[0], reverse=True)
for par in termNodes:
chi = (par[0]+1, self.treeDepthCnt(par[0]+1)) # depth & index of child
self.tree[chi] = GlyNode('END', ind=chi[1], parent = self.tree[par])
def drawSbwyMap(self):
sbwyStps = []
for k,v in self.tree.items():
if v.children == ():
sbwyStps.append(k)
sbwyStps.sort(reverse = True)
for i,stp in enumerate(sbwyStps):
self.tree[stp].drawSubway(i)
def buildEncoding(self, maxB, maxC, monoDict, anoDict = {'u': 0, 'a': 1, 'b': 2}):
"""
Parameters
----------
maxB : int
Max value contained in a subway line in the root nodes of all glycans being considered (maximum number of observed branches).
maxC : int
Highest carbon number observed to participate in a glycosidic bond (from all glycans being considered).
monoDict : dict
Dictionary linking all observed monosaccharides to a corresponding integer (integers based on monosaccharide frequency rank).
anoDict : dict, optional
Dictionary encoding anomeric conformation information as integers. The default is {'u': 0, 'a': 1, 'b': 2}.
Returns
-------
None, builds encoding in self.encoding as a numpy array, where each row corresponds to a node/monosaccharide and each column corresponds to a descriptor for that node:
1 -- monosaccharide identity (represented by integer from monoDict)
2 -- anomeric conformation of the saccharide (0:unknown, 1: alpha, 2: beta)
        3 -- Carbon positions on the saccharide participating in glycosidic bonds as a binary converted to a decimal number
Ex 100001000 (C1 & C6 occupied) --> 264
4 -- "Subway lines" passing through the node as a binary converted to a decimal number
            Ex 11111100000 (Root node on a glycan with 6 terminal non-reducing saccharides, all 6 subway lines pass through) --> 2016
5 -- The carbon position of the parent saccharide the node is connected to
6 -- depth
7 -- index (differentiate saccharides at the same depth on different branches)
"""
colNames = ['sugar'] + ['anomeric'] + ['C_links'] + ['B_lines'] + ['parLink', 'sDepth', 'sInd']
self.encoding = np.zeros((len(self.tree.keys()), len(colNames)), dtype = int) # Initialize 2D array to store encoding
for i,nodeKey in enumerate(list(self.tree.keys())):
base = self.tree[nodeKey] # GlyNode object for the current saccharide
# Prep col 3 value (occupied carbons)
carbLinks = [str(base.Clinks[str(i)]) for i in range(1,maxC+1)]
carbLinks = ['1' if c != '0' else '0' for c in carbLinks] # Drop parent/child linked info from each carbon position
C_binary = ''.join(carbLinks)
# Prep col 4 value (subway lines)
sbwyLines = ['1' if i in base.subway else '0' for i in range(maxB+1)]
B_binary = ''.join(sbwyLines)
# Columns 5-7
liDeIn = [int(base.parentLink), base.depth, base.ind] # link, depth, index info for sugar & parent
self.encoding[i,] = [monoDict[base.base], anoDict[base.anomeric], binary2decimal(C_binary), binary2decimal(B_binary)] + liDeIn
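# Usage sketch (illustrative only): a small IUPAC string showing how the helpers above
# peel sugars and linkages off the end of a glycan; the glycan itself is just an example.
if __name__ == "__main__":
    gly = "Gal(b1-4)GlcNAc"
    rest, sugar = getSug(gly)   # -> ("Gal(b1-4)", "GlcNAc")
    rest, link = getLink(rest)  # -> ("Gal", "b1-4")
    print(sugar, link, rest)    # GlcNAc b1-4 Gal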
the-stack_0_17053
'''Utilities relating to interaction with Marathon
************************************************************************
FOR THE TIME BEING WHATEVER MODIFICATIONS ARE APPLIED TO THIS FILE
SHOULD ALSO BE APPLIED TO sdk_marathon IN ANY OTHER PARTNER REPOS
************************************************************************
'''
import logging
import json
import os
import tempfile
import retrying
import shakedown
import sdk_cmd
import sdk_metrics
TIMEOUT_SECONDS = 15 * 60
log = logging.getLogger(__name__)
def _get_config_once(app_name):
return sdk_cmd.cluster_request('GET', _api_url('apps/{}'.format(app_name)), retry=False)
def get_app_id(service_name):
# service_name may already contain a leading slash.
return '/' + service_name.lstrip('/')
def wait_for_deployment_and_app_removal(app_id, timeout=TIMEOUT_SECONDS):
"""
Waits for application to be gone, according to Marathon.
"""
log.info('Waiting for no deployments for {}'.format(app_id))
shakedown.deployment_wait(timeout, app_id)
client = shakedown.marathon.create_client()
def marathon_dropped_app():
app_ids = [app['id'] for app in client.get_apps()]
log.info('Marathon app IDs: {}'.format(app_ids))
matching_app_ids = list(filter(lambda x: x == app_id, app_ids))
if len(matching_app_ids) > 1:
log.warning('Found multiple apps with id {}'.format(app_id))
return len(matching_app_ids) == 0
log.info('Waiting for no {} Marathon app'.format(app_id))
shakedown.time_wait(marathon_dropped_app, timeout_seconds=timeout)
@retrying.retry(stop_max_attempt_number=5,
wait_fixed=5000,
retry_on_exception=lambda e: isinstance(e, Exception))
def retried_wait_for_deployment_and_app_removal(*args, **kwargs):
wait_for_deployment_and_app_removal(*args, **kwargs)
def app_exists(app_name):
try:
_get_config_once(app_name)
return True
except Exception:
return False
def get_config(app_name, timeout=TIMEOUT_SECONDS):
# Be permissive of flakes when fetching the app content:
@retrying.retry(
wait_fixed=1000,
stop_max_delay=timeout * 1000)
def wait_for_response():
return _get_config_once(app_name).json()['app']
config = wait_for_response()
# The configuration JSON that marathon returns doesn't match the configuration JSON it accepts,
# so we have to remove some offending fields to make it re-submittable, since it's not possible to
# submit a partial config with only the desired fields changed.
if 'uris' in config:
del config['uris']
if 'version' in config:
del config['version']
return config
def is_app_running(app: dict) -> bool:
return (app.get('tasksStaged', 0) == 0 and app.get('tasksUnhealthy', 0) == 0 and app.get('tasksRunning', 0) > 0)
def wait_for_app_running(app_name: str, timeout: int) -> None:
@retrying.retry(stop_max_delay=timeout,
wait_fixed=5000,
retry_on_result=lambda result: not result)
def _wait_for_app_running(app_name: str) -> bool:
cmd = 'marathon app show {}'.format(app_name)
log.info('Running %s', cmd)
app = sdk_cmd.get_json_output(cmd)
return is_app_running(app)
_wait_for_app_running(app_name)
def wait_for_deployment_and_app_running(app_name: str, timeout: int) -> None:
shakedown.deployment_wait(timeout, app_name)
wait_for_app_running(app_name, timeout)
def install_app_from_file(app_name: str, app_def_path: str) -> (bool, str):
"""
Installs a marathon app using the path to an app definition.
Args:
app_def_path: Path to app definition
Returns:
(bool, str) tuple: Boolean indicates success of install attempt. String indicates
error message if install attempt failed.
"""
cmd = "marathon app add {}".format(app_def_path)
log.info("Running %s", cmd)
rc, stdout, stderr = sdk_cmd.run_raw_cli(cmd)
if rc or stderr:
log.error("returncode=%s stdout=%s stderr=%s", rc, stdout, stderr)
return False, stderr
if "Created deployment" not in stdout:
stderr = "'Created deployment' not in STDOUT"
log.error(stderr)
return False, stderr
log.info('Waiting for app %s to be deployed and running...', app_name)
wait_for_deployment_and_app_running(app_name, TIMEOUT_SECONDS)
return True, ''
def install_app(app_definition: dict) -> (bool, str):
"""
Installs a marathon app using the given `app_definition`.
Args:
app_definition: The definition of the app to pass to marathon.
Returns:
(bool, str) tuple: Boolean indicates success of install attempt. String indicates
error message if install attempt failed.
"""
app_name = app_definition["id"]
with tempfile.TemporaryDirectory() as d:
app_def_file = "{}.json".format(app_name.replace('/', '__'))
log.info("Launching {} marathon app".format(app_name))
app_def_path = os.path.join(d, app_def_file)
log.info("Writing app definition to %s", app_def_path)
with open(app_def_path, "w") as f:
json.dump(app_definition, f)
return install_app_from_file(app_name, app_def_path)
def update_app(app_name, config, timeout=TIMEOUT_SECONDS, wait_for_completed_deployment=True, force=True):
if "env" in config:
log.info("Environment for marathon app {} ({} values):".format(app_name, len(config["env"])))
for k in sorted(config["env"]):
log.info(" {}={}".format(k, config["env"][k]))
query_string = "?force=true" if force else ""
# throws on failure:
sdk_cmd.cluster_request('PUT', _api_url('apps/{}{}'.format(app_name, query_string)), log_args=False, json=config)
if wait_for_completed_deployment:
log.info("Waiting for Marathon deployment of {} to complete...".format(app_name))
shakedown.deployment_wait(app_id=app_name, timeout=timeout)
def destroy_app(app_name):
shakedown.delete_app_wait(app_name)
def restart_app(app_name):
log.info("Restarting {}...".format(app_name))
# throws on failure:
sdk_cmd.cluster_request('POST', _api_url('apps/{}/restart'.format(app_name)))
log.info("Restarted {}.".format(app_name))
def _api_url(path):
return '/marathon/v2/{}'.format(path)
def get_scheduler_host(service_name):
# Marathon mangles foldered paths as follows: "/path/to/svc" => "svc.to.path"
task_name_elems = service_name.lstrip('/').split('/')
task_name_elems.reverse()
app_name = '.'.join(task_name_elems)
ips = shakedown.get_service_ips('marathon', app_name)
if len(ips) == 0:
raise Exception('No IPs found for marathon task "{}". Available tasks are: {}'.format(
app_name, [task['name'] for task in shakedown.get_service_tasks('marathon')]))
return ips.pop()
def bump_cpu_count_config(service_name, key_name, delta=0.1):
config = get_config(service_name)
updated_cpus = float(config['env'][key_name]) + delta
config['env'][key_name] = str(updated_cpus)
update_app(service_name, config)
return updated_cpus
def bump_task_count_config(service_name, key_name, delta=1):
config = get_config(service_name)
updated_node_count = int(config['env'][key_name]) + delta
config['env'][key_name] = str(updated_node_count)
update_app(service_name, config)
def get_mesos_api_version(service_name):
return get_config(service_name)['env']['MESOS_API_VERSION']
def set_mesos_api_version(service_name, api_version, timeout=600):
'''Sets the mesos API version to the provided value, and then verifies that the scheduler comes back successfully'''
config = get_config(service_name)
config['env']['MESOS_API_VERSION'] = api_version
update_app(service_name, config, timeout=timeout)
# wait for scheduler to come back and successfully receive/process offers:
sdk_metrics.wait_for_scheduler_counter_value(service_name, 'offers.processed', 1, timeout_seconds=timeout)
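# Usage sketch (illustrative only; requires a running DC/OS cluster and the sdk_* helpers).
# The app definition uses ordinary Marathon fields, but the values are hypothetical:
#
#   app_def = {"id": "/hello-world", "cmd": "sleep 1000",
#              "cpus": 0.1, "mem": 32, "instances": 1}
#   ok, err = install_app(app_def)
#   config = get_config("hello-world")
#   config["instances"] = 2
#   update_app("hello-world", config)
#   destroy_app("hello-world")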
the-stack_0_17056
import serial
import time
ports = {
"fpga_out": "COM16",
"fpga_in": "COM15",
"laser2": "COM14",
"laser1": "COM12",
"x_motor": "COM19",
"y_motor": "COM18",
}
fpga_out = serial.Serial(ports["fpga_out"], 115200)
fpga_in = serial.Serial(ports["fpga_in"], 115200)
y_motor = serial.Serial(ports["y_motor"], 9600)
fpga_out.name = "fpga_out"
fpga_in.name = "fpga_in"
y_motor.name = "y_motor"
def send(interface, cmd, receive_interface=None):
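    # Write an ASCII command to `interface`, then poll `receive_interface` (defaults to
    # the same port) until a reply arrives; return the accumulated reply as decoded text.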
print(interface.name, cmd.encode('utf8'))
if receive_interface is None:
receive_interface = interface
interface.write(cmd.encode('utf8'))
time.sleep(0.1)
ret = b""
while receive_interface.in_waiting == 0:
pass
while receive_interface.in_waiting > 0:
ret += receive_interface.readline().rstrip() + b"\n"
return ret.decode('utf8')
res = send(y_motor, "1G\r")
print(res)
time.sleep(2)
res = send(y_motor, "1D\r")
print(res)
y_motor_pos = int(res.split('*')[1])
res = send(fpga_out, "TDIYERD\n", fpga_in)
print(res)
encoder_pos = int(res.split(' ')[1])
res = send(fpga_out, "TDIYPOS {}\n".format(encoder_pos - 100000), fpga_in)
print(res)
res = send(fpga_out, "TDIYARM2 8000 1\n", fpga_in)
print(res)
send(fpga_out, "TDIYWAIT\n")
time.sleep(1)
res = send(y_motor, "1D{}\r1G\r".format(y_motor_pos - 420000))
print(res)
time.sleep(10)
res = send(fpga_out, "TDIYERD\n", fpga_in)
print(res)
res = send(y_motor, "1D{}\r1G\r".format(y_motor_pos))
print(res)
res = send(fpga_out, "TDIYERD\n", fpga_in)
print(res)
the-stack_0_17059
from fastapi import FastAPI
from main_types import ModelName
app = FastAPI()
@app.get("/")
async def welcome():
    return {"welcome": "To have here or take away? Don't forget the cutlery!"}
@app.get("/items/{item_id}")
async def read_item(item_id: int):
return {"item_id": item_id}
@app.get("/float/{f_num}")
async def read_float(f_num: float):
return {"float_num": f_num}
@app.get("/model/{model_name}")
async def get_model(model_name: ModelName):
res = dict()
res["model_name"] = model_name
res["message"] = "Have some residuals"
if model_name == ModelName.alexnet:
res["message"] = "Deep Learning FTW!"
elif model_name.value == "lenet":
res["message"] = "LeCNN all the images"
return res
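# `ModelName` is imported from main_types (not shown in this file). A minimal definition
# consistent with the route above would look something like the following sketch
# (an assumption, not the actual main_types module):
#
#   from enum import Enum
#
#   class ModelName(str, Enum):
#       alexnet = "alexnet"
#       resnet = "resnet"
#       lenet = "lenet"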
the-stack_0_17060
# Import libraries
import pandas
import matplotlib.pyplot as graficar
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA as ACP
from sklearn.model_selection import train_test_split as separar  # renamed from the deprecated sklearn.cross_validation
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error as ECM
from sklearn.ensemble import RandomForestRegressor
# Global variable
GRAFICAR = True
juegos = pandas.read_csv("games.csv")
# Print the columns that were read
print("-"*5 + "Columnas" + "-"*5)
print(juegos.columns)
print()
# Print how many rows and columns we have
print("-"*5 + "Tamaño de DataSet (Filas, Columnas)" + "-"*5)
print(juegos.shape)
print()
# Suppose we want to predict the average rating that users
# would give to a game that has not been released yet. This information
# is found in the average_rating column.
# We make a histogram of this column to see the distribution
# of the average ratings of all games, using
# the DataSet read above and
# indexing by column, which returns an entire column.
if GRAFICAR:
graficar.title("Distribucion de puntuacion promedio")
graficar.xlabel("Puntuacion promedio")
graficar.ylabel("# de Juegos")
graficar.hist(juegos["average_rating"])
graficar.show()
graficar.clf()
# juegos[juegos["average_rating"] == 0] returns a dataframe with only the
# rows where the value of the average_rating column is 0.
# Indexing by position, we can get an entire row:
# juegos.iloc[0] returns the whole first row of the juegos dataframe,
# juegos.iloc[0,0] returns the first column of the first row of the dataframe.
print("-"*5 + "Diff entre juego con puntaje de 0 y con puntaje superior a 0" + "-"*5)
print(juegos[juegos["average_rating"] == 0].iloc[0])
print(juegos[juegos["average_rating"] > 0].iloc[0])
print()
# There must be many games with 0 user ratings,
# which is why their average rating is 0.
# This information is considered noise, so we choose to drop the games
# that have not been rated by any user.
juegos = juegos[juegos["users_rated"] > 0]
# Remove any row with missing values
juegos = juegos.dropna(axis=0)
# Distribution of the average rating
if GRAFICAR:
graficar.title("Distribucion de puntuacion promedio")
graficar.xlabel("Puntuacion promedio")
graficar.ylabel("# de Juegos")
graficar.hist(juegos["average_rating"])
graficar.show()
graficar.clf()
# Print how many rows and columns we have
print("-"*5 + "Tamaño de DataSet (Filas, Columnas)" + "-"*5)
print(juegos.shape)
print()
# Cluster analysis (clustering)
# is the task of grouping a set of objects in such a way that members
# of the same group (called a cluster) are more similar to each other, in some sense or another.
# EXPO:
# It is the main task of exploratory data mining and a common technique in statistical data
# analysis. It is also used in many fields such as machine learning, pattern recognition,
# image analysis, information search and retrieval, bioinformatics, data compression and
# computer graphics.
# One example of such a group is the set of games that had no ratings.
# We will use K-means, a clustering method where each element is assigned to the cluster
# whose mean value is closest.
# EXPO:
# K-means is a clustering method whose objective is to partition a set of n observations
# into k groups in which each observation belongs to the group with the nearest mean value.
# It is a method used in data mining.
# Create the model with 5 clusters and a random seed of 1
modelo_kmeans = KMeans(n_clusters=5, random_state=1)
# Drop all columns whose data types are not numeric
columnas_numero = juegos._get_numeric_data()
# Fit the model to the data
modelo_kmeans.fit(columnas_numero)
# Get the cluster labels
etiquetas = modelo_kmeans.labels_
# To visualize the clusters or groups, we need to reduce the number of
# columns, because each column adds one dimension to the plot,
# so we use Principal Component Analysis (ACP in Spanish, PCA in English),
# a technique for reducing the dimensionality of a dataset using the
# correlation between columns.
# Create the PCA model
acp_2 = ACP(2)
# Get the columns to plot
columnas_a_graficar = acp_2.fit_transform(columnas_numero)
if GRAFICAR:
    # Create the plot
graficar.title("Agrupacion de juegos en 5 clusters con ACP")
graficar.scatter(x=columnas_a_graficar[:,0], y=columnas_a_graficar[:,1], c=etiquetas)
graficar.show()
graficar.clf()
# Machine learning
# For this we have to decide how the error will be measured and what will be predicted.
# PREDICT -> average_rating, i.e. the average score of a game
# ERROR
# Here we keep in mind what kind of problem we are solving:
# regression & continuous variables != classification & discrete variables.
# In this case we will use the Mean Squared Error (ECM in Spanish, MSE in English)
# because it is fast to compute and gives the average of how far
# the predictions are from the real values.
# CORRELATION
# Knowing that we want to predict average_rating, the average score of a game,
# it is time to decide which columns are most relevant for this.
# For that we compute the correlation between average_rating and the rest of the columns.
print("-"*5 + "Correlacion de average_rating" + "-"*5)
print(juegos.corr()["average_rating"])
print()
# From this we can say that id and average_weight have the highest correlation.
# [ID]
# Assuming this value is assigned when a game is added,
# it is possible that newer games get better scores;
# maybe at the beginning of BoardGameGeek the users were less kind,
# or the older games simply had lower quality.
# [average_weight]
# It is possible that more complex games were rated better.
# Columns used for prediction
# We have to remove the non-numeric columns
# and the columns that are computed from the target column average_rating,
# so "bayes_average_rating", "average_rating", "type", "name" are removed.
# Get the list of columns
columnas = juegos.columns.tolist()
# Filter the columns, which gives us the predictors
columnas = [columna for columna in columnas if columna not in ["bayes_average_rating", "average_rating", "type", "name"]]
# Store the column we will try to predict
columna_a_predecir = "average_rating"
# The DataSet must be split into a training set and a test set.
# If this is not done we get overfitting, which means over-training a learning
# algorithm on a set for which it already knows the results.
# E.g.: if you learn 1+1=2 and 2+2=4, you will be able to answer with 0 errors,
# but if you are asked 3+3, you will not be able to solve it.
# That is why it is necessary to learn in a general way.
# As a rule of thumb, if the learning algorithm produces a very low error,
# it is worth checking that overfitting is not happening.
# In this case 80% of the DataSet is used for training and 20% for testing.
# Create the training and test sets
set_entrenamiento = juegos.sample(frac=0.8, random_state=1)
set_test = juegos.loc[~juegos.index.isin(set_entrenamiento.index)]
# Print the size of both sets
print("-"*5 + "Tamaño de set_entrenamiento (Filas, Columnas)" + "-"*5)
print(set_entrenamiento.shape)
print()
print("-"*5 + "Tamaño de set_test (Filas, Columnas)" + "-"*5)
print(set_test.shape)
print()
# Create the model
modelo = LinearRegression()
# Fit the model: the first DataSet is the predictors and the second is the target
modelo.fit(set_entrenamiento[columnas], set_entrenamiento[columna_a_predecir])
# Make predictions
predicciones = modelo.predict(set_test[columnas])
print("-"*5 + "Predicciones" + "-"*5)
print(predicciones)
print("-"*5 + "VS" + "-"*5)
print(juegos.tail(1)["average_rating"])
print()
# Compute the error between the predictions and the real values
print("-"*5 + "Error en prediccion" + "-"*5)
print(ECM(predicciones, set_test[columna_a_predecir]))
print()
# Plot predictions vs real values for linear regression
if GRAFICAR:
graficar.figure("lineal")
graficar.title("Regresion lineal")
graficar.xlabel("ID Juego")
graficar.ylabel("Puntuacion promedio")
graficar.scatter(set_test["id"], set_test["average_rating"], label="Real")
graficar.scatter(set_test["id"], predicciones, label="Prediccion")
graficar.legend(loc="upper left")
# END OF LINEAR REGRESSION
# Although Scikit-learn lets us use other algorithms, we will use
# random forest, which can find non-linear correlations in the data,
# something linear regression cannot do.
# E.g.: if minage (the minimum age for a game) affects the score:
# age < 5, the score is low
# age 5-10, the score is high
# age 10-15, the score is low
print("-"*5 + "Usando RANDOM FOREST" + "-"*5)
# Create the model
modelo = RandomForestRegressor(n_estimators=100, min_samples_leaf=10, random_state=1)
# Fit the model on the training data
modelo.fit(set_entrenamiento[columnas], set_entrenamiento[columna_a_predecir])
# Make the predictions
predicciones = modelo.predict(set_test[columnas])
print("-"*5 + "Predicciones" + "-"*5)
print(predicciones)
print("-"*5 + "VS" + "-"*5)
print(juegos.tail(1)["average_rating"])
print()
# Compute the error
print("-"*5 + "Error en prediccion" + "-"*5)
print(ECM(predicciones, set_test[columna_a_predecir]))
print()
# Plot predictions vs real values for random forest regression
if GRAFICAR:
graficar.figure("random")
graficar.title("Regresion Random Forest")
graficar.xlabel("ID Juego")
graficar.ylabel("Puntuacion promedio")
graficar.scatter(set_test["id"], set_test["average_rating"], label="Real")
graficar.scatter(set_test["id"], predicciones, label="Prediccion")
graficar.legend(loc="upper left")
# Show all the plots that have been created
graficar.show()
the-stack_0_17061
#!/usr/bin/python
from paillier import *
from candidate import *
from voter import *
from election_board import *
from bulletin_board import *
import sys
import os
from Tkinter import *
import traceback
def submitVote():
global userPick
global userPIN
# print voters
if str(userPIN.get()).strip() in voters and (userPick.get()) != '':
if not voters[str(userPIN.get()).strip()].voted:
u_vote = []
for c in range(len(candidates)):
v = 0
if int(userPick.get()) == c:
v = 1
u_vote.append(encrypt(eb.public_key, v))
blind_signed_vote = []
for v in u_vote:
                ## We want to blind sign each vote: blind it, have the election board sign the blinded value, then unblind the signature.
blinding_factor, blinded_msg = bs.blind(v, eb.public_signing_key)
signed = eb.blind_sign(blinded_msg)
unblinded = bs.unblind(signed, blinding_factor, eb.public_signing_key)
blind_signed_vote.append((unblinded, blinding_factor))
if not eb.has_voter_voted(str(userPIN.get().strip())):
bb.addVote(userPIN.get().strip(), u_vote, blind_signed_vote)
voters[str(userPIN.get()).strip()].voted = True
userPick = StringVar()
userPIN = StringVar()
toplevel.destroy()
def castVote():
global canCast
if canCast:
global toplevel
toplevel = Toplevel()
toplevel.geometry("600x800+200+200")
toplevel.focus_force()
label = Label(toplevel, text="Enter your voting ID", height=0, width=100)
label.pack()
e = Entry(toplevel,textvariable=userPIN)
e.pack()
for c in range(len(candidates)):
b = Radiobutton(toplevel, text=candidates[c].name, variable=userPick, value=c)
b.pack(anchor=W)
toplevel.focus_force()
b = Button(toplevel, text="Submit Vote", width=20, command=submitVote)
b.pack(side='bottom',padx=0,pady=0)
def endVoting():
global isOver
global canCast
global b
global button1
if not isOver:
isOver = True
canCast = False
e = bb.endElection()
final = ''
global resultsLabel
for candidate in e:
final += 'Number of votes for %s is %d\n'%(candidate.name, candidate.numVotes)
resultsLabel = Label(app, text=final, height=0, width=100)
resultsLabel.pack()
b.pack_forget()
button1.pack_forget()
if __name__ == "__main__":
## Get an instance of the election board
isOver = False
canCast = True
eb = ElectionBoard.Instance()
bb = BulletinBoard.Instance()
ca = CountingAuthority.Instance()
## Register voters and candidates
voters = {}
for line in open('voters.txt'):
parsed = line.strip().split(',')
voters[parsed[1].strip()] = Voter(parsed[0].strip(),parsed[1].strip())
candidates = []
for line in open("candidates.txt"):
candidates.append(Candidate(line.strip(), encrypt(eb.public_key, 0)))
eb.register_voters(voters)
eb.register_candidates(candidates)
app = Tk()
toplevel = None
app.title("Totally Secure and Legit Voting Machine 3000")
app.geometry("300x200+200+200")
userPick = StringVar()
userPIN = StringVar()
resultsLabel = None
b = Button(app, text="End Voting", width=20, command=endVoting)
button1 = Button(app, text="Cast Your Vote", width=20, command=castVote)
b.pack(side='bottom',padx=0,pady=0)
button1.pack(side='bottom',padx=5,pady=5)
app.mainloop()
the-stack_0_17062
log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=20, metric='mAP', key_indicator='AP')
optimizer = dict(
type='Adam',
lr=0.0015,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[200, 260])
total_epochs = 100 # 300
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
channel_cfg = dict(
num_output_channels=14,
dataset_joints=14,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
],
inference_channel=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13])
data_cfg = dict(
image_size=512,
base_size=256,
base_sigma=2,
heatmap_size=[128, 256],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
num_scales=2,
scale_aware_sigma=False,
)
# model settings
model = dict(
type='BottomUp',
pretrained='https://download.openmmlab.com/mmpose/'
'pretrain_models/hrnet_w32-36af842e.pth',
backbone=dict(
type='HRNet',
in_channels=3,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))),
),
keypoint_head=dict(
type='BottomUpHigherResolutionHead',
in_channels=32,
num_joints=14,
tag_per_joint=True,
extra=dict(final_conv_kernel=1, ),
num_deconv_layers=1,
num_deconv_filters=[32],
num_deconv_kernels=[4],
num_basic_blocks=4,
cat_output=[True],
with_ae_loss=[True, False]),
train_cfg=dict(
num_joints=channel_cfg['dataset_joints'],
img_size=data_cfg['image_size']),
test_cfg=dict(
num_joints=channel_cfg['dataset_joints'],
max_num_people=30,
scale_factor=[1],
with_heatmaps=[True, True],
with_ae=[True, False],
project2image=True,
nms_kernel=5,
nms_padding=2,
tag_per_joint=True,
detection_threshold=0.1,
tag_threshold=1,
use_detection_val=True,
ignore_too_much=False,
adjust=True,
refine=True,
flip_test=True),
loss_pose=dict(
type='MultiLossFactory',
num_joints=14,
num_stages=2,
ae_loss_type='exp',
with_ae_loss=[True, False],
push_loss_factor=[0.001, 0.001],
pull_loss_factor=[0.001, 0.001],
with_heatmaps_loss=[True, True],
heatmaps_loss_factor=[1.0, 1.0],
),
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='BottomUpRandomAffine',
rot_factor=30,
scale_factor=[0.75, 1.5],
scale_type='short',
trans_factor=40),
dict(type='BottomUpRandomFlip', flip_prob=0.5),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='BottomUpGenerateTarget',
sigma=2,
max_num_people=30,
),
dict(
type='Collect',
keys=['img', 'joints', 'targets', 'masks'],
meta_keys=[]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='BottomUpGetImgSize', test_scale_factor=[1]),
dict(
type='BottomUpResizeAlign',
transforms=[
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'aug_data', 'test_scale_factor', 'base_size',
'center', 'scale', 'flip_index'
]),
]
test_pipeline = val_pipeline
data_root = 'data/aic'
data = dict(
samples_per_gpu=4, # 24
workers_per_gpu=2,
train=dict(
type='BottomUpAicDataset',
ann_file=f'{data_root}/annotations/aic_train.json',
img_prefix=f'{data_root}/ai_challenger_keypoint_train_20170902/'
'keypoint_train_images_20170902/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='BottomUpAicDataset',
ann_file=f'{data_root}/annotations/aic_val.json',
img_prefix=f'{data_root}/ai_challenger_keypoint_validation_20170911/'
'keypoint_validation_images_20170911/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='BottomUpAicDataset',
ann_file=f'{data_root}/annotations/aic_val.json',
img_prefix=f'{data_root}/ai_challenger_keypoint_validation_20170911/'
'keypoint_validation_images_20170911/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
# data_root3 = 'data/image_keypoint'
# data_root = 'data/aic'
# data = dict(
# samples_per_gpu=8, # 24
# workers_per_gpu=2,
# train=dict(
# type='BottomUpAicDataset',
# ann_file=f'{data_root}/annotations/aic_train.json',
# img_prefix=f'{data_root}/ai_challenger_keypoint_train_20170902/'
# 'keypoint_train_images_20170902/',
# data_cfg=data_cfg,
# pipeline=train_pipeline),
# val=dict(
# type='BottomUpAicDataset',
# ann_file=f'{data_root3}/annotations/train2.json',
# img_prefix=f'{data_root3}/images/',
# data_cfg=data_cfg,
# pipeline=val_pipeline),
# test=dict(
# type='BottomUpAicDataset',
# ann_file=f'{data_root3}/annotations/train2.json',
# img_prefix=f'{data_root3}/images/',
# data_cfg=data_cfg,
# pipeline=val_pipeline),
# )
the-stack_0_17064
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
import os
import numpy as np
from numpy.testing import assert_allclose
from astropy.tests.helper import pytest
from astropy.table import Table
from fermipy.tests.utils import requires_dependency, requires_st_version
from fermipy import spectrum
try:
from fermipy import gtanalysis
except ImportError:
pass
# Skip tests in this file if the Fermi Science Tools (ST) aren't available
pytestmark = requires_dependency('Fermi ST')
@pytest.fixture(scope='module')
def create_draco_analysis(request, tmpdir_factory):
path = tmpdir_factory.mktemp('draco')
url = 'https://raw.githubusercontent.com/fermiPy/fermipy-extras/master/data/fermipy_test_draco.tar.gz'
outfile = path.join('fermipy_test_draco.tar.gz')
dirname = path.join()
os.system('curl -o %s -OL %s' % (outfile, url))
os.system('cd %s;tar xzf %s' % (dirname, outfile))
request.addfinalizer(lambda: path.remove(rec=1))
cfgfile = path.join('fermipy_test_draco', 'config.yaml')
gta = gtanalysis.GTAnalysis(str(cfgfile))
gta.setup()
return gta
@pytest.fixture(scope='module')
def create_pg1553_analysis(request, tmpdir_factory):
path = tmpdir_factory.mktemp('pg1553')
url = 'https://raw.githubusercontent.com/fermiPy/fermipy-extras/master/data/fermipy_test_pg1553.tar.gz'
outfile = path.join('fermipy_test_pg1553.tar.gz')
dirname = path.join()
os.system('curl -o %s -OL %s' % (outfile, url))
os.system('cd %s;tar xzf %s' % (dirname, outfile))
ft2_files = ['P8_P302_TRANSIENT020E_239557414_242187214_ft2.fits',
'P8_P302_TRANSIENT020E_247446814_250076614_ft2.fits',
'P8_P302_TRANSIENT020E_255336214_257966014_ft2.fits',
'P8_P302_TRANSIENT020E_242187214_244817014_ft2.fits',
'P8_P302_TRANSIENT020E_250076614_252706414_ft2.fits',
'P8_P302_TRANSIENT020E_257966014_260595814_ft2.fits',
'P8_P302_TRANSIENT020E_244817014_247446814_ft2.fits',
'P8_P302_TRANSIENT020E_252706414_255336214_ft2.fits',
'P8_P302_TRANSIENT020E_260595814_263225614_ft2.fits']
for f in ft2_files:
url = 'https://raw.githubusercontent.com/fermiPy/fermipy-extras/master/data/ft2/%s' % f
outfile = path.join('fermipy_test_pg1553', f)
os.system('curl -o %s -OL %s' % (outfile, url))
#request.addfinalizer(lambda: path.remove(rec=1))
cfgfile = path.join('fermipy_test_pg1553', 'config.yaml')
gta = gtanalysis.GTAnalysis(str(cfgfile))
gta.setup()
return gta
def test_gtanalysis_setup(create_draco_analysis):
gta = create_draco_analysis
gta.print_roi()
def test_print_model(create_draco_analysis):
gta = create_draco_analysis
gta.print_model()
def test_print_params(create_draco_analysis):
gta = create_draco_analysis
gta.print_params(True)
def test_gtanalysis_write_roi(create_draco_analysis):
gta = create_draco_analysis
gta.write_roi('test', make_plots=True)
def test_gtanalysis_load_roi(create_draco_analysis):
gta = create_draco_analysis
gta.load_roi('fit0')
src = gta.roi['3FGL J1725.3+5853']
prefactor = src.spectral_pars['Prefactor']
index = src.spectral_pars['Index']
assert_allclose(prefactor['value'] * prefactor['scale'],
1.6266779e-13, rtol=1E-3)
assert_allclose(index['value'] * index['scale'], -2.17892, rtol=1E-3)
assert_allclose(src['flux'], 4.099648e-10, rtol=1E-3)
assert_allclose(src['flux_err'], np.nan, rtol=1E-3)
assert_allclose(src['eflux'], 9.76762e-07, rtol=1E-3)
assert_allclose(src['eflux_err'], np.nan, rtol=1E-3)
gta.load_roi('fit1')
src = gta.roi['3FGL J1725.3+5853']
prefactor = src.spectral_pars['Prefactor']
index = src.spectral_pars['Index']
assert_allclose(prefactor['value'] *
prefactor['scale'], 2.0878036e-13, rtol=1E-3)
assert_allclose(index['value'] * index['scale'], -2.053723, rtol=1E-3)
assert_allclose(src['flux'], 5.377593e-10, rtol=1E-3)
assert_allclose(src['flux_err'], 6.40203e-11, rtol=1E-3)
assert_allclose(src['eflux'], 1.34617749e-06, rtol=1E-3)
assert_allclose(src['eflux_err'], 1.584814e-07, rtol=1E-3)
assert_allclose(src['ts'], 200.604, rtol=1E-3)
assert_allclose(src['npred'], 170.258, rtol=1E-3)
def test_gtanalysis_optimize(create_draco_analysis):
gta = create_draco_analysis
gta.load_roi('fit0')
gta.optimize()
def test_gtanalysis_fit(create_draco_analysis):
gta = create_draco_analysis
gta.load_roi('fit0')
gta.free_sources(distance=3.0, pars='norm')
gta.write_xml('fit_test')
fit_output0 = gta.fit(optimizer='MINUIT')
gta.load_xml('fit_test')
fit_output1 = gta.fit(optimizer='NEWMINUIT')
assert (np.abs(fit_output0['loglike'] - fit_output1['loglike']) < 0.01)
@requires_st_version('11-04-00')
def test_gtanalysis_fit_newton(create_draco_analysis):
gta = create_draco_analysis
gta.load_roi('fit0')
gta.free_sources(distance=3.0, pars='norm')
gta.write_xml('fit_test')
fit_output0 = gta.fit(optimizer='MINUIT')
gta.load_xml('fit_test')
fit_output1 = gta.fit(optimizer='NEWTON')
assert (np.abs(fit_output0['loglike'] - fit_output1['loglike']) < 0.01)
def test_gtanalysis_tsmap(create_draco_analysis):
gta = create_draco_analysis
gta.load_roi('fit1')
gta.tsmap(model={}, make_plots=True)
@requires_st_version('11-04-00')
def test_gtanalysis_tscube(create_draco_analysis):
gta = create_draco_analysis
gta.load_roi('fit1')
gta.tscube(model={}, make_plots=True)
def test_gtanalysis_residmap(create_draco_analysis):
gta = create_draco_analysis
gta.load_roi('fit1')
gta.residmap(model={}, make_plots=True)
def test_gtanalysis_find_sources(create_draco_analysis):
gta = create_draco_analysis
gta.load_roi('fit1')
np.random.seed(1)
src0 = {'SpatialModel': 'PointSource',
'Index': 2.0, 'offset_glon': 0.0, 'offset_glat': 2.0,
'Prefactor': 1E-12}
src1 = {'SpatialModel': 'PointSource',
'Index': 2.0, 'offset_glon': 0.0, 'offset_glat': -2.0,
'Prefactor': 1E-12}
gta.add_source('src0', src0)
gta.add_source('src1', src1)
gta.simulate_roi()
src0 = gta.delete_source('src0')
src1 = gta.delete_source('src1')
gta.find_sources()
diff_sources = [s.name for s in gta.roi.sources if s.diffuse]
newsrcs0 = gta.get_sources(skydir=src0.skydir, distance=0.3,
exclude=diff_sources)
newsrcs1 = gta.get_sources(skydir=src1.skydir, distance=0.3,
exclude=diff_sources)
assert(len(newsrcs0) == 1)
assert(len(newsrcs1) == 1)
newsrc0 = newsrcs0[0]
newsrc1 = newsrcs1[0]
sep0 = src0.skydir.separation(newsrc0.skydir).deg
sep1 = src1.skydir.separation(newsrc1.skydir).deg
assert(sep0 < newsrc0['pos_r99'])
assert(sep1 < newsrc1['pos_r99'])
flux_diff0 = (np.abs(src0['flux'] - newsrc0['flux']) /
newsrc0['flux_err'])
flux_diff1 = (np.abs(src1['flux'] - newsrc1['flux']) /
newsrc1['flux_err'])
assert(flux_diff0 < 3.0)
assert(flux_diff1 < 3.0)
def test_gtanalysis_sed(create_draco_analysis):
gta = create_draco_analysis
gta.load_roi('fit1')
np.random.seed(1)
gta.simulate_roi()
params = gta.roi['draco'].params
prefactor = 3E-12
index = 1.9
scale = params['Scale']['value']
emin = gta.energies[:-1]
emax = gta.energies[1:]
flux_true = spectrum.PowerLaw.eval_flux(emin, emax,
[prefactor, -index], scale)
gta.simulate_source({'SpatialModel': 'PointSource',
'Index': index,
'Scale': scale,
'Prefactor': prefactor})
gta.free_source('draco')
gta.fit()
o = gta.sed('draco', make_plots=True)
flux_resid = (flux_true - o['flux']) / o['flux_err']
assert_allclose(flux_resid, 0, atol=3.0)
params = gta.roi['draco'].params
index_resid = (-params['Index']['value'] - index) / \
params['Index']['error']
assert_allclose(index_resid, 0, atol=3.0)
prefactor_resid = (params['Prefactor']['value'] -
prefactor) / params['Prefactor']['error']
assert_allclose(prefactor_resid, 0, atol=3.0)
gta.simulate_roi(restore=True)
def test_gtanalysis_extension_gaussian(create_draco_analysis):
gta = create_draco_analysis
gta.simulate_roi(restore=True)
gta.load_roi('fit1')
np.random.seed(1)
spatial_width = 0.5
gta.simulate_source({'SpatialModel': 'RadialGaussian',
'SpatialWidth': spatial_width,
'Prefactor': 3E-12})
o = gta.extension('draco',
width=[0.4, 0.45, 0.5, 0.55, 0.6],
spatial_model='RadialGaussian')
assert_allclose(o['ext'], spatial_width, atol=0.1)
gta.simulate_roi(restore=True)
def test_gtanalysis_localization(create_draco_analysis):
gta = create_draco_analysis
gta.simulate_roi(restore=True)
gta.load_roi('fit1')
np.random.seed(1)
src_dict = {'SpatialModel': 'PointSource',
'Prefactor': 4E-12,
'glat': 36.0, 'glon': 86.0}
gta.simulate_source(src_dict)
src_dict['glat'] = 36.05
src_dict['glon'] = 86.05
gta.add_source('testloc', src_dict, free=True)
gta.fit()
result = gta.localize('testloc', nstep=4, dtheta_max=0.5, update=True,
make_plots=True)
assert result['fit_success'] is True
assert_allclose(result['glon'], 86.0, atol=0.02)
assert_allclose(result['glat'], 36.0, atol=0.02)
gta.delete_source('testloc')
gta.simulate_roi(restore=True)
def test_gtanalysis_lightcurve(create_pg1553_analysis):
gta = create_pg1553_analysis
gta.load_roi('fit1')
o = gta.lightcurve('3FGL J1555.7+1111', nbins=2,
free_radius=3.0)
rtol = 0.01
flux = np.array([2.917568e-08,
2.359114e-08])
flux_err = np.array([1.931940e-09,
1.822694e-09])
ts = np.array([1463.066,
1123.160])
assert_allclose(o['flux'], flux, rtol=rtol)
assert_allclose(o['flux_err'], flux_err, rtol=rtol)
assert_allclose(o['ts'], ts, rtol=rtol)
tab = Table.read(os.path.join(gta.workdir, o['file']))
assert_allclose(tab['flux'], flux, rtol=rtol)
assert_allclose(tab['flux_err'], flux_err, rtol=rtol)
assert_allclose(tab['ts'], ts, rtol=rtol)
the-stack_0_17065
""" OpenCV Backend RTSP Client """
import cv2
from io import BytesIO
from PIL import Image
from threading import Thread
class Client:
""" Maintain live RTSP feed without buffering. """
_stream = None
def __init__(self, rtsp_server_uri, verbose = False):
"""
rtsp_server_uri: the path to an RTSP server. should start with "rtsp://"
verbose: print log or not
"""
self.rtsp_server_uri = rtsp_server_uri
self._verbose = verbose
if isinstance(rtsp_server_uri,str) and 'picam' in rtsp_server_uri:
self.__class__ = PicamVideoFeed
_pc = PicamVideoFeed()
self.__dict__.update(_pc.__dict__)
self._bg_run = False
self.open()
def __enter__(self,*args,**kwargs):
""" Returns the object which later will have __exit__ called.
This relationship creates a context manager. """
return self
def __exit__(self, type=None, value=None, traceback=None):
""" Together with __enter__, allows support for `with-` clauses. """
self.close()
def open(self):
if self.isOpened():
return
self._stream = cv2.VideoCapture(self.rtsp_server_uri)
if self._verbose:
print("Connected to video source {}.".format(self.rtsp_server_uri))
self._bg_run = True
t = Thread(target=self._update, args=())
t.daemon = True
t.start()
self._bgt = t
return self
def close(self):
""" signal background thread to stop. release CV stream """
self._bg_run = False
self._bgt.join()
if self._verbose:
print("Disconnected from {}".format(self.rtsp_server_uri))
def isOpened(self):
""" return true if stream is opened and being read, else ensure closed """
try:
return (self._stream is not None) and self._stream.isOpened() and self._bg_run
except:
self.close()
return False
def _update(self):
while self.isOpened():
(grabbed, frame) = self._stream.read()
if not grabbed:
self._bg_run = False
else:
self._queue = frame
self._stream.release()
def read(self,raw=False):
""" Retrieve most recent frame and convert to PIL. Return unconverted with raw=True. """
try:
if raw:
return self._queue
else:
return Image.fromarray(cv2.cvtColor(self._queue, cv2.COLOR_BGR2RGB))
except:
return None
def preview(self):
""" Blocking function. Opens OpenCV window to display stream. """
win_name = 'RTSP'
cv2.namedWindow(win_name, cv2.WINDOW_AUTOSIZE)
cv2.moveWindow(win_name,20,20)
while(self.isOpened()):
cv2.imshow(win_name,self.read(raw=True))
if cv2.waitKey(30) == ord('q'): # wait 30 ms for 'q' input
break
cv2.waitKey(1)
cv2.destroyAllWindows()
cv2.waitKey(1)
class PicamVideoFeed(Client):
def __init__(self):
import picamera
self.cam = picamera.PiCamera()
def preview(self,*args,**kwargs):
""" Blocking function. Opens OpenCV window to display stream. """
self.cam.start_preview(*args,**kwargs)
def open(self):
pass
def isOpened(self):
return True
def read(self):
"""https://picamera.readthedocs.io/en/release-1.13/recipes1.html#capturing-to-a-pil-image"""
stream = BytesIO()
self.cam.capture(stream, format='png')
# "Rewind" the stream to the beginning so we can read its content
stream.seek(0)
return Image.open(stream)
def close(self):
pass
def stop(self):
pass
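# Usage sketch (illustrative only): the URL is a placeholder and must point at a
# reachable RTSP server for frames to arrive.
if __name__ == "__main__":
    with Client("rtsp://example.com/stream", verbose=True) as client:
        frame = client.read()   # PIL.Image, or None if no frame has been received yet
        if frame is not None:
            frame.save("frame.png")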
the-stack_0_17066
"""
Command-line usage support.
"""
import argparse
from . import asm2cfg
def main():
""" Command-line entry point to the program. """
parser = argparse.ArgumentParser(
description='Program to draw dot control-flow graph from GDB disassembly for a function.',
epilog='If function CFG rendering takes too long, try to skip function calls with -c flag.'
)
parser.add_argument('assembly_file',
help='File to contain one function assembly dump')
parser.add_argument('-c', '--skip-calls', action='store_true',
help='Skip function calls from dividing code to blocks')
parser.add_argument('--target', choices=['x86', 'arm'], default='x86',
help='Specify target platform for assembly')
parser.add_argument('-v', '--view', action='store_true',
help='View as a dot graph instead of saving to a file')
args = parser.parse_args()
print('If function CFG rendering takes too long, try to skip function calls with -c flag')
lines = asm2cfg.read_lines(args.assembly_file)
function_name, basic_blocks = asm2cfg.parse_lines(lines, args.skip_calls, args.target)
asm2cfg.draw_cfg(function_name, basic_blocks, args.view)
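# Typical invocation (sketch; the console-script name and the input file name are assumptions):
#
#   asm2cfg my_function.disasm --target x86 -c -v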
the-stack_0_17067
import os
import shutil
import time
from fairseq import pyrouge
def rouge_results_to_str(results_dict):
return ">> ROUGE-F(1/2/3/l): {:.2f}/{:.2f}/{:.2f}\nROUGE-R(1/2/3/l): {:.2f}/{:.2f}/{:.2f}\n".format(
results_dict["rouge_1_f_score"] * 100,
results_dict["rouge_2_f_score"] * 100,
results_dict["rouge_l_f_score"] * 100,
results_dict["rouge_1_recall"] * 100,
results_dict["rouge_2_recall"] * 100,
results_dict["rouge_l_recall"] * 100
# ,results_dict["rouge_su*_f_score"] * 100
)
def test_rouge(temp_dir, cand, ref):
candidates = [line.strip() for line in open(cand, encoding='utf-8')]
references = [line.strip() for line in open(ref, encoding='utf-8')]
print(len(candidates))
print(len(references))
assert len(candidates) == len(references)
cnt = len(candidates)
current_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
tmp_dir = os.path.join(temp_dir, "rouge-tmp-{}".format(current_time))
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
os.mkdir(tmp_dir + "/candidate")
os.mkdir(tmp_dir + "/reference")
try:
for i in range(cnt):
if len(references[i]) < 1:
continue
with open(tmp_dir + "/candidate/cand.{}.txt".format(i), "w",
encoding="utf-8") as f:
f.write(candidates[i])
with open(tmp_dir + "/reference/ref.{}.txt".format(i), "w",
encoding="utf-8") as f:
f.write(references[i])
r = pyrouge.Rouge155(temp_dir=temp_dir)
r.model_dir = tmp_dir + "/reference/"
r.system_dir = tmp_dir + "/candidate/"
r.model_filename_pattern = 'ref.#ID#.txt'
r.system_filename_pattern = r'cand.(\d+).txt'
rouge_results = r.convert_and_evaluate()
print(rouge_results)
results_dict = r.output_to_dict(rouge_results)
finally:
pass
if os.path.isdir(tmp_dir):
shutil.rmtree(tmp_dir)
return results_dict
|
the-stack_0_17068
|
from osgeo import ogr


def poly_to_line_layer(ds, poly_name, line_name):
"""Creates a line layer from a polygon layer."""
# Delete the line layer if it exists.
if ds.GetLayer(line_name):
ds.DeleteLayer(line_name)
# Get the polygon layer and its spatial reference.
poly_lyr = ds.GetLayer(poly_name)
sr = poly_lyr.GetSpatialRef()
# Create a line layer with the same SR as the polygons
# and copy the field definitions from the polygons to
# the line layer.
line_lyr = ds.CreateLayer(line_name, sr, ogr.wkbLineString)
line_lyr.CreateFields(poly_lyr.schema)
# Create a feature to use over and over.
line_feat = ogr.Feature(line_lyr.GetLayerDefn())
# Loop through all of the polygons.
for poly_feat in poly_lyr:
# Copy the attribute values from the polygon to the
# new feature.
atts = poly_feat.items()
for fld_name in atts.keys():
line_feat.SetField(fld_name, atts[fld_name])
# Loop through the rings in the polygon.
poly_geom = poly_feat.geometry()
for i in range(poly_geom.GetGeometryCount()):
ring = poly_geom.GetGeometryRef(i)
# Create a new line using the ring's vertices.
line_geom = ogr.Geometry(ogr.wkbLineString)
for coords in ring.GetPoints():
line_geom.AddPoint(*coords)
# Insert the new line feature.
line_feat.SetGeometry(line_geom)
line_lyr.CreateFeature(line_feat)
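# Minimal usage sketch (file and layer names are illustrative assumptions):
#
#   ds = ogr.Open('parcels.gpkg', 1)  # open the datasource in update mode
#   poly_to_line_layer(ds, 'parcels', 'parcel_outlines')
#   del ds                            # flush and close the datasource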
|
the-stack_0_17069
|
import ctypes
import os
import pkgutil
from lizard import LOG, PROGRAM_DATA_DIRNAME
from lizard import user_prog, util
def check_cpus():
"""
check CPU information
:returns: dict with CPU info
"""
data = {}
lscpu_out = util.subp(["lscpu"])[0]
    for k, v in [l.split(':', 1) for l in lscpu_out.splitlines() if ':' in l]:
data[k.strip()] = v.strip()
return {'max_threads': int(data['CPU(s)']), 'name': data['Model name']}
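# Example of the dict returned by check_cpus() (values are illustrative only):
#
#   {'max_threads': 8, 'name': 'Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30GHz'}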
class GPUProps(ctypes.Structure):
"""GPU properties struct"""
_fields_ = [
('gpu_index', ctypes.c_int),
('comp_level_major', ctypes.c_int),
('comp_level_minor', ctypes.c_int),
('sm_count', ctypes.c_int),
('max_sm_threads', ctypes.c_int),
('max_sm_blocks', ctypes.c_int),
('max_block_size', ctypes.c_int),
('max_total_threads', ctypes.c_int),
('max_total_blocks', ctypes.c_int),
('name', ctypes.c_char * 256),
]
def setup_cuda_detect(args, tmpdir):
"""
set up CUDA detect program
:args: parsed cmdline args
:tmpdir: temporary directory
:returns: wrapped program
"""
prog_dir = os.path.join(tmpdir, 'hw_detect')
os.mkdir(prog_dir)
def load_resource_to_prog_dir(fname, resource_dir='hw_discovery'):
resource_path = os.path.join(PROGRAM_DATA_DIRNAME, resource_dir, fname)
data = pkgutil.get_data('lizard', resource_path)
path = os.path.join(prog_dir, fname)
with open(path, 'wb') as fp:
fp.write(data)
return data
for fname in ('kernel.cu', 'program.h'):
load_resource_to_prog_dir(fname)
conf_fname = 'config.json'
data_file = os.path.join(prog_dir, conf_fname)
conf_raw = load_resource_to_prog_dir(conf_fname)
checksum = util.checksum(conf_raw)
program = user_prog.UserProg(
'Hardware Discovery', checksum, data_file, {}, build_dir=prog_dir)
program.build(
cuda_bin=args.bin, include_path=args.include, unpack=False,
set_compute_level=False)
so_path = os.path.join(prog_dir, 'user_program_cuda.so')
wrapper = ctypes.cdll.LoadLibrary(so_path)
wrapper.get_num_gpus.restype = ctypes.c_int
wrapper.get_gpu_data.argtypes = [ctypes.c_int, ctypes.POINTER(GPUProps)]
return wrapper
def get_reasonable_block_size(props, size_mult=32):
"""
get reasonable cuda block size
:props: gpu properties dict
:size_mult: block size multiple
:returns: reasonable block size
"""
max_reasonable_size = props['max_block_size']
min_reasonable_size = props['max_sm_threads'] / props['max_sm_blocks']
avg_reasonable_size = (max_reasonable_size + min_reasonable_size) / 2
reasonable_block_size = int(avg_reasonable_size/size_mult) * size_mult
LOG.debug('Using CUDA block size: %s', reasonable_block_size)
return reasonable_block_size
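# Worked example with hypothetical GPU properties: max_block_size=1024,
# max_sm_threads=2048 and max_sm_blocks=16 give a minimum of 2048 / 16 = 128,
# an average of (1024 + 128) / 2 = 576, and rounding down to a multiple of 32
# leaves a block size of 576.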
def check_gpus(args, tmpdir):
"""
check for CUDA capable GPUs
:args: parsed cmdline args
:tmpdir: temporary directory
:returns: dict with GPU info
"""
if args.no_gpu:
LOG.warning("Not scanning available gpus, running programs will fail")
return {'num_gpus': 0, 'gpu_info': []}
LOG.info('Checking CUDA build system')
program = setup_cuda_detect(args, tmpdir)
res = {
'num_gpus': program.get_num_gpus(),
'gpu_info': [],
}
for gpu_index in range(res['num_gpus']):
props = GPUProps()
program.get_gpu_data(gpu_index, ctypes.byref(props))
gpu_info = {
'gpu_index': props.gpu_index,
'comp_level_major': props.comp_level_major,
'comp_level_minor': props.comp_level_minor,
'sm_count': props.sm_count,
'max_sm_threads': props.max_sm_threads,
'max_sm_blocks': props.max_sm_blocks,
'max_block_size': props.max_block_size,
'max_total_threads': props.max_total_threads,
'max_total_blocks': props.max_total_blocks,
'name': props.name.decode(),
}
gpu_info['reasonable_block_size'] = get_reasonable_block_size(gpu_info)
res['gpu_info'].append(gpu_info)
return res
def scan_hardware(args, tmpdir):
"""
scan system hardware
:args: parsed cmdline args
:tmpdir: temporary directory
:returns: dict with hardware info
"""
hardware = {
'CPU': check_cpus(),
'GPU': check_gpus(args, tmpdir),
}
LOG.debug('hardware scan found: %s', hardware)
return hardware
|
the-stack_0_17070
|
from .WidgetRedirector import WidgetRedirector
from Delegator import Delegator
class Percolator:
def __init__(self, text):
# XXX would be nice to inherit from Delegator
self.text = text
self.redir = WidgetRedirector(text)
self.top = self.bottom = Delegator(text)
self.bottom.insert = self.redir.register("insert", self.insert)
self.bottom.delete = self.redir.register("delete", self.delete)
self.filters = []
def close(self):
while self.top is not self.bottom:
self.removefilter(self.top)
self.top = None
self.bottom.setdelegate(None); self.bottom = None
self.redir.close(); self.redir = None
self.text = None
def insert(self, index, chars, tags=None):
# Could go away if inheriting from Delegator
self.top.insert(index, chars, tags)
def delete(self, index1, index2=None):
# Could go away if inheriting from Delegator
self.top.delete(index1, index2)
def insertfilter(self, filter):
# Perhaps rename to pushfilter()?
assert isinstance(filter, Delegator)
assert filter.delegate is None
filter.setdelegate(self.top)
self.top = filter
def removefilter(self, filter):
# XXX Perhaps should only support popfilter()?
assert isinstance(filter, Delegator)
assert filter.delegate is not None
f = self.top
if f is filter:
self.top = filter.delegate
filter.setdelegate(None)
else:
while f.delegate is not filter:
assert f is not self.bottom
f.resetcache()
f = f.delegate
f.setdelegate(filter.delegate)
filter.setdelegate(None)
def _percolator(parent):
import tkinter as tk
import re
class Tracer(Delegator):
def __init__(self, name):
self.name = name
Delegator.__init__(self, None)
def insert(self, *args):
print(self.name, ": insert", args)
self.delegate.insert(*args)
def delete(self, *args):
print(self.name, ": delete", args)
self.delegate.delete(*args)
root = tk.Tk()
root.title("Test Percolator")
width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
root.geometry("+%d+%d"%(x, y + 150))
text = tk.Text(root)
p = Percolator(text)
t1 = Tracer("t1")
t2 = Tracer("t2")
def toggle1():
if var1.get() == 0:
var1.set(1)
p.insertfilter(t1)
elif var1.get() == 1:
var1.set(0)
p.removefilter(t1)
def toggle2():
if var2.get() == 0:
var2.set(1)
p.insertfilter(t2)
elif var2.get() == 1:
var2.set(0)
p.removefilter(t2)
text.pack()
var1 = tk.IntVar()
cb1 = tk.Checkbutton(root, text="Tracer1", command=toggle1, variable=var1)
cb1.pack()
var2 = tk.IntVar()
cb2 = tk.Checkbutton(root, text="Tracer2", command=toggle2, variable=var2)
cb2.pack()
root.mainloop()
if __name__ == "__main__":
from MrPython.idle_test.htest import run
run(_percolator)
|
the-stack_0_17074
|
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rlib.rarithmetic import widen
from rpython.rlib.unroll import unrolling_iterable
from rpython.rlib.objectmodel import (specialize, import_from_mixin)
from pypy.interpreter.error import oefmt
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.function import descr_function_get
from pypy.interpreter.typedef import TypeDef, interp2app
from pypy.objspace.std.typeobject import W_TypeObject
from pypy.module._hpy_universal import llapi
from .state import State
HPySlot_Slot = llapi.cts.gettype('HPySlot_Slot')
HPy_RichCmpOp = llapi.cts.gettype('HPy_RichCmpOp')
_WRAPPER_CACHE = {}
class W_SlotWrapper(W_Root):
_immutable_fields_ = ["slot"]
def __init__(self, slot, method_name, cfuncptr, w_objclass):
self.slot = slot
self.name = method_name
self.cfuncptr = cfuncptr
self.w_objclass = w_objclass
def check_args(self, space, __args__, arity):
length = len(__args__.arguments_w)
if length != arity:
raise oefmt(space.w_TypeError, "expected %d arguments, got %d",
arity, length)
if __args__.keywords:
raise oefmt(space.w_TypeError,
"wrapper %s doesn't take any keyword arguments",
self.name)
def check_argsv(self, space, __args__, min, max):
length = len(__args__.arguments_w)
if not min <= length <= max:
raise oefmt(space.w_TypeError, "expected %d-%d arguments, got %d",
min, max, length)
if __args__.keywords:
raise oefmt(space.w_TypeError,
"wrapper %s doesn't take any keyword arguments",
self.name)
def descr_call(self, space, __args__):
# XXX: basically a copy of cpyext's W_PyCMethodObject.descr_call()
if len(__args__.arguments_w) == 0:
w_objclass = self.w_objclass
assert isinstance(w_objclass, W_TypeObject)
raise oefmt(space.w_TypeError,
"descriptor '%8' of '%s' object needs an argument",
self.name, self.w_objclass.getname(space))
w_instance = __args__.arguments_w[0]
# XXX: needs a stricter test
if not space.isinstance_w(w_instance, self.w_objclass):
w_objclass = self.w_objclass
assert isinstance(w_objclass, W_TypeObject)
raise oefmt(space.w_TypeError,
"descriptor '%8' requires a '%s' object but received a '%T'",
self.name, w_objclass.name, w_instance)
#
return self.call(space, __args__)
def call(self, space, __args__):
raise oefmt(space.w_RuntimeError, "bad slot wrapper")
W_SlotWrapper.typedef = TypeDef(
'slot_wrapper',
__get__ = interp2app(descr_function_get),
__call__ = interp2app(W_SlotWrapper.descr_call),
)
W_SlotWrapper.typedef.acceptable_as_base_class = False
# ~~~~~~~~~~ concrete W_SlotWrapper subclasses ~~~~~~~~~~~~~
# these are the equivalent of the various functions wrap_* inside CPython's typeobject.c
class W_wrap_binaryfunc(object):
def call(self, space, __args__):
func = llapi.cts.cast("HPyFunc_binaryfunc", self.cfuncptr)
self.check_args(space, __args__, 2)
w_self = __args__.arguments_w[0]
w_other = __args__.arguments_w[1]
with self.handles.using(w_self, w_other) as (h_self, h_other):
h_result = func(self.ctx, h_self, h_other)
if not h_result:
space.fromcache(State).raise_current_exception()
return self.handles.consume(h_result)
@specialize.memo()
def get_cmp_wrapper_cls(handles, methname, OP):
try:
return _WRAPPER_CACHE[handles, methname]
except KeyError:
pass
class wrapper(W_SlotWrapper):
def call(self, space, __args__):
func = llapi.cts.cast("HPyFunc_richcmpfunc", self.cfuncptr)
self.check_args(space, __args__, 2)
w_self = __args__.arguments_w[0]
w_other = __args__.arguments_w[1]
with handles.using(w_self, w_other) as (h_self, h_other):
# rffi doesn't allow casting to an enum, we need to use int
# instead
h_result = func(
handles.ctx, h_self, h_other, rffi.cast(rffi.INT_real, OP))
if not h_result:
space.fromcache(State).raise_current_exception()
return handles.consume(h_result)
suffix = '_d' if handles.is_debug else '_u'
wrapper.__name__ = 'W_wrap_richcmp%s%s' % (methname, suffix)
_WRAPPER_CACHE[handles, methname] = wrapper
return wrapper
CMP_OPNAMES = ['eq', 'ne', 'lt', 'le', 'gt', 'ge']
CMP_ENUM_VALUES = [
getattr(HPy_RichCmpOp, 'HPy_%s' % opname.upper()) for opname in CMP_OPNAMES]
CMP_SLOTS = unrolling_iterable([
('__%s__' % opname, opval)
for opname, opval in zip(CMP_OPNAMES, CMP_ENUM_VALUES)])
class W_wrap_unaryfunc(object):
def call(self, space, __args__):
func = llapi.cts.cast("HPyFunc_unaryfunc", self.cfuncptr)
self.check_args(space, __args__, 1)
w_self = __args__.arguments_w[0]
with self.handles.using(w_self) as h_self:
h_result = func(self.ctx, h_self)
if not h_result:
space.fromcache(State).raise_current_exception()
return self.handles.consume(h_result)
class W_wrap_ternaryfunc(object):
def call(self, space, __args__):
        # Literal quote of the corresponding CPython comment:
# Note: This wrapper only works for __pow__()
#
func = llapi.cts.cast("HPyFunc_ternaryfunc", self.cfuncptr)
self.check_argsv(space, __args__, 2, 3)
n = len(__args__.arguments_w)
w_self = __args__.arguments_w[0]
w1 = __args__.arguments_w[1]
if n == 2:
w2 = space.w_None
else:
w2 = __args__.arguments_w[2]
with self.handles.using(w_self, w1, w2) as (h_self, h1, h2):
h_result = func(self.ctx, h_self, h1, h2)
if not h_result:
space.fromcache(State).raise_current_exception()
return self.handles.consume(h_result)
class W_wrap_indexargfunc(object):
def call(self, space, __args__):
func = llapi.cts.cast("HPyFunc_ssizeargfunc", self.cfuncptr)
self.check_args(space, __args__, 2)
w_self = __args__.arguments_w[0]
w_idx = __args__.arguments_w[1]
idx = space.int_w(space.index(w_idx))
with self.handles.using(w_self) as h_self:
h_result = func(self.ctx, h_self, idx)
if not h_result:
space.fromcache(State).raise_current_exception()
return self.handles.consume(h_result)
class W_wrap_inquirypred(object):
def call(self, space, __args__):
func = llapi.cts.cast("HPyFunc_inquiry", self.cfuncptr)
self.check_args(space, __args__, 1)
w_self = __args__.arguments_w[0]
with self.handles.using(w_self) as h_self:
res = func(self.ctx, h_self)
res = rffi.cast(lltype.Signed, res)
if res == -1:
space.fromcache(State).raise_current_exception()
return space.newbool(bool(res))
class W_wrap_lenfunc(object):
def call(self, space, __args__):
func = llapi.cts.cast("HPyFunc_lenfunc", self.cfuncptr)
self.check_args(space, __args__, 1)
w_self = __args__.arguments_w[0]
with self.handles.using(w_self) as h_self:
result = func(self.ctx, h_self)
if widen(result) == -1:
space.fromcache(State).raise_current_exception()
return space.newint(result)
def sq_getindex(space, w_sequence, w_idx):
"""
This is equivalent to CPython's typeobject.c:getindex().
We call it sq_getindex because it's used only by sq_* slots.
"""
idx = space.int_w(space.index(w_idx))
if idx < 0 and space.lookup(w_sequence, '__len__'):
# It is worth noting that we are doing the lookup of __len__ twice,
# one above and one inside space.len_w. The JIT should optimize it
# away, but it might be a minor slowdown for interpreted code.
n = space.len_w(w_sequence)
idx += n
return idx
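# Illustrative example: for a sequence of length 5, an index of -2 becomes
# 5 + (-2) == 3 before it is passed on to the sq_item/sq_ass_item slot,
# mirroring CPython's getindex() behaviour.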
class W_wrap_sq_item(object):
def call(self, space, __args__):
func = llapi.cts.cast("HPyFunc_ssizeargfunc", self.cfuncptr)
self.check_args(space, __args__, 2)
w_self = __args__.arguments_w[0]
w_idx = __args__.arguments_w[1]
idx = sq_getindex(space, w_self, w_idx)
with self.handles.using(w_self) as h_self:
h_result = func(self.ctx, h_self, idx)
if not h_result:
space.fromcache(State).raise_current_exception()
return self.handles.consume(h_result)
class W_wrap_sq_setitem(object):
def call(self, space, __args__):
func = llapi.cts.cast("HPyFunc_ssizeobjargproc", self.cfuncptr)
self.check_args(space, __args__, 3)
w_self = __args__.arguments_w[0]
w_idx = __args__.arguments_w[1]
idx = sq_getindex(space, w_self, w_idx)
w_value = __args__.arguments_w[2]
with self.handles.using(w_self, w_value) as (h_self, h_value):
result = func(self.ctx, h_self, idx, h_value)
if widen(result) == -1:
space.fromcache(State).raise_current_exception()
return space.w_None
class W_wrap_sq_delitem(object):
def call(self, space, __args__):
func = llapi.cts.cast("HPyFunc_ssizeobjargproc", self.cfuncptr)
self.check_args(space, __args__, 2)
w_self = __args__.arguments_w[0]
w_idx = __args__.arguments_w[1]
idx = sq_getindex(space, w_self, w_idx)
with self.handles.using(w_self) as h_self:
result = func(self.ctx, h_self, idx, llapi.HPy_NULL)
if widen(result) == -1:
space.fromcache(State).raise_current_exception()
return space.w_None
class W_wrap_objobjproc(object):
def call(self, space, __args__):
func = llapi.cts.cast("HPyFunc_objobjproc", self.cfuncptr)
self.check_args(space, __args__, 2)
w_self = __args__.arguments_w[0]
w_key = __args__.arguments_w[1]
with self.handles.using(w_self, w_key) as (h_self, h_key):
res = func(self.ctx, h_self, h_key)
res = widen(res)
if res == -1:
space.fromcache(State).raise_current_exception()
return space.newbool(bool(res))
class W_wrap_getbuffer(object):
rbp = llapi.cts.cast('HPyFunc_releasebufferproc', 0)
def call(self, space, __args__):
func = llapi.cts.cast("HPyFunc_getbufferproc", self.cfuncptr)
self.check_args(space, __args__, 2)
w_self = __args__.arguments_w[0]
w_flags = __args__.arguments_w[1]
flags = rffi.cast(rffi.INT_real, space.int_w(w_flags))
with lltype.scoped_alloc(llapi.cts.gettype('HPy_buffer')) as hpybuf:
with self.handles.using(w_self) as h_self:
res = func(self.ctx, h_self, hpybuf, flags)
if widen(res) < 0:
space.fromcache(State).raise_current_exception()
buf_ptr = hpybuf.c_buf
w_obj = self.handles.consume(hpybuf.c_obj.c__i)
size = hpybuf.c_len
ndim = widen(hpybuf.c_ndim)
shape = None
if hpybuf.c_shape:
shape = [hpybuf.c_shape[i] for i in range(ndim)]
strides = None
if hpybuf.c_strides:
strides = [hpybuf.c_strides[i] for i in range(ndim)]
if hpybuf.c_format:
format = rffi.charp2str(hpybuf.c_format)
else:
format = 'B'
view = self.handles.HPyBuffer(
buf_ptr, size, w_obj,
itemsize=hpybuf.c_itemsize,
readonly=widen(hpybuf.c_readonly),
ndim=widen(hpybuf.c_ndim), format=format, shape=shape,
strides=strides)
if self.rbp:
# XXX: we're assuming w_self and w_obj have the same type!
view.releasebufferproc = self.rbp
self.handles.BUFFER_FQ.register_finalizer(view)
return view.wrap(space)
# remaining wrappers to write
## wrap_binaryfunc_l(PyObject *self, PyObject *args, void *wrapped)
## wrap_binaryfunc_r(PyObject *self, PyObject *args, void *wrapped)
## wrap_ternaryfunc_r(PyObject *self, PyObject *args, void *wrapped)
## wrap_objobjargproc(PyObject *self, PyObject *args, void *wrapped)
## wrap_delitem(PyObject *self, PyObject *args, void *wrapped)
## wrap_setattr(PyObject *self, PyObject *args, void *wrapped)
## wrap_delattr(PyObject *self, PyObject *args, void *wrapped)
## wrap_hashfunc(PyObject *self, PyObject *args, void *wrapped)
## wrap_call(PyObject *self, PyObject *args, void *wrapped, PyObject *kwds)
## wrap_del(PyObject *self, PyObject *args, void *wrapped)
## wrap_next(PyObject *self, PyObject *args, void *wrapped)
## wrap_descr_get(PyObject *self, PyObject *args, void *wrapped)
## wrap_descr_set(PyObject *self, PyObject *args, void *wrapped)
## wrap_descr_delete(PyObject *self, PyObject *args, void *wrapped)
class W_wrap_init(object):
def call(self, space, __args__):
with self.handles.using(__args__.arguments_w[0]) as h_self:
n = len(__args__.arguments_w) - 1
with lltype.scoped_alloc(rffi.CArray(llapi.HPy), n) as args_h:
i = 0
while i < n:
args_h[i] = self.handles.new(__args__.arguments_w[i + 1])
i += 1
h_kw = 0
if __args__.keywords:
w_kw = space.newdict()
for i in range(len(__args__.keywords)):
key = __args__.keywords[i]
w_value = __args__.keywords_w[i]
space.setitem_str(w_kw, key, w_value)
h_kw = self.handles.new(w_kw)
fptr = llapi.cts.cast('HPyFunc_initproc', self.cfuncptr)
try:
result = fptr(self.ctx, h_self, args_h, n, h_kw)
finally:
if h_kw:
self.handles.close(h_kw)
for i in range(n):
self.handles.close(args_h[i])
if rffi.cast(lltype.Signed, result) < 0:
space.fromcache(State).raise_current_exception()
return space.w_None
@specialize.memo()
def get_slot_cls(handles, mixin):
try:
return _WRAPPER_CACHE[handles, mixin]
except KeyError:
pass
_handles = handles
class wrapper(W_SlotWrapper):
import_from_mixin(mixin)
handles = _handles
ctx = _handles.ctx
wrapper.__name__ = mixin.__name__ + handles.cls_suffix
_WRAPPER_CACHE[handles, mixin] = wrapper
return wrapper
@specialize.memo()
def get_tp_new_wrapper_cls(handles):
try:
return _WRAPPER_CACHE[handles, 'new']
except KeyError:
pass
class W_tp_new_wrapper(handles.w_ExtensionFunction):
"""
        Special case for HPy_tp_new. Note that this is NOT a SlotWrapper.
This is the equivalent of CPython's tp_new_wrapper: the difference is that
CPython's tp_new_wrapper is a regular PyMethodDef which is wrapped inside
a PyCFunction, while here we have our own type.
"""
def __init__(self, cfuncptr, w_type):
handles.w_ExtensionFunction.__init__(
self, handles.space, handles, '__new__',
llapi.HPyFunc_KEYWORDS, None, cfuncptr, w_self=w_type)
def call(self, space, h_self, __args__, skip_args=0):
assert space is handles.space
assert skip_args == 0
# NOTE: h_self contains the type for which we are calling __new__, but
# here is ignored. In CPython's tp_new_wrapper it is only used to fish
# the ->tp_new to call, but here we already have the cfuncptr
#
# XXX: tp_new_wrapper does additional checks, we should write tests
# and implement the same checks
w_self = __args__.arguments_w[0]
with handles.using(w_self) as h_self:
return self.call_varargs_kw(space, h_self, __args__,
skip_args=1, has_keywords=True)
W_tp_new_wrapper.__name__ += handles.cls_suffix
_WRAPPER_CACHE[handles, 'new'] = W_tp_new_wrapper
return W_tp_new_wrapper
# the following table shows how to map C-level slots into Python-level
# __methods__. Note that if a C-level slot corresponds to multiple
# __methods__, it appears multiple times (e.g. sq_ass_item corresponds to both
# __setitem__ and __delitem__).
SLOTS = unrolling_iterable([
# CPython slots
('bf_getbuffer', '__buffer__', W_wrap_getbuffer),
# ('mp_ass_subscript', '__xxx__', AGS.W_SlotWrapper_...),
# ('mp_length', '__xxx__', AGS.W_SlotWrapper_...),
# ('mp_subscript', '__getitem__', AGS.W_SlotWrapper_binaryfunc),
('nb_absolute', '__abs__', W_wrap_unaryfunc),
('nb_add', '__add__', W_wrap_binaryfunc),
('nb_and', '__and__', W_wrap_binaryfunc),
('nb_bool', '__bool__', W_wrap_inquirypred),
('nb_divmod', '__divmod__', W_wrap_binaryfunc),
('nb_float', '__float__', W_wrap_unaryfunc),
('nb_floor_divide', '__floordiv__', W_wrap_binaryfunc),
('nb_index', '__index__', W_wrap_unaryfunc),
('nb_inplace_add', '__iadd__', W_wrap_binaryfunc),
('nb_inplace_and', '__iand__', W_wrap_binaryfunc),
('nb_inplace_floor_divide', '__ifloordiv__', W_wrap_binaryfunc),
('nb_inplace_lshift', '__ilshift__', W_wrap_binaryfunc),
('nb_inplace_multiply', '__imul__', W_wrap_binaryfunc),
('nb_inplace_or', '__ior__', W_wrap_binaryfunc),
# CPython is buggy here: it uses wrap_binaryfunc for nb_inplace_power, but
# it means you end up calling the cfunc with the wrong signature! We
    # correctly use W_wrap_ternaryfunc instead
('nb_inplace_power', '__ipow__', W_wrap_ternaryfunc),
('nb_inplace_remainder', '__imod__', W_wrap_binaryfunc),
('nb_inplace_rshift', '__irshift__', W_wrap_binaryfunc),
('nb_inplace_subtract', '__isub__', W_wrap_binaryfunc),
('nb_inplace_true_divide', '__itruediv__', W_wrap_binaryfunc),
('nb_inplace_xor', '__ixor__', W_wrap_binaryfunc),
('nb_int', '__int__', W_wrap_unaryfunc),
('nb_invert', '__invert__', W_wrap_unaryfunc),
('nb_lshift', '__lshift__', W_wrap_binaryfunc),
('nb_multiply', '__mul__', W_wrap_binaryfunc),
('nb_negative', '__neg__', W_wrap_unaryfunc),
('nb_or', '__or__', W_wrap_binaryfunc),
('nb_positive', '__pos__', W_wrap_unaryfunc),
('nb_power', '__pow__', W_wrap_ternaryfunc),
('nb_remainder', '__mod__', W_wrap_binaryfunc),
('nb_rshift', '__rshift__', W_wrap_binaryfunc),
('nb_subtract', '__sub__', W_wrap_binaryfunc),
('nb_true_divide', '__truediv__', W_wrap_binaryfunc),
('nb_xor', '__xor__', W_wrap_binaryfunc),
('sq_ass_item', '__setitem__', W_wrap_sq_setitem),
('sq_ass_item', '__delitem__', W_wrap_sq_delitem),
('sq_concat', '__add__', W_wrap_binaryfunc),
('sq_contains', '__contains__', W_wrap_objobjproc),
('sq_inplace_concat', '__iadd__', W_wrap_binaryfunc),
('sq_inplace_repeat', '__imul__', W_wrap_indexargfunc),
('sq_item', '__getitem__', W_wrap_sq_item),
('sq_length', '__len__', W_wrap_lenfunc),
('sq_repeat', '__mul__', W_wrap_indexargfunc),
# ('tp_base', '__xxx__', AGS.W_SlotWrapper_...),
# ('tp_bases', '__xxx__', AGS.W_SlotWrapper_...),
# ('tp_call', '__xxx__', AGS.W_SlotWrapper_...),
# ('tp_clear', '__xxx__', AGS.W_SlotWrapper_...),
# ('tp_del', '__xxx__', AGS.W_SlotWrapper_...),
# ('tp_descr_get', '__xxx__', AGS.W_SlotWrapper_...),
# ('tp_descr_set', '__xxx__', AGS.W_SlotWrapper_...),
# ('tp_doc', '__xxx__', AGS.W_SlotWrapper_...),
# ('tp_getattr', '__xxx__', AGS.W_SlotWrapper_...),
# ('tp_getattro', '__xxx__', AGS.W_SlotWrapper_...),
# ('tp_hash', '__xxx__', AGS.W_SlotWrapper_...),
('tp_init', '__init__', W_wrap_init),
# ('tp_is_gc', '__xxx__', AGS.W_SlotWrapper_...),
# ('tp_iter', '__iter__', W_wrap_unaryfunc),
# ('tp_iternext', '__xxx__', AGS.W_SlotWrapper_...),
# tp_new SPECIAL-CASED
('tp_repr', '__repr__', W_wrap_unaryfunc),
# tp_richcompare SPECIAL-CASED
# ('tp_setattr', '__xxx__', AGS.W_SlotWrapper_...),
# ('tp_setattro', '__xxx__', AGS.W_SlotWrapper_...),
# ('tp_str', '__str__', W_wrap_unaryfunc),
# ('tp_traverse', '__xxx__', AGS.W_SlotWrapper_...),
('nb_matrix_multiply', '__matmul__', W_wrap_binaryfunc),
('nb_inplace_matrix_multiply', '__imatmul__', W_wrap_binaryfunc),
# ('am_await', '__await__', W_wrap_unaryfunc),
# ('am_aiter', '__aiter__', W_wrap_unaryfunc),
# ('am_anext', '__anext__', W_wrap_unaryfunc),
# ('tp_finalize', '__xxx__', AGS.W_SlotWrapper_...),
# extra HPy-specific slots
# ('tp_destroy', '__xxx__', AGS.W_SlotWrapper_...),
])
@specialize.arg(0)
def fill_slot(handles, w_type, hpyslot):
space = handles.space
slot_num = rffi.cast(lltype.Signed, hpyslot.c_slot)
# special cases
if slot_num == HPySlot_Slot.HPy_tp_new:
# this is the moral equivalent of CPython's add_tp_new_wrapper
cls = get_tp_new_wrapper_cls(handles)
w_func = cls(hpyslot.c_impl, w_type)
w_type.setdictvalue(space, '__new__', w_func)
return
elif slot_num == HPySlot_Slot.HPy_tp_destroy:
w_type.tp_destroy = llapi.cts.cast('HPyFunc_destroyfunc', hpyslot.c_impl)
return
elif slot_num == HPySlot_Slot.HPy_tp_richcompare:
for methname, opval in CMP_SLOTS:
cls = get_cmp_wrapper_cls(handles, methname, opval)
w_slot = cls(slot_num, methname, hpyslot.c_impl, w_type)
w_type.setdictvalue(space, methname, w_slot)
return
elif slot_num == HPySlot_Slot.HPy_bf_releasebuffer:
return
# generic cases
found = False
for slotname, methname, mixin in SLOTS:
assert methname != '__xxx__' # sanity check
n = getattr(HPySlot_Slot, 'HPy_' + slotname)
if slot_num == n:
found = True
cls = get_slot_cls(handles, mixin)
w_slot = cls(slot_num, methname, hpyslot.c_impl, w_type)
w_type.setdictvalue(space, methname, w_slot)
if not found:
raise oefmt(space.w_NotImplementedError, "Unimplemented slot: %s", str(slot_num))
|
the-stack_0_17075
|
"""
This class is inspired by the Black Knight scene in the movie
"Monty Python and the Holy Grail", where King Arthur fights the
Black Knight, slicing off his arms and legs, but the knight
refuses to concede defeat.
# BEGIN BLACK_KNIGHT_DEMO
>>> knight = BlackKnight()
>>> knight.member
next member is:
'an arm'
>>> del knight.member
BLACK KNIGHT (loses an arm)
-- 'Tis but a scratch.
>>> del knight.member
BLACK KNIGHT (loses another arm)
-- It's just a flesh wound.
>>> del knight.member
BLACK KNIGHT (loses a leg)
-- I'm invincible!
>>> del knight.member
BLACK KNIGHT (loses another leg)
-- All right, we'll call it a draw.
# END BLACK_KNIGHT_DEMO
"""
# BEGIN BLACK_KNIGHT
class BlackKnight:
def __init__(self):
self.members = ['an arm', 'another arm',
'a leg', 'another leg']
self.phrases = ["'Tis but a scratch.",
"It's just a flesh wound.",
"I'm invincible!",
"All right, we'll call it a draw."]
@property
def member(self):
print('next member is:')
return self.members[0]
@member.deleter
def member(self):
text = 'BLACK KNIGHT (loses {})\n-- {}'
print(text.format(self.members.pop(0), self.phrases.pop(0)))
# END BLACK_KNIGHT
|
the-stack_0_17077
|
import logging
import pytest
from toy_robot_challenge.command_processor import CommandProcessor
from toy_robot_challenge.positioning import Direction, Turn
from toy_robot_challenge.robot import Robot
@pytest.fixture()
def mock_robot(mocker):
return mocker.create_autospec(Robot, instance=True)
def test_create_command_processor(mock_robot):
command_processor = CommandProcessor(mock_robot)
assert command_processor._robot == mock_robot
def test_process_ignores_invalid_command(caplog, mock_robot):
caplog.set_level(logging.DEBUG)
command_processor = CommandProcessor(mock_robot)
command_processor.process("invalid-command")
assert mock_robot.place.called is False
assert mock_robot.report.called is False
assert mock_robot.rotate.called is False
assert mock_robot.move.called is False
assert "Detected command as invalid. Terminating command execution." in str(
caplog.records
)
@pytest.mark.parametrize(
"command", ["PLACE", "PLACE 1,2", "PLACE 1,NORTH,1", "PLACE 1,2,3", "PLACE NORTH"]
)
def test_process_does_not_place_robot_for_place_command_with_missing_place_arguments(
caplog, mock_robot, command
):
caplog.set_level(logging.DEBUG)
command_processor = CommandProcessor(mock_robot)
command_processor.process(command)
assert mock_robot.place.called is False
assert mock_robot.report.called is False
assert mock_robot.rotate.called is False
assert mock_robot.move.called is False
assert (
"PLACE command invoked but the arguments provided were invalid or missing (x,y,f)."
in str(caplog.records)
)
def test_process_does_not_place_robot_for_place_command_with_invalid_direction(
caplog, mock_robot
):
caplog.set_level(logging.DEBUG)
command_processor = CommandProcessor(mock_robot)
command_processor.process("PLACE 1,2,invalid")
assert mock_robot.place.called is False
assert mock_robot.report.called is False
assert mock_robot.rotate.called is False
assert mock_robot.move.called is False
assert "Invalid direction provided in PLACE command." in str(caplog.records)
def test_process_places_robot_for_valid_place_command(mock_robot):
command_processor = CommandProcessor(mock_robot)
command_processor.process("PLACE 1,2,NORTH")
mock_robot.place.assert_called_once_with(1, 2, Direction.NORTH)
assert mock_robot.report.called is False
assert mock_robot.rotate.called is False
assert mock_robot.move.called is False
def test_process_tells_robot_to_report_for_report_command(mock_robot):
command_processor = CommandProcessor(mock_robot)
command_processor.process("REPORT")
assert mock_robot.report.called is True
assert mock_robot.place.called is False
assert mock_robot.rotate.called is False
assert mock_robot.move.called is False
@pytest.mark.parametrize("command, turn", [("LEFT", Turn.LEFT), ("RIGHT", Turn.RIGHT)])
def test_process_tells_robot_to_rotate_for_left_or_right_command(
mock_robot, command, turn
):
command_processor = CommandProcessor(mock_robot)
command_processor.process(command)
mock_robot.rotate.assert_called_once_with(turn)
assert mock_robot.report.called is False
assert mock_robot.place.called is False
assert mock_robot.move.called is False
def test_process_tells_robot_to_move_for_move_command(mock_robot):
command_processor = CommandProcessor(mock_robot)
command_processor.process("MOVE")
assert mock_robot.move.called is True
assert mock_robot.report.called is False
assert mock_robot.place.called is False
assert mock_robot.rotate.called is False
|
the-stack_0_17078
|
#!/usr/bin/env python -O
# -*- coding: utf-8 -*-
#
# tests.unit._dao.TestRTKRPN.py is part of The RTK Project
#
# All rights reserved.
"""
This is the test class for testing the RTKRPN module algorithms and
models.
"""
import sys
from os.path import dirname
sys.path.insert(0, dirname(dirname(dirname(dirname(__file__)))) + "/rtk", )
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
import unittest
from nose.plugins.attrib import attr
from dao.RTKRPN import RTKRPN
__author__ = 'Andrew Rowland'
__email__ = '[email protected]'
__organization__ = 'ReliaQual Associates, LLC'
__copyright__ = 'Copyright 2017 Andrew "weibullguy" Rowland'
class TestRTKRPN(unittest.TestCase):
"""
Class for testing the RTKRPN class.
"""
attributes = (1, 'None', 'No effect.', 'severity', 1)
def setUp(self):
"""
Sets up the test fixture for the RTKRPN class.
"""
engine = create_engine('sqlite:////tmp/TestCommonDB.rtk', echo=False)
session = scoped_session(sessionmaker())
session.remove()
session.configure(bind=engine, autoflush=False, expire_on_commit=False)
self.DUT = session.query(RTKRPN).first()
self.DUT.name = self.attributes[1]
self.DUT.description = self.attributes[2]
self.DUT.rpn_type = self.attributes[3]
self.DUT.value = self.attributes[4]
session.commit()
@attr(all=True, unit=True)
def test00_RTKRPN_create(self):
"""
(TestRTKRPN) __init__ should create an RTKRPN model
"""
self.assertTrue(isinstance(self.DUT, RTKRPN))
# Verify class attributes are properly initialized.
self.assertEqual(self.DUT.__tablename__, 'rtk_rpn')
self.assertEqual(self.DUT.rpn_id, 1)
self.assertEqual(self.DUT.description, 'No effect.')
self.assertEqual(self.DUT.name, 'None')
self.assertEqual(self.DUT.rpn_type, 'severity')
self.assertEqual(self.DUT.value, 1)
@attr(all=True, unit=True)
def test01_RTKRPN_get_attributes(self):
"""
(TestRTKRPN) get_attributes should return a tuple of attributes values on success
"""
self.assertEqual(self.DUT.get_attributes(), self.attributes)
@attr(all=True, unit=True)
def test02a_RTKRPN_set_attributes(self):
"""
(TestRTKRPN) set_attributes should return a zero error code on success
"""
_attributes = ('Very High',
'System inoperable with destructive failure without ' \
'compromising safety.', 'severity', 8)
_error_code, _msg = self.DUT.set_attributes(_attributes)
self.assertEqual(_error_code, 0)
self.assertEqual(_msg, "RTK SUCCESS: Updating RTKRPN {0:d} " \
"attributes.".format(self.DUT.rpn_id))
@attr(all=True, unit=True)
def test02b_RTKRPN_set_attributes_to_few(self):
"""
(TestRTKRPN) set_attributes should return a 40 error code when passed too few attributes
"""
_attributes = ('Very High',
'System inoperable with destructive failure without ' \
'compromising safety.', 'severity')
_error_code, _msg = self.DUT.set_attributes(_attributes)
self.assertEqual(_error_code, 40)
self.assertEqual(_msg, "RTK ERROR: Insufficient number of input " \
"values to RTKRPN.set_attributes().")
@attr(all=True, unit=True)
def test02c_RTKRPN_set_attributes_wrong_type(self):
"""
(TestRTKRPN) set_attributes should return a 10 error code when passed the wrong type
"""
_attributes = ('Very High',
'System inoperable with destructive failure without ' \
'compromising safety.', 'severity', 'eight')
_error_code, _msg = self.DUT.set_attributes(_attributes)
self.assertEqual(_error_code, 10)
self.assertEqual(_msg, "RTK ERROR: Incorrect data type when " \
"converting one or more RTKRPN " \
"attributes.")
|
the-stack_0_17079
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import math
from typing import List
# Third-party imports
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
# First-party imports
from gluonts.block.feature import FeatureEmbedder
from gluonts.model.common import Tensor
from gluonts.core.component import validated
class LookupValues(gluon.HybridBlock):
def __init__(self, values: mx.nd.NDArray, **kwargs):
super().__init__(**kwargs)
with self.name_scope():
self.bin_values = self.params.get_constant("bin_values", values)
def hybrid_forward(self, F, indices, bin_values):
return F.take(bin_values, indices)
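# Illustrative behaviour (shapes and values are assumptions): with
# bin_values = mx.nd.array([-1.0, 0.0, 1.0]), applying the block to the index
# tensor [[2, 0, 1]] yields [[1.0, -1.0, 0.0]], i.e. a plain lookup of bin
# centres from bin indices.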
def conv1d(channels, kernel_size, in_channels, use_bias=True, **kwargs):
"""
Conv1D with better default initialization.
"""
n = in_channels
kernel_size = (
kernel_size if isinstance(kernel_size, list) else [kernel_size]
)
for k in kernel_size:
n *= k
stdv = 1.0 / math.sqrt(n)
winit = mx.initializer.Uniform(stdv)
if use_bias:
binit = mx.initializer.Uniform(stdv)
else:
binit = "zeros"
return nn.Conv1D(
channels=channels,
kernel_size=kernel_size,
in_channels=in_channels,
use_bias=use_bias,
weight_initializer=winit,
bias_initializer=binit,
**kwargs,
)
class CausalDilatedResidue(nn.HybridBlock):
def __init__(
self,
n_residue,
n_skip,
dilation,
return_dense_out,
kernel_size,
**kwargs,
):
super().__init__(**kwargs)
self.n_residue = n_residue
self.n_skip = n_skip
self.dilation = dilation
self.kernel_size = kernel_size
self.return_dense_out = return_dense_out
with self.name_scope():
self.conv_sigmoid = conv1d(
in_channels=n_residue,
channels=n_residue,
kernel_size=kernel_size,
dilation=dilation,
activation="sigmoid",
)
self.conv_tanh = conv1d(
in_channels=n_residue,
channels=n_residue,
kernel_size=kernel_size,
dilation=dilation,
activation="tanh",
)
self.skip = conv1d(
in_channels=n_residue, channels=n_skip, kernel_size=1
)
self.residue = (
conv1d(
in_channels=n_residue, channels=n_residue, kernel_size=1
)
if self.return_dense_out
else None
)
def hybrid_forward(self, F, x):
u = self.conv_sigmoid(x) * self.conv_tanh(x)
s = self.skip(u)
if not self.return_dense_out:
return s, F.zeros(shape=(1,))
output = self.residue(u)
output = output + F.slice_axis(
x, begin=(self.kernel_size - 1) * self.dilation, end=None, axis=-1
)
return s, output
class WaveNet(nn.HybridBlock):
def __init__(
self,
bin_values: List[float],
n_residue: int,
n_skip: int,
dilation_depth: int,
n_stacks: int,
act_type: str,
cardinality: List[int],
embedding_dimension: int,
pred_length: int,
**kwargs,
):
super().__init__(**kwargs)
self.dilation_depth = dilation_depth
self.pred_length = pred_length
self.mu = len(bin_values)
self.dilations = WaveNet._get_dilations(
dilation_depth=dilation_depth, n_stacks=n_stacks
)
self.receptive_field = WaveNet.get_receptive_field(
dilation_depth=dilation_depth, n_stacks=n_stacks
)
self.trim_lengths = [
sum(self.dilations) - sum(self.dilations[: i + 1])
for i, _ in enumerate(self.dilations)
]
with self.name_scope():
self.feature_embedder = FeatureEmbedder(
cardinalities=cardinality,
embedding_dims=[embedding_dimension for _ in cardinality],
)
# self.post_transform = LookupValues(mx.nd.array(bin_values))
self.target_embed = nn.Embedding(
input_dim=self.mu, output_dim=n_residue
)
self.residuals = nn.HybridSequential()
for i, d in enumerate(self.dilations):
is_not_last = i + 1 < len(self.dilations)
self.residuals.add(
CausalDilatedResidue(
n_residue=n_residue,
n_skip=n_skip,
dilation=d,
return_dense_out=is_not_last,
kernel_size=2,
)
)
std = 1.0 / math.sqrt(n_residue)
self.conv_project = nn.Conv1D(
channels=n_residue,
kernel_size=1,
use_bias=True,
weight_initializer=mx.init.Uniform(std),
bias_initializer="zero",
)
self.conv1 = conv1d(
in_channels=n_skip, channels=n_skip, kernel_size=1
)
self.conv2 = conv1d(
in_channels=n_skip, channels=self.mu, kernel_size=1
)
self.output_act = (
nn.ELU()
if act_type == "elu"
else nn.Activation(activation=act_type)
)
self.cross_entropy_loss = gluon.loss.SoftmaxCrossEntropyLoss()
@staticmethod
def _get_dilations(dilation_depth, n_stacks):
return [2 ** i for i in range(dilation_depth)] * n_stacks
@staticmethod
def get_receptive_field(dilation_depth, n_stacks):
"""
Return the length of the receptive field
"""
dilations = WaveNet._get_dilations(
dilation_depth=dilation_depth, n_stacks=n_stacks
)
return sum(dilations) + 1
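    # Concrete example: dilation_depth=3 and n_stacks=2 give dilations
    # [1, 2, 4, 1, 2, 4], so the receptive field is sum(dilations) + 1 = 15
    # time steps.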
def hybrid_forward(
self,
F,
feat_static_cat: Tensor,
past_target: Tensor,
past_observed_values: Tensor,
past_time_feat: Tensor,
future_time_feat: Tensor,
future_target: Tensor,
future_observed_values: Tensor,
scale: Tensor,
) -> Tensor:
embedded_cat = self.feature_embedder(feat_static_cat)
static_feat = F.concat(embedded_cat, F.log(scale + 1.0), dim=1)
full_target = F.concat(past_target, future_target, dim=-1).astype(
"int32"
)
full_observed = F.expand_dims(
F.concat(past_observed_values, future_observed_values, dim=-1),
axis=1,
)
full_time_features = F.concat(past_time_feat, future_time_feat, dim=-1)
repeated_static_feat = F.repeat(
F.expand_dims(static_feat, axis=-1),
repeats=self.pred_length + self.receptive_field,
axis=-1,
)
full_features = F.concat(
full_time_features, full_observed, repeated_static_feat, dim=1
)
# (batch_size, embed_dim, sequence_length)
o = self.target_embed(
F.slice_axis(full_target, begin=0, end=-1, axis=-1)
).swapaxes(1, 2)
o = F.concat(
o, F.slice_axis(full_features, begin=1, end=None, axis=-1), dim=1
)
o = self.conv_project(o)
skip_outs = []
for i, d in enumerate(self.dilations):
skip, o = self.residuals[i](o)
skip_trim = F.slice_axis(
skip, begin=self.trim_lengths[i], end=None, axis=-1
)
skip_outs.append(skip_trim)
y = sum(skip_outs)
y = self.output_act(y)
y = self.conv1(y)
y = self.output_act(y)
y = self.conv2(y)
unnormalized_output = y.swapaxes(1, 2)
label = F.slice_axis(
full_target, begin=self.receptive_field, end=None, axis=-1
)
loss_weight = F.slice_axis(
full_observed, begin=self.receptive_field, end=None, axis=-1
)
loss_weight = F.expand_dims(loss_weight, axis=2)
loss = self.cross_entropy_loss(unnormalized_output, label, loss_weight)
return loss
class WaveNetSampler(WaveNet):
"""
    Runs WaveNet generation in an auto-regressive manner using caching for
speedup [PKC+16]_.
Same arguments as WaveNet. In addition
Parameters
----------
pred_length
Length of the prediction horizon
num_samples
Number of sample paths to generate in parallel in the graph
temperature
If set to 1.0 (default), sample according to estimated probabilities, if set to 0.0
most likely sample at each step is chosen.
post_transform
An optional post transform that will be applied to the samples
"""
@validated()
def __init__(
self,
bin_values: List[float],
num_samples: int,
temperature: float = 1.0,
**kwargs,
):
"""
Same arguments as WaveNet. In addition
:param pred_length: prediction length
:param num_samples: number of sample paths to generate in parallel in the graph
:param temperature: if set to 1.0 (default), sample according to estimated probabilities
- if set to 0.0 most likely sample at each step is chosen.
:param post_transform: An optional post transform that will be applied to the samples.
"""
super().__init__(bin_values=bin_values, **kwargs)
self.num_samples = num_samples
self.temperature = temperature
with self.name_scope():
self.post_transform = LookupValues(mx.nd.array(bin_values))
def hybrid_forward(
self,
F,
feat_static_cat: Tensor,
past_target: Tensor,
past_observed_values: Tensor,
past_time_feat: Tensor,
future_time_feat: Tensor,
scale: Tensor,
) -> Tensor:
embedded_cat = self.feature_embedder(feat_static_cat)
static_feat = F.concat(embedded_cat, F.log(scale + 1.0), dim=1)
past_target = past_target.astype("int32")
def blow_up(u):
"""
Expand to (batch_size x num_samples)
"""
return F.repeat(u, repeats=self.num_samples, axis=0)
def is_last_layer(i):
return i + 1 == len(self.dilations)
queues = []
full_time_features = F.concat(past_time_feat, future_time_feat, dim=-1)
future_observed_values = F.slice_axis(
future_time_feat, begin=0, end=1, axis=1
).ones_like()
full_observed = F.concat(
F.expand_dims(past_observed_values, axis=1),
future_observed_values,
dim=-1,
)
repeated_static_feat = F.repeat(
F.expand_dims(static_feat, axis=-1),
repeats=self.pred_length + self.receptive_field,
axis=-1,
)
full_features = F.concat(
full_time_features, full_observed, repeated_static_feat, dim=1
)
feature_slice = F.slice_axis(
full_features,
begin=-self.pred_length - self.receptive_field + 1,
end=None,
axis=-1,
)
tmp = F.slice_axis(
past_target, begin=-self.receptive_field, end=None, axis=-1
)
o = self.target_embed(tmp).swapaxes(1, 2)
o = F.concat(
o,
F.slice_axis(
feature_slice, begin=-self.receptive_field, end=None, axis=-1
),
dim=1,
)
o = self.conv_project(o)
for i, d in enumerate(self.dilations):
sz = 1 if d == 2 ** (self.dilation_depth - 1) else d * 2
_, o = self.residuals[i](o)
if not is_last_layer(i):
o_chunk = F.slice_axis(o, begin=-sz - 1, end=-1, axis=-1)
else:
o_chunk = o
queues.append(blow_up(o_chunk))
res = F.slice_axis(past_target, begin=-2, end=None, axis=-1)
res = blow_up(res)
for n in range(self.pred_length):
queues_next = []
o = self.target_embed(
F.slice_axis(res, begin=-2, end=None, axis=-1)
).swapaxes(1, 2)
b = F.slice_axis(
full_features,
begin=self.receptive_field + n - 1,
end=self.receptive_field + n + 1,
axis=-1,
)
b = blow_up(b)
o = F.concat(o, b, dim=1)
o = self.conv_project(o)
skip_outs = []
for i, d in enumerate(self.dilations):
skip, o = self.residuals[i](o)
skip_outs.append(skip)
if not is_last_layer(i):
q = queues[i]
o = F.concat(q, o, num_args=2, dim=-1)
queues_next.append(
F.slice_axis(o, begin=1, end=None, axis=-1)
)
queues = queues_next
y = sum(skip_outs)
y = self.output_act(y)
y = self.conv1(y)
y = self.output_act(y)
unnormalized_outputs = self.conv2(y)
if self.temperature > 0:
probs = F.softmax(
unnormalized_outputs / self.temperature, axis=1
)
y = F.sample_multinomial(probs.swapaxes(1, 2))
else:
y = F.argmax(unnormalized_outputs, axis=1)
y = y.astype("int32")
res = F.concat(res, y, num_args=2, dim=-1)
samples = F.slice_axis(res, begin=-self.pred_length, end=None, axis=-1)
samples = samples.reshape(
shape=(-1, self.num_samples, self.pred_length)
)
samples = self.post_transform(samples)
samples = F.broadcast_mul(scale.expand_dims(axis=1), samples)
return samples
|
the-stack_0_17080
|
from typing_extensions import Literal
from pydantic import BaseModel, Field, conint
from enum import Enum
class EdgeType(str, Enum):
"""
An enumeration for the types of edges.
"""
Surface = "Surface"
Space = "Space"
Flight = "Flight"
class Edge(BaseModel):
"""
Base class for all edges.
"""
name: str = Field(
...,
title="Name",
description="name of the edge",
)
description: str = Field(
...,
title="Description",
description="short description of the edge",
)
origin_id: conint(strict=True) = Field(
...,
title="Origin ID",
description="ID of the origin node"
)
destination_id: conint(strict=True) = Field(
...,
title="Destination ID",
description="ID of the destination node",
)
class SurfaceEdge(Edge):
"""
An edge between two surface nodes.
"""
type: Literal[EdgeType.Surface] = Field(
title="Type",
description="Type of edge",
)
distance: float = Field(
...,
title="Distance",
description="Distance of surface edge",
ge=0
)
class SpaceEdge(Edge):
"""
An edge between two nodes using a specified list of propulsive burns.
"""
type: Literal[EdgeType.Space] = Field(
title="Type",
description="Type of edge",
)
duration: float = Field(
...,
title="Duration",
description="Duration of space edge",
ge=0
)
class FlightEdge(Edge):
"""
An edge between two nodes using flight architectures that are known to close
with a given cargo and crew capacity.
"""
type: Literal[EdgeType.Flight] = Field(
...,
title="Type",
description="Type of edge",
)
duration: float = Field(
...,
title="duration",
description="Duration of flight edge",
ge=0
)
max_crew: conint(strict=True, ge=0) = Field(
...,
title="Max Crew",
description="Crew capacity for flight",
)
max_cargo: float = Field(
...,
title="Max Cargo",
description="Cargo capacity for flight",
ge=0
)
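# Minimal usage sketch (field values are illustrative assumptions):
#
#   edge = FlightEdge(
#       name="Earth-Moon flight",
#       description="Crewed lunar transit",
#       origin_id=1,
#       destination_id=2,
#       type=EdgeType.Flight,
#       duration=3.0,
#       max_crew=4,
#       max_cargo=500.0,
#   )
#   print(edge.json())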
|
the-stack_0_17081
|
from VMWConfigFile import *
from pyVim import connect
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim, vmodl
import atexit
import os
import ssl
import requests
import argparse
import time
# Disabling urllib3 ssl warnings
requests.packages.urllib3.disable_warnings()
# Disabling SSL certificate verification
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_NONE
def get_vim_objects(content, vim_type):
'''Get vim objects of a given type.'''
return [item for item in content.viewManager.CreateContainerView(
content.rootFolder, [vim_type], recursive=True
).view]
def getDatastoreId(name):
ds_list = getAllDatastores()
print(ds_list)
for ds in ds_list:
if ds['name'] == name:
return ds['moId']
return ""
def getAllDatastores():
try:
si = None
try:
si = connect.SmartConnect(host=vc_settings["vcenter"],
user=vc_settings["user"],
pwd=vc_settings["password"],
port=443,
sslContext=context)
except IOError as e:
pass
atexit.register(Disconnect, si)
content = si.RetrieveContent()
obj_view = content.viewManager.CreateContainerView(content.rootFolder,[vim.Datastore],True)
ds_list = obj_view.view
obj_view.Destroy()
datastores = []
for ds in ds_list:
datastores.append({'name' : ds.name, 'moId' : ds._moId})
except vmodl.MethodFault as e:
print("Caught vmodl fault: %s" % e.msg)
return 1
except Exception as e:
print("Caught exception: %s" % str(e))
return 1
return datastores
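# Minimal usage sketch (the datastore name is an illustrative assumption):
#
#   datastores = getAllDatastores()   # e.g. [{'name': 'datastore1', 'moId': 'datastore-10'}]
#   ds_id = getDatastoreId('datastore1')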
|
the-stack_0_17082
|
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
import cohesity_management_sdk.models.tenant
class ClusterAuditLog(object):
"""Implementation of the 'ClusterAuditLog' model.
Specifies information about a single Cluster audit log.
When an action (such as pausing a Protection Job) occurs, an audit log is
generated that provides details about the action.
Attributes:
action (string): Specifies the action that caused the log to be
generated.
details (string): Specifies more information about the action.
domain (string): Specifies the domain of the user who caused the
action that generated the log.
entity_id (string): Specifies the id of the entity (object) that the
action is invoked on.
entity_name (string): Specifies the entity (object) name that the
action is invoked on. For example, if a Job called BackupEng is
paused, this field returns BackupEng.
entity_type (string): Specifies the type of the entity (object) that
the action is invoked on. For example, if a Job called BackupEng
is paused, this field returns 'Protection Job'.
human_timestamp (string): Specifies the time when the log was
generated. The time is specified using a human readable
timestamp.
impersonation (bool): Specifies if the log was generated during
impersonation.
new_record (string): Specifies the record after the action is
invoked.
original_tenant (Tenant): Specifies details about a tenant.
previous_record (string): Specifies the record before the action is
invoked.
tenant (Tenant): Specifies details about a tenant.
timestamp_usecs (long|int): Specifies the time when the log was
generated. The time is specified using a Unix epoch Timestamp (in
microseconds).
user_name (string): Specifies the user who caused the action that
generated the log.
"""
# Create a mapping from Model property names to API property names
_names = {
"action":'action',
"details":'details',
"domain":'domain',
"entity_id":'entityId',
"entity_name":'entityName',
"entity_type":'entityType',
"human_timestamp":'humanTimestamp',
"impersonation":'impersonation',
"new_record":'newRecord',
"original_tenant":'originalTenant',
"previous_record":'previousRecord',
"tenant":'tenant',
"timestamp_usecs":'timestampUsecs',
"user_name":'userName'
}
def __init__(self,
action=None,
details=None,
domain=None,
entity_id=None,
entity_name=None,
entity_type=None,
human_timestamp=None,
impersonation=None,
new_record=None,
original_tenant=None,
previous_record=None,
tenant=None,
timestamp_usecs=None,
user_name=None):
"""Constructor for the ClusterAuditLog class"""
# Initialize members of the class
self.action = action
self.details = details
self.domain = domain
self.entity_id = entity_id
self.entity_name = entity_name
self.entity_type = entity_type
self.human_timestamp = human_timestamp
self.impersonation = impersonation
self.new_record = new_record
self.original_tenant = original_tenant
self.previous_record = previous_record
self.tenant = tenant
self.timestamp_usecs = timestamp_usecs
self.user_name = user_name
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
action = dictionary.get('action')
details = dictionary.get('details')
domain = dictionary.get('domain')
entity_id = dictionary.get('entityId')
entity_name = dictionary.get('entityName')
entity_type = dictionary.get('entityType')
human_timestamp = dictionary.get('humanTimestamp')
impersonation = dictionary.get('impersonation')
new_record = dictionary.get('newRecord')
original_tenant = cohesity_management_sdk.models.tenant.Tenant.from_dictionary(dictionary.get('originalTenant')) if dictionary.get('originalTenant') else None
previous_record = dictionary.get('previousRecord')
tenant = cohesity_management_sdk.models.tenant.Tenant.from_dictionary(dictionary.get('tenant')) if dictionary.get('tenant') else None
timestamp_usecs = dictionary.get('timestampUsecs')
user_name = dictionary.get('userName')
# Return an object of this model
return cls(action,
details,
domain,
entity_id,
entity_name,
entity_type,
human_timestamp,
impersonation,
new_record,
original_tenant,
previous_record,
tenant,
timestamp_usecs,
user_name)
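# Minimal usage sketch (keys and values are illustrative assumptions):
#
#   raw = {'action': 'Pause', 'entityType': 'Protection Job',
#          'entityName': 'BackupEng', 'userName': 'admin'}
#   log = ClusterAuditLog.from_dictionary(raw)
#   print(log.action, log.entity_name)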
|
the-stack_0_17086
|
# Task 1
# Given two whole numbers - the lengths of the legs of a right-angled triangle - output its area.
a = 1
b = 2
s = a * b / 2
print("The area is ", s)
# Task 2
# Input a natural number n and output its last digit.
n = input("Input a natural number ")
print(str(n)[-1])
# Task 3
# Input a two-digit natural number and output the sum of its digits.
n = int(input("Input a two-digit natural number "))
print(n // 10 + n % 10)
# Task 4
# You are given the first and second numbers of an arithmetic progression and a natural number n. Find the n-th element of the progression.
n1 = 2
n2 = 4
n = 7
n_th = n1 + (n - 1) * (n2 - n1)
print(n, "-th number is ", n_th)
|
the-stack_0_17088
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Test grammar induction performance of StructFormer."""
import argparse
import collections
import matplotlib.pyplot as plt
from nltk.parse import DependencyGraph
import numpy
import torch
from structformer import data_ptb
from structformer import tree_utils
from structformer.hinton import plot
def mean(x):
return sum(x) / len(x)
@torch.no_grad()
def test(parser, corpus, device, prt=False, gap=0):
"""Compute UF1 and UAS scores.
Args:
parser: pretrained model
corpus: labeled corpus
device: cpu or gpu
prt: bool, whether print examples
gap: distance gap for building non-binary tree
Returns:
UF1: unlabeled F1 score for constituency parsing
"""
parser.eval()
prec_list = []
reca_list = []
f1_list = []
dtree_list = []
corpus_sys = {}
corpus_ref = {}
nsens = 0
word2idx = corpus.dictionary.word2idx
dataset = zip(corpus.test_sens, corpus.test_trees, corpus.test_nltktrees)
for sen, sen_tree, sen_nltktree in dataset:
x = [word2idx[w] if w in word2idx else word2idx['<unk>'] for w in sen]
data = torch.LongTensor([x]).to(device)
pos = torch.LongTensor([list(range(len(sen)))]).to(device)
_, p_dict = parser(data, pos)
block = p_dict['block']
cibling = p_dict['cibling']
head = p_dict['head']
distance = p_dict['distance']
height = p_dict['height']
distance = distance.clone().squeeze(0).cpu().numpy().tolist()
height = height.clone().squeeze(0).cpu().numpy().tolist()
head = head.clone().squeeze(0).cpu().numpy()
max_height = numpy.max(height)
parse_tree = tree_utils.build_tree(distance, sen, gap=gap)
model_out, _ = tree_utils.get_brackets(parse_tree)
std_out, _ = tree_utils.get_brackets(sen_tree)
overlap = model_out.intersection(std_out)
corpus_sys[nsens] = tree_utils.mrg(parse_tree)
corpus_ref[nsens] = tree_utils.mrg_labeled(sen_nltktree)
prec = float(len(overlap)) / (len(model_out) + 1e-8)
reca = float(len(overlap)) / (len(std_out) + 1e-8)
if not std_out:
reca = 1.
if not model_out:
prec = 1.
f1 = 2 * prec * reca / (prec + reca + 1e-8)
prec_list.append(prec)
reca_list.append(reca)
f1_list.append(f1)
new_words = []
true_words = sen_nltktree.pos()
for d, c, w, ph in zip(distance, height, sen, head):
next_word = true_words.pop(0)
while next_word[1] not in data_ptb.WORD_TAGS:
next_word = true_words.pop(0)
new_words.append({
'address': len(new_words) + 1,
'word': next_word[0],
'lemma': None,
'ctag': None,
'tag': next_word[1],
'feats': None,
'head': numpy.argmax(ph) + 1 if c < max_height else 0,
'deps': collections.defaultdict(list),
'rel': None,
'distance': d,
'height': c
})
while true_words:
next_word = true_words.pop(0)
assert next_word[1] not in data_ptb.WORD_TAGS
dtree = DependencyGraph()
for w in new_words:
dtree.add_node(w)
dtree_list.append(dtree)
if prt and len(dtree_list) % 100 == 0:
cibling = cibling.clone().squeeze(0).cpu().numpy()
block = block.clone().squeeze(0).cpu().numpy()
for word_i, d_i, imp_i, block_i, cibling_i, head_i in zip(
sen, distance, height, block, cibling, head):
print('%20s\t%10.2f\t%5.2f\t%s\t%s\t%s' %
(word_i, d_i, imp_i, plot(block_i, max_val=1.),
plot(head_i, max_val=1), plot(cibling_i, max_val=1.)))
print('Standard output:', sen_tree)
print('Model output:', parse_tree)
print(dtree.to_conll(10))
print()
fig_i, ax_i = plt.subplots()
ax_i.set_xticks(numpy.arange(len(sen)))
ax_i.set_yticks(numpy.arange(len(sen)))
ax_i.set_xticklabels(sen)
ax_i.set_yticklabels(sen)
plt.setp(
ax_i.get_xticklabels(),
rotation=45,
ha='right',
rotation_mode='anchor')
for row in range(len(sen)):
for col in range(len(sen)):
_ = ax_i.text(
col,
row,
'%.2f' % (head[row, col]),
ha='center',
va='center',
color='w')
fig_i.tight_layout()
plt.savefig(
'./figures/sentence-%d.png' % (len(dtree_list)),
dpi=300,
format='png')
nsens += 1
print('Constituency parsing performance:')
print('Mean Prec: %.4f, Mean Reca: %.4f, Mean F1: %.4f' %
(mean(prec_list), mean(reca_list), mean(f1_list)))
correct, total = tree_utils.corpus_stats_labeled(corpus_sys, corpus_ref)
print(correct)
print(total)
print('SBAR: %.4f' % (correct['SBAR'] / total['SBAR']))
print('NP: %.4f' % (correct['NP'] / total['NP']))
print('VP: %.4f' % (correct['VP'] / total['VP']))
print('PP: %.4f' % (correct['PP'] / total['PP']))
print('ADJP: %.4f' % (correct['ADJP'] / total['ADJP']))
print('ADVP: %.4f' % (correct['ADVP'] / total['ADVP']))
print(tree_utils.corpus_average_depth(corpus_sys))
print('-' * 89)
print('Dependency parsing performance:')
print('Stanford Style:')
tree_utils.evald(dtree_list, '../data/ptb/test.stanford', directed=True)
tree_utils.evald(dtree_list, '../data/ptb/test.stanford', directed=False)
print('Conll Style:')
tree_utils.evald(dtree_list, '../data/ptb/test.conll', directed=True)
tree_utils.evald(dtree_list, '../data/ptb/test.conll', directed=False)
return mean(f1_list)
if __name__ == '__main__':
marks = [' ', '-', '=']
numpy.set_printoptions(precision=2, suppress=True, linewidth=5000)
argpr = argparse.ArgumentParser(description='PyTorch PTB Language Model')
# Model parameters.
argpr.add_argument(
'--data',
type=str,
default='data/penn/',
help='location of the data corpus')
argpr.add_argument(
'--checkpoint',
type=str,
default='PTB.pt',
help='model checkpoint to use')
argpr.add_argument('--seed', type=int, default=1111, help='random seed')
  argpr.add_argument('--gap', type=float, default=0, help='distance gap for building non-binary tree')
  argpr.add_argument('--print', action='store_true', help='print example parses')
argpr.add_argument('--cuda', action='store_true', help='use CUDA')
argpr.add_argument('--wsj10', action='store_true', help='use WSJ10')
args = argpr.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
# Load model
print('Loading model...')
with open(args.checkpoint, 'rb') as f:
model, _, _, _ = torch.load(f)
torch.cuda.manual_seed(args.seed)
model.cpu()
if args.cuda:
model.cuda()
# Load data
print('Loading PTB dataset...')
ptb_corpus = data_ptb.Corpus(args.data)
print('Evaluating...')
if args.cuda:
eval_device = torch.device('cuda:0')
else:
eval_device = torch.device('cpu')
print('=' * 89)
test(model, ptb_corpus, eval_device, prt=args.print, gap=args.gap)
print('=' * 89)
rel_weight = model.rel_weight.detach().cpu().numpy()
fig, axs = plt.subplots(8, 8, sharex=True, sharey=True)
names = ['p', 'd']
for i in range(rel_weight.shape[0]):
for j in range(rel_weight.shape[1]):
print(plot(rel_weight[i, j], max_val=1.), end=' ')
values = rel_weight[i, j]
if i == 0:
axs[i, j].set_title('%d' % (j,))
if j == 0:
axs[i, j].set_ylabel('%d' % (i,))
axs[i, j].bar(names, values)
print()
plt.savefig('./figures/mask_weights.png', dpi=300, format='png')
|
the-stack_0_17090
|
# vim: set fileencoding=utf-8 :
"""
~~~~~~~~~
Utilities
~~~~~~~~~
"""
from __future__ import absolute_import, division
import copy
from sqlalchemy import inspect
from sqlalchemy.ext.associationproxy import _AssociationList
from sqlalchemy.orm.dynamic import AppenderMixin
from sqlalchemy.orm.query import Query
from dictalchemy import constants
from dictalchemy import errors
def arg_to_dict(arg):
"""Convert an argument that can be None, list/tuple or dict to dict
Example::
>>> arg_to_dict(None)
        {}
>>> arg_to_dict(['a', 'b'])
{'a':{},'b':{}}
>>> arg_to_dict({'a':{'only': 'id'}, 'b':{'only': 'id'}})
{'a':{'only':'id'},'b':{'only':'id'}}
:return: dict with keys and dict arguments as value
"""
if arg is None:
arg = []
try:
arg = dict(arg)
except ValueError:
arg = dict.fromkeys(list(arg), {})
return arg
def asdict(model, exclude=None, exclude_underscore=None, exclude_pk=None,
follow=None, include=None, only=None, method='asdict', **kwargs):
"""Get a dict from a model
Using the `method` parameter makes it possible to have multiple methods
that formats the result.
Additional keyword arguments will be passed to all relationships that are
followed. This can be used to pass on things like request or context.
:param follow: List or dict of relationships that should be followed.
If the parameter is a dict the value should be a dict of \
keyword arguments. Currently it follows InstrumentedList, \
MappedCollection and regular 1:1, 1:m, m:m relationships. Follow \
takes an extra argument, 'method', which is the method that \
should be used on the relation. It also takes the extra argument \
'parent' which determines where the relationships data should be \
added in the response dict. If 'parent' is set the relationship \
will be added with it's own key as a child to `parent`.
:param exclude: List of properties that should be excluded, will be \
merged with `model.dictalchemy_exclude`
:param exclude_pk: If True any column that refers to the primary key will \
be excluded.
    :param exclude_underscore: Overrides `model.dictalchemy_exclude_underscore`\
if set
:param include: List of properties that should be included. Use this to \
allow python properties to be called. This list will be merged \
with `model.dictalchemy_asdict_include` or \
`model.dictalchemy_include`.
:param only: List of properties that should be included. This will \
override everything else except `follow`.
:param method: Name of the method that is currently called. This will be \
the default method used in 'follow' unless another method is\
set.
:raises: :class:`dictalchemy.errors.MissingRelationError` \
if `follow` contains a non-existent relationship.
:raises: :class:`dictalchemy.errors.UnsupportedRelationError` If `follow` \
contains an existing relationship that currently isn't supported.
:returns: dict
"""
follow = arg_to_dict(follow)
info = inspect(model)
columns = [c.key for c in info.mapper.column_attrs]
synonyms = [c.key for c in info.mapper.synonyms]
if only:
attrs = only
else:
exclude = exclude or []
exclude += getattr(model, 'dictalchemy_exclude',
constants.default_exclude) or []
if exclude_underscore is None:
exclude_underscore = getattr(model,
'dictalchemy_exclude_underscore',
constants.default_exclude_underscore)
if exclude_underscore:
# Exclude all properties starting with underscore
exclude += [k.key for k in info.mapper.attrs if k.key[0] == '_']
if exclude_pk is True:
exclude += [c.key for c in info.mapper.primary_key]
include = (include or []) + (getattr(model,
'dictalchemy_asdict_include',
getattr(model,
'dictalchemy_include',
None)) or [])
attrs = [k for k in columns + synonyms + include if k not in exclude]
data = dict([(k, getattr(model, k)) for k in attrs])
    for (rel_key, orig_args) in follow.items():
try:
rel = getattr(model, rel_key)
except AttributeError:
raise errors.MissingRelationError(rel_key)
args = copy.deepcopy(orig_args)
method = args.pop('method', method)
args['method'] = method
args.update(copy.copy(kwargs))
if hasattr(rel, method):
rel_data = getattr(rel, method)(**args)
elif isinstance(rel, (list, _AssociationList)):
rel_data = []
for child in rel:
if hasattr(child, method):
rel_data.append(getattr(child, method)(**args))
else:
try:
rel_data.append(dict(child))
# TypeError is for non-dictable children
except TypeError:
rel_data.append(copy.copy(child))
elif isinstance(rel, dict):
rel_data = {}
            for (child_key, child) in rel.items():
if hasattr(child, method):
rel_data[child_key] = getattr(child, method)(**args)
else:
try:
rel_data[child_key] = dict(child)
except ValueError:
rel_data[child_key] = copy.copy(child)
elif isinstance(rel, (AppenderMixin, Query)):
rel_data = []
for child in rel.all():
if hasattr(child, method):
rel_data.append(getattr(child, method)(**args))
else:
rel_data.append(dict(child))
elif rel is None:
rel_data = None
else:
raise errors.UnsupportedRelationError(rel_key)
ins_key = args.pop('parent', None)
if ins_key is None:
data[rel_key] = rel_data
else:
if ins_key not in data:
data[ins_key] = {}
data[ins_key][rel_key] = rel_data
return data
def fromdict(model, data, exclude=None, exclude_underscore=None,
allow_pk=None, follow=None, include=None, only=None):
"""Update a model from a dict
Works almost identically as :meth:`dictalchemy.utils.asdict`. However, it
will not create missing instances or update collections.
This method updates the following properties on a model:
* Simple columns
* Synonyms
* Simple 1-m relationships
:param data: dict of data
:param exclude: list of properties that should be excluded
:param exclude_underscore: If True underscore properties will be excluded,\
if set to None model.dictalchemy_exclude_underscore will be used.
:param allow_pk: If True any column that refers to the primary key will \
be excluded. Defaults model.dictalchemy_fromdict_allow_pk or \
dictable.constants.fromdict_allow_pk. If set to True a primary \
key can still be excluded with the `exclude` parameter.
:param follow: Dict of relations that should be followed, the key is the \
arguments passed to the relation. Relations only works on simple \
relations, not on lists.
:param include: List of properties that should be included. This list \
will override anything in the exclude list. It will not override \
allow_pk.
:param only: List of the only properties that should be set. This \
will not override `allow_pk` or `follow`.
:raises: :class:`dictalchemy.errors.DictalchemyError` If a primary key is \
in data and allow_pk is False
:returns: The model
"""
follow = arg_to_dict(follow)
info = inspect(model)
columns = [c.key for c in info.mapper.column_attrs]
synonyms = [c.key for c in info.mapper.synonyms]
relations = [c.key for c in info.mapper.relationships]
primary_keys = [c.key for c in info.mapper.primary_key]
if allow_pk is None:
allow_pk = getattr(model, 'dictalchemy_fromdict_allow_pk',
constants.default_fromdict_allow_pk)
if only:
valid_keys = only
else:
exclude = exclude or []
exclude += getattr(model, 'dictalchemy_exclude',
constants.default_exclude) or []
if exclude_underscore is None:
exclude_underscore = getattr(model,
'dictalchemy_exclude_underscore',
constants.default_exclude_underscore)
if exclude_underscore:
# Exclude all properties starting with underscore
exclude += [k.key for k in info.mapper.attrs if k.key[0] == '_']
include = (include or []) + (getattr(model,
'dictalchemy_fromdict_include',
getattr(model,
'dictalchemy_include',
None)) or [])
valid_keys = [k for k in columns + synonyms
if k not in exclude] + include
# Keys that will be updated
update_keys = set(valid_keys) & set(data.keys())
# Check for primary keys
    data_primary_key = update_keys & set(primary_keys)
if len(data_primary_key) and not allow_pk:
msg = ("Primary keys({0}) cannot be updated by fromdict."
"Set 'dictalchemy_fromdict_allow_pk' to True in your Model"
" or pass 'allow_pk=True'.").format(','.join(data_primary_key))
raise errors.DictalchemyError(msg)
# Update columns and synonyms
for k in update_keys:
setattr(model, k, data[k])
# Update simple relations
    for (k, args) in follow.items():
if k not in data:
continue
if k not in relations:
raise errors.MissingRelationError(k)
rel = getattr(model, k)
if hasattr(rel, 'fromdict'):
rel.fromdict(data[k], **args)
return model
def iter(model):
"""iter method for models
Yields everything returned by `asdict`.
"""
    for i in model.asdict().items():
yield i
def make_class_dictable(
cls,
exclude=constants.default_exclude,
exclude_underscore=constants.default_exclude_underscore,
fromdict_allow_pk=constants.default_fromdict_allow_pk,
include=None,
asdict_include=None,
fromdict_include=None):
"""Make a class dictable
Useful for when the Base class is already defined, for example when using
Flask-SQLAlchemy.
Warning: This method will overwrite existing attributes if they exists.
:param exclude: Will be set as dictalchemy_exclude on the class
:param exclude_underscore: Will be set as dictalchemy_exclude_underscore \
on the class
:param fromdict_allow_pk: Will be set as dictalchemy_fromdict_allow_pk\
on the class
:param include: Will be set as dictalchemy_include on the class.
:param asdict_include: Will be set as `dictalchemy_asdict_include` on the \
class. If not None it will override `dictalchemy_include`.
:param fromdict_include: Will be set as `dictalchemy_fromdict_include` on \
the class. If not None it will override `dictalchemy_include`.
:returns: The class
"""
setattr(cls, 'dictalchemy_exclude', exclude)
setattr(cls, 'dictalchemy_exclude_underscore', exclude_underscore)
setattr(cls, 'dictalchemy_fromdict_allow_pk', fromdict_allow_pk)
setattr(cls, 'asdict', asdict)
setattr(cls, 'fromdict', fromdict)
setattr(cls, '__iter__', iter)
setattr(cls, 'dictalchemy_include', include)
setattr(cls, 'dictalchemy_asdict_include', asdict_include)
setattr(cls, 'dictalchemy_fromdict_include', fromdict_include)
return cls
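# Illustrative usage sketch (added; not part of dictalchemy itself). The model,
# engine URL and values below are assumptions chosen only to show how
# make_class_dictable() and asdict() combine with a declarative base.
if __name__ == '__main__':  # pragma: no cover
    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker
    Base = make_class_dictable(declarative_base())
    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        name = Column(String)
    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add(User(name='alice'))
    session.commit()
    # exclude_pk=True drops the primary key column from the returned dict.
    print(session.query(User).first().asdict(exclude_pk=True))  # -> {'name': 'alice'}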
|
the-stack_0_17091
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import sys
import time
import unittest
import apache_beam as beam
from apache_beam import coders
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.portability.api.beam_interactive_api_pb2 import TestStreamFileRecord
from apache_beam.runners.interactive import background_caching_job as bcj
from apache_beam.runners.interactive import interactive_beam as ib
from apache_beam.runners.interactive import interactive_environment as ie
from apache_beam.runners.interactive import pipeline_instrument as pi
from apache_beam.runners.interactive.interactive_runner import InteractiveRunner
from apache_beam.runners.interactive.options.capture_limiters import Limiter
from apache_beam.runners.interactive.recording_manager import ElementStream
from apache_beam.runners.interactive.recording_manager import Recording
from apache_beam.runners.interactive.recording_manager import RecordingManager
from apache_beam.runners.interactive.testing.test_cache_manager import FileRecordsBuilder
from apache_beam.runners.interactive.testing.test_cache_manager import InMemoryCache
from apache_beam.runners.runner import PipelineState
from apache_beam.testing.test_stream import TestStream
from apache_beam.testing.test_stream import WindowedValueHolder
from apache_beam.transforms.window import GlobalWindow
from apache_beam.utils.timestamp import MIN_TIMESTAMP
from apache_beam.utils.windowed_value import WindowedValue
# TODO(BEAM-8288): clean up the work-around of nose tests using Python2 without
# unittest.mock module.
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock # type: ignore[misc]
class MockPipelineResult(beam.runners.runner.PipelineResult):
"""Mock class for controlling a PipelineResult."""
def __init__(self):
self._state = PipelineState.RUNNING
def wait_until_finish(self):
pass
def set_state(self, state):
self._state = state
@property
def state(self):
return self._state
def cancel(self):
self._state = PipelineState.CANCELLED
class ElementStreamTest(unittest.TestCase):
def setUp(self):
ie.new_env()
self.cache = InMemoryCache()
self.p = beam.Pipeline()
self.pcoll = self.p | beam.Create([])
self.cache_key = str(pi.CacheKey('pcoll', '', '', ''))
# Create a MockPipelineResult to control the state of a fake run of the
# pipeline.
self.mock_result = MockPipelineResult()
ie.current_env().track_user_pipelines()
ie.current_env().set_pipeline_result(self.p, self.mock_result)
ie.current_env().set_cache_manager(self.cache, self.p)
def test_read(self):
"""Test reading and if a stream is done no more elements are returned."""
self.mock_result.set_state(PipelineState.DONE)
self.cache.write(['expected'], 'full', self.cache_key)
self.cache.save_pcoder(None, 'full', self.cache_key)
stream = ElementStream(
self.pcoll, '', self.cache_key, max_n=1, max_duration_secs=1)
self.assertFalse(stream.is_done())
self.assertEqual(list(stream.read())[0], 'expected')
self.assertTrue(stream.is_done())
def test_done_if_terminated(self):
"""Test that terminating the job sets the stream as done."""
self.cache.write(['expected'], 'full', self.cache_key)
self.cache.save_pcoder(None, 'full', self.cache_key)
stream = ElementStream(
self.pcoll, '', self.cache_key, max_n=100, max_duration_secs=10)
self.assertFalse(stream.is_done())
self.assertEqual(list(stream.read(tail=False))[0], 'expected')
# The limiters were not reached, so the stream is not done yet.
self.assertFalse(stream.is_done())
self.mock_result.set_state(PipelineState.DONE)
self.assertEqual(list(stream.read(tail=False))[0], 'expected')
# The underlying pipeline is terminated, so the stream won't yield new
# elements.
self.assertTrue(stream.is_done())
def test_read_n(self):
"""Test that the stream only reads 'n' elements."""
self.mock_result.set_state(PipelineState.DONE)
self.cache.write(list(range(5)), 'full', self.cache_key)
self.cache.save_pcoder(None, 'full', self.cache_key)
stream = ElementStream(
self.pcoll, '', self.cache_key, max_n=1, max_duration_secs=1)
self.assertEqual(list(stream.read()), [0])
self.assertTrue(stream.is_done())
stream = ElementStream(
self.pcoll, '', self.cache_key, max_n=2, max_duration_secs=1)
self.assertEqual(list(stream.read()), [0, 1])
self.assertTrue(stream.is_done())
stream = ElementStream(
self.pcoll, '', self.cache_key, max_n=5, max_duration_secs=1)
self.assertEqual(list(stream.read()), list(range(5)))
self.assertTrue(stream.is_done())
# Test that if the user asks for more than in the cache it still returns.
stream = ElementStream(
self.pcoll, '', self.cache_key, max_n=10, max_duration_secs=1)
self.assertEqual(list(stream.read()), list(range(5)))
self.assertTrue(stream.is_done())
def test_read_duration(self):
"""Test that the stream only reads a 'duration' of elements."""
def as_windowed_value(element):
return WindowedValueHolder(WindowedValue(element, 0, []))
values = (FileRecordsBuilder(tag=self.cache_key)
.advance_processing_time(1)
.add_element(element=as_windowed_value(0), event_time_secs=0)
.advance_processing_time(1)
.add_element(element=as_windowed_value(1), event_time_secs=1)
.advance_processing_time(1)
.add_element(element=as_windowed_value(2), event_time_secs=3)
.advance_processing_time(1)
.add_element(element=as_windowed_value(3), event_time_secs=4)
.advance_processing_time(1)
.add_element(element=as_windowed_value(4), event_time_secs=5)
.build()) # yapf: disable
values = [
v.recorded_event for v in values if isinstance(v, TestStreamFileRecord)
]
self.mock_result.set_state(PipelineState.DONE)
self.cache.write(values, 'full', self.cache_key)
self.cache.save_pcoder(coders.FastPrimitivesCoder(), 'full', self.cache_key)
# The following tests a progression of reading different durations from the
# cache.
stream = ElementStream(
self.pcoll, '', self.cache_key, max_n=100, max_duration_secs=1)
self.assertSequenceEqual([e.value for e in stream.read()], [0])
stream = ElementStream(
self.pcoll, '', self.cache_key, max_n=100, max_duration_secs=2)
self.assertSequenceEqual([e.value for e in stream.read()], [0, 1])
stream = ElementStream(
self.pcoll, '', self.cache_key, max_n=100, max_duration_secs=10)
self.assertSequenceEqual([e.value for e in stream.read()], [0, 1, 2, 3, 4])
class RecordingTest(unittest.TestCase):
def setUp(self):
ie.new_env()
@unittest.skipIf(
sys.version_info < (3, 6, 0),
'This test requires at least Python 3.6 to work.')
def test_computed(self):
"""Tests that a PCollection is marked as computed only in a complete state.
Because the background caching job is now long-lived, repeated runs of a
PipelineFragment may yield different results for the same PCollection.
"""
p = beam.Pipeline(InteractiveRunner())
elems = p | beam.Create([0, 1, 2])
ib.watch(locals())
# Create a MockPipelineResult to control the state of a fake run of the
# pipeline.
mock_result = MockPipelineResult()
ie.current_env().track_user_pipelines()
ie.current_env().set_pipeline_result(p, mock_result)
# Create a mock BackgroundCachingJob that will control whether to set the
# PCollections as computed or not.
bcj_mock_result = MockPipelineResult()
background_caching_job = bcj.BackgroundCachingJob(bcj_mock_result, [])
# Create a recording.
recording = Recording(
p, [elems],
mock_result,
pi.PipelineInstrument(p),
max_n=10,
max_duration_secs=60)
# The background caching job and the recording isn't done yet so there may
# be more elements to be recorded.
self.assertFalse(recording.is_computed())
self.assertFalse(recording.computed())
self.assertTrue(recording.uncomputed())
# The recording is finished but the background caching job is not. There
# may still be more elements to record, or the intermediate PCollection may
# have stopped caching in an incomplete state, e.g. before a window could
# fire.
mock_result.set_state(PipelineState.DONE)
recording.wait_until_finish()
self.assertFalse(recording.is_computed())
self.assertFalse(recording.computed())
self.assertTrue(recording.uncomputed())
# The background caching job finished before we started a recording which
# is a sure signal that there will be no more elements.
bcj_mock_result.set_state(PipelineState.DONE)
ie.current_env().set_background_caching_job(p, background_caching_job)
recording = Recording(
p, [elems],
mock_result,
pi.PipelineInstrument(p),
max_n=10,
max_duration_secs=60)
recording.wait_until_finish()
# There are no more elements and the recording finished, meaning that the
# intermediate PCollections are in a complete state. They can now be marked
# as computed.
self.assertTrue(recording.is_computed())
self.assertTrue(recording.computed())
self.assertFalse(recording.uncomputed())
@unittest.skipIf(
sys.version_info < (3, 6, 0),
'This test requires at least Python 3.6 to work.')
def test_describe(self):
p = beam.Pipeline(InteractiveRunner())
numbers = p | 'numbers' >> beam.Create([0, 1, 2])
letters = p | 'letters' >> beam.Create(['a', 'b', 'c'])
ib.watch(locals())
# Create a MockPipelineResult to control the state of a fake run of the
# pipeline.
mock_result = MockPipelineResult()
ie.current_env().track_user_pipelines()
ie.current_env().set_pipeline_result(p, mock_result)
cache_manager = InMemoryCache()
ie.current_env().set_cache_manager(cache_manager, p)
# Create a recording with an arbitrary start time.
recording = Recording(
p, [numbers, letters],
mock_result,
pi.PipelineInstrument(p),
max_n=10,
max_duration_secs=60)
# Get the cache key of the stream and write something to cache. This is
# so that a pipeline doesn't have to run in the test.
numbers_stream = recording.stream(numbers)
cache_manager.write([0, 1, 2], 'full', numbers_stream.cache_key)
cache_manager.save_pcoder(None, 'full', numbers_stream.cache_key)
letters_stream = recording.stream(letters)
cache_manager.write(['a', 'b', 'c'], 'full', letters_stream.cache_key)
cache_manager.save_pcoder(None, 'full', letters_stream.cache_key)
# Get the description.
description = recording.describe()
size = description['size']
self.assertEqual(
size,
cache_manager.size('full', numbers_stream.cache_key) +
cache_manager.size('full', letters_stream.cache_key))
class RecordingManagerTest(unittest.TestCase):
def setUp(self):
ie.new_env()
def tearDown(self):
ib.options.capture_control.set_limiters_for_test([])
@unittest.skipIf(
sys.version_info < (3, 6, 0),
'This test requires at least Python 3.6 to work.')
def test_basic_execution(self):
"""A basic pipeline to be used as a smoke test."""
# Create the pipeline that will emit 0, 1, 2.
p = beam.Pipeline(InteractiveRunner())
numbers = p | 'numbers' >> beam.Create([0, 1, 2])
letters = p | 'letters' >> beam.Create(['a', 'b', 'c'])
# Watch the pipeline and PCollections. This is normally done in a notebook
# environment automatically, but we have to do it manually here.
ib.watch(locals())
ie.current_env().track_user_pipelines()
# Create the recording objects. By calling `record` a new PipelineFragment
# is started to compute the given PCollections and cache to disk.
rm = RecordingManager(p)
numbers_recording = rm.record([numbers], max_n=3, max_duration=500)
numbers_stream = numbers_recording.stream(numbers)
numbers_recording.wait_until_finish()
# Once the pipeline fragment completes, we can read from the stream and know
# that all elements were written to cache.
elems = list(numbers_stream.read())
expected_elems = [
WindowedValue(i, MIN_TIMESTAMP, [GlobalWindow()]) for i in range(3)
]
self.assertListEqual(elems, expected_elems)
# Make an extra recording and test the description.
letters_recording = rm.record([letters], max_n=3, max_duration=500)
letters_recording.wait_until_finish()
self.assertEqual(
rm.describe()['size'],
numbers_recording.describe()['size'] +
letters_recording.describe()['size'])
rm.cancel()
@unittest.skipIf(
sys.version_info < (3, 6, 0),
'This test requires at least Python 3.6 to work.')
def test_duration_parsing(self):
p = beam.Pipeline(InteractiveRunner())
elems = p | beam.Create([0, 1, 2])
# Watch the pipeline and PCollections. This is normally done in a notebook
# environment automatically, but we have to do it manually here.
ib.watch(locals())
ie.current_env().track_user_pipelines()
# Create the recording objects.
rm = RecordingManager(p)
recording = rm.record([elems], max_n=3, max_duration='500s')
recording.wait_until_finish()
# Assert that the duration was parsed correctly to integer seconds.
self.assertEqual(recording.describe()['duration'], 500)
@unittest.skipIf(
sys.version_info < (3, 6, 0),
'This test requires at least Python 3.6 to work.')
def test_cancel_stops_recording(self):
# Add the TestStream so that it can be cached.
ib.options.capturable_sources.add(TestStream)
p = beam.Pipeline(
InteractiveRunner(), options=PipelineOptions(streaming=True))
elems = (
p
| TestStream().advance_watermark_to(0).advance_processing_time(
1).add_elements(list(range(10))).advance_processing_time(1))
squares = elems | beam.Map(lambda x: x**2)
# Watch the local scope for Interactive Beam so that referenced PCollections
# will be cached.
ib.watch(locals())
# This is normally done in the interactive_utils when a transform is
# applied but needs an IPython environment. So we manually run this here.
ie.current_env().track_user_pipelines()
# Get the recording then the BackgroundCachingJob.
rm = RecordingManager(p)
recording = rm.record([squares], max_n=10, max_duration=30)
# The BackgroundCachingJob is still waiting for more elements, so it isn't
# done yet.
bcj = ie.current_env().get_background_caching_job(p)
self.assertFalse(bcj.is_done())
# Assert that something was read and that the BackgroundCachingJob was
    # successfully stopped.
self.assertTrue(list(recording.stream(squares).read()))
rm.cancel()
self.assertTrue(bcj.is_done())
@unittest.skipIf(
sys.version_info < (3, 6, 0),
'This test requires at least Python 3.6 to work.')
def test_recording_manager_clears_cache(self):
"""Tests that the RecordingManager clears the cache before recording.
A job may have incomplete PCollections when the job terminates. Clearing the
cache ensures that correct results are computed every run.
"""
# Add the TestStream so that it can be cached.
ib.options.capturable_sources.add(TestStream)
p = beam.Pipeline(
InteractiveRunner(), options=PipelineOptions(streaming=True))
elems = (
p
| TestStream().advance_watermark_to(0).advance_processing_time(
1).add_elements(list(range(10))).advance_processing_time(1))
squares = elems | beam.Map(lambda x: x**2)
# Watch the local scope for Interactive Beam so that referenced PCollections
# will be cached.
ib.watch(locals())
# This is normally done in the interactive_utils when a transform is
# applied but needs an IPython environment. So we manually run this here.
ie.current_env().track_user_pipelines()
# Do the first recording to get the timestamp of the first time the fragment
# was run.
rm = RecordingManager(p)
rm.record([squares], max_n=10, max_duration=2)
first_recording_start = rm.describe()['start']
rm.cancel()
# Get the cache, key, and coder to read the PCollection from the cache.
pipeline_instrument = pi.PipelineInstrument(p)
cache = ie.current_env().get_cache_manager(p)
cache_key = pipeline_instrument.cache_key(squares)
# Set up a mock for the Cache's clear function which will be used to clear
# uncomputed PCollections.
cache.clear = MagicMock()
# Rerun the fragment. If the cache was cleared correctly then the starting
# time of the second recording will be later than the first. This is because
    # the PCollection wasn't considered to be computed and was cleared from
# cache. Thus the pipeline fragment was rerun for that PCollection at a
# later time.
rm.record([squares], max_n=10, max_duration=1)
second_recording_start = rm.describe()['start']
rm.cancel()
self.assertGreater(second_recording_start, first_recording_start)
# Assert that the cache cleared the PCollection.
cache.clear.assert_called_with('full', cache_key)
@unittest.skipIf(
sys.version_info < (3, 6, 0),
'This test requires at least Python 3.6 to work.')
def test_clear(self):
"""Tests that clear can empty the cache for a specific pipeline."""
# Create two pipelines so we can check that clearing the cache won't clear
# all defined pipelines.
p1 = beam.Pipeline(InteractiveRunner())
elems_1 = p1 | 'elems 1' >> beam.Create([0, 1, 2])
p2 = beam.Pipeline(InteractiveRunner())
elems_2 = p2 | 'elems 2' >> beam.Create([0, 1, 2])
# Watch the pipeline and PCollections. This is normally done in a notebook
# environment automatically, but we have to do it manually here.
ib.watch(locals())
ie.current_env().track_user_pipelines()
# Create the recording objects. By calling `record` a new PipelineFragment
# is started to compute the given PCollections and cache to disk.
rm_1 = RecordingManager(p1)
recording = rm_1.record([elems_1], max_n=3, max_duration=500)
recording.wait_until_finish()
rm_2 = RecordingManager(p2)
recording = rm_2.record([elems_2], max_n=3, max_duration=500)
recording.wait_until_finish()
# Assert that clearing only one recording clears that recording.
self.assertGreater(rm_1.describe()['size'], 0)
self.assertGreater(rm_2.describe()['size'], 0)
rm_1.clear()
self.assertEqual(rm_1.describe()['size'], 0)
self.assertGreater(rm_2.describe()['size'], 0)
rm_2.clear()
self.assertEqual(rm_2.describe()['size'], 0)
@unittest.skipIf(
sys.version_info < (3, 6, 0),
'This test requires at least Python 3.6 to work.')
def test_record_pipeline(self):
# Add the TestStream so that it can be cached.
ib.options.capturable_sources.add(TestStream)
p = beam.Pipeline(
InteractiveRunner(), options=PipelineOptions(streaming=True))
# pylint: disable=unused-variable
_ = (p
| TestStream()
.advance_watermark_to(0)
.advance_processing_time(1)
.add_elements(list(range(10)))
.advance_processing_time(1)) # yapf: disable
# Watch the local scope for Interactive Beam so that referenced PCollections
# will be cached.
ib.watch(locals())
# This is normally done in the interactive_utils when a transform is
# applied but needs an IPython environment. So we manually run this here.
ie.current_env().track_user_pipelines()
    # Create a limiter that stops the background caching job when something is
    # written to cache. This is used to ensure that the pipeline is
# functioning properly and that there are no data races with the test.
class SizeLimiter(Limiter):
def __init__(self, recording_manager):
self.recording_manager = recording_manager
def is_triggered(self):
return self.recording_manager.describe()['size'] > 0
# Do the first recording to get the timestamp of the first time the fragment
# was run.
rm = RecordingManager(p)
ib.options.capture_control.set_limiters_for_test([SizeLimiter(rm)])
self.assertEqual(rm.describe()['state'], PipelineState.STOPPED)
self.assertTrue(rm.record_pipeline())
self.assertFalse(rm.record_pipeline())
for _ in range(60):
if rm.describe()['state'] == PipelineState.CANCELLED:
break
time.sleep(1)
self.assertTrue(
rm.describe()['state'] == PipelineState.CANCELLED,
'Test timed out waiting for pipeline to be cancelled. This indicates '
'that the BackgroundCachingJob did not cache anything.')
if __name__ == '__main__':
unittest.main()
|
the-stack_0_17092
|
# coding: utf-8
try:
from rest_framework_jwt.settings import api_settings
def jwt_encode_handler(payload):
"""
Encode handler override for JWT
"""
import jwt
return jwt.encode(
payload,
str(api_settings.JWT_SECRET_KEY),
str(api_settings.JWT_ALGORITHM)
).decode('utf-8')
def jwt_decode_handler(token):
"""
Decode handler override for JWT
"""
options = {
'verify_exp': api_settings.JWT_VERIFY_EXPIRATION,
}
import jwt
return jwt.decode(
token,
str(api_settings.JWT_SECRET_KEY),
            api_settings.JWT_VERIFY,
options=options,
leeway=api_settings.JWT_LEEWAY,
audience=api_settings.JWT_AUDIENCE,
issuer=api_settings.JWT_ISSUER,
algorithms=[api_settings.JWT_ALGORITHM]
)
def jwt_payload_handler(user):
"""
Payload handler for JWT
"""
from rest_framework_jwt.utils import jwt_payload_handler as payload_handler
payload = payload_handler(user)
payload.update(
user_id=user.pk,
email=getattr(user, 'email', None),
is_staff=getattr(user, 'is_staff', None),
is_superuser=getattr(user, 'is_superuser', None))
return payload
def jwt_response_payload_handler(token, user, request):
"""
Token payload handler for JWT
"""
from django.utils.timezone import now
if user and hasattr(user, 'last_login'):
user.last_login = now()
user.save(update_fields=['last_login'])
return {'token': token}
except ImportError:
pass
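# Added wiring sketch (an assumption, not part of this module): with
# djangorestframework-jwt, overrides like these are normally referenced from the
# JWT_AUTH dict in the Django settings module; the dotted path below is a
# hypothetical placeholder for wherever this file actually lives.
JWT_AUTH_EXAMPLE = {
    'JWT_ENCODE_HANDLER': 'myproject.jwt_handlers.jwt_encode_handler',
    'JWT_DECODE_HANDLER': 'myproject.jwt_handlers.jwt_decode_handler',
    'JWT_PAYLOAD_HANDLER': 'myproject.jwt_handlers.jwt_payload_handler',
    'JWT_RESPONSE_PAYLOAD_HANDLER': 'myproject.jwt_handlers.jwt_response_payload_handler',
}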
|
the-stack_0_17093
|
"""Helper for evaluation on the Labeled Faces in the Wild dataset
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
# Copyright (c) 2021 Kaen Chan
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from evaluation.pyeer.eer_info import get_eer_stats
from mc_dropout.network_mcdropout import NetworkMCDropout
from utils import utils
from dataset.dataset_np_ipc import Dataset
import os
import sys
import numpy as np
from scipy import misc
from scipy import interpolate
import sklearn
import cv2
import math
import datetime
import pickle
from sklearn.decomposition import PCA
import mxnet as mx
from mxnet import ndarray as nd
import _pickle as cPickle
from utils.utils import KFold
def calculate_eer(embeddings1, embeddings2, actual_issame, compare_func, nrof_folds=10):
assert(embeddings1.shape[0] == embeddings2.shape[0])
assert(embeddings1.shape[1] == embeddings2.shape[1])
dist = compare_func(embeddings1, embeddings2)
gscores_a = dist[actual_issame == 1]
iscores_a = dist[actual_issame == 0]
stats_a = get_eer_stats(gscores_a, iscores_a)
return stats_a
def evaluate(embeddings, actual_issame, compare_func, nrof_folds=10, keep_idxes=None):
# Calculate evaluation metrics
embeddings1 = embeddings[0::2]
embeddings2 = embeddings[1::2]
actual_issame = np.asarray(actual_issame)
if keep_idxes is not None:
embeddings1 = embeddings1[keep_idxes]
embeddings2 = embeddings2[keep_idxes]
actual_issame = actual_issame[keep_idxes]
return calculate_eer(embeddings1, embeddings2,
actual_issame, compare_func, nrof_folds=nrof_folds)
def load_bin(path, image_size):
print(path, image_size)
with open(path, 'rb') as f:
if 'lfw_all' in path:
bins, issame_list = pickle.load(f)
else:
bins, issame_list = pickle.load(f, encoding='latin1')
data_list = []
for flip in [0]:
data = nd.empty((len(issame_list)*2, image_size[0], image_size[1], 3))
data_list.append(data)
print(len(bins))
for i in range(len(issame_list)*2):
_bin = bins[i]
# print(type(_bin))
img = mx.image.imdecode(_bin)
# img = nd.transpose(img, axes=(2, 0, 1))
for flip in [0]:
if flip==1:
img = mx.ndarray.flip(data=img, axis=2)
data_list[flip][i][:] = img
# if i%5000==0:
# print('loading bin', i)
print(data_list[0].shape)
return (data_list, issame_list)
def extract_features(images_preprocessed, issame_list, extract_feature_func, batch_size, name='', result_dir='',
re_extract_feature=True):
print('testing verification..')
if name:
save_name_pkl_feature = result_dir + '/%s_feature.pkl' % name
if re_extract_feature or not os.path.exists(save_name_pkl_feature):
images = images_preprocessed
print(images.shape)
mu, sigma_sq = extract_feature_func(images)
save_data = (mu, sigma_sq, issame_list)
if name:
with open(save_name_pkl_feature, 'wb') as f:
cPickle.dump(save_data, f)
print('save', save_name_pkl_feature)
else:
with open(save_name_pkl_feature, 'rb') as f:
data = cPickle.load(f)
if len(data) == 3:
mu, sigma_sq, issame_list = data
else:
mu, sigma_sq = data
print('load', save_name_pkl_feature)
return mu, sigma_sq, issame_list
def eval_images_with_sigma(mu, sigma_sq, issame_list, nfolds=10, name='', filter_out_type='max', sigma_sizes=1):
print('sigma_sq', sigma_sq.shape)
feat_pfe = np.concatenate([mu, sigma_sq], axis=1)
if name != '':
np.save('o_sigma_%s.npy' % name, sigma_sq)
# quality_score = -np.mean(np.log(sigma_sq), axis=1)
# print('quality_score quality_score=-np.mean(np.log(sigma_sq),axis=1) percentile [0, 10, 30, 50, 70, 90, 100]')
# print('quality_score ', np.percentile(quality_score.ravel(), [0, 10, 30, 50, 70, 90, 100]))
s = 'sigma_sq ' + str(np.percentile(sigma_sq.ravel(), [0, 10, 30, 50, 70, 90, 100])) + \
' percentile [0, 10, 30, 50, 70, 90, 100]\n'
# print(mu.shape)
# print('sigma_sq', sigma_sq.shape)
if sigma_sq.shape[1] == 2:
sigma_sq_c = np.copy(sigma_sq)
sigma_sq_list = [sigma_sq_c[:,:1], sigma_sq_c[:,1:]]
elif type(sigma_sizes) == list:
sigma_sq_list = []
idx = 0
for si in sigma_sizes:
sigma = sigma_sq[:, idx:idx + si]
if si > 1:
sigma = 1/np.mean(1/(sigma+1e-6), axis=-1)
sigma_sq_list += [sigma]
idx += si
elif sigma_sq.shape[1] > 2:
sigma_sq_list = [1/np.mean(1/(sigma_sq+1e-6), axis=-1)]
else:
sigma_sq_list = [sigma_sq]
for sigma_sq in sigma_sq_list:
sigma_sq1 = sigma_sq[0::2]
sigma_sq2 = sigma_sq[1::2]
# filter_out_type = 'max'
if filter_out_type == 'max':
sigma_fuse = np.maximum(sigma_sq1, sigma_sq2)
else:
sigma_fuse = sigma_sq1 + sigma_sq2
# reject_factor = 0.1
error_list = []
# reject_factors = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]
# reject_factors = np.arange(50) / 100.
# reject_factors = np.arange(30) / 100.
# reject_factors = [0.0, 0.1, 0.2, 0.3]
reject_factors_points = [0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3]
reject_factors = np.arange(0, 1.0, 0.01)
for reject_factor in reject_factors:
risk_threshold = np.percentile(sigma_fuse.ravel(), (1-reject_factor)*100)
keep_idxes = np.where(sigma_fuse <= risk_threshold)[0]
if len(keep_idxes) == 0:
keep_idxes = None
stats = evaluate(mu, issame_list, utils.pair_cosin_score, nrof_folds=nfolds, keep_idxes=keep_idxes)
if reject_factor in reject_factors_points:
s += 'reject_factor {:.4f} '.format(reject_factor)
s += 'risk_threshold {:.6f} '.format(risk_threshold)
s += 'keep_idxes {} / {} '.format(len(keep_idxes), len(sigma_fuse))
s += 'Cosine score eer %f fmr100 %f fmr1000 %f\n' % (stats.eer, stats.fmr100, stats.fmr1000)
error_list += [stats.fmr1000]
# print('cosin', 'acc', accuracy, 'threshold', threshold)
# print(s)
# compare_func = lambda x,y: utils.pair_MLS_score(x, y, use_attention_only=False)
# accuracy, threshold = evaluate(feat_pfe, issame_list, compare_func, nrof_folds=nfolds, keep_idxes=keep_idxes)
# s += 'MLS score acc %f threshold %f' % (accuracy, threshold)
# print('MLS', 'acc', accuracy, 'threshold', threshold)
if keep_idxes is None:
break
# s_avg = 'reject_factor 0.5 risk_threshold 0.585041 keep_idxes 3500 / 7000 '
s_avg = 'reject_factor mean --------------------------------------------- '
s_avg += 'Cosine score fmr1000 %f\n' % (np.mean(error_list))
s += s_avg
tpr = error_list
fpr = reject_factors
auc = sklearn.metrics.auc(fpr, tpr)
l = int(len(tpr)*0.3)
auc30 = sklearn.metrics.auc(fpr[:l], tpr[:l])
s += 'AUERC: %1.4f\n' % auc
s += 'AUERC30: %1.4f\n' % auc30
best = error_list[0]**2/2
s += 'AUC: %1.4f\n' % (auc-best)
best30 = (error_list[0] * min(error_list[0], 0.3))/2
s += 'AUC30: %1.4f\n' % (auc30-best30)
s += '\n'
# print(s)
return s[:-1]
def eval_images_mls(mu, sigma_sq_i, issame_list, nfolds=10, sigma_sizes=1):
print('sigma_sq', sigma_sq_i.shape)
if sigma_sq_i.shape[1] == 2:
sigma_sq_c = np.copy(sigma_sq_i)
sigma_sq_list = [sigma_sq_c[:,:1], sigma_sq_c[:,1:]]
elif type(sigma_sizes) == list:
sigma_sq_list = []
idx = 0
for si in sigma_sizes:
sigma_sq_list += [sigma_sq_i[:, idx:idx+si]]
idx += si
else:
sigma_sq_list = [sigma_sq_i]
s = ''
ret = {}
accuracy, threshold = evaluate(mu, issame_list, utils.pair_cosin_score, nrof_folds=nfolds)
ret['Cosine'] = accuracy
s += 'Cosine score acc %.4f threshold %.4f\n' % (accuracy, threshold)
for sigma_sq in sigma_sq_list:
print('testing verification..')
feat_pfe = np.concatenate([mu, sigma_sq], axis=1)
# quality_score = -np.mean(np.log(sigma_sq), axis=1)
compare_func = lambda x, y: utils.pair_MLS_score(x, y, use_attention_only=False)
accuracy, threshold = evaluate(feat_pfe, issame_list, compare_func, nrof_folds=nfolds)
s += 'MLS score acc %.4f threshold %.4f\n' % (accuracy, threshold)
ret['MLS'] = accuracy
# print(s)
return s, ret
def eval_images(images_preprocessed, issame_list, extract_feature_func, batch_size, nfolds=10, name='', result_dir='',
re_extract_feature=True, filter_out_type='max', sigma_sizes=1, tt_flip=False, only_mls=True):
mu, sigma_sq, issame_list = extract_features(images_preprocessed, issame_list, extract_feature_func, batch_size,
name=name, result_dir=result_dir,
re_extract_feature=re_extract_feature)
# s_mls, ret = eval_images_mls(mu, sigma_sq, issame_list, nfolds=nfolds, sigma_sizes=sigma_sizes)
s_mls, ret = '', [0]
info = s_mls
if not only_mls:
s_reject = eval_images_with_sigma(mu, sigma_sq, issame_list, nfolds=nfolds, name='',
filter_out_type=filter_out_type, sigma_sizes=sigma_sizes)
info = s_mls + s_reject
if tt_flip:
info = info.replace('Cosine score acc', 'tt-flip Cosine score acc')
info = info.replace('MLS score acc', 'tt-flip MLS score acc')
return info, ret
def save_dataset_as_jpg(data_set, name):
data_list = data_set[0]
issame_list = data_set[1]
data_list = data_list[0].asnumpy()
root = r'F:\data\face-recognition\test\1v1'
for i in range(len(data_list)):
path = os.path.join(root, name)
if not os.path.exists(path):
os.makedirs(path)
path = os.path.join(path, '%04d_%d.jpg' % (i, issame_list[i//2]))
print(path)
cv2.imwrite(path, data_list[i].astype(np.uint8)[...,::-1])
def eval(data_set, network, batch_size, nfolds=10, name='', result_dir='', re_extract_feature=True,
filter_out_type='max', sigma_sizes=1, tt_flip=False):
print('testing verification..')
data_list = data_set[0]
issame_list = data_set[1]
data_list = data_list[0].asnumpy()
# images = preprocess(data_list, network.config, False)
images = data_list
del data_set
for i in range(1):
# name1 = name + '_keep0.9_%03d' % i
name1 = name
extract_feature_func = lambda x: network.extract_feature(x, batch_size=32, need_preprecess=True,
tt_flip=tt_flip, verbose=True)
ret, _ = eval_images(images, issame_list, extract_feature_func, batch_size, nfolds=nfolds, name=name1,
result_dir=result_dir, re_extract_feature=re_extract_feature,
filter_out_type=filter_out_type, sigma_sizes=sigma_sizes, tt_flip=tt_flip, only_mls=False)
# ret = eval_images_cmp(images, issame_list, network, batch_size, nfolds=10, name=name, result_dir=result_dir,
# re_extract_feature=re_extract_feature, filter_out_type=filter_out_type)
return ret
def main_save_data(args):
data_dir = args.dataset_path
data_dir = r'F:\data\face-recognition\MS-Celeb-1M\faces_emore'
data_dir = r'F:\data\face-recognition\trillion-pairs\challenge\ms1m-retinaface-t1'
for name in args.target.split(','):
path = os.path.join(data_dir,name+".bin")
if os.path.exists(path):
image_size = [112, 112]
data_set = load_bin(path, image_size)
save_dataset_as_jpg(data_set, name)
def eval_images_from_pkl_all_data(model_dir, targets=None):
filter_out_type = 'max'
densefilter = False
seperate = False
nfolds = 10
# featmodels = ['r64', 'r100', 'r50']
# featmodels = ['r100', 'r50']
featmodels = ['same']
for featmodel in featmodels:
mu_list = []
idq_list = []
iss_list = []
if targets is None:
targets = 'lfw,calfw,cplfw,cfp_ff,cfp_fp,agedb_30,vgg2_fp'
for target in targets.split(','):
path_pkl = model_dir + r'\%s_feature.pkl' % target
with open(path_pkl, 'rb') as f:
data = cPickle.load(f)
mu2, id_quality, issame_list = data
save_name_pkl_feature = None
if featmodel == 'same':
save_name_pkl_feature = r'{}\{}_feature.pkl'.format(model_dir, target)
elif featmodel == 'r100':
save_name_pkl_feature = r'F:\data\face-recognition\test\IJB_release\pretrained_models\MS1MV2-ResNet100-Arcface\{}_feature.pkl'.format(target)
elif featmodel == 'r50':
save_name_pkl_feature = r'F:\data\face-recognition\test\IJB_release\pretrained_models\VGG2-ResNet50-Arcface\{}_feature.pkl'.format(target)
elif featmodel == 'r64':
save_name_pkl_feature = r'G:\chenkai\Probabilistic-Face-Embeddings-master\log\resface64_relu_msarcface_am_PFE_mbv3small_reject\20210116-040122-s16-m0.4-tau1\{}_feature.pkl'.format(
target)
else:
raise ('error', save_name_pkl_feature)
with open(save_name_pkl_feature, 'rb') as f:
data = cPickle.load(f)
mu2, _, _ = data
print('load verification model', save_name_pkl_feature)
if seperate:
s = eval_images_with_sigma(mu2, id_quality, issame_list, nfolds=nfolds, name='',
filter_out_type=filter_out_type, densefilter=densefilter)
print(s)
logname = 'testing-log-fnmr-{}-{}.txt'.format(target, featmodel)
if densefilter:
logname = 'testing-log-fnmr-{}-{}-densefilter.txt'.format(target, featmodel)
with open(os.path.join(model_dir, logname), 'a') as f:
if save_name_pkl_feature is not None:
f.write(save_name_pkl_feature + '\n')
f.write(targets + '\n')
f.write(s + '\n')
continue
mu_list += list(mu2)
idq_list += list(id_quality)
iss_list += list(issame_list)
print('load', path_pkl)
# s = eval_images_with_sigma(mu, id_quality, issame_list, nfolds=10, name='', filter_out_type=filter_out_type)
# print(s)
if seperate:
continue
mu_list = np.array(mu_list)
idq_list = np.array(idq_list)
iss_list = np.array(iss_list)
print('pos', np.sum(iss_list), 'neg', len(iss_list)-np.sum(iss_list), 'total', len(iss_list))
if id_quality.shape[1] == 513:
sigma_sizes = [512, 1]
else:
sigma_sizes = 1
s, ret = eval_images_mls(mu_list, idq_list, iss_list, nfolds=10, sigma_sizes=sigma_sizes)
s += eval_images_with_sigma(mu_list, idq_list, iss_list, nfolds=nfolds, name='',
filter_out_type=filter_out_type, sigma_sizes=sigma_sizes)
print(s)
logname = 'testing-log-fnmr-{}-{}.txt'.format('alldata', featmodel)
if densefilter:
logname = 'testing-log-fnmr-{}-{}-densefilter.txt'.format('alldata', featmodel)
with open(os.path.join(model_dir, logname), 'a') as f:
if save_name_pkl_feature is not None:
f.write(save_name_pkl_feature + '\n')
f.write(targets + '\n')
f.write(s + '\n')
with open(os.path.join(model_dir, 'testing-log.txt'), 'a') as f:
if save_name_pkl_feature is not None:
f.write(save_name_pkl_feature + '\n')
f.write(targets + '\n')
f.write(s + '\n')
def main(args):
data_dir = args.dataset_path
# data_dir = r'F:\data\face-recognition\MS-Celeb-1M\faces_emore'
# data_dir = r'F:\data\face-recognition\trillion-pairs\challenge\ms1m-retinaface-t1'
# data_dir = r'F:\data\metric-learning\face\ms1m-retinaface-t1'
re_extract_feature = True
# filter_out_type = 'add'
filter_out_type = 'max'
tt_flip = False
# Load model files and config file
network = NetworkMCDropout(args.backbone_name, args.resume_backbone)
# # images = np.random.random([1, 128, 128, 3])
# images = np.random.random([1, 3, 112, 112])
# img = cv2.imread(r'E:\chenkai\probface-pytorch\im_96x96.jpg')
# images = np.array([img])
# for _ in range(1):
# mu, sigma_sq = network.extract_feature(images, 1, need_preprecess=True, tt_flip=True, verbose=True)
# print(mu[0, :5])
# exit(0)
# log_dir = r'E:\chenkai\face-uncertainty-pytorch\mc_dropout'
log_dir = args.log_dir
if not os.path.exists(log_dir):
os.makedirs(log_dir)
print(args.target)
for namec in args.target.split(','):
path = os.path.join(data_dir,namec+".bin")
if os.path.exists(path):
print(path)
image_size = [112, 112]
data_set = load_bin(path, image_size)
name = namec
print('ver', name)
# save_pkl_name = '' # donot save feature.pkl
save_pkl_name = namec
print(log_dir)
sigma_sizes = network.uncertainty_size
info = eval(data_set, network, args.batch_size, 10, name=save_pkl_name, result_dir=log_dir,
re_extract_feature=re_extract_feature, filter_out_type=filter_out_type,
sigma_sizes=sigma_sizes, tt_flip=tt_flip)
# print(info)
info_result = '--- ' + name + ' ---\n'
info_result += data_dir + '\n'
info_result += info + "\n"
print("")
print(info_result)
with open(os.path.join(log_dir, 'testing-log-fnmr-{}-{}.txt'.format(name, filter_out_type)), 'a') as f:
f.write(info_result + '\n')
with open(os.path.join(log_dir, 'testing-log-fnmr.txt'), 'a') as f:
f.write(info_result + '\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--backbone_name", help="", type=str, default='')
parser.add_argument("--resume_backbone", help="", type=str, default='')
parser.add_argument("--log_dir", help="", type=str, default='')
parser.add_argument("--dataset_path", help="The path to the LFW dataset directory",
type=str, default=r'F:\data\metric-learning\face\ms1m-retinaface-t1')
parser.add_argument("--batch_size", help="Number of images per mini batch",
type=int, default=16)
parser.add_argument('--target', type=str, default='lfw,cfp_fp,agedb_30', help='verification targets')
args = parser.parse_args()
# args.dataset_path = r''
dataset_path_list = [
r'F:\data\metric-learning\face\ms1m-retinaface-t1',
# r'F:\data\metric-learning\face\ms1m-retinaface-t1\blur-r5-p0.05',
]
# log_dir = r'G:\chenkai\probface-pytorch\log\ir50_pfe\20210327-132611'
# log_dir = r'G:\chenkai\probface-pytorch\log\ir50_pfe\20210327-181146-mls-cl1-0.15'
# args.target = 'lfw,cfp_fp,agedb_30'
args.target = 'calfw,cplfw,cfp_ff,vgg2_fp'
args.target = 'lfw,calfw,cplfw,cfp_ff,cfp_fp,agedb_30,vgg2_fp'
# args.target = 'calfw'
args.dataset_path = dataset_path_list[0]
# args.log_dir = r'E:\chenkai\face-uncertainty-pytorch\log\glint-ir50\mcdropout'
# args.backbone_name = 'backbones.iresnet.iresnet50'
# args.resume_backbone = r'E:\chenkai\arcface_torch\glint360k_cosface_r50_fp16_0.1\backbone.pth'
# main(args)
#
# args.log_dir = r'E:\chenkai\face-uncertainty-pytorch\log\glint-ir100\mcdropout'
# args.backbone_name = 'backbones.iresnet.iresnet100'
# args.resume_backbone = r'E:\chenkai\arcface_torch\glint360k_cosface_r100_fp16_0.1\backbone.pth'
# main(args)
#
# args.log_dir = r'E:\chenkai\face-uncertainty-pytorch\log\ms1mv3-ir50\mcdropout'
# args.backbone_name = 'backbones.iresnet.iresnet50'
# args.resume_backbone = r'E:\chenkai\arcface_torch\ms1mv3_arcface_r50_fp16\backbone.pth'
# main(args)
args.log_dir = r'E:\chenkai\face-uncertainty-pytorch\log\ms1mv3-ir50\mcdropout'
args.backbone_name = 'backbones.iresnet.iresnet50'
args.resume_backbone = r'E:\chenkai\arcface_torch\ms1mv3_arcface_r50_fp16\backbone.pth'
main(args)
|
the-stack_0_17095
|
# -*- coding: utf-8 -*-
"""This file contains the definition of all languages supported by the program."""
LANGUAGES = [
{
'name': 'Spanish',
'common_words': [
'el', 'la', 'de', 'que', 'y', 'a', 'en', 'un', 'ser', 'se',
'no', 'haber', 'por', 'con', 'su', 'para', 'como', 'estar',
'tener', 'le', 'lo', 'lo', 'todo', 'pero', 'más', 'hacer',
'o', 'poder', 'decir', 'este', 'ir', 'otro', 'ese', 'la',
'si', 'me', 'ya', 'ver', 'porque', 'dar', 'cuando', 'él',
'muy', 'sin', 'vez', 'mucho', 'saber', 'qué', 'sobre',
'mi', 'alguno', 'mismo', 'yo', 'también', 'hasta'
]
},
{
'name': 'German',
'common_words': [
'das', 'ist', 'du', 'ich', 'nicht', 'die', 'es', 'und',
'der', 'was', 'wir', 'zu', 'ein', 'er', 'in', 'sie', 'mir',
'mit', 'ja', 'wie', 'den', 'auf', 'mich', 'dass', 'so',
'hier', 'eine', 'wenn', 'hat', 'all', 'sind', 'von',
'dich', 'war', 'haben', 'für', 'an', 'habe', 'da', 'nein',
'bin', 'noch', 'dir', 'uns', 'sich', 'nur',
'einen', 'kann', 'dem'
]
},
# include the English language here
# HINT: https://en.wikipedia.org/wiki/Most_common_words_in_English
]
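# Hedged sketch for the TODO above (added, not in the original file): an English
# entry could be appended like this; the exact word selection is an illustrative
# assumption based on common most-frequent-word rankings.
LANGUAGES.append({
    'name': 'English',
    'common_words': [
        'the', 'be', 'to', 'of', 'and', 'a', 'in', 'that', 'have', 'i',
        'it', 'for', 'not', 'on', 'with', 'he', 'as', 'you', 'do', 'at',
        'this', 'but', 'his', 'by', 'from', 'they', 'we', 'say', 'her',
        'she', 'or', 'an', 'will', 'my', 'one', 'all', 'would', 'there',
        'their', 'what', 'so', 'up', 'out', 'if', 'about', 'who', 'get',
        'which', 'go', 'me'
    ]
})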
|
the-stack_0_17096
|
import numpy as np
from mchap.io.vcf.util import vcfstr
def format_info_field(**kwargs):
"""Format key-value pairs into a VCF info field.
Parameters
----------
kwargs
Key value pairs of info field codes to values.
Returns
-------
string : str
VCF info field.
"""
parts = ["{}={}".format(k, vcfstr(v)) for k, v in kwargs.items()]
return ";".join(parts)
def format_sample_field(**kwargs):
"""Format key-value pairs into a VCF format field.
Parameters
----------
kwargs
Key value pairs of info field codes to arrays of values per sample.
Returns
-------
string : str
VCF format and sample columns.
"""
fields, arrays = zip(*kwargs.items())
fields = ":".join(fields)
lengths = np.array([len(a) for a in arrays])
length = lengths[0]
assert np.all(lengths == length)
sample_data = np.empty(length, dtype="O")
for i in range(length):
sample_data[i] = ":".join((vcfstr(a[i]) for a in arrays))
sample_data = "\t".join(sample_data)
return "{}\t{}".format(fields, sample_data)
def format_record(
*,
chrom=None,
pos=None,
id=None,
ref=None,
alt=None,
qual=None,
filter=None,
info=None,
format=None,
):
"""Format a VCF record line.
Parameters
----------
chrom : str
Variant chromosome or contig.
pos : int
Variant position.
id : str
Variant ID.
ref : str
Reference allele.
alt : list, str
Alternate alleles.
qual : int
Variant quality.
filter : str
Variant filter codes.
info : str
Variant INFO string.
format : str
Variant format codes and sample values.
Returns
-------
line : str
VCF record line.
"""
fields = [chrom, pos, id, ref, alt, qual, filter, info, format]
return "\t".join(vcfstr(f) for f in fields)
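# Illustrative usage sketch (added, not part of the original module). The values
# are made up, and it assumes vcfstr() renders lists comma-separated and scalars
# as plain strings, as the functions above rely on.
if __name__ == '__main__':
    info = format_info_field(AN=4, AC=[1, 2])            # e.g. "AN=4;AC=1,2"
    samples = format_sample_field(GT=['0/1', '1/1'], DP=[13, 27])
    line = format_record(
        chrom='chr1', pos=1234, id='.', ref='A', alt=['T'],
        qual=60, filter='PASS', info=info, format=samples,
    )
    print(line)  # one tab-separated VCF record line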
|
the-stack_0_17097
|
#from django.shortcuts import render
#from .models import Opcion,Pregunta
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from .models import Opcion, Pregunta
# Create your views here.
def index(request):
latest_question_list = Pregunta.objects.order_by('-pub_date')[:5]
context ={
'latest_question_list' : latest_question_list
}
return render(request, 'encuesta/index.html', context)
def detalle(request, pregunta_id):
pregunta = get_object_or_404(Pregunta, pk=pregunta_id)
return render(request, 'encuesta/detalle.html', {'pregunta':pregunta})
def votar(request, pregunta_id):
pregunta = get_object_or_404(Pregunta, pk=pregunta_id)
try:
selected_opcion = pregunta.opcion_set.get(pk=request.POST['opcion'])
except (KeyError, Opcion.DoesNotExist):
return render(request, 'encuesta/detalle.html', {
'pregunta' : pregunta,
'error_message' : "No has seleccionado una opcion"
})
else:
selected_opcion.votos += 1
selected_opcion.save()
return HttpResponseRedirect(reverse('encuesta:resultados', args=(pregunta.id,)))
def resultados(request, pregunta_id):
pregunta = get_object_or_404(Pregunta, pk=pregunta_id)
return render(request, 'encuesta/resultados.html', {'pregunta' : pregunta})
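# --- Illustrative URLconf sketch (an assumption, not part of this file) ---
# These views are typically wired up through an app-namespaced urls.py. Only
# the 'encuesta:resultados' route name is confirmed by the reverse() call
# above; the remaining names and URL patterns are guesses for illustration.
#
#     from django.urls import path
#     from . import views
#
#     app_name = 'encuesta'
#     urlpatterns = [
#         path('', views.index, name='index'),
#         path('<int:pregunta_id>/', views.detalle, name='detalle'),
#         path('<int:pregunta_id>/votar/', views.votar, name='votar'),
#         path('<int:pregunta_id>/resultados/', views.resultados, name='resultados'),
#     ]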
|
the-stack_0_17099
|
#MenuTitle: New Tab with Dangerous Glyphs for Interpolation
# -*- coding: utf-8 -*-
__doc__="""
Finds and outputs glyphs like the equals sign, or a symmetrical period, with paths that could interpolate wrongly within themselves.
"""
import GlyphsApp
Font = Glyphs.font
outputString = ""
def nodeCounts( thisLayer ):
countList = [ len(p.nodes) for p in thisLayer.paths ]
return countList
def shiftString( myString ):
return myString[1:] + myString[0]
def nodeString( path ):
nodestring = ""
for thisNode in path.nodes:
if thisNode.type == GSOFFCURVE:
nodestring += "h"
else:
nodestring += "n"
return nodestring
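# Explanatory note (not part of the original script): nodeString() encodes a
# path as one character per node -- "h" for an off-curve handle (GSOFFCURVE)
# and "n" for any on-curve node -- so a closed path made of two cubic curve
# segments would typically read "hhnhhn".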
def compatibleWhenReversed( path1, path2 ):
pathstring1 = nodeString(path1)
pathstring2 = "".join(reversed(nodeString(path2)))
if pathstring1 == pathstring2:
return True
return False
def compatibleWithDifferentStartPoints( path1, path2 ):
pathstring1 = nodeString(path1)
pathstring2 = nodeString(path2)
for x in pathstring1:
pathstring2 = shiftString(pathstring2)
if pathstring1 == pathstring2:
return True
return False
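# Explanatory note (not part of the original script): for example, with
# pathstring1 = "nhhn" and pathstring2 = "hnnh", two applications of
# shiftString() turn "hnnh" into "nnhh" and then into "nhhn", which matches
# pathstring1, so compatibleWithDifferentStartPoints() reports True.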
def check( thisLayer ):
thesePaths = thisLayer.paths
theseComponents = thisLayer.components
if len( theseComponents ) > 1:
componentNameList = [ c.componentName for c in theseComponents ]
compareValue_1 = len( componentNameList )
compareValue_2 = len( set( componentNameList ) )
if compareValue_1 != compareValue_2:
return True
if len( thisLayer.paths ) > 1:
pathStructureList = [ nodeString(p) for p in thesePaths ]
compareValue_1 = len( pathStructureList )
compareValue_2 = len( set( pathStructureList ) )
if compareValue_1 != compareValue_2:
return True
nodecounts = nodeCounts(thisLayer)
if len(nodecounts) != len( set(nodecounts) ):
numberOfPaths = len(thesePaths)
for i in range( numberOfPaths ):
firstPath = thesePaths[i]
firstPathCount = len(firstPath.nodes)
for j in range( i+1, numberOfPaths):
secondPath = thesePaths[j]
secondPathCount = len(secondPath.nodes)
if firstPathCount == secondPathCount:
if firstPath.closed and secondPath.closed and compatibleWithDifferentStartPoints( firstPath, secondPath ):
return True
elif compatibleWhenReversed( firstPath, secondPath ):
return True
if len(thisLayer.paths) == 1:
thisPath = thisLayer.paths[0]
if thisPath.closed and compatibleWithDifferentStartPoints( thisPath, thisPath ):
return True
elif compatibleWhenReversed( thisPath, thisPath ):
return True
return False
for thisGlyph in Font.glyphs:
if check( thisGlyph.layers[0] ):
outputString += "/%s" % thisGlyph.name
if outputString:
Font.newTab( outputString )
else:
Message(
"No interpolation problems",
"Cannot find any dangerous glyphs in this font.",
OKButton="Hurrah!"
)
|
the-stack_0_17100
|
# coding: utf-8
"""
Metal API
This is the API for Equinix Metal. The API allows you to programmatically interact with all of your Equinix Metal resources, including devices, networks, addresses, organizations, projects, and your user account. The official API docs are hosted at <https://metal.equinix.com/developers/api>. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from metal.configuration import Configuration
class VirtualNetworkList(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'virtual_networks': 'list[VirtualNetwork]'
}
attribute_map = {
'virtual_networks': 'virtual_networks'
}
def __init__(self, virtual_networks=None, local_vars_configuration=None): # noqa: E501
"""VirtualNetworkList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._virtual_networks = None
self.discriminator = None
if virtual_networks is not None:
self.virtual_networks = virtual_networks
@property
def virtual_networks(self):
"""Gets the virtual_networks of this VirtualNetworkList. # noqa: E501
:return: The virtual_networks of this VirtualNetworkList. # noqa: E501
:rtype: list[VirtualNetwork]
"""
return self._virtual_networks
@virtual_networks.setter
def virtual_networks(self, virtual_networks):
"""Sets the virtual_networks of this VirtualNetworkList.
:param virtual_networks: The virtual_networks of this VirtualNetworkList. # noqa: E501
:type virtual_networks: list[VirtualNetwork]
"""
self._virtual_networks = virtual_networks
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, VirtualNetworkList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, VirtualNetworkList):
return True
return self.to_dict() != other.to_dict()
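# --- Illustrative usage (a minimal sketch, not part of the generated module) ---
# The import path below follows the OpenAPI generator's usual package layout
# and is an assumption; only ``metal.configuration`` is confirmed by the
# imports above.
#
#     from metal.models.virtual_network_list import VirtualNetworkList
#
#     vlans = VirtualNetworkList(virtual_networks=[])
#     vlans.to_dict()                                  # -> {'virtual_networks': []}
#     vlans == VirtualNetworkList(virtual_networks=[]) # -> True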
|
the-stack_0_17103
|
"""
Symmetric Functions in Non-Commuting Variables
AUTHORS:
- Travis Scrimshaw (08-04-2013): Initial version
"""
# ****************************************************************************
# Copyright (C) 2013 Travis Scrimshaw <tscrim at ucdavis.edu>
#
# Distributed under the terms of the GNU General Public License (GPL)
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.misc.cachefunc import cached_method
from sage.misc.misc_c import prod
from sage.structure.parent import Parent
from sage.structure.unique_representation import UniqueRepresentation
from sage.categories.graded_hopf_algebras import GradedHopfAlgebras
from sage.categories.rings import Rings
from sage.categories.fields import Fields
from sage.arith.misc import factorial
from sage.combinat.free_module import CombinatorialFreeModule
from sage.combinat.ncsym.bases import NCSymBases, MultiplicativeNCSymBases, NCSymBasis_abstract
from sage.combinat.set_partition import SetPartitions
from sage.combinat.set_partition_ordered import OrderedSetPartitions
from sage.combinat.posets.posets import Poset
from sage.combinat.sf.sf import SymmetricFunctions
from sage.matrix.matrix_space import MatrixSpace
from sage.sets.set import Set
from sage.rings.integer_ring import ZZ
from functools import reduce
def matchings(A, B):
"""
Iterate through all matchings of the sets `A` and `B`.
EXAMPLES::
sage: from sage.combinat.ncsym.ncsym import matchings
sage: list(matchings([1, 2, 3], [-1, -2]))
[[[1], [2], [3], [-1], [-2]],
[[1], [2], [3, -1], [-2]],
[[1], [2], [3, -2], [-1]],
[[1], [2, -1], [3], [-2]],
[[1], [2, -1], [3, -2]],
[[1], [2, -2], [3], [-1]],
[[1], [2, -2], [3, -1]],
[[1, -1], [2], [3], [-2]],
[[1, -1], [2], [3, -2]],
[[1, -1], [2, -2], [3]],
[[1, -2], [2], [3], [-1]],
[[1, -2], [2], [3, -1]],
[[1, -2], [2, -1], [3]]]
"""
lst_A = list(A)
lst_B = list(B)
# Handle corner cases
if not lst_A:
if not lst_B:
yield []
else:
yield [[b] for b in lst_B]
return
if not lst_B:
yield [[a] for a in lst_A]
return
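    # Explanatory note (not part of the original source): the recursion below
    # removes the first remaining element ``a`` of ``A`` and either leaves it
    # as a singleton block or pairs it with one element ``b`` of ``B``, then
    # recurses on whatever remains of ``A`` and ``B``.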
rem_A = lst_A[:]
a = rem_A.pop(0)
for m in matchings(rem_A, lst_B):
yield [[a]] + m
for i in range(len(lst_B)):
rem_B = lst_B[:]
b = rem_B.pop(i)
for m in matchings(rem_A, rem_B):
yield [[a, b]] + m
def nesting(la, nu):
r"""
Return the nesting number of ``la`` inside of ``nu``.
If we consider a set partition `A` as a set of arcs `i - j` where `i`
and `j` are in the same part of `A`. Define
.. MATH::
\operatorname{nst}_{\lambda}^{\nu} = \#\{ i < j < k < l \mid
i - l \in \nu, j - k \in \lambda \},
and this corresponds to the number of arcs of `\lambda` strictly
contained inside of `\nu`.
EXAMPLES::
sage: from sage.combinat.ncsym.ncsym import nesting
sage: nu = SetPartition([[1,4], [2], [3]])
sage: mu = SetPartition([[1,4], [2,3]])
sage: nesting(set(mu).difference(nu), nu)
1
::
sage: lst = list(SetPartitions(4))
sage: d = {}
sage: for i, nu in enumerate(lst):
....: for mu in nu.coarsenings():
....: if set(nu.arcs()).issubset(mu.arcs()):
....: d[i, lst.index(mu)] = nesting(set(mu).difference(nu), nu)
sage: matrix(d)
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 1 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
"""
arcs = []
for p in nu:
p = sorted(p)
arcs += [(p[i], p[i+1]) for i in range(len(p)-1)]
nst = 0
for p in la:
p = sorted(p)
for i in range(len(p)-1):
for a in arcs:
if a[0] >= p[i]:
break
if p[i+1] < a[1]:
nst += 1
return nst
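# Explanatory note (not part of the original source): the "arcs" used above are
# the pairs of consecutive elements within each block of a set partition, e.g.
# SetPartition([[1, 4], [2, 3]]) has arcs (1, 4) and (2, 3).  The arc (2, 3)
# lies strictly inside (1, 4), so, matching the first doctest above,
# nesting({frozenset([2, 3])}, SetPartition([[1, 4], [2], [3]])) counts 1.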
class SymmetricFunctionsNonCommutingVariables(UniqueRepresentation, Parent):
r"""
Symmetric functions in non-commutative variables.
The ring of symmetric functions in non-commutative variables,
which is not to be confused with the :class:`non-commutative symmetric
functions<NonCommutativeSymmetricFunctions>`, is the ring of all
bounded-degree noncommutative power series in countably many
indeterminates (i.e., elements in
`R \langle \langle x_1, x_2, x_3, \ldots \rangle \rangle` of bounded
degree) which are invariant with respect to the action of the
symmetric group `S_{\infty}` on the indices of the indeterminates.
It can be regarded as a direct limit over all `n \to \infty` of rings
of `S_n`-invariant polynomials in `n` non-commuting variables
(that is, `S_n`-invariant elements of `R\langle x_1, x_2, \ldots, x_n \rangle`).
This ring is implemented as a Hopf algebra whose basis elements are
indexed by set partitions.
Let `A = \{A_1, A_2, \ldots, A_r\}` be a set partition of the integers
`[k] := \{ 1, 2, \ldots, k \}`. This partition `A` determines an
equivalence relation `\sim_A` on `[k]`, which has `c \sim_A d` if and
only if `c` and `d` are in the same part `A_j` of `A`.
The monomial basis element `\mathbf{m}_A` indexed by `A` is the sum of
monomials `x_{i_1} x_{i_2} \cdots x_{i_k}` such that `i_c = i_d` if
and only if `c \sim_A d`.
The `k`-th graded component of the ring of symmetric functions in
non-commutative variables has its dimension equal to the number of
set partitions of `[k]`. (If we work, instead, with finitely many --
say, `n` -- variables, then its dimension is equal to the number of
set partitions of `[k]` where the number of parts is at most `n`.)
.. NOTE::
All set partitions are considered standard (i.e., set partitions
of `[n]` for some `n`) unless otherwise stated.
REFERENCES:
.. [BZ05] \N. Bergeron, M. Zabrocki. *The Hopf algebra of symmetric
functions and quasisymmetric functions in non-commutative variables
are free and cofree*. (2005). :arxiv:`math/0509265v3`.
.. [BHRZ06] \N. Bergeron, C. Hohlweg, M. Rosas, M. Zabrocki.
*Grothendieck bialgebras, partition lattices, and symmetric
functions in noncommutative variables*. Electronic Journal of
Combinatorics. **13** (2006).
.. [RS06] \M. Rosas, B. Sagan. *Symmetric functions in noncommuting
variables*. Trans. Amer. Math. Soc. **358** (2006). no. 1, 215-232.
:arxiv:`math/0208168`.
.. [BRRZ08] \N. Bergeron, C. Reutenauer, M. Rosas, M. Zabrocki.
*Invariants and coinvariants of the symmetric group in noncommuting
variables*. Canad. J. Math. **60** (2008). 266-296.
:arxiv:`math/0502082`
.. [BT13] \N. Bergeron, N. Thiem. *A supercharacter table decomposition
via power-sum symmetric functions*. Int. J. Algebra Comput. **23**,
763 (2013). :doi:`10.1142/S0218196713400171`. :arxiv:`1112.4901`.
EXAMPLES:
We begin by first creating the ring of `NCSym` and the bases that are
analogues of the usual symmetric functions::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: m = NCSym.m()
sage: e = NCSym.e()
sage: h = NCSym.h()
sage: p = NCSym.p()
sage: m
Symmetric functions in non-commuting variables over the Rational Field in the monomial basis
The basis is indexed by set partitions, so we create a few elements and
convert them between these bases::
sage: elt = m(SetPartition([[1,3],[2]])) - 2*m(SetPartition([[1],[2]])); elt
-2*m{{1}, {2}} + m{{1, 3}, {2}}
sage: e(elt)
1/2*e{{1}, {2, 3}} - 2*e{{1, 2}} + 1/2*e{{1, 2}, {3}} - 1/2*e{{1, 2, 3}} - 1/2*e{{1, 3}, {2}}
sage: h(elt)
-4*h{{1}, {2}} - 2*h{{1}, {2}, {3}} + 1/2*h{{1}, {2, 3}} + 2*h{{1, 2}}
+ 1/2*h{{1, 2}, {3}} - 1/2*h{{1, 2, 3}} + 3/2*h{{1, 3}, {2}}
sage: p(elt)
-2*p{{1}, {2}} + 2*p{{1, 2}} - p{{1, 2, 3}} + p{{1, 3}, {2}}
sage: m(p(elt))
-2*m{{1}, {2}} + m{{1, 3}, {2}}
sage: elt = p(SetPartition([[1,3],[2]])) - 4*p(SetPartition([[1],[2]])) + 2; elt
2*p{} - 4*p{{1}, {2}} + p{{1, 3}, {2}}
sage: e(elt)
2*e{} - 4*e{{1}, {2}} + e{{1}, {2}, {3}} - e{{1, 3}, {2}}
sage: m(elt)
2*m{} - 4*m{{1}, {2}} - 4*m{{1, 2}} + m{{1, 2, 3}} + m{{1, 3}, {2}}
sage: h(elt)
2*h{} - 4*h{{1}, {2}} - h{{1}, {2}, {3}} + h{{1, 3}, {2}}
sage: p(m(elt))
2*p{} - 4*p{{1}, {2}} + p{{1, 3}, {2}}
There is also a shorthand for creating elements. We note that we must use
``p[[]]`` to create the empty set partition due to Python's syntax. ::
sage: eltm = m[[1,3],[2]] - 3*m[[1],[2]]; eltm
-3*m{{1}, {2}} + m{{1, 3}, {2}}
sage: elte = e[[1,3],[2]]; elte
e{{1, 3}, {2}}
sage: elth = h[[1,3],[2,4]]; elth
h{{1, 3}, {2, 4}}
sage: eltp = p[[1,3],[2,4]] + 2*p[[1]] - 4*p[[]]; eltp
-4*p{} + 2*p{{1}} + p{{1, 3}, {2, 4}}
There is also a natural projection to the usual symmetric functions by
letting the variables commute. This projection map preserves the product
and coproduct structure. We check that Theorem 2.1 of [RS06]_ holds::
sage: Sym = SymmetricFunctions(QQ)
sage: Sm = Sym.m()
sage: Se = Sym.e()
sage: Sh = Sym.h()
sage: Sp = Sym.p()
sage: eltm.to_symmetric_function()
-6*m[1, 1] + m[2, 1]
sage: Sm(p(eltm).to_symmetric_function())
-6*m[1, 1] + m[2, 1]
sage: elte.to_symmetric_function()
2*e[2, 1]
sage: Se(h(elte).to_symmetric_function())
2*e[2, 1]
sage: elth.to_symmetric_function()
4*h[2, 2]
sage: Sh(m(elth).to_symmetric_function())
4*h[2, 2]
sage: eltp.to_symmetric_function()
-4*p[] + 2*p[1] + p[2, 2]
sage: Sp(e(eltp).to_symmetric_function())
-4*p[] + 2*p[1] + p[2, 2]
"""
def __init__(self, R):
"""
Initialize ``self``.
EXAMPLES::
sage: NCSym1 = SymmetricFunctionsNonCommutingVariables(FiniteField(23))
sage: NCSym2 = SymmetricFunctionsNonCommutingVariables(Integers(23))
sage: TestSuite(SymmetricFunctionsNonCommutingVariables(QQ)).run()
"""
# change the line below to assert(R in Rings()) once MRO issues from #15536, #15475 are resolved
assert(R in Fields() or R in Rings()) # side effect of this statement assures MRO exists for R
self._base = R # Won't be needed once CategoryObject won't override base_ring
category = GradedHopfAlgebras(R) # TODO: .Cocommutative()
Parent.__init__(self, category = category.WithRealizations())
def _repr_(self):
r"""
EXAMPLES::
sage: SymmetricFunctionsNonCommutingVariables(ZZ)
Symmetric functions in non-commuting variables over the Integer Ring
"""
return "Symmetric functions in non-commuting variables over the %s"%self.base_ring()
def a_realization(self):
r"""
Return the realization of the powersum basis of ``self``.
OUTPUT:
- The powersum basis of symmetric functions in non-commuting variables.
EXAMPLES::
sage: SymmetricFunctionsNonCommutingVariables(QQ).a_realization()
Symmetric functions in non-commuting variables over the Rational Field in the powersum basis
"""
return self.powersum()
_shorthands = tuple(['chi', 'cp', 'm', 'e', 'h', 'p', 'rho', 'x'])
def dual(self):
r"""
Return the dual Hopf algebra of the symmetric functions in
non-commuting variables.
EXAMPLES::
sage: SymmetricFunctionsNonCommutingVariables(QQ).dual()
Dual symmetric functions in non-commuting variables over the Rational Field
"""
from sage.combinat.ncsym.dual import SymmetricFunctionsNonCommutingVariablesDual
return SymmetricFunctionsNonCommutingVariablesDual(self.base_ring())
class monomial(NCSymBasis_abstract):
r"""
The Hopf algebra of symmetric functions in non-commuting variables
in the monomial basis.
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: m = NCSym.m()
sage: m[[1,3],[2]]*m[[1,2]]
m{{1, 3}, {2}, {4, 5}} + m{{1, 3}, {2, 4, 5}} + m{{1, 3, 4, 5}, {2}}
sage: m[[1,3],[2]].coproduct()
m{} # m{{1, 3}, {2}} + m{{1}} # m{{1, 2}} + m{{1, 2}} # m{{1}} + m{{1,
3}, {2}} # m{}
"""
def __init__(self, NCSym):
"""
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: TestSuite(NCSym.m()).run()
"""
CombinatorialFreeModule.__init__(self, NCSym.base_ring(), SetPartitions(),
prefix='m', bracket=False,
category=NCSymBases(NCSym))
@cached_method
def _m_to_p_on_basis(self, A):
r"""
Return `\mathbf{m}_A` in terms of the powersum basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- An element of the powersum basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: m = NCSym.m()
sage: all(m(m._m_to_p_on_basis(A)) == m[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
def lt(s, t):
if s == t:
return False
for p in s:
if len([z for z in t if z.intersection(p)]) != 1:
return False
return True
p = self.realization_of().p()
P = Poset((A.coarsenings(), lt))
R = self.base_ring()
return p._from_dict({B: R(P.moebius_function(A, B)) for B in P})
@cached_method
def _m_to_cp_on_basis(self, A):
r"""
Return `\mathbf{m}_A` in terms of the `\mathbf{cp}` basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- An element of the `\mathbf{cp}` basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: m = NCSym.m()
sage: all(m(m._m_to_cp_on_basis(A)) == m[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
cp = self.realization_of().cp()
arcs = set(A.arcs())
R = self.base_ring()
return cp._from_dict({B: R((-1)**len(set(B.arcs()).difference(A.arcs())))
for B in A.coarsenings() if arcs.issubset(B.arcs())},
remove_zeros=False)
def from_symmetric_function(self, f):
r"""
Return the image of the symmetric function ``f`` in ``self``.
This is performed by converting to the monomial basis and
extending the method :meth:`sum_of_partitions` linearly. This is a
linear map from the symmetric functions to the symmetric functions
in non-commuting variables that does not preserve the product or
coproduct structure of the Hopf algebra.
.. SEEALSO:: :meth:`~Element.to_symmetric_function`
INPUT:
- ``f`` -- an element of the symmetric functions
OUTPUT:
- An element of the `\mathbf{m}` basis
EXAMPLES::
sage: m = SymmetricFunctionsNonCommutingVariables(QQ).m()
sage: mon = SymmetricFunctions(QQ).m()
sage: elt = m.from_symmetric_function(mon[2,1,1]); elt
1/12*m{{1}, {2}, {3, 4}} + 1/12*m{{1}, {2, 3}, {4}} + 1/12*m{{1}, {2, 4}, {3}}
+ 1/12*m{{1, 2}, {3}, {4}} + 1/12*m{{1, 3}, {2}, {4}} + 1/12*m{{1, 4}, {2}, {3}}
sage: elt.to_symmetric_function()
m[2, 1, 1]
sage: e = SymmetricFunctionsNonCommutingVariables(QQ).e()
sage: elm = SymmetricFunctions(QQ).e()
sage: e(m.from_symmetric_function(elm[4]))
1/24*e{{1, 2, 3, 4}}
sage: h = SymmetricFunctionsNonCommutingVariables(QQ).h()
sage: hom = SymmetricFunctions(QQ).h()
sage: h(m.from_symmetric_function(hom[4]))
1/24*h{{1, 2, 3, 4}}
sage: p = SymmetricFunctionsNonCommutingVariables(QQ).p()
sage: pow = SymmetricFunctions(QQ).p()
sage: p(m.from_symmetric_function(pow[4]))
p{{1, 2, 3, 4}}
sage: p(m.from_symmetric_function(pow[2,1]))
1/3*p{{1}, {2, 3}} + 1/3*p{{1, 2}, {3}} + 1/3*p{{1, 3}, {2}}
sage: p([[1,2]])*p([[1]])
p{{1, 2}, {3}}
Check that `\chi \circ \widetilde{\chi}` is the identity on `Sym`::
sage: all(m.from_symmetric_function(pow(la)).to_symmetric_function() == pow(la)
....: for la in Partitions(4))
True
"""
m = SymmetricFunctions(self.base_ring()).m()
return self.sum([c * self.sum_of_partitions(i) for i,c in m(f)])
def dual_basis(self):
r"""
Return the dual basis to the monomial basis.
OUTPUT:
- the `\mathbf{w}` basis of the dual Hopf algebra
EXAMPLES::
sage: m = SymmetricFunctionsNonCommutingVariables(QQ).m()
sage: m.dual_basis()
Dual symmetric functions in non-commuting variables over the Rational Field in the w basis
"""
return self.realization_of().dual().w()
def duality_pairing(self, x, y):
r"""
Compute the pairing between an element of ``self`` and an element
of the dual.
INPUT:
- ``x`` -- an element of symmetric functions in non-commuting
variables
- ``y`` -- an element of the dual of symmetric functions in
non-commuting variables
OUTPUT:
- an element of the base ring of ``self``
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: m = NCSym.m()
sage: w = m.dual_basis()
sage: matrix([[m(A).duality_pairing(w(B)) for A in SetPartitions(3)] for B in SetPartitions(3)])
[1 0 0 0 0]
[0 1 0 0 0]
[0 0 1 0 0]
[0 0 0 1 0]
[0 0 0 0 1]
sage: (m[[1,2],[3]] + 3*m[[1,3],[2]]).duality_pairing(2*w[[1,3],[2]] + w[[1,2,3]] + 2*w[[1,2],[3]])
8
"""
x = self(x)
y = self.dual_basis()(y)
return sum(coeff * y[I] for (I, coeff) in x)
def product_on_basis(self, A, B):
r"""
The product on monomial basis elements.
The product of the basis elements indexed by two set partitions `A`
and `B` is the sum of the basis elements indexed by set partitions
`C` such that `C \wedge ([n] | [k]) = A | B` where `n = |A|`
and `k = |B|`. Here `A \wedge B` is the infimum of `A` and `B`
and `A | B` is the
:meth:`SetPartition.pipe` operation.
Equivalently we can describe all `C` as matchings between the
parts of `A` and `B` where if `a \in A` is matched
with `b \in B`, we take `a \cup b` instead of `a` and `b` in `C`.
INPUT:
- ``A``, ``B`` -- set partitions
OUTPUT:
- an element of the `\mathbf{m}` basis
EXAMPLES::
sage: m = SymmetricFunctionsNonCommutingVariables(QQ).monomial()
sage: A = SetPartition([[1], [2,3]])
sage: B = SetPartition([[1], [3], [2,4]])
sage: m.product_on_basis(A, B)
m{{1}, {2, 3}, {4}, {5, 7}, {6}} + m{{1}, {2, 3, 4}, {5, 7}, {6}}
+ m{{1}, {2, 3, 5, 7}, {4}, {6}} + m{{1}, {2, 3, 6}, {4}, {5, 7}}
+ m{{1, 4}, {2, 3}, {5, 7}, {6}} + m{{1, 4}, {2, 3, 5, 7}, {6}}
+ m{{1, 4}, {2, 3, 6}, {5, 7}} + m{{1, 5, 7}, {2, 3}, {4}, {6}}
+ m{{1, 5, 7}, {2, 3, 4}, {6}} + m{{1, 5, 7}, {2, 3, 6}, {4}}
+ m{{1, 6}, {2, 3}, {4}, {5, 7}} + m{{1, 6}, {2, 3, 4}, {5, 7}}
+ m{{1, 6}, {2, 3, 5, 7}, {4}}
sage: B = SetPartition([[1], [2]])
sage: m.product_on_basis(A, B)
m{{1}, {2, 3}, {4}, {5}} + m{{1}, {2, 3, 4}, {5}}
+ m{{1}, {2, 3, 5}, {4}} + m{{1, 4}, {2, 3}, {5}} + m{{1, 4}, {2, 3, 5}}
+ m{{1, 5}, {2, 3}, {4}} + m{{1, 5}, {2, 3, 4}}
sage: m.product_on_basis(A, SetPartition([]))
m{{1}, {2, 3}}
TESTS:
We check that we get all of the correct set partitions::
sage: m = SymmetricFunctionsNonCommutingVariables(QQ).monomial()
sage: A = SetPartition([[1], [2,3]])
sage: B = SetPartition([[1], [2]])
sage: S = SetPartition([[1,2,3], [4,5]])
sage: AB = SetPartition([[1], [2,3], [4], [5]])
sage: L = sorted(filter(lambda x: S.inf(x) == AB, SetPartitions(5)), key=str)
sage: list(map(list, L)) == list(map(list, sorted(m.product_on_basis(A, B).support(), key=str)))
True
"""
if not A:
return self.monomial(B)
if not B:
return self.monomial(A)
P = SetPartitions()
n = A.size()
B = [Set([y+n for y in b]) for b in B] # Shift B by n
unions = lambda m: [reduce(lambda a,b: a.union(b), x) for x in m]
one = self.base_ring().one()
return self._from_dict({P(unions(m)): one for m in matchings(A, B)},
remove_zeros=False)
def coproduct_on_basis(self, A):
r"""
Return the coproduct of a monomial basis element.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- The coproduct applied to the monomial symmetric function in
non-commuting variables indexed by ``A`` expressed in the
monomial basis.
EXAMPLES::
sage: m = SymmetricFunctionsNonCommutingVariables(QQ).monomial()
sage: m[[1, 3], [2]].coproduct()
m{} # m{{1, 3}, {2}} + m{{1}} # m{{1, 2}} + m{{1, 2}} # m{{1}} + m{{1, 3}, {2}} # m{}
sage: m.coproduct_on_basis(SetPartition([]))
m{} # m{}
sage: m.coproduct_on_basis(SetPartition([[1,2,3]]))
m{} # m{{1, 2, 3}} + m{{1, 2, 3}} # m{}
sage: m[[1,5],[2,4],[3,7],[6]].coproduct()
m{} # m{{1, 5}, {2, 4}, {3, 7}, {6}} + m{{1}} # m{{1, 5}, {2, 4}, {3, 6}}
+ 2*m{{1, 2}} # m{{1, 3}, {2, 5}, {4}} + m{{1, 2}} # m{{1, 4}, {2, 3}, {5}}
+ 2*m{{1, 2}, {3}} # m{{1, 3}, {2, 4}} + m{{1, 3}, {2}} # m{{1, 4}, {2, 3}}
+ 2*m{{1, 3}, {2, 4}} # m{{1, 2}, {3}} + 2*m{{1, 3}, {2, 5}, {4}} # m{{1, 2}}
+ m{{1, 4}, {2, 3}} # m{{1, 3}, {2}} + m{{1, 4}, {2, 3}, {5}} # m{{1, 2}}
+ m{{1, 5}, {2, 4}, {3, 6}} # m{{1}} + m{{1, 5}, {2, 4}, {3, 7}, {6}} # m{}
"""
P = SetPartitions()
# Handle corner cases
if not A:
return self.tensor_square().monomial(( P([]), P([]) ))
if len(A) == 1:
return self.tensor_square().sum_of_monomials([(P([]), A), (A, P([]))])
ell_set = list(range(1, len(A) + 1)) # +1 for indexing
L = [[[], ell_set]] + list(SetPartitions(ell_set, 2))
def to_basis(S):
if not S:
return P([])
sub_parts = [list(A[i-1]) for i in S] # -1 for indexing
mins = [min(p) for p in sub_parts]
over_max = max([max(p) for p in sub_parts]) + 1
ret = [[] for i in range(len(S))]
cur = 1
while min(mins) != over_max:
m = min(mins)
i = mins.index(m)
ret[i].append(cur)
cur += 1
sub_parts[i].pop(sub_parts[i].index(m))
if sub_parts[i]:
mins[i] = min(sub_parts[i])
else:
mins[i] = over_max
return P(ret)
L1 = [(to_basis(S), to_basis(C)) for S,C in L]
L2 = [(M, N) for N,M in L1]
return self.tensor_square().sum_of_monomials(L1 + L2)
def internal_coproduct_on_basis(self, A):
r"""
Return the internal coproduct of a monomial basis element.
The internal coproduct is defined by
.. MATH::
\Delta^{\odot}(\mathbf{m}_A) = \sum_{B \wedge C = A}
\mathbf{m}_B \otimes \mathbf{m}_C
where we sum over all pairs of set partitions `B` and `C`
whose infimum is `A`.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- an element of the tensor square of the `\mathbf{m}` basis
EXAMPLES::
sage: m = SymmetricFunctionsNonCommutingVariables(QQ).monomial()
sage: m.internal_coproduct_on_basis(SetPartition([[1,3],[2]]))
m{{1, 2, 3}} # m{{1, 3}, {2}} + m{{1, 3}, {2}} # m{{1, 2, 3}} + m{{1, 3}, {2}} # m{{1, 3}, {2}}
"""
P = SetPartitions()
SP = SetPartitions(A.size())
ret = [[A,A]]
for i, B in enumerate(SP):
for C in SP[i+1:]:
if B.inf(C) == A:
B_std = P(list(B.standardization()))
C_std = P(list(C.standardization()))
ret.append([B_std, C_std])
ret.append([C_std, B_std])
return self.tensor_square().sum_of_monomials((B, C) for B,C in ret)
def sum_of_partitions(self, la):
r"""
Return the sum over all set partitions whose shape is ``la``
with a fixed coefficient `C` defined below.
Fix a partition `\lambda`; we define
`\lambda! := \prod_i \lambda_i!` and `\lambda^! := \prod_i m_i!`.
Recall that `|\lambda| = \sum_i \lambda_i` and `m_i` is the
number of parts of length `i` of `\lambda`. Thus we define the
coefficient as
.. MATH::
C := \frac{\lambda!}{|\lambda|!}.
Hence we can define a lift `\widetilde{\chi}` from `Sym`
to `NCSym` by
.. MATH::
m_{\lambda} \mapsto C \sum_A \mathbf{m}_A
where the sum is over all set partitions whose shape
is `\lambda`.
INPUT:
- ``la`` -- an integer partition
OUTPUT:
- an element of the `\mathbf{m}` basis
EXAMPLES::
sage: m = SymmetricFunctionsNonCommutingVariables(QQ).m()
sage: m.sum_of_partitions(Partition([2,1,1]))
1/12*m{{1}, {2}, {3, 4}} + 1/12*m{{1}, {2, 3}, {4}} + 1/12*m{{1}, {2, 4}, {3}}
+ 1/12*m{{1, 2}, {3}, {4}} + 1/12*m{{1, 3}, {2}, {4}} + 1/12*m{{1, 4}, {2}, {3}}
TESTS:
Check that `\chi \circ \widetilde{\chi}` is the identity on `Sym`::
sage: m = SymmetricFunctionsNonCommutingVariables(QQ).m()
sage: mon = SymmetricFunctions(QQ).monomial()
sage: all(m.from_symmetric_function(mon[la]).to_symmetric_function() == mon[la]
....: for i in range(6) for la in Partitions(i))
True
"""
from sage.combinat.partition import Partition
la = Partition(la) # Make sure it is a partition
R = self.base_ring()
P = SetPartitions()
c = R( prod(factorial(i) for i in la) / ZZ(factorial(la.size())) )
return self._from_dict({P(m): c for m in SetPartitions(sum(la), la)},
remove_zeros=False)
class Element(CombinatorialFreeModule.Element):
"""
An element in the monomial basis of `NCSym`.
"""
def expand(self, n, alphabet='x'):
r"""
Expand ``self`` written in the monomial basis in `n`
non-commuting variables.
INPUT:
- ``n`` -- an integer
- ``alphabet`` -- (default: ``'x'``) a string
OUTPUT:
- The symmetric function of ``self`` expressed in the ``n``
non-commuting variables described by ``alphabet``.
EXAMPLES::
sage: m = SymmetricFunctionsNonCommutingVariables(QQ).monomial()
sage: m[[1,3],[2]].expand(4)
x0*x1*x0 + x0*x2*x0 + x0*x3*x0 + x1*x0*x1 + x1*x2*x1 + x1*x3*x1
+ x2*x0*x2 + x2*x1*x2 + x2*x3*x2 + x3*x0*x3 + x3*x1*x3 + x3*x2*x3
One can use a different set of variables by using the
optional argument ``alphabet``::
sage: m[[1],[2,3]].expand(3,alphabet='y')
y0*y1^2 + y0*y2^2 + y1*y0^2 + y1*y2^2 + y2*y0^2 + y2*y1^2
"""
from sage.algebras.free_algebra import FreeAlgebra
from sage.combinat.permutation import Permutations
m = self.parent()
F = FreeAlgebra(m.base_ring(), n, alphabet)
x = F.gens()
def on_basis(A):
basic_term = [0] * A.size()
for index, part in enumerate(A):
for i in part:
basic_term[i-1] = index # -1 for indexing
return sum( prod(x[p[i]-1] for i in basic_term) # -1 for indexing
for p in Permutations(n, len(A)) )
return m._apply_module_morphism(self, on_basis, codomain=F)
def to_symmetric_function(self):
r"""
The projection of ``self`` to the symmetric functions.
Take a symmetric function in non-commuting variables
expressed in the `\mathbf{m}` basis, and return the projection of ``self``
expressed in the monomial basis of symmetric functions.
The map `\chi \colon NCSym \to Sym` is defined by
.. MATH::
\mathbf{m}_A \mapsto
m_{\lambda(A)} \prod_i n_i(\lambda(A))!
where `\lambda(A)` is the partition associated with `A` by
taking the sizes of the parts and `n_i(\mu)` is the
multiplicity of `i` in `\mu`.
OUTPUT:
- an element of the symmetric functions in the monomial basis
EXAMPLES::
sage: m = SymmetricFunctionsNonCommutingVariables(QQ).monomial()
sage: m[[1,3],[2]].to_symmetric_function()
m[2, 1]
sage: m[[1],[3],[2]].to_symmetric_function()
6*m[1, 1, 1]
"""
m = SymmetricFunctions(self.parent().base_ring()).monomial()
c = lambda la: prod(factorial(i) for i in la.to_exp())
return m.sum_of_terms((i.shape(), coeff*c(i.shape()))
for (i, coeff) in self)
m = monomial
class elementary(NCSymBasis_abstract):
r"""
The Hopf algebra of symmetric functions in non-commuting variables
in the elementary basis.
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: e = NCSym.e()
"""
def __init__(self, NCSym):
"""
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: TestSuite(NCSym.e()).run()
"""
CombinatorialFreeModule.__init__(self, NCSym.base_ring(), SetPartitions(),
prefix='e', bracket=False,
category=MultiplicativeNCSymBases(NCSym))
## Register coercions
# monomials
m = NCSym.m()
self.module_morphism(self._e_to_m_on_basis, codomain=m).register_as_coercion()
# powersum
# NOTE: Keep this ahead of creating the homogeneous basis to
# get the coercion path m -> p -> e
p = NCSym.p()
self.module_morphism(self._e_to_p_on_basis, codomain=p,
triangular="upper").register_as_coercion()
p.module_morphism(p._p_to_e_on_basis, codomain=self,
triangular="upper").register_as_coercion()
# homogeneous
h = NCSym.h()
self.module_morphism(self._e_to_h_on_basis, codomain=h,
triangular="upper").register_as_coercion()
h.module_morphism(h._h_to_e_on_basis, codomain=self,
triangular="upper").register_as_coercion()
@cached_method
def _e_to_m_on_basis(self, A):
r"""
Return `\mathbf{e}_A` in terms of the monomial basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- An element of the `\mathbf{m}` basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: e = NCSym.e()
sage: all(e(e._e_to_m_on_basis(A)) == e[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
m = self.realization_of().m()
n = A.size()
P = SetPartitions(n)
min_elt = P([[i] for i in range(1, n+1)])
one = self.base_ring().one()
return m._from_dict({B: one for B in P if A.inf(B) == min_elt},
remove_zeros=False)
@cached_method
def _e_to_h_on_basis(self, A):
r"""
Return `\mathbf{e}_A` in terms of the homogeneous basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- An element of the `\mathbf{h}` basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: e = NCSym.e()
sage: all(e(e._e_to_h_on_basis(A)) == e[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
h = self.realization_of().h()
sign = lambda B: (-1)**(B.size() - len(B))
coeff = lambda B: sign(B) * prod(factorial(sum( 1 for part in B if part.issubset(big) )) for big in A)
R = self.base_ring()
return h._from_dict({B: R(coeff(B)) for B in A.refinements()},
remove_zeros=False)
@cached_method
def _e_to_p_on_basis(self, A):
r"""
Return `\mathbf{e}_A` in terms of the powersum basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- An element of the `\mathbf{p}` basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: e = NCSym.e()
sage: all(e(e._e_to_p_on_basis(A)) == e[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
p = self.realization_of().p()
coeff = lambda B: prod([(-1)**(i-1) * factorial(i-1) for i in B.shape()])
R = self.base_ring()
return p._from_dict({B: R(coeff(B)) for B in A.refinements()},
remove_zeros=False)
class Element(CombinatorialFreeModule.Element):
"""
An element in the elementary basis of `NCSym`.
"""
def omega(self):
r"""
Return the involution `\omega` applied to ``self``.
The involution `\omega` on `NCSym` is defined by
`\omega(\mathbf{e}_A) = \mathbf{h}_A`.
OUTPUT:
- an element in the basis ``self``
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: e = NCSym.e()
sage: h = NCSym.h()
sage: elt = e[[1,3],[2]].omega(); elt
2*e{{1}, {2}, {3}} - e{{1, 3}, {2}}
sage: elt.omega()
e{{1, 3}, {2}}
sage: h(elt)
h{{1, 3}, {2}}
"""
P = self.parent()
h = P.realization_of().h()
return P(h.sum_of_terms(self))
def to_symmetric_function(self):
r"""
The projection of ``self`` to the symmetric functions.
Take a symmetric function in non-commuting variables
expressed in the `\mathbf{e}` basis, and return the projection of ``self``
expressed in the elementary basis of symmetric functions.
The map `\chi \colon NCSym \to Sym` is given by
.. MATH::
\mathbf{e}_A \mapsto
e_{\lambda(A)} \prod_i \lambda(A)_i!
where `\lambda(A)` is the partition associated with `A` by
taking the sizes of the parts.
OUTPUT:
- An element of the symmetric functions in the elementary basis
EXAMPLES::
sage: e = SymmetricFunctionsNonCommutingVariables(QQ).e()
sage: e[[1,3],[2]].to_symmetric_function()
2*e[2, 1]
sage: e[[1],[3],[2]].to_symmetric_function()
e[1, 1, 1]
"""
e = SymmetricFunctions(self.parent().base_ring()).e()
c = lambda la: prod(factorial(i) for i in la)
return e.sum_of_terms((i.shape(), coeff*c(i.shape()))
for (i, coeff) in self)
e = elementary
class homogeneous(NCSymBasis_abstract):
r"""
The Hopf algebra of symmetric functions in non-commuting variables
in the homogeneous basis.
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: h = NCSym.h()
sage: h[[1,3],[2,4]]*h[[1,2,3]]
h{{1, 3}, {2, 4}, {5, 6, 7}}
sage: h[[1,2]].coproduct()
h{} # h{{1, 2}} + 2*h{{1}} # h{{1}} + h{{1, 2}} # h{}
"""
def __init__(self, NCSym):
"""
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: TestSuite(NCSym.h()).run()
"""
CombinatorialFreeModule.__init__(self, NCSym.base_ring(), SetPartitions(),
prefix='h', bracket=False,
category=MultiplicativeNCSymBases(NCSym))
# Register coercions
m = NCSym.m()
self.module_morphism(self._h_to_m_on_basis, codomain=m).register_as_coercion()
p = NCSym.p()
self.module_morphism(self._h_to_p_on_basis, codomain=p).register_as_coercion()
p.module_morphism(p._p_to_h_on_basis, codomain=self).register_as_coercion()
@cached_method
def _h_to_m_on_basis(self, A):
r"""
Return `\mathbf{h}_A` in terms of the monomial basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- An element of the `\mathbf{m}` basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: h = NCSym.h()
sage: all(h(h._h_to_m_on_basis(A)) == h[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
P = SetPartitions()
m = self.realization_of().m()
coeff = lambda B: prod(factorial(i) for i in B.shape())
R = self.base_ring()
return m._from_dict({P(B): R( coeff(A.inf(B)) )
for B in SetPartitions(A.size())}, remove_zeros=False)
@cached_method
def _h_to_e_on_basis(self, A):
r"""
Return `\mathbf{h}_A` in terms of the elementary basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- An element of the `\mathbf{e}` basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: h = NCSym.h()
sage: all(h(h._h_to_e_on_basis(A)) == h[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
e = self.realization_of().e()
sign = lambda B: (-1)**(B.size() - len(B))
coeff = lambda B: (sign(B) * prod(factorial(sum( 1 for part in B if part.issubset(big) ))
for big in A))
R = self.base_ring()
return e._from_dict({B: R(coeff(B)) for B in A.refinements()},
remove_zeros=False)
@cached_method
def _h_to_p_on_basis(self, A):
r"""
Return `\mathbf{h}_A` in terms of the powersum basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- An element of the `\mathbf{p}` basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: h = NCSym.h()
sage: all(h(h._h_to_p_on_basis(A)) == h[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
p = self.realization_of().p()
coeff = lambda B: abs( prod([(-1)**(i-1) * factorial(i-1) for i in B.shape()]) )
R = self.base_ring()
return p._from_dict({B: R(coeff(B)) for B in A.refinements()},
remove_zeros=False)
class Element(CombinatorialFreeModule.Element):
"""
An element in the homogeneous basis of `NCSym`.
"""
def omega(self):
r"""
Return the involution `\omega` applied to ``self``.
The involution `\omega` on `NCSym` is defined by
`\omega(\mathbf{h}_A) = \mathbf{e}_A`.
OUTPUT:
- an element in the basis ``self``
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: h = NCSym.h()
sage: e = NCSym.e()
sage: elt = h[[1,3],[2]].omega(); elt
2*h{{1}, {2}, {3}} - h{{1, 3}, {2}}
sage: elt.omega()
h{{1, 3}, {2}}
sage: e(elt)
e{{1, 3}, {2}}
"""
P = self.parent()
e = self.parent().realization_of().e()
return P(e.sum_of_terms(self))
def to_symmetric_function(self):
r"""
The projection of ``self`` to the symmetric functions.
Take a symmetric function in non-commuting variables
expressed in the `\mathbf{h}` basis, and return the projection of ``self``
expressed in the complete basis of symmetric functions.
The map `\chi \colon NCSym \to Sym` is given by
.. MATH::
\mathbf{h}_A \mapsto
h_{\lambda(A)} \prod_i \lambda(A)_i!
where `\lambda(A)` is the partition associated with `A` by
taking the sizes of the parts.
OUTPUT:
- An element of the symmetric functions in the complete basis
EXAMPLES::
sage: h = SymmetricFunctionsNonCommutingVariables(QQ).h()
sage: h[[1,3],[2]].to_symmetric_function()
2*h[2, 1]
sage: h[[1],[3],[2]].to_symmetric_function()
h[1, 1, 1]
"""
h = SymmetricFunctions(self.parent().base_ring()).h()
c = lambda la: prod(factorial(i) for i in la)
return h.sum_of_terms((i.shape(), coeff*c(i.shape()))
for (i, coeff) in self)
h = homogeneous
class powersum(NCSymBasis_abstract):
r"""
The Hopf algebra of symmetric functions in non-commuting variables
in the powersum basis.
The powersum basis is given by
.. MATH::
\mathbf{p}_A = \sum_{A \leq B} \mathbf{m}_B,
where we sum over all coarsenings of the set partition `A`. If we
allow our variables to commute, then `\mathbf{p}_A` goes to the
usual powersum symmetric function `p_{\lambda}` whose (integer)
partition `\lambda` is the shape of `A`.
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: p = NCSym.p()
sage: x = p.an_element()**2; x
4*p{} + 8*p{{1}} + 4*p{{1}, {2}} + 6*p{{1}, {2, 3}}
+ 12*p{{1, 2}} + 6*p{{1, 2}, {3}} + 9*p{{1, 2}, {3, 4}}
sage: x.to_symmetric_function()
4*p[] + 8*p[1] + 4*p[1, 1] + 12*p[2] + 12*p[2, 1] + 9*p[2, 2]
"""
def __init__(self, NCSym):
"""
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: TestSuite(NCSym.p()).run()
"""
CombinatorialFreeModule.__init__(self, NCSym.base_ring(), SetPartitions(),
prefix='p', bracket=False,
category=MultiplicativeNCSymBases(NCSym))
# Register coercions
m = NCSym.m()
self.module_morphism(self._p_to_m_on_basis, codomain=m,
unitriangular="lower").register_as_coercion()
m.module_morphism(m._m_to_p_on_basis, codomain=self,
unitriangular="lower").register_as_coercion()
x = NCSym.x()
self.module_morphism(self._p_to_x_on_basis, codomain=x,
unitriangular="upper").register_as_coercion()
x.module_morphism(x._x_to_p_on_basis, codomain=self,
unitriangular="upper").register_as_coercion()
@cached_method
def _p_to_m_on_basis(self, A):
r"""
Return `\mathbf{p}_A` in terms of the monomial basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- An element of the `\mathbf{m}` basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: p = NCSym.p()
sage: all(p(p._p_to_m_on_basis(A)) == p[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
m = self.realization_of().m()
one = self.base_ring().one()
return m._from_dict({B: one for B in A.coarsenings()}, remove_zeros=False)
@cached_method
def _p_to_e_on_basis(self, A):
r"""
Return `\mathbf{p}_A` in terms of the elementary basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- An element of the `\mathbf{e}` basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: p = NCSym.p()
sage: all(p(p._p_to_e_on_basis(A)) == p[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
e = self.realization_of().e()
P_refine = Poset((A.refinements(), A.parent().lt))
c = prod((-1)**(i-1) * factorial(i-1) for i in A.shape())
R = self.base_ring()
return e._from_dict({B: R(P_refine.moebius_function(B, A) / ZZ(c))
for B in P_refine}, remove_zeros=False)
@cached_method
def _p_to_h_on_basis(self, A):
r"""
Return `\mathbf{p}_A` in terms of the homogeneous basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- An element of the `\mathbf{h}` basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: p = NCSym.p()
sage: all(p(p._p_to_h_on_basis(A)) == p[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
h = self.realization_of().h()
P_refine = Poset((A.refinements(), A.parent().lt))
c = abs(prod((-1)**(i-1) * factorial(i-1) for i in A.shape()))
R = self.base_ring()
return h._from_dict({B: R(P_refine.moebius_function(B, A) / ZZ(c))
for B in P_refine}, remove_zeros=False)
@cached_method
def _p_to_x_on_basis(self, A):
r"""
Return `\mathbf{p}_A` in terms of the `\mathbf{x}` basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- An element of the `\mathbf{x}` basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: p = NCSym.p()
sage: all(p(p._p_to_x_on_basis(A)) == p[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
x = self.realization_of().x()
one = self.base_ring().one()
return x._from_dict({B: one for B in A.refinements()}, remove_zeros=False)
# Note that this is the same as the monomial coproduct_on_basis
def coproduct_on_basis(self, A):
r"""
Return the coproduct of a monomial basis element.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- The coproduct applied to the monomial symmetric function in
non-commuting variables indexed by ``A`` expressed in the
monomial basis.
EXAMPLES::
sage: p = SymmetricFunctionsNonCommutingVariables(QQ).powersum()
sage: p[[1, 3], [2]].coproduct()
p{} # p{{1, 3}, {2}} + p{{1}} # p{{1, 2}} + p{{1, 2}} # p{{1}} + p{{1, 3}, {2}} # p{}
sage: p.coproduct_on_basis(SetPartition([[1]]))
p{} # p{{1}} + p{{1}} # p{}
sage: p.coproduct_on_basis(SetPartition([]))
p{} # p{}
"""
P = SetPartitions()
# Handle corner cases
if not A:
return self.tensor_square().monomial(( P([]), P([]) ))
if len(A) == 1:
return self.tensor_square().sum_of_monomials([(P([]), A), (A, P([]))])
ell_set = list(range(1, len(A) + 1)) # +1 for indexing
L = [[[], ell_set]] + list(SetPartitions(ell_set, 2))
def to_basis(S):
if not S:
return P([])
sub_parts = [list(A[i-1]) for i in S] # -1 for indexing
mins = [min(p) for p in sub_parts]
over_max = max([max(p) for p in sub_parts]) + 1
ret = [[] for i in range(len(S))]
cur = 1
while min(mins) != over_max:
m = min(mins)
i = mins.index(m)
ret[i].append(cur)
cur += 1
sub_parts[i].pop(sub_parts[i].index(m))
if sub_parts[i]:
mins[i] = min(sub_parts[i])
else:
mins[i] = over_max
return P(ret)
L1 = [(to_basis(S), to_basis(C)) for S,C in L]
L2 = [(M, N) for N,M in L1]
return self.tensor_square().sum_of_monomials(L1 + L2)
def internal_coproduct_on_basis(self, A):
r"""
Return the internal coproduct of a powersum basis element.
The internal coproduct is defined by
.. MATH::
\Delta^{\odot}(\mathbf{p}_A) = \mathbf{p}_A \otimes
\mathbf{p}_A
INPUT:
- ``A`` -- a set partition
OUTPUT:
- an element of the tensor square of ``self``
EXAMPLES::
sage: p = SymmetricFunctionsNonCommutingVariables(QQ).powersum()
sage: p.internal_coproduct_on_basis(SetPartition([[1,3],[2]]))
p{{1, 3}, {2}} # p{{1, 3}, {2}}
"""
return self.tensor_square().monomial((A, A))
def antipode_on_basis(self, A):
r"""
Return the result of the antipode applied to a powersum basis element.
Let `A` be a set partition. The antipode given in [LM2011]_ is
.. MATH::
S(\mathbf{p}_A) = \sum_{\gamma} (-1)^{\ell(\gamma)}
\mathbf{p}_{\gamma[A]}
where we sum over all ordered set partitions (i.e. set
compositions) of `[\ell(A)]` and
.. MATH::
\gamma[A] = A_{\gamma_1}^{\downarrow} | \cdots |
A_{\gamma_{\ell(A)}}^{\downarrow}
is the action of `\gamma` on `A` defined in
:meth:`SetPartition.ordered_set_partition_action()`.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- an element in the basis ``self``
EXAMPLES::
sage: p = SymmetricFunctionsNonCommutingVariables(QQ).powersum()
sage: p.antipode_on_basis(SetPartition([[1], [2,3]]))
p{{1, 2}, {3}}
sage: p.antipode_on_basis(SetPartition([]))
p{}
sage: F = p[[1,3],[5],[2,4]].coproduct()
sage: F.apply_multilinear_morphism(lambda x,y: x.antipode()*y)
0
"""
P = SetPartitions()
def action(gamma):
cur = 1
ret = []
for S in gamma:
sub_parts = [list(A[i-1]) for i in S] # -1 for indexing
mins = [min(p) for p in sub_parts]
over_max = max([max(p) for p in sub_parts]) + 1
temp = [[] for i in range(len(S))]
while min(mins) != over_max:
m = min(mins)
i = mins.index(m)
temp[i].append(cur)
cur += 1
sub_parts[i].pop(sub_parts[i].index(m))
if sub_parts[i]:
mins[i] = min(sub_parts[i])
else:
mins[i] = over_max
ret += temp
return P(ret)
return self.sum_of_terms( (A.ordered_set_partition_action(gamma), (-1)**len(gamma))
for gamma in OrderedSetPartitions(len(A)) )
def primitive(self, A, i=1):
r"""
Return the primitive associated to ``A`` in ``self``.
Fix some `i \in S`. Let `A` be an atomic set partition of `S`,
then the primitive `p(A)` given in [LM2011]_ is
.. MATH::
p(A) = \sum_{\gamma} (-1)^{\ell(\gamma)-1}
\mathbf{p}_{\gamma[A]}
where we sum over all ordered set partitions of `[\ell(A)]` such
that `i \in \gamma_1` and `\gamma[A]` is the action of `\gamma`
on `A` defined in
:meth:`SetPartition.ordered_set_partition_action()`.
If `A` is not atomic, then `p(A) = 0`.
.. SEEALSO:: :meth:`SetPartition.is_atomic`
INPUT:
- ``A`` -- a set partition
- ``i`` -- (default: 1) index in the base set for ``A`` specifying
which set of primitives this belongs to
OUTPUT:
- an element in the basis ``self``
EXAMPLES::
sage: p = SymmetricFunctionsNonCommutingVariables(QQ).powersum()
sage: elt = p.primitive(SetPartition([[1,3], [2]])); elt
-p{{1, 2}, {3}} + p{{1, 3}, {2}}
sage: elt.coproduct()
-p{} # p{{1, 2}, {3}} + p{} # p{{1, 3}, {2}} - p{{1, 2}, {3}} # p{} + p{{1, 3}, {2}} # p{}
sage: p.primitive(SetPartition([[1], [2,3]]))
0
sage: p.primitive(SetPartition([]))
p{}
"""
if not A:
return self.one()
A = SetPartitions()(A) # Make sure it's a set partition
if not A.is_atomic():
return self.zero()
return self.sum_of_terms( (A.ordered_set_partition_action(gamma), (-1)**(len(gamma)-1))
for gamma in OrderedSetPartitions(len(A)) if i in gamma[0] )
class Element(CombinatorialFreeModule.Element):
"""
An element in the powersum basis of `NCSym`.
"""
def to_symmetric_function(self):
r"""
The projection of ``self`` to the symmetric functions.
Take a symmetric function in non-commuting variables
expressed in the `\mathbf{p}` basis, and return the projection of ``self``
expressed in the powersum basis of symmetric functions.
The map `\chi \colon NCSym \to Sym` is given by
.. MATH::
\mathbf{p}_A \mapsto p_{\lambda(A)}
where `\lambda(A)` is the partition associated with `A` by
taking the sizes of the parts.
OUTPUT:
- an element of symmetric functions in the power sum basis
EXAMPLES::
sage: p = SymmetricFunctionsNonCommutingVariables(QQ).p()
sage: p[[1,3],[2]].to_symmetric_function()
p[2, 1]
sage: p[[1],[3],[2]].to_symmetric_function()
p[1, 1, 1]
"""
p = SymmetricFunctions(self.parent().base_ring()).p()
return p.sum_of_terms((i.shape(), coeff) for (i, coeff) in self)
p = powersum
class coarse_powersum(NCSymBasis_abstract):
r"""
The Hopf algebra of symmetric functions in non-commuting variables
in the `\mathbf{cp}` basis.
This basis was defined in [BZ05]_ as
.. MATH::
\mathbf{cp}_A = \sum_{A \leq_* B} \mathbf{m}_B,
where we sum over all strict coarsenings of the set partition `A`.
An alternative description of this basis was given in [BT13]_ as
.. MATH::
\mathbf{cp}_A = \sum_{A \subseteq B} \mathbf{m}_B,
where we sum over all set partitions `B` such that the arcs of `A`
are a subset of the arcs of `B`.
.. NOTE::
In [BZ05]_, this basis was denoted by `\mathbf{q}`. In [BT13]_,
this basis was called the powersum basis and denoted by `p`.
However it is a coarser basis than the usual powersum basis in
the sense that it does not yield the usual powersum basis
of the symmetric function under the natural map of letting
the variables commute.
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: cp = NCSym.cp()
sage: cp[[1,3],[2,4]]*cp[[1,2,3]]
cp{{1, 3}, {2, 4}, {5, 6, 7}}
sage: cp[[1,2],[3]].internal_coproduct()
cp{{1, 2}, {3}} # cp{{1, 2}, {3}}
sage: ps = SymmetricFunctions(NCSym.base_ring()).p()
sage: ps(cp[[1,3],[2]].to_symmetric_function())
p[2, 1] - p[3]
sage: ps(cp[[1,2],[3]].to_symmetric_function())
p[2, 1]
"""
def __init__(self, NCSym):
"""
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: TestSuite(NCSym.cp()).run()
"""
CombinatorialFreeModule.__init__(self, NCSym.base_ring(), SetPartitions(),
prefix='cp', bracket=False,
category=MultiplicativeNCSymBases(NCSym))
# Register coercions
m = NCSym.m()
self.module_morphism(self._cp_to_m_on_basis, codomain=m,
unitriangular="lower").register_as_coercion()
m.module_morphism(m._m_to_cp_on_basis, codomain=self,
unitriangular="lower").register_as_coercion()
@cached_method
def _cp_to_m_on_basis(self, A):
r"""
Return `\mathbf{cp}_A` in terms of the monomial basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- an element of the `\mathbf{m}` basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: cp = NCSym.cp()
sage: all(cp(cp._cp_to_m_on_basis(A)) == cp[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
m = self.realization_of().m()
one = self.base_ring().one()
return m._from_dict({B: one for B in A.strict_coarsenings()},
remove_zeros=False)
cp = coarse_powersum
class x_basis(NCSymBasis_abstract):
r"""
The Hopf algebra of symmetric functions in non-commuting variables
in the `\mathbf{x}` basis.
This basis is defined in [BHRZ06]_ by the formula:
.. MATH::
\mathbf{x}_A = \sum_{B \leq A} \mu(B, A) \mathbf{p}_B
and has the following properties:
.. MATH::
\mathbf{x}_A \mathbf{x}_B = \mathbf{x}_{A|B}, \quad \quad
\Delta^{\odot}(\mathbf{x}_C) = \sum_{A \vee B = C} \mathbf{x}_A
\otimes \mathbf{x}_B.
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: x = NCSym.x()
sage: x[[1,3],[2,4]]*x[[1,2,3]]
x{{1, 3}, {2, 4}, {5, 6, 7}}
sage: x[[1,2],[3]].internal_coproduct()
x{{1}, {2}, {3}} # x{{1, 2}, {3}} + x{{1, 2}, {3}} # x{{1}, {2}, {3}} +
x{{1, 2}, {3}} # x{{1, 2}, {3}}
"""
def __init__(self, NCSym):
"""
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: TestSuite(NCSym.x()).run()
"""
CombinatorialFreeModule.__init__(self, NCSym.base_ring(), SetPartitions(),
prefix='x', bracket=False,
category=MultiplicativeNCSymBases(NCSym))
@cached_method
def _x_to_p_on_basis(self, A):
r"""
Return `\mathbf{x}_A` in terms of the powersum basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- an element of the `\mathbf{p}` basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: x = NCSym.x()
sage: all(x(x._x_to_p_on_basis(A)) == x[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
def lt(s, t):
if s == t:
return False
for p in s:
if len([z for z in t if z.intersection(p)]) != 1:
return False
return True
p = self.realization_of().p()
P_refine = Poset((A.refinements(), lt))
R = self.base_ring()
return p._from_dict({B: R(P_refine.moebius_function(B, A))
for B in P_refine})
x = x_basis
class deformed_coarse_powersum(NCSymBasis_abstract):
r"""
The Hopf algebra of symmetric functions in non-commuting variables
in the `\rho` basis.
This basis was defined in [BT13]_ as a `q`-deformation of the
`\mathbf{cp}` basis:
.. MATH::
\rho_A = \sum_{A \subseteq B}
\frac{1}{q^{\operatorname{nst}_{B-A}^A}} \mathbf{m}_B,
where we sum over all set partitions `B` such that the arcs of `A`
are a subset of the arcs of `B`.
INPUT:
- ``q`` -- (default: ``2``) the parameter `q`
EXAMPLES::
sage: R = QQ['q'].fraction_field()
sage: q = R.gen()
sage: NCSym = SymmetricFunctionsNonCommutingVariables(R)
sage: rho = NCSym.rho(q)
We construct Example 3.1 in [BT13]_::
sage: rnode = lambda A: sorted([a[1] for a in A.arcs()], reverse=True)
sage: dimv = lambda A: sorted([a[1]-a[0] for a in A.arcs()], reverse=True)
sage: lst = list(SetPartitions(4))
sage: S = sorted(lst, key=lambda A: (dimv(A), rnode(A)))
sage: m = NCSym.m()
sage: matrix([[m(rho[A])[B] for B in S] for A in S])
[ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1]
[ 0 1 0 0 1 1 0 1 0 0 1 0 0 0 0]
[ 0 0 1 0 1 0 1 1 0 0 0 0 0 0 1]
[ 0 0 0 1 0 1 1 1 0 0 0 1 0 0 0]
[ 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0]
[ 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 1 0 0 1 1 0 0]
[ 0 0 0 0 0 0 0 0 0 1 1 0 1 0 0]
[ 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1/q]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1]
"""
def __init__(self, NCSym, q=2):
"""
EXAMPLES::
sage: R = QQ['q'].fraction_field()
sage: q = R.gen()
sage: NCSym = SymmetricFunctionsNonCommutingVariables(R)
sage: TestSuite(NCSym.rho(q)).run()
"""
R = NCSym.base_ring()
self._q = R(q)
CombinatorialFreeModule.__init__(self, R, SetPartitions(),
prefix='rho', bracket=False,
category=MultiplicativeNCSymBases(NCSym))
# Register coercions
m = NCSym.m()
self.module_morphism(self._rho_to_m_on_basis, codomain=m).register_as_coercion()
m.module_morphism(self._m_to_rho_on_basis, codomain=self).register_as_coercion()
def q(self):
"""
Return the deformation parameter `q` of ``self``.
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: rho = NCSym.rho(5)
sage: rho.q()
5
sage: R = QQ['q'].fraction_field()
sage: q = R.gen()
sage: NCSym = SymmetricFunctionsNonCommutingVariables(R)
sage: rho = NCSym.rho(q)
sage: rho.q() == q
True
"""
return self._q
@cached_method
def _rho_to_m_on_basis(self, A):
r"""
Return `\rho_A` in terms of the monomial basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- an element of the `\mathbf{m}` basis
TESTS::
sage: R = QQ['q'].fraction_field()
sage: q = R.gen()
sage: NCSym = SymmetricFunctionsNonCommutingVariables(R)
sage: rho = NCSym.rho(q)
sage: all(rho(rho._rho_to_m_on_basis(A)) == rho[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
m = self.realization_of().m()
arcs = set(A.arcs())
return m._from_dict({B: self._q**-nesting(set(B).difference(A), A)
for B in A.coarsenings() if arcs.issubset(B.arcs())},
remove_zeros=False)
@cached_method
def _m_to_rho_on_basis(self, A):
r"""
Return `\mathbf{m}_A` in terms of the `\rho` basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- an element of the `\rho` basis
TESTS::
sage: R = QQ['q'].fraction_field()
sage: q = R.gen()
sage: NCSym = SymmetricFunctionsNonCommutingVariables(R)
sage: rho = NCSym.rho(q)
sage: m = NCSym.m()
sage: all(m(rho._m_to_rho_on_basis(A)) == m[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
coeff = lambda A,B: ((-1)**len(set(B.arcs()).difference(A.arcs()))
/ self._q**nesting(set(B).difference(A), B))
arcs = set(A.arcs())
return self._from_dict({B: coeff(A,B) for B in A.coarsenings()
if arcs.issubset(B.arcs())},
remove_zeros=False)
rho = deformed_coarse_powersum
class supercharacter(NCSymBasis_abstract):
r"""
The Hopf algebra of symmetric functions in non-commuting variables
in the supercharacter `\chi` basis.
This basis was defined in [BT13]_ as a `q`-deformation of the
supercharacter basis.
.. MATH::
\chi_A = \sum_B \chi_A(B) \mathbf{m}_B,
        where we sum over all set partitions `B` and `\chi_A(B)` is the
evaluation of the supercharacter `\chi_A` on the superclass `\mu_B`.
.. NOTE::
The supercharacters considered in [BT13]_ are coarser than
            those considered by Aguiar et al.
INPUT:
- ``q`` -- (default: ``2``) the parameter `q`
EXAMPLES::
sage: R = QQ['q'].fraction_field()
sage: q = R.gen()
sage: NCSym = SymmetricFunctionsNonCommutingVariables(R)
sage: chi = NCSym.chi(q)
sage: chi[[1,3],[2]]*chi[[1,2]]
chi{{1, 3}, {2}, {4, 5}}
sage: chi[[1,3],[2]].coproduct()
chi{} # chi{{1, 3}, {2}} + (2*q-2)*chi{{1}} # chi{{1}, {2}} +
(3*q-2)*chi{{1}} # chi{{1, 2}} + (2*q-2)*chi{{1}, {2}} # chi{{1}} +
(3*q-2)*chi{{1, 2}} # chi{{1}} + chi{{1, 3}, {2}} # chi{}
sage: chi2 = NCSym.chi()
sage: chi(chi2[[1,2],[3]])
((-q+2)/q)*chi{{1}, {2}, {3}} + 2/q*chi{{1, 2}, {3}}
sage: chi2
Symmetric functions in non-commuting variables over the Fraction Field
of Univariate Polynomial Ring in q over Rational Field in the
supercharacter basis with parameter q=2
"""
def __init__(self, NCSym, q=2):
"""
EXAMPLES::
sage: R = QQ['q'].fraction_field()
sage: q = R.gen()
sage: NCSym = SymmetricFunctionsNonCommutingVariables(R)
sage: TestSuite(NCSym.chi(q)).run()
"""
R = NCSym.base_ring()
self._q = R(q)
CombinatorialFreeModule.__init__(self, R, SetPartitions(),
prefix='chi', bracket=False,
category=MultiplicativeNCSymBases(NCSym))
# Register coercions
m = NCSym.m()
self.module_morphism(self._chi_to_m_on_basis, codomain=m).register_as_coercion()
m.module_morphism(self._m_to_chi_on_basis, codomain=self).register_as_coercion()
def q(self):
"""
Return the deformation parameter `q` of ``self``.
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: chi = NCSym.chi(5)
sage: chi.q()
5
sage: R = QQ['q'].fraction_field()
sage: q = R.gen()
sage: NCSym = SymmetricFunctionsNonCommutingVariables(R)
sage: chi = NCSym.chi(q)
sage: chi.q() == q
True
"""
return self._q
@cached_method
def _chi_to_m_on_basis(self, A):
r"""
Return `\chi_A` in terms of the monomial basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- an element of the `\mathbf{m}` basis
TESTS::
sage: R = QQ['q'].fraction_field()
sage: q = R.gen()
sage: NCSym = SymmetricFunctionsNonCommutingVariables(R)
sage: chi = NCSym.chi(q)
sage: all(chi(chi._chi_to_m_on_basis(A)) == chi[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
m = self.realization_of().m()
q = self._q
arcs = set(A.arcs())
ret = {}
for B in SetPartitions(A.size()):
Barcs = B.arcs()
if any((a[0] == b[0] and b[1] < a[1])
or (b[0] > a[0] and a[1] == b[1])
for a in arcs for b in Barcs):
continue
ret[B] = ((-1)**len(arcs.intersection(Barcs))
* (q - 1)**(len(arcs) - len(arcs.intersection(Barcs)))
* q**(sum(a[1] - a[0] for a in arcs) - len(arcs))
/ q**nesting(B, A))
return m._from_dict(ret, remove_zeros=False)
@cached_method
def _graded_inverse_matrix(self, n):
r"""
Return the inverse of the transition matrix of the ``n``-th
graded part from the `\chi` basis to the monomial basis.
EXAMPLES::
sage: R = QQ['q'].fraction_field(); q = R.gen()
sage: NCSym = SymmetricFunctionsNonCommutingVariables(R)
sage: chi = NCSym.chi(q); m = NCSym.m()
sage: lst = list(SetPartitions(2))
sage: m = matrix([[m(chi[A])[B] for A in lst] for B in lst]); m
[ -1 1]
[q - 1 1]
sage: chi._graded_inverse_matrix(2)
[ -1/q 1/q]
[(q - 1)/q 1/q]
sage: chi._graded_inverse_matrix(2) * m
[1 0]
[0 1]
"""
lst = SetPartitions(n)
MS = MatrixSpace(self.base_ring(), lst.cardinality())
m = self.realization_of().m()
m = MS([[m(self[A])[B] for A in lst] for B in lst])
return ~m
@cached_method
def _m_to_chi_on_basis(self, A):
r"""
Return `\mathbf{m}_A` in terms of the `\chi` basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- an element of the `\chi` basis
TESTS::
sage: R = QQ['q'].fraction_field()
sage: q = R.gen()
sage: NCSym = SymmetricFunctionsNonCommutingVariables(R)
sage: chi = NCSym.chi(q)
sage: m = NCSym.m()
sage: all(m(chi._m_to_chi_on_basis(A)) == m[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
n = A.size()
lst = list(SetPartitions(n))
m = self._graded_inverse_matrix(n)
i = lst.index(A)
return self._from_dict({B: m[j,i] for j,B in enumerate(lst)})
chi = supercharacter
|
the-stack_0_17109
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for evaluating various tasks."""
import codecs
import tensorflow.compat.v1 as tf
from airdialogue.evaluator.metrics import bleu
from airdialogue.evaluator.metrics import rouge
from airdialogue.evaluator.metrics import kl
ROLE_TOKENS = ["<t1>", "<t2>"]
def evaluate(ref_file, trans_file, metric):
"""Pick a metric and evaluate depending on task."""
if ":" in metric:
metric, mode = metric.split(":")
else:
mode = "brief"
assert mode in ["brief", "all"]
# BLEU scores for translation task
if metric.lower() == "bleu":
evaluation_score = _bleu(
ref_file, trans_file, mode=mode)
# ROUGE scores for summarization tasks
elif metric.lower() == "rouge":
evaluation_score = _rouge(
ref_file, trans_file, mode=mode)
# kl scores for evaluating the ngram kl distribution of the whole corpus
elif metric.lower() == "kl":
evaluation_score = _kl(
ref_file, trans_file, mode=mode)
elif metric.lower() == "accuracy":
evaluation_score = _accuracy(ref_file, trans_file)
else:
raise ValueError("Unknown metric %s" % metric)
return evaluation_score
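# Example usage (illustrative sketch; file paths are hypothetical):
#   evaluate("dev.ref.txt", "dev.pred.txt", "bleu")        # brief mode: a single float
#   evaluate("dev.ref.txt", "dev.pred.txt", "rouge:all")   # dict keyed by "all", "<t1>", "<t2>"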
def _kl(ref_file, trans_file, mode="brief"):
"""Compute KL divergence and handling BPE."""
max_order = 4
ref_files = [ref_file]
reference_text = []
role_tokens = []
for reference_filename in ref_files:
with codecs.getreader("utf-8")(tf.gfile.GFile(reference_filename,
"rb")) as fh:
for line in fh:
reference, role = process_dialogue_infer(
line.rstrip(), get_role_token=True)
reference_text.append(reference.split(" "))
role_tokens.append(role)
translations = []
with codecs.getreader("utf-8")(tf.gfile.GFile(trans_file, "rb")) as fh:
for line in fh:
translations.append(line.rstrip().split(" "))
results = {}
kl_scores = kl.compute_kl(reference_text, translations, max_order)
for key in kl_scores:
results["all-" + key] = kl_scores[key]
if mode == "brief":
return sum(results.values()) / len(results)
for role in ROLE_TOKENS:
_sub_ref_texts = []
_sub_trans = []
for _r, _t, _role in zip(reference_text, translations, role_tokens):
if _role == role:
_sub_ref_texts.append(_r)
_sub_trans.append(_t)
kl_scores = kl.compute_kl(_sub_ref_texts, _sub_trans, max_order)
for key in kl_scores:
results[role + "-" + key] = kl_scores[key]
return results
def _bleu(ref_file, trans_file, mode="brief"):
"""Compute BLEU scores and handling BPE."""
max_order = 4
smooth = False
ref_files = [ref_file]
reference_text = []
for reference_filename in ref_files:
with codecs.getreader("utf-8")(tf.gfile.GFile(reference_filename,
"rb")) as fh:
reference_text.append(fh.readlines())
per_segment_references = []
role_tokens = []
for references in zip(*reference_text):
reference_list = []
for reference in references:
reference, role = process_dialogue_infer(
reference.rstrip(), get_role_token=True)
reference_list.append(reference.split(" "))
per_segment_references.append(reference_list)
role_tokens.append(role)
translations = []
with codecs.getreader("utf-8")(tf.gfile.GFile(trans_file, "rb")) as fh:
for line in fh:
translations.append(line.rstrip().split(" "))
results = {}
bleu_score, _, _, _, _, _ = bleu.compute_bleu(per_segment_references,
translations, max_order, smooth)
results["all"] = 100 * bleu_score
if mode == "brief":
return results["all"]
for role in ROLE_TOKENS:
_sub_ref_texts = []
_sub_trans = []
for _r, _t, _role in zip(per_segment_references, translations, role_tokens):
if _role == role:
_sub_ref_texts.append(_r)
_sub_trans.append(_t)
bleu_score, _, _, _, _, _ = bleu.compute_bleu(_sub_ref_texts, _sub_trans,
max_order, smooth)
results[role] = 100 * bleu_score
return results
def _rouge(ref_file, summarization_file, mode="brief"):
"""Compute ROUGE scores and handling BPE."""
results = {}
references = []
role_tokens = []
with codecs.getreader("utf-8")(tf.gfile.GFile(ref_file, "rb")) as fh:
for line in fh:
ref, role = process_dialogue_infer(line.rstrip(), get_role_token=True)
references.append(ref)
role_tokens.append(role)
hypotheses = []
with codecs.getreader("utf-8")(tf.gfile.GFile(summarization_file,
"rb")) as fh:
for line in fh:
hypotheses.append(line)
rouge_score_map = rouge.rouge(hypotheses, references)
results["all"] = 100 * rouge_score_map["rouge_l/f_score"]
if mode == "brief":
return results["all"]
for role in ROLE_TOKENS:
_sub_ref_texts = []
_sub_hypos = []
for _r, _t, _role in zip(references, hypotheses, role_tokens):
if _role == role:
_sub_ref_texts.append(_r)
_sub_hypos.append(_t)
rouge_score_map = rouge.rouge(_sub_hypos, _sub_ref_texts)
results[role] = 100 * rouge_score_map["rouge_l/f_score"]
return results
def process_dialogue_infer(file_line, get_role_token=False):
# split the end token (<t1>,<t2>)
_line = file_line.replace(" <eod>", "")
_line = _line.rstrip().split("|")[1].rsplit(" ", 1)
if not get_role_token:
return _line[0]
else:
return _line[0], _line[1]
def _accuracy(label_file, pred_file):
"""Compute accuracy, each line contains a label."""
with codecs.getreader("utf-8")(tf.gfile.GFile(label_file, "rb")) as label_fh:
with codecs.getreader("utf-8")(tf.gfile.GFile(pred_file, "rb")) as pred_fh:
count = 0.0
match = 0.0
for label, pred in zip(label_fh, pred_fh):
label = process_dialogue_infer(label.strip()).strip()
pred = pred.strip()
if label == pred:
match += 1
count += 1
return 100 * match / count
|
the-stack_0_17110
|
from django import forms
from django.contrib.auth.models import AnonymousUser
from cabot.cabotapp.views import StatusCheckForm
from cabot.metricsapp.api import get_status_check_fields
from cabot.metricsapp.models import GrafanaInstance, GrafanaDataSource
# Model forms for admin site
from cabot.metricsapp.models.grafana import set_grafana_panel_from_session, GrafanaPanel
class GrafanaInstanceAdminForm(forms.ModelForm):
class Meta:
model = GrafanaInstance
exclude = []
class GrafanaDataSourceAdminForm(forms.ModelForm):
class Meta:
model = GrafanaDataSource
exclude = []
# Forms for selecting Grafana instance, dashboard, panel, etc.
class GrafanaInstanceForm(forms.Form):
"""Select a Grafana instance to use for a status check"""
grafana_instance = forms.ModelChoiceField(
queryset=GrafanaInstance.objects.all(),
initial=1,
help_text='Grafana site instance to select a dashboard from.'
)
def __init__(self, *args, **kwargs):
default_grafana_instance = kwargs.pop('default_grafana_instance')
super(GrafanaInstanceForm, self).__init__(*args, **kwargs)
if default_grafana_instance is not None:
self.fields['grafana_instance'].initial = default_grafana_instance
class GrafanaDashboardForm(forms.Form):
"""Select a Grafana dashboard to use for a status check"""
def __init__(self, *args, **kwargs):
dashboards = kwargs.pop('dashboards')
default_dashboard = kwargs.pop('default_dashboard')
super(GrafanaDashboardForm, self).__init__(*args, **kwargs)
self.fields['dashboard'] = forms.ChoiceField(
choices=dashboards,
help_text='Grafana dashboard to use for the check.'
)
if default_dashboard is not None:
self.fields['dashboard'].initial = default_dashboard
class GrafanaPanelForm(forms.Form):
"""Select a Grafana panel to use for a status check"""
def __init__(self, *args, **kwargs):
panels = kwargs.pop('panels')
default_panel_id = kwargs.pop('default_panel_id')
super(GrafanaPanelForm, self).__init__(*args, **kwargs)
self.fields['panel'] = forms.ChoiceField(
choices=panels,
help_text='Grafana panel to use for the check.'
)
if default_panel_id is not None:
for panel in panels:
panel_data = panel[0]
if panel_data['panel_id'] == default_panel_id:
self.fields['panel'].initial = panel_data
break
def clean_panel(self):
"""Make sure the data source for the panel is supported"""
panel = eval(self.cleaned_data['panel'])
datasource = panel['datasource']
grafana_instance_id = panel['grafana_instance_id']
try:
GrafanaDataSource.objects.get(grafana_source_name=datasource,
grafana_instance_id=grafana_instance_id)
except GrafanaDataSource.DoesNotExist:
raise forms.ValidationError('No matching data source for {}.'.format(datasource))
return panel
class GrafanaSeriesForm(forms.Form):
"""Select the series to use for a status check"""
def __init__(self, *args, **kwargs):
series = kwargs.pop('series')
default_series = kwargs.pop('default_series')
super(GrafanaSeriesForm, self).__init__(*args, **kwargs)
self.fields['series'] = forms.MultipleChoiceField(
choices=series,
widget=forms.CheckboxSelectMultiple,
help_text='Data series to use in the check.'
)
if default_series is not None:
self.fields['series'].initial = default_series
def clean_series(self):
"""Make sure at least one series is selected."""
series = self.cleaned_data.get('series')
if not series:
raise forms.ValidationError('At least one series must be selected.')
return series
class GrafanaStatusCheckForm(StatusCheckForm):
"""Generic form for creating a status check. Other metrics sources will subclass this."""
_autofilled_fields = ('time_range', 'check_type', 'warning_value', 'high_alert_value', 'source')
_disabled_fields = ('source',)
def __init__(self, grafana_session_data=None, user=None, initial=None, *args, **kwargs):
self.grafana_panel = ((initial and initial['grafana_panel'])
or (kwargs.get('instance') and kwargs['instance'].grafana_panel)
or GrafanaPanel())
if grafana_session_data:
dashboard_info = grafana_session_data['dashboard_info']
panel_info = grafana_session_data['panel_info']
templating_dict = grafana_session_data['templating_dict']
instance_id = grafana_session_data['instance_id']
grafana_data_source = GrafanaDataSource.objects.get(
grafana_source_name=grafana_session_data['datasource'],
grafana_instance_id=instance_id
)
# we will reuse the PK of instance.grafana_panel if there's one set, changes are manually saved in save()
set_grafana_panel_from_session(self.grafana_panel, grafana_session_data)
grafana_fields = get_status_check_fields(dashboard_info, panel_info, grafana_data_source,
templating_dict, self.grafana_panel, user)
# MetricsSourceBase overrides __unicode__ to return its name, but we need it to serialize to
# its pk so ModelChoiceForm can handle it right
grafana_fields['source'] = grafana_fields['source'].pk
# apply initial on top of get_status_check_fields() to allow overriding
if initial:
grafana_fields.update(initial)
initial = grafana_fields
super(GrafanaStatusCheckForm, self).__init__(*args, initial=initial, **kwargs)
self.fields['name'].widget = forms.TextInput(attrs=dict(style='width:50%'))
self.fields['name'].help_text = None
for field_name in self._autofilled_fields:
self.fields[field_name].help_text += ' Autofilled from the Grafana dashboard.'
for field_name in self._disabled_fields:
self.fields[field_name].disabled = True
self.user = user # used in save(), ignored if None
def save(self, commit=True):
model = super(GrafanaStatusCheckForm, self).save(commit=False)
# the grafana panel may have been created or updated, so also save that
if self.grafana_panel:
self.grafana_panel.save()
model.grafana_panel = self.grafana_panel
if self.user and not isinstance(self.user, AnonymousUser):
model.created_by = self.user
# When commit is False, we just get the model, but the service/instance sets aren't saved
# (since the model doesn't have a pk yet). Re-run to actually save the service and instance sets
model = super(GrafanaStatusCheckForm, self).save()
return model
|
the-stack_0_17111
|
import sys
from flask import Flask, render_template, jsonify, redirect
import pymongo
import scrape_mars
sys.setrecursionlimit(2000)
app = Flask(__name__)
client = pymongo.MongoClient()
db = client.mars_db
collection = db.mars_facts
@app.route('/scrape')
def scrape():
    mars = scrape_mars.scrape()
    db.mars_facts.insert_one(mars)
    # Return a response so Flask does not complain that the view returned None;
    # redirect back to the index page once scraping is done.
    return redirect("/")
@app.route("/")
def home():
mars = list(db.mars_facts.find())
print(mars)
return render_template("index.html", mars = mars)
if __name__ == "__main__":
app.run(debug=True)
|
the-stack_0_17112
|
import codecs
import sys
import setuptools
def read_requirements_file(req_name):
requirements = []
try:
with codecs.open(req_name, encoding='utf-8') as req_file:
for req_line in req_file:
if '#' in req_line:
req_line = req_line[0:req_line.find('#')].strip()
if req_line:
requirements.append(req_line.strip())
except IOError:
pass
return requirements
install_requires = read_requirements_file('requirements.txt')
setup_requires = read_requirements_file('setup-requirements.txt')
tests_require = read_requirements_file('test-requirements.txt')
if sys.version_info < (2, 7):
tests_require.append('unittest2')
if sys.version_info < (3, 0):
tests_require.append('mock')
setuptools.setup(
name='sprockets.mixins.redis',
version='0.0.0',
description='Tornado handler mixin to provide easy read/write access to Redis',
long_description=codecs.open('README.rst', encoding='utf-8').read(),
url='https://github.com/sprockets/sprockets.mixins.redis.git',
author='AWeber Communications',
author_email='[email protected]',
license=codecs.open('LICENSE', encoding='utf-8').read(),
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules'
],
packages=['sprockets',
'sprockets.mixins',
'sprockets.mixins.redis'],
package_data={'': ['LICENSE', 'README.md']},
include_package_data=True,
namespace_packages=['sprockets',
'sprockets.mixins'],
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
test_suite='nose.collector',
zip_safe=False)
|
the-stack_0_17113
|
from collections import OrderedDict
from requests.exceptions import ConnectionError
from .. import exceptions, utility, transports, schema
from . import response, messages
import logging
import yaml
logger = logging.getLogger(__name__)
class CommandHTTPSTransport(transports.BaseTransport):
def __init__(self,
headers = None,
auth = None,
options_callback = None,
message_callback = None,
encryption_key = None
):
super().__init__(
headers = headers,
auth = auth,
encryption_key = encryption_key
)
self._options_callback = options_callback
self._message_callback = message_callback
def transition(self, link, decoders, params = None):
params = self.get_params(link, params)
url = self.get_url(link.url, params.path)
headers = self.get_headers(url, decoders)
headers.update(self._headers)
if link.action == 'get':
# Schema
try:
result = self.request_page(url, headers, params, decoders)
if isinstance(result, schema.Error):
raise exceptions.CommandError(result['detail'])
return result
except ConnectionError as error:
raise exceptions.CommandConnectionError(error)
else:
# Command
if self._options_callback and callable(self._options_callback):
self._options_callback(params.data)
try:
return self.request_stream(url, headers, params, decoders)
except ConnectionError as error:
raise exceptions.CommandConnectionError(error)
def request_stream(self, url, headers, params, decoders):
session = self.init_session()
request = self._build_post_request(session, url, headers, params)
settings = session.merge_environment_settings(
request.url, None, True, False, None
)
logger.debug("Stream {} request headers: {}".format(request.url, request.headers))
request_response = session.send(request, **settings)
command_response = response.CommandResponse()
if request_response.status_code >= 400:
raise exceptions.CommandResponseError(utility.format_response_error(request_response))
try:
for line in request_response.iter_lines():
message = messages.Message.get(
self._decode_message(request_response, line, decoders),
self._cipher.key
)
if self._message_callback and callable(self._message_callback):
self._message_callback(message)
command_response.add(message)
except Exception as error:
logger.debug("Stream {} error response headers: {}".format(request.url, request_response.headers))
logger.debug("Stream {} error response params:\n\n{}".format(request.url, yaml.dump(params.data)))
logger.debug("Stream {} error status code: {}".format(request.url, request_response.status_code))
raise error
return command_response
def _decode_message(self, response, data, decoders):
result = None
if data:
content_type = response.headers.get('content-type')
codec = self._negotiate_decoder(decoders, content_type)
options = {
'base_url': response.url
}
if 'content-type' in response.headers:
options['content_type'] = response.headers['content-type']
if 'content-disposition' in response.headers:
options['content_disposition'] = response.headers['content-disposition']
result = codec.decode(data, **options)
return result
def _decode_result_error(self, result, response):
is_error = response.status_code >= 400 and response.status_code <= 599
if is_error and not isinstance(result, schema.Error):
default_title = "{} {}".format(response.status_code, response.reason)
result = self._coerce_to_error(result, default_title = default_title)
return result
def _coerce_to_error(self, obj, default_title):
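        # Illustrative behaviour (sketch): a plain dict such as {'detail': 'boom'} with
        # default_title '500 Internal Server Error' is wrapped as
        # schema.Error(title='500 Internal Server Error', content={'detail': 'boom'}).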
if isinstance(obj, schema.Document):
return schema.Error(
title = obj.title or default_title,
content = self._coerce_to_error_content(obj)
)
elif isinstance(obj, dict):
return schema.Error(title = default_title, content = obj)
elif isinstance(obj, list):
return schema.Error(title = default_title, content = { 'messages': obj })
elif obj is None:
return schema.Error(title = default_title)
return schema.Error(title = default_title, content = { 'message': obj })
def _coerce_to_error_content(self, node):
if isinstance(node, (schema.Document, schema.Object)):
return OrderedDict([
(key, self._coerce_to_error_content(value))
for key, value in node.data.items()
])
elif isinstance(node, schema.Array):
return [
self._coerce_to_error_content(item)
for item in node
if not isinstance(item, schema.Link)
]
return node
|
the-stack_0_17114
|
"""
# Definition for a Node.
class Node:
def __init__(self, val, children):
self.val = val
self.children = children
"""
from typing import List
class Solution:
def postorder(self, root: 'Node') -> List[int]:
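        # res accumulates the reverse of the post-order for this subtree: root first,
        # children visited right-to-left with each child's post-order re-reversed;
        # the final [::-1] restores the usual post-order (children, then root).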
if not root:
return []
res = [root.val]
for child in root.children[::-1]:
res.extend(self.postorder(child)[::-1])
return res[::-1]
|
the-stack_0_17118
|
"""Save codes into library"""
from typing import List
from d2lbook import notebook
from d2lbook import common
import logging
import os
import copy
import re
import pathlib
import ast
import astor
from yapf.yapflib.yapf_api import FormatCode
import isort
HEADER = '################# WARNING ################\n'
def _write_header(f):
f.write(HEADER)
f.write('# The below part is generated automatically through:\n')
f.write('# d2lbook build lib\n')
f.write('# Don\'t edit it directly\n\n')
def save_tab(notebooks: List[str], lib_fname: str, tab: str, default_tab: str):
logging.info(
        f'Matching with the pattern: "#@save", searching for tab {tab}')
custom_header = []
if os.path.exists(lib_fname):
with open(lib_fname, 'r') as f:
lines = f.readlines()
for i, l in enumerate(lines):
if l.strip() == HEADER.strip():
custom_header = lines[:i]
break
with open(lib_fname, 'w') as f:
if custom_header:
f.write(''.join(custom_header))
_write_header(f)
saved = []
for nb in notebooks:
saved.extend(_save_code(nb, tab=tab, default_tab=default_tab))
f.write(_refactor_blocks(saved))
logging.info('Saved %d blocks into %s', len(saved), lib_fname)
def save_version(version: str, version_fn: str):
if version and version_fn:
with open(version_fn, 'r', encoding='UTF-8') as f:
lines = f.read().split('\n')
for i, l in enumerate(lines):
if '__version__' in l:
lines[i] = f'__version__ = "{version}"'
logging.info(f'save {lines[i]} into {version_fn}')
with open(version_fn, 'w') as f:
f.write('\n'.join(lines))
def _save_block(source: str, save_mark: str):
if not save_mark: return ''
lines = source.splitlines()
block = []
for i, l in enumerate(lines):
m = re.search(f'# *{save_mark}', l)
if m:
l = l[:m.span()[0]].rstrip()
if l: block.append(l)
for j in range(i + 1, len(lines)):
l = lines[j]
if not l.startswith(' ') and len(l):
block.append(lines[j])
else:
for k in range(j, len(lines)):
if lines[k].startswith(' ') or not len(lines[k]):
block.append(lines[k])
else:
break
break
return format_code('\n'.join(block))
def _save_code(input_fn, save_mark='@save', tab=None,
default_tab=None):
"""get the code blocks (import, class, def) that will be saved"""
with open(input_fn, 'r', encoding='UTF-8') as f:
nb = notebook.read_markdown(f.read())
if tab:
nb = notebook.get_tab_notebook(nb, tab, default_tab)
if not nb:
return []
saved = []
for i, cell in enumerate(nb.cells):
if cell.cell_type == 'code':
block = _save_block(cell.source, save_mark)
if block:
label = _find_latest_label(nb.cells[:i-1])
saved.append([block, label, input_fn])
return saved
def _find_latest_label(cells):
for cell in reversed(cells):
if cell.cell_type == 'markdown':
matches = re.findall(common.md_mark_pattern, cell.source)
for m in reversed(matches):
if m[0] == 'label' and 'sec_' in m[1]:
return m[1]
return ''
def _refactor_blocks(saved_blocks):
# add label into docstring
for i, (block, label, _) in enumerate(saved_blocks):
if not label: continue
modules = common.split_list(block.split('\n'), lambda l: l.startswith('def') or l.startswith('class'))
new_block = []
if modules[0]: new_block.append('\n'.join(modules[0]))
for m in modules[1:]:
parts = common.split_list(m, lambda l: '):' in l)
# find the docstring
if len(parts) > 1:
docstr = parts[1][1] if len(parts[1]) > 1 else common.head_spaces(m[0]) + ' '
loc = f'Defined in :numref:{label}"""'
if docstr.lstrip().startswith('"""') and docstr.endswith('"""'):
parts[1][1] = docstr[:-3] + f'\n\n{common.head_spaces(docstr)}{loc}'
else:
parts[1].insert(1, f'{common.head_spaces(docstr)}"""{loc}')
new_block.append('\n'.join(common.flatten(parts)))
saved_blocks[i][0] = '\n'.join(new_block)
# merge @d2l.save_to_class
new_blocks = []
class_blocks = {}
for i, (block, _, _) in enumerate(saved_blocks):
lines = block.split('\n')
if lines[0].startswith('class'):
new_blocks.append(block)
m = re.search('class +([\w\_]+)', lines[0])
if m:
class_blocks[m.groups()[0]] = len(new_blocks) - 1
continue
register = '@d2l.add_to_class'
if register in block:
parts = common.split_list(lines, lambda x: x.startswith(register))
if parts[0]:
new_blocks.append(parts[0])
if len(parts) > 1:
for p in parts[1:]:
m = re.search('\@d2l\.add_to_class\(([\.\w\_]+)\)', p[0])
if m:
cls = m.groups()[0].split('.')[-1]
new_blocks[class_blocks[cls]] += '\n\n' + '\n'.join([' '+l for l in p[1:]])
continue
new_blocks.append(block)
return '\n\n'.join(new_blocks)
def _parse_mapping_config(config: str, split_line=True):
"""Parse config such as: numpy -> asnumpy, reshape, ...
Return a list of string pairs
"""
terms = []
for line in config.splitlines():
if split_line:
terms.extend(line.split(','))
else:
terms.append(line)
mapping = []
for term in terms:
term = term.strip()
if not term:
continue
if len(term.split('->')) == 2:
a, b = term.split('->')
mapping.append((a.strip(), b.strip()))
else:
mapping.append((term, term))
return mapping
def node_to_source(node):
if isinstance(node, ast.Constant):
return str(node.value)
return astor.to_source(node).rstrip()
def save_alias(tab_lib):
"""Save alias into the library file"""
alias = ''
if 'alias' in tab_lib:
alias += tab_lib['alias'].strip() + '\n'
if 'lib_name' in tab_lib:
lib_name = tab_lib["lib_name"]
if 'simple_alias' in tab_lib:
mapping = _parse_mapping_config(tab_lib['simple_alias'])
for a, b in mapping:
if a.endswith('('): a = a[:-1]
if b.endswith('('): b = b[:-1]
alias += f'\n{a} = {lib_name}.{b}'
if 'fluent_alias' in tab_lib:
mapping = _parse_mapping_config(tab_lib['fluent_alias'])
alias += '\n' + '\n'.join([
f'{a} = lambda x, *args, **kwargs: x.{b}(*args, **kwargs)'
for a, b in mapping])
if 'args_alias' in tab_lib:
mapping = _parse_mapping_config(tab_lib['args_alias'], split_line=False)
for a, b in mapping:
alias += f'\ndef {a}:\n return {b}'
if alias:
lib_file = tab_lib['lib_file']
with open(lib_file, 'a') as f:
logging.info(
f'Wrote {len(alias.splitlines())} alias into {lib_file}')
f.write('# Alias defined in config.ini\n')
f.write(alias + '\n\n')
def replace_call(source: str, mapping, replace_fn):
matched = False
for a in mapping:
if 'd2l.'+a in source:
matched = True
if not matched:
return source
lines = source.splitlines()
if lines[0].startswith('%'):
source = '\n'.join(lines[1:])
for _ in range(100): # 100 is a (random) big enough number
replaced = False
tree = ast.parse(source)
for node in ast.walk(tree):
if (isinstance(node, ast.Call) and
isinstance(node.func, ast.Attribute) and
isinstance(node.func.value, ast.Name) and
node.func.value.id == 'd2l' and
node.func.attr in mapping):
new_node = replace_fn(node, mapping[node.func.attr])
if new_node:
source = source.replace(
ast.get_source_segment(source, node),
new_node if isinstance(new_node, str) else node_to_source(new_node))
replaced = True
break
if not replaced:
break
if lines[0].startswith('%'):
source = lines[0] + '\n' + source
return source
def replace_fluent_alias(source, fluent_mapping):
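    # e.g. with fluent_mapping={'reshape': 'reshape'}, a call written as
    # "d2l.reshape(x, (2, 3))" is rewritten to "x.reshape((2, 3))"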
def _replace(node, b):
return ast.Call(
ast.Attribute(value=node.args[0], attr=b),
node.args[1:], node.keywords)
return replace_call(source, fluent_mapping, _replace)
def replace_args_alias(source, args_mapping):
def _replace(node, b):
a_args, b = b
a_kwargs = {a: b for a, b in a_args if not a.startswith('a_')}
a_args = [a for a, _ in a_args if a.startswith('a_')]
if len(node.args) != len(a_args):
return None
key_value = {a : node_to_source(arg) for arg, a in zip(node.args, a_args)}
for kw in node.keywords:
assert kw.arg in a_kwargs, (kw.arg, a_kwargs)
key_value['='+kw.arg] = '='+node_to_source(kw.value)
        # remove keywords that do not appear in the call being replaced
b_call = ast.parse(b).body[0].value
if isinstance(b_call, ast.Call):
new_keywords = [kw for kw in b_call.keywords if '='+kw.value.id in key_value]
b_call.keywords = new_keywords
b = node_to_source(b_call)
for k, v in key_value.items():
b = b.replace(k, v)
return b
return replace_call(source, dict(args_mapping), _replace)
def call_args(call_str):
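    # e.g. call_args('norm(a_x, ord=p)') -> ('norm', [('a_x', None), ('ord', <ast node for p>)])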
call = ast.parse(call_str).body[0].value
assert isinstance(call, ast.Call), call_str
name = call.func.id
args = [(a.id,None) for a in call.args] + [(k.arg, k.value) for k in call.keywords]
return name, args
def replace_alias(nb, tab_lib):
nb = copy.deepcopy(nb)
patterns = []
fluent_mapping = {}
args_mapping = {}
if 'reverse_alias' in tab_lib:
patterns += _parse_mapping_config(tab_lib['reverse_alias'], split_line=False)
if 'lib_name' in tab_lib:
lib_name = tab_lib["lib_name"]
if 'simple_alias' in tab_lib:
mapping = _parse_mapping_config(tab_lib['simple_alias'])
patterns += [(f'd2l.{a}', f'{lib_name}.{b}') for a, b in mapping]
if 'fluent_alias' in tab_lib:
fluent_mapping = dict(_parse_mapping_config(tab_lib['fluent_alias']))
if 'args_alias' in tab_lib:
for a, b in _parse_mapping_config(tab_lib['args_alias'], split_line=False):
name, args = call_args(a)
args_mapping[name] = (args, b)
for cell in nb.cells:
if cell.cell_type == 'code':
for p, r in patterns:
cell.source = cell.source.replace(p, r)
if fluent_mapping:
cell.source = replace_fluent_alias(cell.source, fluent_mapping)
if args_mapping:
cell.source = replace_args_alias(cell.source, args_mapping)
return nb
def format_code(source: str):
if 'import ' in source:
config = isort.settings.Config(no_lines_before=[
isort.settings.FUTURE, isort.settings.STDLIB, isort.settings.
THIRDPARTY, isort.settings.FIRSTPARTY, isort.settings.LOCALFOLDER])
source = isort.code(source, config=config)
    # remove trailing spaces
source = '\n'.join([l.rstrip() for l in source.split('\n')]).strip()
# Disable yapf, as it doesn't work well for long sentences
return source
# fix the bug that yapf cannot handle jupyter magic
for l in source.splitlines():
if l.startswith('%') or l.startswith('!'):
return source
    # fix the bug that yapf removes the trailing ;
has_tailling_semicolon = source.rstrip().endswith(';')
style = {
'DISABLE_ENDING_COMMA_HEURISTIC': True,
'SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET': False,
'SPLIT_BEFORE_CLOSING_BRACKET': False,
'SPLIT_BEFORE_DICT_SET_GENERATOR': False,
'SPLIT_BEFORE_LOGICAL_OPERATOR': False,
'SPLIT_BEFORE_NAMED_ASSIGNS': False,
'COLUMN_LIMIT': 78,
'BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION': 1,}
source = FormatCode(source, style_config=style)[0].strip()
if has_tailling_semicolon: source += ';'
return source
def format_code_nb(nb):
for cell in nb.cells:
if cell.cell_type == 'code':
cell.source = format_code(cell.source)
return nb
# DEPRECATED
# def save_file(root_dir: str, nbfile: str):
# nbfile = pathlib.Path(nbfile)
# pyfile = root_dir / nbfile.with_suffix('.py')
# with nbfile.open('r') as f:
# nb = notebook.read_markdown(f.read())
# saved = []
# save_all = False
# for cell in nb.cells:
# if cell.cell_type == 'code':
# src = cell.source.lstrip()
# if re.search('# *@save_all', src):
# save_all = True
# if save_all or re.search('# *@save_cell', src):
# saved.append(src)
# else:
# blk = _save_block(src, '@save')
# if blk:
# saved.append(blk)
# if saved:
# with pyfile.open('w') as f:
# f.write(
# f'# This file is generated from {str(nbfile)} automatically through:\n'
# )
# f.write('# d2lbook build lib\n')
# f.write('# Don\'t edit it directly\n\n')
# for blk in saved:
# f.write(blk + '\n\n')
# logging.info(f'Found {len(saved)} blocks in {str(nbfile)}')
# DEPRECATED
# def save_mark(notebooks: List[str], lib_fname: str, save_mark: str):
# logging.info('Matching with the pattern: "%s"', save_mark)
# with open(lib_fname, 'w') as f:
# _write_header(f)
# lib_name = os.path.dirname(lib_fname)
# lib_name = lib_name.split('/')[-1]
# f.write('import sys\n' + lib_name + ' = sys.modules[__name__]\n\n')
# for nb in notebooks:
# _save_code(nb, f, save_mark=save_mark)
# logging.info('Saved into %s', lib_fname)
|
the-stack_0_17119
|
import os
import argparse
import datetime
import tensorflow as tf
import yolo.config as cfg
from yolo.yolo_net import YOLONet
from utils.timer import Timer
from utils.pascal_voc import pascal_voc
slim = tf.contrib.slim
class Solver(object):
def __init__(self, net, data):
self.net = net
self.data = data
self.weights_file = cfg.WEIGHTS_FILE
self.max_iter = cfg.MAX_ITER
self.initial_learning_rate = cfg.LEARNING_RATE
self.decay_steps = cfg.DECAY_STEPS
self.decay_rate = cfg.DECAY_RATE
self.staircase = cfg.STAIRCASE
self.summary_iter = cfg.SUMMARY_ITER
self.save_iter = cfg.SAVE_ITER
self.output_dir = os.path.join(
cfg.OUTPUT_DIR, datetime.datetime.now().strftime('%Y_%m_%d_%H_%M'))
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
self.save_cfg()
self.variable_to_restore = tf.global_variables()
self.saver = tf.train.Saver(self.variable_to_restore, max_to_keep=None)
self.ckpt_file = os.path.join(self.output_dir, 'yolo')
self.summary_op = tf.summary.merge_all()
self.writer = tf.summary.FileWriter(self.output_dir, flush_secs=60)
self.global_step = tf.train.create_global_step()
self.learning_rate = tf.train.exponential_decay(
self.initial_learning_rate, self.global_step, self.decay_steps,
self.decay_rate, self.staircase, name='learning_rate')
self.optimizer = tf.train.GradientDescentOptimizer(
learning_rate=self.learning_rate)
self.train_op = slim.learning.create_train_op(
self.net.total_loss, self.optimizer, global_step=self.global_step)
gpu_options = tf.GPUOptions()
config = tf.ConfigProto(gpu_options=gpu_options)
self.sess = tf.Session(config=config)
self.sess.run(tf.global_variables_initializer())
if self.weights_file is not None:
print('Restoring weights from: ' + self.weights_file)
self.saver.restore(self.sess, self.weights_file)
self.writer.add_graph(self.sess.graph)
def train(self):
train_timer = Timer()
load_timer = Timer()
for step in range(1, self.max_iter + 1):
load_timer.tic()
images, labels = self.data.get()
load_timer.toc()
feed_dict = {self.net.images: images,
self.net.labels: labels}
if step % self.summary_iter == 0:
if step % (self.summary_iter * 10) == 0:
train_timer.tic()
summary_str, loss, _ = self.sess.run(
[self.summary_op, self.net.total_loss, self.train_op],
feed_dict=feed_dict)
train_timer.toc()
log_str = '''{} Epoch: {}, Step: {}, Learning rate: {},\
Loss: {:5.3f}\nSpeed: {:.3f}s/iter,\
Load: {:.3f}s/iter, Remain: {}'''.format(
datetime.datetime.now().strftime('%m-%d %H:%M:%S'),
self.data.epoch,
int(step),
round(self.learning_rate.eval(session=self.sess), 6),
loss,
train_timer.average_time,
load_timer.average_time,
train_timer.remain(step, self.max_iter))
print(log_str)
else:
train_timer.tic()
summary_str, _ = self.sess.run(
[self.summary_op, self.train_op],
feed_dict=feed_dict)
train_timer.toc()
self.writer.add_summary(summary_str, step)
else:
train_timer.tic()
self.sess.run(self.train_op, feed_dict=feed_dict)
train_timer.toc()
if step % self.save_iter == 0:
print('{} Saving checkpoint file to: {}'.format(
datetime.datetime.now().strftime('%m-%d %H:%M:%S'),
self.output_dir))
self.saver.save(
self.sess, self.ckpt_file, global_step=self.global_step)
def save_cfg(self):
with open(os.path.join(self.output_dir, 'config.txt'), 'w') as f:
cfg_dict = cfg.__dict__
for key in sorted(cfg_dict.keys()):
if key[0].isupper():
cfg_str = '{}: {}\n'.format(key, cfg_dict[key])
f.write(cfg_str)
def update_config_paths(data_dir, weights_file):
cfg.DATA_PATH = data_dir
cfg.PASCAL_PATH = os.path.join(data_dir, 'pascal_voc')
cfg.CACHE_PATH = os.path.join(cfg.PASCAL_PATH, 'cache')
cfg.OUTPUT_DIR = os.path.join(cfg.PASCAL_PATH, 'output')
cfg.WEIGHTS_DIR = os.path.join(cfg.PASCAL_PATH, 'weights')
cfg.WEIGHTS_FILE = os.path.join(cfg.WEIGHTS_DIR, weights_file)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--weights', default="YOLO_small.ckpt", type=str)
parser.add_argument('--data_dir', default="data", type=str)
parser.add_argument('--threshold', default=0.2, type=float)
parser.add_argument('--iou_threshold', default=0.5, type=float)
parser.add_argument('--gpu', default='', type=str)
args = parser.parse_args()
if args.gpu is not None:
cfg.GPU = args.gpu
if args.data_dir != cfg.DATA_PATH:
update_config_paths(args.data_dir, args.weights)
os.environ['CUDA_VISIBLE_DEVICES'] = cfg.GPU
yolo = YOLONet()
pascal = pascal_voc('train')
solver = Solver(yolo, pascal)
print('Start training ...')
solver.train()
print('Done training.')
if __name__ == '__main__':
# python train.py --weights YOLO_small.ckpt --gpu 0
main()
|
the-stack_0_17121
|
from RLTest import Env
from redisgraph import Graph
from pathos.pools import ProcessPool as Pool
# 1.test getting and setting config
# 2. test overflowing the server when there's a limit
# expect to get error!
# 3. test overflowing the server when there's no limit
# expect not to get any exceptions
GRAPH_NAME = "max_pending_queries"
SLOW_QUERY = "UNWIND range (0, 1000000) AS x WITH x WHERE (x / 2) = 50 RETURN x"
def issue_query(conn, q):
try:
conn.execute_command("GRAPH.QUERY", GRAPH_NAME, q)
return False
except Exception as e:
assert "Max pending queries exceeded" in str(e)
return True
class testPendingQueryLimit():
def __init__(self):
# skip test if we're running under Valgrind
if Env().envRunner.debugger is not None:
Env().skip() # valgrind is not working correctly with multi process
self.env = Env(decodeResponses=True)
self.conn = self.env.getConnection()
def test_01_query_limit_config(self):
# read max queued queries config
result = self.conn.execute_command("GRAPH.CONFIG", "GET", "MAX_QUEUED_QUERIES")
max_queued_queries = result[1]
self.env.assertEquals(max_queued_queries, 4294967295)
# update configuration, set max queued queries
self.conn.execute_command("GRAPH.CONFIG", "SET", "MAX_QUEUED_QUERIES", 10)
# re-read configuration
result = self.conn.execute_command("GRAPH.CONFIG", "GET", "MAX_QUEUED_QUERIES")
max_queued_queries = result[1]
self.env.assertEquals(max_queued_queries, 10)
def stress_server(self):
threadpool_size = self.conn.execute_command("GRAPH.CONFIG", "GET", "THREAD_COUNT")[1]
thread_count = threadpool_size * 5
qs = [SLOW_QUERY] * thread_count
connections = []
pool = Pool(nodes=thread_count)
# init connections
for i in range(thread_count):
connections.append(self.env.getConnection())
# invoke queries
result = pool.map(issue_query, connections, qs)
# return if error encountered
return any(result)
def test_02_overflow_no_limit(self):
# no limit on number of pending queries
limit = 4294967295
self.conn.execute_command("GRAPH.CONFIG", "SET", "MAX_QUEUED_QUERIES", limit)
error_encountered = self.stress_server()
self.env.assertFalse(error_encountered)
def test_03_overflow_with_limit(self):
# limit number of pending queries
limit = 1
self.conn.execute_command("GRAPH.CONFIG", "SET", "MAX_QUEUED_QUERIES", limit)
error_encountered = self.stress_server()
self.env.assertTrue(error_encountered)
|
the-stack_0_17122
|
import numpy as np
import random
#### Version that maintains IDs
def new_cluster_points(X, mu):
clusters = {}
# this is for excluding IDs from the calculation
tmp_mu = []
for point in mu:
tmp_mu.append(point[1:13])
for x in X:
tmp_x = x[1:13]
        # np.linalg.norm computes the Euclidean length of a vector.
        # Here it measures the distance between the sample vector and each centroid,
        # and the mean with the minimum distance is selected as the best mean.
bestmukey = min([(i[0], np.linalg.norm(tmp_x-tmp_mu[i[0]])) for i in enumerate(tmp_mu)], key=lambda t:t[1])[0]
try:
clusters[bestmukey].append(x)
except KeyError:
clusters[bestmukey] = [x]
return clusters
def new_reevaluate_centers(mu, clusters):
newmu = []
keys = sorted(clusters.keys())
for k in keys:
tmp_mu = []
for point in clusters[k]:
tmp_mu.append(point[1:13])
newmean = np.mean(tmp_mu, axis = 0)
newmean = np.insert(newmean, 0, 0)
newmu.append(newmean)
return newmu
def new_has_converged(mu, oldmu):
tmp_mu = []
tmp_oldmu = []
for point in mu:
tmp_mu.append(point[1:13])
for point in oldmu:
tmp_oldmu.append(point[1:13])
return (set([tuple(a) for a in tmp_mu]) == set([tuple(a) for a in tmp_oldmu]))
def new_find_centers(X, K):
oldmu = random.sample(X, K)
mu = random.sample(X, K)
while not new_has_converged(mu, oldmu):
oldmu = mu
# Assign all points in X to clusters
clusters = new_cluster_points(X, mu)
# Reevaluate centers
mu = new_reevaluate_centers(oldmu, clusters)
try:
clusters
except:
clusters = new_cluster_points(X, mu) # added to avoid null cluster
return(mu, clusters)
### Original clustering functions without maintaining IDs (allowing multiple dimensions)
def cluster_points(X, mu):
clusters = {}
for x in X:
bestmukey = min([(i[0], np.linalg.norm(x-mu[i[0]])) for i in enumerate(mu)], key=lambda t:t[1])[0]
try:
clusters[bestmukey].append(x)
except KeyError:
clusters[bestmukey] = [x]
return clusters
def reevaluate_centers(mu, clusters):
newmu = []
keys = sorted(clusters.keys())
for k in keys:
newmu.append(np.mean(clusters[k], axis = 0))
return newmu
def has_converged(mu, oldmu):
return (set([tuple(a) for a in mu]) == set([tuple(a) for a in oldmu]))
def find_centers(X, K):
# Initialize to K random centers
oldmu = random.sample(X, K)
mu = random.sample(X, K)
while not has_converged(mu, oldmu):
oldmu = mu
# Assign all points in X to clusters
clusters = cluster_points(X, mu)
# Reevaluate centers
mu = reevaluate_centers(oldmu, clusters)
try:
clusters
except:
clusters = cluster_points(X, mu) # added to avoid null cluster
return(mu, clusters)
def Wk(mu, clusters):
K = len(mu)
try:
r = sum([np.linalg.norm(mu[i]-c)**2/(2*len(c)) for i in range(K) for c in clusters[i]])
except:
r = 1
print("index error")
return r
def bounding_box(X):
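    # Per-dimension minima and maxima, e.g. bounding_box([[1, 2], [4, 5]]) -> ([1, 2], [4, 5])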
size = len(X[0])
xmins = [0 for x in range(size)]
xmaxs = [0 for x in range(size)]
for i in range(0, size):
xmins[i], xmaxs[i] = min(X,key=lambda a:a[i])[i], max(X,key=lambda a:a[i])[i]
return (xmins,xmaxs)
def gap_statistic(X, num_k):
(xmins,xmaxs) = bounding_box(X)
# Dispersion for real distribution
ks = range(1,num_k)
Wks = np.zeros(len(ks))
Wkbs = np.zeros(len(ks))
sk = np.zeros(len(ks))
for indk, k in enumerate(ks):
print("K:" + str(k))
mu, clusters = find_centers(X,k)
Wks[indk] = np.log(Wk(mu, clusters))
# Create B reference datasets
B = 10
BWkbs = np.zeros(B)
for i in range(B):
# print("B: " + str(i))
Xb = []
for n in range(len(X)):
randomvalues = []
for index in range(len(xmins)):
randomvalues.insert(0, random.uniform(xmins[index], xmaxs[index]))
Xb.append(randomvalues)
Xb = np.array(Xb)
mu, clusters = find_centers(Xb,k)
BWkbs[i] = np.log(Wk(mu, clusters))
Wkbs[indk] = sum(BWkbs)/B
sk[indk] = np.sqrt(sum((BWkbs-Wkbs[indk])**2)/B)
sk = sk*np.sqrt(1+1/B)
return(ks, Wks, Wkbs, sk)
#example
input_list = np.array([[1, 2], [4, 5], [4, 3], [4, 5], [3, 3], [1, 3], [7, 8]])
num_k=3
# to start the gap analysis to determine K
ks, logWks, logWkbs, sk = gap_statistic(input_list, num_k)
print (ks, logWks, logWkbs, sk)
|
the-stack_0_17124
|
from os import environ
import constructs
from aws_cdk import aws_iam
from aws_cdk.core import Environment, Stack
from backend.environment import environment_name
from .constructs.api import API
from .constructs.lambda_layers import LambdaLayers
from .constructs.lds import LDS
from .constructs.notify import Notify
from .constructs.opentopo import OpenTopography
from .constructs.processing import Processing
from .constructs.staging import Staging
from .constructs.storage import Storage
class Application(Stack):
def __init__(self, scope: constructs.Construct, stack_id: str) -> None:
environment = Environment(
account=environ["CDK_DEFAULT_ACCOUNT"], region=environ["CDK_DEFAULT_REGION"]
)
super().__init__(scope, stack_id, env=environment)
env_name = environment_name()
principal: aws_iam.PrincipalBase
if saml_provider_arn := environ.get("GEOSTORE_SAML_IDENTITY_PROVIDER_ARN"):
principal = aws_iam.FederatedPrincipal(
federated=saml_provider_arn,
assume_role_action="sts:AssumeRoleWithSAML",
conditions={"StringEquals": {"SAML:aud": "https://signin.aws.amazon.com/saml"}},
)
else:
principal = aws_iam.AccountPrincipal(
account_id=aws_iam.AccountRootPrincipal().account_id
)
storage = Storage(self, "storage", env_name=env_name)
lambda_layers = LambdaLayers(self, "lambda-layers", env_name=env_name)
processing = Processing(
self,
"processing",
botocore_lambda_layer=lambda_layers.botocore,
env_name=env_name,
principal=principal,
storage_bucket=storage.storage_bucket,
validation_results_table=storage.validation_results_table,
)
Staging(self, "staging", users_role=processing.staging_users_role)
API(
self,
"api",
botocore_lambda_layer=lambda_layers.botocore,
datasets_table=storage.datasets_table,
env_name=env_name,
principal=principal,
state_machine=processing.state_machine,
state_machine_parameter=processing.state_machine_parameter,
sqs_queue=processing.message_queue,
sqs_queue_parameter=processing.message_queue_name_parameter,
storage_bucket=storage.storage_bucket,
validation_results_table=storage.validation_results_table,
)
Notify(
self,
"notify",
botocore_lambda_layer=lambda_layers.botocore,
env_name=env_name,
state_machine=processing.state_machine,
validation_results_table=storage.validation_results_table,
)
if self.node.try_get_context("enableLDSAccess"):
LDS(self, "lds", env_name=env_name, storage_bucket=storage.storage_bucket)
if self.node.try_get_context("enableOpenTopographyAccess"):
OpenTopography(
self, "opentopography", env_name=env_name, storage_bucket=storage.storage_bucket
)
|
the-stack_0_17125
|
# -*- coding: utf-8 -*-
############################################################################
#
# Copyright © 2011, 2012, 2013, 2014, 2015 E-Democracy.org and
# Contributors.
#
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
############################################################################
import codecs
import os
from setuptools import setup, find_packages
from version import get_version
name = 'gs.auth.oauth.client'
version = get_version()
with codecs.open('README.rst', encoding='utf-8') as f:
long_description = f.read()
with codecs.open(os.path.join("docs", "HISTORY.rst"),
encoding='utf-8') as f:
long_description += '\n' + f.read()
setup(
name=name,
version=version,
    description="oauth2 client registration methods",
long_description=long_description,
classifiers=[
'Development Status :: 5 - Production/Stable',
"Environment :: Web Environment",
"Framework :: Zope2",
"Intended Audience :: Developers",
'License :: OSI Approved :: Zope Public License',
"Natural Language :: English",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
],
    keywords='oauth2, client, registration',
author='Richard Waid',
author_email='[email protected]',
maintainer='Michael JasonSmith',
maintainer_email='[email protected]',
url='https://github.com/groupserver/{0}'.format(name),
license='ZPL 2.1',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['.'.join(name.split('.')[:i])
for i in range(1, len(name.split('.')))],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'simplejson',
],
test_suite="{0}.tests.test_all".format(name),
entry_points="""
# -*- Entry points: -*-
""",
)
|
the-stack_0_17126
|
#%%
# read full assignment
# think algo before implementing
# dont use a dict when you need a list
# assignment is still = and not ==
# dont use itertools when you can use np.roll
# check mathematical functions to see if the parentheses are ok
# networkx is awesome
# sometimes while true is better than a for loop that is just too small
# networkx adds nodes when adding an edge to a nonexistent node
# bitwise comparison is a nice trick
# fiddling with regex can take a lot of time
# %%
import os
import re
import numpy as np
try:
os.chdir(os.path.join(os.getcwd(), 'day 17'))
print(os.getcwd())
except:
pass
# %%
step = 369
buffer = [0]
pos = 0
for p in range(1,2018):
if p%1000000==0: print(p)
pos = (pos+step)%len(buffer)+1
buffer.insert(pos,p)
buffer[buffer.index(2017)+1]
# %%
# part 2
step = 369
buffer = 1
pos = 0
res = []
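# Part 2: value 0 never moves from index 0, so only insertions landing at index 1
# (the slot right after 0) matter; the last value appended to res is the answer.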
for p in range(1,50000000):
if p%1000000==0: print(p)
pos = (pos+step)%buffer+1
if pos == 1 :
print(p)
res.append(p)
buffer+=1
res
# %%
# found this one from the megathread on reddit
from collections import deque
step = 369
spinlock = deque([0])
for i in range(1, 2018):
spinlock.rotate(-step)
spinlock.append(i)
print(spinlock[0])
|
the-stack_0_17127
|
from collections import defaultdict
class Graph():
def __init__(self,vertices):
self.graph = defaultdict(list)
self.V = vertices
def addEdge(self,u,v):
self.graph[u].append(v)
def isCyclicUtil(self,v,visited,recStack):
visited[v] = True
recStack[v] = True
for neighbour in self.graph[v]:
if visited[neighbour] == False:
if self.isCyclicUtil(neighbour,visited,recStack) == True:
return True
elif recStack[neighbour] == True:
return True
#remove from recStack
recStack[v]= False
return False
def isCyclic(self):
visited = [False]*self.V
recStack = [False]*self.V
# loop through all nodes.
for node in range(self.V):
if visited[node]==False:
if self.isCyclicUtil(node,visited,recStack) == True:
return True
# no cycles found
return False
g = Graph(4)
g.addEdge(0, 1)
g.addEdge(0, 2)
g.addEdge(1, 2)
g.addEdge(2, 0)
g.addEdge(2, 3)
g.addEdge(3, 3)
if g.isCyclic() == 1:
print("Graph has a cycle")
else:
print("Graph has no cycle")
|
the-stack_0_17129
|
from dtc.enums.message_types import MessageTypes
from lib.base_message_type import BaseMessageType
class JournalEntryResponse(BaseMessageType):
def __init__(self,
journal_entry=None,
date_time=None,
is_final_response=None):
self.Type = MessageTypes.JOURNAL_ENTRY_RESPONSE
self.JournalEntry = journal_entry
self.DateTime = date_time
self.IsFinalResponse = is_final_response
@staticmethod
def from_message_short(message_obj):
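        # Illustrative compact wire form (hypothetical values):
        # {'F': ['Filled order #12', 1577836800, 1]} maps to
        # journal_entry='Filled order #12', date_time=1577836800, is_final_response=1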
packet = message_obj.get('F')
return JournalEntryResponse(
journal_entry=packet[0],
date_time=packet[1],
is_final_response=packet[2]
)
@staticmethod
def from_message_long(message_obj):
return JournalEntryResponse(
journal_entry=message_obj.get('JournalEntry'),
date_time=message_obj.get('DateTime'),
is_final_response=message_obj.get('IsFinalResponse')
)
@staticmethod
def from_message(message_obj):
if 'F' in message_obj:
return JournalEntryResponse.from_message_short(message_obj)
else:
return JournalEntryResponse.from_message_long(message_obj)
@staticmethod
def get_message_type_name():
return "JournalEntryResponse"
|
the-stack_0_17133
|
from sys import platform
import sys
import os
import numpy as np
import shutil
import json
from FunctionalPackage import State
import random
from queue import Queue  # FIFO queue
import re
import copy
if platform == "linux" or platform == "linux2":# this is linux
os.environ['SUMO_HOME'] = '/usr/share/sumo'
try:
import traci
import traci.constants as tc
except ImportError:
if "SUMO_HOME" in os.environ:
print(os.path.join(os.environ["SUMO_HOME"], "tools"))
sys.path.append(
os.path.join(os.environ["SUMO_HOME"], "tools")
)
try:
import traci
import traci.constants as tc
except ImportError:
raise EnvironmentError("Please set SUMO_HOME environment variable or install traci as python module!")
else:
raise EnvironmentError("Please set SUMO_HOME environment variable or install traci as python module!")
elif platform == "win32":
os.environ['SUMO_HOME'] = 'C:\\Program Files (x86)\\DLR\\Sumo'
try:
import traci
import traci.constants as tc
except ImportError:
if "SUMO_HOME" in os.environ:
print(os.path.join(os.environ["SUMO_HOME"], "tools"))
sys.path.append(
os.path.join(os.environ["SUMO_HOME"], "tools")
)
try:
import traci
import traci.constants as tc
except ImportError:
raise EnvironmentError("Please set SUMO_HOME environment variable or install traci as python module!")
else:
raise EnvironmentError("Please set SUMO_HOME environment variable or install traci as python module!")
elif platform =='darwin':
os.environ['SUMO_HOME'] = "/Users/{0}/sumo/sumo-git".format(os.getlogin())
try:
import traci
import traci.constants as tc
except ImportError:
if "SUMO_HOME" in os.environ:
print(os.path.join(os.environ["SUMO_HOME"], "tools"))
sys.path.append(
os.path.join(os.environ["SUMO_HOME"], "tools")
)
try:
import traci
import traci.constants as tc
except ImportError:
raise EnvironmentError("Please set SUMO_HOME environment variable or install traci as python module!")
else:
raise EnvironmentError("Please set SUMO_HOME environment variable or install traci as python module!")
else:
sys.exit("platform error")
CAV_rate = 0.5 # smart rate
carWidth = 3
grid_width = 4
area_length = 1000
travel_time_scale = 300
listLanes = ['edge1-0_1', 'edge1-0_2', 'edge2-0_1', 'edge2-0_2',
'edge3-0_1', 'edge3-0_2', 'edge4-0_1', 'edge4-0_2']
intelli_edges = ['edge1-0', 'edge2-0', 'edge3-0', 'edge4-0']
center_edges = ['-gneE10', '-gneE11', '-gneE12', '-gneE13']
routeID_list = ['routewe', 'routeew', 'routesn', 'routens']
route_lane_id_list = [
[[['-gneE10_1', '-gneE10_2'], ['edge1-0_1', 'edge1-0_2'], ['edge0-2_1', 'edge0-2_2']],
[['-gneE10_0'], ['gneE3_2'], ['gneE4_1'], ['gneE5_2'], ['gneE6_0']]],
[[['-gneE11_1', '-gneE11_2'], ['edge2-0_1', 'edge2-0_2'], ['edge0-1_1', 'edge0-1_2']],
[['-gneE11_0'], ['gneE7_2'], ['-gneE9_1'], ['-gneE2_2'], ['-gneE1_0']]],
[[['-gneE12_1', '-gneE12_2'], ['edge3-0_1', 'edge3-0_2'], ['edge0-4_1', 'edge0-4_2']],
[['-gneE12_0'], ['gneE5_2'], ['gneE6_1'], ['gneE7_2'], ['-gneE9_0']]],
[[['-gneE13_1', '-gneE13_2'], ['edge4-0_1', 'edge4-0_2'], ['edge0-3_1', 'edge0-3_2']],
[['-gneE13_0'], ['-gneE2_2'], ['-gneE1_1'], ['gneE3_2'], ['gneE4_0']]]
]
# route_lane_id_list[one of the four approach directions][straight route or reroute][edges along the route][lanes on each edge]
def get_list(my_list, new_list):
for item in my_list:
if isinstance(item, list):
new_list = get_list(item, new_list)
else:
# print(item)
new_list.append(item)
return new_list
intelli_lanes = [[['edge1-0_1','edge1-0_2'],['edge1-0_3']],
[['edge2-0_1','edge2-0_2'],['edge2-0_3']],
[['edge3-0_1','edge3-0_2'],['edge3-0_3']],
[['edge4-0_1','edge4-0_2'],['edge4-0_3']]]
intelli_loops = [[['e1Detector_edge1-0_1_3','e1Detector_edge1-0_2_4'],['e1Detector_edge1-0_3_5']],
[['e1Detector_edge2-0_1_15','e1Detector_edge2-0_2_16'],['e1Detector_edge2-0_3_17']],
[['e1Detector_edge3-0_1_28','e1Detector_edge3-0_2_27'],['e1Detector_edge3-0_3_29']],
[['e1Detector_edge4-0_1_37','e1Detector_edge4-0_2_38'],['e1Detector_edge4-0_3_39']]]
intelli_loops_outgoing = [[['e1Detector_edge1-0_1_3_outgoing','e1Detector_edge1-0_2_4_outgoing'],['e1Detector_edge1-0_3_5_outgoing']],
[['e1Detector_edge2-0_1_15_outgoing','e1Detector_edge2-0_2_16_outgoing'],['e1Detector_edge2-0_3_17_outgoing']],
[['e1Detector_edge3-0_1_28_outgoing','e1Detector_edge3-0_2_27_outgoing'],['e1Detector_edge3-0_3_29_outgoing']],
[['e1Detector_edge4-0_1_37_outgoing','e1Detector_edge4-0_2_38_outgoing'],['e1Detector_edge4-0_3_39_outgoing']]]
intelli_lanes_list=[]
intelli_lanes_list = get_list(intelli_lanes,intelli_lanes_list)
intelli_eff_lane_num = 8
through_lane_id_list = []
through_lane_id_list = get_list(route_lane_id_list, through_lane_id_list)
through_lane_id_list += ['edge1-0_3','edge2-0_3','edge3-0_3','edge4-0_3']
base_travel_time = 500 / 15 # base travel time length/ speed
timeslot_factor=1
reward_weight = 1.0
C = 30*timeslot_factor # cycle length (sec)
s = 2*1800 / 3600 # vehicles per sec
Step_len = C # seconds
node_light_7 = "node0"
normal_speed = 13
varrho = 0.7
class SUMO_Agent(object):
def __init__(self, sumo_cmd_str, path_set, action_dim):
self.path_set = path_set
self.start_sumo(sumo_cmd_str)
self.induction_loop_ID_list = traci.inductionloop.getIDList()
self.model_based_TT = {'0':[],'1':[],'2':[],'3':[]}
self.speed_based_TT = {'0':[],'1':[],'2':[],'3':[]}
self.induction_loop_num = dict()
for loop_id in self.induction_loop_ID_list:
self.induction_loop_num[loop_id] = Queue()
for i in range(Step_len*4):
self.induction_loop_num[loop_id].put(0)
self.induction_loop_arrival_rate = dict(zip(intelli_lanes_list, list(np.zeros(len(intelli_lanes_list)))))
self.dic_vehicles = {}
self.current_phase = 0
self.current_phase_duration = 0
self.select_space_length = 50 # 50 grids per lane
self.advise1 = 0 # straight lane
if action_dim <= 2:
self.advise2 = 0
else:
self.advise2 = list(np.zeros(4))
self.advise3 = 0 # turn left lane
# vehicles information
self.all_vehs_info = dict()
        # dictionary recording the information of every vehicle over the whole simulation;
        # the first four elements for each vehicle are
        # [accu. wait time, enter time, travel time, type_index(0: in other road; 1: straight in main road; 2: shunt in main road)]
        # (the full seven-element layout is documented in update_vehs_set)
self.new_vehs = set()
self.current_all_vehs = set()
self.main_vehs = set()
self.main_new_vehs = set()
self.main_new_vehs_4decision = set()
self.main_new_turn_vehs = set()
self.last_step_all_vehs = set()
self.last_step_main_vehs = set()
self.over_sau_time = list(np.zeros(8))
self.straight_num = np.zeros(4)
self.shunt_num = np.zeros(4)
self.smart_num = np.zeros(4)
self.lanes_travel_time_dict = dict()
self.lanes_veh_Num_time_dict = dict()
self.lanes_MeanSpeed_dict = dict()
# self.travel_time_update_lanes = []
self.MeanSpeed_update_lanes = []
for lane in through_lane_id_list:
# self.travel_time_update_lanes.append(lane)
self.MeanSpeed_update_lanes.append(lane)
self.lanes_travel_time_dict[lane] = 500/normal_speed
self.lanes_veh_Num_time_dict[lane]=0
self.lanes_MeanSpeed_dict[lane] = [normal_speed]
self.update_state()
self.share_straight_travel_time = np.zeros(4)
self.share_reroute_travel_time = np.zeros(4)
self.real_g_ratio = 1/2 *np.ones(4)
def start_sumo(self, sumo_cmd_str):
traci.start(sumo_cmd_str)
def status_calculator(self):
# vehs_num,
# queue_len,
# current_phase,
# est_arrival_rate,
# ave_traval_time:
for lane in self.MeanSpeed_update_lanes:
# self.lanes_travel_time_dict[lane].append(np.clip(traci.lane.getTraveltime(lane),0,300))
Lane_veh_Num = self.lanes_veh_Num_time_dict[lane]
MeanSpeed = np.mean(self.lanes_MeanSpeed_dict[lane])
if MeanSpeed ==0:
est_traval_time =(Lane_veh_Num/70) *300 + 500/normal_speed
else:
est_traval_time = 500/MeanSpeed
self.lanes_travel_time_dict[lane]=np.clip(est_traval_time,0,300)
edge_NumVehiclesTracker = []
edge_QueueTracker = []
edge_arrival_rateTracker = []
edge_shunt_ave_traval_timeTracker = []
current_phaseTracker = traci.trafficlight.getPhase(node_light_7)
edge_straight_ave_traval_timeTracker = []
edge_straight_intelli_ave_traval_timeTracker = []
edge_outgoing_rateTracker = []
# ================ count vehicles in edge
for eff_lane_idx in range(len(intelli_lanes)):
straight_double_lanes = intelli_lanes[eff_lane_idx][0]
lane1_veh_num = traci.lane.getLastStepVehicleNumber(straight_double_lanes[0])/100
lane2_veh_num = traci.lane.getLastStepVehicleNumber(straight_double_lanes[1])/100
edge_NumVehiclesTracker.append(lane1_veh_num+lane2_veh_num)
for eff_lane_idx in range(len(intelli_lanes)):
leftTurn_single_lanes = intelli_lanes[eff_lane_idx][1]
lane3_veh_num = traci.lane.getLastStepVehicleNumber(leftTurn_single_lanes[0])/100
edge_NumVehiclesTracker.append(lane3_veh_num)
# ================= COUNT HALTED VEHICLES (I.E. QUEUE SIZE)
for eff_lane_idx in range(len(intelli_lanes)):
straight_double_lanes = intelli_lanes[eff_lane_idx][0]
lane1_veh_num = traci.lane.getLastStepHaltingNumber(straight_double_lanes[0])/100
lane2_veh_num = traci.lane.getLastStepHaltingNumber(straight_double_lanes[1])/100
edge_QueueTracker.append(lane1_veh_num+lane2_veh_num)
for eff_lane_idx in range(len(intelli_lanes)):
leftTurn_single_lanes = intelli_lanes[eff_lane_idx][1]
lane3_veh_num = traci.lane.getLastStepHaltingNumber(leftTurn_single_lanes[0])/100
edge_QueueTracker.append(lane3_veh_num)
# ================= Arrive Rate
for eff_loop_idx in range(len(intelli_loops)):
straight_double_lanes = intelli_lanes[eff_loop_idx][0]
straight_double_loops = intelli_loops[eff_loop_idx][0]
lane_arrive = np.zeros(2)
for loop_idx in range(len(straight_double_loops)):
loop_id = straight_double_loops[loop_idx]
lane_id = straight_double_lanes[loop_idx]
last_step_mean_speed = traci.inductionloop.getLastStepMeanSpeed(loop_id)
last_step_vehs_num = traci.lane.getLastStepVehicleNumber(lane_id)
if (last_step_mean_speed < 5) and (last_step_vehs_num > 70):
lane_arrive[loop_idx] = s/2
else:
lane_arrive[loop_idx]= np.mean(np.array(self.induction_loop_num[loop_id].queue))
edge_arrival_rateTracker.append(np.sum(lane_arrive))
for eff_loop_idx in range(len(intelli_loops)):
leftTurn_single_lanes = intelli_lanes[eff_loop_idx][1]
leftTurn_single_loops = intelli_loops[eff_loop_idx][1]
loop_id = leftTurn_single_loops[0]
lane_id = leftTurn_single_lanes[0]
last_step_mean_speed = traci.inductionloop.getLastStepMeanSpeed(loop_id)
last_step_vehs_num = traci.lane.getLastStepVehicleNumber(lane_id)
if (last_step_mean_speed < 5) and (last_step_vehs_num > 70):
lane_arrive = s/2
else:
lane_arrive= np.mean(np.array(self.induction_loop_num[loop_id].queue))
edge_arrival_rateTracker.append(lane_arrive)
# ================= Outgoing Rate
for eff_loop_idx in range(len(intelli_loops_outgoing)):
straight_double_lanes = intelli_lanes[eff_loop_idx][0]
straight_double_loops = intelli_loops_outgoing[eff_loop_idx][0]
lane_arrive = np.zeros(2)
for loop_idx in range(len(straight_double_loops)):
loop_id = straight_double_loops[loop_idx]
lane_id = straight_double_lanes[loop_idx]
last_step_mean_speed = traci.inductionloop.getLastStepMeanSpeed(loop_id)
last_step_vehs_num = traci.lane.getLastStepVehicleNumber(lane_id)
lane_arrive[loop_idx]= np.mean(np.array(self.induction_loop_num[loop_id].queue))
edge_outgoing_rateTracker.append(np.sum(lane_arrive))
for eff_loop_idx in range(len(intelli_loops_outgoing)):
leftTurn_single_lanes = intelli_lanes[eff_loop_idx][1]
leftTurn_single_loops = intelli_loops_outgoing[eff_loop_idx][1]
loop_id = leftTurn_single_loops[0]
lane_id = leftTurn_single_lanes[0]
last_step_mean_speed = traci.inductionloop.getLastStepMeanSpeed(loop_id)
last_step_vehs_num = traci.lane.getLastStepVehicleNumber(lane_id)
lane_arrive= np.mean(np.array(self.induction_loop_num[loop_id].queue))
edge_outgoing_rateTracker.append(lane_arrive)
for route_index in range(len(route_lane_id_list)):
shunt_route = route_lane_id_list[route_index][1]
route_travel_time = 0
for lanes_list in shunt_route:
lanes_travel = 0
for lane in lanes_list:
lane_travel = self.lanes_travel_time_dict[lane]
lanes_travel += lane_travel
lanes_travel = lanes_travel/len(lanes_list)
route_travel_time += lanes_travel
route_travel_time += 500/15
edge_shunt_ave_traval_timeTracker.append(route_travel_time/travel_time_scale)
for route_index in range(len(route_lane_id_list)):
straight_route = route_lane_id_list[route_index][0]
straight_route_stat_based = [straight_route[0], straight_route[2]]
route_travel_time = 0
for lanes_list in straight_route_stat_based:
lanes_travel = 0
for lane in lanes_list:
lane_travel = self.lanes_travel_time_dict[lane]
lanes_travel += lane_travel
lanes_travel = lanes_travel/len(lanes_list)
route_travel_time += lanes_travel
route_travel_time += 500/15
edge_straight_ave_traval_timeTracker.append(route_travel_time/travel_time_scale)
for route_index in range(len(route_lane_id_list)):
straight_route = route_lane_id_list[route_index][0]
straight_route_intelli = [straight_route[1]]
route_travel_time = 0
for lanes_list in straight_route_intelli:
lanes_travel = 0
for lane in lanes_list:
lanes_travel += self.lanes_travel_time_dict[lane]
lanes_travel = lanes_travel / len(lanes_list)
route_travel_time += lanes_travel
edge_straight_intelli_ave_traval_timeTracker.append(route_travel_time/travel_time_scale)
for eff_lane_idx in range(len(intelli_lanes)):
leftTurn_single_lanes = intelli_lanes[eff_lane_idx][1]
lane_id = leftTurn_single_lanes[0]
lane3_travel = self.lanes_travel_time_dict[lane_id]
edge_straight_intelli_ave_traval_timeTracker.append(lane3_travel/travel_time_scale)
return [edge_NumVehiclesTracker, edge_QueueTracker, current_phaseTracker, edge_arrival_rateTracker,
edge_shunt_ave_traval_timeTracker, edge_straight_ave_traval_timeTracker, edge_straight_intelli_ave_traval_timeTracker,
edge_outgoing_rateTracker]
def update_state(self):
self.status_tracker = self.status_calculator()
max_we = get_max_queue_length(['edge1-0_1', 'edge1-0_2', 'edge2-0_1', 'edge2-0_2'])
max_sn = get_max_queue_length(['edge3-0_1', 'edge3-0_2', 'edge4-0_1', 'edge4-0_2'])
if max_we > 50:
self.advise1 = 1 *(random.random()>0.5)
elif max_sn > 50:
self.advise1 = -1*(random.random()>0.5)
max_we_turn_left = get_max_queue_length(['edge1-0_3', 'edge2-0_3'])
max_sn_turn_left = get_max_queue_length(['edge3-0_3', 'edge4-0_3'])
if max_we_turn_left > 50:
self.advise3 = 1*(random.random()>0.5)
elif max_sn_turn_left > 50:
self.advise3 = -1*(random.random()>0.5)
self.state = State(vehs_num=np.reshape(np.array(self.status_tracker[0]), newshape=(1, 8)),
queue_len=np.reshape(np.array(self.status_tracker[1]), newshape=(1, 8)),
est_arrival_rate=np.reshape(np.array(self.status_tracker[3]), newshape=(1, 8)),
over_sau_time=np.reshape(np.array(self.over_sau_time)/300, newshape=(1, 8)),
ave_shunt_traval_time=np.reshape(np.array(self.status_tracker[4]), newshape=(1, 4)),
ave_straight_traval_time=np.reshape(np.array(self.status_tracker[5]), newshape=(1, 4)),
ave_itelli_traval_time=np.reshape(np.array(self.status_tracker[6]), newshape=(1, 8)),
current_phase=np.reshape(np.array(self.status_tracker[2]), newshape=(1, 1)),)
def travel_time_model_based(self, g_ratio, v, route_index):
if route_index>3:
s_tem = (s/2)
que_thre = 15
else:
s_tem = s
que_thre = 30
if self.status_tracker[1][route_index]*100 >que_thre:
c = self.status_tracker[7][route_index]
else:
c = s_tem * g_ratio
if c==0:
X = 2
else:
X = v/c
if g_ratio == 1:
uniform_delay = 0
else:
uniform_delay = (C/2)*((1-g_ratio)**2/(1-min(X, 1)*g_ratio))
if (X < varrho) :
if X == 0:
add_delay = 0
else:
add_delay = X**2/(2*v*(1-X))
else:
X0 = 0.67 + s_tem * g_ratio * C / 600
if c == 0:
add_delay = ((2 * self.over_sau_time[route_index] + Step_len) * 1 / 4) * (
(X - 1) + np.sqrt((X - 1) ** 2 + (12 * (X - X0) / (1 * (2 * self.over_sau_time[route_index] + Step_len)))))
else:
add_delay = ((2 * self.over_sau_time[route_index] + Step_len) * 1 / 4) * (
(X - 1) + np.sqrt((X - 1) ** 2 + (12 * (X - X0) / (c * (2 * self.over_sau_time[route_index] + Step_len)))))
total_travel_time = min(base_travel_time + uniform_delay + add_delay, 300)
return total_travel_time
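    # Editorial note, not in the original source: the expression above appears to
    # follow the classic signalised-intersection delay form, i.e. a uniform-delay
    # term (C/2)*(1-g)^2/(1-min(X,1)*g) plus a time-dependent oversaturation term,
    # with cycle length C, green ratio g, demand v and capacity c = s*g (or the
    # measured outgoing rate once the queue is long), and the result capped at 300 s.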
def return_reward(self, g_ratio):
vehs_num = 0
travel_time = 0
travel_time_existing = 0
vehs_num_existing = 0
for route_index in range(4):
vehs_num += self.shunt_num[route_index] + self.straight_num[route_index]
travel_time += (
self.shunt_num[route_index] * (self.share_reroute_travel_time[route_index]/travel_time_scale) + self.straight_num[route_index] *
(self.share_straight_travel_time[route_index]/travel_time_scale))
travel_time_existing += (self.travel_time_model_based(
g_ratio[0] if route_index < 2 else g_ratio[2], self.status_tracker[3][route_index], route_index)/travel_time_scale * self.status_tracker[0][route_index]*100 + self.travel_time_model_based(
g_ratio[1] if route_index < 2 else g_ratio[3], self.status_tracker[3][4+route_index], 4+route_index)/travel_time_scale * self.status_tracker[0][4+route_index]*100)
if self.status_tracker[1][route_index]*100 >30:
c = self.status_tracker[7][route_index]
else:
c = (s * (g_ratio[0] if route_index < 2 else g_ratio[2]))
if self.status_tracker[3][route_index]> (c):
self.over_sau_time[route_index] = min(self.over_sau_time[route_index] + Step_len, 300*timeslot_factor)
else:
self.over_sau_time[route_index] = max(self.over_sau_time[route_index]-Step_len, 0)
if (self.status_tracker[1][route_index]*100 <15) and (self.status_tracker[0][route_index]*100 <15) and (self.over_sau_time[route_index]>60):
self.over_sau_time[route_index] = 0
if self.status_tracker[1][route_index+4]*100 >15:
c = self.status_tracker[7][route_index+4]
else:
c = ((s/2) * (g_ratio[1] if route_index < 2 else g_ratio[3]))
if self.status_tracker[3][4+route_index]>(c):
self.over_sau_time[4+route_index] = min(self.over_sau_time[4+route_index] + Step_len, 300*timeslot_factor)
else:
self.over_sau_time[4+route_index] = max(self.over_sau_time[4+route_index]-Step_len, 0)
if (self.status_tracker[1][route_index+4]*100 <7) and (self.status_tracker[0][route_index+4]*100 <7) and (self.over_sau_time[route_index+4]>60):
self.over_sau_time[route_index+4] = 0
vehs_num_existing += (self.status_tracker[0][route_index]+self.status_tracker[0][4+route_index])*100
if vehs_num > 0:
new_vehs_reward = 200/travel_time_scale - travel_time/vehs_num
else:
new_vehs_reward = 0
if vehs_num_existing > 0:
existing_vehs_reward = 50/travel_time_scale - travel_time_existing/vehs_num_existing
else:
existing_vehs_reward = 0
reward = (reward_weight*existing_vehs_reward + new_vehs_reward)/2
reward = max(min(reward, 1), -1)
return reward
def turn_right_ratio_based(self, ratio):
# ratio: including the turn ratio for four edges(1*4)
for veh_id in self.main_new_vehs_4decision:
edge_id = traci.vehicle.getRoadID(veh_id)
route_index = center_edges.index(edge_id)
# center_edges = ['edge1-0', 'edge2-0', 'edge3-0', 'edge4-0']
target_ratio = ratio[route_index]
current_total = self.shunt_num[route_index]+self.straight_num[route_index]
if self.shunt_num[route_index] == 0:
current_ratio = 0
else:
current_ratio = self.shunt_num[route_index] / (current_total)
rnd = np.random.rand(1)
self.all_vehs_info[veh_id][6]= route_index
if rnd < CAV_rate:
self.smart_num[route_index] += 1
if current_ratio < target_ratio:
self.shunt_num[route_index] += 1
self.all_vehs_info[veh_id][3] = 2
traci.vehicle.setRouteID(veh_id, routeID_list[route_index])
traci.vehicle.setColor(veh_id, (255, 0, 0))
self.all_vehs_info[veh_id][5] = self.share_reroute_travel_time[route_index]
else:
self.straight_num[route_index] += 1
self.all_vehs_info[veh_id][5] = self.share_straight_travel_time[route_index]
else:
self.straight_num[route_index] += 1
self.all_vehs_info[veh_id][5] = self.share_straight_travel_time[route_index]
def update_vehs_set(self):
self.main_vehs = set()
self.main_new_vehs_4decision = set()
self.current_all_vehs = set(traci.vehicle.getIDList())
self.new_vehs = self.current_all_vehs.symmetric_difference(
self.last_step_all_vehs.intersection(self.current_all_vehs)) # new vehicles
for veh_id in (self.current_all_vehs - self.new_vehs): # update accu. wait and travel time of existing vehicles
self.all_vehs_info[veh_id][0] = traci.vehicle.getAccumulatedWaitingTime(veh_id)
self.all_vehs_info[veh_id][2] = traci.simulation.getCurrentTime() - self.all_vehs_info[veh_id][1]
self.all_vehs_info[veh_id][4] += traci.vehicle.getFuelConsumption(veh_id)
for veh_id in self.current_all_vehs:
edge_id = traci.vehicle.getRoadID(veh_id)
if edge_id in center_edges:
self.main_vehs = self.main_vehs.union(set([veh_id])) # vehicles in main edge
# new vehicles in main edge
self.main_new_vehs = self.main_vehs.symmetric_difference(self.last_step_main_vehs.intersection(self.main_vehs))
# record the set for finding the new vehicle in next duration
self.last_step_all_vehs = self.current_all_vehs
self.last_step_main_vehs = self.main_vehs
# define the information about new vehicles
        # Frame form: [AccumulatedWaitingTime, EnteringTime, TravelTime, Flag(0: not in main road, 1: straight in main road, 2: rerouted in main road, 3: turn left), FuelConsumption, EstimatedTravelTime, EnterDirection(1: west, 2: east, 3: south, 4: north)]
for veh_id in (self.new_vehs - self.main_new_vehs):
self.all_vehs_info[veh_id] = [traci.vehicle.getAccumulatedWaitingTime(veh_id),
traci.simulation.getCurrentTime(), traci.simulation.getCurrentTime(), 0, 0,-1,-1]
for veh_id in self.main_new_vehs:
type_id = traci.vehicle.getTypeID(veh_id)
if type_id == "Car":
self.main_new_vehs_4decision.add(veh_id)
self.all_vehs_info[veh_id] = [traci.vehicle.getAccumulatedWaitingTime(veh_id),
traci.simulation.getCurrentTime(), traci.simulation.getCurrentTime(), 1, 0,-1,-1]
elif type_id == "Car2": #left turn
self.all_vehs_info[veh_id] = [traci.vehicle.getAccumulatedWaitingTime(veh_id),
traci.simulation.getCurrentTime(), traci.simulation.getCurrentTime(), 3, 0,-1,-1]
else:
print("Car type error")
def induction_loop_count(self):
for loop_id in self.induction_loop_ID_list:
self.induction_loop_num[loop_id].put(traci.inductionloop.getLastStepVehicleNumber(loop_id))
            self.induction_loop_num[loop_id].get()  # pop and discard the oldest count at the head of the queue
def sim_step(self, action_change_ratio):
traci.simulationStep()
self.current_phase_duration += 1
self.update_vehs_set()
self.turn_right_ratio_based(action_change_ratio) # part of vehicles turns right
self.induction_loop_count()
for lane in self.MeanSpeed_update_lanes:
Lane_veh_Num = traci.lane.getLastStepVehicleNumber(lane)
self.lanes_veh_Num_time_dict[lane]=Lane_veh_Num
if Lane_veh_Num<1:
MeanSpeed = normal_speed
else:
MeanSpeed = min(traci.lane.getLastStepMeanSpeed(lane),normal_speed)
if len(self.lanes_MeanSpeed_dict[lane])>=30:
del self.lanes_MeanSpeed_dict[lane][0]
self.lanes_MeanSpeed_dict[lane].append(MeanSpeed)
def take_action(self, action, dynamic_flag):
self.advise1 = 0
self.advise3 = 0
for lane in self.MeanSpeed_update_lanes:
self.lanes_MeanSpeed_dict[lane] = []
if len(action) == 8:
self.advise2 = list(np.zeros(4))
action_change_phase, action_change_ratio = 2*action[0:4], action[4:]
step = 0
last_dur_end_phase = traci.trafficlight.getPhase(node_light_7)
self.current_phase_duration = 0
action_change_phase_revise = action_change_phase*(action_change_phase>(6/Step_len))
selected_phase_list = []
action_selected_phase_revise = []
for phase_idx in range(action_change_phase_revise.size):
if action_change_phase_revise[phase_idx]>0:
selected_phase_list.append(phase_idx*2)
action_selected_phase_revise.append(action_change_phase_revise[phase_idx])
self.pre_g_ratio=copy.deepcopy(self.real_g_ratio)
self.real_g_ratio = np.round((action_change_phase_revise/np.sum(action_change_phase_revise))*Step_len)/Step_len
g_ratio = self.real_g_ratio
action_selected_phase_revise = np.array(action_selected_phase_revise)
action_selected_phase_revise = np.round((action_selected_phase_revise/np.sum(action_selected_phase_revise))*Step_len)
for route_index in range(4):
if len(self.model_based_TT[str(route_index)])>3:
del self.model_based_TT[str(route_index)][0]
self.model_based_TT[str(route_index)].append(self.travel_time_model_based(
g_ratio[0] if route_index < 2 else g_ratio[2], self.status_tracker[3][route_index], route_index))
if len(self.speed_based_TT[str(route_index)])>3:
del self.speed_based_TT[str(route_index)][0]
self.speed_based_TT[str(route_index)].append(self.status_tracker[6][route_index]*travel_time_scale)
self.share_straight_travel_time[route_index] = self.status_tracker[5][route_index]*travel_time_scale + (np.mean(self.model_based_TT[str(route_index)])+np.mean(self.speed_based_TT[str(route_index)]) )/2
self.share_reroute_travel_time[route_index] = self.status_tracker[4][route_index]*travel_time_scale
for phase_idx in range(len(selected_phase_list)):
if phase_idx ==0:
if last_dur_end_phase == selected_phase_list[phase_idx]:
for _ in range(int(action_selected_phase_revise[phase_idx]-3)):
self.sim_step(action_change_ratio)
step += 1
else:
                    traci.trafficlight.setPhase(node_light_7, last_dur_end_phase+1)  # 3 s yellow phase
for _ in range(3):
self.sim_step(action_change_ratio)
step += 1
self.current_phase_duration = selected_phase_list[phase_idx]
traci.trafficlight.setPhase(node_light_7, selected_phase_list[phase_idx])
for _ in range(int(action_selected_phase_revise[phase_idx]-6)):
self.sim_step(action_change_ratio)
step += 1
else:
self.current_phase_duration = selected_phase_list[phase_idx]
traci.trafficlight.setPhase(node_light_7, selected_phase_list[phase_idx])
for _ in range(int(action_selected_phase_revise[phase_idx]-3)):
self.sim_step(action_change_ratio)
step += 1
if phase_idx ==(len(selected_phase_list)-1):
for _ in range(Step_len-step):
self.sim_step(action_change_ratio)
step += 1
else:
traci.trafficlight.setPhase(node_light_7, selected_phase_list[phase_idx]+1)
for _ in range(3):
self.sim_step(action_change_ratio)
step += 1
if step != Step_len:
print(f"step is {step} which is not equal to StepLength {Step_len}")
reward = self.return_reward(g_ratio)
self.straight_num = np.zeros(4)
self.shunt_num = np.zeros(4)
self.smart_num = np.zeros(4)
self.update_state()
if len(action) <= 2:
if np.mean(self.over_sau_time) > 280*timeslot_factor:
self.advise2 = 1*(random.random()>0.5)
else:
for index in range(4):
if self.over_sau_time[index] > 280*timeslot_factor:
self.advise2[index] = 1*(random.random()>0.5)
return reward
def close_sumo():
traci.close()
def get_max_queue_length(listLanes):
max_queue_length = 0
for lane in listLanes:
queue_length = traci.lane.getLastStepHaltingNumber(lane)
if max_queue_length < queue_length:
max_queue_length = queue_length
return max_queue_length
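
# Editorial sketch (not part of the original module): one way the agent above
# could be driven. The SUMO config path, episode length and the 8-dimensional
# action (4 green-split weights followed by 4 reroute ratios) are illustrative
# assumptions; a real run needs a matching .sumocfg and route files.
if __name__ == "__main__":
    sumo_cmd = ["sumo", "-c", "intersection.sumocfg"]  # hypothetical config file
    agent = SUMO_Agent(sumo_cmd, path_set=None, action_dim=8)
    action = np.array([0.25, 0.25, 0.25, 0.25, 0.3, 0.3, 0.3, 0.3])
    for _ in range(10):
        reward = agent.take_action(action, dynamic_flag=False)
        print("cycle reward:", reward)
    close_sumo()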
|
the-stack_0_17135
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource_py3 import SubResource
class P2SVpnServerConfiguration(SubResource):
"""P2SVpnServerConfiguration Resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:param p2_svpn_server_configuration_properties_name: The name of the
P2SVpnServerConfiguration that is unique within a VirtualWan in a resource
    group. This name can be used to access the resource along with Parent
    VirtualWan resource name.
:type p2_svpn_server_configuration_properties_name: str
:param vpn_protocols: VPN protocols for the P2SVpnServerConfiguration.
:type vpn_protocols: list[str or
~azure.mgmt.network.v2019_02_01.models.VpnGatewayTunnelingProtocol]
:param p2_svpn_server_config_vpn_client_root_certificates: VPN client root
certificate of P2SVpnServerConfiguration.
:type p2_svpn_server_config_vpn_client_root_certificates:
list[~azure.mgmt.network.v2019_02_01.models.P2SVpnServerConfigVpnClientRootCertificate]
:param p2_svpn_server_config_vpn_client_revoked_certificates: VPN client
revoked certificate of P2SVpnServerConfiguration.
:type p2_svpn_server_config_vpn_client_revoked_certificates:
list[~azure.mgmt.network.v2019_02_01.models.P2SVpnServerConfigVpnClientRevokedCertificate]
:param p2_svpn_server_config_radius_server_root_certificates: Radius
Server root certificate of P2SVpnServerConfiguration.
:type p2_svpn_server_config_radius_server_root_certificates:
list[~azure.mgmt.network.v2019_02_01.models.P2SVpnServerConfigRadiusServerRootCertificate]
:param p2_svpn_server_config_radius_client_root_certificates: Radius
client root certificate of P2SVpnServerConfiguration.
:type p2_svpn_server_config_radius_client_root_certificates:
list[~azure.mgmt.network.v2019_02_01.models.P2SVpnServerConfigRadiusClientRootCertificate]
:param vpn_client_ipsec_policies: VpnClientIpsecPolicies for
P2SVpnServerConfiguration.
:type vpn_client_ipsec_policies:
list[~azure.mgmt.network.v2019_02_01.models.IpsecPolicy]
:param radius_server_address: The radius server address property of the
P2SVpnServerConfiguration resource for point to site client connection.
:type radius_server_address: str
:param radius_server_secret: The radius secret property of the
P2SVpnServerConfiguration resource for point to site client connection.
:type radius_server_secret: str
:ivar provisioning_state: The provisioning state of the
P2SVpnServerConfiguration resource. Possible values are: 'Updating',
'Deleting', and 'Failed'.
:vartype provisioning_state: str
:ivar p2_svpn_gateways: List of references to P2SVpnGateways.
:vartype p2_svpn_gateways:
list[~azure.mgmt.network.v2019_02_01.models.SubResource]
:param p2_svpn_server_configuration_properties_etag: A unique read-only
string that changes whenever the resource is updated.
:type p2_svpn_server_configuration_properties_etag: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:ivar etag: Gets a unique read-only string that changes whenever the
resource is updated.
:vartype etag: str
"""
_validation = {
'provisioning_state': {'readonly': True},
'p2_svpn_gateways': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'p2_svpn_server_configuration_properties_name': {'key': 'properties.name', 'type': 'str'},
'vpn_protocols': {'key': 'properties.vpnProtocols', 'type': '[str]'},
'p2_svpn_server_config_vpn_client_root_certificates': {'key': 'properties.p2SVpnServerConfigVpnClientRootCertificates', 'type': '[P2SVpnServerConfigVpnClientRootCertificate]'},
'p2_svpn_server_config_vpn_client_revoked_certificates': {'key': 'properties.p2SVpnServerConfigVpnClientRevokedCertificates', 'type': '[P2SVpnServerConfigVpnClientRevokedCertificate]'},
'p2_svpn_server_config_radius_server_root_certificates': {'key': 'properties.p2SVpnServerConfigRadiusServerRootCertificates', 'type': '[P2SVpnServerConfigRadiusServerRootCertificate]'},
'p2_svpn_server_config_radius_client_root_certificates': {'key': 'properties.p2SVpnServerConfigRadiusClientRootCertificates', 'type': '[P2SVpnServerConfigRadiusClientRootCertificate]'},
'vpn_client_ipsec_policies': {'key': 'properties.vpnClientIpsecPolicies', 'type': '[IpsecPolicy]'},
'radius_server_address': {'key': 'properties.radiusServerAddress', 'type': 'str'},
'radius_server_secret': {'key': 'properties.radiusServerSecret', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'p2_svpn_gateways': {'key': 'properties.p2SVpnGateways', 'type': '[SubResource]'},
'p2_svpn_server_configuration_properties_etag': {'key': 'properties.etag', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, id: str=None, p2_svpn_server_configuration_properties_name: str=None, vpn_protocols=None, p2_svpn_server_config_vpn_client_root_certificates=None, p2_svpn_server_config_vpn_client_revoked_certificates=None, p2_svpn_server_config_radius_server_root_certificates=None, p2_svpn_server_config_radius_client_root_certificates=None, vpn_client_ipsec_policies=None, radius_server_address: str=None, radius_server_secret: str=None, p2_svpn_server_configuration_properties_etag: str=None, name: str=None, **kwargs) -> None:
super(P2SVpnServerConfiguration, self).__init__(id=id, **kwargs)
self.p2_svpn_server_configuration_properties_name = p2_svpn_server_configuration_properties_name
self.vpn_protocols = vpn_protocols
self.p2_svpn_server_config_vpn_client_root_certificates = p2_svpn_server_config_vpn_client_root_certificates
self.p2_svpn_server_config_vpn_client_revoked_certificates = p2_svpn_server_config_vpn_client_revoked_certificates
self.p2_svpn_server_config_radius_server_root_certificates = p2_svpn_server_config_radius_server_root_certificates
self.p2_svpn_server_config_radius_client_root_certificates = p2_svpn_server_config_radius_client_root_certificates
self.vpn_client_ipsec_policies = vpn_client_ipsec_policies
self.radius_server_address = radius_server_address
self.radius_server_secret = radius_server_secret
self.provisioning_state = None
self.p2_svpn_gateways = None
self.p2_svpn_server_configuration_properties_etag = p2_svpn_server_configuration_properties_etag
self.name = name
self.etag = None
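
# Editorial usage sketch, not part of the generated SDK file: constructing the
# model with a few typical fields (the protocol and address values are
# illustrative assumptions; the relative import above means this only runs in
# the package context).
if __name__ == "__main__":
    config = P2SVpnServerConfiguration(
        name="p2s-config",
        vpn_protocols=["IkeV2"],
        radius_server_address="10.0.0.4",
    )
    print(config.name, config.vpn_protocols, config.radius_server_address)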
|
the-stack_0_17137
|
import re
from typing import Any, Dict, List, Text
from rasa.nlu.components import Component
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.tokenizers import Token, Tokenizer
from rasa.nlu.training_data import Message, TrainingData
from rasa.nlu.constants import (
MESSAGE_RESPONSE_ATTRIBUTE,
MESSAGE_INTENT_ATTRIBUTE,
MESSAGE_TEXT_ATTRIBUTE,
MESSAGE_TOKENS_NAMES,
MESSAGE_ATTRIBUTES,
MESSAGE_SPACY_FEATURES_NAMES,
MESSAGE_VECTOR_FEATURE_NAMES,
)
class WhitespaceTokenizer(Tokenizer, Component):
provides = [MESSAGE_TOKENS_NAMES[attribute] for attribute in MESSAGE_ATTRIBUTES]
defaults = {
# Flag to check whether to split intents
"intent_tokenization_flag": False,
# Symbol on which intent should be split
"intent_split_symbol": "_",
# text will be tokenized with case sensitive as default
"case_sensitive": True,
}
def __init__(self, component_config: Dict[Text, Any] = None) -> None:
"""Construct a new tokenizer using the WhitespaceTokenizer framework."""
super().__init__(component_config)
# flag to check whether to split intents
self.intent_tokenization_flag = self.component_config.get(
"intent_tokenization_flag"
)
# split symbol for intents
self.intent_split_symbol = self.component_config["intent_split_symbol"]
self.case_sensitive = self.component_config["case_sensitive"]
def train(
self, training_data: TrainingData, config: RasaNLUModelConfig, **kwargs: Any
) -> None:
for example in training_data.training_examples:
for attribute in MESSAGE_ATTRIBUTES:
if example.get(attribute) is not None:
example.set(
MESSAGE_TOKENS_NAMES[attribute],
self.tokenize(example.get(attribute), attribute),
)
def process(self, message: Message, **kwargs: Any) -> None:
message.set(
MESSAGE_TOKENS_NAMES[MESSAGE_TEXT_ATTRIBUTE], self.tokenize(message.text)
)
def tokenize(
self, text: Text, attribute: Text = MESSAGE_TEXT_ATTRIBUTE
) -> List[Token]:
if not self.case_sensitive:
text = text.lower()
# remove 'not a word character' if
if attribute != MESSAGE_INTENT_ATTRIBUTE:
words = re.sub(
# there is a space or an end of a string after it
r"[^\w#@&]+(?=\s|$)|"
# there is a space or beginning of a string before it
# not followed by a number
r"(\s|^)[^\w#@&]+(?=[^0-9\s])|"
# not in between numbers and not . or @ or & or - or #
# e.g. 10'000.00 or [email protected]
# and not url characters
r"(?<=[^0-9\s])[^\w._~:/?#\[\]()@!$&*+,;=-]+(?=[^0-9\s])",
" ",
text,
).split()
else:
words = (
text.split(self.intent_split_symbol)
if self.intent_tokenization_flag
else [text]
)
running_offset = 0
tokens = []
for word in words:
word_offset = text.index(word, running_offset)
word_len = len(word)
running_offset = word_offset + word_len
tokens.append(Token(word, word_offset))
return tokens
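
# Editorial sketch, not part of the Rasa source: tokenizing a plain string with
# the default configuration. It assumes Token exposes .text and .offset
# attributes, as in the Rasa 1.x API this module targets.
if __name__ == "__main__":
    tokenizer = WhitespaceTokenizer()
    for token in tokenizer.tokenize("forecast for lunch at noon"):
        print(token.text, token.offset)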
|
the-stack_0_17139
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Stack and ParallelStack Ops."""
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.platform import test
def np_split_squeeze(array, axis):
axis_len = array.shape[axis]
return [
np.squeeze(
arr, axis=(axis,)) for arr in np.split(
array, axis_len, axis=axis)
]
class StackOpTest(test.TestCase):
def randn(self, shape, dtype):
data = np.random.randn(*shape)
if dtype == np.bool_:
return data < 0 # Naive casting yields True with P(1)!
else:
return data.astype(dtype)
def testSimple(self):
np.random.seed(7)
for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
rank = len(shape)
for axis in range(-rank, rank):
for dtype in [np.bool_, np.float32, np.int32, np.int64]:
data = self.randn(shape, dtype)
xs = np_split_squeeze(data, axis)
# Stack back into a single tensorflow tensor
with self.subTest(shape=shape, axis=axis, dtype=dtype):
c = array_ops.stack(xs, axis=axis)
self.assertAllEqual(c, data)
def testSimpleParallelCPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
np.random.seed(7)
with test_util.device(use_gpu=False):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
xs = list(map(constant_op.constant, data))
c = array_ops.parallel_stack(xs)
self.assertAllEqual(c, data)
def testSimpleParallelGPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
with test_util.device(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
xs = list(map(constant_op.constant, data))
c = array_ops.parallel_stack(xs)
self.assertAllEqual(c, data)
def testConst(self):
np.random.seed(7)
with test_util.use_gpu():
# Verify that shape induction works with shapes produced via const stack
a = constant_op.constant([1, 2, 3, 4, 5, 6])
b = array_ops.reshape(a, array_ops.stack([2, 3]))
self.assertAllEqual(b.get_shape(), [2, 3])
# Check on a variety of shapes and types
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (8, 2, 10):
for dtype in [np.bool_, np.float32, np.int16, np.int32, np.int64]:
with self.subTest(shape=shape, dtype=dtype):
data = self.randn(shape, dtype)
# Stack back into a single tensorflow tensor directly using np array
c = array_ops.stack(data)
if not context.executing_eagerly():
# This is implemented via a Const:
self.assertEqual(c.op.type, "Const")
self.assertAllEqual(c, data)
# Python lists also work for 1-D case:
if len(shape) == 1:
data_list = list(data)
cl = array_ops.stack(data_list)
if not context.executing_eagerly():
self.assertEqual(cl.op.type, "Const")
self.assertAllEqual(cl, data)
def testConstParallelCPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
np.random.seed(7)
with test_util.device(use_gpu=False):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (8, 2, 10):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
if len(shape) == 1:
data_list = list(data)
cl = array_ops.parallel_stack(data_list)
self.assertAllEqual(cl, data)
data = self.randn(shape, np.float32)
c = array_ops.parallel_stack(data)
self.assertAllEqual(c, data)
def testConstParallelGPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
np.random.seed(7)
with test_util.device(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
if len(shape) == 1:
data_list = list(data)
cl = array_ops.parallel_stack(data_list)
self.assertAllEqual(cl, data)
data = self.randn(shape, np.float32)
c = array_ops.parallel_stack(data)
self.assertAllEqual(c, data)
def testGradientsAxis0(self):
np.random.seed(7)
for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
data = np.random.randn(*shape)
with self.subTest(shape=shape):
with self.cached_session():
def func(*xs):
return array_ops.stack(xs)
# TODO(irving): Remove list() once we handle maps correctly
xs = list(map(constant_op.constant, data))
theoretical, numerical = gradient_checker_v2.compute_gradient(
func, xs)
self.assertAllClose(theoretical, numerical)
def testGradientsAxis1(self):
np.random.seed(7)
for shape in (2, 3), (3, 2), (8, 2, 10):
data = np.random.randn(*shape)
out_shape = list(shape[1:])
out_shape.insert(1, shape[0])
with self.subTest(shape=shape):
with self.cached_session():
def func(*inp):
return array_ops.stack(inp, axis=1)
# TODO(irving): Remove list() once we handle maps correctly
xs = list(map(constant_op.constant, data))
theoretical, numerical = gradient_checker_v2.compute_gradient(
func, xs)
self.assertAllClose(theoretical, numerical)
def testZeroSizeCPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
# Verify that stack doesn't crash for zero size inputs
with test_util.device(use_gpu=False):
for shape in (0,), (3, 0), (0, 3):
with self.subTest(shape=shape):
x = np.zeros((2,) + shape).astype(np.int32)
p = self.evaluate(array_ops.stack(list(x)))
self.assertAllEqual(p, x)
p = self.evaluate(array_ops.parallel_stack(list(x)))
self.assertAllEqual(p, x)
def testZeroSizeGPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
# Verify that stack doesn't crash for zero size inputs
with test_util.device(use_gpu=True):
for shape in (0,), (3, 0), (0, 3):
with self.subTest(shape=shape):
x = np.zeros((2,) + shape).astype(np.int32)
p = self.evaluate(array_ops.stack(list(x)))
self.assertAllEqual(p, x)
p = self.evaluate(array_ops.parallel_stack(list(x)))
self.assertAllEqual(p, x)
def testAxis0DefaultCPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
with test_util.device(use_gpu=False):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
stacked = self.evaluate(array_ops.stack(t))
parallel_stacked = self.evaluate(array_ops.parallel_stack(t))
expected = np.array([[1, 2, 3], [4, 5, 6]])
self.assertAllEqual(stacked, expected)
self.assertAllEqual(parallel_stacked, expected)
def testAxis0DefaultGPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
with test_util.device(use_gpu=True):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
stacked = self.evaluate(array_ops.stack(t))
parallel_stacked = self.evaluate(array_ops.parallel_stack(t))
expected = np.array([[1, 2, 3], [4, 5, 6]])
self.assertAllEqual(stacked, expected)
self.assertAllEqual(parallel_stacked, expected)
def testAgainstNumpy(self):
# For 1 to 5 dimensions.
for shape in (3,), (2, 2, 3), (4, 1, 2, 2), (8, 2, 10):
rank = len(shape)
expected = self.randn(shape, np.float32)
for dtype in [np.bool_, np.float32, np.int32, np.int64]:
# For all the possible axis to split it, including negative indices.
for axis in range(-rank, rank):
test_arrays = np_split_squeeze(expected, axis)
with self.cached_session():
with self.subTest(shape=shape, dtype=dtype, axis=axis):
actual_pack = array_ops.stack(test_arrays, axis=axis)
self.assertEqual(expected.shape, actual_pack.get_shape())
actual_pack = self.evaluate(actual_pack)
actual_stack = array_ops.stack(test_arrays, axis=axis)
self.assertEqual(expected.shape, actual_stack.get_shape())
actual_stack = self.evaluate(actual_stack)
self.assertNDArrayNear(expected, actual_stack, 1e-6)
def testDimOutOfRange(self):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
with self.assertRaisesRegex(ValueError,
r"Argument `axis` = 2 not in range \[-2, 2\)"):
array_ops.stack(t, axis=2)
def testDimOutOfNegativeRange(self):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
with self.assertRaisesRegex(ValueError,
r"Argument `axis` = -3 not in range \[-2, 2\)"):
array_ops.stack(t, axis=-3)
def testComplex(self):
np.random.seed(7)
with self.session():
for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
for dtype in [np.complex64, np.complex128]:
with self.subTest(shape=shape, dtype=dtype):
data = self.randn(shape, dtype)
xs = list(map(constant_op.constant, data))
c = array_ops.stack(xs)
self.assertAllEqual(self.evaluate(c), data)
class AutomaticStackingTest(test.TestCase):
def testSimple(self):
self.assertAllEqual([1, 0, 2],
ops.convert_to_tensor([1, constant_op.constant(0), 2]))
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
ops.convert_to_tensor([[0, 0, 0],
[0,
constant_op.constant(1), 0],
[0, 0, 0]]))
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
ops.convert_to_tensor([[0, 0, 0],
constant_op.constant([0, 1, 0]),
[0, 0, 0]]))
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
ops.convert_to_tensor([
constant_op.constant([0, 0, 0]),
constant_op.constant([0, 1, 0]),
constant_op.constant([0, 0, 0])
]))
def testWithNDArray(self):
with self.session():
result = ops.convert_to_tensor([[[0., 0.],
constant_op.constant([1., 1.])],
np.array(
[[2., 2.], [3., 3.]],
dtype=np.float32)])
self.assertAllEqual([[[0., 0.], [1., 1.]], [[2., 2.], [3., 3.]]],
self.evaluate(result))
def testDtype(self):
t_0 = ops.convert_to_tensor([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]])
self.assertEqual(dtypes.float32, t_0.dtype)
t_1 = ops.convert_to_tensor([[0., 0., 0.], constant_op.constant(
[0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]])
self.assertEqual(dtypes.float64, t_1.dtype)
t_2 = ops.convert_to_tensor(
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], dtype=dtypes.float64)
self.assertEqual(dtypes.float64, t_2.dtype)
t_3 = ops.convert_to_tensor(
[[0., 0., 0.],
constant_op.constant([0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]
],
dtype=dtypes.float32)
self.assertEqual(dtypes.float32, t_3.dtype)
t_4 = ops.convert_to_tensor(
[constant_op.constant([0., 0., 0.], dtype=dtypes.float64)],
dtype=dtypes.float32)
self.assertEqual(dtypes.float32, t_4.dtype)
with self.assertRaises(TypeError):
ops.convert_to_tensor([
constant_op.constant(
[0., 0., 0.], dtype=dtypes.float32), constant_op.constant(
[0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]
])
def testDtypeConversionWhenTensorDtypeMismatch(self):
t_0 = ops.convert_to_tensor([0., 0., 0.])
self.assertEqual(dtypes.float32, t_0.dtype)
t_1 = ops.convert_to_tensor([0, 0, 0])
self.assertEqual(dtypes.int32, t_1.dtype)
t_2 = ops.convert_to_tensor([t_0, t_0, t_1], dtype=dtypes.float64)
self.assertEqual(dtypes.float64, t_2.dtype)
if __name__ == "__main__":
test.main()
|
the-stack_0_17140
|
#!/usr/bin/env python
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import cv2
import numpy as np
import os,sys,timeit,json
from os import listdir
from os.path import isfile, join
import scipy.misc
import logging as log
import argparse
from vai.dpuv1.rt import xdnn, xdnn_io
from vai.dpuv1.utils.postproc import yolo
from yolo_utils import bias_selector, saveDetectionDarknetStyle, yolo_parser_args
from yolo_utils import draw_boxes, generate_colors
from get_mAP_darknet import calc_detector_mAP
def extant_file(x):
"""
'Type' for argparse - checks that file exists but does not open.
"""
if x == "-":
# skip file check and allow empty string
return ""
if not os.path.exists(x):
# Argparse uses the ArgumentTypeError to give a rejection message like:
# error: argument input: x does not exist
raise argparse.ArgumentTypeError("{0} does not exist".format(x))
return x
def prep_image(image_file, net_width, net_height, pix_scale, pad_val, img_transpose, ch_swp):
img = cv2.imread(image_file)
orig_shape = img.shape
height, width, __ = img.shape
newdim = max(height, width)
scalew = float(width) / newdim
scaleh = float(height) / newdim
maxdim = max(net_width, net_height)
neww = int(maxdim * scalew)
newh = int(maxdim * scaleh)
img = cv2.resize(img, (neww, newh))
if img.dtype != np.float32:
img = img.astype(np.float32, order='C')
img = img * pix_scale
height, width, channels = img.shape
newdim = max(height, width)
letter_image = np.zeros((newdim, newdim, channels))
letter_image[:, :, :] = pad_val
if newdim == width:
        letter_image[(newdim-height)//2:((newdim-height)//2+height),0:width] = img
    else:
        letter_image[0:height,(newdim-width)//2:((newdim-width)//2+width)] = img
img = letter_image
img = np.transpose(img, (img_transpose[0], img_transpose[1], img_transpose[2]))
ch = 3*[None]
ch[0] = img[0,:,:]
ch[1] = img[1,:,:]
ch[2] = img[2,:,:]
img = np.stack((ch[ch_swp[0]],ch[ch_swp[1]],ch[ch_swp[2]]))
return img, orig_shape
def yolo_gpu_inference(backend_path,
image_dir,
deploy_model,
weights,
out_labels,
IOU_threshold,
scorethresh,
mean_value,
pxscale,
transpose,
channel_swap,
yolo_model,
num_classes, args):
# Setup the environment
images = xdnn_io.getFilePaths(args['images'])
if(args['golden'] or args['visualize']):
assert args['labels'], "Provide --labels to compute mAP."
assert args['results_dir'], "For accuracy measurements, provide --results_dir to save the detections."
labels = xdnn_io.get_labels(args['labels'])
colors = generate_colors(len(labels))
# Select postproc and biases
if args['yolo_version'] == 'v2': yolo_postproc = yolo.yolov2_postproc
elif args['yolo_version'] == 'v3': yolo_postproc = yolo.yolov3_postproc
biases = bias_selector(args)
import caffe
caffe.set_mode_cpu()
print(args)
if(args['gpu'] is not None):
caffe.set_mode_gpu()
caffe.set_device(args['gpu'])
net = caffe.Net(deploy_model, weights, caffe.TEST)
net_h, net_w = net.blobs['data'].data.shape[-2:]
args['net_h'] = net_h
args['net_w'] = net_w
for i,img in enumerate(images):
if((i+1)%100 == 0): print(i+1, "images processed")
raw_img, img_shape = xdnn_io.loadYoloImageBlobFromFile(img, net_h, net_w)
net.blobs['data'].data[...] = raw_img
out = net.forward()
caffeOutput = sorted(out.values(), key=lambda item: item.shape[-1])
boxes = yolo_postproc(caffeOutput, args, [img_shape], biases=biases)
print("{}. Detected {} boxes in {}".format(i, len(boxes[0]), img))
# Save the result
boxes = boxes[0]
if(args['results_dir']):
filename = os.path.splitext(os.path.basename(img))[0]
out_file_txt = os.path.join(args['results_dir'], filename + '.txt')
print("Saving {} boxes to {}".format(len(boxes), out_file_txt)); sys.stdout.flush()
saveDetectionDarknetStyle(out_file_txt, boxes, img_shape)
if(args['visualize']):
out_file_png = os.path.join(args['results_dir'], filename + '.png')
print("Saving result to {}".format(out_file_png)); sys.stdout.flush()
draw_boxes(img, boxes, labels, colors, out_file_png)
# draw_boxes(images[i],bboxes,class_names,colors=[(0,0,0)]*num_classes)
return len(images)
def main():
parser = argparse.ArgumentParser()
parser = yolo_parser_args(parser)
parser.add_argument('--deploymodel', help="network definition prototxt file in case of caffe",
required=True, type=extant_file, metavar="FILE")
parser.add_argument('--caffemodel', help="network weights caffe model file in case of caffe",
required=True, type=extant_file, metavar="FILE")
parser.add_argument('--images', nargs='*',
help='directory or raw image files to use as input', required=True, type=extant_file, metavar="FILE")
parser.add_argument('--labels', help='label ID', type=extant_file, metavar="FILE")
parser.add_argument('--golden', help='Ground truth directory', type=extant_file, metavar="FILE")
parser.add_argument('--mean_value', type=int, nargs=3, default=[0,0,0], # BGR for Caffe
help='image mean values ')
parser.add_argument('--pxscale', type=float, default=(1.0/255.0), help='pix cale value')
parser.add_argument('--transpose', type=int, default=[2,0,1], nargs=3, help="Passed to caffe.io.Transformer function set_transpose, default 2,0,1" )
parser.add_argument('--channel_swap', type=int, default=[2,1,0], nargs=3, help="Passed to caffe.io.Transformer function set_channel_swap, default 2,1,0")
parser.add_argument('--caffe_backend_path', help='caffe backend')
parser.add_argument('--gpu', type=int, default=None, help='GPU-ID to run Caffe inference on GPU')
args = parser.parse_args()
args = xdnn_io.make_dict_args(args)
num_images_processed = yolo_gpu_inference(args['caffe_backend_path'],
args['images'],
args['deploymodel'],
args['caffemodel'],
args['results_dir'],
args['iouthresh'],
args['scorethresh'],
args['mean_value'],
args['pxscale'],
args['transpose'],
args['channel_swap'],
args['yolo_model'],
args['classes'], args)
print('num images processed : ', num_images_processed)
# mAP calculation
if(args['golden']):
labels = xdnn_io.get_labels(args['labels'])
print()
print("Computing mAP score : ")
print("Class names are : {} ".format(labels))
mAP = calc_detector_mAP(args['results_dir'], args['golden'], len(labels), labels, args['prob_threshold'], args['mapiouthresh'], args['points'])
sys.stdout.flush()
if __name__ == '__main__':
main()
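
# Editorial note (not in the original script): a typical invocation of this
# Caffe reference flow might look like the command below. Every path is a
# placeholder, and the extra detection options (score/IOU thresholds, results
# directory, YOLO version) come from yolo_parser_args, whose exact flag names
# live in yolo_utils.
#
#   python run_yolo_caffe.py --deploymodel yolov3.prototxt \
#       --caffemodel yolov3.caffemodel --images ./test_images \
#       --labels coco.names --gpu 0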
|
the-stack_0_17142
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import config
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables as variables_module
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_block_lower_triangular as block_lower_triangular
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import test
linalg = linalg_lib
rng = np.random.RandomState(0)
def _block_lower_triangular_dense(expected_shape, blocks):
"""Convert a list of blocks into a dense blockwise lower-triangular matrix."""
rows = []
num_cols = 0
for row_blocks in blocks:
# Get the batch shape for the block.
batch_row_shape = array_ops.shape(row_blocks[0])[:-1]
num_cols += array_ops.shape(row_blocks[-1])[-1]
zeros_to_pad_after_shape = array_ops.concat(
[batch_row_shape, [expected_shape[-2] - num_cols]], axis=-1)
zeros_to_pad_after = array_ops.zeros(
zeros_to_pad_after_shape, dtype=row_blocks[-1].dtype)
row_blocks.append(zeros_to_pad_after)
rows.append(array_ops.concat(row_blocks, axis=-1))
return array_ops.concat(rows, axis=-2)
@test_util.run_all_in_graph_and_eager_modes
class SquareLinearOperatorBlockLowerTriangularTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def tearDown(self):
config.enable_tensor_float_32_execution(self.tf32_keep_)
def setUp(self):
self.tf32_keep_ = config.tensor_float_32_execution_enabled()
config.enable_tensor_float_32_execution(False)
# Increase from 1e-6 to 1e-5
self._atol[dtypes.float32] = 1e-5
self._atol[dtypes.complex64] = 1e-5
self._rtol[dtypes.float32] = 1e-5
self._rtol[dtypes.complex64] = 1e-5
super(SquareLinearOperatorBlockLowerTriangularTest, self).setUp()
@staticmethod
def use_blockwise_arg():
return True
@staticmethod
def skip_these_tests():
# Skipping since `LinearOperatorBlockLowerTriangular` is in general not
# self-adjoint.
return ["cholesky", "eigvalsh"]
@staticmethod
def operator_shapes_infos():
shape_info = linear_operator_test_util.OperatorShapesInfo
return [
shape_info((0, 0)),
shape_info((1, 1)),
shape_info((1, 3, 3)),
shape_info((5, 5), blocks=[[(2, 2)], [(3, 2), (3, 3)]]),
shape_info((3, 7, 7),
blocks=[[(1, 2, 2)], [(1, 3, 2), (3, 3, 3)],
[(1, 2, 2), (1, 2, 3), (1, 2, 2)]]),
shape_info((2, 4, 6, 6),
blocks=[[(2, 1, 2, 2)], [(1, 4, 2), (4, 4, 4)]]),
]
def operator_and_matrix(
self, shape_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
expected_blocks = (
shape_info.__dict__["blocks"] if "blocks" in shape_info.__dict__
else [[list(shape_info.shape)]])
matrices = []
for i, row_shapes in enumerate(expected_blocks):
row = []
for j, block_shape in enumerate(row_shapes):
if i == j: # operator is on the diagonal
row.append(
linear_operator_test_util.random_positive_definite_matrix(
block_shape, dtype, force_well_conditioned=True))
else:
row.append(
linear_operator_test_util.random_normal(block_shape, dtype=dtype))
matrices.append(row)
lin_op_matrices = matrices
if use_placeholder:
lin_op_matrices = [[
array_ops.placeholder_with_default(
matrix, shape=None) for matrix in row] for row in matrices]
operator = block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[linalg.LinearOperatorFullMatrix( # pylint:disable=g-complex-comprehension
l,
is_square=True,
is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
is_positive_definite=True if ensure_self_adjoint_and_pd else None)
for l in row] for row in lin_op_matrices])
# Should be auto-set.
self.assertTrue(operator.is_square)
# Broadcast the shapes.
expected_shape = list(shape_info.shape)
broadcasted_matrices = linear_operator_util.broadcast_matrix_batch_dims(
[op for row in matrices for op in row]) # pylint: disable=g-complex-comprehension
matrices = [broadcasted_matrices[i * (i + 1) // 2:(i + 1) * (i + 2) // 2]
for i in range(len(matrices))]
block_lower_triangular_dense = _block_lower_triangular_dense(
expected_shape, matrices)
if not use_placeholder:
block_lower_triangular_dense.set_shape(expected_shape)
return operator, block_lower_triangular_dense
def test_is_x_flags(self):
# Matrix with two positive eigenvalues, 1 and 1.
# The matrix values do not affect auto-setting of the flags.
matrix = [[1., 0.], [1., 1.]]
operator = block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[linalg.LinearOperatorFullMatrix(matrix)]],
is_positive_definite=True,
is_non_singular=True,
is_self_adjoint=False)
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertFalse(operator.is_self_adjoint)
def test_block_lower_triangular_inverse_type(self):
matrix = [[1., 0.], [0., 1.]]
operator = block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)],
[linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True),
linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)]],
is_non_singular=True,
)
inverse = operator.inverse()
self.assertIsInstance(
inverse,
block_lower_triangular.LinearOperatorBlockLowerTriangular)
self.assertEqual(2, len(inverse.operators))
self.assertEqual(1, len(inverse.operators[0]))
self.assertEqual(2, len(inverse.operators[1]))
def test_tape_safe(self):
operator_1 = linalg.LinearOperatorFullMatrix(
variables_module.Variable([[1., 0.], [0., 1.]]),
is_self_adjoint=True,
is_positive_definite=True)
operator_2 = linalg.LinearOperatorFullMatrix(
variables_module.Variable([[2., 0.], [1., 0.]]))
operator_3 = linalg.LinearOperatorFullMatrix(
variables_module.Variable([[3., 1.], [1., 3.]]),
is_self_adjoint=True,
is_positive_definite=True)
operator = block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[operator_1], [operator_2, operator_3]],
is_self_adjoint=False,
is_positive_definite=True)
diagonal_grads_only = ["diag_part", "trace", "determinant",
"log_abs_determinant"]
self.check_tape_safe(operator, skip_options=diagonal_grads_only)
for y in diagonal_grads_only:
for diag_block in [operator_1, operator_3]:
with backprop.GradientTape() as tape:
grads = tape.gradient(getattr(operator, y)(), diag_block.variables)
for item in grads:
self.assertIsNotNone(item)
def test_is_non_singular_auto_set(self):
# Matrix with two positive eigenvalues, 11 and 8.
# The matrix values do not affect auto-setting of the flags.
matrix = [[11., 0.], [1., 8.]]
operator_1 = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)
operator_2 = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)
operator_3 = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)
operator = block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[operator_1], [operator_2, operator_3]],
is_positive_definite=False, # No reason it HAS to be False...
is_non_singular=None)
self.assertFalse(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
with self.assertRaisesRegex(ValueError, "always non-singular"):
block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[operator_1], [operator_2, operator_3]], is_non_singular=False)
operator_4 = linalg.LinearOperatorFullMatrix(
[[1., 0.], [2., 0.]], is_non_singular=False)
# A singular operator off of the main diagonal shouldn't raise
block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[operator_1], [operator_4, operator_2]], is_non_singular=True)
with self.assertRaisesRegex(ValueError, "always singular"):
block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[operator_1], [operator_2, operator_4]], is_non_singular=True)
def test_different_dtypes_raises(self):
operators = [
[linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3))],
[linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3)),
linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3).astype(np.float32))]
]
with self.assertRaisesRegex(TypeError, "same dtype"):
block_lower_triangular.LinearOperatorBlockLowerTriangular(operators)
def test_non_square_operator_raises(self):
operators = [
[linalg.LinearOperatorFullMatrix(rng.rand(3, 4), is_square=False)],
[linalg.LinearOperatorFullMatrix(rng.rand(4, 4)),
linalg.LinearOperatorFullMatrix(rng.rand(4, 4))]
]
with self.assertRaisesRegex(ValueError, "must be square"):
block_lower_triangular.LinearOperatorBlockLowerTriangular(operators)
def test_empty_operators_raises(self):
with self.assertRaisesRegex(ValueError, "must be a list of >=1"):
block_lower_triangular.LinearOperatorBlockLowerTriangular([])
def test_operators_wrong_length_raises(self):
with self.assertRaisesRegex(ValueError, "must contain `2` blocks"):
block_lower_triangular.LinearOperatorBlockLowerTriangular([
[linalg.LinearOperatorFullMatrix(rng.rand(2, 2))],
[linalg.LinearOperatorFullMatrix(rng.rand(2, 2))
for _ in range(3)]])
def test_operators_mismatched_dimension_raises(self):
operators = [
[linalg.LinearOperatorFullMatrix(rng.rand(3, 3))],
[linalg.LinearOperatorFullMatrix(rng.rand(3, 4)),
linalg.LinearOperatorFullMatrix(rng.rand(3, 3))]
]
with self.assertRaisesRegex(ValueError, "must be the same as"):
block_lower_triangular.LinearOperatorBlockLowerTriangular(operators)
def test_incompatible_input_blocks_raises(self):
matrix_1 = array_ops.placeholder_with_default(rng.rand(4, 4), shape=None)
matrix_2 = array_ops.placeholder_with_default(rng.rand(3, 4), shape=None)
matrix_3 = array_ops.placeholder_with_default(rng.rand(3, 3), shape=None)
operators = [
[linalg.LinearOperatorFullMatrix(matrix_1, is_square=True)],
[linalg.LinearOperatorFullMatrix(matrix_2),
linalg.LinearOperatorFullMatrix(matrix_3, is_square=True)]
]
operator = block_lower_triangular.LinearOperatorBlockLowerTriangular(
operators)
x = np.random.rand(2, 4, 5).tolist()
msg = ("dimension does not match" if context.executing_eagerly()
else "input structure is ambiguous")
with self.assertRaisesRegex(ValueError, msg):
operator.matmul(x)
if __name__ == "__main__":
linear_operator_test_util.add_tests(
SquareLinearOperatorBlockLowerTriangularTest)
test.main()
|
the-stack_0_17143
|
# (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import abc
import os
import uuid
from typing import Dict
import six
from datadog_checks.dev.tooling.constants import get_root
from datadog_checks.dev.tooling.git import content_changed
from datadog_checks.dev.tooling.manifest_validator.schema import get_manifest_schema
from datadog_checks.dev.tooling.utils import (
get_metadata_file,
has_logs,
is_metric_in_metadata_file,
is_package,
parse_version_parts,
read_metadata_rows,
)
FIELDS_NOT_ALLOWED_TO_CHANGE = ["integration_id", "display_name", "guid"]
METRIC_TO_CHECK_EXCLUDE_LIST = {
'openstack.controller', # "Artificial" metric, shouldn't be listed in metadata file.
'riakcs.bucket_list_pool.workers', # RiakCS 2.1 metric, but metadata.csv lists RiakCS 2.0 metrics only.
}
class ValidationResult(object):
def __init__(self):
self.failed = False
self.fixed = False
self.messages = {'success': [], 'warning': [], 'failure': [], 'info': []}
def __str__(self):
return '\n'.join(['\n'.join(messages) for messages in self.messages.values()])
def __repr__(self):
return str(self)
@six.add_metaclass(abc.ABCMeta)
class ManifestValidator(object):
def __init__(self, is_extras=False, is_marketplace=False, check_in_extras=True, check_in_marketplace=True):
self.result = ValidationResult()
self.is_extras = is_extras
self.is_marketplace = is_marketplace
self.check_in_extras = check_in_extras
self.check_in_marketplace = check_in_marketplace
def should_validate(self):
if not self.is_extras and not self.is_marketplace:
return True
if self.is_extras and self.check_in_extras:
return True
if self.is_marketplace and self.check_in_marketplace:
return True
return False
def validate(self, check_name, manifest, should_fix):
# type: (str, Dict, bool) -> None
"""Validates the decoded manifest. Will perform inline changes if fix is true"""
raise NotImplementedError
def fail(self, error_message):
self.result.failed = True
self.result.messages['failure'].append(error_message)
def fix(self, problem, solution):
self.result.warning_msg = problem
self.result.success_msg = solution
self.result.fixed = True
self.result.failed = False
def __repr__(self):
return str(self.result)
class AttributesValidator(ManifestValidator):
""" attributes are valid"""
def validate(self, check_name, decoded, fix):
errors = sorted(get_manifest_schema().iter_errors(decoded), key=lambda e: e.path)
if errors:
for error in errors:
self.fail(f' {"->".join(map(str, error.absolute_path))} Error: {error.message}')
class GUIDValidator(ManifestValidator):
all_guids = {}
def validate(self, check_name, decoded, fix):
guid = decoded.get('guid')
if guid in self.all_guids:
output = f' duplicate `guid`: `{guid}` from `{self.all_guids[guid]}`'
if fix:
new_guid = uuid.uuid4()
self.all_guids[new_guid] = check_name
decoded['guid'] = new_guid
self.fix(output, f' new `guid`: {new_guid}')
else:
self.fail(output)
elif not guid or not isinstance(guid, str):
output = ' required non-null string: guid'
if fix:
new_guid = uuid.uuid4()
self.all_guids[new_guid] = check_name
decoded['guid'] = new_guid
self.fix(output, f' new `guid`: {new_guid}')
else:
self.fail(output)
else:
self.all_guids[guid] = check_name
return self.result
class ManifestVersionValidator(ManifestValidator):
def __init__(self, *args, **kwargs):
super(ManifestVersionValidator, self).__init__(*args, **kwargs)
self.root = get_root()
def validate(self, check_name, decoded, fix):
# manifest_version
correct_manifest_version = '1.0.0'
manifest_version = decoded.get('manifest_version')
version_parts = parse_version_parts(manifest_version)
if len(version_parts) != 3:
if not manifest_version:
output = ' required non-null string: manifest_version'
else:
output = f' invalid `manifest_version`: {manifest_version}'
if fix:
version_parts = parse_version_parts(correct_manifest_version)
decoded['manifest_version'] = correct_manifest_version
self.fix(output, f' new `manifest_version`: {correct_manifest_version}')
else:
self.fail(output)
if len(version_parts) == 3:
about_exists = os.path.isfile(
os.path.join(self.root, check_name, 'datadog_checks', check_name, '__about__.py')
)
if version_parts >= [1, 0, 0]:
if 'version' in decoded and about_exists:
output = ' outdated field: version'
if fix:
del decoded['version']
self.fix(output, ' removed field: version')
else:
self.fail(output)
elif about_exists:
output = f' outdated `manifest_version`: {manifest_version}'
if fix:
decoded['manifest_version'] = correct_manifest_version
self.fix(output, f' new `manifest_version`: {correct_manifest_version}')
if 'version' in decoded:
del decoded['version']
self.result.messages['success'].append(' removed field: version')
else:
self.fail(output)
else:
version = decoded.get('version')
version_parts = parse_version_parts(version)
if len(version_parts) != 3:
if not version:
output = ' required non-null string: version'
else:
output = f' invalid `version`: {version}'
self.fail(output)
class MaintainerValidator(ManifestValidator):
def validate(self, check_name, decoded, fix):
if not self.should_validate():
return
correct_maintainer = '[email protected]'
maintainer = decoded.get('maintainer')
if not maintainer.isascii():
self.fail(f' `maintainer` contains non-ascii character: {maintainer}')
return
if maintainer != correct_maintainer:
output = f' incorrect `maintainer`: {maintainer}'
if fix:
decoded['maintainer'] = correct_maintainer
self.fix(output, f' new `maintainer`: {correct_maintainer}')
else:
self.fail(output)
class NameValidator(ManifestValidator):
def validate(self, check_name, decoded, fix):
correct_name = check_name
name = decoded.get('name')
if not isinstance(name, str) or name.lower() != correct_name.lower():
output = f' incorrect `name`: {name}'
if fix:
decoded['name'] = correct_name
self.fix(output, f' new `name`: {correct_name}')
else:
self.fail(output)
class MetricsMetadataValidator(ManifestValidator):
def validate(self, check_name, decoded, fix):
# metrics_metadata
metadata_in_manifest = decoded.get('assets', {}).get('metrics_metadata')
metadata_file = get_metadata_file(check_name)
metadata_file_exists = os.path.isfile(metadata_file)
if not metadata_in_manifest and metadata_file_exists:
# There is a metadata.csv file but no entry in the manifest.json
self.fail(' metadata.csv exists but not defined in the manifest.json of {}'.format(check_name))
elif metadata_in_manifest and not metadata_file_exists:
# There is an entry in the manifest.json file but the referenced csv file does not exist.
self.fail(' metrics_metadata in manifest.json references a non-existing file: {}.'.format(metadata_file))
class MetricToCheckValidator(ManifestValidator):
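# Checks that every metric listed in `metric_to_check` exists in metadata.csv
# (unless it appears in the pricing block or the exclude list), and that an
# integration shipping a non-empty metadata.csv also declares a `metric_to_check`.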
def validate(self, check_name, decoded, _):
if not self.should_validate() or check_name == 'snmp' or check_name == 'moogsoft':
return
metadata_in_manifest = decoded.get('assets', {}).get('metrics_metadata')
# metric_to_check
metric_to_check = decoded.get('metric_to_check')
pricing = decoded.get('pricing', [])
if metric_to_check:
metrics_to_check = metric_to_check if isinstance(metric_to_check, list) else [metric_to_check]
if any(p.get('metric') in metrics_to_check for p in pricing):
return
for metric in metrics_to_check:
metric_integration_check_name = check_name
# snmp vendor specific integrations define metric_to_check
# with metrics from `snmp` integration
if check_name.startswith('snmp_') and not metadata_in_manifest:
metric_integration_check_name = 'snmp'
if (
not is_metric_in_metadata_file(metric, metric_integration_check_name)
and metric not in METRIC_TO_CHECK_EXCLUDE_LIST
):
self.fail(f' metric_to_check not in metadata.csv: {metric!r}')
elif metadata_in_manifest:
# if we have a metadata.csv file but no `metric_to_check` raise an error
metadata_file = get_metadata_file(check_name)
if os.path.isfile(metadata_file):
for _, row in read_metadata_rows(metadata_file):
# there are cases of metadata.csv files with just a header but no metrics
if row:
self.fail(' metric_to_check not included in manifest.json')
class SupportValidator(ManifestValidator):
def validate(self, check_name, decoded, fix):
if self.is_extras:
correct_support = 'contrib'
elif self.is_marketplace:
correct_support = 'partner'
else:
correct_support = 'core'
support = decoded.get('support')
if support != correct_support:
output = f' incorrect `support`: {support}'
if fix:
decoded['support'] = correct_support
self.fix(output, f' new `support`: {correct_support}')
else:
self.fail(output)
class IsPublicValidator(ManifestValidator):
def validate(self, check_name, decoded, fix):
correct_is_public = True
is_public = decoded.get('is_public')
if not isinstance(is_public, bool):
output = ' required boolean: is_public'
if fix:
decoded['is_public'] = correct_is_public
self.fix(output, f' new `is_public`: {correct_is_public}')
else:
self.fail(output)
class ImmutableAttributesValidator(ManifestValidator):
"""Ensure attributes haven't changed
Skip if the manifest is a new file (i.e. new integration)
"""
def validate(self, check_name, decoded, fix):
manifest_fields_changed = content_changed(file_glob=f"{check_name}/manifest.json")
if 'new file' not in manifest_fields_changed:
for field in FIELDS_NOT_ALLOWED_TO_CHANGE:
if field in manifest_fields_changed:
output = f'Attribute `{field}` is not allowed to be modified. Please revert to original value'
self.fail(output)
else:
self.result.messages['info'].append(
" skipping check for changed fields: integration not on default branch"
)
class LogsCategoryValidator(ManifestValidator):
"""If an integration defines logs it should have the log collection category"""
LOG_COLLECTION_CATEGORY = "log collection"
IGNORE_LIST = {
'docker_daemon',
'ecs_fargate', # Logs are provided by FireLens or awslogs
'cassandra_nodetool', # Logs are provided by cassandra
'jmeter',
'kafka_consumer', # Logs are provided by kafka
'kubernetes',
'pan_firewall',
'altostra',
'hasura_cloud',
'sqreen',
}
def validate(self, check_name, decoded, fix):
categories = decoded.get('categories')
check_has_logs = has_logs(check_name)
check_has_logs_category = self.LOG_COLLECTION_CATEGORY in categories
if check_has_logs == check_has_logs_category or check_name in self.IGNORE_LIST:
return
if check_has_logs:
output = ' required category: ' + self.LOG_COLLECTION_CATEGORY
if fix:
correct_categories = categories + [self.LOG_COLLECTION_CATEGORY]
decoded['categories'] = correct_categories
self.fix(output, f' new `categories`: {correct_categories}')
else:
self.fail(output)
else:
output = (
' This integration does not have logs, please remove the category: '
+ self.LOG_COLLECTION_CATEGORY
+ ' or define the logs properly'
)
self.fail(output)
class SupportedOSValidator(ManifestValidator):
"""If an integration contains python or logs configuration, the supported_os field should not be empty."""
def validate(self, check_name, decoded, _):
supported_os = decoded.get('supported_os')
check_has_logs = has_logs(check_name)
check_has_python = is_package(check_name)
if not supported_os and (check_has_logs or check_has_python):
output = f'Attribute `supported_os` in {check_name}/manifest.json should not be empty.'
self.fail(output)
def get_all_validators(is_extras, is_marketplace):
return [
AttributesValidator(),
GUIDValidator(),
ManifestVersionValidator(),
MaintainerValidator(is_extras, is_marketplace, check_in_extras=False, check_in_marketplace=False),
NameValidator(),
MetricsMetadataValidator(),
MetricToCheckValidator(),
SupportValidator(is_extras, is_marketplace),
IsPublicValidator(),
ImmutableAttributesValidator(),
LogsCategoryValidator(),
SupportedOSValidator(),
]
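# Hypothetical usage sketch (check name and manifest are illustrative, not
# part of this module):
#
#   validators = get_all_validators(is_extras=False, is_marketplace=False)
#   for validator in validators:
#       validator.validate('my_check', decoded_manifest, False)
#       if validator.result.failed:
#           print(validator)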
|
the-stack_0_17145
|
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2021, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_axis38.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [45642496, 45644416]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_y_axis({'line': {'color': 'yellow'},
'fill': {'color': 'red'}})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
the-stack_0_17147
|
from time import sleep
from typing import Iterable, Optional
from .shared.networking import ConnectionSettings, NetworkConnection
from .shared.configuration import Configuration
from .shared.logs import get_logger, initialize
from .messaging.broker import Broker, BrokerSettings
from .messaging.logging_broker import LoggingBroker
from .computing.facade import get_computational_problem
from .computing.base import Subproblem, SubproblemResult, SubproblemPool
from .computing.domain_commands import DomainCommand, PruneCommand
from .app import ApplicationSettings, ComputationManager, EmptySubproblemPoolError
from .messaging.commands import CommandMapper
from .messaging.command_handler import CommandHandler, CommandNotRegisteredException
from time import time
ResultCommand: type = None
RegisterCommand: type = None
DropCommand: type = None
ProgressCommand: type = None
def main(computation_manager: ComputationManager):
config = Configuration(__package__) \
.add_json_file('config.json')
app_settings = config.get('Application').bind_as(ApplicationSettings)
broker_settings = config.get('Broker').bind_as(BrokerSettings)
logger = get_logger(__package__)
mode_name = 'active' if app_settings.active_mode else 'passive'
logger.info(f'Codeine started in {mode_name} mode.')
handler = create_command_handler(computation_manager.pool)
broker = create_broker(broker_settings, create_command_mapper())
broker.start()
broker.discover_network()
subproblem: Optional[Subproblem] = None
active_mode = app_settings.active_mode
any_free_subproblems = True
ttt = time()
try:
while True:
if computation_manager.pool.not_started_pool:
any_free_subproblems = True
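# Roughly every 5 seconds: log the pool state, rediscover peers and
# broadcast the current progress to the network.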
if time() - ttt > 5:
display_pool(computation_manager.pool, logger)
broker.discover_network()
broker.broadcast(ProgressCommand(*computation_manager.get_progress()))
ttt = time()
if active_mode and any_free_subproblems:
if requested_subproblem_drop(subproblem, computation_manager):
subproblem.stop()
logger.info(f'Subproblem drop requested.')
if subproblem is None:
try:
subproblem = computation_manager.create_random()
subproblem.start()
identifier = subproblem.identifier
broker.broadcast(RegisterCommand(identifier))
logger.info(f'Subproblem #{identifier} has started.')
except EmptySubproblemPoolError:
logger.warning('No more subproblems to take.')
any_free_subproblems = False
elif not subproblem.is_alive():
identifier = subproblem.identifier
if computation_manager.pool.get_id_in_progress_locally() is None:
logger.info(f'Subproblem #{identifier} has been dropped.')
else:
result = subproblem.result
computation_manager.handle_completed(subproblem)
broadcast_result(subproblem, broker)
logger.info(f'Subproblem #{identifier} has ended (result: {result}).')
subproblem = None
results = computation_manager.pool.results
if computation_manager.stop_condition_is_met():
active_mode = False
logger.info(f'Stop condition is met: {results}')
elif computation_manager.all_subproblems_finished():
any_free_subproblems = False
logger.info(f'All subproblems finished: {results}')
for payload in broker.get_payloads():
try:
logger.info(f'Received command from {payload.address}: {payload.command}')
responses = handler.handle(payload)
for response in responses:
broker.send(response)
except CommandNotRegisteredException as exc:
logger.error(f'Unregistered command received from {payload.address}: {exc}')
logger.info(computation_manager.pool.results)
if not broker.is_alive():
break
sleep(0.01)
except KeyboardInterrupt:
pass
except BaseException as exc:
logger.exception(f'An unexpected exception has occurred: {exc}')
logger.info('Gracefully stopping Codeine...')
broker.stop()
broker.join()
if subproblem:
subproblem.stop()
subproblem.join()
logger.info('Gracefully stopped.')
def requested_subproblem_drop(subproblem, computation_manager) -> bool:
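# True when a subproblem is still running locally but the pool no longer
# marks any subproblem as "in progress locally" (e.g. it was pruned or
# claimed elsewhere), signalling that the local computation should stop.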
return (computation_manager.pool.get_id_in_progress_locally() is None
and subproblem is not None)
def create_broker(broker_settings: BrokerSettings, mapper: CommandMapper) -> Broker:
logger = get_logger('broker')
connection = NetworkConnection(broker_settings.connection)
broker = LoggingBroker(connection, logger, mapper, broker_settings)
broker.on_prune(lambda addr: PruneCommand(addr))
return broker
def create_command_mapper() -> CommandMapper:
return CommandMapper() \
.register(ResultCommand) \
.register(RegisterCommand) \
.register(DropCommand) \
.register(ProgressCommand)
def create_command_handler(pool: SubproblemPool) -> CommandHandler:
return CommandHandler() \
.register(DomainCommand, pool)
def broadcast_result(subproblem: Subproblem, broker: Broker):
command = ResultCommand(subproblem.identifier, subproblem.result)
broker.broadcast(command)
def display_pool(pool: SubproblemPool, logger):
logger.info(f'[Not started] {pool.not_started_pool}')
logger.info(f'[In progress] {pool.in_progress_pool}')
logger.info(f' [Solved] {pool.results}')
if __name__ == '__main__':
initialize()
PROBLEM = get_computational_problem()
ResultCommand = PROBLEM.result_command_type
RegisterCommand = PROBLEM.register_command_type
DropCommand = PROBLEM.drop_command_type
ProgressCommand = PROBLEM.progress_command_type
main(ComputationManager(PROBLEM))
|
the-stack_0_17148
|
# PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <[email protected]>
"""
import SimpleXMLRPCServer
import threading
import time
import pickle
import pyalgotrade.logger
class AutoStopThread(threading.Thread):
def __init__(self, server):
threading.Thread.__init__(self)
self.__server = server
def run(self):
while self.__server.jobsPending():
time.sleep(1)
self.__server.stop()
class Results(object):
"""The results of the strategy executions."""
def __init__(self, parameters, result):
self.__parameters = parameters
self.__result = result
def getParameters(self):
"""Returns a sequence of parameter values."""
return self.__parameters
def getResult(self):
"""Returns the result for a given set of parameters."""
return self.__result
class Job(object):
def __init__(self, strategyParameters):
self.__strategyParameters = strategyParameters
self.__bestResult = None
self.__bestParameters = None
self.__id = id(self)
def getId(self):
return self.__id
def getNextParameters(self):
ret = None
if len(self.__strategyParameters):
ret = self.__strategyParameters.pop()
return ret
def getBestParameters(self):
return self.__bestParameters
def getBestResult(self):
return self.__bestResult
def getBestWorkerName(self):
return self.__bestWorkerName
def setBestResult(self, result, parameters, workerName):
self.__bestResult = result
self.__bestParameters = parameters
self.__bestWorkerName = workerName
# Restrict to a particular path.
class RequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
rpc_paths = ('/PyAlgoTradeRPC',)
class Server(SimpleXMLRPCServer.SimpleXMLRPCServer):
defaultBatchSize = 200
def __init__(self, address, port, autoStop=True):
SimpleXMLRPCServer.SimpleXMLRPCServer.__init__(self, (address, port), requestHandler=RequestHandler, logRequests=False, allow_none=True)
self.__instrumentsAndBars = None # Pickle'd instruments and bars for faster retrieval.
self.__barsFreq = None
self.__activeJobs = {}
self.__activeJobsLock = threading.Lock()
self.__parametersLock = threading.Lock()
self.__bestJob = None
self.__parametersIterator = None
self.__logger = pyalgotrade.logger.getLogger("server")
if autoStop:
self.__autoStopThread = AutoStopThread(self)
else:
self.__autoStopThread = None
self.register_introspection_functions()
self.register_function(self.getInstrumentsAndBars, 'getInstrumentsAndBars')
self.register_function(self.getBarsFrequency, 'getBarsFrequency')
self.register_function(self.getNextJob, 'getNextJob')
self.register_function(self.pushJobResults, 'pushJobResults')
self.__forcedStop = False
def __getNextParams(self):
ret = []
# Get the next set of parameters.
with self.__parametersLock:
if self.__parametersIterator is not None:
try:
for i in xrange(Server.defaultBatchSize):
ret.append(self.__parametersIterator.next())
except StopIteration:
self.__parametersIterator = None
return ret
def getLogger(self):
return self.__logger
def getInstrumentsAndBars(self):
return self.__instrumentsAndBars
def getBarsFrequency(self):
return str(self.__barsFreq)
def getBestJob(self):
return self.__bestJob
def getNextJob(self):
ret = None
params = []
# Get the next set of parameters.
params = self.__getNextParams()
# Map the active job
if len(params):
ret = Job(params)
with self.__activeJobsLock:
self.__activeJobs[ret.getId()] = ret
return pickle.dumps(ret)
def jobsPending(self):
if self.__forcedStop:
return False
with self.__parametersLock:
jobsPending = self.__parametersIterator is not None
with self.__activeJobsLock:
activeJobs = len(self.__activeJobs) > 0
return jobsPending or activeJobs
def pushJobResults(self, jobId, result, parameters, workerName):
jobId = pickle.loads(jobId)
result = pickle.loads(result)
parameters = pickle.loads(parameters)
workerName = pickle.loads(workerName)
job = None
# Get the active job and remove the mapping.
with self.__activeJobsLock:
try:
job = self.__activeJobs[jobId]
del self.__activeJobs[jobId]
except KeyError:
# The job's results were already submitted.
return
# Save the job with the best result
if self.__bestJob is None or result > self.__bestJob.getBestResult():
job.setBestResult(result, parameters, workerName)
self.__bestJob = job
self.getLogger().info("Partial result %s with parameters: %s from %s" % (result, parameters, workerName))
def stop(self):
self.shutdown()
def serve(self, barFeed, strategyParameters):
ret = None
try:
# Initialize instruments, bars and parameters.
self.getLogger().info("Loading bars")
loadedBars = []
for dateTime, bars in barFeed:
loadedBars.append(bars)
instruments = barFeed.getRegisteredInstruments()
self.__instrumentsAndBars = pickle.dumps((instruments, loadedBars))
self.__barsFreq = barFeed.getFrequency()
self.__parametersIterator = iter(strategyParameters)
if self.__autoStopThread:
self.__autoStopThread.start()
self.getLogger().info("Waiting for workers")
self.serve_forever()
if self.__autoStopThread:
self.__autoStopThread.join()
# Show the best result.
bestJob = self.getBestJob()
if bestJob:
self.getLogger().info("Best final result %s with parameters: %s from client %s" % (bestJob.getBestResult(), bestJob.getBestParameters(), bestJob.getBestWorkerName()))
ret = Results(bestJob.getBestParameters(), bestJob.getBestResult())
else:
self.getLogger().error("No jobs processed")
finally:
self.__forcedStop = True
return ret
def serve(barFeed, strategyParameters, address, port):
"""Executes a server that will provide bars and strategy parameters for workers to use.
:param barFeed: The bar feed that each worker will use to backtest the strategy.
:type barFeed: :class:`pyalgotrade.barfeed.BarFeed`.
:param strategyParameters: The set of parameters to use for backtesting. An iterable object where **each element is a tuple that holds parameter values**.
:param address: The address to listen for incoming worker connections.
:type address: string.
:param port: The port to listen for incoming worker connections.
:type port: int.
:rtype: A :class:`Results` instance with the best results found.
"""
s = Server(address, port)
return s.serve(barFeed, strategyParameters)
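# Hypothetical usage sketch (feed construction and parameter ranges are
# illustrative, not part of this module):
#
#   import itertools
#   from pyalgotrade.barfeed import yahoofeed
#
#   feed = yahoofeed.Feed()
#   feed.addBarsFromCSV("orcl", "orcl-2000.csv")
#   results = serve(feed, itertools.product(range(10, 20), range(20, 30)),
#                   "localhost", 5000)
#   if results is not None:
#       print("Best parameters:", results.getParameters())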
|
the-stack_0_17149
|
#!c:\users\lgale\pycharmprojects\test\venv\scripts\python.exe
# $Id: rst2odt_prepstyles.py 8346 2019-08-26 12:11:32Z milde $
# Author: Dave Kuhlman <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Fix a word-processor-generated styles.odt for odtwriter use: Drop page size
specifications from styles.xml in STYLE_FILE.odt.
"""
# Author: Michael Schutte <[email protected]>
from __future__ import print_function
from lxml import etree
import sys
import zipfile
from tempfile import mkstemp
import shutil
import os
NAMESPACES = {
"style": "urn:oasis:names:tc:opendocument:xmlns:style:1.0",
"fo": "urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0"
}
def prepstyle(filename):
zin = zipfile.ZipFile(filename)
styles = zin.read("styles.xml")
root = etree.fromstring(styles)
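# Strip every fo:* attribute (page size, margins, etc.) from the
# style:page-layout-properties nodes so the styles file no longer forces
# a fixed page geometry.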
for el in root.xpath("//style:page-layout-properties",
namespaces=NAMESPACES):
for attr in el.attrib:
if attr.startswith("{%s}" % NAMESPACES["fo"]):
del el.attrib[attr]
tempname = mkstemp()
zout = zipfile.ZipFile(os.fdopen(tempname[0], "w"), "w",
zipfile.ZIP_DEFLATED)
for item in zin.infolist():
if item.filename == "styles.xml":
zout.writestr(item, etree.tostring(root))
else:
zout.writestr(item, zin.read(item.filename))
zout.close()
zin.close()
shutil.move(tempname[1], filename)
def main():
args = sys.argv[1:]
if len(args) != 1:
print(__doc__, file=sys.stderr)
print("Usage: %s STYLE_FILE.odt\n" % sys.argv[0], file=sys.stderr)
sys.exit(1)
filename = args[0]
prepstyle(filename)
if __name__ == '__main__':
main()
|
the-stack_0_17150
|
import siliconcompiler
import multiprocessing
import os
import pytest
# unit routine
def run_design(datadir, design, N, job):
chip = siliconcompiler.Chip(loglevel='INFO')
chip.set('design', design)
chip.add('source', os.path.join(datadir, f'{design}.v'))
chip.set('param', 'N', str(N))
chip.set('jobname', job)
chip.set('relax', True)
chip.set('quiet', True)
chip.set('steplist', ['import', 'syn'])
chip.target("asicflow_freepdk45")
chip.run()
@pytest.mark.eda
@pytest.mark.quick
def test_doe(scroot):
'''Test running multiple experiments sweeping different parameters in
parallel using multiprocessing library.'''
datadir = os.path.join(scroot, 'third_party', 'designs', 'oh', 'stdlib', 'hdl')
design = 'oh_add'
N = [4, 8, 16, 32, 64, 128]
# Define parallel processing
processes = []
for i in range(len(N)):
job = 'job' + str(i)
processes.append(multiprocessing.Process(target=run_design,
args=(datadir,
design,
str(N[i]),
job
)))
# Boiler plate start and join
for p in processes:
p.start()
for p in processes:
p.join()
# Post-processing data
chip = siliconcompiler.Chip()
prev_area = 0
for i in range(len(N)):
jobname = 'job'+str(i)
chip.read_manifest(f"build/{design}/{jobname}/syn/0/outputs/{design}.pkg.json", job=jobname)
area = chip.get('metric','syn','0','cellarea','real', job=jobname)
# expect to have increasing area as we increase adder width
assert area > prev_area
prev_area = area
if __name__ == "__main__":
from tests.fixtures import scroot
test_doe(scroot())
|
the-stack_0_17151
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import i2c
from esphome.const import CONF_FREQUENCY, CONF_ID
DEPENDENCIES = ["i2c"]
MULTI_CONF = True
pca9685_ns = cg.esphome_ns.namespace("pca9685")
PCA9685Output = pca9685_ns.class_("PCA9685Output", cg.Component, i2c.I2CDevice)
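# Note: the allowed frequency range follows from the PCA9685 prescaler,
# f = 25 MHz / (4096 * (prescale + 1)) with prescale in 3..255, which gives
# roughly 23.84 Hz to 1525.88 Hz.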
CONFIG_SCHEMA = (
cv.Schema(
{
cv.GenerateID(): cv.declare_id(PCA9685Output),
cv.Required(CONF_FREQUENCY): cv.All(
cv.frequency, cv.Range(min=23.84, max=1525.88)
),
}
)
.extend(cv.COMPONENT_SCHEMA)
.extend(i2c.i2c_device_schema(0x40))
)
async def to_code(config):
var = cg.new_Pvariable(config[CONF_ID], config[CONF_FREQUENCY])
await cg.register_component(var, config)
await i2c.register_i2c_device(var, config)
|
the-stack_0_17153
|
import setuptools
from scrape_songs.__version__ import __version__
with open("README.md", 'r') as f:
long_description = f.read()
setuptools.setup(
name="scrape-songs",
version=__version__,
python_requires=">=3.7",
install_requires=['scrapy', 'wikipedia'],
description="A tool used to collect lists of song" \
" names from albums on wikipedia and format them.",
author="QualityHammer",
author_email="[email protected]",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/QualityHammer/Whats-on-this-Album",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points={
"console_scripts": ["scrape_songs=scrape_songs.client:run"]
}
)
|
the-stack_0_17154
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
"""
tf2onnx.tf2onnx - range op conversion
"""
import numpy as np
from onnx.onnx_pb import TensorProto
from tf2onnx import utils
# pylint: disable=unused-argument,missing-docstring
def make_range_const(ctx, start, limit, delta, output, scope_name, shape, dtype):
"""make Range subgraph if all inputs are const."""
# T range = Range(T start, T limit, T delta)
# V v_final_and_scan_outputs = Loop(int64 M, B cond, V v_initial)
base_name = utils.make_name(scope_name)
start = ctx.get_node_by_output(start).get_tensor_value(as_list=False)
limit = ctx.get_node_by_output(limit).get_tensor_value(as_list=False)
delta = ctx.get_node_by_output(delta).get_tensor_value(as_list=False)
val = np.arange(start, limit, delta, dtype=start.dtype)
const_range = ctx.make_const(base_name, val)
ctx.make_node("Identity", [const_range.output[0]], shapes=[shape], dtypes=[dtype], outputs=[output])
def make_range_non_const(ctx, start, limit, delta, output, scope_name, shape, dtype):
"""make Range subgraph."""
# T range = Range(T start, T limit, T delta)
# V v_final_and_scan_outputs = Loop(int64 M, B cond, V v_initial)
base_name = utils.make_name(scope_name)
# trip_count
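# trip count = ceil((limit - start) / delta); integer inputs are cast to
# float first so that Div/Ceil are well defined.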
diff_node = ctx.make_node("Sub",
[limit, start],
op_name_scope=base_name,
name=utils.make_name("diff"))
diff_output = diff_node.output[0]
delta_cast = delta
if dtype in [TensorProto.INT32, TensorProto.INT64]:
cast_node = ctx.make_node("Cast", [diff_output], op_name_scope=base_name,
name="cast_diff", attr={"to": TensorProto.FLOAT})
diff_output = cast_node.output[0]
cast_node = ctx.make_node("Cast", [delta], op_name_scope=base_name, name="cast_delta",
attr={"to": TensorProto.FLOAT})
delta_cast = cast_node.output[0]
div_node = ctx.make_node("Div", [diff_output, delta_cast], op_name_scope=base_name, name="div")
ceil_node = ctx.make_node("Ceil", [div_node.output[0]], op_name_scope=base_name, name="ceil")
trip_count_node = ctx.make_node("Cast", [ceil_node.output[0]], op_name_scope=base_name, name="trip_cnt",
attr={"to": TensorProto.INT64})
# cond
# Use initializer here since Constant OP before opset 9 does not support bool type
cond_name = "{}_cond".format(base_name)
ctx.make_const(cond_name, np.ones((), dtype=bool))
# body
g = ctx.create_new_graph_with_same_config()
g.make_node("Identity", ["cond"], outputs=["cond_out"])
g.make_node("Add", ["prev", delta], outputs=["current"], name=utils.make_name("add"))
g.make_node("Identity", ["prev"], outputs=["range"])
g.add_graph_input("i", TensorProto.INT64, [])
g.add_graph_input("cond", TensorProto.BOOL, [])
g.add_graph_input("prev", dtype, [])
g.add_graph_output("cond_out", TensorProto.BOOL, [])
g.add_graph_output("current", dtype, [])
g.add_graph_output("range", dtype, [])
# loop
loop_inputs = [trip_count_node.output[0], cond_name, start]
loop_node = ctx.make_node("Loop", loop_inputs, output_count=2, op_name_scope=base_name, name="loop")
loop_node.set_body_graph_as_attr("body", g)
ctx.make_node("Identity", [loop_node.output[1]], name=base_name, shapes=[shape],
dtypes=[dtype], outputs=[output])
def make_range(ctx, start, limit, delta, output, scope_name, shape, dtype):
if all(ctx.get_node_by_output(n).is_const() for n in [start, limit, delta]) is True:
make_range_const(ctx, start, limit, delta, output, scope_name, shape, dtype)
else:
make_range_non_const(ctx, start, limit, delta, output, scope_name, shape, dtype)
def range_op7(ctx, node, name, args):
"""Range."""
# T range = Range(T start, T limit, T delta)
# V v_final_and_scan_outputs = Loop(int64 M, B cond, V v_initial)
dtype = node.get_attr_int("Tidx")
shape = node.output_shapes[0]
utils.make_sure(dtype is not None, "Tidx of %s is None", node.name)
ctx.remove_node(node.name)
make_range(ctx, node.input[0], node.input[1], node.input[2],
node.output[0], name, shape, dtype)
|
the-stack_0_17155
|
from django.contrib import admin
from . import models
class BonderAdmin(admin.ModelAdmin):
list_display = (
'id',
'semiconductor',
'name',
'slug',
'description',
'product_code',
'model',
'condition',
'warranty',
'seller',
'manufacturer',
'image',
'availability',
'price',
'created',
'updated'
)
admin.site.register(models.Bonder, BonderAdmin)
|
the-stack_0_17156
|
import os, sys
import lesscpy
from shutil import copyfile, rmtree
from jupyter_core.paths import jupyter_config_dir, jupyter_data_dir
from glob import glob
from tempfile import mkstemp
# path to local site-packages/jupyterthemes
package_dir = os.path.dirname(os.path.realpath(__file__))
# path to user jupyter-themes dir
user_dir = os.path.join(os.path.expanduser('~'), '.jupyter-themes')
# path to save tempfile with style_less before reading/compiling
_, tempfile = mkstemp('.less')
_, vimtemp = mkstemp('.less')
# path to install custom.css file (~/.jupyter/custom/)
jupyter_home = jupyter_config_dir()
jupyter_data = jupyter_data_dir()
jupyter_custom = os.path.join(jupyter_home, 'custom')
jupyter_custom_fonts = os.path.join(jupyter_custom, 'fonts')
jupyter_customcss = os.path.join(jupyter_custom, 'custom.css')
jupyter_customjs = os.path.join(jupyter_custom, 'custom.js')
jupyter_nbext = os.path.join(jupyter_data, 'nbextensions')
# theme colors, layout, and font directories
layouts_dir = os.path.join(package_dir, 'layout')
styles_dir = os.path.join(package_dir, 'styles')
styles_dir_user = os.path.join(user_dir, 'styles')
fonts_dir = os.path.join(package_dir, 'fonts')
defaults_dir = os.path.join(package_dir, 'defaults')
# default custom.css/js files to override JT on reset
defaultCSS = os.path.join(defaults_dir, 'custom.css')
defaultJS = os.path.join(defaults_dir, 'custom.js')
# layout files for notebook, codemirror, cells, extras, completer, & vim ext
nb_style = os.path.join(layouts_dir, 'notebook.less')
cm_style = os.path.join(layouts_dir, 'codemirror.less')
cl_style = os.path.join(layouts_dir, 'cells.less')
ex_style = os.path.join(layouts_dir, 'extras.less')
vim_style = os.path.join(layouts_dir, 'vim.less')
comp_style = os.path.join(layouts_dir, 'completer.less')
theme_name_file = os.path.join(jupyter_custom, 'current_theme.txt')
def fileOpen(filename, mode):
if sys.version_info[0]==3:
return open(filename, mode, encoding='utf8', errors='ignore')
else:
return open(filename, mode)
def check_directories():
# Ensure all install dirs exist
if not os.path.isdir(jupyter_home):
os.makedirs(jupyter_home)
if not os.path.isdir(jupyter_custom):
os.makedirs(jupyter_custom)
if not os.path.isdir(jupyter_custom_fonts):
os.makedirs(jupyter_custom_fonts)
if not os.path.isdir(jupyter_data):
os.makedirs(jupyter_data)
if not os.path.isdir(jupyter_nbext):
os.makedirs(jupyter_nbext)
def less_to_css(style_less):
""" write less-compiled css file to jupyter_customcss in jupyter_dir
"""
with fileOpen(tempfile, 'w') as f:
f.write(style_less)
os.chdir(package_dir)
style_css = lesscpy.compile(tempfile)
style_css += '\n\n'
return style_css
def write_final_css(style_css):
# install style_css to .jupyter/custom/custom.css
with fileOpen(jupyter_customcss, 'w') as custom_css:
custom_css.write(style_css)
def install_precompiled_theme(theme):
# for Python 3.5, install selected theme from precompiled defaults
compiled_dir = os.path.join(styles_dir, 'compiled')
compiled_dir_user = os.path.join(styles_dir_user, 'compiled')
if (os.path.isdir(compiled_dir_user) and
'{}.css'.format(theme) in os.listdir(compiled_dir_user)):
theme_src = os.path.join(compiled_dir_user, '{}.css'.format(theme))
else:
theme_src = os.path.join(compiled_dir, '{}.css'.format(theme))
theme_dst = os.path.join(jupyter_custom, 'custom.css')
copyfile(theme_src, theme_dst)
def send_fonts_to_jupyter(font_file_path):
fname = font_file_path.split(os.sep)[-1]
copyfile(font_file_path, os.path.join(jupyter_custom_fonts, fname))
def delete_font_files():
for fontfile in os.listdir(jupyter_custom_fonts):
abspath = os.path.join(jupyter_custom_fonts, fontfile)
os.remove(abspath)
def convert_fontsizes(fontsizes):
# if triple digits, move decimal (105 --> 10.5)
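# e.g. '105' -> '10.5', '45' -> '4.5', '11' -> '11'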
fontsizes = [str(fs) for fs in fontsizes]
for i, fs in enumerate(fontsizes):
if len(fs) >= 3:
fontsizes[i] = '.'.join([fs[:-1], fs[-1]])
elif int(fs) > 25:
fontsizes[i] = '.'.join([fs[0], fs[-1]])
return fontsizes
def set_font_properties(style_less,
nbfont=None,
tcfont=None,
monofont=None,
monosize=11,
tcfontsize=13,
nbfontsize=13,
prfontsize=95,
dffontsize=93,
outfontsize=85,
mathfontsize=100,
dfonts=False):
"""Parent function for setting notebook, text/md, and
codecell font-properties
"""
fontsizes = [monosize, nbfontsize, tcfontsize, prfontsize, dffontsize, outfontsize]
monosize, nbfontsize, tcfontsize, prfontsize, dffontsize, outfontsize = convert_fontsizes(fontsizes)
if dfonts==True:
monofont, tcfont, nbfont = ['monospace', 'sans-serif', 'sans-serif']
else:
if monofont is not None:
monofont, monofpath = stored_font_dicts(monofont)
style_less = import_fonts(style_less, monofont, monofpath)
else:
monofont='monospace'
if tcfont is not None:
tcfont, tcfontpath = stored_font_dicts(tcfont)
style_less = import_fonts(style_less, tcfont, tcfontpath)
else:
tcfont='sans-serif'
if nbfont is not None:
if nbfont == 'proxima':
nbfont, tcfont = ["'Proxima Nova'"]*2
style_less = proxima_nova_imports(style_less)
else:
nbfont, nbfontpath = stored_font_dicts(nbfont)
style_less = import_fonts(style_less, nbfont, nbfontpath)
else:
nbfont='sans-serif'
style_less += '/* Set Font-Type and Font-Size Variables */\n'
# font names and fontfamily info for codecells, notebook & textcells
style_less += '@monofont: {}; \n'.format(monofont)
style_less += '@notebook-fontfamily: {}; \n'.format(nbfont)
style_less += '@text-cell-fontfamily: {}; \n'.format(tcfont)
# font size for codecells, main notebook, notebook-sub, & textcells
style_less += '@monofontsize: {}pt; \n'.format(monosize)
style_less += '@monofontsize-sub: {}pt; \n'.format(float(monosize) - 1)
style_less += '@nb-fontsize: {}pt; \n'.format(nbfontsize)
style_less += '@nb-fontsize-sub: {}pt; \n'.format(float(nbfontsize) - 1)
style_less += '@text-cell-fontsize: {}pt; \n'.format(tcfontsize)
style_less += '@df-header-fontsize: {}pt; \n'.format(float(dffontsize) + 1)
style_less += '@df-fontsize: {}pt; \n'.format(dffontsize)
style_less += '@output-font-size: {}pt; \n'.format(outfontsize)
style_less += '@prompt-fontsize: {}pt; \n'.format(prfontsize)
style_less += '@mathfontsize: {}%; \n'.format(mathfontsize)
style_less += '\n\n'
style_less += '/* Import Theme Colors and Define Layout Variables */\n'
return style_less
def import_fonts(style_less, fontname, font_subdir):
"""Copy all custom fonts to ~/.jupyter/custom/fonts/ and
write import statements to style_less
"""
ftype_dict = {'woff2': 'woff2',
'woff': 'woff',
'ttf': 'truetype',
'otf': 'opentype',
'svg': 'svg'}
define_font = (
"@font-face {{font-family: {fontname};\n\tfont-weight:"
"{weight};\n\tfont-style: {style};\n\tsrc: local('{fontname}'),"
"\n\turl('fonts{sepp}{fontfile}') format('{ftype}');}}\n")
fontname = fontname.split(',')[0]
fontpath = os.path.join(fonts_dir, font_subdir)
for fontfile in os.listdir(fontpath):
if '.txt' in fontfile or 'DS_' in fontfile:
continue
weight = 'normal'
style = 'normal'
if 'medium' in fontfile:
weight = 'medium'
elif 'ital' in fontfile:
style = 'italic'
ft = ftype_dict[fontfile.split('.')[-1]]
style_less += define_font.format(
fontname=fontname,
weight=weight,
style=style,
sepp='/',
fontfile=fontfile,
ftype=ft)
send_fonts_to_jupyter(os.path.join(fontpath, fontfile))
return style_less
def style_layout(style_less,
theme='grade3',
cursorwidth=2,
cursorcolor='default',
cellwidth='980',
lineheight=170,
margins='auto',
vimext=False,
toolbar=False,
nbname=False,
kernellogo=False,
altprompt=False,
altmd=False,
altout=False,
hideprompt=False):
"""Set general layout and style properties of text and code cells"""
# write theme name to ~/.jupyter/custom/ (referenced by jtplot.py)
with fileOpen(theme_name_file, 'w') as f:
f.write(theme)
if (os.path.isdir(styles_dir_user) and
'{}.less'.format(theme) in os.listdir(styles_dir_user)):
theme_relpath = os.path.relpath(
os.path.join(styles_dir_user, theme), package_dir)
else:
theme_relpath = os.path.relpath(
os.path.join(styles_dir, theme), package_dir)
style_less += '@import "{}";\n'.format(theme_relpath)
textcell_bg = '@cc-input-bg'
promptText = '@input-prompt'
promptBG = '@cc-input-bg'
promptPadding = '.25em'
promptBorder = '2px solid @prompt-line'
tcPromptBorder = '2px solid @tc-prompt-std'
promptMinWidth = 11.5
outpromptMinWidth = promptMinWidth +.5 # remove + 3 since it will overlay output print() text
tcPromptWidth = promptMinWidth + .5
tcPromptFontsize = "@prompt-fontsize"
ccOutputBG = '@cc-output-bg-default'
if theme in ['grade3', 'gispo']:
textcell_bg = '@notebook-bg'
if altprompt:
promptPadding = '.1em'
promptMinWidth = 8
outpromptMinWidth = promptMinWidth + .5
tcPromptWidth = promptMinWidth + .5
promptText = 'transparent'
tcPromptBorder = '2px solid transparent'
if altmd:
textcell_bg = '@notebook-bg'
tcPromptBorder = '2px dotted @tc-border-selected'
if altout:
ccOutputBG = '@notebook-bg'
if margins != 'auto':
margins = '{}px'.format(margins)
if '%' not in cellwidth:
cellwidth = str(cellwidth) + 'px'
style_less += '@container-margins: {};\n'.format(margins)
style_less += '@cell-width: {}; \n'.format(cellwidth)
style_less += '@cc-line-height: {}%; \n'.format(lineheight)
style_less += '@text-cell-bg: {}; \n'.format(textcell_bg)
style_less += '@cc-prompt-width: {}ex; \n'.format(promptMinWidth)
style_less += '@cc-prompt-bg: {}; \n'.format(promptBG)
style_less += '@cc-output-bg: {}; \n'.format(ccOutputBG)
style_less += '@prompt-text: {}; \n'.format(promptText)
style_less += '@prompt-padding: {}; \n'.format(promptPadding)
style_less += '@prompt-border: {}; \n'.format(promptBorder)
style_less += '@prompt-min-width: {}ex; \n'.format(promptMinWidth)
style_less += '@out-prompt-min-width: {}ex; \n'.format(outpromptMinWidth)
style_less += '@tc-prompt-width: {}ex; \n'.format(tcPromptWidth)
style_less += '@tc-prompt-border: {}; \n'.format(tcPromptBorder)
style_less += '@cursor-width: {}px; \n'.format(cursorwidth)
style_less += '@cursor-info: @cursor-width solid {}; \n'.format(
cursorcolor)
style_less += '@tc-prompt-fontsize: {}; \n'.format(tcPromptFontsize)
style_less += '\n\n'
# read-in notebook.less (general nb style)
with fileOpen(nb_style, 'r') as notebook:
style_less += notebook.read() + '\n'
# read-in cells.less (cell layout)
with fileOpen(cl_style, 'r') as cells:
style_less += cells.read() + '\n'
# read-in extras.less (misc layout)
with fileOpen(ex_style, 'r') as extras:
style_less += extras.read() + '\n'
# read-in codemirror.less (syntax-highlighting)
with fileOpen(cm_style, 'r') as codemirror:
style_less += codemirror.read() + '\n'
with fileOpen(comp_style, 'r') as codemirror:
style_less += codemirror.read() + '\n'
style_less += toggle_settings(
toolbar, nbname, hideprompt, kernellogo) + '\n'
if vimext:
set_vim_style(theme)
return style_less
def toggle_settings(
toolbar=False, nbname=False, hideprompt=False, kernellogo=False):
"""Toggle main notebook toolbar (e.g., buttons), filename,
and kernel logo."""
toggle = ''
if toolbar:
toggle += 'div#maintoolbar {margin-left: -4px !important;}\n'
toggle += '.toolbar.container {width: 100% !important;}\n'
else:
toggle += 'div#maintoolbar {display: none !important;}\n'
if nbname:
toggle += ('span.save_widget span.filename {margin-left: 8px; height: initial;'
'font-size: 100%; color: @nb-name-fg; background-color:'
'@cc-input-bg;}\n')
toggle += ('span.save_widget span.filename:hover {color:'
'@nb-name-hover; background-color: @cc-input-bg;}\n')
toggle += ('#menubar {padding-top: 4px; background-color:'
'@notebook-bg;}\n')
else:
toggle += '#header-container {display: none !important;}\n'
if hideprompt:
toggle += 'div.prompt.input_prompt {display: none !important;}\n'
toggle += 'div.prompt.output_prompt {width: 5ex !important;}\n'
toggle += 'div.out_prompt_overlay.prompt:hover {width: 5ex !important; min-width: 5ex !important;}\n'
toggle += (
'.CodeMirror-gutters, .cm-s-ipython .CodeMirror-gutters'
'{ position: absolute; left: 0; top: 0; z-index: 3; width: 2em; '
'display: inline-block !important; }\n')
toggle += ('div.cell.code_cell .input { border-left: 5px solid @cm-gutters !important; border-bottom-left-radius: 5px; border-top-left-radius: 5px; }\n')
if kernellogo:
toggle += '@kernel-logo-display: block;'
else:
toggle += '@kernel-logo-display: none;'
return toggle
def proxima_nova_imports(style_less):
style_less += """@font-face {
font-family: 'Proxima Nova Bold';
src: url('fonts/Proxima Nova Alt Bold-webfont.eot');
src: url('fonts/Proxima Nova Alt Bold-webfont.eot?#iefix') format('embedded-opentype'),
url('fonts/Proxima Nova Alt Bold-webfont.woff2') format('woff2'),
url('fonts/Proxima Nova Alt Bold-webfont.woff') format('woff'),
url('fonts/Proxima Nova Alt Bold-webfont.ttf') format('truetype'),
url('fonts/Proxima Nova Alt Bold-webfont.svg#proxima_nova_altbold') format('svg');
font-weight: 600;
font-style: normal;
}
@font-face {
font-family: 'Proxima Nova';
src: url('fonts/Proxima Nova Alt Regular-webfont.eot');
src: url('fonts/Proxima Nova Alt Regular-webfont.eot?#iefix') format('embedded-opentype'),
url('fonts/Proxima Nova Alt Regular-webfont.woff') format('woff'),
url('fonts/Proxima Nova Alt Regular-webfont.ttf') format('truetype'),
url('fonts/Proxima Nova Alt Regular-webfont.svg#proxima_nova_altregular') format('svg');
font-weight: 400;
font-style: normal;
}"""
font_subdir = os.path.join(fonts_dir, "sans-serif/proximasans")
fontpath = os.path.join(fonts_dir, font_subdir)
for fontfile in os.listdir(font_subdir):
send_fonts_to_jupyter(os.path.join(fontpath, fontfile))
return style_less
def set_mathjax_style(style_css, mathfontsize):
"""Write mathjax settings, set math fontsize
"""
jax_style = """<script>
MathJax.Hub.Config({
"HTML-CSS": {
/*preferredFont: "TeX",*/
/*availableFonts: ["TeX", "STIX"],*/
styles: {
scale: %d,
".MathJax_Display": {
"font-size": %s,
}
}
}
});\n</script>
""" % (int(mathfontsize), '"{}%"'.format(str(mathfontsize)))
style_css += jax_style
return style_css
def set_vim_style(theme):
"""Add style and compatibility with vim notebook extension"""
vim_jupyter_nbext = os.path.join(jupyter_nbext, 'vim_binding')
if not os.path.isdir(vim_jupyter_nbext):
os.makedirs(vim_jupyter_nbext)
vim_less = '@import "styles{}";\n'.format(''.join([os.sep, theme]))
with open(vim_style, 'r') as vimstyle:
vim_less += vimstyle.read() + '\n'
with open(vimtemp, 'w') as vtemp:
vtemp.write(vim_less)
os.chdir(package_dir)
vim_css = lesscpy.compile(vimtemp)
vim_css += '\n\n'
# install vim_custom_css to ...nbextensions/vim_binding/vim_binding.css
vim_custom_css = os.path.join(vim_jupyter_nbext, 'vim_binding.css')
with open(vim_custom_css, 'w') as vim_custom:
vim_custom.write(vim_css)
def reset_default(verbose=False):
"""Remove custom.css and custom fonts"""
paths = [jupyter_custom, jupyter_nbext]
for fpath in paths:
custom = '{0}{1}{2}.css'.format(fpath, os.sep, 'custom')
try:
os.remove(custom)
except Exception:
pass
try:
delete_font_files()
except Exception:
check_directories()
delete_font_files()
copyfile(defaultCSS, jupyter_customcss)
copyfile(defaultJS, jupyter_customjs)
if os.path.exists(theme_name_file):
os.remove(theme_name_file)
if verbose:
print("Reset css and font defaults in:\n{} &\n{}".format(*paths))
def set_nb_theme(name):
"""Set theme from within notebook """
from IPython.core.display import HTML
styles_dir = os.path.join(package_dir, 'styles/compiled/')
css_path = glob('{0}/{1}.css'.format(styles_dir, name))[0]
customcss = open(css_path, "r").read()
return HTML(''.join(['<style> ', customcss, ' </style>']))
def get_colors(theme='grade3', c='default', get_dict=False):
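# Map a one-letter color code to a hex color suited to the active theme;
# 'x' resolves to the theme's code-cell foreground variable.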
if theme in ['grade3', 'gispo']:
cdict = {'default': '#ff711a',
'b': '#1e70c7',
'o': '#ff711a',
'r': '#e22978',
'p': '#AA22FF',
'g': '#2ecc71'}
else:
cdict = {'default': '#0095ff',
'b': '#0095ff',
'o': '#ff914d',
'r': '#DB797C',
'p': '#c776df',
'g': '#94c273'}
cdict['x'] = '@cc-input-fg'
if get_dict:
return cdict
return cdict[c]
def get_alt_prompt_text_color(theme):
altColors = {'grade3': '#FF7823',
'oceans16': '#667FB1',
'chesterish': '#0b98c8',
'onedork': '#94c273',
'monokai': '#94c273'}
return altColors[theme]
def stored_font_dicts(fontcode, get_all=False):
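# Resolve a short font code (e.g. 'hack', 'opensans') to a CSS font-family
# string plus the font's subdirectory under fonts/; with get_all=True the
# whole lookup table is returned instead.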
fonts = {'mono':
{'anka': ['Anka/Coder', 'anka-coder'],
'anonymous': ['Anonymous Pro', 'anonymous-pro'],
'aurulent': ['Aurulent Sans Mono', 'aurulent'],
'bitstream': ['Bitstream Vera Sans Mono', 'bitstream-vera'],
'bpmono': ['BPmono', 'bpmono'],
'code': ['Code New Roman', 'code-new-roman'],
'consolamono': ['Consolamono', 'consolamono'],
'cousine': ['Cousine', 'cousine'],
'dejavu': ['DejaVu Sans Mono', 'dejavu'],
'droidmono': ['Droid Sans Mono', 'droidmono'],
'fira': ['Fira Mono', 'fira'],
'firacode': ['Fira Code', 'firacode'],
'generic': ['Generic Mono', 'generic'],
'hack': ['Hack', 'hack'],
'hasklig': ['Hasklig', 'hasklig'],
'iosevka' : ['Iosevka', 'iosevka'],
'inputmono': ['Input Mono', 'inputmono'],
'inconsolata': ['Inconsolata-g', 'inconsolata-g'],
'liberation': ['Liberation Mono', 'liberation'],
'meslo': ['Meslo', 'meslo'],
'office': ['Office Code Pro', 'office-code-pro'],
'oxygen': ['Oxygen Mono', 'oxygen'],
'roboto': ['Roboto Mono', 'roboto'],
'saxmono': ['saxMono', 'saxmono'],
'source': ['Source Code Pro', 'source-code-pro'],
'sourcemed': ['Source Code Pro Medium', 'source-code-medium'],
'ptmono': ['PT Mono', 'ptmono'],
'ubuntu': ['Ubuntu Mono', 'ubuntu']},
'sans':
{'droidsans': ['Droid Sans', 'droidsans'],
'karla': ['Karla', 'karla'],
'opensans': ['Open Sans', 'opensans'],
'ptsans': ['PT Sans', 'ptsans'],
'sourcesans': ['Source Sans Pro', 'sourcesans'],
'robotosans': ['Roboto', 'robotosans'],
'latosans': ['Lato', 'latosans'],
'exosans': ['Exo_2', 'exosans'],
'proxima': ['Proxima Nova', 'proximasans']},
'serif':
{'ptserif': ['PT Serif', 'ptserif'],
'ebserif': ['EB Garamond', 'ebserif'],
'loraserif': ['Lora', 'loraserif'],
'merriserif': ['Merriweather', 'merriserif'],
'crimsonserif': ['Crimson Text', 'crimsonserif'],
'georgiaserif': ['Georgia', 'georgiaserif'],
'neutonserif': ['Neuton', 'neutonserif'],
'cardoserif': ['Cardo Serif', 'cardoserif'],
'goudyserif': ['Goudy Serif', 'goudyserif']}}
if get_all:
return fonts
if fontcode in list(fonts['mono']):
fontname, fontdir = fonts['mono'][fontcode]
fontfam = 'monospace'
elif fontcode in list(fonts['sans']):
fontname, fontdir = fonts['sans'][fontcode]
fontfam = 'sans-serif'
elif fontcode in list(fonts['serif']):
fontname, fontdir = fonts['serif'][fontcode]
fontfam = 'serif'
else:
print("\n\tOne of the fonts you requested is not available\n\tSetting all fonts to default")
return ''
fontdir = os.sep.join([fontfam, fontdir])
return '"{}", {}'.format(fontname, fontfam), fontdir
|
the-stack_0_17159
|
import numpy
# Expected input: Image from gaussian pyramid
def divide_picture_to_windows(picture):
height = picture.shape[0] + 1
width = picture.shape[1] + 1
x_step = 48
y_step = 48
height_of_window = 48
width_of_window = 48
list_of_windows = []
count = 0
for y in range(0, height - height_of_window, y_step):
for x in range(0, width - width_of_window, x_step):
# print(x,y)
count = count + 1
window = numpy.zeros((height_of_window, width_of_window, 3))
for j in range(height_of_window):
for i in range(width_of_window):
window[j, i] = picture[y + j, x + i]
# print("Picture pixel:", window[j, i])
list_of_windows.append(window)
# Save picture
# scipy.misc.imsave("windows/window" + str(count), thinned_image)
windows = numpy.zeros((count, height_of_window, width_of_window, 3))
for i in range(count):
windows[i] = list_of_windows[i]
return windows
def convertWindowToArray(window):
array = numpy.zeros(200)
count = 0
for y in range(10):
for x in range(20):
array[count] = window[y, x]
count = count + 1
return array
if __name__ == "__main__":
pass
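    # A minimal sketch: split a synthetic RGB image into 48x48 windows.  The
    # random array stands in for a real gaussian-pyramid level and is an
    # assumption made purely for illustration.
    demo_image = numpy.random.randint(0, 255, size=(96, 96, 3))
    demo_windows = divide_picture_to_windows(demo_image)
    print("number of 48x48 windows:", demo_windows.shape[0])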
|
the-stack_0_17160
|
import logging
import os
import time
from pyvirtualdisplay import Display
log = logging.getLogger(__name__ + str(os.getpid()))
class VirtualScreen:
def __init__(self, visible=0, size=(1920, 1080)):
"""
Init an instance of virtual display
:param visible: whether visible on screen, 0 for false, 1 for true
:param size: virtual display size in pixels, as tuple form: (width, height)
"""
self.display = Display(visible=visible, size=size)
log.info("Virtual display set up, visible: {}, size: {}".
format(False if not visible else True, size))
self.display.start()
time.sleep(1)
def __enter__(self):
log.info("Created virtual display instance.")
return self.display
def __exit__(self, exc_type, exc_val, exc_tb):
if self.display:
self.display.stop()
log.info("Virtual display stopped.")
|
the-stack_0_17163
|
from honeygrove.config import Config
import os
import re
import xml.etree.ElementTree as ET
class FilesystemParser:
honeytoken_directory = str(Config.folder.honeytoken_files)
cd_pattern = "^cd \S+$"
mkdir_pattern = "^mkdir \S+$"
touch_pattern = "^touch \S+$"
ls_pattern = "^ls \S+$"
def __init__(self, xml_path=Config.folder.filesystem):
with open(str(xml_path)) as f:
try:
self.start_path = f.readline().split("--")[1].split(",") # read first line and parse
                self.start_path = list(map(int, self.start_path))  # convert the string parts to a list of ints
except Exception:
self.start_path = [] # if nothing given, the "/" is the root-/user directory
# The current position in the tree as list
self.current_pos = self.start_path
self.xml_path = xml_path
self.tree = ET.parse(str(self.xml_path))
self.root = self.tree.getroot()
if self.root.attrib['name'] != "/":
self.mode = "DOS"
else:
self.mode = "UNIX"
        # Saves the user directory path (to show it as "~")
self.user_path = self.get_current_path()
self.add_honeytoken_files()
def get_position(self, path):
"""
        Determines the tree position for a given path
        :param path: the path whose position shall be determined
:return:
"""
path = self.get_absolute_path(path)
if not self.valid_path(path):
raise Exception("Invalid path")
position = []
if path == "/":
return position
        for element in path.split("/")[1:]:  # since we work with absolute paths, the first element is ""
children = [c.attrib['name'] for c in self.get_element(position)]
position.append(children.index(element))
return position
def get_path(self, position):
"""
        Returns the path for a given position
        :param position: the position for which the path has to be determined
:return:
"""
path = ""
current = self.root
if position == []:
return "/"
for i in position:
current = current[i]
            if current.attrib['name'] != "/":  # we don't need the root "/", it is already there
path += "/" + current.attrib['name']
return path
def get_element(self, position):
"""
Gives the element from the XML-tree
:param position: Position of the element
:return:
"""
current = self.root
for i in position:
current = current[i]
return current
def get_absolute_path(self, rel_path: str):
"""
        Converts an (absolute or relative) path into an absolute path and resolves components like ".."
        :param rel_path: the path to be converted
        :return: the absolute form of the given path
"""
if not rel_path:
return ""
if self.mode == "DOS":
if re.match(r"\w:\\", rel_path[0:2]):
rel_path = rel_path[3:]
rel_path = rel_path.replace("\\", "/")
if rel_path == "/":
return rel_path
if rel_path[0] == "~":
rel_path = rel_path.replace("~", self.user_path)
        if rel_path[0] != "/":  # only relative paths need the current path prepended as a prefix
rel_path = self.get_current_path() + "/" + rel_path
# Deletes stuff like ///, /./, ./ or /.
rel_path = re.sub(r"([^\.]|^)(\./)", r"/",
rel_path) # "beginning of the line" or " not .", followed by any amount of "./"
rel_path = re.sub(r"(/\.)$", r"/", rel_path) # the same for the end of the line
rel_path = re.sub(r"/{2,}", r"/", rel_path) # ///// goes to /
folders = rel_path.split("/")
folders = list(filter(None, folders))
i = 0
while i < len(folders):
f = folders[i]
if f == "..":
if i > 0:
folders.pop(i - 1)
folders.pop(i - 1) # same index because the list slipped by 1
else:
folders.pop(i)
i = 0
else:
i += 1
return "/" + "/".join(folders)
def tree_contains(self, file_name):
"""
Checks if a name exists somewhere in the tree
:param file_name:
:return:
"""
found = False
for child in self.root.findall('.//'):
if child.attrib['name'] == file_name:
found = True
break
return found
def add_honeytoken_files(self):
"""
        Adds the file names from the honeytoken files folder if files with the given names do not already exist
"""
for file in os.listdir(self.honeytoken_directory):
if not self.tree_contains(str(file)):
self.touch(self.user_path + "/" + file)
def get_current_path(self):
"""returns the current path as String"""
return self.get_path(self.current_pos)
def get_formatted_path(self):
"""
        Returns the current path as a platform-adjusted, printable string
:return:
"""
path = self.get_current_path()
if self.user_path == "/":
return path # if / is configured as user directory, nothing shall be replaced
if self.mode == "DOS":
return "C:" + path.replace("/", "\\")
if self.user_path in path:
path = path.replace(self.user_path, '~')
return path
def mkdir(self, path):
"""Creates a new folder at the given path"""
return self.create(path, "dir")
def touch(self, path):
"""
Creates a new file at the given path
"""
try:
return self.create(path, "file")
except Exception as e:
return e
def create(self, path, tag):
"""
Creates a new node
        :param path: path (with filename) to the new node
:param tag: Type (file or directory)
:return:
"""
path = self.get_absolute_path(path)
split = path.split("/")
file_path = "/".join(split[:-1])
file_name = split[-1]
if file_name in self.ls(file_path) or file_name == ".":
if tag == "dir":
return "mkdir: cannot create directory '" + file_name + "': File exists"
else:
                return  # already exists, shall not be created again
file_position = self.get_position(file_path)
ET.SubElement(self.get_element(file_position), tag, {"name": file_name})
def ls(self, path=''):
"""Lists all children"""
if path:
path = self.get_absolute_path(path)
pos = self.get_position(path)
else:
pos = self.current_pos
element = self.get_element(pos)
response = ""
for child in element:
response += child.attrib['name'] + '\n'
return response
def cd(self, path):
"""
Changes the position in the data tree
        :param path: absolute or relative path
        :return: None or an error message
"""
if not path:
return
input = path
path = self.get_absolute_path(path)
if not self.valid_path(path):
return input + ": No such file or directory"
self.current_pos = self.get_position(path)
return
def valid_path(self, path, tag=''):
"""
Determines if a given path exists
:param path: the path to be checked
        :param tag: if tag is given, additionally check that the element at the given path has this tag
"""
path = self.get_absolute_path(path) # just in case
if tag != 'file' and path == "/":
return True
pos = []
res = True
for p in path.split("/")[1:]:
children = [c.attrib['name'] for c in self.get_element(pos)]
if p in children:
pos.append(children.index(p))
else:
res = False
        if not (tag == '' or self.get_element(pos).tag == tag):  # not valid if the tag is not the desired one
res = False
return res
def valid_directory(self, path):
"""Determines if the given path of current_pos leads to a folder"""
return self.valid_path(path, 'dir')
def valid_file(self, path):
"""Determines if the given path of current_pos leads to a file"""
return self.valid_path(path, 'file')
def delete(self, path):
"""
Searches for a given file and deletes it if it exists
:param path: the path to the file to be deleted
:return:
"""
if path == ".." or path == ".":
return "rm: refusing to remove '.' or '..' directory: skipping '" + path + "'"
path = self.get_absolute_path(path)
if not self.valid_path(path):
return
child_name = path.split("/")[-1]
parent_path = "/".join(path.split("/")[:-1])
parent = self.get_element(self.get_position(parent_path))
for child in parent:
if child.attrib.get('name') == child_name:
parent.remove(child)
def rename(self, from_path, to_name):
"""
Changes the name of a given file
        :param from_path: path to the file to be renamed
:param to_name: new name
:return:
"""
self.move(from_path, to_name) # rename is actually just a special case of move
def move(self, sourcepath, targetpath):
"""
Moves a file from one position to another
:param sourcepath: the path to the file to be moved
:param targetpath: the destination path (with new filename)
:return:
"""
sourcepath = self.get_absolute_path(sourcepath)
targetpath = self.get_absolute_path(targetpath)
split = targetpath.split("/")
parentpath = "/" + "/".join(split[1:-1])
element = self.get_element(self.get_position(sourcepath))
sourcetype = element.tag
if not self.valid_directory(parentpath):
return "Directory not found."
else:
if self.valid_path(targetpath):
targettype = self.get_element(self.get_position(targetpath)).tag
if targettype != sourcetype:
return "Not possible"
parent = self.get_element(self.get_position(parentpath))
self.delete(sourcepath)
element.attrib['name'] = targetpath.split("/")[-1]
parent.append(element)
def cat(self, path):
"""
Returns the content of the file as String
:param path: the path to the file
:return:
"""
path = self.get_absolute_path(path)
if not self.valid_path(path):
raise Exception("File not found")
if not self.valid_file(path):
raise Exception("Is a directory")
filename = path.split("/")[-1]
for f in os.listdir(self.honeytoken_directory):
if f == filename:
with open(self.honeytoken_directory + "/" + f, "r") as fp:
data = fp.read()
return data
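# A minimal usage sketch (assumption: Config.folder.filesystem points at a valid
# filesystem XML and the honeytoken directory exists, as the constructor requires):
#
#     fs = FilesystemParser()
#     fs.mkdir("demo")
#     fs.cd("demo")
#     fs.touch("notes.txt")
#     print(fs.ls())                   # lists the new file
#     print(fs.get_formatted_path())   # e.g. "~/demo"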
|
the-stack_0_17165
|
#!/usr/bin/python
#
# tester.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import math
import sys
import os
import struct
import threading
import time
import random
import traceback
sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..')]
import fdb
fdb.api_version(int(sys.argv[2]))
from fdb import six
from fdb.impl import strinc
import fdb.tuple
from directory_extension import DirectoryExtension
from cancellation_timeout_tests import test_timeouts
from cancellation_timeout_tests import test_db_timeouts
from cancellation_timeout_tests import test_cancellation
from cancellation_timeout_tests import test_retry_limits
from cancellation_timeout_tests import test_db_retry_limits
from cancellation_timeout_tests import test_combinations
from size_limit_tests import test_size_limit_option, test_get_approximate_size
random.seed(0)
if len(sys.argv) == 4:
db = fdb.open(sys.argv[3])
else:
db = fdb.open()
class Stack:
def __init__(self):
self.stack = []
def __repr__(self):
return repr(self.stack)
def __str__(self):
return str(self.stack)
def __len__(self):
return len(self.stack)
def __getitem__(self, idx):
return self.stack[idx]
def __setitem__(self, idx, val):
self.stack[idx] = val
def push(self, idx, val):
self.stack.insert(0, (idx, val))
def pop(self, count=None, with_idx=False):
c = count
if c is None:
c = 1
raw = self.stack[:c]
del self.stack[:c]
for i in range(len(raw)):
if isinstance(raw[i][1], fdb.Future):
try:
val = raw[i][1].wait()
if val is None or (hasattr(val, 'present') and not val.present()):
raw[i] = (raw[i][0], b'RESULT_NOT_PRESENT')
else:
raw[i] = (raw[i][0], val)
except fdb.FDBError as e:
# print('ERROR: %r' % e)
raw[i] = (raw[i][0], fdb.tuple.pack((b'ERROR', str(e.code).encode('ascii'))))
if count is None:
if with_idx:
return raw[0]
else:
return raw[0][1]
else:
if with_idx:
return raw
else:
return [item[1] for item in raw]
class Instruction:
def __init__(self, tr, stack, op, index, isDatabase=False, isSnapshot=False):
self.tr = tr
self.stack = stack
self.op = op
self.index = index
self.isDatabase = isDatabase
self.isSnapshot = isSnapshot
def pop(self, count=None, with_idx=False):
return self.stack.pop(count, with_idx)
def push(self, val):
self.stack.push(self.index, val)
def test_db_options(db):
db.options.set_location_cache_size(100001)
db.options.set_max_watches(100001)
db.options.set_datacenter_id("dc_id")
db.options.set_machine_id("machine_id")
db.options.set_snapshot_ryw_enable()
db.options.set_snapshot_ryw_disable()
db.options.set_transaction_logging_max_field_length(1000)
db.options.set_transaction_timeout(100000)
db.options.set_transaction_timeout(0)
db.options.set_transaction_timeout(0)
db.options.set_transaction_max_retry_delay(100)
db.options.set_transaction_size_limit(100000)
db.options.set_transaction_retry_limit(10)
db.options.set_transaction_retry_limit(-1)
db.options.set_transaction_causal_read_risky()
db.options.set_transaction_include_port_in_address()
@fdb.transactional
def test_options(tr):
tr.options.set_priority_system_immediate()
tr.options.set_priority_batch()
tr.options.set_causal_read_risky()
tr.options.set_causal_write_risky()
tr.options.set_read_your_writes_disable()
tr.options.set_read_system_keys()
tr.options.set_access_system_keys()
tr.options.set_transaction_logging_max_field_length(1000)
tr.options.set_timeout(60 * 1000)
tr.options.set_retry_limit(50)
tr.options.set_max_retry_delay(100)
tr.options.set_used_during_commit_protection_disable()
tr.options.set_debug_transaction_identifier('my_transaction')
tr.options.set_log_transaction()
tr.options.set_read_lock_aware()
tr.options.set_lock_aware()
tr.options.set_include_port_in_address()
tr.get(b'\xff').wait()
def check_watches(db, watches, expected):
for i, watch in enumerate(watches):
if watch.is_ready() or expected:
try:
watch.wait()
if not expected:
assert False, "Watch %d is ready" % i
except fdb.FDBError as e:
tr = db.create_transaction()
tr.on_error(e).wait()
return False
return True
def test_watches(db):
while True:
db[b'w0'] = b'0'
db[b'w3'] = b'3'
watches = [None]
@fdb.transactional
def txn1(tr):
watches[0] = tr.watch(b'w0')
tr.set(b'w0', b'0')
assert not watches[0].is_ready()
txn1(db)
watches.append(db.clear_and_watch(b'w1'))
watches.append(db.set_and_watch(b'w2', b'2'))
watches.append(db.get_and_watch(b'w3'))
assert watches[3][0] == b'3'
watches[3] = watches[3][1]
time.sleep(1)
if not check_watches(db, watches, False):
continue
del db[b'w1']
time.sleep(5)
if not check_watches(db, watches, False):
continue
db[b'w0'] = b'a'
db[b'w1'] = b'b'
del db[b'w2']
db.bit_xor(b'w3', b'\xff\xff')
if check_watches(db, watches, True):
return
@fdb.transactional
def test_locality(tr):
tr.options.set_timeout(60 * 1000)
tr.options.set_read_system_keys() # We do this because the last shard (for now, someday the last N shards) is in the /FF/ keyspace
    # This isn't strictly transactional, though we expect it to be given the size of our database
boundary_keys = list(fdb.locality.get_boundary_keys(tr, b'', b'\xff\xff')) + [b'\xff\xff']
end_keys = [tr.get_key(fdb.KeySelector.last_less_than(k)) for k in boundary_keys[1:]]
start_addresses = [fdb.locality.get_addresses_for_key(tr, k) for k in boundary_keys[:-1]]
end_addresses = [fdb.locality.get_addresses_for_key(tr, k) for k in end_keys]
if [set(s.wait()) for s in start_addresses] != [set(e.wait()) for e in end_addresses]:
raise Exception("Locality not internally consistent.")
def test_predicates():
assert fdb.predicates.is_retryable(fdb.FDBError(1020))
assert not fdb.predicates.is_retryable(fdb.FDBError(10))
class Tester:
tr_map = {}
tr_map_lock = threading.RLock()
def __init__(self, db, prefix):
self.db = db
self.instructions = self.db[fdb.tuple.range((prefix,))]
self.stack = Stack()
self.tr_name = prefix
Tester.tr_map[self.tr_name] = None
self.last_version = 0
self.threads = []
self.directory_extension = DirectoryExtension()
def push_range(self, inst, iter, prefix_filter=None):
kvs = []
for k, v in iter:
if prefix_filter is None or k.startswith(prefix_filter):
kvs += [k, v]
inst.push(fdb.tuple.pack(tuple(kvs)))
@staticmethod
@fdb.transactional
def wait_empty(tr, prefix):
res = tr.get_range_startswith(prefix, 1).to_list()
if len(res) == 1:
raise fdb.FDBError(1020)
@fdb.transactional
def log_stack(self, tr, prefix, entries):
for i, (idx, el) in entries.items():
pk = prefix + fdb.tuple.pack((i, idx))
pv = fdb.tuple.pack((el,))
tr.set(pk, pv[:40000])
def current_transaction(self):
with Tester.tr_map_lock:
return Tester.tr_map[self.tr_name]
def new_transaction(self):
with Tester.tr_map_lock:
Tester.tr_map[self.tr_name] = self.db.create_transaction()
def switch_transaction(self, name):
self.tr_name = name
with Tester.tr_map_lock:
if self.tr_name not in Tester.tr_map:
self.new_transaction()
def run(self):
for idx, i in enumerate(self.instructions):
op_tuple = fdb.tuple.unpack(i.value)
op = op_tuple[0]
# print("Stack is %r" % self.stack)
# if op != "PUSH" and op != "SWAP":
# print("%d. Instruction is %s" % (idx, op))
isDatabase = op.endswith(six.u('_DATABASE'))
isSnapshot = op.endswith(six.u('_SNAPSHOT'))
if isDatabase:
op = op[:-9]
obj = self.db
elif isSnapshot:
op = op[:-9]
obj = self.current_transaction().snapshot
else:
obj = self.current_transaction()
inst = Instruction(obj, self.stack, op, idx, isDatabase, isSnapshot)
try:
if inst.op == six.u("PUSH"):
inst.push(op_tuple[1])
elif inst.op == six.u("DUP"):
inst.stack.push(*self.stack[0])
elif inst.op == six.u("EMPTY_STACK"):
self.stack = Stack()
elif inst.op == six.u("SWAP"):
idx = inst.pop()
self.stack[0], self.stack[idx] = self.stack[idx], self.stack[0]
elif inst.op == six.u("POP"):
inst.pop()
elif inst.op == six.u("SUB"):
a, b = inst.pop(2)
inst.push(a - b)
elif inst.op == six.u("CONCAT"):
a, b = inst.pop(2)
inst.push(a + b)
elif inst.op == six.u("WAIT_FUTURE"):
old_idx, item = inst.pop(with_idx=True)
inst.stack.push(old_idx, item)
elif inst.op == six.u("NEW_TRANSACTION"):
self.new_transaction()
elif inst.op == six.u("USE_TRANSACTION"):
self.switch_transaction(inst.pop())
elif inst.op == six.u("ON_ERROR"):
inst.push(inst.tr.on_error(inst.pop()))
elif inst.op == six.u("GET"):
key = inst.pop()
num = random.randint(0, 2)
if num == 0:
f = obj[key]
elif num == 1:
f = obj.get(key)
else:
f = obj.__getitem__(key)
if f == None:
inst.push(b'RESULT_NOT_PRESENT')
else:
inst.push(f)
elif inst.op == six.u("GET_ESTIMATED_RANGE_SIZE"):
begin, end = inst.pop(2)
estimatedSize = obj.get_estimated_range_size_bytes(begin, end).wait()
inst.push(b"GOT_ESTIMATED_RANGE_SIZE")
elif inst.op == six.u("GET_KEY"):
key, or_equal, offset, prefix = inst.pop(4)
result = obj.get_key(fdb.KeySelector(key, or_equal, offset))
if result.startswith(prefix):
inst.push(result)
elif result < prefix:
inst.push(prefix)
else:
inst.push(strinc(prefix))
elif inst.op == six.u("GET_RANGE"):
begin, end, limit, reverse, mode = inst.pop(5)
if limit == 0 and mode == -1 and random.random() < 0.5:
if reverse:
r = obj[begin:end:-1]
else:
r = obj[begin:end]
else:
r = obj.get_range(begin, end, limit, reverse, mode)
self.push_range(inst, r)
elif inst.op == six.u("GET_RANGE_STARTS_WITH"):
prefix, limit, reverse, mode = inst.pop(4)
self.push_range(inst, obj.get_range_startswith(prefix, limit, reverse, mode))
elif inst.op == six.u("GET_RANGE_SELECTOR"):
begin_key, begin_or_equal, begin_offset, end_key, end_or_equal, end_offset, limit, reverse, mode, prefix = inst.pop(10)
beginSel = fdb.KeySelector(begin_key, begin_or_equal, begin_offset)
endSel = fdb.KeySelector(end_key, end_or_equal, end_offset)
if limit == 0 and mode == -1 and random.random() < 0.5:
if reverse:
r = obj[beginSel:endSel:-1]
else:
r = obj[beginSel:endSel]
else:
r = obj.get_range(beginSel, endSel, limit, reverse, mode)
self.push_range(inst, r, prefix_filter=prefix)
elif inst.op == six.u("GET_READ_VERSION"):
self.last_version = obj.get_read_version().wait()
inst.push(b"GOT_READ_VERSION")
elif inst.op == six.u("SET"):
key, value = inst.pop(2)
if random.random() < 0.5:
obj[key] = value
else:
obj.set(key, value)
if obj == self.db:
inst.push(b"RESULT_NOT_PRESENT")
elif inst.op == six.u("LOG_STACK"):
prefix = inst.pop()
entries = {}
while len(self.stack) > 0:
stack_index = len(self.stack) - 1
entries[stack_index] = inst.pop(with_idx=True)
if len(entries) == 100:
self.log_stack(self.db, prefix, entries)
entries = {}
self.log_stack(self.db, prefix, entries)
elif inst.op == six.u("ATOMIC_OP"):
opType, key, value = inst.pop(3)
getattr(obj, opType.lower())(key, value)
if obj == self.db:
inst.push(b"RESULT_NOT_PRESENT")
elif inst.op == six.u("SET_READ_VERSION"):
inst.tr.set_read_version(self.last_version)
elif inst.op == six.u("CLEAR"):
if random.random() < 0.5:
del obj[inst.pop()]
else:
obj.clear(inst.pop())
if obj == self.db:
inst.push(b"RESULT_NOT_PRESENT")
elif inst.op == six.u("CLEAR_RANGE"):
begin, end = inst.pop(2)
num = random.randint(0, 2)
if num == 0:
del obj[begin:end]
elif num == 1:
obj.clear_range(begin, end)
else:
obj.__delitem__(slice(begin, end))
if obj == self.db:
inst.push(b"RESULT_NOT_PRESENT")
elif inst.op == six.u("CLEAR_RANGE_STARTS_WITH"):
obj.clear_range_startswith(inst.pop())
if obj == self.db:
inst.push(b"RESULT_NOT_PRESENT")
elif inst.op == six.u("READ_CONFLICT_RANGE"):
inst.tr.add_read_conflict_range(inst.pop(), inst.pop())
inst.push(b"SET_CONFLICT_RANGE")
elif inst.op == six.u("WRITE_CONFLICT_RANGE"):
inst.tr.add_write_conflict_range(inst.pop(), inst.pop())
inst.push(b"SET_CONFLICT_RANGE")
elif inst.op == six.u("READ_CONFLICT_KEY"):
inst.tr.add_read_conflict_key(inst.pop())
inst.push(b"SET_CONFLICT_KEY")
elif inst.op == six.u("WRITE_CONFLICT_KEY"):
inst.tr.add_write_conflict_key(inst.pop())
inst.push(b"SET_CONFLICT_KEY")
elif inst.op == six.u("DISABLE_WRITE_CONFLICT"):
inst.tr.options.set_next_write_no_write_conflict_range()
elif inst.op == six.u("COMMIT"):
inst.push(inst.tr.commit())
elif inst.op == six.u("RESET"):
inst.tr.reset()
elif inst.op == six.u("CANCEL"):
inst.tr.cancel()
elif inst.op == six.u("GET_COMMITTED_VERSION"):
self.last_version = inst.tr.get_committed_version()
inst.push(b"GOT_COMMITTED_VERSION")
elif inst.op == six.u("GET_APPROXIMATE_SIZE"):
approximate_size = inst.tr.get_approximate_size().wait()
inst.push(b"GOT_APPROXIMATE_SIZE")
elif inst.op == six.u("GET_VERSIONSTAMP"):
inst.push(inst.tr.get_versionstamp())
elif inst.op == six.u("TUPLE_PACK"):
count = inst.pop()
items = inst.pop(count)
inst.push(fdb.tuple.pack(tuple(items)))
elif inst.op == six.u("TUPLE_PACK_WITH_VERSIONSTAMP"):
prefix = inst.pop()
count = inst.pop()
items = inst.pop(count)
if not fdb.tuple.has_incomplete_versionstamp(items) and random.random() < 0.5:
inst.push(b"ERROR: NONE")
else:
try:
packed = fdb.tuple.pack_with_versionstamp(tuple(items), prefix=prefix)
inst.push(b"OK")
inst.push(packed)
except ValueError as e:
if str(e).startswith("No incomplete"):
inst.push(b"ERROR: NONE")
else:
inst.push(b"ERROR: MULTIPLE")
elif inst.op == six.u("TUPLE_UNPACK"):
for i in fdb.tuple.unpack(inst.pop()):
inst.push(fdb.tuple.pack((i,)))
elif inst.op == six.u("TUPLE_SORT"):
count = inst.pop()
items = inst.pop(count)
unpacked = map(fdb.tuple.unpack, items)
if six.PY3:
sorted_items = sorted(unpacked, key=fdb.tuple.pack)
else:
sorted_items = sorted(unpacked, cmp=fdb.tuple.compare)
for item in sorted_items:
inst.push(fdb.tuple.pack(item))
elif inst.op == six.u("TUPLE_RANGE"):
count = inst.pop()
items = inst.pop(count)
r = fdb.tuple.range(tuple(items))
inst.push(r.start)
inst.push(r.stop)
elif inst.op == six.u("ENCODE_FLOAT"):
f_bytes = inst.pop()
f = struct.unpack(">f", f_bytes)[0]
if not math.isnan(f) and not math.isinf(f) and not f == -0.0 and f == int(f):
f = int(f)
inst.push(fdb.tuple.SingleFloat(f))
elif inst.op == six.u("ENCODE_DOUBLE"):
d_bytes = inst.pop()
d = struct.unpack(">d", d_bytes)[0]
inst.push(d)
elif inst.op == six.u("DECODE_FLOAT"):
f = inst.pop()
f_bytes = struct.pack(">f", f.value)
inst.push(f_bytes)
elif inst.op == six.u("DECODE_DOUBLE"):
d = inst.pop()
d_bytes = struct.pack(">d", d)
inst.push(d_bytes)
elif inst.op == six.u("START_THREAD"):
t = Tester(self.db, inst.pop())
thr = threading.Thread(target=t.run)
thr.start()
self.threads.append(thr)
elif inst.op == six.u("WAIT_EMPTY"):
prefix = inst.pop()
Tester.wait_empty(self.db, prefix)
inst.push(b"WAITED_FOR_EMPTY")
elif inst.op == six.u("UNIT_TESTS"):
try:
test_db_options(db)
test_options(db)
test_watches(db)
test_cancellation(db)
test_retry_limits(db)
test_db_retry_limits(db)
test_timeouts(db)
test_db_timeouts(db)
test_combinations(db)
test_locality(db)
test_predicates()
test_size_limit_option(db)
test_get_approximate_size(db)
except fdb.FDBError as e:
print("Unit tests failed: %s" % e.description)
traceback.print_exc()
raise Exception("Unit tests failed: %s" % e.description)
elif inst.op.startswith(six.u('DIRECTORY_')):
self.directory_extension.process_instruction(inst)
else:
raise Exception("Unknown op %s" % inst.op)
except fdb.FDBError as e:
# print('ERROR: %r' % e)
inst.stack.push(idx, fdb.tuple.pack((b"ERROR", str(e.code).encode('ascii'))))
# print(" to %s" % self.stack)
# print()
[thr.join() for thr in self.threads]
if __name__ == '__main__':
t = Tester(db, sys.argv[1].encode('ascii'))
t.run()
|
the-stack_0_17166
|
import unittest
from unittest.mock import MagicMock
from colorchanger import colorchanger
class MyTestCase(unittest.TestCase):
@staticmethod
def test_set_hue_color():
# Given
hue_light_id = 1
rgb_color = (0, 255, 0)
colorchanger.hue_bridge.set_light = MagicMock(return_value=None)
xy = colorchanger.converter.rgb_to_xy(0, 255, 0)
# When
colorchanger.set_hue_color(hue_light_id, rgb_color)
# Then
colorchanger.hue_bridge.set_light.assert_called_with(hue_light_id, 'xy', xy)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_17169
|
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for app_dev_linter.py."""
from __future__ import annotations
import io
import multiprocessing
import os
from core import utils
from core.tests import test_utils
from . import other_files_linter
from . import pre_commit_linter
NAME_SPACE = multiprocessing.Manager().Namespace()
PROCESSES = multiprocessing.Manager().dict()
NAME_SPACE.files = pre_commit_linter.FileCache()
FILE_CACHE = NAME_SPACE.files
LINTER_TESTS_DIR = os.path.join(os.getcwd(), 'scripts', 'linters', 'test_files')
class CustomLintChecksManagerTests(test_utils.LinterTestBase):
"""Tests for CustomLintChecksManager."""
def setUp(self):
super(CustomLintChecksManagerTests, self).setUp()
self.verbose_mode_enabled = False
self.dependencies_file = io.StringIO(
'{\"dependencies\":{\"frontend\":{\"guppy\":'
'{\"version\": \"0.1\"},\"skulpt-dist\":{\"version\": \"0.2\"}'
',\"midiJs\":{\"version\": \"0.4\"}}}}')
self.package_file = io.StringIO(
'{\"dependencies\":{\"nerdamer\":\"^0.6\"}}')
self.files_in_typings_dir = [
'guppy-defs-0.1.d.ts',
'skulpt-defs-0.2.d.ts',
'midi-defs-0.4.d.ts',
'nerdamer-defs-0.6.d.ts'
]
def mock_open_file(path, unused_permissions):
if path == other_files_linter.DEPENDENCIES_JSON_FILE_PATH:
return self.dependencies_file
elif path == other_files_linter.PACKAGE_JSON_FILE_PATH:
return self.package_file
def mock_listdir(unused_path):
return self.files_in_typings_dir
self.open_file_swap = self.swap(
utils, 'open_file', mock_open_file)
self.listdir_swap = self.swap(os, 'listdir', mock_listdir)
def test_check_valid_pattern_in_app_dev_yaml(self):
def mock_readlines(unused_self, unused_filepath):
return (
'# Just a comment',
'# Third party files:',
'- third_party/static/bootstrap-4.3.1/')
readlines_swap = self.swap(
pre_commit_linter.FileCache, 'readlines', mock_readlines)
with readlines_swap:
error_messages = other_files_linter.CustomLintChecksManager(
FILE_CACHE).check_skip_files_in_app_dev_yaml()
expected_error_messages = ['SUCCESS App dev file check passed']
self.assertEqual(
error_messages.get_report(), expected_error_messages)
self.assertEqual('App dev file', error_messages.name)
self.assertFalse(error_messages.failed)
def test_check_invalid_pattern_in_app_dev_yaml(self):
def mock_readlines(unused_self, unused_filepath):
return (
'# Third party files:', '- third_party/static/bootstrap-4.3/')
readlines_swap = self.swap(
pre_commit_linter.FileCache, 'readlines', mock_readlines)
with readlines_swap:
error_messages = other_files_linter.CustomLintChecksManager(
FILE_CACHE).check_skip_files_in_app_dev_yaml()
self.assertEqual(len(error_messages.get_report()), 2)
self.assertTrue(
'Pattern on line 2 doesn\'t match any file or directory' in
error_messages.get_report()[0])
self.assertEqual('App dev file', error_messages.name)
self.assertTrue(error_messages.failed)
def test_check_valid_pattern(self):
def mock_readlines(unused_self, unused_filepath):
return (
'// This is a comment.',
'plugins: [',
' new HtmlWebpackPlugin({',
' chunks: [\'about\'],',
' filename: \'about-page.mainpage.html\',',
' meta: defaultMeta,',
' template: commonPrefix + \'/pages/about-page/about-page'
'.mainpage.html\',',
' minify: htmlMinifyConfig,',
' inject: false', '}),]'
)
readlines_swap = self.swap(
pre_commit_linter.FileCache, 'readlines', mock_readlines)
with readlines_swap:
error_messages = other_files_linter.CustomLintChecksManager(
FILE_CACHE).check_webpack_config_file()
expected_error_messages = [
'SUCCESS Webpack config file check passed']
self.assertEqual(
error_messages.get_report(), expected_error_messages)
self.assertEqual('Webpack config file', error_messages.name)
self.assertFalse(error_messages.failed)
def test_check_invalid_pattern_with_some_keys_missing(self):
def mock_readlines(unused_self, unused_filepath):
return (
'plugins: [',
' new HtmlWebpackPlugin({',
' chunks: [\'about\'],',
' filename: \'about-page.mainpage.html\',',
' minify: htmlMinifyConfig,',
' inject: false', '}),]'
)
readlines_swap = self.swap(
pre_commit_linter.FileCache, 'readlines', mock_readlines)
with readlines_swap:
error_messages = other_files_linter.CustomLintChecksManager(
FILE_CACHE).check_webpack_config_file()
expected_error_messages = [
'Line 2: The following keys: meta, template are missing in '
'HtmlWebpackPlugin block in webpack.common.config.ts',
'FAILED Webpack config file check failed']
self.assertEqual(
error_messages.get_report(), expected_error_messages)
self.assertEqual('Webpack config file', error_messages.name)
self.assertTrue(error_messages.failed)
def test_check_invalid_pattern_without_all_keys(self):
def mock_readlines(unused_self, unused_filepath):
return (
'plugins: [',
' new HtmlWebpackPlugin({',
'}),]'
)
readlines_swap = self.swap(
pre_commit_linter.FileCache, 'readlines', mock_readlines)
with readlines_swap:
error_messages = other_files_linter.CustomLintChecksManager(
FILE_CACHE).check_webpack_config_file()
expected_error_messages = [
'Line 2: The following keys: chunks, filename, meta, template,'
' minify, inject are missing in HtmlWebpackPlugin block in '
'webpack.common.config.ts', 'FAILED Webpack config file check'
' failed']
self.assertEqual(
error_messages.get_report(), expected_error_messages)
self.assertEqual('Webpack config file', error_messages.name)
self.assertTrue(error_messages.failed)
def test_check_third_party_libs_type_defs(self):
expected_error_messages = [
'SUCCESS Third party type defs check passed']
with self.open_file_swap, self.listdir_swap:
error_messages = other_files_linter.CustomLintChecksManager(
FILE_CACHE).check_third_party_libs_type_defs()
self.assertEqual(
error_messages.get_report(), expected_error_messages)
self.assertEqual('Third party type defs', error_messages.name)
self.assertFalse(error_messages.failed)
def test_check_third_party_libs_type_defs_verbose(self):
self.verbose_mode_enabled = True
expected_error_messages = [
'SUCCESS Third party type defs check passed']
with self.open_file_swap, self.listdir_swap:
error_messages = other_files_linter.CustomLintChecksManager(
FILE_CACHE).check_third_party_libs_type_defs()
self.assertEqual(
error_messages.get_report(), expected_error_messages)
self.assertEqual('Third party type defs', error_messages.name)
self.assertFalse(error_messages.failed)
def test_check_third_party_libs_type_defs_multiple(self):
self.files_in_typings_dir.append('guppy-defs-0.2.d.ts')
expected_error_messages = 'FAILED Third party type defs check failed'
with self.open_file_swap, self.listdir_swap, self.print_swap:
error_messages = other_files_linter.CustomLintChecksManager(
FILE_CACHE).check_third_party_libs_type_defs()
self.assertEqual(
error_messages.get_report()[1], expected_error_messages)
self.assert_same_list_elements([
'There are multiple type definitions for Guppy in the '
'typings dir.'], error_messages.get_report())
self.assertEqual('Third party type defs', error_messages.name)
self.assertTrue(error_messages.failed)
def test_check_third_party_libs_type_defs_no_type_defs(self):
self.files_in_typings_dir = [
'skulpt-defs-0.2.d.ts',
'math-expressions-defs-0.3.d.ts',
'midi-defs-0.4.d.ts',
'nerdamer-defs-0.6.d.ts'
]
expected_error_messages = 'FAILED Third party type defs check failed'
with self.open_file_swap, self.listdir_swap:
error_messages = other_files_linter.CustomLintChecksManager(
FILE_CACHE).check_third_party_libs_type_defs()
self.assertEqual(
error_messages.get_report()[1], expected_error_messages)
self.assert_same_list_elements([
'There are no type definitions for Guppy in the '
'typings dir.'], error_messages.get_report())
self.assertEqual('Third party type defs', error_messages.name)
self.assertTrue(error_messages.failed)
def test_check_third_party_libs_type_defs_wrong_version(self):
self.files_in_typings_dir = [
'guppy-defs-0.2.d.ts',
'skulpt-defs-0.2.d.ts',
'math-expressions-defs-0.3.d.ts',
'midi-defs-0.4.d.ts',
'nerdamer-defs-0.6.d.ts'
]
expected_error_messages = 'FAILED Third party type defs check failed'
with self.open_file_swap, self.listdir_swap, self.print_swap:
error_messages = other_files_linter.CustomLintChecksManager(
FILE_CACHE).check_third_party_libs_type_defs()
self.assertEqual(
error_messages.get_report()[1], expected_error_messages)
self.assert_same_list_elements([
'Type definitions for Guppy are not up to date. The '
'current version of Guppy is 0.1 and the type definitions '
'are for version 0.2. Please refer typings/README.md '
'for more details.'], error_messages.get_report())
self.assertEqual('Third party type defs', error_messages.name)
self.assertTrue(error_messages.failed)
def test_check_github_workflows_use_merge_action_checks(self):
def mock_listdir(unused_path):
return ['pass.yml', 'fail.yml', 'README']
def mock_read(path):
if path.endswith('pass.yml'):
return '\n'.join([
'name: Passing workflow file',
'on:',
' push:',
' branches:',
' - develop',
'',
'jobs:',
' run:',
' steps:',
' - uses: actions/checkout@v2',
' - uses: ./.github/actions/merge',
' - run: echo "oppia"',
])
elif path.endswith('fail.yml'):
return '\n'.join([
'name: Passing workflow file',
'on:',
' push:',
' branches:',
' - develop',
'',
'jobs:',
' run:',
' steps:',
' - uses: actions/checkout@v2',
' - run: echo "oppia"',
])
raise AssertionError(
'mock_read called with unexpected path %s' % path)
listdir_swap = self.swap_with_checks(
os, 'listdir', mock_listdir,
expected_args=[(other_files_linter.WORKFLOWS_DIR,)])
read_swap = self.swap(FILE_CACHE, 'read', mock_read)
expected = [
'%s --> Job run does not use the .github/actions/merge action.' %
os.path.join(other_files_linter.WORKFLOWS_DIR, 'fail.yml'),
'FAILED Github workflows use merge action check failed',
]
with listdir_swap, read_swap:
task_results = other_files_linter.CustomLintChecksManager(
FILE_CACHE).check_github_workflows_use_merge_action()
self.assertEqual(task_results.get_report(), expected)
def test_perform_all_lint_checks(self):
lint_task_report = other_files_linter.CustomLintChecksManager(
FILE_CACHE).perform_all_lint_checks()
self.assertTrue(isinstance(lint_task_report, list))
def test_get_linters_with_success(self):
custom_linter, third_party_linter = (
other_files_linter.get_linters(FILE_CACHE))
self.assertTrue(
isinstance(
custom_linter, other_files_linter.CustomLintChecksManager))
self.assertEqual(third_party_linter, None)
|
the-stack_0_17171
|
import time
from huobi.connection.websocket_req_client import *
from huobi.utils.channels_request import *
from huobi.model.market import *
class ReqPriceDepthService:
def __init__(self, params):
self.params = params
def subscribe(self, callback, error_handler, **kwargs):
symbol_list = self.params["symbol_list"]
step = self.params["step"]
def subscription(connection):
for symbol in symbol_list:
connection.send(request_price_depth_channel(symbol, step))
time.sleep(0.01)
def parse(dict_data):
price_depth_event = PriceDepthReq()
price_depth_event.id = dict_data.get("id")
price_depth_event.rep = dict_data.get("rep")
data = dict_data.get("data", {})
price_depth_obj = PriceDepth.json_parse(data)
price_depth_event.data = price_depth_obj
return price_depth_event
WebSocketReqClient(**kwargs).execute_subscribe_v1(subscription,
parse,
callback,
error_handler)
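# A minimal usage sketch (symbol and step values are illustrative only and the
# surrounding huobi SDK wiring is assumed to be in place):
#
#     service = ReqPriceDepthService({"symbol_list": ["btcusdt"], "step": "step0"})
#     service.subscribe(callback=print, error_handler=print)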
|
the-stack_0_17172
|
from typing import List, Optional, Tuple
import matplotlib as mpl
mpl.use("Agg")
from theseus.opt import Opts
import os
import cv2
import torch
import numpy as np
from theseus.opt import Config
from theseus.segmentation.models import MODEL_REGISTRY
from theseus.segmentation.augmentations import TRANSFORM_REGISTRY
from theseus.segmentation.datasets import DATASET_REGISTRY, DATALOADER_REGISTRY
from theseus.utilities.loading import load_state_dict
from theseus.utilities.loggers import LoggerObserver, StdoutLogger
from theseus.utilities.cuda import get_devices_info
from theseus.utilities.getter import get_instance, get_instance_recursively
from theseus.utilities.visualization.visualizer import Visualizer
from theseus.cps.models.wrapper import ModelWithLoss
class VideoWriter:
def __init__(self, video_info, saved_path):
self.video_info = video_info
self.saved_path = saved_path
self.FPS = self.video_info["fps"]
self.WIDTH = self.video_info["width"]
self.HEIGHT = self.video_info["height"]
self.NUM_FRAMES = self.video_info["num_frames"]
self.outvid = cv2.VideoWriter(
self.saved_path,
cv2.VideoWriter_fourcc(*"mp4v"),
self.FPS,
(self.WIDTH, self.HEIGHT),
)
def write(self, frame):
self.outvid.write(frame.astype(np.uint8))
class TestPipeline(object):
def __init__(self, opt: Config):
super(TestPipeline, self).__init__()
self.opt = opt
self.debug = opt["global"]["debug"]
self.logger = LoggerObserver.getLogger("main")
self.savedir = opt["global"]["save_dir"]
os.makedirs(self.savedir, exist_ok=True)
stdout_logger = StdoutLogger(__name__, self.savedir, debug=self.debug)
self.logger.subscribe(stdout_logger)
self.logger.text(self.opt, level=LoggerObserver.INFO)
self.transform_cfg = Config.load_yaml(opt["global"]["cfg_transform"])
self.device_name = opt["global"]["device"]
self.device = torch.device(self.device_name)
self.weights = opt["global"]["weights"]
self.transform = get_instance_recursively(
self.transform_cfg, registry=TRANSFORM_REGISTRY
)
self.dataset = get_instance(
opt["data"]["dataset"],
registry=DATASET_REGISTRY,
transform=self.transform["val"],
)
CLASSNAMES = self.dataset.classnames
self.dataloader = get_instance(
opt["data"]["dataloader"],
registry=DATALOADER_REGISTRY,
dataset=self.dataset,
)
self.model1 = get_instance(
self.opt["model1"],
registry=MODEL_REGISTRY,
classnames=CLASSNAMES,
num_classes=len(CLASSNAMES),
).to(self.device)
self.model2 = get_instance(
self.opt["model2"],
registry=MODEL_REGISTRY,
classnames=CLASSNAMES,
num_classes=len(CLASSNAMES),
).to(self.device)
if self.weights:
state_dict = torch.load(self.weights, map_location=self.device)
self.model1 = load_state_dict(self.model1, state_dict, "model1")
self.model2 = load_state_dict(self.model2, state_dict, "model2")
self.model = ModelWithLoss(
self.model1,
self.model2,
criterion_sup=None,
criterion_unsup=None,
soft_cps=True,
device=self.device,
)
def infocheck(self):
device_info = get_devices_info(self.device_name)
self.logger.text("Using " + device_info, level=LoggerObserver.INFO)
self.logger.text(
f"Number of test sample: {len(self.dataset)}", level=LoggerObserver.INFO
)
self.logger.text(
f"Everything will be saved to {self.savedir}", level=LoggerObserver.INFO
)
@torch.no_grad()
def inference(self):
self.infocheck()
self.logger.text("Inferencing...", level=LoggerObserver.INFO)
visualizer = Visualizer()
self.model.eval()
video_name, ext = os.path.splitext(os.path.basename(self.dataset.video_path))
saved_mask_path = os.path.join(self.savedir, f"{video_name}_masks{ext}")
saved_overlay_path = os.path.join(self.savedir, f"{video_name}_overlay{ext}")
mask_writer = VideoWriter(self.dataset.video_info, saved_mask_path)
overlay_writer = VideoWriter(self.dataset.video_info, saved_overlay_path)
for idx, batch in enumerate(self.dataloader):
inputs = batch["inputs"]
img_names = batch["img_names"]
ori_sizes = batch["ori_sizes"]
outputs = self.model.get_prediction(batch, self.device)
preds = outputs["masks"]
for (input, pred, filename, ori_size) in zip(
inputs, preds, img_names, ori_sizes
):
decode_pred = visualizer.decode_segmap(pred)[:, :, ::-1]
resized_decode_mask = cv2.resize(decode_pred, dsize=tuple(ori_size))
# Save mask
mask_writer.write(resized_decode_mask)
# Save overlay
raw_image = visualizer.denormalize(input)
raw_image = (raw_image * 255).astype(np.uint8)
ori_image = cv2.resize(raw_image, dsize=tuple(ori_size))
ori_image = cv2.cvtColor(ori_image, cv2.COLOR_RGB2BGR)
overlay = ori_image * 0.75 + resized_decode_mask * 0.25
overlay_writer.write(overlay)
self.logger.text(
f"Save submission video at {saved_mask_path}", level=LoggerObserver.INFO
)
self.logger.text(
f"Save overlay video at {saved_overlay_path}", level=LoggerObserver.INFO
)
if __name__ == "__main__":
opts = Opts().parse_args()
val_pipeline = TestPipeline(opts)
val_pipeline.inference()
|
the-stack_0_17173
|
#!/usr/bin/python3
#
# ./cgetall.py canvas_course_page_url|course_id [destination_directory]
#
# get all of the Canvas course pages with a given base URL or for a given course_id
#
# with the option '-C' or '--containers' use HTTP rather than HTTPS for access to Canvas
# with the option "-v" or "--verbose" you get lots of output - showing in detail the operations of the program
#
# Can also be called with an alternative configuration file:
# ./cgetall.py --config config-test.json 11
#
# Example:
# cgetall.py https://kth.instructure.com/courses/11/pages/test-3
# or
# cgetall.py 11
#
# both get all of the course pages for course 11
#
# G. Q. Maguire Jr.
#
# 2020.03.27
# based on the earlier cgetall.py of 2016.07.25
#
import csv, requests, time
from pprint import pprint
import optparse
import sys
import json
#############################
###### EDIT THIS STUFF ######
#############################
global baseUrl # the base URL used for access to Canvas
global header # the header for all HTML requests
global payload # place to store additionally payload when needed for options to HTML requests
# Based upon the options to the program, initialize the variables used to access Canvas via HTML requests
def initialize(options):
global baseUrl, header, payload
# styled based upon https://martin-thoma.com/configuration-files-in-python/
if options.config_filename:
config_file=options.config_filename
else:
config_file='config.json'
try:
with open(config_file) as json_data_file:
configuration = json.load(json_data_file)
access_token=configuration["canvas"]["access_token"]
if options.containers:
baseUrl="http://"+configuration["canvas"]["host"]+"/api/v1"
print("using HTTP for the container environment")
else:
baseUrl="https://"+configuration["canvas"]["host"]+"/api/v1"
header = {'Authorization' : 'Bearer ' + access_token}
payload = {}
except:
print("Unable to open configuration file named {}".format(config_file))
print("Please create a suitable configuration file, the default name is config.json")
sys.exit()
#modules_csv = 'modules.csv' # name of file storing module names
log_file = 'log.txt' # a log file. it will log things
def list_pages(course_id):
list_of_all_pages=[]
# Use the Canvas API to get the list of pages for this course
#GET /api/v1/courses/:course_id/pages
url = "{0}/courses/{1}/pages".format(baseUrl, course_id)
if Verbose_Flag:
print("url: {}".format(url))
r = requests.get(url, headers = header)
if r.status_code == requests.codes.ok:
page_response=r.json()
for p_response in page_response:
list_of_all_pages.append(p_response)
# the following is needed when the reponse has been paginated
# i.e., when the response is split into pieces - each returning only some of the list of modules
# see "Handling Pagination" - Discussion created by [email protected] on Apr 27, 2015, https://community.canvaslms.com/thread/1500
while r.links['current']['url'] != r.links['last']['url']:
r = requests.get(r.links['next']['url'], headers=header)
page_response = r.json()
for p_response in page_response:
list_of_all_pages.append(p_response)
if Verbose_Flag:
for p in list_of_all_pages:
print("{}".format(p["title"]))
return list_of_all_pages
def getall_course_pages(course_id, destination_directory):
for p in list_pages(course_id):
url = "{0}/courses/{1}/pages/{2}".format(baseUrl, course_id, p["url"])
if Verbose_Flag:
print(url)
payload={}
r = requests.get(url, headers = header, data=payload)
if Verbose_Flag:
print("r.status_code: {}".format(r.status_code))
if r.status_code == requests.codes.ok:
page_response = r.json()
new_file_name=p["url"][p["url"].rfind("/")+1:]+'.html'
if len(destination_directory) > 0:
new_file_name=destination_directory+'/'+new_file_name
if Verbose_Flag:
print("new_file_name: {}".format(new_file_name))
# write out body of response as a .html page
with open(new_file_name, 'wb') as f:
# modified the code to handle empty files
if len(page_response["body"]) > 0:
encoded_output = bytes(page_response["body"], 'UTF-8')
else:
encoded_output = bytes("", 'UTF-8')
f.write(encoded_output)
continue
else:
print("No such page: {}".format(canvas_course_page_url))
continue
return True
def main():
global Verbose_Flag
parser = optparse.OptionParser()
parser.add_option('-v', '--verbose',
dest="verbose",
default=False,
action="store_true",
help="Print lots of output to stdout"
)
parser.add_option("--config", dest="config_filename",
help="read configuration from FILE", metavar="FILE")
parser.add_option('-t', '--testing',
dest="testing",
default=False,
action="store_true",
help="execute test code"
)
parser.add_option('-C', '--containers',
dest="containers",
default=False,
action="store_true",
help="for the container enviroment in the virtual machine"
)
options, remainder = parser.parse_args()
Verbose_Flag=options.verbose
if Verbose_Flag:
print("ARGV :{}".format(sys.argv[1:]))
print("VERBOSE :{}".format(options.verbose))
print("REMAINING :{}".format(remainder))
initialize(options)
if (len(remainder) < 1):
print("Inusffient arguments\n must provide url or course_id\n")
else:
canvas_course_page_url=remainder[0]
        if (len(remainder) >= 2):
            destination_directory=remainder[1]
            print("outputting files to {}".format(destination_directory))
else:
destination_directory=""
if canvas_course_page_url.find("http") >= 0:
#extract course_id from URL
course_id=canvas_course_page_url[canvas_course_page_url.find("courses/")+8:canvas_course_page_url.find("pages/")-1]
else:
course_id=remainder[0]
if Verbose_Flag:
print("course_id: {}".format(course_id))
output=getall_course_pages(course_id, destination_directory)
if (output):
if Verbose_Flag:
pprint(output)
if __name__ == "__main__": main()
|
the-stack_0_17179
|
"""Download & cache terraform binaries"""
import logging
import platform
import re
from functools import partial
from os import PathLike, getenv, pathsep
from pathlib import Path
from typing import Union
from miutil.fdio import extractall
from miutil.web import urlopen_cached
__all__ = ["ARCH", "CACHE_DIR", "OS", "VERSION_TF", "terraform"]
log = logging.getLogger(__name__)
CACHE_DIR = "~/.terraform"
VERSION_TF = "1.0.5"
ARCH = "amd64" if "64" in platform.machine() else "386"
match = partial(re.match, string=platform.system(), flags=re.I)
for i in {"darwin", "freebsd", "linux", "openbsd", "windows|cli|cygwin|msys"}:
if match(i):
OS = i.split("|", 1)[0]
break
else:
OS = match("[a-zA-Z]+").group(0).lower()
AnyPath = Union[str, "PathLike[str]", Path]
def terraform(cache: AnyPath = CACHE_DIR, version: str = VERSION_TF) -> Path:
"""
Finds the first terraform binary on the $PATH,
otherwise downloads `version` to `cache`.
"""
base_bin = "terraform" + (".exe" if OS == "windows" else "")
for path in map(Path, getenv("PATH").split(pathsep)):
if (path / base_bin).is_file():
return (path / base_bin).resolve()
cache = Path(cache).expanduser()
bin = cache / base_bin
url = (
f"https://releases.hashicorp.com/terraform"
f"/{version}/terraform_{version}_{OS}_{ARCH}.zip"
)
if not bin.is_file():
log.info("Downloading to %s", cache)
with urlopen_cached(url, cache) as fd:
extractall(fd, cache)
assert bin.is_file()
if OS != "windows":
bin.chmod(0o755)
return bin
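# A minimal usage sketch (assumption: network access is available on the first
# call so the binary can be downloaded into the cache directory).
if __name__ == "__main__":
    import subprocess
    logging.basicConfig(level=logging.INFO)
    tf_bin = terraform()
    subprocess.run([str(tf_bin), "version"], check=True)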
|
the-stack_0_17181
|
from __future__ import annotations
import click, parse, json
from modules import fileutils
from dataclasses import dataclass
from collections import defaultdict
@dataclass
class Point:
x:int
y:int
def __str__(self):
return f'({self.x}, {self.y})'
def __hash__(self):
return hash((self.x, self.y))
def __eq__(self, other):
return self.x == other.x and self.y == other.y
class Line:
def __init__(self, p1:Point, p2:Point):
self.p1 = p1
self.p2 = p2
self._points = []
self._slope = None
self._intercept = None
@property
def points(self):
if not self._points:
print(self)
if self.is_horizontal:
small,large = min(self.p1.x, self.p2.x), max(self.p1.x, self.p2.x)
self._points = [Point(x, self.p1.y) for x in range(small,large+1)]
elif self.is_vertical:
small,large = min(self.p1.y, self.p2.y), max(self.p1.y, self.p2.y)
self._points = [Point(self.p1.x, y) for y in range(small, large+1)]
else:
small,large = min(self.p1.x, self.p2.x), max(self.p1.x, self.p2.x)
self._points = [Point(x, self.solve(x)) for x in range(small, large+1)]
return self._points
def solve(self,x):
y = (self.slope) * x + self.y_intercept
return int(y)
@property
def slope(self):
if self._slope is None:
if self.is_vertical:
self._slope = float('NaN')
else:
self._slope = (self.p2.y - self.p1.y) / (self.p2.x - self.p1.x)
return self._slope
@property
def y_intercept(self):
if self._intercept is None:
self._intercept = self.p1.y - (self.slope * self.p1.x)
return self._intercept
@property
def is_horizontal(self):
return self.p1.y == self.p2.y
@property
def is_vertical(self):
return self.p1.x == self.p2.x
def __str__(self):
return f'{self.p1} -> {self.p2} :: y = {self.slope}x + {self.y_intercept}'
def parse_lines(lst):
pattern = parse.compile('{x1:d},{y1:d} -> {x2:d},{y2:d}')
lines = []
for elem in lst:
info = pattern.parse(elem)
lines.append(Line(Point(x=info['x1'], y=info['y1']), Point(x=info['x2'], y=info['y2'])))
return lines
def part_1(lines):
"""Part 1"""
lines = parse_lines(lines)
grid = defaultdict(lambda: 0)
for line in lines:
for point in line.points:
if line.is_horizontal or line.is_vertical:
grid[point] += 1
danger = sum([1 for v in grid.values() if v>1])
print(f'{danger=}')
def part_2(lines):
"""Part 2"""
lines = parse_lines(lines)
grid = defaultdict(lambda: 0)
for line in lines:
for point in line.points:
grid[point] += 1
danger = sum([1 for v in grid.values() if v>1])
print(f'{danger=}')
@click.command()
@click.option('--test', '-t', is_flag=True, default=False)
@click.argument('part', type=int)
def d5(test, part):
"""Day 5 commands"""
lines = fileutils.load_lines(5, test)
fn = {
1: part_1,
2: part_2,
}.get(part)
fn(lines)
|
the-stack_0_17182
|
# -*- coding: utf-8 -*-
"""
@date: 2021/9/23 10:06 PM
@file: general_dataset_v2.py
@author: zj
@description:
"""
import os
import json
from torch.utils.data import Dataset
from .evaluator.general_evaluator import GeneralEvaluator
from .util import default_loader
class GeneralDatasetV2(Dataset):
def __init__(self, root, transform=None, target_transform=None, top_k=(1, 5), keep_rgb=False):
assert os.path.isfile(root)
with open(root, 'r') as f:
data_dict = json.load(f)
self.classes = list(data_dict.keys())
self.total_img_list = list()
self.total_label_list = list()
for key in self.classes:
img_list = data_dict[key]
label = self.classes.index(key)
for img_path in img_list:
assert os.path.isfile(img_path), img_path
self.total_img_list.append(img_path)
self.total_label_list.append(label)
self.root = root
self.transform = transform
self.target_transform = target_transform
self.keep_rgb = keep_rgb
self._update_evaluator(top_k)
def __getitem__(self, index: int):
img_path = self.total_img_list[index]
target = self.total_label_list[index]
image = default_loader(img_path, rgb=self.keep_rgb)
if self.transform is not None:
image = self.transform(image)
if self.target_transform is not None:
target = self.target_transform(target)
return image, target
def __len__(self) -> int:
return len(self.total_img_list)
def _update_evaluator(self, top_k):
self.evaluator = GeneralEvaluator(self.classes, top_k=top_k)
def get_classes(self):
return self.classes
def __repr__(self):
return self.__class__.__name__ + ' (' + self.root + ')'
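# A minimal usage sketch.  The index path below is an assumption made for
# illustration; the JSON file must map class names to lists of existing image
# paths, exactly as __init__ expects.
#
#     from torch.utils.data import DataLoader
#     dataset = GeneralDatasetV2('data/index.json')
#     loader = DataLoader(dataset, batch_size=32, shuffle=True)
#     print(dataset.get_classes(), len(dataset))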
|
the-stack_0_17183
|
from django.conf.urls import url
from . import views
app_name = 'user_management_ui'
urlpatterns = [
url(
r'^register/verify/(?P<token>[0-9A-Za-z:\-_]+)/$',
views.VerifyUserEmailView.as_view(),
name='registration-verify',
),
]
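# Example (assumption: this URLconf is included under its app namespace): the
# verification link for a given token can be reversed with
#
#     from django.urls import reverse
#     reverse('user_management_ui:registration-verify', kwargs={'token': token})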
|
the-stack_0_17184
|
import concurrent.futures as futures
import os
import pathlib
import re
from collections import OrderedDict
import numpy as np
from skimage import io
def get_image_index_str(img_idx):
return "{:06d}".format(img_idx)
def get_kitti_info_path(idx,
prefix,
info_type='image_2',
file_tail='.png',
training=True,
relative_path=True):
img_idx_str = get_image_index_str(idx)
img_idx_str += file_tail
prefix = pathlib.Path(prefix)
if training:
file_path = pathlib.Path('training') / info_type / img_idx_str
else:
file_path = pathlib.Path('testing') / info_type / img_idx_str
if not (prefix / file_path).exists():
raise ValueError("file not exist: {}".format(file_path))
if relative_path:
return str(file_path)
else:
return str(prefix / file_path)
def get_image_path(idx, prefix, training=True, relative_path=True):
return get_kitti_info_path(idx, prefix, 'image_2', '.png', training,
relative_path)
def get_label_path(idx, prefix, training=True, relative_path=True):
return get_kitti_info_path(idx, prefix, 'label_2', '.txt', training,
relative_path)
def get_velodyne_path(idx, prefix, training=True, relative_path=True):
return get_kitti_info_path(idx, prefix, 'velodyne', '.bin', training,
relative_path)
def get_calib_path(idx, prefix, training=True, relative_path=True):
return get_kitti_info_path(idx, prefix, 'calib', '.txt', training,
relative_path)
def _extend_matrix(mat):
mat = np.concatenate([mat, np.array([[0., 0., 0., 1.]])], axis=0)
return mat
def get_kitti_image_info(path,
training=True,
label_info=True,
velodyne=False,
calib=False,
image_ids=7481,
extend_matrix=True,
num_worker=8,
relative_path=True,
with_imageshape=True):
# image_infos = []
root_path = pathlib.Path(path)
if not isinstance(image_ids, list):
image_ids = list(range(image_ids))
def map_func(idx):
image_info = {'image_idx': idx}
annotations = None
if velodyne:
image_info['velodyne_path'] = get_velodyne_path(
idx, path, training, relative_path)
image_info['img_path'] = get_image_path(idx, path, training,
relative_path)
if with_imageshape:
img_path = image_info['img_path']
if relative_path:
img_path = str(root_path / img_path)
image_info['img_shape'] = np.array(
io.imread(img_path).shape[:2], dtype=np.int32)
if label_info:
label_path = get_label_path(idx, path, training, relative_path)
if relative_path:
label_path = str(root_path / label_path)
annotations = get_label_anno(label_path)
if calib:
calib_path = get_calib_path(
idx, path, training, relative_path=False)
with open(calib_path, 'r') as f:
lines = f.readlines()
P0 = np.array(
[float(info) for info in lines[0].split(' ')[1:13]]).reshape(
[3, 4])
P1 = np.array(
[float(info) for info in lines[1].split(' ')[1:13]]).reshape(
[3, 4])
P2 = np.array(
[float(info) for info in lines[2].split(' ')[1:13]]).reshape(
[3, 4])
P3 = np.array(
[float(info) for info in lines[3].split(' ')[1:13]]).reshape(
[3, 4])
if extend_matrix:
P0 = _extend_matrix(P0)
P1 = _extend_matrix(P1)
P2 = _extend_matrix(P2)
P3 = _extend_matrix(P3)
image_info['calib/P0'] = P0
image_info['calib/P1'] = P1
image_info['calib/P2'] = P2
image_info['calib/P3'] = P3
R0_rect = np.array([
float(info) for info in lines[4].split(' ')[1:10]
]).reshape([3, 3])
if extend_matrix:
rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype)
rect_4x4[3, 3] = 1.
rect_4x4[:3, :3] = R0_rect
else:
rect_4x4 = R0_rect
image_info['calib/R0_rect'] = rect_4x4
Tr_velo_to_cam = np.array([
float(info) for info in lines[5].split(' ')[1:13]
]).reshape([3, 4])
Tr_imu_to_velo = np.array([
float(info) for info in lines[6].split(' ')[1:13]
]).reshape([3, 4])
if extend_matrix:
Tr_velo_to_cam = _extend_matrix(Tr_velo_to_cam)
Tr_imu_to_velo = _extend_matrix(Tr_imu_to_velo)
image_info['calib/Tr_velo_to_cam'] = Tr_velo_to_cam
image_info['calib/Tr_imu_to_velo'] = Tr_imu_to_velo
if annotations is not None:
image_info['annos'] = annotations
add_difficulty_to_annos(image_info)
return image_info
with futures.ThreadPoolExecutor(num_worker) as executor:
image_infos = executor.map(map_func, image_ids)
return list(image_infos)
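# Example call (illustrative; the KITTI root path and image ids are placeholders):
#   infos = get_kitti_image_info('/data/kitti', training=True, label_info=True,
#                                calib=True, image_ids=[0, 1, 2])
#   # each element holds 'image_idx', 'img_path', 'calib/P2', 'annos', ...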
def filter_kitti_anno(image_anno,
used_classes,
used_difficulty=None,
dontcare_iou=None):
if not isinstance(used_classes, (list, tuple)):
used_classes = [used_classes]
img_filtered_annotations = {}
relevant_annotation_indices = [
i for i, x in enumerate(image_anno['name']) if x in used_classes
]
for key in image_anno.keys():
img_filtered_annotations[key] = (
image_anno[key][relevant_annotation_indices])
if used_difficulty is not None:
relevant_annotation_indices = [
i for i, x in enumerate(img_filtered_annotations['difficulty'])
if x in used_difficulty
]
for key in image_anno.keys():
img_filtered_annotations[key] = (
img_filtered_annotations[key][relevant_annotation_indices])
if 'DontCare' in used_classes and dontcare_iou is not None:
dont_care_indices = [
i for i, x in enumerate(img_filtered_annotations['name'])
if x == 'DontCare'
]
# bounding box format [y_min, x_min, y_max, x_max]
all_boxes = img_filtered_annotations['bbox']
ious = iou(all_boxes, all_boxes[dont_care_indices])
# Remove all bounding boxes that overlap with a dontcare region.
if ious.size > 0:
boxes_to_remove = np.amax(ious, axis=1) > dontcare_iou
for key in image_anno.keys():
img_filtered_annotations[key] = (img_filtered_annotations[key][
np.logical_not(boxes_to_remove)])
return img_filtered_annotations
def filter_annos_low_score(image_annos, thresh):
new_image_annos = []
for anno in image_annos:
img_filtered_annotations = {}
relevant_annotation_indices = [
i for i, s in enumerate(anno['score']) if s >= thresh
]
for key in anno.keys():
img_filtered_annotations[key] = (
anno[key][relevant_annotation_indices])
new_image_annos.append(img_filtered_annotations)
return new_image_annos
def kitti_result_line(result_dict, precision=4):
prec_float = "{" + ":.{}f".format(precision) + "}"
res_line = []
all_field_default = OrderedDict([
('name', None),
('truncated', -1),
('occluded', -1),
('alpha', -10),
('bbox', None),
('dimensions', [-1, -1, -1]),
('location', [-1000, -1000, -1000]),
('rotation_y', -10),
('score', None),
])
res_dict = [(key, None) for key, val in all_field_default.items()]
res_dict = OrderedDict(res_dict)
for key, val in result_dict.items():
if all_field_default[key] is None and val is None:
raise ValueError("you must specify a value for {}".format(key))
res_dict[key] = val
for key, val in res_dict.items():
if key == 'name':
res_line.append(val)
elif key in ['truncated', 'alpha', 'rotation_y', 'score']:
if val is None:
res_line.append(str(all_field_default[key]))
else:
res_line.append(prec_float.format(val))
elif key == 'occluded':
if val is None:
res_line.append(str(all_field_default[key]))
else:
res_line.append('{}'.format(val))
elif key in ['bbox', 'dimensions', 'location']:
if val is None:
res_line += [str(v) for v in all_field_default[key]]
else:
res_line += [prec_float.format(v) for v in val]
else:
raise ValueError("unknown key. supported key:{}".format(
res_dict.keys()))
return ' '.join(res_line)
def add_difficulty_to_annos(info):
min_height = [40, 25,
25] # minimum height for evaluated groundtruth/detections
max_occlusion = [
0, 1, 2
] # maximum occlusion level of the groundtruth used for eval_utils
max_trunc = [
0.15, 0.3, 0.5
] # maximum truncation level of the groundtruth used for eval_utils
annos = info['annos']
dims = annos['dimensions'] # lhw format
bbox = annos['bbox']
height = bbox[:, 3] - bbox[:, 1]
occlusion = annos['occluded']
truncation = annos['truncated']
diff = []
    easy_mask = np.ones((len(dims), ), dtype=bool)
    moderate_mask = np.ones((len(dims), ), dtype=bool)
    hard_mask = np.ones((len(dims), ), dtype=bool)
i = 0
for h, o, t in zip(height, occlusion, truncation):
if o > max_occlusion[0] or h <= min_height[0] or t > max_trunc[0]:
easy_mask[i] = False
if o > max_occlusion[1] or h <= min_height[1] or t > max_trunc[1]:
moderate_mask[i] = False
if o > max_occlusion[2] or h <= min_height[2] or t > max_trunc[2]:
hard_mask[i] = False
i += 1
is_easy = easy_mask
is_moderate = np.logical_xor(easy_mask, moderate_mask)
is_hard = np.logical_xor(hard_mask, moderate_mask)
for i in range(len(dims)):
if is_easy[i]:
diff.append(0)
elif is_moderate[i]:
diff.append(1)
elif is_hard[i]:
diff.append(2)
else:
diff.append(-1)
annos["difficulty"] = np.array(diff, np.int32)
return diff
def get_label_anno(label_path):
annotations = {}
annotations.update({
'name': [],
'truncated': [],
'occluded': [],
'alpha': [],
'bbox': [],
'dimensions': [],
'location': [],
'rotation_y': []
})
with open(label_path, 'r') as f:
lines = f.readlines()
# if len(lines) == 0 or len(lines[0]) < 15:
# content = []
# else:
content = [line.strip().split(' ') for line in lines]
annotations['name'] = np.array([x[0] for x in content])
annotations['truncated'] = np.array([float(x[1]) for x in content])
annotations['occluded'] = np.array([int(x[2]) for x in content])
annotations['alpha'] = np.array([float(x[3]) for x in content])
annotations['bbox'] = np.array(
[[float(info) for info in x[4:8]] for x in content]).reshape(-1, 4)
# dimensions will convert hwl format to standard lhw(camera) format.
annotations['dimensions'] = np.array(
[[float(info) for info in x[8:11]] for x in content]).reshape(
-1, 3)[:, [2, 0, 1]]
annotations['location'] = np.array(
[[float(info) for info in x[11:14]] for x in content]).reshape(-1, 3)
annotations['rotation_y'] = np.array(
[float(x[14]) for x in content]).reshape(-1)
if len(content) != 0 and len(content[0]) == 16: # have score
annotations['score'] = np.array([float(x[15]) for x in content])
else:
annotations['score'] = np.zeros([len(annotations['bbox'])])
return annotations
def get_label_annos(label_folder, image_ids=None):
if image_ids is None:
filepaths = pathlib.Path(label_folder).glob('*.txt')
        prog = re.compile(r'^\d{6}\.txt$')
filepaths = filter(lambda f: prog.match(f.name), filepaths)
image_ids = [int(p.stem) for p in filepaths]
image_ids = sorted(image_ids)
if not isinstance(image_ids, list):
image_ids = list(range(image_ids))
annos = []
label_folder = pathlib.Path(label_folder)
for idx in image_ids:
image_idx = get_image_index_str(idx)
label_filename = label_folder / (image_idx + '.txt')
annos.append(get_label_anno(label_filename))
return annos
def area(boxes, add1=False):
"""Computes area of boxes.
Args:
boxes: Numpy array with shape [N, 4] holding N boxes
Returns:
    a numpy array with shape [N] representing box areas
"""
if add1:
return (boxes[:, 2] - boxes[:, 0] + 1.0) * (
boxes[:, 3] - boxes[:, 1] + 1.0)
else:
return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
def intersection(boxes1, boxes2, add1=False):
"""Compute pairwise intersection areas between boxes.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes
boxes2: a numpy array with shape [M, 4] holding M boxes
Returns:
    a numpy array with shape [N, M] representing pairwise intersection areas
"""
[y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)
[y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)
all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))
all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))
if add1:
all_pairs_min_ymax += 1.0
intersect_heights = np.maximum(
np.zeros(all_pairs_max_ymin.shape),
all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))
all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))
if add1:
all_pairs_min_xmax += 1.0
intersect_widths = np.maximum(
np.zeros(all_pairs_max_xmin.shape),
all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
def iou(boxes1, boxes2, add1=False):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
    boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
"""
intersect = intersection(boxes1, boxes2, add1)
area1 = area(boxes1, add1)
area2 = area(boxes2, add1)
union = np.expand_dims(
area1, axis=1) + np.expand_dims(
area2, axis=0) - intersect
return intersect / union
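# Quick, self-contained sanity check for iou() with made-up boxes (not part of
# the original module); boxes use the [y_min, x_min, y_max, x_max] convention
# assumed above.
if __name__ == '__main__':
    demo_boxes1 = np.array([[0., 0., 10., 10.]])
    demo_boxes2 = np.array([[0., 0., 10., 10.], [5., 5., 15., 15.]])
    # identical boxes -> IoU 1.0; half-overlapping boxes -> 25 / 175 ~= 0.143
    print(iou(demo_boxes1, demo_boxes2))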
|
the-stack_0_17185
|
import unittest
import numpy as np
import pyroomacoustics as pra
room_dim = [15, 14, 16]
absorption = 0.2
source_position = [2.0, 3.1, 2.0]
mic_position = [2.0, 1.5, 2.0]
fs = 16000
max_order = 5
# scenario A
def get_room_constructor_args():
'''
When provided with sources and microphones, the constructor
should try to compute the RIR immediately
'''
source = pra.SoundSource(position=source_position)
mics = pra.MicrophoneArray(np.array([mic_position]).T, fs)
shoebox = pra.ShoeBox(
room_dim,
absorption=absorption,
fs=fs,
max_order=max_order,
sources=[source],
mics=mics,
)
shoebox.image_source_model()
shoebox.compute_rir()
return shoebox
#scenario B
def get_room_add_method():
shoebox = pra.ShoeBox(room_dim, absorption=absorption, fs=fs, max_order=max_order)
shoebox.add_source(source_position)
mics = pra.MicrophoneArray(np.array([mic_position]).T, fs)
shoebox.add_microphone_array(mics)
shoebox.image_source_model()
shoebox.compute_rir()
return shoebox
class RoomConstructorSources(unittest.TestCase):
def test_room_constructor(self):
room_1 = get_room_constructor_args()
self.assertTrue(isinstance(room_1.sources[0], pra.SoundSource))
def test_room_add_method(self):
room_2 = get_room_add_method()
self.assertTrue(isinstance(room_2.sources[0], pra.SoundSource))
def test_rir_equal(self):
room_1 = get_room_constructor_args()
room_2 = get_room_add_method()
self.assertTrue(np.allclose(room_1.rir[0][0], room_2.rir[0][0]))
if __name__ == '__main__':
unittest.main()
|
the-stack_0_17188
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class ipsecprofile(base_resource) :
"""Configuration for IPSEC profile resource."""
def __init__(self) :
self._name = ""
self._ikeversion = ""
self._encalgo = []
self._hashalgo = []
self._lifetime = 0
self._psk = ""
self._publickey = ""
self._privatekey = ""
self._peerpublickey = ""
self._livenesscheckinterval = 0
self._replaywindowsize = 0
self._ikeretryinterval = 0
self._retransmissiontime = 0
self._perfectforwardsecrecy = ""
self._builtin = []
self.___count = 0
@property
def name(self) :
"""The name of the ipsec profile.<br/>Minimum length = 1<br/>Maximum length = 32."""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""The name of the ipsec profile.<br/>Minimum length = 1<br/>Maximum length = 32
:param name:
"""
try :
self._name = name
except Exception as e:
raise e
@property
def ikeversion(self) :
"""IKE Protocol Version.<br/>Possible values = V1, V2."""
try :
return self._ikeversion
except Exception as e:
raise e
@ikeversion.setter
def ikeversion(self, ikeversion) :
"""IKE Protocol Version.<br/>Possible values = V1, V2
:param ikeversion:
"""
try :
self._ikeversion = ikeversion
except Exception as e:
raise e
@property
def encalgo(self) :
"""Type of encryption algorithm.<br/>Possible values = AES, 3DES."""
try :
return self._encalgo
except Exception as e:
raise e
@encalgo.setter
def encalgo(self, encalgo) :
"""Type of encryption algorithm.<br/>Possible values = AES, 3DES
:param encalgo:
"""
try :
self._encalgo = encalgo
except Exception as e:
raise e
@property
def hashalgo(self) :
"""Type of hashing algorithm.<br/>Possible values = HMAC_SHA1, HMAC_SHA256, HMAC_SHA384, HMAC_SHA512, HMAC_MD5."""
try :
return self._hashalgo
except Exception as e:
raise e
@hashalgo.setter
def hashalgo(self, hashalgo) :
"""Type of hashing algorithm.<br/>Possible values = HMAC_SHA1, HMAC_SHA256, HMAC_SHA384, HMAC_SHA512, HMAC_MD5
:param hashalgo:
"""
try :
self._hashalgo = hashalgo
except Exception as e:
raise e
@property
def lifetime(self) :
"""Lifetime of IKE SA in seconds. Lifetime of IPSec SA will be (lifetime of IKE SA/8).<br/>Minimum length = 480<br/>Maximum length = 31536000."""
try :
return self._lifetime
except Exception as e:
raise e
@lifetime.setter
def lifetime(self, lifetime) :
"""Lifetime of IKE SA in seconds. Lifetime of IPSec SA will be (lifetime of IKE SA/8).<br/>Minimum length = 480<br/>Maximum length = 31536000
:param lifetime:
"""
try :
self._lifetime = lifetime
except Exception as e:
raise e
@property
def psk(self) :
"""Pre shared key value."""
try :
return self._psk
except Exception as e:
raise e
@psk.setter
def psk(self, psk) :
"""Pre shared key value.
:param psk:
"""
try :
self._psk = psk
except Exception as e:
raise e
@property
def publickey(self) :
"""Public key file path."""
try :
return self._publickey
except Exception as e:
raise e
@publickey.setter
def publickey(self, publickey) :
"""Public key file path.
:param publickey:
"""
try :
self._publickey = publickey
except Exception as e:
raise e
@property
def privatekey(self) :
"""Private key file path."""
try :
return self._privatekey
except Exception as e:
raise e
@privatekey.setter
def privatekey(self, privatekey) :
"""Private key file path.
:param privatekey:
"""
try :
self._privatekey = privatekey
except Exception as e:
raise e
@property
def peerpublickey(self) :
"""Peer public key file path."""
try :
return self._peerpublickey
except Exception as e:
raise e
@peerpublickey.setter
def peerpublickey(self, peerpublickey) :
"""Peer public key file path.
:param peerpublickey:
"""
try :
self._peerpublickey = peerpublickey
except Exception as e:
raise e
@property
def livenesscheckinterval(self) :
"""Number of seconds after which a notify payload is sent to check the liveliness of the peer. Additional retries are done as per retransmit interval setting. Zero value disables liveliness checks.<br/>Maximum length = 64999."""
try :
return self._livenesscheckinterval
except Exception as e:
raise e
@livenesscheckinterval.setter
def livenesscheckinterval(self, livenesscheckinterval) :
"""Number of seconds after which a notify payload is sent to check the liveliness of the peer. Additional retries are done as per retransmit interval setting. Zero value disables liveliness checks.<br/>Maximum length = 64999
:param livenesscheckinterval:
"""
try :
self._livenesscheckinterval = livenesscheckinterval
except Exception as e:
raise e
@property
def replaywindowsize(self) :
"""IPSec Replay window size for the data traffic.<br/>Maximum length = 16384."""
try :
return self._replaywindowsize
except Exception as e:
raise e
@replaywindowsize.setter
def replaywindowsize(self, replaywindowsize) :
"""IPSec Replay window size for the data traffic.<br/>Maximum length = 16384
:param replaywindowsize:
"""
try :
self._replaywindowsize = replaywindowsize
except Exception as e:
raise e
@property
def ikeretryinterval(self) :
"""IKE retry interval for bringing up the connection.<br/>Minimum length = 60<br/>Maximum length = 3600."""
try :
return self._ikeretryinterval
except Exception as e:
raise e
@ikeretryinterval.setter
def ikeretryinterval(self, ikeretryinterval) :
"""IKE retry interval for bringing up the connection.<br/>Minimum length = 60<br/>Maximum length = 3600
:param ikeretryinterval:
"""
try :
self._ikeretryinterval = ikeretryinterval
except Exception as e:
raise e
@property
def retransmissiontime(self) :
"""The interval in seconds to retry sending the IKE messages to peer, three consecutive attempts are done with doubled interval after every failure.<br/>Minimum length = 1<br/>Maximum length = 99."""
try :
return self._retransmissiontime
except Exception as e:
raise e
@retransmissiontime.setter
def retransmissiontime(self, retransmissiontime) :
"""The interval in seconds to retry sending the IKE messages to peer, three consecutive attempts are done with doubled interval after every failure.<br/>Minimum length = 1<br/>Maximum length = 99
:param retransmissiontime:
"""
try :
self._retransmissiontime = retransmissiontime
except Exception as e:
raise e
@property
def perfectforwardsecrecy(self) :
"""Enable/Disable PFS.<br/>Possible values = ENABLE, DISABLE."""
try :
return self._perfectforwardsecrecy
except Exception as e:
raise e
@perfectforwardsecrecy.setter
def perfectforwardsecrecy(self, perfectforwardsecrecy) :
"""Enable/Disable PFS.<br/>Possible values = ENABLE, DISABLE
:param perfectforwardsecrecy:
"""
try :
self._perfectforwardsecrecy = perfectforwardsecrecy
except Exception as e:
raise e
@property
def builtin(self) :
"""Indicates that a variable is a built-in (SYSTEM INTERNAL) type.<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE, PARTITION_ALL."""
try :
return self._builtin
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
"""converts nitro response into object and returns the object array in case of get request.
:param service:
:param response:
"""
try :
result = service.payload_formatter.string_to_resource(ipsecprofile_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.ipsecprofile
except Exception as e :
raise e
def _get_object_name(self) :
"""Returns the value of object identifier argument"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
"""Use this API to add ipsecprofile.
:param client:
:param resource:
"""
try :
if type(resource) is not list :
addresource = ipsecprofile()
addresource.name = resource.name
addresource.ikeversion = resource.ikeversion
addresource.encalgo = resource.encalgo
addresource.hashalgo = resource.hashalgo
addresource.lifetime = resource.lifetime
addresource.psk = resource.psk
addresource.publickey = resource.publickey
addresource.privatekey = resource.privatekey
addresource.peerpublickey = resource.peerpublickey
addresource.livenesscheckinterval = resource.livenesscheckinterval
addresource.replaywindowsize = resource.replaywindowsize
addresource.ikeretryinterval = resource.ikeretryinterval
addresource.retransmissiontime = resource.retransmissiontime
addresource.perfectforwardsecrecy = resource.perfectforwardsecrecy
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ ipsecprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].ikeversion = resource[i].ikeversion
addresources[i].encalgo = resource[i].encalgo
addresources[i].hashalgo = resource[i].hashalgo
addresources[i].lifetime = resource[i].lifetime
addresources[i].psk = resource[i].psk
addresources[i].publickey = resource[i].publickey
addresources[i].privatekey = resource[i].privatekey
addresources[i].peerpublickey = resource[i].peerpublickey
addresources[i].livenesscheckinterval = resource[i].livenesscheckinterval
addresources[i].replaywindowsize = resource[i].replaywindowsize
addresources[i].ikeretryinterval = resource[i].ikeretryinterval
addresources[i].retransmissiontime = resource[i].retransmissiontime
addresources[i].perfectforwardsecrecy = resource[i].perfectforwardsecrecy
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
"""Use this API to delete ipsecprofile.
:param client:
:param resource:
"""
try :
if type(resource) is not list :
deleteresource = ipsecprofile()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ ipsecprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ ipsecprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
"""Use this API to fetch all the ipsecprofile resources that are configured on netscaler.
:param client:
:param name: (Default value = "")
:param option_: (Default value = "")
"""
try :
if not name :
obj = ipsecprofile()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = ipsecprofile()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [ipsecprofile() for _ in range(len(name))]
obj = [ipsecprofile() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = ipsecprofile()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
"""Use this API to fetch filtered set of ipsecprofile resources.
        Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
:param client:
:param filter_:
"""
try :
obj = ipsecprofile()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
"""Use this API to count the ipsecprofile resources configured on NetScaler.
:param client:
"""
try :
obj = ipsecprofile()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
"""Use this API to count filtered the set of ipsecprofile resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
:param client:
:param filter_:
"""
try :
obj = ipsecprofile()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Perfectforwardsecrecy:
""" """
ENABLE = "ENABLE"
DISABLE = "DISABLE"
class Builtin:
""" """
MODIFIABLE = "MODIFIABLE"
DELETABLE = "DELETABLE"
IMMUTABLE = "IMMUTABLE"
PARTITION_ALL = "PARTITION_ALL"
class Encalgo:
""" """
AES = "AES"
_3DES = "3DES"
class Ikeversion:
""" """
V1 = "V1"
V2 = "V2"
class Hashalgo:
""" """
HMAC_SHA1 = "HMAC_SHA1"
HMAC_SHA256 = "HMAC_SHA256"
HMAC_SHA384 = "HMAC_SHA384"
HMAC_SHA512 = "HMAC_SHA512"
HMAC_MD5 = "HMAC_MD5"
class ipsecprofile_response(base_response) :
""" """
def __init__(self, length=1) :
self.ipsecprofile = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.ipsecprofile = [ipsecprofile() for _ in range(length)]
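# Minimal usage sketch (illustrative): assumes an authenticated nitro_service
# instance named `client`; the profile values below are placeholders.
#
#   profile = ipsecprofile()
#   profile.name = "ipsec_prof_1"
#   profile.ikeversion = ipsecprofile.Ikeversion.V2
#   profile.encalgo = [ipsecprofile.Encalgo.AES]
#   ipsecprofile.add(client, profile)
#   print(ipsecprofile.count(client))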
|
the-stack_0_17189
|
from fastapi import Path
# example for /pep/{namespace}
example_namespace = Path(
...,
description="A namespace that holds projects.",
regex=r"^\w+$",
example="demo",
)
example_pep_id = Path(
...,
description="A project name inside a particular namespace",
example="BiocProject"
)
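# Hypothetical route wiring for these Path parameters (the APIRouter and
# handler below are illustrative, not part of this module):
#
#   from fastapi import APIRouter
#   router = APIRouter()
#
#   @router.get("/pep/{namespace}/{pep_id}")
#   async def get_project(namespace: str = example_namespace,
#                         pep_id: str = example_pep_id):
#       return {"namespace": namespace, "project": pep_id}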
|
the-stack_0_17190
|
import os
import os.path
import sys
from multiprocessing import Pool
import numpy as np
import cv2
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from progress_bar import ProgressBar
def main():
"""A multii-thread tool to crop sub images."""
input_folder = '../dataset/dataset/training_set/train_hdr'
save_folder = '../dataset/dataset/training_set/train_hdr_sub'
n_thread = 20
crop_sz = 480 # crop size
step = 240 # crop stride
thres_sz = 48
compression_level = 0 # 3 is the default value in cv2
# CV_IMWRITE_PNG_COMPRESSION from 0 to 9. A higher value means a smaller size and longer
# compression time. If read raw images during training, use 0 for faster IO speed.
if not os.path.exists(save_folder):
os.makedirs(save_folder)
print('mkdir [{:s}] ...'.format(save_folder))
else:
print('Folder [{:s}] already exists. Exit...'.format(save_folder))
sys.exit(1)
img_list = []
for root, _, file_list in sorted(os.walk(input_folder)):
path = [os.path.join(root, x) for x in file_list] # assume only images in the input_folder
img_list.extend(path)
#for file_name in file_list:
# if os.path.splitext(file_name)[1] == '.png':
# img_list.append(os.path.join(root, file_name))
def update(arg):
pbar.update(arg)
pbar = ProgressBar(len(img_list))
pool = Pool(n_thread)
for path in img_list:
pool.apply_async(worker,
args=(path, save_folder, crop_sz, step, thres_sz, compression_level),
callback=update)
pool.close()
pool.join()
print('All subprocesses done.')
def worker(path, save_folder, crop_sz, step, thres_sz, compression_level):
img_name = os.path.basename(path)
#img_name = '_'.join(path.split('/')[-4:])
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
n_channels = len(img.shape)
if n_channels == 2:
h, w = img.shape
elif n_channels == 3:
h, w, c = img.shape
else:
raise ValueError('Wrong image shape - {}'.format(n_channels))
h_space = np.arange(0, h - crop_sz + 1, step)
if h - (h_space[-1] + crop_sz) > thres_sz:
h_space = np.append(h_space, h - crop_sz)
w_space = np.arange(0, w - crop_sz + 1, step)
if w - (w_space[-1] + crop_sz) > thres_sz:
w_space = np.append(w_space, w - crop_sz)
index = 0
for x in h_space:
for y in w_space:
index += 1
if n_channels == 2:
crop_img = img[x:x + crop_sz, y:y + crop_sz]
else:
crop_img = img[x:x + crop_sz, y:y + crop_sz, :]
crop_img = np.ascontiguousarray(crop_img)
# var = np.var(crop_img / 255)
# if var > 0.008:
# print(img_name, index_str, var)
cv2.imwrite(
os.path.join(save_folder, img_name.replace('.png', '_s{:03d}.png'.format(index))),
crop_img, [cv2.IMWRITE_PNG_COMPRESSION, compression_level])
return 'Processing {:s} ...'.format(img_name)
if __name__ == '__main__':
main()
|
the-stack_0_17191
|
import shutil
import collections
import os
import numpy as np
import torch.nn as nn
import matplotlib.pyplot as plt
from mle_logging import MLELogger
time_tic1 = {"num_steps": 10, "num_epochs": 1}
stats_tic1 = {"train_loss": 0.1234, "test_loss": 0.1235}
time_tic2 = {"num_steps": 20, "num_epochs": 1}
stats_tic2 = {"train_loss": 0.2, "test_loss": 0.1}
time_tic3 = {"num_steps": 30, "num_epochs": 1}
stats_tic3 = {"train_loss": 0.223, "test_loss": 0.097}
time_tic4 = {"num_steps": 40, "num_epochs": 1}
stats_tic4 = {"train_loss": 0.123, "test_loss": 0.085}
class DummyModel(nn.Module):
def __init__(self):
super(DummyModel, self).__init__()
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return x
model = DummyModel()
fig, ax = plt.subplots()
ax.plot(np.random.normal(0, 1, 20))
some_dict = {"hi": "there"}
log_config = {
"time_to_track": ["num_steps", "num_epochs"],
"what_to_track": ["train_loss", "test_loss"],
"experiment_dir": "reload_dir/",
"model_type": "torch",
"ckpt_time_to_track": "num_steps",
"save_every_k_ckpt": 2,
"save_top_k_ckpt": 2,
"top_k_metric_name": "test_loss",
"top_k_minimize_metric": True,
}
def test_reload():
"""Test reloading/continuation of previous log with top/every k."""
if os.path.exists(log_config["experiment_dir"]) and os.path.isdir(
log_config["experiment_dir"]
):
shutil.rmtree(log_config["experiment_dir"])
log = MLELogger(**log_config)
log.update(time_tic1, stats_tic1, model, fig, some_dict, save=True)
log.update(time_tic2, stats_tic2, model, fig, some_dict, save=True)
log.update(time_tic3, stats_tic3, model, fig, some_dict, save=True)
# Reload the previously instantiated logger from the directory
relog = MLELogger(**log_config, reload=True)
# Check correctness of checkpoints
assert collections.Counter(relog.model_log.top_k_ckpt_list) == collections.Counter(
[
"reload_dir/models/top_k/top_k_no_seed_provided_top_0.pt",
"reload_dir/models/top_k/top_k_no_seed_provided_top_1.pt",
]
)
assert collections.Counter(
relog.model_log.top_k_storage_time
) == collections.Counter([20, 30])
assert np.allclose(relog.model_log.top_k_performance, [0.097, 0.1])
assert collections.Counter(
relog.model_log.every_k_storage_time
) == collections.Counter([20])
assert collections.Counter(
relog.model_log.every_k_ckpt_list
) == collections.Counter(
["reload_dir/models/every_k/every_k_no_seed_provided_k_2.pt"]
)
# Check correctness of figure paths
assert collections.Counter(
relog.figure_log.fig_storage_paths
) == collections.Counter(
[
"reload_dir/figures/fig_1_no_seed_provided.png",
"reload_dir/figures/fig_2_no_seed_provided.png",
"reload_dir/figures/fig_3_no_seed_provided.png",
]
)
# Check correctness of extra paths
assert collections.Counter(
relog.extra_log.extra_storage_paths
) == collections.Counter(
[
"reload_dir/extra/extra_1_no_seed_provided.pkl",
"reload_dir/extra/extra_2_no_seed_provided.pkl",
"reload_dir/extra/extra_3_no_seed_provided.pkl",
]
)
# Check correctness of reloaded statistics
assert np.allclose(
relog.stats_log.stats_tracked["test_loss"], np.array([0.1235, 0.1, 0.097])
)
assert np.allclose(
relog.stats_log.clock_tracked["num_steps"], np.array([10, 20, 30])
)
# Add new result to log
relog.update(time_tic4, stats_tic4, model, fig, some_dict, save=True)
# Check correctness of figure paths
assert collections.Counter(
relog.figure_log.fig_storage_paths
) == collections.Counter(
[
"reload_dir/figures/fig_1_no_seed_provided.png",
"reload_dir/figures/fig_2_no_seed_provided.png",
"reload_dir/figures/fig_3_no_seed_provided.png",
"reload_dir/figures/fig_4_no_seed_provided.png",
]
)
# Check correctness of extra paths
assert collections.Counter(
relog.extra_log.extra_storage_paths
) == collections.Counter(
[
"reload_dir/extra/extra_1_no_seed_provided.pkl",
"reload_dir/extra/extra_2_no_seed_provided.pkl",
"reload_dir/extra/extra_3_no_seed_provided.pkl",
"reload_dir/extra/extra_4_no_seed_provided.pkl",
]
)
# Check correctness of reloaded statistics
assert np.allclose(
np.array(relog.stats_log.stats_tracked["test_loss"]),
np.array([0.1235, 0.1, 0.097, 0.085]),
)
assert np.allclose(
np.array(relog.stats_log.clock_tracked["num_steps"]),
np.array([10, 20, 30, 40]),
)
# Clean up/delete files
shutil.rmtree(log_config["experiment_dir"])
|
the-stack_0_17193
|
"""
Created on 15.04.2019 by Tatiana Korchuganova
Function to transform data from a standard Grafana API response into a D3.js-compatible format
"""
def stacked_hist(series, group_by=None, split_series=None):
plot_data = {}
if not group_by:
return plot_data
if not split_series:
split_series_value = 'all'
split_series_list = []
tags = series[0]['tags'].keys()
if group_by in tags:
for s in series:
split_series_value = s['tags'][split_series] if split_series else split_series_value
if split_series_value not in split_series_list:
split_series_list.append(split_series_value)
if s['tags'][group_by] not in plot_data:
plot_data[s['tags'][group_by]] = {}
if split_series_value not in plot_data[s['tags'][group_by]]:
plot_data[s['tags'][group_by]][split_series_value] = 0
plot_data[s['tags'][group_by]][split_series_value] += s['values'][0][1]
# fill holes by 0 value
for gb, ssd in plot_data.items():
for s in split_series_list:
if s not in ssd.keys():
ssd[s] = 0
return plot_data
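# Illustrative input/output for stacked_hist (made-up tag names and values):
#
#   series = [
#       {'tags': {'computingsite': 'SITE_A', 'status': 'finished'}, 'values': [[0, 5]]},
#       {'tags': {'computingsite': 'SITE_A', 'status': 'failed'}, 'values': [[0, 2]]},
#   ]
#   stacked_hist(series, group_by='computingsite', split_series='status')
#   # -> {'SITE_A': {'finished': 5, 'failed': 2}}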
def pledges_merging(data, pledges, coeff, pledges_dict, federations_info, type='dst_federation'):
if type == 'dst_federation':
pl_type = 'real_federation'
for fed in data['results'][0]['series']:
# fed['values'][-1][1] = 0
if fed['tags'][type] not in federations_info:
federations_info[fed['tags'][type]] = {}
if fed['tags']['computingsite'] not in federations_info[fed['tags'][type]]:
federations_info[fed['tags'][type]][fed['tags']['computingsite']] = \
{'site': fed['tags']['dst_experiment_site'],
'computingsite': fed['tags']['computingsite'],
'tier': fed['tags']['dst_tier'],
'sum_hs06sec': int(round(float(sum_calculate(fed['values'], 1) / 86400))),
'sum_count': sum_calculate(fed['values'], 2),
'sum_cpuconsumptiontime': int(round(float(sum_calculate(fed['values'], 3) / 86400))),
'sum_walltime': int(round(float(sum_calculate(fed['values'], 4) / 86400)))
}
else:
federations_info[fed['tags'][type]][fed['tags']['computingsite']]['site'] \
= fed['tags']['dst_experiment_site']
federations_info[fed['tags'][type]][fed['tags']['computingsite']]['computingsite'] \
= fed['tags']['computingsite']
federations_info[fed['tags'][type]][fed['tags']['computingsite']]['tier'] \
= fed['tags']['dst_tier']
federations_info[fed['tags'][type]][fed['tags']['computingsite']]['sum_hs06sec'] \
+= int(round(float(sum_calculate(fed['values'], 1) / 86400)))
federations_info[fed['tags'][type]][fed['tags']['computingsite']]['sum_count'] \
= sum_calculate(fed['values'], 2)
federations_info[fed['tags'][type]][fed['tags']['computingsite']]['sum_cpuconsumptiontime'] \
+= int(round(float(sum_calculate(fed['values'], 3) / 86400)))
federations_info[fed['tags'][type]][fed['tags']['computingsite']]['sum_walltime'] \
+= int(round(float(sum_calculate(fed['values'], 4) / 86400)))
if fed['tags'][type] not in pledges_dict:
pledges_dict[fed['tags'][type]] = {}
pledges_dict[fed['tags'][type]]['tier'] = fed['tags']['dst_tier']
pledges_dict[fed['tags'][type]]["hs06sec"] = 0
pledges_dict[fed['tags'][type]]["pledges"] = 0
for value in fed['values']:
pledges_dict[fed['tags'][type]]['hs06sec'] += value[1]
for fed in pledges['results'][0]['series']:
# fed['values'][-1][1] = 0
if fed['tags'][pl_type] not in pledges_dict:
pledges_dict[fed['tags'][pl_type]] = {}
if fed['tags']['tier'] == 'Tier 0':
pledges_dict[fed['tags'][pl_type]]['tier'] = 0
elif fed['tags']['tier'] == 'Tier 1':
pledges_dict[fed['tags'][pl_type]]['tier'] = 1
elif fed['tags']['tier'] == 'Tier 2':
pledges_dict[fed['tags'][pl_type]]['tier'] = 2
elif fed['tags']['tier'] == 'Tier 3':
pledges_dict[fed['tags'][pl_type]]['tier'] = 3
pledges_dict[fed['tags'][pl_type]]["hs06sec"] = 0
pledges_dict[fed['tags'][pl_type]]["pledges"] = 0
for value in fed['values']:
pledges_dict[fed['tags'][pl_type]]['pledges'] += value[1]
return pledges_dict, federations_info
if type == 'dst_country':
pl_type = 'country'
for fed in data['results'][0]['series']:
# fed['values'][-1][1] = 0
if fed['tags'][type] == "United States of America":
fed['tags'][type] = "USA"
if fed['tags'][type] not in pledges_dict:
if fed['tags']['dst_federation'] in ('CH-CERN'):
fed['tags'][type] = 'CERN'
pledges_dict[fed['tags'][type]] = {}
pledges_dict[fed['tags'][type]]["hs06sec"] = 0
pledges_dict[fed['tags'][type]]["pledges"] = 0
for value in fed['values']:
pledges_dict[fed['tags'][type]]['hs06sec'] += value[1]
else:
if fed['tags']['dst_federation'] in ('CH-CERN'):
fed['tags'][type] = 'CERN'
for value in fed['values']:
pledges_dict[fed['tags'][type]]['hs06sec'] += value[1]
for fed in pledges['results'][0]['series']:
# fed['values'][-1][1] = 0
if fed['tags'][pl_type] not in pledges_dict:
# fed['values'][1] = fed['values'][2]
# pledges_dict[fed['tags'][pl_type]]['pledges'] = 0
if fed['tags'][pl_type] == 'Latin America':
fed['tags'][pl_type] = 'Chile'
if fed['tags']['real_federation'] in ('CH-CERN'):
fed['tags'][pl_type] = 'CERN'
if fed['tags'][pl_type] not in pledges_dict:
pledges_dict[fed['tags'][pl_type]] = {}
pledges_dict[fed['tags'][pl_type]]["hs06sec"] = 0
pledges_dict[fed['tags'][pl_type]]["pledges"] = 0
for value in fed['values']:
pledges_dict[fed['tags'][pl_type]]['pledges'] += value[1]
else:
if fed['tags'][pl_type] == 'Latin America':
fed['tags'][pl_type] = 'Chile'
if fed['tags']['real_federation'] in ('CH-CERN'):
fed['tags'][pl_type] = 'CERN'
for value in fed['values']:
pledges_dict[fed['tags'][pl_type]]['pledges'] += value[1]
return pledges_dict
def sum_calculate(data, column_number):
sum_for_column = 0
for value in data:
sum_for_column += value[column_number]
return sum_for_column
|
the-stack_0_17194
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class KoubeiMemberDataTagQueryModel(object):
def __init__(self):
self._isv_pid = None
self._shop_id = None
self._user_id = None
@property
def isv_pid(self):
return self._isv_pid
@isv_pid.setter
def isv_pid(self, value):
self._isv_pid = value
@property
def shop_id(self):
return self._shop_id
@shop_id.setter
def shop_id(self, value):
self._shop_id = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.isv_pid:
if hasattr(self.isv_pid, 'to_alipay_dict'):
params['isv_pid'] = self.isv_pid.to_alipay_dict()
else:
params['isv_pid'] = self.isv_pid
if self.shop_id:
if hasattr(self.shop_id, 'to_alipay_dict'):
params['shop_id'] = self.shop_id.to_alipay_dict()
else:
params['shop_id'] = self.shop_id
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiMemberDataTagQueryModel()
if 'isv_pid' in d:
o.isv_pid = d['isv_pid']
if 'shop_id' in d:
o.shop_id = d['shop_id']
if 'user_id' in d:
o.user_id = d['user_id']
return o
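# Minimal round-trip example with placeholder ids (illustrative only).
if __name__ == '__main__':
    model = KoubeiMemberDataTagQueryModel()
    model.isv_pid = '2088000000000000'
    model.shop_id = 'SHOP_001'
    model.user_id = '2088000000000001'
    as_dict = model.to_alipay_dict()
    restored = KoubeiMemberDataTagQueryModel.from_alipay_dict(as_dict)
    print(json.dumps(as_dict), restored.user_id)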
|
the-stack_0_17195
|
# Copyright 2021 Injective Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Injective Exchange API client for Python. Example only."""
import asyncio
import logging
from pyinjective.async_client import AsyncClient
from pyinjective.constant import Network
async def main() -> None:
network = Network.testnet()
client = AsyncClient(network, insecure=False)
market_id = "0x4ca0f92fc28be0c9761326016b5a1a2177dd6375558365116b5bdda9abc229ce"
subaccount_id = "0xc6fe5d33615a1c52c08018c47e8bc53646a0e101000000000000000000000000"
trades = await client.get_derivative_trades(
market_id=market_id,
subaccount_id=subaccount_id
)
print(trades)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
asyncio.get_event_loop().run_until_complete(main())
|
the-stack_0_17197
|
import gym
def get_env_monitor(env):
"""
Args:
env: gym.Env. The wrapped environment.
Returns:
the `gym.wrappers.Monitor` around env
Raises:
        `ValueError` if env is not wrapped by Monitor
"""
currentenv = env
while True:
if "Monitor" in currentenv.__class__.__name__:
return currentenv
elif isinstance(currentenv, gym.Wrapper):
currentenv = currentenv.env
else:
raise ValueError("Couldn't find wrapper named Monitor")
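# Illustrative usage (assumes the classic `gym.wrappers.Monitor` wrapper, which
# newer gym releases may no longer ship; env name and path are placeholders):
#
#   env = gym.wrappers.Monitor(gym.make("CartPole-v1"), "/tmp/monitor", force=True)
#   monitor = get_env_monitor(env)
#   print(monitor)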
|
the-stack_0_17198
|
'''
Let’s say I give you a list saved in a variable: a = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100].
Write one line of Python that takes this list a and makes a new list that has only the even elements of this list in it.
'''
def main():
a = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
b = [num for num in a if num % 2 == 0]
print(b)
if __name__ == '__main__' :
main()
|
the-stack_0_17200
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import namedtuple
from neutron_lib.api.definitions import portbindings
from neutron_lib.callbacks import resources
from neutron_lib import exceptions as n_exceptions
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from neutron_lib.services.logapi import constants as log_const
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from ovsdbapp.backend.ovs_idl import idlutils
from neutron._i18n import _
from neutron.common.ovn import constants as ovn_const
from neutron.common.ovn import utils
from neutron.conf.services import logging as log_cfg
from neutron.services.logapi.common import db_api
from neutron.services.logapi.common import sg_callback
from neutron.services.logapi.drivers import base
from neutron.services.logapi.drivers import manager
LOG = logging.getLogger(__name__)
DRIVER = None
log_cfg.register_log_driver_opts()
SUPPORTED_LOGGING_TYPES = [log_const.SECURITY_GROUP]
class LoggingNotSupported(n_exceptions.NeutronException):
message = _("The current OVN version does not offer support "
"for neutron network log functionality.")
class OVNDriver(base.DriverBase):
def __init__(self):
super().__init__(
name="ovn",
vif_types=[portbindings.VIF_TYPE_OVS,
portbindings.VIF_TYPE_VHOST_USER],
vnic_types=[portbindings.VNIC_NORMAL],
supported_logging_types=SUPPORTED_LOGGING_TYPES,
requires_rpc=False)
self._log_plugin_property = None
self.meter_name = (
cfg.CONF.network_log.local_output_log_base or "acl_log_meter")
@staticmethod
def network_logging_supported(ovn_nb):
columns = list(ovn_nb._tables["Meter"].columns)
return ("fair" in columns)
@classmethod
def create(cls, plugin_driver):
cls.plugin_driver = plugin_driver
return OVNDriver()
@property
def _log_plugin(self):
if self._log_plugin_property is None:
self._log_plugin_property = directory.get_plugin(
plugin_constants.LOG_API)
return self._log_plugin_property
@staticmethod
def _log_dict_to_obj(log_dict):
cls = namedtuple('Log_obj', log_dict)
cls.__new__.__defaults__ = tuple(log_dict.values())
return cls()
def _get_logs(self, context):
log_objs = self._log_plugin.get_logs(context)
return [self._log_dict_to_obj(lo) for lo in log_objs]
@property
def ovn_nb(self):
return self.plugin_driver.nb_ovn
def _create_ovn_fair_meter(self, ovn_txn):
"""Create row in Meter table with fair attribute set to True.
Create a row in OVN's NB Meter table based on well-known name. This
method uses the network_log configuration to specify the attributes
of the meter. Current implementation needs only one 'fair' meter row
which is then referred by multiple ACL rows.
        :param ovn_txn: OVN northbound idl transaction.
"""
meter = self.ovn_nb.db_find_rows(
"Meter", ("name", "=", self.meter_name)).execute(check_error=True)
if meter:
meter = meter[0]
try:
meter_band = self.ovn_nb.lookup("Meter_Band",
meter.bands[0].uuid)
if all((meter.unit == "pktps",
meter.fair[0],
meter_band.rate == cfg.CONF.network_log.rate_limit,
meter_band.burst_size ==
cfg.CONF.network_log.burst_limit)):
# Meter (and its meter-band) unchanged: noop.
return
except idlutils.RowNotFound:
pass
# Re-create meter (and its meter-band) with the new attributes.
# This is supposed to happen only if configuration changed, so
# doing updates is an overkill: better to leverage the ovsdbapp
# library to avoid the complexity.
ovn_txn.add(self.ovn_nb.meter_del(meter.uuid))
# Create meter
LOG.info("Creating network log fair meter %s", self.meter_name)
ovn_txn.add(self.ovn_nb.meter_add(
name=self.meter_name,
unit="pktps",
rate=cfg.CONF.network_log.rate_limit,
fair=True,
burst_size=cfg.CONF.network_log.burst_limit,
may_exist=False,
external_ids={ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY:
log_const.LOGGING_PLUGIN}))
@staticmethod
def _acl_actions_enabled(log_obj):
if not log_obj.enabled:
return set()
if log_obj.event == log_const.ACCEPT_EVENT:
return {ovn_const.ACL_ACTION_ALLOW_RELATED,
ovn_const.ACL_ACTION_ALLOW}
if log_obj.event == log_const.DROP_EVENT:
return {ovn_const.ACL_ACTION_DROP,
ovn_const.ACL_ACTION_REJECT}
# Fall through case: log_const.ALL_EVENT
return {ovn_const.ACL_ACTION_DROP,
ovn_const.ACL_ACTION_REJECT,
ovn_const.ACL_ACTION_ALLOW_RELATED,
ovn_const.ACL_ACTION_ALLOW}
def _remove_acls_log(self, pgs, ovn_txn, log_name=None):
acl_changes, acl_visits = 0, 0
for pg in pgs:
for acl_uuid in pg["acls"]:
acl_visits += 1
# skip acls used by a different network log
if log_name:
acl = self.ovn_nb.lookup("ACL", acl_uuid)
if acl.name and acl.name[0] != log_name:
continue
ovn_txn.add(self.ovn_nb.db_set(
"ACL", acl_uuid,
("log", False),
("meter", []),
("name", []),
("severity", [])
))
acl_changes += 1
msg = "Cleared %d (out of %d visited) ACLs"
if log_name:
msg += " for network log {}".format(log_name)
LOG.info(msg, acl_changes, acl_visits)
def _set_acls_log(self, pgs, ovn_txn, actions_enabled, log_name):
acl_changes, acl_visits = 0, 0
for pg in pgs:
for acl_uuid in pg["acls"]:
acl_visits += 1
acl = self.ovn_nb.lookup("ACL", acl_uuid)
# skip acls used by a different network log
if acl.name and acl.name[0] != log_name:
continue
ovn_txn.add(self.ovn_nb.db_set(
"ACL", acl_uuid,
("log", acl.action in actions_enabled),
("meter", self.meter_name),
("name", log_name),
("severity", "info")
))
acl_changes += 1
LOG.info("Set %d (out of %d visited) ACLs for network log %s",
acl_changes, acl_visits, log_name)
def _update_log_objs(self, context, ovn_txn, log_objs):
for log_obj in log_objs:
pgs = self._pgs_from_log_obj(context, log_obj)
actions_enabled = self._acl_actions_enabled(log_obj)
self._set_acls_log(pgs, ovn_txn, actions_enabled,
utils.ovn_name(log_obj.id))
def _pgs_all(self):
return self.ovn_nb.db_list(
"Port_Group", columns=["name", "acls"]).execute(check_error=True)
def _pgs_from_log_obj(self, context, log_obj):
"""Map Neutron log_obj into affected port groups in OVN.
:param context: current running context information
:param log_obj: a log_object to be analyzed.
"""
if not log_obj.resource_id and not log_obj.target_id:
# No sg, no port: return all pgs
return self._pgs_all()
pgs = []
# include special pg_drop to log DROP and ALL actions
if not log_obj.event or log_obj.event in (log_const.DROP_EVENT,
log_const.ALL_EVENT):
try:
pg = self.ovn_nb.lookup("Port_Group",
ovn_const.OVN_DROP_PORT_GROUP_NAME)
pgs.append({"name": pg.name,
"acls": [r.uuid for r in pg.acls]})
except idlutils.RowNotFound:
pass
if log_obj.resource_id:
try:
pg = self.ovn_nb.lookup("Port_Group",
utils.ovn_port_group_name(
log_obj.resource_id))
pgs.append({"name": pg.name,
"acls": [r.uuid for r in pg.acls]})
except idlutils.RowNotFound:
pass
# Note: when sg is provided, it is redundant to get sgs from port,
# because model will ensure that sg is associated with neutron port
elif log_obj.target_id:
sg_ids = db_api._get_sgs_attached_to_port(context,
log_obj.target_id)
for sg_id in sg_ids:
try:
pg = self.ovn_nb.lookup("Port_Group",
utils.ovn_port_group_name(sg_id))
pgs.append({"name": pg.name,
"acls": [r.uuid for r in pg.acls]})
except idlutils.RowNotFound:
pass
return pgs
def create_log(self, context, log_obj):
"""Create a log_obj invocation.
:param context: current running context information
:param log_obj: a log objects being created
"""
LOG.debug("Create_log %s", log_obj)
pgs = self._pgs_from_log_obj(context, log_obj)
actions_enabled = self._acl_actions_enabled(log_obj)
with self.ovn_nb.transaction(check_error=True) as ovn_txn:
self._create_ovn_fair_meter(ovn_txn)
self._set_acls_log(pgs, ovn_txn, actions_enabled,
utils.ovn_name(log_obj.id))
def create_log_precommit(self, context, log_obj):
"""Create a log_obj precommit.
:param context: current running context information
:param log_obj: a log object being created
"""
LOG.debug("Create_log_precommit %s", log_obj)
if not self.network_logging_supported(self.ovn_nb):
raise LoggingNotSupported()
def update_log(self, context, log_obj):
"""Update a log_obj invocation.
:param context: current running context information
:param log_obj: a log object being updated
"""
LOG.debug("Update_log %s", log_obj)
pgs = self._pgs_from_log_obj(context, log_obj)
actions_enabled = self._acl_actions_enabled(log_obj)
with self.ovn_nb.transaction(check_error=True) as ovn_txn:
self._set_acls_log(pgs, ovn_txn, actions_enabled,
utils.ovn_name(log_obj.id))
def delete_log(self, context, log_obj):
"""Delete a log_obj invocation.
:param context: current running context information
:param log_obj: a log_object being deleted
"""
LOG.debug("Delete_log %s", log_obj)
# If we are removing the last log_obj, let's clear log from all acls.
# This is a simple way of ensuring that no acl logs are left behind!
log_objs = self._get_logs(context)
if not log_objs or (
len(log_objs) == 1 and log_objs[0].id == log_obj.id):
pgs = self._pgs_all()
with self.ovn_nb.transaction(check_error=True) as ovn_txn:
self._remove_acls_log(pgs, ovn_txn)
ovn_txn.add(self.ovn_nb.meter_del(self.meter_name,
if_exists=True))
LOG.info("All ACL logs cleared after deletion of log_obj %s",
log_obj.id)
return
# Remove log_obj and revisit all remaining ones, since the acls that
# were serving the removed log_obj may be usable by the remaining
# log_objs.
pgs = self._pgs_from_log_obj(context, log_obj)
with self.ovn_nb.transaction(check_error=True) as ovn_txn:
self._remove_acls_log(pgs, ovn_txn, utils.ovn_name(log_obj.id))
# TODO(flaviof): We needed to break this second part into a separate
# transaction because logic that determines the value of the 'freed up'
        # acl rows will not see the modified rows unless it was inside an
# idl command.
with self.ovn_nb.transaction(check_error=True) as ovn_txn:
self._update_log_objs(context, ovn_txn, [lo for lo in log_objs
if lo.id != log_obj.id])
def resource_update(self, context, log_objs):
"""Tell the agent when resources related to log_objects are
being updated
:param context: current running context information
:param log_objs: a list of log_objects, whose related resources are
being updated.
"""
LOG.debug("Resource_update %s", log_objs)
with self.ovn_nb.transaction(check_error=True) as ovn_txn:
self._update_log_objs(context, ovn_txn, log_objs)
def register(plugin_driver):
"""Register the driver."""
global DRIVER
if not DRIVER:
DRIVER = OVNDriver.create(plugin_driver)
# Trigger decorator
importutils.import_module(
"neutron.services.logapi.common.sg_validate"
)
# Register resource callback handler
manager.register(
resources.SECURITY_GROUP_RULE, sg_callback.SecurityGroupRuleCallBack)
LOG.info("OVN logging driver registered")
return DRIVER
|
the-stack_0_17201
|
# -*- coding: utf-8 -*-
# Copyright 2021 Tampere University and VTT Technical Research Centre of Finland
# This software was developed as a part of the ProCemPlus project: https://www.senecc.fi/projects/procemplus
# This source code is licensed under the MIT license. See LICENSE in the repository root directory.
# Author(s): Otto Hylli <[email protected]>
# Antti Keski-Koukkari <[email protected]>
'''
Contains class for a simulation platform component used to simulate energy storages.
'''
import asyncio
from typing import Union, Any
from tools.components import AbstractSimulationComponent
from tools.messages import BaseMessage, AbstractResultMessage
from tools.tools import FullLogger, load_environmental_variables, EnvironmentVariable
from tools.datetime_tools import to_utc_datetime_object
from domain_messages.resource import ResourceStateMessage
from domain_messages.ControlState import ControlStatePowerSetpointMessage
from domain_tools.resource.resource_state_source import ResourceState, CsvFileResourceStateSource, CsvFileError
from storage_resource.state import StorageState
# names of used environment variables
RESOURCE_STATE_TOPIC = "RESOURCE_STATE_TOPIC"
RESOURCE_STATE_CSV_FILE = "RESOURCE_STATE_CSV_FILE"
RESOURCE_STATE_CSV_DELIMITER = "RESOURCE_STATE_CSV_DELIMITER"
CUSTOMER_ID = 'CUSTOMER_ID'
NODE = 'NODE'
CHARGE_RATE = 'CHARGE_RATE'
DISCHARGE_RATE = 'DISCHARGE_RATE'
INITIAL_STATE_OF_CHARGE = 'INITIAL_STATE_OF_CHARGE'
KWH_RATED = 'KWH_RATED'
DISCHARGE_EFFICIENCY = 'DISCHARGE_EFFICIENCY'
CHARGE_EFFICIENCY = 'CHARGE_EFFICIENCY'
KW_RATED = 'KW_RATED'
SELF_DISCHARGE = 'SELF_DISCHARGE'
CONTROLLER_ID = 'CONTROLLER_ID'
CONTROL_STATE_TOPIC = 'CONTROL_STATE_TOPIC'
LOGGER = FullLogger( __name__ )
class StorageResource(AbstractSimulationComponent):
'''
    A simulation platform component used to simulate energy storages. For each epoch it gets the power required of it either from a csv file or from a ControlState message.
It calculates its state based on the power and epoch length. Then it publishes its state as a ResourceState message which
includes the actual power the storage could manage and its state of charge.
'''
def __init__(self, storage: StorageState, state_source: CsvFileResourceStateSource = None, initialization_error: str = None ):
'''
Create a storage resource.
storage: Used to model and calculate the actual state of the storage.
state_source: If not None this storage will operate according to the power values from the given source. If None expects to get ControlState messages.
        initialization_error: If not None, indicates that the component cannot function properly and should publish an error message with the given text when the simulation starts.
'''
super().__init__()
self._storage = storage
self._state_source = state_source
self.initialization_error = initialization_error
if self.initialization_error is not None:
LOGGER.error( self.initialization_error )
# get used main topics from environment or use defaults.
environment = load_environmental_variables(
(RESOURCE_STATE_TOPIC, str, "ResourceState"),
(CONTROL_STATE_TOPIC, str, 'ControlState.PowerSetpoint' )
)
# used as a part of the topic ResourceState messages are published to.
self._type = 'Storage'
self._resource_state_topic = environment[ RESOURCE_STATE_TOPIC ]
# publish resource states to this topic
self._result_topic = '.'.join( [ self._resource_state_topic, self._type, self.component_name ])
# other topics component listens to. Possibly the ControlState topic.
other_topics = []
if state_source is None:
# no csv file subscribe to ControlState messages
control_state_topic = environment[ CONTROL_STATE_TOPIC ]
control_state_topic = control_state_topic +'.' +self.component_name
other_topics = [ control_state_topic ]
LOGGER.info(f'Listening to control state messages from topic {control_state_topic}.')
else:
LOGGER.info('Using a CSV file as the control state source.')
# super class will handle subscribing to the topic.
self._other_topics = other_topics
# store ControlState message for current epoch here.
self._control_state_for_epoch = None
async def process_epoch(self) -> bool:
'''
Handles the processing of an epoch by calculating the new state for the storage and publishing it for the epoch.
'''
LOGGER.debug( f'Starting to process epoch {self._latest_epoch}.' )
try:
await self._send_resource_state_message()
except Exception as error:
description = f'Unable to create or send a ResourceState message: {str( error )}'
LOGGER.error( description )
await self.send_error_message(description)
return False
return True
async def all_messages_received_for_epoch(self) -> bool:
        '''Component is ready to process an epoch if it uses a csv state source or if it has already received a ControlState message for the current epoch.'''
return self._state_source is not None or (self._control_state_for_epoch is not None and self._control_state_for_epoch.epoch_number == self._latest_epoch)
async def general_message_handler(self, message_object: Union[BaseMessage, Any],
message_routing_key: str) -> None:
'''Handle receiving of ControlState messages.'''
# Check that we have a ControlState message and it is what we expect.
if isinstance( message_object, ControlStatePowerSetpointMessage ):
if message_object.epoch_number != self._latest_epoch:
LOGGER.warning(f'Got a control state message with id {message_object.message_id} for epoch {message_object.epoch_number} but was expecting it for epoch {self._latest_epoch}.')
# ignore message
return
if self._control_state_for_epoch is not None and self._control_state_for_epoch.epoch_number == message_object.epoch_number:
                LOGGER.warning(f'Had already received a control state message for epoch {self._latest_epoch} but received another one with message id {message_object.message_id}.')
# ignore message
return
# got an expected message. Ready to process epoch.
self._control_state_for_epoch = message_object
self._triggering_message_ids.append(message_object.message_id)
await self.start_epoch()
async def _send_resource_state_message(self):
'''
Calculates new state for the storage and publishes it as a ResourceState message.
'''
resource_state = self._get_resource_state_message()
await self._rabbitmq_client.send_message(self._result_topic, resource_state.bytes())
def _get_resource_state_message(self) -> ResourceStateMessage:
'''
Create a ResourceStateMessage from the new state of the storage.
'''
if self._state_source is not None:
# get desired power from state source
control_state = self._state_source.getNextEpochData()
# add possible new customer id and node to storage so it can be included as a part of the resource state message
self._storage.customer_id = control_state.customerid
self._storage.node = control_state.node
else:
# get storage control information from received message
control_state = ResourceState( customerid = None, real_power = self._control_state_for_epoch.real_power.value, reactive_power = self._control_state_for_epoch.reactive_power.value )
# power desired from the storage
real_power = control_state.real_power
# calculate the duration of the epoch in hours required to calculate the new state of the storage
epoch_start = to_utc_datetime_object( self._latest_epoch_message.start_time )
epoch_end = to_utc_datetime_object( self._latest_epoch_message.end_time )
epoch_duration = epoch_end -epoch_start
epoch_duration_h = epoch_duration.total_seconds() /3600
state = self._storage.calculate_state(real_power, epoch_duration_h)
# create ResourceState message based on the storage state
message = ResourceStateMessage(
SimulationId = self.simulation_id,
Type = ResourceStateMessage.CLASS_MESSAGE_TYPE,
SourceProcessId = self.component_name,
MessageId = next(self._message_id_generator),
EpochNumber = self._latest_epoch,
TriggeringMessageIds = self._triggering_message_ids,
CustomerId = state.customerid,
RealPower = state.real_power,
ReactivePower = state.reactive_power,
Node = state.node,
StateOfCharge = state.state_of_charge
)
if control_state.real_power != state.real_power:
# storage could not operate with the required power so add a warning about it.
message.warnings = [ 'warning.input.range' ]
return message
def create_component() -> StorageResource:
'''
Create a StorageResource based on the initialization environment variables.
'''
    # specify the environment variables to be read. For optional ones, mark the default value as None even though that is the default anyway.
env_variable_spec = (
( RESOURCE_STATE_CSV_FILE, str, None ),
( RESOURCE_STATE_CSV_DELIMITER, str, "," ),
( CUSTOMER_ID, str, None ),
( NODE, str, None ),
( CHARGE_RATE, float, 100.0 ),
( DISCHARGE_RATE, float, 100.0 ),
( CHARGE_EFFICIENCY, float, 90.0 ),
( DISCHARGE_EFFICIENCY, float, 90.0 ),
( KWH_RATED, float ),
( INITIAL_STATE_OF_CHARGE, float ),
( KW_RATED, float ),
( SELF_DISCHARGE, float, 0.0 )
)
environment = load_environmental_variables( *env_variable_spec )
# check if some required environment variables were missing.
missing = []
for item in env_variable_spec:
if len( item ) == 2:
# no explicit default value given so this was required
if environment[ item[0] ] is None:
missing.append( item[0] )
initialization_error = None # possible initialization error message goes here
if len( missing ) > 0:
initialization_error = 'Component missing required initialization environment variables: ' +', '.join( missing )
csv_file = environment[RESOURCE_STATE_CSV_FILE]
state_source = None # if a state source is used it goes here.
if csv_file is not None:
node = None # no initial value for storage node. Read from csv.
try:
state_source = CsvFileResourceStateSource( csv_file, environment[RESOURCE_STATE_CSV_DELIMITER])
except CsvFileError as error:
initialization_error = f'Unable to create a csv file resource state source for the component: {str( error )}'
    else:
# Since currently ControlState message does not have node it can be set with environment variable
node = environment[NODE]
# since state source is not used customer id is required from environment
# ResourceState message requires customer id and ControlState message does not include it.
if environment[CUSTOMER_ID] is None:
initialization_error = f'when {RESOURCE_STATE_CSV_FILE} initialization environment variable is not given {CUSTOMER_ID} is required.'
storage = None
try:
# create storage state based on environment variables to be used by the component
storage = StorageState(customer_id = environment[CUSTOMER_ID], node = node, initial_state_of_charge = environment[INITIAL_STATE_OF_CHARGE], kwh_rated = environment[KWH_RATED], kw_rated = environment[KW_RATED],
self_discharge = environment[SELF_DISCHARGE], charge_rate = environment[CHARGE_RATE],
discharge_rate = environment[DISCHARGE_RATE ], charge_efficiency = environment[CHARGE_EFFICIENCY],
discharge_efficiency = environment[DISCHARGE_EFFICIENCY])
except Exception as err:
initialization_error = f'Unable to create a storage state: {err}'
# create component
return StorageResource(storage, state_source, initialization_error )
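# Illustrative sketch (an assumption, not part of the original component): a
# minimal environment configuration that create_component() above could use
# with a CSV control source. The file name and numeric values are hypothetical
# placeholders; only the variables without defaults in env_variable_spec are required.
EXAMPLE_CSV_ENVIRONMENT = {
    RESOURCE_STATE_CSV_FILE: "storage_control_states.csv",  # hypothetical csv file
    RESOURCE_STATE_CSV_DELIMITER: ",",
    KWH_RATED: "100.0",               # required, no default in env_variable_spec
    INITIAL_STATE_OF_CHARGE: "50.0",  # required, no default in env_variable_spec
    KW_RATED: "20.0",                 # required, no default in env_variable_spec
}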
async def start_component():
'''
Create and start a StorageResource component.
'''
try:
resource = create_component()
await resource.start()
while not resource.is_stopped:
await asyncio.sleep( 2 )
except Exception as error:
LOGGER.error("{} : {}".format(type(error).__name__, error))
if __name__ == '__main__':
asyncio.run(start_component())
|
the-stack_0_17202
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains operator for uploading local file to GCS.
"""
import warnings
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.utils.decorators import apply_defaults
class LocalFilesystemToGCSOperator(BaseOperator):
"""
Uploads a file to Google Cloud Storage.
Optionally can compress the file for upload.
:param src: Path to the local file. (templated)
:type src: str
:param dst: The object name to set when uploading the file. (templated)
:type dst: str
:param bucket: The bucket to upload to. (templated)
:type bucket: str
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:param google_cloud_storage_conn_id: (Deprecated) The connection ID used to connect to Google Cloud
Platform. This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
:type google_cloud_storage_conn_id: str
:param mime_type: The mime-type string
:type mime_type: str
:param delegate_to: The account to impersonate, if any
:type delegate_to: str
    :param gzip: Allows the file to be compressed and uploaded as gzip
:type gzip: bool
"""
template_fields = ('src', 'dst', 'bucket')
@apply_defaults
def __init__(self,
src,
dst,
bucket,
gcp_conn_id='google_cloud_default',
google_cloud_storage_conn_id=None,
mime_type='application/octet-stream',
delegate_to=None,
gzip=False,
*args,
**kwargs):
super().__init__(*args, **kwargs)
if google_cloud_storage_conn_id:
warnings.warn(
"The google_cloud_storage_conn_id parameter has been deprecated. You should pass "
"the gcp_conn_id parameter.", DeprecationWarning, stacklevel=3)
gcp_conn_id = google_cloud_storage_conn_id
self.src = src
self.dst = dst
self.bucket = bucket
self.gcp_conn_id = gcp_conn_id
self.mime_type = mime_type
self.delegate_to = delegate_to
self.gzip = gzip
def execute(self, context):
"""
Uploads the file to Google Cloud Storage
"""
hook = GCSHook(
google_cloud_storage_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to)
hook.upload(
bucket_name=self.bucket,
object_name=self.dst,
mime_type=self.mime_type,
filename=self.src,
gzip=self.gzip,
)
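# Illustrative usage sketch (not part of the original module). The DAG is
# assumed to be defined elsewhere and the connection id to exist; the task id,
# paths and bucket name below are hypothetical placeholders.
def _example_upload_task(dag):
    """Build a hypothetical LocalFilesystemToGCSOperator task for an existing DAG."""
    return LocalFilesystemToGCSOperator(
        task_id='upload_report',
        src='/tmp/report.csv',       # hypothetical local file
        dst='reports/report.csv',    # hypothetical object name
        bucket='example-bucket',     # hypothetical bucket
        gzip=True,
        dag=dag,
    )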
|
the-stack_0_17203
|
# Import libraries
import argparse
from azureml.core import Run
# Get parameters
parser = argparse.ArgumentParser()
parser.add_argument('--model_folder', type=str, dest='model_folder',
default="driver-training", help='model location')
args = parser.parse_args()
model_folder = args.model_folder
# Get the experiment run context
run = Run.get_context()
# load the model
print("Loading model from " + model_folder)
model_name = 'porto_seguro_safe_driver_model'
model_file = model_folder + "/" + model_name + ".pkl"
# Get metrics for registration
metrics = run.parent.get_metrics()
# Register the model
run.upload_file(model_name, model_file)
run.register_model(
model_path=model_name,
model_name=model_name,
tags=metrics)
run.complete()
|
the-stack_0_17204
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from multiprocessing.pool import Pool
import numpy as np
from collections import OrderedDict
from batchgenerators.utilities.file_and_folder_operations import *
from meddec.paper_plot.nature_methods.challenge_visualization_stuff.own_implementation.ranking import \
rank_then_aggregate
import scipy.stats as ss
from nnunet.dataset_conversion.Task032_BraTS_2018 import convert_labels_back_to_BraTS_2018_2019_convention
from nnunet.dataset_conversion.Task043_BraTS_2019 import copy_BraTS_segmentation_and_convert_labels
from nnunet.evaluation.region_based_evaluation import get_brats_regions, evaluate_regions
from nnunet.paths import nnUNet_raw_data
import SimpleITK as sitk
import shutil
from medpy.metric import dc, hd95
from nnunet.postprocessing.consolidate_postprocessing import collect_cv_niftis
from typing import Tuple
def apply_brats_threshold(fname, out_dir, threshold, replace_with):
img_itk = sitk.ReadImage(fname)
img_npy = sitk.GetArrayFromImage(img_itk)
s = np.sum(img_npy == 3)
if s < threshold:
print(s, fname)
img_npy[img_npy == 3] = replace_with
img_itk_postprocessed = sitk.GetImageFromArray(img_npy)
img_itk_postprocessed.CopyInformation(img_itk)
sitk.WriteImage(img_itk_postprocessed, join(out_dir, fname.split("/")[-1]))
def load_niftis_threshold_compute_dice(gt_file, pred_file, thresholds: Tuple[list, tuple]):
gt = sitk.GetArrayFromImage(sitk.ReadImage(gt_file))
pred = sitk.GetArrayFromImage(sitk.ReadImage(pred_file))
mask_pred = pred == 3
mask_gt = gt == 3
num_pred = np.sum(mask_pred)
num_gt = np.sum(mask_gt)
dice = dc(mask_pred, mask_gt)
res_dice = {}
res_was_smaller = {}
for t in thresholds:
was_smaller = False
if num_pred < t:
was_smaller = True
if num_gt == 0:
dice_here = 1.
else:
dice_here = 0.
else:
dice_here = deepcopy(dice)
res_dice[t] = dice_here
res_was_smaller[t] = was_smaller
return res_was_smaller, res_dice
def apply_threshold_to_folder(folder_in, folder_out, threshold, replace_with, processes=24):
maybe_mkdir_p(folder_out)
niftis = subfiles(folder_in, suffix='.nii.gz', join=True)
p = Pool(processes)
p.starmap(apply_brats_threshold, zip(niftis, [folder_out]*len(niftis), [threshold]*len(niftis), [replace_with] * len(niftis)))
p.close()
p.join()
def determine_brats_postprocessing(folder_with_preds, folder_with_gt, postprocessed_output_dir, processes=8,
thresholds=(0, 10, 50, 100, 200, 500, 750, 1000, 1500, 2500, 10000), replace_with=2):
# find pairs
nifti_gt = subfiles(folder_with_gt, suffix=".nii.gz", sort=True)
p = Pool(processes)
nifti_pred = subfiles(folder_with_preds, suffix='.nii.gz', sort=True)
results = p.starmap_async(load_niftis_threshold_compute_dice, zip(nifti_gt, nifti_pred, [thresholds] * len(nifti_pred)))
results = results.get()
all_dc_per_threshold = {}
for t in thresholds:
all_dc_per_threshold[t] = np.array([i[1][t] for i in results])
print(t, np.mean(all_dc_per_threshold[t]))
means = [np.mean(all_dc_per_threshold[t]) for t in thresholds]
best_threshold = thresholds[np.argmax(means)]
print('best', best_threshold, means[np.argmax(means)])
maybe_mkdir_p(postprocessed_output_dir)
p.starmap(apply_brats_threshold, zip(nifti_pred, [postprocessed_output_dir]*len(nifti_pred), [best_threshold]*len(nifti_pred), [replace_with] * len(nifti_pred)))
p.close()
p.join()
save_pickle((thresholds, means, best_threshold, all_dc_per_threshold), join(postprocessed_output_dir, "threshold.pkl"))
def collect_and_prepare(base_dir, num_processes = 12, clean=False):
"""
collect all cv_niftis, compute brats metrics, compute enh tumor thresholds and summarize in csv
:param base_dir:
:return:
"""
out = join(base_dir, 'cv_results')
out_pp = join(base_dir, 'cv_results_pp')
experiments = subfolders(base_dir, join=False, prefix='nnUNetTrainer')
regions = get_brats_regions()
gt_dir = join(base_dir, 'gt_niftis')
replace_with = 2
failed = []
successful = []
for e in experiments:
print(e)
try:
o = join(out, e)
o_p = join(out_pp, e)
maybe_mkdir_p(o)
maybe_mkdir_p(o_p)
collect_cv_niftis(join(base_dir, e), o)
if clean or not isfile(join(o, 'summary.csv')):
evaluate_regions(o, gt_dir, regions, num_processes)
if clean or not isfile(join(o_p, 'threshold.pkl')):
determine_brats_postprocessing(o, gt_dir, o_p, num_processes, thresholds=list(np.arange(0, 760, 10)), replace_with=replace_with)
if clean or not isfile(join(o_p, 'summary.csv')):
evaluate_regions(o_p, gt_dir, regions, num_processes)
successful.append(e)
except Exception as ex:
print("\nERROR\n", e, ex, "\n")
failed.append(e)
# we are interested in the mean (nan is 1) column
with open(join(base_dir, 'cv_summary.csv'), 'w') as f:
f.write('name,whole,core,enh,mean\n')
for e in successful:
expected_nopp = join(out, e, 'summary.csv')
expected_pp = join(out, out_pp, e, 'summary.csv')
if isfile(expected_nopp):
res = np.loadtxt(expected_nopp, dtype=str, skiprows=0, delimiter=',')[-2]
as_numeric = [float(i) for i in res[1:]]
f.write(e + '_noPP,')
f.write("%0.4f," % as_numeric[0])
f.write("%0.4f," % as_numeric[1])
f.write("%0.4f," % as_numeric[2])
f.write("%0.4f\n" % np.mean(as_numeric))
if isfile(expected_pp):
res = np.loadtxt(expected_pp, dtype=str, skiprows=0, delimiter=',')[-2]
as_numeric = [float(i) for i in res[1:]]
f.write(e + '_PP,')
f.write("%0.4f," % as_numeric[0])
f.write("%0.4f," % as_numeric[1])
f.write("%0.4f," % as_numeric[2])
f.write("%0.4f\n" % np.mean(as_numeric))
# this just crawls the folders and evaluates what it finds
with open(join(base_dir, 'cv_summary2.csv'), 'w') as f:
for folder in ['cv_results', 'cv_results_pp']:
for ex in subdirs(join(base_dir, folder), join=False):
print(folder, ex)
expected = join(base_dir, folder, ex, 'summary.csv')
if clean or not isfile(expected):
evaluate_regions(join(base_dir, folder, ex), gt_dir, regions, num_processes)
if isfile(expected):
res = np.loadtxt(expected, dtype=str, skiprows=0, delimiter=',')[-2]
as_numeric = [float(i) for i in res[1:]]
f.write('%s__%s,' % (folder, ex))
f.write("%0.4f," % as_numeric[0])
f.write("%0.4f," % as_numeric[1])
f.write("%0.4f," % as_numeric[2])
f.write("%0.4f\n" % np.mean(as_numeric))
f.write('name,whole,core,enh,mean\n')
for e in successful:
expected_nopp = join(out, e, 'summary.csv')
expected_pp = join(out, out_pp, e, 'summary.csv')
if isfile(expected_nopp):
res = np.loadtxt(expected_nopp, dtype=str, skiprows=0, delimiter=',')[-2]
as_numeric = [float(i) for i in res[1:]]
f.write(e + '_noPP,')
f.write("%0.4f," % as_numeric[0])
f.write("%0.4f," % as_numeric[1])
f.write("%0.4f," % as_numeric[2])
f.write("%0.4f\n" % np.mean(as_numeric))
if isfile(expected_pp):
res = np.loadtxt(expected_pp, dtype=str, skiprows=0, delimiter=',')[-2]
as_numeric = [float(i) for i in res[1:]]
f.write(e + '_PP,')
f.write("%0.4f," % as_numeric[0])
f.write("%0.4f," % as_numeric[1])
f.write("%0.4f," % as_numeric[2])
f.write("%0.4f\n" % np.mean(as_numeric))
# apply threshold to val set
expected_num_cases = 125
missing_valset = []
has_val_pred = []
for e in successful:
if isdir(join(base_dir, 'predVal', e)):
currdir = join(base_dir, 'predVal', e)
files = subfiles(currdir, suffix='.nii.gz', join=False)
if len(files) != expected_num_cases:
print(e, 'prediction not done, found %d files, expected %s' % (len(files), expected_num_cases))
continue
output_folder = join(base_dir, 'predVal_PP', e)
maybe_mkdir_p(output_folder)
threshold = load_pickle(join(out_pp, e, 'threshold.pkl'))[2]
if threshold > 1000: threshold = 750 # don't make it too big!
apply_threshold_to_folder(currdir, output_folder, threshold, replace_with, num_processes)
has_val_pred.append(e)
else:
print(e, 'has no valset predictions')
missing_valset.append(e)
# 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold' needs special treatment
e = 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5'
currdir = join(base_dir, 'predVal', 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold')
output_folder = join(base_dir, 'predVal_PP', 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold')
maybe_mkdir_p(output_folder)
threshold = load_pickle(join(out_pp, e, 'threshold.pkl'))[2]
if threshold > 1000: threshold = 750 # don't make it too big!
apply_threshold_to_folder(currdir, output_folder, threshold, replace_with, num_processes)
# 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold' needs special treatment
e = 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5'
currdir = join(base_dir, 'predVal', 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold')
output_folder = join(base_dir, 'predVal_PP', 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold')
maybe_mkdir_p(output_folder)
threshold = load_pickle(join(out_pp, e, 'threshold.pkl'))[2]
if threshold > 1000: threshold = 750 # don't make it too big!
apply_threshold_to_folder(currdir, output_folder, threshold, replace_with, num_processes)
# convert val set to brats labels for submission
output_converted = join(base_dir, 'converted_valSet')
for source in ['predVal', 'predVal_PP']:
for e in has_val_pred + ['nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold', 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold']:
expected_source_folder = join(base_dir, source, e)
if not isdir(expected_source_folder):
print(e, 'has no', source)
raise RuntimeError()
files = subfiles(expected_source_folder, suffix='.nii.gz', join=False)
if len(files) != expected_num_cases:
print(e, 'prediction not done, found %d files, expected %s' % (len(files), expected_num_cases))
continue
target_folder = join(output_converted, source, e)
maybe_mkdir_p(target_folder)
convert_labels_back_to_BraTS_2018_2019_convention(expected_source_folder, target_folder)
summarize_validation_set_predictions(output_converted)
def summarize_validation_set_predictions(base):
with open(join(base, 'summary.csv'), 'w') as f:
f.write('name,whole,core,enh,mean,whole,core,enh,mean\n')
for subf in subfolders(base, join=False):
for e in subfolders(join(base, subf), join=False):
expected = join(base, subf, e, 'Stats_Validation_final.csv')
if not isfile(expected):
print(subf, e, 'has missing csv')
continue
a = np.loadtxt(expected, delimiter=',', dtype=str)
assert a.shape[0] == 131, 'did not evaluate all 125 cases!'
selected_row = a[-5]
values = [float(i) for i in selected_row[1:4]]
f.write(e + "_" + subf + ',')
f.write("%0.4f," % values[1])
f.write("%0.4f," % values[2])
f.write("%0.4f," % values[0])
f.write("%0.4f," % np.mean(values))
values = [float(i) for i in selected_row[-3:]]
f.write("%0.4f," % values[1])
f.write("%0.4f," % values[2])
f.write("%0.4f," % values[0])
f.write("%0.4f\n" % np.mean(values))
def compute_BraTS_dice(ref, pred):
"""
    ref and pred are binary integer numpy.ndarrays
    :param ref:
    :param pred:
:return:
"""
num_ref = np.sum(ref)
num_pred = np.sum(pred)
if num_ref == 0:
if num_pred == 0:
return 1
else:
return 0
else:
return dc(pred, ref)
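# Tiny illustrative check (not part of the original module): the BraTS dice
# convention above returns 1 when both masks are empty and 0 when only the
# prediction is non-empty. The array shape is arbitrary.
def _example_brats_dice():
    empty = np.zeros((4, 4), dtype=int)
    false_positive = np.zeros((4, 4), dtype=int)
    false_positive[0, 0] = 1
    return compute_BraTS_dice(empty, empty), compute_BraTS_dice(empty, false_positive)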
def convert_all_to_BraTS(input_folder, output_folder, expected_num_cases=125):
for s in subdirs(input_folder, join=False):
nii = subfiles(join(input_folder, s), suffix='.nii.gz', join=False)
if len(nii) != expected_num_cases:
print(s)
else:
target_dir = join(output_folder, s)
convert_labels_back_to_BraTS_2018_2019_convention(join(input_folder, s), target_dir, num_processes=6)
def compute_BraTS_HD95(ref, pred):
"""
    ref and pred are binary integer numpy.ndarrays
spacing is assumed to be (1, 1, 1)
:param ref:
:param pred:
:return:
"""
num_ref = np.sum(ref)
num_pred = np.sum(pred)
if num_ref == 0:
if num_pred == 0:
return 0
else:
return 373.12866
elif num_pred == 0 and num_ref != 0:
return 373.12866
else:
return hd95(pred, ref, (1, 1, 1))
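# Companion illustrative check (not part of the original module): with an empty
# reference, an empty prediction gets no penalty while a false-positive
# prediction gets the fixed BraTS penalty of 373.12866.
def _example_brats_hd95_edge_cases():
    empty = np.zeros((4, 4, 4), dtype=int)
    false_positive = np.zeros((4, 4, 4), dtype=int)
    false_positive[0, 0, 0] = 1
    return compute_BraTS_HD95(empty, empty), compute_BraTS_HD95(empty, false_positive)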
def evaluate_BraTS_case(arr: np.ndarray, arr_gt: np.ndarray):
"""
attempting to reimplement the brats evaluation scheme
assumes edema=1, non_enh=2, enh=3
:param arr:
:param arr_gt:
:return:
"""
# whole tumor
mask_gt = (arr_gt != 0).astype(int)
mask_pred = (arr != 0).astype(int)
dc_whole = compute_BraTS_dice(mask_gt, mask_pred)
hd95_whole = compute_BraTS_HD95(mask_gt, mask_pred)
del mask_gt, mask_pred
# tumor core
mask_gt = (arr_gt > 1).astype(int)
mask_pred = (arr > 1).astype(int)
dc_core = compute_BraTS_dice(mask_gt, mask_pred)
hd95_core = compute_BraTS_HD95(mask_gt, mask_pred)
del mask_gt, mask_pred
# enhancing
mask_gt = (arr_gt == 3).astype(int)
mask_pred = (arr == 3).astype(int)
dc_enh = compute_BraTS_dice(mask_gt, mask_pred)
hd95_enh = compute_BraTS_HD95(mask_gt, mask_pred)
del mask_gt, mask_pred
return dc_whole, dc_core, dc_enh, hd95_whole, hd95_core, hd95_enh
def load_evaluate(filename_gt: str, filename_pred: str):
arr_pred = sitk.GetArrayFromImage(sitk.ReadImage(filename_pred))
arr_gt = sitk.GetArrayFromImage(sitk.ReadImage(filename_gt))
return evaluate_BraTS_case(arr_pred, arr_gt)
def evaluate_BraTS_folder(folder_pred, folder_gt, num_processes: int = 24, strict=False):
nii_pred = subfiles(folder_pred, suffix='.nii.gz', join=False)
if len(nii_pred) == 0:
return
nii_gt = subfiles(folder_gt, suffix='.nii.gz', join=False)
assert all([i in nii_gt for i in nii_pred]), 'not all predicted niftis have a reference file!'
if strict:
assert all([i in nii_pred for i in nii_gt]), 'not all gt niftis have a predicted file!'
p = Pool(num_processes)
nii_pred_fullpath = [join(folder_pred, i) for i in nii_pred]
nii_gt_fullpath = [join(folder_gt, i) for i in nii_pred]
results = p.starmap(load_evaluate, zip(nii_gt_fullpath, nii_pred_fullpath))
# now write to output file
with open(join(folder_pred, 'results.csv'), 'w') as f:
f.write("name,dc_whole,dc_core,dc_enh,hd95_whole,hd95_core,hd95_enh\n")
for fname, r in zip(nii_pred, results):
f.write(fname)
f.write(",%0.4f,%0.4f,%0.4f,%3.3f,%3.3f,%3.3f\n" % r)
def load_csv_for_ranking(csv_file: str):
res = np.loadtxt(csv_file, dtype='str', delimiter=',')
scores = res[1:, [1, 2, 3, -3, -2, -1]].astype(float)
scores[:, -3:] *= -1
scores[:, -3:] += 373.129
assert np.all(scores <= 373.129)
assert np.all(scores >= 0)
return scores
def rank_algorithms(data:np.ndarray):
"""
data is (metrics x experiments x cases)
:param data:
:return:
"""
num_metrics, num_experiments, num_cases = data.shape
ranks = np.zeros((num_metrics, num_experiments))
    for m in range(num_metrics):
r = np.apply_along_axis(ss.rankdata, 0, -data[m], 'min')
ranks[m] = r.mean(1)
average_rank = np.mean(ranks, 0)
final_ranks = ss.rankdata(average_rank, 'min')
return final_ranks, average_rank, ranks
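# Minimal illustrative sketch (not part of the original pipeline): rank two
# hypothetical experiments on random scores, only to show the expected
# (metrics x experiments x cases) input layout of rank_algorithms above.
def _example_rank_algorithms():
    rng = np.random.RandomState(0)
    toy_scores = rng.rand(6, 2, 4)  # 6 metrics, 2 experiments, 4 cases (all made up)
    final_ranks, average_rank, ranks = rank_algorithms(toy_scores)
    return final_ranks, average_rank, ranks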
def score_and_postprocess_model_based_on_rank_then_aggregate():
"""
Similarly to BraTS 2017 - BraTS 2019, each participant will be ranked for each of the X test cases. Each case
includes 3 regions of evaluation, and the metrics used to produce the rankings will be the Dice Similarity
Coefficient and the 95% Hausdorff distance. Thus, for X number of cases included in the BraTS 2020, each
participant ends up having X*3*2 rankings. The final ranking score is the average of all these rankings normalized
by the number of teams.
https://zenodo.org/record/3718904
-> let's optimize for this.
Important: the outcome very much depends on the competing models. We need some references. We only got our own,
so let's hope this still works
:return:
"""
base = "/media/fabian/Results/nnUNet/3d_fullres/Task082_BraTS2020"
replace_with = 2
num_processes = 24
expected_num_cases_val = 125
# use a separate output folder from the previous experiments to ensure we are not messing things up
output_base_here = join(base, 'use_brats_ranking')
maybe_mkdir_p(output_base_here)
# collect cv niftis and compute metrics with evaluate_BraTS_folder to ensure we work with the same metrics as brats
out = join(output_base_here, 'cv_results')
experiments = subfolders(base, join=False, prefix='nnUNetTrainer')
gt_dir = join(base, 'gt_niftis')
experiments_with_full_cv = []
for e in experiments:
print(e)
o = join(out, e)
maybe_mkdir_p(o)
try:
collect_cv_niftis(join(base, e), o)
if not isfile(join(o, 'results.csv')):
evaluate_BraTS_folder(o, gt_dir, num_processes, strict=True)
experiments_with_full_cv.append(e)
except Exception as ex:
print("\nERROR\n", e, ex, "\n")
if isfile(join(o, 'results.csv')):
os.remove(join(o, 'results.csv'))
# rank the non-postprocessed models
tmp = np.loadtxt(join(out, experiments_with_full_cv[0], 'results.csv'), dtype='str', delimiter=',')
num_cases = len(tmp) - 1
data_for_ranking = np.zeros((6, len(experiments_with_full_cv), num_cases))
for i, e in enumerate(experiments_with_full_cv):
scores = load_csv_for_ranking(join(out, e, 'results.csv'))
for metric in range(6):
data_for_ranking[metric, i] = scores[:, metric]
final_ranks, average_rank, ranks = rank_algorithms(data_for_ranking)
for t in np.argsort(final_ranks):
print(final_ranks[t], average_rank[t], experiments_with_full_cv[t])
# for each model, create output directories with different thresholds. evaluate ALL OF THEM (might take a while lol)
thresholds = np.arange(25, 751, 25)
output_pp_tmp = join(output_base_here, 'cv_determine_pp_thresholds')
for e in experiments_with_full_cv:
input_folder = join(out, e)
for t in thresholds:
output_directory = join(output_pp_tmp, e, str(t))
maybe_mkdir_p(output_directory)
if not isfile(join(output_directory, 'results.csv')):
apply_threshold_to_folder(input_folder, output_directory, t, replace_with, processes=16)
evaluate_BraTS_folder(output_directory, gt_dir, num_processes)
# load ALL the results!
results = []
experiment_names = []
for e in experiments_with_full_cv:
for t in thresholds:
output_directory = join(output_pp_tmp, e, str(t))
expected_file = join(output_directory, 'results.csv')
if not isfile(expected_file):
print(e, 'does not have a results file for threshold', t)
continue
results.append(load_csv_for_ranking(expected_file))
experiment_names.append("%s___%d" % (e, t))
all_results = np.concatenate([i[None] for i in results], 0).transpose((2, 0, 1))
# concatenate with non postprocessed models
all_results = np.concatenate((data_for_ranking, all_results), 1)
experiment_names += experiments_with_full_cv
final_ranks, average_rank, ranks = rank_algorithms(all_results)
for t in np.argsort(final_ranks):
print(final_ranks[t], average_rank[t], experiment_names[t])
# for each model, print the non postprocessed model as well as the best postprocessed model. If there are
# validation set predictions, apply the best threshold to the validation set
pred_val_base = join(base, 'predVal_PP_rank')
has_val_pred = []
for e in experiments_with_full_cv:
rank_nonpp = final_ranks[experiment_names.index(e)]
avg_rank_nonpp = average_rank[experiment_names.index(e)]
print(e, avg_rank_nonpp, rank_nonpp)
predicted_val = join(base, 'predVal', e)
pp_models = [j for j, i in enumerate(experiment_names) if i.split("___")[0] == e and i != e]
if len(pp_models) > 0:
ranks = [final_ranks[i] for i in pp_models]
best_idx = np.argmin(ranks)
best = experiment_names[pp_models[best_idx]]
best_avg_rank = average_rank[pp_models[best_idx]]
print(best, best_avg_rank, min(ranks))
print('')
# apply threshold to validation set
best_threshold = int(best.split('___')[-1])
if not isdir(predicted_val):
                print(e, 'has no valset predictions')
else:
files = subfiles(predicted_val, suffix='.nii.gz')
if len(files) != expected_num_cases_val:
print(e, 'has missing val cases. found: %d expected: %d' % (len(files), expected_num_cases_val))
else:
apply_threshold_to_folder(predicted_val, join(pred_val_base, e), best_threshold, replace_with, num_processes)
has_val_pred.append(e)
else:
print(e, 'not found in ranking')
# apply nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5 to nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold
e = 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5'
pp_models = [j for j, i in enumerate(experiment_names) if i.split("___")[0] == e and i != e]
ranks = [final_ranks[i] for i in pp_models]
best_idx = np.argmin(ranks)
best = experiment_names[pp_models[best_idx]]
best_avg_rank = average_rank[pp_models[best_idx]]
best_threshold = int(best.split('___')[-1])
predicted_val = join(base, 'predVal', 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold')
apply_threshold_to_folder(predicted_val, join(pred_val_base, 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold'), best_threshold, replace_with, num_processes)
has_val_pred.append('nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold')
# apply nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5 to nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold
e = 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5'
pp_models = [j for j, i in enumerate(experiment_names) if i.split("___")[0] == e and i != e]
ranks = [final_ranks[i] for i in pp_models]
best_idx = np.argmin(ranks)
best = experiment_names[pp_models[best_idx]]
best_avg_rank = average_rank[pp_models[best_idx]]
best_threshold = int(best.split('___')[-1])
predicted_val = join(base, 'predVal', 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold')
apply_threshold_to_folder(predicted_val, join(pred_val_base, 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold'), best_threshold, replace_with, num_processes)
has_val_pred.append('nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold')
# convert valsets
output_converted = join(base, 'converted_valSet')
for e in has_val_pred:
expected_source_folder = join(base, 'predVal_PP_rank', e)
if not isdir(expected_source_folder):
print(e, 'has no predVal_PP_rank')
raise RuntimeError()
files = subfiles(expected_source_folder, suffix='.nii.gz', join=False)
if len(files) != expected_num_cases_val:
print(e, 'prediction not done, found %d files, expected %s' % (len(files), expected_num_cases_val))
continue
target_folder = join(output_converted, 'predVal_PP_rank', e)
maybe_mkdir_p(target_folder)
convert_labels_back_to_BraTS_2018_2019_convention(expected_source_folder, target_folder)
# now load all the csvs for the validation set (obtained from evaluation platform) and rank our models on the
# validation set
flds = subdirs(output_converted, join=False)
results_valset = []
names_valset = []
for f in flds:
curr = join(output_converted, f)
experiments = subdirs(curr, join=False)
for e in experiments:
currr = join(curr, e)
expected_file = join(currr, 'Stats_Validation_final.csv')
if not isfile(expected_file):
print(f, e, "has not been evaluated yet!")
else:
res = load_csv_for_ranking(expected_file)[:-5]
assert res.shape[0] == expected_num_cases_val
results_valset.append(res[None])
names_valset.append("%s___%s" % (f, e))
results_valset = np.concatenate(results_valset, 0) # experiments x cases x metrics
# convert to metrics x experiments x cases
results_valset = results_valset.transpose((2, 0, 1))
final_ranks, average_rank, ranks = rank_algorithms(results_valset)
for t in np.argsort(final_ranks):
print(final_ranks[t], average_rank[t], names_valset[t])
if __name__ == "__main__":
"""
THIS CODE IS A MESS. IT IS PROVIDED AS IS WITH NO GUARANTEES. YOU HAVE TO DIG THROUGH IT YOURSELF. GOOD LUCK ;-)
"""
"""
REMEMBER TO CONVERT LABELS BACK TO BRATS CONVENTION AFTER PREDICTION!
"""
task_name = "Task082_BraTS2020"
downloaded_data_dir = "/home/fabian/Downloads/MICCAI_BraTS2020_TrainingData"
downloaded_data_dir_val = "/home/fabian/Downloads/MICCAI_BraTS2020_ValidationData"
target_base = join(nnUNet_raw_data, task_name)
target_imagesTr = join(target_base, "imagesTr")
target_imagesVal = join(target_base, "imagesVal")
target_imagesTs = join(target_base, "imagesTs")
target_labelsTr = join(target_base, "labelsTr")
maybe_mkdir_p(target_imagesTr)
maybe_mkdir_p(target_imagesVal)
maybe_mkdir_p(target_imagesTs)
maybe_mkdir_p(target_labelsTr)
patient_names = []
cur = join(downloaded_data_dir)
for p in subdirs(cur, join=False):
patdir = join(cur, p)
patient_name = p
patient_names.append(patient_name)
t1 = join(patdir, p + "_t1.nii.gz")
t1c = join(patdir, p + "_t1ce.nii.gz")
t2 = join(patdir, p + "_t2.nii.gz")
flair = join(patdir, p + "_flair.nii.gz")
seg = join(patdir, p + "_seg.nii.gz")
assert all([
isfile(t1),
isfile(t1c),
isfile(t2),
isfile(flair),
isfile(seg)
]), "%s" % patient_name
shutil.copy(t1, join(target_imagesTr, patient_name + "_0000.nii.gz"))
shutil.copy(t1c, join(target_imagesTr, patient_name + "_0001.nii.gz"))
shutil.copy(t2, join(target_imagesTr, patient_name + "_0002.nii.gz"))
shutil.copy(flair, join(target_imagesTr, patient_name + "_0003.nii.gz"))
copy_BraTS_segmentation_and_convert_labels(seg, join(target_labelsTr, patient_name + ".nii.gz"))
json_dict = OrderedDict()
json_dict['name'] = "BraTS2020"
json_dict['description'] = "nothing"
json_dict['tensorImageSize'] = "4D"
json_dict['reference'] = "see BraTS2020"
json_dict['licence'] = "see BraTS2020 license"
json_dict['release'] = "0.0"
json_dict['modality'] = {
"0": "T1",
"1": "T1ce",
"2": "T2",
"3": "FLAIR"
}
json_dict['labels'] = {
"0": "background",
"1": "edema",
"2": "non-enhancing",
"3": "enhancing",
}
json_dict['numTraining'] = len(patient_names)
json_dict['numTest'] = 0
json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i, "label": "./labelsTr/%s.nii.gz" % i} for i in
patient_names]
json_dict['test'] = []
save_json(json_dict, join(target_base, "dataset.json"))
if downloaded_data_dir_val is not None:
for p in subdirs(downloaded_data_dir_val, join=False):
patdir = join(downloaded_data_dir_val, p)
patient_name = p
t1 = join(patdir, p + "_t1.nii.gz")
t1c = join(patdir, p + "_t1ce.nii.gz")
t2 = join(patdir, p + "_t2.nii.gz")
flair = join(patdir, p + "_flair.nii.gz")
assert all([
isfile(t1),
isfile(t1c),
isfile(t2),
isfile(flair),
]), "%s" % patient_name
shutil.copy(t1, join(target_imagesVal, patient_name + "_0000.nii.gz"))
shutil.copy(t1c, join(target_imagesVal, patient_name + "_0001.nii.gz"))
shutil.copy(t2, join(target_imagesVal, patient_name + "_0002.nii.gz"))
shutil.copy(flair, join(target_imagesVal, patient_name + "_0003.nii.gz"))
downloaded_data_dir_test = "/home/fabian/Downloads/MICCAI_BraTS2020_TestingData"
if isdir(downloaded_data_dir_test):
for p in subdirs(downloaded_data_dir_test, join=False):
patdir = join(downloaded_data_dir_test, p)
patient_name = p
t1 = join(patdir, p + "_t1.nii.gz")
t1c = join(patdir, p + "_t1ce.nii.gz")
t2 = join(patdir, p + "_t2.nii.gz")
flair = join(patdir, p + "_flair.nii.gz")
assert all([
isfile(t1),
isfile(t1c),
isfile(t2),
isfile(flair),
]), "%s" % patient_name
shutil.copy(t1, join(target_imagesTs, patient_name + "_0000.nii.gz"))
shutil.copy(t1c, join(target_imagesTs, patient_name + "_0001.nii.gz"))
shutil.copy(t2, join(target_imagesTs, patient_name + "_0002.nii.gz"))
shutil.copy(flair, join(target_imagesTs, patient_name + "_0003.nii.gz"))
# test set
# nnUNet_ensemble -f nnUNetTrainerV2BraTSRegions_DA3_BN_BD__nnUNetPlansv2.1_bs5_5fold nnUNetTrainerV2BraTSRegions_DA4_BN_BD__nnUNetPlansv2.1_bs5_5fold nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold -o ensembled_nnUNetTrainerV2BraTSRegions_DA3_BN_BD__nnUNetPlansv2.1_bs5_5fold__nnUNetTrainerV2BraTSRegions_DA4_BN_BD__nnUNetPlansv2.1_bs5_5fold__nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold
# apply_threshold_to_folder('ensembled_nnUNetTrainerV2BraTSRegions_DA3_BN_BD__nnUNetPlansv2.1_bs5_5fold__nnUNetTrainerV2BraTSRegions_DA4_BN_BD__nnUNetPlansv2.1_bs5_5fold__nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold/', 'ensemble_PP100/', 100, 2)
# convert_labels_back_to_BraTS_2018_2019_convention('ensemble_PP100/', 'ensemble_PP100_converted')
|
the-stack_0_17205
|
#!/usr/bin/env python
#
# Gather revision statistics from version control system.
# The process is not optimized for speed or anything. It
# just uses command line tools and updates working copy
# from revision to revision.
#
# 1. Get a fresh clone / checkout
# 2. Run gather.py inside of it
#
#
# --- dataset 1: size ---
#
# - size of all files in revision
# - number of files
# - number of dirs
#
#
# status
#
# [ ] subversion traversing
# [x] getting list of all revisions
# [ ] processing one revision at a time
# [ ] update copy to revision
# [x] stop on error
# [ ] save progress
# [ ] rollback bad revision data
# [ ] ...
#
# [ ] mercurial traversing
# [x] getting list of all revisions
# [ ] processing one revision at a time
# [x] update copy to revision
# [x] stop on error
# [ ] save progress
# [ ] rollback bad revision data
# [ ] ...
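#
# Example invocation (an assumed sketch, not from the original notes): run the
# script from the root of a clean working copy and redirect the CSV output,
# e.g. `cd my-checkout && python gather.py > stats.csv`. The directory and
# file names are hypothetical placeholders.
#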
import copy
import os
import sys
import subprocess
PY3K = sys.version_info[0] == 3
SET1 = {
'totalsize': 0,
'dirsnum': 0,
'filesnum': 0,
}
def echo(msg):
'''only for debug'''
pass # print msg
def runout(cmd):
if not PY3K:
return subprocess.check_output(cmd, shell=True)
else:
return subprocess.check_output(cmd, shell=True).decode('utf-8')
class HG(object):
def check_clean(self):
"""check that working copy is clean and can be
successfully updated to any revision"""
if len(runout('hg status')) != 0:
return False
else:
return True
def revlist(self):
"""get list of revisions from oldest to youngest"""
output = runout('hg log --template "{rev}\\n"')
rev = []
for line in output.splitlines():
rev.append(line)
return reversed(rev)
def up(self, rev):
runout('hg up -r %s' % rev)
class SVN(object):
def check_clean(self):
"""check that working copy is clean and can be
successfully updated to any revision"""
if len(runout('svn status')) != 0:
return False
else:
return True
def revlist(self):
"""get list of revisions from oldest to youngest"""
output = runout('svn info -r HEAD')
lastrev = 0
for line in output.splitlines():
if line.startswith('Revision: '):
lastrev = line.strip().split()[1]
lastrev = int(lastrev)
rev = range(1, lastrev + 1)
return rev
def up(self, rev):
runout('svn up -r %s' % rev)
def process(path, ignore=[]):
"""calculate SET1 directory stats for given path, skipping
directories mentioned in ignore (e.g. '.hg', '.svn', ...)
"""
if not PY3K:
        # unicode is critical for non-English local names on Windows
path = unicode(path)
s = copy.copy(SET1)
s['totalsize'] = 0
for root, dirs, files in os.walk(path):
# filter directories
for ig in ignore:
if ig in dirs:
dirs.remove(ig)
for f in files:
s['totalsize'] += os.path.getsize(os.path.join(root, f))
s['filesnum'] += len(files)
s['dirsnum'] += len(dirs)
return s
if __name__ == '__main__':
# detect version control type
i = 0
reptype = None
for n in ['.svn', '.hg']:
if os.path.isdir(n):
i += 1
reptype = n
if i == 0:
sys.exit('Error: Can\'t detect version control system')
if i > 1:
sys.exit('Error: Detected several version control systems')
# get API to repository information
if reptype == '.hg':
repapi = HG()
echo('HG selected')
else:
repapi = SVN()
echo('SVN selected')
# get clearance
if not repapi.check_clean():
sys.exit('Error: Working copy is not clean, can not continue')
# CSV header
sys.stdout.write("revision, size, dirs, files\n")
for rev in repapi.revlist():
repapi.up(rev)
line = process('.', ignore=[reptype])
line['rev'] = rev
#print line
s = "{rev}, {totalsize}, {dirsnum}, {filesnum}\n".format(**line)
sys.stdout.write(s)
sys.stdout.flush()
|
the-stack_0_17207
|
import copy
from abc import ABC
from typing import Generic, TypeVar, Union, Sequence, Callable, Optional, \
Dict, Any, Iterable, List, Set
from avalanche.benchmarks.scenarios.generic_definitions import \
TExperience, ScenarioStream, TScenarioStream, Experience, TScenario, \
TrainSet, TestSet
from avalanche.benchmarks.utils import AvalancheDataset, \
AvalancheSubset
TGenericCLScenario = TypeVar('TGenericCLScenario', bound='GenericCLScenario')
TGenericExperience = TypeVar('TGenericExperience', bound='GenericExperience')
TGenericScenarioStream = TypeVar('TGenericScenarioStream',
bound='GenericScenarioStream')
class GenericCLScenario(Generic[TrainSet, TestSet, TExperience]):
"""
Base implementation of a Continual Learning scenario. A Continual Learning
scenario is defined by a sequence of experiences (batches or tasks depending
on the terminology), with each experience containing the training (or test)
data that becomes available at a certain time instant.
From a practical point of view, this means that we usually have to define
two datasets (training and test), and some way to assign the patterns
contained in these datasets to each experience.
    This assignment is usually made in child classes, with this class serving
as the more general implementation. This class handles the most simple type
of assignment: each experience is defined by a list of patterns (identified
by their indexes) contained in that experience.
"""
def __init__(self: TGenericCLScenario,
original_train_dataset: TrainSet,
original_test_dataset: TestSet,
train_dataset: AvalancheDataset,
test_dataset: AvalancheDataset,
train_exps_patterns_assignment: Sequence[Sequence[int]],
test_exps_patterns_assignment: Sequence[Sequence[int]],
task_labels: Sequence[List[int]],
pattern_train_task_labels: Sequence[int],
pattern_test_task_labels: Sequence[int],
complete_test_set_only: bool = False,
reproducibility_data: Optional[Dict[str, Any]] = None,
experience_factory: Callable[['GenericScenarioStream', int],
TExperience] = None):
"""
Creates an instance of a Continual Learning scenario.
The scenario is defined by the train and test datasets plus the
assignment of patterns to experiences (batches/tasks).
:param train_dataset: The training dataset. The dataset must be a
subclass of :class:`AvalancheDataset`. For instance, one can
use the datasets from the torchvision package like that:
``train_dataset=AvalancheDataset(torchvision_dataset)``.
:param test_dataset: The test dataset. The dataset must be a
subclass of :class:`AvalancheDataset`. For instance, one can
use the datasets from the torchvision package like that:
``test_dataset=AvalancheDataset(torchvision_dataset)``.
:param train_exps_patterns_assignment: A list of experiences. Each
experience is in turn defined by a list of integers describing the
pattern index inside the training dataset.
:param test_exps_patterns_assignment: A list of experiences. Each
experience is in turn defined by a list of integers describing the
pattern index inside the test dataset.
:param task_labels: The mapping from experience IDs to task labels,
usually as a list of integers.
:param pattern_train_task_labels: The list of task labels of each
pattern in the `train_dataset`.
:param pattern_test_task_labels: The list of task labels of each
pattern in the `test_dataset`.
:param complete_test_set_only: If True, only the complete test
set will be returned from test set related methods of the linked
:class:`GenericExperience` instances. This also means that the
``test_exps_patterns_assignment`` parameter can be a single element
or even an empty list (in which case, the full set defined by
the ``test_dataset`` parameter will be returned). The returned
task label for the complete test set will be the first element
of the ``task_labels`` parameter. Defaults to False, which means
that ```train_exps_patterns_assignment`` and
``test_exps_patterns_assignment`` parameters must describe an equal
amount of experiences.
:param reproducibility_data: If not None, overrides the
``train/test_exps_patterns_assignment`` and ``task_labels``
parameters. This is usually a dictionary containing data used to
reproduce a specific experiment. One can use the
``get_reproducibility_data`` method to get (and even distribute)
the experiment setup so that it can be loaded by passing it as this
parameter. In this way one can be sure that the same specific
experimental setup is being used (for reproducibility purposes).
Beware that, in order to reproduce an experiment, the same train and
test datasets must be used. Defaults to None.
:param experience_factory: If not None, a callable that, given the
scenario instance and the experience ID, returns a experience
instance. This parameter is usually used in subclasses (when
invoking the super constructor) to specialize the experience class.
Defaults to None, which means that the :class:`GenericExperience`
constructor will be used.
"""
self.original_train_dataset: TrainSet = original_train_dataset
""" The original training set. """
self.original_test_dataset: TestSet = original_test_dataset
""" The original test set. """
self.train_exps_patterns_assignment: Sequence[Sequence[int]]
""" A list containing which training patterns are assigned to each
experience. Patterns are identified by their id w.r.t. the dataset found
in the train_dataset field. """
self.test_exps_patterns_assignment: Sequence[Sequence[int]]
""" A list containing which test patterns are assigned to each
experience. Patterns are identified by their id w.r.t. the dataset found
in the test_dataset field """
self.task_labels: Sequence[List[int]] = task_labels
""" The task label of each experience. """
self.pattern_train_task_labels: Sequence[int] = \
pattern_train_task_labels
""" The task label of each pattern in the training dataset. """
self.pattern_test_task_labels: Sequence[int] = pattern_test_task_labels
""" The task label of each pattern in the test dataset. """
self.train_exps_patterns_assignment: Sequence[Sequence[int]] = \
train_exps_patterns_assignment
self.test_exps_patterns_assignment: Sequence[Sequence[int]] = \
test_exps_patterns_assignment
self.complete_test_set_only: bool = bool(complete_test_set_only)
"""
If True, only the complete test set will be returned from experience
instances.
This flag is usually set to True in scenarios where having one separate
test set aligned to each training experience is impossible or doesn't
make sense from a semantic point of view.
"""
if reproducibility_data is not None:
self.train_exps_patterns_assignment = reproducibility_data['train']
self.test_exps_patterns_assignment = reproducibility_data['test']
self.task_labels = reproducibility_data['task_labels']
self.pattern_train_task_labels = reproducibility_data[
'pattern_train_task_labels']
self.pattern_test_task_labels = reproducibility_data[
'pattern_test_task_labels']
self.complete_test_set_only = \
reproducibility_data['complete_test_only']
self.n_experiences: int = len(self.train_exps_patterns_assignment)
""" The number of incremental experiences this scenario is made of. """
if experience_factory is None:
experience_factory = GenericExperience
self.experience_factory: Callable[[TGenericScenarioStream, int],
TExperience] = experience_factory
if self.complete_test_set_only:
if len(self.test_exps_patterns_assignment) > 1:
raise ValueError(
'complete_test_set_only is True, but '
'test_exps_patterns_assignment contains more than one '
'element')
elif len(self.train_exps_patterns_assignment) != \
len(self.test_exps_patterns_assignment):
raise ValueError('There must be the same amount of train and '
'test experiences')
if len(self.train_exps_patterns_assignment) != len(self.task_labels):
raise ValueError('There must be the same number of train '
'experiences and task labels')
self.train_dataset: AvalancheDataset = AvalancheDataset(
train_dataset, task_labels=self.pattern_train_task_labels)
""" The training set used to generate the incremental experiences. """
self.test_dataset: AvalancheDataset = AvalancheDataset(
test_dataset, task_labels=self.pattern_test_task_labels)
""" The test set used to generate the incremental experiences. """
self.train_stream: GenericScenarioStream[
TExperience, TGenericCLScenario] = GenericScenarioStream('train',
self)
"""
The stream used to obtain the training experiences.
This stream can be sliced in order to obtain a subset of this stream.
"""
self.test_stream: GenericScenarioStream[
TExperience, TGenericCLScenario] = GenericScenarioStream('test',
self)
"""
The stream used to obtain the test experiences. This stream can be
sliced in order to obtain a subset of this stream.
Beware that, in certain scenarios, this stream may contain a single
element. Check the ``complete_test_set_only`` field for more details.
"""
def get_reproducibility_data(self) -> Dict[str, Any]:
"""
Gets the data needed to reproduce this experiment.
This data can be stored using the pickle module or some other mechanism.
It can then be loaded by passing it as the ``reproducibility_data``
parameter in the constructor.
Child classes should get the reproducibility dictionary from super class
and then merge their custom data before returning it.
:return: A dictionary containing the data needed to reproduce the
experiment.
"""
train_exps = []
for train_exp_id in range(len(self.train_exps_patterns_assignment)):
train_exp = self.train_exps_patterns_assignment[train_exp_id]
train_exps.append(list(train_exp))
test_exps = []
for test_exp_id in range(len(self.test_exps_patterns_assignment)):
test_exp = self.test_exps_patterns_assignment[test_exp_id]
test_exps.append(list(test_exp))
return {'train': train_exps, 'test': test_exps,
'task_labels': list(self.task_labels),
'complete_test_only': bool(self.complete_test_set_only),
'pattern_train_task_labels': list(
self.pattern_train_task_labels),
'pattern_test_task_labels': list(self.pattern_test_task_labels)}
def get_classes_timeline(self, current_experience: int):
"""
Returns the classes timeline for a this scenario.
Given a experience ID, this method returns the classes in this
experience, previously seen classes, the cumulative class list and a
list of classes that will be encountered in next experiences.
:param current_experience: The reference experience ID.
:return: A tuple composed of four lists: the first list contains the
IDs of classes in this experience, the second contains IDs of
classes seen in previous experiences, the third returns a cumulative
list of classes (that is, the union of the first two list) while the
last one returns a list of classes that will be encountered in next
experiences.
"""
train_exps_patterns_assignment: Sequence[Sequence[int]]
class_set_current_exp = self.classes_in_experience[current_experience]
classes_in_this_exp = list(class_set_current_exp)
class_set_prev_exps = set()
for exp_id in range(0, current_experience):
class_set_prev_exps.update(self.classes_in_experience[exp_id])
previous_classes = list(class_set_prev_exps)
classes_seen_so_far = \
list(class_set_current_exp.union(class_set_prev_exps))
class_set_future_exps = set()
for exp_id in range(current_experience, self.n_experiences):
            class_set_future_exps.update(self.classes_in_experience[exp_id])
future_classes = list(class_set_future_exps)
return (classes_in_this_exp, previous_classes, classes_seen_so_far,
future_classes)
@property
def classes_in_experience(self) -> Sequence[Set[int]]:
""" A list that, for each experience (identified by its index/ID),
stores a set of the (optionally remapped) IDs of classes of patterns
assigned to that experience. """
return LazyClassesInExps(self)
class GenericScenarioStream(Generic[TExperience, TGenericCLScenario],
ScenarioStream[TGenericCLScenario, TExperience],
Sequence[TExperience]):
def __init__(self: TGenericScenarioStream,
name: str,
scenario: TGenericCLScenario,
*,
slice_ids: List[int] = None):
self.slice_ids: Optional[List[int]] = slice_ids
"""
Describes which experiences are contained in the current stream slice.
Can be None, which means that this object is the original stream. """
self.name: str = name
self.scenario = scenario
def __len__(self) -> int:
"""
        Gets the number of experiences this scenario is made of.
:return: The number of experiences in this scenario.
"""
if self.slice_ids is None:
if self.name == 'train':
return len(self.scenario.train_exps_patterns_assignment)
elif self.scenario.complete_test_set_only:
return 1
else:
return len(self.scenario.test_exps_patterns_assignment)
else:
return len(self.slice_ids)
def __getitem__(self, exp_idx: Union[int, slice, Iterable[int]]) -> \
Union[TExperience, TScenarioStream]:
"""
        Gets an experience given its experience index (or a stream slice given
the experience order).
:param exp_idx: An int describing the experience index or an
iterable/slice object describing a slice of this stream.
:return: The experience instance associated to the given experience
index or a sliced stream instance.
"""
if isinstance(exp_idx, int):
if exp_idx < len(self):
if self.slice_ids is None:
return self.scenario.experience_factory(self, exp_idx)
else:
return self.scenario.experience_factory(
self, self.slice_ids[exp_idx])
            raise IndexError('Experience index out of bounds: ' +
str(int(exp_idx)))
else:
return self._create_slice(exp_idx)
def _create_slice(self: TGenericScenarioStream,
exps_slice: Union[int, slice, Iterable[int]]) \
-> TScenarioStream:
"""
Creates a sliced version of this stream.
In its base version, a shallow copy of this stream is created and
then its ``slice_ids`` field is adapted.
:param exps_slice: The slice to use.
:return: A sliced version of this stream.
"""
stream_copy = copy.copy(self)
slice_exps = _get_slice_ids(exps_slice, len(self))
if self.slice_ids is None:
stream_copy.slice_ids = slice_exps
else:
stream_copy.slice_ids = [self.slice_ids[x] for x in slice_exps]
return stream_copy
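

# Illustrative sketch, not part of the original stream implementation: how the
# slicing behaviour of ``__getitem__``/``_create_slice`` above is typically
# used. Assumption: ``stream`` is a GenericScenarioStream built by a scenario;
# only methods defined in this class are used.
def _first_n_experiences(stream: 'GenericScenarioStream', n: int) -> list:
    # ``stream[0:n]`` returns a sliced stream view; iterating it lazily builds
    # the experience objects through ``experience_factory``.
    return list(stream[0:n])
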
class LazyClassesInExps(Sequence[Set[int]]):
def __init__(self, scenario: GenericCLScenario):
self._scenario = scenario
def __len__(self):
return len(self._scenario.train_stream)
def __getitem__(self, exp_id) -> Set[int]:
return set(
[self._scenario.train_dataset.targets[pattern_idx]
for pattern_idx
in self._scenario.train_exps_patterns_assignment[exp_id]])
def __str__(self):
return '[' + \
', '.join([str(self[idx]) for idx in range(len(self))]) + \
']'
def _get_slice_ids(slice_definition: Union[int, slice, Iterable[int]],
sliceable_len: int) -> List[int]:
# Obtain experiences list from slice object (or any iterable)
exps_list: List[int]
if isinstance(slice_definition, slice):
exps_list = list(
range(*slice_definition.indices(sliceable_len)))
elif isinstance(slice_definition, int):
exps_list = [slice_definition]
elif hasattr(slice_definition, 'shape') and \
len(getattr(slice_definition, 'shape')) == 0:
exps_list = [int(slice_definition)]
else:
exps_list = list(slice_definition)
# Check experience id(s) boundaries
if max(exps_list) >= sliceable_len:
raise IndexError(
'Experience index out of range: ' + str(max(exps_list)))
if min(exps_list) < 0:
raise IndexError(
'Experience index out of range: ' + str(min(exps_list)))
return exps_list
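

# Illustrative sketch, not used elsewhere in the module: these assertions
# document the behaviour of ``_get_slice_ids`` above for the supported
# slice forms (slice objects, single ints and iterables of ints).
def _slice_ids_examples() -> None:
    assert _get_slice_ids(slice(0, 3), 5) == [0, 1, 2]
    assert _get_slice_ids(2, 5) == [2]
    assert _get_slice_ids([4, 1], 5) == [4, 1]
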
class AbstractExperience(Experience[TScenario, TScenarioStream], ABC):
"""
Definition of a learning experience. A learning experience contains a set of
patterns which has become available at a particular time instant. The
content and size of an Experience is defined by the specific benchmark that
creates the experience.
For instance, an experience of a New Classes scenario will contain all
patterns belonging to a subset of classes of the original training set. An
experience of a New Instance scenario will contain patterns from previously
seen classes.
Experiences of Single Incremental Task (a.k.a. task-free) scenarios are
usually called "batches" while in Multi Task scenarios an Experience is
usually associated to a "task". Finally, in a Multi Incremental Task
scenario the Experience may be composed by patterns from different tasks.
"""
def __init__(
self: TExperience,
origin_stream: TScenarioStream,
current_experience: int,
classes_in_this_exp: Sequence[int],
previous_classes: Sequence[int],
classes_seen_so_far: Sequence[int],
future_classes: Optional[Sequence[int]]):
"""
Creates an instance of the abstract experience given the scenario
stream, the current experience ID and data about the classes timeline.
:param origin_stream: The stream from which this experience was
obtained.
:param current_experience: The current experience ID, as an integer.
:param classes_in_this_exp: The list of classes in this experience.
:param previous_classes: The list of classes in previous experiences.
:param classes_seen_so_far: List of classes of current and previous
experiences.
:param future_classes: The list of classes of next experiences.
"""
self.origin_stream: TScenarioStream = origin_stream
# scenario keeps a reference to the base scenario
self.scenario: TScenario = origin_stream.scenario
# current_experience is usually an incremental, 0-indexed, value used to
# keep track of the current batch/task.
self.current_experience: int = current_experience
self.classes_in_this_experience: Sequence[int] = classes_in_this_exp
""" The list of classes in this experience """
self.previous_classes: Sequence[int] = previous_classes
""" The list of classes in previous experiences """
self.classes_seen_so_far: Sequence[int] = classes_seen_so_far
""" List of classes of current and previous experiences """
self.future_classes: Optional[Sequence[int]] = future_classes
""" The list of classes of next experiences """
@property
def task_label(self) -> int:
"""
The task label. This value will never have value "None". However,
for scenarios that don't produce task labels a placeholder value like 0
is usually set. Beware that this field is meant as a shortcut to obtain
a unique task label: it assumes that only patterns labeled with a
single task label are present. If this experience contains patterns from
multiple tasks, accessing this property will result in an exception.
"""
if len(self.task_labels) != 1:
raise ValueError('The task_label property can only be accessed '
'when the experience contains a single task label')
return self.task_labels[0]
class GenericExperience(AbstractExperience[TGenericCLScenario,
GenericScenarioStream[
TGenericExperience,
TGenericCLScenario]]):
"""
Definition of a learning experience based on a :class:`GenericCLScenario`
instance.
This experience implementation uses the generic experience-patterns
assignment defined in the :class:`GenericCLScenario` instance. Instances of
this class are usually obtained from a scenario stream.
"""
def __init__(self: TGenericExperience,
origin_stream: GenericScenarioStream[TGenericExperience,
TGenericCLScenario],
current_experience: int):
"""
        Creates an instance of a generic experience given the stream from
        which this experience was taken and the current experience ID.
:param origin_stream: The stream from which this experience was
obtained.
:param current_experience: The current experience ID, as an integer.
"""
(classes_in_this_exp, previous_classes, classes_seen_so_far,
future_classes) = origin_stream.scenario.get_classes_timeline(
current_experience)
super(GenericExperience, self).__init__(
origin_stream, current_experience, classes_in_this_exp,
previous_classes, classes_seen_so_far, future_classes)
@property
def dataset(self) -> AvalancheDataset:
if self._is_train():
dataset = self.scenario.train_dataset
patterns_indexes = \
self.scenario.train_exps_patterns_assignment[
self.current_experience]
else:
dataset = self.scenario.test_dataset
if self.scenario.complete_test_set_only:
patterns_indexes = None
else:
patterns_indexes = self.scenario.test_exps_patterns_assignment[
self.current_experience]
return AvalancheSubset(dataset, indices=patterns_indexes)
@property
def task_labels(self) -> List[int]:
if self._is_train():
return self.scenario.task_labels[self.current_experience]
else:
if self.scenario.complete_test_set_only:
return self.scenario.task_labels[0]
else:
return self.scenario.task_labels[self.current_experience]
def _is_train(self):
return self.origin_stream.name == 'train'
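

# Illustrative sketch, not part of the original module: the usual way the
# experience API defined above is consumed. Assumption: ``scenario`` is a
# GenericCLScenario exposing the ``train_stream`` built from these classes,
# and its experience datasets support ``len()``.
def _iterate_scenario(scenario: 'GenericCLScenario') -> None:
    for experience in scenario.train_stream:
        # ``dataset`` is the AvalancheSubset restricted to this experience's
        # patterns; ``task_labels`` lists the task ids present in it.
        data = experience.dataset
        print('experience', experience.current_experience,
              'patterns:', len(data),
              'tasks:', experience.task_labels)
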
__all__ = [
'TGenericCLScenario',
'GenericCLScenario',
'GenericScenarioStream',
'AbstractExperience',
'GenericExperience',
]
|
the-stack_0_17208
|
import random
import numpy as np
import tensorflow as tf
import scipy.sparse as sp
from .base_sequence import Sequence
class MiniBatchSequence(Sequence):
def __init__(
self,
x,
y,
out_weight=None,
shuffle=False,
batch_size=1,
*args, **kwargs
):
super().__init__(*args, **kwargs)
assert batch_size == 1
self.n_batches = len(x)
self.shuffle = shuffle
self.indices = list(range(self.n_batches))
self.batch_size = batch_size
self.x, self.y, self.out_weight = self.astensors(x, y, out_weight)
def __len__(self):
return self.n_batches
    def __getitem__(self, index):
        idx = self.indices[index]
        # ``out_weight`` is optional; avoid indexing it when it was not given.
        weight = self.out_weight[idx] if self.out_weight is not None else None
        return self.x[idx], self.y[idx], weight
def on_epoch_end(self):
if self.shuffle:
self._shuffle_batches()
def _shuffle_batches(self):
"""
        Shuffle the order of the pre-built batches at the end of each epoch
"""
random.shuffle(self.indices)
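

# Illustrative sketch (assumption: ``x`` and ``y`` are per-batch lists of
# already-prepared inputs, which is what the ``batch_size == 1`` assert above
# implies -- every item of this sequence is one pre-built full batch):
#
#     seq = MiniBatchSequence(x=batched_inputs, y=batched_labels, shuffle=True)
#     inputs, labels, weight = seq[0]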
class SAGEMiniBatchSequence(Sequence):
def __init__(
self,
x,
y=None,
out_weight=None,
sizes=[5, 5],
shuffle=False,
batch_size=512,
*args, **kwargs
):
super().__init__(*args, **kwargs)
self.node_attr, self.adj_matrix, self.batch_nodes = x
self.y = y
self.n_batches = int(np.ceil(len(self.batch_nodes) / batch_size))
self.shuffle = shuffle
self.batch_size = batch_size
self.indices = np.arange(len(self.batch_nodes))
self.sizes = sizes
self.node_attr = self.astensor(self.node_attr)
def __len__(self):
return self.n_batches
def __getitem__(self, index):
if self.shuffle:
idx = self.indices[index *
self.batch_size:(index + 1) * self.batch_size]
else:
idx = slice(index * self.batch_size, (index + 1) * self.batch_size)
nodes_input = [self.batch_nodes[idx]]
for num_sample in self.sizes:
neighbors = sample_neighbors(
self.adj_matrix, nodes_input[-1], num_sample).ravel()
nodes_input.append(neighbors)
y = self.y[idx] if self.y is not None else None
return self.astensors([self.node_attr, *nodes_input], y)
def on_epoch_end(self):
if self.shuffle:
self._shuffle_batches()
def _shuffle_batches(self):
"""
Shuffle all nodes at the end of each epoch
"""
random.shuffle(self.indices)
def sample_neighbors(adj_matrix, nodes, num_neighbors):
    # Shuffling the rows of the transposed view permutes the columns of
    # ``adj_matrix`` in place, so taking the first ``num_neighbors`` columns
    # for the requested nodes yields a random neighbor sample per node.
    np.random.shuffle(adj_matrix.T)
    return adj_matrix[nodes, :num_neighbors]
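

# Illustrative sketch of ``sample_neighbors`` above. Assumption: ``adj_matrix``
# is a dense per-node neighbor-id matrix (one row per node), as the indexing
# ``adj_matrix[nodes, :num_neighbors]`` requires; the values below are made up.
def _sample_neighbors_demo() -> np.ndarray:
    neighbor_ids = np.array([[1, 2, 3],
                             [0, 2, 3],
                             [0, 1, 3],
                             [0, 1, 2]])
    # Two randomly-chosen neighbor ids for nodes 0 and 2.
    return sample_neighbors(neighbor_ids, np.array([0, 2]), 2)
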
class FastGCNBatchSequence(Sequence):
def __init__(
self,
x,
y=None,
shuffle=False,
batch_size=None,
rank=None,
*args, **kwargs
):
super().__init__(*args, **kwargs)
node_attr, adj_matrix = x
self.y = y
self.n_batches = int(
np.ceil(adj_matrix.shape[0] / batch_size)) if batch_size else 1
self.shuffle = shuffle
self.batch_size = batch_size
self.indices = np.arange(adj_matrix.shape[0])
self.rank = rank
if rank:
self.p = column_prop(adj_matrix)
self.node_attr, self.adj_matrix = node_attr, adj_matrix
def __len__(self):
return self.n_batches
def __getitem__(self, index):
if not self.batch_size:
(node_attr, adj_matrix), y = self.full_batch()
else:
(node_attr, adj_matrix), y = self.mini_batch(index)
if self.rank:
p = self.p
rank = self.rank
distr = adj_matrix.sum(0).A1.nonzero()[0]
if rank > distr.size:
q = distr
else:
q = np.random.choice(
distr, rank, replace=False, p=p[distr] / p[distr].sum())
adj_matrix = adj_matrix[:, q].dot(sp.diags(1.0 / (p[q] * rank)))
if tf.is_tensor(node_attr):
node_attr = tf.gather(node_attr, q)
else:
node_attr = node_attr[q]
return self.astensors((node_attr, adj_matrix), y)
def full_batch(self):
return (self.node_attr, self.adj_matrix), self.y
def mini_batch(self, index):
if self.shuffle:
idx = self.indices[index *
self.batch_size:(index + 1) * self.batch_size]
else:
idx = slice(index * self.batch_size, (index + 1) * self.batch_size)
y = self.y[idx]
adj_matrix = self.adj_matrix[idx]
node_attr = self.node_attr
return (node_attr, adj_matrix), y
def on_epoch_end(self):
if self.shuffle:
self._shuffle_batches()
def _shuffle_batches(self):
"""
Shuffle all nodes at the end of each epoch
"""
random.shuffle(self.indices)
def column_prop(adj):
column_norm = sp.linalg.norm(adj, axis=0)
norm_sum = column_norm.sum()
return column_norm / norm_sum
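

# Illustrative sketch of ``column_prop`` above, purely for documentation;
# FastGCNBatchSequence calls it internally when ``rank`` is set. The small
# adjacency matrix below is made up.
def _column_prop_demo() -> np.ndarray:
    adj = sp.csr_matrix(np.array([[0., 1., 1.],
                                  [1., 0., 0.],
                                  [1., 0., 0.]]))
    # Column norms normalised into a sampling distribution over columns/nodes.
    p = column_prop(adj)
    assert np.isclose(p.sum(), 1.0)
    return p
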
|
the-stack_0_17212
|
#!/usr/bin/env python3
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import shutil
import subprocess
from glob import glob
from common import die
def main() -> None:
ensure_shellcheck_installed()
run_shellcheck()
def ensure_shellcheck_installed() -> None:
if shutil.which("shellcheck") is None:
die(
"`shellcheck` not installed! You may download this through your operating system's "
"package manager, such as brew, apt, or yum. See "
"https://github.com/koalaman/shellcheck#installing."
)
def run_shellcheck() -> None:
targets = set(glob("./**/*.sh", recursive=True)) | {
"./pants",
"./build-support/pants_venv",
"./build-support/virtualenv",
"./build-support/githooks/pre-commit",
"./build-support/githooks/prepare-commit-msg",
}
targets -= set(glob("./build-support/bin/native/src/**/*.sh", recursive=True))
targets -= set(glob("./build-support/virtualenv.dist/**/*.sh", recursive=True))
targets -= set(glob("./build-support/virtualenvs/**/*.sh", recursive=True))
targets -= set(glob("./build-support/twine-deps.venv/**/*.sh", recursive=True))
command = ["shellcheck", "--shell=bash", "--external-sources"] + sorted(targets)
try:
subprocess.run(command, check=True)
except subprocess.CalledProcessError:
die("Please fix the above errors and run again.")
if __name__ == "__main__":
main()
|