# -*- test-case-name: mimic.test.test_dns -*-
"""
Defines GET endpoints for reverse DNS.
"""
import json
from uuid import uuid4
from six import text_type
from zope.interface import implementer
from twisted.plugin import IPlugin
from mimic.rest.mimicapp import MimicApp
from mimic.catalog import Entry
from mimic.catalog import Endpoint
from mimic.imimic import IAPIMock
@implementer(IAPIMock, IPlugin)
class DNSApi(object):
"""
Rest endpoints for mocked DNS Api.
"""
def __init__(self, regions=[""]):
"""
Create a DNSApi.
"""
self._regions = regions
def catalog_entries(self, tenant_id):
"""
List catalog entries for the DNS API.
"""
return [
Entry(
tenant_id, "rax:dns", "cloudDNS",
[
Endpoint(tenant_id, region, text_type(uuid4()), prefix="v1.0")
for region in self._regions
]
)
]
def resource_for_region(self, region, uri_prefix, session_store):
"""
Get an :obj:`twisted.web.iweb.IResource` for the given URI prefix;
implement :obj:`IAPIMock`.
"""
return DNSMock(self, uri_prefix, session_store, region).app.resource()
class DNSMock(object):
"""
DNS Mock
"""
def __init__(self, api_mock, uri_prefix, session_store, name):
"""
Create a DNS region with a given URI prefix
"""
self.uri_prefix = uri_prefix
self._api_mock = api_mock
self._session_store = session_store
self._name = name
app = MimicApp()
@app.route('/v1.0/<string:tenant_id>/rdns/cloudServersOpenStack', methods=['GET'])
def get_PTR_records(self, request, tenant_id):
"""
Lists all PTR records configured for a specified Cloud device
"""
request.setResponseCode(404)
return json.dumps({'message': 'Not Found',
'code': 404,
'details': 'No PTR records found'})
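# Illustrative usage sketch (not part of mimic itself; the tenant id below is a placeholder):
#
#   api = DNSApi(regions=["ORD"])
#   entries = api.catalog_entries("555555")
#   # -> one "cloudDNS" Entry of type "rax:dns" with one Endpoint per region, and a
#   #    GET to /v1.0/555555/rdns/cloudServersOpenStack on the mock returns the 404
#   #    JSON body built in get_PTR_records above.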
|
from .. import arr
import numpy as np
import math
cfg = {
'name': 'PVI - Plethysmographic Variability Index',
'group': 'Medical algorithms',
    'desc': 'Calculate the plethysmographic variability index (PVI) and respiratory rate from the pleth waveform',
'reference': 'Aboy et al, An Enhanced Automatic Algorithm for Estimation of Respiratory Variations in Arterial Pulse Pressure During Regions of Abrupt Hemodynamic Changes. IEEE TRANSACTIONS ON BIOMEDICAL ENGINEERING, VOL. 56, NO. 10, OCTOBER 2009',
'overlap': 3,
'interval': 40,
'inputs': [{'name': 'pleth', 'type': 'wav'}],
'outputs': [{'name': 'rr', 'type': 'num', 'min': 0, 'max': 30, 'unit': '/min'}, {'name': 'pvi', 'type': 'num', 'min': 0, 'max': 30, 'unit': '%'}],
'pp': 0
}
def b(u):
if -5 <= u <= 5:
return math.exp(-u * u / 2)
else:
return 0
def run(inp, opt, cfg):
"""
    Calculate respiratory rate and plethysmographic variability from a pleth waveform.
    :param inp: input dict holding the 'pleth' waveform values and sample rate
    :return: respiratory rate and variability index, one list per configured output
"""
data = arr.interp_undefined(inp['pleth']['vals'])
srate = inp['pleth']['srate']
data = arr.resample_hz(data, srate, 100)
srate = 100
if len(data) < 30 * srate:
        return [[], []]  # not enough data; return one empty list per configured output
minlist, maxlist = arr.detect_peaks(data, srate)
maxlist = maxlist[1:]
# estimates the upper ue(n) and lower le(n) envelopes
xa = np.array([data[idx] for idx in minlist])
    le = np.zeros(len(data))  # float array; an int array would truncate the envelope values
for i in range(len(data)):
be = np.array([b((i - idx) / (0.2 * srate)) for idx in minlist])
s = sum(be)
if s != 0:
le[i] = np.dot(xa, be) / s
xb = np.array([data[idx] for idx in maxlist])
    ue = np.zeros(len(data))  # float array, as above
for i in range(len(data)):
be = np.array([b((i - idx) / (0.2 * srate)) for idx in maxlist])
s = sum(be)
if s != 0:
ue[i] = np.dot(xb, be) / s
re = ue - le
re[re < 0] = 0
# estimates resp rate
rr = arr.estimate_resp_rate(re, srate)
# split by respiration
nsamp_in_breath = int(srate * 60 / rr)
m = int(len(data) / nsamp_in_breath) # m segments exist
pps = []
for i in range(m - 1):
imax = arr.max_idx(re, i * nsamp_in_breath, (i+2) * nsamp_in_breath) # 50% overlapping
imin = arr.min_idx(re, i * nsamp_in_breath, (i+2) * nsamp_in_breath)
ppmax = re[imax]
ppmin = re[imin]
ppe = 2 * (ppmax - ppmin) / (ppmax + ppmin) * 100 # estimate
if ppe > 50 or ppe < 0:
continue
pp = cfg['pp']
if pp == 0:
pp = ppe
err = abs(ppe - pp)
if err < 1:
pp = ppe
elif err < 25:
pp = (pp + ppe) / 2
else:
pass # dont update
cfg['pp'] = pp
pps.append({'dt': (i * nsamp_in_breath) / srate, 'val': pp})
return [
[{'dt': cfg['interval'], 'val': rr}],
pps
]
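def _demo():
    """Minimal usage sketch (not part of the original filter): builds a synthetic
    pleth-like signal to show the expected input layout. The 0.2 Hz amplitude
    modulation stands in for respiration; all values are illustrative only."""
    srate = 100
    t = np.arange(0, 60, 1.0 / srate)
    pleth = (1 + 0.1 * np.sin(2 * np.pi * 0.2 * t)) * np.sin(2 * np.pi * 1.5 * t)
    inp = {'pleth': {'vals': pleth.tolist(), 'srate': srate}}
    # returns [[{'dt': ..., 'val': rr}], [{'dt': ..., 'val': pvi}, ...]]
    return run(inp, {}, cfg)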
|
#!/usr/bin/env python3
import argparse
import logging
from shared import common_utils
from operations import operations
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, format="%(levelname)s : %(message)s")
parser = argparse.ArgumentParser(prog='ALU UCE Auto Builder')
sub_parsers = parser.add_subparsers(dest='subcommand', title='Subcommands')
sub_parsers.metavar = 'subcommand-name'
for operation, spec in operations.items():
subcommand_parser = sub_parsers.add_parser(operation.replace('_', '-'), help=spec['help'])
        common_utils.add_arguments_to_parser(subcommand_parser, spec['options'])
args = vars(parser.parse_args())
if args['subcommand']:
        subcommand = args['subcommand'].replace('-', '_')
        operations[subcommand]['runner'](args)
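# Assumed shape of an entry in `operations` (illustrative only; the real spec lives in
# the operations module): each value provides a help string, the argparse option specs
# consumed by common_utils.add_arguments_to_parser, and a runner callable that receives
# the parsed-args dict.
#
#   operations = {
#       'build_uce': {                      # hypothetical subcommand name
#           'help': 'Build a UCE package',
#           'options': [...],               # argparse option specs
#           'runner': lambda args: None,
#       },
#   }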
|
from typing import Optional, Union, Tuple, List, Dict
from torch import nn
def init_weights(module: nn.Module):
"""
    Initialize one module. It uses xavier_normal to initialize nn.Embedding
    and xavier_uniform to initialize nn.Linear's weight.
Parameters
----------
module
A Pytorch nn.Module.
"""
if isinstance(module, nn.Embedding):
nn.init.xavier_normal_(module.weight)
elif isinstance(module, nn.Linear):
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def assign_encoder_layer_ids(
encoder_names: List[List[str]],
):
"""
Assign ids to encoder layers. The encoder may contain several blocks e.g., block1 and block2.
This function iterates through all the layers of each block from the input end towards the output end.
    It increments the layer id by 1 whenever the detected digit in a layer name changes.
Parameters
----------
encoder_names
Encoder layer names.
Returns
-------
name_to_id
The encoder layer-to-id mapping.
encoder_layer_num
The encoder layer number.
"""
name_to_id = {}
cur_id = 0
for i, group_names in enumerate(encoder_names):
last_inferred_id = -1
for n in group_names:
detect_id = False
n_splits = n.split('.')
for split in n_splits:
# the first digit encountered is used to infer layer id
if split.isdigit():
inferred_id = int(split)
                    # increase by at most 1 each time
if inferred_id != last_inferred_id:
cur_id += 1 # layer.0 -> layer_id 1
last_inferred_id = inferred_id
name_to_id[n] = cur_id
detect_id = True
break
if detect_id is False:
raise ValueError(f"parameter name: {n} not has no id inside")
if len(name_to_id) > 0:
encoder_layer_num = max(name_to_id.values())
else:
encoder_layer_num = 0
return name_to_id, encoder_layer_num
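# Worked example (hypothetical layer names): the first digit in each dotted name
# infers the layer id, which only increases when that digit changes.
#
#   assign_encoder_layer_ids([["block.0.attn.weight",
#                              "block.0.mlp.weight",
#                              "block.1.attn.weight"]])
#   -> ({'block.0.attn.weight': 1, 'block.0.mlp.weight': 1, 'block.1.attn.weight': 2}, 2)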
def assign_non_encoder_layer_ids(
non_encoder_names: List[str],
layer_id: int,
):
"""
Assign the provided id to non-encoder layers.
Parameters
----------
    non_encoder_names
        Names of layers not belonging to an encoder.
    layer_id
        The provided layer id.
Returns
-------
A dictionary mapping the layer names (keys) to their ids (values).
"""
name_to_id = {}
for n in non_encoder_names:
name_to_id[n] = layer_id
return name_to_id
def split_encoder_non_encoder(names: List[str]):
"""
Group layer names into two types: encoder and non-encoder.
    A layer belongs to the encoder if its name contains at least one digit.
It uses this rule since a model's encoder in Pytorch's implementation
is generally wrapped by nn.Sequential() or nn.ModuleList(),
which produce digits in layer names.
Parameters
----------
names
Model layer names.
Returns
-------
encoder_names
A list of encoder layer names.
non_encoder_names
A list of non-encoder layer names.
"""
encoder_names = []
non_encoder_names = []
for n in names:
is_encoder = False
for i in n.split("."):
if i.isdigit():
encoder_names.append(n)
is_encoder = True
break
if not is_encoder:
non_encoder_names.append(n)
return encoder_names, non_encoder_names
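# Worked example (hypothetical layer names): a digit anywhere in the dotted name
# marks the layer as belonging to the encoder.
#
#   split_encoder_non_encoder(["encoder.layer.0.weight", "pooler.dense.weight"])
#   -> (["encoder.layer.0.weight"], ["pooler.dense.weight"])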
def group_param_names(
names: List[str],
pre_encoder_patterns: Tuple[str, ...],
post_encoder_patterns: Tuple[str, ...],
model_prefix: Optional[str] = None,
):
"""
Group layer names into three types: pre-encoder, encoder, and post-encoder.
If "model_prefix" is provided, the selected layer names must start with it.
In this case, the left names will be returned for the next-time processing.
This function first extracts the first-level children modules' names and
classify them into encoder and non-encoder layers. Note that an encoder may
consist of several manually named children modules, e.g., block1 and block2.
The non-encoder layers are further subdivided into pre-encoder and post-encoder.
Parameters
----------
names
Model layer names
pre_encoder_patterns
Patterns to identify a layer as a pre-encoder layer. If a layer name contains one pattern,
the layer will be grouped into pre-encoder layers.
post_encoder_patterns
Patterns to identify a layer as a post-encoder layer. If a layer name contains one pattern,
the layer will be grouped into post-encoder layers.
model_prefix
A prefix to filter layer names. Only layer names starting with it will be selected.
Returns
-------
left_names
        The layer names left over for later processing.
encoder_names_grouped
Encoder layer names.
pre_encoder_names
Names of layers before the encoder.
post_encoder_names
Names of layers after the encoder.
"""
    # the two sets of patterns must not intersect
assert all(pre_p not in post_encoder_patterns for pre_p in pre_encoder_patterns)
left_names = []
# in case model_prefix is provided, e.g., the clip model with image and text branches
selected_names = []
for n in names:
if model_prefix is not None and not n.startswith(model_prefix):
left_names.append(n)
else:
selected_names.append(n)
# split blocks at the first level
children_prefix = []
for n in selected_names:
child_name = n[len(model_prefix)+1:].split(".")[0]
child_prefix = f"{model_prefix}.{child_name}"
if child_prefix not in children_prefix:
children_prefix.append(child_prefix)
encoder_names_grouped = []
non_encoder_names = []
for child_prefix in children_prefix:
per_names_group = [n for n in selected_names if n.startswith(child_prefix)]
per_encoder_names, per_non_encoder_names = split_encoder_non_encoder(per_names_group)
encoder_names_grouped.append(per_encoder_names)
non_encoder_names.extend(per_non_encoder_names)
pre_encoder_names = []
post_encoder_names = []
for n in non_encoder_names:
if any(p in n for p in pre_encoder_patterns):
pre_encoder_names.append(n)
elif any(p in n for p in post_encoder_patterns):
post_encoder_names.append(n)
else:
raise ValueError(f"parameter name: {n} belong to neither pre or post encoder names")
# only process left names in next iteration
return left_names, encoder_names_grouped, pre_encoder_names, post_encoder_names
def reverse_layer_ids(
encoder_name_to_id: dict,
    pre_encoder_name_to_id: dict,
    post_encoder_name_to_id: dict,
):
"""
    The layer ids need to increase when going from the output end to the input end.
    We reverse the ids, which were originally assigned in increasing order from the
    input end to the output end.
Parameters
----------
encoder_name_to_id
The layer-to-id mapping of encoder layers.
    pre_encoder_name_to_id
        The layer-to-id mapping of pre-encoder layers.
    post_encoder_name_to_id
        The layer-to-id mapping of post-encoder layers.
Returns
-------
The layer-to-id mapping of all layers with layer ids reversed.
"""
    name_to_id = {**pre_encoder_name_to_id, **encoder_name_to_id, **post_encoder_name_to_id}
if len(name_to_id) > 0:
layer_num = max(name_to_id.values())
# if no post encoder layers, the minimum layer id should be 1
        if len(post_encoder_name_to_id) == 0:
layer_num += 1
for n, layer_id in name_to_id.items():
name_to_id[n] = layer_num - layer_id
return name_to_id
def assign_layer_ids(
names: List[str],
pre_encoder_patterns: Tuple[str, ...],
post_encoder_patterns: Tuple[str, ...],
model_pre: Optional[str] = None,
):
"""
Assign ids to all layers. It splits a model into three parts: pre-encoder, encoder, and post-encoder.
Encoder is generally a stack of multiple similar layers, such as transformer layers. Since encoder is
generally wrapped by nn.Sequential() or nn.ModuleList(), its inside layer names contain digits.
    It sets 0 as the id of all post-encoder layers and the maximum id (layer_num) for all the pre-encoder
    layers. The encoder layers have decreasing ids from the input end to the output end.
Parameters
----------
names
model layer names.
pre_encoder_patterns
Patterns to identify a layer as a pre-encoder layer. If a layer name contains one pattern,
the layer will be grouped into pre-encoder layers.
post_encoder_patterns
Patterns to identify a layer as a post-encoder layer. If a layer name contains one pattern,
the layer will be grouped into post-encoder layers.
model_pre
        The layer names' prefix. Only the layer names with this prefix will be assigned ids. The remaining
        layer names will be returned.
Returns
-------
name_to_id
A dictionary mapping the layer names (keys) to their ids (values).
left_names
The layer names not starting with the "model_pre".
"""
left_names, encoder_names, pre_encoder_names, post_encoder_names = \
group_param_names(
names=names,
pre_encoder_patterns=pre_encoder_patterns,
post_encoder_patterns=post_encoder_patterns,
model_prefix=model_pre,
)
# add a constraint
if len(encoder_names) == 0 and len(pre_encoder_names) != 0:
raise ValueError(
f"encoder_names is empty, but pre_encoder_names has values: {pre_encoder_names}"
)
encoder_name_to_id, encoder_layer_num = \
assign_encoder_layer_ids(
encoder_names=encoder_names,
)
pre_encoder_name_to_id = \
assign_non_encoder_layer_ids(
non_encoder_names=pre_encoder_names,
layer_id=0
)
post_encoder_name_to_id = \
assign_non_encoder_layer_ids(
non_encoder_names=post_encoder_names,
layer_id=encoder_layer_num + 1
)
name_to_id = reverse_layer_ids(
encoder_name_to_id=encoder_name_to_id,
        pre_encoder_name_to_id=pre_encoder_name_to_id,
        post_encoder_name_to_id=post_encoder_name_to_id
)
return name_to_id, left_names
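if __name__ == "__main__":
    # Illustrative smoke test (layer names, patterns, and prefix are made up):
    # post-encoder layers end up with id 0 and ids grow toward the input end.
    demo_names = [
        "model.embeddings.word_embeddings.weight",
        "model.encoder.layer.0.attention.weight",
        "model.encoder.layer.1.attention.weight",
        "model.pooler.dense.weight",
    ]
    demo_name_to_id, demo_left = assign_layer_ids(
        names=demo_names,
        pre_encoder_patterns=("embed",),
        post_encoder_patterns=("pooler",),
        model_pre="model",
    )
    # Expected: embeddings -> 3, encoder.layer.0 -> 2, encoder.layer.1 -> 1, pooler -> 0
    print(demo_name_to_id, demo_left)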
|
import sys
import time
import ipdb
import numpy as np
import os
path = os.path.realpath('../')
if path not in sys.path:
sys.path.insert(0, path)
from pyDecorators import InOut, ChangeState, Catch
class <YourLaserName>(object):
def __init__(self, **kwargs):
super(<YourLaserName>, self).__init__()
self._open = False
self._lbd = 0
self._cc = 0
self._scan_lim = []
self._scan_speed = 0
self._scan = 0
self._beep = 0
self._output = 0
self._is_scaning = False
# self._is_changing_lbd = False
self._no_error = <>
self._haserr = False
# Miscs
self._err_msg = ''
# -- Methods --
# ---------------------------------------------------------
def Query(self, word):
        # query method for your laser
pass
# -- Properties --
# ---------------------------------------------------------
@property
@InOut.output(bool)
def connected(self):
return self._open
@connected.setter
@Catch.error
def connected(self,value):
pass
@property
@InOut.output(bool)
def output(self):
word = <>
self._output = self.Query(word)
return self._output
@output.setter
@Catch.error
@InOut.accepts(bool)
def output(self,value):
word = <>
self.Query(word)
self._output = value
@property
@InOut.output(float)
def lbd(self):
word = <>
self._lbd = self.Query(word)
return self._lbd
@lbd.setter
@InOut.accepts(float)
@Catch.error
def lbd(self, value):
self._targetlbd = value
self.Query('OUTP:TRACK 1')
word = <>
self.Query(word)
self._lbd = value
@property
@InOut.output(float)
def current(self):
word = <>
self._cc = self.Query(word)
return self._cc
@current.setter
@Catch.error
@InOut.accepts(float)
def current(self, value):
word = <>
self.Query(word)
self._cc = value
@property
@InOut.output(float,float)
def scan_limit(self):
word1 = 'SOUR:WAVE:START?'
word2 = 'SOUR:WAVE:STOP?'
self._scan_lim = [self.Query(word1),
self.Query(word2)]
return self._scan_lim
@scan_limit.setter
@Catch.error
@InOut.accepts(list)
def scan_limit(self, value):
start = value[0]
stop = value[1]
word1 = 'SOUR:WAVE:START {}'.format(start)
self.Query(word1)
word2 = 'SOUR:WAVE:STOP {}'.format(stop)
self.Query(word2)
self._scan_lim = value
@property
@Catch.error
@InOut.output(float)
def scan_speed(self):
word1 = 'SOUR:WAVE:SLEW:FORW?'
self._scan_speed = self.Query(word1)
return self._scan_speed
@scan_speed.setter
@Catch.error
@InOut.accepts(float)
def scan_speed(self, value):
word = <>
self.Query(word)
word = <>
self.Query(word)
self._scan_speed = value
@property
@InOut.output(float)
def scan(self):
word = <>
self._scan = self.Query(word)
return self._scan
@scan.setter
@Catch.error
@ChangeState.scan("OUTPut:SCAN:START",'OUTPut:SCAN:STOP')
@InOut.accepts(bool)
def scan(self, value):
self.Query('SOUR:WAVE:DESSCANS 1')
self._scan = value
if self._scan:
self.Query("OUTPut:SCAN:START")
else:
self.Query("OUTPut:SCAN:STOP")
@property
@InOut.output(float)
def pzt(self):
word = <>
self._pzt = self.Query(word)
return self._pzt
@pzt.setter
@Catch.error
@InOut.accepts(float)
def pzt(self, value):
word = <>
self.Query(word)
self._pzt = value
@property
@InOut.output(bool)
def beep(self):
word = <>
self._beep = self.Query(word)
        return self._beep
@beep.setter
@Catch.error
@InOut.accepts(bool)
def beep(self, value):
word = <>
self.Query(word)
self._beep = value
@property
def identity(self):
word = <>
self._id = self.Query(word)
return self._id
@property
def error(self):
word = <>
self._error = ''
err = self.Query(word)
return err
@property
def has_error(self):
word = <>
dum = self.Query(word)
if dum =='128': self._haserr = True
if dum == '0': self._haserr = False
return self._haserr
@property
@InOut.output(bool)
def _is_changing_lbd(self):
return self.Query('OUTP:TRACK?')
@property
def clear(self):
pass
@clear.setter
@InOut.accepts(bool)
def clear(self,val):
if val:
self.Query('*CLS')
if __name__ == '__main__':
pass |
class Planet():
"""
This object represents a planet
_________________________________________________
Attributes:
name, mass, aphelion, perihelion, semi_major_axis,
    eccentricity, orbital_period, synodic_period
_________________________________________________
"""
def __init__(self,
name=None,
mass=None,
aphelion = None,
perihelion = None,
semi_major_axis = None,
eccentricity = None,
orbital_period = None,
                 synodic_period = None
):
self.name = name
self.mass = mass
self.aphelion = aphelion
self.perihelion = perihelion
self.semi_major_axis = semi_major_axis
self.eccentricity = eccentricity
self.orbital_period = orbital_period
        self.synodic_period = synodic_period
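# Example instance (approximate published values, for illustration only): Mars,
# with its synodic period given relative to Earth.
mars = Planet(
    name="Mars",
    mass=6.417e23,              # kg
    aphelion=249.26e6,          # km
    perihelion=206.65e6,        # km
    semi_major_axis=227.94e6,   # km
    eccentricity=0.0934,
    orbital_period=687.0,       # days
    synodic_period=779.9,       # days, relative to Earth
)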
|
"""Test migration
Revision ID: 5df4b399aae2
Revises: 486c860564a2
Create Date: 2022-03-17 11:41:30.500797
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '5df4b399aae2'
down_revision = '486c860564a2'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
# add filters from sncosmo 2.7.0 in the right place
with op.get_context().autocommit_block():
op.execute(
"ALTER TYPE bandpasses ADD VALUE IF NOT EXISTS 'atlasc' AFTER 'ps1::w'"
)
op.execute(
"ALTER TYPE bandpasses ADD VALUE IF NOT EXISTS 'atlaso' AFTER 'atlasc'"
)
op.execute(
"ALTER TYPE bandpasses ADD VALUE IF NOT EXISTS '2massj' AFTER 'atlaso'"
)
op.execute(
"ALTER TYPE bandpasses ADD VALUE IF NOT EXISTS '2massh' AFTER '2massj'"
)
op.execute(
"ALTER TYPE bandpasses ADD VALUE IF NOT EXISTS '2massks' AFTER '2massh'"
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
from consolemenu import *
from consolemenu.items import *
import modules
class Modules:
def __init__(self):
self.modules = {}
self.menu = ConsoleMenu("Modules", "Run code on the db data.")
def register_module(self, module_name, module_func):
self.modules[module_name] = module_func
def run_module(self, module_name):
self.modules[module_name]()
    def print_menu(self):
        self.menu = SelectionMenu(list(self.modules.keys()), "Run Module:")
        self.menu.show()
        self.menu.join()
        # The exit option is appended after the module entries; bail out if chosen.
        if self.menu.selected_option >= len(self.modules):
            return
        selection = list(self.modules.keys())[self.menu.selected_option]
        self.run_module(selection)
if __name__ == "__main__":
modulesHandler = Modules()
modulesHandler.register_module("Convert Watt to Kelvin", modules.convert_W2K_module)
modulesHandler.register_module("Convert Kelvin to Watt", modules.convert_K2W_module)
modulesHandler.register_module(
"Check for faulty values in the dataset",
modules.check_for_faulty_values_in_dataset,
)
modulesHandler.print_menu()
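    # Any zero-argument callable can be registered as a module, e.g. (illustrative only):
    #   modulesHandler.register_module("Print a greeting", lambda: print("hello"))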
|
""" Generate disconnected testrespiratory network """
import graph_tool.all as gt
import pickle
f = open('param_files/param_dashevskiy.pkl', 'rb')
p = pickle.load(f)
f.close()
# setup "enum" types
CS = 0
CI = 1
TS = 2
Sil = 3
gCaN_array = (p['gCaNS'], p['gCaNI'], p['gCaNTS'], p['gCaNSil']);
gP_array = (p['gPS'], p['gPI'], p['gPTS'], p['gPSil']);
EL_array = (p['ELS'], p['ELI'], p['ELTS'], p['ELSil']);
g = gt.Graph()
g.add_vertex(4)
edge_gsyn = g.new_edge_property("double")
vertex_type = g.new_vertex_property("int")
# vertex_gCaN = g.new_vertex_property("double")
# vertex_gP = g.new_vertex_property("double")
# vertex_EL = g.new_vertex_property("double")
# CS neuron
v = g.vertex(0)
vertex_type[v] = CS
# vertex_gCaN[v] = gCaN_array[CS]
# vertex_gP[v] = gP_array[CS]
# vertex_EL[v] = EL_array[CS]
# CI neuron
v = g.vertex(1)
vertex_type[v] = CI
# TS neuron
v = g.vertex(2)
vertex_type[v] = TS
# SIL neuron
v = g.vertex(3)
vertex_type[v] = Sil
g.edge_properties["gsyn"] = edge_gsyn
g.vertex_properties["type"] = vertex_type
g.save("../graphs/test.gml", fmt="gml")
|
supported_vector_sizes = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096]
result = ""
for i in range(len(supported_vector_sizes)):
vsize = supported_vector_sizes[i]
if i == 0:
result += "#if"
else:
result += "#elif"
result += " STANDARD_VECTOR_SIZE == " + str(vsize) + "\n"
result += "const sel_t FlatVector::incremental_vector[] = {"
for idx in range(vsize):
if idx != 0:
result += ", "
result += str(idx)
result += "};\n"
result += """#else
#error Unsupported VECTOR_SIZE!
#endif"""
print(result)
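# For reference, the printed fragment starts like this when the script is run as-is:
#
#   #if STANDARD_VECTOR_SIZE == 2
#   const sel_t FlatVector::incremental_vector[] = {0, 1};
#   #elif STANDARD_VECTOR_SIZE == 4
#   const sel_t FlatVector::incremental_vector[] = {0, 1, 2, 3};
#   ...
#   #else
#   #error Unsupported VECTOR_SIZE!
#   #endif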
|
def read_source_json(source):
from sheetsite.json_spreadsheet import JsonSpreadsheet
wb = JsonSpreadsheet(source['filename'])
return wb
|
"""Functions for making test data JSON-serializable.
"""
from collections import Counter
import json
def serializable(obj):
"""Return whether `obj` is JSON-serializable."""
try:
json.dumps(obj)
except (TypeError, OverflowError):
return False
return True
def make_collector(report, result):
"""Return JSON-serializable collector node."""
collector = {
'nodeid': report.nodeid,
# This is the outcome of the collection, not the test outcome
'outcome': report.outcome,
'result': result,
}
if report.longrepr:
# The collection report doesn't provide crash details, so we can only
# add the message, but no traceback etc.
collector['longrepr'] = str(report.longrepr)
return collector
def make_collectitem(item):
"""Return JSON-serializable collection item."""
json_item = {
'nodeid': item.nodeid,
'type': item.__class__.__name__,
}
try:
location = item.location
except AttributeError:
pass
else:
json_item['lineno'] = location[1]
return json_item
def make_testitem(nodeid, keywords, location):
"""Return JSON-serializable test item."""
item = {
'nodeid': nodeid,
'lineno': location[1],
# The outcome will be overridden in case of failure
'outcome': 'passed',
}
if keywords:
item['keywords'] = keywords
return item
def make_teststage(report, stdout, stderr, log, omit_traceback):
"""Return JSON-serializable test stage (setup/call/teardown)."""
stage = {
'duration': report.duration,
'outcome': report.outcome,
}
crash = getattr(report.longrepr, 'reprcrash', None)
if crash is not None:
stage['crash'] = make_fileloc(crash)
if not omit_traceback:
try:
stage['traceback'] = [make_fileloc(x.reprfileloc) for x in
report.longrepr.reprtraceback.reprentries]
except AttributeError:
# Happens if no detailed tb entries are available (e.g. due to
# `--tb=native`, see `_pytest._code.code.ReprTracebackNative`).
# Then we can't provide any tb info beyond the raw error text
# in `longrepr`, so just pass quietly.
pass
if stdout:
stage['stdout'] = stdout
if stderr:
stage['stderr'] = stderr
if log:
stage['log'] = log
# Error representation string (attr is computed property, so get only once)
longrepr = report.longreprtext
if longrepr:
stage['longrepr'] = longrepr
return stage
def make_fileloc(loc):
"""Return JSON-serializable file location representation.
See `_pytest._code.code.ReprFileLocation`.
"""
return {
'path': loc.path,
'lineno': loc.lineno,
'message': loc.message,
}
def make_summary(tests, **kwargs):
"""Return JSON-serializable test result summary."""
summary = Counter([t['outcome'] for t in tests.values()])
summary['total'] = sum(summary.values())
summary.update(kwargs)
return summary
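# Worked example (hypothetical outcomes): the Counter is keyed by outcome, extended
# with a 'total' count and any extra keyword counts such as 'collected'.
#
#   make_summary({'t1': {'outcome': 'passed'}, 't2': {'outcome': 'failed'}}, collected=2)
#   -> Counter({'passed': 1, 'failed': 1, 'total': 2, 'collected': 2})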
def make_warning(warning_message, when):
# `warning_message` is a stdlib warnings.WarningMessage object
return {
'message': str(warning_message.message),
'category': warning_message.category.__name__,
'when': when,
'filename': warning_message.filename,
'lineno': warning_message.lineno
}
def make_report(**kwargs):
return dict(kwargs)
|
#!/usr/bin/env python3
from __future__ import division
from builtins import str
import os, sys, time, json, requests, logging
import re, traceback, argparse, copy, bisect
from xml.etree import ElementTree
from UrlUtils import UrlUtils
import util
import gtUtil
from util import ACQ, InvalidOrbitException
import datetime
from datetime import datetime, timedelta
import groundTrack
from osgeo import ogr
import lightweight_water_mask
import csv
from dateutil import parser
# set logger
log_format = "[%(asctime)s: %(levelname)s/%(name)s/%(funcName)s] %(message)s"
logging.basicConfig(format=log_format, level=logging.INFO)
class LogFilter(logging.Filter):
def filter(self, record):
if not hasattr(record, 'id'): record.id = '--'
return True
logger = logging.getLogger(os.path.splitext(os.path.basename(__file__))[0])
logger.setLevel(logging.INFO)
logger.addFilter(LogFilter())
SLC_RE = re.compile(r'(?P<mission>S1\w)_IW_SLC__.*?' +
r'_(?P<start_year>\d{4})(?P<start_month>\d{2})(?P<start_day>\d{2})' +
r'T(?P<start_hour>\d{2})(?P<start_min>\d{2})(?P<start_sec>\d{2})' +
r'_(?P<end_year>\d{4})(?P<end_month>\d{2})(?P<end_day>\d{2})' +
r'T(?P<end_hour>\d{2})(?P<end_min>\d{2})(?P<end_sec>\d{2})_.*$')
BASE_PATH = os.path.dirname(__file__)
MISSION = 'S1A'
def query_es(query, es_index=None):
    """Query ES."""
    logger.info("query: %s" % query)
uu = UrlUtils()
es_url = uu.rest_url
rest_url = es_url[:-1] if es_url.endswith('/') else es_url
url = "{}/_search?search_type=scan&scroll=60&size=100".format(rest_url)
if es_index:
url = "{}/{}/_search?search_type=scan&scroll=60&size=100".format(rest_url, es_index)
logger.info("url: {}".format(url))
r = requests.post(url, data=json.dumps(query))
if r.status_code != 200:
print("Failed to query %s:\n%s" % (es_url, r.text))
print("query: %s" % json.dumps(query, indent=2))
print("returned: %s" % r.text)
r.raise_for_status()
scan_result = r.json()
#logger.info("scan_result: {}".format(json.dumps(scan_result, indent=2)))
count = scan_result['hits']['total']
if count == 0:
return []
if '_scroll_id' not in scan_result:
logger.info("_scroll_id not found in scan_result. Returning empty array for the query :\n%s" %query)
return []
scroll_id = scan_result['_scroll_id']
hits = []
while True:
r = requests.post('%s/_search/scroll?scroll=60m' % rest_url, data=scroll_id)
res = r.json()
scroll_id = res['_scroll_id']
if len(res['hits']['hits']) == 0: break
hits.extend(res['hits']['hits'])
return hits
def query_aois(starttime, endtime):
"""Query ES for active AOIs that intersect starttime and endtime."""
es_index = "grq_*_area_of_interest"
query = {
"query": {
"bool": {
"should": [
{
"bool": {
"must": [
{
"range": {
"starttime": {
"lte": endtime
}
}
},
{
"range": {
"endtime": {
"gte": starttime
}
}
},
{
"match": {
"dataset_type": "area_of_interest"
}
}
]
}
},
{
"filtered": {
"query": {
"bool": {
"must": [
{
"match": {
"dataset_type": "area_of_interest"
}
},
{
"range": {
"starttime": {
"lte": endtime
}
}
}
]
}
},
"filter": {
"missing": {
"field": "endtime"
}
}
}
},
{
"filtered": {
"query": {
"bool": {
"must": [
{
"match": {
"dataset_type": "area_of_interest"
}
},
{
"range": {
"endtime": {
"gte": starttime
}
}
}
]
}
},
"filter": {
"missing": {
"field": "starttime"
}
}
}
}
]
}
},
"partial_fields" : {
"partial" : {
"include" : [ "id", "starttime", "endtime", "location",
"metadata.user_tags", "metadata.priority" ]
}
}
}
# filter inactive
hits = [i['fields']['partial'][0] for i in query_es(query)
if 'inactive' not in i['fields']['partial'][0].get('metadata', {}).get('user_tags', [])]
#logger.info("hits: {}".format(json.dumps(hits, indent=2)))
#logger.info("aois: {}".format(json.dumps([i['id'] for i in hits])))
return hits
def get_orbit_file(orbit_dt, platform):
logger.info("get_orbit_file : %s : %s" %(orbit_dt, platform))
hits = util.query_orbit_file(orbit_dt, orbit_dt, platform)
#logger.info("get_orbit_file : hits : \n%s\n" %hits)
logger.info("get_orbit_file returns %s result " %len(hits))
#return hits
for hit in hits:
metadata = hit["metadata"]
id = hit['id']
orbit_platform = metadata["platform"]
logger.info(orbit_platform)
if orbit_platform == platform:
url = metadata["context"]["localize_urls"][0]["url"]
return True, id, url
return False, None, None
def query_aois_new(starttime, endtime):
"""Query ES for active AOIs that intersect starttime and endtime."""
es_index = "grq_*_area_of_interest"
query = {
"query": {
"bool": {
"should": [
{
"bool": {
"must": [
{
"range": {
"starttime": {
"lte": endtime
}
}
},
{
"range": {
"endtime": {
"gte": starttime
}
}
},
{
"match": {
"dataset_type": "area_of_interest"
}
},
{
"match": {
"metadata.tags": "standard_product"
}
}
],
"must_not": {
"term": {
"metadata.user_tags": "inactive"
}
}
}
},
{
"filtered": {
"query": {
"bool": {
"must": [
{
"match": {
"dataset_type": "area_of_interest"
}
},
{
"range": {
"starttime": {
"lte": endtime
}
}
},
{
"match": {
"metadata.user_tags": "standard_product"
}
}
],
"must_not": {
"term": {
"metadata.user_tags": "inactive"
}
}
}
},
"filter": {
"missing": {
"field": "endtime"
}
}
}
},
{
"filtered": {
"query": {
"bool": {
"must": [
{
"match": {
"dataset_type": "area_of_interest"
}
},
{
"range": {
"endtime": {
"gte": starttime
}
}
},
{
"match": {
"metadata.user_tags": "standard_product"
}
}
],
"must_not": {
"term": {
"metadata.user_tags": "inactive"
}
}
}
},
"filter": {
"missing": {
"field": "starttime"
}
}
}
}
]
}
},
"partial_fields" : {
"partial" : {
"include" : [ "id", "starttime", "endtime", "location",
"metadata.user_tags", "metadata.priority" ]
}
}
}
# filter inactive
hits = [i['fields']['partial'][0] for i in query_es(query)
if 'inactive' not in i['fields']['partial'][0].get('metadata', {}).get('user_tags', [])]
#logger.info("hits: {}".format(json.dumps(hits, indent=2)))
#logger.info("aois: {}".format(json.dumps([i['id'] for i in hits])))
return hits
def get_aois_by_id(aoi_list):
aois = []
for aoi in aoi_list:
aoi_data = get_aoi_data_by_id(aoi)
logger.info("aoi_data : %s" %aoi_data)
if aoi_data and len(aoi_data)>0:
logger.info("Adding data for aoi: %s" %aoi)
aois.extend(aoi_data)
else:
logger.info("No data found for aoi: %s" %aoi)
return aois
def get_aoi_data_by_id(aoi_id):
es_index = "grq_*_area_of_interest"
# query
query = {
"query":{
"bool":{
"must":[
{ "term":{ "_id": aoi_id } },
]
}
},
"partial_fields" : {
"partial" : {
"include" : [ "id", "starttime", "endtime", "location" ]
}
}
}
# filter inactive
hits = [i['fields']['partial'][0] for i in query_es(query)]
logger.info("hits: {}".format(json.dumps(hits, indent=2)))
#logger.info("aois: {}".format(json.dumps([i['id'] for i in hits])))
return hits
def get_dem_type(acq):
dem_type = "SRTM+v3"
if acq['city'] is not None and len(acq['city'])>0:
if acq['city'][0]['country_name'] is not None and acq['city'][0]['country_name'].lower() == "united states":
dem_type="Ned1"
return dem_type
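# Worked example (hypothetical acquisition metadata): only acquisitions whose first
# city entry is in the United States resolve to the NED1 DEM; everything else falls
# back to SRTM+v3.
#
#   get_dem_type({'city': [{'country_name': 'United States'}]})  # -> "Ned1"
#   get_dem_type({'city': []})                                   # -> "SRTM+v3"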
def getUpdatedTime(s, m):
#date = dateutil.parser.parse(s, ignoretz=True)
#new_date = s + timedelta(minutes = m)
new_date = s + timedelta(minutes = m)
return new_date
def get_time(t):
logger.info("get_time(t) : %s" %t)
t = parser.parse(t).strftime('%Y-%m-%dT%H:%M:%S')
t1 = datetime.strptime(t, '%Y-%m-%dT%H:%M:%S')
logger.info("returning : %s" %t1)
return t1
def isTrackSelected(land, water, land_area, water_area):
selected = False
total_acq_land = 0
for acq_land in land:
total_acq_land+= acq_land
    if ((total_acq_land * 100) / land_area) > 98:
selected = True
return selected
def update_dateformat(d):
logger.info("update_dateformat in: %s" %d)
try:
if isinstance(d, datetime):
d = d.strftime('%Y-%m-%dT%H:%M:%SZ')
elif isinstance(d, str):
d = parser.parse(d).strftime('%Y-%m-%dT%H:%M:%SZ')
else:
logger.info("unknown type : %s" %type(d))
except Exception as err:
logger.info(str(err))
logger.info("update_dateformat out: %s" %d)
return d
def update_dateformat2(d):
logger.info("update_dateformat in: %s" %d)
try:
if isinstance(d, datetime):
d = d.strftime('%Y%m%dT%H%M%S')
elif isinstance(d, str):
d = parser.parse(d).strftime('%Y%m%dT%H%M%S')
else:
logger.info("unknown type : %s" %type(d))
except Exception as err:
logger.info(str(err))
logger.info("update_dateformat out: %s" %d)
return d
def write_result_file(result_file, result):
try:
with open(result_file, 'a') as fo:
cw = csv.writer(fo, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
cw.writerow(["Date", "Orbit", "Type", "Track","Track_Land","Total_Acquisition_Land", "area_delta_in_resolution", "area_threshold_passed", "Orbit_Quality_Test_Passed", "Reference_Unique_IPF_Count", "Secondary_Unique_IPF_Count", "BlackList_Test_Passed", "Enumeration_Passed", "Candidate_Pairs", "Failure_Reason", "comment","Track_AOI_Intersection", "ACQ_POEORB_AOI_Intersection"])
cw.writerow([result.get('dt', ''), result.get('orbit_name', ''), "Primary", result.get('track', ''),result.get('Track_POEORB_Land', '') , result.get('ACQ_Union_POEORB_Land', ''), result.get('res', ''), result.get('area_threshold_passed', ''), result.get('WATER_MASK_PASSED', ''), result.get('primary_ipf_count', ''), result.get('secondary_ipf_count', ''), result.get('BL_PASSED', ''), result.get('matched', ''), result.get('candidate_pairs', ''), result.get('fail_reason', ''), result.get('comment', ''), result.get('Track_AOI_Intersection', ''), result.get('ACQ_POEORB_AOI_Intersection', '')])
except Exception as err:
logger.info("Error writing to csv file : %s : " %str(err))
traceback.print_exc()
def publish_result(reference_result, id_hash):
version = "v2.0.0"
logger.info("\nPUBLISH RESULT")
#write_result_file(result_file, reference_result)
orbit_type = 'poeorb'
aoi_id = reference_result['aoi'].strip().replace(' ', '_')
logger.info("aoi_id : %s" %aoi_id)
reference_result['list_slave_dt']="00000000T000000"
ACQ_RESULT_ID_TMPL = "S1-GUNW-acqlist-audit_trail-R{}-M{:d}S{:d}-TN{:03d}-{}-{}-{}-{}"
id = ACQ_RESULT_ID_TMPL.format('M', reference_result.get('master_count', 0), reference_result.get('slave_count', 0), reference_result.get('track', 0), update_dateformat2(reference_result.get('list_master_dt', '')), update_dateformat2(reference_result.get('list_slave_dt', '')), orbit_type, id_hash[0:4])
logger.info("publish_result : id : %s " %id)
#id = "acq-list-%s" %id_hash[0:4]
prod_dir = id
os.makedirs(prod_dir, 0o755)
met_file = os.path.join(prod_dir, "{}.met.json".format(id))
ds_file = os.path.join(prod_dir, "{}.dataset.json".format(id))
aoi = []
track = []
full_id_hash = reference_result.get('full_id_hash', None)
this_aoi = reference_result.get('aoi', None)
if this_aoi:
aoi.append(this_aoi)
this_track = reference_result.get('track', None)
if this_track:
track.append(this_track)
if full_id_hash:
track, aoi = util.get_complete_track_aoi_by_hash(full_id_hash, track, aoi)
logger.info("publish_result : Final AOI : {}, Final Track : {}".format(aoi, track))
logger.info("\n\npublish_result: PUBLISHING %s : " %id)
#with open(met_file) as f: md = json.load(f)
md = {}
md['id'] = id
md['aoi'] = aoi
md['reference_orbit'] = reference_result.get('orbit_name', '')
md['reference_orbit_quality_passed'] = reference_result.get('orbit_quality_check_passed', '')
md['reference_tract_land'] = reference_result.get('Track_POEORB_Land', '')
md['reference_total_acqusition_land'] = reference_result.get('ACQ_Union_POEORB_Land', '')
md['pair_created'] = reference_result.get('result', '')
md['track_number'] = track
md['failure_reason'] = reference_result.get('fail_reason', '')
md['comment'] = reference_result.get('comment', '')
md['starttime'] = update_dateformat(reference_result.get('starttime', ''))
md['endtime'] = update_dateformat(reference_result.get('endtime', ''))
md['reference_area_threshold_passed'] = reference_result.get('area_threshold_passed', '')
md['reference_date'] = update_dateformat(reference_result.get('dt', ''))
md['reference_delta_area_sqkm'] = reference_result.get('delta_area', '')
md['reference_delta_area_pixel'] = reference_result.get('res', '')
md['union_geojson'] = reference_result.get('union_geojson', '')
md['reference_dropped_ids']=reference_result.get('master_dropped_ids', [])
md['full_id_hash']=reference_result.get('full_id_hash', '')
md['reference_acquisitions'] = reference_result.get('master_acquisitions', [])
md['secondary_acquisitions'] = reference_result.get('slave_acquisitions', [])
md['reference_scenes'] = reference_result.get('master_scenes', [])
md['secondary_scenes'] = reference_result.get('slave_scenes', [])
md['secondary_date'] = update_dateformat(reference_result.get('dt', ''))
md['failed_orbit'] = reference_result.get('failed_orbit', '')
with open(met_file, 'w') as f: json.dump(md, f, indent=2)
logger.info("publish_result : creating dataset file : %s" %ds_file)
util.create_dataset_json(id, version, met_file, ds_file)
def print_groups(grouped_matched):
for track in grouped_matched["grouped"]:
logger.info("\nTrack : %s" %track)
for day_dt in sorted(grouped_matched["grouped"][track], reverse=True):
logger.info("\tDate : %s" %day_dt)
for acq in grouped_matched["grouped"][track][day_dt]:
logger.info("\t\t %s" %acq[0])
def group_acqs_by_track_date_from_metadata(frames):
logger.info("group_acqs_by_track_date_from_metadata")
return util.group_acqs_by_track_multi_date(create_acqs_from_metadata(frames))
def create_acqs_from_metadata(frames):
acqs = []
logger.info("frame length : %s" %len(frames))
for acq in frames:
logger.info("create_acqs_from_metadata : %s" %acq['id'])
acq_obj = util.create_acq_obj_from_metadata(acq)
if acq_obj:
acqs.append(acq_obj)
return acqs
def get_covered_acquisitions_by_track_date(aoi, acqs, threshold_pixel, orbit_file, orbit_dir, platform, result_file, selected_track_list):
#util.print_acquisitions(aoi['id'], util.create_acqs_from_metadata(acqs))
logger.info("\nget_covered_acquisitions_by_track_date")
#logger.info(acqs)
logger.info("PROCESSING AOI : %s : \nlocation %s" %(aoi['id'], aoi['location']))
grouped_matched = util.group_acqs_by_track_date_from_metadata(acqs) #group_acqs_by_track(acqs)
logger.info("grouped_matched Done")
print_groups(grouped_matched)
matched_ids = list(grouped_matched["acq_info"].keys())
#logger.info("grouped_matched : %s" %grouped_matched)
logger.info("matched_ids : %s" %matched_ids)
logger.info("PLATFORM : %s" %platform)
orbit_type = "P"
orbit_file = os.path.basename(orbit_file)
mission = "S1A"
if platform == "Sentinel-1B":
mission = "S1B"
selected_track_acqs = {}
result_track_acqs = {}
logger.info("Tracks to process : %s" %grouped_matched["grouped"])
for track in grouped_matched["grouped"]:
logger.info("get_covered_acquisitions_by_track_date : Processing track : %s" %track)
if len(selected_track_list)>0:
if int(track) not in selected_track_list:
logger.info("%s not in selected_track_list %s. So skipping this track" %(track, selected_track_list))
continue
selected_track_dt_acqs = {}
result_track_dt_acqs = {}
for track_dt in grouped_matched["grouped"][track]:
filtered_acd_ids, dropped_ids = util.filter_acq_ids(grouped_matched["acq_info"], grouped_matched["grouped"][track][track_dt])
logger.info("filtered_acd_ids : %s" %filtered_acd_ids)
            valid_orbit = False
            valid_orbit_err = ''
            orbit_name = orbit_file.split('.EOF')[0].strip()
            result = {}
try:
selected, result, removed_ids = gtUtil.water_mask_check(track, track_dt, grouped_matched["acq_info"], filtered_acd_ids, aoi['location'], aoi['id'], threshold_pixel, mission, orbit_type, orbit_file, orbit_dir)
valid_orbit = True
orbit_name = orbit_file.split('.EOF')[0].strip()
if len(removed_ids)>0:
logger.info("Removed Acquisitions by WaterMaskTest : %s" %removed_ids)
for acq_id in removed_ids:
logger.info("removing %s from filtered_acd_ids" %acq_id)
filtered_acd_ids.remove(acq_id)
logger.info("filtered_acd_ids : %s:" %filtered_acd_ids)
except InvalidOrbitException as err:
selected = False
valid_orbit = False
valid_orbit_err = err
result['orbit_name']= orbit_name
result['track'] = track
result['master_dropped_ids'] = dropped_ids
result_track_dt_acqs[track_dt] = result
starttime, endtime = util.get_start_end_time2(grouped_matched["acq_info"], filtered_acd_ids)
result['starttime'] = starttime
result['endtime'] = endtime
result['union_geojson']=aoi['location']
#master_dt_str = util.get_time_str_with_format(track_dt, "%Y%m%dT%H%M%S")
logger.info("master_dt_str : %s" %track_dt)
result['list_master_dt'] = track_dt
result['list_slave_dt'] = track_dt
result['master_count'] = 1
result['slave_count'] = 0
if selected:
logger.info("SELECTED : aoi : %s track : %s track_dt : %s" %(aoi['id'], track, track_dt))
selected_acqs = []
for acq_id in filtered_acd_ids:
acq = grouped_matched["acq_info"][acq_id]
#acq.pv = pv #util.get_processing_version(acq.identifier)
#util.update_grq(acq_id, acq.pv)
logger.info("APPENDING : %s" %acq_id)
selected_acqs.append(acq)
selected_track_dt_acqs[track_dt] = selected_acqs
result['orbit_quality_check_passed']=True
else:
result['result'] = False
id_hash = '0000'
result['orbit_quality_check_passed']=False
result['failed_orbit'] = 'reference'
publish_result(result, id_hash)
if not valid_orbit:
raise InvalidOrbitException(valid_orbit_err)
try:
with open(result_file, 'a') as fo:
cw = csv.writer(fo, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
cw.writerow([result.get('dt', ''), result.get('orbit_name', ''), "Primary", result.get('track', ''),result.get('Track_POEORB_Land', '') , result.get('ACQ_Union_POEORB_Land', ''), result.get('delta_area', ''), result.get('res', ''), result.get('area_threshold_passed', ''), result.get('WATER_MASK_PASSED', ''), result.get('primary_ipf_count', ''), result.get('secondary_ipf_count', ''), result.get('BL_PASSED', ''), result.get('matched', ''), result.get('candidate_pairs', ''), result.get('fail_reason', ''), result.get('comment', ''), result.get('Track_AOI_Intersection', ''), result.get('ACQ_POEORB_AOI_Intersection', '')])
except Exception as err:
logger.info("\n\nERROR Writing to csv file : %s" %str(err))
traceback.print_exc()
selected_track_acqs[track] = selected_track_dt_acqs
logger.info("CHECK: selected_track_acqs[track] : %s" %selected_track_acqs[track])
result_track_acqs[track] = result_track_dt_acqs
#exit (0)
logger.info("get_covered_acquisitions_by_track_date returns : %s" %selected_track_acqs)
return selected_track_acqs, result_track_acqs
def get_covered_acquisitions(aoi, acqs, threshold_pixel, orbit_file):
#util.print_acquisitions(aoi['id'], util.create_acqs_from_metadata(acqs))
logger.info("AOI : %s" %aoi['location'])
grouped_matched = util.group_acqs_by_orbit_number_from_metadata(acqs) #group_acqs_by_track(acqs)
matched_ids = list(grouped_matched["acq_info"].keys())
#logger.info("grouped_matched : %s" %grouped_matched)
logger.info("matched_ids : %s" %matched_ids)
selected_track_acqs = {}
for track in grouped_matched["grouped"]:
selected_orbitnumber_acqs = {}
for orbitnumber in grouped_matched["grouped"][track]:
selected = gtUtil.water_mask_check(track, orbitnumber, grouped_matched["acq_info"], grouped_matched["grouped"][track][orbitnumber], aoi['location'], aoi['id'], threshold_pixel, orbit_file)
if selected:
logger.info("SELECTED")
selected_acqs = []
for pv in grouped_matched["grouped"][track][orbitnumber]:
for acq_id in grouped_matched["grouped"][track][orbitnumber][pv]:
acq = grouped_matched["acq_info"][acq_id]
if not acq.pv:
acq.pv = pv #util.get_processing_version(acq.identifier)
#util.update_grq(acq_id, acq.pv)
logger.info("APPENDING : %s" %acq_id)
selected_acqs.append(acq)
selected_orbitnumber_acqs[orbitnumber] = selected_acqs
selected_track_acqs[track] = selected_orbitnumber_acqs
#exit (0)
return selected_track_acqs
def query_aoi_acquisitions(starttime, endtime, platform, orbit_file, orbit_dir, threshold_pixel, acquisition_version, selected_track_list, selected_aoi_list):
"""Query ES for active AOIs that intersect starttime and endtime and
find acquisitions that intersect the AOI polygon for the platform."""
#aoi_acq = {}
orbit_aoi_data = {}
es_index = "grq_*_*acquisition*"
es_index = "grq_%s_acquisition-s1-iw_slc/acquisition-S1-IW_SLC/" %(acquisition_version)
logger.info("query_aoi_acquisitions : es_index : %s" %es_index)
aois = None
if len(selected_aoi_list)>0:
aois = get_aois_by_id(selected_aoi_list)
else:
aois = query_aois_new(starttime, endtime)
logger.info("No of AOIs : %s " %len(aois))
logger.info("aois : %s" %aois)
if not aois or len(aois) <=0:
logger.info("Existing as NO AOI Found")
sys.exit(0)
for aoi in aois:
logger.info("aoi: {}".format(aoi['id']))
query = {
"query": {
"filtered": {
"query": {
"bool": {
"must": [
{
"term": {
"dataset_type.raw": "acquisition"
}
},
{
"term": {
"version.raw": acquisition_version
}
},
{
"term": {
"metadata.platform.raw": platform
}
},
{
"range": {
"starttime": {
"lte": endtime
}
}
},
{
"range": {
"endtime": {
"gte": starttime
}
}
}
],
"must_not": {
"term": {
"metadata.tags": "deprecated"
}
}
}
},
"filter": {
"geo_shape": {
"location": {
"shape": aoi['location']
}
}
}
}
},
"partial_fields" : {
"partial" : {
"include" : [ "id", "dataset_type", "dataset", "metadata", "city", "continent", "starttime", "endtime"]
}
}
}
logger.info(query)
acqs = [i['fields']['partial'][0] for i in query_es(query, es_index)]
logger.info("Found {} acqs for {}: {}".format(len(acqs), aoi['id'],
json.dumps([i['id'] for i in acqs], indent=2)))
#logger.info("ALL ACQ of AOI : \n%s" %acqs)
        if len(acqs) <=0:
            logger.info("Excluding AOI %s as no acquisitions there" %aoi['id'])
            continue
selected_track_acqs = {}
result_file = "RESULT_SUMMARY_%s.csv" %aoi['id']
with open(result_file, 'w') as fo:
cw = csv.writer(fo, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
cw.writerow(["Date", "Orbit", "Type", "Track","Track_Land","Total_Acquisition_Land", "delta_area_sqkm", "delta_area_pixel", "area_threshold_passed", "Orbit_Quality_Test_Passed", "Reference_Unique_IPF_Count", "Secondary_Unique_IPF_Count", "BlackList_Test_Passed", "Enumeration_Passed", "Candidate_Pairs", "Failure_Reason", "comment","Track_AOI_Intersection", "ACQ_POEORB_AOI_Intersection"])
selected_track_acqs, result_track_acqs = get_covered_acquisitions_by_track_date(aoi, acqs, threshold_pixel, orbit_file, orbit_dir, platform, result_file, selected_track_list)
if len(list(selected_track_acqs.keys()))==0:
logger.info("Nothing selected from AOI %s " %aoi['id'])
continue
#for acq in acqs:
aoi_data = {}
aoi_priority = aoi.get('metadata', {}).get('priority', 0)
# ensure highest priority is assigned if multiple AOIs resolve the acquisition
#if acq['id'] in acq_info and acq_info[acq['id']].get('priority', 0) > aoi_priority:
#continue
aoi_data['aoi_id'] = aoi['id']
aoi_data['aoi_location'] = aoi['location']
aoi_data['priority'] = aoi_priority
aoi_data['selected_track_acqs'] = selected_track_acqs
aoi_data['result_track_acqs'] = result_track_acqs
orbit_aoi_data[aoi['id']] = aoi_data
#acq_info[aoi_data['id']] = acq
#aoi_acq[aoi] = acq_info
#logger.info("Acquistions to localize: {}".format(json.dumps(acq_info, indent=2)))
if len(list(orbit_aoi_data.keys()))<=0:
logger.info("Existing as NOTHING selected for any aois")
sys.exit(0)
return orbit_aoi_data
def resolve_s1_slc(identifier, download_url, project):
"""Resolve S1 SLC using ASF datapool (ASF or NGAP). Fallback to ESA."""
# determine best url and corresponding queue
vertex_url = "https://datapool.asf.alaska.edu/SLC/SA/{}.zip".format(identifier)
r = requests.head(vertex_url, allow_redirects=True)
if r.status_code == 403:
url = r.url
queue = "{}-job_worker-small".format(project)
elif r.status_code == 404:
url = download_url
queue = "factotum-job_worker-scihub_throttled"
else:
raise RuntimeError("Got status code {} from {}: {}".format(r.status_code, vertex_url, r.url))
return url, queue
class DatasetExists(Exception):
"""Exception class for existing dataset."""
pass
def get_temporal_baseline(ctx):
temporalBaseline = 24
if 'temporalBaseline' in ctx:
temporalBaseline = int(ctx['temporalBaseline'])
return temporalBaseline
def resolve_aoi_acqs(ctx_file):
"""Resolve best URL from acquisitions from AOIs."""
# read in context
with open(ctx_file) as f:
ctx = json.load(f)
project = 'grfn'
logger.info("PROJECT : %s" %project)
priority = int(ctx["job_priority"])
minMatch = int(ctx["minMatch"])
dataset_version = ctx["dataset_version"]
acquisition_version = ctx["acquisition_version"]
threshold_pixel = int(ctx["threshold_pixel"])
job_type, job_version = ctx['job_specification']['id'].split(':')
skip_days = int(ctx.get("skipDays", 0))
selected_track_list = []
try:
if "track_numbers" in ctx and ctx["track_numbers"] is not None:
track_numbers = ctx["track_numbers"].strip()
if track_numbers:
track_numbers_list = track_numbers.split(',')
for tn in track_numbers_list:
selected_track_list.append(int(tn))
except:
pass
selected_aoi_list = []
try:
if "aoi_name" in ctx and ctx["aoi_name"] is not None:
aois = ctx["aoi_name"].strip()
logger.info("passed aoi: %s" %aois)
if aois:
aoi_list = aois.split(',')
logger.info(aoi_list)
for aoi in aoi_list:
selected_aoi_list.append(aoi.strip())
except:
pass
selected_aoi_list = list(set(selected_aoi_list))
logger.info("selected_aoi_list : %s" %selected_aoi_list)
logger.info("selected_track_list : %s" %selected_track_list)
logger.info("skip_days : %s" %skip_days)
#Find Orbit File Info
orbit_file = None
orbit_file_dir =os.path.basename(ctx["localize_urls"][0]["url"])
for file in os.listdir(orbit_file_dir):
if file.endswith(".EOF"):
orbit_file = os.path.join(orbit_file_dir, file)
if not orbit_file:
raise RuntimeError("Orbit File NOT Found")
else:
logger.info("Orbit File : %s " %orbit_file)
orbit_aoi_data = query_aoi_acquisitions(ctx['starttime'], ctx['endtime'], ctx['platform'], orbit_file, orbit_file_dir, threshold_pixel, acquisition_version, selected_track_list, selected_aoi_list)
#osaka.main.get("http://aux.sentinel1.eo.esa.int/POEORB/2018/09/15/S1A_OPER_AUX_POEORB_OPOD_20180915T120754_V20180825T225942_20180827T005942.EOF")
#logger.info(orbit_aoi_data)
#exit(0)
# build args
#queue = ctx["recommended-queues"][0]
queue = "system-jobs-queue"
singlesceneOnly = True
precise_orbit_only = True
job_data = {}
job_data["project"] = project
'''
job_data["spyddder_extract_version"] = spyddder_extract_version
job_data["standard_product_ifg_version"] = standard_product_ifg_version
job_data["acquisition_localizer_version"] = acquisition_localizer_version
job_data["standard_product_localizer_version"] = standard_product_localizer_version
'''
job_data["job_type"] = job_type
job_data["job_version"] = job_version
job_data["job_priority"] = ctx['job_priority']
job_data['orbit_file'] = orbit_file
job_data['minMatch'] = minMatch
job_data['threshold_pixel'] = threshold_pixel
job_data["acquisition_version"] = acquisition_version
job_data["selected_track_list"] = selected_track_list
job_data["skip_days"] = skip_days
orbit_data = {}
orbit_data['starttime'] = ctx['starttime']
orbit_data['endtime'] = ctx['endtime']
orbit_data['platform'] = ctx['platform']
orbit_data['orbit_file'] = orbit_file
orbit_acq_selections = {}
orbit_acq_selections["job_data"] = job_data
orbit_acq_selections["orbit_aoi_data"] = orbit_aoi_data
orbit_acq_selections["orbit_data"] = orbit_data
return orbit_acq_selections
def main():
# read in _context.json
context_file = os.path.abspath("_context.json")
if not os.path.exists(context_file):
raise RuntimeError("Context file doesn't exist.")
resolve_aoi_acqs(context_file)
if __name__ == "__main__":
sys.exit(main())
|
# -*- coding: utf-8 -*-
from scrapy.selector import Selector
from scrapy import Request
import re
import execjs
from gorden_crawler.spiders.shiji_base import BaseSpider
from gorden_crawler.items import BaseItem, ImageItem, SkuItem, Color
import copy
import json
import logging
import difflib
import requests
import datetime
class AsosSpider(BaseSpider):
#class AsosSpider(RedisSpider):
name = "asos"
base_url = "http://us.asos.com"
allowed_domains = ["us.asos.com", "asos-media.com"]
start_urls = [
'http://us.asos.com/?hrd=1',
'http://us.asos.com/women/outlet/',
'http://us.asos.com/men/outlet/',
]
custom_settings = {
'COOKIES_ENABLED': True,
'DOWNLOAD_DELAY': 0.2,
'DOWNLOAD_TIMEOUT': 30,
'RETRY_TIMES': 20,
# 'DOWNLOADER_MIDDLEWARES': {
# # 'gorden_crawler.middlewares.MyCustomDownloaderMiddleware': 543,
# 'scrapy.downloadermiddleware.useragent.UserAgentMiddleware': None,
# 'gorden_crawler.contrib.downloadmiddleware.rotate_useragent.RotateUserAgentMiddleware':1,
# 'gorden_crawler.middlewares.proxy_ats.ProxyMiddleware': 100,
# 'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 110,
# }
}
def start_requests(self):
for url in self.start_urls:
yield Request(url, dont_filter=True, cookies={'asos': 'currencyid=1'})
def make_requests_from_url(self, url):
return Request(url, dont_filter=True, cookies={'asos': 'currencyid=1'})
#yield list items of some brand
def parse(self, response):
sel = Selector(response)
response_url = response.url
if response_url == "http://us.asos.com/?hrd=1":
# women
url_a_s_women = sel.xpath('//li[contains(@class, "floor_1")]//div[@class="sub-floor-menu"]/dl[1]//ul[@class="items"]//li/a')
            li_urls = {}  # key is ProductType, value is the URL for that ProductType
flag=False
for a in url_a_s_women:
if len(a.xpath('text()').extract()) == 0:
continue
li_text=a.xpath('text()').extract()[0]
li_href=a.xpath('@href').extract()[0]
if li_text == 'Accessories':
flag=True
if li_text == 'Packs SAVE' or li_text == 'Packs Save':
flag=False
if flag:
li_urls[li_text]=li_href
if len(li_urls) > 0:
for li_item in li_urls.items():
item=BaseItem()
item['type']='base'
item['product_type'] = li_item[0]
item['gender'] = 'women'
li_url=li_item[1]
if item['product_type'] == "Beauty":
yield Request(li_url, callback=self.parse_beauty, cookies={'asos': 'currencyid=1'}, meta={'item': item})
else:
yield Request(li_url, callback=self.parse_categories, cookies={'asos': 'currencyid=1'}, meta={'item': item})
#men
url_a_s_men=sel.xpath('//li[contains(@class, "floor_2")]//div[@class="sub-floor-menu"]/dl[1]//ul[@class="items"]//li/a')
            li_urls = {}  # key is ProductType, value is the URL for that ProductType
flag=False
for a in url_a_s_men:
if len(a.xpath('text()').extract()) == 0:
continue
li_text=a.xpath('text()').extract()[0]
li_href=a.xpath('@href').extract()[0]
if li_text == 'Accessories':
flag=True
if li_text == 'Packs SAVE' or li_text == 'Packs Save':
flag=False
if flag:
li_urls[li_text]=li_href
if len(li_urls)>0:
for li_item in li_urls.items():
item=BaseItem()
item['type']='base'
item['product_type']=li_item[0]
item['gender'] = 'men'
li_url=li_item[1]
yield Request(li_url, callback=self.parse_categories, cookies={'asos': 'currencyid=1'}, meta={'item': item})
if 'sale' in response.url or 'outlet' in response.url:
# response_url=="http://us.asos.com/men/outlet/" or response_url=="http://us.asos.com/women/outlet/":
# outlet women and men
# if 'women' in response_url: #response_url=="http://us.asos.com/men/outlet/":
product_type_str = sel.xpath('//div[@class="lside"]/div/h4/text()').extract()
if 'Shop by category' in product_type_str:
position = product_type_str.index('Shop by category')
elif 'Shop by Category' in product_type_str:
position = product_type_str.index('Shop by Category')
product_type_lis = sel.xpath('//div[@class="lside"]/div/h4')[position].xpath('./parent::*/ul/li')
# product_type_lis = sel.xpath('//div[@class="lside"]/div')[2].xpath('./ul/li')
# else:
# product_type_lis = sel.xpath('//div[@class="lside"]/div')[1].xpath('./ul/li')
for product_type_li in product_type_lis:
item = BaseItem()
item['type']='base'
if 'women' in response_url: #response_url == "http://us.asos.com/men/outlet/":
item['gender'] = 'women'
else:
item['gender'] = 'men'
url = product_type_li.xpath('./a/@href').extract()[0]
product_type = product_type_li.xpath('./a/text()').extract()[0]
item['product_type']=product_type
yield Request(url, callback=self.parse_categories, cookies={'asos': 'currencyid=1'}, meta={'item': item})
def parse_beauty(self, response):
sel=Selector(response)
item = response.meta['item']
cat_divs = sel.xpath('//div[@class="boxes"]/div')
for cat_div in cat_divs:
url = cat_div.xpath('./a/@href').extract()[0]
yield Request(url, callback=self.parse_categories, cookies={'asos': 'currencyid=1'}, meta={'item': item})
def parse_categories(self, response):
sel=Selector(response)
item=response.meta['item']
# if 'on_sale' in response.meta.keys():
category_link_list_a = sel.xpath('//div[@data-id="attribute_989"]//ul/li/a')
# else:
# category_link_list_a = sel.xpath('//ul[@class="link-list"]/li/a')
for category_link_a in category_link_list_a:
item_category = category_link_a.xpath('./span[@class="facetvalue-name"]/text()').extract()
if len(item_category) > 0:
item['category']= item_category[0]
# if 'New In' in item['category']:
# continue
url=category_link_a.xpath('./@href').extract()[0]
if not re.match(r'^http:\/\/', url):
url = re.findall(r'[^\?]+', response.url)[0] + url
yield Request(url, callback=self.parse_category, cookies={'asos': 'currencyid=1'}, meta={'item': item})
def parse_category(self, response):
sel=Selector(response)
item=response.meta['item']
li_url=response.url
li_uri=re.findall(r'[^\?]+', li_url)[0]
#tail_str='&pgeSize=36&sort=-1'
#m=re.match(r"http://us.asos.com/[^\&]+", li_url)
#li_pages=sel.xpath('//ol[@class="page-nos"]/li')
#if li_pages:
# total_items=sel.xpath('//div[@id="pagingHeader"]//span[@class="total-items"]/text()').extract()[0]
# page_count=int(total_items)/36
# if int(total_items)%36 !=0:
# page_count=page_count+1
# max_page_num=li_pages[-2:-1].xpath('./a/text()').extract()[0]
# for pge in range(page_count):
# item_list_url=m.group(0)+'&pge='+str(pge)+tail_str
# next_page_uris = sel.xpath('//li[@class="next"]/a')
# for next_page_uri in next_page_uris:
# if re.search(r'Next',next_page_uri.xpath('text()').extract()[0]):
#
# next_uri = str(next_page_uri.xpath('@href').extract()[0])
# if not re.search(r'^http:\/\/', next_uri):
# url=li_uri+ next_uri
# else:
# url = next_uri
#
copy_item = copy.deepcopy(item)
# yield Request(url, callback=self.parse_category, cookies={'asos': 'currencyid=1'}, meta={'item': copy_item})
item_lis = sel.xpath('//div[@class="results three-grid"]/ul/li')
# print item_lis
for item_li in item_lis:
item_url = item_li.xpath('./a/@href').extract()[0]
if not re.match(r'^http:\/\/', item_url):
item_url = self.base_url + item_url
item['url'] = item_url
item['cover'] = item_li.xpath('.//img[@class="product-img"]/@src').extract()[0]
# rrp_price_sel=item_li.xpath('.//span[@class="recRP rrp"]/text()').extract()
# if len(rrp_price_sel) > 0 and rrp_price_sel[0]:
# item['list_price'] = rrp_price_sel[0]
# item['current_price'] = item_li.xpath('.//div[@class="productprice"]/span[@class="price outlet-current-price"]/text()').extract()[0]
# else:
# item['list_price']=item_li.xpath('.//div[@class="productprice"]/span[@class="price"]/text()').extract()[0]
#
# current_price_sel=item_li.xpath('.//span[contains(@class, "prevPrice")]/text()').extract()
#
# if len(current_price_sel) > 0 and current_price_sel[0]:
# item['current_price']= current_price_sel[0]
# else:
# item['current_price']= item['list_price']
current_price_sel = item_li.xpath('.//div[@class="price-wrap price-current"]/span[@class="price"]/text()').extract()
if len(current_price_sel) > 0 and current_price_sel[0]:
item['current_price'] = current_price_sel[0]
list_price_sel = item_li.xpath('.//div[@class="price-wrap price-previous"]/span[@class="price"]/text()').extract()
if len(list_price_sel) > 0 and list_price_sel[0]:
item['list_price'] = list_price_sel[0]
else:
item['list_price'] = item['current_price']
product_id = item_li.xpath('./@data-productid').extract()[0]
if 'prod/pgeproduct.aspx' in item_url:
item_url = item_url.replace('prod/pgeproduct.aspx', 'prd/'+str(product_id))
yield Request(item_url, callback=self.parse_item, meta={'item': item}, cookies={'asos': 'currencyid=1'})
total_counts = sel.xpath('//span[@class="total-results"]/text()').extract()[0]
if ',' in total_counts:
total_counts = total_counts.replace(',', '')
current_url = response.url
if 'pge=' in response.url:
current_page = int(re.search('pge=(\d+)', response.url).group(1)) + 1
else:
current_page = 1
        if int(total_counts) % 36 > 0:
            last_page = int(total_counts)/36 + 1
        else:
            last_page = int(total_counts)/36
if current_page < last_page:
next_page = current_page + 1
if 'pge=' in response.url:
next_url = re.sub('pge=\d+', 'pge=' + str(next_page-1), current_url)
else:
next_url = current_url + '&pge=1'
yield Request(next_url, callback=self.parse_category, cookies={'asos': 'currencyid=1'}, meta={'item': copy_item})
def parse_item(self, response):
item=response.meta['item']
return self.handle_parse_item(response, item)
def parse_stock(self, response):
if not response.body:
return
item=response.meta['item']
related_products_url = response.meta['related_products_url']
goods_details = json.loads(response.body)
if 'variants' not in goods_details[0]:
return
sku_infos = goods_details[0]['variants']
handle_sku_infos = {}
for sku in sku_infos:
handle_sku_infos[sku['variantId']] = sku
final_skus = []
for sku in item['skus']:
if handle_sku_infos[sku['id']]['isInStock'] == True:
sku['current_price'] = handle_sku_infos[sku['id']]['price']['current']['value']
sku['list_price'] = handle_sku_infos[sku['id']]['price']['previous']['value']
final_skus.append(sku)
item['skus'] = final_skus
parse_media_url = 'http://video.asos-media.com/products/test-desc/' + str(item['show_product_id']) + '-catwalk-AVS.m3u8'
try:
req = requests.head(parse_media_url)
if req.ok:
req = requests.get(parse_media_url)
media_uri = re.search('(ASOS/_media.+?)\.m3u8', req.text).group(1)
media_url = 'http://video.asos-media.com/products/' + media_uri
item['media_url'] = media_url
except Exception as e:
logging.error('error media url: '+ parse_media_url + ' error msg: ' + str(e))
yield Request(related_products_url, callback=self.parse_related_products, meta={"item": item})
def parse_related_products(self, response):
item = response.meta['item']
related_items = json.loads(response.body)
related_items_id = []
if 'products' in related_items.keys():
for related_item_detail in related_items['products']:
if related_item_detail['product']['isInStock'] == True:
related_items_id.append(related_item_detail['product']['id'])
if related_items_id:
item['related_items_id'] = related_items_id
yield item
def handle_parse_item(self, response, item):
if re.match(r'^http:\/\/us\.asos\.com\/mp_sp\/',response.url):
sel = Selector(response)
url = sel.xpath('//li[@id="mp_li_cnti"]/a/@href').extract()[0]
yield Request(url, callback=self.parse_item, cookies={'asos': 'currencyid=1'}, meta={'item': item})
else:
skus=[]
sel=Selector(response)
json_info = re.search("view\(\'(.+\})\'\,", response.body)
if not json_info:
return
else:
json_info = json_info.group(1)
json_info = "".join(json_info)
json_info = json_info.decode("string-escape")
goods_detail = json.loads(json_info)
descs = sel.xpath('//div[@class="overflow-container"]/div/div')
item['desc'] = ''
for desc in descs:
item['desc'] = item['desc'] + desc.extract()
item['title'] = goods_detail['name']
if 'brandName' not in goods_detail.keys():
item['brand'] = 'asos'
else:
item['brand'] = goods_detail['brandName']
item['from_site'] = self.name
if 'price' not in goods_detail.keys():
return
item['current_price'] = goods_detail['price']['current']
if float(goods_detail['price']['previous']) != 0:
item['list_price'] = goods_detail['price']['previous']
elif float(goods_detail['price']['rrp']) != 0:
item['list_price'] = goods_detail['price']['rrp']
else:
item['list_price'] = goods_detail['price']['current']
item['show_product_id'] = goods_detail['id']
sizes = []
colors = []
for sku in goods_detail['variants']:
skuItem = SkuItem()
skuItem['type'] = "sku"
skuItem['from_site'] = self.name
skuItem['is_outof_stock'] = False
skuItem['id'] = sku['variantId']
skuItem['show_product_id'] = goods_detail['id']
skuItem['current_price'] = item['current_price']
skuItem['list_price'] = item['list_price']
skuItem['size'] = sku['size']
if sku['size'] not in sizes:
sizes.append(sku['size'])
skuItem['color'] = sku['colour']
if sku['colour'] not in colors:
colors.append(sku['colour'])
skus.append(skuItem)
for color_name in colors:
images = []
for image in goods_detail['images']:
if image['colour'] == '' or (image['colour'] and color_name and len(image['colour']) == len(color_name) and (len(color_name) - difflib.SequenceMatcher(None,color_name,image['colour']).ratio()*len(color_name)) <=1):
imageItem = ImageItem()
imageItem['image'] = image['url'] + '?$XXL$'
imageItem['thumbnail'] = image['url']
images.append(imageItem)
color = Color()
color['type'] = 'color'
color['from_site'] = self.name
color['show_product_id'] = goods_detail['id']
color['images'] = images
color['name'] = color_name
color['cover'] = images[0]['image']
yield color
item['skus'] = skus
item['sizes'] = list(set(sizes))
item['dimensions'] = ['size']
item['colors'] = colors
            related_products_url = 'http://us.asos.com/api/product/catalogue/v2/productgroups/ctl/' + str(item['show_product_id']) + '?store=US&store=US&currency=USD'
            yield Request('http://us.asos.com/api/product/catalogue/v2/stockprice?productIds=' + str(goods_detail['id']) + '&store=US&currency=USD', callback=self.parse_stock, meta={'item': item, 'related_products_url': related_products_url})
# color_size_str="".join(re.findall(r"var\s+arrSzeCol_ctl00_ContentMainPage_ctlSeparateProduct[^<]+", response.body))
# sep_image_str="".join(re.findall(r"var\s+arrSepImage_ctl00_ContentMainPage_ctlSeparateProduct[^<]+", response.body))
# thumb_image_str="".join(re.findall(r"var\s+arrThumbImage_ctl00_ContentMainPage_ctlSeparateProduct[^<]+", response.body))
# if len(color_size_str)>0:
# context = execjs.compile('''
# %s
# %s
# %s
# function get_color_size(){
# return arrSzeCol_ctl00_ContentMainPage_ctlSeparateProduct;
# }
# function get_sep_image(){
# return arrSepImage_ctl00_ContentMainPage_ctlSeparateProduct;
# }
# function get_thumb_image(){
# return arrThumbImage_ctl00_ContentMainPage_ctlSeparateProduct;
# }
# ''' % (color_size_str, sep_image_str, thumb_image_str))
# color_sizes = context.call('get_color_size')
# sep_image= context.call('get_sep_image')
# thumb_images = context.call('get_thumb_image')
# #import pdb;pdb.set_trace()
# if len(sel.xpath('//div[@id="ctl00_ContentMainPage_ctlSeparateProduct_pnlOutofStock"]').extract()) > 0:
# return
#
# if len(sel.xpath('//span[@id="ctl00_ContentMainPage_ctlSeparateProduct_lblProductTitle"]/text()').extract()) > 0:
# item['title']=sel.xpath('//span[@id="ctl00_ContentMainPage_ctlSeparateProduct_lblProductTitle"]/text()').extract()[0]
#
# data_dic_str = sel.xpath('//script[@id="dataDictionary"]/text()')
#
# product_data_str=data_dic_str.re(r'^var Product\s*=\s*({.*?});')[0]
# product_data=eval(product_data_str)
# item['show_product_id']=product_data['ProductIID']
# desc=sel.xpath('//div[@id="ctl00_ContentMainPage_productInfoPanel"]//ul')
# if len(desc)>0:
# item['desc']=desc.extract()[0]
# item['brand']=product_data['ProductBrand']
# item['from_site']=self.name
#
            # '''This part had serious problems, so it was commented out'''
# # gender_category_str=product_data['ProductCategory']
# # m=re.search(r'(.+)\|(.+)', gender_category_str)
# # if m:
# # item['gender']=m.group(1).strip()
# # m=re.search(r'(.+)\|(.+)', gender_category_str)
# # if m:
# # item['category']=m.group(2).strip()
#
# sku_data_str = data_dic_str.re(r'var ProductChildSkuInfo\s*=\s*({.*?});')[0]
# sku_data=eval(sku_data_str)
# sku_data_list=sku_data['ChildSkuInfo'][item['show_product_id']]
# #color_list=sel.xpath('//select[@id="ctl00_ContentMainPage_ctlSeparateProduct_drpdwnColour"]').extract()
# if color_sizes:
# '''handle color and image'''
#
# # thumbnail_lis=sel.xpath('//ul[@class="productThumbnails"]//li//img/@src')
# # image_lis=sel.xpath('//div[@id="productImages"]//img/@src')
# # if len(thumbnail_lis)>0:
# # for i in range(len(thumbnail_lis)):
# # imageItem=ImageItem()
# # imageItem['image']=image_lis[i].extract()
# # imageItem['thumbnail']=thumbnail_lis[i].extract()
# # images.append(imageItem)
# #left three imageItem
# images=[]
# for thumb_image in thumb_images:
# imageItem=ImageItem()
# imageItem['image']=thumb_image[2]
# imageItem['thumbnail']=thumb_image[0]
# images.append(imageItem)
#
# item_color_names=[]
# #all color names of item
#
# sep_image_dict = {}
# for sep_image_arr in sep_image:
# key = sep_image_arr[3]
# sep_image_dict[key] = {'image': sep_image_arr[2], 'thumbnail': sep_image_arr[0]}
#
# color_names = sel.xpath('//div[@id="ctl00_ContentMainPage_ctlSeparateProduct_pnlColour"]//option/@value')[1:].extract()
# for color_name in color_names:
#
# lower_color_name = color_name.lower()
# if '/' in lower_color_name:
# lower_color_name_2 = lower_color_name.replace('/', '')
# else:
# lower_color_name_2 = lower_color_name
# if lower_color_name not in sep_image_dict.keys() and lower_color_name_2 not in sep_image_dict.keys():
# return
# imageItem=ImageItem()
# imageItem['thumbnail']= sep_image_dict[lower_color_name_2]['thumbnail']
# imageItem['image']= sep_image_dict[lower_color_name_2]['image']
# images.insert(0, imageItem)
# # import pdb;pdb.set_trace()
# color=Color()
# color['type'] ='color'
# color['from_site'] = self.name
# color['show_product_id'] = product_data['ProductIID']
# color['images'] = images
# color['name'] = color_name
# color['cover'] = sep_image_dict[lower_color_name_2]['thumbnail']
#
# yield color
#
# item_color_names.append(color_name)
# '''handle price'''
# #list_price_sel=sel.xpath('//span[@id="ctl00_ContentMainPage_ctlSeparateProduct_lblRRP"]')
# sizes=[]
# for color_size in color_sizes:
# size_id = color_size[0]
# size = color_size[1]
# if not size.strip():
# size = 'onesize'
#
# if color_size[3] == "False":
# continue
#
# original_color_name = color_size[2]
# for color_name in item_color_names:
# tmp_color_name = re.sub(r'[^\w]', '', color_name)
#
# if tmp_color_name == original_color_name:
# original_color_name = color_name
#
# skuItem=SkuItem()
# skuItem['type']="sku"
# skuItem['from_site']=self.name
# skuItem['is_outof_stock']=False
# skuItem['id']=sku_data_list[str(size_id)+original_color_name]['Sku']
# #skuItem['id']=color_size[0]
# skuItem['show_product_id']=product_data['ProductIID']
# skuItem['current_price']= color_size[5]
#
# if color_size[6] == color_size[5] and color_size[8] != '0' and color_size[8] != '0.00':
# skuItem['list_price']= color_size[8]
# else:
# skuItem['list_price']= color_size[6]
#
# sizes.append(size)
# skuItem['color'] = original_color_name
# skuItem['size'] = size
# skus.append(skuItem)
#
# item['skus']=skus
# item['sizes']=list(set(sizes))
# item['dimensions']=['size']
# item['colors'] = item_color_names
# size_info = sel.xpath('//a[@id="ctl00_ContentMainPage_SizeGuideButton_SizeGuideLink"]/@href')
# if size_info:
# item['size_info'] = size_info.extract()[0]
# if not re.match(r'^http', size_info.extract()[0]):
# item['size_info'] = self.base_url + size_info.extract()[0]
# yield item |
import json
import logging
import msgpack
log = logging.getLogger(__name__)
class QuacGeneralProcessor:
def __init__(self, args):
self.train_file = args.train_file
self.dev_file = args.dev_file
self.save_train_file = args.save_train_file
self.save_dev_file = args.save_dev_file
self.args = args
def run(self):
train_raw_data = self.load_raw_data(self.train_file)
if train_raw_data:
log.info('loaded quac train data')
quac_train_data = self.dialogues_unpack(train_raw_data)
if self.msgpack_dump(quac_train_data, self.save_train_file):
del train_raw_data
del quac_train_data
log.info('saved quac preprocessed train data')
else:
log.info('failed saving quac preprocessed train data')
else:
log.info('failed loading quac train data')
dev_raw_data = self.load_raw_data(self.dev_file)
if dev_raw_data:
log.info('loaded quac dev data')
quac_dev_data = self.dialogues_unpack(dev_raw_data)
if self.msgpack_dump(quac_dev_data, self.save_dev_file):
del dev_raw_data
del quac_dev_data
log.info('saved quac preprocessed dev data')
else:
log.info('failed loading quac dev data')
    def load_raw_data(self, file):
        with open(file, encoding="utf8") as f:
            data = json.load(f)['data']
        return data
    def msgpack_dump(self, data, file):
        with open(file, 'wb') as f:
            msgpack.dump(data, f)
        return True
def dialogues_unpack(self, data):
quac_data = []
for conv in data:
dialogue = []
dialogue_json = {
"title": conv['title'],
"background": conv['background'],
"section_title": conv['section_title'],
"context": conv["paragraphs"][0]['context'],
"id": conv["paragraphs"][0]['id']
}
for qa in conv["paragraphs"][0]['qas']:
question = qa['question']
answers = qa['orig_answer']
answer = answers['text']
answer_start = answers['answer_start']
answer_end = answers['answer_start'] + len(answers['text'])
answer_choice = 0 if answer == 'CANNOTANSWER' else \
1 if qa['yesno'] == 'y' else \
2 if qa['yesno'] == 'n' else \
3 # Not a yes/no question
"""
0: Do not ask a follow up question!
1: Definitely ask a follow up question!
2: Not too important, but you can ask a follow up.
"""
answer_followup = 0
if qa['followup'] == "n":
answer_followup = 0
elif qa['followup'] == "y":
answer_followup = 1
else:
answer_followup = 2
if answer_choice == 0:
answer_start, answer_end = -1, -1
ans_ls = []
for ans in qa['answers']:
ans_ls.append(ans['text'])
qa_pair = {
"question": question,
"answer": answer,
"answer_start": answer_start,
"answer_end": answer_end,
"answer_choice": answer_choice,
"answer_followup": answer_followup,
"ans_ls": ans_ls
}
dialogue.append(qa_pair)
dialogue_json['dialogue'] = dialogue
quac_data.append(dialogue_json)
return quac_data
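# Minimal usage sketch (file names below are placeholders): the processor only
# needs an object exposing train_file, dev_file, save_train_file and save_dev_file.
if __name__ == "__main__":
    import argparse
    logging.basicConfig(level=logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument('--train_file', default='train_v0.2.json')
    parser.add_argument('--dev_file', default='val_v0.2.json')
    parser.add_argument('--save_train_file', default='quac_train.msgpack')
    parser.add_argument('--save_dev_file', default='quac_dev.msgpack')
    QuacGeneralProcessor(parser.parse_args()).run()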
|
from collections import defaultdict
import gzip
import re
# Code snipped from:
# https://gist.github.com/slowkow/8101481
GTF_HEADER = ['seqname', 'source', 'feature', 'start', 'end', 'score', 'strand', 'frame']
R_SEMICOLON = re.compile(r'\s*;\s*')
R_COMMA = re.compile(r'\s*,\s*')
R_KEYVALUE = re.compile(r'(\s+|\s*=\s*)')
def gff_lines(filename):
"""Open an optionally gzipped GTF file and generate a dict for each line.
"""
fn_open = gzip.open if filename.endswith('.gz') else open
with fn_open(filename) as fh:
for line in fh:
if line.startswith('#'):
continue
else:
yield gff_parse(line)
def gff_parse(line):
"""Parse a single GTF line and return a dict.
"""
result = {}
fields = line.rstrip().split('\t')
for i, col in enumerate(GTF_HEADER):
result[col] = _get_value(fields[i])
# INFO field consists of "key1=value;key2=value;...".
infos = [x for x in re.split(R_SEMICOLON, fields[8]) if x.strip()]
for i, info in enumerate(infos, 1):
# It should be key="value".
try:
key, _, value = re.split(R_KEYVALUE, info, 1)
# But sometimes it is just "value".
except ValueError:
key = 'INFO{}'.format(i)
value = info
# Ignore the field if there is no value.
if value:
result[key] = _get_value(value)
return result
def _get_value(value):
if not value:
return None
# Strip double and single quotes.
value = value.strip('"\'')
# Return a list if the value has a comma.
if ',' in value:
value = re.split(R_COMMA, value)
# These values are equivalent to None.
elif value in ['', '.', 'NA']:
return None
return value |
import numpy as np
from typing import List, Tuple
def hard_disk(x, y, r=1.0, center=(0, 0), scale=100):
"""
A circular barrier
"""
xc, yc = center
v_grid = np.zeros((len(y), len(x)))
barrier_cond = np.add.outer((y - yc) ** 2, (x - xc) ** 2) <= r ** 2
v_grid[barrier_cond] = scale
return v_grid
def multiple_hard_disks(x, y, rs: List[float], centers: List[Tuple[float, float]], scales: List[float] = None):
"""
Multiple circular barriers
"""
if scales is None:
scales = [100]*len(rs)
v = sum(hard_disk(x, y, r, c, s) for r, c, s in zip(rs, centers, scales))
return v
def gravity_and_floor(x, y, floor, g=1, scale=100):
"""
A hard wall at the bottom of the view and a linear downward potential.
Note: y increases in the downward direction.
"""
v = np.zeros((len(y), len(x)))
v += g*np.abs(y[:,None]-floor)
v[y>floor] = scale
return v
def ring_with_gaps(x, y, radius, width=0.1, height=1e6, gap_angle=30 * np.pi / 180, num_gaps=5, x_center=0.5,
y_center=0.5):
"""
A ring with gaps
"""
V = np.zeros_like(np.outer(y, x))
# radius
dsq = np.add.outer((y - y_center) ** 2, (x - x_center) ** 2)
cond_circle = (dsq < (radius + width / 2) ** 2) & (dsq > (radius - width / 2) ** 2)
# angle
phis = np.arctan(np.divide.outer((y - y_center) + 1e-6, (x - x_center) + 1e-6))
phis[:, x < x_center] = phis[:, x < x_center] + np.pi
phis = phis + np.pi / 2
    cond_angle = np.zeros_like(phis, dtype=bool)
for i in range(num_gaps):
start_angle = i * 2 * np.pi / num_gaps
cond_gap = ((phis > start_angle) & (phis <= (start_angle + gap_angle)))
cond_angle = cond_angle | cond_gap
V[cond_circle & ~cond_angle] = height
return V
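# Minimal usage sketch (grid bounds and resolution are arbitrary choices):
# evaluate a couple of the potentials above on a shared grid.
if __name__ == "__main__":
    x = np.linspace(0.0, 1.0, 200)
    y = np.linspace(0.0, 1.0, 200)
    v_ring = ring_with_gaps(x, y, radius=0.3)
    v_disks = multiple_hard_disks(x, y, rs=[0.05, 0.05], centers=[(0.3, 0.3), (0.7, 0.7)])
    print(v_ring.shape, v_disks.shape)  # both are (len(y), len(x)) = (200, 200)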
|
from javax.swing import *
from java.awt import *
from java.awt.event import *
from variables import *
from javax.swing.table import DefaultTableModel
import sys
from importFiles import *
from JMRIlistAll import *
class ListAllJmriLocations(JDialog):
def __init__(self):
JDialog.__init__(self, None, 'All JMRI Locations', True)
#self.setDefaultCloseOperation(DO_NOTHING_ON_CLOSE)
self.setSize(900,250)
self.setLayout(BorderLayout())
self.setLocation(100,100)
self.preferredSize = 900,250
self.setBackground(Color.LIGHT_GRAY)
self.add(self.add_jmri_table(),BorderLayout.CENTER)
self.setVisible(True)
def add_jmri_table(self):
southPanel = JPanel()
#northPanel.setLayout(BorderLayout())
titleText = JLabel(' This is a read-only list of all JMRI Operations locations. ')
titleText.setForeground(Color.decode("#000dff"))
titleText.setBackground(Color.decode("#000000"))
titleText.setFont(Font("Serif", Font.PLAIN, 20))
southPanel.add(titleText)
self.add(southPanel,BorderLayout.SOUTH)
northPanel = JPanel()
#northPanel.setLayout(BorderLayout())
noteText = JLabel(' JMRI Operations Locations ')
noteText.setForeground(Color.decode("#000dff"))
noteText.setBackground(Color.decode("#000000"))
noteText.setFont(Font("Serif", Font.PLAIN, 24))
northPanel.add(noteText)
self.add(northPanel,BorderLayout.NORTH)
eastPanel = JPanel()
eastPanel.setLayout(BoxLayout(eastPanel, BoxLayout.Y_AXIS))
eastPanel.preferredSize = (Dimension(150,1))
cancel = JButton('Cancel', actionPerformed = self.cancel_data)
eastPanel.add(Box.createRigidArea(Dimension(25, 25)))
eastPanel.add(Box.createRigidArea(Dimension(25, 25)))
eastPanel.add(cancel)
self.add(eastPanel,BorderLayout.EAST)
westPanel = JPanel()
westPanel.setLayout(BoxLayout(westPanel, BoxLayout.Y_AXIS))
westPanel.preferredSize = (Dimension(150,1))
westPanel.add(Box.createRigidArea(Dimension(25, 25)))
westPanel.add(Box.createRigidArea(Dimension(25, 25)))
self.add(westPanel,BorderLayout.WEST)
panel = JPanel()
panel.setLayout(BoxLayout(panel, BoxLayout.Y_AXIS))
panel.preferredSize = (Dimension(125,1))
colNames = ('Location','Tracks')
dataModel = DefaultTableModel(self.sort_jmri_data(), colNames)
self.table = JTable(dataModel)
self.table.getTableHeader().setReorderingAllowed(0)
scrollPane = JScrollPane()
scrollPane.setPreferredSize(Dimension(300,100))
scrollPane.getViewport().setView((self.table))
panel.add(scrollPane)
return panel
def cancel_data(self, event):
#print "got to cancel data"
global changes
changes = []
#print changes
self.dispose()
def sort_jmri_data(self):
'''Sort list of locations from Operations XML file for table display'''
try:
#JMRI = Locations() #locationXML
wholeList = get_jmri_loc_track_tags()
self.temp = []
self.tableData = []
for loc in wholeList:
for item in loc:
location = isinstance(item, dict)
if location :
self.temp.append(item.get('name'))
self.temp.append(" ")
self.tableData.append(self.temp)
self.temp = []
track = isinstance(item, list)
if track :
for tracks in item:
self.temp.append(" ")
self.temp.append(tracks.get('name'))
self.tableData.append(self.temp)
self.temp = []
return self.tableData
except:
print "something wrong with sort_data"
print "Unexpected error: ", sys.exc_info()[0], sys.exc_info()[1]
if __name__ == "__main__":
    print "Running ListAllJmriLocations"
    me = ListAllJmriLocations()
import visa
rm = visa.ResourceManager()
print(rm.list_resources())
inst = rm.open_resource('GPIB0::23::INSTR')
print(inst.query("*IDN?")) |
import requests
from bs4 import BeautifulSoup
import random
import os
import click
l=[] #For storing random numbers
choice = int(input("How many random images do you want to download?\n"))
def download_img(data,filename): #Function to download images
    if not os.path.isdir('XKCD'):  # Create the download folder if it doesn't exist yet
        os.mkdir('XKCD')
    op_file = open('XKCD/'+filename, 'wb')
    op_file.write(data)  # write the image bytes to disk
    op_file.close()
print "Downloaded",filename
@click.command()
@click.option('--image',is_flag=True,help="Allows you to download XKCD images")
def cli(image):
if(image):
for i in range(choice):
l.append(str(random.randint(1,1933))) #Last comic till date is 1933
for i in l:
url="https://xkcd.com/"+str(i)+"/"
r=requests.get(url)
soup=BeautifulSoup(r.content,'html.parser')
filename=str(soup.select('#ctitle')).split('">')
filename=filename[1].split('<')
filename=filename[0] #Getting filename using string manip
img_url=soup.select('#comic')
img_url=str(img_url).split('src=')[1]
img_url='https:'+img_url.split('"')[1]
            download_img(requests.get(img_url).content,filename+'.png') #Calling the function for each selected comic
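# Entry point (assumed): invoke the click command when this script is run directly.
if __name__ == '__main__':
    cli()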
|
import pyspark.sql.types as t
from datalakebundle.table.schema.TableSchemaGenerator import TableSchemaGenerator
schema = t.StructType(
[
t.StructField("FIELD1", t.IntegerType()),
t.StructField("FIELD2", t.DoubleType()),
t.StructField("FIELD3", t.DoubleType()),
t.StructField(
"STRUCT1",
t.StructType(
[
t.StructField("NESTED_FIELD1", t.StringType()),
t.StructField(
"STRUCT2",
t.StructType(
[
t.StructField("NESTED_FIELD2", t.StringType()),
],
),
),
],
),
),
],
)
expected_result = """def get_schema():
return TableSchema(
[
t.StructField("FIELD1", t.IntegerType()),
t.StructField("FIELD2", t.DoubleType()),
t.StructField("FIELD3", t.DoubleType()),
t.StructField(
"STRUCT1",
t.StructType(
[
t.StructField("NESTED_FIELD1", t.StringType()),
t.StructField(
"STRUCT2",
t.StructType(
[
t.StructField("NESTED_FIELD2", t.StringType()),
],
),
),
],
),
),
],
# primary_key="", # INSERT PRIMARY KEY(s) HERE (OPTIONAL)
# partition_by="" # INSERT PARTITION KEY(s) HERE (OPTIONAL)
# tbl_properties={} # INSERT TBLPROPERTIES HERE (OPTIONAL)
)
"""
assert TableSchemaGenerator().generate(schema) == expected_result
schema = t.StructType(
[
t.StructField("FIELD1", t.IntegerType()),
t.StructField("FIELD2", t.DoubleType()),
t.StructField("FIELD3", t.DoubleType()),
t.StructField(
"ARRAY1",
t.ArrayType(
t.StructType(
[
t.StructField("NESTED_ARRAY_FIELD1", t.StringType()),
t.StructField("NESTED_ARRAY_FIELD2", t.StringType()),
t.StructField("NESTED_ARRAY_FIELD3", t.ArrayType(t.StringType())),
],
),
),
),
],
)
expected_result = """def get_schema():
return TableSchema(
[
t.StructField("FIELD1", t.IntegerType()),
t.StructField("FIELD2", t.DoubleType()),
t.StructField("FIELD3", t.DoubleType()),
t.StructField(
"ARRAY1",
t.ArrayType(
t.StructType(
[
t.StructField("NESTED_ARRAY_FIELD1", t.StringType()),
t.StructField("NESTED_ARRAY_FIELD2", t.StringType()),
t.StructField("NESTED_ARRAY_FIELD3", t.ArrayType(t.StringType())),
],
),
),
),
],
# primary_key="", # INSERT PRIMARY KEY(s) HERE (OPTIONAL)
# partition_by="" # INSERT PARTITION KEY(s) HERE (OPTIONAL)
# tbl_properties={} # INSERT TBLPROPERTIES HERE (OPTIONAL)
)
"""
assert TableSchemaGenerator().generate(schema) == expected_result
schema = t.StructType(
[
t.StructField("FIELD1", t.IntegerType()),
t.StructField("FIELD2", t.DoubleType()),
t.StructField("FIELD3", t.DoubleType()),
t.StructField(
"ARRAY1",
t.ArrayType(
t.ArrayType(t.StringType()),
),
),
],
)
expected_result = """def get_schema():
return TableSchema(
[
t.StructField("FIELD1", t.IntegerType()),
t.StructField("FIELD2", t.DoubleType()),
t.StructField("FIELD3", t.DoubleType()),
t.StructField(
"ARRAY1",
t.ArrayType(
t.ArrayType(t.StringType()),
),
),
],
# primary_key="", # INSERT PRIMARY KEY(s) HERE (OPTIONAL)
# partition_by="" # INSERT PARTITION KEY(s) HERE (OPTIONAL)
# tbl_properties={} # INSERT TBLPROPERTIES HERE (OPTIONAL)
)
"""
assert TableSchemaGenerator().generate(schema) == expected_result
schema = t.StructType(
[
t.StructField("FIELD1", t.IntegerType()),
t.StructField("FIELD2", t.DoubleType()),
t.StructField("FIELD3", t.DoubleType()),
t.StructField(
"ARRAY1",
t.ArrayType(
t.ArrayType(
t.StructType(
[
t.StructField(
"VERY_BADLY_NESTED_ARRAY_OF_ARRAY_OF_ARRAY_OF_DOUBLES",
t.ArrayType(
t.ArrayType(
t.ArrayType(t.DoubleType()),
),
),
),
],
),
),
),
),
],
)
expected_result = """def get_schema():
return TableSchema(
[
t.StructField("FIELD1", t.IntegerType()),
t.StructField("FIELD2", t.DoubleType()),
t.StructField("FIELD3", t.DoubleType()),
t.StructField(
"ARRAY1",
t.ArrayType(
t.ArrayType(
t.StructType(
[
t.StructField(
"VERY_BADLY_NESTED_ARRAY_OF_ARRAY_OF_ARRAY_OF_DOUBLES",
t.ArrayType(
t.ArrayType(
t.ArrayType(t.DoubleType()),
),
),
),
],
),
),
),
),
],
# primary_key="", # INSERT PRIMARY KEY(s) HERE (OPTIONAL)
# partition_by="" # INSERT PARTITION KEY(s) HERE (OPTIONAL)
# tbl_properties={} # INSERT TBLPROPERTIES HERE (OPTIONAL)
)
"""
assert TableSchemaGenerator().generate(schema) == expected_result
|
from abc import ABC, abstractmethod
from typing import Union
import numpy as np
class ForwardCurve(ABC):
"""
Abstract base class for deterministic forward curves.
Examples:
Equity: F(T) = S_0 * Div(T) / Disc(T) (more generally includes dividends, borrow cost, etc.)
FX: F(T) = FX_0 * Div_f(T) / Div_d(T)
Rates: F(T) = IBOR(T), the forward rate for some IBOR curve, e.g. LIBOR 3M
Commodity: F(T) = Futures(T), ie. some interpolation of the futures curve
"""
@abstractmethod
def spot(self) -> float:
""" Spot price. In some cases this is the actual spot (e.g. Equity/FX), otherwise it is F(0) """
raise NotImplementedError
@abstractmethod
def fwd_T(self, T: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
"""
Forward at time T in the future
:param T: float or np.ndarray, time(s) in the future
:return: float or np.ndarray, forward(s) at time(s) in the future
"""
raise NotImplementedError
def __call__(self, T: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
"""
Forward at time T in the future. Ability to call term structure using ()
:param T: float or np.ndarray, time(s) in the future
:return: float or np.ndarray, forward(s) at time(s) in the future
"""
return self.fwd_T(T)
def drift(self, t: float, T: float) -> float:
"""
Drift implied by the forward curve, implied over a time interval [t,T]
:param t: float, start time
:param T: float, end time
:return: float, drift implied over [t,T]
"""
return np.log(self.fwd_T(T)/self.fwd_T(t)) / (T - t)
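# Illustrative subclass (not part of the library): a flat continuously-compounded
# carry rate r, so F(T) = S_0 * exp(r * T) and drift(t, T) == r for all t < T.
class FlatForwardCurve(ForwardCurve):
    def __init__(self, spot: float, rate: float):
        self._spot = spot
        self._rate = rate
    def spot(self) -> float:
        return self._spot
    def fwd_T(self, T: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
        return self._spot * np.exp(self._rate * T)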
|
from collections import OrderedDict
import blueice.exceptions
import pytest
import numpy as np
from blueice import pdf_morphers
def test_morpher_api():
conf = dict(hypercube_shuffle_steps=2,
r_sample_points=2)
for name, morph_class in pdf_morphers.MORPHERS.items():
print("Testing %s" % name)
with pytest.raises(blueice.exceptions.NoShapeParameters):
morph_class(config=conf, shape_parameters=OrderedDict())
shape_pars = OrderedDict([('bla', ({-1: -1, 0: 0, 1: 1}, None, None))])
mr = morph_class(config=conf, shape_parameters=shape_pars)
aps = mr.get_anchor_points(bounds=[(-1, 1)], n_models=3)
assert isinstance(aps, list)
assert isinstance(aps[0], tuple)
def scalar_f(_):
return 0
scalar_itp = mr.make_interpolator(scalar_f, extra_dims=[], anchor_models={z: None for z in aps})
assert scalar_itp([0]) == 0
def matrix_f(_):
            return np.zeros((2, 2))
        matrix_itp = mr.make_interpolator(matrix_f, extra_dims=[2, 2], anchor_models={z: None for z in aps})
np.testing.assert_array_equal(matrix_itp([0]), np.zeros((2, 2)))
if __name__ == '__main__':
pytest.main() |
# coding: utf-8
from __future__ import unicode_literals
import pytest
# fmt: off
TEXTS = ("作为语言而言,为世界使用人数最多的语言,目前世界有五分之一人口做为母语。",)
JIEBA_TOKENIZER_TESTS = [
(TEXTS[0],
['作为', '语言', '而言', ',', '为', '世界', '使用', '人', '数最多',
'的', '语言', ',', '目前', '世界', '有', '五分之一', '人口', '做',
'为', '母语', '。']),
]
PKUSEG_TOKENIZER_TESTS = [
(TEXTS[0],
['作为', '语言', '而言', ',', '为', '世界', '使用', '人数', '最多',
'的', '语言', ',', '目前', '世界', '有', '五分之一', '人口', '做为',
'母语', '。']),
]
# fmt: on
@pytest.mark.parametrize("text", TEXTS)
def test_zh_tokenizer_char(zh_tokenizer_char, text):
tokens = [token.text for token in zh_tokenizer_char(text)]
assert tokens == list(text)
@pytest.mark.parametrize("text,expected_tokens", JIEBA_TOKENIZER_TESTS)
def test_zh_tokenizer_jieba(zh_tokenizer_jieba, text, expected_tokens):
tokens = [token.text for token in zh_tokenizer_jieba(text)]
assert tokens == expected_tokens
@pytest.mark.parametrize("text,expected_tokens", PKUSEG_TOKENIZER_TESTS)
def test_zh_tokenizer_pkuseg(zh_tokenizer_pkuseg, text, expected_tokens):
tokens = [token.text for token in zh_tokenizer_pkuseg(text)]
assert tokens == expected_tokens
def test_extra_spaces(zh_tokenizer_char):
# note: three spaces after "I"
tokens = zh_tokenizer_char("I like cheese.")
assert tokens[1].orth_ == " "
|
# Copyright 2016 Eotvos Lorand University, Budapest, Hungary
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Miscellaneous utility functions (not using HLIR)
from __future__ import print_function
import sys
import os
import pkgutil
global filename
global filepath
global genfile
global outfile
filename = "?"
filepath = "?"
genfile = "?"
outfile = "?"
errors = []
warnings = []
def addError(where, msg):
rootcwd = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import traceback
import itertools
sb = traceback.extract_stack()
res = list(itertools.dropwhile(lambda (mod, line, fun, code): mod == 'src/compiler.py', sb))
lineno = res[0][1]
res = [(genfile, lineno, res[0][2], res[0][3])] + res[1:-1]
try:
with open(genfile) as f:
lines = f.readlines()
origlineno = int(lines[lineno+1].split(" ")[-1])
with open(filepath) as f:
origlines = f.readlines()
res = [(filepath, origlineno, "...", origlines[origlineno].strip())] + res
except:
pass
res = [("." + path[len(rootcwd):] if path.startswith(rootcwd) else path, line, module, errmsg) for (path, line, module, errmsg) in res]
global errors
msg = "Error while {}: {}".format(where, msg)
if pkgutil.find_loader('backtrace'):
# uses the backtrace module to prettify output
import backtrace
btrace = backtrace._Hook(res, align=True)
errors += [msg] + [" " + msg for msg in btrace.generate_backtrace(backtrace.STYLES)]
else:
errors += [msg] + traceback.format_list(res)
def addWarning(where, msg):
global warnings
warnings += ["WARNING: " + msg + " (While " + where + ").\n"]
def showErrors():
global errors
for e in errors:
print(e, file=sys.stderr)
def showWarnings():
global warnings
for w in warnings:
print(w, file=sys.stderr)
disable_hlir_messages = False
def build_hlir(hlir):
"""Builds the P4 internal representation, optionally disabling its output messages.
Returns True if the compilation was successful."""
if disable_hlir_messages:
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
success = hlir.build()
if disable_hlir_messages:
sys.stdout = old_stdout
sys.stderr = old_stderr
return success
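# Typical use (sketch): record problems while processing, then report them at the end.
#   addWarning("generating parser code", "unsupported feature ignored")
#   showWarnings()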
|
import zen
import numpy
import unicodedata
import sys
sys.path.append('../zend3js/')
import d3js
import csv
import progressbar
import numpy.linalg as la
import matplotlib.pyplot as plt
plt.ioff()
from scipy.linalg import solve
from numpy import *
from time import sleep
import random
G_1=zen.io.gml.read('Billion_1.gml')
def print_top(G,v, num=5):
idx_list = [(i,v[i]) for i in range(len(v))]
idx_list = sorted(idx_list, key = lambda x: x[1], reverse=True)
for i in range(min(num,len(idx_list))):
nidx, score = idx_list[i]
print ' %i. %s (%1.4f)' % (i+1,G.node_object(nidx),score)
#####Eigenvector centrality
print '\nEigenvector Centrality (by Zen):'
V1= zen.algorithms.centrality.eigenvector_centrality_(G_1,weighted=True)
print_top(G_1,V1, num=5)
#Degree Centrality
print 'Degree Centrality'
N = G_1.num_nodes
A = G_1.matrix()
A_sum=[]
for i in range(G_1.num_nodes):
A_sumi=sum (A[i,:])
A_sum.append(A_sumi)
K=A_sum
print_top(G_1,K)
##### Betweenness Centrality
print '\nBetweenness Centrality'
V3=zen.algorithms.centrality.betweenness_centrality_(G_1,weighted=True)
print_top(G_1,V3, num=5)
###Degree Distribution
print 'The person who knows the most people (highest degree), though not'
print 'necessarily with in-depth connections to any single person, is:'
dist=[]
for i in range(G_1.num_nodes):
deg=G_1.degree_(i)
dist.append(deg)
plt.plot(numpy.arange(0, G_1.num_nodes), dist)
plt.show()
array=numpy.array(dist)
index=numpy.argmax(array)
print G_1.node_object(index)
#Diameter
print 'Diameter of the graph is:'
print zen.diameter(G_1)
#Modularity for industry
print 'Modularity of the graph is:'
c = { 'Real Estate': ['James Packer','Leonard Stern','Mitchell Goldhar','Anas Sefrioui','Samuel Tak Lee','Carlo Fidani','Angela Leong','Asok Kumar Hiranandani','Daniel Pritzker','Anthony Pritzker','Ina Chan','Bahaa Hariri','Dean White',
'Alexandra Schorghuber','Edward DeBartolo, Jr.','Kushal Pal Singh','Kwee brothers','Samih Sawiris','Thomas Pritzker','Alfred Taubman','Donald Bren','Kirk Kerkorian','Walter Kwok','Yang Huiyan','Jennifer Pritzker',
'Lin Rong San','Richard Marriott','Pansy Ho','John Gandel','John Pritzker','Marilyn Carlson Nelson','Penny Pritzker','Jean (Gigi) Pritzker','Nicholas Pritzker, II.','Raj Kumar & Kishin RK','Alexander Skorobogatko',
'Chen Lip Keong','Igor Olenicoff','Lawrence Ho','Linda Pritzker','Alexander Ponomarenko','Barbara Carlson Gage','Bill Marriott, Jr.','Eyal Ofer','Fredrik Lundberg','Eduardo Eurnekian','Georg von Opel','Edward Roski, Jr.',
'Karen Pritzker','Hui Wing Mau','Jay Robert (J.B.) Pritzker'],
'Diversed Financial': ['Isabel dos Santos','Jose Ermirio de Moraes Neto','Michael Milken','Robert Ziff','Yasseen Mansour','Sun Guangxin','Suna Kirac','Robert Kuok','Sebastian Pinera','Vincent Tan','Albert von Thurn und Taxis',
'Bassam Alghanim','Daniel Ziff','Alberto Cortina','Alexander Mamut','Chairul Tanjung','Charles Koch','Dirk Ziff','Stephan Schmidheiny','Andrew Tan','Clemmie Spangler, Jr.','Juan Abello','Mohamed Mansour',
'Stefan Olsson','Sukanto Tanoto','Zhang Hongwei','Robert Rowling','Bidzina Ivanishvili','Wilma Tisch','Kerr Neilson','Kutayba Alghanim','Neide Helena de Moraes','Youssef Mansour','Murdaya Poo','Joan Tisch',
'Li Ka-shing','Peter Kellogg','Prince Alwaleed Bin Talal Alsaud','Richard Scaife','Alberto Alcocer','Henry Hillman','Madeleine Olsson Ericksson','Antonia Johnson','Chen Jinxia','Anil Ambani',
'August von Finck','Dan Olsson','Alexander Vik','Antonio Ermirio de Moraes','David Koch','Eli Broad','Cheng Yu-tung','H. Wayne Huizenga','Jose Roberto Ermirio de Moraes','Maria Helena Moraes Scripilliti',
'Suleiman Kerimov','Vincent Bollore','Ermirio Pereira de Moraes','Ira Rennert','James Irving','Mustafa Rahmi Koc'],
'Consumer': ['Jorge Paulo Lemann','Liu Yongxing','Mary Alice Dorrance Malone','Patricia Matte','Anthony Pratt','Jeremy Jacobs, Sr.','Rishad Naoroji','Suh Kyung-Bae','Susanne Klatten','Tsai Eng-Meng','Jean Pierre Cayard',
'Robert Ingham','S. Curtis Johnson','Stefan Reimann-Andersen','Jean-Michel Besnier','John Fisher','Vikram Lal','Theo Mueller','Thomas Straumann','William Ford, Sr.','Alberto Bombassei','August Oetker',
'Charoen Sirivadhanabhakdi','Christopher Goldsbury','Robert Fisher','Robert Rich, Jr.','Sylvia Stroher','Winnie Johnson-Marquart','Aerin Lauder Zinterhofer','Ahsen Ozokur','Axel Oberwelland','Carl Ferdinand Oetker',
'Chung Mong-Koo','Alfred Oetker','Chey Tae-Won','Christian Oetker','Hans Peter Wild','Guilherme Peirao Leal','Hans Rausing','Ioan Niculae','Stefan Quandt','Charlotte Colket Weber','Cho Yang-Rai','Eliodoro Matte',
'Forrest Mars, Jr.','Imogene Powers Johnson','William Fisher','Wolfgang Reimann','Julia Oetker','Michael Hartono','Nicola Bulgari','Renate Reimann-Haas','Lei Jun','Stefan Persson','Wolfgang Herz','Ronald Lauder',
'Timothy Boyle','William Wrigley, Jr.','Anton Kathrein, Jr.','Ashwin Dani','Benedicta Chamberlain','Bennett Dorrance','Charlene de Carvalho-Heineken','Chung Mong-Joon','Helen Johnson-Leipold','Hubert d Ornano',
'Julio Ponce','Laurent Burelle','Leonard Lauder','Vladimir Scherbakov','Murat Ulker','Renzo Rosso','Liselott Persson','Paolo Bulgari','Marie Besnier Beauvalot','Matthias Reimann-Andersen','Kirsten Rausing',
'Nicolas Puech','Petro Poroshenko','Pier Luigi Loro Piana','Richard Oetker','Charles Bronfman','Heloise Waislitz','Jane Lauder','Alain Wertheimer','Bergit Douglas','Bernardo Matte','Carlos Ardila Lulle',
'Chung Eui-Sun','Daniela Herz','Andrea Della Valle','James Jannard','Richard Yuengling, Jr.','Andrei Guriev','David Murdock','Erika Pohl-Stroher','Fiona Geminder','Francisco Ivens de Sa Dias Branco','Gerard Wertheimer',
'Guenter Herz & Family','Jacqueline Mars','James Leprino','Prince Sultan bin Mohammed bin Saud Al Kabeer','Emanuele (Lino) Saputo','Horst Brandstaetter','H. Fisk Johnson','Herbert Louis','Ingeburg Herz',
'John Dorrance, III.','Rosely Schweizer','Vadim Moshkovich','Emmanuel Besnier','John Mars','Jose and Francisco Jose Calderon Rojas','Kjeld Kirk Kristiansen','Hanni Toosbuy Kasprzak','Johanna Quandt','Jean Burelle',
'Michael Herz','Michael Pieper','Patrizio Bertelli','Rahul Bajaj','Ravi Jaipuria','Miuccia Prada','Nobutada Saji'],
'Retail and Restaurant': ['Juan Roig','Robert Piccinini','S. Robson Walton','Albert Blokker','Alice Walton','Stefano Pessina','Walter Frey','Shin Dong-Bin','Dieter Schwarz','Shari Arison','Shin Dong-Joo','Anders Holch Povlsen',
'Dan Friedkin','Donald Hall','Els Blokker','Sergei Katsiev','David Sainsbury','Stelios Haji-Ioannou','Thomas Bruch','Anne Gittinger','Diego Della Valle','Igor Kesaev','Lee Myung-Hee','Micky Arison',
'Mark Shoen','Jim Walton','Carol Jenkins Barnett','Fernando Roig','Karl Albrecht','M.A. Yusuff Ali','Bruce Nordstrom','Belmiro de Azevedo','Abilio dos Santos Diniz','Ann Walton Kroenke','Chung Yong-Jin',
'Edward Stack','Drayton McLane, Jr.','Heidi Horten','Johan Johannson','Giuseppe De Longhi','Isidoro Alvarez','Joyce Raley Teel','Nancy Walton Laurie','Michael Klein'],
'Energy': ['Mokhzani Mahathir','Gordon Getty','Sid Bass','Robert Bass','Daisy Igel','George Kaiser','Vagit Alekperov','William Moncrief, Jr.','Ahmet Calik','Americo Amorim','Arthur Irving','Gian Marco Moratti','Daniel Harrison, III.',
'Rubens Ometto Silveira Mello','W. Herbert Hunt','William Koch','Evgeny (Eugene) Shvidler','Lee Bass','Lynn Schusterman','Mikhail Gutseriev','Ray Lee Hunt','Massimo Moratti','Mukesh Ambani','Farkhad Akhmedov',
'David Rockefeller, Sr.','Edward Bass','Folorunsho Alakija','Gennady Timchenko','Idan Ofer','Igor Makarov','Gregorio Perez Companc'],
'Mining and metals': ['Patrice Motsepe','Patricia Angelini Rossi','Ana Maria Brescia Cafferata','Bulat Utemuratov','Dan Gertler','Alex Beard','Angela Bennett','Roberto Angelini Rossi','Claude Dauphin','Gina Rinehart','Tor Peterson',
'Margarita Louis-Dreyfus','Monique Louis-Dreyfus','Rosa Brescia Cafferata','Kumar Birla','Oleg Deripaska','Marie-Jeanne Meyer','Aristotelis Mistakidis','Beny Steinmetz','Daniel Mate','Eduardo Hochschild',
'Edwin Soeryadjaya','Ivan Glasenberg','Desmond Sacco','Vladimir Potanin','Jim Justice, II.','Pavel Tykac'],
'Construction': ['Juan-Miguel Villar Mir','Arkady Rotenberg','Fahd Hariri','Riley Bechtel','Simonpietro Salini','Rossana Camargo de Arruda Botelho','D. Leopoldo Del Pino y Calvo-Sotelo','Francesco Saverio Salini','Zhu Xingliang',
'Alicia Koplowitz','Benu Gopal Bangur','Saad Hariri','Ali Ibrahim Agaoglu','Thomas Schmidheiny','Stephen Bechtel, Jr.','Walter Scott, Jr.','Yoshiko Mori','Maria Del Pino y Calvo-Sotelo','Pallonji Mistry',
'Renata de Camargo Nascimento','Mehmet Sinan Tara','Regina de Camargo Pires Oliveira Dias','Ayman Hariri','Elena Baturina','Ziyad Manasir','Rafael Del Pino y Calvo-Sotelo','Nassef Sawiris'],
'Non-consumer industrial': ['Magdalena Martullo-Blocher','Anita Zucker','Georg Schaeffler','Scott Duncan','Vladimir Lisin','Alexander Abramov','Roman Abramovich','Antti Herlin','Francisco Jose Riberas Mera','Friedhelm Loh','Hans Melchers',
'Catherine Lozick','Martin Viessmann','Max Turnauer','Milane Frantz','Randa Williams','Victor Pinchuk','Mitchell Jacobson','Lilian Werninghaus','Niklas Herlin','Miriam Blocher','Richard Kinder','Jose Maria Aristrain',
'Maria-Elisabeth Schaeffler','Rahel Blocher','Reinhold Wuerth','Bachtiar Karim','Dannine Avara','Ilkka Herlin','Ilona Herlin','Lakshmi Mittal','Dieter Schnabel'],
'Technology': ['Martin Haefner','Koo Bon-Neung','Sean Parker','Bent Jensen','David Cheriton','Eva Maria Bucher-Haefner','Evan Williams','Zhou Hongyi','Mark Vadon','Lee Boo-Jin','Lee Seo-Hyun','Venugopal Dhoot','Koo Bon-Moo',
'Jay Y. Lee','Lee Kun-Hee','Hong Ra-Hee','Azim Premji','H. Ross Perot, Jr.','Pat Stryker','Ronda Stryker','Stewart Rahr','Yusuf Hamied','Bernard (Barry) Sherman','Curt Engelhorn','Bulent Eczacibasi',
'Alberto Roemmers','Maja Oeri','Ludwig Merckle','Niels Peter Louis-Hansen','Pankaj Patel','Paul Ramsay','Phillip Frost','Randal Kirk','Henri Beaufour','Faruk Eczacibasi','Anne Beaufour','Shoji Uehara',
'Jacques Servier','Jon Stryker','Gary Michelson','Jeanine Dick','Frederik Paulsen'],
'Money Management': ['Aloysio de Andrade Faria','Joao Moreira Salles','Dinara Kulibaeva','Rupert Johnson, Jr.','Walther Moreira Salles Junior','Timur Kulibaev','Warren Stephens','Rolf Gerling','Yuri Kovalchuk','Austen Cargill, II.',
'Carlos Rodriguez-Pastor','Deniz Sahenk','Fayez Sarofim','Gwendolyn Sontheim Meyer','James Cargill, II.','Benjamin de Rothschild','Filiz Sahenk','Tsai Hong-tu','Lily Safra','Marianne Liebmann',
'Pedro Moreira Salles','Susan Hirt Hagen','Whitney MacMillan','Ana Lucia de Mattos Barretto Villela','Fernando Roberto Moreira Salles','Wee Cho Yaw','Lina Maria Aguiar','Pauline MacMillan Keinath',
'Lia Maria Aguiar','Nikolai Tsvetkov','Luis Enrique Yarur Rey','Othman Benjelloun','Jaime Gilinski Bacal','Alfredo Egydio Arruda Villela Filho','Abigail Johnson','Charles Johnson','Bernard Saul, II.',
'Jaime Botin','Helena Revoredo','Edward Johnson, III.','Husnu Ozyegin'],
'Media': ['Joao Roberto Marinho','Gary Magness','Indu Jain','Samuel Newhouse, Jr.','Stefan von Holtzbrinck','Emilio Azcarraga Jean','Sumner Redstone','Vladimir Yevtushenkov','Alan Rydge','Craig McCaw','Taha Mikati',
'Denis O Brien','Hary Tanoesoedibjo','Naguib Sawiris','Jim Kennedy','Lee Jay-Hyun','Yasumitsu Shigeta','Jose Roberto Marinho','Monika Schoeller','Najib Mikati','Andrei Kuzyaev','Blair Parry-Okeden','Krit Ratanarak',
'Donald Trump','Anne Cox Chambers','Brian Roberts','A. Jerrold Perenchio','Mark Cuban','Donald Newhouse','Friede Springer','Hubert Burda','Jonathan Harmsworth','Lee Hwa-Kyung','Richard Li','Mike Adenuga',
'Patrick McGovern'],
'Other': ['Stanley Kroenke','Helmut Sohmen','Steven Udvar-Hazy','Sunny Varkey','Ana Maria Marcondes Penido Sant Anna','Baba Kalyani','Finn Rausing','Jorn Rausing','Peter Sperling','James France','Victor Fung',
'Sergio Mantegazza','Soichiro Fukutake','James Irsay','Philip Niarchos','John Fredriksen','Otto Happel','Kjell Inge Rokke','William Fung','Klaus-Michael Kuehne','Ranjan Pai','Denise York','Vincent McMahon','John Doerr',
'Daniel Och','Julian Robertson, Jr.','John Arnold','Danil Khachaturov','Leon G. Cooperman','Shin Chang-Jae','George Soros'],
}
Q=zen.modularity(G_1,c)
print Q
#Power law
print 'Power law'
def calc_powerlaw(G,kmin):
ddist = zen.degree.ddist(G,normalize=False)
cdist = zen.degree.cddist(G,inverse=True)
k = numpy.arange(len(ddist))
plt.figure(figsize=(8,12))
plt.subplot(211)
plt.bar(k,ddist, width=0.8, bottom=0, color='b')
plt.subplot(212)
plt.loglog(k,cdist)
    # Maximum-likelihood estimate of the power-law exponent for the degree tail
    # k >= kmin (continuous approximation with the usual kmin - 0.5 correction).
    degrees = numpy.array([G.degree_(i) for i in range(G.num_nodes)], dtype=float)
    tail = degrees[degrees >= kmin]
    alpha = 1.0 + len(tail) / numpy.sum(numpy.log(tail / (kmin - 0.5)))
    print 'alpha = %1.2f' % alpha
plt.show()
calc_powerlaw(G_1,1)
|
#print("Hello"+ " " + "World")
#print('Enter Your name')
#val=input('Enter your name : ')
#print("The length of your name is - " +str(len(val)))
a=input("Enter value of a ")
b=input("Enter value of b ")
print("Swapping varibales ==" )
c=b
b=a
a=c
print(a)
print(b)
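# For reference, Python can also swap without a temporary variable:
# a, b = b, a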
|
import os
import time
import boto3
from sklearn.model_selection import train_test_split
import pickle
import cv2
import pytesseract
import spacy
#Comment out the next line if you are using a CPU to train your model
spacy.prefer_gpu()
from utils import evaluate_model
from utils import save_model
import random
from spacy.util import minibatch, compounding
from pathlib import Path
from spacy.gold import GoldParse
from spacy.scorer import Scorer
import pandas as pd
# Train new NER model
def train_new_NER(model=None, output_dir='models/', n_iter=100):
#Load the model, set up the pipeline and train the entity recognizer.
if model is not None:
nlp = spacy.load(model) # load existing spaCy model
print("Loaded model '%s'" % model)
else:
nlp = spacy.blank("en") # create blank Language class
print("Created blank 'en' model")
# create the built-in pipeline components and add them to the pipeline
if "ner" not in nlp.pipe_names:
ner = nlp.create_pipe("ner")
nlp.add_pipe(ner, last=True)
# otherwise, get it so we can add labels
else:
ner = nlp.get_pipe("ner")
# add labels
for _, annotations in training_data:
for ent in annotations.get("entities"):
ner.add_label(ent[2])
# get names of other pipes to disable them during training
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "ner"]
with nlp.disable_pipes(*other_pipes): # only train NER
# reset and initialize the weights randomly – but only if we're training a new model
if model is None:
nlp.begin_training()
print("Training Started...")
history_blank = []
for itn in range(n_iter):
random.shuffle(training_data)
losses = {}
# batch up the examples using spaCy's minibatch
batches = minibatch(training_data, size=compounding(4.0, 32.0, 1.001))
for batch in batches:
texts, annotations = zip(*batch)
nlp.update(
texts, # batch of texts
annotations, # batch of annotations
drop=0.2, # dropout - make it harder to memorize data
losses=losses,
)
print("Losses", losses)
epoch_path = output_dir + 'model_blank/epoch_' + str(itn)
nlp.to_disk(epoch_path)
if val is not None:
score_prf = evaluate_model(nlp,val)
history_blank.append({"epoch": itn, "losses": losses, "Precision": score_prf['ents_p'], "Recall": score_prf['ents_r'], "F1-score": score_prf['ents_f']})
data = pd.DataFrame(history_blank)
data.to_csv('history_blank_model.csv',index=False)
return nlp
## Update existing spacy model and store into a folder
def update_model(model='en_core_web_sm', output_dir='models/', n_iter=100):
#Load the model, set up the pipeline and train the entity recognizer.
if model is not None:
nlp = spacy.load(model) # load existing spaCy model
print("Loaded model '%s'" % model)
else:
nlp = spacy.blank("en") # create blank Language class
print("Created blank 'en' model")
# create the built-in pipeline components and add them to the pipeline
# nlp.create_pipe works for built-ins that are registered with spaCy
if "ner" not in nlp.pipe_names:
ner = nlp.create_pipe("ner")
nlp.add_pipe(ner, last=True)
# otherwise, get it so we can add labels
else:
ner = nlp.get_pipe("ner")
# add labels
for _, annotations in training_data:
for ent in annotations.get("entities"):
ner.add_label(ent[2])
# get names of other pipes to disable them during training
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "ner"]
with nlp.disable_pipes(*other_pipes): # only train NER
# reset and initialize the weights randomly – but only if we're
# training a new model
print("Training model...")
final_loss = []
if model is None:
nlp.begin_training()
else:
optimizer = nlp.resume_training()
history_pretrained = []
for itn in range(n_iter):
random.shuffle(training_data)
losses = {}
# batch up the examples using spaCy's minibatch
batches = minibatch(training_data, size=compounding(4.0, 32.0, 1.001))
for batch in batches:
texts, annotations = zip(*batch)
nlp.update(
texts, # batch of texts
annotations, # batch of annotations
drop=0.2, # dropout - make it harder to memorise data
sgd = optimizer,
losses=losses,
)
print("Losses", losses)
epoch_path = output_dir + 'model_pretrained/epoch_' + str(itn)
nlp.to_disk(epoch_path) # Make sure you don't use the SpaCy's large model because each model occupies 786 MB of data.
if val is not None:
score_prf = evaluate_model(nlp,val)
history_pretrained.append({"Epoch": itn, "losses": losses, "Precision": score_prf['ents_p'], "Recall": score_prf['ents_r'], "F1-score": score_prf['ents_f']})
data = pd.DataFrame(history_pretrained)
data.to_csv('history_pretrained_model.csv',index=False)
save_model(nlp, output_dir)
return nlp
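# Illustrative only: `training_data` and `val` (used above) are assumed to be
# module-level globals in spaCy v2's NER training format, e.g.:
#
#   training_data = [
#       ("Invoice issued by Acme Corp on 2020-01-01",
#        {"entities": [(18, 27, "ORG"), (31, 41, "DATE")]}),
#   ]
#   val = [...]  # same format, held out for evaluate_model()
#
# With those defined, a model could then be trained with, for example:
#   nlp = train_new_NER(model=None, output_dir='models/', n_iter=30)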
|
from django.test import TestCase, client
class SmokeTest(TestCase):
"""I.e., does it run?"""
    def setUp(self):
self.client = client.Client()
def fetch(self, path, status):
"""See if the specified page produces the expected HTTP status"""
response = self.client.get(path)
self.failUnlessEqual(response.status_code, status)
def test_pages(self):
self.fetch("/", 404)
self.fetch("/equipment/", 302)
## TODO: to use this test, create a fixture with the account credentials
# def test_access(self):
# response = self.client.login(path="/equipment/", username="instructor", password="instructor")
# self.failUnlessEqual(response.status_code, 200) |
from core.terraform.resources import TerraformResource
from core.config import Settings
from core.providers.aws.boto3 import batch
class BatchComputeEnvironmentResource(TerraformResource):
"""
Base resource class for Terraform AWS Batch compute environment resource
Attributes:
resource_instance_name (str): Type of resource instance
available_args (dict): Instance configurations
"""
resource_instance_name = "aws_batch_compute_environment"
available_args = {
'compute_environment_name': {'required': True, 'prefix': True, 'sep': '-'},
'compute_resources': {
'required': True,
'inline_args': {
'instance_role': {'required': True},
'instance_type': {'required': True},
'max_vcpus': {'required': True},
'min_vcpus': {'required': True},
'desired_vcpus': {'required': False},
'ec2_key_pair': {'required': False, 'prefix': True, 'sep': '_'},
'security_group_ids': {'required': True},
'subnets': {'required': True},
'resource_type': {'required': True, 'tf_arg_key': "type"},
'compute_resources_tags': {'required': False, 'tf_arg_key': "tags"}
}
},
'service_role': {'required': True},
'env_type': {'required': True, 'tf_arg_key': "type"},
'ecs_cluster_arn': {'required': False, 'prefix': True, 'sep': '-'},
}
def check_exists_before(self, input, tf_outputs):
"""
        Check if the resource already exists in AWS
Args:
input (instance): input object
tf_outputs (dict): Terraform output dictionary
Returns:
exists (boolean): True if already exists in AWS else False
checked_details (dict): Status of the existence check
"""
checked_details = {'attr': "name", 'value': self.get_input_attr('compute_environment_name')}
exists = False
if not self.resource_in_tf_output(tf_outputs):
exists = batch.check_compute_env_exists(
checked_details['value'],
input.aws_access_key,
input.aws_secret_key,
input.aws_region)
return exists, checked_details
class BatchJobDefinitionResource(TerraformResource):
"""
Base resource class for Terraform AWS Batch job definition resource
Attributes:
resource_instance_name (str): Type of resource instance
available_args (dict): Instance configurations
"""
resource_instance_name = "aws_batch_job_definition"
available_args = {
'name': {'required': True, 'prefix': True, 'sep': '-'},
'jd_type': {'required': True, 'tf_arg_key': "type"},
'retry_strategy': {
'required': True,
'inline_args': {
'attempts': {'required': True},
}
},
'container_properties': {'required': True},
'parameters': {'required': False}
}
def check_exists_before(self, input, tf_outputs):
"""
        Check if the resource already exists in AWS
Args:
input (instance): input object
tf_outputs (dict): Terraform output dictionary
Returns:
exists (boolean): True if already exists in AWS else False
checked_details (dict): Status of the existence check
"""
checked_details = {'attr': "name", 'value': self.get_input_attr('name')}
exists = False
if not self.resource_in_tf_output(tf_outputs):
exists = batch.check_job_definition_exists(
checked_details['value'],
input.aws_access_key,
input.aws_secret_key,
input.aws_region)
return exists, checked_details
class BatchJobQueueResource(TerraformResource):
"""
Base resource class for Terraform AWS Batch job queue resource
Attributes:
resource_instance_name (str): Type of resource instance
available_args (dict): Instance configurations
"""
resource_instance_name = "aws_batch_job_queue"
available_args = {
'name': {'required': True, 'prefix': True, 'sep': '-'},
'state': {'required': True},
'priority': {'required': True},
'compute_environments': {'required': True}
}
def check_exists_before(self, input, tf_outputs):
"""
        Check if the resource already exists in AWS
Args:
input (instance): input object
tf_outputs (dict): Terraform output dictionary
Returns:
            exists (boolean): True if the resource already exists in AWS, else False
checked_details (dict): Status of the existence check
"""
checked_details = {'attr': "name", 'value': self.get_input_attr('name')}
exists = False
if not self.resource_in_tf_output(tf_outputs):
exists = batch.check_job_queue_exists(
checked_details['value'],
input.aws_access_key,
input.aws_secret_key,
input.aws_region)
return exists, checked_details
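# Illustrative sketch, not part of the library: it shows how an `available_args`
# spec like the ones above could map plain config values onto Terraform argument
# names, assuming `tf_arg_key` overrides the key and `inline_args` describes a
# nested block. The real rendering is done by TerraformResource; this helper and
# the sample values in its docstring are hypothetical.
def _render_tf_args_sketch(spec, values):
    """Map config values onto Terraform argument names using an available_args spec.
    >>> _render_tf_args_sketch(BatchComputeEnvironmentResource.available_args,
    ...                        {'env_type': "MANAGED", 'service_role': "role-arn"})
    {'service_role': 'role-arn', 'type': 'MANAGED'}
    """
    rendered = {}
    for key, meta in spec.items():
        if 'inline_args' in meta and key in values:
            rendered[key] = _render_tf_args_sketch(meta['inline_args'], values[key])
        elif key in values:
            rendered[meta.get('tf_arg_key', key)] = values[key]
    return rendered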
|
# Write one password note per entry in filename.list, upload each note to the
# target FTP server over an anonymous session, then upload the flag archives.
import pathlib
from ftplib import FTP
ftp = FTP(
"10.10.10.5",
"anonymous",
passwd=""
)
fnames = open("filename.list","r")
fn = fnames.readlines()
plist = open("password.list","r")
pwd = plist.readlines()
for i in range(12):
pf = pathlib.Path(fn[i].rstrip('\n') +".txt")
pf.touch()
wf = open(fn[i].rstrip('\n')+".txt","w")
wf.write(pwd[i])
wf.close()
uf = open(fn[i].rstrip('\n')+".txt", "rb")
ftp.storlines("STOR "+fn[i].rstrip('\n')+".txt", uf)
ff = open("../flag1-1/flag.zip", "rb")
ftp.storbinary("STOR flag.zip", ff)
for i in range(12,35):
pf = pathlib.Path(fn[i].rstrip('\n') +".txt")
pf.touch()
wf = open(fn[i].rstrip('\n')+".txt","w")
wf.write(pwd[i])
wf.close()
uf = open(fn[i].rstrip('\n')+".txt", "rb")
ftp.storlines("STOR "+fn[i].rstrip('\n')+".txt", uf)
ff = open("../flag2-1/flag.zip", "rb")
ftp.storbinary("STOR flag.zip", ff)
for i in range(35,50):
pf = pathlib.Path(fn[i].rstrip('\n') +".txt")
pf.touch()
wf = open(fn[i].rstrip('\n')+".txt","w")
wf.write(pwd[i])
wf.close()
uf = open(fn[i].rstrip('\n')+".txt", "rb")
ftp.storlines("STOR "+fn[i].rstrip('\n')+".txt", uf)
ff = open("../flag3-1/flag.zip", "rb")
ftp.storbinary("STOR flag.zip", ff)
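# Refactor sketch (defined but not called): the three loops above differ only in
# their index range and which flag archive they upload, so the same work can be
# expressed once. The pathlib touch() is dropped because open(..., "w") already
# creates the file; everything else mirrors the logic above.
def upload_batch(ftp_conn, names, passwords, index_range, flag_path):
    """Upload one password note per name, then upload the flag archive."""
    for i in index_range:
        name = names[i].rstrip('\n') + ".txt"
        with open(name, "w") as wf:
            wf.write(passwords[i])
        with open(name, "rb") as uf:
            ftp_conn.storlines("STOR " + name, uf)
    with open(flag_path, "rb") as ff:
        ftp_conn.storbinary("STOR flag.zip", ff)
# Equivalent to the three blocks above:
# upload_batch(ftp, fn, pwd, range(12), "../flag1-1/flag.zip")
# upload_batch(ftp, fn, pwd, range(12, 35), "../flag2-1/flag.zip")
# upload_batch(ftp, fn, pwd, range(35, 50), "../flag3-1/flag.zip")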
|
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
LOG = logging.getLogger("transformers4rec")
def download(output_path):
"""Download YooChoose dataset.
Parameters
----------
output_path: str
Path to download the data to
    Returns
    -------
    output_path: str
"""
from kaggle import api as kaggle_api
kaggle_api.authenticate()
LOG.info("Downloading data from Kaggle...")
kaggle_api.dataset_download_files(
"chadgostopp/recsys-challenge-2015", path=output_path, unzip=True
)
return output_path
def process_clicks(data_path: str, device="gpu"):
"""Process YooChoose dataset.
Parameters
----------
data_path: str
Path to use to read the data from.
device: str, default: "gpu"
Device to use for processing clicks.
Returns
-------
Union[cudf.DataFrame, pandas.DataFrame]
"""
from .preprocessing import ( # type: ignore
add_item_first_seen_col_to_df,
remove_consecutive_interactions,
)
if device == "gpu":
import cudf
df = cudf.read_csv(
data_path,
sep=",",
names=["session_id", "timestamp", "item_id", "category"],
parse_dates=["timestamp"],
)
else:
import pandas as pd
df = pd.read_csv(
data_path,
sep=",",
names=["session_id", "timestamp", "item_id", "category"],
parse_dates=["timestamp"],
)
df = remove_consecutive_interactions(df)
df = add_item_first_seen_col_to_df(df)
return df
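# Hedged usage sketch tying the two helpers together. The file name
# "yoochoose-clicks.dat" is the conventional name inside the Kaggle archive and
# is an assumption here, as is falling back to the pandas ("cpu") path.
def _example_download_and_process(output_path="./yoochoose"):
    """Download the dataset and return the preprocessed clicks dataframe."""
    data_dir = download(output_path)
    return process_clicks(f"{data_dir}/yoochoose-clicks.dat", device="cpu")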
|
# Quick tests for the markup templatetags (django_markwhat)
import re
import unittest
from django.template import Template, Context
from django.utils.html import escape
try:
import textile
except ImportError:
textile = None
try:
import markdown
markdown_version = getattr(markdown, "version_info", 0)
except ImportError:
markdown = None
try:
import commonmark
except ImportError:
commonmark = None
try:
import docutils
except ImportError:
docutils = None
class Templates(unittest.TestCase):
textile_content = """Paragraph 1
Paragraph 2 with "quotes" and @code@"""
markdown_content = """Paragraph 1
## An h2"""
markdown_content_with_html_code = """Paragraph 1
## An h2
```
<video width="320" height="240" controls>
<source src="movie.mp4" type="video/mp4">
<source src="movie.ogg" type="video/ogg">
</video>
```
"""
markdown_content_with_iframe_code = """Paragraph 1
## An h2
```
<iframe src="http://example.com"></iframe>
```
"""
rest_content = """Paragraph 1
Paragraph 2 with a link_
.. _link: http://www.example.com/"""
    @unittest.skipUnless(textile, 'textile not installed')
def test_textile(self):
t = Template("{% load markup %}{{ textile_content|textile }}")
rendered = t.render(Context(
{'textile_content': self.textile_content})).strip()
self.assertEqual(rendered.replace('\t', ''), """<p>Paragraph 1</p>
<p>Paragraph 2 with “quotes” and <code>code</code></p>""")
    @unittest.skipIf(textile, 'textile is installed')
def test_no_textile(self):
t = Template("{% load markup %}{{ textile_content|textile }}")
rendered = t.render(Context(
{'textile_content': self.textile_content})).strip()
self.assertEqual(rendered, escape(self.textile_content))
@unittest.skipUnless(markdown, 'markdown not installed')
def test_markdown(self):
t = Template("{% load markup %}{{ markdown_content|markdown }}")
rendered = t.render(Context({
'markdown_content': self.markdown_content
})).strip()
pattern = re.compile(r"<p>Paragraph 1\s*</p>\s*<h2>\s*An h2</h2>")
self.assertTrue(pattern.match(rendered))
@unittest.skipUnless(markdown, 'markdown not installed')
def test_markdown_html_code(self):
t = Template("{% load markup %}{{ markdown_content|markdown }}")
rendered = t.render(Context({
'markdown_content': self.markdown_content_with_html_code
})).strip()
pattern = re.compile(
r'<p>Paragraph 1\s*</p>\s*<h2>\s*An h2</h2>' +
r'\s*<p><code>\s*<video width="320"'
)
self.assertTrue(pattern.match(rendered))
@unittest.skipUnless(markdown, 'markdown not installed')
def test_markdown_html_iframe_code(self):
t = Template("{% load markup %}{{ markdown_content|markdown }}")
rendered = t.render(Context({
'markdown_content': self.markdown_content_with_iframe_code
})).strip()
pattern = re.compile(
r'<p>Paragraph 1\s*</p>\s*<h2>\s*An h2</h2>' +
r'\s*<p><code>\s*<iframe src="http://example.com">' +
r'</iframe>'
)
self.assertTrue(pattern.match(rendered))
@unittest.skipUnless(
markdown and markdown_version >= (2, 1),
'markdown >= 2.1 not installed'
)
def test_markdown_attribute_disable(self):
t = Template("{% load markup %}{{ markdown_content|markdown:'safe' }}")
markdown_content = "{@onclick=alert('hi')}some paragraph"
rendered = t.render(Context(
{'markdown_content': markdown_content})).strip()
self.assertTrue('@' in rendered)
@unittest.skipUnless(
markdown and markdown_version >= (2, 1),
'markdown >= 2.1 not installed'
)
def test_markdown_attribute_enable(self):
t = Template("{% load markup %}{{ markdown_content|markdown }}")
markdown_content = "{@onclick=alert('hi')}some paragraph"
rendered = t.render(Context(
{'markdown_content': markdown_content})).strip()
self.assertFalse('@' in rendered)
@unittest.skipIf(markdown, 'markdown is installed')
def test_no_markdown(self):
t = Template("{% load markup %}{{ markdown_content|markdown }}")
rendered = t.render(Context(
{'markdown_content': self.markdown_content})).strip()
self.assertEqual(rendered, self.markdown_content)
@unittest.skipUnless(commonmark, 'commonmark not installed')
def test_commonmark(self):
t = Template("{% load markup %}{{ markdown_content|commonmark }}")
rendered = t.render(
Context({'markdown_content': self.markdown_content})).strip()
pattern = re.compile(r"<p>Paragraph 1\s*</p>\s*<h2>\s*An h2</h2>")
self.assertTrue(pattern.match(rendered))
@unittest.skipUnless(commonmark, 'commonmark not installed')
def test_commonmark_html_code(self):
t = Template("{% load markup %}{{ markdown_content|commonmark }}")
rendered = t.render(Context({
'markdown_content': self.markdown_content_with_html_code
})).strip()
pattern = re.compile(
r'<p>Paragraph 1\s*</p>\s*<h2>\s*An h2</h2>' +
r'\s*<pre><code>\s*<video width="320"'
)
self.assertTrue(pattern.match(rendered))
@unittest.skipUnless(commonmark, 'commonmark not installed')
def test_commonmark_html_iframe_code(self):
t = Template("{% load markup %}{{ markdown_content|commonmark }}")
rendered = t.render(Context({
'markdown_content': self.markdown_content_with_iframe_code
})).strip()
pattern = re.compile(
r'<p>Paragraph 1\s*</p>\s*<h2>\s*An h2</h2>' +
r'\s*<pre><code>\s*<iframe ' +
r'src="http://example.com">' +
r'</iframe>'
)
self.assertTrue(pattern.match(rendered))
@unittest.skipUnless(commonmark, 'commonmark not installed')
def test_commonmark_empty_str(self):
t = Template("{% load markup %}{{ markdown_content|commonmark }}")
rendered = t.render(Context({'markdown_content': ''})).strip()
self.assertEqual(rendered, '')
@unittest.skipUnless(commonmark, 'commonmark not installed')
def test_commonmark_none(self):
t = Template("{% load markup %}{{ markdown_content|commonmark }}")
rendered = t.render(Context({'markdown_content': None})).strip()
self.assertEqual(rendered, '<p>None</p>')
@unittest.skipUnless(docutils, 'docutils not installed')
def test_docutils(self):
t = Template("{% load markup %}{{ rest_content|restructuredtext }}")
rendered = t.render(Context({
'rest_content': self.rest_content
})).strip()
# Different versions of docutils return slightly different HTML
try:
# Docutils v0.4 and earlier
self.assertEqual(
rendered,
'<p>Paragraph 1</p>\n' +
'<p>Paragraph 2 with a <a class="reference" ' +
'href="http://www.example.com/">link</a></p>')
except AssertionError:
# Docutils from SVN (which will become 0.5)
self.assertEqual(
rendered,
'<p>Paragraph 1</p>\n' +
'<p>Paragraph 2 with a ' +
'<a class="reference external" ' +
'href="http://www.example.com/">link</a></p>')
@unittest.skipIf(docutils, 'docutils is installed')
def test_no_docutils(self):
t = Template("{% load markup %}{{ rest_content|restructuredtext }}")
rendered = t.render(
Context({'rest_content': self.rest_content})).strip()
self.assertEqual(rendered, self.rest_content)
|
# -*- coding: utf-8 -*-
import os
import json
from flask import current_app, request, Response
from flask.ext.restful import abort, fields, marshal_with, marshal
from sqlalchemy.orm.exc import NoResultFound
from export_report import ExportOgranisationsResource
from domain.models import Address, Organisation, Person, \
BrregActivityCode, FlodActivityType, UmbrellaOrganisation, \
PersonOrgAssociationRole, OrganisationPersonAssociation, \
UmbrellaOrgMemberOrgAssociation, OrganisationInternalNote, District
from flodapi import FlodApi
from flod_common.session.utils import (unsign_auth_token,
verify_auth_token,
verify_superuser_auth_token)
from util.brregclient import BrRegClient, OrgNrNotFoundException
from api import PersonsResource, PersonResource, PersonOrganisationsResource, OrganisationResource, \
OrganisationPersonsResource, UmbrellaOrganisationResource, UmbrellaOrganisationPersonsResource, \
UmbrellaOrgMemberOrgAssociationsResource, BrregActivityCodeResource, FlodActivityTypeResource, DistrictResource, \
OrganisationInternalNotesResource, address_fields
BRREG_URL = os.environ.get("FLOD_BRREG_URL")
BRREG_USER_ID = os.environ.get("FLOD_BRREG_USER")
BRREG_PASSWORD = os.environ.get("FLOD_BRREG_PASS")
USERS_URL = os.environ.get('USERS_URL', 'http://localhost:4000')
booking_service_base_url = os.environ.get('BOOKING_URL', "http://localhost:1337")
booking_service_version = os.environ.get('BOOKING_VERSION', 'v1')
organisation_booking_uri = '%s/api/%s/organisations' % (booking_service_base_url, booking_service_version)
try:
brreg_client = BrRegClient(BRREG_URL, BRREG_USER_ID, BRREG_PASSWORD)
except Exception:
brreg_client = None
brreg_name_search_fields = {
'name': fields.String(attribute='OrgNavn', default=None),
'org_number': fields.String(attribute='Orgnr', default=None),
'org_form': fields.String(attribute='OrgForm', default=None),
'postal_place': fields.String(attribute='Sted', default=None),
'is_registered': fields.Boolean,
'relevance_score': fields.String(attribute='Score', default=None),
'id': fields.Integer(default=None)
}
brreg_org_fields = {
'id': fields.Integer(default=None),
'name': fields.String(default=None),
'org_number': fields.String(default=None),
'org_form': fields.String(default=None),
'account_number': fields.String(default=None),
'email_address': fields.String(default=None),
'phone_number': fields.String(default=None),
'telefax_number': fields.String(default=None),
'url': fields.String(default=None),
'uri': fields.String(default=None),
'business_address': fields.Nested(address_fields, allow_null=True),
'postal_address': fields.Nested(address_fields, allow_null=True),
'is_public': fields.Boolean,
# activity codes
'brreg_activity_code': fields.List(fields.String),
# Trondheim kommune specific fields
'flod_activity_type': fields.List(fields.Integer, default=[]),
'num_members_b20': fields.Integer(default=None),
'num_members': fields.Integer(default=None),
'description': fields.String(default=None),
'area': fields.Integer(default=None),
'registered_tkn': fields.Boolean,
'is_registered': fields.Boolean
}
def create_api(app, api_version):
api = FlodApi(app)
##
## Actually setup the Api resource routing here
##
api.add_resource(PersonsResource, '/api/%s/persons/' % api_version)
api.add_resource(
PersonResource,
'/api/%s/persons/' % api_version,
'/api/%s/persons/<int:person_id>' % api_version
)
api.add_resource(
PersonOrganisationsResource,
'/api/%s/persons/<int:person_id>/organisations/' % api_version
)
api.add_resource(
OrganisationResource,
'/api/%s/organisations/<int:org_id>' % api_version,
'/api/%s/organisations/' % api_version
)
api.add_resource(
ExportOgranisationsResource,
'/api/%s/export/organisations/' % api_version
)
api.add_resource(
OrganisationPersonsResource,
'/api/%s/organisations/<int:org_id>/persons/' % api_version,
'/api/%s/organisations/<int:org_id>/persons/<int:person_id>' % api_version
)
api.add_resource(
UmbrellaOrganisationResource,
'/api/%s/umbrella_organisations/<int:umb_org_id>' % api_version,
'/api/%s/umbrella_organisations/<string:umb_org_name>' % api_version,
'/api/%s/umbrella_organisations/' % api_version
)
api.add_resource(
UmbrellaOrganisationPersonsResource,
'/api/%s/umbrella_organisations/<int:umb_org_id>/persons/' % api_version,
'/api/%s/umbrella_organisations/<int:umb_org_id>/persons/<int:person_id>' % api_version
)
api.add_resource(
UmbrellaOrgMemberOrgAssociationsResource,
'/api/%s/umbrella_organisations/<int:umbrella_organisation_id>/organisations/' % api_version,
'/api/%s/umbrella_organisations/<int:umbrella_organisation_id>/organisations/<int:association_id>' % api_version
)
api.add_resource(
BrregActivityCodeResource,
'/api/%s/brreg_activity_codes/' % api_version
)
api.add_resource(
FlodActivityTypeResource,
'/api/%s/flod_activity_types/' % api_version
)
api.add_resource(
DistrictResource,
'/api/%s/districts/' % api_version
)
api.add_resource(
OrganisationInternalNotesResource,
'/api/%s/organisations/<int:organisation_id>/notes/' % api_version,
'/api/%s/organisations/<int:organisation_id>/notes/<int:note_id>' % api_version
)
@app.route('/api/%s/brreg/enhet' % api_version)
def lookup_basic_brreg_data():
"""
Lookup unit information in Brreg.
:param orgnr: Organisation number
:statuscode 401: Unauthorized
"""
if not verify_auth_token(request.cookies.get('auth_token')):
abort(401)
if not brreg_client:
abort(404)
org_number = request.args['orgnr']
try:
response = brreg_client.get_brreg_enhet_basis_data_full(org_number)
except OrgNrNotFoundException, e:
return Response(
json.dumps({"__error__": [e.message]}),
mimetype='application/json',
status=404
)
try:
flod_org = current_app.db_session.query(Organisation).filter(
Organisation.org_number == org_number
).one()
response['id'] = flod_org.id
is_registered = not flod_org.is_deleted
except NoResultFound:
is_registered = False
response['is_registered'] = is_registered
marshalled = marshal(response, brreg_org_fields)
return Response(
json.dumps(marshalled),
mimetype='application/json'
)
@app.route('/api/%s/brreg/enhet/roller' % api_version)
def lookup_brreg_roles():
"""
Lookup roles in Brreg.
:param orgnr: Organisation number
:statuscode 401: Unauthorized
"""
if not verify_auth_token(request.cookies.get('auth_token')):
abort(401)
if not brreg_client:
abort(404)
org_number = request.args['orgnr']
response = brreg_client.get_brreg_enhet_role_data(org_number)
return Response(json.dumps(response), mimetype='application/json')
@app.route('/api/%s/brreg/enhet/kontakt' % api_version)
def lookup_brreg_contact():
"""
Lookup contact information in Brreg.
:param orgnr: Organisation number
:statuscode 401: Unauthorized
"""
if not verify_auth_token(request.cookies.get('auth_token')):
abort(401)
if not brreg_client:
abort(404)
org_number = request.args['orgnr']
response = brreg_client.get_brreg_enhet_contact_data(org_number)
return Response(json.dumps(response), mimetype='application/json')
@app.route('/api/%s/brreg/search' % api_version)
def lookup_org_name():
"""
Lookup organisation in Brreg by name.
:param name: Organisation name
:statuscode 401: Unauthorized
"""
if not verify_auth_token(request.cookies.get('auth_token')):
abort(401)
if not brreg_client:
abort(404)
name = request.args['name']
response = brreg_client.get_brreg_enhet_name_search(name)
search_result = response.get('result', [])
for org in search_result:
org_number = org.get('Orgnr', None)
try:
flod_org = current_app.db_session.query(Organisation).filter(
Organisation.org_number == org_number
).one()
org['id'] = flod_org.id
org['is_registered'] = not flod_org.is_deleted
except NoResultFound:
org['is_registered'] = False
marshalled = marshal(search_result, brreg_name_search_fields)
return Response(json.dumps(marshalled), mimetype='application/json')
return api |
"""
A Pillow loader for .ftc and .ftu files (FTEX)
Jerome Leclanche <[email protected]>
The contents of this file are hereby released in the public domain (CC0)
Full text of the CC0 license:
https://creativecommons.org/publicdomain/zero/1.0/
Independence War 2: Edge Of Chaos - Texture File Format - 16 October 2001
The textures used for 3D objects in Independence War 2: Edge Of Chaos are in a
packed custom format called FTEX. This file format uses file extensions FTC and FTU.
* FTC files are compressed textures (using standard texture compression).
* FTU files are not compressed.
Texture File Format
The FTC and FTU texture files both use the same format. This
has the following structure:
{header}
{format_directory}
{data}
Where:
{header} = { u32:magic, u32:version, u32:width, u32:height, u32:mipmap_count, u32:format_count }
* The "magic" number is "FTEX".
* "width" and "height" are the dimensions of the texture.
* "mipmap_count" is the number of mipmaps in the texture.
* "format_count" is the number of texture formats (different versions of the same texture) in this file.
{format_directory} = format_count * { u32:format, u32:where }
The format value is 0 for DXT1 compressed textures and 1 for 24-bit RGB uncompressed textures.
The texture data for a format starts at the position "where" in the file.
Each set of texture data in the file has the following structure:
{data} = format_count * { u32:mipmap_size, mipmap_size * { u8 } }
* "mipmap_size" is the number of bytes in that mip level. For compressed textures this is the
size of the texture data compressed with DXT1. For 24 bit uncompressed textures, this is 3 * width * height.
Following this are the image bytes for that mipmap level.
Note: All data is stored in little-endian (Intel) byte order.
"""
import struct
from io import BytesIO
from . import Image, ImageFile
MAGIC = b"FTEX"
FORMAT_DXT1 = 0
FORMAT_UNCOMPRESSED = 1
class FtexImageFile(ImageFile.ImageFile):
format = "FTEX"
format_description = "Texture File Format (IW2:EOC)"
def _open(self):
magic = struct.unpack("<I", self.fp.read(4))
version = struct.unpack("<i", self.fp.read(4))
self.size = struct.unpack("<2i", self.fp.read(8))
mipmap_count, format_count = struct.unpack("<2i", self.fp.read(8))
self.mode = "RGB"
# Only support single-format files. I don't know of any multi-format file.
assert format_count == 1
format, where = struct.unpack("<2i", self.fp.read(8))
self.fp.seek(where)
mipmap_size, = struct.unpack("<i", self.fp.read(4))
data = self.fp.read(mipmap_size)
if format == FORMAT_DXT1:
self.mode = "RGBA"
self.tile = [("bcn", (0, 0) + self.size, 0, (1))]
elif format == FORMAT_UNCOMPRESSED:
self.tile = [("raw", (0, 0) + self.size, 0, ('RGB', 0, 1))]
else:
raise ValueError("Invalid texture compression format: %r" % (format))
self.fp.close()
self.fp = BytesIO(data)
def load_seek(self, pos):
pass
def _validate(prefix):
return prefix[:4] == MAGIC
Image.register_open(FtexImageFile.format, FtexImageFile, _validate)
Image.register_extension(FtexImageFile.format, ".ftc")
Image.register_extension(FtexImageFile.format, ".ftu")
|
import contextlib
import os
@contextlib.contextmanager
def modified_environ(*remove, **update):
"""Temporarily updates the os.environ dictionary in-place."""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
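# Usage example (illustrative variable names): temporarily override PATH and
# drop HOME inside the block, then verify both are restored on exit.
if __name__ == "__main__":
    os.environ.setdefault("HOME", "/tmp/original-home")
    with modified_environ("HOME", PATH="/tmp/bin"):
        assert "HOME" not in os.environ
        assert os.environ["PATH"] == "/tmp/bin"
    assert "HOME" in os.environ  # restored after the block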
|
""" PUBLIC CONSTANTS """
DOMAIN = "script_engine"
CLASS_NAME_PATTERN = '_Script_*'
FILE_NAME_PATTERN = 'script_*.py'
FUNCTION_NAME_PATTERN = '_script_*'
SCRIPT_FOLDER = "/config/script_engine/"
|
from glob import glob
import os
import os.path
import shutil
import subprocess
import sys
from argparse import ArgumentParser
def pad_number(num):
length = len(str(num))
return ((3 - length) * '0') + str(num)
ddpost_par = """
'{e_file}' = name of file with E stored
'{outfile}' = prefix for name of VTR output files
{e_e2} = IVTR (set to 1 to create VTR output)
0 = ILINE (set to 1 to evaluate E along a line)
"""
def main():
argparser = ArgumentParser()
argparser.add_argument('subdir')
args = argparser.parse_args()
os.chdir(args.subdir)
subprocess.call('mkdir vtr', shell=True)
subprocess.call('cp ~/scratch/exec/ddpostprocess .', shell=True)
vtrdir = os.path.join(args.subdir, 'vtr')
efiles = glob('w*r000k000.E1')
efiles.sort()
i = 0
for i, efile in enumerate(efiles):
with open('ddpostprocess.par', 'w') as fp:
fp.write(ddpost_par.format(
e_file=efile,
outfile='vtr/wav_{}'.format(i),
e_e2='1'
))
subprocess.call('./ddpostprocess', shell=True)
subprocess.call('rm ddpostprocess.par', shell=True)
unchanged = glob('vtr/*')
for file in unchanged:
newfile = file.replace('_1.vtr', '.vtr')
shutil.move(file, newfile)
os.chdir('..')
shutil.move(vtrdir, '{}_vtr'.format(args.subdir))
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
import turtle
def main():
window = turtle.Screen()
tortuga = turtle.Turtle()
make_square(tortuga)
turtle.mainloop()
def make_square(turtle):
    length = int(raw_input('Enter the side length: '))
    for i in range(4):
        make_line_and_turn(turtle, length)
def make_line_and_turn(turtle, length):
    turtle.forward(length)
    turtle.left(90)
# Tell Python to run the entry-point function
if __name__ == '__main__':
main() |
import json
import os
import re
import psutil
import time
import redis
from flask import Flask
from flask import render_template
from flask import jsonify
app = Flask(__name__)
r = redis.StrictRedis(host='localhost', port=6379, db=0)
log_path = '/opt/doorbell/logs'
@app.route("/", methods=['GET'])
def hello():
return render_template('hello.html')
@app.route("/config", methods=['GET'])
def config():
doorbell_config = {
'uptime': int(round(time.time() - psutil.boot_time())),
'time': int(round(time.time())),
'total_rings': int(r.get('total_rings')),
'rings_since_boot': int(r.get('rings_since_boot')),
'volume': int(r.get('volume')),
'language': int(r.get('language')),
'switch_voice_randomly': int(r.get('switch_voice_randomly')),
'voice': int(r.get('voice')),
'last_ring': int(round(float(r.get('last_ring'))))
}
return jsonify(doorbell_config)
def is_ringing():
return time.time() - float(r.get('last_ring')) < 3
@app.route("/poll", methods=['GET'])
def poll():
request_time = time.time()
while not is_ringing():
if time.time() - request_time > 60:
break
time.sleep(1)
return jsonify({
'currently_ringing': is_ringing(),
'last_ring': float(r.get('last_ring')),
'time': time.time()
})
@app.route("/log/<int:year>/<int:month>", methods=['GET'])
def log(year, month):
    path = '%s/%s-%02d.json' % (log_path, year, month)  # zero-pad month to match the YYYY-MM.json file names
if not os.path.exists(path):
return jsonify({'error': 'Log file not found'}), 404
with open(path) as data_file:
logs = json.load(data_file)
return jsonify(logs)
@app.route("/logs", methods=['GET'])
def logs():
files = os.listdir(log_path)
log_routes = []
for file in files:
if not file.endswith('.json'):
continue
        m = re.match(r'^(?P<year>[0-9]{4})-(?P<month>[0-9]{2})\.json$', file)
        if not m:
            continue
        log_routes.append('/log/%s/%s' % (m.group('year'), m.group('month')))
return jsonify(log_routes) |
from __future__ import division
import collections
from typing import Any, Dict, List, Optional, Union
import astropy.units as u
import numba as nb
import numpy
import scipy.integrate
from past.utils import old_div
from astromodels.core.memoization import use_astromodels_memoization
from astromodels.core.parameter import Parameter
from astromodels.core.sky_direction import SkyDirection
from astromodels.core.spectral_component import SpectralComponent
from astromodels.core.tree import Node
from astromodels.core.units import get_units
from astromodels.functions.function import Function1D
from astromodels.sources.source import Source, SourceType
from astromodels.utils.logging import setup_logger
from astromodels.utils.pretty_list import dict_to_list
__author__ = 'giacomov'
__all__ = ["PointSource"]
log = setup_logger(__name__)
class PointSource(Source, Node):
"""
A point source. You can instance this class in many ways.
- with Equatorial position and a function as spectrum (the component will be automatically called 'main')::
>>> from astromodels import *
>>> point_source = PointSource('my_source', 125.6, -75.3, Powerlaw())
- with Galactic position and a function as spectrum (the component will be automatically called 'main')::
>>> point_source = PointSource('my_source', l=15.67, b=80.75, spectral_shape=Powerlaw())
- with Equatorial position or Galactic position and a list of spectral components::
>>> c1 = SpectralComponent("component1", Powerlaw())
>>> c2 = SpectralComponent("component2", Powerlaw())
>>> point_source = PointSource("test_source",125.6, -75.3,components=[c1,c2])
Or with Galactic position:
>>> point_source = PointSource("test_source",l=15.67, b=80.75,components=[c1,c2])
NOTE: by default the position of the source is fixed (i.e., its positional parameters are fixed)
:param source_name: name for the source
:param ra: Equatorial J2000 Right Ascension (ICRS)
:param dec: Equatorial J2000 Declination (ICRS)
:param spectral_shape: a 1d function representing the spectral shape of the source
    :param l: Galactic longitude
    :param b: Galactic latitude
:param components: list of spectral components (instances of SpectralComponent)
:param sky_position: an instance of SkyDirection
:return:
"""
def __init__(self,
source_name: str,
ra: Optional[float] = None,
dec: Optional[float] = None,
spectral_shape: Optional[Function1D] = None,
l: Optional[float] = None,
b: Optional[float] = None,
components=None,
sky_position: Optional[SkyDirection]=None):
# Check that we have all the required information
# (the '^' operator acts as XOR on booleans)
# Check that we have one and only one specification of the position
if not ((ra is not None and dec is not None) ^
(l is not None and b is not None) ^
(sky_position is not None)):
log.error(
"You have to provide one and only one specification for the position"
)
raise AssertionError()
# Gather the position
if not isinstance(sky_position, SkyDirection):
if (ra is not None) and (dec is not None):
# Check that ra and dec are actually numbers
try:
ra = float(ra)
dec = float(dec)
except (TypeError, ValueError):
log.error("RA and Dec must be numbers. If you are confused by this message, you "
"are likely using the constructor in the wrong way. Check the documentation.")
raise AssertionError()
sky_position = SkyDirection(ra=ra, dec=dec)
else:
sky_position = SkyDirection(l=l, b=b)
self._sky_position: SkyDirection = sky_position
# Now gather the component(s)
# We need either a single component, or a list of components, but not both
# (that's the ^ symbol)
if not (spectral_shape is not None) ^ (components is not None):
log.error("You have to provide either a single component, or a list of components (but not both).")
raise AssertionError()
# If the user specified only one component, make a list of one element with a default name ("main")
if spectral_shape is not None:
components = [SpectralComponent("main", spectral_shape)]
Source.__init__(self, components, src_type=SourceType.POINT_SOURCE)
# A source is also a Node in the tree
Node.__init__(self, source_name)
# Add the position as a child node, with an explicit name
self._add_child(self._sky_position)
# Add a node called 'spectrum'
spectrum_node = Node('spectrum')
spectrum_node._add_children(list(self._components.values()))
self._add_child(spectrum_node)
# Now set the units
# Now sets the units of the parameters for the energy domain
current_units = get_units()
# Components in this case have energy as x and differential flux as y
x_unit = current_units.energy
y_unit = (current_units.energy * current_units.area * current_units.time) ** (-1)
# Now set the units of the components
for component in list(self._components.values()):
component.shape.set_units(x_unit, y_unit)
def __call__(self, x, tag=None):
if tag is None:
# No integration nor time-varying or whatever-varying
if isinstance(x, u.Quantity):
# Slow version with units
results = [component.shape(x) for component in list(self.components.values())]
# We need to sum like this (slower) because using np.sum will not preserve the units
# (thanks astropy.units)
return sum(results)
else:
# Fast version without units, where x is supposed to be in the same units as currently defined in
# units.get_units()
results = numpy.array([component.shape(x) for component in list(self.components.values())])
return _sum(results)
else:
# Time-varying or energy-varying or whatever-varying
integration_variable, a, b = tag
if b is None:
# Evaluate in a, do not integrate
# Suspend memoization because the memoization gets confused when integrating
with use_astromodels_memoization(False):
integration_variable.value = a
res = self.__call__(x, tag=None)
return res
else:
# Integrate between a and b
integrals = numpy.zeros(len(x))
# TODO: implement an integration scheme avoiding the for loop
# Suspend memoization because the memoization gets confused when integrating
with use_astromodels_memoization(False):
reentrant_call = self.__call__
for i, e in enumerate(x):
def integral(y):
integration_variable.value = y
return reentrant_call(e, tag=None)
# Now integrate
integrals[i] = scipy.integrate.quad(integral, a, b, epsrel=1e-5)[0]
return old_div(integrals, (b - a))
def has_free_parameters(self) -> bool:
"""
Returns True or False whether there is any parameter in this source
:return:
"""
for component in list(self._components.values()):
for par in list(component.shape.parameters.values()):
if par.free:
return True
for par in list(self.position.parameters.values()):
if par.free:
return True
return False
@property
def free_parameters(self) -> Dict[str, Parameter]:
"""
Returns a dictionary of free parameters for this source.
We use the parameter path as the key because it's
guaranteed to be unique, unlike the parameter name.
:return:
"""
free_parameters = collections.OrderedDict()
for component in list(self._components.values()):
for par in list(component.shape.parameters.values()):
if par.free:
free_parameters[par.path] = par
for par in list(self.position.parameters.values()):
if par.free:
free_parameters[par.path] = par
return free_parameters
@property
def parameters(self) -> Dict[str, Parameter]:
"""
Returns a dictionary of all parameters for this source.
We use the parameter path as the key because it's
guaranteed to be unique, unlike the parameter name.
:return:
"""
all_parameters = collections.OrderedDict()
for component in self._components.values():
for par in component.shape.parameters.values():
all_parameters[par.path] = par
for par in self.position.parameters.values():
all_parameters[par.path] = par
return all_parameters
def _repr__base(self, rich_output=False):
"""
Representation of the object
:param rich_output: if True, generates HTML, otherwise text
:return: the representation
"""
# Make a dictionary which will then be transformed in a list
repr_dict = collections.OrderedDict()
key = '%s (point source)' % self.name
repr_dict[key] = collections.OrderedDict()
repr_dict[key]['position'] = self._sky_position.to_dict(minimal=True)
repr_dict[key]['spectrum'] = collections.OrderedDict()
for component_name, component in list(self.components.items()):
repr_dict[key]['spectrum'][component_name] = component.to_dict(minimal=True)
return dict_to_list(repr_dict, rich_output)
@nb.njit(fastmath=True)
def _sum(x):
return numpy.sum(x, axis=0)
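# Hedged usage sketch (module-level helper, not part of the public API): once a
# PointSource is built as in the class docstring, calling it with an array of
# energies returns the summed differential flux of all of its components.
# Powerlaw is used only because the docstring above already references it, and
# the energy grid is an arbitrary example in the currently configured units.
def _example_point_source_flux():
    from astromodels import Powerlaw
    energies = numpy.logspace(0, 3, 10)
    src = PointSource("example_src", ra=125.6, dec=-75.3, spectral_shape=Powerlaw())
    return src(energies)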
|
import pygame
from engine import *
from eventManager import *
class Label(Engine.GUI.Widget):
def __init__(self, text, textColor = None, backgroundColor = None, fontSize = None, padding = None, width = None, height = None, transparentBackground = True):
super().__init__()
self.textColor = textColor if textColor != None else self.options.labelWidgetTextColor
self.backgroundColor = backgroundColor if backgroundColor != None else self.options.labelWidgetBackgroundColor
self.hasTransparentBackground = transparentBackground
self.fontSize = fontSize if fontSize != None else self.options.widgetFontSize
self.font = pygame.font.Font(self.options.widgetFont, self.fontSize)
self.text = text
self.renderedText = self.font.render(self.text, True, self.textColor, self.backgroundColor) #need this here to get initial size
self.textRect = self.renderedText.get_rect()
self.rect = self.textRect #self.rect is actually Rect for widget, used here to provide initial size values
self.padding = padding if padding != None else self.options.widgetPadding
self.width = width
if self.width == None:
self.width = self.rect.width + self.padding
self.rect.width = self.width
self.height = height
if self.height == None:
self.height = self.rect.height + self.padding
self.rect.height = self.height
def redrawWidget(self):
self.dirty = True
self.image = pygame.Surface((self.width, self.height))
self.image.fill(self.backgroundColor)
if self.hasTransparentBackground:
self.image.set_colorkey(self.backgroundColor)
self.renderedText = self.font.render(self.text, True, self.textColor, self.backgroundColor)
self.textRect = self.renderedText.get_rect()
self.textRect.x = (self.width / 2) - (self.textRect.width / 2)
self.textRect.y = (self.height / 2) - (self.textRect.height / 2)
self.image.blit(self.renderedText, self.textRect)
def update(self):
if self.dirty:
self.redrawWidget()
self.dirty = False
class StatTracker(Label):
def __init__(self, stat, value, textColor = None, backgroundColor = None, fontSize = None, padding = None, width = None, height = None, transparentBackground = True):
super().__init__(stat, textColor, backgroundColor, fontSize, padding, width, height, transparentBackground)
self.stat = stat
self.statValue = value
self.text = stat
self.valueFontSize = fontSize if fontSize != None else self.options.widgetFontSize
self.valueFont = pygame.font.Font(self.options.widgetFont, self.valueFontSize)
self.textFontSize = self.options.statTrackerTextFontSize
self.textFont = pygame.font.Font(self.options.widgetFont, self.textFontSize)
# get initial sizes
self.renderedValue = self.valueFont.render(str(self.statValue), True, self.textColor, self.backgroundColor)
self.valueRect = self.renderedValue.get_rect()
self.renderedText = self.textFont.render(self.text, True, self.textColor, self.backgroundColor)
self.textRect = self.renderedText.get_rect()
self.padding = padding if padding != None else self.options.statTrackerTextPadding
self.width = width if width != None else max(self.valueRect.width, self.textRect.width) + self.padding
self.height = height if height != None else self.valueRect.height + self.textRect.height + self.padding + self.options.statTrackerValueTextSpacing
self.rect = self.textRect #self.rect is actually Rect for widget, used here to provide initial size values
self.rect.width = self.width
self.rect.height = self.height
def addListeners(self):
event = Events.StatUpdateEvent()
self.eventManager.addListener(event, self)
def notify(self, event):
if isinstance(event, Events.StatUpdateEvent):
if event.stat == self.stat:
self.value(event.value)
def value(self, value = None):
if value != None:
self.statValue += value
self.redrawWidget()
return self.statValue
def redrawWidget(self):
self.dirty = True
self.image = pygame.Surface((self.width, self.height))
self.image.fill(self.backgroundColor)
if self.hasTransparentBackground:
self.image.set_colorkey(self.backgroundColor)
self.renderedValue = self.valueFont.render(str(self.statValue), True, self.textColor, self.backgroundColor)
self.valueRect = self.renderedValue.get_rect()
self.valueRect.x = (self.width / 2) - (self.valueRect.width / 2)
self.valueRect.y = (self.padding / 2) #self.topEdge() +
self.renderedText = self.textFont.render(self.text, True, self.textColor, self.backgroundColor)
self.textRect = self.renderedText.get_rect()
self.textRect.x = (self.width / 2) - (self.textRect.width / 2)
self.textRect.y = self.rect.height - (self.padding / 2) - self.textRect.height
self.width = max(self.valueRect.width, self.textRect.width) + self.padding
self.rect.width = self.width
self.image.blit(self.renderedValue, self.valueRect)
self.image.blit(self.renderedText, self.textRect)
class HoverableWidget(Label):
def __init__(self, text, textColor = None, backgroundColor = None, fontSize = None, padding = None, onHoverAction = None, width = None, height = None, transparentBackground = False):
super().__init__(text, textColor, backgroundColor, fontSize, padding, width, height, transparentBackground)
self.unfocusedBackgroundColor = self.backgroundColor
self.focusedBackgroundColor = self.getFocusedColor(self.backgroundColor)
if onHoverAction:
self.onHoverAction = onHoverAction
else:
self.onHoverAction = self.changeBackground
def addListeners(self):
event = Events.HoverWidgetEvent()
self.eventManager.addListener(event, self)
    def getContrastingShade(self, color):
        contrastingShadeOffset = .2 * 255
        if 255 - color > contrastingShadeOffset:
            color += contrastingShadeOffset
        else:
            color -= contrastingShadeOffset
        return color
def getFocusedColor(self, color):
r = self.getContrastingShade(color[0])
g = self.getContrastingShade(color[1])
b = self.getContrastingShade(color[2])
a = None
rgb = None
if len(color) > 3:
a = self.getContrastingShade(color[3])
if a:
rgb = (r, g, b, a)
else:
rgb = (r, g, b)
return rgb
def changeBackground(self):
self.dirty = True
if self.focused:
self.backgroundColor = self.focusedBackgroundColor
else:
self.backgroundColor = self.unfocusedBackgroundColor
self.update()
def hover(self, focused):
if self.onHoverAction:
self.dirty = True
self.focused = focused
self.onHoverAction()
self.update()
def notify(self, event):
if isinstance(event, Events.HoverWidgetEvent):
focused = self.rect.collidepoint(event.pos)
self.hover(focused)
class Button(HoverableWidget):
def __init__(self, text, textColor = None, buttonColor = None, fontSize = None, padding = None, onClickAction = None, onHoverAction = None, width = None, height = None):
super().__init__(text, textColor, buttonColor, fontSize, padding, onHoverAction, width, height, transparentBackground = False)
self.onClickAction = onClickAction
def addListeners(self):
super().addListeners()
event = Events.LeftClickWidgetEvent()
self.eventManager.addListener(event, self)
#print("Adding listeners for", self.text)
def click(self):
if self.onClickAction:
self.dirty = True
self.onClickAction()
def notify(self, event):
super().notify(event)
if isinstance(event, Events.LeftClickWidgetEvent) and self.rect.collidepoint(event.pos):
#print("Firing", event.name, "for Listener", self.text)
self.click()
elif isinstance(event, Events.KeyboardActivateWidgetEvent) and self.focused:
self.click()
class SliderWidget(Engine.GUI.Widget):
def __init__(self, valueKey, values, defaultValue, textColor = None, fillColor = None, backgroundColor = None, onDragAction = None, transparentBackground = True):
super().__init__()
self.eventManager = EventManager()
self.textColor = textColor if textColor != None else self.options.sliderWidgetTextColor
self.fillColor = fillColor if fillColor != None else self.options.sliderWidgetFillColor
self.backgroundColor = backgroundColor if backgroundColor != None else self.options.sliderWidgetBackgroundColor
self.hasTransparentBackground = transparentBackground
self.width = self.options.sliderWidth
self.height = self.options.sliderHeight
self.valueKey = valueKey
self.defaultValue = defaultValue
self.text = str(self.defaultValue)
self.value = self.defaultValue
self.stepValues = {}
self.font = pygame.font.Font(self.options.widgetFont, self.options.sliderFontSize)
self.onDragAction = onDragAction if onDragAction != None else self.slideToValue
self.image = pygame.Surface((self.width, self.height)) #contains bar, slide and text; all are defined here for initial positioning
self.rect = self.image.get_rect()
self.bar = pygame.Surface((self.options.sliderWidth, self.options.sliderBarHeight))
self.bar.fill(self.fillColor)
self.barRect = self.bar.get_rect()
self.barRect.x = self.options.sliderBarOffsetX
self.barRect.y = self.options.sliderBarOffsetY
self.slide = pygame.Surface((self.options.sliderSlideWidth, self.options.sliderSlideHeight))
self.slide.fill(self.fillColor)
self.slideRect = self.slide.get_rect()
self.renderedText = self.font.render(self.text, True, self.textColor, self.backgroundColor)
self.textRect = self.renderedText.get_rect()
self.textRect.x = (self.image.get_rect().width / 2) - (self.textRect.width / 2)
self.textRect.y = self.image.get_rect().height - self.options.sliderTextOffsetY - self.textRect.height
#make a lookup table for slide position and value
vals = len(values) - 1
        isRawList = not isinstance(values[0], (list, tuple))
maxStep = self.barRect.width - self.slideRect.width
minStep = 0
stepCounter = 0
self.step = ((maxStep - minStep) / vals)
for val in values:
key = self.step * stepCounter
if isRawList:
self.stepValues[key] = (str(val), val) #mimic (label, value)
else:
self.stepValues[key] = val #should already be (label, value) or [label, value]
stepCounter += 1
self.setValue(self.defaultValue)
def addListeners(self):
event = Events.DragWidgetEvent()
self.eventManager.addListener(event, self)
event = Events.LeftClickWidgetEvent()
self.eventManager.addListener(event, self)
def redrawWidget(self):
self.dirty = True
self.image = pygame.Surface((self.width, self.height))
self.image.fill(self.backgroundColor)
if self.hasTransparentBackground:
self.image.set_colorkey(self.backgroundColor)
self.bar = pygame.Surface((self.options.sliderWidth, self.options.sliderBarHeight))
self.bar.fill(self.fillColor)
self.slide = pygame.Surface((self.options.sliderSlideWidth, self.options.sliderSlideHeight))
self.slide.fill(self.fillColor)
self.renderedText = self.font.render(self.text, True, self.textColor, self.backgroundColor)
self.textRect = self.renderedText.get_rect()
self.textRect.x = (self.image.get_rect().width / 2) - (self.textRect.width / 2)
self.textRect.y = self.image.get_rect().height - self.options.sliderTextOffsetY - self.textRect.height
self.image.blit(self.bar, self.barRect)
self.image.blit(self.slide, self.slideRect)
self.image.blit(self.renderedText, self.textRect)
def update(self):
if self.dirty:
self.redrawWidget()
self.dirty = False
def drag(self, pos):
if self.onDragAction:
self.dirty = True
self.onDragAction(pos)
self.update()
def setValue(self, val):
for key in self.stepValues.keys():
item = self.stepValues[key]
if val == item[0] or val == item[1]:
self.slideToValue(key)
def slideToValue(self, dx):
self.dirty = True
closestStep = int(dx / self.step) #ensure integer
key = closestStep * self.step
if key in self.stepValues.keys():
item = self.stepValues[key]
self.text = item[0]
self.value = item[1]
self.slideRect.x = key
self.update()
def handleIfOnSelf(self, event):
relx = event.pos[0] - self.rect.x
minx = self.barRect.x - self.options.sliderDragPaddingX
maxx = minx + self.barRect.width + self.options.sliderDragPaddingX
rely = event.pos[1] - self.rect.y
miny = self.slideRect.y
maxy = miny + self.slideRect.height
if (minx <= relx <= maxx and miny <= rely <= maxy):
self.drag(relx)
def notify(self, event):
if isinstance(event, Events.DragWidgetEvent):
self.handleIfOnSelf(event)
if isinstance(event, Events.LeftClickWidgetEvent):
self.handleIfOnSelf(event)
|
"""
백준 10162번 : 전자레인지
"""
a = 300
b = 60
c = 10
n = int(input())
i = n // a
n -= a * i
j = n // b
n -= b * j
k = n // c
n -= c * k
if n != 0:
print(-1)
else:
print(i, j, k) |
# coding=UTF-8
from .attributes import Mutation as M
from .attributes import VerbTense as VT
from .attributes import VerbDependency as VD
from .attributes import VerbPerson as VPN
class VerbTenseRule:
def __init__(self,
particle: str = "",
mutation: M = M.NoMut,
tense: VT = VT.Pres,
dependency: VD = VD.Indep,
person: VPN = VPN.Base,
pronoun: str = "") -> None:
self.particle = particle
self.mutation = mutation
self.tense = tense
self.dependency = dependency
self.person = person
self.pronoun = pronoun
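# Hedged example (illustrative values only): build a rule from the defaults the
# constructor already exposes; the pronoun string is an arbitrary Irish pronoun
# and not something defined by this module.
def _example_rule() -> VerbTenseRule:
    return VerbTenseRule(
        particle="",
        mutation=M.NoMut,
        tense=VT.Pres,
        dependency=VD.Indep,
        person=VPN.Base,
        pronoun="mé",
    )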
|
/home/runner/.cache/pip/pool/fe/ca/59/86e292e614eb58e873ac46f32bb19e13a24808e2adf28dec6abb5ce99f |
import os
import re
import struct
import sys
import textwrap
sys.path.insert(0, os.path.dirname(__file__))
import ufunc_docstrings as docstrings
sys.path.pop(0)
Zero = "PyInt_FromLong(0)"
One = "PyInt_FromLong(1)"
True_ = "(Py_INCREF(Py_True), Py_True)"
False_ = "(Py_INCREF(Py_False), Py_False)"
None_ = object()
AllOnes = "PyInt_FromLong(-1)"
MinusInfinity = 'PyFloat_FromDouble(-NPY_INFINITY)'
ReorderableNone = "(Py_INCREF(Py_None), Py_None)"
# Sentinel value to specify using the full type description in the
# function name
class FullTypeDescr:
pass
class FuncNameSuffix:
"""Stores the suffix to append when generating functions names.
"""
def __init__(self, suffix):
self.suffix = suffix
class TypeDescription:
"""Type signature for a ufunc.
Attributes
----------
type : str
Character representing the nominal type.
func_data : str or None or FullTypeDescr or FuncNameSuffix, optional
The string representing the expression to insert into the data
array, if any.
in_ : str or None, optional
The typecode(s) of the inputs.
out : str or None, optional
The typecode(s) of the outputs.
astype : dict or None, optional
If astype['x'] is 'y', uses PyUFunc_x_x_As_y_y/PyUFunc_xx_x_As_yy_y
instead of PyUFunc_x_x/PyUFunc_xx_x.
simd: list
Available SIMD ufunc loops, dispatched at runtime in specified order
        Currently only supported for simple types (see make_arrays)
dispatch: list
Available SIMD ufunc loops, dispatched at runtime in specified order
        Currently only supported for simple types (see make_arrays)
"""
def __init__(self, type, f=None, in_=None, out=None, astype=None, simd=None, dispatch=None):
self.type = type
self.func_data = f
if astype is None:
astype = {}
self.astype_dict = astype
if in_ is not None:
in_ = in_.replace('P', type)
self.in_ = in_
if out is not None:
out = out.replace('P', type)
self.out = out
self.simd = simd
self.dispatch = dispatch
def finish_signature(self, nin, nout):
if self.in_ is None:
self.in_ = self.type * nin
assert len(self.in_) == nin
if self.out is None:
self.out = self.type * nout
assert len(self.out) == nout
self.astype = self.astype_dict.get(self.type, None)
_fdata_map = dict(
e='npy_%sf',
f='npy_%sf',
d='npy_%s',
g='npy_%sl',
F='nc_%sf',
D='nc_%s',
G='nc_%sl'
)
def build_func_data(types, f):
func_data = [_fdata_map.get(t, '%s') % (f,) for t in types]
return func_data
def TD(types, f=None, astype=None, in_=None, out=None, simd=None, dispatch=None):
if f is not None:
if isinstance(f, str):
func_data = build_func_data(types, f)
elif len(f) != len(types):
raise ValueError("Number of types and f do not match")
else:
func_data = f
else:
func_data = (None,) * len(types)
if isinstance(in_, str):
in_ = (in_,) * len(types)
elif in_ is None:
in_ = (None,) * len(types)
elif len(in_) != len(types):
raise ValueError("Number of types and inputs do not match")
if isinstance(out, str):
out = (out,) * len(types)
elif out is None:
out = (None,) * len(types)
elif len(out) != len(types):
raise ValueError("Number of types and outputs do not match")
tds = []
for t, fd, i, o in zip(types, func_data, in_, out):
# [(simd-name, list of types)]
if simd is not None:
simdt = [k for k, v in simd if t in v]
else:
simdt = []
# [(dispatch file name without extension '.dispatch.c*', list of types)]
if dispatch:
dispt = [k for k, v in dispatch if t in v]
else:
dispt = []
tds.append(TypeDescription(
t, f=fd, in_=i, out=o, astype=astype, simd=simdt, dispatch=dispt
))
return tds
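# Illustrative helper (not used by the generator): TD expands a string of type
# codes into one TypeDescription per code, and a string `f` is routed through
# _fdata_map to build the per-type C function name.
def _example_td_expansion():
    """Return the (type, func_data) pairs for a hypothetical 'sqrt' loop.
    Expected result: [('f', 'npy_sqrtf'), ('d', 'npy_sqrt')]
    """
    return [(t.type, t.func_data) for t in TD('fd', f='sqrt')]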
class Ufunc:
"""Description of a ufunc.
Attributes
----------
nin : number of input arguments
nout : number of output arguments
identity : identity element for a two-argument function
docstring : docstring for the ufunc
type_descriptions : list of TypeDescription objects
"""
def __init__(self, nin, nout, identity, docstring, typereso,
*type_descriptions, signature=None):
self.nin = nin
self.nout = nout
if identity is None:
identity = None_
self.identity = identity
self.docstring = docstring
self.typereso = typereso
self.type_descriptions = []
self.signature = signature
for td in type_descriptions:
self.type_descriptions.extend(td)
for td in self.type_descriptions:
td.finish_signature(self.nin, self.nout)
# String-handling utilities to avoid locale-dependence.
import string
UPPER_TABLE = bytes.maketrans(bytes(string.ascii_lowercase, "ascii"),
bytes(string.ascii_uppercase, "ascii"))
def english_upper(s):
""" Apply English case rules to convert ASCII strings to all upper case.
This is an internal utility function to replace calls to str.upper() such
that we can avoid changing behavior with changing locales. In particular,
Turkish has distinct dotted and dotless variants of the Latin letter "I" in
both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale.
Parameters
----------
s : str
Returns
-------
uppered : str
Examples
--------
>>> from numpy.lib.utils import english_upper
>>> s = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_'
>>> english_upper(s)
'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
>>> english_upper('')
''
"""
uppered = s.translate(UPPER_TABLE)
return uppered
#each entry in defdict is a Ufunc object.
#name: [string of chars for which it is defined,
# string of characters using func interface,
# tuple of strings giving funcs for data,
# (in, out), or (instr, outstr) giving the signature as character codes,
# identity,
# docstring,
# output specification (optional)
# ]
chartoname = {
'?': 'bool',
'b': 'byte',
'B': 'ubyte',
'h': 'short',
'H': 'ushort',
'i': 'int',
'I': 'uint',
'l': 'long',
'L': 'ulong',
'q': 'longlong',
'Q': 'ulonglong',
'e': 'half',
'f': 'float',
'd': 'double',
'g': 'longdouble',
'F': 'cfloat',
'D': 'cdouble',
'G': 'clongdouble',
'M': 'datetime',
'm': 'timedelta',
'O': 'OBJECT',
# '.' is like 'O', but calls a method of the object instead
# of a function
'P': 'OBJECT',
}
noobj = '?bBhHiIlLqQefdgFDGmM'
all = '?bBhHiIlLqQefdgFDGOmM'
O = 'O'
P = 'P'
ints = 'bBhHiIlLqQ'
times = 'Mm'
timedeltaonly = 'm'
intsO = ints + O
bints = '?' + ints
bintsO = bints + O
flts = 'efdg'
fltsO = flts + O
fltsP = flts + P
cmplx = 'FDG'
cmplxvec = 'FD'
cmplxO = cmplx + O
cmplxP = cmplx + P
inexact = flts + cmplx
inexactvec = 'fd'
noint = inexact+O
nointP = inexact+P
allP = bints+times+flts+cmplxP
nobool_or_obj = noobj[1:]
nobool_or_datetime = noobj[1:-1] + O # includes m - timedelta64
intflt = ints+flts
intfltcmplx = ints+flts+cmplx
nocmplx = bints+times+flts
nocmplxO = nocmplx+O
nocmplxP = nocmplx+P
notimes_or_obj = bints + inexact
nodatetime_or_obj = bints + inexact
# Find which code corresponds to int64.
int64 = ''
uint64 = ''
for code in 'bhilq':
if struct.calcsize(code) == 8:
int64 = code
uint64 = english_upper(code)
break
# This dictionary describes all the ufunc implementations, generating
# all the function names and their corresponding ufunc signatures. TD is
# an object which expands a list of character codes into an array of
# TypeDescriptions.
defdict = {
'add':
Ufunc(2, 1, Zero,
docstrings.get('numpy.core.umath.add'),
'PyUFunc_AdditionTypeResolver',
TD(notimes_or_obj, simd=[('avx512f', cmplxvec),('avx2', ints)]),
[TypeDescription('M', FullTypeDescr, 'Mm', 'M'),
TypeDescription('m', FullTypeDescr, 'mm', 'm'),
TypeDescription('M', FullTypeDescr, 'mM', 'M'),
],
TD(O, f='PyNumber_Add'),
),
'subtract':
Ufunc(2, 1, None, # Zero is only a unit to the right, not the left
docstrings.get('numpy.core.umath.subtract'),
'PyUFunc_SubtractionTypeResolver',
TD(ints + inexact, simd=[('avx512f', cmplxvec),('avx2', ints)]),
[TypeDescription('M', FullTypeDescr, 'Mm', 'M'),
TypeDescription('m', FullTypeDescr, 'mm', 'm'),
TypeDescription('M', FullTypeDescr, 'MM', 'm'),
],
TD(O, f='PyNumber_Subtract'),
),
'multiply':
Ufunc(2, 1, One,
docstrings.get('numpy.core.umath.multiply'),
'PyUFunc_MultiplicationTypeResolver',
TD(notimes_or_obj, simd=[('avx512f', cmplxvec),('avx2', ints)]),
[TypeDescription('m', FullTypeDescr, 'mq', 'm'),
TypeDescription('m', FullTypeDescr, 'qm', 'm'),
TypeDescription('m', FullTypeDescr, 'md', 'm'),
TypeDescription('m', FullTypeDescr, 'dm', 'm'),
],
TD(O, f='PyNumber_Multiply'),
),
#'divide' : aliased to true_divide in umathmodule.c:initumath
'floor_divide':
Ufunc(2, 1, None, # One is only a unit to the right, not the left
docstrings.get('numpy.core.umath.floor_divide'),
'PyUFunc_DivisionTypeResolver',
TD(intfltcmplx),
[TypeDescription('m', FullTypeDescr, 'mq', 'm'),
TypeDescription('m', FullTypeDescr, 'md', 'm'),
TypeDescription('m', FullTypeDescr, 'mm', 'q'),
],
TD(O, f='PyNumber_FloorDivide'),
),
'true_divide':
Ufunc(2, 1, None, # One is only a unit to the right, not the left
docstrings.get('numpy.core.umath.true_divide'),
'PyUFunc_TrueDivisionTypeResolver',
TD(flts+cmplx),
[TypeDescription('m', FullTypeDescr, 'mq', 'm'),
TypeDescription('m', FullTypeDescr, 'md', 'm'),
TypeDescription('m', FullTypeDescr, 'mm', 'd'),
],
TD(O, f='PyNumber_TrueDivide'),
),
'conjugate':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.conjugate'),
None,
TD(ints+flts+cmplx, simd=[('avx2', ints), ('avx512f', cmplxvec)]),
TD(P, f='conjugate'),
),
'fmod':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.fmod'),
None,
TD(ints),
TD(flts, f='fmod', astype={'e':'f'}),
TD(P, f='fmod'),
),
'square':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.square'),
None,
TD(ints+inexact, simd=[('avx2', ints), ('fma', 'fd'), ('avx512f', 'FDfd')]),
TD(O, f='Py_square'),
),
'reciprocal':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.reciprocal'),
None,
TD(ints+inexact, simd=[('avx2', ints), ('fma', 'fd'), ('avx512f','fd')]),
TD(O, f='Py_reciprocal'),
),
# This is no longer used as numpy.ones_like, however it is
# still used by some internal calls.
'_ones_like':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath._ones_like'),
'PyUFunc_OnesLikeTypeResolver',
TD(noobj),
TD(O, f='Py_get_one'),
),
'power':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.power'),
None,
TD(ints),
TD(inexact, f='pow', astype={'e':'f'}),
TD(O, f='npy_ObjectPower'),
),
'float_power':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.float_power'),
None,
TD('dgDG', f='pow'),
),
'absolute':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.absolute'),
'PyUFunc_AbsoluteTypeResolver',
TD(bints+flts+timedeltaonly, simd=[('fma', 'fd'), ('avx512f', 'fd')]),
TD(cmplx, simd=[('avx512f', cmplxvec)], out=('f', 'd', 'g')),
TD(O, f='PyNumber_Absolute'),
),
'_arg':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath._arg'),
None,
TD(cmplx, out=('f', 'd', 'g')),
),
'negative':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.negative'),
'PyUFunc_NegativeTypeResolver',
TD(ints+flts+timedeltaonly, simd=[('avx2', ints)]),
TD(cmplx, f='neg'),
TD(O, f='PyNumber_Negative'),
),
'positive':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.positive'),
'PyUFunc_SimpleUniformOperationTypeResolver',
TD(ints+flts+timedeltaonly),
TD(cmplx, f='pos'),
TD(O, f='PyNumber_Positive'),
),
'sign':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.sign'),
'PyUFunc_SimpleUniformOperationTypeResolver',
TD(nobool_or_datetime),
),
'greater':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.greater'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
[TypeDescription('O', FullTypeDescr, 'OO', 'O')],
TD('O', out='?'),
),
'greater_equal':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.greater_equal'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
[TypeDescription('O', FullTypeDescr, 'OO', 'O')],
TD('O', out='?'),
),
'less':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.less'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
[TypeDescription('O', FullTypeDescr, 'OO', 'O')],
TD('O', out='?'),
),
'less_equal':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.less_equal'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
[TypeDescription('O', FullTypeDescr, 'OO', 'O')],
TD('O', out='?'),
),
'equal':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.equal'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
[TypeDescription('O', FullTypeDescr, 'OO', 'O')],
TD('O', out='?'),
),
'not_equal':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.not_equal'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
[TypeDescription('O', FullTypeDescr, 'OO', 'O')],
TD('O', out='?'),
),
'logical_and':
Ufunc(2, 1, True_,
docstrings.get('numpy.core.umath.logical_and'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(nodatetime_or_obj, out='?', simd=[('avx2', ints)]),
TD(O, f='npy_ObjectLogicalAnd'),
TD(O, f='npy_ObjectLogicalAnd', out='?'),
),
'logical_not':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.logical_not'),
None,
TD(nodatetime_or_obj, out='?', simd=[('avx2', ints)]),
TD(O, f='npy_ObjectLogicalNot'),
TD(O, f='npy_ObjectLogicalNot', out='?'),
),
'logical_or':
Ufunc(2, 1, False_,
docstrings.get('numpy.core.umath.logical_or'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(nodatetime_or_obj, out='?', simd=[('avx2', ints)]),
TD(O, f='npy_ObjectLogicalOr'),
TD(O, f='npy_ObjectLogicalOr', out='?'),
),
'logical_xor':
Ufunc(2, 1, False_,
docstrings.get('numpy.core.umath.logical_xor'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(nodatetime_or_obj, out='?'),
TD(P, f='logical_xor'),
),
'maximum':
Ufunc(2, 1, ReorderableNone,
docstrings.get('numpy.core.umath.maximum'),
'PyUFunc_SimpleUniformOperationTypeResolver',
TD(noobj, simd=[('avx512f', 'fd')]),
TD(O, f='npy_ObjectMax')
),
'minimum':
Ufunc(2, 1, ReorderableNone,
docstrings.get('numpy.core.umath.minimum'),
'PyUFunc_SimpleUniformOperationTypeResolver',
TD(noobj, simd=[('avx512f', 'fd')]),
TD(O, f='npy_ObjectMin')
),
'clip':
Ufunc(3, 1, ReorderableNone,
docstrings.get('numpy.core.umath.clip'),
'PyUFunc_SimpleUniformOperationTypeResolver',
TD(noobj),
[TypeDescription('O', 'npy_ObjectClip', 'OOO', 'O')]
),
'fmax':
Ufunc(2, 1, ReorderableNone,
docstrings.get('numpy.core.umath.fmax'),
'PyUFunc_SimpleUniformOperationTypeResolver',
TD(noobj),
TD(O, f='npy_ObjectMax')
),
'fmin':
Ufunc(2, 1, ReorderableNone,
docstrings.get('numpy.core.umath.fmin'),
'PyUFunc_SimpleUniformOperationTypeResolver',
TD(noobj),
TD(O, f='npy_ObjectMin')
),
'logaddexp':
Ufunc(2, 1, MinusInfinity,
docstrings.get('numpy.core.umath.logaddexp'),
None,
TD(flts, f="logaddexp", astype={'e':'f'})
),
'logaddexp2':
Ufunc(2, 1, MinusInfinity,
docstrings.get('numpy.core.umath.logaddexp2'),
None,
TD(flts, f="logaddexp2", astype={'e':'f'})
),
'bitwise_and':
Ufunc(2, 1, AllOnes,
docstrings.get('numpy.core.umath.bitwise_and'),
None,
TD(bints, simd=[('avx2', ints)]),
TD(O, f='PyNumber_And'),
),
'bitwise_or':
Ufunc(2, 1, Zero,
docstrings.get('numpy.core.umath.bitwise_or'),
None,
TD(bints, simd=[('avx2', ints)]),
TD(O, f='PyNumber_Or'),
),
'bitwise_xor':
Ufunc(2, 1, Zero,
docstrings.get('numpy.core.umath.bitwise_xor'),
None,
TD(bints, simd=[('avx2', ints)]),
TD(O, f='PyNumber_Xor'),
),
'invert':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.invert'),
None,
TD(bints, simd=[('avx2', ints)]),
TD(O, f='PyNumber_Invert'),
),
'left_shift':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.left_shift'),
None,
TD(ints, simd=[('avx2', ints)]),
TD(O, f='PyNumber_Lshift'),
),
'right_shift':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.right_shift'),
None,
TD(ints, simd=[('avx2', ints)]),
TD(O, f='PyNumber_Rshift'),
),
'heaviside':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.heaviside'),
None,
TD(flts, f='heaviside', astype={'e':'f'}),
),
'degrees':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.degrees'),
None,
TD(fltsP, f='degrees', astype={'e':'f'}),
),
'rad2deg':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.rad2deg'),
None,
TD(fltsP, f='rad2deg', astype={'e':'f'}),
),
'radians':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.radians'),
None,
TD(fltsP, f='radians', astype={'e':'f'}),
),
'deg2rad':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.deg2rad'),
None,
TD(fltsP, f='deg2rad', astype={'e':'f'}),
),
'arccos':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arccos'),
None,
TD(inexact, f='acos', astype={'e':'f'}),
TD(P, f='arccos'),
),
'arccosh':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arccosh'),
None,
TD(inexact, f='acosh', astype={'e':'f'}),
TD(P, f='arccosh'),
),
'arcsin':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arcsin'),
None,
TD(inexact, f='asin', astype={'e':'f'}),
TD(P, f='arcsin'),
),
'arcsinh':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arcsinh'),
None,
TD(inexact, f='asinh', astype={'e':'f'}),
TD(P, f='arcsinh'),
),
'arctan':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arctan'),
None,
TD(inexact, f='atan', astype={'e':'f'}),
TD(P, f='arctan'),
),
'arctanh':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arctanh'),
None,
TD(inexact, f='atanh', astype={'e':'f'}),
TD(P, f='arctanh'),
),
'cos':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.cos'),
None,
TD('e', f='cos', astype={'e':'f'}),
TD('f', simd=[('fma', 'f'), ('avx512f', 'f')]),
TD('fdg' + cmplx, f='cos'),
TD(P, f='cos'),
),
'sin':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.sin'),
None,
TD('e', f='sin', astype={'e':'f'}),
TD('f', simd=[('fma', 'f'), ('avx512f', 'f')]),
TD('fdg' + cmplx, f='sin'),
TD(P, f='sin'),
),
'tan':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.tan'),
None,
TD(inexact, f='tan', astype={'e':'f'}),
TD(P, f='tan'),
),
'cosh':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.cosh'),
None,
TD(inexact, f='cosh', astype={'e':'f'}),
TD(P, f='cosh'),
),
'sinh':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.sinh'),
None,
TD(inexact, f='sinh', astype={'e':'f'}),
TD(P, f='sinh'),
),
'tanh':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.tanh'),
None,
TD(inexact, f='tanh', astype={'e':'f'}),
TD(P, f='tanh'),
),
'exp':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.exp'),
None,
TD('e', f='exp', astype={'e':'f'}),
TD('f', simd=[('fma', 'f'), ('avx512f', 'f')]),
TD('d', simd=[('avx512f', 'd')]),
TD('fdg' + cmplx, f='exp'),
TD(P, f='exp'),
),
'exp2':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.exp2'),
None,
TD(inexact, f='exp2', astype={'e':'f'}),
TD(P, f='exp2'),
),
'expm1':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.expm1'),
None,
TD(inexact, f='expm1', astype={'e':'f'}),
TD(P, f='expm1'),
),
'log':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.log'),
None,
TD('e', f='log', astype={'e':'f'}),
TD('f', simd=[('fma', 'f'), ('avx512f', 'f')]),
TD('d', simd=[('avx512f', 'd')]),
TD('fdg' + cmplx, f='log'),
TD(P, f='log'),
),
'log2':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.log2'),
None,
TD(inexact, f='log2', astype={'e':'f'}),
TD(P, f='log2'),
),
'log10':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.log10'),
None,
TD(inexact, f='log10', astype={'e':'f'}),
TD(P, f='log10'),
),
'log1p':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.log1p'),
None,
TD(inexact, f='log1p', astype={'e':'f'}),
TD(P, f='log1p'),
),
'sqrt':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.sqrt'),
None,
TD('e', f='sqrt', astype={'e':'f'}),
TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]),
TD('fdg' + cmplx, f='sqrt'),
TD(P, f='sqrt'),
),
'cbrt':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.cbrt'),
None,
TD(flts, f='cbrt', astype={'e':'f'}),
TD(P, f='cbrt'),
),
'ceil':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.ceil'),
None,
TD('e', f='ceil', astype={'e':'f'}),
TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]),
TD('fdg', f='ceil'),
TD(O, f='npy_ObjectCeil'),
),
'trunc':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.trunc'),
None,
TD('e', f='trunc', astype={'e':'f'}),
TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]),
TD('fdg', f='trunc'),
TD(O, f='npy_ObjectTrunc'),
),
'fabs':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.fabs'),
None,
TD(flts, f='fabs', astype={'e':'f'}),
TD(P, f='fabs'),
),
'floor':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.floor'),
None,
TD('e', f='floor', astype={'e':'f'}),
TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]),
TD('fdg', f='floor'),
TD(O, f='npy_ObjectFloor'),
),
'rint':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.rint'),
None,
TD('e', f='rint', astype={'e':'f'}),
TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]),
TD('fdg' + cmplx, f='rint'),
TD(P, f='rint'),
),
'arctan2':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.arctan2'),
None,
TD(flts, f='atan2', astype={'e':'f'}),
TD(P, f='arctan2'),
),
'remainder':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.remainder'),
'PyUFunc_RemainderTypeResolver',
TD(intflt),
[TypeDescription('m', FullTypeDescr, 'mm', 'm')],
TD(O, f='PyNumber_Remainder'),
),
'divmod':
Ufunc(2, 2, None,
docstrings.get('numpy.core.umath.divmod'),
'PyUFunc_DivmodTypeResolver',
TD(intflt),
[TypeDescription('m', FullTypeDescr, 'mm', 'qm')],
# TD(O, f='PyNumber_Divmod'), # gh-9730
),
'hypot':
Ufunc(2, 1, Zero,
docstrings.get('numpy.core.umath.hypot'),
None,
TD(flts, f='hypot', astype={'e':'f'}),
TD(P, f='hypot'),
),
'isnan':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.isnan'),
'PyUFunc_IsFiniteTypeResolver',
TD(noobj, simd=[('avx512_skx', 'fd')], out='?'),
),
'isnat':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.isnat'),
'PyUFunc_IsNaTTypeResolver',
TD(times, out='?'),
),
'isinf':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.isinf'),
'PyUFunc_IsFiniteTypeResolver',
TD(noobj, simd=[('avx512_skx', 'fd')], out='?'),
),
'isfinite':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.isfinite'),
'PyUFunc_IsFiniteTypeResolver',
TD(noobj, simd=[('avx512_skx', 'fd')], out='?'),
),
'signbit':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.signbit'),
None,
TD(flts, simd=[('avx512_skx', 'fd')], out='?'),
),
'copysign':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.copysign'),
None,
TD(flts),
),
'nextafter':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.nextafter'),
None,
TD(flts),
),
'spacing':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.spacing'),
None,
TD(flts),
),
'modf':
Ufunc(1, 2, None,
docstrings.get('numpy.core.umath.modf'),
None,
TD(flts),
),
'ldexp' :
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.ldexp'),
None,
[TypeDescription('e', None, 'ei', 'e'),
TypeDescription('f', None, 'fi', 'f', simd=['avx512_skx']),
TypeDescription('e', FuncNameSuffix('long'), 'el', 'e'),
TypeDescription('f', FuncNameSuffix('long'), 'fl', 'f'),
TypeDescription('d', None, 'di', 'd', simd=['avx512_skx']),
TypeDescription('d', FuncNameSuffix('long'), 'dl', 'd'),
TypeDescription('g', None, 'gi', 'g'),
TypeDescription('g', FuncNameSuffix('long'), 'gl', 'g'),
],
),
'frexp' :
Ufunc(1, 2, None,
docstrings.get('numpy.core.umath.frexp'),
None,
[TypeDescription('e', None, 'e', 'ei'),
TypeDescription('f', None, 'f', 'fi', simd=['avx512_skx']),
TypeDescription('d', None, 'd', 'di', simd=['avx512_skx']),
TypeDescription('g', None, 'g', 'gi'),
],
),
'gcd' :
Ufunc(2, 1, Zero,
docstrings.get('numpy.core.umath.gcd'),
"PyUFunc_SimpleUniformOperationTypeResolver",
TD(ints),
TD('O', f='npy_ObjectGCD'),
),
'lcm' :
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.lcm'),
"PyUFunc_SimpleUniformOperationTypeResolver",
TD(ints),
TD('O', f='npy_ObjectLCM'),
),
'matmul' :
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.matmul'),
"PyUFunc_SimpleUniformOperationTypeResolver",
TD(notimes_or_obj),
TD(O),
signature='(n?,k),(k,m?)->(n?,m?)',
),
}
def indent(st, spaces):
indentation = ' '*spaces
indented = indentation + st.replace('\n', '\n'+indentation)
# trim off any trailing spaces
indented = re.sub(r' +$', r'', indented)
return indented
# maps [nin, nout][type] to a suffix
arity_lookup = {
(1, 1): {
'e': 'e_e',
'f': 'f_f',
'd': 'd_d',
'g': 'g_g',
'F': 'F_F',
'D': 'D_D',
'G': 'G_G',
'O': 'O_O',
'P': 'O_O_method',
},
(2, 1): {
'e': 'ee_e',
'f': 'ff_f',
'd': 'dd_d',
'g': 'gg_g',
'F': 'FF_F',
'D': 'DD_D',
'G': 'GG_G',
'O': 'OO_O',
'P': 'OO_O_method',
},
(3, 1): {
'O': 'OOO_O',
}
}
#for each name
# 1) create functions, data, and signature
# 2) fill in functions and data in InitOperators
# 3) add function.
def make_arrays(funcdict):
    # The functions array contains an entry for every type implemented. NULL
    # should be placed where a PyUFunc_ style function will be filled in
    # later.
code1list = []
code2list = []
names = sorted(funcdict.keys())
for name in names:
uf = funcdict[name]
funclist = []
datalist = []
siglist = []
k = 0
sub = 0
for t in uf.type_descriptions:
if t.func_data is FullTypeDescr:
tname = english_upper(chartoname[t.type])
datalist.append('(void *)NULL')
funclist.append(
'%s_%s_%s_%s' % (tname, t.in_, t.out, name))
elif isinstance(t.func_data, FuncNameSuffix):
datalist.append('(void *)NULL')
tname = english_upper(chartoname[t.type])
funclist.append(
'%s_%s_%s' % (tname, name, t.func_data.suffix))
elif t.func_data is None:
datalist.append('(void *)NULL')
tname = english_upper(chartoname[t.type])
funclist.append('%s_%s' % (tname, name))
if t.simd is not None:
for vt in t.simd:
code2list.append(textwrap.dedent("""\
#ifdef HAVE_ATTRIBUTE_TARGET_{ISA}
if (NPY_CPU_HAVE({ISA})) {{
{fname}_functions[{idx}] = {type}_{fname}_{isa};
}}
#endif
""").format(
ISA=vt.upper(), isa=vt,
fname=name, type=tname, idx=k
))
if t.dispatch is not None:
for dname in t.dispatch:
code2list.append(textwrap.dedent("""\
#ifndef NPY_DISABLE_OPTIMIZATION
#include "{dname}.dispatch.h"
#endif
NPY_CPU_DISPATCH_CALL_XB({name}_functions[{k}] = {tname}_{name})
""").format(
dname=dname, name=name, tname=tname, k=k
))
else:
funclist.append('NULL')
try:
thedict = arity_lookup[uf.nin, uf.nout]
except KeyError as e:
raise ValueError(
f"Could not handle {name}[{t.type}] "
f"with nin={uf.nin}, nout={uf.nout}"
) from None
astype = ''
                if t.astype is not None:
astype = '_As_%s' % thedict[t.astype]
astr = ('%s_functions[%d] = PyUFunc_%s%s;' %
(name, k, thedict[t.type], astype))
code2list.append(astr)
if t.type == 'O':
astr = ('%s_data[%d] = (void *) %s;' %
(name, k, t.func_data))
code2list.append(astr)
datalist.append('(void *)NULL')
elif t.type == 'P':
datalist.append('(void *)"%s"' % t.func_data)
else:
astr = ('%s_data[%d] = (void *) %s;' %
(name, k, t.func_data))
code2list.append(astr)
datalist.append('(void *)NULL')
#datalist.append('(void *)%s' % t.func_data)
sub += 1
for x in t.in_ + t.out:
siglist.append('NPY_%s' % (english_upper(chartoname[x]),))
k += 1
funcnames = ', '.join(funclist)
signames = ', '.join(siglist)
datanames = ', '.join(datalist)
code1list.append("static PyUFuncGenericFunction %s_functions[] = {%s};"
% (name, funcnames))
code1list.append("static void * %s_data[] = {%s};"
% (name, datanames))
code1list.append("static char %s_signatures[] = {%s};"
% (name, signames))
return "\n".join(code1list), "\n".join(code2list)
def make_ufuncs(funcdict):
code3list = []
names = sorted(funcdict.keys())
for name in names:
uf = funcdict[name]
mlist = []
docstring = textwrap.dedent(uf.docstring).strip()
docstring = docstring.encode('unicode-escape').decode('ascii')
docstring = docstring.replace(r'"', r'\"')
docstring = docstring.replace(r"'", r"\'")
        # Split the docstring because some compilers (like MS) do not like big
        # string literals in C code. We split at line endings because
        # textwrap.wrap does not play well with \n.
docstring = '\\n\"\"'.join(docstring.split(r"\n"))
if uf.signature is None:
sig = "NULL"
else:
sig = '"{}"'.format(uf.signature)
fmt = textwrap.dedent("""\
identity = {identity_expr};
if ({has_identity} && identity == NULL) {{
return -1;
}}
f = PyUFunc_FromFuncAndDataAndSignatureAndIdentity(
{name}_functions, {name}_data, {name}_signatures, {nloops},
{nin}, {nout}, {identity}, "{name}",
"{doc}", 0, {sig}, identity
);
if ({has_identity}) {{
Py_DECREF(identity);
}}
if (f == NULL) {{
return -1;
}}
""")
args = dict(
name=name, nloops=len(uf.type_descriptions),
nin=uf.nin, nout=uf.nout,
has_identity='0' if uf.identity is None_ else '1',
identity='PyUFunc_IdentityValue',
identity_expr=uf.identity,
doc=docstring,
sig=sig,
)
# Only PyUFunc_None means don't reorder - we pass this using the old
# argument
if uf.identity is None_:
args['identity'] = 'PyUFunc_None'
args['identity_expr'] = 'NULL'
mlist.append(fmt.format(**args))
if uf.typereso is not None:
mlist.append(
r"((PyUFuncObject *)f)->type_resolver = &%s;" % uf.typereso)
mlist.append(r"""PyDict_SetItemString(dictionary, "%s", f);""" % name)
mlist.append(r"""Py_DECREF(f);""")
code3list.append('\n'.join(mlist))
return '\n'.join(code3list)
def make_code(funcdict, filename):
code1, code2 = make_arrays(funcdict)
code3 = make_ufuncs(funcdict)
code2 = indent(code2, 4)
code3 = indent(code3, 4)
code = textwrap.dedent(r"""
/** Warning this file is autogenerated!!!
Please make changes to the code generator program (%s)
**/
#include "ufunc_object.h"
#include "ufunc_type_resolution.h"
#include "loops.h"
#include "matmul.h"
#include "clip.h"
%s
static int
InitOperators(PyObject *dictionary) {
PyObject *f, *identity;
%s
%s
return 0;
}
""") % (filename, code1, code2, code3)
return code
if __name__ == "__main__":
filename = __file__
code = make_code(defdict, filename)
with open('__umath_generated.c', 'w') as fid:
fid.write(code)
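# Illustrative sketch of the registration emitted into __umath_generated.c for
# one ufunc, following the fmt template in make_ufuncs above ("foo", the loop
# count, and the docstring are hypothetical, not real generated output):
#
#   identity = NULL;
#   f = PyUFunc_FromFuncAndDataAndSignatureAndIdentity(
#       foo_functions, foo_data, foo_signatures, 3,
#       1, 1, PyUFunc_None, "foo",
#       "foo docstring", 0, NULL, identity
#   );
#   if (f == NULL) {
#       return -1;
#   }
#   PyDict_SetItemString(dictionary, "foo", f);
#   Py_DECREF(f);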
|
# reference ==>
import taichi as ti
import handy_shader_functions as hsf
import argparse
ti.init(arch = ti.cuda)
res_x = 512
res_y = 512
pixels = ti.Vector.field(3, ti.f32, shape=(res_x, res_y))
@ti.kernel
def render(t:ti.f32):
# draw something on your canvas
for i,j in pixels:
# Set const
        x = (2.0 * i - res_x) / res_y  # horizontal coordinate, roughly [-1.0, 1.0]
        y = (2.0 * j - res_y) / res_y  # vertical coordinate, roughly [-1.0, 1.0]
tau = 3.1415926535*2.0 # 2 pi
        a = ti.atan2(x, y) # polar angle theta
        u = a / tau # theta normalized by 2*pi, roughly [-0.5, 0.5]
        v = (ti.Vector([x, y]).norm()) * 0.75 # radius scaled by 0.75
uv = ti.Vector([u, v]) # (theta, r)
# Set the color
color = ti.Vector([0.25, 0.25, 0.25])
xCol = hsf.mod((uv[0] - (t / 3.0)) * 3.0, 3.0)
if xCol < 1.0:
color[0] += 1.0 - xCol
color[1] += xCol
elif xCol < 2.0:
xCol -= 1.0
color[1] += 1.0 - xCol
color[2] += xCol
else:
xCol -= 2.0
color[2] += 1.0 - xCol
color[0] += xCol
        uv = (2.0 * uv) - 1.0 # remap (theta, r) before computing the beam falloff
beamWidth = (0.7+0.5*ti.cos(uv[0]*10.0*tau*0.15*hsf.clamp(hsf.floor(5.0 + 10.0*ti.cos(t)), 0.0, 10.0))) * ti.abs(1.0 / (30.0 * uv[1]))
horBeam = ti.Vector([beamWidth, beamWidth, beamWidth])
color = color * horBeam
pixels[i,j] = color
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Naive Ray Tracing')
parser.add_argument('--record', action='store_true')
args = parser.parse_args()
if (not args.record):
gui = ti.GUI("Total Noob", res=(res_x, res_y))
for i in range(100000):
t = i * 0.01
render(t)
gui.set_image(pixels)
gui.show()
else:
gui = ti.GUI("Total Noob", res=(res_x, res_y), show_gui = False)
video_manager = ti.VideoManager(output_dir = "./data", framerate = 24, automatic_build=False)
for i in range(0, 100 * 12, 12):
t = i * 0.01
render(t)
video_manager.write_frame(pixels)
print('Exporting .gif')
video_manager.make_video(gif=True, mp4=False)
# print(f'MP4 video is saved to {video_manager.get_output_filename(".mp4")}')
print(f'GIF video is saved to {video_manager.get_output_filename(".gif")}') |
# -*- coding: utf-8 -*-
from cms.admin.dialog.forms import PermissionAndModeratorForm, PermissionForm, ModeratorForm
from cms.models import Page
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.http import Http404, HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
def _form_class_selector():
'''
This replaces the magic that used to happen in forms, where a dynamic
class was generated at runtime. Now it's a bit cleaner...
'''
form_class = None
if settings.CMS_PERMISSION and settings.CMS_MODERATOR:
form_class = PermissionAndModeratorForm
elif settings.CMS_PERMISSION:
form_class = PermissionForm
elif settings.CMS_MODERATOR:
form_class = ModeratorForm
return form_class
@staff_member_required
def get_copy_dialog(request, page_id):
if not (settings.CMS_PERMISSION or settings.CMS_MODERATOR):
return HttpResponse('')
page = get_object_or_404(Page, pk=page_id)
target = get_object_or_404(Page, pk=request.REQUEST['target'])
if not page.has_change_permission(request) or \
not target.has_add_permission(request): # pragma: no cover
raise Http404
context = {
'dialog_id': 'dialog-copy',
        'form': _form_class_selector()(), # class needs to be instantiated
'callback': request.REQUEST['callback'],
}
return render_to_response("admin/cms/page/dialog/copy.html", context)
|
'''
You are given the root of a binary search tree (BST) and an integer val.
Find the node in the BST that the node's value equals val and return the subtree rooted with that node. If such a node does not exist, return null.
'''
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
def searchBST(self, root, val):
"""
:type root: TreeNode
:type val: int
:rtype: TreeNode
"""
        stack = [root] if root else []
while stack:
node = stack.pop()
if node.val == val:
return node
if node.val < val and node.right:
stack.append(node.right)
continue
if node.val > val and node.left:
stack.append(node.left)
continue
if not node.left and not node.right:
return None
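# Minimal usage sketch (not part of the LeetCode harness): TreeNode is defined
# here only for the example, mirroring the commented-out stub above.
if __name__ == "__main__":
    class TreeNode(object):
        def __init__(self, val=0, left=None, right=None):
            self.val = val
            self.left = left
            self.right = right

    #       4
    #      / \
    #     2   7
    #    / \
    #   1   3
    root = TreeNode(4, TreeNode(2, TreeNode(1), TreeNode(3)), TreeNode(7))
    found = Solution().searchBST(root, 2)
    print(found.val if found else None)  # expected output: 2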
|
from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
from api.models.ec.organization import Organization
from api.models.ec.store import Store
from django.core.cache import caches
from django.contrib.sites.models import Site
class SubDomain(models.Model):
class Meta:
app_label = 'api'
ordering = ('name',)
db_table = 'ec_subdomains'
sub_domain_id = models.AutoField(primary_key=True)
name = models.CharField(max_length=127, db_index=True, unique=True, null=True, blank=True,)
organization = models.ForeignKey(Organization, null=True, blank=True, db_index=True)
store = models.ForeignKey(Store, null=True, blank=True, db_index=True)
def __str__(self):
if self.name is None:
return str(self.sub_domain_id)
else:
return str(self.name)
def save(self, *args, **kwargs):
"""
        Override save() to clear the cache whenever a SubDomain is saved.
"""
cache = caches['default']
if cache is not None:
cache.clear()
super(SubDomain, self).save(*args, **kwargs)
def get_absolute_url(self):
"""
When the sitemaps.xml gets generated for the all the URLS, all
returned "Subdomain" objects will have this URL called.
"""
return "/storefront/"+str(self.name)+"/"
|
import unittest
# O(1). Bit manipulation.
class Solution:
def hasAlternatingBits(self, n):
"""
:type n: int
:rtype: bool
"""
prev = n & 1
n >>= 1
while n:
if n & 1 ^ prev:
prev ^= 1
n >>= 1
else:
return False
return True
class Test(unittest.TestCase):
def test(self):
self._test(5, True)
self._test(6, False)
self._test(7, False)
self._test(10, True)
self._test(11, False)
def _test(self, n, expected):
actual = Solution().hasAlternatingBits(n)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
|
#
# MIT License
#
# (C) Copyright 2020-2022 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
'''
A set of routines for creating or reading from an existing timestamp file.
Created on April 27, 2020
@author: jsl
'''
import logging
from datetime import timedelta
from hwsyncagent.cfs.options import hardware_sync_interval
from liveness.timestamp import Timestamp as TimestampBase
LOGGER = logging.getLogger(__name__)
class Timestamp(TimestampBase):
@property
def max_age(self):
"""
The maximum amount of time that can elapse before we consider the timestamp
as invalid. This max_age is defined by the period of time between required
checks to HSM, which is defined by the set of options contained in the API.
This value is returned as a timedelta object.
"""
        interval = 20 + hardware_sync_interval()  # configured sync period plus 20 seconds of slack
computation_time = timedelta(seconds=interval)
return computation_time
|
#!/usr/bin/env python
oracle_home = '/usr/local/ohs'
domain_name = 'base_domain'
domain_home = oracle_home + '/user_projects/domains/' + domain_name
node_manager_name = 'localmachine'
node_manager_listen_address = 'localhost'
node_manager_listen_port = '5556'
node_manager_username = 'ohs'
node_manager_password = 'welcome1'
######################################################################
readTemplate(oracle_home + '/ohs/common/templates/wls/ohs_standalone_template.jar')
cd('/')
create(domain_name, 'SecurityConfiguration')
cd('/SecurityConfiguration/base_domain')
set('NodeManagerUsername', node_manager_username)
set('NodeManagerPasswordEncrypted', node_manager_password)
setOption('NodeManagerType', 'PerDomainNodeManager')
cd('/')
create(node_manager_name, 'Machine')
cd('/Machines/' + node_manager_name)
create(node_manager_name, 'NodeManager')
cd('/Machines/' + node_manager_name + '/NodeManager/' + node_manager_name)
cmo.setListenAddress(node_manager_listen_address)
cmo.setListenPort(int(node_manager_listen_port))
cd('/')
writeDomain(domain_home)
closeTemplate()
exit()
|
(lambda N:(lambda N,A:print(("Average: %.2f\n"%A)+str([n for n in N if len(n)>A])))(N,sum([len(n)for n in N])/len(N)))([n.strip()for n in __import__("sys").stdin])
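# Readable equivalent of the one-liner above, written as a function so it does
# not consume stdin on import (a sketch; assumes one name per input line).
def print_names_longer_than_average(lines):
    names = [line.strip() for line in lines]
    average = sum(len(name) for name in names) / len(names)
    print(("Average: %.2f\n" % average) + str([n for n in names if len(n) > average]))
# Example: print_names_longer_than_average(__import__("sys").stdin)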
|
#!/usr/bin/env python
import random
import numpy as np
from cs224d.data_utils import *
import matplotlib.pyplot as plt
from c5_word2vec import *
from c6_sgd import *
random.seed(314)
dataSet = StanfordSentiment()
tokens = dataSet.tokens()
nWords = len(tokens)
# We are going to train 10-dimensional vectors for this assignment
dimVectors = 10
# Context size
# C = 5
contextSize = 5
# Reset the random seed to make sure that everyone gets the same results
random.seed(31415)
np.random.seed(9265)
wordVectors = np.concatenate(((np.random.rand(nWords, dimVectors) - .5) /
dimVectors, np.zeros((nWords, dimVectors))), axis=0)
wordVectors0 = sgd(
lambda vec: word2vec_sgd_wrapper(skipgram, tokens, vec, dataSet, contextSize, negSamplingCostAndGradient),
wordVectors, 0.3, 4000, None, True, PRINT_EVERY=10)
print "sanity check: cost at convergence should be around or below 10"
wordVectors = (wordVectors0[:nWords, :] + wordVectors0[nWords:, :])
_, wordVectors0, _ = load_saved_params()
wordVectors = (wordVectors0[:nWords, :] + wordVectors0[nWords:, :])
visualizeWords = ["the", "a", "an", ",", ".", "?", "!", "``", "''", "--",
"good", "great", "cool", "brilliant", "wonderful", "well", "amazing",
"worth", "sweet", "enjoyable", "boring", "bad", "waste", "dumb",
"annoying"]
visualizeIdx = [tokens[word] for word in visualizeWords]
visualizeVecs = wordVectors[visualizeIdx, :]
temp = (visualizeVecs - np.mean(visualizeVecs, axis=0))
covariance = 1.0 / len(visualizeIdx) * temp.T.dot(temp)
U, S, V = np.linalg.svd(covariance)
coord = temp.dot(U[:, 0:2])
for i in range(len(visualizeWords)):
plt.text(coord[i, 0], coord[i, 1], visualizeWords[i],
bbox=dict(facecolor='green', alpha=0.1))
plt.xlim((np.min(coord[:, 0]), np.max(coord[:, 0])))
plt.ylim((np.min(coord[:, 1]), np.max(coord[:, 1])))
plt.savefig('q3_word_vectors.png')
plt.show()
|
"""Implementation of treadmill admin aws role.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import json
import logging
import click
from treadmill import cli
from treadmill import exc
from treadmill_aws import awscontext
from treadmill_aws import cli as aws_cli
from treadmill_aws import iamclient
_LOGGER = logging.getLogger(__name__)
_LOGGER.setLevel(logging.INFO)
def _user_arn(account, user):
return 'arn:aws:iam::{}:user/{}'.format(account, user)
def _saml_provider_arn(account, provider):
return 'arn:aws:iam::{}:saml-provider/{}'.format(account, provider)
def _generate_trust_document(trusted_entities):
"""Default role policy."""
account = awscontext.GLOBAL.sts.get_caller_identity().get('Account')
statements = []
principals = []
saml_providers = []
services = []
for entity in trusted_entities:
if entity == 'root':
principals.append('arn:aws:iam::{}:root'.format(account))
continue
if entity.startswith('user:'):
parts = entity.split(':')
principals.append(_user_arn(account, parts[1]))
continue
if entity.startswith('saml-provider:'):
parts = entity.split(':')
saml_providers.append(_saml_provider_arn(account, parts[1]))
continue
if entity.startswith('service:'):
parts = entity.split(':')
services.append(parts[1])
continue
raise click.UsageError('Invalid syntax for trusted entity [%s]'
% entity)
statements = []
if principals or services:
statement = {
'Action': 'sts:AssumeRole',
'Effect': 'Allow',
'Principal': {}
}
if principals:
statement['Principal']['AWS'] = principals
if services:
statement['Principal']['Service'] = services
statements.append(statement)
if saml_providers:
statement = {
'Action': 'sts:AssumeRoleWithSAML',
'Condition': {
'StringEquals': {
'SAML:aud': 'https://signin.aws.amazon.com/saml'
}
},
'Effect': 'Allow',
'Principal': {
'Federated': saml_providers
}
}
statements.append(statement)
if statements:
policy = {}
policy['Version'] = '2012-10-17'
policy['Statement'] = statements
return json.dumps(policy)
return None
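# Illustrative result of _generate_trust_document (a sketch; the account id
# 123456789012 and the entity list are hypothetical):
#
#   _generate_trust_document(['root', 'service:ec2.amazonaws.com'])
#   -> '{"Version": "2012-10-17",
#        "Statement": [{"Action": "sts:AssumeRole",
#                       "Effect": "Allow",
#                       "Principal": {"AWS": ["arn:aws:iam::123456789012:root"],
#                                     "Service": ["ec2.amazonaws.com"]}}]}'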
def _set_role_policies(iam_conn, role_name, role_policies):
new_pols = []
if role_policies == [':']:
role_policies = []
for pol in role_policies:
policy_name, policy_file = pol.split(':', 2)
new_pols.append(policy_name)
with io.open(policy_file) as f:
policy_document = f.read()
_LOGGER.info('set/updated inline policy: %s', policy_name)
iamclient.put_role_policy(iam_conn,
role_name,
policy_name,
policy_document)
all_pols = iamclient.list_role_policies(iam_conn, role_name)
for policy_name in all_pols:
if policy_name not in new_pols:
_LOGGER.info('removing inline policy: %s', policy_name)
iamclient.delete_role_policy(iam_conn,
role_name,
policy_name)
def _set_attached_policies(iam_conn, role_name, attached_policies):
sts = awscontext.GLOBAL.sts
accountid = sts.get_caller_identity().get('Account')
if attached_policies == [':']:
attached_policies = []
del_pols = {}
for policy in iamclient.list_attached_role_policies(iam_conn,
role_name):
del_pols[policy['PolicyArn']] = 1
new_pols = {}
for policy in attached_policies:
scope, policy_name = policy.split(':', 2)
if scope == 'global':
new_pols['arn:aws:iam::aws:policy/%s' % policy_name] = 1
elif scope == 'local':
pol = 'arn:aws:iam::%s:policy/%s' % (accountid, policy_name)
new_pols[pol] = 1
else:
raise click.UsageError('Invalid policy scope [%s]' % scope)
for policy_arn in del_pols:
if policy_arn not in new_pols:
_LOGGER.info('detaching policy: %s', policy_arn)
iamclient.detach_role_policy(iam_conn,
role_name,
policy_arn)
else:
del new_pols[policy_arn]
for policy_arn in new_pols:
_LOGGER.info('attaching policy: %s', policy_arn)
iamclient.attach_role_policy(iam_conn, role_name, policy_arn)
def _create_role(iam_conn,
role_name,
path,
trust_document,
max_session_duration):
if not max_session_duration:
max_session_duration = 43200
iamclient.create_role(iam_conn,
role_name,
path,
trust_document,
max_session_duration)
# pylint: disable=R0915
def init():
"""Manage IAM roles."""
formatter = cli.make_formatter('aws_role')
@click.group()
def role():
"""Manage IAM roles."""
pass
@role.command()
@click.option('--create',
is_flag=True,
default=False,
help='Create if it does not exist')
@click.option('--path',
default='/',
help='Path for user name.')
@click.option('--max-session-duration',
type=click.IntRange(3600, 43200),
required=False,
help='maximum session duration.')
@click.option('--trust-policy',
required=False,
help='Trust policy (aka assume role policy).')
@click.option('--trusted-entities',
type=cli.LIST,
help='See above for syntax of --trusted-entities.')
@click.option('--inline-policies',
type=cli.LIST,
required=False,
help='Inline role policies, list of '
'<RolePolicyName>:<file>')
@click.option('--attached-policies',
type=cli.LIST,
required=False,
help='Attached policies, list of '
'global:<PolicyName> or local:<PolicyName>')
@click.argument('role_name',
required=True,
callback=aws_cli.sanitize_user_name)
@cli.admin.ON_EXCEPTIONS
def configure(create,
path,
max_session_duration,
trust_policy,
trusted_entities,
inline_policies,
attached_policies,
role_name):
"""Create/configure/get IAM role.
        Arguments for --trusted-entities take one of the forms:\n
* root: : trusted AWS account
* user:<user-name> : trusted IAM user
* saml-provider:<provider-name>: : trusted SAML Provider
* service:<service-name>: : trusted AWS Service
"""
iam_conn = awscontext.GLOBAL.iam
try:
role = iamclient.get_role(iam_conn, role_name)
except exc.NotFoundError:
if not create:
raise
role = None
if trust_policy:
with io.open(trust_policy) as f:
trust_document = f.read()
elif trusted_entities:
trust_document = _generate_trust_document(trusted_entities)
elif create:
raise click.UsageError('Must specify one:\n'
' --trust-policy\n'
                                   ' --trusted-entities')
else:
trust_document = None
if not role:
_create_role(iam_conn,
role_name,
path,
trust_document,
max_session_duration)
else:
if max_session_duration:
iamclient.update_role(iam_conn,
role_name,
max_session_duration)
if trust_document:
iamclient.update_assume_role_policy(iam_conn,
role_name,
trust_document)
if inline_policies:
_set_role_policies(iam_conn, role_name, inline_policies)
if attached_policies:
_set_attached_policies(iam_conn, role_name, attached_policies)
role = iamclient.get_role(iam_conn, role_name)
role['RolePolicies'] = iamclient.list_role_policies(iam_conn,
role_name)
role['AttachedPolicies'] = iamclient.list_attached_role_policies(
iam_conn,
role_name)
cli.out(formatter(role))
@role.command(name='list')
@click.option('--path',
default='/',
help='Path for user name.')
@cli.admin.ON_EXCEPTIONS
def list_roles(path):
"""List IAM roles.
"""
iam_conn = awscontext.GLOBAL.iam
roles = iamclient.list_roles(iam_conn, path)
cli.out(formatter(roles))
@role.command()
@click.option('--force',
is_flag=True,
default=False,
help='Delete role, even is role has policies attached.')
@click.argument('role-name')
@cli.admin.ON_EXCEPTIONS
def delete(force, role_name):
"""Delete IAM role."""
iam_conn = awscontext.GLOBAL.iam
if force:
role_policies = iamclient.list_role_policies(iam_conn, role_name)
for policy in role_policies:
_LOGGER.info('deleting inline policy: %s', policy)
iamclient.delete_role_policy(iam_conn, role_name, policy)
attached_pols = iamclient.list_attached_role_policies(iam_conn,
role_name)
for policy in attached_pols:
_LOGGER.info('detaching policy: %s', policy['PolicyArn'])
iamclient.detach_role_policy(iam_conn,
role_name,
policy['PolicyArn'])
try:
iamclient.delete_role(iam_conn, role_name)
except iam_conn.exceptions.DeleteConflictException:
            raise click.UsageError('Role [%s] has inline or attached policies, '
                                   'use --force to force delete.' % role_name)
del configure
del delete
return role
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from collections import defaultdict
from edb.common.ordered import OrderedSet
class UnresolvedReferenceError(Exception):
pass
class CycleError(Exception):
def __init__(self, msg, path=None):
super().__init__(msg)
self.path = path
def sort(graph, *, return_record=False, allow_unresolved=False):
adj = defaultdict(OrderedSet)
loop_control = defaultdict(OrderedSet)
for item_name, item in graph.items():
if "merge" in item:
for merge in item["merge"]:
if merge in graph:
adj[item_name].add(merge)
elif not allow_unresolved:
raise UnresolvedReferenceError(
'reference to an undefined item {} in {}'.format(
merge, item_name))
if "deps" in item:
for dep in item["deps"]:
if dep in graph:
adj[item_name].add(dep)
elif not allow_unresolved:
raise UnresolvedReferenceError(
'reference to an undefined item {} in {}'.format(
dep, item_name))
if "loop-control" in item:
for ctrl in item["loop-control"]:
if ctrl in graph:
loop_control[item_name].add(ctrl)
elif not allow_unresolved:
raise UnresolvedReferenceError(
'reference to an undefined item {} in {}'.format(
ctrl, item_name))
visiting = OrderedSet()
visited = set()
sorted = []
def visit(item, for_control=False):
if item in visiting:
raise CycleError(
f"dependency cycle between {list(visiting)[1]!r} "
f"and {item!r}",
path=list(visiting)[1:],
)
if item not in visited:
visiting.add(item)
for n in adj[item]:
visit(n)
for n in loop_control[item]:
visit(n, for_control=True)
if not for_control:
sorted.append(item)
visited.add(item)
visiting.remove(item)
for item in graph:
visit(item)
if return_record:
return ((item, graph[item]) for item in sorted)
else:
return (graph[item]["item"] for item in sorted)
def normalize(graph, merger, **merger_kwargs):
merged = {}
for name, item in sort(graph, return_record=True):
merge = item.get("merge")
if merge:
for m in merge:
merger(item["item"], merged[m], **merger_kwargs)
merged.setdefault(name, item["item"])
return merged.values()
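# Minimal usage sketch of sort(): 'a' has no dependencies, 'b' depends on 'a',
# and 'c' depends on 'b', so the items come out as A, B, C. The graph below is
# hypothetical and exists only for this example.
if __name__ == "__main__":
    example_graph = {
        "b": {"item": "B", "deps": ["a"]},
        "a": {"item": "A"},
        "c": {"item": "C", "deps": ["b"]},
    }
    print(list(sort(example_graph)))  # expected: ['A', 'B', 'C']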
|
# Django
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponse
# tools
from comic_app.tools.extract_comic_titles import extract_image_comics
from comic_app.tools.extract_comic_titles import extract_marvel_comics
from comic_app.tools.extract_comic_titles import extract_dc_comics
from comic_app.tools.generate_template import generate
# standard library
import os
# Create your views here.
def index(request):
image_comic_titles, image_comic_images = extract_image_comics()
marvel_comic_titles, marvel_comic_images = extract_marvel_comics()
extract_dc_comics()
comic_titles = image_comic_titles + marvel_comic_titles
comic_images = image_comic_images + marvel_comic_images
generate(comic_titles, comic_images)
return render(request, "comic_app/dashboard.html", None)
|
## @ingroup Components-Energy-Networks
# Battery_Ducted_Fan.py
#
# Created: Sep 2014, M. Vegh
# Modified: Jan 2016, T. MacDonald
# Apr 2019, C. McMillan
# Apr 2021, M. Clarke
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
# package imports
import numpy as np
from .Network import Network
# ----------------------------------------------------------------------
# Network
# ----------------------------------------------------------------------
## @ingroup Components-Energy-Networks
class Battery_Ducted_Fan(Network):
""" Simply connects a battery to a ducted fan, with an assumed motor efficiency
Assumptions:
None
Source:
None
"""
def __defaults__(self):
""" This sets the default values for the network to function.
This network operates slightly different than most as it attaches a propulsor to the net.
Assumptions:
Source:
N/A
Inputs:
None
Outputs:
None
Properties Used:
N/A
"""
self.propulsor = None
self.battery = None
self.motor_efficiency = 0.0
self.tag = 'Battery_Ducted_Fan'
self.number_of_engines = 0.
self.esc = None
self.avionics = None
self.payload = None
self.voltage = None
self.tag = 'Network'
self.generative_design_minimum = 0
# manage process with a driver function
def evaluate_thrust(self,state):
""" Calculate thrust given the current state of the vehicle
Assumptions:
Constant mass batteries
ESC input voltage is constant at max battery voltage
Source:
N/A
Inputs:
state [state()]
Outputs:
results.thrust_force_vector [newtons]
results.vehicle_mass_rate [kg/s]
Properties Used:
Defaulted values
"""
# unpack
conditions = state.conditions
numerics = state.numerics
esc = self.esc
avionics = self.avionics
payload = self.payload
        battery = self.battery
        propulsor = self.propulsor
# Set battery energy
battery.current_energy = conditions.propulsion.battery_energy
battery.pack_temperature = conditions.propulsion.battery_pack_temperature
battery.cell_charge_throughput = conditions.propulsion.battery_cell_charge_throughput
battery.age = conditions.propulsion.battery_cycle_day
battery.R_growth_factor = conditions.propulsion.battery_resistance_growth_factor
battery.E_growth_factor = conditions.propulsion.battery_capacity_fade_factor
# Calculate ducted fan power
results = propulsor.evaluate_thrust(state)
propulsive_power = np.reshape(results.power, (-1,1))
motor_power = propulsive_power/self.motor_efficiency
# Run the ESC
esc.inputs.voltagein = self.voltage
esc.voltageout(conditions)
esc.inputs.currentout = motor_power/esc.outputs.voltageout
esc.currentin(conditions)
esc_power = esc.inputs.voltagein*esc.outputs.currentin
# Run the avionics
avionics.power()
# Run the payload
payload.power()
# Calculate avionics and payload power
avionics_payload_power = avionics.outputs.power + payload.outputs.power
# Calculate avionics and payload current
avionics_payload_current = avionics_payload_power/self.voltage
# link to the battery
battery.inputs.current = esc.outputs.currentin + avionics_payload_current
battery.inputs.power_in = -(esc_power + avionics_payload_power)
battery.energy_calc(numerics)
# No mass gaining batteries
mdot = np.zeros(np.shape(conditions.freestream.velocity))
# Pack the conditions for outputs
current = esc.outputs.currentin
battery_power_draw = battery.inputs.power_in
battery_energy = battery.current_energy
voltage_open_circuit = battery.voltage_open_circuit
conditions.propulsion.current = current
conditions.propulsion.battery_power_draw = battery_power_draw
conditions.propulsion.battery_energy = battery_energy
conditions.propulsion.battery_voltage_open_circuit = voltage_open_circuit
results.vehicle_mass_rate = mdot
return results
__call__ = evaluate_thrust
|
class WindowError(Exception):
"""
Base exception for errors thrown by the window framework.
    Ideally, this could be used to catch every error raised by this library.
This does not include errors raised by the underlying graphics backend.
"""
pass |
import numpy as np
from matplotlib import pyplot as plt
def plot_diagrams(
diagrams,
plot_only=None,
title=None,
xy_range=None,
labels=None,
colormap="default",
size=20,
ax_color=np.array([0.0, 0.0, 0.0]),
diagonal=True,
lifetime=False,
legend=True,
show=False,
ax=None,
torus_colors=[],
lw=2.5,
cs=["#1f77b4", "#ff7f0e", "#2ca02c", "#d62728"],
):
ax = ax or plt.gca()
plt.style.use(colormap)
xlabel, ylabel = "Birth", "Death"
if labels is None:
# Provide default labels for diagrams if using self.dgm_
labels = [
"$H_0$",
"$H_1$",
"$H_2$",
"$H_3$",
"$H_4$",
"$H_5$",
"$H_6$",
"$H_7$",
"$H_8$",
]
if not isinstance(diagrams, list):
# Must have diagrams as a list for processing downstream
diagrams = [diagrams]
    if plot_only is None:
        # Default: keep every diagram; plot_only also serves as the color index below.
        plot_only = list(range(len(diagrams)))
    else:
        diagrams = [diagrams[i] for i in plot_only]
        labels = [labels[i] for i in plot_only]
if not isinstance(labels, list):
labels = [labels] * len(diagrams)
# Construct copy with proper type of each diagram
# so we can freely edit them.
diagrams = [dgm.astype(np.float32, copy=True) for dgm in diagrams]
aspect = "equal"
# find min and max of all visible diagrams
concat_dgms = np.concatenate(diagrams).flatten()
has_inf = np.any(np.isinf(concat_dgms))
finite_dgms = concat_dgms[np.isfinite(concat_dgms)]
if not xy_range:
# define bounds of diagram
ax_min, ax_max = np.min(finite_dgms), np.max(finite_dgms)
x_r = ax_max - ax_min
# Give plot a nice buffer on all sides.
        # x_r == 0 when there is only one point
        buffer = 1 if x_r == 0 else x_r / 5
x_down = ax_min - buffer / 2
x_up = ax_max + buffer
y_down, y_up = x_down, x_up
else:
x_down, x_up, y_down, y_up = xy_range
yr = y_up - y_down
if lifetime:
# Don't plot landscape and diagonal at the same time.
diagonal = False
# reset y axis so it doesn't go much below zero
y_down = -yr * 0.05
y_up = y_down + yr
# set custom ylabel
ylabel = "Lifetime"
# set diagrams to be (x, y-x)
for dgm in diagrams:
dgm[:, 1] -= dgm[:, 0]
# plot horizon line
# ax.plot([x_down, x_up], [0, 0], c=ax_color)
# Plot diagonal
if diagonal:
ax.plot([x_down, x_up], [x_down, x_up], "--", c=ax_color)
# Plot inf line
if has_inf:
# put inf line slightly below top
b_inf = y_down + yr * 0.95
# convert each inf in each diagram with b_inf
for dgm in diagrams:
dgm[np.isinf(dgm)] = b_inf
# Plot each diagram
i = 0
for dgm, label in zip(diagrams, labels):
c = cs[plot_only[i]]
# plot persistence pairs
ax.scatter(dgm[:, 0], dgm[:, 1], size, label=label, edgecolor="none", c=c)
i += 1
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if len(torus_colors) > 0:
# births1 = diagrams[1][:, 0] # the time of birth for the 1-dim classes
deaths1 = diagrams[1][:, 1] # the time of death for the 1-dim classes
deaths1[np.isinf(deaths1)] = 0
# lives1 = deaths1-births1
# inds1 = np.argsort(lives1)
inds1 = np.argsort(deaths1)
ax.scatter(
diagrams[1][inds1[-1], 0],
diagrams[1][inds1[-1], 1],
10 * size,
linewidth=lw,
edgecolor=torus_colors[0],
facecolor="none",
)
ax.scatter(
diagrams[1][inds1[-2], 0],
diagrams[1][inds1[-2], 1],
10 * size,
linewidth=lw,
edgecolor=torus_colors[1],
facecolor="none",
)
# births2 = diagrams[2][
# :,
# ] # the time of birth for the 1-dim classes
deaths2 = diagrams[2][:, 1] # the time of death for the 1-dim classes
deaths2[np.isinf(deaths2)] = 0
# lives2 = deaths2-births2
# inds2 = np.argsort(lives2)
inds2 = np.argsort(deaths2)
# print(lives2, births2[inds2[-1]],deaths2[inds2[-1]], diagrams[2][inds2[-1], 0], diagrams[2][inds2[-1], 1])
ax.scatter(
diagrams[2][inds2[-1], 0],
diagrams[2][inds2[-1], 1],
10 * size,
linewidth=lw,
edgecolor=torus_colors[2],
facecolor="none",
)
ax.set_xlim([x_down, x_up])
ax.set_ylim([y_down, y_up])
ax.set_aspect(aspect, "box")
if title is not None:
ax.set_title(title)
if legend is True:
ax.legend(loc="upper right")
if show is True:
plt.show()
return ax
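# Minimal usage sketch with synthetic data: one H0 and one H1 diagram, each an
# (n, 2) array of (birth, death) pairs; plot_only picks both by index.
if __name__ == "__main__":
    h0 = np.array([[0.0, 0.6], [0.0, np.inf]])
    h1 = np.array([[0.3, 0.5]])
    plot_diagrams([h0, h1], plot_only=[0, 1], title="toy diagrams", show=True)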
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This test module contains the tests for the `aea scaffold protocol` sub-command."""
import filecmp
import json
import os
import shutil
import tempfile
import unittest.mock
from pathlib import Path
import jsonschema
from jsonschema import Draft4Validator, ValidationError
import yaml
import aea.cli.common
import aea.configurations.base
from aea import AEA_DIR
from aea.cli import cli
from aea.configurations.base import DEFAULT_PROTOCOL_CONFIG_FILE
from ...common.click_testing import CliRunner
from ...conftest import (
CLI_LOG_OPTION,
CONFIGURATION_SCHEMA_DIR,
PROTOCOL_CONFIGURATION_SCHEMA,
)
class TestScaffoldProtocol:
"""Test that the command 'aea scaffold protocol' works correctly in correct preconditions."""
@classmethod
def setup_class(cls):
"""Set the test up."""
cls.runner = CliRunner()
cls.agent_name = "myagent"
cls.resource_name = "myresource"
cls.cwd = os.getcwd()
cls.t = tempfile.mkdtemp()
cls.patch = unittest.mock.patch.object(aea.cli.common.logger, "error")
cls.mocked_logger_error = cls.patch.__enter__()
cls.schema = json.load(open(PROTOCOL_CONFIGURATION_SCHEMA))
cls.resolver = jsonschema.RefResolver(
"file://{}/".format(Path(CONFIGURATION_SCHEMA_DIR).absolute()), cls.schema
)
cls.validator = Draft4Validator(cls.schema, resolver=cls.resolver)
os.chdir(cls.t)
result = cls.runner.invoke(
cli, [*CLI_LOG_OPTION, "create", cls.agent_name], standalone_mode=False
)
assert result.exit_code == 0
os.chdir(cls.agent_name)
# scaffold protocol
cls.result = cls.runner.invoke(
cli,
[*CLI_LOG_OPTION, "scaffold", "protocol", cls.resource_name],
standalone_mode=False,
)
def test_exit_code_equal_to_0(self):
"""Test that the exit code is equal to 0."""
assert self.result.exit_code == 0
def test_resource_folder_contains_module_message(self):
"""Test that the resource folder contains scaffold message.py module."""
p = Path(self.t, self.agent_name, "protocols", self.resource_name, "message.py")
original = Path(AEA_DIR, "protocols", "scaffold", "message.py")
assert filecmp.cmp(p, original)
def test_resource_folder_contains_module_protocol(self):
"""Test that the resource folder contains scaffold protocol.py module."""
p = Path(
self.t, self.agent_name, "protocols", self.resource_name, "serialization.py"
)
original = Path(AEA_DIR, "protocols", "scaffold", "serialization.py")
assert filecmp.cmp(p, original)
def test_resource_folder_contains_configuration_file(self):
"""Test that the resource folder contains a good configuration file."""
p = Path(
self.t,
self.agent_name,
"protocols",
self.resource_name,
DEFAULT_PROTOCOL_CONFIG_FILE,
)
config_file = yaml.safe_load(open(p))
self.validator.validate(instance=config_file)
@classmethod
def teardown_class(cls):
"""Tear the test down."""
os.chdir(cls.cwd)
try:
shutil.rmtree(cls.t)
except (OSError, IOError):
pass
class TestScaffoldProtocolFailsWhenDirectoryAlreadyExists:
"""Test that the command 'aea scaffold protocol' fails when a folder with 'scaffold' name already."""
@classmethod
def setup_class(cls):
"""Set the test up."""
cls.runner = CliRunner()
cls.agent_name = "myagent"
cls.resource_name = "myresource"
cls.cwd = os.getcwd()
cls.t = tempfile.mkdtemp()
cls.patch = unittest.mock.patch.object(aea.cli.common.logger, "error")
cls.mocked_logger_error = cls.patch.__enter__()
os.chdir(cls.t)
result = cls.runner.invoke(
cli, [*CLI_LOG_OPTION, "create", cls.agent_name], standalone_mode=False
)
assert result.exit_code == 0
os.chdir(cls.agent_name)
# create a dummy 'myresource' folder
Path(cls.t, cls.agent_name, "protocols", cls.resource_name).mkdir(
exist_ok=False, parents=True
)
cls.result = cls.runner.invoke(
cli,
[*CLI_LOG_OPTION, "scaffold", "protocol", cls.resource_name],
standalone_mode=False,
)
def test_exit_code_equal_to_1(self):
"""Test that the exit code is equal to 1 (i.e. catchall for general errors)."""
assert self.result.exit_code == 1
def test_error_message_protocol_already_existing(self):
"""Test that the log error message is fixed.
The expected message is: 'A protocol with name '{protocol_name}' already exists. Aborting...'
"""
s = "A protocol with this name already exists. Please choose a different name and try again."
self.mocked_logger_error.assert_called_once_with(s)
def test_resource_directory_exists(self):
"""Test that the resource directory still exists.
This means that after every failure, we make sure we restore the previous state.
"""
assert Path(self.t, self.agent_name, "protocols", self.resource_name).exists()
@classmethod
def teardown_class(cls):
"""Tear the test down."""
os.chdir(cls.cwd)
try:
shutil.rmtree(cls.t)
except (OSError, IOError):
pass
class TestScaffoldProtocolFailsWhenProtocolAlreadyExists:
"""Test that the command 'aea add protocol' fails when the protocol already exists."""
@classmethod
def setup_class(cls):
"""Set the test up."""
cls.runner = CliRunner()
cls.agent_name = "myagent"
cls.resource_name = "myresource"
cls.cwd = os.getcwd()
cls.t = tempfile.mkdtemp()
cls.patch = unittest.mock.patch.object(aea.cli.common.logger, "error")
cls.mocked_logger_error = cls.patch.__enter__()
os.chdir(cls.t)
result = cls.runner.invoke(
cli, [*CLI_LOG_OPTION, "create", cls.agent_name], standalone_mode=False
)
assert result.exit_code == 0
os.chdir(cls.agent_name)
# add protocol first time
result = cls.runner.invoke(
cli,
[*CLI_LOG_OPTION, "scaffold", "protocol", cls.resource_name],
standalone_mode=False,
)
assert result.exit_code == 0
# scaffold protocol with the same protocol name
cls.result = cls.runner.invoke(
cli,
[*CLI_LOG_OPTION, "scaffold", "protocol", cls.resource_name],
standalone_mode=False,
)
def test_exit_code_equal_to_1(self):
"""Test that the exit code is equal to 1 (i.e. catchall for general errors)."""
assert self.result.exit_code == 1
def test_error_message_protocol_already_existing(self):
"""Test that the log error message is fixed.
The expected message is: 'A protocol with name '{protocol_name}' already exists. Aborting...'
"""
s = "A protocol with name '{}' already exists. Aborting...".format(
self.resource_name
)
self.mocked_logger_error.assert_called_once_with(s)
def test_resource_directory_exists(self):
"""Test that the resource directory still exists.
This means that after every failure, we make sure we restore the previous state.
"""
assert Path(self.t, self.agent_name, "protocols", self.resource_name).exists()
@classmethod
def teardown_class(cls):
"""Tear the test down."""
os.chdir(cls.cwd)
try:
shutil.rmtree(cls.t)
except (OSError, IOError):
pass
class TestScaffoldProtocolFailsWhenConfigFileIsNotCompliant:
"""Test that the command 'aea scaffold protocol' fails when the configuration file is not compliant with the schema."""
@classmethod
def setup_class(cls):
"""Set the test up."""
cls.runner = CliRunner()
cls.agent_name = "myagent"
cls.resource_name = "myresource"
cls.cwd = os.getcwd()
cls.t = tempfile.mkdtemp()
cls.patch = unittest.mock.patch.object(aea.cli.common.logger, "error")
cls.mocked_logger_error = cls.patch.__enter__()
os.chdir(cls.t)
result = cls.runner.invoke(
cli, [*CLI_LOG_OPTION, "create", cls.agent_name], standalone_mode=False
)
assert result.exit_code == 0
        # patch yaml.safe_dump so that dumping the configuration raises an exception.
cls.patch = unittest.mock.patch(
"yaml.safe_dump", side_effect=ValidationError("test error message")
)
cls.patch.__enter__()
os.chdir(cls.agent_name)
cls.result = cls.runner.invoke(
cli,
[*CLI_LOG_OPTION, "scaffold", "protocol", cls.resource_name],
standalone_mode=False,
)
def test_exit_code_equal_to_1(self):
"""Test that the exit code is equal to 1 (i.e. catchall for general errors)."""
assert self.result.exit_code == 1
def test_configuration_file_not_valid(self):
"""Test that the log error message is fixed.
        The expected message is: 'Error when validating the protocol configuration file.'
"""
self.mocked_logger_error.assert_called_once_with(
"Error when validating the protocol configuration file."
)
def test_resource_directory_does_not_exists(self):
"""Test that the resource directory does not exist.
This means that after every failure, we make sure we restore the previous state.
"""
assert not Path(
self.t, self.agent_name, "protocols", self.resource_name
).exists()
@classmethod
def teardown_class(cls):
"""Tear the test down."""
cls.patch.__exit__()
os.chdir(cls.cwd)
try:
shutil.rmtree(cls.t)
except (OSError, IOError):
pass
class TestScaffoldProtocolFailsWhenExceptionOccurs:
"""Test that the command 'aea scaffold protocol' fails when the configuration file is not compliant with the schema."""
@classmethod
def setup_class(cls):
"""Set the test up."""
cls.runner = CliRunner()
cls.agent_name = "myagent"
cls.resource_name = "myresource"
cls.cwd = os.getcwd()
cls.t = tempfile.mkdtemp()
os.chdir(cls.t)
result = cls.runner.invoke(
cli, [*CLI_LOG_OPTION, "create", cls.agent_name], standalone_mode=False
)
assert result.exit_code == 0
cls.patch = unittest.mock.patch(
"shutil.copytree", side_effect=Exception("unknwon exception")
)
cls.patch.__enter__()
os.chdir(cls.agent_name)
cls.result = cls.runner.invoke(
cli,
[*CLI_LOG_OPTION, "scaffold", "protocol", cls.resource_name],
standalone_mode=False,
)
def test_exit_code_equal_to_1(self):
"""Test that the exit code is equal to 1 (i.e. catchall for general errors)."""
assert self.result.exit_code == 1
def test_resource_directory_does_not_exists(self):
"""Test that the resource directory does not exist.
This means that after every failure, we make sure we restore the previous state.
"""
assert not Path(
self.t, self.agent_name, "protocols", self.resource_name
).exists()
@classmethod
def teardown_class(cls):
"""Tear the test down."""
cls.patch.__exit__()
os.chdir(cls.cwd)
try:
shutil.rmtree(cls.t)
except (OSError, IOError):
pass
|
# Generated by Django 3.2.3 on 2021-06-10 01:48
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('rosters', '0040_roster_settings'),
]
operations = [
migrations.AlterModelOptions(
name='rostersettings',
options={'permissions': (('change_roster', 'Can change rosters'),)},
),
]
|
class Solution:
    def lengthOfLongestSubstring(self, s: str) -> int:
        # Sliding window: dic maps each character to the index just past its
        # last occurrence; l is the left edge of the current window.
        dic = {}
        l, res = 0, 0
        for r in range(len(s)):
            if s[r] in dic:
                # Never move the left edge backwards (handles cases like "tmmzuxt").
                l = max(dic[s[r]], l)
            dic[s[r]] = r + 1
            res = max(res, r - l + 1)
        return res
s = Solution()
print(s.lengthOfLongestSubstring("tmmzuxt"))
|
import argparse
import json
import os
import pickle
import sys
import time
from typing import Iterable, List, Dict
from rlo import config_utils
from rlo.cost_normalizers import available_cost_normalizers
from rlo.dataset_refiner import get_available_dataset_refiners
from rlo import factory
from rlo.extra_plots import extra_plots
from rlo.hybrid_search import MergeHandling
from rlo import utils
from rlo import git_utils
from rlo.utils import FunctionArgs as Args
from rlo.layers import available_aggregations
from rlo.expression_util import NamedExprWithEnv
from rlo.factory import ConfigType
def loss_str(loss: str) -> str:
# Validator for command-line 'loss' argument
if loss in ["huber", "mse"]:
return loss
if loss.startswith("pinball="):
_pinball, tau = loss.split("=")
if not 0.0 <= float(tau) <= 1.0:
raise ValueError("Tau must be between 0 and 1")
return loss
    raise ValueError(f"Unrecognized loss: {loss}")
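# Illustrative sketch (not part of the original module): example inputs for the
# loss_str validator when used as an argparse `type=` callable.
#   loss_str("huber")        -> "huber"
#   loss_str("pinball=0.9")  -> "pinball=0.9"
#   loss_str("pinball=1.5")  -> raises ValueError (tau must be in [0, 1])
#   loss_str("l1")           -> raises ValueError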
# Note there is no way to specify train_exprs, test_exprs or rules here
# - they must come from the scenario or replay_config
general_arguments = [
Args(
"--exprs_per_generation",
type=int,
default=0,
help="Frequency with which to perform evaluation, once per this many training exprs",
),
Args(
"--no_subtree_match_edges",
action="store_false",
dest="use_subtree_match_edges",
help="Do not use connect identical sub expressions with a special edge type",
),
Args(
"--num_propagations",
type=int,
default=10,
help="number of the steps of the dynamics of GNN",
),
Args(
"--nonlinear_messages",
action="store_true",
help="If True, apply nonlinearity before edge-to-vertex message aggregation.",
),
Args(
"--aggregation_over_edge_types", # TODO concatenate and concatenate_by_agg_type not available in torch (yet)
type=str,
default="sum",
choices=available_aggregations,
help="aggregation of all edge_type messages before passing into gru",
),
Args(
"--decoder_readout",
type=str,
choices=["mean", "max", "min", "sum"],
default="mean",
help="How do we aggregate node features before feeding them to the value function.",
),
Args(
"--message_from_sender_receiver", # TODO: Not yet implemented in torch
action="store_true",
help="If true, the message is computed both using the sender and the receiver features.",
),
Args(
"--one_hot_embedding", # TODO: Not yet configurable in torch implementation
action="store_true",
help="use one-hot initial node embedding rather than learned lookup",
),
Args("--hidden_dim", type=int, default=200, help="GNN hidden dimension"),
Args(
"--output_hidden_dim", type=int, default=200, help="output MLP hidden dimension"
),
Args("--gamma", type=float, default=0.1, help="discount factor (not used)"),
Args(
"--max_num_episodes_train",
type=int,
default=4 ** 6,
help="max number of simulation episodes during train search",
),
Args(
"--max_num_episodes_eval",
type=int,
default=100,
help="max number of simulation episodes during eval search",
),
Args(
"--num_positive_examples",
type=int,
default=10,
help="min number of positive episodes",
),
Args(
"--simulation_depth_train",
type=int,
default=None,
help="max depth to simulate during training",
),
Args(
"--simulation_depth_eval",
type=int,
default=None,
help="max depth to simulate during evaluation",
),
Args(
"--maxing",
type=str,
help="algorithm to extract best empirical optimizations from the search tree",
default="accumulator",
choices=factory.maxing_algs.keys(),
),
Args(
"--min_epochs",
type=int,
default=10,
help="Min number of training epochs per generation (looking for epoch with lowest validation loss)",
),
Args(
"--max_epochs",
type=int,
default=None,
help="max number of training epochs per generation (default = limited by patience only)",
),
Args(
"--num_repetitions", type=int, default=8, help="number of training repetitions"
),
Args(
"--graph_state_keep_prob",
type=float,
default=0.5,
help="dropout keep probability for graph state",
),
Args(
"--output_keep_prob",
type=float,
default=0.5,
help="dropout keep probability for output MLP",
),
Args("--cost_normalization", type=str, choices=available_cost_normalizers()),
Args(
"--patience_epochs",
type=int,
default=4,
help="stop training if validation has not improved in this number of epochs",
),
Args(
"--num_generations",
type=int,
default=None,
help="number of generations over expressions",
),
Args(
"--total_train_time",
type=int,
default=None,
help="stop at end of first generation when training time exceeds this (seconds)",
),
Args(
"--num_episode_clusters",
type=int,
default=5,
help="number of clusters of episodes used to build dataset",
),
Args(
"--template_path",
type=str,
help="Path to template .kso file (from rlo directory), e.g. ksc/blas/blas_template.kso",
),
Args(
"--test_on_defs",
type=str,
default=None,
nargs="+",
help="allows to choose which functions from the test set to use",
),
Args(
"--train_on_defs",
type=str,
default=None,
nargs="+",
help="allows to choose which functions from the train set to use",
),
Args(
"--seed_all_reps",
type=int,
help="Seed all repetitions with same seed (usually each rep seeded with its number)",
),
Args(
"--loss",
type=loss_str,
default="huber",
help="types of the losses available. Options are 'huber', 'mse', or "
"strings of the form 'pinball=0.9' where 0.9 is the tau parameter for pinball loss.",
),
Args("--lr", type=float, default=0.0001, help="learning rate"),
Args(
"--grad_clip_value",
type=float,
default=0,
help="Coefficient used for gradient clipping by value. "
"If <=0 (default), the gradients will not be clipped. ",
),
Args("--split", type=float, default=0.9, help="train-validation split parameter"),
Args(
"--value_bin_splits",
type=float,
nargs="+",
default=None,
help="bin splits to use for value distribution plot",
),
Args(
"--time_bin_splits",
type=int,
nargs="+",
default=None,
help="bin splits to use for time distribution plot",
),
Args(
"--episode_bin_splits",
type=int,
nargs="+",
default=None,
help="bin splits to use for episode distribution plot",
),
Args(
"--extra_plots",
type=str,
nargs="*",
default=[],
choices=extra_plots.keys(),
help="Extra plots",
),
Args("--v2", action="store_true", help="use distillator_v2"),
Args("--verbose", action="store_true", help="use distillator_v2 verbose"),
]
dataset_processing_arguments = [
Args(
"--dataset_refiners",
nargs="*",
type=str,
default=["best_across_generations_refiner"],
choices=get_available_dataset_refiners(),
help="Sequence of dataset refiners to use",
),
]
search_algorithm_arguments = [
# The first two will need to be specified somewhere, probably in scenario
Args(
"--train_search",
type=str,
choices=factory.search_algs.keys(),
help="Search algorithm for training.",
),
Args(
"--eval_search",
type=str,
choices=factory.search_algs.keys(),
help="Search algorithm for evaluation.",
),
Args(
"--cost_per_step",
type=float,
default=None,
help="Use cost_per_step in search (0 = just take max over t'<t), for A*/Hybrid/Beam only",
),
Args(
"--max_gnn_train",
type=int,
default=None,
help="Max GNN evaluations in training; for A*/Hybrid/Beam only",
),
Args(
"--max_gnn_eval",
type=int,
default=None,
help="Max GNN evaluations in test; for A*/Hybrid/Beam only",
),
Args(
"--search_batch_size",
type=int,
default=16,
help="Batch size to use for GNN evaluation during search",
),
Args(
"--hybrid_merge_handling",
type=str.upper,
default=MergeHandling.STOP.name,
choices=[m.name for m in MergeHandling],
), # Help string shows uppercase, but parsed insensitively
Args(
"--hybrid_prob_rollout",
type=float,
default=1.0,
help="Probability of rolling out one more step in hybrid search",
),
Args(
"--hybrid_alpha",
type=float,
default=float("inf"),
help="Alpha value for hybrid search",
),
]
# alpha_tuning_arguments are only applicable for the rollout search algorithm
alpha_tuning_arguments = [
Args(
"--alpha_test",
type=float,
default=5.0,
help="Temperature for test runs. This does NOT affect the alpha for training runs.",
),
Args(
"--init_alpha",
type=float,
default=1.0,
help="Temperature for train runs at the first generation. Used in softmax action selection.",
),
Args(
"--alpha_scaling_factor",
type=float,
default=1.1,
help="Alpha for training is multiplied by it on success.",
),
Args(
"--alpha_scaling_factor_fail",
type=float,
default=1.0,
help="Alpha for training is multiplied by it on failure.",
),
]
sparse_gnn_arguments = [
Args(
"--sparse_gnn",
action="store_true",
help="For tensorflow, this flag enables sparse GNN. For Pytorch, sparse GNN is the only option, and this flag is ignored.",
),
Args(
"--tensorflow",
action="store_true",
dest="tensorflow",
help="Use tensorflow implementation. If not specified, default to pytorch.",
),
Args(
"--num_gnn_blocks",
type=int,
default=1,
help="How many GNN blocks to use. Should be a divider of the number of propagations. E.g. if we have --num_propagations=10 and 2 blocks, each block will do 5 propagations. "
"Will use StackedGNNEncoder if set --num_gnn_blocks. Will use old (default) SparseGNNEncoder if set to 1.",
),
Args(
"--stacked_gnn_double_hidden",
action="store_true",
help="If set, each next GNN block of a stack will have its hidden dim doubled. "
"Otherwise apply dimensionality reduction before the GNN output.",
),
Args(
"--max_nodes_per_batch",
type=int,
default=10000,
help="Maximum number of nodes in a sparse GNN batch",
),
]
value_function_arguments = [
# The nargs means that if no --cumsum is specified, returns the default; if cumsum is specified without argument, returns the 'const'.
Args(
"--cumsum",
type=float,
nargs="?",
default=None,
const="inf",
help="Use cumulative sum, with optional alpha value for softplus (else INF => relu)",
),
Args(
"--two_value_func",
type=str,
default=None,
choices=["train"],
help="Whether to use two value functions for training phase",
),
Args(
"--two_value_func_var_frac_train",
type=float,
help="Fraction of variance to add to mean when merging two value functions during training phase.",
),
]
# Fields from legacy configs, and legacy arguments. These will be *rejected* if present.
# Note the "merge-datasets" value in config.json was controlled by command-line argument --no_merge_datasets.
config_argument_deprecations = {
"num_timesteps": "num_propagations",
"merge_datasets": None,
"num_epochs": "num_generations",
"max_depth": "simulation_depth_train",
"total_num_iterations": "min_epochs/max_epochs",
"patience": "patience_epochs",
"nested": None,
"max_num_episodes": "max_num_episodes_train",
"simulation_depth": "simulation_depth_train and --simulation_depth_eval",
}
config_arguments = (
general_arguments
+ dataset_processing_arguments
+ search_algorithm_arguments
+ alpha_tuning_arguments
+ sparse_gnn_arguments
+ value_function_arguments
+ [
Args("--" + k, action="help", help="Do not use; use --{} instead".format(v))
for k, v in config_argument_deprecations.items()
]
)
run_arguments = [
Args(
"scenario",
type=str,
help="Path to scenario or name of .json (potentially old config)",
),
Args("--run_id", type=str, default=None, help="Run ID (best to keep default)"),
Args("--gitlog", type=str, default=git_utils.get_git_revision_short_hash()),
Args("--output_dir", type=str, default="outputs", help="Where models and plots go"),
Args(
"--repetition",
type=int,
default=None,
help="run just a single repetition, without plotting (use with run_id)",
),
Args(
"--force_gpu",
action="store_true",
help="Raise exception if we cannot create model on GPU",
),
Args(
"--force_cpu",
action="store_true",
help="Run on CPU even if GPU is available (useful if CUDA nondeterminism is a problem). Only supported for PyTorch.",
),
Args(
"--gpu_memory_fraction",
type=float,
default=None,
help="Use this fraction of GPU (e.g. 0.5) to allow sharing",
),
Args(
"--save_all_models",
action="store_true",
help="whether to store weights after every generation, or just to store them at the end of training",
),
Args(
"--num_parallel",
type=int,
default=1,
help="number of parallel threads on which to run repetitions",
),
Args(
"--upload_models",
action="store_true",
help="Upload models into the Azure ML workspace",
),
]
run_arguments_no_parallel = [a for a in run_arguments if a.args[0] != "--num_parallel"]
assert len(run_arguments_no_parallel) + 1 == len(run_arguments) # Check we removed it
ray_run_arguments = run_arguments_no_parallel + [
Args(
"--address",
default=None,
type=str,
help="ip_address:port to connect to redis (default is to run internally)",
),
Args("--redis_token", default=None, type=str, help="password to connect to redis"),
Args(
"--log_to_driver",
action="store_true",
help="Log all worker stdout/err to driver",
),
Args(
"--num_gpus",
default=None,
type=int,
help="Override Ray autodetecting number of GPUs (for this node only)",
),
Args(
"--num_cpus",
default=None,
type=int,
help="Override Ray autodetecting number of CPUs (for this node only)",
),
Args(
"--workers_per_gpu",
default=1,
type=int,
help="Tell Ray to put this number of workers (cpu threads+models) on each GPU",
),
Args(
"--test_kill_worker_after_tasks",
default=-1,
type=int,
help="Kill each remote worker after this many tasks (negative => never)",
),
Args(
"--ray_timeout",
default=3600,
type=int,
help="Starting timeout for ray tasks; doubles after each failure to complete",
),
Args(
"--profile_local",
nargs="?",
type=int,
default=0,
help="Run headnode under cprofile, with optional limit/s on local task duration",
),
Args(
"--timeline",
action="store_true",
help="Produce ray_timeline.json and ray_object_transfers.json",
),
]
def make_parser(arguments: List[Args]) -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
for args in arguments:
utils.apply_args(parser.add_argument, args)
return parser
def parse_with_defaults(argument_fields, defaults, cmdline=None, known=False) -> Dict:
parser = make_parser(argument_fields)
parser.set_defaults(**defaults)
if known:
args, _ = parser.parse_known_args(cmdline)
else:
args = parser.parse_args(cmdline)
return vars(args)
def get_scenario_name_and_defaults(scenario_or_config):
""" Given the name or path to a scenario .json, or the path to a previous experiment config,
return the name of the underlying scenario, and the default values for all config_arguments
and scenario-only fields """
if not os.path.isfile(scenario_or_config):
exec_dir = os.path.dirname(sys.argv[0])
scenario_or_config = os.path.join(
exec_dir, "scenarios", f"{scenario_or_config}.json"
)
with open(scenario_or_config) as f:
scenario_dict = json.load(f)
config_argument_fields = set(
arg.args[0][2:] for arg in config_arguments if arg.args[0][:2] == "--"
)
scenario_required_fields = {"train_exprs", "test_exprs", "rules"}
# When replaying a previous experiment, keep the overrides from that (as well as any new)
scenario_optional_fields = {"oracle", "extra_scenario_params"}.union(
config_argument_fields
)
scenario_overrides = config_utils.kwargs_from_config(
scenario_dict,
required_keys=scenario_required_fields,
optional_keys=scenario_optional_fields,
)
if not scenario_overrides.keys().isdisjoint(config_argument_deprecations.keys()):
raise ValueError(
"\n".join(
[
"Key {} has been {}".format(
k, "removed" if v is None else "replaced by {}".format(v)
)
for k, v in config_argument_deprecations.items()
if k in scenario_overrides
]
)
)
# For running from a previous experiment, keep the old (base) scenario value
scenario_name = scenario_dict.get(
"scenario", os.path.splitext(os.path.basename(scenario_or_config))[0]
)
return (
scenario_name,
parse_with_defaults(config_arguments, scenario_overrides, cmdline=[]),
)
def check_overrides(config: ConfigType, defaults: ConfigType):
default_parser = make_parser(config_arguments)
default_parser.set_defaults(**defaults)
default_config = vars(default_parser.parse_args([])) # ignore command line
for k, v in default_config.items():
if config[k] != v:
print(
"Overriding {} with value {} (original value={})".format(
k, config[k], v
)
)
yield (k, config[k], v)
def make_config_for_scenario(
scenario: str,
run_args: List[Args] = run_arguments,
cmdline: Iterable[str] = None,
allow_unknown=False,
) -> ConfigType:
"""Create config for scenario"""
# Now load scenario (/replayed config)
scenario, scenario_defaults = get_scenario_name_and_defaults(scenario)
return make_config(
run_args + config_arguments, scenario, scenario_defaults, cmdline, allow_unknown
)
def make_config(
all_arguments, scenario, scenario_defaults, cmdline=None, allow_unknown=False
) -> ConfigType:
# This time we'll parse all the arguments, but using scenario defaults; these are the values to actually use.
config = parse_with_defaults(
all_arguments, scenario_defaults, cmdline=cmdline, known=allow_unknown
)
config["scenario"] = scenario
# Record those explicitly overridden on the commandline, in the description
config["extra_scenario_params"] = "".join(
[config.get("extra_scenario_params", "")]
+ [
"+{}:{}".format(k, conf_v)
for k, conf_v, _def_v in check_overrides(config, scenario_defaults)
]
)
# We've now computed global defaults, overridden by scenario, overridden by explicit cmdline.
if config["run_id"] is None:
config["run_id"] = "{}_{}_{}".format(
config["scenario"], time.strftime("%Y_%m_%d_%H_%M_%S"), os.getpid()
)
assert config["run_id"] is not None
config["result_save_path"] = os.path.join(
config["output_dir"], "Run_" + config["run_id"]
)
if config["repetition"] is not None:
# Apply the changes to directories etc. of the root config that we expect for the given repetition
config = config_utils.config_for_repetition(
# Base config = with repetition field removed
{k: v for k, v in config.items() if k != "repetition"},
config["repetition"],
)
return config
def _ensure_oracle_reachable(config, expressions: Iterable[NamedExprWithEnv]):
"""Check that all the expressions will have graphable known minimum costs.
Although not essential (we can just not graph the minimum), usually it's
    helpful to know we *will* get a minimum on the graph before we spend hours running a test.
Also updates the config's simulation_depth_train, simulation_depth_eval to ensure the
run can reach the minimum."""
from rlo import best_results
for name, _ in expressions:
# Use str to double-check it'll be reachable from logs
assert best_results.best_cost_for_exp(name, config["rules"]).cost is not None
# Also ensure we will be able to reach the optimal value.
longest_sequence = max(
[
best_results.oracle_sequence(name, config["rules"])
for name, _ in expressions
],
key=len,
)
simulation_depth_required = len(longest_sequence) - 1
for simulation_depth_type in ["simulation_depth_train", "simulation_depth_eval"]:
if config[simulation_depth_type] is None:
config[simulation_depth_type] = simulation_depth_required
elif simulation_depth_required > config[simulation_depth_type]:
raise ValueError(
"Specified {} of {} is not sufficient to reach oracle minumum for {}, must be at least {}".format(
simulation_depth_type,
config[simulation_depth_type],
longest_sequence[0],
simulation_depth_required,
)
)
def check_save_config(config, train_exprs, eval_exprs):
"""Ensures that simulation_depth_train, simulation_depth_eval are sufficient to reach oracle min if known,
and save to disk the config and train.pck."""
if train_exprs and eval_exprs and config["oracle"]:
assert all(e in eval_exprs for e in train_exprs)
_ensure_oracle_reachable(config, eval_exprs)
with utils.open_file_mkdir(
os.path.join(config["result_save_path"], "config.json"), "w"
) as f:
json.dump(config, f, sort_keys=True, indent=2)
if train_exprs:
with open(os.path.join(config["result_save_path"], "train.pck"), "wb") as f:
pickle.dump(train_exprs, f)
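# Illustrative usage sketch (scenario name and cmdline values are placeholders,
# not from the original file): a driver script would typically do
#   config = make_config_for_scenario("my_scenario", cmdline=["--num_generations", "2"])
#   check_save_config(config, train_exprs, eval_exprs)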
|
from django import forms
from django.core.exceptions import ValidationError
from pyparsing import ParseException
from .grammar import parse
class SearchField(forms.CharField):
def clean(self, *args, **kwargs):
value = super().clean(*args, **kwargs)
if not value:
return ""
try:
return parse(value)
except ParseException as e:
raise ValidationError(str(e))
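# Minimal usage sketch (hypothetical form, not part of the original module):
# the field plugs into an ordinary Django form and returns the query object
# produced by .grammar.parse() as its cleaned value.
#
#   class SearchForm(forms.Form):
#       q = SearchField(required=False)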
|
# -*- coding: utf-8 -*-
"""
@author : Wang Meng
@github : https://github.com/tianpangji
@software : PyCharm
@file : permissions.py
@create : 2021/2/14 17:11
"""
import json
from system.models import Permissions
def redis_storage_permissions(redis_conn):
permissions = Permissions.objects.filter(menu=False).values('id', 'path', 'method', 'sign')
    # If no permissions have been configured yet, skip the rest to avoid errors
if not permissions.exists():
return None
permissions_dict = dict()
for permission in permissions:
        # strip invisible (zero-width) characters
method = str(permission.get('method')).replace('\u200b', '')
path = str(permission.get('path')).replace('\u200b', '')
sign = str(permission.get('sign')).replace('\u200b', '')
_id = permission.get('id')
if permissions_dict.get(path):
permissions_dict[path].append({
'method': method,
'sign': sign,
'id': _id,
})
else:
permissions_dict[path] = [{
'method': method,
'sign': sign,
'id': _id,
}]
for key in permissions_dict:
permissions_dict[key] = json.dumps(permissions_dict[key])
redis_conn.hmset('user_permissions_manage', permissions_dict)
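# Illustrative sketch of the resulting Redis layout (values are made up):
# the hash 'user_permissions_manage' maps each path to a JSON-encoded list, e.g.
#   redis_conn.hget('user_permissions_manage', '/api/users/')
#   -> '[{"method": "GET", "sign": "user_view", "id": 1}]'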
|
import os
import matplotlib.pyplot as plt
import imageio
def visualize_ground_truth(mat, size=4.0):
"""
`mat`: (d, d)
"""
plt.rcParams['figure.figsize'] = [size, size]
fig, ax = plt.subplots(1, 1)
ax.matshow(mat, vmin=0, vmax=1)
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
ax.tick_params(axis='both', which='both', length=0)
ax.set_title(r'Ground truth $G^*$', pad=10)
plt.show()
return
def visualize(mats, t, save_path=None, n_cols=7, size=2.5, show=False):
"""
Based on visualization by https://github.com/JannerM/gamma-models/blob/main/gamma/visualization/pendulum.py
`mats` should have shape (N, d, d) and take values in [0,1]
"""
N = mats.shape[0]
n_rows = N // n_cols
if N % n_cols:
n_rows += 1
plt.rcParams['figure.figsize'] = [size * n_cols, size * n_rows]
fig, axes = plt.subplots(n_rows, n_cols)
axes = axes.flatten()
# for j, (ax, mat) in enumerate(zip(axes[:len(mats)], mats)):
for j, ax in enumerate(axes):
if j < len(mats):
# plot matrix of edge probabilities
ax.matshow(mats[j], vmin=0, vmax=1)
ax.tick_params(axis='both', which='both', length=0)
ax.set_title(r'$Z^{('f'{j}'r')}$', pad=3)
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
ax.axis('off')
# save
if save_path is not None:
if not os.path.exists(save_path):
os.makedirs(save_path)
plt.savefig(save_path + f'/img{t}.png')
img = imageio.imread(save_path + f'/img{t}.png')
else:
img = None
if show:
plt.show()
plt.close()
return img
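# Usage sketch (array and path are hypothetical): given a stack of edge-probability
# matrices `mats` of shape (N, d, d), a frame for step t could be rendered with
#   img = visualize(mats, t=0, save_path='frames', n_cols=4, show=False)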
|
#!/usr/bin/env python3
import sys
import unittest
sys.path.append('.')
from logger.transforms.prefix_transform import PrefixTransform
class TestPrefixTransform(unittest.TestCase):
def test_default(self):
transform = PrefixTransform('prefix')
self.assertIsNone(transform.transform(None))
self.assertEqual(transform.transform('foo'), 'prefix foo')
transform = PrefixTransform('prefix', sep='\t')
self.assertEqual(transform.transform('foo'), 'prefix\tfoo')
if __name__ == '__main__':
unittest.main()
|
import json
import re
from datetime import time, date, datetime
from enum import Enum
from typing import Any, Type, Union, List, Dict, Set, Optional, TypeVar, Sized, overload, Callable, cast
from .base import Field, Cleaned
from .errors import ValidationError, ErrorCode
T1 = TypeVar('T1')
T2 = TypeVar('T2')
VT = TypeVar('VT')
Num = Union[int, float]
HashableT = TypeVar('HashableT')
CleanedT = TypeVar('CleanedT', bound=Cleaned)
EnumT = TypeVar('EnumT', bound=Enum)
class StrField(Field[str]):
blank_pattern = re.compile(r'^\s*$')
linebreak_pattern = re.compile(r'(\r\n|\r|\n)')
linebreak_replacement = ' '
default_multiline = False
default_strip = True
def __init__(self,
*,
blank: bool,
multiline: Optional[bool] = None,
strip: Optional[bool] = None,
pattern: Optional[str] = None,
length: Optional[int] = None,
min_length: Optional[int] = None,
max_length: Optional[int] = None,
one_of: Optional[Set[str]] = None):
super().__init__()
self.is_blankable = blank
self.strip = strip
self.pattern = re.compile(pattern) if pattern else None
self.raw_pattern = pattern or ''
self.multiline = multiline
self.length = length
self.min_length = min_length
self.max_length = max_length
self.one_of = one_of
def value_to_str(self, value: Any) -> str:
if isinstance(value, str):
return value
raise TypeError()
def convert(self, value: Any) -> str:
value = self.value_to_str(value)
if (self.default_strip if self.strip is None else self.strip):
value = value.strip()
if not (self.default_multiline
if self.multiline is None else self.multiline):
value = self.linebreak_pattern.sub(
self.linebreak_replacement, value)
return value
def validate(self, value: str):
if self.blank_pattern.match(value):
if self.is_blankable:
# skip other validations for blank value
return
else:
self.raise_validation_error(
value=value,
default_message='This field can not be blank.',
code=ErrorCode.blank)
if self.pattern and not self.pattern.match(value):
self.raise_validation_error(
value=value,
default_message=f'The value must match: {self.raw_pattern}',
code=ErrorCode.pattern)
_validate(value, self, _LENGTH, _ONE_OF)
class BoolField(Field[bool]):
def __init__(self):
super().__init__()
def convert(self, value: Any) -> bool:
return bool(value)
def validate(self, value: bool):
pass
class IntField(Field[int]):
def __init__(self,
*,
lt: Optional[Num] = None,
lte: Optional[Num] = None,
gt: Optional[Num] = None,
gte: Optional[Num] = None,
one_of: Optional[Set[int]] = None):
super().__init__()
self.lt = lt
self.lte = lte
self.gt = gt
self.gte = gte
self.one_of = one_of
def convert(self, value: Any) -> int:
return int(value)
def validate(self, value: int):
_validate(value, self, _COMPARABLE, _ONE_OF)
class FloatField(Field[float]):
def __init__(self,
*,
lt: Optional[Num] = None,
lte: Optional[Num] = None,
gt: Optional[Num] = None,
gte: Optional[Num] = None,
one_of: Optional[Set[float]] = None):
super().__init__()
self.lt = lt
self.lte = lte
self.gt = gt
self.gte = gte
self.one_of = one_of
def convert(self, value: Any) -> float:
return float(value)
def validate(self, value: float):
_validate(value, self, _COMPARABLE, _ONE_OF)
class TimeField(Field[time]):
def __init__(self,
*,
lt: Optional[time] = None,
lte: Optional[time] = None,
gt: Optional[time] = None,
gte: Optional[time] = None,
one_of: Optional[Set[time]] = None):
super().__init__()
self.lt = lt
self.lte = lte
self.gt = gt
self.gte = gte
self.one_of = one_of
def convert(self, value: Any) -> time:
if isinstance(value, time):
return value
elif isinstance(value, datetime):
return value.time()
elif isinstance(value, date):
return time.min
return time.fromisoformat(value)
def validate(self, value: time):
_validate(value, self, _COMPARABLE, _ONE_OF)
class DateField(Field[date]):
def __init__(self,
*,
lt: Optional[date] = None,
lte: Optional[date] = None,
gt: Optional[date] = None,
gte: Optional[date] = None,
one_of: Optional[Set[date]] = None):
super().__init__()
self.lt = lt
self.lte = lte
self.gt = gt
self.gte = gte
self.one_of = one_of
def convert(self, value: Any) -> date:
if isinstance(value, date):
return value
elif isinstance(value, datetime):
return value.date()
elif isinstance(value, (int, float)):
return datetime.fromtimestamp(value).date()
return datetime.fromisoformat(value).date()
def validate(self, value: date):
_validate(value, self, _COMPARABLE, _ONE_OF)
class DatetimeField(Field[datetime]):
def __init__(self,
*,
lt: Optional[datetime] = None,
lte: Optional[datetime] = None,
gt: Optional[datetime] = None,
gte: Optional[datetime] = None,
one_of: Optional[Set[datetime]] = None):
super().__init__()
self.lt = lt
self.lte = lte
self.gt = gt
self.gte = gte
self.one_of = one_of
def convert(self, value: Any) -> datetime:
if isinstance(value, datetime):
return value
elif isinstance(value, date):
return datetime.combine(value, time.min)
elif isinstance(value, (int, float)):
return datetime.fromtimestamp(value)
return datetime.fromisoformat(value)
def validate(self, value: datetime):
_validate(value, self, _COMPARABLE, _ONE_OF)
class EitherField(Field[Union[T1, T2]]):
def __init__(self,
t1: Field[T1],
t2: Field[T2]):
super().__init__()
self.t1 = t1
self.t2 = t2
def convert(self, value: Any) -> Union[T1, T2]:
try:
return self.t1.clean(value)
except Exception:
pass
try:
return self.t2.clean(value)
except Exception:
pass
        raise ValueError(f'Neither {self.t1.__class__.__name__} '
                         f'nor {self.t2.__class__.__name__} '
                         f'can handle `{value}`.')
def validate(self, value: Union[T1, T2]):
pass
class ListField(Field[List[VT]]):
def __init__(self,
value: Field[VT],
*,
length: Optional[int] = None,
min_length: Optional[int] = None,
max_length: Optional[int] = None):
super().__init__()
self.value = value
self.length = length
self.min_length = min_length
self.max_length = max_length
def convert(self, value: Any) -> List[VT]:
if isinstance(value, str):
value = json.loads(value)
value = list(value)
result: List[VT] = []
errors: Dict[str, ValidationError] = {}
for index, item in enumerate(value):
try:
result.append(self.value.clean(item))
except ValidationError as e:
errors[str(index)] = e
if errors:
raise ValidationError(errors)
return result
def validate(self, value: List[VT]):
_validate(value, self, _LENGTH)
class SetField(Field[Set[HashableT]]):
def __init__(self,
value: Field[HashableT],
*,
length: Optional[int] = None,
min_length: Optional[int] = None,
max_length: Optional[int] = None):
super().__init__()
self.value = value
self.length = length
self.min_length = min_length
self.max_length = max_length
def convert(self, value: Any) -> Set[HashableT]:
if isinstance(value, str):
value = json.loads(value)
value = iter(value)
result: Set[HashableT] = set()
error_items: List[ValidationError.Item] = []
for item in value:
try:
result.add(self.value.clean(item))
except ValidationError as e:
error_items.extend(e.items)
if error_items:
raise ValidationError(list(error_items))
return result
def validate(self, value: Set[HashableT]):
_validate(value, self, _LENGTH)
class DictField(Field[Dict[HashableT, VT]]):
key: Field[HashableT]
value: Field[VT]
@overload
def __init__(self: 'DictField[str, VT]',
value: Field[VT],
*,
length: Optional[int] = ...,
min_length: Optional[int] = ...,
max_length: Optional[int] = ...):
pass
@overload
def __init__(self,
value: Field[VT],
key: Field[HashableT],
*,
length: Optional[int] = ...,
min_length: Optional[int] = ...,
max_length: Optional[int] = ...):
pass
def __init__(self,
value,
key=None,
*,
length: Optional[int] = None,
min_length: Optional[int] = None,
max_length: Optional[int] = None):
super().__init__()
self.value = value
self.key = key or StrField(blank=False, multiline=False)
self.length = length
self.min_length = min_length
self.max_length = max_length
def convert(self, value: Any) -> Dict[HashableT, VT]:
if isinstance(value, str):
value = json.loads(value)
value = dict(value)
result: Dict[HashableT, VT] = {}
errors: Dict[str, ValidationError] = {}
for k, v in value.items():
try:
_key = self.key.clean(k)
try:
_value = self.value.clean(v)
result[_key] = _value
except ValidationError as e:
errors[f'{_key}'] = e
except ValidationError as e:
errors[f'{k}:key'] = e
try:
                    self.value.clean(v)
except ValidationError as e:
errors[f'{k}'] = e
if errors:
raise ValidationError(errors)
return result
def validate(self, value: Dict[HashableT, VT]):
_validate(value, self, _LENGTH)
class NestedField(Field[CleanedT]):
def __init__(self,
cleaned: Union[Type[CleanedT], Callable[[], Type[CleanedT]]]):
super().__init__()
if isinstance(cleaned, type) and issubclass(cleaned, Cleaned):
self._server = cast(Callable[[], Type[CleanedT]], lambda: cleaned)
else:
self._server = cast(Callable[[], Type[CleanedT]], cleaned)
def convert(self, value: Any) -> CleanedT:
if isinstance(value, str):
value = json.loads(value)
_type = self._server()
if isinstance(value, Cleaned):
return _type(**value._data)
return _type(**dict(value))
def validate(self, value: CleanedT):
pass
class EnumField(Field[EnumT]):
def __init__(self,
enum: Union[Type[EnumT], Callable[[], Type[EnumT]]]):
super().__init__()
if isinstance(enum, type) and issubclass(enum, Enum):
self._server = cast(Callable[[], Type[EnumT]], lambda: enum)
else:
self._server = cast(Callable[[], Type[EnumT]], enum)
def convert(self, value: Any) -> EnumT:
_type = self._server()
if isinstance(value, _type):
return value
if isinstance(value, str):
try:
return _type[value]
except KeyError:
pass
return _type(value)
def validate(self, value: EnumT):
pass
def _validate_comparable(
value: Any,
field: Field[Any],
lt: Optional[Any],
lte: Optional[Any],
gt: Optional[Any],
gte: Optional[Any]):
if lt is not None and value >= lt:
field.raise_validation_error(
value=value,
default_message=f'The value must be less than {lt}.',
code=ErrorCode.lt)
if lte is not None and value > lte:
field.raise_validation_error(
value=value,
default_message=f'The value must be less than or equal to {lte}.',
code=ErrorCode.lte)
if gt is not None and value <= gt:
field.raise_validation_error(
value=value,
default_message=f'The value must be greater than {gt}.',
code=ErrorCode.gt)
if gte is not None and value < gte:
field.raise_validation_error(
value=value,
default_message='The value '
f'must be greater than or equal to {gte}.',
code=ErrorCode.gte)
def _validate_length(
value: Sized,
field: Field[Sized],
length: Optional[int],
min_length: Optional[int],
max_length: Optional[int]):
if length is not None and len(value) != length:
field.raise_validation_error(
value=value,
default_message='The length of the value '
f'must be equal to {length}.',
code=ErrorCode.length)
if min_length is not None and len(value) < min_length:
field.raise_validation_error(
value=value,
default_message='The length of the value '
f'must be longer than or equal to {min_length}.',
code=ErrorCode.min_length)
if max_length is not None and len(value) > max_length:
field.raise_validation_error(
value=value,
default_message='The length of the value '
f'must be shorter than or equal to {max_length}.',
code=ErrorCode.max_length)
def _validate_one_of(
value: HashableT,
field: Field[HashableT],
one_of: Optional[Set[HashableT]]):
if one_of is not None and value not in one_of:
field.raise_validation_error(
value=value,
            default_message=f'The value must be one of {one_of}',
code=ErrorCode.one_of)
_COMPARABLE = 'comparable'
_LENGTH = 'length'
_ONE_OF = 'one_of'
def _validate(
value: Any,
field: Any,
*methods: str):
for m in methods:
if m == _COMPARABLE:
_validate_comparable(
value=value,
field=field,
lt=field.lt,
lte=field.lte,
gt=field.gt,
gte=field.gte)
elif m == _LENGTH:
_validate_length(
value=value,
field=field,
length=field.length,
min_length=field.min_length,
max_length=field.max_length)
elif m == _ONE_OF:
_validate_one_of(
value=value,
field=field,
one_of=field.one_of)
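# Illustrative sketch (not part of the original module), assuming Field.clean()
# runs convert() followed by validate(), as the container fields above rely on:
#   age = IntField(gte=0, lte=150)
#   age.clean("42")   # -> 42, converted via int() and range-checked
#   age.clean(200)    # -> raises ValidationError with code ErrorCode.lte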
|
from django.contrib import admin
from products.models import *
# Register your models here.
admin.site.register(Product)
admin.site.register(MainProductsCategorie)
admin.site.register(ProductsSubCategorie)
|
# Copyright 2019 Julie Jung <[email protected]>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
import csv
import argparse
import os
import sys
import datetime
import dateutil.parser
import time
import apache_beam as beam
import apache_beam.transforms.window as window
from apache_beam.transforms.combiners import Mean
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
# data gets collected 4 times per hour (every 15 minutes)
DATA_COLLECTION_FREQUENCY = 4
ROWS_PER_DAY = 10 # this is an arbitrary value set for testing
# ROWS_PER_DAY = DATA_COLLECTION_FREQUENCY * 24
SCHEMA_PATH = 'data/processed_data/bq_schemas.txt'
class BQTranslateTransformation:
'''A helper class which contains the logic to translate the file into a
format BigQuery will accept.'''
def __init__(self):
# load_schema taken from json file extracted from processCSV.py
# in a realistic scenario, you won't be able to automate it like this.
# and probably have to manually insert schema
dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
schema_file = os.path.join(dir_path, SCHEMA_PATH)
with open(schema_file) as bq_schema_file:
self.schemas = json.load(bq_schema_file)
self.stream_schema = {'fields': [
{'name': 'window_start',
'type': 'TIMESTAMP',
'mode': 'REQUIRED'},
{'name': 'building_id',
'type': 'INTEGER',
'mode': 'REQUIRED'},
{'name': 'Gen_Avg',
'type': 'INTEGER',
'mode': 'REQUIRED'}]}
def parse_method_load(self, string_input):
'''This method translates a single line of comma separated values to a
dictionary which can be loaded into BigQuery.
Args:
string_input: A comma separated list of values in the form of
timestamp,building id,general meter reading, and variable size of
sub meter readings
ex1)2017-03-31T20:00:00-04:00,1,6443.0,1941.0,40.0
ex2)2017-03-31T20:00:00-04:00,2,5397.0,2590.0
Returns:
A dict mapping BigQuery column names as keys to
the corresponding value parsed from string_input.
Deciding which schema to use by building_id.
The schemas of 8 buildings can be retrieved from bq_schema.txt,
produced by processCSV.py and saved onto self.schemas
ex1)
{'timestamp': '2017-03-31T20:00:00-04:00',
'building_id': 1,
'1_Gen': 6443,
'1_Sub_1': 1941,
'1_Sub_14': 40
}
ex2)
{'timestamp': '2017-03-31T20:00:00-04:00',
'building_id': 2,
'2_Gen': 5397,
'2_Sub_1': 2590
}
'''
row = {}
schema = None
i = 0
values = string_input.split(',')
for value in values:
# if at first column, add the timestamp,
# which is the same format no matter the building
if i == 0:
fieldName = 'timestamp'
# first check building_id, which is always the 2nd column
elif i == 1:
schema = self.schemas[int(value)-1]['fields']
fieldName = 'building_id'
# then retrieve the corresponding schema and then
# match the values with field numbers to add to the dictionary
else:
fieldName = schema[i]['name']
row[fieldName] = value
i += 1
logging.info('passed Row: {}'.format(row))
return row
def parse_method_stream(self, s):
''' Same as parse_method_load(), but for hourly averages
of each sensor, combined to one table
Args:
            s: comma separated string of the start timestamp of the window
            the value belongs to, building id, and main meter reading avg
            ex) '2017-03-31T20:00:00-04:00,1,6443.0'
Returns:
A dict mapping BigQuery column names as keys to
the corresponding value parsed from (k, v).
{'window_start': [time at the start of current window],
'building_id': 1,
'Gen_Avg': 6443}
'''
logging.info('row of average vals in a window: {}'.format(s))
[window_start, building_id, gen_avg] = s.split(',')
row = {'window_start': window_start,
'building_id': int(building_id),
'Gen_Avg': int(round(float(gen_avg)))}
logging.info('passed Row for Streams: {}'.format(row))
return row
class WindowStartTimestampFn(beam.DoFn):
def process(self, element, window=beam.DoFn.WindowParam):
window_start = window.start.to_utc_datetime()
building_id, gen_avg = element
logging.info('startWindow timestamped: {}'.format(window_start))
yield ','.join([str(window_start), building_id, str(gen_avg)])
class KVSplitDoFn(beam.DoFn):
def process(self, s, timestamp=beam.DoFn.WindowParam):
values = s.split(',')
building_id = values[1]
gen_energy = int(float(values[2]))
logging.info('kvSplit: ({},{})'.format(building_id, gen_energy))
yield (building_id, gen_energy)
def run(argv=None, save_main_session=True):
# main function for running the pipeline
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
'--input_topic', dest='input_topic', required=True,
help=('Input PubSub topic of the form '
'"projects/<PROJECT>/topics/<TOPIC>".'))
arg_parser.add_argument(
'--output_load_table_suffix', dest='output_l', required=True,
help=('Output BQ table to write results to (suffix). ' +
'"[datasetID].[tableID]" ' +
'Since we have 8 buildings, each building ' +
'will be loaded on the corresponding table.' +
'ex) given argument, "energy.building", ' +
'building 1\'s data will be loaded in energy.building1'))
arg_parser.add_argument(
'--output_stream_table', dest='output_s', required=True,
help='Output BQ table to write results to. "[datasetID].[tableID]"')
arg_parser.add_argument(
'--output_topic', dest='output_topic', required=True,
        help=('Output PubSub topic name only; the full path ' +
              '"projects/<PROJECT>/topics/<TOPIC>" is built using --project. ' +
              'ex) "energy_stream"'))
arg_parser.add_argument(
'--speedFactor', dest='speedFactor',
required=False, default=300, type=int,
help=('How wide do you want your window (in seconds) ' +
'(Ex) 3600 => 1 hr window'))
# Initiate the pipeline using the pipeline arguments passed in from the
# command line. This includes information like where Dataflow should
# store temp files, and what the project id is.
known_args, pipeline_args = arg_parser.parse_known_args(argv)
options = PipelineOptions(pipeline_args)
p = beam.Pipeline(options=options)
# Require the --project option to access --dataset
if options.view_as(GoogleCloudOptions).project is None:
arg_parser.print_usage()
print(sys.argv[0] + ': error: argument --project is required')
sys.exit(1)
# Use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., module imported at module level).
options.view_as(SetupOptions).save_main_session = save_main_session
rowToBQ = BQTranslateTransformation()
# ingest pubsub messages, extract data, and save to lines
# so it can be used by both batch ingest and stream aggregations
lines = (p | 'ReadFromPubSub' >> beam.io.ReadFromPubSub(
topic=known_args.input_topic).with_output_types(bytes)
| 'ConvertFromBytesToStr' >> beam.Map(
lambda b: b.decode('utf-8')))
# Convert row of str to BQ rows, and load batch data to table
# on a daily basis by setting batch_size to rows per day.
# batch_size is a number of rows to be written to BQ
# per streaming API insert.
rows = (lines | 'StringToBigQueryRowLoad' >> beam.Map(
lambda s: rowToBQ.parse_method_load(s)))
# load_schema taken from json file extracted from processCSV.py
# In a realistic scenario, you won't be able to automate it like this,
# but probably have to manually insert schema
load_schema = rowToBQ.schemas
# filter and load to 8 tables based off of the given table suffix argument
load1 = (rows | 'FilterBuilding1' >> beam.Filter(
lambda row: int(row['building_id']) == 1)
| 'B1BQLoad' >> beam.io.WriteToBigQuery(
table=known_args.output_l + '1',
schema=load_schema[0], batch_size=ROWS_PER_DAY))
load2 = (rows | 'FilterBuilding2' >> beam.Filter(
lambda row: int(row['building_id']) == 2)
| 'B2BQLoad' >> beam.io.WriteToBigQuery(
table=known_args.output_l + '2',
schema=load_schema[1], batch_size=ROWS_PER_DAY))
load3 = (rows | 'FilterBuilding3' >> beam.Filter(
lambda row: int(row['building_id']) == 3)
| 'B3BQLoad' >> beam.io.WriteToBigQuery(
table=known_args.output_l + '3',
schema=load_schema[2], batch_size=ROWS_PER_DAY))
load4 = (rows | 'FilterBuilding4' >> beam.Filter(
lambda row: int(row['building_id']) == 4)
| 'B4BQLoad' >> beam.io.WriteToBigQuery(
table=known_args.output_l + '4',
schema=load_schema[3], batch_size=ROWS_PER_DAY))
load5 = (rows | 'FilterBuilding5' >> beam.Filter(
lambda row: int(row['building_id']) == 5)
| 'B5BQLoad' >> beam.io.WriteToBigQuery(
table=known_args.output_l + '5',
schema=load_schema[4], batch_size=ROWS_PER_DAY))
load6 = (rows | 'FilterBuilding6' >> beam.Filter(
lambda row: int(row['building_id']) == 6)
| 'B6BQLoad' >> beam.io.WriteToBigQuery(
table=known_args.output_l + '6',
schema=load_schema[5], batch_size=ROWS_PER_DAY))
load7 = (rows | 'FilterBuilding7' >> beam.Filter(
lambda row: int(row['building_id']) == 7)
| 'B7BQLoad' >> beam.io.WriteToBigQuery(
table=known_args.output_l + '7',
schema=load_schema[6], batch_size=ROWS_PER_DAY))
load8 = (rows | 'FilterBuilding8' >> beam.Filter(
lambda row: int(row['building_id']) == 8)
| 'B8BQLoad' >> beam.io.WriteToBigQuery(
table=known_args.output_l + '8',
schema=load_schema[7], batch_size=ROWS_PER_DAY))
# stream aggregation pipeline; saved to avgs
# to be used for writing to BigQuery and publishing to Pubsub
    # sliding window of window_size seconds (sliding every half window),
    # with window_size set by --speedFactor
window_size = known_args.speedFactor
avgs = (lines | 'SetTimeWindow' >> beam.WindowInto(
window.SlidingWindows(
window_size, float(window_size)/2))
| 'ByBuilding' >> beam.ParDo(KVSplitDoFn())
| 'GetAvgByBuilding' >> Mean.PerKey()
| 'AddWindowStartTimestamp' >> beam.ParDo(
WindowStartTimestampFn()))
# Convert row of str to BigQuery rows, and append to the BQ table.
(avgs | 'StrToBigQueryRowStream' >> beam.Map(
lambda s: rowToBQ.parse_method_stream(s))
| 'WriteToBigQueryStream' >> beam.io.WriteToBigQuery(
table=known_args.output_s,
schema=rowToBQ.stream_schema,
project=options.view_as(GoogleCloudOptions).project))
# write message to pubsub with a different output_topic
# for users to subscribe to and retrieve real time analysis data
(avgs | 'Encode' >> beam.Map(
lambda x: x.encode('utf-8')).with_output_types(bytes)
| 'PublishToPubSub' >> beam.io.WriteToPubSub(
'projects/{}/topics/{}'.format(
options.view_as(GoogleCloudOptions).project,
known_args.output_topic)))
# nothing will run until this command
p.run()
if __name__ == '__main__':
logging.basicConfig(
format='%(levelname)s: %(message)s', level=logging.INFO)
run()
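# Illustrative invocation sketch (script name, project, topics and tables are
# placeholders, not from the original file):
#   python pipeline.py \
#       --project=my-project --runner=DataflowRunner \
#       --input_topic=projects/my-project/topics/energy_in \
#       --output_load_table_suffix=energy.building \
#       --output_stream_table=energy.building_avgs \
#       --output_topic=energy_stream \
#       --speedFactor=3600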
|
from tqdm import tqdm
from rdkit import Chem
from rdkit import RDLogger
RDLogger.DisableLog('rdApp.*')
from pathlib import Path
def normalize_inchi(inchi):
try:
mol = Chem.MolFromInchi(inchi)
return inchi if (mol is None) else Chem.MolToInchi(mol)
    except Exception: return inchi
# Segfault in rdkit taken care of, run it with:
# while [ 1 ]; do python normalize_inchis.py && break; done
if __name__=='__main__':
# Input & Output
orig_path = Path('submission_LB_322.csv')
norm_path = orig_path.with_name(orig_path.stem+'_norm.csv')
# Do the job
N = norm_path.read_text().count('\n') if norm_path.exists() else 0
print(N, 'number of predictions already normalized')
r = open(str(orig_path), 'r')
w = open(str(norm_path), 'a', buffering=1)
for _ in range(N):
r.readline()
line = r.readline() # this line is the header or is where it segfaulted last time
w.write(line)
for line in tqdm(r):
splits = line[:-1].split(',')
image_id = splits[0]
inchi = ','.join(splits[1:]).replace('"','')
inchi_norm = normalize_inchi(inchi)
w.write(f'{image_id},"{inchi_norm}"\n')
r.close()
    w.close()
|
class Pessoa:
    olhos = 2

    def __init__(self, *filhos, nome=None, idade=35):
        self.idade = idade
        self.nome = nome
        self.filhos = list(filhos)

    def cumprimentar(self):
        return f'Olá, meu nome é {self.nome}'

    @staticmethod
    def metodo_estatico():
        return 42

    @classmethod
    def nome_e_atributos_de_classes(cls):
        return f' {cls} - olhos {cls.olhos}'


class Homem(Pessoa):
    def cumprimentar(self):
        cumprimentar_da_classe = super().cumprimentar()
        return f' {cumprimentar_da_classe} Aperto de mão'


class Mutante(Pessoa):
    olhos = 3


if __name__ == '__main__':
    renzo = Mutante(nome='Renzo')
    luciano = Homem(renzo, nome='Luciano')
    print(Pessoa.cumprimentar(luciano))
    print(id(luciano))
    print(luciano.nome)
    print(luciano.idade)
    for filho in luciano.filhos:
        print(filho.nome)
    luciano.sobrenome = 'Ramanlho'
    del luciano.filhos
    luciano.olhos = 1
    del luciano.olhos
    print(renzo.__dict__)
    print(luciano.__dict__)
    Pessoa.olhos = 3
    print(Pessoa.olhos)
    print(luciano.olhos)
    print(renzo.olhos)
    print(id(Pessoa.olhos), id(luciano.olhos), id(renzo.olhos))
    print(Pessoa.metodo_estatico(), luciano.metodo_estatico())
    print(Pessoa.nome_e_atributos_de_classes(), luciano.nome_e_atributos_de_classes())
    pessoa = Pessoa('Anonimo')
    print(isinstance(pessoa, Pessoa))
    print(isinstance(pessoa, Homem))
    print(isinstance(renzo, Pessoa))
    print(isinstance(renzo, Homem))
    print(renzo.cumprimentar())
    print(luciano.cumprimentar())
|
# NLP written by GAMS Convert at 04/21/18 13:54:05
#
# Equation counts
# Total E G L N X C B
# 253 253 0 0 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 308 308 0 0 0 0 0 0
# FX 4 4 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 1307 404 903 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.x2 = Var(within=Reals,bounds=(0,None),initialize=0.02)
m.x3 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x4 = Var(within=Reals,bounds=(0,None),initialize=0.0384)
m.x5 = Var(within=Reals,bounds=(0,None),initialize=0.0564)
m.x6 = Var(within=Reals,bounds=(0,None),initialize=0.0736)
m.x7 = Var(within=Reals,bounds=(0,None),initialize=0.09)
m.x8 = Var(within=Reals,bounds=(0,None),initialize=0.1056)
m.x9 = Var(within=Reals,bounds=(0,None),initialize=0.1204)
m.x10 = Var(within=Reals,bounds=(0,None),initialize=0.1344)
m.x11 = Var(within=Reals,bounds=(0,None),initialize=0.1476)
m.x12 = Var(within=Reals,bounds=(0,None),initialize=0.16)
m.x13 = Var(within=Reals,bounds=(0,None),initialize=0.1716)
m.x14 = Var(within=Reals,bounds=(0,None),initialize=0.1824)
m.x15 = Var(within=Reals,bounds=(0,None),initialize=0.1924)
m.x16 = Var(within=Reals,bounds=(0,None),initialize=0.2016)
m.x17 = Var(within=Reals,bounds=(0,None),initialize=0.21)
m.x18 = Var(within=Reals,bounds=(0,None),initialize=0.2176)
m.x19 = Var(within=Reals,bounds=(0,None),initialize=0.2244)
m.x20 = Var(within=Reals,bounds=(0,None),initialize=0.2304)
m.x21 = Var(within=Reals,bounds=(0,None),initialize=0.2356)
m.x22 = Var(within=Reals,bounds=(0,None),initialize=0.24)
m.x23 = Var(within=Reals,bounds=(0,None),initialize=0.2436)
m.x24 = Var(within=Reals,bounds=(0,None),initialize=0.2464)
m.x25 = Var(within=Reals,bounds=(0,None),initialize=0.2484)
m.x26 = Var(within=Reals,bounds=(0,None),initialize=0.2496)
m.x27 = Var(within=Reals,bounds=(0,None),initialize=0.25)
m.x28 = Var(within=Reals,bounds=(0,None),initialize=0.2496)
m.x29 = Var(within=Reals,bounds=(0,None),initialize=0.2484)
m.x30 = Var(within=Reals,bounds=(0,None),initialize=0.2464)
m.x31 = Var(within=Reals,bounds=(0,None),initialize=0.2436)
m.x32 = Var(within=Reals,bounds=(0,None),initialize=0.24)
m.x33 = Var(within=Reals,bounds=(0,None),initialize=0.2356)
m.x34 = Var(within=Reals,bounds=(0,None),initialize=0.2304)
m.x35 = Var(within=Reals,bounds=(0,None),initialize=0.2244)
m.x36 = Var(within=Reals,bounds=(0,None),initialize=0.2176)
m.x37 = Var(within=Reals,bounds=(0,None),initialize=0.21)
m.x38 = Var(within=Reals,bounds=(0,None),initialize=0.2016)
m.x39 = Var(within=Reals,bounds=(0,None),initialize=0.1924)
m.x40 = Var(within=Reals,bounds=(0,None),initialize=0.1824)
m.x41 = Var(within=Reals,bounds=(0,None),initialize=0.1716)
m.x42 = Var(within=Reals,bounds=(0,None),initialize=0.16)
m.x43 = Var(within=Reals,bounds=(0,None),initialize=0.1476)
m.x44 = Var(within=Reals,bounds=(0,None),initialize=0.1344)
m.x45 = Var(within=Reals,bounds=(0,None),initialize=0.1204)
m.x46 = Var(within=Reals,bounds=(0,None),initialize=0.1056)
m.x47 = Var(within=Reals,bounds=(0,None),initialize=0.09)
m.x48 = Var(within=Reals,bounds=(0,None),initialize=0.0736)
m.x49 = Var(within=Reals,bounds=(0,None),initialize=0.0564)
m.x50 = Var(within=Reals,bounds=(0,None),initialize=0.0384)
m.x51 = Var(within=Reals,bounds=(0,None),initialize=0.0196)
m.x52 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x53 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x54 = Var(within=Reals,bounds=(1,1),initialize=1)
m.x55 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x56 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x57 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x58 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x59 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x60 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x61 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x62 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x63 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x64 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x65 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x66 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x67 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x68 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x69 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x70 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x71 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x72 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x73 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x74 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x75 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x76 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x77 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x78 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x79 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x80 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x81 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x82 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x83 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x84 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x85 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x86 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x87 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x88 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x89 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x90 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x91 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x92 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x93 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x94 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x95 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x96 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x97 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x98 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x99 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x100 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x101 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x102 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x103 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x104 = Var(within=Reals,bounds=(1,None),initialize=1)
m.x105 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x106 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x107 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x108 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x109 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x110 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x111 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x112 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x113 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x114 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x115 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x116 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x117 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x118 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x119 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x120 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x121 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x122 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x123 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x124 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x125 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x126 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x127 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x128 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x129 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x130 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x131 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x132 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x133 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x134 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x135 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x136 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x137 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x138 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x139 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x140 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x141 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x142 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x143 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x144 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x145 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x146 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x147 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x148 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x149 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x150 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x151 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x152 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x153 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x154 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x155 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x156 = Var(within=Reals,bounds=(1,1),initialize=1)
m.x157 = Var(within=Reals,bounds=(0.6,1),initialize=0.984)
m.x158 = Var(within=Reals,bounds=(0.6,1),initialize=0.976)
m.x159 = Var(within=Reals,bounds=(0.6,1),initialize=0.968)
m.x160 = Var(within=Reals,bounds=(0.6,1),initialize=0.96)
m.x161 = Var(within=Reals,bounds=(0.6,1),initialize=0.952)
m.x162 = Var(within=Reals,bounds=(0.6,1),initialize=0.944)
m.x163 = Var(within=Reals,bounds=(0.6,1),initialize=0.936)
m.x164 = Var(within=Reals,bounds=(0.6,1),initialize=0.928)
m.x165 = Var(within=Reals,bounds=(0.6,1),initialize=0.92)
m.x166 = Var(within=Reals,bounds=(0.6,1),initialize=0.912)
m.x167 = Var(within=Reals,bounds=(0.6,1),initialize=0.904)
m.x168 = Var(within=Reals,bounds=(0.6,1),initialize=0.896)
m.x169 = Var(within=Reals,bounds=(0.6,1),initialize=0.888)
m.x170 = Var(within=Reals,bounds=(0.6,1),initialize=0.88)
m.x171 = Var(within=Reals,bounds=(0.6,1),initialize=0.872)
m.x172 = Var(within=Reals,bounds=(0.6,1),initialize=0.864)
m.x173 = Var(within=Reals,bounds=(0.6,1),initialize=0.856)
m.x174 = Var(within=Reals,bounds=(0.6,1),initialize=0.848)
m.x175 = Var(within=Reals,bounds=(0.6,1),initialize=0.84)
m.x176 = Var(within=Reals,bounds=(0.6,1),initialize=0.832)
m.x177 = Var(within=Reals,bounds=(0.6,1),initialize=0.824)
m.x178 = Var(within=Reals,bounds=(0.6,1),initialize=0.816)
m.x179 = Var(within=Reals,bounds=(0.6,1),initialize=0.808)
m.x180 = Var(within=Reals,bounds=(0.6,1),initialize=0.8)
m.x181 = Var(within=Reals,bounds=(0.6,1),initialize=0.792)
m.x182 = Var(within=Reals,bounds=(0.6,1),initialize=0.784)
m.x183 = Var(within=Reals,bounds=(0.6,1),initialize=0.776)
m.x184 = Var(within=Reals,bounds=(0.6,1),initialize=0.768)
m.x185 = Var(within=Reals,bounds=(0.6,1),initialize=0.76)
m.x186 = Var(within=Reals,bounds=(0.6,1),initialize=0.752)
m.x187 = Var(within=Reals,bounds=(0.6,1),initialize=0.744)
m.x188 = Var(within=Reals,bounds=(0.6,1),initialize=0.736)
m.x189 = Var(within=Reals,bounds=(0.6,1),initialize=0.728)
m.x190 = Var(within=Reals,bounds=(0.6,1),initialize=0.72)
m.x191 = Var(within=Reals,bounds=(0.6,1),initialize=0.712)
m.x192 = Var(within=Reals,bounds=(0.6,1),initialize=0.704)
m.x193 = Var(within=Reals,bounds=(0.6,1),initialize=0.696)
m.x194 = Var(within=Reals,bounds=(0.6,1),initialize=0.688)
m.x195 = Var(within=Reals,bounds=(0.6,1),initialize=0.68)
m.x196 = Var(within=Reals,bounds=(0.6,1),initialize=0.672)
m.x197 = Var(within=Reals,bounds=(0.6,1),initialize=0.664)
m.x198 = Var(within=Reals,bounds=(0.6,1),initialize=0.656)
m.x199 = Var(within=Reals,bounds=(0.6,1),initialize=0.648)
m.x200 = Var(within=Reals,bounds=(0.6,1),initialize=0.64)
m.x201 = Var(within=Reals,bounds=(0.6,1),initialize=0.632)
m.x202 = Var(within=Reals,bounds=(0.6,1),initialize=0.624)
m.x203 = Var(within=Reals,bounds=(0.6,1),initialize=0.616)
m.x204 = Var(within=Reals,bounds=(0.6,1),initialize=0.608)
m.x205 = Var(within=Reals,bounds=(0.6,1),initialize=0.6)
m.x206 = Var(within=Reals,bounds=(0.6,0.6),initialize=0.6)
m.x207 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x208 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x209 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x210 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x211 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x212 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x213 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x214 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x215 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x216 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x217 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x218 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x219 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x220 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x221 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x222 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x223 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x224 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x225 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x226 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x227 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x228 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x229 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x230 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x231 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x232 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x233 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x234 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x235 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x236 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x237 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x238 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x239 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x240 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x241 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x242 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x243 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x244 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x245 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x246 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x247 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x248 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x249 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x250 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x251 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x252 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x253 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x254 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x255 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x256 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x257 = Var(within=Reals,bounds=(0,3.5),initialize=1.75)
m.x258 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x259 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x260 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x261 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x262 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x263 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x264 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x265 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x266 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x267 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x268 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x269 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x270 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x271 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x272 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x273 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x274 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x275 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x276 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x277 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x278 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x279 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x280 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x281 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x282 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x283 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x284 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x285 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x286 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x287 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x288 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x289 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x290 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x291 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x292 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x293 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x294 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x295 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x296 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x297 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x298 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x299 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x300 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x301 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x302 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x303 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x304 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x305 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x306 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x307 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x308 = Var(within=Reals,bounds=(0,None),initialize=0)
m.obj = Objective(expr= - m.x104, sense=minimize)
m.c1 = Constraint(expr=-310*m.x3**2*exp(500 - 500*m.x54) + m.x258 == 0)
m.c2 = Constraint(expr=-310*m.x4**2*exp(500 - 500*m.x55) + m.x259 == 0)
m.c3 = Constraint(expr=-310*m.x5**2*exp(500 - 500*m.x56) + m.x260 == 0)
m.c4 = Constraint(expr=-310*m.x6**2*exp(500 - 500*m.x57) + m.x261 == 0)
m.c5 = Constraint(expr=-310*m.x7**2*exp(500 - 500*m.x58) + m.x262 == 0)
m.c6 = Constraint(expr=-310*m.x8**2*exp(500 - 500*m.x59) + m.x263 == 0)
m.c7 = Constraint(expr=-310*m.x9**2*exp(500 - 500*m.x60) + m.x264 == 0)
m.c8 = Constraint(expr=-310*m.x10**2*exp(500 - 500*m.x61) + m.x265 == 0)
m.c9 = Constraint(expr=-310*m.x11**2*exp(500 - 500*m.x62) + m.x266 == 0)
m.c10 = Constraint(expr=-310*m.x12**2*exp(500 - 500*m.x63) + m.x267 == 0)
m.c11 = Constraint(expr=-310*m.x13**2*exp(500 - 500*m.x64) + m.x268 == 0)
m.c12 = Constraint(expr=-310*m.x14**2*exp(500 - 500*m.x65) + m.x269 == 0)
m.c13 = Constraint(expr=-310*m.x15**2*exp(500 - 500*m.x66) + m.x270 == 0)
m.c14 = Constraint(expr=-310*m.x16**2*exp(500 - 500*m.x67) + m.x271 == 0)
m.c15 = Constraint(expr=-310*m.x17**2*exp(500 - 500*m.x68) + m.x272 == 0)
m.c16 = Constraint(expr=-310*m.x18**2*exp(500 - 500*m.x69) + m.x273 == 0)
m.c17 = Constraint(expr=-310*m.x19**2*exp(500 - 500*m.x70) + m.x274 == 0)
m.c18 = Constraint(expr=-310*m.x20**2*exp(500 - 500*m.x71) + m.x275 == 0)
m.c19 = Constraint(expr=-310*m.x21**2*exp(500 - 500*m.x72) + m.x276 == 0)
m.c20 = Constraint(expr=-310*m.x22**2*exp(500 - 500*m.x73) + m.x277 == 0)
m.c21 = Constraint(expr=-310*m.x23**2*exp(500 - 500*m.x74) + m.x278 == 0)
m.c22 = Constraint(expr=-310*m.x24**2*exp(500 - 500*m.x75) + m.x279 == 0)
m.c23 = Constraint(expr=-310*m.x25**2*exp(500 - 500*m.x76) + m.x280 == 0)
m.c24 = Constraint(expr=-310*m.x26**2*exp(500 - 500*m.x77) + m.x281 == 0)
m.c25 = Constraint(expr=-310*m.x27**2*exp(500 - 500*m.x78) + m.x282 == 0)
m.c26 = Constraint(expr=-310*m.x28**2*exp(500 - 500*m.x79) + m.x283 == 0)
m.c27 = Constraint(expr=-310*m.x29**2*exp(500 - 500*m.x80) + m.x284 == 0)
m.c28 = Constraint(expr=-310*m.x30**2*exp(500 - 500*m.x81) + m.x285 == 0)
m.c29 = Constraint(expr=-310*m.x31**2*exp(500 - 500*m.x82) + m.x286 == 0)
m.c30 = Constraint(expr=-310*m.x32**2*exp(500 - 500*m.x83) + m.x287 == 0)
m.c31 = Constraint(expr=-310*m.x33**2*exp(500 - 500*m.x84) + m.x288 == 0)
m.c32 = Constraint(expr=-310*m.x34**2*exp(500 - 500*m.x85) + m.x289 == 0)
m.c33 = Constraint(expr=-310*m.x35**2*exp(500 - 500*m.x86) + m.x290 == 0)
m.c34 = Constraint(expr=-310*m.x36**2*exp(500 - 500*m.x87) + m.x291 == 0)
m.c35 = Constraint(expr=-310*m.x37**2*exp(500 - 500*m.x88) + m.x292 == 0)
m.c36 = Constraint(expr=-310*m.x38**2*exp(500 - 500*m.x89) + m.x293 == 0)
m.c37 = Constraint(expr=-310*m.x39**2*exp(500 - 500*m.x90) + m.x294 == 0)
m.c38 = Constraint(expr=-310*m.x40**2*exp(500 - 500*m.x91) + m.x295 == 0)
m.c39 = Constraint(expr=-310*m.x41**2*exp(500 - 500*m.x92) + m.x296 == 0)
m.c40 = Constraint(expr=-310*m.x42**2*exp(500 - 500*m.x93) + m.x297 == 0)
m.c41 = Constraint(expr=-310*m.x43**2*exp(500 - 500*m.x94) + m.x298 == 0)
m.c42 = Constraint(expr=-310*m.x44**2*exp(500 - 500*m.x95) + m.x299 == 0)
m.c43 = Constraint(expr=-310*m.x45**2*exp(500 - 500*m.x96) + m.x300 == 0)
m.c44 = Constraint(expr=-310*m.x46**2*exp(500 - 500*m.x97) + m.x301 == 0)
m.c45 = Constraint(expr=-310*m.x47**2*exp(500 - 500*m.x98) + m.x302 == 0)
m.c46 = Constraint(expr=-310*m.x48**2*exp(500 - 500*m.x99) + m.x303 == 0)
m.c47 = Constraint(expr=-310*m.x49**2*exp(500 - 500*m.x100) + m.x304 == 0)
m.c48 = Constraint(expr=-310*m.x50**2*exp(500 - 500*m.x101) + m.x305 == 0)
m.c49 = Constraint(expr=-310*m.x51**2*exp(500 - 500*m.x102) + m.x306 == 0)
m.c50 = Constraint(expr=-310*m.x52**2*exp(500 - 500*m.x103) + m.x307 == 0)
m.c51 = Constraint(expr=-310*m.x53**2*exp(500 - 500*m.x104) + m.x308 == 0)
m.c52 = Constraint(expr=-(1/m.x54)**2 + m.x105 == 0)
m.c53 = Constraint(expr=-(1/m.x55)**2 + m.x106 == 0)
m.c54 = Constraint(expr=-(1/m.x56)**2 + m.x107 == 0)
m.c55 = Constraint(expr=-(1/m.x57)**2 + m.x108 == 0)
m.c56 = Constraint(expr=-(1/m.x58)**2 + m.x109 == 0)
m.c57 = Constraint(expr=-(1/m.x59)**2 + m.x110 == 0)
m.c58 = Constraint(expr=-(1/m.x60)**2 + m.x111 == 0)
m.c59 = Constraint(expr=-(1/m.x61)**2 + m.x112 == 0)
m.c60 = Constraint(expr=-(1/m.x62)**2 + m.x113 == 0)
m.c61 = Constraint(expr=-(1/m.x63)**2 + m.x114 == 0)
m.c62 = Constraint(expr=-(1/m.x64)**2 + m.x115 == 0)
m.c63 = Constraint(expr=-(1/m.x65)**2 + m.x116 == 0)
m.c64 = Constraint(expr=-(1/m.x66)**2 + m.x117 == 0)
m.c65 = Constraint(expr=-(1/m.x67)**2 + m.x118 == 0)
m.c66 = Constraint(expr=-(1/m.x68)**2 + m.x119 == 0)
m.c67 = Constraint(expr=-(1/m.x69)**2 + m.x120 == 0)
m.c68 = Constraint(expr=-(1/m.x70)**2 + m.x121 == 0)
m.c69 = Constraint(expr=-(1/m.x71)**2 + m.x122 == 0)
m.c70 = Constraint(expr=-(1/m.x72)**2 + m.x123 == 0)
m.c71 = Constraint(expr=-(1/m.x73)**2 + m.x124 == 0)
m.c72 = Constraint(expr=-(1/m.x74)**2 + m.x125 == 0)
m.c73 = Constraint(expr=-(1/m.x75)**2 + m.x126 == 0)
m.c74 = Constraint(expr=-(1/m.x76)**2 + m.x127 == 0)
m.c75 = Constraint(expr=-(1/m.x77)**2 + m.x128 == 0)
m.c76 = Constraint(expr=-(1/m.x78)**2 + m.x129 == 0)
m.c77 = Constraint(expr=-(1/m.x79)**2 + m.x130 == 0)
m.c78 = Constraint(expr=-(1/m.x80)**2 + m.x131 == 0)
m.c79 = Constraint(expr=-(1/m.x81)**2 + m.x132 == 0)
m.c80 = Constraint(expr=-(1/m.x82)**2 + m.x133 == 0)
m.c81 = Constraint(expr=-(1/m.x83)**2 + m.x134 == 0)
m.c82 = Constraint(expr=-(1/m.x84)**2 + m.x135 == 0)
m.c83 = Constraint(expr=-(1/m.x85)**2 + m.x136 == 0)
m.c84 = Constraint(expr=-(1/m.x86)**2 + m.x137 == 0)
m.c85 = Constraint(expr=-(1/m.x87)**2 + m.x138 == 0)
m.c86 = Constraint(expr=-(1/m.x88)**2 + m.x139 == 0)
m.c87 = Constraint(expr=-(1/m.x89)**2 + m.x140 == 0)
m.c88 = Constraint(expr=-(1/m.x90)**2 + m.x141 == 0)
m.c89 = Constraint(expr=-(1/m.x91)**2 + m.x142 == 0)
m.c90 = Constraint(expr=-(1/m.x92)**2 + m.x143 == 0)
m.c91 = Constraint(expr=-(1/m.x93)**2 + m.x144 == 0)
m.c92 = Constraint(expr=-(1/m.x94)**2 + m.x145 == 0)
m.c93 = Constraint(expr=-(1/m.x95)**2 + m.x146 == 0)
m.c94 = Constraint(expr=-(1/m.x96)**2 + m.x147 == 0)
m.c95 = Constraint(expr=-(1/m.x97)**2 + m.x148 == 0)
m.c96 = Constraint(expr=-(1/m.x98)**2 + m.x149 == 0)
m.c97 = Constraint(expr=-(1/m.x99)**2 + m.x150 == 0)
m.c98 = Constraint(expr=-(1/m.x100)**2 + m.x151 == 0)
m.c99 = Constraint(expr=-(1/m.x101)**2 + m.x152 == 0)
m.c100 = Constraint(expr=-(1/m.x102)**2 + m.x153 == 0)
m.c101 = Constraint(expr=-(1/m.x103)**2 + m.x154 == 0)
m.c102 = Constraint(expr=-(1/m.x104)**2 + m.x155 == 0)
m.c104 = Constraint(expr=-0.5*m.x2*(m.x3 + m.x4) - m.x54 + m.x55 == 0)
m.c105 = Constraint(expr=-0.5*m.x2*(m.x4 + m.x5) - m.x55 + m.x56 == 0)
m.c106 = Constraint(expr=-0.5*m.x2*(m.x5 + m.x6) - m.x56 + m.x57 == 0)
m.c107 = Constraint(expr=-0.5*m.x2*(m.x6 + m.x7) - m.x57 + m.x58 == 0)
m.c108 = Constraint(expr=-0.5*m.x2*(m.x7 + m.x8) - m.x58 + m.x59 == 0)
m.c109 = Constraint(expr=-0.5*m.x2*(m.x8 + m.x9) - m.x59 + m.x60 == 0)
m.c110 = Constraint(expr=-0.5*m.x2*(m.x9 + m.x10) - m.x60 + m.x61 == 0)
m.c111 = Constraint(expr=-0.5*m.x2*(m.x10 + m.x11) - m.x61 + m.x62 == 0)
m.c112 = Constraint(expr=-0.5*m.x2*(m.x11 + m.x12) - m.x62 + m.x63 == 0)
m.c113 = Constraint(expr=-0.5*m.x2*(m.x12 + m.x13) - m.x63 + m.x64 == 0)
m.c114 = Constraint(expr=-0.5*m.x2*(m.x13 + m.x14) - m.x64 + m.x65 == 0)
m.c115 = Constraint(expr=-0.5*m.x2*(m.x14 + m.x15) - m.x65 + m.x66 == 0)
m.c116 = Constraint(expr=-0.5*m.x2*(m.x15 + m.x16) - m.x66 + m.x67 == 0)
m.c117 = Constraint(expr=-0.5*m.x2*(m.x16 + m.x17) - m.x67 + m.x68 == 0)
m.c118 = Constraint(expr=-0.5*m.x2*(m.x17 + m.x18) - m.x68 + m.x69 == 0)
m.c119 = Constraint(expr=-0.5*m.x2*(m.x18 + m.x19) - m.x69 + m.x70 == 0)
m.c120 = Constraint(expr=-0.5*m.x2*(m.x19 + m.x20) - m.x70 + m.x71 == 0)
m.c121 = Constraint(expr=-0.5*m.x2*(m.x20 + m.x21) - m.x71 + m.x72 == 0)
m.c122 = Constraint(expr=-0.5*m.x2*(m.x21 + m.x22) - m.x72 + m.x73 == 0)
m.c123 = Constraint(expr=-0.5*m.x2*(m.x22 + m.x23) - m.x73 + m.x74 == 0)
m.c124 = Constraint(expr=-0.5*m.x2*(m.x23 + m.x24) - m.x74 + m.x75 == 0)
m.c125 = Constraint(expr=-0.5*m.x2*(m.x24 + m.x25) - m.x75 + m.x76 == 0)
m.c126 = Constraint(expr=-0.5*m.x2*(m.x25 + m.x26) - m.x76 + m.x77 == 0)
m.c127 = Constraint(expr=-0.5*m.x2*(m.x26 + m.x27) - m.x77 + m.x78 == 0)
m.c128 = Constraint(expr=-0.5*m.x2*(m.x27 + m.x28) - m.x78 + m.x79 == 0)
m.c129 = Constraint(expr=-0.5*m.x2*(m.x28 + m.x29) - m.x79 + m.x80 == 0)
m.c130 = Constraint(expr=-0.5*m.x2*(m.x29 + m.x30) - m.x80 + m.x81 == 0)
m.c131 = Constraint(expr=-0.5*m.x2*(m.x30 + m.x31) - m.x81 + m.x82 == 0)
m.c132 = Constraint(expr=-0.5*m.x2*(m.x31 + m.x32) - m.x82 + m.x83 == 0)
m.c133 = Constraint(expr=-0.5*m.x2*(m.x32 + m.x33) - m.x83 + m.x84 == 0)
m.c134 = Constraint(expr=-0.5*m.x2*(m.x33 + m.x34) - m.x84 + m.x85 == 0)
m.c135 = Constraint(expr=-0.5*m.x2*(m.x34 + m.x35) - m.x85 + m.x86 == 0)
m.c136 = Constraint(expr=-0.5*m.x2*(m.x35 + m.x36) - m.x86 + m.x87 == 0)
m.c137 = Constraint(expr=-0.5*m.x2*(m.x36 + m.x37) - m.x87 + m.x88 == 0)
m.c138 = Constraint(expr=-0.5*m.x2*(m.x37 + m.x38) - m.x88 + m.x89 == 0)
m.c139 = Constraint(expr=-0.5*m.x2*(m.x38 + m.x39) - m.x89 + m.x90 == 0)
m.c140 = Constraint(expr=-0.5*m.x2*(m.x39 + m.x40) - m.x90 + m.x91 == 0)
m.c141 = Constraint(expr=-0.5*m.x2*(m.x40 + m.x41) - m.x91 + m.x92 == 0)
m.c142 = Constraint(expr=-0.5*m.x2*(m.x41 + m.x42) - m.x92 + m.x93 == 0)
m.c143 = Constraint(expr=-0.5*m.x2*(m.x42 + m.x43) - m.x93 + m.x94 == 0)
m.c144 = Constraint(expr=-0.5*m.x2*(m.x43 + m.x44) - m.x94 + m.x95 == 0)
m.c145 = Constraint(expr=-0.5*m.x2*(m.x44 + m.x45) - m.x95 + m.x96 == 0)
m.c146 = Constraint(expr=-0.5*m.x2*(m.x45 + m.x46) - m.x96 + m.x97 == 0)
m.c147 = Constraint(expr=-0.5*m.x2*(m.x46 + m.x47) - m.x97 + m.x98 == 0)
m.c148 = Constraint(expr=-0.5*m.x2*(m.x47 + m.x48) - m.x98 + m.x99 == 0)
m.c149 = Constraint(expr=-0.5*m.x2*(m.x48 + m.x49) - m.x99 + m.x100 == 0)
m.c150 = Constraint(expr=-0.5*m.x2*(m.x49 + m.x50) - m.x100 + m.x101 == 0)
m.c151 = Constraint(expr=-0.5*m.x2*(m.x50 + m.x51) - m.x101 + m.x102 == 0)
m.c152 = Constraint(expr=-0.5*m.x2*(m.x51 + m.x52) - m.x102 + m.x103 == 0)
m.c153 = Constraint(expr=-0.5*m.x2*(m.x52 + m.x53) - m.x103 + m.x104 == 0)
m.c154 = Constraint(expr=-0.5*((m.x208 - m.x157*m.x106 - m.x259)/m.x157 + (m.x207 - m.x156*m.x105 - m.x258)/m.x156)*m.x2
- m.x3 + m.x4 == 0)
m.c155 = Constraint(expr=-0.5*((m.x209 - m.x158*m.x107 - m.x260)/m.x158 + (m.x208 - m.x157*m.x106 - m.x259)/m.x157)*m.x2
- m.x4 + m.x5 == 0)
m.c156 = Constraint(expr=-0.5*((m.x210 - m.x159*m.x108 - m.x261)/m.x159 + (m.x209 - m.x158*m.x107 - m.x260)/m.x158)*m.x2
- m.x5 + m.x6 == 0)
m.c157 = Constraint(expr=-0.5*((m.x211 - m.x160*m.x109 - m.x262)/m.x160 + (m.x210 - m.x159*m.x108 - m.x261)/m.x159)*m.x2
- m.x6 + m.x7 == 0)
m.c158 = Constraint(expr=-0.5*((m.x212 - m.x161*m.x110 - m.x263)/m.x161 + (m.x211 - m.x160*m.x109 - m.x262)/m.x160)*m.x2
- m.x7 + m.x8 == 0)
m.c159 = Constraint(expr=-0.5*((m.x213 - m.x162*m.x111 - m.x264)/m.x162 + (m.x212 - m.x161*m.x110 - m.x263)/m.x161)*m.x2
- m.x8 + m.x9 == 0)
m.c160 = Constraint(expr=-0.5*((m.x214 - m.x163*m.x112 - m.x265)/m.x163 + (m.x213 - m.x162*m.x111 - m.x264)/m.x162)*m.x2
- m.x9 + m.x10 == 0)
m.c161 = Constraint(expr=-0.5*((m.x215 - m.x164*m.x113 - m.x266)/m.x164 + (m.x214 - m.x163*m.x112 - m.x265)/m.x163)*m.x2
- m.x10 + m.x11 == 0)
m.c162 = Constraint(expr=-0.5*((m.x216 - m.x165*m.x114 - m.x267)/m.x165 + (m.x215 - m.x164*m.x113 - m.x266)/m.x164)*m.x2
- m.x11 + m.x12 == 0)
m.c163 = Constraint(expr=-0.5*((m.x217 - m.x166*m.x115 - m.x268)/m.x166 + (m.x216 - m.x165*m.x114 - m.x267)/m.x165)*m.x2
- m.x12 + m.x13 == 0)
m.c164 = Constraint(expr=-0.5*((m.x218 - m.x167*m.x116 - m.x269)/m.x167 + (m.x217 - m.x166*m.x115 - m.x268)/m.x166)*m.x2
- m.x13 + m.x14 == 0)
m.c165 = Constraint(expr=-0.5*((m.x219 - m.x168*m.x117 - m.x270)/m.x168 + (m.x218 - m.x167*m.x116 - m.x269)/m.x167)*m.x2
- m.x14 + m.x15 == 0)
m.c166 = Constraint(expr=-0.5*((m.x220 - m.x169*m.x118 - m.x271)/m.x169 + (m.x219 - m.x168*m.x117 - m.x270)/m.x168)*m.x2
- m.x15 + m.x16 == 0)
m.c167 = Constraint(expr=-0.5*((m.x221 - m.x170*m.x119 - m.x272)/m.x170 + (m.x220 - m.x169*m.x118 - m.x271)/m.x169)*m.x2
- m.x16 + m.x17 == 0)
m.c168 = Constraint(expr=-0.5*((m.x222 - m.x171*m.x120 - m.x273)/m.x171 + (m.x221 - m.x170*m.x119 - m.x272)/m.x170)*m.x2
- m.x17 + m.x18 == 0)
m.c169 = Constraint(expr=-0.5*((m.x223 - m.x172*m.x121 - m.x274)/m.x172 + (m.x222 - m.x171*m.x120 - m.x273)/m.x171)*m.x2
- m.x18 + m.x19 == 0)
m.c170 = Constraint(expr=-0.5*((m.x224 - m.x173*m.x122 - m.x275)/m.x173 + (m.x223 - m.x172*m.x121 - m.x274)/m.x172)*m.x2
- m.x19 + m.x20 == 0)
m.c171 = Constraint(expr=-0.5*((m.x225 - m.x174*m.x123 - m.x276)/m.x174 + (m.x224 - m.x173*m.x122 - m.x275)/m.x173)*m.x2
- m.x20 + m.x21 == 0)
m.c172 = Constraint(expr=-0.5*((m.x226 - m.x175*m.x124 - m.x277)/m.x175 + (m.x225 - m.x174*m.x123 - m.x276)/m.x174)*m.x2
- m.x21 + m.x22 == 0)
m.c173 = Constraint(expr=-0.5*((m.x227 - m.x176*m.x125 - m.x278)/m.x176 + (m.x226 - m.x175*m.x124 - m.x277)/m.x175)*m.x2
- m.x22 + m.x23 == 0)
m.c174 = Constraint(expr=-0.5*((m.x228 - m.x177*m.x126 - m.x279)/m.x177 + (m.x227 - m.x176*m.x125 - m.x278)/m.x176)*m.x2
- m.x23 + m.x24 == 0)
m.c175 = Constraint(expr=-0.5*((m.x229 - m.x178*m.x127 - m.x280)/m.x178 + (m.x228 - m.x177*m.x126 - m.x279)/m.x177)*m.x2
- m.x24 + m.x25 == 0)
m.c176 = Constraint(expr=-0.5*((m.x230 - m.x179*m.x128 - m.x281)/m.x179 + (m.x229 - m.x178*m.x127 - m.x280)/m.x178)*m.x2
- m.x25 + m.x26 == 0)
m.c177 = Constraint(expr=-0.5*((m.x231 - m.x180*m.x129 - m.x282)/m.x180 + (m.x230 - m.x179*m.x128 - m.x281)/m.x179)*m.x2
- m.x26 + m.x27 == 0)
m.c178 = Constraint(expr=-0.5*((m.x232 - m.x181*m.x130 - m.x283)/m.x181 + (m.x231 - m.x180*m.x129 - m.x282)/m.x180)*m.x2
- m.x27 + m.x28 == 0)
m.c179 = Constraint(expr=-0.5*((m.x233 - m.x182*m.x131 - m.x284)/m.x182 + (m.x232 - m.x181*m.x130 - m.x283)/m.x181)*m.x2
- m.x28 + m.x29 == 0)
m.c180 = Constraint(expr=-0.5*((m.x234 - m.x183*m.x132 - m.x285)/m.x183 + (m.x233 - m.x182*m.x131 - m.x284)/m.x182)*m.x2
- m.x29 + m.x30 == 0)
m.c181 = Constraint(expr=-0.5*((m.x235 - m.x184*m.x133 - m.x286)/m.x184 + (m.x234 - m.x183*m.x132 - m.x285)/m.x183)*m.x2
- m.x30 + m.x31 == 0)
m.c182 = Constraint(expr=-0.5*((m.x236 - m.x185*m.x134 - m.x287)/m.x185 + (m.x235 - m.x184*m.x133 - m.x286)/m.x184)*m.x2
- m.x31 + m.x32 == 0)
m.c183 = Constraint(expr=-0.5*((m.x237 - m.x186*m.x135 - m.x288)/m.x186 + (m.x236 - m.x185*m.x134 - m.x287)/m.x185)*m.x2
- m.x32 + m.x33 == 0)
m.c184 = Constraint(expr=-0.5*((m.x238 - m.x187*m.x136 - m.x289)/m.x187 + (m.x237 - m.x186*m.x135 - m.x288)/m.x186)*m.x2
- m.x33 + m.x34 == 0)
m.c185 = Constraint(expr=-0.5*((m.x239 - m.x188*m.x137 - m.x290)/m.x188 + (m.x238 - m.x187*m.x136 - m.x289)/m.x187)*m.x2
- m.x34 + m.x35 == 0)
m.c186 = Constraint(expr=-0.5*((m.x240 - m.x189*m.x138 - m.x291)/m.x189 + (m.x239 - m.x188*m.x137 - m.x290)/m.x188)*m.x2
- m.x35 + m.x36 == 0)
m.c187 = Constraint(expr=-0.5*((m.x241 - m.x190*m.x139 - m.x292)/m.x190 + (m.x240 - m.x189*m.x138 - m.x291)/m.x189)*m.x2
- m.x36 + m.x37 == 0)
m.c188 = Constraint(expr=-0.5*((m.x242 - m.x191*m.x140 - m.x293)/m.x191 + (m.x241 - m.x190*m.x139 - m.x292)/m.x190)*m.x2
- m.x37 + m.x38 == 0)
m.c189 = Constraint(expr=-0.5*((m.x243 - m.x192*m.x141 - m.x294)/m.x192 + (m.x242 - m.x191*m.x140 - m.x293)/m.x191)*m.x2
- m.x38 + m.x39 == 0)
m.c190 = Constraint(expr=-0.5*((m.x244 - m.x193*m.x142 - m.x295)/m.x193 + (m.x243 - m.x192*m.x141 - m.x294)/m.x192)*m.x2
- m.x39 + m.x40 == 0)
m.c191 = Constraint(expr=-0.5*((m.x245 - m.x194*m.x143 - m.x296)/m.x194 + (m.x244 - m.x193*m.x142 - m.x295)/m.x193)*m.x2
- m.x40 + m.x41 == 0)
m.c192 = Constraint(expr=-0.5*((m.x246 - m.x195*m.x144 - m.x297)/m.x195 + (m.x245 - m.x194*m.x143 - m.x296)/m.x194)*m.x2
- m.x41 + m.x42 == 0)
m.c193 = Constraint(expr=-0.5*((m.x247 - m.x196*m.x145 - m.x298)/m.x196 + (m.x246 - m.x195*m.x144 - m.x297)/m.x195)*m.x2
- m.x42 + m.x43 == 0)
m.c194 = Constraint(expr=-0.5*((m.x248 - m.x197*m.x146 - m.x299)/m.x197 + (m.x247 - m.x196*m.x145 - m.x298)/m.x196)*m.x2
- m.x43 + m.x44 == 0)
m.c195 = Constraint(expr=-0.5*((m.x249 - m.x198*m.x147 - m.x300)/m.x198 + (m.x248 - m.x197*m.x146 - m.x299)/m.x197)*m.x2
- m.x44 + m.x45 == 0)
m.c196 = Constraint(expr=-0.5*((m.x250 - m.x199*m.x148 - m.x301)/m.x199 + (m.x249 - m.x198*m.x147 - m.x300)/m.x198)*m.x2
- m.x45 + m.x46 == 0)
m.c197 = Constraint(expr=-0.5*((m.x251 - m.x200*m.x149 - m.x302)/m.x200 + (m.x250 - m.x199*m.x148 - m.x301)/m.x199)*m.x2
- m.x46 + m.x47 == 0)
m.c198 = Constraint(expr=-0.5*((m.x252 - m.x201*m.x150 - m.x303)/m.x201 + (m.x251 - m.x200*m.x149 - m.x302)/m.x200)*m.x2
- m.x47 + m.x48 == 0)
m.c199 = Constraint(expr=-0.5*((m.x253 - m.x202*m.x151 - m.x304)/m.x202 + (m.x252 - m.x201*m.x150 - m.x303)/m.x201)*m.x2
- m.x48 + m.x49 == 0)
m.c200 = Constraint(expr=-0.5*((m.x254 - m.x203*m.x152 - m.x305)/m.x203 + (m.x253 - m.x202*m.x151 - m.x304)/m.x202)*m.x2
- m.x49 + m.x50 == 0)
m.c201 = Constraint(expr=-0.5*((m.x255 - m.x204*m.x153 - m.x306)/m.x204 + (m.x254 - m.x203*m.x152 - m.x305)/m.x203)*m.x2
- m.x50 + m.x51 == 0)
m.c202 = Constraint(expr=-0.5*((m.x256 - m.x205*m.x154 - m.x307)/m.x205 + (m.x255 - m.x204*m.x153 - m.x306)/m.x204)*m.x2
- m.x51 + m.x52 == 0)
m.c203 = Constraint(expr=-0.5*((m.x257 - m.x206*m.x155 - m.x308)/m.x206 + (m.x256 - m.x205*m.x154 - m.x307)/m.x205)*m.x2
- m.x52 + m.x53 == 0)
m.c204 = Constraint(expr=m.x2*(m.x207 + m.x208) - m.x156 + m.x157 == 0)
m.c205 = Constraint(expr=m.x2*(m.x208 + m.x209) - m.x157 + m.x158 == 0)
m.c206 = Constraint(expr=m.x2*(m.x209 + m.x210) - m.x158 + m.x159 == 0)
m.c207 = Constraint(expr=m.x2*(m.x210 + m.x211) - m.x159 + m.x160 == 0)
m.c208 = Constraint(expr=m.x2*(m.x211 + m.x212) - m.x160 + m.x161 == 0)
m.c209 = Constraint(expr=m.x2*(m.x212 + m.x213) - m.x161 + m.x162 == 0)
m.c210 = Constraint(expr=m.x2*(m.x213 + m.x214) - m.x162 + m.x163 == 0)
m.c211 = Constraint(expr=m.x2*(m.x214 + m.x215) - m.x163 + m.x164 == 0)
m.c212 = Constraint(expr=m.x2*(m.x215 + m.x216) - m.x164 + m.x165 == 0)
m.c213 = Constraint(expr=m.x2*(m.x216 + m.x217) - m.x165 + m.x166 == 0)
m.c214 = Constraint(expr=m.x2*(m.x217 + m.x218) - m.x166 + m.x167 == 0)
m.c215 = Constraint(expr=m.x2*(m.x218 + m.x219) - m.x167 + m.x168 == 0)
m.c216 = Constraint(expr=m.x2*(m.x219 + m.x220) - m.x168 + m.x169 == 0)
m.c217 = Constraint(expr=m.x2*(m.x220 + m.x221) - m.x169 + m.x170 == 0)
m.c218 = Constraint(expr=m.x2*(m.x221 + m.x222) - m.x170 + m.x171 == 0)
m.c219 = Constraint(expr=m.x2*(m.x222 + m.x223) - m.x171 + m.x172 == 0)
m.c220 = Constraint(expr=m.x2*(m.x223 + m.x224) - m.x172 + m.x173 == 0)
m.c221 = Constraint(expr=m.x2*(m.x224 + m.x225) - m.x173 + m.x174 == 0)
m.c222 = Constraint(expr=m.x2*(m.x225 + m.x226) - m.x174 + m.x175 == 0)
m.c223 = Constraint(expr=m.x2*(m.x226 + m.x227) - m.x175 + m.x176 == 0)
m.c224 = Constraint(expr=m.x2*(m.x227 + m.x228) - m.x176 + m.x177 == 0)
m.c225 = Constraint(expr=m.x2*(m.x228 + m.x229) - m.x177 + m.x178 == 0)
m.c226 = Constraint(expr=m.x2*(m.x229 + m.x230) - m.x178 + m.x179 == 0)
m.c227 = Constraint(expr=m.x2*(m.x230 + m.x231) - m.x179 + m.x180 == 0)
m.c228 = Constraint(expr=m.x2*(m.x231 + m.x232) - m.x180 + m.x181 == 0)
m.c229 = Constraint(expr=m.x2*(m.x232 + m.x233) - m.x181 + m.x182 == 0)
m.c230 = Constraint(expr=m.x2*(m.x233 + m.x234) - m.x182 + m.x183 == 0)
m.c231 = Constraint(expr=m.x2*(m.x234 + m.x235) - m.x183 + m.x184 == 0)
m.c232 = Constraint(expr=m.x2*(m.x235 + m.x236) - m.x184 + m.x185 == 0)
m.c233 = Constraint(expr=m.x2*(m.x236 + m.x237) - m.x185 + m.x186 == 0)
m.c234 = Constraint(expr=m.x2*(m.x237 + m.x238) - m.x186 + m.x187 == 0)
m.c235 = Constraint(expr=m.x2*(m.x238 + m.x239) - m.x187 + m.x188 == 0)
m.c236 = Constraint(expr=m.x2*(m.x239 + m.x240) - m.x188 + m.x189 == 0)
m.c237 = Constraint(expr=m.x2*(m.x240 + m.x241) - m.x189 + m.x190 == 0)
m.c238 = Constraint(expr=m.x2*(m.x241 + m.x242) - m.x190 + m.x191 == 0)
m.c239 = Constraint(expr=m.x2*(m.x242 + m.x243) - m.x191 + m.x192 == 0)
m.c240 = Constraint(expr=m.x2*(m.x243 + m.x244) - m.x192 + m.x193 == 0)
m.c241 = Constraint(expr=m.x2*(m.x244 + m.x245) - m.x193 + m.x194 == 0)
m.c242 = Constraint(expr=m.x2*(m.x245 + m.x246) - m.x194 + m.x195 == 0)
m.c243 = Constraint(expr=m.x2*(m.x246 + m.x247) - m.x195 + m.x196 == 0)
m.c244 = Constraint(expr=m.x2*(m.x247 + m.x248) - m.x196 + m.x197 == 0)
m.c245 = Constraint(expr=m.x2*(m.x248 + m.x249) - m.x197 + m.x198 == 0)
m.c246 = Constraint(expr=m.x2*(m.x249 + m.x250) - m.x198 + m.x199 == 0)
m.c247 = Constraint(expr=m.x2*(m.x250 + m.x251) - m.x199 + m.x200 == 0)
m.c248 = Constraint(expr=m.x2*(m.x251 + m.x252) - m.x200 + m.x201 == 0)
m.c249 = Constraint(expr=m.x2*(m.x252 + m.x253) - m.x201 + m.x202 == 0)
m.c250 = Constraint(expr=m.x2*(m.x253 + m.x254) - m.x202 + m.x203 == 0)
m.c251 = Constraint(expr=m.x2*(m.x254 + m.x255) - m.x203 + m.x204 == 0)
m.c252 = Constraint(expr=m.x2*(m.x255 + m.x256) - m.x204 + m.x205 == 0)
m.c253 = Constraint(expr=m.x2*(m.x256 + m.x257) - m.x205 + m.x206 == 0)
|
'''
Created on Feb 15, 2016
@author: jason
'''
from .sklearntools import MultipleResponseEstimator, BackwardEliminationEstimatorCV, \
QuantileRegressor, ResponseTransformingEstimator
from pyearth import Earth
from sklearn.pipeline import Pipeline
from sklearn.calibration import CalibratedClassifierCV
outcomes = ['admission_rate', 'prescription_cost_rate', '']
# Candidate feature-selection pipeline steps (the variable name is illustrative,
# not from the original source).
candidate_pipeline_steps = [('earth', Earth(max_degree=2)), ('elim', BackwardEliminationEstimatorCV())]
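# A minimal assembly sketch (an assumption, not in the original fragment): these
# steps plug directly into the imported sklearn Pipeline.
model = Pipeline(candidate_pipeline_steps)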
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import waflib.TaskGen
import waflib.Task as Task
from waflib import Utils
import waflib
import os.path as osp
import os
def uniqify(lst):
rlst = []
for v in lst:
#print v, rlst
if v in rlst:
#print "caught"
continue
rlst.append(v)
return rlst
def ptrquote(st):
res = ""
for v in st:
if v=='"':
res +='\\"'
else:
res+=v
return res
@waflib.TaskGen.feature("build_pkgconfig")
def build_pkgconfig(self):
from waflib.Tools.ccroot import USELIB_VARS
if self.flavor=='c':
USELIB_VARS['build_pkgconfig'] = set(['INCLUDES', 'DEFINES', 'CPPFLAGS', 'CFLAGS']+['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS'])
cf = ['CPPFLAGS', 'CFLAGS']
addlib = ["clik"]
else:
USELIB_VARS['build_pkgconfig'] =set(['FCFLAGS','DEFINES','INCLUDES']+['LIB','STLIB','LIBPATH','STLIBPATH','LINKFLAGS','RPATH','LINKDEPS'])
cf = ['FCFLAGS']
addlib = ["clik","clik_f90"]
#USELIB_VARS['cprogram']
self.process_use()
self.propagate_uselib_vars()
vrs = dict([(v,list((self.env[v]))) for v in USELIB_VARS['build_pkgconfig']])
includepath = ptrquote(" ".join([self.env.CPPPATH_ST%v for v in uniqify(vrs["INCLUDES"])]))
libpath = ptrquote(" ".join([self.env.LIBPATH_ST%v for v in uniqify(vrs["LIBPATH"])]))
rlibpath = ptrquote(" ".join([self.env.RPATH_ST%v for v in uniqify(vrs["RPATH"])]))
stlibpath = ptrquote(" ".join([self.env.LIBPATH_ST%v for v in uniqify(vrs["STLIBPATH"])]))
libs = ptrquote(" ".join([self.env.LIB_ST%v for v in uniqify(vrs["LIB"]+addlib)]))
stlibs = ptrquote(" ".join([self.env.STLIB_ST%v for v in uniqify(vrs["STLIB"])]))
defines = ptrquote(" ".join([self.env.DEFINES_ST%v for v in uniqify(vrs["DEFINES"])]))
cfs = []
#print cf
for tt in cf+["LINKFLAGS"]:
#print tt,vrs[tt]
cfs += vrs[tt]
#print cfs
cflags = ptrquote(" ".join(uniqify(cfs)))
#print "YEAH:"
#print includepath
#print libpath
#print rlibpath
#print stlibpath
#print libs
#print stlibs
#print cflags
#print defines
alibs = ""
if libs:
alibs += (self.env.SHLIB_MARKER or "") +" ".join([rlibpath,libpath,libs])
if stlibs:
        alibs += (self.env.STLIB_MARKER or "") + " ".join([stlibpath, stlibs])
f=open(osp.join(self.env.BINDIR,self.target),"w")
print(config_tpl%(" ".join((includepath,defines,cflags)),alibs), file=f)
f.close()
os.chmod(osp.join(self.env.BINDIR,self.target),Utils.O755)
config_tpl = """#! /usr/bin/env python
# don't do much for now
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--cflags", action="store_true",
help="only the cflags")
parser.add_option("--libs", action="store_true",
help="only libflags")
(options, args) = parser.parse_args()
res={}
cflags = "%s"
libs = "%s"
if (not options.libs) and (not options.cflags):
options.libs=True
options.cflags=True
if options.cflags:
print cflags,
if options.libs:
print libs,
print
"""
@waflib.TaskGen.feature("*")
@waflib.TaskGen.before_method('process_source')
def process_execrule(self):
if not getattr(self,'execrule',None):
return
self.meths.remove('process_source')
name=str(getattr(self,'name',None)or self.target or self.execrule)
cls=Task.task_factory(name,self.execrule,getattr(self,'vars',[]),shell=getattr(self,'shell',True),color=getattr(self,'color','BLUE'))
tsk=self.create_task(name)
if getattr(self,'target',None):
if isinstance(self.target,str):
self.target=self.target.split()
if not isinstance(self.target,list):
self.target=[self.target]
for x in self.target:
if isinstance(x,str):
tsk.outputs.append(self.path.find_or_declare(x))
else:
x.parent.mkdir()
tsk.outputs.append(x)
if getattr(self,'install_path',None):
self.bld.install_files(self.install_path,tsk.outputs,chmod=Utils.O755)
if getattr(self,'source',None):
tsk.inputs=self.to_nodes(self.source)
self.source=[]
if getattr(self,'scan',None):
cls.scan=self.scan
if getattr(self,'cwd',None):
tsk.cwd=self.cwd
if getattr(self,'update_outputs',None)or getattr(self,'on_results',None):
Task.update_outputs(cls)
if getattr(self,'always',None):
Task.always_run(cls)
for x in['after','before','ext_in','ext_out']:
setattr(cls,x,getattr(self,x,[]))
|
"""
Tests for dit.example_dists.numeric.
"""
import pytest
import numpy as np
from dit.shannon import entropy
from dit.example_dists import bernoulli, binomial, hypergeometric, uniform
def test_bernoulli1():
""" Test bernoulli distribution """
d = bernoulli(1 / 2)
assert d.outcomes == (0, 1)
assert sum(d.pmf) == pytest.approx(1)
@pytest.mark.parametrize('p', [i / 10 for i in range(0, 11)])
def test_bernoulli2(p):
""" Test bernoulli distribution """
d = bernoulli(p)
assert d[0] == pytest.approx(1 - p)
assert d[1] == pytest.approx(p)
@pytest.mark.parametrize('p', [-1, 1.5, 'a', int, []])
def test_bernoulli3(p):
""" Test bernoulli distribution failures """
with pytest.raises(ValueError):
bernoulli(p)
@pytest.mark.parametrize('n', range(1, 10))
def test_binomial1(n):
""" Test binomial distribution """
d = binomial(n, 1 / 2)
assert d.outcomes == tuple(range(n + 1))
assert sum(d.pmf) == pytest.approx(1)
@pytest.mark.parametrize('n', [-1, 1.5, 'a', int, []])
def test_binomial2(n):
""" Test binomial distribution failures """
with pytest.raises(ValueError):
binomial(n, 1 / 2)
def test_uniform1():
""" Test uniform distribution """
for n in range(2, 10):
d = uniform(n)
assert d.outcomes == tuple(range(n))
assert d[0] == pytest.approx(1 / n)
assert entropy(d) == pytest.approx(np.log2(n))
@pytest.mark.parametrize('v', [-1, 1.5, 'a', int, []])
def test_uniform2(v):
""" Test uniform distribution failures """
with pytest.raises(ValueError):
uniform(v)
@pytest.mark.parametrize(('a', 'b'), zip([1, 2, 3, 4, 5], [5, 7, 9, 11, 13]))
def test_uniform3(a, b):
""" Test uniform distribution construction """
d = uniform(a, b)
assert len(d.outcomes) == b - a
assert d[a] == pytest.approx(1 / (b - a))
@pytest.mark.parametrize(('a', 'b'), [(2, 0), (0, [])])
def test_uniform4(a, b):
""" Test uniform distribution failures """
with pytest.raises(ValueError):
uniform(a, b)
def test_hypergeometric1():
""" Test hypergeometric distribution """
d = hypergeometric(50, 5, 10)
assert d[4] == pytest.approx(0.003964583)
assert d[5] == pytest.approx(0.0001189375)
@pytest.mark.parametrize('vals', [
(50, 5, -1),
(50, -1, 10),
(-1, 5, 10),
(50, 5, 1.5),
(50, 1.5, 10),
(1.5, 5, 10),
(50, 5, 'a'),
(50, 'a', 10),
('a', 5, 10),
(50, 5, int),
(50, int, 10),
(int, 5, 10),
(50, 5, []),
(50, [], 10),
([], 5, 10),
])
def test_hypergeometric2(vals):
""" Test hypergeometric distribution failures """
with pytest.raises(ValueError):
hypergeometric(*vals)
|
import os
import logging
import asyncio
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
class OutputWorker(object):
def __init__(self, **kwargs):
self.type = kwargs.get("type", None)
self.logger = logging.getLogger(__name__)
output_dir = kwargs.get("output_dir", None)
if output_dir:
self.root_output_dir = output_dir
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
else:
            # output_dir is mandatory; fail with an explicit error if it is missing.
            self.logger.error("Need mandatory keyword arg: output_dir")
            raise ValueError("Need mandatory keyword arg: output_dir")
def write_data(self, data):
raise NotImplementedError
class ParquetOutputWorker(OutputWorker):
def write_data(self, data):
cdir = "{}/{}/".format(self.root_output_dir, data["topic"])
if not os.path.isdir(cdir):
os.makedirs(cdir)
# dtypes = {x: data['schema'].field(x).type.__str__()
# if 'list' not in data['schema'].field(x).type.__str__()
# else data['schema'].field(x).type.to_pandas_dtype()
# for x in data['schema'].names}
df = pd.DataFrame.from_dict(data["records"])
# df.to_parquet(
# path=cdir,
# partition_cols=data['partition_cols'],
# index=True,
# engine='pyarrow')
# pq.write_metadata(
# self.schema,'{}/_metadata'.format(cdir),
# version='2.0',
# coerce_timestamps='us')
table = pa.Table.from_pandas(df, schema=data["schema"],
preserve_index=False)
pq.write_to_dataset(
table,
root_path=cdir,
partition_cols=data['partition_cols'],
version="2.0",
compression='ZSTD',
row_group_size=100000,
)
class GatherOutputWorker(OutputWorker):
"""This is used to write output for the run-once data gather mode"""
def write_data(self, data):
file = f"{self.root_output_dir}/{data['topic']}.output"
with open(file, 'a') as f:
# Even though we use JSON dump, the output is not valid JSON
f.write(data['records'])
async def run_output_worker(queue, output_workers, logger):
while True:
try:
data = await queue.get()
except asyncio.CancelledError:
logger.error(f"Writer thread received task cancel")
return
if not output_workers:
return
for worker in output_workers:
worker.write_data(data)
def init_output_workers(output_types, output_args):
"""Create the appropriate output worker objects and return them"""
workers = []
for otype in output_types:
if otype == "parquet":
worker = ParquetOutputWorker(output_dir=output_args["output_dir"])
if worker:
workers.append(worker)
elif otype == "gather":
try:
worker = GatherOutputWorker(
output_dir=output_args["output_dir"])
if worker:
workers.append(worker)
except Exception as e:
logger = logging.getLogger(__name__)
logger.error(
f"Unable to create {otype} worker, exception {str(e)}")
else:
raise NotImplementedError("Unknown type: {}".format(otype))
return workers
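# A minimal wiring sketch (assumed usage, not part of the original module): build a
# gather worker and let run_output_worker drain a single record from a queue.
async def _example_write_once(output_dir="/tmp/output-worker-demo"):
    logger = logging.getLogger(__name__)
    workers = init_output_workers(["gather"], {"output_dir": output_dir})
    queue = asyncio.Queue()
    await queue.put({"topic": "example", "records": '{"hello": "world"}\n'})
    writer = asyncio.ensure_future(run_output_worker(queue, workers, logger))
    await asyncio.sleep(0.1)   # give the writer task a chance to drain the queue
    writer.cancel()            # run_output_worker catches CancelledError and exits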
|
from triangle import Triangle
def saccade_model_mle(gazepoints, src_xy, tgt_xy, init_t_start, init_t_end):
'''
    Parameters
      gazepoints, list of [x, y] gaze points over time
      src_xy, 2D list, best guess for saccade start location
tgt_xy, 2D list, best guess for saccade end location
init_t_start, best guess for saccade start time
init_t_end, best guess for saccade end time
Return
t_start
optimal saccade start time.
gazepoint[t_start] is the first saccade point.
t_end
optimal saccade end time.
gazepoint[t_end - 1] is the last saccade point.
mse, mean squared error of the model
Here we use two different concepts, times and indices:
Time t 0 1 2 3 4 5
| | | | | |
Vector [ 2 3 1 2 1 ]
| | | | |
Index i 0 1 2 3 4
'''
# Alias
g = gazepoints
# Max t, Max index
max_t = len(g)
max_i = max_t - 1
# Dynamic programming memories:
# 1) For each index i, store the summed square error in t=0..i+1
# so that source_mem[0] gives summed square error in t=0..1
# and that source_mem[max_i] gives summed square error in t=0..max_t.
# Because target_mem[k] = target_mem[k-1] + square_error(k)
# calculate values dynamically from the start.
source_mem = [None for _ in range(max_t)]
# 2) For each index i, j, i<=j, store the summed square error in t=i..j+1
# so that saccade_mem[0][0] gives summed square error in t=0..1
# and that saccade_mem[max_i][max_i] gives s.s.e. in t=max_t-1..max_t.
# Because i <= j, saccade_mem is upper triangular matrix max_t x max_t
saccade_mem_n = max_t * (max_t + 1) / 2
saccade_mem = Triangle(max_t, [None for _ in xrange(saccade_mem_n)])
# 3) For each index i, store the summed square error in t=i..max_t
# so that target_mem[0] gives summed square error in t=0..max_t
# and that target_mem[max_i] gives s.s.e. in t=max_t-1..max_t.
# Because target_mem[k] = square_error(k) + target_mem[k+1]
# calculate values dynamically from the end.
target_mem = [None for _ in range(max_t)]
def square_error(index, mu):
p = g[index]
dx = p[0] - mu[0]
dy = p[1] - mu[1]
return dx * dx + dy * dy
def source_objective(t_start):
'''
Return
summed square error between t=0 and t=t_start
'''
if t_start == 0:
return 0
if source_mem[t_start - 1] is not None:
return source_mem[t_start - 1]
# else calculate
for i in xrange(0, t_start):
if source_mem[i] is not None:
continue
else:
serr = square_error(i, src_xy)
if i == 0:
source_mem[i] = serr
else:
source_mem[i] = serr + source_mem[i - 1]
return source_mem[t_start - 1]
def saccade_objective(t_start, t_end):
'''
Return
summed square error between t=t_start and t=t_end
'''
if t_start == t_end:
return 0
# Now dt = t_end - t_start > 0
if saccade_mem[t_start, t_end - 1] is not None:
# Error from t_start to t_end is already computed
return saccade_mem[t_start, t_end - 1]
# else calculate
sse = 0
for i in xrange(t_start, t_end):
## Alpha in (0, 1] and gives the progression of the saccade.
## Five options (osp = optimal saccade point):
## 1) the first osp is at src_xy and
## the last osp is apart from tgt_xy.
# alpha = float(i - t_start) / (t_end - t_start)
## 2) the first osp is apart from src_xy and
## the last osp is at tgt_xy.
# alpha = float(i + 1 - t_start) / (t_end - t_start)
## 3) the first osp is at src_xy and
## the last osp is at tgt_xy.
# alpha = float(i - t_start) / (t_end - 1 - t_start)
## 4) the first osp is apart from src_xy and
## the last osp is apart from tgt_xy.
# alpha = float(i + 1 - t_start) / (t_end + 1 - t_start)
## 5) the first osp is middleway at and apart from src_xy and
## the last osp is middleway at and apart from tgt_xy.
alpha = float(i + 0.5 - t_start) / (t_end - t_start)
# Take weighted mean of the source and target points.
mu = [0, 0]
mu[0] = src_xy[0] * (1 - alpha) + tgt_xy[0] * alpha
mu[1] = src_xy[1] * (1 - alpha) + tgt_xy[1] * alpha
sse += square_error(i, mu)
saccade_mem[t_start, t_end - 1] = sse
return sse
def target_objective(t_end):
'''
Return
summed square error between t=t_end and t=t_max
'''
if max_t <= t_end:
# t_end is not suitable for index
return 0
if target_mem[t_end] is not None:
# Already computed
return target_mem[t_end]
for i in xrange(max_i, t_end - 1, -1):
# i_first = max_i
# i_last = t_end
if target_mem[i] is not None:
# Already computed
continue
else:
serr = square_error(i, tgt_xy)
if i == max_i:
# No previous sum
target_mem[i] = serr
else:
target_mem[i] = serr + target_mem[i + 1]
return target_mem[t_end]
def find_optimal_t_start(t_end):
'''
Given t_end, find t_start such that the sum of source_objective and
saccade_objective is minimized.
Return
t_start, optimal
src_sse, source summed squared error
sacc_sse, saccade summed squared error
'''
min_sse = float('inf')
min_src_sse = float('inf')
min_sacc_sse = float('inf')
t_min_sse = 0
for t in range(0, t_end + 1):
src_sse = source_objective(t)
sacc_sse = saccade_objective(t, t_end)
sse = src_sse + sacc_sse
if sse < min_sse:
min_sse = sse
min_src_sse = src_sse
min_sacc_sse = sacc_sse
t_min_sse = t
return t_min_sse, min_src_sse, min_sacc_sse
def find_optimal_t_end(t_start):
'''
Given t_start, find t_end such that the sum of saccade_objective and
target_objective is minimized.
Return
t_end, optimal
sacc_sse, saccade summed squared error
target_sse, target summed squared error
'''
min_sse = float('inf')
min_sacc_sse = float('inf')
min_tgt_sse = float('inf')
t_min_sse = 0
for t in range(t_start, max_t + 1):
sacc_sse = saccade_objective(t_start, t)
tgt_sse = target_objective(t)
sse = sacc_sse + tgt_sse
if sse < min_sse:
min_sse = sse
min_sacc_sse = sacc_sse
min_tgt_sse = tgt_sse
t_min_sse = t
return t_min_sse, min_sacc_sse, min_tgt_sse
# Put limits to initial times
t_start = min(init_t_start, max_t)
t_end = min(init_t_end , max_t)
# Ensure order, swap if needed
if t_end < t_start:
t_temp = t_end
t_end = t_start
t_start = t_temp
sum_sse = float('inf')
#import pdb; pdb.set_trace()
# Iterate until no change (converged). Place iteration limits for bugs.
for i in range(20):
t_start_hat, source_sse, saccade_sse = find_optimal_t_start(t_end)
t_end_hat, saccade_sse, target_sse = find_optimal_t_end(t_start_hat)
sum_sse = source_sse + saccade_sse + target_sse
if t_start_hat == t_start and t_end_hat == t_end:
# print 'MLE iterations: ' + str(i)
# print 't_start: ' + str(t_start)
# print 't_end: ' + str(t_end)
# print 'sum_sse: ' + str(sum_sse)
break
else:
t_start = t_start_hat
t_end = t_end_hat
# Mean squared error
mse = float(sum_sse) / len(g)
return t_start, t_end, mse, source_sse, saccade_sse, target_sse
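if __name__ == '__main__':
    # A minimal synthetic-data sketch (assumed usage, not part of the original):
    # nine gazepoints that sit at the source, jump once, then sit at the target.
    # The module itself targets Python 2 (xrange) and needs the local triangle module.
    gaze = [[0, 0]] * 4 + [[5, 5]] + [[10, 10]] * 4
    t_start, t_end, mse, src_sse, sacc_sse, tgt_sse = saccade_model_mle(
        gaze, [0, 0], [10, 10], 3, 6)
    print('saccade spans t=%d..%d, mse=%.3f' % (t_start, t_end, mse))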
|
import linkr # flake8: noqa: F401
import time
import mock
from models import Link
from test.backend.test_case import LinkrTestCase
with mock.patch.object(time, 'time', return_value=5):
link = Link(
alias='alias',
outgoing_url='outgoing url',
)
link.link_id = 1
class TestLink(LinkrTestCase):
_multiprocess_can_split_ = True
def test_init(self):
self.assertEqual(link.submit_time, 5)
self.assertEqual(link.alias, 'alias')
self.assertEqual(link.outgoing_url, 'outgoing url')
self.assertEqual(link.password_hash, None)
self.assertEqual(link.user_id, None)
self.assertFalse(link.require_recaptcha)
def test_validate_password(self):
self.assertTrue(link.validate_password('password'))
self.assertTrue(link.validate_password('anything'))
self.assertTrue(link.validate_password(None))
def test_update_password(self):
self.assertTrue(link.validate_password('password'))
link.update_password('new password')
self.assertFalse(link.validate_password('password'))
self.assertTrue(link.validate_password('new password'))
link.update_password(None)
def test_as_dict(self):
link_dict = link.as_dict()
del link_dict['full_alias'] # Value is dependent on config
self.assertEqual(link_dict, {
'link_id': 1,
'user_id': None,
'submit_time': 5,
'alias': 'alias',
'outgoing_url': 'outgoing url',
'is_password_protected': False,
'require_recaptcha': False,
})
def test_is_password_protected(self):
self.assertFalse(link.is_password_protected())
|
import pandas as pd
import sqlite3
path = "C:/Users/Grace Sun/physics_hackathon/"
db_path = path+'/Databases/'
strings = ["Attacks", "Characters", "Cleric", "Conditions", "Damage Types", "Fighter", "Items", "Monsters", "Rogue", "Save Types", "Spells", "Weapons", "Wizard"]
for string in strings:
conn = sqlite3.connect(db_path+string+".db")
df = pd.read_csv(path+"CSV/Databases - "+string+".csv")
df.to_sql(string, conn, if_exists = 'replace')
conn.close()
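# A quick read-back check (an assumption, not part of the original script): confirm
# that one of the freshly written databases can be queried.
conn = sqlite3.connect(db_path + "Spells.db")
print(pd.read_sql_query("SELECT * FROM Spells LIMIT 5", conn))
conn.close()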
|
import os
import math
import json
import random
import numpy as np
import networkx as nx
from abc import ABC, abstractmethod
from barl_simpleoptions.state import State
class Option(ABC) :
"""
Interface for a reinforcement learning option.
"""
def __init__(self) :
pass
@abstractmethod
def initiation(self, state : 'State') -> bool :
"""
        Returns whether or not a given state is in this option's initiation set.
Arguments:
state {State} -- The state whose membership to the initiation set is
to be tested.
Returns:
            bool -- Whether or not the given state is in this option's initiation set.
"""
pass
@abstractmethod
def policy(self, state : 'State') :
"""
Returns the action specified by this option's policy for a given state.
Arguments:
            state {State} -- The environmental state in which the option chooses an action.
Returns:
action [Hashable] -- The action specified by the option's policy in this state.
"""
pass
@abstractmethod
def termination(self, state : 'State') -> bool :
"""
Returns whether or not the option terminates in the given state.
Arguments:
state {State} -- The state in which to test for termination.
Returns:
bool -- Whether or not this option terminates in this state.
"""
pass
class PrimitiveOption(Option) :
"""
Class representing a primitive option.
Primitive options terminate with probability one in every state, and have
an initiation set consisting of all of the states where their underlying
primitive actions are available.
"""
def __init__(self, action) :
"""Constructs a new primitive option.
Arguments:
action {Hashable} -- The underlying primitive action for this option.
"""
self.action = action
def initiation(self, state : State) :
return state.is_action_legal(self.action)
def policy(self, state : State) :
return self.action
def termination(self, state : State) :
return True
def __str__(self) :
return "PrimitiveOption({})".format(self.action)
def __repr__(self) :
return str(self)
class SubgoalOption(Option) :
"""
Class representing a temporally abstract action in reinforcement learning through a policy to
be executed between an initiation state and a termination state.
"""
def __init__(self, subgoal : 'State', graph : nx.DiGraph, policy_file_path : str, initiation_set_size : int) :
"""Constructs a new subgoal option.
Arguments:
subgoal {State} -- The state to act as this option's subgoal.
graph {nx.Graph} -- The state interaction graph of the reinforcement learning environment.
policy_file_path {str} -- The path to the file containing the policy for this option.
initiation_set_size {int} -- The size of this option's initiation set.
"""
self.graph = graph
self.subgoal = subgoal
self.policy_file_path = policy_file_path
self.initiation_set_size = initiation_set_size
self._build_initiation_set()
# Load the policy file for this option.
with open(policy_file_path, mode = "rb") as f:
self.policy_dict = json.load(f)
def initiation(self, state : 'State') :
return str(state) in self.initiation_set
def policy(self, state : 'State') :
return self.policy_dict[str(state)]
def termination(self, state : State) :
return (state == self.subgoal) or (not self.initiation(state))
def _build_initiation_set(self) :
"""
        Constructs the initiation set for this subgoal option.
        The initiation set consists of the initiation_set_size closest states to the subgoal state.
"""
# Get distances of all nodes from subgoal node.
node_distances = []
for node in self.graph :
# Exclude subgoal node.
if (not node == str(self.subgoal)) :
# Only consider nodes which can reach the subgoal node.
if (nx.has_path(self.graph, source = str(node), target = str(self.subgoal))) :
# Append the tuple (node, distance from subgoal) to the list.
node_distances.append((node, len(list(nx.shortest_path(self.graph, source = node, target = str(self.subgoal))))))
# Sort the list of nodes by distance from the subgoal.
node_distances = sorted(node_distances, key = lambda x: x[1])
# Take the closest set_size nodes to the subgoal as the initiation set.
initiation_set, _ = zip(*node_distances)
if (len(initiation_set) > self.initiation_set_size) :
self.initiation_set = list(initiation_set)[:self.initiation_set_size].copy()
else :
self.initiation_set = list(initiation_set).copy()
def __str__(self) :
return "SubgoalOption({}~{})".format(str(self.subgoal), str(self.policy_file_path))
def __repr__(self):
return str(self)
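if __name__ == '__main__':
    # A minimal sketch (assumed usage, not part of the original): PrimitiveOption only
    # calls state.is_action_legal, so a duck-typed stand-in for State suffices here.
    class _EveryActionLegal(object):
        def is_action_legal(self, action):
            return True
    option = PrimitiveOption("move_north")
    state = _EveryActionLegal()
    print(option, option.initiation(state), option.policy(state), option.termination(state))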
|
from skimage import io
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
from tifffile import imsave
def threshold(imgarray, darkparticles, sigma, thresholdvalue, usesigma):
newarray = imgarray
if (imgarray.ndim) > 1:
        newarray = imgarray.flatten()  # work on a flattened copy of the image
mean = np.average(imgarray)
stdev = np.std(imgarray)
if usesigma == True:
if darkparticles == True:
for i in range(len(newarray)):
if (newarray[i]) > (mean-sigma*stdev):
newarray[i] = 0
else:
(newarray[i]) = 1
else:
for i in range(len(newarray)):
if (newarray[i]) < (mean+sigma*stdev):
newarray[i] = 0
else:
(newarray[i]) = 1
if (usesigma == False):
if darkparticles == True:
for i in range(len(newarray)):
if (newarray[i]) < thresholdvalue:
newarray[i] = 1
else:
newarray[i] = 0
else:
for i in range(len(newarray)):
if (newarray[i]) > thresholdvalue:
newarray[i] = 1
else:
newarray[i] = 0
if (imgarray.ndim) > 1:
        newarray = np.reshape(newarray, imgarray.shape)  # restore the original image shape
return newarray
def meshplot(dataset_num, dimensions, temps):
ax = plt.axes(projection='3d')
X = np.linspace(0,dimensions[0]-1,dimensions[0])
Y = X
X,Y = np.meshgrid(X,Y)
#dataset_num = 294 # change this number to look at a different dataset
ax.plot_surface(X, Y, np.reshape(temps[dataset_num],(dimensions[0],dimensions[1])),cmap=cm.coolwarm, linewidth=0) #plots fitted temp values
#ax.plot_surface(X,Y, np.reshape(best_fits[dataset_num],(40,40)), linewidth=0, antialiased=False, cmap=cm.Blues) #plots best fit plane
#ax.plot_surface(X,Y, np.reshape(means[dataset_num],(40,40)), linewidth=0, antialiased=False, cmap = cm.summer) #plots mean value
#ax.plot_surface(X,Y, np.reshape(old_temps[dataset_num],(40,40)), linewidth=0, antialiased=False, cmap = cm.copper) #plots unaltered temp values
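if __name__ == '__main__':
    # A minimal usage sketch (assumed, not part of the original): flag bright pixels
    # in a synthetic noisy frame using the sigma-based mode of threshold().
    rng = np.random.default_rng(0)
    frame = rng.normal(loc=100.0, scale=5.0, size=(32, 32))
    frame[10:14, 10:14] += 60.0  # synthetic bright particle
    mask = threshold(frame, darkparticles=False, sigma=3, thresholdvalue=0, usesigma=True)
    print("pixels above threshold:", int(np.sum(mask)))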
|
import tvm
import math
import numpy as np
from functools import reduce
import utils
assert_print = utils.assert_print
gen_enum = utils.gen_enum
any_factor_split = utils.any_factor_split
get_factor_lst = utils.get_factor_lst
gen_group = utils.gen_group
is_power_of_x = utils.is_power_of_x
def able_inline(op, down_graph):
is_compute = isinstance(op, tvm.tensor.ComputeOp)
has_reduce = hasattr(op, "reduce_axis") and op.reduce_axis
is_output = False
for i in range(op.num_outputs):
if op.output(i) not in down_graph:
is_output = True
break
return is_compute and (not has_reduce) and (not is_output)
# class SubSpace(object):
# def __init__(self, entities):
# assert_print(isinstance(entities, (list, tuple)) and len(entities) > 0)
# self.entities = entities
# self.begin = 0
# self.end = len(self.entities)
# def get_entity(self, p):
# if len(self.entities) < 1:
# raise RuntimeError("Query from empty space")
# if 0 <= p < self.end:
# return self.entities[p]
# else:
# raise RuntimeError("Space pointer out of range")
# def range(self, p, left, right=None):
# if right is None:
# right = left
# left = p - left if p - left >= 0 else 0
# right = p + right if p + right <= self.end else self.end
# return range(left, right), self.entities[left:right]
# def __len__(self):
# return self.end
class Space(object):
def __init__(self):
self.subspaces = {}
self.types = {}
self.valid_type_keys = ["fuse", "spatial", "reduce", "reorder", "inline", "unroll", "merge", "special"]
for type_key in self.valid_type_keys:
self.types[type_key] = []
self.dim = 0
def add_subspace(self, name, subspace, type_key, override=False):
if name in self.subspaces and not override:
raise RuntimeError("Same subspace name")
assert_print(type_key in self.valid_type_keys)
self.subspaces[name] = subspace
self.types[type_key].append(name)
self.dim += subspace.dim
def items(self):
return self.subspaces.items()
def __len__(self):
ret = 1
for _, subspace in self.subspaces.items():
ret *= len(subspace)
return ret
def length(self):
ret = {}
total = 1
added = 0
for name, subspace in self.subspaces.items():
ret[name] = len(subspace)
total *= ret[name]
added += ret[name]
ret["total"] = total
ret["added"] = added
return ret
DirectedSubSpaceTypeKeys = ["spatial", "reduce"]
UndirectedSubSpaceTypeKeys = ["fuse", "reorder", "unroll", "inline", "merge", "special"]
class SubSpace(object):
def __init__(self):
self.dim = 0
self.static_entities = []
self.size = 0
self.num_direction = 0
    def random_entity(self):
        # np.random.choice cannot sample from a list of lists, so pick a random index instead
        return self.static_entities[np.random.randint(len(self.static_entities))]
def next_entity(self, *args, **kwargs):
raise NotImplementedError()
def get_entity(self, p):
return self.static_entities[p]
def get_direction(self, num):
raise NotImplementedError()
def __len__(self):
return self.size
class SplitSpace(SubSpace):
def __init__(self, dim, total, allow_non_divisible='off'):
super(SplitSpace, self).__init__()
self.total = total
self.allow_non_divisible = allow_non_divisible
self.dim = dim
self.static_entities = any_factor_split(total, dim, allow_non_divisible=allow_non_divisible)
self.size = len(self.static_entities)
self.num_direction = dim * (dim - 1)
self.directions = []
for i in range(self.dim):
for j in range(self.dim):
if i != j:
self.directions.append((i, j))
self.type_key = "split"
def next_entity(self, pos, d):
# d is tuple
if len(d) == 1:
next_pos = (pos + d[0]) % self.size
return next_pos
elif len(d) == 2:
asc_pos, dec_pos = d[0], d[1]
assert_print(0 <= asc_pos < self.dim)
assert_print(0 <= dec_pos < self.dim)
assert_print(asc_pos != dec_pos)
current = self.static_entities[pos]
ret = current.copy()
left = current[asc_pos] * current[dec_pos]
canout = False
next_pos = -1
while not canout:
tmp = ret[asc_pos] + 1
while tmp <= left:
if self.allow_non_divisible == 'continuous':
break
elif self.allow_non_divisible == 'power2' and is_power_of_x(2, tmp):
break
elif left % tmp == 0:
break
tmp += 1
tmp = min(tmp, left)
ret[asc_pos] = tmp
ret[dec_pos] = math.ceil(left / tmp)
try:
next_pos = self.static_entities.index(ret)
canout = True
except ValueError:
canout = False
return next_pos
else:
raise RuntimeError(
"Not support for direction more than two dims: {}".format(d))
def get_direction(self, num):
return self.directions[num % self.num_direction]
class FuseSpace(SubSpace):
def __init__(self, dim, elements):
self.dim = dim
self.static_entities = gen_group(elements, most_groups=self.dim)
self.size = len(self.static_entities)
self.num_direction = 2
self.directions = [(-1,), (1,)]
self.type_key = "fuse"
def next_entity(self, pos, d):
# d is tuple
if len(d) == 1:
pos = (pos + d[0]) % self.size
return pos
else:
raise RuntimeError(
"Not support for direction more than one dim: {}".format(d))
def get_direction(self, num):
return self.directions[num % self.num_direction]
class ReorderSpace(SubSpace):
def __init__(self, num_spatial_axis):
self.dim = 1
self.static_entities = [[i] for i in range(num_spatial_axis)]
self.size = len(self.static_entities)
self.num_direction = 2
self.directions = [(-1,), (1,)]
self.type_key = "reorder"
def next_entity(self, pos, d):
# d is tuple
if len(d) == 1:
pos = (pos + d[0]) % self.size
return pos
else:
raise RuntimeError(
"Not support for direction more than one dim: {}".format(d))
def get_direction(self, num):
return self.directions[num % self.num_direction]
class UnrollSpace(SubSpace):
def __init__(self, steps, explicit=False):
super(UnrollSpace, self).__init__()
self.dim = 2
self.static_entities = []
self.steps = steps
explicits = [1] if explicit else [0, 1]
for step in steps:
for _explicit in explicits:
self.static_entities.append([step, _explicit])
self.size = len(self.static_entities)
self.num_direction = 2
self.directions = [(-1,), (1,)]
self.type_key = "unroll"
def next_entity(self, pos, d):
# d is tuple
if len(d) == 1:
pos = (pos + d[0]) % self.size
return pos
else:
raise RuntimeError(
"Not support for direction more than one dim: {}".format(d))
def get_direction(self, num):
return self.directions[num % self.num_direction]
class PosSpace(SubSpace):
def __init__(self, parts, num_axis):
self.dim = 2
self.static_entities = []
self.parts = parts
self.num_axis = num_axis
for i in range(parts):
for j in range(num_axis):
self.static_entities.append([i, j])
self.size = len(self.static_entities)
self.num_direction = 2
self.directions = [(-1,), (1,)]
self.type_key = "local"
def next_entity(self, pos, d):
# d is tuple
if len(d) == 1:
pos = (pos + d[0]) % self.size
return pos
else:
raise RuntimeError(
"Not support for direction more than one dim: {}".format(d))
def get_direction(self, num):
return self.directions[num % self.num_direction]
class InlineSpace(SubSpace):
def __init__(self, inline_op_pos, op_num, force_inline=False):
self.dim = op_num
self.static_entities = []
self.able_inline_list = inline_op_pos
if force_inline:
entity = [0] * op_num
for pos in inline_op_pos:
entity[pos] = 1
self.static_entities.append(entity)
else:
num_inline_ops = len(inline_op_pos)
enums = gen_enum([1, 0], num_inline_ops)
for enum in enums:
entity = [0] * op_num
for i in range(num_inline_ops):
entity[inline_op_pos[i]] = enum[i]
self.static_entities.append(entity)
self.size = len(self.static_entities)
self.num_direction = 2
self.directions = [(-1,), (1,)]
self.type_key = "inline"
def next_entity(self, pos, d):
# d is tuple
if len(d) == 1:
pos = (pos + d[0]) % self.size
return pos
else:
raise RuntimeError(
"Not support for direction more than one dim: {}".format(d))
def get_direction(self, num):
return self.directions[num % self.num_direction]
def able_inline(self, pos):
return pos in self.able_inline_list
class MergeSpce(SubSpace):
def __init__(self, merge_op_pos, op_num, force_merge=False):
self.dim = op_num
self.static_entities = []
self.able_merge_list = merge_op_pos
if force_merge:
entity = [0] * op_num
for pos in merge_op_pos:
entity[pos] = 1
self.static_entities.append(entity)
else:
num_merge_ops = len(merge_op_pos)
enums = gen_enum([1, 0], num_merge_ops)
for enum in enums:
entity = [0] * op_num
for i in range(num_merge_ops):
entity[merge_op_pos[i]] = enum[i]
self.static_entities.append(entity)
self.size = len(self.static_entities)
self.num_direction = 2
self.directions = [(-1,), (1,)]
self.type_key = "merge"
def next_entity(self, pos, d):
# d is tuple
if len(d) == 1:
pos = (pos + d[0]) % self.size
return pos
else:
raise RuntimeError(
"Not support for direction more than one dim: {}".format(d))
def get_direction(self, num):
return self.directions[num % self.num_direction]
def able_merge(self, pos):
return pos in self.able_merge_list
class EnumSpace(SubSpace):
def __init__(self, knobs):
self.dim = 2
self.static_entities = knobs
self.size = len(self.static_entities)
self.num_direction = 2
self.directions = [(-1,), (1,)]
def next_entity(self, pos, d):
# d is tuple
if len(d) == 1:
pos = (pos + d[0]) % self.size
return pos
else:
raise RuntimeError(
"Not support for direction more than one dim: {}".format(d))
def get_direction(self, num):
return self.directions[num % self.num_direction]
def generate_inline_space(op_lst, down_graph, force_inline=False):
inline_op_pos = []
for i, op in enumerate(op_lst):
if able_inline(op, down_graph):
inline_op_pos.append(i)
return InlineSpace(inline_op_pos, len(op_lst), force_inline=force_inline)
def generate_merge_space(op_lst, down_graph, force_merge=False):
merge_ops = list(range(len(op_lst)))
return MergeSpce(merge_ops, len(op_lst), force_merge=force_merge)
def generate_fuse_space(loops, groups):
return FuseSpace(groups, loops)
def generate_split_space(extent, nparts, allow_non_divisible='off'):
return SplitSpace(nparts, extent, allow_non_divisible=allow_non_divisible)
def generate_reorder_space(num_spatial_axis):
return ReorderSpace(num_spatial_axis)
def generate_unroll_space(explicit=False):
return UnrollSpace([0, 1, 512, 1500], explicit=explicit)
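# A small sanity-check sketch (not from the original file): the default unroll
# space pairs every step with an explicit/implicit flag, so 4 steps x 2 flags
# yield 8 entities, and next_entity() walks them cyclically.
if __name__ == "__main__":
    _unroll = generate_unroll_space(explicit=False)
    print(len(_unroll))                  # -> 8
    print(_unroll.get_entity(0))         # -> [0, 0]
    print(_unroll.next_entity(7, (1,)))  # -> 0 (wraps around)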
def generate_space_intra_op(op, down_graph, slevel=4, rlevel=3, groups=3, split_policy="off",
unroll_policy="off", fuse_policy="fuse_spatial", reorder_policy="last"):
spatial_axis_names = [x.var.name for x in op.axis]
spatial_axis_extents = [x.dom.extent.value for x in op.axis]
reduced_axis_names = [x.var.name for x in op.reduce_axis]
reduced_axis_extents = [x.dom.extent.value for x in op.reduce_axis]
##############################################################
# generate space:
schedule_space = Space()
# - fuse space
if fuse_policy == "fuse_spatial":
fuse_space = generate_fuse_space(spatial_axis_names, groups)
schedule_space.add_subspace("fuse_spatial", fuse_space, "fuse")
# - split space
for i, (name, extent) in enumerate(zip(spatial_axis_names, spatial_axis_extents)):
split_space = generate_split_space(extent, slevel, allow_non_divisible=split_policy)
schedule_space.add_subspace("split_{}_{}".format(name, i), split_space, "spatial")
for i, (name, extent) in enumerate(zip(reduced_axis_names, reduced_axis_extents)):
split_space = generate_split_space(extent, rlevel, allow_non_divisible=split_policy)
schedule_space.add_subspace("split_{}_{}".format(name, i), split_space, "reduce")
# - reorder space
if reorder_policy == "last":
reorder_space = generate_reorder_space(groups)
schedule_space.add_subspace("reorder", reorder_space, "reorder")
# -unroll space
unroll_space = generate_unroll_space(explicit=(unroll_policy == "explicit"))
schedule_space.add_subspace("unroll", unroll_space, "unroll")
# - other special spaces can be added
return schedule_space
def generate_space_inter_op(op_lst, down_graph, force_inline=False, force_merge=False, special_space=None):
##############################################################
# generate space:
schedule_space = Space()
# - inline space
inline_space = generate_inline_space(op_lst, down_graph, force_inline=force_inline)
schedule_space.add_subspace("inline", inline_space, "inline")
# - merge space
# merge_space = generate_merge_space(op_lst, down_graph, force_merge=force_merge)
# schedule_space.add_subspace("merge", merge_space, "merge")
# - other special spaces can be added
special_space = {} if special_space is None else special_space
for key, sspace in special_space.items():
schedule_space.add_subspace(key, sspace, "special")
return schedule_space |
# lambdas are single-expression anonymous functions declared inline
a = lambda x, y, d: x * 6 - d + y*d -x
ans = a(3,5,8)
print(ans) # --> 47
print((lambda x, y, d: x * 6 - d + y*d - x)(3, 5, 8)) # --> 47
# Lambdas can be used as lexical closures in other functions
def adder(n):
return lambda x: x + n # uses n from outer scope
add4 = adder(4)
print(add4(7)) # --> 11
|
from django.contrib import admin
from .models import Post
# Register your models here.
admin.site.register(Post) |
from ..vector3 import Vector3
gravity = Vector3(0, -9.81, 0)
"""Gravitational constant (9.81 m/s^2)"""
|
import pytest
from pytest_bdd import given, scenario, then, when, parsers
import os
import subprocess
import time
from common.mayastor import container_mod, mayastor_mod
from common.volume import Volume
import grpc
import mayastor_pb2 as pb
def megabytes(n):
return n * 1024 * 1024
def find_child(nexus, uri):
for child in nexus.children:
if child.uri == uri:
return child
return None
def convert_nexus_state(state):
STATES = {
"UNKNOWN": pb.NexusState.NEXUS_UNKNOWN,
"ONLINE": pb.NexusState.NEXUS_ONLINE,
"DEGRADED": pb.NexusState.NEXUS_DEGRADED,
"FAULTED": pb.NexusState.NEXUS_FAULTED,
}
return STATES[state]
def convert_child_state(state):
STATES = {
"UNKNOWN": pb.ChildState.CHILD_UNKNOWN,
"ONLINE": pb.ChildState.CHILD_ONLINE,
"DEGRADED": pb.ChildState.CHILD_DEGRADED,
"FAULTED": pb.ChildState.CHILD_FAULTED,
}
return STATES[state]
def convert_child_action(state):
ACTIONS = {
"OFFLINE": pb.ChildAction.offline,
"ONLINE": pb.ChildAction.online,
}
return ACTIONS[state]
@scenario("features/rebuild.feature", "running rebuild")
def test_running_rebuild():
"Running rebuild."
@scenario("features/rebuild.feature", "stopping rebuild")
def test_stopping_rebuild():
"Stopping rebuild."
@scenario("features/rebuild.feature", "pausing rebuild")
def test_pausing_rebuild():
"Pausing rebuild."
@scenario("features/rebuild.feature", "resuming rebuild")
def test_resuming_rebuild():
"Resuming rebuild."
@scenario("features/rebuild.feature", "setting a child ONLINE")
def test_setting_a_child_online():
"Setting a child ONLINE."
@scenario("features/rebuild.feature", "setting a child OFFLINE")
def test_setting_a_child_offline():
"Setting a child OFFLINE."
@pytest.fixture(scope="module")
def local_files():
files = [f"/tmp/disk-rebuild-{base}.img" for base in ["source", "target"]]
for path in files:
subprocess.run(
["sudo", "sh", "-c", f"rm -f '{path}' && truncate -s 64M '{path}'"],
check=True,
)
yield
for path in files:
subprocess.run(["sudo", "rm", "-f", path], check=True)
@pytest.fixture(scope="module")
def source_uri(local_files):
yield "aio:///tmp/disk-rebuild-source.img?blk_size=4096"
@pytest.fixture(scope="module")
def target_uri(local_files):
yield "aio:///tmp/disk-rebuild-target.img?blk_size=4096"
@pytest.fixture(scope="module")
def nexus_uuid():
yield "2c58c9f0-da89-4cb9-8097-dc67fa132493"
@pytest.fixture(scope="module")
def mayastor_instance(mayastor_mod):
yield mayastor_mod["ms0"]
@pytest.fixture(scope="module")
def find_nexus(mayastor_instance):
def find(uuid):
for nexus in mayastor_instance.ms.ListNexus(pb.Null()).nexus_list:
if nexus.uuid == uuid:
return nexus
return None
yield find
@pytest.fixture
def mayastor_nexus(mayastor_instance, nexus_uuid, source_uri):
nexus = mayastor_instance.ms.CreateNexus(
pb.CreateNexusRequest(
uuid=nexus_uuid, size=megabytes(64), children=[source_uri]
)
)
yield nexus
mayastor_instance.ms.DestroyNexus(pb.DestroyNexusRequest(uuid=nexus_uuid))
@pytest.fixture
def nexus_state(mayastor_nexus, find_nexus, nexus_uuid):
yield find_nexus(nexus_uuid)
@pytest.fixture
def rebuild_state(mayastor_instance, mayastor_nexus, nexus_uuid, target_uri):
try:
yield mayastor_instance.ms.GetRebuildState(
pb.RebuildStateRequest(uuid=nexus_uuid, uri=target_uri)
).state
except:
yield None
@given("a mayastor instance")
@given(parsers.parse('a mayastor instance "{name}"'))
def get_instance(mayastor_instance):
pass
@given("a nexus")
@given("a nexus with a source child device")
def get_nexus(mayastor_nexus):
pass
@when("a target child is added to the nexus")
def add_child(mayastor_instance, mayastor_nexus, nexus_uuid, target_uri):
mayastor_instance.ms.AddChildNexus(
pb.AddChildNexusRequest(uuid=nexus_uuid, uri=target_uri, norebuild=True)
)
@when("the rebuild operation is started")
def start_rebuild(mayastor_instance, mayastor_nexus, nexus_uuid, target_uri):
mayastor_instance.ms.StartRebuild(
pb.StartRebuildRequest(uuid=nexus_uuid, uri=target_uri)
)
@when("the rebuild operation is then paused")
def pause_rebuild(mayastor_instance, mayastor_nexus, nexus_uuid, target_uri):
mayastor_instance.ms.PauseRebuild(
pb.PauseRebuildRequest(uuid=nexus_uuid, uri=target_uri)
)
time.sleep(0.5)
@when("the rebuild operation is then resumed")
def resume_rebuild(mayastor_instance, mayastor_nexus, nexus_uuid, target_uri):
mayastor_instance.ms.ResumeRebuild(
pb.ResumeRebuildRequest(uuid=nexus_uuid, uri=target_uri)
)
@when("the rebuild operation is then stopped")
def stop_rebuild(mayastor_instance, mayastor_nexus, nexus_uuid, target_uri):
mayastor_instance.ms.StopRebuild(
pb.StopRebuildRequest(uuid=nexus_uuid, uri=target_uri)
)
time.sleep(0.5)
@when("the rebuild statistics are requested", target_fixture="rebuild_statistics")
def rebuild_statistics(mayastor_instance, mayastor_nexus, nexus_uuid, target_uri):
return mayastor_instance.ms.GetRebuildStats(
pb.RebuildStatsRequest(uuid=nexus_uuid, uri=target_uri)
)
@when(parsers.parse("the target child is set {state}"), target_fixture="set_child")
@when(parsers.parse("the target child is then set {state}"), target_fixture="set_child")
def set_child(mayastor_instance, mayastor_nexus, nexus_uuid, target_uri, state):
mayastor_instance.ms.ChildOperation(
pb.ChildNexusRequest(
uuid=nexus_uuid, uri=target_uri, action=convert_child_action(state)
)
)
@then(parsers.parse("the nexus state is {expected}"))
def check_nexus_state(nexus_state, expected):
assert nexus_state.state == convert_nexus_state(expected)
@then(parsers.parse("the source child state is {expected}"))
def check_source_child_state(nexus_state, source_uri, expected):
child = find_child(nexus_state, source_uri)
assert child.state == convert_child_state(expected)
@then(parsers.parse("the target child state is {expected}"))
def check_target_child_state(nexus_state, target_uri, expected):
child = find_child(nexus_state, target_uri)
assert child.state == convert_child_state(expected)
@then(parsers.parse("the rebuild count is {expected:d}"))
def check_rebuild_count(nexus_state, expected):
assert nexus_state.rebuilds == expected
@then(parsers.parse('the rebuild state is "{expected}"'))
def check_rebuild_state(rebuild_state, expected):
assert rebuild_state == expected
@then("the rebuild state is undefined")
def rebuild_state_is_undefined(rebuild_state):
assert rebuild_state is None
@then(parsers.parse('the rebuild statistics counter "{name}" is {expected}'))
def check_rebuild_statistics_counter(rebuild_statistics, name, expected):
assert (getattr(rebuild_statistics, name) == 0) == (expected == "zero")
|
# # from __future__ import print_function
import argparse
from tqdm import tqdm
import tensorflow as tf
import logging
logging.getLogger().setLevel(logging.INFO)
from utils.lr_scheduler import get_lr_scheduler
from model.model_builder import get_model
from generator.generator_builder import get_generator
import sys
physical_devices = tf.config.list_physical_devices('GPU')
if physical_devices:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
def parse_args(args):
parser = argparse.ArgumentParser(description='Simple training script for using snapmix .')
parser.add_argument('--epochs', default=300, type=int)
parser.add_argument('--start-val-epoch', default=100, type=int)
parser.add_argument('--batch-size', default=16, type=int)
parser.add_argument('--dataset', default='custom', type=str, help="choices=['cub','cars','custom']")
parser.add_argument('--dataset-dir', default='dataset/cat_dog', type=str, help="choices=['dataset/cub','dataset/cars','custom_dataset_dir']")
parser.add_argument('--augment', default='snapmix', type=str, help="choices=['baseline','cutmix','snapmix']")
parser.add_argument('--model', default='ResNet50', type=str, help="choices=['ResNet50','ResNet101','EfficientNetB0']")
parser.add_argument('--pretrain', default='imagenet', help="choices=[None,'imagenet','resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5']")
parser.add_argument('--concat-max-and-average-pool', default=False, type=bool,help="Use concat_max_and_average_pool layer in model")
parser.add_argument('--lr-scheduler', default='warmup_cosinedecay', type=str, help="choices=['step','warmup_cosinedecay']")
parser.add_argument('--init-lr', default=1e-3, type=float)
parser.add_argument('--lr-decay', default=0.1, type=float)
parser.add_argument('--lr-decay-epoch', default=[80, 150, 180], type=int)
parser.add_argument('--warmup-lr', default=1e-4, type=float)
parser.add_argument('--warmup-epochs', default=0, type=int)
parser.add_argument('--weight-decay', default=1e-4, type=float)
parser.add_argument('--optimizer', default='sgd', help="choices=['adam','sgd']")
return parser.parse_args(args)
def main(args):
train_generator, val_generator = get_generator(args)
model = get_model(args, train_generator.num_class)
train_generator.set_model(model.keras_model)
loss_object = tf.keras.losses.CategoricalCrossentropy()
if args.optimizer == 'sgd':
optimizer = tf.keras.optimizers.SGD(args.init_lr,momentum=0.9)
else:
optimizer = tf.keras.optimizers.Adam(args.init_lr)
lr_scheduler = get_lr_scheduler(args)
best_val_loss = float('inf')
best_val_acc = -1
best_val_epoch = -1
for epoch in range(args.epochs):
lr = lr_scheduler(epoch)
optimizer.learning_rate.assign(lr)
# training
train_loss = 0.
train_generator_tqdm = tqdm(enumerate(train_generator), total=len(train_generator))
for batch_index, (batch_imgs, batch_labels) in train_generator_tqdm:
batch_imgs = model.preprocess(batch_imgs)
with tf.GradientTape() as tape:
logits = model.keras_model(batch_imgs, training=True)
data_loss = loss_object(batch_labels, logits)
l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in model.keras_model.trainable_variables
if 'bn' not in v.name])
total_loss = data_loss + args.weight_decay * l2_loss
grads = tape.gradient(total_loss, model.keras_model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.keras_model.trainable_variables))
train_loss += total_loss
train_generator_tqdm.set_description(
"epoch:{}/{},train_loss:{:.4f},lr:{:.6f}".format(epoch, args.epochs,
train_loss/((batch_index+1) * train_generator.batch_size),
optimizer.learning_rate.numpy()))
train_generator.on_epoch_end()
# validation
if epoch > args.start_val_epoch:
val_loss = 0.
val_acc = 0.
val_generator_tqdm = tqdm(enumerate(val_generator), total=len(val_generator))
for batch_index, (batch_imgs, batch_labels) in val_generator_tqdm:
batch_imgs = model.preprocess(batch_imgs)
logits = model.keras_model(batch_imgs, training=False)
loss_value = loss_object(batch_labels, logits)
val_loss += loss_value
val_true_num = tf.reduce_sum(
tf.cast(tf.equal(tf.argmax(batch_labels, axis=-1), tf.argmax(logits, axis=-1)),
tf.dtypes.float32))
val_acc += val_true_num
val_generator_tqdm.set_description(
"epoch:{},val_loss:{:.4f}".format(epoch, loss_value))
val_loss /= len(val_generator)
val_acc /= (len(val_generator) * val_generator.batch_size)
if val_loss < best_val_loss:
best_val_loss = val_loss
best_val_acc = val_acc
best_val_epoch = epoch
logging.info("best_epoch:{},best_val_loss:{},best_val_acc:{}".format(best_val_epoch, best_val_loss, best_val_acc))
if __name__== "__main__":
args = parse_args(sys.argv[1:])
main(args)
|
"""Module controlling the writing of ParticleSets to NetCDF file"""
import os
import random
import shutil
import string
from datetime import timedelta as delta
from glob import glob
import netCDF4
import numpy as np
from parcels.tools.error import ErrorCode
from parcels.tools.loggers import logger
try:
from mpi4py import MPI
except ImportError:
MPI = None
try:
from parcels._version import version as parcels_version
except ImportError:
    raise EnvironmentError('Parcels version cannot be retrieved. Have you run "python setup.py install"?')
try:
from os import getuid
except ImportError:
# Windows does not have getuid(), so define to simply return 'tmp'
def getuid():
return 'tmp'
__all__ = ['ParticleFile']
def _is_particle_started_yet(particle, time):
"""We don't want to write a particle that is not started yet.
Particle will be written if:
* particle.time is equal to time argument of pfile.write()
* particle.time is before time (in case particle was deleted between previous export and current one)
"""
return (particle.dt*particle.time <= particle.dt*time or np.isclose(particle.time, time))
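# --- Usage sketch (not part of the original module) ---
# A minimal check of the helper above with a hypothetical stub particle: a
# particle counts as "started" once its start time has been reached, with the
# sign of dt taken into account.
if __name__ == "__main__":
    class _StubParticle:
        dt = 60.0
        time = 3600.0
    print(_is_particle_started_yet(_StubParticle(), 3600.0))  # -> True
    print(_is_particle_started_yet(_StubParticle(), 1800.0))  # -> False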
def _set_calendar(origin_calendar):
if origin_calendar == 'np_datetime64':
return 'standard'
else:
return origin_calendar
class ParticleFile(object):
"""Initialise trajectory output.
:param name: Basename of the output file
:param particleset: ParticleSet to output
:param outputdt: Interval which dictates the update frequency of file output
while ParticleFile is given as an argument of ParticleSet.execute()
It is either a timedelta object or a positive double.
:param write_ondelete: Boolean to write particle data only when they are deleted. Default is False
:param convert_at_end: Boolean to convert npy files to netcdf at end of run. Default is True
    :param tempwritedir: directory to write temporary files to during execution.
Default is out-XXXXXX where Xs are random capitals. Files for individual
processors are written to subdirectories 0, 1, 2 etc under tempwritedir
:param pset_info: dictionary of info on the ParticleSet, stored in tempwritedir/XX/pset_info.npy,
used to create NetCDF file from npy-files.
"""
def __init__(self, name, particleset, outputdt=np.infty, write_ondelete=False, convert_at_end=True,
tempwritedir=None, pset_info=None):
self.write_ondelete = write_ondelete
self.convert_at_end = convert_at_end
self.outputdt = outputdt
self.lasttime_written = None # variable to check if time has been written already
self.dataset = None
self.metadata = {}
if pset_info is not None:
for v in pset_info.keys():
setattr(self, v, pset_info[v])
else:
self.name = name
self.particleset = particleset
self.parcels_mesh = self.particleset.fieldset.gridset.grids[0].mesh
self.time_origin = self.particleset.time_origin
self.lonlatdepth_dtype = self.particleset.lonlatdepth_dtype
self.var_names = []
self.var_names_once = []
for v in self.particleset.ptype.variables:
if v.to_write == 'once':
self.var_names_once += [v.name]
elif v.to_write is True:
self.var_names += [v.name]
if len(self.var_names_once) > 0:
self.written_once = []
self.file_list_once = []
self.file_list = []
self.time_written = []
self.maxid_written = -1
if tempwritedir is None:
tempwritedir = os.path.join(os.path.dirname(str(self.name)), "out-%s"
% ''.join(random.choice(string.ascii_uppercase) for _ in range(8)))
if MPI:
mpi_rank = MPI.COMM_WORLD.Get_rank()
self.tempwritedir_base = MPI.COMM_WORLD.bcast(tempwritedir, root=0)
else:
self.tempwritedir_base = tempwritedir
mpi_rank = 0
self.tempwritedir = os.path.join(self.tempwritedir_base, "%d" % mpi_rank)
if pset_info is None: # otherwise arrive here from convert_npydir_to_netcdf
self.delete_tempwritedir()
def open_netcdf_file(self, data_shape):
"""Initialise NetCDF4.Dataset for trajectory output.
The output follows the format outlined in the Discrete Sampling Geometries
section of the CF-conventions:
http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#discrete-sampling-geometries
The current implementation is based on the NCEI template:
http://www.nodc.noaa.gov/data/formats/netcdf/v2.0/trajectoryIncomplete.cdl
:param data_shape: shape of the variables in the NetCDF4 file
"""
extension = os.path.splitext(str(self.name))[1]
fname = self.name if extension in ['.nc', '.nc4'] else "%s.nc" % self.name
if os.path.exists(str(fname)):
os.remove(str(fname))
self.dataset = netCDF4.Dataset(fname, "w", format="NETCDF4")
self.dataset.createDimension("obs", data_shape[1])
self.dataset.createDimension("traj", data_shape[0])
coords = ("traj", "obs")
self.dataset.feature_type = "trajectory"
self.dataset.Conventions = "CF-1.6/CF-1.7"
self.dataset.ncei_template_version = "NCEI_NetCDF_Trajectory_Template_v2.0"
self.dataset.parcels_version = parcels_version
self.dataset.parcels_mesh = self.parcels_mesh
# Create ID variable according to CF conventions
self.id = self.dataset.createVariable("trajectory", "i4", coords,
fill_value=-2**(31)) # maxint32 fill_value
self.id.long_name = "Unique identifier for each particle"
self.id.cf_role = "trajectory_id"
# Create time, lat, lon and z variables according to CF conventions:
self.time = self.dataset.createVariable("time", "f8", coords, fill_value=np.nan)
self.time.long_name = ""
self.time.standard_name = "time"
if self.time_origin.calendar is None:
self.time.units = "seconds"
else:
self.time.units = "seconds since " + str(self.time_origin)
self.time.calendar = 'standard' if self.time_origin.calendar == 'np_datetime64' else self.time_origin.calendar
self.time.axis = "T"
if self.lonlatdepth_dtype is np.float64:
lonlatdepth_precision = "f8"
else:
lonlatdepth_precision = "f4"
self.lat = self.dataset.createVariable("lat", lonlatdepth_precision, coords, fill_value=np.nan)
self.lat.long_name = ""
self.lat.standard_name = "latitude"
self.lat.units = "degrees_north"
self.lat.axis = "Y"
self.lon = self.dataset.createVariable("lon", lonlatdepth_precision, coords, fill_value=np.nan)
self.lon.long_name = ""
self.lon.standard_name = "longitude"
self.lon.units = "degrees_east"
self.lon.axis = "X"
self.z = self.dataset.createVariable("z", lonlatdepth_precision, coords, fill_value=np.nan)
self.z.long_name = ""
self.z.standard_name = "depth"
self.z.units = "m"
self.z.positive = "down"
for vname in self.var_names:
if vname not in ['time', 'lat', 'lon', 'depth', 'id']:
setattr(self, vname, self.dataset.createVariable(vname, "f4", coords, fill_value=np.nan))
getattr(self, vname).long_name = ""
getattr(self, vname).standard_name = vname
getattr(self, vname).units = "unknown"
for vname in self.var_names_once:
setattr(self, vname, self.dataset.createVariable(vname, "f4", "traj", fill_value=np.nan))
getattr(self, vname).long_name = ""
getattr(self, vname).standard_name = vname
getattr(self, vname).units = "unknown"
for name, message in self.metadata.items():
setattr(self.dataset, name, message)
def __del__(self):
if self.convert_at_end:
self.close()
def close(self, delete_tempfiles=True):
"""Close the ParticleFile object by exporting and then deleting
the temporary npy files"""
self.export()
mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0
if mpi_rank == 0:
if delete_tempfiles:
self.delete_tempwritedir(tempwritedir=self.tempwritedir_base)
self.convert_at_end = False
def add_metadata(self, name, message):
"""Add metadata to :class:`parcels.particleset.ParticleSet`
        :param name: Name of the metadata variable
:param message: message to be written
"""
if self.dataset is None:
self.metadata[name] = message
else:
setattr(self.dataset, name, message)
def convert_pset_to_dict(self, pset, time, deleted_only=False):
"""Convert all Particle data from one time step to a python dictionary.
:param pset: ParticleSet object to write
:param time: Time at which to write ParticleSet
:param deleted_only: Flag to write only the deleted Particles
returns two dictionaries: one for all variables to be written each outputdt,
and one for all variables to be written once
"""
data_dict = {}
data_dict_once = {}
time = time.total_seconds() if isinstance(time, delta) else time
if self.lasttime_written != time and \
(self.write_ondelete is False or deleted_only is True):
if pset.size == 0:
logger.warning("ParticleSet is empty on writing as array at time %g" % time)
else:
if deleted_only:
pset_towrite = pset
else:
pset_towrite = [p for p in pset if time - np.abs(p.dt/2) <= p.time < time + np.abs(p.dt) and np.isfinite(p.id)]
if len(pset_towrite) > 0:
for var in self.var_names:
data_dict[var] = np.array([getattr(p, var) for p in pset_towrite])
self.maxid_written = np.max([self.maxid_written, np.max(data_dict['id'])])
pset_errs = [p for p in pset_towrite if p.state != ErrorCode.Delete and abs(time-p.time) > 1e-3]
for p in pset_errs:
logger.warning_once(
'time argument in pfile.write() is %g, but a particle has time % g.' % (time, p.time))
if time not in self.time_written:
self.time_written.append(time)
if len(self.var_names_once) > 0:
first_write = [p for p in pset if (p.id not in self.written_once) and _is_particle_started_yet(p, time)]
data_dict_once['id'] = np.array([p.id for p in first_write])
for var in self.var_names_once:
data_dict_once[var] = np.array([getattr(p, var) for p in first_write])
self.written_once += [p.id for p in first_write]
if not deleted_only:
self.lasttime_written = time
return data_dict, data_dict_once
def dump_dict_to_npy(self, data_dict, data_dict_once):
"""Buffer data to set of temporary numpy files, using np.save"""
if not os.path.exists(self.tempwritedir):
os.makedirs(self.tempwritedir)
if len(data_dict) > 0:
tmpfilename = os.path.join(self.tempwritedir, str(len(self.file_list)) + ".npy")
with open(tmpfilename, 'wb') as f:
np.save(f, data_dict)
self.file_list.append(tmpfilename)
if len(data_dict_once) > 0:
tmpfilename = os.path.join(self.tempwritedir, str(len(self.file_list)) + '_once.npy')
with open(tmpfilename, 'wb') as f:
np.save(f, data_dict_once)
self.file_list_once.append(tmpfilename)
def dump_psetinfo_to_npy(self):
pset_info = {}
attrs_to_dump = ['name', 'var_names', 'var_names_once', 'time_origin', 'lonlatdepth_dtype',
'file_list', 'file_list_once', 'maxid_written', 'time_written', 'parcels_mesh',
'metadata']
for a in attrs_to_dump:
if hasattr(self, a):
pset_info[a] = getattr(self, a)
with open(os.path.join(self.tempwritedir, 'pset_info.npy'), 'wb') as f:
np.save(f, pset_info)
def write(self, pset, time, deleted_only=False):
"""Write all data from one time step to a temporary npy-file
        using a python dictionary. The data is buffered in the temporary write directory.
:param pset: ParticleSet object to write
:param time: Time at which to write ParticleSet
:param deleted_only: Flag to write only the deleted Particles
"""
data_dict, data_dict_once = self.convert_pset_to_dict(pset, time, deleted_only=deleted_only)
self.dump_dict_to_npy(data_dict, data_dict_once)
self.dump_psetinfo_to_npy()
def read_from_npy(self, file_list, time_steps, var):
"""Read NPY-files for one variable using a loop over all files.
:param file_list: List that contains all file names in the output directory
:param time_steps: Number of time steps that were written in out directory
:param var: name of the variable to read
"""
data = np.nan * np.zeros((self.maxid_written+1, time_steps))
time_index = np.zeros(self.maxid_written+1, dtype=int)
t_ind_used = np.zeros(time_steps, dtype=int)
# loop over all files
for npyfile in file_list:
try:
data_dict = np.load(npyfile, allow_pickle=True).item()
except NameError:
raise RuntimeError('Cannot combine npy files into netcdf file because your ParticleFile is '
'still open on interpreter shutdown.\nYou can use '
'"parcels_convert_npydir_to_netcdf %s" to convert these to '
'a NetCDF file yourself.\nTo avoid this error, make sure you '
'close() your ParticleFile at the end of your script.' % self.tempwritedir)
id_ind = np.array(data_dict["id"], dtype=int)
t_ind = time_index[id_ind] if 'once' not in file_list[0] else 0
t_ind_used[t_ind] = 1
data[id_ind, t_ind] = data_dict[var]
time_index[id_ind] = time_index[id_ind] + 1
# remove rows and columns that are completely filled with nan values
tmp = data[time_index > 0, :]
return tmp[:, t_ind_used == 1]
def export(self):
"""Exports outputs in temporary NPY-files to NetCDF file"""
if MPI:
# The export can only start when all threads are done.
MPI.COMM_WORLD.Barrier()
if MPI.COMM_WORLD.Get_rank() > 0:
                return  # export only on rank 0
# Retrieve all temporary writing directories and sort them in numerical order
temp_names = sorted(glob(os.path.join("%s" % self.tempwritedir_base, "*")),
key=lambda x: int(os.path.basename(x)))
if len(temp_names) == 0:
raise RuntimeError("No npy files found in %s" % self.tempwritedir_base)
global_maxid_written = -1
global_time_written = []
global_file_list = []
if len(self.var_names_once) > 0:
global_file_list_once = []
for tempwritedir in temp_names:
if os.path.exists(tempwritedir):
pset_info_local = np.load(os.path.join(tempwritedir, 'pset_info.npy'), allow_pickle=True).item()
global_maxid_written = np.max([global_maxid_written, pset_info_local['maxid_written']])
global_time_written += pset_info_local['time_written']
global_file_list += pset_info_local['file_list']
if len(self.var_names_once) > 0:
global_file_list_once += pset_info_local['file_list_once']
self.maxid_written = global_maxid_written
self.time_written = np.unique(global_time_written)
for var in self.var_names:
data = self.read_from_npy(global_file_list, len(self.time_written), var)
if var == self.var_names[0]:
self.open_netcdf_file(data.shape)
varout = 'z' if var == 'depth' else var
getattr(self, varout)[:, :] = data
if len(self.var_names_once) > 0:
for var in self.var_names_once:
getattr(self, var)[:] = self.read_from_npy(global_file_list_once, 1, var)
self.dataset.close()
def delete_tempwritedir(self, tempwritedir=None):
"""Deleted all temporary npy files
:param tempwritedir Optional path of the directory to delete
"""
if tempwritedir is None:
tempwritedir = self.tempwritedir
if os.path.exists(tempwritedir):
shutil.rmtree(tempwritedir)
|
'''Initialization.'''
from .mole import Mole
from .hammer import Hammer |
from collections import OrderedDict
from coreapi.compat import urlparse
from openapi_codec.utils import get_method, get_encoding, get_links_from_document
from openapi_codec import encode
class AbstractCodec:
def _generate_swagger_object(self, document):
parsed_url = urlparse.urlparse(document.url)
swagger = OrderedDict()
swagger['swagger'] = '2.0'
swagger['info'] = OrderedDict()
swagger['info']['title'] = document.title
swagger['info']['version'] = '' # Required by the spec
if parsed_url.netloc:
swagger['host'] = parsed_url.netloc
if parsed_url.scheme:
swagger['schemes'] = [parsed_url.scheme]
swagger['paths'] = self._get_paths_object(document)
return swagger
def _get_paths_object(self, document):
paths = OrderedDict()
links = self._get_links(document)
for operation_id, link, tags in links:
if link.url not in paths:
paths[link.url] = OrderedDict()
method = get_method(link)
operation = self._get_operation(operation_id, link, tags)
paths[link.url].update({method: operation})
return paths
def _get_operation(self, operation_id, link, tags):
encoding = get_encoding(link)
description = link.description.strip()
summary = description.splitlines()[0] if description else None
operation = {
'operationId': operation_id,
'responses': self._get_responses(link),
'parameters': self._get_parameters(link, encoding)
}
if description:
operation['description'] = description
if summary:
operation['summary'] = summary
if encoding:
operation['consumes'] = [encoding]
if tags:
operation['tags'] = tags
return operation
def _get_responses(self, link):
if hasattr(link, '_responses_docs') and isinstance(link._responses_docs, dict):
responses = link._responses_docs.get('{}'.format(link._view_method))
if responses:
return responses
else:
return link._responses_docs.get(
'{}'.format(link.action),
link._responses_docs
)
        # if link._responses_docs is not a dict, fall back to the base _get_responses
return encode._get_responses(link)
def _get_parameters(self, link, encoding):
if hasattr(link, '_parameters_docs') and isinstance(link._parameters_docs, dict):
parameters_doc = link._parameters_docs.get('{}'.format(link._view_method))
if not parameters_doc:
parameters_doc = link._parameters_docs.get(
'{}'.format(link.action), None)
else:
parameters_doc = None
if parameters_doc is not None:
params = []
for parameter in parameters_doc:
params.append(parameter)
return params
        # if link._parameters_docs is not a dict, fall back to the base _get_parameters
return encode._get_parameters(link, encoding)
def _get_links(self, document):
"""
Return a list of (operation_id, link, [tags])
"""
# Extract all the links from the first or second level of the document.
links = []
for keys, link in get_links_from_document(document):
if len(keys) > 1:
operation_id = '_'.join(keys[1:])
tags = [keys[0]]
else:
operation_id = keys[0]
tags = []
links.append((operation_id, link, tags))
# Determine if the operation ids each have unique names or not.
operation_ids = [item[0] for item in links]
unique = len(set(operation_ids)) == len(links)
# If the operation ids are not unique, then prefix them with the tag.
if not unique:
return [self._add_tag_prefix(item) for item in links]
return links
def _add_tag_prefix(self, item):
operation_id, link, tags = item
if tags:
operation_id = tags[0] + '_' + operation_id
return (operation_id, link, tags)
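# --- Usage sketch (not part of the original module) ---
# A minimal illustration of the tag-prefixing fallback: when operation ids
# collide, _add_tag_prefix() derives a unique id from the first tag. The link
# object is not needed for this step, so None stands in for it here.
if __name__ == "__main__":
    codec = AbstractCodec()
    print(codec._add_tag_prefix(("list", None, ["users"])))
    # -> ('users_list', None, ['users'])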
|
import subprocess
import time
class LiteClient:
def __init__(self, args, config, log):
self.log = log
self.config = config
self.ls_addr = args.ls_addr
self.ls_key = args.ls_key
self.log.log(self.__class__.__name__, 3, 'liteClient binary : ' + self.config["bin"])
self.log.log(self.__class__.__name__, 3, 'liteServer address: ' + self.ls_addr)
self.log.log(self.__class__.__name__, 3, 'liteServer key : ' + self.ls_key)
def exec(self, cmd, nothrow = False, wait = None):
self.log.log(self.__class__.__name__, 3, 'Executing command : ' + cmd)
args = [self.config["bin"],
"--addr", self.ls_addr,
"--b64", self.ls_key,
"--verbosity", "0",
"--cmd", cmd]
if nothrow:
process = subprocess.run(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
timeout=self.config["timeout"])
return process.stdout.decode("utf-8")
success = False
output = None
for loop in range(0, self.config["retries"]+1):
try:
process = subprocess.run(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
timeout=self.config["timeout"])
output = process.stdout.decode("utf-8")
stderr = process.stderr.decode("utf-8")
if wait:
time.sleep(wait)
if process.returncode == 0:
                    success = True
                    break
except subprocess.TimeoutExpired as e:
continue
if success:
            self.log.log(self.__class__.__name__, 3, 'Command successful!')
return output
else:
msg = "LiteClient failure after {} retries".format(loop)
self.log.log(self.__class__.__name__, 1, msg)
raise Exception(msg)
# Based on code by https://github.com/igroman787/mytonctrl
#
def parse_output(self, text, path):
result = None
if path is None or text is None:
return None
if not isinstance(path, list):
path = [path]
for idx, element in enumerate(path):
if ':' not in element:
element += ':'
if element not in text:
break
start = text.find(element) + len(element)
count = 0
bcount = 0
textLen = len(text)
end = textLen
for i in range(start, textLen):
letter = text[i]
if letter == '(':
count += 1
bcount += 1
elif letter == ')':
count -= 1
if letter == ')' and count < 1:
end = i + 1
break
elif letter == '\n' and count < 1:
end = i
break
text = text[start:end]
if count != 0 and bcount == 0:
text = text.replace(')', '')
if idx+1 == len(path):
result = text
return result
# end define
# end class
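# --- Usage sketch (not part of the original module) ---
# A hedged example of parse_output() on a synthetic lite-client style string.
# The stub args/config/log objects below are hypothetical; only the fields the
# constructor touches are provided, and no lite-client binary is executed.
if __name__ == "__main__":
    class _StubLog:
        def log(self, *args):
            pass
    class _StubArgs:
        ls_addr = "127.0.0.1:3031"
        ls_key = "dummykey=="
    client = LiteClient(_StubArgs(), {"bin": "lite-client", "timeout": 10, "retries": 2}, _StubLog())
    sample = "account state is (account balance:(1000000000) last_paid:1700000000)"
    print(client.parse_output(sample, "balance"))  # -> "(1000000000)"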
|
import requests
with open('file.txt', 'rb') as f:
data = f.read()
response = requests.put('http://localhost:28139/file.txt', data=data)
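# A small follow-up (not in the original snippet): fail loudly on HTTP errors
# and show the status code, assuming the server sends a standard response.
response.raise_for_status()
print(response.status_code)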
|
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.ticker as mticker
import csv
import json
import math
import random
import bisect
import os
import argparse
import fnmatch
import sys
import statistics
from enum import Enum
from typing import *
class graph_scaling(Enum):
absolute = 0
relative = 1
class data_label_order(Enum):
    ascending = 0
descending = 1
class data_label_format(Enum):
clock = 1
custom = 256
class scaling_info():
def __init__(self, scaling: graph_scaling, to: str = "") -> None:
self.type = scaling
self.to = to
class category_info():
def __init__(self,
name: str,
scale: scaling_info,
prefix: Optional[str] = None,
suffix: Optional[str] = None) -> None:
self.name = name
self.prefix = prefix if prefix else name + "_"
self.suffix = suffix if suffix else ""
self.scale = scale
class data_label_info():
clock_time_scales = [
("fs", "femtoseconds", 1e-15, 1e+15),
("ps", "picoseconds", 1e-12, 1e+12),
("ns", "nanoseconds", 1e-9, 1e+9),
("µs", "microseconds", .00001, 1000000),
("us", "microseconds", .00001, 1000000),
("ms", "milliseconds", .001, 1000),
("s", "seconds", 1, 1),
("m", "minutes", 60, 1 / 60),
("h", "hours", 60 * 60, (1 / 60) / 60),
]
unknown_time_scales = [("", "", 1, 1)]
def __init__(self,
name: str = "real_time",
order=data_label_order.ascending,
format=data_label_format.clock) -> None:
self.name = "real_time"
self.order = order
self.format = format
self.format_list = data_label_info.clock_time_scales if self.format == data_label_format.clock else data_label_info.unknown_time_scales
class source_info():
def __init__(self,
file_name: str,
prefix: str = "",
required: bool = False) -> None:
self.file_name: str = file_name
self.prefix: str = prefix
self.required: bool = required
class analysis_info():
def __init__(self):
self.name = ""
self.default_scale: scaling_info = scaling_info(
graph_scaling.relative, "base")
self.categories: List[category_info] = [
category_info("find", self.default_scale),
category_info("count", self.default_scale),
category_info("fill", self.default_scale),
category_info("copy", self.default_scale),
category_info("swap_ranges", self.default_scale),
category_info("rotate", self.default_scale),
category_info("equal", self.default_scale)
]
self.data_labels = [
data_label_info("real_time"),
data_label_info("cpu_time")
]
self.sources: List[source_info] = [
source_info(
"libc++_bit_benchmarks.json",
"libc++",
False,
),
source_info(
"libstdc++_bit_benchmarks.json",
"libstdc++",
False,
),
source_info(
"vc++_bit_benchmarks.json",
"vc++",
False,
)
]
class stats():
def __init__(self, data: List[float]) -> None:
if len(data) < 1:
self.min = 0.0
self.max = 0.0
self.stddev = 0.0
self.mean = 0.0
self.median = 0.0
self.mode = 0.0
self.index_of_dispersion = 0.0
else:
self.min = min(data)
self.max = max(data)
self.mean = statistics.mean(data)
self.stddev = statistics.stdev(data, self.mean)
self.median = statistics.median(data)
self.mode = statistics.mode(data)
            self.index_of_dispersion = 0
            if (self.mean != 0):
                self.index_of_dispersion = statistics.variance(
                    data) / self.mean
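# A quick sanity sketch (not part of the original script): summary statistics
# for a small sample, exercising the index-of-dispersion branch above.
if __name__ == "__main__":
    _s = stats([1.0, 2.0, 2.0, 3.0, 4.0])
    print(_s.mean, _s.median, _s.mode)       # -> 2.4 2.0 2.0
    print(round(_s.index_of_dispersion, 3))  # -> 0.542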
class label_data():
def __init__(self, info: analysis_info, data: List[float]) -> None:
self.label = ""
self.name = ""
self.base_name = ""
self.category = ""
self.is_noop_category = ""
self.data = data
self.heuristics = stats(self.data)
self.stats = stats(self.data)
        self.name_index = 0
        self.color_index = 0
self.error: Optional[str] = None
class benchmark():
def __init__(self, data: object) -> None:
self.labels: Dict[str, label_data] = {}
self.stats: Dict[str, stats] = {}
class benchmark_category():
def __init__(self):
self.benchmarks: List[benchmark] = []
self.stats: Dict[str, stats] = {}
def len_sorter(x):
return len(x)
def entry_name_sorter(b):
return b["name"]
def is_noop_category(n):
noop_names = ["noop", "no-op", "no op"]
s = n.casefold()
return s in noop_names
def aggregate_categories(all_benchmarks: List[Dict[str, object]],
info: analysis_info):
benchmarks: Dict[str, benchmark_category] = {}
def mean_sorter(b):
if (b.get("error") != None):
return sys.float_info.max
mean_group = b["statistics"]["mean"]
data_point_name = data_point_names[0][0]
return mean_group[data_point_name]
# find no-op category and use it in all benchmarks
noop_benches: Optional[benchmark] = None
for b in all_benchmarks:
category = b["category"]
if is_noop_category(category):
noop_benches = benchmark(b)
break
for b in all_benchmarks:
category = b["category"]
if is_noop_category(category):
continue
        if category not in benchmarks:
            target_category: benchmark_category = benchmark_category()
            if (noop_benches):
                target_category.benchmarks.append(noop_benches)
            benchmarks[category] = target_category
        target_category = benchmarks[category]
target_entries = target_category["benchmarks"]
target_heuristics = target_category["heuristics"]
target_entries.append(b)
target_heuristics["min"] = min(b["heuristics"]["min"],
target_heuristics["min"])
target_heuristics["max"] = max(b["heuristics"]["max"],
target_heuristics["max"])
for category_name in benchmarks:
category_benchmarks = benchmarks[category_name]
# first, sort by name so we can assign colors to each
# benchmark appropriately (and make those
# color assignments stable)
entries = category_benchmarks["benchmarks"]
entries.sort(key=entry_name_sorter)
for bi, entry in enumerate(entries):
entry["name_index"] = bi
ci = entry["color_index"]
if (len(data_point_names) < 2):
dp = data_point_names[0]
ci[dp[0]] = bi
else:
for dpi, dp in enumerate(data_point_names):
ci[dp[0]] = dpi
# then, sort by mean
entries.sort(key=mean_sorter, reverse=lower_is_better)
return benchmarks
def parse_json_info(info: analysis_info, input_name: str, j) -> analysis_info:
input_relative_path = os.path.dirname(input_name)
if info is None:
info = analysis_info()
info.name = j["name"]
jdefault_scale = j.get("scale")
if jdefault_scale:
jtype = jdefault_scale["type"]
if jtype == "relative":
jto = jdefault_scale["to"]
info.default_scale = scaling_info(graph_scaling.relative, jto)
else:
info.default_scale = scaling_info(graph_scaling.absolute, "")
jcategories = j.get("categories")
if jcategories:
for jcategory in jcategories:
name = jcategory["name"]
jprefix = jcategory.get("prefix")
jsuffix = jcategory.get("suffix")
jscale = jcategory.get("scale")
scale: Optional[scaling_info] = None
if jscale is not None:
jtype = jscale.get("type")
if jtype == "relative":
jto = jscale["to"]
scale = scaling_info(graph_scaling.relative, jto)
else:
scale = scaling_info(graph_scaling.absolute, "")
else:
scale = info.default_scale
info.categories.append(
category_info(name, scale, jprefix, jsuffix))
jdata_labels = j.get("data_labels")
if jdata_labels:
info.data_labels.clear()
for jdata_label in jdata_labels:
dli: data_label_info = data_label_info()
dli.name = jdata_label["name"]
jascending = jdata_label.get("ascending")
            if isinstance(jascending, bool) and jascending:
dli.order = data_label_order.ascending
jformat = jdata_label.get("format")
if jformat is None or jformat == "clock":
dli.format = data_label_format.clock
dli.format_list = data_label_info.clock_time_scales
else:
dli.format = data_label_format.custom
dli.format_list = data_label_info.unknown_time_scales
info.data_labels.append(dli)
jsources = j["sources"]
for jsource in jsources:
jreq = jsource.get("required")
jprefix = jsource.get("prefix")
jfile = jsource["file_name"]
if not os.path.isabs(jfile):
jfile = os.path.normpath(
os.path.join(input_relative_path, jfile))
info.sources.append(
source_info(jfile, jprefix,
jreq if isinstance(jreq, bool) else False))
return info
def parse_json(all_benchmarks: List[benchmark], j, info: analysis_info):
j_benchmarks_array = j["benchmarks"]
for j_benchmark in j_benchmarks_array:
name = j_benchmark['name']
base_name = j_benchmark['base_name']
benchmark = None
potential_targets = [
b for b in all_benchmarks if b.base_name == base_name
]
potential_categories = None if info.categories == None else [
c for c in info.categories if c in base_name
]
category = ""
benchmark_name = base_name
point_scalar = 1
if (len(potential_categories) > 1):
potential_categories.sort(key=len_sorter, reverse=True)
if len(potential_categories) > 0:
category = potential_categories[0]
if category in scale_categories:
point_scalar = 1 / scale
if (len(potential_targets) < 1):
benchmark_name = base_name.replace(category, "").strip("_")
for chunk in name_removals:
benchmark_name = benchmark_name.replace(chunk, "")
all_benchmarks.append({
"category": category,
"name": benchmark_name,
"base_name": base_name,
"data": {},
"statistics": {},
"heuristics": {
"max": sys.float_info.min,
"min": sys.float_info.max,
},
"name_index": {},
"color_index": {},
"error": None
})
benchmark = all_benchmarks[-1]
else:
benchmark = potential_targets[-1]
data = benchmark["data"]
statistics = benchmark["statistics"]
heuristics = benchmark["heuristics"]
# check for errors
benchmark_error = j_benchmark.get('error_occurred')
if benchmark_error != None and benchmark_error:
benchmark["error"] = j_benchmark['error_message']
continue
# populate data
for point_name_lower in data_point_names:
point_name = point_name_lower[0]
if point_name not in data:
data[point_name] = []
time_unit = j_benchmark['time_unit']
unit_index = timescale_units.index(time_unit)
time_scale = time_scales[unit_index]
to_seconds_multiplier = time_scale[2]
if name == base_name:
# is a data point
for point_name_lower in data_point_names:
point_name = point_name_lower[0]
point_list = data[point_name]
point = j_benchmark[point_name]
point_adjusted = point * to_seconds_multiplier * point_scalar
point_list.append(point_adjusted)
heuristics["min"] = min(heuristics["min"], point_adjusted)
heuristics["max"] = max(heuristics["max"], point_adjusted)
else:
# is a statistic
statistic_name = name.replace(base_name, "").strip("_")
if statistic_name not in statistics:
statistics[statistic_name] = {}
statistic = statistics[statistic_name]
for point_name_lower in data_point_names:
point_name = point_name_lower[0]
point = j_benchmark[point_name]
point_adjusted = point * to_seconds_multiplier * point_scalar
statistic[point_name] = point_adjusted
return aggregate_categories(all_benchmarks, data_point_names)
def draw_graph(name, category, benchmarks_heuristics, data_point_names,
time_scales):
# initialize figures
figures, axes = plt.subplots()
# set name we're going to use
figure_name = name if name != None and len(
name) > 0 else category.replace("_", "")
# get the values of the time scale to perform bisecting
time_scale_values_from_seconds = [x[2] for x in time_scales]
benchmarks = benchmarks_heuristics["benchmarks"]
heuristics = benchmarks_heuristics["heuristics"]
benchmarks_max = heuristics["max"]
benchmarks_min = heuristics["min"]
absolute_range = benchmarks_max - benchmarks_min
# some pattern constants, to help us be pretty
# some color constants, to help us be pretty!
# and differentiate graphs
# yapf: disable
data_point_aesthetics = [
('#a6cee3', '/'),
('#f255bb', 'O'),
('#00c9ab', '\\'),
('#b15928', 'o'),
('#33a02c', '.'),
('#fb9a99', '*'),
('#e31a1c', '+'),
('#fdbf6f', 'x'),
('#ff7f00', '|'),
('#cab2d6', None),
('#6a3d9a', '-'),
('#ffff99', 'xx'),
('#f5f5f5', '..'),
('#1f78b4', '||'),
('#b2df8a', '**'),
('#cc33cc', '--')
]
#yapf: enable
# transpose data into forms we need
benchmark_names = [b["name"] for b in benchmarks]
bars = []
scatters = []
num_data_points = len(data_point_names)
bar_padding = 0.15
bar_height = 0.35
bar_all_sizes = bar_height * num_data_points + bar_padding
quarter_bar_height = bar_height * 0.25
bar_y_positions = []
# draw mean-based bars with error indicators
# and draw scatter-plot points
for bi, benchmark in enumerate(benchmarks):
statistics = benchmark["statistics"]
for di, data_point_name_lower in enumerate(data_point_names):
data_point_name = data_point_name_lower[0]
bar_y = (bi * bar_all_sizes) + (di * bar_height) + (
bar_padding * 0.5)
bar_y_positions.append(bar_y)
err = benchmark.get('error')
color_index = benchmark["color_index"][data_point_name]
aesthetics = data_point_aesthetics[color_index]
color = aesthetics[0]
colorhsv = matplotlib.colors.rgb_to_hsv(
matplotlib.colors.hex2color(color))
colorhsv[2] *= 0.6
edgecolor = matplotlib.colors.hsv_to_rgb(colorhsv)
if err != None:
bars.append(
axes.text(
absolute_range * 0.02,
bar_y + (quarter_bar_height * 2),
err,
color=color,
style='italic',
horizontalalignment='left',
verticalalignment='center',
fontsize='small'))
continue
mean = statistics["mean"][data_point_name]
stddev = statistics["stddev"][data_point_name]
hatch = aesthetics[1]
bar = axes.barh(
bar_y,
mean,
height=bar_height,
xerr=stddev,
linewidth=0.2,
edgecolor=edgecolor,
color=color,
hatch=hatch,
align='edge',
error_kw={
"capsize": 5.0,
"mew": 1.2,
"ecolor": 'black',
},
alpha=0.82)
bars.append(bar)
# the scatter plot should be semi-transparent in color...
xscatter = benchmark["data"][data_point_name]
xscatter_len = len(xscatter)
yscatter = [
bar_y + random.uniform(quarter_bar_height,
bar_height - quarter_bar_height)
for _ in xscatter
]
scatter_alpha = 0.20 if xscatter_len < 11 else 0.10 if xscatter_len < 101 else 0.05 if xscatter_len < 1001 else 0.002
scatter = axes.scatter(
xscatter,
yscatter,
color=color,
edgecolor='#000000',
linewidth=0.5,
alpha=scatter_alpha)
scatters.append(scatter)
xscaleindex = bisect.bisect_left(time_scale_values_from_seconds,
benchmarks_max)
xscale = time_scales[xscaleindex - 1]
def time_axis_formatting(value, pos):
if value == 0:
return '0'
if value.is_integer():
return '{0:.0f}'.format(value * xscale[3])
return '{0:.1f}'.format(value * xscale[3])
axes.set_xlim([0, benchmarks_max + (absolute_range * 0.25)])
axes.xaxis.set_major_formatter(
mticker.FuncFormatter(time_axis_formatting))
# have ticks drawn from base of bar graph
# to text labels
y_ticks = [((y + 0.5) * bar_all_sizes)
for y in range(0, int(len(bar_y_positions) / num_data_points))]
y_limits = [
bar_y_positions[0] - bar_padding,
bar_y_positions[-1] + bar_height + bar_padding
]
# set the tick spacing
axes.set_yticks(y_ticks)
# label each group (each cluster along the y axes)
# with the names of the benchmarks we ran
axes.set_yticklabels(benchmark_names)
# set the visual limits so we have good spacing
axes.set_ylim(y_limits)
# if we have 2 or more data points,
# a legend will help us label it all
    if num_data_points > 1:
# a proper legend for each name in data_point_names
        legend_texts = [
            data_point_name[0] +
            (' - lower=good' if data_point_name[1] else ' - higher=good')
            for data_point_name in data_point_names
        ]
        # retrieve one artist from each bar group as a style reference for the legend
bar_style_references = [bar[0] for bar in bars]
# make legend
axes.legend(bar_style_references, legend_texts)
axes.set_xlabel('measured in ' + xscale[1])
else:
# no need to put a legend, it's basically fine as-is
data_point_name = data_point_names[0]
legend_text = (data_point_name[0], 'lower is better'
if data_point_name[1] else 'higher is better')
axes.set_xlabel(legend_text[0] + ' measured in ' + xscale[1] +
' - ' + legend_text[1])
# set the benchmark name, typically derived from the file name
axes.set_title(figure_name)
# get a nice, clean layout
figures.tight_layout()
    # leave extra bottom margin so the x-axis label is not clipped
figures.subplots_adjust(bottom=0.2)
return figures, axes
def main():
parser = argparse.ArgumentParser(
description=
'Generate graphs from a Google-Benchmark compatible json/csv listing of data'
)
parser.add_argument(
'-i',
'--input',
nargs='?',
default='../results/fresh/bit_benchmarks_sources.json',
type=argparse.FileType('r'))
parser.add_argument('-o', '--output_dir', nargs='?')
args = parser.parse_args()
if not args.output_dir:
        directoryname, filename = os.path.split(args.input.name)
args.output_dir = directoryname
random.seed(1782905257495843795)
jinfo = json.load(args.input)
info: analysis_info = parse_json_info(None, args.input.name, jinfo)
    benchmarks = {}  # accumulated across all source files by parse_json
    for fname in [s.file_name for s in info.sources]:
with open(fname, "r") as source_json_file:
j = json.load(source_json_file)
benchmarks = parse_json(benchmarks, j, info)
# we are okay to draw
# draw for each category
for benchmarks_key in benchmarks:
b = benchmarks[benchmarks_key]
category = benchmarks_key
        if category is None or len(category) < 1:
category = name
benchmark_name = category.replace("_measure",
"").replace("_", " ").strip()
figures, axes = draw_graph(benchmark_name, category, b,
data_point_names, clock_time_scales)
# save drawn figures
save_name = benchmark_name
savetarget = os.path.join(args.output_dir, save_name + '.png')
print("Saving graph: {} (to '{}')".format(save_name, savetarget))
plt.savefig(savetarget, format='png')
plt.close(figures)
|
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), '..')) # append the MIALab root directory to Python path
import SimpleITK as sitk
import pymia.filtering.filter as fltr
import pymia.filtering.registration as fltr_reg
import mialab.data.structure as structure
import mialab.utilities.file_access_utilities as futil
import exercise.helper as helper
def collect_image_paths(data_dir):
image_keys = [structure.BrainImageTypes.T1w,
structure.BrainImageTypes.GroundTruth]
class MyFilePathGenerator(futil.FilePathGenerator):
@staticmethod
def get_full_file_path(id_: str, root_dir: str, file_key, file_extension: str) -> str:
if file_key == structure.BrainImageTypes.T1w:
file_name = 'T1native'
elif file_key == structure.BrainImageTypes.GroundTruth:
file_name = 'labels_native'
else:
raise ValueError('Unknown key')
return os.path.join(root_dir, file_name + file_extension)
dir_filter = futil.DataDirectoryFilter()
    # todo: create an instance of futil.FileSystemDataCrawler and pass the corresponding arguments
crawler = futil.FileSystemDataCrawler(data_dir, image_keys, MyFilePathGenerator, dir_filter) # todo: modify here
return crawler
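# Illustrative shape of the crawler output consumed below (paths and the
# .nii.gz extension are hypothetical placeholders):
# crawler.data == {'subjectX': {'subjectX': '.../subjectX',
#                               structure.BrainImageTypes.T1w: '.../T1native.nii.gz',
#                               structure.BrainImageTypes.GroundTruth: '.../labels_native.nii.gz'}}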
def load_images(image_paths):
# todo: read the images (T1 as sitk.sitkFloat32, GroundTruth as sitk.sitkUInt8)
image_dict = {
structure.BrainImageTypes.T1w: sitk.ReadImage(image_paths.get(structure.BrainImageTypes.T1w), sitk.sitkFloat32), # todo: modify here
structure.BrainImageTypes.GroundTruth: sitk.ReadImage(image_paths.get(structure.BrainImageTypes.GroundTruth), sitk.sitkUInt8) # todo: modify here
}
return image_dict
def register_images(image_dict, atlas_img):
registration = fltr_reg.MultiModalRegistration()
registration_params = fltr_reg.MultiModalRegistrationParams(atlas_img)
# todo execute the registration with the T1-weighted image and the registration parameters
T1w_img = image_dict[structure.BrainImageTypes.T1w]
registered_t1 = registration.execute(T1w_img, registration_params) # todo: modify here
gt_img = image_dict[structure.BrainImageTypes.GroundTruth]
    # todo: apply transform to GroundTruth image (gt_img) (hint: sitk.Resample, referenceImage=atlas_img, transform=registration.transform, interpolator=sitk.sitkNearestNeighbor)
registered_gt = sitk.Resample(gt_img, atlas_img, registration.transform, sitk.sitkNearestNeighbor) # todo: modify here
return registered_t1, registered_gt
def preprocess_filter_rescale_t1(image_dict, new_min_val, new_max_val):
class MinMaxRescaleFilterParams(fltr.FilterParams):
def __init__(self, min_, max_) -> None:
super().__init__()
self.min = min_
self.max = max_
class MinMaxRescaleFilter(fltr.Filter):
def execute(self, img: sitk.Image, params: MinMaxRescaleFilterParams = None) -> sitk.Image:
            rescaled_img = sitk.RescaleIntensity(img, params.min, params.max)
            return rescaled_img
# todo: use the above filter and parameters to get the rescaled T1-weighted image
filter = MinMaxRescaleFilter() # todo: modify here
filter_params = MinMaxRescaleFilterParams(new_min_val, new_max_val) # todo: modify here
T1w_img = image_dict[structure.BrainImageTypes.T1w]
minmax_rescaled_img = filter.execute(T1w_img, filter_params) # todo: modify here
return minmax_rescaled_img
def extract_feature_median_t1(image_dict):
class MedianFilter(fltr.Filter):
def execute(self, img: sitk.Image, params: fltr.FilterParams = None) -> sitk.Image:
med_img = sitk.Median(img)
return med_img
# todo: use the above filter class to get the median image feature of the T1-weighted image
filter = MedianFilter() # todo: modify here
T1w_img = image_dict[structure.BrainImageTypes.T1w]
median_img = filter.execute(T1w_img) # todo: modify here
return median_img
# --- DO NOT CHANGE
if __name__ == '__main__':
callback = helper.TestCallback()
callback.start('Pipeline')
callback.start_test('collect_image_paths')
crawler = collect_image_paths('../data/exercise/')
if isinstance(crawler, futil.FileSystemDataCrawler):
image_paths = crawler.data
subjectx_paths = image_paths.get('subjectX') # only consider subjectX
identifier = subjectx_paths.pop('subjectX', '')
collect_ok = identifier.endswith('subjectX') and structure.BrainImageTypes.GroundTruth in subjectx_paths \
and structure.BrainImageTypes.T1w in subjectx_paths
else:
collect_ok = False
subjectx_paths = None # for load_images
callback.end_test(collect_ok)
callback.start_test('load_images')
if isinstance(subjectx_paths, dict):
subjectx_images = load_images(subjectx_paths)
load_ok = isinstance(subjectx_images, dict) and all(isinstance(img, sitk.Image) for img in subjectx_images.values())
else:
load_ok = False
subjectx_images = None # for preprocess_filter_rescale_t1
callback.end_test(load_ok)
callback.start_test('register_images')
atlas_img = sitk.ReadImage('../data/exercise/mni_icbm152_t1_tal_nlin_sym_09a.nii.gz')
if isinstance(subjectx_paths, dict):
registered_img, registered_gt = register_images(subjectx_images, atlas_img)
if isinstance(registered_img, sitk.Image) and isinstance(registered_gt, sitk.Image):
stats = sitk.LabelStatisticsImageFilter()
stats.Execute(registered_img, registered_gt)
labels = stats.GetLabels()
register_ok = registered_img.GetSize() == registered_gt.GetSize() == (197, 233, 189) and labels == tuple(range(6))
else:
register_ok = False
else:
register_ok = False
callback.end_test(register_ok)
callback.start_test('preprocess_filter_rescale_t1')
if isinstance(subjectx_images, dict):
pre_rescale = preprocess_filter_rescale_t1(subjectx_images, -3, 101)
if isinstance(pre_rescale, sitk.Image):
min_max = sitk.MinimumMaximumImageFilter()
min_max.Execute(pre_rescale)
pre_ok = min_max.GetMinimum() == -3 and min_max.GetMaximum() == 101
else:
pre_ok = False
else:
pre_ok = False
callback.end_test(pre_ok)
callback.start_test('extract_feature_median_t1')
if isinstance(subjectx_images, dict):
median_img = extract_feature_median_t1(subjectx_images)
if isinstance(median_img, sitk.Image):
median_ref = sitk.ReadImage('../data/exercise/subjectX/T1med.nii.gz')
min_max = sitk.MinimumMaximumImageFilter()
min_max.Execute(median_img - median_ref)
median_ok = min_max.GetMinimum() == 0 and min_max.GetMaximum() == 0
else:
median_ok = False
else:
median_ok = False
callback.end_test(median_ok)
callback.end()
|
from osiris.base.environments import env
def example():
value = env.get_property("sys.aws.region_name")
print(value)
if __name__ == '__main__':
example()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-10-27 15:45
from __future__ import unicode_literals
from django.conf import settings
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Contato',
fields=[
('user_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
('nome', models.CharField(max_length=30)),
('telefone', models.CharField(max_length=15)),
('celular', models.CharField(max_length=15)),
('cidade', models.CharField(blank=True, max_length=20)),
],
options={
'abstract': False,
},
bases=('users.user',),
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Contrato',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('vagas', models.IntegerField(null=True)),
('valor', models.FloatField(max_length=10)),
('descricao', models.TextField(max_length=1000)),
],
),
migrations.CreateModel(
name='Empresa',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=30)),
('telefone', models.CharField(max_length=30)),
('endereco', models.CharField(max_length=150)),
('cidade', models.CharField(max_length=150)),
],
),
migrations.CreateModel(
name='EstagioNegociacao',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('estagio', models.CharField(max_length=100)),
('descricao', models.CharField(max_length=200)),
],
),
migrations.AddField(
model_name='contrato',
name='empresa',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='comercial.Empresa'),
),
migrations.AddField(
model_name='contrato',
name='estagio',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='comercial.EstagioNegociacao'),
),
migrations.AddField(
model_name='contato',
name='empresa',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='comercial.Empresa'),
),
]
|
import os
import numpy as np
from PIL import Image
import torch
import torchvision
import torchvision.datasets
from torchvision import transforms
from torch.utils.data import DataLoader, Dataset, Sampler
class LT_Dataset(Dataset):
num_classes = 365
def __init__(self, root, txt, transform=None):
self.img_path = []
self.targets = []
self.transform = transform
with open(txt) as f:
for line in f:
self.img_path.append(os.path.join(root, line.split()[0]))
self.targets.append(int(line.split()[1]))
cls_num_list_old = [np.sum(np.array(self.targets) == i) for i in range(self.num_classes)]
# generate class_map: class index sort by num (descending)
sorted_classes = np.argsort(-np.array(cls_num_list_old))
self.class_map = [0 for i in range(self.num_classes)]
for i in range(self.num_classes):
self.class_map[sorted_classes[i]] = i
self.targets = np.array(self.class_map)[self.targets].tolist()
self.class_data = [[] for i in range(self.num_classes)]
for i in range(len(self.targets)):
j = self.targets[i]
self.class_data[j].append(i)
self.cls_num_list = [np.sum(np.array(self.targets)==i) for i in range(self.num_classes)]
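        # Illustrative example of the remapping above (hypothetical counts):
        # with cls_num_list_old = [10, 500, 40], sorted_classes = [1, 2, 0]
        # and class_map = [2, 0, 1], so the most frequent original class
        # (index 1) becomes the new class 0; self.targets has already been
        # relabelled with class_map and cls_num_list counts the new indices.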
def __len__(self):
return len(self.targets)
def __getitem__(self, index):
path = self.img_path[index]
target = self.targets[index]
with open(path, 'rb') as f:
sample = Image.open(f).convert('RGB')
if self.transform is not None:
sample = self.transform(sample)
return sample, target
class LT_Dataset_Eval(Dataset):
num_classes = 365
def __init__(self, root, txt, class_map, transform=None):
self.img_path = []
self.targets = []
self.transform = transform
self.class_map = class_map
with open(txt) as f:
for line in f:
self.img_path.append(os.path.join(root, line.split()[0]))
self.targets.append(int(line.split()[1]))
self.targets = np.array(self.class_map)[self.targets].tolist()
def __len__(self):
return len(self.targets)
def __getitem__(self, index):
path = self.img_path[index]
target = self.targets[index]
with open(path, 'rb') as f:
sample = Image.open(f).convert('RGB')
if self.transform is not None:
sample = self.transform(sample)
return sample, target
class Places_LT(DataLoader):
def __init__(self, data_dir="", batch_size=60, num_workers=40, training=True, train_txt = "./data_txt/Places_LT_v2/Places_LT_train.txt",
eval_txt = "./data_txt/Places_LT_v2/Places_LT_val.txt",
test_txt = "./data_txt/Places_LT_v2/Places_LT_test.txt"):
self.num_workers = num_workers
        self.batch_size = batch_size
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
transform_train = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0),
transforms.ToTensor(),
normalize,
])
transform_test = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])
train_dataset = LT_Dataset(data_dir, train_txt, transform=transform_train)
        if training:
            dataset = train_dataset  # reuse the already constructed training set
eval_dataset = LT_Dataset_Eval(data_dir, eval_txt, transform=transform_test, class_map=train_dataset.class_map)
else:
dataset = LT_Dataset_Eval(data_dir, test_txt, transform=transform_test, class_map=train_dataset.class_map)
eval_dataset = None
self.dataset = dataset
self.eval_dataset = eval_dataset
self.cls_num_list = train_dataset.cls_num_list
"""
self.data = torch.utils.data.DataLoader(
self.dataset,
batch_size=batch_size, shuffle=True,
num_workers=num_workers, pin_memory=True)
"""
self.init_kwargs = {
'batch_size': batch_size,
'shuffle': True,
'num_workers': num_workers
}
#super().__init__(dataset=self.data)
super().__init__(dataset=self.dataset, **self.init_kwargs)
def split_validation(self):
# If you do not want to validate:
# return None
        # If you want to validate:
return DataLoader(dataset=self.eval_dataset, **self.init_kwargs)
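# Minimal usage sketch (not part of the original loader). The data directory
# below is a hypothetical placeholder, and the default *.txt split files must
# exist locally for this to actually run.
if __name__ == '__main__':
    loader = Places_LT(data_dir='/path/to/places365', batch_size=32,
                       num_workers=4, training=True)
    print('training samples:', len(loader.dataset))
    print('number of classes:', len(loader.cls_num_list))
    # the loader itself yields (image_batch, target_batch) pairs
    images, targets = next(iter(loader))
    print('batch:', tuple(images.shape), 'first targets:', targets[:8].tolist())
    # held-out validation split built from the eval txt file
    val_loader = loader.split_validation()
    print('validation samples:', len(val_loader.dataset))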
|