hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
sequencelengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
sequencelengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
sequencelengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
794365115aa04eb4c09cbffc450febfcb142bb99 | 343 | py | Python | src/AppiumLibrary/utils/__init__.py | ddavvID/robotframework-appiumlibrary | 9635645c3349624716ebddb3afc158b7219167cd | [
"Apache-2.0"
] | null | null | null | src/AppiumLibrary/utils/__init__.py | ddavvID/robotframework-appiumlibrary | 9635645c3349624716ebddb3afc158b7219167cd | [
"Apache-2.0"
] | null | null | null | src/AppiumLibrary/utils/__init__.py | ddavvID/robotframework-appiumlibrary | 9635645c3349624716ebddb3afc158b7219167cd | [
"Apache-2.0"
] | null | null | null | from .applicationcache import ApplicationCache
def escape_xpath_value(value):
value = unicode(value)
if '"' in value and '\'' in value:
parts_wo_apos = value.split('\'')
return "concat('%s')" % "', \"'\", '".join(parts_wo_apos)
if '\'' in value:
return "\"%s\"" % value
return "'%s'" % value | 31.181818 | 66 | 0.553936 |
7943655df1849f689805b87850caf20fa8429c35 | 712 | py | Python | dax/__init__.py | onealbao/LDax | b3f33c68185d970eb340bed49dfc18889b180645 | [
"MIT"
] | null | null | null | dax/__init__.py | onealbao/LDax | b3f33c68185d970eb340bed49dfc18889b180645 | [
"MIT"
] | 13 | 2020-06-11T20:56:24.000Z | 2022-03-12T00:37:02.000Z | dax/__init__.py | onealbao/LDax | b3f33c68185d970eb340bed49dfc18889b180645 | [
"MIT"
] | 1 | 2018-09-14T15:52:35.000Z | 2018-09-14T15:52:35.000Z | # -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import absolute_import
from . import bin
from . import dax_tools_utils
from . import log
from . import xnat_tools_utils
from . import XnatUtils
from .task import Task
from .cluster import PBS
from .launcher import Launcher
from .dax_settings import DAX_Settings, DAX_Netrc
from .version import VERSION as __version__
from .XnatUtils import SpiderProcessHandler, AssessorHandler
from .modules import ScanModule, SessionModule
from .spiders import AutoSpider, ScanSpider, SessionSpider
from .processors import ScanProcessor, SessionProcessor, AutoProcessor
| 32.363636 | 73 | 0.79073 |
794365e35c56303005d5fe80bf841130b470ccfb | 5,568 | py | Python | pcdet/models/dense_heads/point_intra_part_head.py | Gltina/OpenPCDet | e32dc7f8f903a3f0e1c93effc68d74dbe16766e2 | [
"Apache-2.0"
] | 1,984 | 2020-07-01T05:13:02.000Z | 2022-03-31T20:34:00.000Z | pcdet/models/dense_heads/point_intra_part_head.py | Gltina/OpenPCDet | e32dc7f8f903a3f0e1c93effc68d74dbe16766e2 | [
"Apache-2.0"
] | 748 | 2020-07-01T07:04:58.000Z | 2022-03-31T07:38:51.000Z | pcdet/models/dense_heads/point_intra_part_head.py | Gltina/OpenPCDet | e32dc7f8f903a3f0e1c93effc68d74dbe16766e2 | [
"Apache-2.0"
] | 764 | 2020-07-01T12:19:13.000Z | 2022-03-31T11:19:17.000Z | import torch
from ...utils import box_coder_utils, box_utils
from .point_head_template import PointHeadTemplate
class PointIntraPartOffsetHead(PointHeadTemplate):
"""
Point-based head for predicting the intra-object part locations.
Reference Paper: https://arxiv.org/abs/1907.03670
From Points to Parts: 3D Object Detection from Point Cloud with Part-aware and Part-aggregation Network
"""
def __init__(self, num_class, input_channels, model_cfg, predict_boxes_when_training=False, **kwargs):
super().__init__(model_cfg=model_cfg, num_class=num_class)
self.predict_boxes_when_training = predict_boxes_when_training
self.cls_layers = self.make_fc_layers(
fc_cfg=self.model_cfg.CLS_FC,
input_channels=input_channels,
output_channels=num_class
)
self.part_reg_layers = self.make_fc_layers(
fc_cfg=self.model_cfg.PART_FC,
input_channels=input_channels,
output_channels=3
)
target_cfg = self.model_cfg.TARGET_CONFIG
if target_cfg.get('BOX_CODER', None) is not None:
self.box_coder = getattr(box_coder_utils, target_cfg.BOX_CODER)(
**target_cfg.BOX_CODER_CONFIG
)
self.box_layers = self.make_fc_layers(
fc_cfg=self.model_cfg.REG_FC,
input_channels=input_channels,
output_channels=self.box_coder.code_size
)
else:
self.box_layers = None
def assign_targets(self, input_dict):
"""
Args:
input_dict:
point_features: (N1 + N2 + N3 + ..., C)
batch_size:
point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
gt_boxes (optional): (B, M, 8)
Returns:
point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored
point_part_labels: (N1 + N2 + N3 + ..., 3)
"""
point_coords = input_dict['point_coords']
gt_boxes = input_dict['gt_boxes']
assert gt_boxes.shape.__len__() == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
assert point_coords.shape.__len__() in [2], 'points.shape=%s' % str(point_coords.shape)
batch_size = gt_boxes.shape[0]
extend_gt_boxes = box_utils.enlarge_box3d(
gt_boxes.view(-1, gt_boxes.shape[-1]), extra_width=self.model_cfg.TARGET_CONFIG.GT_EXTRA_WIDTH
).view(batch_size, -1, gt_boxes.shape[-1])
targets_dict = self.assign_stack_targets(
points=point_coords, gt_boxes=gt_boxes, extend_gt_boxes=extend_gt_boxes,
set_ignore_flag=True, use_ball_constraint=False,
ret_part_labels=True, ret_box_labels=(self.box_layers is not None)
)
return targets_dict
def get_loss(self, tb_dict=None):
tb_dict = {} if tb_dict is None else tb_dict
point_loss_cls, tb_dict = self.get_cls_layer_loss(tb_dict)
point_loss_part, tb_dict = self.get_part_layer_loss(tb_dict)
point_loss = point_loss_cls + point_loss_part
if self.box_layers is not None:
point_loss_box, tb_dict = self.get_box_layer_loss(tb_dict)
point_loss += point_loss_box
return point_loss, tb_dict
def forward(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
point_features: (N1 + N2 + N3 + ..., C) or (B, N, C)
point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
point_labels (optional): (N1 + N2 + N3 + ...)
gt_boxes (optional): (B, M, 8)
Returns:
batch_dict:
point_cls_scores: (N1 + N2 + N3 + ..., 1)
point_part_offset: (N1 + N2 + N3 + ..., 3)
"""
point_features = batch_dict['point_features']
point_cls_preds = self.cls_layers(point_features) # (total_points, num_class)
point_part_preds = self.part_reg_layers(point_features)
ret_dict = {
'point_cls_preds': point_cls_preds,
'point_part_preds': point_part_preds,
}
if self.box_layers is not None:
point_box_preds = self.box_layers(point_features)
ret_dict['point_box_preds'] = point_box_preds
point_cls_scores = torch.sigmoid(point_cls_preds)
point_part_offset = torch.sigmoid(point_part_preds)
batch_dict['point_cls_scores'], _ = point_cls_scores.max(dim=-1)
batch_dict['point_part_offset'] = point_part_offset
if self.training:
targets_dict = self.assign_targets(batch_dict)
ret_dict['point_cls_labels'] = targets_dict['point_cls_labels']
ret_dict['point_part_labels'] = targets_dict.get('point_part_labels')
ret_dict['point_box_labels'] = targets_dict.get('point_box_labels')
if self.box_layers is not None and (not self.training or self.predict_boxes_when_training):
point_cls_preds, point_box_preds = self.generate_predicted_boxes(
points=batch_dict['point_coords'][:, 1:4],
point_cls_preds=point_cls_preds, point_box_preds=ret_dict['point_box_preds']
)
batch_dict['batch_cls_preds'] = point_cls_preds
batch_dict['batch_box_preds'] = point_box_preds
batch_dict['batch_index'] = batch_dict['point_coords'][:, 0]
batch_dict['cls_preds_normalized'] = False
self.forward_ret_dict = ret_dict
return batch_dict
| 43.5 | 107 | 0.626976 |
794366031d29ab7a2d8b00c5d7a5dfb71d311ff0 | 3,436 | py | Python | HSTB/kluster/dms.py | giumas/kluster | 40abd266551a56b693132a7cb12471601f5a02b4 | [
"CC0-1.0"
] | 18 | 2020-11-01T19:59:33.000Z | 2022-03-31T22:46:48.000Z | HSTB/kluster/dms.py | giumas/kluster | 40abd266551a56b693132a7cb12471601f5a02b4 | [
"CC0-1.0"
] | 46 | 2020-10-23T13:55:24.000Z | 2022-03-31T15:58:26.000Z | HSTB/kluster/dms.py | giumas/kluster | 40abd266551a56b693132a7cb12471601f5a02b4 | [
"CC0-1.0"
] | 9 | 2021-03-18T22:28:26.000Z | 2022-02-23T21:48:09.000Z | import re
import numpy as np
def dms2dd(d: float, m: float, s: float):
"""
convert between deg-min-sec and decimal degrees
Parameters
----------
d
degrees
m
minutes
s
seconds
Returns
-------
float
decimal degrees
"""
sign = 1
try:
if float(d) < 0:
sign = -1
except TypeError:
d = float(d)
m = float(m)
s = float(s)
dd = abs(float(d)) + float(m)/60 + float(s)/(60 * 60)
return dd * sign
def dd2dms(deg: float):
"""
convert between decimal degrees and deg-min-sec
Parameters
----------
deg
decimal degrees
Returns
-------
list
[degrees as float, minutes as float, seconds as float]
"""
try:
d, m = divmod(abs(deg), 1)
except TypeError:
deg = float(deg)
d, m = divmod(abs(deg), 1)
m, s = divmod(m * 60, 1)
s = s * 60
if float(deg) < 0:
d = d * -1
return [d, m, s]
def parse_dms_to_dd(dms: str):
"""
Take in deg-min-sec string in a couple different formats and return the decimal degrees representation.
Supported formats:
"80:38:06.57 W"
"80:38:06.57W"
"-80:38:06.57"
"-80:38:06"
Parameters
----------
dms
deg-min-sec string
Returns
-------
float
decimal degrees
"""
# split by any non-digit, non-letter character except - sign
parts = re.split(r"[^\w-]+", dms)
direct = 1
directions_included = {'N': 1, 'E': 1, 'W': -1, 'S': -1}
if parts[-1] in directions_included: # someone included dir with space, ex: "80:38:06.57 W"
direct = directions_included[parts[-1]]
parts = parts[:-1]
elif parts[-1][-1] in directions_included: # someone included dir without space, ex: "80:38:06.57W"
direct = directions_included[parts[-1][-1]]
parts[-1] = parts[-1][:-1].rstrip()
if parts[0][0] != '-':
parts[0] = int(parts[0]) * direct # add negative if direction was included as a letter but not as sign for deg
dd = ''
if len(parts) == 4: # milliseconds given, ex: "-80:38:06.57"
dec_secs = int(parts[2]) + (int(parts[3]) / (10.0 ** len(parts[3].rstrip())))
dd = dms2dd(float(parts[0]), float(parts[1]), float(dec_secs))
elif len(parts) == 3: # milliseconds not given, ex: "-80:38:06"
dd = dms2dd(float(parts[0]), float(parts[1]), float(parts[2]))
return dd
def return_zone_from_min_max_long(minlon: float, maxlon: float, minlat: float):
"""
Takes min longitude / max longitude and returns the zone that encompasses both. If min/max are in different zones,
prints warning message and returns the higher zone number
Parameters
----------
minlon
the minimum longitude value of the dataset
maxlon
the maximum longitude value of the dataset
minlat
the minimum latitude value of the dataset
Returns
-------
str
zone number with N/S identifier
"""
maxlon_zone = str(int(np.ceil((maxlon + 180) / 6)))
minlon_zone = str(int(np.ceil((minlon + 180) / 6)))
if minlat > 0:
zone_ident = 'N'
else:
zone_ident = 'S'
if int(maxlon_zone) != int(minlon_zone):
print('Spanning more than one UTM zone: MIN {}, MAX {}'.format(minlon_zone, maxlon_zone))
return maxlon_zone + zone_ident
| 24.197183 | 119 | 0.562573 |
7943677c73c476ae1a7ddf1ee604277dd5599bcb | 3,624 | py | Python | salt/utils/mako.py | skrobul/salt | ef7fb71082cce7a9783e00b9c65062fefae09263 | [
"Apache-2.0"
] | 2 | 2017-09-17T21:10:35.000Z | 2019-08-26T03:00:12.000Z | salt/utils/mako.py | skrobul/salt | ef7fb71082cce7a9783e00b9c65062fefae09263 | [
"Apache-2.0"
] | null | null | null | salt/utils/mako.py | skrobul/salt | ef7fb71082cce7a9783e00b9c65062fefae09263 | [
"Apache-2.0"
] | 3 | 2021-02-23T08:12:48.000Z | 2021-02-23T08:13:13.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
# Import python libs
import os
import urlparse
# Import third party libs
from mako.lookup import TemplateCollection, TemplateLookup
# Import salt libs
import salt.fileclient
class SaltMakoTemplateLookup(TemplateCollection):
"""
Look up Mako template files using file:// or salt:// URLs with <%include/>
or <%namespace/>.
(1) Look up mako template files on local file system via files://... URL.
Make sure mako template file is present locally on minion beforehand.
Examples:
<%include file="file:///etc/salt/lib/templates/sls-parts.mako"/>
<%namespace file="file:///etc/salt/lib/templates/utils.mako" import="helper"/>
(2) Look up mako template files on Salt master via salt://... URL.
If URL is a relative path (without an URL scheme) then assume it's relative
to the directory of the salt file that's doing the lookup. If URL is an absolute
path then it's treated as if it has been prefixed with salt://.
Examples::
<%include file="templates/sls-parts.mako"/>
<%include file="salt://lib/templates/sls-parts.mako"/>
<%include file="/lib/templates/sls-parts.mako"/> ##-- treated as salt://
<%namespace file="templates/utils.mako"/>
<%namespace file="salt://lib/templates/utils.mako" import="helper"/>
<%namespace file="/lib/templates/utils.mako" import="helper"/> ##-- treated as salt://
"""
def __init__(self, opts, saltenv='base', env=None):
if env is not None:
salt.utils.warn_until(
'Boron',
'Passing a salt environment should be done using \'saltenv\' '
'not \'env\'. This functionality will be removed in Salt '
'Boron.'
)
# Backwards compatibility
saltenv = env
self.opts = opts
self.saltenv = saltenv
self.file_client = salt.fileclient.get_file_client(self.opts)
self.lookup = TemplateLookup(directories='/')
self.cache = {}
def adjust_uri(self, uri, filename):
scheme = urlparse.urlparse(uri).scheme
if scheme in ('salt', 'file'):
return uri
elif scheme:
raise ValueError(
'Unsupported URL scheme({0}) in {1}'.format(
scheme, uri
)
)
return self.lookup.adjust_uri(uri, filename)
def get_template(self, uri, relativeto=None):
if uri.startswith("file://"):
prefix = "file://"
searchpath = "/"
salt_uri = uri
else:
prefix = "salt://"
if self.opts['file_client'] == 'local':
searchpath = self.opts['file_roots'][self.saltenv]
else:
searchpath = [os.path.join(self.opts['cachedir'],
'files',
self.saltenv)]
salt_uri = uri if uri.startswith(prefix) else (prefix + uri)
self.cache_file(salt_uri)
self.lookup = TemplateLookup(directories=searchpath)
return self.lookup.get_template(salt_uri[len(prefix):])
def cache_file(self, fpath):
if fpath not in self.cache:
self.cache[fpath] = self.file_client.get_file(fpath,
'',
True,
self.saltenv)
| 37.75 | 99 | 0.549945 |
794367a83c37690f7bbe9302d7372524b47a0be3 | 425 | py | Python | WEEKS/CD_Sata-Structures/_MISC/misc-examples/lookup.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | WEEKS/CD_Sata-Structures/_MISC/misc-examples/lookup.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | WEEKS/CD_Sata-Structures/_MISC/misc-examples/lookup.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | import math
# Inverse Square Root is 1 over the square root of a number (1 / sqrt(n))
inv_sqrt = {}
def build_table(n):
for i in range(1, n):
global inv_sqrt
inv_sqrt[i] = 1 / math.sqrt(i)
print("Building Table")
build_table(1000000)
print("Done Building")
print(inv_sqrt[30000])
print(inv_sqrt[30010])
print(inv_sqrt[32000])
print(inv_sqrt[30030])
print(inv_sqrt[30300])
print(inv_sqrt[30060])
| 15.740741 | 73 | 0.687059 |
794367bc65a9cfd8eec7cf6db37397cb60266b87 | 19,263 | py | Python | lib/perf_uploader.py | hustwei/chromite | 10eb79abeb64e859362546214b7e039096ac9830 | [
"BSD-3-Clause"
] | null | null | null | lib/perf_uploader.py | hustwei/chromite | 10eb79abeb64e859362546214b7e039096ac9830 | [
"BSD-3-Clause"
] | null | null | null | lib/perf_uploader.py | hustwei/chromite | 10eb79abeb64e859362546214b7e039096ac9830 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Uploads performance data to the performance dashboard.
The performance dashboard is owned by Chrome team and is available here:
https://chromeperf.appspot.com/
Users must be logged in with an @google.com account to view perf data there.
For more information on sending data to the dashboard, see:
http://dev.chromium.org/developers/testing/sending-data-to-the-performance-dashboard
Note: This module started off from the autotest/tko/perf_uploader.py but has
been extended significantly since.
"""
from __future__ import print_function
import collections
import httplib
import json
import math
import os
import re
import string
import urllib
import urllib2
from chromite.lib import cros_logging as logging
from chromite.lib import osutils
from chromite.lib import retry_util
# Clearly mark perf values coming from chromite by default.
_DEFAULT_TEST_PREFIX = 'cbuildbot.'
_DEFAULT_PLATFORM_PREFIX = 'cros-'
_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
_PRESENTATION_CONFIG_FILE = os.path.join(_ROOT_DIR,
'perf_dashboard_config.json')
LOCAL_DASHBOARD_URL = 'http://localhost:8080'
STAGE_DASHBOARD_URL = 'https://chrome-perf.googleplex.com'
DASHBOARD_URL = 'https://chromeperf.appspot.com'
_MAX_DESCRIPTION_LENGTH = 256
_MAX_UNIT_LENGTH = 32
# Format for Chrome and Chrome OS version strings.
_VERSION_REGEXP = r'^(\d+)\.(\d+)\.(\d+)\.(\d+)$'
class PerfUploadingError(Exception):
"""A class to wrap errors in this module.
This exception class has two attributes: value and orig_exc. "value" is what
was used to create this exception while "orig_exc" is the optional original
exception that is wrapped by this exception.
"""
def __init__(self, value, orig_exc=None):
super(PerfUploadingError, self).__init__(value)
self.orig_exc = orig_exc
def __str__(self):
r = super(PerfUploadingError, self).__str__()
if self.orig_exc:
r += '\ncaused by: %s' % str(self.orig_exc)
return r
PerformanceValue = collections.namedtuple(
'PerformanceValue',
'description value units higher_is_better graph stdio_uri')
def OutputPerfValue(filename, description, value, units,
higher_is_better=True, graph=None, stdio_uri=None):
"""Record a measured performance value in an output file.
This is originally from autotest/files/client/common_lib/test.py.
The output file will subsequently be parsed by ImageTestStage to have the
information sent to chromeperf.appspot.com.
Args:
filename: A path to the output file. Data will be appended to this file.
description: A string describing the measured perf value. Must
be maximum length 256, and may only contain letters, numbers,
periods, dashes, and underscores. For example:
"page_load_time", "scrolling-frame-rate".
value: A number representing the measured perf value, or a list of
measured values if a test takes multiple measurements. Measured perf
values can be either ints or floats.
units: A string describing the units associated with the measured perf
value(s). Must be maximum length 32, and may only contain letters,
numbers, periods, dashes, and uderscores. For example: "msec", "fps".
higher_is_better: A boolean indicating whether or not a higher measured
perf value is considered better. If False, it is assumed that a "lower"
measured value is better.
graph: A string indicating the name of the graph on which the perf value
will be subsequently displayed on the chrome perf dashboard. This
allows multiple metrics to be grouped together on the same graph.
Default to None, perf values should be graphed individually on separate
graphs.
stdio_uri: A URL relevant to this data point (e.g. the buildbot log).
"""
def ValidateString(param_name, value, max_len):
if len(value) > max_len:
raise ValueError('%s must be at most %d characters.', param_name, max_len)
allowed_chars = string.ascii_letters + string.digits + '-._'
if not set(value).issubset(set(allowed_chars)):
raise ValueError(
'%s may only contain letters, digits, hyphens, periods, and '
'underscores. Its current value is %s.',
param_name, value
)
ValidateString('description', description, _MAX_DESCRIPTION_LENGTH)
ValidateString('units', units, _MAX_UNIT_LENGTH)
entry = {
'description': description,
'value': value,
'units': units,
'higher_is_better': higher_is_better,
'graph': graph,
'stdio_uri': stdio_uri,
}
data = (json.dumps(entry), '\n')
osutils.WriteFile(filename, data, 'a')
def LoadPerfValues(filename):
"""Return a list of PerformanceValue objects from |filename|."""
lines = osutils.ReadFile(filename).splitlines()
entries = []
for line in lines:
entry = json.loads(line)
entries.append(PerformanceValue(**entry))
return entries
def _AggregateIterations(perf_values):
"""Aggregate same measurements from multiple iterations.
Each perf measurement may exist multiple times across multiple iterations
of a test. Here, the results for each unique measured perf metric are
aggregated across multiple iterations.
Args:
perf_values: A list of PerformanceValue objects.
Returns:
A dictionary mapping each unique measured perf value (keyed by tuple of
its description and graph name) to information about that perf value
(in particular, the value is a list of values for each iteration).
"""
aggregated_data = {}
for perf_value in perf_values:
key = (perf_value.description, perf_value.graph)
try:
aggregated_entry = aggregated_data[key]
except KeyError:
aggregated_entry = {
'units': perf_value.units,
'higher_is_better': perf_value.higher_is_better,
'graph': perf_value.graph,
'value': [],
}
aggregated_data[key] = aggregated_entry
# Note: the stddev will be recomputed later when the results
# from each of the multiple iterations are averaged together.
aggregated_entry['value'].append(perf_value.value)
return aggregated_data
def _MeanAndStddev(data, precision=4):
"""Computes mean and standard deviation from a list of numbers.
Args:
data: A list of numeric values.
precision: The integer number of decimal places to which to
round the results.
Returns:
A 2-tuple (mean, standard_deviation), in which each value is
rounded to |precision| decimal places.
"""
n = len(data)
if n == 0:
raise ValueError('Cannot compute mean and stddev of an empty list.')
if n == 1:
return round(data[0], precision), 0
mean = math.fsum(data) / n
# Divide by n-1 to compute "sample standard deviation".
variance = math.fsum((elem - mean) ** 2 for elem in data) / (n - 1)
return round(mean, precision), round(math.sqrt(variance), precision)
def _ComputeAvgStddev(perf_data):
"""Compute average and standard deviations as needed for perf measurements.
For any perf measurement that exists in multiple iterations (has more than
one measured value), compute the average and standard deviation for it and
then store the updated information in the dictionary (in place).
Args:
perf_data: A dictionary of measured perf data as computed by
_AggregateIterations(), except each "value" is now a single value, not
a list of values.
"""
for perf in perf_data.itervalues():
perf['value'], perf['stddev'] = _MeanAndStddev(perf['value'])
return perf_data
PresentationInfo = collections.namedtuple(
'PresentationInfo',
'master_name test_name')
def _GetPresentationInfo(test_name):
"""Get presentation info for |test_name| from config file.
Args:
test_name: The test name.
Returns:
A PresentationInfo object for this test.
"""
infos = osutils.ReadFile(_PRESENTATION_CONFIG_FILE)
infos = json.loads(infos)
for info in infos:
if info['test_name'] == test_name:
try:
return PresentationInfo(**info)
except:
raise PerfUploadingError('No master found for %s' % test_name)
raise PerfUploadingError('No presentation config found for %s' % test_name)
def _FormatForUpload(perf_data, platform_name, presentation_info, revision=None,
cros_version=None, chrome_version=None, test_prefix=None,
platform_prefix=None):
"""Formats perf data suitably to upload to the perf dashboard.
The perf dashboard expects perf data to be uploaded as a
specially-formatted JSON string. In particular, the JSON object must be a
dictionary with key "data", and value being a list of dictionaries where
each dictionary contains all the information associated with a single
measured perf value: master name, bot name, test name, perf value, units,
and build version numbers.
See also google3/googleclient/chrome/speed/dashboard/add_point.py for the
server side handler.
Args:
platform_name: The string name of the platform.
perf_data: A dictionary of measured perf data. This is keyed by
(description, graph name) tuple.
presentation_info: A PresentationInfo object of the given test.
revision: The raw X-axis value; normally it represents a VCS repo, but may
be any monotonic increasing value integer.
cros_version: A string identifying Chrome OS version e.g. '6052.0.0'.
chrome_version: A string identifying Chrome version e.g. '38.0.2091.2'.
test_prefix: Arbitrary string to automatically prefix to the test name.
If None, then 'cbuildbot.' is used to guarantee namespacing.
platform_prefix: Arbitrary string to automatically prefix to
|platform_name|. If None, then 'cros-' is used to guarantee namespacing.
Returns:
A dictionary containing the formatted information ready to upload
to the performance dashboard.
"""
if test_prefix is None:
test_prefix = _DEFAULT_TEST_PREFIX
if platform_prefix is None:
platform_prefix = _DEFAULT_PLATFORM_PREFIX
dash_entries = []
for (desc, graph), data in perf_data.iteritems():
# Each perf metric is named by a path that encodes the test name,
# a graph name (if specified), and a description. This must be defined
# according to rules set by the Chrome team, as implemented in:
# chromium/tools/build/scripts/slave/results_dashboard.py.
desc = desc.replace('/', '_')
test_name = test_prefix + presentation_info.test_name
test_parts = [test_name, desc]
if graph:
test_parts.insert(1, graph)
test_path = '/'.join(test_parts)
supp_cols = {'a_default_rev': 'r_cros_version'}
if data.get('stdio_uri'):
supp_cols['a_stdio_uri'] = data['stdio_uri']
if cros_version is not None:
supp_cols['r_cros_version'] = cros_version
if chrome_version is not None:
supp_cols['r_chrome_version'] = chrome_version
new_dash_entry = {
'master': presentation_info.master_name,
'bot': platform_prefix + platform_name,
'test': test_path,
'value': data['value'],
'error': data['stddev'],
'units': data['units'],
'higher_is_better': data['higher_is_better'],
'supplemental_columns': supp_cols,
}
if revision is not None:
new_dash_entry['revision'] = revision
dash_entries.append(new_dash_entry)
json_string = json.dumps(dash_entries)
return {'data': json_string}
def _SendToDashboard(data_obj, dashboard=DASHBOARD_URL):
"""Sends formatted perf data to the perf dashboard.
Args:
data_obj: A formatted data object as returned by _FormatForUpload().
dashboard: The dashboard to upload data to.
Raises:
PerfUploadingError if an exception was raised when uploading.
"""
upload_url = os.path.join(dashboard, 'add_point')
encoded = urllib.urlencode(data_obj)
req = urllib2.Request(upload_url, encoded)
try:
urllib2.urlopen(req)
except urllib2.HTTPError as e:
raise PerfUploadingError('HTTPError: %d %s for JSON %s\n' %
(e.code, e.msg, data_obj['data']), e)
except urllib2.URLError as e:
raise PerfUploadingError('URLError: %s for JSON %s\n' %
(str(e.reason), data_obj['data']), e)
except httplib.HTTPException as e:
raise PerfUploadingError(
'HTTPException for JSON %s\n' % data_obj['data'], e)
def _ComputeRevisionFromVersions(chrome_version, cros_version):
"""Computes the point ID to use, from Chrome and Chrome OS version numbers.
For ChromeOS row data, data values are associated with both a Chrome
version number and a ChromeOS version number (unlike for Chrome row data
that is associated with a single revision number). This function takes
both version numbers as input, then computes a single, unique integer ID
from them, which serves as a 'fake' revision number that can uniquely
identify each ChromeOS data point, and which will allow ChromeOS data points
to be sorted by Chrome version number, with ties broken by ChromeOS version
number.
To compute the integer ID, we take the portions of each version number that
serve as the shortest unambiguous names for each (as described here:
http://www.chromium.org/developers/version-numbers). We then force each
component of each portion to be a fixed width (padded by zeros if needed),
concatenate all digits together (with those coming from the Chrome version
number first), and convert the entire string of digits into an integer.
We ensure that the total number of digits does not exceed that which is
allowed by AppEngine NDB for an integer (64-bit signed value).
For example:
Chrome version: 27.0.1452.2 (shortest unambiguous name: 1452.2)
ChromeOS version: 27.3906.0.0 (shortest unambiguous name: 3906.0.0)
concatenated together with padding for fixed-width columns:
('01452' + '002') + ('03906' + '000' + '00') = '014520020390600000'
Final integer ID: 14520020390600000
Args:
chrome_version: The Chrome version number as a string.
cros_version: The ChromeOS version number as a string.
Returns:
A unique integer ID associated with the two given version numbers.
"""
# Number of digits to use from each part of the version string for Chrome
# and Chrome OS versions when building a point ID out of these two versions.
chrome_version_col_widths = [0, 0, 5, 3]
cros_version_col_widths = [0, 5, 3, 2]
def get_digits_from_version(version_num, column_widths):
if re.match(_VERSION_REGEXP, version_num):
computed_string = ''
version_parts = version_num.split('.')
for i, version_part in enumerate(version_parts):
if column_widths[i]:
computed_string += version_part.zfill(column_widths[i])
return computed_string
else:
return None
chrome_digits = get_digits_from_version(
chrome_version, chrome_version_col_widths)
cros_digits = get_digits_from_version(
cros_version, cros_version_col_widths)
if not chrome_digits or not cros_digits:
return None
result_digits = chrome_digits + cros_digits
max_digits = sum(chrome_version_col_widths + cros_version_col_widths)
if len(result_digits) > max_digits:
return None
return int(result_digits)
def _RetryIfServerError(perf_exc):
"""Exception handler to retry an upload if error code is 5xx.
Args:
perf_exc: The exception from _SendToDashboard.
Returns:
True if the cause of |perf_exc| is HTTP 5xx error.
"""
return (isinstance(perf_exc.orig_exc, urllib2.HTTPError) and
perf_exc.orig_exc.code >= 500)
def UploadPerfValues(perf_values, platform_name, test_name, revision=None,
cros_version=None, chrome_version=None,
dashboard=DASHBOARD_URL, master_name=None,
test_prefix=None, platform_prefix=None, dry_run=False):
"""Uploads any perf data associated with a test to the perf dashboard.
Note: If |revision| is used, then |cros_version| & |chrome_version| are not
necessary. Conversely, if |revision| is not used, then |cros_version| and
|chrome_version| must both be specified.
Args:
perf_values: List of PerformanceValue objects.
platform_name: A string identifying platform e.g. 'x86-release'. 'cros-'
will be prepended to |platform_name| internally, by _FormatForUpload.
test_name: A string identifying the test
revision: The raw X-axis value; normally it represents a VCS repo, but may
be any monotonic increasing value integer.
cros_version: A string identifying Chrome OS version e.g. '6052.0.0'.
chrome_version: A string identifying Chrome version e.g. '38.0.2091.2'.
dashboard: The dashboard to upload data to.
master_name: The "master" field to use; by default it is looked up in the
perf_dashboard_config.json database.
test_prefix: Arbitrary string to automatically prefix to the test name.
If None, then 'cbuildbot.' is used to guarantee namespacing.
platform_prefix: Arbitrary string to automatically prefix to
|platform_name|. If None, then 'cros-' is used to guarantee namespacing.
dry_run: Do everything but upload the data to the server.
"""
if not perf_values:
return
# Aggregate values from multiple iterations together.
perf_data = _AggregateIterations(perf_values)
# Compute averages and standard deviations as needed for measured perf
# values that exist in multiple iterations. Ultimately, we only upload a
# single measurement (with standard deviation) for every unique measured
# perf metric.
_ComputeAvgStddev(perf_data)
# Format the perf data for the upload, then upload it.
if revision is None:
# No "revision" field, calculate one. Chrome and CrOS fields must be given.
cros_version = chrome_version[:chrome_version.find('.') + 1] + cros_version
revision = _ComputeRevisionFromVersions(chrome_version, cros_version)
try:
if master_name is None:
presentation_info = _GetPresentationInfo(test_name)
else:
presentation_info = PresentationInfo(master_name, test_name)
formatted_data = _FormatForUpload(perf_data, platform_name,
presentation_info,
revision=revision,
cros_version=cros_version,
chrome_version=chrome_version,
test_prefix=test_prefix,
platform_prefix=platform_prefix)
if dry_run:
logging.debug('UploadPerfValues: skipping upload due to dry-run')
else:
retry_util.GenericRetry(_RetryIfServerError, 3, _SendToDashboard,
formatted_data, dashboard=dashboard)
except PerfUploadingError:
logging.exception('Error when uploading perf data to the perf '
'dashboard for test %s.', test_name)
raise
else:
logging.info('Successfully uploaded perf data to the perf '
'dashboard for test %s.', test_name)
| 38.836694 | 84 | 0.712713 |
79436840c2a6914408d5ecd48e477349eb37de44 | 2,367 | py | Python | vaccine_allocation/constant_hazard.py | COVID-IWG/epimargin-studies | 7d4a78e2e6713c6a0aea2cd2440529153e9a635d | [
"MIT"
] | null | null | null | vaccine_allocation/constant_hazard.py | COVID-IWG/epimargin-studies | 7d4a78e2e6713c6a0aea2cd2440529153e9a635d | [
"MIT"
] | null | null | null | vaccine_allocation/constant_hazard.py | COVID-IWG/epimargin-studies | 7d4a78e2e6713c6a0aea2cd2440529153e9a635d | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
from studies.vaccine_allocation.commons import *
from tqdm import tqdm
May15 = 30 # days since April 15
simulation_initial_conditions = pd.read_csv(data/f"all_india_coalesced_initial_conditions{simulation_start.strftime('%b%d')}.csv")\
.drop(columns = ["Unnamed: 0"])\
.set_index(["state", "district"])\
.assign(
frac_R = lambda _: _.R0 / _.N_tot,
frac_RV = lambda _: (_.R0 + _.V0) / _.N_tot,
V0 = lambda _: _.V0.astype(int),
D0 = lambda _: _.D0.astype(int),
scaled_new_cases = lambda _: _.dT0.astype(int)
)\
[["Rt", "frac_R", "frac_RV", "V0", "scaled_new_cases"]]
def load_projections(state, district, t = May15):
state_code = state_name_lookup[state]
f = np.load(epi_dst / f'{state_code}_{district}_phi25_novax.npz')
return [np.median(f["dD"], axis = 1).astype(int)[t], np.median(f["dD"], axis = 1).astype(int)[t]]
projections = [load_projections(*idx) for idx in tqdm(simulation_initial_conditions.index)]
# prioritization = simulation_initial_conditions\
# .join(pd.DataFrame(projections, columns = ["projected_new_cases_may15", "projected_new_deaths_may15"], index = simulation_initial_conditions.index))
prioritization = pd.read_csv(data / "apr15_sero_prioritization.csv").set_index(["state", "district"])
crosswalk = pd.read_stata(Path.home() / "Dropbox/COVID Vaccination Policy/India/data/districts/all_crosswalk.dta")\
.drop(columns = ["state", "district"])\
.rename(columns = lambda s: s.replace("_api", ""))\
.set_index(["state", "district"])\
.sort_index()\
.filter(like = "lgd", axis = 1)
crosswalk.loc[coalesce_states].reset_index()\
.assign(
district = lambda _:_.state,
lgd_district_id = lambda _:_.lgd_state_id,
lgd_district_name = lambda _:_.lgd_state_name
).drop_duplicates()
prioritization.join(pd.concat([
crosswalk.drop(labels = coalesce_states),
crosswalk.loc[coalesce_states].reset_index()\
.assign(
district = lambda _:_.state,
lgd_district_id = lambda _:_.lgd_state_id,
lgd_district_name = lambda _:_.lgd_state_name
)\
.drop_duplicates()\
.set_index(["state", "district"])
]).sort_index())\
.to_csv(data / "apr15_sero_prioritization_lgd.csv" ) | 42.267857 | 154 | 0.662442 |
7943684ce59aafc1a6b4a946adad0e0c94f0e850 | 9,274 | py | Python | Codes/2DCNN/Models/UNet3P.py | Sakib1263/1D-2D-Segmentation-AutoEncoder-TF2-KERAS | bdeeed8913686d5141a5178bddc0137cce3f7212 | [
"MIT"
] | 1 | 2022-03-10T13:36:49.000Z | 2022-03-10T13:36:49.000Z | Codes/2DCNN/Models/UNet3P.py | Sakib1263/1D-2D-Segmentation-AutoEncoder-TF2-KERAS | bdeeed8913686d5141a5178bddc0137cce3f7212 | [
"MIT"
] | null | null | null | Codes/2DCNN/Models/UNet3P.py | Sakib1263/1D-2D-Segmentation-AutoEncoder-TF2-KERAS | bdeeed8913686d5141a5178bddc0137cce3f7212 | [
"MIT"
] | 2 | 2022-02-13T12:08:56.000Z | 2022-03-10T13:36:49.000Z | # Import Necessary Libraries
import numpy as np
import tensorflow as tf
def Conv_Block(inputs, model_width, kernel, multiplier):
# 2D Convolutional Block
x = tf.keras.layers.Conv2D(model_width * multiplier, kernel, padding='same')(inputs)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation('relu')(x)
return x
def trans_conv2D(inputs, model_width, multiplier):
# 2D Transposed Convolutional Block, used instead of UpSampling
x = tf.keras.layers.Conv2DTranspose(model_width * multiplier, (2, 2), strides=(2, 2), padding='same')(inputs) # Stride = 2, Kernel Size = 2
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation('relu')(x)
return x
def Concat_Block(input1, *argv):
# Concatenation Block from the Keras Library
cat = input1
for arg in range(0, len(argv)):
cat = tf.keras.layers.concatenate([cat, argv[arg]], axis=-1)
return cat
def upConv_Block(inputs, size=(2, 2)):
# 2D UpSampling Block
up = tf.keras.layers.UpSampling2D(size=size)(inputs)
return up
def Feature_Extraction_Block(inputs, model_width, feature_number):
# Feature Extraction Block for the AutoEncoder Mode
shape = inputs.shape
latent = tf.keras.layers.Flatten()(inputs)
latent = tf.keras.layers.Dense(feature_number, name='features')(latent)
latent = tf.keras.layers.Dense(model_width * shape[1] * shape[2])(latent)
latent = tf.keras.layers.Reshape((shape[1], shape[2], model_width))(latent)
return latent
def Attention_Block(skip_connection, gating_signal, num_filters, multiplier):
# Attention Block
conv1x1_1 = tf.keras.layers.Conv2D(num_filters*multiplier, (1, 1), strides=(2, 2))(skip_connection)
conv1x1_1 = tf.keras.layers.BatchNormalization()(conv1x1_1)
conv1x1_2 = tf.keras.layers.Conv2D(num_filters*multiplier, (1, 1), strides=(1, 1))(gating_signal)
conv1x1_2 = tf.keras.layers.BatchNormalization()(conv1x1_2)
conv1_2 = tf.keras.layers.add([conv1x1_1, conv1x1_2])
conv1_2 = tf.keras.layers.Activation('relu')(conv1_2)
conv1_2 = tf.keras.layers.Conv2D(1, (1, 1), strides=(1, 1))(conv1_2)
conv1_2 = tf.keras.layers.BatchNormalization()(conv1_2)
conv1_2 = tf.keras.layers.Activation('sigmoid')(conv1_2)
resampler1 = upConv_Block(conv1_2)
resampler2 = trans_conv2D(conv1_2, 1, 1)
resampler = tf.keras.layers.add([resampler1, resampler2])
out = skip_connection * resampler
return out
class UNet:
def __init__(self, length, width, model_depth, num_channel, model_width, kernel_size, problem_type='Regression',
output_nums=1, ds=0, ae=0, ag=0, lstm=0, feature_number=1024, is_transconv=True):
# length: Input Signal Length
# width: Input Image Width (y-dim) [Normally same as the x-dim i.e., Square shape]
# model_depth: Depth of the Model
# model_width: Width of the Input Layer of the Model
# num_channel: Number of Channels allowed by the Model
# kernel_size: Kernel or Filter Size of the Convolutional Layers
# problem_type: Classification (Binary or Multiclass) or Regression
# output_nums: Output Classes (Classification Mode) or Features (Regression Mode)
# ds: Checks where Deep Supervision is active or not, either 0 or 1 [Default value set as 0]
# ae: Enables or diables the AutoEncoder Mode, either 0 or 1 [Default value set as 0]
# ag: Checks where Attention Guided is active or not, either 0 or 1 [Default value set as 0]
# lstm: Checks where Bidirectional LSTM is active or not, either 0 or 1 [Default value set as 0]
# feature_number: Number of Features or Embeddings to be extracted from the AutoEncoder in the A_E Mode
# is_transconv: (TRUE - Transposed Convolution, FALSE - UpSampling) in the Encoder Layer
self.length = length
self.width = width
self.model_depth = model_depth
self.num_channel = num_channel
self.model_width = model_width
self.kernel_size = kernel_size
self.problem_type = problem_type
self.output_nums = output_nums
self.D_S = ds
self.A_E = ae
self.A_G = ag
self.LSTM = lstm
self.feature_number = feature_number
self.is_transconv = is_transconv
def UNet3P(self):
# Variable UNet3+ Model Design
if self.length == 0 or self.model_depth == 0 or self.model_width == 0 or self.num_channel == 0 or self.kernel_size == 0:
raise ValueError("Please Check the Values of the Input Parameters!")
convs = {}
levels = []
# Encoding
inputs = tf.keras.Input((self.length, self.width, self.num_channel))
pool = inputs
for i in range(1, (self.model_depth + 1)):
conv = Conv_Block(pool, self.model_width, self.kernel_size, 2 ** (i - 1))
conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** (i - 1))
pool = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv)
convs["conv%s" % i] = conv
if self.A_E == 1:
# Collect Latent Features or Embeddings from AutoEncoders
pool = Feature_Extraction_Block(pool, self.model_width, self.feature_number)
conv = Conv_Block(pool, self.model_width, self.kernel_size, 2 ** self.model_depth)
conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** self.model_depth)
# Decoding
deconv = conv
deconvs = {}
convs_list = list(convs.values())
for j in range(0, self.model_depth):
skip_connections_all = convs_list[self.model_depth - j - 1]
skip_connections_all = Conv_Block(skip_connections_all, self.model_width, self.kernel_size, 2 ** 0)
for k in range(0, self.model_depth - j - 1):
skip_connection = convs_list[k]
skip_connection = tf.keras.layers.MaxPooling2D(pool_size=(2 ** ((self.model_depth-j)-k-1),2 ** ((self.model_depth-j)-k-1)))(skip_connection)
skip_connection = Conv_Block(skip_connection, self.model_width, self.kernel_size, 2 ** 0)
skip_connections_all = tf.keras.layers.concatenate([skip_connections_all, skip_connection], axis=-1)
deconv_tot = upConv_Block(deconv, size=(2 ** 1,2 ** 1))
deconv_tot = Conv_Block(deconv_tot, self.model_width, self.kernel_size, 2 ** 0)
deconv_tot = tf.keras.layers.concatenate([skip_connections_all, deconv_tot], axis=-1)
if j > 0:
for m in range(0, j):
deconv = upConv_Block(deconvs["deconv%s" % m], size=(2 ** (j-m),2 ** (j-m)))
deconv = Conv_Block(deconv, self.model_width, self.kernel_size, 2 ** 0)
deconv_tot = tf.keras.layers.concatenate([deconv_tot, deconv], axis=-1)
deconv = Conv_Block(deconv_tot, self.model_width, self.kernel_size, self.model_depth + 1)
deconvs["deconv%s" % j] = deconv
if self.D_S == 1:
# For Deep Supervision
level = tf.keras.layers.Conv2D(1, (1, 1), (2, 2), name=f'level{self.model_depth - j}')(deconv)
levels.append(level)
# Output
outputs = []
if self.problem_type == 'Classification':
outputs = tf.keras.layers.Conv2D(self.output_nums, (1, 1), activation='softmax', name="out")(deconv)
elif self.problem_type == 'Regression':
outputs = tf.keras.layers.Conv2D(self.output_nums, (1, 1), activation='linear', name="out")(deconv)
model = tf.keras.Model(inputs=[inputs], outputs=[outputs])
if self.D_S == 1:
levels.append(outputs)
levels.reverse()
model = tf.keras.Model(inputs=[inputs], outputs=levels)
return model
if __name__ == '__main__':
# Configurations
length = 224 # Length of the Image (2D Signal)
width = 224 # Width of the Image
model_name = 'UNet3P' # Name of the Model
model_depth = 5 # Number of Levels in the CNN Model
model_width = 64 # Width of the Initial Layer, subsequent layers start from here
kernel_size = 3 # Size of the Kernels/Filter
num_channel = 1 # Number of Channels in the Model
D_S = 1 # Turn on Deep Supervision
A_E = 0 # Turn on AutoEncoder Mode for Feature Extraction
A_G = 1 # Turn on for Guided Attention
LSTM = 1 # Turn on for BiConvLSTM
problem_type = 'Regression' # Problem Type: Regression or Classification
output_nums = 1 # Number of Class for Classification Problems, always '1' for Regression Problems
is_transconv = True # True: Transposed Convolution, False: UpSampling
'''Only required if the AutoEncoder Mode is turned on'''
feature_number = 1024 # Number of Features to be Extracted
#
Model = UNet(length, width, model_depth, num_channel, model_width, kernel_size, problem_type=problem_type, output_nums=output_nums,
ds=D_S, ae=A_E, ag=A_G, lstm=LSTM, is_transconv=is_transconv).UNet3P()
Model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0003), loss=tf.keras.losses.MeanAbsoluteError(), metrics=tf.keras.metrics.MeanSquaredError())
Model.summary()
| 47.804124 | 162 | 0.660017 |
794368ca201a13a7ea89add820a3ec46ea3c0524 | 15,350 | py | Python | play/play_bagging_on_main.py | GavrilovMike/EnsembleLearning | 6badedf2b6e9f2d3b01c11246c32916864ad3848 | [
"MIT"
] | null | null | null | play/play_bagging_on_main.py | GavrilovMike/EnsembleLearning | 6badedf2b6e9f2d3b01c11246c32916864ad3848 | [
"MIT"
] | null | null | null | play/play_bagging_on_main.py | GavrilovMike/EnsembleLearning | 6badedf2b6e9f2d3b01c11246c32916864ad3848 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
https://github.com/oxwhirl/smac
"""
from smac.env import StarCraft2Env
import numpy as np
# import sys
import random
import pickle
# from gym.spaces import Discrete, Box, Dict
# Вывод массива целиком
np.set_printoptions(threshold=np.inf)
# определяем может ли агент сделать заданнное действие action_is
def is_possible_action(avail_actions_ind, action_is):
ia = 0
# print ("in def len(avail_actions_ind) = ", len(avail_actions_ind))
while ia < len(avail_actions_ind):
# print ("ia = ", ia)
if avail_actions_ind[ia] == action_is:
ia = len(avail_actions_ind) + 1
return True
else:
ia = ia + 1
return False
# получаем состояние агента как позицию на карте
def get_stateFox(agent_posX, agent_posY):
error_count = 0
state = 67
if 6 < agent_posX < 7 and 16.2 < agent_posY < 18:
state = 0
elif 7 < agent_posX < 8 and 16.2 < agent_posY < 18:
state = 1
elif 8 < agent_posX < 8.9 and 16.2 < agent_posY < 18:
state = 2
elif 8.9 < agent_posX < 9.1 and 16.2 < agent_posY < 18:
state = 3
elif 9.1 < agent_posX < 10 and 16.2 < agent_posY < 18:
state = 4
elif 10 < agent_posX < 11 and 16.2 < agent_posY < 18:
state = 5
elif 11 < agent_posX < 12 and 16.2 < agent_posY < 18:
state = 6
elif 12 < agent_posX < 13.1 and 16.2 < agent_posY < 18:
state = 7
elif 6 < agent_posX < 7 and 15.9 < agent_posY < 16.2:
state = 8
elif 7 < agent_posX < 8 and 15.9 < agent_posY < 16.2:
state = 9
elif 8 < agent_posX < 8.9 and 15.9 < agent_posY < 16.2:
state = 10
elif 8.9 < agent_posX < 9.1 and 15.9 < agent_posY < 16.2:
state = 11
elif 9.1 < agent_posX < 10 and 15.9 < agent_posY < 16.2:
state = 12
elif 10 < agent_posX < 11 and 15.9 < agent_posY < 16.2:
state = 13
elif 11 < agent_posX < 12 and 15.9 < agent_posY < 16.2:
state = 14
elif 12 < agent_posX < 13.1 and 15.9 < agent_posY < 16.2:
state = 15
elif 6 < agent_posX < 7 and 15 < agent_posY < 15.9:
state = 16
elif 7 < agent_posX < 8 and 15 < agent_posY < 15.9:
state = 17
elif 8 < agent_posX < 8.9 and 15 < agent_posY < 15.9:
state = 18
elif 8.9 < agent_posX < 9.1 and 15 < agent_posY < 15.9:
state = 19
elif 9.1 < agent_posX < 10 and 15 < agent_posY < 15.9:
state = 20
elif 10 < agent_posX < 11 and 15 < agent_posY < 15.9:
state = 21
elif 11 < agent_posX < 12 and 15 < agent_posY < 15.9:
state = 22
elif 12 < agent_posX < 13.1 and 15 < agent_posY < 15.9:
state = 23
elif 6 < agent_posX < 7 and 14 < agent_posY < 15:
state = 24
elif 7 < agent_posX < 8 and 14 < agent_posY < 15:
state = 25
elif 8 < agent_posX < 8.9 and 14 < agent_posY < 15:
state = 26
elif 8.9 < agent_posX < 9.1 and 14 < agent_posY < 15:
state = 27
elif 9.1 < agent_posX < 10 and 14 < agent_posY < 15:
state = 28
elif 10 < agent_posX < 11 and 14 < agent_posY < 15:
state = 29
elif 11 < agent_posX < 12 and 14 < agent_posY < 15:
state = 30
elif 12 < agent_posX < 13.1 and 14 < agent_posY < 15:
state = 31
if 13.1 < agent_posX < 14 and 16.2 < agent_posY < 18:
state = 32
elif 14 < agent_posX < 15 and 16.2 < agent_posY < 18:
state = 33
elif 15 < agent_posX < 16 and 16.2 < agent_posY < 18:
state = 34
elif 16 < agent_posX < 17 and 16.2 < agent_posY < 18:
state = 35
elif 17 < agent_posX < 18 and 16.2 < agent_posY < 18:
state = 36
elif 18 < agent_posX < 19 and 16.2 < agent_posY < 18:
state = 37
elif 19 < agent_posX < 20 and 16.2 < agent_posY < 18:
state = 38
elif 20 < agent_posX < 21 and 16.2 < agent_posY < 18:
state = 39
elif 21 < agent_posX < 22 and 16.2 < agent_posY < 18:
state = 40
elif 22 < agent_posX < 23 and 16.2 < agent_posY < 18:
state = 41
elif 23 < agent_posX < 24 and 16.2 < agent_posY < 18:
state = 42
if 13.1 < agent_posX < 14 and 15.9 < agent_posY < 16.2:
state = 43
elif 14 < agent_posX < 15 and 15.9 < agent_posY < 16.2:
state = 44
elif 15 < agent_posX < 16 and 15.9 < agent_posY < 16.2:
state = 45
elif 16 < agent_posX < 17 and 15.9 < agent_posY < 16.2:
state = 46
elif 17 < agent_posX < 18 and 15.9 < agent_posY < 16.2:
state = 47
elif 18 < agent_posX < 19 and 15.9 < agent_posY < 16.2:
state = 48
elif 19 < agent_posX < 20 and 15.9 < agent_posY < 16.2:
state = 49
elif 20 < agent_posX < 21 and 15.9 < agent_posY < 16.2:
state = 50
elif 21 < agent_posX < 22 and 15.9 < agent_posY < 16.2:
state = 51
elif 22 < agent_posX < 23 and 15.9 < agent_posY < 16.2:
state = 52
elif 23 < agent_posX < 24 and 15.9 < agent_posY < 16.2:
state = 53
if 13.1 < agent_posX < 14 and 15 < agent_posY < 15.9:
state = 54
elif 14 < agent_posX < 15 and 15 < agent_posY < 15.9:
state = 55
elif 15 < agent_posX < 16 and 15 < agent_posY < 15.9:
state = 56
elif 16 < agent_posX < 17 and 15 < agent_posY < 15.9:
state = 57
elif 17 < agent_posX < 18 and 15 < agent_posY < 15.9:
state = 58
elif 18 < agent_posX < 19 and 15 < agent_posY < 15.9:
state = 59
elif 19 < agent_posX < 20 and 15 < agent_posY < 15.9:
state = 60
elif 20 < agent_posX < 21 and 15 < agent_posY < 15.9:
state = 61
elif 21 < agent_posX < 22 and 15 < agent_posY < 15.9:
state = 62
elif 22 < agent_posX < 23 and 15 < agent_posY < 15.9:
state = 63
elif 23 < agent_posX < 24 and 15 < agent_posY < 15.9:
state = 64
if 13.1 < agent_posX < 14 and 14 < agent_posY < 15:
state = 65
elif 14 < agent_posX < 15 and 14 < agent_posY < 15:
state = 66
elif 15 < agent_posX < 16 and 14 < agent_posY < 15:
state = 67
elif 16 < agent_posX < 17 and 14 < agent_posY < 15:
state = 68
elif 17 < agent_posX < 18 and 14 < agent_posY < 15:
state = 69
elif 18 < agent_posX < 19 and 14 < agent_posY < 15:
state = 70
elif 19 < agent_posX < 20 and 14 < agent_posY < 15:
state = 71
elif 20 < agent_posX < 21 and 14 < agent_posY < 15:
state = 72
elif 21 < agent_posX < 22 and 14 < agent_posY < 15:
state = 73
elif 22 < agent_posX < 23 and 14 < agent_posY < 15:
state = 74
elif 23 < agent_posX < 24 and 14 < agent_posY < 15:
state = 75
# if (state > 31):
# print('Mistake\n')
# error_count += 1
#
# if (state < 31):
# print('Mistake\n')
# error_count += 1
# print ('Error_count+: ',error_count)
return state
"""
keys = [0 1 2 3 4 5]
act_ind_decode= {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6}
qt_arr[act_ind]= 0.0
qt_arr[act_ind]= 0.0
qt_arr[act_ind]= 0.0
qt_arr[act_ind]= 0.0
qt_arr[act_ind]= 0.0
qt_arr[act_ind]= 0.0
"""
def select_actionFox(state, avail_actions_ind, n_actionsFox, epsilon, Q_table):
qt_arr = np.zeros(len(avail_actions_ind))
# Функция arange() возвращает одномерный массив с равномерно разнесенными значениями внутри заданного интервала.
keys = np.arange(len(avail_actions_ind))
# print ("keys =", keys)
# act_ind_decode= {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6}
# Функция zip объединяет в кортежи элементы из последовательностей переданных в качестве аргументов.
act_ind_decode = dict(zip(keys, avail_actions_ind))
# print ("act_ind_decode=", act_ind_decode)
for act_ind in range(len(avail_actions_ind)):
qt_arr[act_ind] = Q_table[state, act_ind_decode[act_ind]]
# print ("qt_arr[act_ind]=",qt_arr[act_ind])
# Returns the indices of the maximum values along an axis.
# Exploit learned values
action = act_ind_decode[np.argmax(qt_arr)]
return action
# MAIN
def main():
"""The StarCraft II environment for decentralised multi-agent micromanagement scenarios."""
'''difficulty ="1" is VeryEasy'''
# replay_dir="D:\StarCraft II\Replays\smacfox"
env = StarCraft2Env(map_name="1mFOX", difficulty="1")
'''env_info= {'state_shape': 48, 'obs_shape': 30, 'n_actions': 9, 'n_agents': 3, 'episode_limit': 60}'''
env_info = env.get_env_info()
# print("env_info = ", env_info)
"""Returns the size of the observation."""
"""obssize = 10"""
"""obs= [array([ 1. , 1. , 1. , 1. , 1. ,
0.63521415, 0.63517255, -0.00726997, 0.06666667, 0.06666667],
dtype=float32)]"""
obssize = env.get_obs_size()
# print("obssize = ", obssize)
######################################################################
"""
ready_agents = []
#observation_space= Dict(action_mask:Box(9,), obs:Box(30,))
observation_space = Dict({
"obs": Box(-1, 1, shape=(env.get_obs_size())),
"action_mask": Box(0, 1, shape=(env.get_total_actions())) })
#print ("observation_space=", observation_space)
#action_space= Discrete(9)
action_space = Discrete(env.get_total_actions())
#print ("action_space=", action_space)
"""
########################################################################
n_actions = env_info["n_actions"]
# print ("n_actions=", n_actions)
n_agents = env_info["n_agents"]
n_episodes = 20 # количество эпизодов
############### Параметры обучения здесь нужны для функции select_actionFox ################################
alpha = 0.9 # learning rate sayon - 0.5
gamma = 0.5 # discount factor sayon - 0.9
epsilon = 0.7 # e-greedy
n_statesFox = 76 # количество состояний нашего мира-сетки
n_actionsFox = 7 # вводим свое количество действий, которые понадобятся
##################################################################################################
total_reward = 0
with open("/Users/mgavrilov/Study/ENSEMBLEALGS/ensebmles/Bagging/Bagging_QTable.pkl", 'rb') as f:
Q_table = pickle.load(f)
print(Q_table)
# print (Q_table)
for e in range(n_episodes):
# print("n_episode = ", e)
"""Reset the environment. Required after each full episode.Returns initial observations and states."""
env.reset()
''' Battle is over terminated = True'''
terminated = False
episode_reward = 0
actions_history = []
# n_steps = 1 #пока не берем это количество шагов для уменьгения награды за долгий поиск
"""
# вывод в файл
fileobj = open("файл.txt", "wt")
print("text",file=fileobj)
fileobj.close()
"""
"""
#динамический epsilon
if e % 15 == 0:
epsilon += (1 - epsilon) * 10 / n_episodes
print("epsilon = ", epsilon)
"""
# stoprun = [0,0,0,0,0]
while not terminated:
"""Returns observation for agent_id."""
obs = env.get_obs()
# print ("obs=", obs)
"""Returns the global state."""
# state = env.get_state()
actions = []
action = 0
'''agent_id= 0, agent_id= 1, agent_id= 2'''
for agent_id in range(n_agents):
# получаем характеристики юнита
unit = env.get_unit_by_id(agent_id)
# получаем состояние по координатам юнита
stateFox = get_stateFox(unit.pos.x, unit.pos.y)
# print ("state=", stateFox)
'''
tag = unit.tag #много разных характеристик юнита
x = unit.pos.x
y = unit.pos.y
'''
"""Returns the available actions for agent_id."""
"""avail_actions= [0, 1, 1, 1, 1, 1, 0, 0, 0]"""
avail_actions = env.get_avail_agent_actions(agent_id)
'''Функция nonzero() возвращает индексы ненулевых элементов массива.'''
"""avail_actions_ind of agent_id == 0: [1 2 3 4 5]"""
avail_actions_ind = np.nonzero(avail_actions)[0]
# выбираем действие
action = select_actionFox(stateFox, avail_actions_ind, n_actionsFox, epsilon, Q_table)
# собираем действия от разных агентов
actions.append(action)
actions_history.append(action)
###############_Бежим вправо и стреляем_################################
"""
if is_possible_action(avail_actions_ind, 6) == True:
action = 6
else:
if is_possible_action(avail_actions_ind, 4) == True:
action = 4
else:
action = np.random.choice(avail_actions_ind)
#Случайная выборка из значений заданного одномерного массива
"""
#####################################################################
"""Функция append() добавляет элементы в конец массива."""
# print("agent_id=",agent_id,"avail_actions_ind=", avail_actions_ind, "action = ", action, "actions = ", actions)
# f.write(agent_id)
# f.write(avail_actions_ind)
# собираем действия от разных агентов
# actions.append(action)
# как узнать куда стрелять? в определенного человека?
# как узнать что делают другие агенты? самому создавать для них глобальное состояние
# раз я ими управляю?
"""A single environment step. Returns reward, terminated, info."""
reward, terminated, _ = env.step(actions)
episode_reward += reward
###################_Обучаем_##############################################
"""
for agent_id in range(n_agents):
#получаем характеристики юнита
unit = env.get_unit_by_id(agent_id)
#получаем состояние по координатам юнита
stateFox_next = get_stateFox(unit.pos.x, unit.pos.y)
#поменять название на Qlearn
#подумать над action ведь здесь это последнее действие
#Qlearn(stateFox, stateFox_next, reward, action)
Q_table[stateFox, action] = Q_table[stateFox, action] + alpha * \
(reward + gamma * np.max(Q_table[stateFox_next, :]) - Q_table[stateFox, action])
"""
##########################################################################
total_reward += episode_reward
# Total reward in episode 4 = 20.0
print("Total reward in episode {} = {}".format(e, episode_reward))
# get_stats()= {'battles_won': 2, 'battles_game': 5, 'battles_draw': 0, 'win_rate': 0.4, 'timeouts': 0, 'restarts': 0}
print("get_stats()=", env.get_stats())
print("actions_history=", actions_history)
# env.save_replay() """Save a replay."""
print("Average reward = ", total_reward / n_episodes)
""""Close StarCraft II."""""
env.close()
if __name__ == "__main__":
main()
| 36.634845 | 129 | 0.549902 |
79436959148f0e0285d36b4e93e303c4f50a26d2 | 123 | py | Python | Day2/Q7.py | nkem1010/python-challenge-solutions | 203cedc691094a83b110fc75764aac51dbbc1a03 | [
"MIT"
] | 1 | 2020-05-24T21:53:59.000Z | 2020-05-24T21:53:59.000Z | Day2/Q7.py | nkem1010/python-challenge-solutions | 203cedc691094a83b110fc75764aac51dbbc1a03 | [
"MIT"
] | null | null | null | Day2/Q7.py | nkem1010/python-challenge-solutions | 203cedc691094a83b110fc75764aac51dbbc1a03 | [
"MIT"
] | null | null | null | file = input('Input name of file')
extension = file.split('.')
print('The extension of the file is :' + extension[-1])
| 30.75 | 57 | 0.650407 |
79436a2db622eac37a8a711cc3b784358db3b345 | 372 | py | Python | backend/mainshop/email.py | mbranko/webshop | b7c2ebb8720922f5277fee98fe826e54760b29d2 | [
"MIT"
] | null | null | null | backend/mainshop/email.py | mbranko/webshop | b7c2ebb8720922f5277fee98fe826e54760b29d2 | [
"MIT"
] | 5 | 2021-03-19T01:53:49.000Z | 2022-03-02T08:11:51.000Z | backend/mainshop/email.py | mbranko/webshop | b7c2ebb8720922f5277fee98fe826e54760b29d2 | [
"MIT"
] | null | null | null | ACTIVATE_ACCOUNT_TITLE = "Webshop Account Activation"
ACTIVATE_ACCOUNT_TEXT = """
Dear %s %s,
In order to complete registration of your account at the Webshop
please follow this link:
https://badasswebshop.com/activate/%s/
If you have not requested a Webshop account at our website
https://badasswebshop.com please ignore this message.
Best regards,
Webshop Team
"""
| 21.882353 | 64 | 0.782258 |
79436a6e24f6c9af2d3c2ad83f6201dd4e19cc97 | 15,924 | py | Python | pyquil/simulation/_reference.py | stjordanis/pyquil | 36987ecb78d5dc85d299dd62395b7669a1cedd5a | [
"Apache-2.0"
] | 677 | 2017-01-09T23:20:22.000Z | 2018-11-26T10:57:49.000Z | pyquil/simulation/_reference.py | stjordanis/pyquil | 36987ecb78d5dc85d299dd62395b7669a1cedd5a | [
"Apache-2.0"
] | 574 | 2018-11-28T05:38:40.000Z | 2022-03-23T20:38:28.000Z | pyquil/simulation/_reference.py | stjordanis/pyquil | 36987ecb78d5dc85d299dd62395b7669a1cedd5a | [
"Apache-2.0"
] | 202 | 2018-11-30T06:36:28.000Z | 2022-03-29T15:38:18.000Z | ##############################################################################
# Copyright 2016-2019 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import warnings
from typing import Any, List, Optional, Sequence, Tuple, Union, cast
import numpy as np
from numpy.random.mtrand import RandomState
from pyquil.paulis import PauliTerm, PauliSum
from pyquil.pyqvm import AbstractQuantumSimulator
from pyquil.quilbase import Gate
from pyquil.simulation.matrices import P0, P1, KRAUS_OPS, QUANTUM_GATES
from pyquil.simulation.tools import lifted_gate_matrix, lifted_gate, all_bitstrings
def _term_expectation(wf: np.ndarray, term: PauliTerm, n_qubits: int) -> Any:
# Computes <psi|XYZ..XXZ|psi>
wf2 = wf
for qubit_i, op_str in term._ops.items():
assert isinstance(qubit_i, int)
# Re-use QUANTUM_GATES since it has X, Y, Z
op_mat = QUANTUM_GATES[op_str]
op_mat = lifted_gate_matrix(matrix=op_mat, qubit_inds=[qubit_i], n_qubits=n_qubits)
wf2 = op_mat @ wf2
# `wf2` is XYZ..XXZ|psi>
# hit it with a <psi| i.e. `wf.dag`
return term.coefficient * (wf.conj().T @ wf2)
def _is_valid_quantum_state(state_matrix: np.ndarray, rtol: float = 1e-05, atol: float = 1e-08) -> bool:
"""
Checks if a quantum state is valid, i.e. the matrix is Hermitian; trace one, and that the
eigenvalues are non-negative.
:param state_matrix: a D by D np.ndarray representing a quantum state
:param rtol: The relative tolerance parameter in np.allclose and np.isclose
:param atol: The absolute tolerance parameter in np.allclose and np.isclose
:return: bool
"""
hermitian = np.allclose(state_matrix, np.conjugate(state_matrix.transpose()), rtol, atol)
if not hermitian:
raise ValueError("The state matrix is not Hermitian.")
trace_one = np.isclose(np.trace(state_matrix), 1, rtol, atol)
if not trace_one:
raise ValueError("The state matrix is not trace one.")
evals = np.linalg.eigvals(state_matrix)
non_neg_eigs = all([False if val < -atol else True for val in evals])
if not non_neg_eigs:
raise ValueError("The state matrix has negative Eigenvalues of order -" + str(atol) + ".")
return hermitian and trace_one and non_neg_eigs
class ReferenceWavefunctionSimulator(AbstractQuantumSimulator):
def __init__(self, n_qubits: int, rs: Optional[RandomState] = None):
"""
A wavefunction simulator that prioritizes readability over performance.
Please consider using
:py:class:`PyQVM(..., wf_simulator_type=ReferenceWavefunctionSimulator)` rather
than using this class directly.
This class uses a flat state-vector of length 2^n_qubits to store wavefunction
amplitudes. The basis is taken to be bitstrings ordered lexicographically with
qubit 0 as the rightmost bit. This is the same as the Rigetti Lisp QVM.
:param n_qubits: Number of qubits to simulate.
:param rs: a RandomState (should be shared with the owning :py:class:`PyQVM`) for
doing anything stochastic. A value of ``None`` disallows doing anything stochastic.
"""
super().__init__(n_qubits=n_qubits, rs=rs)
self.n_qubits = n_qubits
self.rs = rs
self.wf = np.zeros(2 ** n_qubits, dtype=np.complex128)
self.wf[0] = complex(1.0, 0)
def sample_bitstrings(self, n_samples: int) -> np.ndarray:
"""
Sample bitstrings from the distribution defined by the wavefunction.
Qubit 0 is at ``out[:, 0]``.
:param n_samples: The number of bitstrings to sample
:return: An array of shape (n_samples, n_qubits)
"""
if self.rs is None:
raise ValueError(
"You have tried to perform a stochastic operation without setting the "
"random state of the simulator. Might I suggest using a PyQVM object?"
)
probabilities = np.abs(self.wf) ** 2
possible_bitstrings = all_bitstrings(self.n_qubits)
inds = self.rs.choice(2 ** self.n_qubits, n_samples, p=probabilities)
bitstrings = possible_bitstrings[inds, :]
bitstrings = np.flip(bitstrings, axis=1) # qubit ordering: 0 on the left.
return bitstrings # type: ignore
def do_gate(self, gate: Gate) -> "ReferenceWavefunctionSimulator":
"""
Perform a gate.
:return: ``self`` to support method chaining.
"""
unitary = lifted_gate(gate=gate, n_qubits=self.n_qubits)
self.wf = unitary.dot(self.wf)
return self
def do_gate_matrix(self, matrix: np.ndarray, qubits: Sequence[int]) -> "ReferenceWavefunctionSimulator":
"""
Apply an arbitrary unitary; not necessarily a named gate.
:param matrix: The unitary matrix to apply. No checks are done.
:param qubits: The qubits to apply the unitary to.
:return: ``self`` to support method chaining.
"""
unitary = lifted_gate_matrix(matrix, list(qubits), n_qubits=self.n_qubits)
self.wf = unitary.dot(self.wf)
return self
def do_measurement(self, qubit: int) -> int:
"""
Measure a qubit, collapse the wavefunction, and return the measurement result.
:param qubit: Index of the qubit to measure.
:return: measured bit
"""
if self.rs is None:
raise ValueError(
"You have tried to perform a stochastic operation without setting the "
"random state of the simulator. Might I suggest using a PyQVM object?"
)
# lift projective measure operator to Hilbert space
# prob(0) = <psi P0 | P0 psi> = psi* . P0* . P0 . psi
measure_0 = lifted_gate_matrix(matrix=P0, qubit_inds=[qubit], n_qubits=self.n_qubits)
proj_psi = measure_0 @ self.wf
prob_zero = np.conj(proj_psi).T @ proj_psi
# generate random number to 'roll' for measure
if self.rs.uniform() < prob_zero:
# decohere state using the measure_0 operator
unitary = measure_0 @ (np.eye(2 ** self.n_qubits) / np.sqrt(prob_zero))
self.wf = unitary.dot(self.wf)
return 0
else: # measure one
measure_1 = lifted_gate_matrix(matrix=P1, qubit_inds=[qubit], n_qubits=self.n_qubits)
unitary = measure_1 @ (np.eye(2 ** self.n_qubits) / np.sqrt(1 - prob_zero))
self.wf = unitary.dot(self.wf)
return 1
def expectation(self, operator: Union[PauliTerm, PauliSum]) -> float:
"""
Compute the expectation of an operator.
:param operator: The operator
:return: The operator's expectation value
"""
if not isinstance(operator, PauliSum):
operator = PauliSum([operator])
return sum(_term_expectation(self.wf, term, n_qubits=self.n_qubits) for term in operator)
def reset(self) -> "ReferenceWavefunctionSimulator":
"""
Reset the wavefunction to the ``|000...00>`` state.
:return: ``self`` to support method chaining.
"""
self.wf.fill(0)
self.wf[0] = complex(1.0, 0)
return self
def do_post_gate_noise(self, noise_type: str, noise_prob: float, qubits: List[int]) -> "AbstractQuantumSimulator":
raise NotImplementedError("The reference wavefunction simulator cannot handle noise")
def zero_state_matrix(n_qubits: int) -> np.ndarray:
"""
Construct a matrix corresponding to the tensor product of `n` ground states ``|0><0|``.
:param n_qubits: The number of qubits.
:return: The state matrix ``|000...0><000...0|`` for `n_qubits`.
"""
state_matrix = np.zeros((2 ** n_qubits, 2 ** n_qubits), dtype=np.complex128)
state_matrix[0, 0] = complex(1.0, 0)
return state_matrix
class ReferenceDensitySimulator(AbstractQuantumSimulator):
"""
A density matrix simulator that prioritizes readability over performance.
Please consider using
:py:class:`PyQVM(..., wf_simulator_type=ReferenceDensitySimulator)` rather
than using this class directly.
This class uses a dense matrix of shape ``(2^n_qubits, 2^n_qubits)`` to store the
density matrix.
:param n_qubits: Number of qubits to simulate.
:param rs: a RandomState (should be shared with the owning :py:class:`PyQVM`) for
doing anything stochastic. A value of ``None`` disallows doing anything stochastic.
"""
def __init__(self, n_qubits: int, rs: Optional[RandomState] = None):
super().__init__(n_qubits=n_qubits, rs=rs)
self.n_qubits = n_qubits
self.rs = rs
self.density: Optional[np.ndarray] = None
self.set_initial_state(zero_state_matrix(n_qubits)).reset()
def set_initial_state(self, state_matrix: np.ndarray) -> "ReferenceDensitySimulator":
"""
This method is the correct way (TM) to update the initial state matrix that is
initialized every time reset() is called. The default initial state of
ReferenceDensitySimulator is ``|000...00>``.
Note that the current state matrix, i.e. ``self.density`` is not affected by this
method; you must change it directly or else call reset() after calling this method.
To restore default state initialization behavior of ReferenceDensitySimulator pass in
a ``state_matrix`` equal to the default initial state on `n_qubits` (i.e. ``|000...00>``)
and then call ``reset()``. We have provided a helper function ``n_qubit_zero_state``
in the ``_reference.py`` module to simplify this step.
:param state_matrix: numpy.ndarray or None.
:return: ``self`` to support method chaining.
"""
rows, cols = state_matrix.shape
if rows != cols:
raise ValueError("The state matrix is not square.")
if self.n_qubits != int(np.log2(rows)):
raise ValueError("The state matrix is not defined on the same numbers of qubits as the QVM.")
if _is_valid_quantum_state(state_matrix):
self.initial_density = state_matrix
else:
raise ValueError(
"The state matrix is not valid. It must be Hermitian, trace one, " "and have non-negative eigenvalues."
)
return self
def sample_bitstrings(self, n_samples: int, tol_factor: float = 1e8) -> np.ndarray:
"""
Sample bitstrings from the distribution defined by the wavefunction.
Qubit 0 is at ``out[:, 0]``.
:param n_samples: The number of bitstrings to sample
:param tol_factor: Tolerance to set imaginary probabilities to zero, relative to
machine epsilon.
:return: An array of shape (n_samples, n_qubits)
"""
if self.rs is None:
raise ValueError(
"You have tried to perform a stochastic operation without setting the "
"random state of the simulator. Might I suggest using a PyQVM object?"
)
# for np.real_if_close the actual tolerance is (machine_eps * tol_factor),
# where `machine_epsilon = np.finfo(float).eps`. If we use tol_factor = 1e8, then the
# overall tolerance is \approx 2.2e-8.
probabilities = np.real_if_close(np.diagonal(self.density), tol=tol_factor) # type: ignore
# Next set negative probabilities to zero
probabilities = np.array([0 if p < 0.0 else p for p in probabilities])
# Ensure they sum to one
probabilities = probabilities / np.sum(probabilities)
possible_bitstrings = all_bitstrings(self.n_qubits)
inds = self.rs.choice(2 ** self.n_qubits, n_samples, p=probabilities)
bitstrings = possible_bitstrings[inds, :]
bitstrings = np.flip(bitstrings, axis=1) # qubit ordering: 0 on the left.
return bitstrings # type: ignore
def do_gate(self, gate: Gate) -> "AbstractQuantumSimulator":
"""
Perform a gate.
:return: ``self`` to support method chaining.
"""
unitary = lifted_gate(gate=gate, n_qubits=self.n_qubits)
self.density = unitary.dot(self.density).dot(np.conj(unitary).T) # type: ignore
return self
def do_gate_matrix(self, matrix: np.ndarray, qubits: Sequence[int]) -> "AbstractQuantumSimulator":
"""
Apply an arbitrary unitary; not necessarily a named gate.
:param matrix: The unitary matrix to apply. No checks are done
:param qubits: A list of qubits to apply the unitary to.
:return: ``self`` to support method chaining.
"""
unitary = lifted_gate_matrix(matrix=matrix, qubit_inds=qubits, n_qubits=self.n_qubits)
self.density = unitary.dot(self.density).dot(np.conj(unitary).T) # type: ignore
return self
def do_measurement(self, qubit: int) -> int:
"""
Measure a qubit and collapse the wavefunction
:return: The measurement result. A 1 or a 0.
"""
if self.rs is None:
raise ValueError(
"You have tried to perform a stochastic operation without setting the "
"random state of the simulator. Might I suggest using a PyQVM object?"
)
measure_0 = lifted_gate_matrix(matrix=P0, qubit_inds=[qubit], n_qubits=self.n_qubits)
prob_zero = np.trace(measure_0 @ self.density) # type: ignore
# generate random number to 'roll' for measurement
if self.rs.uniform() < prob_zero:
# decohere state using the measure_0 operator
unitary = measure_0 @ (np.eye(2 ** self.n_qubits) / np.sqrt(prob_zero))
self.density = unitary.dot(self.density).dot(np.conj(unitary.T))
return 0
else: # measure one
measure_1 = lifted_gate_matrix(matrix=P1, qubit_inds=[qubit], n_qubits=self.n_qubits)
unitary = measure_1 @ (np.eye(2 ** self.n_qubits) / np.sqrt(1 - prob_zero))
self.density = unitary.dot(self.density).dot(np.conj(unitary.T))
return 1
def expectation(self, operator: Union[PauliTerm, PauliSum]) -> complex:
raise NotImplementedError("To implement")
def reset(self) -> "AbstractQuantumSimulator":
"""
Resets the current state of ReferenceDensitySimulator ``self.density`` to
``self.initial_density``.
:return: ``self`` to support method chaining.
"""
self.density = self.initial_density
return self
def do_post_gate_noise(self, noise_type: str, noise_prob: float, qubits: List[int]) -> "ReferenceDensitySimulator":
kraus_ops = cast(Tuple[np.ndarray, ...], KRAUS_OPS[noise_type](p=noise_prob))
if np.isclose(noise_prob, 0.0):
warnings.warn(f"Skipping {noise_type} post-gate noise because noise_prob is close to 0")
return self
for q in qubits:
new_density = np.zeros_like(self.density) # type: ignore
for kraus_op in kraus_ops:
lifted_kraus_op = lifted_gate_matrix(matrix=kraus_op, qubit_inds=[q], n_qubits=self.n_qubits)
new_density += lifted_kraus_op.dot(self.density).dot(np.conj(lifted_kraus_op.T)) # type: ignore
self.density = new_density
return self
| 43.98895 | 119 | 0.646822 |
79436b03a99bfdbf3a6fe58a16d67e8863aefd1f | 3,403 | py | Python | pypureclient/flasharray/FA_2_10/models/directory_policy_export_post.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/flasharray/FA_2_10/models/directory_policy_export_post.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/flasharray/FA_2_10/models/directory_policy_export_post.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.10
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_10 import models
class DirectoryPolicyExportPost(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'policies': 'list[DirectorypolicyexportpostPolicies]'
}
attribute_map = {
'policies': 'policies'
}
required_args = {
}
def __init__(
self,
policies=None, # type: List[models.DirectorypolicyexportpostPolicies]
):
"""
Keyword args:
policies (list[DirectorypolicyexportpostPolicies]): A list of export policies to apply to the directory. The `id` and `name` fields in each `policy` parameter are required, but cannot be set together.
"""
if policies is not None:
self.policies = policies
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `DirectoryPolicyExportPost`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DirectoryPolicyExportPost, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DirectoryPolicyExportPost):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 30.383929 | 212 | 0.570673 |
79436b2aab1c38baca65b992127fe364691ab9c9 | 2,379 | py | Python | pipeline/helper_components.py | younesslanda/MLops-on-GCP | 430a453389fc23e19bd6f7585e08fdce511b8920 | [
"Apache-2.0"
] | 1 | 2022-03-17T04:58:05.000Z | 2022-03-17T04:58:05.000Z | pipeline/helper_components.py | younesslanda/MLops-on-GCP | 430a453389fc23e19bd6f7585e08fdce511b8920 | [
"Apache-2.0"
] | null | null | null | pipeline/helper_components.py | younesslanda/MLops-on-GCP | 430a453389fc23e19bd6f7585e08fdce511b8920 | [
"Apache-2.0"
] | null | null | null | """Helper components."""
from googleapiclient import discovery
from googleapiclient import errors
#import joblib
import pickle
import json
import pandas as pd
import subprocess
import sys
from sklearn.metrics import accuracy_score, recall_score
from typing import NamedTuple
def retrieve_best_run(project_id: str, job_id: str) -> NamedTuple('Outputs', [('metric_value', float), ('alpha', float),
('max_iter', int)]):
"""Retrieves the parameters of the best Hypertune run."""
ml = discovery.build('ml', 'v1')
job_name = 'projects/{}/jobs/{}'.format(project_id, job_id)
request = ml.projects().jobs().get(name=job_name)
try:
response = request.execute()
except errors.HttpError as err:
print(err)
except:
print('Unexpected error')
print(response)
best_trial = response['trainingOutput']['trials'][0]
metric_value = best_trial['finalMetric']['objectiveValue']
alpha = float(best_trial['hyperparameters']['alpha'])
max_iter = int(best_trial['hyperparameters']['max_iter'])
return (metric_value, alpha, max_iter)
def evaluate_model(dataset_path: str, model_path: str, metric_name: str) -> NamedTuple('Outputs', [('metric_name', str), ('metric_value', float),
('mlpipeline_metrics', 'Metrics')]):
"""Evaluates a trained sklearn model."""
df_test = pd.read_csv(dataset_path)
X_test = df_test.drop('Cover_Type', axis=1)
y_test = df_test['Cover_Type']
# Copy the model from GCS
model_filename = 'model.pkl'
gcs_model_filepath = '{}/{}'.format(model_path, model_filename)
print(gcs_model_filepath)
subprocess.check_call(['gsutil', 'cp', gcs_model_filepath, model_filename],
stderr=sys.stdout)
with open(model_filename, 'rb') as model_file:
model = pickle.load(model_file)
y_hat = model.predict(X_test)
if metric_name == 'accuracy':
metric_value = accuracy_score(y_test, y_hat)
elif metric_name == 'recall':
metric_value = recall_score(y_test, y_hat)
else:
metric_name = 'N/A'
metric_value = 0
# Export the metric
metrics = {
'metrics': [{
'name': metric_name,
'numberValue': float(metric_value)
}]
}
return (metric_name, metric_value, json.dumps(metrics)) | 29.37037 | 145 | 0.649853 |
79436b95bc43ee6466e0336fb33e37e57a42086a | 1,246 | py | Python | coffea/nanoaod/methods/__init__.py | dnoonan08/coffea | fb52a8a31245c6f4cf5bbd13ea51cdda5262dfa0 | [
"BSD-3-Clause"
] | null | null | null | coffea/nanoaod/methods/__init__.py | dnoonan08/coffea | fb52a8a31245c6f4cf5bbd13ea51cdda5262dfa0 | [
"BSD-3-Clause"
] | null | null | null | coffea/nanoaod/methods/__init__.py | dnoonan08/coffea | fb52a8a31245c6f4cf5bbd13ea51cdda5262dfa0 | [
"BSD-3-Clause"
] | 1 | 2019-06-14T15:24:26.000Z | 2019-06-14T15:24:26.000Z | from .common import METVector, LorentzVector, Candidate
from .leptons import Electron, Muon, Photon, Tau
from .jets import Jet, FatJet
from .generator import GenParticle, GenVisTau
collection_methods = {
'CaloMET': METVector,
'ChsMET': METVector,
'GenMET': METVector,
'MET': METVector,
'METFixEE2017': METVector,
'PuppiMET': METVector,
'RawMET': METVector,
'TkMET': METVector,
# pseudo-lorentz: pt, eta, phi, mass=0
'IsoTrack': LorentzVector,
'SoftActivityJet': LorentzVector,
'TrigObj': LorentzVector,
# True lorentz: pt, eta, phi, mass
'FatJet': FatJet,
'GenDressedLepton': LorentzVector,
'GenJet': LorentzVector,
'GenJetAK8': FatJet,
'Jet': Jet,
'LHEPart': LorentzVector,
'SV': LorentzVector,
'SubGenJetAK8': LorentzVector,
'SubJet': LorentzVector,
# Candidate: LorentzVector + charge
'Electron': Electron,
'Muon': Muon,
'Photon': Photon,
'Tau': Tau,
'GenVisTau': GenVisTau,
# special
'GenPart': GenParticle,
}
__all__ = [
'METVector',
'LorentzVector',
'Candidate',
'Electron',
'Muon',
'Photon',
'Tau',
'Jet',
'FatJet',
'GenParticle',
'GenVisTau',
'collection_methods',
]
| 23.074074 | 55 | 0.631621 |
79436bd7814ecf9e1c295937509df98d8fbb09eb | 673 | py | Python | autobahn/wamp/gen/wamp/proto/Principal.py | andriyor/autobahn-python | 4b6d825bb308d695f440be6ebe5e713af85bf143 | [
"MIT"
] | null | null | null | autobahn/wamp/gen/wamp/proto/Principal.py | andriyor/autobahn-python | 4b6d825bb308d695f440be6ebe5e713af85bf143 | [
"MIT"
] | 3 | 2019-07-10T12:37:53.000Z | 2021-12-07T14:14:56.000Z | autobahn/wamp/gen/wamp/proto/Principal.py | andriyor/autobahn-python | 4b6d825bb308d695f440be6ebe5e713af85bf143 | [
"MIT"
] | 4 | 2019-03-01T14:57:06.000Z | 2022-01-06T16:31:10.000Z | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: proto
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class Principal(object):
__slots__ = ['_tab']
@classmethod
def SizeOf(cls):
return 8
# Principal
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Principal
def Session(self): return self._tab.Get(flatbuffers.number_types.Uint64Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(0))
def CreatePrincipal(builder, session):
builder.Prep(8, 8)
builder.PrependUint64(session)
return builder.Offset()
| 24.925926 | 148 | 0.720654 |
79436c5601928c492585143de43f0536374ffca4 | 6,594 | py | Python | GPro/validations.py | 1989Ryan/GPro | 368ac6b349f83287cc683b4d50b77036cc9deafa | [
"MIT"
] | null | null | null | GPro/validations.py | 1989Ryan/GPro | 368ac6b349f83287cc683b4d50b77036cc9deafa | [
"MIT"
] | null | null | null | GPro/validations.py | 1989Ryan/GPro | 368ac6b349f83287cc683b4d50b77036cc9deafa | [
"MIT"
] | null | null | null | import numpy as np
def assert_array(x):
"""Throw a TypeError if X is not array-like."""
if not (isinstance(x, np.ndarray) or isinstance(x, list)):
raise TypeError('Only lists and numpy arrays are supported.')
def convert_array(x):
"""Convert a list to a numpy array."""
if isinstance(x, list):
# data-type is inferred from the input data.
x = np.asarray(x)
return x
def set_d_type(x, d_type):
"""Sets the d_type of a numpy array."""
if not isinstance(x[0, 0], d_type):
x = x.astype(d_type)
return x
def assert_finite(x):
"""Throw a ValueError if x contains NaNs or infinity."""
if not np.isfinite(x.sum()):
raise ValueError('Only finite numbers are supported.')
def assert_dim(x):
"""Throw an Assertion error if x is a 1d array."""
assert len(x.shape) > 1, \
"Array %r is of inconsistent dimensions." % x
def assert_object(x):
"""Throw a Type error if X is an object."""
if x.dtype.kind == "O":
raise TypeError('Object type is not supported for %r.' % x)
def check_x_m(x, m):
"""Input validation for standard estimators.
Checks x and m for consistent shapes. By default, x and m are
checked to be non-empty and containing only finite values. m
is also checked to be containing only positives indexes of x.
Parameters
----------
x : array-like.
Input data.
m : array-like.
Preferences.
Returns
-------
x : the validated x.
m : the validated m.
"""
# Pandas data frame not supported.
assert_array(x), assert_array(m)
# If list converts to numpy array.
x = convert_array(x)
m = convert_array(m)
# Check valid dimensions
assert_dim(x), assert_dim(m)
# Only real values are supported.
assert_object(x), assert_object(m)
if x.dtype.kind not in ('f', 'i', 'u'):
raise TypeError('Only floating-point, signed or unsigned integer,\
training data supported.')
if m.dtype.kind not in ('i', 'u'):
raise TypeError('Only integer preference data supported.')
# float64 for x and int8 for m.
x = set_d_type(x, d_type=np.float64)
m = set_d_type(m, d_type=np.int8)
# Only finite numbers are supported.
assert_finite(x), assert_finite(m)
# Only positive numbers are supported for preferences.
if any(m.ravel() < 0):
raise ValueError('Only positive integers are supported for m.')
# A preference set should contain two values.
assert m.shape[1] == 2, \
"Array %r is of inconsistent dimensions." % m
assert x.shape[0] > 1, \
"Array %r is of inconsistent size." % x
# Check if indexes of m are consistent with size of x.
if m.max() > x.shape[0]:
raise ValueError('Preferences should be indexes of X.')
if any(np.subtract(m[:, 0], m[:, 1]) == 0):
raise ValueError('m contains at least one set of preferences'
' with the same values.')
return x, m
def check_post_approx(**params):
"""Input validation for the Laplace approximation.
Checks s_eval, max_iter, eta, tol for consistent values and shapes.
"""
s_eval = params['s_eval']
max_iter = params['max_iter']
eta = params['eta']
tol = params['tol']
if np.isscalar(s_eval) and not isinstance(s_eval, str):
if max_iter <= 0:
raise ValueError("s_eval must be a positive scalar.")
else:
raise ValueError("s_eval must be a positive scalar.")
if np.isscalar(max_iter) and not isinstance(max_iter, str):
if not (isinstance(max_iter, int) and max_iter > 0):
raise ValueError("max_iter must be a positive integer scalar.")
else:
raise ValueError("max_iter must be a positive integer scalar.")
if np.isscalar(eta) and not isinstance(eta, str):
if eta < 0:
raise ValueError("eta must be a positive scalar.")
else:
raise ValueError("eta must be a positive scalar.")
if np.isscalar(tol) and not isinstance(tol, str):
if tol < 0:
raise ValueError("tol must be a positive scalar.")
else:
raise ValueError("tol must be a positive scalar.")
return
def check_kernel(x, **params):
"""Input validation for the RBF and Matern kernel.
Checks length_scale and nu for consistent shape and value.
Parameters
----------
x : array-like.
Input data.
Returns
-------
None
"""
length_scale = params['length_scale']
if np.iterable(length_scale):
if np.asarray(length_scale).dtype.kind not in ('f', 'i', 'u'):
raise TypeError('Only floating-point, signed or unsigned integer,\
length_scale supported.')
elif any(length_scale) <= 0:
raise ValueError("length_scale values must be positive.")
assert x.shape[0] == len(length_scale), \
"Array length_scale is of inconsistent dimension."
elif isinstance(length_scale, str):
raise ValueError("length_scale must be a positive scalar.")
if len(params) > 1:
nu = params['nu']
if np.isscalar(nu) and not isinstance(nu, str):
if nu <= 0:
raise ValueError("nu must be a positive scalar.")
else:
raise ValueError("nu must be a positive scalar.")
return
def check_acquisition(**params):
"""Input validation for acquisition functions.
Checks kappa and nu for consistent values and shapes.
"""
key = list(params)[0]
value = params[key]
if np.isscalar(value) and not isinstance(value, str):
if value < 0:
raise ValueError("%s must be a positive scalar." % key)
else:
raise ValueError("%s must be a positive scalar." % key)
def check_bounds(x, bounds):
"""Input validation for .
Checks kappa and nu for consistent values and shapes.
"""
if not isinstance(bounds, dict):
raise TypeError('bounds should be a dictionary')
assert x.shape[1] == len(bounds), \
"bounds is of inconsistent size."
for key_value in bounds.items():
values = key_value[1]
if not (isinstance(values, tuple) or isinstance(values, list)):
raise TypeError('bounds values should be stored in list or tuple')
assert len(values) == 2, "bounds is of inconsistent size."
inf, sup = values
if isinstance(inf, str) or isinstance(sup, str):
raise ValueError('bounds values should be numeric.')
assert inf < sup, "inf bound cannot be superior to sup bound."
| 31.701923 | 78 | 0.624507 |
79436d3789fd6d745dd0555280cc906dec6a9158 | 33,175 | py | Python | sdk/lusid/models/resource_list_of_value_type.py | fossabot/lusid-sdk-python-preview | 2c95d870489d93dee921593877256d3869c090e6 | [
"MIT"
] | null | null | null | sdk/lusid/models/resource_list_of_value_type.py | fossabot/lusid-sdk-python-preview | 2c95d870489d93dee921593877256d3869c090e6 | [
"MIT"
] | null | null | null | sdk/lusid/models/resource_list_of_value_type.py | fossabot/lusid-sdk-python-preview | 2c95d870489d93dee921593877256d3869c090e6 | [
"MIT"
] | 1 | 2020-10-29T08:35:32.000Z | 2020-10-29T08:35:32.000Z | # coding: utf-8
"""
LUSID API
# Introduction This page documents the [LUSID APIs](https://www.lusid.com/api/swagger), which allows authorised clients to query and update their data within the LUSID platform. SDKs to interact with the LUSID APIs are available in the following languages : * [C#](https://github.com/finbourne/lusid-sdk-csharp) * [Java](https://github.com/finbourne/lusid-sdk-java) * [JavaScript](https://github.com/finbourne/lusid-sdk-js) * [Python](https://github.com/finbourne/lusid-sdk-python) # Data Model The LUSID API has a relatively lightweight but extremely powerful data model. One of the goals of LUSID was not to enforce on clients a single rigid data model but rather to provide a flexible foundation onto which clients can map their own data models. The core entities in LUSID provide a minimal structure and set of relationships, and the data model can be extended using Properties. The LUSID data model is exposed through the LUSID APIs. The APIs provide access to both business objects and the meta data used to configure the systems behaviours. The key business entities are: - * **Portfolios** A portfolio is a container for transactions and holdings (a **Transaction Portfolio**) or constituents (a **Reference Portfolio**). * **Derived Portfolios**. Derived Portfolios allow Portfolios to be created based on other Portfolios, by overriding or adding specific items. * **Holdings** A Holding is a quantity of an Instrument or a balance of cash within a Portfolio. Holdings can only be adjusted via Transactions. * **Transactions** A Transaction is an economic event that occurs in a Portfolio, causing its holdings to change. * **Corporate Actions** A corporate action is a market event which occurs to an Instrument and thus applies to all portfolios which holding the instrument. Examples are stock splits or mergers. * **Constituents** A constituent is a record in a Reference Portfolio containing an Instrument and an associated weight. * **Instruments** An instrument represents a currency, tradable instrument or OTC contract that is attached to a transaction and a holding. * **Properties** All major entities allow additional user defined properties to be associated with them. For example, a Portfolio manager may be associated with a portfolio. Meta data includes: - * **Transaction Types** Transactions are booked with a specific transaction type. The types are client defined and are used to map the Transaction to a series of movements which update the portfolio holdings. * **Properties Types** Types of user defined properties used within the system. ## Scope All data in LUSID is segregated at the client level. Entities in LUSID are identifiable by a unique code. Every entity lives within a logical data partition known as a Scope. Scope is an identity namespace allowing two entities with the same unique code to co-exist within individual address spaces. For example, prices for equities from different vendors may be uploaded into different scopes such as `client/vendor1` and `client/vendor2`. A portfolio may then be valued using either of the price sources by referencing the appropriate scope. LUSID Clients cannot access scopes of other clients. ## Instruments LUSID has its own built-in instrument master which you can use to master your own instrument universe. Every instrument must be created with one or more unique market identifiers, such as [FIGI](https://openfigi.com/). For any non-listed instruments (eg OTCs), you can upload an instrument against a custom ID of your choosing. In addition, LUSID will allocate each instrument a unique 'LUSID instrument identifier'. The LUSID instrument identifier is what is used when uploading transactions, holdings, prices, etc. The API exposes an `instrument/lookup` endpoint which can be used to lookup these LUSID identifiers using their market identifiers. Cash can be referenced using the ISO currency code prefixed with \"`CCY_`\" e.g. `CCY_GBP` ## Instrument Data Instrument data can be uploaded to the system using the [Instrument Properties](#tag/InstrumentProperties) endpoint. | Field|Type|Description | | ---|---|--- | | Key|propertykey|The key of the property. This takes the format {domain}/{scope}/{code} e.g. 'Instrument/system/Name' or 'Transaction/strategy/quantsignal'. | | Value|string|The value of the property. | | EffectiveFrom|datetimeoffset|The effective datetime from which the property is valid. | ## Transaction Portfolios Portfolios are the top-level entity containers within LUSID, containing transactions, corporate actions and holdings. The transactions build up the portfolio holdings on which valuations, analytics profit & loss and risk can be calculated. Properties can be associated with Portfolios to add in additional data. Portfolio properties can be changed over time, for example to allow a Portfolio Manager to be linked with a Portfolio. Additionally, portfolios can be securitised and held by other portfolios, allowing LUSID to perform \"drill-through\" into underlying fund holdings ### Derived Portfolios LUSID also allows for a portfolio to be composed of another portfolio via derived portfolios. A derived portfolio can contain its own transactions and also inherits any transactions from its parent portfolio. Any changes made to the parent portfolio are automatically reflected in derived portfolio. Derived portfolios in conjunction with scopes are a powerful construct. For example, to do pre-trade what-if analysis, a derived portfolio could be created a new namespace linked to the underlying live (parent) portfolio. Analysis can then be undertaken on the derived portfolio without affecting the live portfolio. ### Transactions A transaction represents an economic activity against a Portfolio. Transactions are processed according to a configuration. This will tell the LUSID engine how to interpret the transaction and correctly update the holdings. LUSID comes with a set of transaction types you can use out of the box, or you can configure your own set(s) of transactions. For more details see the [LUSID Getting Started Guide for transaction configuration.](https://support.lusid.com/configuring-transaction-types) | Field|Type|Description | | ---|---|--- | | TransactionId|string|The unique identifier for the transaction. | | Type|string|The type of the transaction e.g. 'Buy', 'Sell'. The transaction type should have been pre-configured via the System Configuration API endpoint. If it hasn't been pre-configured the transaction will still be updated or inserted however you will be unable to generate the resultant holdings for the portfolio that contains this transaction as LUSID does not know how to process it. | | InstrumentIdentifiers|map|A set of instrument identifiers to use to resolve the transaction to a unique instrument. | | TransactionDate|dateorcutlabel|The date of the transaction. | | SettlementDate|dateorcutlabel|The settlement date of the transaction. | | Units|decimal|The number of units transacted in the associated instrument. | | TransactionPrice|transactionprice|The price for each unit of the transacted instrument in the transaction currency. | | TotalConsideration|currencyandamount|The total value of the transaction in the settlement currency. | | ExchangeRate|decimal|The exchange rate between the transaction and settlement currency. For example if the transaction currency is in USD and the settlement currency is in GBP this this the USD/GBP rate. | | TransactionCurrency|currency|The transaction currency. | | Properties|map|Set of unique transaction properties and associated values to store with the transaction. Each property must be from the 'Transaction' domain. | | CounterpartyId|string|The identifier for the counterparty of the transaction. | | Source|string|The source of the transaction. This is used to look up the appropriate transaction group set in the transaction type configuration. | From these fields, the following values can be calculated * **Transaction value in Transaction currency**: TotalConsideration / ExchangeRate * **Transaction value in Portfolio currency**: Transaction value in Transaction currency * TradeToPortfolioRate #### Example Transactions ##### A Common Purchase Example Three example transactions are shown in the table below. They represent a purchase of USD denominated IBM shares within a Sterling denominated portfolio. * The first two transactions are for separate buy and fx trades * Buying 500 IBM shares for $71,480.00 * A spot foreign exchange conversion to fund the IBM purchase. (Buy $71,480.00 for £54,846.60) * The third transaction is an alternate version of the above trades. Buying 500 IBM shares and settling directly in Sterling. | Column | Buy Trade | Fx Trade | Buy Trade with foreign Settlement | | ----- | ----- | ----- | ----- | | TransactionId | FBN00001 | FBN00002 | FBN00003 | | Type | Buy | FxBuy | Buy | | InstrumentIdentifiers | { \"figi\", \"BBG000BLNNH6\" } | { \"CCY\", \"CCY_USD\" } | { \"figi\", \"BBG000BLNNH6\" } | | TransactionDate | 2018-08-02 | 2018-08-02 | 2018-08-02 | | SettlementDate | 2018-08-06 | 2018-08-06 | 2018-08-06 | | Units | 500 | 71480 | 500 | | TransactionPrice | 142.96 | 1 | 142.96 | | TradeCurrency | USD | USD | USD | | ExchangeRate | 1 | 0.7673 | 0.7673 | | TotalConsideration.Amount | 71480.00 | 54846.60 | 54846.60 | | TotalConsideration.Currency | USD | GBP | GBP | | Trade/default/TradeToPortfolioRate* | 0.7673 | 0.7673 | 0.7673 | [* This is a property field] ##### A Forward FX Example LUSID has a flexible transaction modelling system, meaning there are a number of different ways of modelling forward fx trades. The default LUSID transaction types are FwdFxBuy and FwdFxSell. Using these transaction types, LUSID will generate two holdings for each Forward FX trade, one for each currency in the trade. An example Forward Fx trade to sell GBP for USD in a JPY-denominated portfolio is shown below: | Column | Forward 'Sell' Trade | Notes | | ----- | ----- | ---- | | TransactionId | FBN00004 | | | Type | FwdFxSell | | | InstrumentIdentifiers | { \"Instrument/default/Currency\", \"GBP\" } | | | TransactionDate | 2018-08-02 | | | SettlementDate | 2019-02-06 | Six month forward | | Units | 10000.00 | Units of GBP | | TransactionPrice | 1 | | | TradeCurrency | GBP | Currency being sold | | ExchangeRate | 1.3142 | Agreed rate between GBP and USD | | TotalConsideration.Amount | 13142.00 | Amount in the settlement currency, USD | | TotalConsideration.Currency | USD | Settlement currency | | Trade/default/TradeToPortfolioRate | 142.88 | Rate between trade currency, GBP and portfolio base currency, JPY | Please note that exactly the same economic behaviour could be modelled using the FwdFxBuy Transaction Type with the amounts and rates reversed. ### Holdings A holding represents a position in an instrument or cash on a given date. | Field|Type|Description | | ---|---|--- | | InstrumentUid|string|The unqiue Lusid Instrument Id (LUID) of the instrument that the holding is in. | | SubHoldingKeys|map|The sub-holding properties which identify the holding. Each property will be from the 'Transaction' domain. These are configured when a transaction portfolio is created. | | Properties|map|The properties which have been requested to be decorated onto the holding. These will be from the 'Instrument' or 'Holding' domain. | | HoldingType|string|The type of the holding e.g. Position, Balance, CashCommitment, Receivable, ForwardFX etc. | | Units|decimal|The total number of units of the holding. | | SettledUnits|decimal|The total number of settled units of the holding. | | Cost|currencyandamount|The total cost of the holding in the transaction currency. | | CostPortfolioCcy|currencyandamount|The total cost of the holding in the portfolio currency. | | Transaction|transaction|The transaction associated with an unsettled holding. | ## Corporate Actions Corporate actions are represented within LUSID in terms of a set of instrument-specific 'transitions'. These transitions are used to specify the participants of the corporate action, and the effect that the corporate action will have on holdings in those participants. ### Corporate Action | Field|Type|Description | | ---|---|--- | | CorporateActionCode|code|The unique identifier of this corporate action | | Description|string| | | AnnouncementDate|datetimeoffset|The announcement date of the corporate action | | ExDate|datetimeoffset|The ex date of the corporate action | | RecordDate|datetimeoffset|The record date of the corporate action | | PaymentDate|datetimeoffset|The payment date of the corporate action | | Transitions|corporateactiontransition[]|The transitions that result from this corporate action | ### Transition | Field|Type|Description | | ---|---|--- | | InputTransition|corporateactiontransitioncomponent|Indicating the basis of the corporate action - which security and how many units | | OutputTransitions|corporateactiontransitioncomponent[]|What will be generated relative to the input transition | ### Example Corporate Action Transitions #### A Dividend Action Transition In this example, for each share of IBM, 0.20 units (or 20 pence) of GBP are generated. | Column | Input Transition | Output Transition | | ----- | ----- | ----- | | Instrument Identifiers | { \"figi\" : \"BBG000BLNNH6\" } | { \"ccy\" : \"CCY_GBP\" } | | Units Factor | 1 | 0.20 | | Cost Factor | 1 | 0 | #### A Split Action Transition In this example, for each share of IBM, we end up with 2 units (2 shares) of IBM, with total value unchanged. | Column | Input Transition | Output Transition | | ----- | ----- | ----- | | Instrument Identifiers | { \"figi\" : \"BBG000BLNNH6\" } | { \"figi\" : \"BBG000BLNNH6\" } | | Units Factor | 1 | 2 | | Cost Factor | 1 | 1 | #### A Spinoff Action Transition In this example, for each share of IBM, we end up with 1 unit (1 share) of IBM and 3 units (3 shares) of Celestica, with 85% of the value remaining on the IBM share, and 5% in each Celestica share (15% total). | Column | Input Transition | Output Transition 1 | Output Transition 2 | | ----- | ----- | ----- | ----- | | Instrument Identifiers | { \"figi\" : \"BBG000BLNNH6\" } | { \"figi\" : \"BBG000BLNNH6\" } | { \"figi\" : \"BBG000HBGRF3\" } | | Units Factor | 1 | 1 | 3 | | Cost Factor | 1 | 0.85 | 0.15 | ## Reference Portfolios Reference portfolios are portfolios that contain constituents with weights. They are designed to represent entities such as indices and benchmarks. ### Constituents | Field|Type|Description | | ---|---|--- | | InstrumentIdentifiers|map|Unique instrument identifiers | | InstrumentUid|string|LUSID's internal unique instrument identifier, resolved from the instrument identifiers | | Currency|decimal| | | Weight|decimal| | | FloatingWeight|decimal| | ## Portfolio Groups Portfolio groups allow the construction of a hierarchy from portfolios and groups. Portfolio operations on the group are executed on an aggregated set of portfolios in the hierarchy. For example: * Global Portfolios _(group)_ * APAC _(group)_ * Hong Kong _(portfolio)_ * Japan _(portfolio)_ * Europe _(group)_ * France _(portfolio)_ * Germany _(portfolio)_ * UK _(portfolio)_ In this example **Global Portfolios** is a group that consists of an aggregate of **Hong Kong**, **Japan**, **France**, **Germany** and **UK** portfolios. ## Properties Properties are key-value pairs that can be applied to any entity within a domain (where a domain is `trade`, `portfolio`, `security` etc). Properties must be defined before use with a `PropertyDefinition` and can then subsequently be added to entities. ## Schema A detailed description of the entities used by the API and parameters for endpoints which take a JSON document can be retrieved via the `schema` endpoint. ## Meta data The following headers are returned on all responses from LUSID | Name | Purpose | | --- | --- | | lusid-meta-duration | Duration of the request | | lusid-meta-success | Whether or not LUSID considered the request to be successful | | lusid-meta-requestId | The unique identifier for the request | | lusid-schema-url | Url of the schema for the data being returned | | lusid-property-schema-url | Url of the schema for any properties | # Error Codes | Code|Name|Description | | ---|---|--- | | <a name=\"-10\">-10</a>|Server Configuration Error| | | <a name=\"-1\">-1</a>|Unknown error|An unexpected error was encountered on our side. | | <a name=\"102\">102</a>|Version Not Found| | | <a name=\"103\">103</a>|Api Rate Limit Violation| | | <a name=\"104\">104</a>|Instrument Not Found| | | <a name=\"105\">105</a>|Property Not Found| | | <a name=\"106\">106</a>|Portfolio Recursion Depth| | | <a name=\"108\">108</a>|Group Not Found| | | <a name=\"109\">109</a>|Portfolio Not Found| | | <a name=\"110\">110</a>|Property Schema Not Found| | | <a name=\"111\">111</a>|Portfolio Ancestry Not Found| | | <a name=\"112\">112</a>|Portfolio With Id Already Exists| | | <a name=\"113\">113</a>|Orphaned Portfolio| | | <a name=\"119\">119</a>|Missing Base Claims| | | <a name=\"121\">121</a>|Property Not Defined| | | <a name=\"122\">122</a>|Cannot Delete System Property| | | <a name=\"123\">123</a>|Cannot Modify Immutable Property Field| | | <a name=\"124\">124</a>|Property Already Exists| | | <a name=\"125\">125</a>|Invalid Property Life Time| | | <a name=\"126\">126</a>|Property Constraint Style Excludes Properties| | | <a name=\"127\">127</a>|Cannot Modify Default Data Type| | | <a name=\"128\">128</a>|Group Already Exists| | | <a name=\"129\">129</a>|No Such Data Type| | | <a name=\"130\">130</a>|Undefined Value For Data Type| | | <a name=\"131\">131</a>|Unsupported Value Type Defined On Data Type| | | <a name=\"132\">132</a>|Validation Error| | | <a name=\"133\">133</a>|Loop Detected In Group Hierarchy| | | <a name=\"134\">134</a>|Undefined Acceptable Values| | | <a name=\"135\">135</a>|Sub Group Already Exists| | | <a name=\"138\">138</a>|Price Source Not Found| | | <a name=\"139\">139</a>|Analytic Store Not Found| | | <a name=\"141\">141</a>|Analytic Store Already Exists| | | <a name=\"143\">143</a>|Client Instrument Already Exists| | | <a name=\"144\">144</a>|Duplicate In Parameter Set| | | <a name=\"147\">147</a>|Results Not Found| | | <a name=\"148\">148</a>|Order Field Not In Result Set| | | <a name=\"149\">149</a>|Operation Failed| | | <a name=\"150\">150</a>|Elastic Search Error| | | <a name=\"151\">151</a>|Invalid Parameter Value| | | <a name=\"153\">153</a>|Command Processing Failure| | | <a name=\"154\">154</a>|Entity State Construction Failure| | | <a name=\"155\">155</a>|Entity Timeline Does Not Exist| | | <a name=\"156\">156</a>|Concurrency Conflict Failure| | | <a name=\"157\">157</a>|Invalid Request| | | <a name=\"158\">158</a>|Event Publish Unknown| | | <a name=\"159\">159</a>|Event Query Failure| | | <a name=\"160\">160</a>|Blob Did Not Exist| | | <a name=\"162\">162</a>|Sub System Request Failure| | | <a name=\"163\">163</a>|Sub System Configuration Failure| | | <a name=\"165\">165</a>|Failed To Delete| | | <a name=\"166\">166</a>|Upsert Client Instrument Failure| | | <a name=\"167\">167</a>|Illegal As At Interval| | | <a name=\"168\">168</a>|Illegal Bitemporal Query| | | <a name=\"169\">169</a>|Invalid Alternate Id| | | <a name=\"170\">170</a>|Cannot Add Source Portfolio Property Explicitly| | | <a name=\"171\">171</a>|Entity Already Exists In Group| | | <a name=\"173\">173</a>|Entity With Id Already Exists| | | <a name=\"174\">174</a>|Derived Portfolio Details Do Not Exist| | | <a name=\"176\">176</a>|Portfolio With Name Already Exists| | | <a name=\"177\">177</a>|Invalid Transactions| | | <a name=\"178\">178</a>|Reference Portfolio Not Found| | | <a name=\"179\">179</a>|Duplicate Id| | | <a name=\"180\">180</a>|Command Retrieval Failure| | | <a name=\"181\">181</a>|Data Filter Application Failure| | | <a name=\"182\">182</a>|Search Failed| | | <a name=\"183\">183</a>|Movements Engine Configuration Key Failure| | | <a name=\"184\">184</a>|Fx Rate Source Not Found| | | <a name=\"185\">185</a>|Accrual Source Not Found| | | <a name=\"186\">186</a>|Access Denied| | | <a name=\"187\">187</a>|Invalid Identity Token| | | <a name=\"188\">188</a>|Invalid Request Headers| | | <a name=\"189\">189</a>|Price Not Found| | | <a name=\"190\">190</a>|Invalid Sub Holding Keys Provided| | | <a name=\"191\">191</a>|Duplicate Sub Holding Keys Provided| | | <a name=\"192\">192</a>|Cut Definition Not Found| | | <a name=\"193\">193</a>|Cut Definition Invalid| | | <a name=\"194\">194</a>|Time Variant Property Deletion Date Unspecified| | | <a name=\"195\">195</a>|Perpetual Property Deletion Date Specified| | | <a name=\"196\">196</a>|Time Variant Property Upsert Date Unspecified| | | <a name=\"197\">197</a>|Perpetual Property Upsert Date Specified| | | <a name=\"200\">200</a>|Invalid Unit For Data Type| | | <a name=\"201\">201</a>|Invalid Type For Data Type| | | <a name=\"202\">202</a>|Invalid Value For Data Type| | | <a name=\"203\">203</a>|Unit Not Defined For Data Type| | | <a name=\"204\">204</a>|Units Not Supported On Data Type| | | <a name=\"205\">205</a>|Cannot Specify Units On Data Type| | | <a name=\"206\">206</a>|Unit Schema Inconsistent With Data Type| | | <a name=\"207\">207</a>|Unit Definition Not Specified| | | <a name=\"208\">208</a>|Duplicate Unit Definitions Specified| | | <a name=\"209\">209</a>|Invalid Units Definition| | | <a name=\"210\">210</a>|Invalid Instrument Identifier Unit| | | <a name=\"211\">211</a>|Holdings Adjustment Does Not Exist| | | <a name=\"212\">212</a>|Could Not Build Excel Url| | | <a name=\"213\">213</a>|Could Not Get Excel Version| | | <a name=\"214\">214</a>|Instrument By Code Not Found| | | <a name=\"215\">215</a>|Entity Schema Does Not Exist| | | <a name=\"216\">216</a>|Feature Not Supported On Portfolio Type| | | <a name=\"217\">217</a>|Quote Not Found| | | <a name=\"218\">218</a>|Invalid Quote Identifier| | | <a name=\"219\">219</a>|Invalid Metric For Data Type| | | <a name=\"220\">220</a>|Invalid Instrument Definition| | | <a name=\"221\">221</a>|Instrument Upsert Failure| | | <a name=\"222\">222</a>|Reference Portfolio Request Not Supported| | | <a name=\"223\">223</a>|Transaction Portfolio Request Not Supported| | | <a name=\"224\">224</a>|Invalid Property Value Assignment| | | <a name=\"230\">230</a>|Transaction Type Not Found| | | <a name=\"231\">231</a>|Transaction Type Duplication| | | <a name=\"232\">232</a>|Portfolio Does Not Exist At Given Date| | | <a name=\"233\">233</a>|Query Parser Failure| | | <a name=\"234\">234</a>|Duplicate Constituent| | | <a name=\"235\">235</a>|Unresolved Instrument Constituent| | | <a name=\"236\">236</a>|Unresolved Instrument In Transition| | | <a name=\"237\">237</a>|Missing Side Definitions| | | <a name=\"299\">299</a>|Invalid Recipe| | | <a name=\"300\">300</a>|Missing Recipe| | | <a name=\"301\">301</a>|Dependencies| | | <a name=\"304\">304</a>|Portfolio Preprocess Failure| | | <a name=\"310\">310</a>|Valuation Engine Failure| | | <a name=\"311\">311</a>|Task Factory Failure| | | <a name=\"312\">312</a>|Task Evaluation Failure| | | <a name=\"313\">313</a>|Task Generation Failure| | | <a name=\"314\">314</a>|Engine Configuration Failure| | | <a name=\"315\">315</a>|Model Specification Failure| | | <a name=\"320\">320</a>|Market Data Key Failure| | | <a name=\"321\">321</a>|Market Resolver Failure| | | <a name=\"322\">322</a>|Market Data Failure| | | <a name=\"330\">330</a>|Curve Failure| | | <a name=\"331\">331</a>|Volatility Surface Failure| | | <a name=\"332\">332</a>|Volatility Cube Failure| | | <a name=\"350\">350</a>|Instrument Failure| | | <a name=\"351\">351</a>|Cash Flows Failure| | | <a name=\"352\">352</a>|Reference Data Failure| | | <a name=\"360\">360</a>|Aggregation Failure| | | <a name=\"361\">361</a>|Aggregation Measure Failure| | | <a name=\"370\">370</a>|Result Retrieval Failure| | | <a name=\"371\">371</a>|Result Processing Failure| | | <a name=\"372\">372</a>|Vendor Result Processing Failure| | | <a name=\"373\">373</a>|Vendor Result Mapping Failure| | | <a name=\"374\">374</a>|Vendor Library Unauthorised| | | <a name=\"375\">375</a>|Vendor Connectivity Error| | | <a name=\"376\">376</a>|Vendor Interface Error| | | <a name=\"377\">377</a>|Vendor Pricing Failure| | | <a name=\"378\">378</a>|Vendor Translation Failure| | | <a name=\"379\">379</a>|Vendor Key Mapping Failure| | | <a name=\"380\">380</a>|Vendor Reflection Failure| | | <a name=\"390\">390</a>|Attempt To Upsert Duplicate Quotes| | | <a name=\"391\">391</a>|Corporate Action Source Does Not Exist| | | <a name=\"392\">392</a>|Corporate Action Source Already Exists| | | <a name=\"393\">393</a>|Instrument Identifier Already In Use| | | <a name=\"394\">394</a>|Properties Not Found| | | <a name=\"395\">395</a>|Batch Operation Aborted| | | <a name=\"400\">400</a>|Invalid Iso4217 Currency Code| | | <a name=\"401\">401</a>|Cannot Assign Instrument Identifier To Currency| | | <a name=\"402\">402</a>|Cannot Assign Currency Identifier To Non Currency| | | <a name=\"403\">403</a>|Currency Instrument Cannot Be Deleted| | | <a name=\"404\">404</a>|Currency Instrument Cannot Have Economic Definition| | | <a name=\"405\">405</a>|Currency Instrument Cannot Have Lookthrough Portfolio| | | <a name=\"406\">406</a>|Cannot Create Currency Instrument With Multiple Identifiers| | | <a name=\"407\">407</a>|Specified Currency Is Undefined| | | <a name=\"410\">410</a>|Index Does Not Exist| | | <a name=\"411\">411</a>|Sort Field Does Not Exist| | | <a name=\"413\">413</a>|Negative Pagination Parameters| | | <a name=\"414\">414</a>|Invalid Search Syntax| | | <a name=\"415\">415</a>|Filter Execution Timeout| | | <a name=\"420\">420</a>|Side Definition Inconsistent| | | <a name=\"450\">450</a>|Invalid Quote Access Metadata Rule| | | <a name=\"451\">451</a>|Access Metadata Not Found| | | <a name=\"452\">452</a>|Invalid Access Metadata Identifier| | | <a name=\"460\">460</a>|Standard Resource Not Found| | | <a name=\"461\">461</a>|Standard Resource Conflict| | | <a name=\"462\">462</a>|Calendar Not Found| | | <a name=\"463\">463</a>|Date In A Calendar Not Found| | | <a name=\"464\">464</a>|Invalid Date Source Data| | | <a name=\"465\">465</a>|Invalid Timezone| | | <a name=\"601\">601</a>|Person Identifier Already In Use| | | <a name=\"602\">602</a>|Person Not Found| | | <a name=\"603\">603</a>|Cannot Set Identifier| | | <a name=\"617\">617</a>|Invalid Recipe Specification In Request| | | <a name=\"618\">618</a>|Inline Recipe Deserialisation Failure| | | <a name=\"619\">619</a>|Identifier Types Not Set For Entity| | | <a name=\"620\">620</a>|Cannot Delete All Client Defined Identifiers| | | <a name=\"650\">650</a>|The Order requested was not found.| | | <a name=\"654\">654</a>|The Allocation requested was not found.| | | <a name=\"655\">655</a>|Cannot build the fx forward target with the given holdings.| | | <a name=\"656\">656</a>|Group does not contain expected entities.| | | <a name=\"667\">667</a>|Relation definition already exists| | | <a name=\"673\">673</a>|Missing entitlements for entities in Group| | | <a name=\"674\">674</a>|Next Best Action not found| | | <a name=\"676\">676</a>|Relation definition not defined| | | <a name=\"677\">677</a>|Invalid entity identifier for relation| | | <a name=\"681\">681</a>|Sorting by specified field not supported|One or more of the provided fields to order by were either invalid or not supported. | | <a name=\"682\">682</a>|Too many fields to sort by|The number of fields to sort the data by exceeds the number allowed by the endpoint | | <a name=\"684\">684</a>|Sequence Not Found| | | <a name=\"685\">685</a>|Sequence Already Exists| | | <a name=\"686\">686</a>|Non-cycling sequence has been exhausted| | | <a name=\"687\">687</a>|Legal Entity Identifier Already In Use| | | <a name=\"688\">688</a>|Legal Entity Not Found| | | <a name=\"689\">689</a>|The supplied pagination token is invalid| | | <a name=\"690\">690</a>|Property Type Is Not Supported| | | <a name=\"691\">691</a>|Multiple Tax-lots For Currency Type Is Not Supported| | # noqa: E501
The version of the OpenAPI document: 0.11.2220
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ResourceListOfValueType(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'values': 'list[ValueType]',
'href': 'str',
'links': 'list[Link]'
}
attribute_map = {
'values': 'values',
'href': 'href',
'links': 'links'
}
required_map = {
'values': 'required',
'href': 'optional',
'links': 'optional'
}
def __init__(self, values=None, href=None, links=None): # noqa: E501
"""
ResourceListOfValueType - a model defined in OpenAPI
:param values: (required)
:type values: list[lusid.ValueType]
:param href:
:type href: str
:param links:
:type links: list[lusid.Link]
""" # noqa: E501
self._values = None
self._href = None
self._links = None
self.discriminator = None
self.values = values
self.href = href
self.links = links
@property
def values(self):
"""Gets the values of this ResourceListOfValueType. # noqa: E501
:return: The values of this ResourceListOfValueType. # noqa: E501
:rtype: list[ValueType]
"""
return self._values
@values.setter
def values(self, values):
"""Sets the values of this ResourceListOfValueType.
:param values: The values of this ResourceListOfValueType. # noqa: E501
:type: list[ValueType]
"""
if values is None:
raise ValueError("Invalid value for `values`, must not be `None`") # noqa: E501
self._values = values
@property
def href(self):
"""Gets the href of this ResourceListOfValueType. # noqa: E501
:return: The href of this ResourceListOfValueType. # noqa: E501
:rtype: str
"""
return self._href
@href.setter
def href(self, href):
"""Sets the href of this ResourceListOfValueType.
:param href: The href of this ResourceListOfValueType. # noqa: E501
:type: str
"""
self._href = href
@property
def links(self):
"""Gets the links of this ResourceListOfValueType. # noqa: E501
:return: The links of this ResourceListOfValueType. # noqa: E501
:rtype: list[Link]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this ResourceListOfValueType.
:param links: The links of this ResourceListOfValueType. # noqa: E501
:type: list[Link]
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResourceListOfValueType):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 182.28022 | 28,439 | 0.682773 |
79436d4f640ec16220716633ea1d0436609e6fe0 | 75 | py | Python | wxwork_auth_oauth/__init__.py | rainbow-studio-solution/wxwork | 344a0a8f8f0ac364101a1bb4a98c132588118839 | [
"MulanPSL-1.0"
] | 9 | 2021-01-02T15:42:21.000Z | 2021-08-13T08:09:16.000Z | wxwork_auth_oauth/__init__.py | rainbow-studio-solution/wxwork | 344a0a8f8f0ac364101a1bb4a98c132588118839 | [
"MulanPSL-1.0"
] | null | null | null | wxwork_auth_oauth/__init__.py | rainbow-studio-solution/wxwork | 344a0a8f8f0ac364101a1bb4a98c132588118839 | [
"MulanPSL-1.0"
] | 4 | 2021-01-11T04:57:07.000Z | 2021-05-21T06:01:55.000Z | # -*- coding: utf-8 -*-
from . import models
from . import controllers
| 9.375 | 25 | 0.626667 |
79436e4e5ba1681caa1cf71e2a9788db7e877c95 | 4,303 | py | Python | cyder/management/commands/lib/dhcpd_compare/parser.py | drkitty/cyder | 1babc443cc03aa51fa3c1015bcd22f0ea2e5f0f8 | [
"BSD-3-Clause"
] | 6 | 2015-04-16T23:18:22.000Z | 2020-08-25T22:50:13.000Z | cyder/management/commands/lib/dhcpd_compare/parser.py | drkitty/cyder | 1babc443cc03aa51fa3c1015bcd22f0ea2e5f0f8 | [
"BSD-3-Clause"
] | 267 | 2015-01-01T00:18:57.000Z | 2015-10-14T00:01:13.000Z | cyder/management/commands/lib/dhcpd_compare/parser.py | drkitty/cyder | 1babc443cc03aa51fa3c1015bcd22f0ea2e5f0f8 | [
"BSD-3-Clause"
] | 5 | 2015-03-23T00:57:09.000Z | 2019-09-09T22:42:37.000Z | from parsley import wrapGrammar
from ometa.grammar import OMeta
from ometa.runtime import OMetaBase
from constants import *
from dhcp_objects import (Host, Pool, Parameter, Option, Subnet, Group, Allow,
Deny, ClientClass)
from utils import prepare_arguments, is_mac, is_ip
import sys
from bisect import insort_left, bisect_left
from ipaddr import IPv4Address, IPv6Address
from sys import stdout
def strip_comments(content):
return "".join(line[:line.find('#')] if '#' in line else line for line in content)
grammar = open('cyder/management/commands/lib/dhcpd_compare/'
'isc.parsley').read()
class DhcpConfigContext(
OMeta.makeGrammar(
grammar,
name='DhcpConfigContext').createParserClass(OMetaBase, globals())):
stdout = stdout
def __init__(self, *args, **kwargs):
self.hosts = set()
self.subnets = set()
self.groups = set()
self.classes = set()
self.options = set()
self.parameters = set()
super(DhcpConfigContext, self).__init__(*args, **kwargs)
def apply_attrs(self, host, attrs):
for attr in attrs:
host.add_option_or_parameter(attr)
def add_subnet(self, subnet):
self.subnets.add(subnet)
def add_host(self, host):
self.hosts.add(host)
def add_group(self, group):
self.groups.add(group)
def add_option(self, option):
self.options.add(option)
def add_parameter(self, parameter):
self.parameters.add(parameter)
def add_class(self, dhcp_class):
self.classes.add(dhcp_class)
def add_subclass(self, name, mac):
for _class in self.classes:
if _class.name == name:
_class.add_subclass(mac)
return True
return False
def __eq__(self, other):
return self.hosts == other.hosts and \
self.subnets == other.subnets and \
self.groups == other.groups and \
self.classes == other.classes
def diff(self, other):
if not (self == other):
first_subnets = self.subnets - other.subnets
second_subnets = other.subnets - self.subnets
first_hosts = self.hosts - other.hosts
second_hosts = other.hosts - self.hosts
first_groups = self.groups - other.groups
second_groups = other.groups - self.groups
first_classes = self.classes - other.classes
second_classes = other.classes - self.classes
if first_subnets:
print '### Subnets found only in the first config ###'
for subnet in first_subnets:
stdout.write(str(subnet))
if second_subnets:
print '### Subnets found only in the second config ###'
for subnet in second_subnets:
stdout.write(str(subnet))
if first_hosts:
print '### Hosts found only in the first config ###'
for host in first_hosts:
stdout.write(str(host))
if second_hosts:
print '### Hosts found only in the second config ###'
for host in second_hosts:
stdout.write(str(host))
if first_groups:
print '### Groups found only in the first config ###'
for group in first_groups:
stdout.write(str(group))
if second_groups:
print '### Groups found only in the second config ###'
for group in second_groups:
stdout.write(str(group))
if first_classes:
print '### Classes found only in the first config ###'
for klass in first_classes:
stdout.write(str(klass))
if second_classes:
print '### Classes found only in the second config ###'
for klass in second_classes:
stdout.write(str(klass))
iscgrammar = wrapGrammar(DhcpConfigContext)
def compare(file1, file2):
parse1 = iscgrammar(strip_comments(open(file1))).GlobalParse()
parse2 = iscgrammar(strip_comments(open(file2))).GlobalParse()
parse1.diff(parse2)
| 34.150794 | 86 | 0.585638 |
79436e5fe9692a0fba7726d128aa89f8245fb76b | 610 | py | Python | tests/nlp/encoders/test_label_encoder.py | ChristophAlt/pytorch-quasar | 7b957b1b4cba83677b415d752dcac6acf682f15b | [
"BSD-3-Clause"
] | 4 | 2018-10-02T20:20:26.000Z | 2019-07-26T12:57:26.000Z | tests/nlp/encoders/test_label_encoder.py | ChristophAlt/pytorch-quasar | 7b957b1b4cba83677b415d752dcac6acf682f15b | [
"BSD-3-Clause"
] | null | null | null | tests/nlp/encoders/test_label_encoder.py | ChristophAlt/pytorch-quasar | 7b957b1b4cba83677b415d752dcac6acf682f15b | [
"BSD-3-Clause"
] | null | null | null | from quasar.nlp.encoders import LabelEncoder
def test_label_encoder():
input_ = 'label_b'
sample = ['label_a', 'label_b']
encoder = LabelEncoder(sample)
output = encoder.encode(input_)
assert encoder.vocab_size == 2
assert len(output) == 1
assert encoder.decode(output) == input_
def test_label_encoder_sequence():
input_ = ['label_b', 'label_c']
sample = ['label_a', 'label_b', 'label_c']
encoder = LabelEncoder(sample)
output = encoder.encode(input_)
assert encoder.vocab_size == 3
assert len(output) == 2
assert encoder.decode(output) == input_
| 25.416667 | 46 | 0.67541 |
79436f2d2db70c9225f587c4836538dc75ce15b2 | 15,129 | py | Python | webapp/ENV/lib/python3.6/site-packages/dask/dataframe/io/hdf.py | linkehub/linkehub_api | b5579a6156d6ae01f0cbd8526c8ed8264b5deeb5 | [
"MIT"
] | null | null | null | webapp/ENV/lib/python3.6/site-packages/dask/dataframe/io/hdf.py | linkehub/linkehub_api | b5579a6156d6ae01f0cbd8526c8ed8264b5deeb5 | [
"MIT"
] | 1 | 2021-04-30T20:41:53.000Z | 2021-04-30T20:41:53.000Z | webapp/ENV/lib/python3.6/site-packages/dask/dataframe/io/hdf.py | linkehub/linkehub_api | b5579a6156d6ae01f0cbd8526c8ed8264b5deeb5 | [
"MIT"
] | 1 | 2018-07-06T03:48:08.000Z | 2018-07-06T03:48:08.000Z | from __future__ import absolute_import, division, print_function
from fnmatch import fnmatch
from glob import glob
import os
import uuid
from warnings import warn
import pandas as pd
from toolz import merge
from .io import _link
from ..core import DataFrame, new_dd_object
from ... import multiprocessing
from ...base import tokenize, compute_as_if_collection
from ...bytes.utils import build_name_function
from ...compatibility import PY3
from ...context import _globals
from ...delayed import Delayed, delayed
from ...local import get_sync
from ...utils import effective_get, get_scheduler_lock
def _pd_to_hdf(pd_to_hdf, lock, args, kwargs=None):
""" A wrapper function around pd_to_hdf that enables locking"""
if lock:
lock.acquire()
try:
pd_to_hdf(*args, **kwargs)
finally:
if lock:
lock.release()
return None
def to_hdf(df, path, key, mode='a', append=False, get=None,
name_function=None, compute=True, lock=None, dask_kwargs={},
**kwargs):
""" Store Dask Dataframe to Hierarchical Data Format (HDF) files
This is a parallel version of the Pandas function of the same name. Please
see the Pandas docstring for more detailed information about shared keyword
arguments.
This function differs from the Pandas version by saving the many partitions
of a Dask DataFrame in parallel, either to many files, or to many datasets
within the same file. You may specify this parallelism with an asterix
``*`` within the filename or datapath, and an optional ``name_function``.
The asterix will be replaced with an increasing sequence of integers
starting from ``0`` or with the result of calling ``name_function`` on each
of those integers.
This function only supports the Pandas ``'table'`` format, not the more
specialized ``'fixed'`` format.
Parameters
----------
path: string
Path to a target filename. May contain a ``*`` to denote many filenames
key: string
Datapath within the files. May contain a ``*`` to denote many locations
name_function: function
A function to convert the ``*`` in the above options to a string.
Should take in a number from 0 to the number of partitions and return a
string. (see examples below)
compute: bool
Whether or not to execute immediately. If False then this returns a
``dask.Delayed`` value.
lock: Lock, optional
Lock to use to prevent concurrency issues. By default a
``threading.Lock``, ``multiprocessing.Lock`` or ``SerializableLock``
will be used depending on your scheduler if a lock is required. See
dask.utils.get_scheduler_lock for more information about lock
selection.
**other:
See pandas.to_hdf for more information
Examples
--------
Save Data to a single file
>>> df.to_hdf('output.hdf', '/data') # doctest: +SKIP
Save data to multiple datapaths within the same file:
>>> df.to_hdf('output.hdf', '/data-*') # doctest: +SKIP
Save data to multiple files:
>>> df.to_hdf('output-*.hdf', '/data') # doctest: +SKIP
Save data to multiple files, using the multiprocessing scheduler:
>>> df.to_hdf('output-*.hdf', '/data', get=dask.multiprocessing.get) # doctest: +SKIP
Specify custom naming scheme. This writes files as
'2000-01-01.hdf', '2000-01-02.hdf', '2000-01-03.hdf', etc..
>>> from datetime import date, timedelta
>>> base = date(year=2000, month=1, day=1)
>>> def name_function(i):
... ''' Convert integer 0 to n to a string '''
... return base + timedelta(days=i)
>>> df.to_hdf('*.hdf', '/data', name_function=name_function) # doctest: +SKIP
Returns
-------
None: if compute == True
delayed value: if compute == False
See Also
--------
read_hdf:
to_parquet:
"""
name = 'to-hdf-' + uuid.uuid1().hex
pd_to_hdf = getattr(df._partition_type, 'to_hdf')
single_file = True
single_node = True
# if path is string, format using i_name
if isinstance(path, str):
if path.count('*') + key.count('*') > 1:
raise ValueError("A maximum of one asterisk is accepted in file "
"path and dataset key")
fmt_obj = lambda path, i_name: path.replace('*', i_name)
if '*' in path:
single_file = False
else:
if key.count('*') > 1:
raise ValueError("A maximum of one asterisk is accepted in "
"dataset key")
fmt_obj = lambda path, _: path
if '*' in key:
single_node = False
if 'format' in kwargs and kwargs['format'] not in ['t', 'table']:
raise ValueError("Dask only support 'table' format in hdf files.")
if mode not in ('a', 'w', 'r+'):
raise ValueError("Mode must be one of 'a', 'w' or 'r+'")
if name_function is None:
name_function = build_name_function(df.npartitions - 1)
# we guarantee partition order is preserved when its saved and read
# so we enforce name_function to maintain the order of its input.
if not (single_file and single_node):
formatted_names = [name_function(i) for i in range(df.npartitions)]
if formatted_names != sorted(formatted_names):
warn("To preserve order between partitions name_function "
"must preserve the order of its input")
# If user did not specify scheduler and write is sequential default to the
# sequential scheduler. otherwise let the _get method choose the scheduler
if get is None and 'get' not in _globals and single_node and single_file:
get = get_sync
# handle lock default based on whether we're writing to a single entity
_actual_get = effective_get(get, df)
if lock is None:
if not single_node:
lock = True
elif not single_file and _actual_get is not multiprocessing.get:
# if we're writing to multiple files with the multiprocessing
# scheduler we don't need to lock
lock = True
else:
lock = False
if lock:
lock = get_scheduler_lock(get, df)
kwargs.update({'format': 'table', 'mode': mode, 'append': append})
dsk = dict()
i_name = name_function(0)
dsk[(name, 0)] = (_pd_to_hdf, pd_to_hdf, lock,
[(df._name, 0), fmt_obj(path, i_name),
key.replace('*', i_name)], kwargs)
kwargs2 = kwargs.copy()
if single_file:
kwargs2['mode'] = 'a'
if single_node:
kwargs2['append'] = True
filenames = []
for i in range(0,df.npartitions):
i_name = name_function(i)
filenames.append(fmt_obj(path, i_name))
for i in range(1, df.npartitions):
i_name = name_function(i)
task = (_pd_to_hdf, pd_to_hdf, lock,
[(df._name, i), fmt_obj(path, i_name),
key.replace('*', i_name)], kwargs2)
if single_file:
link_dep = i - 1 if single_node else 0
task = (_link, (name, link_dep), task)
dsk[(name, i)] = task
dsk = merge(df.dask, dsk)
if single_file and single_node:
keys = [(name, df.npartitions - 1)]
else:
keys = [(name, i) for i in range(df.npartitions)]
if compute:
compute_as_if_collection(DataFrame, dsk, keys, get=get, **dask_kwargs)
return filenames
else:
return delayed([Delayed(k, dsk) for k in keys])
dont_use_fixed_error_message = """
This HDFStore is not partitionable and can only be use monolithically with
pandas. In the future when creating HDFStores use the ``format='table'``
option to ensure that your dataset can be parallelized"""
read_hdf_error_msg = """
The start and stop keywords are not supported when reading from more than
one file/dataset.
The combination is ambiguous because it could be interpreted as the starting
and stopping index per file, or starting and stopping index of the global
dataset."""
def _read_single_hdf(path, key, start=0, stop=None, columns=None,
chunksize=int(1e6), sorted_index=False, lock=None,
mode='a'):
"""
Read a single hdf file into a dask.dataframe. Used for each file in
read_hdf.
"""
def get_keys_stops_divisions(path, key, stop, sorted_index, chunksize):
"""
Get the "keys" or group identifiers which match the given key, which
can contain wildcards. This uses the hdf file identified by the
given path. Also get the index of the last row of data for each matched
key.
"""
with pd.HDFStore(path, mode=mode) as hdf:
keys = [k for k in hdf.keys() if fnmatch(k, key)]
stops = []
divisions = []
for k in keys:
storer = hdf.get_storer(k)
if storer.format_type != 'table':
raise TypeError(dont_use_fixed_error_message)
if stop is None:
stops.append(storer.nrows)
elif stop > storer.nrows:
raise ValueError("Stop keyword exceeds dataset number "
"of rows ({})".format(storer.nrows))
else:
stops.append(stop)
if sorted_index:
division = [storer.read_column('index', start=start, stop=start + 1)[0]
for start in range(0, storer.nrows, chunksize)]
division_end = storer.read_column('index',
start=storer.nrows - 1,
stop=storer.nrows)[0]
division.append(division_end)
divisions.append(division)
else:
divisions.append(None)
return keys, stops, divisions
def one_path_one_key(path, key, start, stop, columns, chunksize, division, lock):
"""
Get the data frame corresponding to one path and one key (which should
not contain any wildcards).
"""
empty = pd.read_hdf(path, key, mode=mode, stop=0)
if columns is not None:
empty = empty[columns]
token = tokenize((path, os.path.getmtime(path), key, start,
stop, empty, chunksize, division))
name = 'read-hdf-' + token
if empty.ndim == 1:
base = {'name': empty.name, 'mode': mode}
else:
base = {'columns': empty.columns, 'mode': mode}
if start >= stop:
raise ValueError("Start row number ({}) is above or equal to stop "
"row number ({})".format(start, stop))
def update(s):
new = base.copy()
new.update({'start': s, 'stop': s + chunksize})
return new
dsk = dict(((name, i), (_pd_read_hdf, path, key, lock,
update(s)))
for i, s in enumerate(range(start, stop, chunksize)))
if division:
divisions = division
else:
divisions = [None] * (len(dsk) + 1)
return new_dd_object(dsk, name, empty, divisions)
keys, stops, divisions = get_keys_stops_divisions(path, key, stop, sorted_index, chunksize)
if (start != 0 or stop is not None) and len(keys) > 1:
raise NotImplementedError(read_hdf_error_msg)
from ..multi import concat
return concat([one_path_one_key(path, k, start, s, columns, chunksize, d, lock)
for k, s, d in zip(keys, stops, divisions)])
def _pd_read_hdf(path, key, lock, kwargs):
""" Read from hdf5 file with a lock """
if lock:
lock.acquire()
try:
result = pd.read_hdf(path, key, **kwargs)
finally:
if lock:
lock.release()
return result
def read_hdf(pattern, key, start=0, stop=None, columns=None,
chunksize=1000000, sorted_index=False, lock=True, mode='a'):
"""
Read HDF files into a Dask DataFrame
Read hdf files into a dask dataframe. This function is like
``pandas.read_hdf``, except it can read from a single large file, or from
multiple files, or from multiple keys from the same file.
Parameters
----------
pattern : string, list
File pattern (string), buffer to read from, or list of file
paths. Can contain wildcards.
key : group identifier in the store. Can contain wildcards
start : optional, integer (defaults to 0), row number to start at
stop : optional, integer (defaults to None, the last row), row number to
stop at
columns : list of columns, optional
A list of columns that if not None, will limit the return
columns (default is None)
chunksize : positive integer, optional
Maximal number of rows per partition (default is 1000000).
sorted_index : boolean, optional
Option to specify whether or not the input hdf files have a sorted
index (default is False).
lock : boolean, optional
Option to use a lock to prevent concurrency issues (default is True).
mode : {'a', 'r', 'r+'}, default 'a'. Mode to use when opening file(s).
'r'
Read-only; no data can be modified.
'a'
Append; an existing file is opened for reading and writing,
and if the file does not exist it is created.
'r+'
It is similar to 'a', but the file must already exist.
Returns
-------
dask.DataFrame
Examples
--------
Load single file
>>> dd.read_hdf('myfile.1.hdf5', '/x') # doctest: +SKIP
Load multiple files
>>> dd.read_hdf('myfile.*.hdf5', '/x') # doctest: +SKIP
>>> dd.read_hdf(['myfile.1.hdf5', 'myfile.2.hdf5'], '/x') # doctest: +SKIP
Load multiple datasets
>>> dd.read_hdf('myfile.1.hdf5', '/*') # doctest: +SKIP
"""
if lock is True:
lock = get_scheduler_lock()
key = key if key.startswith('/') else '/' + key
if isinstance(pattern, str):
paths = sorted(glob(pattern))
else:
paths = pattern
if (start != 0 or stop is not None) and len(paths) > 1:
raise NotImplementedError(read_hdf_error_msg)
if chunksize <= 0:
raise ValueError("Chunksize must be a positive integer")
if (start != 0 or stop is not None) and sorted_index:
raise ValueError("When assuming pre-partitioned data, data must be "
"read in its entirety using the same chunksizes")
from ..multi import concat
return concat([_read_single_hdf(path, key, start=start, stop=stop,
columns=columns, chunksize=chunksize,
sorted_index=sorted_index,
lock=lock, mode=mode)
for path in paths])
if PY3:
from ..core import _Frame
_Frame.to_hdf.__doc__ = to_hdf.__doc__
| 35.850711 | 95 | 0.603609 |
79436f4cb36dbb6d556879c6278c8c5c8da6d81b | 14,469 | py | Python | fomautomator.py | johnmgregoire/2013JCAPDataProcess | 4533e72b09084860b3753d8864c75ac3c6b66b1a | [
"BSD-3-Clause"
] | 1 | 2018-06-03T01:15:16.000Z | 2018-06-03T01:15:16.000Z | fomautomator.py | johnmgregoire/2013JCAPDataProcess | 4533e72b09084860b3753d8864c75ac3c6b66b1a | [
"BSD-3-Clause"
] | null | null | null | fomautomator.py | johnmgregoire/2013JCAPDataProcess | 4533e72b09084860b3753d8864c75ac3c6b66b1a | [
"BSD-3-Clause"
] | null | null | null | # Allison Schubauer and Daisy Hernandez
# Created: 6/26/2013
# Last Updated: 7/25/2013
# For JCAP
"""
runs functions to produce figures of merit automatically, and
replaces dictionaries of data produced by old versions with
updated data
"""
import sys, os
import argparse
import cPickle as pickle
from multiprocessing import Process, Pool, Manager
from inspect import *
from rawdataparser import RAW_DATA_PATH
from qhtest import * # this also imports queue
import jsontranslator
import xmltranslator
import importlib
import distutils.util
import path_helpers
import fomautomator_helpers
import filerunner
import time
import datetime
from infodbcomm import infoDictfromDB
# the directory where the versions of the fomfunctions are
FUNC_DIR = os.path.normpath(os.path.expanduser("~/Desktop/Working Folder/AutoAnalysisFunctions"))
MOD_NAME = 'fomfunctions'
UPDATE_MOD_NAME = 'fomfunctions_update'
""" The FOMAutomator class provides the framework for processing data files
automatically. Its main method, defined in fom_commandline, can be accessed
through the command line. Alternatively, the FOMAutomator can be started
with the user interface in fomautomator_menu. The automator can either
process files in sequence on a single process or use Python's multiprocessing
framework to process files on an optimal number of processes for your
system (determined by Python). Both options are available through the command
line and user interface, but the command line defaults to running sequentially.
In both implementations, status messages and errors are logged to a file in the
output directory, and the FileRunner class (defined in filerunner.py) is used
to process each individual file.
"""
class FOMAutomator(object):
""" initializes the automator with all necessary information """
def __init__(self, rawDataFiles, versionName, prevVersion,funcModule,
updateModule, technique_names, srcDir, dstDir, rawDataDir,errorNum,jobname):
# initializing all the basic info
self.version = versionName
self.lastVersion = prevVersion
# the os.path.insert in the gui or in main is what makes
# we select the correct function module
self.funcMod = __import__(funcModule)
self.modname = funcModule
self.updatemod = updateModule
self.technique_names = technique_names
self.srcDir = srcDir
self.dstDir = dstDir
self.rawDataDir = rawDataDir
# the max number of errors allowed by the user
self.errorNum = errorNum
self.jobname = jobname
self.files = rawDataFiles
self.infoDicts=infoDictfromDB(self.files) # required to have keys 'reference_Eo' and 'technique_name'
self.processFuncs()
""" returns a dictionary with all of the parameters and batch variables
for the fom functions that will be run """
def processFuncs(self):
self.params = {}
self.funcDicts = {}
self.allFuncs = []
# if we have the type of experiment, we can just get the specific functions
if self.technique_names:
for tech in self.technique_names:
techDict = self.funcMod.EXPERIMENT_FUNCTIONS.get(tech)
if techDict:
[self.allFuncs.append(func) for func in techDict
if func not in self.allFuncs]
# if not we just get them all
else:
self.allFuncs = [f[0] for f in getmembers(self.funcMod, isfunction)]
# now that we have all the functions, we get all the parameters
for fname in self.allFuncs:
funcObj = [f[1] for f in getmembers(self.funcMod, isfunction) if
f[0] == fname][0]
funcdict = {'batchvars': [], 'params': []}
try:
dictargs = funcObj.func_code.co_argcount - len(funcObj.func_defaults)
funcdict['numdictargs'] = dictargs
arglist = zip(funcObj.func_code.co_varnames[dictargs:],
funcObj.func_defaults)
except TypeError: # if there are no keyword arguments
dictargs = funcObj.func_code.co_argcount
funcdict['numdictargs'] = dictargs
arglist = []
# note: we're assuming any string argument to the functions that the user wrote is data
# for example t = 't(s)' in the function would mean t is equal to the raw data column t(s)
for arg, val in arglist:
if isinstance(val, list):
funcdict['batchvars'].append(arg)
funcdict['~'+arg] = val
elif isinstance(val, str):
funcdict[arg] = val
## ---- VSHIFT -------------------------------------------------------
## elif arg == 'vshift':
## pass
## -------------------------------------------------------------------
else:
self.params[fname+'_'+arg] = val
funcdict['params'].append(arg)
funcdict['#'+arg] = val
self.funcDicts[fname] = funcdict
""" Returns a list of functions and their parameters, which can be
changed by the user if running fomautomator_menu. This function is
only called by fomautomator_menu. If 'default' is true, the default
parameters defined in the fom functions file are used; otherwise, the
parameters are requested from the user. """
def requestParams(self,default=True):
funcNames = self.funcDicts.keys()
funcNames.sort()
params_full = [[ fname, [(pname,type(pval),pval) for pname in self.funcDicts[fname]['params']
for pval in [self.funcDicts[fname]['#'+pname]]]]
for fname in funcNames if self.funcDicts[fname]['params'] != []]
if not default:
return params_full
else:
funcs_names = [func[0] for func in params_full for num in range(len(func[1]))]
params_and_answers = [[pname,pval] for func in params_full for (pname,ptype,pval) in func[1]]
return funcs_names, params_and_answers
""" If the parameter values were changed by fomautomator_menu, save
the changed values in the automator's parameter dictionary and
function dictionary. """
def setParams(self, funcNames, paramsList):
for fname, params in zip(funcNames, paramsList):
fdict = self.funcDicts[fname]
param,val = params
fdict['#'+param] = val
self.params[fname+'_'+param] = val
""" processes the files in parallel, logs status messages and errors """
def runParallel(self):
# the path to which to log - will change depending on the way
# processing ends and if a statusFile with the same
# name already exists
statusFileName = path_helpers.createPathWExtention(self.dstDir,self.jobname,".run")
# set up the manager and objects required for logging due to multiprocessing
pmanager = Manager()
# this queue takes messages from individual processes and passes them
# to the QueueListener
loggingQueue = pmanager.Queue()
processPool = Pool()
# handler for the logging file
fileHandler = logging.FileHandler(statusFileName)
logFormat = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fileHandler.setFormatter(logFormat)
# the QueueListener takes messages from the logging queue and passes
# them through another queue to the fileHandler (logs safely because
# only this main process writes to the fileHandler)
fileLogger = QueueListener(loggingQueue, fileHandler)
fileLogger.start()
# keep track of when processing started
bTime = time.time()
# the jobs to process each of the files
# jobs = [(loggingQueue, filename, self.version, self.lastVersion,
# self.modname, self.updatemod,self.params, self.funcDicts,
# self.srcDir, self.dstDir, self.rawDataDir)
# for filename in self.files]
jobs = [(loggingQueue, filename, self.version, self.lastVersion,
self.modname, self.updatemod, self.params, self.funcDicts,
self.srcDir, self.dstDir, self.rawDataDir, infodict['reference_Eo'], infodict['technique_name']) \
for (filename, infodict)
in zip(self.files, self.infoDicts)]
processPool.map(makeFileRunner, jobs)
# keep track of when processing ended
eTime = time.time()
timeStamp = time.strftime('%Y%m%d%H%M%S',time.gmtime())
# clean up the pool
processPool.close()
processPool.join()
root = logging.getLogger()
if fileLogger.errorCount > self.errorNum:
root.info("The job encountered %d errors and the max number of them allowed is %d" %(fileLogger.errorCount,self.errorNum))
root.info("Processed for %s H:M:S" %(str(datetime.timedelta(seconds=eTime-bTime)),))
fileLogger.stop()
fileHandler.close()
if fileLogger.errorCount > self.errorNum:
try:
os.rename(statusFileName, path_helpers.createPathWExtention(self.dstDir,self.jobname,".error"))
except:
os.rename(statusFileName, path_helpers.createPathWExtention(self.dstDir,self.jobname+timeStamp,".error"))
else:
try:
os.rename(statusFileName, path_helpers.createPathWExtention(self.dstDir,self.jobname,".done"))
except:
os.rename(statusFileName, path_helpers.createPathWExtention(self.dstDir,self.jobname+timeStamp,".done"))
""" runs the files in order on a single process and logs errors """
def runSequentially(self):
# set up everything needed for logging the errors
root = logging.getLogger()
root.setLevel(logging.INFO)
statusFileName = path_helpers.createPathWExtention(self.dstDir,self.jobname,".run")
fileHandler = logging.FileHandler(statusFileName)
logFormat = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fileHandler.setFormatter(logFormat)
root.addHandler(fileHandler)
numberOfFiles = len(self.files)
numberOfErrors = 0
bTime= time.time()
# The file processing occurs here
logQueue = None
for i, (filename, infodict) in enumerate(zip(self.files, self.infoDicts)):
if numberOfErrors > self.errorNum:
root.info("The job encountered %d errors and the max number of them allowed is %d" %(numberOfErrors,self.errorNum))
break
try:
# returns 1 if file was processed and 0 if file was skipped
exitcode = filerunner.FileRunner(logQueue,filename, self.version,
self.lastVersion, self.modname, self.updatemod,
self.params, self.funcDicts,self.srcDir,
self.dstDir, self.rawDataDir, infodict['reference_Eo'], infodict['technique_name'])
if exitcode.exitSuccess:
root.info('File %s completed %d/%d' %(os.path.basename(filename),i+1,numberOfFiles))
except Exception as someException:
# root.exception will log an ERROR with printed traceback;
# root.error will log an ERROR without traceback
# root.exception(someException)
root.error('Exception raised in file %s:\n' %filename +repr(someException))
numberOfErrors +=1
exitcode = -1
eTime= time.time()
root.info("Processed for %s H:M:S" %(str(datetime.timedelta(seconds=eTime-bTime)),))
timeStamp = time.strftime('%Y%m%d%H%M%S',time.gmtime())
# closing the fileHandler is important or else we cannot rename the file
root.removeHandler(fileHandler)
fileHandler.close()
# the renaming of the run file based on the way the file processing ended
if numberOfErrors > self.errorNum:
try:
os.rename(statusFileName, path_helpers.createPathWExtention(self.dstDir,self.jobname,".error"))
except:
os.rename(statusFileName, path_helpers.createPathWExtention(self.dstDir,self.jobname+timeStamp,".error"))
else:
try:
os.rename(statusFileName, path_helpers.createPathWExtention(self.dstDir,self.jobname,".done"))
except:
os.rename(statusFileName, path_helpers.createPathWExtention(self.dstDir,self.jobname+timeStamp,".done"))
""" This function is started in a separate process by ProcessPool.map.
Here, a FileRunner is created and a processHandler is added temporarily
to log status or error messages from the FileRunner. The argument to
makeFileRunner is the list of arguments to the FileRunner, but this function
is only allowed a single argument because of ProcessPool.map. """
def makeFileRunner(args):
# the multiprocessing queue
queue = args[0]
filename = os.path.basename(args[1])
root = logging.getLogger()
root.setLevel(logging.INFO)
# a logging handler which sends messages to the multiprocessing queue
processHandler = QueueHandler(queue)
root.addHandler(processHandler)
try:
# exitSuccess is 1 if file was processed or 0 if file was too short
exitcode = filerunner.FileRunner(*args)
# if file was processed, write logging message
if exitcode.exitSuccess:
root.info('File %s completed' %filename)
except Exception as someException:
# root.exception will log an ERROR with printed traceback;
# root.error will log an ERROR without traceback
root.error('Exception raised in file %s:\n' %filename +repr(someException))
#root.exception(someException)
exitcode = -1
finally:
# remove handler for this file (because a new handler is created
# for every file)
root.removeHandler(processHandler)
return exitcode
| 47.130293 | 134 | 0.632939 |
79436f9f7cbc8886ea9ff50c62390ccdc2a56869 | 391 | py | Python | altcore/core/wsgi.py | artkra/altcore | de3e2f0520c55f0390e9964155818a78110bbdb1 | [
"MIT"
] | null | null | null | altcore/core/wsgi.py | artkra/altcore | de3e2f0520c55f0390e9964155818a78110bbdb1 | [
"MIT"
] | null | null | null | altcore/core/wsgi.py | artkra/altcore | de3e2f0520c55f0390e9964155818a78110bbdb1 | [
"MIT"
] | null | null | null | """
WSGI config for altcore project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "altcore.settings")
application = get_wsgi_application()
| 23 | 78 | 0.785166 |
794370347d544fb058ff684fcf7b44e8576cbfc2 | 98 | py | Python | broadcast-db/broadcastdb/common/models/__init__.py | faical-yannick-congo/broadcast-backend | bf16c047696c27bc53dd40fb8370b46f7cf9a4cb | [
"MIT"
] | null | null | null | broadcast-db/broadcastdb/common/models/__init__.py | faical-yannick-congo/broadcast-backend | bf16c047696c27bc53dd40fb8370b46f7cf9a4cb | [
"MIT"
] | null | null | null | broadcast-db/broadcastdb/common/models/__init__.py | faical-yannick-congo/broadcast-backend | bf16c047696c27bc53dd40fb8370b46f7cf9a4cb | [
"MIT"
] | null | null | null | """SMS Broadcast Service Mongoengine Database Models.
"""
from .broadcast_model import Broadcast
| 19.6 | 53 | 0.795918 |
7943714bdca40de97b70c9a58e851a3ff904c4bd | 6,742 | py | Python | qa/rpc-tests/test_framework/test_framework.py | zahidaliayub/protoncoin-PROTON | bf415b60cbec0e52e174878adf0c5344b860723e | [
"MIT"
] | 5 | 2018-04-06T15:38:50.000Z | 2018-05-18T09:29:13.000Z | qa/rpc-tests/test_framework/test_framework.py | zahidaliayub/protoncoin-PROTON | bf415b60cbec0e52e174878adf0c5344b860723e | [
"MIT"
] | null | null | null | qa/rpc-tests/test_framework/test_framework.py | zahidaliayub/protoncoin-PROTON | bf415b60cbec0e52e174878adf0c5344b860723e | [
"MIT"
] | 18 | 2018-03-05T15:18:36.000Z | 2018-05-22T01:44:46.000Z | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Base class for RPC testing
# Add python-bitcoinrpc to module search path:
import os
import sys
import shutil
import tempfile
import traceback
from .util import (
initialize_chain,
assert_equal,
start_nodes,
connect_nodes_bi,
sync_blocks,
sync_mempools,
stop_nodes,
wait_bitcoinds,
enable_coverage,
check_json_precision,
initialize_chain_clean,
)
from .authproxy import AuthServiceProxy, JSONRPCException
class BitcoinTestFramework(object):
# These may be over-ridden by subclasses:
def run_test(self):
for node in self.nodes:
assert_equal(node.getblockcount(), 200)
assert_equal(node.getbalance(), 25*500)
def add_options(self, parser):
pass
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain(self.options.tmpdir)
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir)
def setup_network(self, split = False):
self.nodes = self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
# If we joined network halves, connect the nodes from the joint
# on outward. This ensures that chains are properly reorganised.
if not split:
connect_nodes_bi(self.nodes, 1, 2)
sync_blocks(self.nodes[1:3])
sync_mempools(self.nodes[1:3])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 2, 3)
self.is_network_split = split
self.sync_all()
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
assert not self.is_network_split
stop_nodes(self.nodes)
wait_bitcoinds()
self.setup_network(True)
def sync_all(self):
if self.is_network_split:
sync_blocks(self.nodes[:2])
sync_blocks(self.nodes[2:])
sync_mempools(self.nodes[:2])
sync_mempools(self.nodes[2:])
else:
sync_blocks(self.nodes)
sync_mempools(self.nodes)
def join_network(self):
"""
Join the (previously split) network halves together.
"""
assert self.is_network_split
stop_nodes(self.nodes)
wait_bitcoinds()
self.setup_network(False)
def main(self):
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave protonds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop protonds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing protond/proton-cli (default: %default)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
if self.options.trace_rpc:
import logging
logging.basicConfig(level=logging.DEBUG)
if self.options.coveragedir:
enable_coverage(self.options.coveragedir)
os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']
check_json_precision()
success = False
try:
if not os.path.isdir(self.options.tmpdir):
os.makedirs(self.options.tmpdir)
self.setup_chain()
self.setup_network()
self.run_test()
success = True
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
except AssertionError as e:
print("Assertion failed: "+ str(e))
traceback.print_tb(sys.exc_info()[2])
except Exception as e:
print("Unexpected exception caught during testing: " + repr(e))
traceback.print_tb(sys.exc_info()[2])
if not self.options.noshutdown:
print("Stopping nodes")
stop_nodes(self.nodes)
wait_bitcoinds()
else:
print("Note: protonds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown:
print("Cleaning up")
shutil.rmtree(self.options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
# Test framework for doing p2p comparison testing, which sets up some bitcoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class ComparisonTestFramework(BitcoinTestFramework):
# Can override the num_nodes variable to indicate how many nodes to run.
def __init__(self):
self.num_nodes = 2
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("PROTOND", "protond"),
help="bitcoind binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("PROTOND", "protond"),
help="bitcoind binary to use for reference nodes (if any)")
def setup_chain(self):
print "Initializing test directory "+self.options.tmpdir
initialize_chain_clean(self.options.tmpdir, self.num_nodes)
def setup_network(self):
self.nodes = start_nodes(
self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']] * self.num_nodes,
binary=[self.options.testbinary] +
[self.options.refbinary]*(self.num_nodes-1))
| 34.050505 | 100 | 0.617324 |
794371eccc65a4086e60fe5da73d78c99db83b66 | 1,070 | py | Python | ch06/prepare_airplanes.py | wikibook/agile-data-science | 7769fc2d6c810e9f1a64e45d3684e9260d99d983 | [
"MIT"
] | 1 | 2020-02-13T05:45:13.000Z | 2020-02-13T05:45:13.000Z | ch06/prepare_airplanes.py | wikibook/agile-data-science | 7769fc2d6c810e9f1a64e45d3684e9260d99d983 | [
"MIT"
] | null | null | null | ch06/prepare_airplanes.py | wikibook/agile-data-science | 7769fc2d6c810e9f1a64e45d3684e9260d99d983 | [
"MIT"
] | null | null | null | # FAA N-Number 검색 레코드를 적재
faa_tail_number_inquiry = spark.read.json('data/faa_tail_number_inquiry.jsonl')
faa_tail_number_inquiry.show()
# 레코드 수 세기
faa_tail_number_inquiry.count()
# 고유한 꼬리 번호 적재
unique_tail_numbers = spark.read.json('data/tail_numbers.jsonl')
unique_tail_numbers.show()
# 꼬리 번호를 조회 레코드와 조인
tail_num_plus_inquiry = unique_tail_numbers.join(
faa_tail_number_inquiry,
unique_tail_numbers.TailNum == faa_tail_number_inquiry.TailNum,
)
tail_num_plus_inquiry = tail_num_plus_inquiry.drop(unique_tail_numbers.TailNum)
tail_num_plus_inquiry.show()
# 불필요한 필드를 제거하고 꼬리 번호를 추가한 조회 레코드를 저장
tail_num_plus_inquiry.registerTempTable("tail_num_plus_inquiry")
airplanes = spark.sql("""SELECT
TailNum AS TailNum,
engine_manufacturer AS EngineManufacturer,
engine_model AS EngineModel,
manufacturer AS Manufacturer,
mfr_year AS ManufacturerYear,
model AS Model,
owner AS Owner,
owner_state AS OwnerState,
serial_number AS SerialNumber
FROM
tail_num_plus_inquiry""")
airplanes.repartition(1).write.mode("overwrite").json('data/airplanes.json')
| 29.722222 | 79 | 0.808411 |
79437273867f2d753c5e37507a5a67577dd0f65f | 21,929 | py | Python | aries_cloudagent/messaging/decorators/attach_decorator.py | zanost/aries-cloudagent-python | 9541edfb957742e9db8082981c8397b45f8de987 | [
"Apache-2.0"
] | null | null | null | aries_cloudagent/messaging/decorators/attach_decorator.py | zanost/aries-cloudagent-python | 9541edfb957742e9db8082981c8397b45f8de987 | [
"Apache-2.0"
] | 8 | 2021-07-27T01:13:56.000Z | 2022-03-15T01:12:40.000Z | aries_cloudagent/messaging/decorators/attach_decorator.py | zanost/aries-cloudagent-python | 9541edfb957742e9db8082981c8397b45f8de987 | [
"Apache-2.0"
] | 1 | 2022-02-02T17:05:27.000Z | 2022-02-02T17:05:27.000Z | """
A message decorator for attachments.
An attach decorator embeds content or specifies appended content.
"""
import json
import uuid
from typing import Any, Mapping, Sequence, Tuple, Union
from marshmallow import EXCLUDE, fields, pre_load
from ...wallet.base import BaseWallet
from ...wallet.util import (
b58_to_bytes,
b64_to_bytes,
b64_to_str,
bytes_to_b58,
bytes_to_b64,
set_urlsafe_b64,
str_to_b64,
unpad,
)
from ...wallet.key_type import KeyType
from ...did.did_key import DIDKey
from ..models.base import BaseModel, BaseModelError, BaseModelSchema
from ..valid import (
BASE64,
BASE64URL_NO_PAD,
INDY_ISO8601_DATETIME,
JWS_HEADER_KID,
SHA256,
UUIDFour,
)
class AttachDecoratorDataJWSHeader(BaseModel):
"""Attach decorator data JWS header."""
class Meta:
"""AttachDecoratorDataJWS metadata."""
schema_class = "AttachDecoratorDataJWSHeaderSchema"
def __init__(self, kid: str):
"""Initialize JWS header to include in attach decorator data."""
self.kid = kid
def __eq__(self, other: Any):
"""Compare equality with another."""
return type(self) == type(other) and self.kid == other.kid
class AttachDecoratorDataJWSHeaderSchema(BaseModelSchema):
"""Attach decorator data JWS header schema."""
class Meta:
"""Attach decorator data schema metadata."""
model_class = AttachDecoratorDataJWSHeader
unknown = EXCLUDE
kid = fields.Str(
description="Key identifier, in W3C did:key or DID URL format",
required=True,
**JWS_HEADER_KID,
)
class AttachDecoratorData1JWS(BaseModel):
"""Single Detached JSON Web Signature for inclusion in attach decorator data."""
class Meta:
"""AttachDecoratorData1JWS metadata."""
schema_class = "AttachDecoratorData1JWSSchema"
def __init__(
self,
*,
header: AttachDecoratorDataJWSHeader,
protected: str = None,
signature: str,
):
"""Initialize flattened single-JWS to include in attach decorator data."""
self.header = header
self.protected = protected
self.signature = signature
def __eq__(self, other: Any):
"""Compare equality with another."""
return (
type(self) == type(other)
and self.header == other.header
and self.protected == other.protected
and self.signature == other.signature
)
class AttachDecoratorData1JWSSchema(BaseModelSchema):
"""Single attach decorator data JWS schema."""
class Meta:
"""Single attach decorator data JWS schema metadata."""
model_class = AttachDecoratorData1JWS
unknown = EXCLUDE
header = fields.Nested(AttachDecoratorDataJWSHeaderSchema, required=True)
protected = fields.Str(
description="protected JWS header", required=False, **BASE64URL_NO_PAD
)
signature = fields.Str(description="signature", required=True, **BASE64URL_NO_PAD)
class AttachDecoratorDataJWS(BaseModel):
"""
Detached JSON Web Signature for inclusion in attach decorator data.
May hold one signature in flattened format, or multiple signatures in the
"signatures" member.
"""
class Meta:
"""AttachDecoratorDataJWS metadata."""
schema_class = "AttachDecoratorDataJWSSchema"
def __init__(
self,
*,
header: AttachDecoratorDataJWSHeader = None,
protected: str = None,
signature: str = None,
signatures: Sequence[AttachDecoratorData1JWS] = None,
):
"""Initialize JWS to include in attach decorator multi-sig data."""
self.header = header
self.protected = protected
self.signature = signature
self.signatures = signatures
class AttachDecoratorDataJWSSchema(BaseModelSchema):
"""Schema for detached JSON Web Signature for inclusion in attach decorator data."""
class Meta:
"""Metadata for schema for detached JWS for inclusion in attach deco data."""
model_class = AttachDecoratorDataJWS
unknown = EXCLUDE
@pre_load
def validate_single_xor_multi_sig(self, data: Mapping, **kwargs):
"""Ensure model is for either 1 or many sigatures, not mishmash of both."""
if "signatures" in data:
if any(k in data for k in ("header", "protected", "signature")):
raise BaseModelError(
"AttachDecoratorDataJWSSchema: "
"JWS must be flattened or general JSON serialization format"
)
elif not all(k in data for k in ("header", "signature")):
raise BaseModelError(
"AttachDecoratorDataJWSSchema: "
"Flattened JSON serialization format must include header and signature"
)
return data
header = fields.Nested(
AttachDecoratorDataJWSHeaderSchema,
required=False, # packed in signatures if multi-sig
)
protected = fields.Str(
description="protected JWS header",
required=False, # packed in signatures if multi-sig
**BASE64URL_NO_PAD,
)
signature = fields.Str(
description="signature",
required=False, # packed in signatures if multi-sig
**BASE64URL_NO_PAD,
)
signatures = fields.List(
fields.Nested(AttachDecoratorData1JWSSchema),
required=False, # only present if multi-sig
description="List of signatures",
)
def did_key(verkey: str) -> str:
"""Qualify verkey into DID key if need be."""
if verkey.startswith("did:key:"):
return verkey
return DIDKey.from_public_key_b58(verkey, KeyType.ED25519).did
def raw_key(verkey: str) -> str:
"""Strip qualified key to raw key if need be."""
if verkey.startswith("did:key:"):
return DIDKey.from_did(verkey).public_key_b58
return verkey
class AttachDecoratorData(BaseModel):
"""Attach decorator data."""
class Meta:
"""AttachDecoratorData metadata."""
schema_class = "AttachDecoratorDataSchema"
def __init__(
self,
*,
jws_: AttachDecoratorDataJWS = None,
sha256_: str = None,
links_: Union[Sequence[str], str] = None,
base64_: str = None,
json_: dict = None,
):
"""
Initialize decorator data.
Specify content for one of:
- `base64_`
- `json_`
- `links_`.
Args:
jws_: detached JSON Web Signature over base64 or linked attachment content
sha256_: optional sha-256 hash for content
links_: URL or list of URLs
base64_: base64 encoded content for inclusion
json_: dict content for inclusion as json
"""
if jws_:
self.jws_ = jws_
assert not json_
if base64_:
self.base64_ = base64_
elif json_:
self.json_ = json_
else:
assert isinstance(links_, (str, Sequence))
self.links_ = [links_] if isinstance(links_, str) else list(links_)
if sha256_:
self.sha256_ = sha256_
@property
def base64(self):
"""Accessor for base64 decorator data, or None."""
return getattr(self, "base64_", None)
@property
def jws(self):
"""Accessor for JWS, or None."""
return getattr(self, "jws_", None)
@property
def signatures(self) -> int:
"""Accessor for number of signatures."""
if self.jws:
return 1 if self.jws.signature else len(self.jws.signatures)
return 0
@property
def signed(self) -> bytes:
"""Accessor for signed content (payload), None for unsigned."""
return (
b64_to_bytes(unpad(set_urlsafe_b64(self.base64, urlsafe=True)))
if self.signatures
else None
)
def header_map(self, idx: int = 0, jose: bool = True) -> Mapping:
"""
Accessor for header info at input index, default 0 or unique for singly-signed.
Args:
idx: index of interest, zero-based (default 0)
jose: True to return unprotected header attributes, False for protected only
"""
if not self.signatures:
return None
headers = {}
sig = self.jws if self.jws.signature else self.jws.signatures[idx]
if sig.protected:
headers.update(json.loads(b64_to_str(sig.protected, urlsafe=True)))
if jose:
headers.update(sig.header.serialize())
return headers
@property
def json(self):
"""Accessor for json decorator data, or None."""
return getattr(self, "json_", None)
@property
def links(self):
"""Accessor for links decorator data, or None."""
return getattr(self, "links_", None)
@property
def sha256(self):
"""Accessor for sha256 decorator data, or None."""
return getattr(self, "sha256_", None)
async def sign(
self,
verkeys: Union[str, Sequence[str]],
wallet: BaseWallet,
):
"""
Sign base64 data value of attachment.
Args:
verkeys: verkey(s) of the signing party (in raw or DID key format)
wallet: The wallet to use for the signature
"""
def build_protected(verkey: str):
"""Build protected header."""
return str_to_b64(
json.dumps(
{
"alg": "EdDSA",
"kid": did_key(verkey),
"jwk": {
"kty": "OKP",
"crv": "Ed25519",
"x": bytes_to_b64(
b58_to_bytes(raw_key(verkey)), urlsafe=True, pad=False
),
"kid": did_key(verkey),
},
}
),
urlsafe=True,
pad=False,
)
assert self.base64
b64_payload = unpad(set_urlsafe_b64(self.base64, True))
if isinstance(verkeys, str) or (
isinstance(verkeys, Sequence) and len(verkeys) == 1
):
kid = did_key(verkeys if isinstance(verkeys, str) else verkeys[0])
verkey = raw_key(verkeys if isinstance(verkeys, str) else verkeys[0])
b64_protected = build_protected(verkey)
b64_sig = bytes_to_b64(
await wallet.sign_message(
message=(b64_protected + "." + b64_payload).encode("ascii"),
from_verkey=verkey,
),
urlsafe=True,
pad=False,
)
self.jws_ = AttachDecoratorDataJWS.deserialize(
{
"header": AttachDecoratorDataJWSHeader(kid).serialize(),
"protected": b64_protected, # always present by construction
"signature": b64_sig,
}
)
else:
jws = {"signatures": []}
for verkey in verkeys:
b64_protected = build_protected(verkey)
b64_sig = bytes_to_b64(
await wallet.sign_message(
message=(b64_protected + "." + b64_payload).encode("ascii"),
from_verkey=raw_key(verkey),
),
urlsafe=True,
pad=False,
)
jws["signatures"].append(
{
"protected": b64_protected, # always present by construction
"header": {"kid": did_key(verkey)},
"signature": b64_sig,
}
)
self.jws_ = AttachDecoratorDataJWS.deserialize(jws)
async def verify(self, wallet: BaseWallet) -> bool:
"""
Verify the signature(s).
Args:
wallet: Wallet to use to verify signature
Returns:
True if verification succeeds else False
"""
assert self.jws
b64_payload = unpad(set_urlsafe_b64(self.base64, True))
for sig in [self.jws] if self.signatures == 1 else self.jws.signatures:
b64_protected = sig.protected
b64_sig = sig.signature
protected = json.loads(b64_to_str(b64_protected, urlsafe=True))
assert "jwk" in protected and protected["jwk"].get("kty") == "OKP"
sign_input = (b64_protected + "." + b64_payload).encode("ascii")
b_sig = b64_to_bytes(b64_sig, urlsafe=True)
verkey = bytes_to_b58(b64_to_bytes(protected["jwk"]["x"], urlsafe=True))
if not await wallet.verify_message(
sign_input, b_sig, verkey, KeyType.ED25519
):
return False
return True
def __eq__(self, other):
"""Compare equality with another."""
for attr in ["jws_", "sha256_", "base64_"]:
if getattr(self, attr, None) != getattr(other, attr, None):
return False
if set(getattr(self, "links_", [])) != set(getattr(other, "links_", [])):
return False
return True
class AttachDecoratorDataSchema(BaseModelSchema):
"""Attach decorator data schema."""
class Meta:
"""Attach decorator data schema metadata."""
model_class = AttachDecoratorData
unknown = EXCLUDE
@pre_load
def validate_data_spec(self, data: Mapping, **kwargs):
"""Ensure model chooses exactly one of base64, json, or links."""
if len(set(data.keys()) & {"base64", "json", "links"}) != 1:
raise BaseModelError(
"AttachDecoratorSchema: choose exactly one of base64, json, or links"
)
return data
base64_ = fields.Str(
description="Base64-encoded data", required=False, data_key="base64", **BASE64
)
jws_ = fields.Nested(
AttachDecoratorDataJWSSchema,
description="Detached Java Web Signature",
required=False,
data_key="jws",
)
json_ = fields.Dict(
description="JSON-serialized data",
required=False,
example='{"sample": "content"}',
data_key="json",
)
links_ = fields.List(
fields.Str(example="https://link.to/data"),
description="List of hypertext links to data",
required=False,
data_key="links",
)
sha256_ = fields.Str(
description="SHA256 hash (binhex encoded) of content",
required=False,
data_key="sha256",
**SHA256,
)
class AttachDecorator(BaseModel):
"""Class representing attach decorator."""
class Meta:
"""AttachDecorator metadata."""
schema_class = "AttachDecoratorSchema"
def __init__(
self,
*,
ident: str = None,
description: str = None,
filename: str = None,
mime_type: str = None,
lastmod_time: str = None,
byte_count: int = None,
data: AttachDecoratorData,
**kwargs,
):
"""
Initialize an AttachDecorator instance.
The attachment decorator allows for embedding or appending
content to a message.
Args:
ident ("@id" in serialization): identifier for the appendage
mime_type ("mime-type" in serialization): MIME type for attachment
filename: file name
lastmod_time: last modification time, "%Y-%m-%d %H:%M:%SZ"
description: content description
data: payload, as per `AttachDecoratorData`
"""
super().__init__(**kwargs)
self.ident = ident
self.description = description
self.filename = filename
self.mime_type = mime_type
self.lastmod_time = lastmod_time
self.byte_count = byte_count
self.data = data
@property
def content(self) -> Union[Mapping, Tuple[Sequence[str], str]]:
"""
Return attachment content.
Returns:
data attachment, decoded if necessary and json-loaded, or data links
and sha-256 hash.
"""
if hasattr(self.data, "base64_"):
return json.loads(b64_to_bytes(self.data.base64))
elif hasattr(self.data, "json_"):
return self.data.json
elif hasattr(self.data, "links_"):
return ( # fetching would be async; we want a property here
self.data.links,
self.data.sha256,
)
else:
return None
@classmethod
def data_base64(
cls,
mapping: Mapping,
*,
ident: str = None,
description: str = None,
filename: str = None,
lastmod_time: str = None,
byte_count: int = None,
):
"""
Create `AttachDecorator` instance on base64-encoded data from input mapping.
Given mapping, JSON dump, base64-encode, and embed
it as data; mark `application/json` MIME type.
Args:
mapping: (dict) data structure; e.g., indy production
ident: optional attachment identifier (default random UUID4)
description: optional attachment description
filename: optional attachment filename
lastmod_time: optional attachment last modification time
byte_count: optional attachment byte count
"""
return AttachDecorator(
ident=ident or str(uuid.uuid4()),
description=description,
filename=filename,
mime_type="application/json",
lastmod_time=lastmod_time,
byte_count=byte_count,
data=AttachDecoratorData(
base64_=bytes_to_b64(json.dumps(mapping).encode())
),
)
@classmethod
def data_json(
cls,
mapping: dict,
*,
ident: str = None,
description: str = None,
filename: str = None,
lastmod_time: str = None,
byte_count: int = None,
):
"""
Create `AttachDecorator` instance on json-encoded data from input mapping.
Given message object (dict), JSON dump, and embed
it as data; mark `application/json` MIME type.
Args:
mapping: (dict) data structure; e.g., Aries message
ident: optional attachment identifier (default random UUID4)
description: optional attachment description
filename: optional attachment filename
lastmod_time: optional attachment last modification time
byte_count: optional attachment byte count
"""
return AttachDecorator(
ident=ident or str(uuid.uuid4()),
description=description,
filename=filename,
mime_type="application/json",
lastmod_time=lastmod_time,
byte_count=byte_count,
data=AttachDecoratorData(json_=mapping),
)
@classmethod
def data_links(
cls,
links: Union[str, Sequence[str]],
sha256: str = None,
*,
ident: str = None,
mime_type: str = None,
description: str = None,
filename: str = None,
lastmod_time: str = None,
byte_count: int = None,
):
"""
Create `AttachDecorator` instance on json-encoded data from input mapping.
Given message object (dict), JSON dump, and embed
it as data; mark `application/json` MIME type.
Args:
links: URL or list of URLs
sha256: optional sha-256 hash for content
ident: optional attachment identifier (default random UUID4)
mime_type: optional MIME type
description: optional attachment description
filename: optional attachment filename
lastmod_time: optional attachment last modification time
byte_count: optional attachment byte count
"""
return AttachDecorator(
ident=ident or str(uuid.uuid4()),
description=description,
filename=filename,
mime_type=mime_type or "application/json",
lastmod_time=lastmod_time,
byte_count=byte_count,
data=AttachDecoratorData(sha256_=sha256, links_=links),
)
class AttachDecoratorSchema(BaseModelSchema):
"""Attach decorator schema used in serialization/deserialization."""
class Meta:
"""AttachDecoratorSchema metadata."""
model_class = AttachDecorator
unknown = EXCLUDE
ident = fields.Str(
description="Attachment identifier",
example=UUIDFour.EXAMPLE,
required=False,
allow_none=False,
data_key="@id",
)
mime_type = fields.Str(
description="MIME type",
example="image/png",
required=False,
data_key="mime-type",
)
filename = fields.Str(
description="File name", example="IMG1092348.png", required=False
)
byte_count = fields.Int(
description="Byte count of data included by reference",
example=1234,
required=False,
strict=True,
)
lastmod_time = fields.Str(
description="Hint regarding last modification datetime, in ISO-8601 format",
required=False,
**INDY_ISO8601_DATETIME,
)
description = fields.Str(
description="Human-readable description of content",
example="view from doorway, facing east, with lights off",
required=False,
)
data = fields.Nested(
AttachDecoratorDataSchema,
required=True,
)
| 30.080933 | 88 | 0.582015 |
794372a1f4b0dcc41bcf0da611f5bc2ec9301973 | 8,125 | py | Python | tensorflow/contrib/nccl/python/ops/nccl_ops.py | zhangyujing/tensorflow | c7a04561fb8972fb64907acc5f10f3c6d4cef9f2 | [
"Apache-2.0"
] | 54 | 2018-05-29T19:52:44.000Z | 2021-11-30T10:41:12.000Z | tensorflow/contrib/nccl/python/ops/nccl_ops.py | hiflyin/tensorflow | 8e86dcd1c59bb3f1dc978fcb5398dd3f2f51d9ad | [
"Apache-2.0"
] | 20 | 2017-12-06T18:20:54.000Z | 2021-11-10T09:54:23.000Z | tensorflow/contrib/nccl/python/ops/nccl_ops.py | hiflyin/tensorflow | 8e86dcd1c59bb3f1dc978fcb5398dd3f2f51d9ad | [
"Apache-2.0"
] | 31 | 2018-09-11T02:17:17.000Z | 2021-12-15T10:33:35.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for GPU collective operations implemented using NVIDIA nccl."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.contrib.nccl.ops import gen_nccl_ops
from tensorflow.contrib.util import loader
from tensorflow.python.eager import context
from tensorflow.python.framework import device
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
_nccl_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile('_nccl_ops.so'))
def all_sum(tensors):
"""Returns a list of tensors with the all-reduce sum across `tensors`.
The computation is done with an all-reduce operation, so if only some of the
returned tensors are evaluated then the computation will hang.
Args:
tensors: The input tensors across which to sum; must be assigned
to GPU devices.
Returns:
List of tensors, each with the sum of the input tensors, where tensor i has
the same device as `tensors[i]`.
"""
return _apply_all_reduce('sum', tensors)
@ops.RegisterGradient('NcclAllReduce')
def _all_sum_grad(op, grad):
"""The gradients for `all_sum`.
Args:
op: The `all_sum` `Operation` that we are differentiating.
grad: Gradient with respect to the output of the `all_sum` op.
Returns:
The gradient with respect to the output of `all_sum`.
Raises:
LookupError: If `reduction` is not `sum`.
"""
if op.get_attr('reduction') != 'sum':
raise LookupError('No gradient defined for NcclAllReduce except sum.')
_check_device(grad, expected=op.device)
num_devices = op.get_attr('num_devices')
shared_name = op.get_attr('shared_name') + '_grad'
with ops.device(op.device):
return gen_nccl_ops.nccl_all_reduce(
input=grad,
reduction='sum',
num_devices=num_devices,
shared_name=shared_name)
def all_prod(tensors):
"""Returns a list of tensors with the all-reduce product across `tensors`.
The computation is done with an all-reduce operation, so if only some of the
returned tensors are evaluated then the computation will hang.
Args:
tensors: The input tensors across which to multiply; must be assigned
to GPU devices.
Returns:
List of tensors, each with the product of the input tensors, where tensor i
has the same device as `tensors[i]`.
"""
return _apply_all_reduce('prod', tensors)
def all_min(tensors):
"""Returns a list of tensors with the all-reduce min across `tensors`.
The computation is done with an all-reduce operation, so if only some of the
returned tensors are evaluated then the computation will hang.
Args:
tensors: The input tensors across which to reduce; must be assigned
to GPU devices.
Returns:
List of tensors, each with the minimum of the input tensors, where tensor i
has the same device as `tensors[i]`.
"""
return _apply_all_reduce('min', tensors)
def all_max(tensors):
"""Returns a list of tensors with the all-reduce max across `tensors`.
The computation is done with an all-reduce operation, so if only some of the
returned tensors are evaluated then the computation will hang.
Args:
tensors: The input tensors across which to reduce; must be assigned
to GPU devices.
Returns:
List of tensors, each with the maximum of the input tensors, where tensor i
has the same device as `tensors[i]`.
"""
return _apply_all_reduce('max', tensors)
def reduce_sum(tensors):
"""Returns a tensor with the reduce sum across `tensors`.
The computation is done with a reduce operation, so only one tensor is
returned.
Args:
tensors: The input tensors across which to sum; must be assigned
to GPU devices.
Returns:
A tensor containing the sum of the input tensors.
Raises:
LookupError: If context is not currently using a GPU device.
"""
return _apply_reduce('sum', tensors)
@ops.RegisterGradient('NcclReduce')
def _reduce_sum_grad(op, grad):
"""The gradients for input `Operation` of `reduce_sum`.
Args:
op: The `sum send` `Operation` that we are differentiating.
grad: Gradient with respect to the output of the `reduce_sum` op.
Returns:
The gradient with respect to the input of `reduce_sum` op.
Raises:
LookupError: If the reduction attribute of op is not `sum`.
"""
if op.get_attr('reduction') != 'sum':
raise LookupError('No gradient defined for NcclReduce except sum.')
_check_device(grad, expected=op.device)
with ops.device(op.device):
result = gen_nccl_ops.nccl_broadcast(input=grad, shape=grad.shape)
return [result] * len(op.inputs)
def broadcast(tensor):
"""Returns a tensor that can be efficiently transferred to other devices.
Args:
tensor: The tensor to send; must be assigned to a GPU device.
Returns:
A tensor with the value of `src_tensor`, which can be used as input to
ops on other GPU devices.
"""
_check_graph_mode()
_check_device(tensor)
with ops.device(tensor.device):
return gen_nccl_ops.nccl_broadcast(input=tensor, shape=tensor.shape)
@ops.RegisterGradient('NcclBroadcast')
def _broadcast_grad(op, accumulated_grad):
"""The gradients for input `Operation` of `broadcast`.
Args:
op: The `broadcast send` `Operation` that we are differentiating.
accumulated_grad: Accumulated gradients with respect to the output of the
`broadcast` op.
Returns:
Gradients with respect to the input of `broadcast`.
"""
# Grab inputs of accumulated_grad and replace accumulation with reduce_sum.
grads = [t for t in accumulated_grad.op.inputs]
for t in grads:
_check_device(t)
with ops.device(op.device):
return gen_nccl_ops.nccl_reduce(input=grads, reduction='sum')
def _apply_all_reduce(reduction, tensors):
"""Helper function for all_* functions."""
if not tensors:
raise ValueError('Must pass >0 tensors to all reduce operations')
_check_graph_mode()
shared_name = _get_shared_name()
res = []
for t in tensors:
_check_device(t)
with ops.device(t.device):
res.append(
gen_nccl_ops.nccl_all_reduce(
input=t,
reduction=reduction,
num_devices=len(tensors),
shared_name=shared_name))
return res
def _apply_reduce(reduction, tensors):
"""Helper function for reduce_* functions."""
if not tensors:
raise ValueError('Must pass >0 tensors to reduce operations')
_check_graph_mode()
for t in tensors:
_check_device(t)
result = gen_nccl_ops.nccl_reduce(input=tensors, reduction=reduction)
try:
next(t for t in tensors if t.device == result.device)
except StopIteration:
raise ValueError('One input tensor must be assigned to current device')
return result
_lock = threading.Lock()
_shared_name_counter = 0
def _get_shared_name():
global _shared_name_counter
with _lock:
val = _shared_name_counter
_shared_name_counter += 1
return 'c%s' % val
def _check_device(tensor, expected=None):
if not device.canonical_name(tensor.device):
raise ValueError('Device assignment required for nccl collective ops')
if expected and expected != tensor.device:
raise ValueError('Expected device %s, got %s' % (expected, tensor.device))
def _check_graph_mode():
if context.executing_eagerly():
raise ValueError('Nccl ops are not supported in eager mode')
| 29.871324 | 80 | 0.720123 |
79437319d9e87ccb06608cfd0c2654f6db2e39e2 | 457 | py | Python | plotly/validators/scatterpolargl/_customdatasrc.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | 2 | 2020-03-24T11:41:14.000Z | 2021-01-14T07:59:43.000Z | plotly/validators/scatterpolargl/_customdatasrc.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | null | null | null | plotly/validators/scatterpolargl/_customdatasrc.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | 4 | 2019-06-03T14:49:12.000Z | 2022-01-06T01:05:12.000Z | import _plotly_utils.basevalidators
class CustomdatasrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name='customdatasrc',
parent_name='scatterpolargl',
**kwargs
):
super(CustomdatasrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='none',
role='info',
**kwargs
)
| 24.052632 | 72 | 0.610503 |
794377aa6e0f26e11e87e5959fe9b78f9811a5bb | 2,632 | py | Python | tests/test_charm.py | AlexsJones/charmed-sfs | 4d94c803a1811660d24aa95326d675bde56377c5 | [
"Apache-2.0"
] | null | null | null | tests/test_charm.py | AlexsJones/charmed-sfs | 4d94c803a1811660d24aa95326d675bde56377c5 | [
"Apache-2.0"
] | null | null | null | tests/test_charm.py | AlexsJones/charmed-sfs | 4d94c803a1811660d24aa95326d675bde56377c5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 jonesax
# See LICENSE file for licensing details.
#
# Learn more about testing at: https://juju.is/docs/sdk/testing
import unittest
from unittest.mock import Mock
from charm import SfsCharm
from ops.model import ActiveStatus
from ops.testing import Harness
class TestCharm(unittest.TestCase):
def setUp(self):
self.harness = Harness(SfsCharm)
self.addCleanup(self.harness.cleanup)
self.harness.begin()
def test_config_changed(self):
self.assertEqual(list(self.harness.charm._stored.things), [])
self.harness.update_config({"thing": "foo"})
self.assertEqual(list(self.harness.charm._stored.things), ["foo"])
def test_action(self):
# the harness doesn't (yet!) help much with actions themselves
action_event = Mock(params={"fail": ""})
self.harness.charm._on_fortune_action(action_event)
self.assertTrue(action_event.set_results.called)
def test_action_fail(self):
action_event = Mock(params={"fail": "fail this"})
self.harness.charm._on_fortune_action(action_event)
self.assertEqual(action_event.fail.call_args, [("fail this",)])
def test_httpbin_pebble_ready(self):
# Check the initial Pebble plan is empty
initial_plan = self.harness.get_container_pebble_plan("httpbin")
self.assertEqual(initial_plan.to_yaml(), "{}\n")
# Expected plan after Pebble ready with default config
expected_plan = {
"services": {
"httpbin": {
"override": "replace",
"summary": "httpbin",
"command": "gunicorn -b 0.0.0.0:80 httpbin:app -k gevent",
"startup": "enabled",
"environment": {"thing": "🎁"},
}
},
}
# Get the httpbin container from the model
container = self.harness.model.unit.get_container("httpbin")
# Emit the PebbleReadyEvent carrying the httpbin container
self.harness.charm.on.httpbin_pebble_ready.emit(container)
# Get the plan now we've run PebbleReady
updated_plan = self.harness.get_container_pebble_plan("httpbin").to_dict()
# Check we've got the plan we expected
self.assertEqual(expected_plan, updated_plan)
# Check the service was started
service = self.harness.model.unit.get_container("httpbin").get_service("httpbin")
self.assertTrue(service.is_running())
# Ensure we set an ActiveStatus with no message
self.assertEqual(self.harness.model.unit.status, ActiveStatus())
| 39.283582 | 89 | 0.649316 |
794378f4465298f29eb5f72120e249fcdcedbf34 | 734 | py | Python | src/rust/iced-x86-py/src/iced_x86/EncodingKind.py | clayne/iced | dcd3db725b1137fec4d2bda9b17587cead49bf4d | [
"MIT"
] | 1,018 | 2018-09-07T20:12:43.000Z | 2021-01-17T18:41:10.000Z | src/rust/iced-x86-py/src/iced_x86/EncodingKind.py | clayne/iced | dcd3db725b1137fec4d2bda9b17587cead49bf4d | [
"MIT"
] | 127 | 2018-09-07T19:33:48.000Z | 2021-01-17T22:20:33.000Z | src/rust/iced-x86-py/src/iced_x86/EncodingKind.py | clayne/iced | dcd3db725b1137fec4d2bda9b17587cead49bf4d | [
"MIT"
] | 146 | 2018-09-09T12:38:30.000Z | 2021-01-18T23:37:11.000Z | # SPDX-License-Identifier: MIT
# Copyright (C) 2018-present iced project and contributors
# ⚠️This file was generated by GENERATOR!🦹♂️
# pylint: disable=invalid-name
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
"""
Instruction encoding
"""
import typing
if typing.TYPE_CHECKING:
from ._iced_x86_py import EncodingKind
else:
EncodingKind = int
LEGACY: EncodingKind = 0 # type: ignore
"""
Legacy encoding
"""
VEX: EncodingKind = 1 # type: ignore
"""
VEX encoding
"""
EVEX: EncodingKind = 2 # type: ignore
"""
EVEX encoding
"""
XOP: EncodingKind = 3 # type: ignore
"""
XOP encoding
"""
D3NOW: EncodingKind = 4 # type: ignore
"""
3DNow! encoding
"""
MVEX: EncodingKind = 5 # type: ignore
"""
MVEX encoding
"""
| 16.681818 | 58 | 0.702997 |
7943792e88a220c8a46d4c362f88faacf61c4cf9 | 9,375 | py | Python | eod/data/samplers/sampler.py | scott-mao/EOD | f10e64de86c0f356ebf5c7e923f4042eec4207b1 | [
"Apache-2.0"
] | 1 | 2022-01-12T01:51:39.000Z | 2022-01-12T01:51:39.000Z | eod/data/samplers/sampler.py | YZW-explorer/EOD | f10e64de86c0f356ebf5c7e923f4042eec4207b1 | [
"Apache-2.0"
] | null | null | null | eod/data/samplers/sampler.py | YZW-explorer/EOD | f10e64de86c0f356ebf5c7e923f4042eec4207b1 | [
"Apache-2.0"
] | null | null | null | # Standard Library
import math
from collections import defaultdict
# Import from third library
import numpy as np
import torch
from torch.utils.data.sampler import Sampler
from eod.utils.env.dist_helper import env, get_rank, get_world_size
from eod.utils.general.log_helper import default_logger as logger
from eod.utils.general.registry_factory import SAMPLER_REGISTRY
__all__ = ['DistributedSampler', 'LocalSampler', 'TestDistributedSampler']
@SAMPLER_REGISTRY.register('dist')
class DistributedSampler(Sampler):
"""
Sampler that restricts data loading to a subset of the dataset.
.. note:
Dataset is assumed to be of constant size.
Arguments:
dataset (Dataset): dataset used for sampling.
num_replicas (int): number of processes participating in distributed training, optional.
rank (int): rank of the current process within num_replicas, optional.
"""
def __init__(self, dataset, num_replicas=None, rank=None, fix_seed=False):
"""
Arguments:
- dataset (:obj:`dataset`): instance of dataset object
"""
if num_replicas is None:
num_replicas = env.world_size
if rank is None:
rank = env.rank
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
self.fix_seed = fix_seed
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch * (not self.fix_seed))
indices = list(torch.randperm(len(self.dataset), generator=g))
# add extra samples to make it evenly divisible
# indices += indices[:(self.total_size - len(indices))]
padding_size = self.total_size - len(indices)
if padding_size <= len(indices):
indices += indices[:padding_size]
else:
indices += (indices * math.ceil(padding_size / len(indices)))[:padding_size]
assert len(indices) == self.total_size
# subsample
offset = self.num_samples * self.rank
indices = indices[offset:offset + self.num_samples]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
@SAMPLER_REGISTRY.register('local')
class LocalSampler(Sampler):
def __init__(self, dataset, rank=None):
if rank is None:
rank = env.rank
self.dataset = dataset
self.rank = rank
self.epoch = 0
self.num_samples = len(self.dataset)
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch + self.rank)
indices = list(torch.randperm(self.num_samples, generator=g))
return iter(indices)
def set_epoch(self, epoch):
self.epoch = epoch
def __len__(self):
return self.num_samples
@SAMPLER_REGISTRY.register('dist_test')
class TestDistributedSampler(Sampler):
"""
Sampler that restricts data loading to a subset of the dataset, but won't align the total data
size to be divisible by world_size bacause this will lead to duplicate detecton results
"""
def __init__(self, dataset, num_replicas=None, rank=None):
"""
Arguments:
- dataset (:obj:`dataset`): instance of dataset object
"""
if num_replicas is None:
num_replicas = env.world_size
if rank is None:
rank = env.rank
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = len(range(rank, len(self.dataset), num_replicas))
self.total_size = len(self.dataset)
def __iter__(self):
indices = torch.arange(len(self.dataset))
indices = indices[self.rank::self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
@SAMPLER_REGISTRY.register('repeat_factor')
class DistributedRepeatFactorReSampler(Sampler):
""" Suitable for long-tail distribution datasets.
Refer to `LVIS <https://arxiv.org/abs/1908.03195>`_ paper
"""
def __init__(self, dataset, t=0.001, ri_mode='random_round', pn=0.5,
ri_if_empty=1, num_replicas=None, static_size=True, rank=None):
"""
Arguments:
- dataset (:obj:`Dataset`): dataset used for sampling.
- t (:obj:`float`): thresh- old that intuitively controls the point at which oversampling kicks in
- ri_mode (:obj:`str`): choices={floor, round, random_round, ceil, c_ceil_r_f_floor}, method to compute
repeat factor for one image
- pn (:obj:`float`): power number
- num_replicas (int): number of processes participating in distributed training, optional.
- rank (int): rank of the current process within num_replicas, optional.
"""
if num_replicas is None:
num_replicas = get_world_size()
if rank is None:
rank = get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
self.original_num_samples = self.num_samples
self.t = t
self.ri_mode = ri_mode
self.ri_if_empty = int(ri_if_empty)
self.pn = pn
self.static_size = static_size
self._prepare()
logger.info('init re-sampler, ri mode: {}'.format(self.ri_mode))
def _prepare(self):
# prepare re-sampling factor for category
rc = defaultdict(int)
img_num_per_class = defaultdict(int)
for cls, img_num in sorted(self.dataset.num_images_per_class.items()):
f = img_num / len(self.dataset)
img_num_per_class[cls] = img_num
rc[cls] = max(1, math.pow(self.t / f, self.pn))
logger.info('class id {}, image count {}, rc {}'.format(cls, img_num, rc[cls]))
self.rc = rc
def _compute_ri(self, img_index):
classes = self.dataset.get_image_classes(img_index)
ris = [self.rc[cls] for cls in classes]
if len(ris) == 0:
return self.ri_if_empty
if self.ri_mode == 'floor':
ri = int(max(ris))
elif self.ri_mode == 'round':
ri = round(max(ris))
elif self.ri_mode == 'random_round':
ri_max = max(ris)
p = ri_max - int(ri_max)
if np.random.rand() < p:
ri = math.ceil(ri_max)
else:
ri = int(ri_max)
elif self.ri_mode == 'ceil':
ri = math.ceil(max(ris))
elif self.ri_mode == 'c_ceil_r_f_floor':
max_ind = np.argmax(ris)
assert hasattr(self.dataset, 'lvis'), 'Only lvis dataset supportted for c_ceil_r_f_floor mode'
img_id = self.dataset.img_ids[img_index]
meta_annos = self.dataset.lvis.img_ann_map[img_id]
f = self.dataset.lvis.cats[meta_annos[max_ind]['category_id']]['frequency']
assert f in ['f', 'c', 'r']
if f in ['r', 'f']:
ri = int(max(ris))
else:
ri = math.ceil(max(ris))
else:
raise NotImplementedError
return ri
def _get_new_indices(self):
indices = []
for idx in range(len(self.dataset)):
ri = self._compute_ri(idx)
indices += [idx] * ri
logger.info('dataset size {}, indexes size {}'.format(len(self.dataset), len(indices)))
return indices
def __iter__(self):
# deterministically shuffle based on epoch
# generate a perm based using class-aware balance for this epoch
indices = self._get_new_indices()
# override num_sample total size
self.num_samples = int(math.ceil(len(indices) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
indices = np.random.RandomState(seed=self.epoch).permutation(np.array(indices))
indices = list(indices)
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
offset = self.num_samples * self.rank
indices = indices[offset:offset + self.num_samples]
assert len(indices) == self.num_samples
# convert to int because this array will be converted to torch.tensor,
# but torch.as_tensor dosen't support numpy.int64
# a = torch.tensor(np.float64(1)) # works
# b = torch.tensor(np.int64(1)) # fails
indices = list(map(lambda x: int(x), indices))
return iter(indices)
def __len__(self):
return self.original_num_samples
def set_epoch(self, epoch):
self.epoch = epoch
| 35.511364 | 115 | 0.620053 |
79437956e5607d5e0a159baa5ff8342e2b0ed99c | 2,283 | py | Python | server1/models/__init__.py | cchangr/Animal | f6701c9780dc06a3420bcec8664b3b89ed67174f | [
"MIT"
] | null | null | null | server1/models/__init__.py | cchangr/Animal | f6701c9780dc06a3420bcec8664b3b89ed67174f | [
"MIT"
] | null | null | null | server1/models/__init__.py | cchangr/Animal | f6701c9780dc06a3420bcec8664b3b89ed67174f | [
"MIT"
] | null | null | null | import json
from server1.utils import log
def save(data, path):
s = json.dumps(data, indent=2, ensure_ascii=False)
with open(path, 'w+', encoding='utf-8') as f:
log('save', path, s, data)
f.write(s)
def load(path):
with open(path, 'r', encoding='utf-8') as f:
s = f.read()
log('load', s)
return json.loads(s)
class Model(object):
@classmethod
def db_path(cls):
classname = cls.__name__
path = 'db/{}.txt'.format(classname)
return path
@classmethod
def new(cls, form):
m = cls(form)
return m
@classmethod
def all(cls):
path = cls.db_path()
models = load(path)
ms = [cls.new(m) for m in models]
return ms
@classmethod
def find_by(cls, **kwargs):
k, v = '', ''
for key, value in kwargs.items():
k, v = key, value
all = cls.all()
for m in all:
if v == m.__dict__[k]:
return m
return None
@classmethod
def find_all(cls, **kwargs):
res = []
k, v = '', ''
for key, value in kwargs:
k, v = key, value
all = cls.all()
for m in all:
if v == m.__dict__[k]:
res.append(m)
return res
def save(self):
models = self.all()
log('models', models)
first_index = 0
if self.__dict__.get('id') is None:
if len(models) > 0:
log('用 log 可以查看代码执行的走向')
self.id = models[-1].id + 1
else:
log('first index', first_index)
self.id = first_index
models.append(self)
else:
index = -1
for i, m in enumerate(models):
if m.id == self.id:
index = i
break
if index > -1:
models[index] = self
l = [m.__dict__ for m in models]
path = self.db_path()
save(l, path)
def __repr__(self):
classname = self.__class__.__name__
properties = ['{}: ({})'.format(k, v) for k, v in self.__dict__.items()]
s = '\n'.join(properties)
return '< {}\n{} >\n'.format(classname, s)
| 24.815217 | 80 | 0.473062 |
794379e2c7b785601a571290b407dd50a94b8577 | 3,839 | py | Python | tools/c7n_azure/c7n_azure/resources/storage_container.py | al3pht/cloud-custodian | ce6613d1b716f336384c5e308eee300389e6bf50 | [
"Apache-2.0"
] | 2,415 | 2018-12-04T00:37:58.000Z | 2022-03-31T12:28:56.000Z | tools/c7n_azure/c7n_azure/resources/storage_container.py | al3pht/cloud-custodian | ce6613d1b716f336384c5e308eee300389e6bf50 | [
"Apache-2.0"
] | 3,272 | 2018-12-03T23:58:17.000Z | 2022-03-31T21:15:32.000Z | tools/c7n_azure/c7n_azure/resources/storage_container.py | al3pht/cloud-custodian | ce6613d1b716f336384c5e308eee300389e6bf50 | [
"Apache-2.0"
] | 773 | 2018-12-06T09:43:23.000Z | 2022-03-30T20:44:43.000Z | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from c7n_azure.provider import resources
from c7n_azure.query import ChildTypeInfo
from c7n_azure.actions.base import AzureBaseAction
from c7n_azure.resources.arm import ChildArmResourceManager
from c7n.filters.core import type_schema
from c7n_azure.utils import ResourceIdParser
from msrestazure.tools import parse_resource_id
@resources.register('storage-container')
class StorageContainer(ChildArmResourceManager):
"""Storage Container Resource
:example:
Finds all containers with public access enabled
.. code-block:: yaml
policies:
- name: storage-container-public
description: |
Find all containers with public access enabled
resource: azure.storage-container
filters:
- type: value
key: properties.publicAccess
op: not-equal
value: None # Possible values: Blob, Container, None
"""
class resource_type(ChildTypeInfo):
doc_groups = ['Storage']
service = 'azure.mgmt.storage'
client = 'StorageManagementClient'
enum_spec = ('blob_containers', 'list', None)
parent_manager_name = 'storage'
diagnostic_settings_enabled = False
resource_type = 'Microsoft.Storage/storageAccounts/blobServices/containers'
raise_on_exception = False
default_report_fields = (
'name',
'properties.publicAccess',
'"c7n:parent-id"'
)
@classmethod
def extra_args(cls, parent_resource):
return {'resource_group_name': parent_resource['resourceGroup'],
'account_name': parent_resource['name']}
def get_resources(self, resource_ids):
client = self.get_client()
data = [
self.get_storage_container(rid, client)
for rid in resource_ids
]
return self.augment([r.serialize(True) for r in data])
def get_storage_container(self, resource_id, client):
parsed = parse_resource_id(resource_id)
return client.blob_containers.get(parsed.get('resource_group'),
parsed.get('name'), # Account name
parsed.get('resource_name')) # Container name
@StorageContainer.action_registry.register('set-public-access')
class StorageContainerSetPublicAccessAction(AzureBaseAction):
"""Action that updates the access level setting on Storage Containers.
Programmatically, this will be seen by updating the Public Access setting
:example:
Finds all Blob Storage Containers that are not private and sets them to private
.. code-block:: yaml
policies:
- name: set-non-production-accounts-private
resource: azure.storage-container
filters:
- type: value
key: properties.publicAccess
op: not-equal
value: None
actions:
- type: set-public-access
value: None
"""
schema = type_schema(
'set-public-access',
required=['value'],
**{
'value': {'enum': ['Container', 'Blob', 'None']}
}
)
def _prepare_processing(self):
self.client = self.manager.get_client()
def _process_resource(self, resource):
resource_group = ResourceIdParser.get_resource_group(resource['id'])
account_name = ResourceIdParser.get_resource_name(resource['c7n:parent-id'])
self.client.blob_containers.update(
resource_group,
account_name,
resource['name'],
public_access=self.data['value']
)
| 33.675439 | 90 | 0.620995 |
79437a49fed0591607b2c696668a5a25ac5bdf85 | 1,999 | py | Python | ugali/analysis/pipeline.py | mcnanna/ugali | 2572915b82af5b25e8762013e6d5baabdaa24b21 | [
"MIT"
] | 12 | 2016-10-26T20:45:33.000Z | 2021-11-24T04:07:43.000Z | ugali/analysis/pipeline.py | mcnanna/ugali | 2572915b82af5b25e8762013e6d5baabdaa24b21 | [
"MIT"
] | 64 | 2017-04-14T15:04:24.000Z | 2022-02-03T19:42:57.000Z | ugali/analysis/pipeline.py | kadrlica/ugali | dcf53594658a2b577f4da271783b43ed0a79fec9 | [
"MIT"
] | 12 | 2016-06-23T21:42:46.000Z | 2021-06-19T05:29:49.000Z | #!/usr/bin/env python
"""
Base functionality for pipeline scripts
"""
import ugali.utils.batch
from ugali.utils.parser import Parser
from ugali.utils.logger import logger
from ugali.utils.config import Config
class Pipeline(object):
"""
A pipeline script owns:
- A set of command line arguments
- A set of runtime components
"""
defaults = None
def __init__(self, description=__doc__, components=[]):
self.description = description
self.components = components
if not self.defaults: self.defaults = self.components
self._setup_parser()
def _setup_parser(self):
self.parser = Parser(description=self.description)
self.parser.add_config()
self.parser.add_debug()
self.parser.add_force()
self.parser.add_queue()
self.parser.add_run(choices=self.components)
self.parser.add_verbose()
self.parser.add_version()
def parse_args(self):
self.opts = self.parser.parse_args()
if not self.opts.run:
self.opts.run = self.defaults
self.config = Config(self.opts.config)
# Setup the batch system
#kwargs = self.config['batch'].get(self.opts.queue,dict())
self.batch = ugali.utils.batch.batch_factory(self.opts.queue)
def run(self):
logger.warning("Doing nothing...")
return
def execute(self):
ret = self.run()
logger.info("Done.")
return ret
if __name__ == "__main__":
description = "Pipeline test"
components = ['test']
def run(self):
logger.info("Testing pipeline...")
if 'test' in self.opts.run:
logger.info(" This should run.")
if 'foo' in self.opts.run:
logger.error(" This should NOT run")
raise Exception
Pipeline.run = run
pipeline = Pipeline(description,components)
pipeline.parser.print_help()
pipeline.parse_args()
pipeline.execute()
| 27.763889 | 69 | 0.624812 |
79437b40f20170296113ffbe7df62bc10bfd99e4 | 2,160 | py | Python | BOT.py | GoodDalek/BOTDiscord-Jogo-de-escolhas | 91199b4ae75f1953ebf1028a002bcc6dcec79b20 | [
"MIT"
] | null | null | null | BOT.py | GoodDalek/BOTDiscord-Jogo-de-escolhas | 91199b4ae75f1953ebf1028a002bcc6dcec79b20 | [
"MIT"
] | null | null | null | BOT.py | GoodDalek/BOTDiscord-Jogo-de-escolhas | 91199b4ae75f1953ebf1028a002bcc6dcec79b20 | [
"MIT"
] | null | null | null | ########################################################################################################################
# BOT DE ADVENTURE GAME PARA DISCORD COM DISCORD.PY #
########################################################################################################################
#Usar python 3.6 ou inferior - incompatibilidade com python 3.7.
#Inompatibilidade com rewrite usado o async.
#Ultima modificação: 23/09/2018.
import discord #API do discord.
import chave #Função que retorna o token do bot.
import asyncio
import random
TOKEN = chave.token() #Chama a funçao que tem o token e armazena o resultado na variavel.
client = discord.Client() #Importa o metodo Client da api do discord e atribui o nome "client".
#Colocar as variaveis e funções aqui - elas serao chamadas dependendo do que o usuario diigitar
historia ="""Aqui voce coloca o bloco de historia do seu jogo""" #nomei a variavel como quiser mas evite nomes iguais
inicio ="""Deseja começar o jogo: !Sim !Não""" #Lembre-se de dar alternativas para que possam chamar outro bloco
#Colocar aqui as funções do bot.
@client.event #chama os eventos gerados pelo usuario.
async def on_ready(): #funçao propria da API - Gera alteraçoes no back end.
print('BOT ONLINE') #imprime uma mensagem de confirmaçao para o bot.
@client.event
async def on_message(message): #funçao propria da API - Gera alteraçoes no servidor do usuario.
if message.content.lower().startswith('!jogar'): #Checa a entrada do usuario deu e faz um tratamento de erro
await client.send_message(message.channel, inicio) #Envia uma resposta ao usuario - neste caso uma variavel
if message.content.lower().startswith('!sim'):
await client.send_message(message.channel, historia)
client.run(TOKEN) #inicia o bot e passa como paramentro o token do bot.
########################################################################################################################
| 42.352941 | 122 | 0.556944 |
79437b528788810f0366e538c931dddf3b6380e8 | 5,186 | py | Python | build/X86_MESI_Two_Level/python/m5/internal/param_SrcClockDomain.py | hoho20000000/gem5-fy | b59f6feed22896d6752331652c4d8a41a4ca4435 | [
"BSD-3-Clause"
] | null | null | null | build/X86_MESI_Two_Level/python/m5/internal/param_SrcClockDomain.py | hoho20000000/gem5-fy | b59f6feed22896d6752331652c4d8a41a4ca4435 | [
"BSD-3-Clause"
] | 1 | 2020-08-20T05:53:30.000Z | 2020-08-20T05:53:30.000Z | build/X86_MESI_Two_Level/python/m5/internal/param_SrcClockDomain.py | hoho20000000/gem5-fy | b59f6feed22896d6752331652c4d8a41a4ca4435 | [
"BSD-3-Clause"
] | null | null | null | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info >= (2, 7, 0):
def swig_import_helper():
import importlib
pkg = __name__.rpartition('.')[0]
mname = '.'.join((pkg, '_param_SrcClockDomain')).lstrip('.')
try:
return importlib.import_module(mname)
except ImportError:
return importlib.import_module('_param_SrcClockDomain')
_param_SrcClockDomain = swig_import_helper()
del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_param_SrcClockDomain', [dirname(__file__)])
except ImportError:
import _param_SrcClockDomain
return _param_SrcClockDomain
try:
_mod = imp.load_module('_param_SrcClockDomain', fp, pathname, description)
finally:
if fp is not None:
fp.close()
return _mod
_param_SrcClockDomain = swig_import_helper()
del swig_import_helper
else:
import _param_SrcClockDomain
del _swig_python_version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
object.__setattr__(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_method(set):
def set_attr(self, name, value):
if (name == "thisown"):
return self.this.own(value)
if hasattr(self, name) or (name == "this"):
set(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
import m5.internal.Clock_vector
import m5.internal.param_VoltageDomain
import m5.internal.Voltage_vector
import m5.internal.param_SimObject
import m5.internal.drain
import m5.internal.serialize
import m5.internal.param_ClockDomain
class SrcClockDomain(m5.internal.param_ClockDomain.ClockDomain):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
SrcClockDomain_swigregister = _param_SrcClockDomain.SrcClockDomain_swigregister
SrcClockDomain_swigregister(SrcClockDomain)
class SrcClockDomainParams(m5.internal.param_ClockDomain.ClockDomainParams):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def create(self):
return _param_SrcClockDomain.SrcClockDomainParams_create(self)
clock = _swig_property(_param_SrcClockDomain.SrcClockDomainParams_clock_get, _param_SrcClockDomain.SrcClockDomainParams_clock_set)
domain_id = _swig_property(_param_SrcClockDomain.SrcClockDomainParams_domain_id_get, _param_SrcClockDomain.SrcClockDomainParams_domain_id_set)
init_perf_level = _swig_property(_param_SrcClockDomain.SrcClockDomainParams_init_perf_level_get, _param_SrcClockDomain.SrcClockDomainParams_init_perf_level_set)
voltage_domain = _swig_property(_param_SrcClockDomain.SrcClockDomainParams_voltage_domain_get, _param_SrcClockDomain.SrcClockDomainParams_voltage_domain_set)
def __init__(self):
this = _param_SrcClockDomain.new_SrcClockDomainParams()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _param_SrcClockDomain.delete_SrcClockDomainParams
__del__ = lambda self: None
SrcClockDomainParams_swigregister = _param_SrcClockDomain.SrcClockDomainParams_swigregister
SrcClockDomainParams_swigregister(SrcClockDomainParams)
| 37.309353 | 164 | 0.71828 |
79437bcd9b3988078b9b47ff92c7e7159dfb4f65 | 27,654 | py | Python | salt/modules/win_update.py | tschmittni/salt | ccfcd5ed1272576799797ec7f259b676fd130585 | [
"Apache-2.0"
] | 2 | 2018-11-08T02:59:24.000Z | 2021-01-04T00:30:50.000Z | salt/modules/win_update.py | The-Loeki/salt | 8ff8212cc1eacfe409eb9cc017b21250f28dd305 | [
"Apache-2.0"
] | 4 | 2020-09-04T10:19:34.000Z | 2020-11-09T12:55:59.000Z | salt/modules/win_update.py | The-Loeki/salt | 8ff8212cc1eacfe409eb9cc017b21250f28dd305 | [
"Apache-2.0"
] | 5 | 2017-06-16T23:48:13.000Z | 2021-04-08T17:43:48.000Z | # -*- coding: utf-8 -*-
'''
Module for running windows updates.
This module is being deprecated and will be removed in Salt Fluorine. Please use
the ``win_wua`` module instead.
:depends: - win32com
- win32con
- win32api
- pywintypes
.. versionadded:: 2014.7.0
Set windows updates to run by category. Default behavior is to install
all updates that do not require user interaction to complete.
Optionally set ``categories`` to a category of your choice to only
install certain updates. Default is to set to install all available but driver updates.
The following example will install all Security and Critical Updates,
and download but not install standard updates.
.. code-block:: bash
salt '*' win_update.install_updates categories="['Critical Updates', 'Security Updates']"
You can also specify a number of features about the update to have a
fine grain approach to specific types of updates. These are the following
features/states of updates available for configuring:
.. code-block:: text
'UI' - User interaction required, skipped by default
'downloaded' - Already downloaded, included by default
'present' - Present on computer, included by default
'installed' - Already installed, skipped by default
'reboot' - Reboot required, included by default
'hidden' - Skip hidden updates, skipped by default
'software' - Software updates, included by default
'driver' - Driver updates, included by default
The following example installs all updates that don't require a reboot:
.. code-block:: bash
salt '*' win_update.install_updates skips="[{'reboot':True}]"
Once installed Salt will return a similar output:
.. code-block:: bash
2 : Windows Server 2012 Update (KB123456)
4 : Internet Explorer Security Update (KB098765)
2 : Malware Definition Update (KB321456)
...
The number at the beginning of the line is an OperationResultCode from the Windows Update Agent,
it's enumeration is described here: https://msdn.microsoft.com/en-us/library/windows/desktop/aa387095(v=vs.85).aspx.
The result code is then followed by the update name and its KB identifier.
'''
# pylint: disable=invalid-name,missing-docstring
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
import logging
# Import 3rd-party libs
# pylint: disable=import-error
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=no-name-in-module,redefined-builtin
try:
import win32com.client
import pythoncom
HAS_DEPENDENCIES = True
except ImportError:
HAS_DEPENDENCIES = False
# pylint: enable=import-error
# Import Salt libs
import salt.utils.platform
import salt.utils.locales
import salt.utils.versions
log = logging.getLogger(__name__)
def __virtual__():
'''
Only works on Windows systems
'''
if salt.utils.platform.is_windows() and HAS_DEPENDENCIES:
salt.utils.versions.warn_until(
'Fluorine',
'The \'win_update\' module is being deprecated and will be removed '
'in Salt {version}. Please use the \'win_wua\' module instead.'
)
return True
return (False, "Module win_update: module has failed dependencies or is not on Windows client")
def _gather_update_categories(updateCollection):
'''
this is a convenience method to gather what categories of updates are available in any update
collection it is passed. Typically though, the download_collection.
Some known categories:
Updates
Windows 7
Critical Updates
Security Updates
Update Rollups
'''
categories = []
for i in range(updateCollection.Count):
update = updateCollection.Item(i)
for j in range(update.Categories.Count):
name = update.Categories.Item(j).Name
if name not in categories:
log.debug('found category: %s', name)
categories.append(name)
return categories
class PyWinUpdater(object):
def __init__(self, categories=None, skipUI=True, skipDownloaded=False,
skipInstalled=True, skipReboot=False, skipPresent=False,
skipSoftwareUpdates=False, skipDriverUpdates=False, skipHidden=True):
log.debug('CoInitializing the pycom system')
pythoncom.CoInitialize()
self.skipUI = skipUI
self.skipDownloaded = skipDownloaded
self.skipInstalled = skipInstalled
self.skipReboot = skipReboot
self.skipPresent = skipPresent
self.skipHidden = skipHidden
self.skipSoftwareUpdates = skipSoftwareUpdates
self.skipDriverUpdates = skipDriverUpdates
# the list of categories that the user wants to be searched for.
self.categories = categories
# the list of categories that are present in the updates found.
self.foundCategories = []
# careful not to get those two confused.
log.debug('dispatching update_session to keep the session object.')
self.update_session = win32com.client.Dispatch('Microsoft.Update.Session')
log.debug('update_session got. Now creating a win_searcher to seek out the updates')
self.win_searcher = self.update_session.CreateUpdateSearcher()
# list of updates that are applicable by current settings.
self.download_collection = win32com.client.Dispatch('Microsoft.Update.UpdateColl')
# list of updates to be installed.
self.install_collection = win32com.client.Dispatch('Microsoft.Update.UpdateColl')
# the object responsible for fetching the actual downloads.
self.win_downloader = self.update_session.CreateUpdateDownloader()
self.win_downloader.Updates = self.download_collection
# the object responsible for the installing of the updates.
self.win_installer = self.update_session.CreateUpdateInstaller()
self.win_installer.Updates = self.install_collection
# the results of the download process
self.download_results = None
# the results of the installation process
self.install_results = None
# search results from CreateUpdateSearcher()
self.search_results = None
def Search(self, searchString):
try:
log.debug('beginning search of the passed string: %s', searchString)
self.search_results = self.win_searcher.Search(searchString)
log.debug('search completed successfully.')
except Exception as exc:
log.info('search for updates failed. %s', exc)
return exc
log.debug('parsing results. %s updates were found.',
self.search_results.Updates.Count)
try:
# step through the list of the updates to ensure that the updates match the
# features desired.
for update in self.search_results.Updates:
# this skipps an update if UI updates are not desired.
if update.InstallationBehavior.CanRequestUserInput:
log.debug(U'Skipped update {0} - requests user input'.format(update.title))
continue
# if this update is already downloaded, it doesn't need to be in
# the download_collection. so skipping it unless the user mandates re-download.
if self.skipDownloaded and update.IsDownloaded:
log.debug(
'Skipped update %s - already downloaded',
update.title
)
continue
# check this update's categories against the ones desired.
for category in update.Categories:
# this is a zero guard. these tests have to be in this order
# or it will error out when the user tries to search for
# updates with out specifying categories.
if self.categories is None or category.Name in self.categories:
# adds it to the list to be downloaded.
self.download_collection.Add(update)
log.debug('added update %s', update.title)
# ever update has 2 categories. this prevents the
# from being added twice.
break
log.debug('download_collection made. gathering found categories.')
# gets the categories of the updates available in this collection of updates
self.foundCategories = _gather_update_categories(self.download_collection)
log.debug('found categories: %s',
six.text_type(self.foundCategories))
return True
except Exception as exc:
log.info('parsing updates failed. %s', exc)
return exc
def AutoSearch(self):
'''
this function generates a search string. simplifying the search function while
still providing as many features as possible.
'''
search_string = ''
searchParams = []
if self.skipInstalled:
searchParams.append('IsInstalled=0')
else:
searchParams.append('IsInstalled=1')
if self.skipHidden:
searchParams.append('IsHidden=0')
else:
searchParams.append('IsHidden=1')
if self.skipReboot:
searchParams.append('RebootRequired=0')
else:
searchParams.append('RebootRequired=1')
if self.skipPresent:
searchParams.append('IsPresent=0')
else:
searchParams.append('IsPresent=1')
for i in searchParams:
search_string += '{0} and '.format(i)
if not self.skipSoftwareUpdates and not self.skipDriverUpdates:
search_string += 'Type=\'Software\' or Type=\'Driver\''
elif not self.skipSoftwareUpdates:
search_string += 'Type=\'Software\''
elif not self.skipDriverUpdates:
search_string += 'Type=\'Driver\''
else:
return False
# if there is no type, the is nothing to search.
log.debug('generated search string: %s', search_string)
return self.Search(search_string)
def Download(self):
# chase the download_collection! do the actual download process.
try:
# if the download_collection is empty. no need to download things.
if self.download_collection.Count != 0:
self.download_results = self.win_downloader.Download()
else:
log.debug('Skipped downloading, all updates were already cached.')
return True
except Exception as exc:
log.debug('failed in the downloading %s.', exc)
return exc
def Install(self):
# beat those updates into place!
try:
# this does not draw from the download_collection. important thing to know.
# the blugger is created regardless of what the download_collection has done. but it
# will only download those updates which have been downloaded and are ready.
for update in self.search_results.Updates:
if update.IsDownloaded:
self.install_collection.Add(update)
log.debug('Updates prepared. beginning installation')
except Exception as exc:
log.info('Preparing install list failed: %s', exc)
return exc
# accept eula if not accepted
try:
for update in self.search_results.Updates:
if not update.EulaAccepted:
log.debug('Accepting EULA: %s', update.Title)
update.AcceptEula()
except Exception as exc:
log.info('Accepting Eula failed: %s', exc)
return exc
# if the blugger is empty. no point it starting the install process.
if self.install_collection.Count != 0:
log.debug('Install list created, about to install')
try:
# the call to install.
self.install_results = self.win_installer.Install()
log.info('Installation of updates complete')
return True
except Exception as exc:
log.info('Installation failed: %s', exc)
return exc
else:
log.info('no new updates.')
return True
def GetInstallationResults(self):
'''
this gets results of installation process.
'''
# if the blugger is empty, the results are nil.
log.debug('blugger has {0} updates in it'.format(self.install_collection.Count))
if self.install_collection.Count == 0:
return {}
updates = []
log.debug('repairing update list')
for i in range(self.install_collection.Count):
# this gets the result from install_results, but the title comes from the update
# collection install_collection.
updates.append('{0}: {1}'.format(
self.install_results.GetUpdateResult(i).ResultCode,
self.install_collection.Item(i).Title))
log.debug('Update results enumerated, now making a library to pass back')
results = {}
# translates the list of update results into a library that salt expects.
for i, update in enumerate(updates):
results['update {0}'.format(i)] = update
log.debug('Update information complied. returning')
return results
def GetInstallationResultsPretty(self):
'''
converts the installation results into a pretty print.
'''
updates = self.GetInstallationResults()
ret = 'The following are the updates and their return codes.\n'
for i in updates:
ret += '\t{0}\n'.format(updates[i])
return ret
def GetDownloadResults(self):
updates = []
for i in range(self.download_collection.Count):
updates.append('{0}: {1}'.format(
six.text_type(self.download_results.GetUpdateResult(i).ResultCode),
six.text_type(self.download_collection.Item(i).Title)))
results = {}
for i, update in enumerate(updates):
results['update {0}'.format(i)] = update
return results
def GetSearchResultsVerbose(self):
updates = []
log.debug('parsing results. %s updates were found.',
self.download_collection.count)
for update in self.download_collection:
if update.InstallationBehavior.CanRequestUserInput:
log.debug('Skipped update %s', update.title)
continue
# More fields can be added from https://msdn.microsoft.com/en-us/library/windows/desktop/aa386099(v=vs.85).aspx
update_com_fields = ['Categories', 'Deadline', 'Description',
'Identity', 'IsMandatory',
'KBArticleIDs', 'MaxDownloadSize', 'MinDownloadSize',
'MoreInfoUrls', 'MsrcSeverity', 'ReleaseNotes',
'SecurityBulletinIDs', 'SupportUrl', 'Title']
simple_enums = ['KBArticleIDs', 'MoreInfoUrls', 'SecurityBulletinIDs']
# update_dict = {k: getattr(update, k) for k in update_com_fields}
update_dict = {}
for f in update_com_fields:
v = getattr(update, f)
if not any([isinstance(v, bool), isinstance(v, six.string_types)]):
# Fields that require special evaluation.
if f in simple_enums:
v = [x for x in v]
elif f == 'Categories':
v = [{'Name': cat.Name, 'Description': cat.Description} for cat in v]
elif f == 'Deadline':
# Deadline will be useful and should be added.
# However, until it can be tested with a date object
# as returned by the COM, it is unclear how to
# handle this field.
continue
elif f == 'Identity':
v = {'RevisionNumber': v.RevisionNumber,
'UpdateID': v.UpdateID}
update_dict[f] = v
updates.append(update_dict)
log.debug('added update %s', update.title)
return updates
def GetSearchResults(self, fields=None):
"""Reduce full updates information to the most important information."""
updates_verbose = self.GetSearchResultsVerbose()
if fields is not None:
updates = [dict((k, v) for k, v in update.items() if k in fields)
for update in updates_verbose]
return updates
# Return list of titles.
return [update['Title'] for update in updates_verbose]
def SetCategories(self, categories):
self.categories = categories
def GetCategories(self):
return self.categories
def GetAvailableCategories(self):
return self.foundCategories
def SetSkips(self, skips):
if skips:
for i in skips:
value = i[next(six.iterkeys(i))]
skip = next(six.iterkeys(i))
self.SetSkip(skip, value)
log.debug('was asked to set %s to %s', skip, value)
def SetSkip(self, skip, state):
if skip == 'UI':
self.skipUI = state
elif skip == 'downloaded':
self.skipDownloaded = state
elif skip == 'installed':
self.skipInstalled = state
elif skip == 'reboot':
self.skipReboot = state
elif skip == 'present':
self.skipPresent = state
elif skip == 'hidden':
self.skipHidden = state
elif skip == 'software':
self.skipSoftwareUpdates = state
elif skip == 'driver':
self.skipDriverUpdates = state
log.debug('new search state: \n\tUI: %s\n\tDownload: %s\n\tInstalled: %s\n\treboot :%s\n\tPresent: %s\n\thidden: %s\n\tsoftware: %s\n\tdriver: %s',
self.skipUI, self.skipDownloaded, self.skipInstalled, self.skipReboot,
self.skipPresent, self.skipHidden, self.skipSoftwareUpdates, self.skipDriverUpdates)
def __str__(self):
results = 'There are {0} updates, by category there are:\n'.format(
self.download_collection.count)
for category in self.foundCategories:
count = 0
for update in self.download_collection:
for cat in update.Categories:
if category == cat.Name:
count += 1
results += '\t{0}: {1}\n'.format(category, count)
return results
def _search(quidditch, retries=5):
'''
a wrapper method for the pywinupdater class. I might move this into the class, but right now,
that is to much for one class I think.
'''
passed = False
clean = True
comment = ''
while not passed:
log.debug('Searching. tries left: %s', retries)
# let the updater make its own search string. MORE POWER this way.
passed = quidditch.AutoSearch()
log.debug('Done searching: %s', passed)
if isinstance(passed, Exception):
clean = False
comment += 'Failed in the seeking/parsing process:\n\t\t{0}\n'.format(passed)
retries -= 1
if retries:
comment += '{0} tries to go. retrying\n'.format(str(retries))
else:
comment += 'out of retries. this update round failed.\n'
return (comment, True, retries)
passed = False
if clean:
# bragging rights.
comment += 'Search was done without error.\n'
return (comment, True, retries)
def _download(quidditch, retries=5):
'''
another wrapper method.
'''
passed = False
clean = True
comment = ''
while not passed:
log.debug('Downloading. tries left: %s', retries)
passed = quidditch.Download()
log.debug('Done downloading: %s', passed)
if isinstance(passed, Exception):
clean = False
comment += 'Failed while trying to download updates:\n\t\t{0}\n'.format(str(passed))
retries -= 1
if retries:
comment += '{0} tries to go. retrying\n'.format(str(retries))
passed = False
else:
comment += 'out of retries. this update round failed.\n'
return (comment, False, retries)
if clean:
comment += 'Download was done without error.\n'
return (comment, True, retries)
def _install(quidditch, retries=5):
'''
and the last wrapper method. keeping things simple.
'''
passed = False
clean = True
comment = ''
while not passed:
log.debug('download_collection is this long: %s',
quidditch.install_collection.Count)
log.debug('Installing. tries left: %s', retries)
passed = quidditch.Install()
log.info('Done installing: %s', passed)
if isinstance(passed, Exception):
clean = False
comment += 'Failed while trying to install the updates.\n\t\t{0}\n'.format(str(passed))
retries -= 1
if retries:
comment += '{0} tries to go. retrying\n'.format(str(retries))
passed = False
else:
comment += 'out of retries. this update round failed.\n'
return (comment, False, retries)
if clean:
comment += 'Install was done without error.\n'
return (comment, True, retries)
# this is where the actual functions available to salt begin.
def list_updates(verbose=False, fields=None, skips=None, retries=5, categories=None):
'''
Returns a summary of available updates, grouped into their non-mutually
exclusive categories.
verbose
Return full set of results, including several fields from the COM.
fields
Return a list of specific fields for each update. The optional
values here are those at the root level of the verbose list. This
is superseded by the verbose option.
retries
Number of retries to make before giving up. This is total, not per
step.
categories
Specify the categories to list. Must be passed as a list.
.. code-block:: bash
salt '*' win_update.list_updates categories="['Updates']"
Categories include, but are not limited to, the following:
* Updates
* Windows 7
* Critical Updates
* Security Updates
* Update Rollups
CLI Examples:
.. code-block:: bash
# Normal Usage
salt '*' win_update.list_updates
# Specific Fields
salt '*' win_update.list_updates fields="['Title', 'Description']"
# List all critical updates list in verbose detail
salt '*' win_update.list_updates categories="['Critical Updates']" verbose=True
'''
log.debug('categories to search for are: %s', categories)
updates = PyWinUpdater()
if categories:
updates.SetCategories(categories)
updates.SetSkips(skips)
# this is where we be seeking the things! yar!
comment, passed, retries = _search(updates, retries)
if not passed:
return (comment, str(passed))
log.debug('verbose: %s', verbose)
if verbose:
return updates.GetSearchResultsVerbose()
return updates.GetSearchResults(fields=fields)
def download_updates(skips=None, retries=5, categories=None):
'''
Downloads all available updates, skipping those that require user
interaction.
Various aspects of the updates can be included or excluded. this feature is
still in development.
retries
Number of retries to make before giving up. This is total, not per
step.
categories
Specify the categories to update. Must be passed as a list.
.. code-block:: bash
salt '*' win_update.download_updates categories="['Updates']"
Categories include the following:
* Updates
* Windows 7
* Critical Updates
* Security Updates
* Update Rollups
CLI Examples:
.. code-block:: bash
# Normal Usage
salt '*' win_update.download_updates
# Download critical updates only
salt '*' win_update.download_updates categories="['Critical Updates']"
'''
log.debug('categories to search for are: %s', categories)
quidditch = PyWinUpdater(skipDownloaded=True)
quidditch.SetCategories(categories)
quidditch.SetSkips(skips)
# this is where we be seeking the things! yar!
comment, passed, retries = _search(quidditch, retries)
if not passed:
return (comment, str(passed))
# this is where we get all the things! i.e. download updates.
comment, passed, retries = _download(quidditch, retries)
if not passed:
return (comment, str(passed))
try:
comment = quidditch.GetDownloadResults()
except Exception as exc:
comment = 'could not get results, but updates were installed. {0}'.format(exc)
return 'Windows is up to date. \n{0}'.format(comment)
def install_updates(skips=None, retries=5, categories=None):
'''
Downloads and installs all available updates, skipping those that require
user interaction.
Add ``cached`` to only install those updates which have already been downloaded.
you can set the maximum number of retries to ``n`` in the search process by
adding: ``retries=n``
various aspects of the updates can be included or excluded. This function is
still under development.
retries
Number of retries to make before giving up. This is total, not per
step.
categories
Specify the categories to install. Must be passed as a list.
.. code-block:: bash
salt '*' win_update.install_updates categories="['Updates']"
Categories include the following:
* Updates
* Windows 7
* Critical Updates
* Security Updates
* Update Rollups
CLI Examples:
.. code-block:: bash
# Normal Usage
salt '*' win_update.install_updates
# Install all critical updates
salt '*' win_update.install_updates categories="['Critical Updates']"
'''
log.debug('categories to search for are: %s', categories)
quidditch = PyWinUpdater()
quidditch.SetCategories(categories)
quidditch.SetSkips(skips)
# this is where we be seeking the things! yar!
comment, passed, retries = _search(quidditch, retries)
if not passed:
return (comment, str(passed))
# this is where we get all the things! i.e. download updates.
comment, passed, retries = _download(quidditch, retries)
if not passed:
return (comment, str(passed))
# this is where we put things in their place!
comment, passed, retries = _install(quidditch, retries)
if not passed:
return (comment, str(passed))
try:
comment = quidditch.GetInstallationResultsPretty()
except Exception as exc:
comment = 'Could not get results, but updates were installed. {0}'.format(exc)
return 'Windows is up to date. \n{0}'.format(comment)
| 36.7251 | 155 | 0.618898 |
79437c420094127c5b84b9c82a8f30b8f1e0ccf1 | 337 | py | Python | invoice/migrations/0002_remove_invoice_due_days.py | kaviarasanmani/test123- | 3995a28826edca5d2694a44c5295af9031780396 | [
"MIT"
] | null | null | null | invoice/migrations/0002_remove_invoice_due_days.py | kaviarasanmani/test123- | 3995a28826edca5d2694a44c5295af9031780396 | [
"MIT"
] | null | null | null | invoice/migrations/0002_remove_invoice_due_days.py | kaviarasanmani/test123- | 3995a28826edca5d2694a44c5295af9031780396 | [
"MIT"
] | null | null | null | # Generated by Django 4.0.1 on 2022-02-16 07:31
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('invoice', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='invoice',
name='due_days',
),
]
| 18.722222 | 48 | 0.551929 |
79437c6dd27ce024cff688b9354d43357225f2b0 | 2,846 | py | Python | tests/test_units/test_templating.py | KinSai1975/Menira.py | ca275ce244ee4804444e1827ba60010a55acc07c | [
"BSD-3-Clause"
] | 118 | 2015-01-04T06:55:14.000Z | 2022-01-14T08:32:41.000Z | tests/test_units/test_templating.py | KinSai1975/Menira.py | ca275ce244ee4804444e1827ba60010a55acc07c | [
"BSD-3-Clause"
] | 21 | 2015-01-03T02:16:28.000Z | 2021-03-24T06:10:57.000Z | tests/test_units/test_templating.py | KinSai1975/Menira.py | ca275ce244ee4804444e1827ba60010a55acc07c | [
"BSD-3-Clause"
] | 53 | 2015-01-04T03:21:08.000Z | 2021-08-04T20:52:01.000Z | import os
import re
import sys
from beaker.cache import CacheManager
from beaker.middleware import SessionMiddleware, CacheMiddleware
from mako.lookup import TemplateLookup
from nose.tools import raises
from paste.fixture import TestApp
from paste.registry import RegistryManager
from paste.deploy.converters import asbool
from routes import Mapper
from routes.middleware import RoutesMiddleware
from nose.tools import raises
from __init__ import test_root
def make_app(global_conf, full_stack=True, static_files=True, include_cache_middleware=False, attribsafe=False, **app_conf):
import pylons
import pylons.configuration as configuration
from pylons import url
from pylons.decorators import jsonify
from pylons.middleware import ErrorHandler, StatusCodeRedirect
from pylons.error import handle_mako_error
from pylons.wsgiapp import PylonsApp
root = os.path.dirname(os.path.abspath(__file__))
paths = dict(root=os.path.join(test_root, 'sample_controllers'), controllers=os.path.join(test_root, 'sample_controllers', 'controllers'),
templates=os.path.join(test_root, 'sample_controllers', 'templates'))
sys.path.append(test_root)
config = configuration.PylonsConfig()
config.init_app(global_conf, app_conf, package='sample_controllers', paths=paths)
map = Mapper(directory=config['pylons.paths']['controllers'])
map.connect('/{controller}/{action}')
config['routes.map'] = map
class AppGlobals(object): pass
config['pylons.app_globals'] = AppGlobals()
config['pylons.app_globals'].mako_lookup = TemplateLookup(
directories=paths['templates'], imports=['from markupsafe import escape']
)
if attribsafe:
config['pylons.strict_tmpl_context'] = False
app = PylonsApp(config=config)
app = RoutesMiddleware(app, config['routes.map'], singleton=False)
if include_cache_middleware:
app = CacheMiddleware(app, config)
app = SessionMiddleware(app, config)
if asbool(full_stack):
app = ErrorHandler(app, global_conf, **config['pylons.errorware'])
if asbool(config['debug']):
app = StatusCodeRedirect(app)
else:
app = StatusCodeRedirect(app, [401, 403, 404, 500])
app = RegistryManager(app)
app.config = config
return app
class TestTemplatingApp(object):
def setUp(self):
self.app = TestApp(make_app({'cache_dir': os.path.join(os.path.dirname(__file__), 'cache')}, include_cache_middleware=True))
def test_testvars(self):
resp = self.app.get('/hello/intro_template')
assert 'Hi there 6' in resp
def test_template_cache(self):
resp = self.app.get('/hello/time_template')
resp2 = self.app.get('/hello/time_template')
assert resp.body == resp2.body
| 35.135802 | 142 | 0.71293 |
79437d1aac83d1a8f6ea1ecbe13078296f917e31 | 327 | py | Python | setup.py | mumrah/cloudcache | f11422c338070c9b212c82d83f46f8e501f8e8a7 | [
"MIT"
] | 1 | 2021-11-15T09:39:25.000Z | 2021-11-15T09:39:25.000Z | setup.py | mumrah/cloudcache | f11422c338070c9b212c82d83f46f8e501f8e8a7 | [
"MIT"
] | 4 | 2016-04-13T15:21:24.000Z | 2016-04-13T15:24:30.000Z | setup.py | mumrah/cloudcache | f11422c338070c9b212c82d83f46f8e501f8e8a7 | [
"MIT"
] | null | null | null | from setuptools import setup,find_packages
setup (
name = 'CloudCached',
version = '0.1',
install_requires = {'Boto':'boto>=1.8','Nose':'nose>=0.11'},
packages = find_packages(),
tests_require = {'Nose':'nose>=0.1'},
test_suite = "nose.collector",
author = 'David Arthur',
author_email = '[email protected]',
)
| 25.153846 | 62 | 0.654434 |
79437d3f9400627008f93abce47ffda936af4563 | 7,284 | py | Python | grr/server/grr_response_server/timeseries.py | BA7JCM/grr | c6f3b19e73e1d76a195d3c9a63e894ace6ea2508 | [
"Apache-2.0"
] | null | null | null | grr/server/grr_response_server/timeseries.py | BA7JCM/grr | c6f3b19e73e1d76a195d3c9a63e894ace6ea2508 | [
"Apache-2.0"
] | null | null | null | grr/server/grr_response_server/timeseries.py | BA7JCM/grr | c6f3b19e73e1d76a195d3c9a63e894ace6ea2508 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Operations on a series of points, indexed by time.
"""
import copy
from grr_response_core.lib import rdfvalue
NORMALIZE_MODE_GAUGE = 1
NORMALIZE_MODE_COUNTER = 2
class Timeseries(object):
"""Timeseries contains a sequence of points, each with a timestamp."""
def __init__(self, initializer=None):
"""Create a timeseries with an optional initializer.
Args:
initializer: An optional Timeseries to clone.
Raises:
RuntimeError: If initializer is not understood.
"""
if initializer is None:
self.data = []
return
if isinstance(initializer, Timeseries):
self.data = copy.deepcopy(initializer.data)
return
raise RuntimeError("Unrecognized initializer.")
def _NormalizeTime(self, time):
"""Normalize a time to be an int measured in microseconds."""
if isinstance(time, rdfvalue.RDFDatetime):
return time.AsMicrosecondsSinceEpoch()
if isinstance(time, rdfvalue.Duration):
return time.microseconds
return int(time)
def Append(self, value, timestamp):
"""Adds value at timestamp.
Values must be added in order of increasing timestamp.
Args:
value: An observed value.
timestamp: The timestamp at which value was observed.
Raises:
RuntimeError: If timestamp is smaller than the previous timstamp.
"""
timestamp = self._NormalizeTime(timestamp)
if self.data and timestamp < self.data[-1][1]:
raise RuntimeError("Next timestamp must be larger.")
self.data.append([value, timestamp])
def MultiAppend(self, value_timestamp_pairs):
"""Adds multiple value<->timestamp pairs.
Args:
value_timestamp_pairs: Tuples of (value, timestamp).
"""
for value, timestamp in value_timestamp_pairs:
self.Append(value, timestamp)
def FilterRange(self, start_time=None, stop_time=None):
"""Filter the series to lie between start_time and stop_time.
Removes all values of the series which are outside of some time range.
Args:
start_time: If set, timestamps before start_time will be dropped.
stop_time: If set, timestamps at or past stop_time will be dropped.
"""
start_time = self._NormalizeTime(start_time)
stop_time = self._NormalizeTime(stop_time)
self.data = [
p for p in self.data
if (start_time is None or p[1] >= start_time) and
(stop_time is None or p[1] < stop_time)
]
def Normalize(self, period, start_time, stop_time, mode=NORMALIZE_MODE_GAUGE):
"""Normalize the series to have a fixed period over a fixed time range.
Supports two modes, depending on the type of data:
NORMALIZE_MODE_GAUGE: support gauge values. If multiple original data
points lie within an output interval, the output value is an average of
the original data point. if no original data points lie within an
output interval, the output value is None.
NORMALIZE_MODE_COUNTER: supports counter values. Assumes that the sequence
is already increasing (typically, MakeIncreasing will have been
called). Each output value is the largest value seen during or before
the corresponding output interval.
Args:
period: The desired time between points. Should be an
rdfvalue.Duration or a count of microseconds.
start_time: The first timestamp will be at start_time. Should be an
rdfvalue.RDFDatetime or a count of microseconds since epoch.
stop_time: The last timestamp will be at stop_time - period. Should be an
rdfvalue.RDFDatetime or a count of microseconds since epoch.
mode: The type of normalization to perform. May be NORMALIZE_MODE_GAUGE or
NORMALIZE_MODE_COUNTER.
Raises:
RuntimeError: In case the sequence timestamps are misordered.
"""
period = self._NormalizeTime(period)
start_time = self._NormalizeTime(start_time)
stop_time = self._NormalizeTime(stop_time)
if not self.data:
return
self.FilterRange(start_time, stop_time)
grouped = {}
for value, timestamp in self.data:
offset = timestamp - start_time
shifted_offset = offset - (offset % period)
grouped.setdefault(shifted_offset, []).append(value)
self.data = []
last_value = None
for offset in range(0, stop_time - start_time, period):
g = grouped.get(offset)
if mode == NORMALIZE_MODE_GAUGE:
v = None
if g:
v = sum(g) / len(g)
self.data.append([v, offset + start_time])
else:
if g:
for v in g:
if last_value is not None and v < last_value:
raise RuntimeError("Next value must not be smaller.")
last_value = v
self.data.append([last_value, offset + start_time])
def MakeIncreasing(self):
"""Makes the time series increasing.
Assumes that series is based on a counter which is occasionally reset, and
using this assumption converts the sequence to estimate the total number of
counts which occurred.
NOTE: Could give inaccurate numbers in either of the following cases: 1)
Multiple resets occur between samples. 2) A reset is followed by a spike
larger than the previous level.
"""
offset = 0
last_value = None
for p in self.data:
if last_value and last_value > p[0]:
# Assume that it was only reset once.
offset += last_value
last_value = p[0]
if offset:
p[0] += offset
def ToDeltas(self):
"""Convert the sequence to the sequence of differences between points.
The value of each point v[i] is replaced by v[i+1] - v[i], except for the
last point which is dropped.
"""
if len(self.data) < 2:
self.data = []
return
for i in range(0, len(self.data) - 1):
if self.data[i][0] is None or self.data[i + 1][0] is None:
self.data[i][0] = None
else:
self.data[i][0] = self.data[i + 1][0] - self.data[i][0]
del self.data[-1]
def Add(self, other):
"""Add other to self pointwise.
Requires that both self and other are of the same length, and contain
identical timestamps. Typically this means that Normalize has been called
on both with identical time parameters.
Args:
other: The sequence to add to self.
Raises:
RuntimeError: other does not contain the same timestamps as self.
"""
if len(self.data) != len(other.data):
raise RuntimeError("Can only add series of identical lengths.")
for i in range(len(self.data)):
if self.data[i][1] != other.data[i][1]:
raise RuntimeError("Timestamp mismatch.")
if self.data[i][0] is None and other.data[i][0] is None:
continue
self.data[i][0] = (self.data[i][0] or 0) + (other.data[i][0] or 0)
def Rescale(self, multiplier):
"""Multiply pointwise by multiplier."""
for p in self.data:
if p[0] is not None:
p[0] *= multiplier
def Mean(self):
"""Return the arithmetic mean of all values."""
values = [v for v, _ in self.data if v is not None]
if not values:
return None
# TODO(hanuszczak): Why do we return a floored division result instead of
# the exact value?
return sum(values) // len(values)
| 32.230088 | 80 | 0.66804 |
79437e01cb5e0766fa805972b4bb57c9f142d8db | 8,491 | py | Python | var/spack/repos/builtin/packages/vtk-m/package.py | BetsyMcPhail/spack | 42ed6e25e16099c866af90e6222f5283f25026ae | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1 | 2021-02-08T15:05:27.000Z | 2021-02-08T15:05:27.000Z | var/spack/repos/builtin/packages/vtk-m/package.py | gmt3141/spack | e05ac5c944e086ab558ad53ca929c29b1770a818 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/vtk-m/package.py | gmt3141/spack | e05ac5c944e086ab558ad53ca929c29b1770a818 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
import sys
class VtkM(CMakePackage, CudaPackage):
"""VTK-m is a toolkit of scientific visualization algorithms for emerging
processor architectures. VTK-m supports the fine-grained concurrency for
data analysis and visualization algorithms required to drive extreme scale
computing by providing abstract models for data and execution that can be
applied to a variety of algorithms across many different processor
architectures."""
homepage = "https://m.vtk.org/"
maintainers = ['robertmaynard', 'kmorel', 'vicentebolea']
url = "https://gitlab.kitware.com/vtk/vtk-m/-/archive/v1.5.1/vtk-m-v1.5.1.tar.gz"
git = "https://gitlab.kitware.com/vtk/vtk-m.git"
version('master', branch='master')
version('1.5.1', sha256="64c19e66c0d579cfb21bb0df10d649b523b470b0c9a6c2ea5fd979dfeda2c25e")
version('1.5.0', sha256="b1b13715c7fcc8d17f5c7166ff5b3e9025f6865dc33eb9b06a63471c21349aa8")
version('1.4.0', sha256="8d83cca7cd5e204d10da151ce4f1846c1f7414c7c1e579173d15c5ea0631555a")
version('1.3.0', sha256="f88c1b0a1980f695240eeed9bcccfa420cc089e631dc2917c9728a2eb906df2e")
version('1.2.0', sha256="607272992e05f8398d196f0acdcb4af025a4a96cd4f66614c6341f31d4561763")
version('1.1.0', sha256="78618c81ca741b1fbba0853cb5d7af12c51973b514c268fc96dfb36b853cdb18")
# version used by ascent
version('ascent_ver', commit="a3b8525ef97d94996ae843db0dd4f675c38e8b1e")
# patches, required for ascent
patch('vtkmdiy_fpic.patch', when='@ascent_ver')
patch('disable_flying_edges.patch', when='@ascent_ver')
# use release, instead of release with debug symbols b/c vtkm libs
# can overwhelm compilers with too many symbols
variant('build_type', default='Release', description='CMake build type',
values=('Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel'))
variant("shared", default=False, description="build shared libs")
variant("doubleprecision", default=True,
description='enable double precision')
variant("logging", default=False, description="build logging support")
variant("mpi", default=False, description="build mpi support")
variant("rendering", default=True, description="build rendering support")
variant("64bitids", default=False,
description="enable 64 bits ids")
# Device variants
variant("cuda", default=False, description="build cuda support")
variant("openmp", default=(sys.platform != 'darwin'), description="build openmp support")
variant("tbb", default=(sys.platform == 'darwin'), description="build TBB support")
variant("hip", default=False, description="build hip support")
# it doesn't look like spack has a amd gpu abstraction
amdgpu_targets = (
'gfx701', 'gfx801', 'gfx802', 'gfx803',
'gfx900', 'gfx906', 'gfx908', 'gfx1010',
'gfx1011', 'gfx1012'
)
variant('amdgpu_target', default='none', multi=True, values=amdgpu_targets)
conflicts("+hip", when="amdgpu_target=none")
depends_on("[email protected]:", type="build") # CMake >= 3.12
depends_on("[email protected]:", when="+hip", type="build") # CMake >= 3.18
depends_on('[email protected]:', when='+cuda')
depends_on("tbb", when="+tbb")
depends_on("mpi", when="+mpi")
depends_on("[email protected]:+hip", when="+hip")
depends_on("[email protected]:", when="+hip")
depends_on("[email protected]:", when="+hip")
conflicts("+hip", when="+cuda")
conflicts("~shared", when="~pic")
def cmake_args(self):
spec = self.spec
options = []
gpu_name_table = {'30': 'kepler', '32': 'kepler', '35': 'kepler',
'50': 'maxwell', '52': 'maxwell', '53': 'maxwell',
'60': 'pascal', '61': 'pascal', '62': 'pascal',
'70': 'volta', '72': 'turing', '75': 'turing',
'80': 'ampere', '86': 'ampere'}
with working_dir('spack-build', create=True):
options = ["-DVTKm_ENABLE_TESTING:BOOL=OFF"]
# shared vs static libs logic
# force building statically with cuda
if "+cuda" in spec:
options.append('-DBUILD_SHARED_LIBS=OFF')
else:
if "+shared" in spec:
options.append('-DBUILD_SHARED_LIBS=ON')
else:
options.append('-DBUILD_SHARED_LIBS=OFF')
# double precision
if "+doubleprecision" in spec:
options.append("-DVTKm_USE_DOUBLE_PRECISION:BOOL=ON")
else:
options.append("-DVTKm_USE_DOUBLE_PRECISION:BOOL=OFF")
# logging support
if "+logging" in spec:
if not spec.satisfies('@1.3.0:,ascent_ver'):
raise InstallError('logging is not supported for\
vtkm version lower than 1.3')
options.append("-DVTKm_ENABLE_LOGGING:BOOL=ON")
else:
options.append("-DVTKm_ENABLE_LOGGING:BOOL=OFF")
# mpi support
if "+mpi" in spec:
if not spec.satisfies('@1.3.0:,ascent_ver'):
raise InstallError('mpi is not supported for\
vtkm version lower than 1.3')
options.append("-DVTKm_ENABLE_MPI:BOOL=ON")
else:
options.append("-DVTKm_ENABLE_MPI:BOOL=OFF")
# rendering support
if "+rendering" in spec:
options.append("-DVTKm_ENABLE_RENDERING:BOOL=ON")
else:
options.append("-DVTKm_ENABLE_RENDERING:BOOL=OFF")
# 64 bit ids
if "+64bitids" in spec:
options.append("-DVTKm_USE_64BIT_IDS:BOOL=ON")
print("64 bit ids enabled")
else:
options.append("-DVTKm_USE_64BIT_IDS:BOOL=OFF")
if spec.variants["build_type"].value != 'Release':
options.append("-DVTKm_NO_ASSERT:BOOL=ON")
# cuda support
if "+cuda" in spec:
options.append("-DVTKm_ENABLE_CUDA:BOOL=ON")
options.append("-DCMAKE_CUDA_HOST_COMPILER={0}".format(
env["SPACK_CXX"]))
if 'cuda_arch' in spec.variants:
cuda_value = spec.variants['cuda_arch'].value
cuda_arch = cuda_value[0]
if cuda_arch in gpu_name_table:
vtkm_cuda_arch = gpu_name_table[cuda_arch]
options.append('-DVTKm_CUDA_Architecture={0}'.format(
vtkm_cuda_arch))
else:
# this fix is necessary if compiling platform has cuda, but
# no devices (this is common for front end nodes on hpc
# clusters). We choose volta as a lowest common denominator
options.append("-DVTKm_CUDA_Architecture=volta")
else:
options.append("-DVTKm_ENABLE_CUDA:BOOL=OFF")
# hip support
if "+hip" in spec:
options.append("-DVTKm_ENABLE_HIP:BOOL=ON")
archs = ",".join(self.spec.variants['amdgpu_target'].value)
options.append(
"-DCMAKE_HIP_ARCHITECTURES:STRING={0}".format(archs))
else:
options.append("-DVTKm_ENABLE_HIP:BOOL=OFF")
# openmp support
if "+openmp" in spec:
# openmp is added since version 1.3.0
if not spec.satisfies('@1.3.0:,ascent_ver'):
raise InstallError('OpenMP is not supported for\
vtkm version lower than 1.3')
options.append("-DVTKm_ENABLE_OPENMP:BOOL=ON")
else:
options.append("-DVTKm_ENABLE_OPENMP:BOOL=OFF")
# tbb support
if "+tbb" in spec:
# vtk-m detectes tbb via TBB_ROOT env var
os.environ["TBB_ROOT"] = spec["tbb"].prefix
options.append("-DVTKm_ENABLE_TBB:BOOL=ON")
else:
options.append("-DVTKm_ENABLE_TBB:BOOL=OFF")
return options
| 44.689474 | 95 | 0.589919 |
79437fb438922eee8a35a3da4888482710ed2633 | 2,435 | py | Python | data/p4VQE/R4/benchmark/startQiskit_Class718.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p4VQE/R4/benchmark/startQiskit_Class718.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p4VQE/R4/benchmark/startQiskit_Class718.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=3
# total number=13
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[2]) # number=10
prog.cz(input_qubit[1],input_qubit[2]) # number=11
prog.h(input_qubit[2]) # number=12
prog.x(input_qubit[2]) # number=6
prog.y(input_qubit[3]) # number=5
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[1],input_qubit[0]) # number=8
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5600
writefile = open("../data/startQiskit_Class718.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('statevector_simulator')
circuit1 = transpile(prog, FakeYorktown())
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| 27.359551 | 118 | 0.634908 |
7943814b3a25193766fc2d5ba6d81f51f2ef6298 | 203 | py | Python | venmo_client/model/__init__.py | sharadmv/venmo-client | 2b236053ab5f233645b0a64f5333a4e9723ebf30 | [
"MIT"
] | null | null | null | venmo_client/model/__init__.py | sharadmv/venmo-client | 2b236053ab5f233645b0a64f5333a4e9723ebf30 | [
"MIT"
] | null | null | null | venmo_client/model/__init__.py | sharadmv/venmo-client | 2b236053ab5f233645b0a64f5333a4e9723ebf30 | [
"MIT"
] | null | null | null | from venmo_client.model.transaction import Notification
from venmo_client.model.transaction import Transaction
from venmo_client.model.transaction import Payment
from venmo_client.model.user import User
| 40.6 | 55 | 0.881773 |
7943824d53294043734ffa67046f42136422c29f | 1,361 | py | Python | api/views/SaleItemViewSet.py | ghalonso94/wswallet | 8f1f13a0d646166adad45b3872c2db6558d48f38 | [
"MIT"
] | null | null | null | api/views/SaleItemViewSet.py | ghalonso94/wswallet | 8f1f13a0d646166adad45b3872c2db6558d48f38 | [
"MIT"
] | null | null | null | api/views/SaleItemViewSet.py | ghalonso94/wswallet | 8f1f13a0d646166adad45b3872c2db6558d48f38 | [
"MIT"
] | null | null | null | from rest_framework import viewsets
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from core.models import SaleItem
from api.serializer import SaleItemSerializer
from rest_framework.authentication import BasicAuthentication
from rest_framework.permissions import IsAuthenticated
class SaleItemViewSet(viewsets.ModelViewSet):
# Show all Customers
serializer_class = SaleItemSerializer
http_method_names = ['get']
authentication_classes = [BasicAuthentication]
permission_classes = [IsAuthenticated]
def list(self, request):
""" Method for listing all sale items """
if request.user.is_staff:
queryset = SaleItem.objects.all()
else:
queryset = SaleItem.objects.filter(sale__company__user__exact=request.user)
serializer = SaleItemSerializer(queryset, many=True)
return Response(serializer.data)
def retrieve(self, request, pk=None):
""" Method to recover a single sale item """
if request.user.is_staff:
queryset = SaleItem.objects.all()
else:
queryset = SaleItem.objects.filter(sale__company__user__exact=request.user)
cashback = get_object_or_404(queryset, pk=pk)
serializer = SaleItemSerializer(cashback)
return Response(serializer.data) | 36.783784 | 87 | 0.728876 |
794382e928536c41b822332348976703fd1d2f36 | 14,017 | py | Python | tests/test_canonicalization.py | cthoyt/pybel | ed66f013a77f9cbc513892b0dad1025b8f68bb46 | [
"Apache-2.0"
] | null | null | null | tests/test_canonicalization.py | cthoyt/pybel | ed66f013a77f9cbc513892b0dad1025b8f68bb46 | [
"Apache-2.0"
] | 11 | 2017-12-28T08:03:14.000Z | 2019-01-15T02:13:58.000Z | tests/test_canonicalization.py | cthoyt/pybel | ed66f013a77f9cbc513892b0dad1025b8f68bb46 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Tests for canonicalization functions."""
import unittest
from typing import Iterable
from pybel import BELGraph
from pybel.canonicalize import _to_bel_lines_body, postpend_location
from pybel.constants import CITATION_TYPE_PUBMED, EXTRACELLULAR, INTRACELLULAR, MODIFIER
from pybel.dsl import (
Abundance,
BiologicalProcess,
ComplexAbundance,
CompositeAbundance,
EnumeratedFusionRange,
Fragment,
Gene,
GeneFusion,
GeneModification,
Hgvs,
MicroRna,
NamedComplexAbundance,
Pathology,
Protein,
ProteinModification,
ProteinSubstitution,
Reaction,
Rna,
RnaFusion,
activity,
degradation,
secretion,
translocation,
)
from pybel.language import Entity
from pybel.testing.utils import n
from pybel.utils import canonicalize_edge
class TestCanonicalize(unittest.TestCase):
def test_postpend_location_failure(self):
with self.assertRaises(ValueError):
postpend_location("", dict(name="failure"))
def test_canonicalize_variant_dsl(self):
"""Use the __str__ functions in the DSL to create BEL instead of external pybel.canonicalize."""
self.assertEqual('var("p.Val600Glu")', str(Hgvs("p.Val600Glu")))
self.assertEqual('var("p.Val600Glu")', str(ProteinSubstitution("Val", 600, "Glu")))
self.assertEqual(
'pmod(go:0006468 ! "protein phosphorylation")',
str(ProteinModification("Ph")),
)
self.assertEqual("pmod(TEST:Ph)", str(ProteinModification("Ph", namespace="TEST")))
self.assertEqual(
"pmod(TEST:Ph, Ser)",
str(ProteinModification("Ph", namespace="TEST", code="Ser")),
)
self.assertEqual(
"pmod(TEST:Ph, Ser, 5)",
str(ProteinModification("Ph", namespace="TEST", code="Ser", position=5)),
)
self.assertEqual(
'pmod(GO:"protein phosphorylation", Thr, 308)',
str(
ProteinModification(
name="protein phosphorylation",
namespace="GO",
code="Thr",
position=308,
)
),
)
self.assertEqual('frag("?")', str(Fragment()))
self.assertEqual('frag("672_713")', str(Fragment(start=672, stop=713)))
self.assertEqual('frag("?", "descr")', str(Fragment(description="descr")))
self.assertEqual(
'frag("672_713", "descr")',
str(Fragment(start=672, stop=713, description="descr")),
)
self.assertEqual('gmod(go:0006306 ! "DNA methylation")', str(GeneModification("Me")))
self.assertEqual("gmod(TEST:Me)", str(GeneModification("Me", namespace="TEST")))
self.assertEqual(
'gmod(GO:"DNA Methylation")',
str(GeneModification("DNA Methylation", namespace="GO")),
)
def test_canonicalize_fusion_range_dsl(self):
"""Test canonicalization of enumerated fusion ranges."""
self.assertEqual("p.1_15", str(EnumeratedFusionRange("p", 1, 15)))
self.assertEqual("p.*_15", str(EnumeratedFusionRange("p", "*", 15)))
def test_Abundance(self):
"""Test canonicalization of abundances."""
short = Abundance(namespace="CHEBI", name="water")
self.assertEqual("a(CHEBI:water)", str(short))
long = Abundance(namespace="CHEBI", name="test name")
self.assertEqual('a(CHEBI:"test name")', str(long))
def test_protein_reference(self):
self.assertEqual("p(HGNC:AKT1)", str(Protein(namespace="HGNC", name="AKT1")))
def test_gene_reference(self):
node = Gene(namespace="EGID", name="780")
self.assertEqual("g(EGID:780)", str(node))
def test_protein_pmod(self):
node = Protein(
name="PLCG1",
namespace="HGNC",
variants=[ProteinModification(name="Ph", code="Tyr")],
)
self.assertEqual(
'p(HGNC:PLCG1, pmod(go:0006468 ! "protein phosphorylation", Tyr))',
str(node),
)
def test_protein_fragment(self):
node = Protein(name="APP", namespace="HGNC", variants=[Fragment(start=672, stop=713)])
self.assertEqual('p(HGNC:APP, frag("672_713"))', str(node))
def test_mirna_reference(self):
self.assertEqual("m(HGNC:MIR1)", str(MicroRna(namespace="HGNC", name="MIR1")))
def test_rna_fusion_specified(self):
node = RnaFusion(
partner_5p=Rna(namespace="HGNC", name="TMPRSS2"),
range_5p=EnumeratedFusionRange("r", 1, 79),
partner_3p=Rna(namespace="HGNC", name="ERG"),
range_3p=EnumeratedFusionRange("r", 312, 5034),
)
self.assertEqual('r(fus(HGNC:TMPRSS2, "r.1_79", HGNC:ERG, "r.312_5034"))', str(node))
def test_rna_fusion_unspecified(self):
node = RnaFusion(
partner_5p=Rna(namespace="HGNC", name="TMPRSS2"),
partner_3p=Rna(namespace="HGNC", name="ERG"),
)
self.assertEqual('r(fus(HGNC:TMPRSS2, "?", HGNC:ERG, "?"))', str(node))
def test_gene_fusion_specified(self):
node = GeneFusion(
partner_5p=Gene(namespace="HGNC", name="TMPRSS2"),
range_5p=EnumeratedFusionRange("c", 1, 79),
partner_3p=Gene(namespace="HGNC", name="ERG"),
range_3p=EnumeratedFusionRange("c", 312, 5034),
)
self.assertEqual('g(fus(HGNC:TMPRSS2, "c.1_79", HGNC:ERG, "c.312_5034"))', str(node))
def test_pathology(self):
node = Pathology(namespace="DO", name="Alzheimer disease")
self.assertEqual('path(DO:"Alzheimer disease")', str(node))
def test_bioprocess(self):
node = BiologicalProcess(namespace="GO", name="apoptosis")
self.assertEqual("bp(GO:apoptosis)", str(node))
def test_named_complex_abundance(self):
node = NamedComplexAbundance(namespace="SCOMP", name="Calcineurin Complex")
self.assertEqual('complex(SCOMP:"Calcineurin Complex")', str(node))
def test_complex_abundance(self):
node = ComplexAbundance(
members=[
Protein(namespace="HGNC", name="FOS"),
Protein(namespace="HGNC", name="JUN"),
]
)
self.assertEqual("complex(p(HGNC:FOS), p(HGNC:JUN))", str(node))
def test_composite_abundance(self):
node = CompositeAbundance(
members=[
Protein(namespace="HGNC", name="FOS"),
Protein(namespace="HGNC", name="JUN"),
]
)
self.assertEqual("composite(p(HGNC:FOS), p(HGNC:JUN))", str(node))
def test_reaction(self):
node = Reaction(
reactants=[Abundance(namespace="CHEBI", name="A")],
products=[Abundance(namespace="CHEBI", name="B")],
)
self.assertEqual("rxn(reactants(a(CHEBI:A)), products(a(CHEBI:B)))", str(node))
class TestCanonicalizeEdge(unittest.TestCase):
"""This class houses all testing for the canonicalization of edges such that the relation/modifications can be used
as a second level hash"""
def setUp(self):
self.g = BELGraph()
self.g.annotation_pattern["Species"] = r"\d+"
self.u = Protein(name="u", namespace="TEST")
self.v = Protein(name="v", namespace="TEST")
self.g.add_node_from_data(self.u)
self.g.add_node_from_data(self.v)
def get_data(self, k):
return self.g[self.u][self.v][k]
def add_edge(self, source_modifier=None, target_modifier=None, annotations=None):
key = self.g.add_increases(
self.u,
self.v,
evidence=n(),
citation=n(),
source_modifier=source_modifier,
target_modifier=target_modifier,
annotations=annotations,
)
return canonicalize_edge(self.get_data(key))
def test_failure(self):
with self.assertRaises(ValueError):
self.add_edge(source_modifier={MODIFIER: "nope"})
def test_canonicalize_edge_info(self):
c1 = self.add_edge(annotations={"Species": "9606"})
c2 = self.add_edge(annotations={"Species": "9606"})
c3 = self.add_edge(
source_modifier=activity("tport"),
)
c4 = self.add_edge(
source_modifier=activity(namespace="go", name="transporter activity", identifier="0005215"),
)
self.assertEqual(c1, c2)
self.assertNotEqual(c1, c3)
self.assertEqual(c3, c4)
def test_subject_degradation_location(self):
self.assertEqual(
self.add_edge(source_modifier=degradation()),
self.add_edge(source_modifier=degradation()),
)
self.assertEqual(
self.add_edge(source_modifier=degradation(location=Entity(name="somewhere", namespace="GO"))),
self.add_edge(source_modifier=degradation(location=Entity(name="somewhere", namespace="GO"))),
)
self.assertNotEqual(
self.add_edge(source_modifier=degradation()),
self.add_edge(source_modifier=degradation(location=Entity(name="somewhere", namespace="GO"))),
)
def test_translocation(self):
self.assertEqual(
self.add_edge(source_modifier=secretion()),
self.add_edge(source_modifier=secretion()),
)
self.assertEqual(
self.add_edge(source_modifier=secretion()),
self.add_edge(source_modifier=translocation(INTRACELLULAR, EXTRACELLULAR)),
)
class TestSerializeBEL(unittest.TestCase):
def setUp(self):
self.citation = n()
self.evidence = n()
self.url = n()
self.graph = BELGraph()
self.graph.namespace_url["HGNC"] = self.url
def _help_check_lines(self, lines: Iterable[str]):
"""Check the given lines match the graph built during the tests."""
self.assertEqual(lines, list(_to_bel_lines_body(self.graph)))
def test_simple(self):
"""Test a scenario with a qualified edge, but no annotations."""
self.graph.add_increases(
Protein(namespace="HGNC", name="YFG1"),
Protein(namespace="HGNC", name="YFG"),
citation=self.citation,
evidence=self.evidence,
)
self.assertEqual(2, self.graph.number_of_nodes())
self.assertEqual(1, self.graph.number_of_edges())
expected_lines = [
f'SET Citation = {{"{CITATION_TYPE_PUBMED}", "{self.citation}"}}\n',
'SET SupportingText = "{}"'.format(self.evidence),
"p(HGNC:YFG1) increases p(HGNC:YFG)",
"UNSET SupportingText",
"UNSET Citation\n",
"#" * 80,
]
self._help_check_lines(expected_lines)
def test_different_key_and_namespace(self):
key, namespace, value = map(lambda _: n(), range(3))
self.graph.annotation_curie.add(key)
self.graph.add_increases(
Protein(namespace="HGNC", name="YFG1"),
Protein(namespace="HGNC", name="YFG"),
citation=self.citation,
evidence=self.evidence,
annotations={
key: Entity(namespace=namespace, identifier=value),
},
)
self.assertEqual(2, self.graph.number_of_nodes())
self.assertEqual(1, self.graph.number_of_edges())
expected_lines = [
f'SET Citation = {{"{CITATION_TYPE_PUBMED}", "{self.citation}"}}\n',
f'SET SupportingText = "{self.evidence}"',
f'SET {key} = "{namespace}:{value}"',
"p(HGNC:YFG1) increases p(HGNC:YFG)",
f"UNSET {key}",
"UNSET SupportingText",
"UNSET Citation\n",
("#" * 80),
]
self._help_check_lines(expected_lines)
def test_single_annotation(self):
"""Test a scenario with a qualified edge, but no annotations."""
a1, v1 = map(lambda _: n(), range(2))
self.graph.annotation_list[a1] = {v1}
self.graph.add_increases(
Protein(namespace="HGNC", name="YFG1"),
Protein(namespace="HGNC", name="YFG"),
citation=self.citation,
evidence=self.evidence,
annotations={
a1: {v1},
},
)
self.assertEqual(2, self.graph.number_of_nodes())
self.assertEqual(1, self.graph.number_of_edges())
# Means that only the identifier needs to be written out
self.assertNotIn(a1, self.graph.annotation_curie)
expected_lines = [
f'SET Citation = {{"{CITATION_TYPE_PUBMED}", "{self.citation}"}}\n',
f'SET SupportingText = "{self.evidence}"',
f'SET {a1} = "{v1}"',
"p(HGNC:YFG1) increases p(HGNC:YFG)",
f"UNSET {a1}",
"UNSET SupportingText",
"UNSET Citation\n",
"#" * 80,
]
self._help_check_lines(expected_lines)
def test_multiple_annotations(self):
a1, v1, v2 = map(lambda _: n(), range(3))
v1, v2 = sorted([v1, v2])
self.graph.annotation_list[a1] = {v1, v2}
self.graph.add_increases(
Protein(namespace="HGNC", name="YFG1"),
Protein(namespace="HGNC", name="YFG"),
citation=self.citation,
evidence=self.evidence,
annotations={
a1: {v1, v2},
},
)
self.assertEqual(2, self.graph.number_of_nodes())
self.assertEqual(1, self.graph.number_of_edges())
expected_lines = [
f'SET Citation = {{"{CITATION_TYPE_PUBMED}", "{self.citation}"}}\n',
f'SET SupportingText = "{self.evidence}"',
f'SET {a1} = {{"{v1}", "{v2}"}}',
"p(HGNC:YFG1) increases p(HGNC:YFG)",
f"UNSET {a1}",
"UNSET SupportingText",
"UNSET Citation\n",
("#" * 80),
]
self._help_check_lines(expected_lines)
| 35.0425 | 119 | 0.591567 |
794382f2adca5bfa500dfec904cf220384f3479c | 994 | py | Python | data_terbuka_id/items.py | seagatesoft/data-terbuka-id | 57e0531fa4a978483852ee1333cc5bf0b80637f7 | [
"MIT"
] | null | null | null | data_terbuka_id/items.py | seagatesoft/data-terbuka-id | 57e0531fa4a978483852ee1333cc5bf0b80637f7 | [
"MIT"
] | null | null | null | data_terbuka_id/items.py | seagatesoft/data-terbuka-id | 57e0531fa4a978483852ee1333cc5bf0b80637f7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy import Field, Item
from scrapy.loader import ItemLoader
from scrapylib.processors import default_input_processor, default_output_processor
class MasjidItem(Item):
id_masjid = Field()
nama_masjid = Field()
link_detail = Field()
kabupaten_kota = Field()
kecamatan = Field()
tipologi = Field()
alamat = Field()
luas_tanah = Field()
status_tanah = Field()
luas_bangunan = Field()
tahun_berdiri = Field()
jamaah = Field()
imam = Field()
khatib = Field()
muazin = Field()
remaja = Field()
no_telepon = Field()
keterangan = Field()
longitude = Field()
latitude = Field()
class MasjidItemLoader(ItemLoader):
default_item_class = MasjidItem
default_input_processor = default_input_processor
default_output_processor = default_output_processor
| 24.85 | 82 | 0.695171 |
7943830a68d11d1bab1e92768af6ca55088f447e | 19,254 | py | Python | quart_openapi/swagger.py | kowbot/quart-openapi | d259bd1f6dd8315ddd6f601e395ff08313921196 | [
"Apache-2.0"
] | null | null | null | quart_openapi/swagger.py | kowbot/quart-openapi | d259bd1f6dd8315ddd6f601e395ff08313921196 | [
"Apache-2.0"
] | null | null | null | quart_openapi/swagger.py | kowbot/quart-openapi | d259bd1f6dd8315ddd6f601e395ff08313921196 | [
"Apache-2.0"
] | null | null | null | """swagger.py
Provides the View class for generating the openapi.json file on the fly based on the Pint instance and decorators
"""
from collections import OrderedDict
from http import HTTPStatus
from itertools import chain
from typing import (Any, Callable, Dict, Generator, Iterable, List, Mapping,
Optional, Tuple, Union)
from jsonschema import Draft4Validator
from quart.routing import Map as RouteMap
from werkzeug.routing import _rule_re as ROUTE_VAR_RE
from .resource import Resource, get_expect_args
from .typing import HeaderType, ValidatorTypes
from .utils import extract_path, merge, not_none, parse_docstring
DEFAULT_RESPONSE_DESCRIPTION = 'Success'
DEFAULT_RESPONSE = {'description': DEFAULT_RESPONSE_DESCRIPTION}
PY_TYPES = {
int: 'integer',
float: 'number',
str: 'string',
bool: 'boolean',
None: 'void'
}
PATH_TYPES = {
'int': 'integer',
'float': 'number',
'string': 'string',
'default': 'string'
}
def _clean_header(header: HeaderType) -> Dict[str, Any]:
"""Convert headers to dict representation
:param header: Either a header description, a type, a validator, or a dict of keys for the
header param object
:return: The dict of properties for the given header param normalized to the openapi 3.0 spec
"""
if isinstance(header, str):
header = {'description': header}
typedef = header.get('type', 'string')
if typedef in PY_TYPES:
header['type'] = PY_TYPES[typedef]
elif isinstance(typedef, (list, tuple)) and len(typedef) == 1 and typedef[0] in PY_TYPES:
header['type'] = 'array'
header['items'] = {'type': PY_TYPES[typedef[0]]}
elif hasattr(typedef, '__schema__'):
header.update(typedef.__schema__)
else:
header['type'] = typedef
return not_none(header)
def _parse_rule(rule: str) -> Generator[Tuple[str, str], None, None]:
"""Generator for the converters for the path parameters
:param rule: a route string
:return: each iteration yields the next tuple of (converter name, variable name)
"""
for match in ROUTE_VAR_RE.finditer(rule):
named_groups = match.groupdict()
yield (named_groups['converter'], named_groups['variable'])
def _extract_path_params(path: str) -> OrderedDict:
"""Generate the path params from the route
:param path: The route string
:return: An :class:`~collections.OrderedDict` of param names to definitions
"""
params = OrderedDict()
for converter, variable in _parse_rule(path):
if not converter:
continue
param = {
'name': variable,
'in': 'path',
'required': True,
'schema': {}
}
if converter in PATH_TYPES:
param['schema']['type'] = PATH_TYPES[converter]
elif converter == 'uuid':
param['schema']['type'] = 'string'
param['schema']['format'] = 'uuid'
elif converter in RouteMap.default_converters:
param['schema']['type'] = 'string'
else:
raise ValueError('Unsupported type converter: %s' % converter)
params[variable] = param
return params
class Swagger():
"""Class for generating a openapi.json from the resources and information defined with
:class:`~factset.quart_openapi.Pint`"""
def __init__(self, api: 'Pint') -> None:
"""Construct a Swagger object for generating the openapi Json
:param api: the main app interface for getting the base model and resources
"""
self.api = api
self._components = OrderedDict([('schemas', OrderedDict()),
('responses', OrderedDict()),
('parameters', OrderedDict()),
('examples', OrderedDict()),
('requestBodies', OrderedDict()),
('headers', OrderedDict()),
('securitySchemes', OrderedDict())])
def as_dict(self) -> Dict[str, Any]:
"""Return a dict which can be used with the :mod:`json` module to return valid json"""
infos = {
'title': self.api.title or 'OpenApi Rest Documentation',
'version': self.api.version or '1.0'
}
if self.api.description:
infos['description'] = self.api.description
if self.api.contact and (self.api.contact_email or self.api.contact_url):
infos['contact'] = not_none({
'name': self.api.contact,
'email': self.api.contact_email,
'url': self.api.contact_url
})
components = self.serialize_components() or None
paths = {}
for resource, path, methods in self.api.resources:
paths[extract_path(path)] = self.serialize_resource(resource, path, methods)
scheme = self.api.config.get('PREFERRED_URL_SCHEME',
'http' if not self.api.config.get('PREFER_SECURE_URLS', False) else 'https')
spec = {
'openapi': '3.0.0',
'info': infos,
'servers': [
{
'url': ''.join([scheme, '://', self.api.config['SERVER_NAME'] or ''])
}
],
'paths': paths,
'components': components
}
return not_none(spec)
def register_component(self, category: str, name: str, schema: Dict[str, Any]) -> None:
"""Used for populating the components_ section of the openapi docs
:param category: The category under the component section
:param name: The name of the model for reference
:param schema: the actual schema for this object
"""
if category not in self._components:
raise ValueError('invalid category for components')
self._components[category][name] = schema
def serialize_components(self) -> Mapping[str, Dict[str, Any]]:
"""Generate the json for the components_ section
:return: An :class:`~collections.OrderedDict` of the components
"""
if self.api.base_model is None:
return {}
base_components = self.api.base_model.resolve('#/components')[1]
for category, val in base_components.items():
for name, schema in val.items():
self.register_component(category, name, schema)
return OrderedDict((k, v) for k, v in self._components.items() if v)
@staticmethod
def tags_for(doc: List[str]) -> Iterable[List[str]]:
"""Get the list of tags for output
:param doc: a mapping from HTTP verb to the properties for serialization
:return: a list of string containing tags as described by the openapi 3.0 spec
"""
tags = []
for name in doc['tags']:
tags.append(name)
return tags
@staticmethod
def description_for(doc: Dict[str, Any], method: str) -> str:
"""Extract the description metadata and fallback on the whole docstring
:param doc: a mapping from HTTP verb to the properties for serialization
:param method: The HTTP Verb function for the route
:return: The description as pulled from the docstring for the description property
"""
parts = []
if 'description' in doc:
parts.append(doc['description'])
if method in doc and 'description' in doc[method]:
parts.append(doc[method]['description'])
if doc[method]['docstring']['details']:
parts.append(doc[method]['docstring']['details'])
return '\n'.join(parts).strip()
def parameters_for(self, doc: Dict[str, Any]) -> Iterable[Dict[str, Any]]:
"""Get the list of param descriptions for output
:param doc: a mapping from HTTP verb to the properties for serialization
:return: a list of dict objects containing params as described by the openapi 3.0 spec
"""
params = []
for name, param in doc['params'].items():
if 'ref' in param:
if isinstance(param['ref'], str) and param['ref'].startswith('#/components/'):
params.append({'$ref': param['ref']})
else:
params.append(self.serialize_schema(param['ref']))
continue
param['name'] = name
if 'schema' not in param:
param['schema'] = {}
if 'type' not in param['schema'] and '$ref' not in param['schema']:
param['schema']['type'] = 'string'
if 'in' not in param:
param['in'] = 'query'
params.append(param)
return params
def operation_id_for(self, doc: Dict[str, Any], method: str) -> str:
"""Return the operation id to be used for openapi docs
:param doc: a mapping from HTTP verb to the properties for serialization
:param method: the HTTP Verb
:return: The id str
"""
return doc[method]['id'] if 'id' in doc[method] else self.api.default_id(doc['name'], method)
def responses_for(self, doc: Dict[str, Any], method: str) -> Dict[HTTPStatus, Dict[str, Any]]:
"""Get the Response dictionary for a given route and HTTP verb
:param doc: a mapping from HTTP verb to the properties for serialization
:param method: the HTTP Verb to get the responses for
:return: A dict mapping status codes to object descriptions as per the `openapi response object`__ spec.
__ https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#responseObject
"""
def process_response(resp: Union[str, Tuple]) -> Tuple[str, Any, Dict[str, Any]]:
description = ''
validator = None
kwargs = {}
if isinstance(resp, str):
description = resp
validator = None
kwargs = {}
elif len(resp) == 3:
description, validator, kwargs = resp
elif len(resp) == 2:
description, validator = resp
kwargs = {}
else:
raise ValueError('Unsupported response specification')
return (description, validator, kwargs)
responses = {}
for obj in doc, doc[method]:
if 'responses' in obj:
for code, response in obj['responses'].items():
description, validator, kwargs = process_response(response)
description = description or DEFAULT_RESPONSE_DESCRIPTION
if code in responses:
responses[code].update(description=description)
else:
responses[code] = {'description': description}
if validator:
if 'content' not in responses[code]:
responses[code]['content'] = {}
content_type = kwargs.get('content_type') or 'application/json'
if content_type not in responses[code]['content']:
responses[code]['content'][content_type] = {}
responses[code]['content'][content_type]['schema'] = self.serialize_schema(validator)
self.process_headers(responses[code], doc, method, kwargs.get('headers'))
if not responses:
responses[HTTPStatus.OK.value] = self.process_headers(DEFAULT_RESPONSE.copy(), doc, method)
return responses
@staticmethod
def process_headers(response: Dict[str, Any], doc: Dict[str, Any], method: Optional[str] = None,
headers: Optional[Dict[str, Union[str, Dict[str, Any]]]] = None) -> Dict[str, Any]:
"""Properly form the header parameter objects according to the openapi 3.0 spec
:param response: Response object definition
:param doc: a mapping from HTTP verb to the properties for serialization
:param method: the HTTP verb for specific requests or None for all in the resource
:param headers: Header object dict to add to whatever is already in the resource and function decorators
:return: The full set of headers for this particular route and request method joining the resource
level, method level and any additional headers passed in
"""
method_doc = doc.get(method, {})
if 'headers' in doc or 'headers' in method_doc or headers:
response['headers'] = dict(
(k, _clean_header(v)) for k, v in chain(
doc.get('headers', {}).items(),
method_doc.get('headers', {}).items(),
(headers or {}).items())
)
return response
def serialize_schema(self, validator: ValidatorTypes) -> Dict[str, Any]:
"""Given a validator normalize the schema definition
:param validator: either the name of a validator, a :class:`~jsonschema.Draft4Validator` instance,
or the actual type of the value. Passing a list or tuple will create a schema
for an array of that type
:return: The schema as defined by the openapi 3.0 spec as a dict
"""
if isinstance(validator, (list, tuple)):
validator = validator[0]
return {
'type': 'array',
'items': self.serialize_schema(validator)
}
if isinstance(validator, Draft4Validator):
return validator.schema
if isinstance(validator, str):
validator = self.api.get_validator(validator)
return validator.schema
if isinstance(validator, (type, type(None))) and validator in PY_TYPES:
return {'type': PY_TYPES[validator]}
return {}
def serialize_resource(self, resource: Union[Resource, Callable], path: str,
methods: Iterable[str]) -> Dict[str, Any]:
"""Use the docstring and any decorated info to create the resource object
:param resource: the Resource object or view function
:param path: the route path for this resource
:param methods: The list of available HTTP verbs for this route
:return: The dict conforming to the openapi 3.0 spec for a `path item object`__
__ https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#pathItemObject
"""
doc = self.extract_resource_doc(resource, path)
if doc is False:
return {}
path = {}
for method in [m.lower() for m in resource.methods or []]:
methods = [m.lower() for m in methods or []]
if doc[method] is False or methods and method not in methods:
continue
path[method] = self.serialize_operation(doc, method)
return not_none(path)
def serialize_operation(self, doc: Mapping[str, Any], method: str) -> Dict[str, Any]:
"""Serialize a single operation on the resource corresponding to a single HTTP verb
:param doc: a mapping from HTTP verb to the properties for serialization
:param method: The HTTP verb for this operation
:return: The dict openapi representation to be converted to json for this operation
"""
operation = {
'summary': doc[method]['docstring']['summary'],
'description': self.description_for(doc, method),
'tags': self.tags_for(doc[method]),
'parameters': self.parameters_for(doc[method]) or None,
'responses': self.responses_for(doc, method) or None,
'operationId': self.operation_id_for(doc, method)
}
body = merge(self.expected_params(doc), self.expected_params(doc[method]))
if body:
operation['requestBody'] = body
if doc.get('deprecated') or doc[method].get('deprecated'):
operation['deprecated'] = True
return not_none(operation)
@staticmethod
def extract_resource_doc(resource: Union[Resource, Callable], path: str) -> Dict[str, Any]:
"""Return the doc mapping for this resource that we saved on it
:param resource: The :class:`Resource` derived class or decorated view function
:param path: The route for this resource
:return: a mapping from HTTP verb to the properties for serialization
This returns the object that is passed into the `serialize_*` functions that expect
a `doc` parameter
"""
doc = getattr(resource, '__apidoc__', {})
if doc is False:
return False
doc['name'] = resource.__name__
params = merge(doc.get('params', OrderedDict()), _extract_path_params(path))
doc['params'] = params
tags = doc.get('tags', list())
doc['tags'] = tags
for method in [m.lower() for m in resource.methods or []]:
method_doc = doc.get(method, OrderedDict())
method_impl = getattr(resource, method)
if hasattr(method_impl, 'im_func'):
method_impl = method_impl.im_func
elif hasattr(method_impl, '__func__'):
method_impl = method_impl.__func__
method_doc = merge(method_doc, getattr(method_impl, '__apidoc__', OrderedDict()))
if method_doc is not False:
method_doc['docstring'] = parse_docstring(method_impl)
method_params = method_doc.get('params', {})
inherited_params = OrderedDict((k, v) for k, v in params.items())
method_doc['params'] = merge(inherited_params, method_params)
method_tags = method_doc.get('tags', [])
inherited_tags = sorted(list(tags))
method_doc['tags'] = merge(inherited_tags, method_tags)
doc[method] = method_doc
return doc
def expected_params(self, doc: Dict[str, Any]) -> Dict[str, Any]:
"""Return the `Media Type object
<https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#mediaTypeObject>`_
for the expected request body.
:param doc: a mapping from HTTP verb to the properties for serialization
:return: a dict containing the content type and schemas for the requestBody
"""
params = OrderedDict()
if 'expect' not in doc:
return params
for expect in doc.get('expect', []):
validator, content_type, kwargs = get_expect_args(expect)
if isinstance(validator, str):
validator = self.api.get_validator(validator)
elif not isinstance(validator, Draft4Validator):
continue
schema = self.serialize_schema(validator)
if '$ref' in schema and '/components/requestBodies/' in schema['$ref']:
return schema
params[content_type] = not_none(dict({
'schema': self.serialize_schema(validator)
}, **kwargs))
return {'content': params}
| 43.073826 | 113 | 0.595357 |
79438484e40112f9be328425cf71095e0f093eeb | 2,421 | py | Python | tests/test_vector3.py | crazymaik/ard-python | ef6dc62ae9853ac636be3a343aabf69082b74b8b | [
"MIT"
] | null | null | null | tests/test_vector3.py | crazymaik/ard-python | ef6dc62ae9853ac636be3a343aabf69082b74b8b | [
"MIT"
] | null | null | null | tests/test_vector3.py | crazymaik/ard-python | ef6dc62ae9853ac636be3a343aabf69082b74b8b | [
"MIT"
] | null | null | null | import context
import math
import pytest
from ard.vector3 import Vector3
class TestVector3:
def test_add_example(self):
u = Vector3(x=1, y=2, z=3)
v = Vector3(x=4, y=5, z=6)
actual = u.add(v)
assert actual.x == 5
assert actual.y == 7
assert actual.z == 9
def test_add_and_sub_equalize(self):
u = Vector3(x=1, y=2, z=3)
v = Vector3(x=4, y=5, z=6)
actual = u.add(v).sub(v)
assert actual.x == u.x
assert actual.y == u.y
assert actual.z == u.z
def test_add_and_sub_operators(self):
u = Vector3(x=1, y=2, z=3)
v = Vector3(x=4, y=5, z=6)
actual = u + v
assert actual.x == 5
assert actual.y == 7
assert actual.z == 9
actual = actual - v
assert actual.x == 1
assert actual.y == 2
assert actual.z == 3
def test_length_squared_example(self):
u = Vector3(x=1, y=2, z=3)
assert u.length_squared() == 14
def test_length_example(self):
u = Vector3(x=1, y=1, z=1)
assert u.length() == math.sqrt(3)
def test_dot_perpendicular_vector_is_zero(self):
u = Vector3(x=1, y=0, z=0)
v = Vector3(x=0, y=1, z=0)
assert u.dot(v) == 0
def test_dot_of_unit_vector_is_one(self):
u = Vector3(x=0, y=1, z=0)
v = Vector3(x=0, y=1, z=0)
assert u.dot(v) == 1
def test_cross_of_vector_is_perpendicular(self):
u = Vector3(x=0.5, y=0.5, z=0)
v = Vector3(x=-0.5, y=0.5, z=0)
actual = u.cross(v)
assert actual.x == 0
assert actual.y == 0
assert actual.z != 0
def test_cross_uv_and_vu_point_in_opposite_direction(self):
u = Vector3(x=1, y=2, z=3)
v = Vector3(x=2, y=3, z=1)
c0 = u.cross(v)
c1 = v.cross(u)
assert c0.x == -c1.x
assert c0.y == -c1.y
assert c0.z == -c1.z
def test_normalized_vector_has_length_one(self):
u = Vector3(x=1, y=1, z=0)
n = u.normalized()
assert n.length() == pytest.approx(1.0)
def test_equality_compares_values(self):
assert Vector3(x=1, y=2, z=3) == Vector3(x=1, y=2, z=3)
assert Vector3(x=1, y=2, z=3) != Vector3(x=0, y=0, z=0)
def test_hash_is_based_on_values(self):
u = Vector3(x=1, y=2, z=3)
v = Vector3(x=1, y=2, z=3)
assert hash(u) == hash(v)
| 28.821429 | 63 | 0.539447 |
794384874d17809add381bd0ed1c52b5bf3f5f4c | 670 | py | Python | examples/example_3_inducible/setup_bmss.py | EngBioNUS/BMSS2 | 41163c61a4e0ef3c6430e5954d81a77832e49a9d | [
"Apache-2.0"
] | null | null | null | examples/example_3_inducible/setup_bmss.py | EngBioNUS/BMSS2 | 41163c61a4e0ef3c6430e5954d81a77832e49a9d | [
"Apache-2.0"
] | null | null | null | examples/example_3_inducible/setup_bmss.py | EngBioNUS/BMSS2 | 41163c61a4e0ef3c6430e5954d81a77832e49a9d | [
"Apache-2.0"
] | 4 | 2020-08-24T13:35:55.000Z | 2022-03-07T16:48:12.000Z |
'''
Adds base directory to path so BMSS can be imported. You can just use import BMSS
if you have successfully installed it using pip.
'''
import sys
from os import getcwd, listdir
from os.path import abspath, dirname, join
#Get base directory
__base_dir__ = dirname(dirname(dirname(__file__)))
try:
import BMSS
except:
#Append to path
sys.path.insert(0, __base_dir__)
#Add Styles
try:
__src_dir__ = join(__base_dir__, 'BMSS')
library = join(__src_dir__, 'stylelib')
styles = {file.split('.')[0]: abspath(join(library,file)) for file in listdir(library)}
except Exception as e:
print(e.args)
styles = {}
| 23.103448 | 97 | 0.679104 |
79438b20506d0e78e13663f9417f24e5064adc78 | 2,170 | py | Python | agents/network/base_network.py | samuelfneumann/RLControl | 71430b1de2e4262483908932eb44579c2ec8216d | [
"Apache-2.0"
] | 9 | 2018-07-30T20:12:47.000Z | 2021-02-05T17:02:04.000Z | agents/network/base_network.py | samuelfneumann/RLControl | 71430b1de2e4262483908932eb44579c2ec8216d | [
"Apache-2.0"
] | 14 | 2020-01-28T22:38:58.000Z | 2022-02-10T00:11:21.000Z | agents/network/base_network.py | samuelfneumann/RLControl | 71430b1de2e4262483908932eb44579c2ec8216d | [
"Apache-2.0"
] | 3 | 2018-08-08T14:52:53.000Z | 2021-01-23T18:00:05.000Z | import tensorflow as tf
class BaseNetwork(object):
def __init__(self, sess, config, learning_rate):
"""
base network for actor and critic network.
Args:
sess: tf.Session()
config: Configuration object
learning_rate: learning rate for training (Could be an array if two-headed network)
"""
self.sess = sess
# Env config
self.state_dim = config.state_dim
self.state_min = config.state_min
self.state_max = config.state_max
self.action_dim = config.action_dim
self.action_min = config.action_min
self.action_max = config.action_max
self.learning_rate = learning_rate
self.tau = config.tau
self.norm_type = config.norm_type
def set_session(self, session):
self.session = session
def build_network(self, *args):
"""
build network.
"""
raise NotImplementedError("build network first!")
def train(self, *args):
raise NotImplementedError("train network!")
def predict(self, *args):
raise NotImplementedError("predict output for network!")
def predict_target(self, *args):
raise NotImplementedError("predict output for target network!")
def update_target_network(self):
raise NotImplementedError("update target network!")
def get_num_trainable_vars(self):
raise NotImplementedError("update target network!")
def apply_norm(self, net, activation_fn, phase, layer_num):
if self.norm_type == 'layer':
norm_net = tf.contrib.layers.layer_norm(net, center=True, scale=True, activation_fn=activation_fn)
elif self.norm_type == 'batch':
norm_net = tf.contrib.layers.batch_norm(net, fused=True, center=True, scale=True, activation_fn=activation_fn,
is_training=phase, scope='batchnorm_'+str(layer_num))
elif self.norm_type == 'none' or self.norm_type == 'input_norm':
norm_net = activation_fn(net)
else:
raise ValueError('unknown norm type')
return norm_net
| 31.911765 | 122 | 0.632719 |
79438c9b854902e87e38789dfcddc1cd111f4d6d | 272 | py | Python | notifications/tests/sample_notifications/admin.py | pandafy/django-notifications | 720c40576a9387a035a44aa800f423efd15c8038 | [
"BSD-3-Clause"
] | 1,354 | 2015-01-03T17:22:58.000Z | 2022-03-29T11:49:12.000Z | notifications/tests/sample_notifications/admin.py | pandafy/django-notifications | 720c40576a9387a035a44aa800f423efd15c8038 | [
"BSD-3-Clause"
] | 275 | 2015-01-19T21:32:51.000Z | 2022-03-30T10:07:14.000Z | notifications/tests/sample_notifications/admin.py | pandafy/django-notifications | 720c40576a9387a035a44aa800f423efd15c8038 | [
"BSD-3-Clause"
] | 385 | 2015-01-08T19:51:12.000Z | 2022-03-29T10:19:16.000Z | import swapper
from django.contrib import admin
from notifications.base.admin import AbstractNotificationAdmin
Notification = swapper.load_model('notifications', 'Notification')
@admin.register(Notification)
class NotificationAdmin(AbstractNotificationAdmin):
pass
| 24.727273 | 66 | 0.838235 |
79438d8404eedf800fd113aedf8ce572ec5e86fd | 613 | py | Python | data_cleaning/main.py | JuaniRios/4p-chess-prediction | f0fa49f16bade6089108d0b06bf2bbd1be8366f8 | [
"MIT"
] | null | null | null | data_cleaning/main.py | JuaniRios/4p-chess-prediction | f0fa49f16bade6089108d0b06bf2bbd1be8366f8 | [
"MIT"
] | null | null | null | data_cleaning/main.py | JuaniRios/4p-chess-prediction | f0fa49f16bade6089108d0b06bf2bbd1be8366f8 | [
"MIT"
] | null | null | null | from data_cleaning.filter_data import filter_data
from data_cleaning.data_manipulation import mk_move
from data_cleaning.to_hdf import to_hdf
def txt_to_h5(file_name):
"""
takes the name of a .txt file with games from chess.com, and converts it
to an hdf5 db.
"""
print("txt to h5. Step 1/3. \n Please wait...")
step1 = filter_data(file_name) # .txt to json
print("txt to h5... step 2/3. \n Please wait...")
step2 = mk_move(step1) # add additional data to json
print("txt to h5... step 3/3. \n Please wait...")
step3 = to_hdf(step2) # convert to hdf
return step3
| 32.263158 | 76 | 0.675367 |
79438ec4b0f18c2b90027c54e46052098b0b1220 | 1,170 | py | Python | sc/graphRegistry.py | Omegaice/smartcontainers | 0d2e75734dbf76c6aed73ee10b9590ed82c8f7e5 | [
"Apache-2.0"
] | 6 | 2016-04-26T20:22:31.000Z | 2021-05-03T23:38:11.000Z | sc/graphRegistry.py | Omegaice/smartcontainers | 0d2e75734dbf76c6aed73ee10b9590ed82c8f7e5 | [
"Apache-2.0"
] | 43 | 2016-03-10T15:03:01.000Z | 2016-06-06T15:28:27.000Z | sc/graphRegistry.py | Omegaice/smartcontainers | 0d2e75734dbf76c6aed73ee10b9590ed82c8f7e5 | [
"Apache-2.0"
] | 4 | 2016-03-02T17:18:26.000Z | 2016-03-18T14:13:11.000Z | # -*- coding: utf-8 -*-
"""RDFlib Graph Registry for SmartContainers.
This module provides a common interface to all RDFlib graphs created by all
vocabularies. New vocabularies should subclass baseVocabulary.
Since the registry has access to the SmartContainer global provenance graph
it also manages the named graph objects. The design specification is to have
a named graph for each docker state change (build, commit, run). Provenance
of the named graphs can then be provided by referencing the graph as a quad.
For more information about RDF 1.1 Datasets and named graphs see:
https://dvcs.w3.org/hg/rdf/raw-file/default/rdf-dataset/index.html
http://patterns.dataincubator.org/book/named-graphs.html
RDFLib Dataset graph object reference:
https://rdflib.readthedocs.org/en/stable/apidocs/rdflib.html#dataset
"""
import graphManager
import provVocabulary
import envVocabulary
# Create instances of registry and register vocabularies
scVocabRegistry = graphManager.VocabularyRegistry()
scProvVocab = provVocabulary.provVocabulary()
scVocabRegistry.register(scProvVocab)
scEnvVocabulary = envVocabulary.envVocabulary()
scVocabRegistry.register(envVocabulary)
| 43.333333 | 77 | 0.816239 |
7943903d9e45dd79dbdbdf01e333de7721d4c6ee | 14,197 | py | Python | affiliate/model/mysql_model.py | gods-view/AdclickIO | ccb73867e568aac5f40bd5890149626ce0be1897 | [
"BSD-2-Clause"
] | null | null | null | affiliate/model/mysql_model.py | gods-view/AdclickIO | ccb73867e568aac5f40bd5890149626ce0be1897 | [
"BSD-2-Clause"
] | null | null | null | affiliate/model/mysql_model.py | gods-view/AdclickIO | ccb73867e568aac5f40bd5890149626ce0be1897 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
from peewee import *
from affiliate.model.config import mysql, mysql_report
import time
db = MySQLDatabase(mysql['name'],
host=mysql['host'],
port=int(mysql['port']),
user=mysql['user'],
passwd=mysql['passwd']
)
# 旧的数据库连接
class BaseModel(Model):
"""A base model that will use our MySQL database"""
class Meta:
database = db
class CampaignMap(BaseModel):
OurCampId = IntegerField(null=False, default=0)
TheirCampId = CharField(null=False, default=0)
class Meta:
db_table = "CampaignMap"
index = (('OurCampId', True), ('TheirCampId', True))
class User(BaseModel):
idText = CharField(max_length=8, null=False)
email = CharField(max_length=50, null=False)
emailVerified = IntegerField(null=False, default=0)
contact = TextField(null=False)
password = CharField(max_length=256, null=False, default='')
firstname = CharField(max_length=256, null=False, default='')
lastname = CharField(max_length=256, null=False, default='')
campanyName = CharField(max_length=256, null=False, default='')
status = IntegerField(null=False, default=0)
registerts = IntegerField()
lastLogon = IntegerField()
timezone = CharField(max_length=6, null=False, default='+00:00')
timezoneId = IntegerField(null=False)
rootdomainredirect = CharField(max_length=512, null=False, default='')
json = TextField(null=False)
setting = TextField(null=False)
referralToken = CharField(max_length=128, null=False)
deleted = IntegerField(null=False, default=0)
class Meta:
db_table = "User"
index = (('idText', True), ('email', True))
class OfferSyncTask(BaseModel):
"""
task
"""
userId = IntegerField(null=False)
thirdPartyANId = IntegerField()
status = IntegerField(default=0) # 0:新建;1:运行中;2:出错;3:完成
executor = CharField(max_length=32, null=False) # 执行者的唯一标识 mac地址
message = TextField()
createdAt = IntegerField(null=False)
startedAt = IntegerField(null=False)
endedAt = IntegerField(null=False)
deleted = IntegerField(null=False, default=0) # 0:未删除;1:已删除
class Meta:
db_table = "OfferSyncTask"
class ThirdPartyAffiliateNetwork(BaseModel):
"""
affiliate login info
"""
userId = IntegerField(null=False)
trustedANId = IntegerField(null=False) # TemplateAffiliateNetwork
name = CharField(max_length=256, null=False, default='')
token = TextField()
userName = TextField()
password = TextField()
createdAt = IntegerField(null=False)
deleted = IntegerField(null=False, default=0)
class Meta:
db_table = "ThirdPartyAffiliateNetwork"
class TemplateAffiliateNetwork(BaseModel):
"""
provider
"""
name = CharField(max_length=256, null=False)
postbackParams = TextField(null=False) # 回调url中参数的写法:{cid:%subid1%;p:%commission%}
desc = TextField(null=False) # 关于该AfflicateNetwork的描述,HTML
apiOffer = IntegerField(null=False) # 0:不支持api拉取Offer;1:支持拉取Offer
apiName = CharField(max_length=256, null=False, help_text='api拉取时,区分用')
apiUrl = TextField(null=False)
apiParams = TextField(null=False)
apiMode = IntegerField(null=False) # 1:仅token;2:仅Username/password;3:token/up都支持
apiInterval = IntegerField(null=False, default=0) # 连续两次Task之间的最小间隔时间,0表示没有限制,单位:秒
apiOfferAutoSuffix = CharField(max_length=256, null=False, default='')
deleted = IntegerField(null=False, default=0)
class Meta:
db_table = "TemplateAffiliateNetwork"
class TemplateTrafficSource(BaseModel):
"""
TemplateTrafficSource
"""
id = IntegerField(null=False)
order = IntegerField(null=False)
name = CharField(max_length=256, null=False)
class Meta:
db_table = "TemplateTrafficSource"
class ThirdPartyCountryCode(BaseModel):
"""
CountryCode
"""
key_code = CharField()
val_code = CharField()
class Meta:
db_table = "ThirdPartyCountryCode"
class ThirdPartyOffer(BaseModel):
"""
offer
"""
updatetime = TimeField()
sourcename = CharField(max_length=20)
userId = IntegerField(null=False)
taskId = IntegerField(null=False)
status = IntegerField(null=False)
offerId = TextField()
name = CharField(max_length=256, null=False, default='')
previewLink = TextField()
trackingLink = TextField()
countryCode = TextField()
payoutMode = IntegerField(null=False, default=1)
payoutValue = CharField(null=False, default='0.00000')
category = TextField()
carrier = TextField()
platform = TextField()
detail = TextField()
class Meta:
db_table = "ThirdPartyOffer"
class Country(BaseModel):
name = CharField(max_length=256, null=False)
alpha2Code = CharField(max_length=2, null=False)
alpha3Code = CharField(max_length=3, null=False)
numCode = IntegerField(null=False)
class Meta:
db_table = "Country"
index = (('alpha2Code', True), ('alpha3Code', True), ('numCode', True))
class Flow(BaseModel):
name = CharField(max_length=256, null=False)
class Meta:
db_table = "Flow"
index = ('id', True)
class Lander(BaseModel):
name = CharField(max_length=256, null=False)
class Meta:
db_table = "Lander"
index = ('id', True)
class Offer(BaseModel):
name = CharField(max_length=256, null=False)
payoutMode = IntegerField(null=False)
payoutValue = FloatField(null=False)
class Meta:
db_table = "Offer"
index = ('id', True)
class TrackingCampaign(BaseModel):
id = IntegerField(null=False)
status = IntegerField(null=False)
name = CharField(max_length=256, null=False)
remark = CharField(max_length=1000, null=False)
TheirCampName = CharField(max_length=1000, null=False)
class Meta:
db_table = "TrackingCampaign"
index = ('id', True)
class TrafficSource(BaseModel):
id = IntegerField(null=False)
userid = IntegerField(null=False)
name = CharField(max_length=256, null=False)
trafficTemplateId = IntegerField(default=0, null=False)
token = CharField(max_length=128)
account = CharField(max_length=128)
password = CharField(max_length=128)
integrations = IntegerField(null=False)
class Meta:
db_table = "TrafficSource"
index = ('id', True)
class AffiliateNetwork(BaseModel):
name = CharField(max_length=256, null=False)
class Meta:
db_table = "AffiliateNetwork"
index = ('id', True)
class AdConversionsStatis(BaseModel):
UserID = CharField(max_length=256, null=True, default='')
PostbackTimestamp = CharField(max_length=256, null=True, default='')
VisitTimestamp = CharField(max_length=256, null=True, default='')
ExternalID = CharField(max_length=256, null=True, default='')
ClickID = CharField(max_length=256, null=True, default='')
TransactionID = CharField(max_length=256, null=True, default='')
Revenue = CharField(max_length=256, null=True, default='0.0')
Cost = CharField(max_length=256, null=True, default='0.0')
CampaignName = CharField(max_length=256, null=True, default='')
CampaignID = CharField(max_length=256, null=True, default='')
LanderName = CharField(max_length=256, null=True, default='')
LanderID = CharField(max_length=256, null=True, default='')
OfferName = CharField(max_length=256, null=True, default='')
OfferID = CharField(max_length=256, null=True, default='')
Country = CharField(max_length=256, null=True, default='')
CountryCode = CharField(max_length=256, null=True, default='')
TrafficSourceName = CharField(max_length=256, null=True, default='')
TrafficSourceID = CharField(max_length=256, null=True, default='')
AffiliateNetworkName = CharField(max_length=256, null=True, default='')
AffiliateNetworkID = CharField(max_length=256, null=True, default='')
Device = CharField(max_length=256, null=True, default='')
OS = CharField(max_length=256, null=True, default='')
OSVersion = CharField(max_length=256, null=True, default='')
Brand = CharField(max_length=256, null=True, default='')
Model = CharField(max_length=256, null=True, default='')
Browser = CharField(max_length=256, null=True, default='')
BrowserVersion = CharField(max_length=256, null=True, default='')
ISP = CharField(max_length=256, null=True, default='')
MobileCarrier = CharField(max_length=256, null=True, default='')
ConnectionType = CharField(max_length=256, null=True, default='')
VisitorIP = CharField(max_length=256, null=True, default='')
VisitorReferrer = CharField(max_length=256, null=True, default='')
V1 = CharField(max_length=256, null=True, default='')
V2 = CharField(max_length=256, null=True, default='')
V3 = CharField(max_length=256, null=True, default='')
V4 = CharField(max_length=256, null=True, default='')
V5 = CharField(max_length=256, null=True, default='')
V6 = CharField(max_length=256, null=True, default='')
V7 = CharField(max_length=256, null=True, default='')
V8 = CharField(max_length=256, null=True, default='')
V9 = CharField(max_length=256, null=True, default='')
V10 = CharField(max_length=256, null=True, default='')
class Meta:
db_table = "AdConversionsStatis"
index = (('ClickID', True))
class AdStatisLog(BaseModel):
UserID = CharField(null=False, default=0)
CampaignID = CharField(null=False, default=0)
CampaignName = CharField(max_length=256, null=True, default='')
FlowID = CharField(null=True, default=0)
FlowName = CharField(max_length=256, null=True, default='')
LanderID = CharField(null=True, default=0)
LanderName = CharField(max_length=256, null=True, default='')
OfferID = CharField(null=True, default=0)
OfferName = CharField(max_length=256, null=True, default='')
OfferUrl = CharField(max_length=256, null=True, default='')
OfferCountry = CharField(max_length=256, null=True, default='')
AffiliateNetworkID = CharField(null=True, default=0)
AffilliateNetworkName = CharField(max_length=256, null=True, default='')
TrafficSourceID = CharField(null=True, default=0)
TrafficSourceName = CharField(max_length=256, null=True, default='')
Language = CharField(max_length=256, null=True, default='')
Model = CharField(max_length=256, null=True, default='')
Country = CharField(max_length=256, null=True, default='')
City = CharField(max_length=256, null=True, default='')
Region = CharField(max_length=256, null=True, default='')
ISP = CharField(max_length=256, null=True, default='')
MobileCarrier = CharField(max_length=256, null=True, default='')
Domain = CharField(max_length=256, null=True, default='')
DeviceType = CharField(max_length=256, null=True, default='')
Brand = CharField(max_length=256, null=True, default='')
OS = CharField(max_length=256, null=True, default='')
OSVersion = CharField(max_length=256, null=True, default='')
Browser = CharField(max_length=256, null=True, default='')
BrowserVersion = CharField(max_length=256, null=True, default='')
ConnectionType = CharField(max_length=256, null=True, default='')
Timestamp = CharField(null=True, default=0)
Visits = CharField(null=True, default=0)
Clicks = CharField(null=True, default=0)
Conversions = CharField(null=True, default=0)
Cost = CharField(null=True, default=0)
Revenue = CharField(null=True, default=0)
Impressions = CharField(null=True, default=0)
KeysMD5 = CharField(max_length=256, null=True, default='')
Ip = CharField(max_length=256, null=True, default='')
V1 = CharField(max_length=256, null=True, default='')
V2 = CharField(max_length=256, null=True, default='')
V3 = CharField(max_length=256, null=True, default='')
V4 = CharField(max_length=256, null=True, default='')
V5 = CharField(max_length=256, null=True, default='')
V6 = CharField(max_length=256, null=True, default='')
V7 = CharField(max_length=256, null=True, default='')
V8 = CharField(max_length=256, null=True, default='')
V9 = CharField(max_length=256, null=True, default='')
V10 = CharField(max_length=256, null=True, default='')
tsCampaignId = CharField(max_length=256, null=True, default='')
tsWebsiteId = CharField(max_length=256, null=True, default='')
class Meta:
db_table = "AdStatis"
index = (('KeysMD5', True))
class AdCost(BaseModel):
id = CharField(null=False)
CampaignID = CharField(max_length=50, null=True)
userid = IntegerField(null=False)
WebsiteId = CharField(max_length=50, null=False, default='')
WebsiteChildId = CharField(max_length=50, null=False, default='')
Cost = CharField(max_length=50, null=False, default='')
Createtime = BigIntegerField()
Status = IntegerField(null=False)
# State = IntegerField(null=False, default=0)
type = IntegerField(null=False)
State = CharField(null=False)
begintime = BigIntegerField()
endtime = BigIntegerField()
updatecost = IntegerField(default=0)
TrafficsourceId = CharField(max_length=100)
remark = CharField(max_length=255)
updatebid = IntegerField(default=0)
bid = FloatField(null=True)
class Meta:
db_table = "AdCost"
class WebsiteId(BaseModel):
id = IntegerField(null=False)
userId = IntegerField(null=False)
status = IntegerField(null=False)
web_site_id = CharField(max_length=256)
state = IntegerField(null=False)
remark = CharField(max_length=256)
campaignId = IntegerField(null=False)
TrafficSourceId = IntegerField(null=False)
class Meta:
db_table = "WebSiteId"
class UserBilling(BaseModel):
totalEvents = IntegerField(null=False)
billedEvents = IntegerField(null=False)
userId = IntegerField(null=False)
expired = IntegerField(null=False)
class Meta:
db_table = "UserBilling"
db.connect()
# a = Country.update(name='ccc').where(Country.id == 1).execute()
# pass
| 36.216837 | 87 | 0.683525 |
7943919783a547676eb4f8c2813e2b0de4121377 | 4,427 | py | Python | tests/tests_emg.py | vansjyo/NeuroKit | 238cd3d89467f7922c68a3a4c1f44806a8466922 | [
"MIT"
] | null | null | null | tests/tests_emg.py | vansjyo/NeuroKit | 238cd3d89467f7922c68a3a4c1f44806a8466922 | [
"MIT"
] | null | null | null | tests/tests_emg.py | vansjyo/NeuroKit | 238cd3d89467f7922c68a3a4c1f44806a8466922 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import neurokit2 as nk
import matplotlib.pyplot as plt
import scipy.stats
import biosppy
# =============================================================================
# EMG
# =============================================================================
def test_emg_simulate():
emg1 = nk.emg_simulate(duration=20, length=5000, burst_number=1)
assert len(emg1) == 5000
emg2 = nk.emg_simulate(duration=20, length=5000, burst_number=15)
assert scipy.stats.median_absolute_deviation(emg1) < scipy.stats.median_absolute_deviation(emg2)
emg3 = nk.emg_simulate(duration=20, length=5000, burst_number=1, burst_duration=2.0)
# pd.DataFrame({"EMG1":emg1, "EMG3": emg3}).plot()
assert len(nk.signal_findpeaks(emg3, height_min=1.0)["Peaks"]) > len(nk.signal_findpeaks(emg1, height_min=1.0)["Peaks"])
def test_emg_clean():
sampling_rate=1000
emg = nk.emg_simulate(duration=20, sampling_rate=sampling_rate)
emg_cleaned = nk.emg_clean(emg, sampling_rate=sampling_rate)
assert emg.size == emg_cleaned.size
# Comparison to biosppy (https://github.com/PIA-Group/BioSPPy/blob/e65da30f6379852ecb98f8e2e0c9b4b5175416c3/biosppy/signals/emg.py)
original, _, _ = biosppy.tools.filter_signal(signal=emg,
ftype='butter',
band='highpass',
order=4,
frequency=100,
sampling_rate=sampling_rate)
emg_cleaned_biosppy = nk.signal_detrend(original, order=0)
assert np.allclose((emg_cleaned - emg_cleaned_biosppy).mean(), 0, atol=1e-6)
def test_emg_plot():
sampling_rate=1000
emg = nk.emg_simulate(duration=10, sampling_rate=1000, burst_number=3)
emg_summary, _ = nk.emg_process(emg, sampling_rate=sampling_rate)
# Plot data over samples.
nk.emg_plot(emg_summary)
# This will identify the latest figure.
fig = plt.gcf()
assert len(fig.axes) == 2
titles = ["Raw and Cleaned Signal",
"Muscle Activation"]
for (ax, title) in zip(fig.get_axes(), titles):
assert ax.get_title() == title
assert fig.get_axes()[1].get_xlabel() == "Samples"
np.testing.assert_array_equal(fig.axes[0].get_xticks(),
fig.axes[1].get_xticks())
plt.close(fig)
# Plot data over time.
nk.emg_plot(emg_summary, sampling_rate=sampling_rate)
# This will identify the latest figure.
fig = plt.gcf()
assert fig.get_axes()[1].get_xlabel() == "Time (seconds)"
def test_emg_eventrelated():
emg = nk.emg_simulate(duration=20, sampling_rate=1000, burst_number=3)
emg_signals, info = nk.emg_process(emg, sampling_rate=1000)
epochs = nk.epochs_create(emg_signals, events=[3000, 6000, 9000],
sampling_rate=1000,
epochs_start=-0.1, epochs_end=1.9)
emg_eventrelated = nk.emg_eventrelated(epochs)
# Test amplitude features
no_activation = np.where(emg_eventrelated["EMG_Activation"] == 0)[0][0]
assert int(pd.DataFrame(emg_eventrelated.values
[no_activation]).isna().sum()) == 4
assert np.alltrue(np.nansum(np.array(
emg_eventrelated["EMG_Amplitude_Mean"])) <
np.nansum(np.array(
emg_eventrelated["EMG_Amplitude_Max"])))
assert len(emg_eventrelated["Label"]) == 3
def test_emg_intervalrelated():
emg = nk.emg_simulate(duration=40, sampling_rate=1000, burst_number=3)
emg_signals, info = nk.emg_process(emg, sampling_rate=1000)
columns = ['EMG_Activation_N', 'EMG_Amplitude_Mean']
# Test with signal dataframe
features_df = nk.emg_intervalrelated(emg_signals)
assert all(elem in columns for elem
in np.array(features_df.columns.values, dtype=str))
assert features_df.shape[0] == 1 # Number of rows
# Test with dict
epochs = nk.epochs_create(emg_signals, events=[0, 20000],
sampling_rate=1000, epochs_end=20)
features_dict = nk.emg_intervalrelated(epochs)
assert all(elem in columns for elem
in np.array(features_dict.columns.values, dtype=str))
assert features_dict.shape[0] == 2 # Number of rows
| 37.837607 | 135 | 0.615541 |
794391b7ea7953b97340b5e0fa229186988762cf | 1,586 | py | Python | test_scripts/functional_tests/wallet/open_wallet_test.py | hyperledger/indy-post-install-automation | a19cb3c66f0adea6bb4c1fc20e1509cc97bd3d5f | [
"Apache-2.0"
] | 2 | 2021-08-23T15:20:22.000Z | 2021-12-03T01:58:02.000Z | test_scripts/functional_tests/wallet/open_wallet_test.py | hyperledger-archives/indy-post-install-automation | a19cb3c66f0adea6bb4c1fc20e1509cc97bd3d5f | [
"Apache-2.0"
] | 1 | 2018-02-22T10:04:41.000Z | 2018-02-22T10:04:41.000Z | test_scripts/functional_tests/wallet/open_wallet_test.py | hyperledger/indy-post-install-automation | a19cb3c66f0adea6bb4c1fc20e1509cc97bd3d5f | [
"Apache-2.0"
] | 7 | 2018-01-03T20:45:48.000Z | 2019-08-12T11:02:31.000Z | """
Created on Dec 08, 2017
@author: khoi.ngo
Implementing test case open_wallet with valid value.
"""
from indy.error import IndyError
import pytest
from utilities import common
from utilities.result import Status
from utilities.test_scenario_base import TestScenarioBase
from utilities.utils import perform
from indy import pool
class TestOpenWallet(TestScenarioBase):
@pytest.mark.asyncio
async def test(self):
await pool.set_protocol_version(2)
# 1. Create and open a pool
self.steps.add_step("Create and open a pool")
self.pool_handle = await perform(self.steps,
common.create_and_open_pool,
self.pool_name,
self.pool_genesis_txn_file)
# 2. Create and open a wallet
self.steps.add_step("Create and open a wallet")
returned_code = await perform(self.steps,
common.create_and_open_wallet,
self.pool_name, self.wallet_name, self.wallet_credentials)
# 3. Verify that user is able to open a new wallet
self.steps.add_step("Verify the response code of open_wallet API.")
if not isinstance(returned_code, IndyError):
self.wallet_handle = returned_code # using for post-condition
self.steps.get_last_step().set_status(Status.PASSED)
else:
self.steps.get_last_step().set_message(
"Failed. Cannot open the wallet which was created.")
| 36.045455 | 96 | 0.627995 |
794392091da644e03337eb56df872a6c97689b07 | 902 | py | Python | demos/biotool/oauth2client/contrib/django_util/site.py | Servir-Mekong/biotool | 80ef1b18e34db637bf11d2ab84782e6a1a2dddd0 | [
"Apache-2.0"
] | 1 | 2016-09-09T14:45:45.000Z | 2016-09-09T14:45:45.000Z | demos/biotool/oauth2client/contrib/django_util/site.py | Servir-Mekong/Eco-Dashboard | 80ef1b18e34db637bf11d2ab84782e6a1a2dddd0 | [
"Apache-2.0"
] | null | null | null | demos/biotool/oauth2client/contrib/django_util/site.py | Servir-Mekong/Eco-Dashboard | 80ef1b18e34db637bf11d2ab84782e6a1a2dddd0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf import urls
from oauth2client.contrib.django_util import views
urlpatterns = [
urls.url(r'oauth2callback/', views.oauth2_callback, name="callback"),
urls.url(r'oauth2authorize/', views.oauth2_authorize, name="authorize")
]
urls = (urlpatterns, "google_oauth", "google_oauth")
| 36.08 | 75 | 0.759424 |
794392491dc0b95ea313e760db2a0367077f052d | 3,661 | py | Python | cride/rides/migrations/0001_initial.py | danhergir/cride | b346138ec597e4f58feed8b1ca6826d214f08135 | [
"MIT"
] | null | null | null | cride/rides/migrations/0001_initial.py | danhergir/cride | b346138ec597e4f58feed8b1ca6826d214f08135 | [
"MIT"
] | null | null | null | cride/rides/migrations/0001_initial.py | danhergir/cride | b346138ec597e4f58feed8b1ca6826d214f08135 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.9 on 2021-06-13 17:27
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('circles', '0004_invitation'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Rating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, help_text='Date time on which the object was created', verbose_name='created_at')),
('modified', models.DateTimeField(auto_now_add=True, help_text='Date time on which the object was last modified', verbose_name='modified_at')),
('comments', models.TextField(blank=True)),
('rating', models.IntegerField(default=1)),
('circle', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='circles.Circle')),
('rated_user', models.ForeignKey(help_text='User that receives the rating.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='rated_user', to=settings.AUTH_USER_MODEL)),
('rating_user', models.ForeignKey(help_text='User that emits the rating', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='rating_user', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-created', '-modified'],
'get_latest_by': 'created',
'abstract': False,
},
),
migrations.CreateModel(
name='Ride',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, help_text='Date time on which the object was created', verbose_name='created_at')),
('modified', models.DateTimeField(auto_now_add=True, help_text='Date time on which the object was last modified', verbose_name='modified_at')),
('available_seats', models.PositiveSmallIntegerField(default=1)),
('comments', models.TextField(blank=True)),
('departure_location', models.CharField(max_length=255)),
('departure_date', models.DateTimeField()),
('arrival_location', models.CharField(max_length=255)),
('arrival_date', models.DateTimeField()),
('rating', models.FloatField(null=True)),
('is_active', models.BooleanField(default=True, help_text='Used for disabling the ride or marking it as finished.', verbose_name='active status')),
('offered_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
('offered_in', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='circles.Circle')),
('passengers', models.ManyToManyField(related_name='passengers', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-created', '-modified'],
'get_latest_by': 'created',
'abstract': False,
},
),
migrations.AddField(
model_name='rating',
name='ride',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rated_ride', to='rides.Ride'),
),
]
| 55.469697 | 207 | 0.62442 |
794393b01c036f0976522ce1ea21d17622e05a1e | 681 | py | Python | setup.py | hmlingesh/csv-to-html-table | 114d8c85a121b32604d321973f854614b5a9e8b5 | [
"MIT"
] | null | null | null | setup.py | hmlingesh/csv-to-html-table | 114d8c85a121b32604d321973f854614b5a9e8b5 | [
"MIT"
] | null | null | null | setup.py | hmlingesh/csv-to-html-table | 114d8c85a121b32604d321973f854614b5a9e8b5 | [
"MIT"
] | null | null | null | """
Hello World app for running Python apps on Bluemix
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='python-hello-world-flask',
version='1.0.0',
description='Hello World app for running Python apps on Bluemix',
long_description=long_description,
url='https://github.com/IBM-Bluemix/python-hello-world-flask',
license='Apache-2.0'
)
| 26.192308 | 69 | 0.732746 |
794393d1a7e8c7597428dd549930a830b3e2e2b8 | 386 | py | Python | events/migrations/0028_event_holiday.py | McCarthyCode/Market-to-Market-Chicago | 15d491f6f45c0899864ae9256f2808e46e0e140b | [
"MIT"
] | null | null | null | events/migrations/0028_event_holiday.py | McCarthyCode/Market-to-Market-Chicago | 15d491f6f45c0899864ae9256f2808e46e0e140b | [
"MIT"
] | 1 | 2020-06-09T11:15:17.000Z | 2020-06-09T11:15:17.000Z | events/migrations/0028_event_holiday.py | mattmc318/Market-to-Market-Chicago | 15d491f6f45c0899864ae9256f2808e46e0e140b | [
"MIT"
] | null | null | null | # Generated by Django 3.0.8 on 2020-10-22 03:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0027_auto_20200512_1857'),
]
operations = [
migrations.AddField(
model_name='event',
name='holiday',
field=models.BooleanField(default=False),
),
]
| 20.315789 | 53 | 0.598446 |
794393fb589763a930fda72bea185e4c76b867ae | 2,020 | py | Python | code/python3/index_sorting.py | jaylett/xapian-docsprint | 2e8fdffecf71f7042c0abe49924ba48c11818b7e | [
"MIT"
] | 47 | 2015-01-20T15:38:41.000Z | 2022-02-15T21:03:50.000Z | code/python3/index_sorting.py | jaylett/xapian-docsprint | 2e8fdffecf71f7042c0abe49924ba48c11818b7e | [
"MIT"
] | 16 | 2015-06-09T16:12:50.000Z | 2020-02-05T06:40:18.000Z | code/python3/index_sorting.py | jaylett/xapian-docsprint | 2e8fdffecf71f7042c0abe49924ba48c11818b7e | [
"MIT"
] | 56 | 2015-01-20T15:38:44.000Z | 2022-03-03T18:13:39.000Z | #!/usr/bin/env python
import json
import sys
import xapian
from support import parse_csv_file
def index(datapath, dbpath):
# Create or open the database we're going to be writing to.
db = xapian.WritableDatabase(dbpath, xapian.DB_CREATE_OR_OPEN)
# Set up a TermGenerator that we'll use in indexing.
termgenerator = xapian.TermGenerator()
termgenerator.set_stemmer(xapian.Stem("en"))
for fields in parse_csv_file(datapath):
# 'fields' is a dictionary mapping from field name to value.
# Pick out the fields we're going to index.
description = fields.get('DESCRIPTION', u'')
title = fields.get('TITLE', u'')
identifier = fields.get('id_NUMBER', u'')
collection = fields.get('COLLECTION', u'')
maker = fields.get('MAKER', u'')
# We make a document and tell the term generator to use this.
doc = xapian.Document()
termgenerator.set_document(doc)
# Index each field with a suitable prefix.
termgenerator.index_text(title, 1, 'S')
termgenerator.index_text(description, 1, 'XD')
# Index fields without prefixes for general search.
termgenerator.index_text(title)
termgenerator.increase_termpos()
termgenerator.index_text(description)
### Start of example code.
# add the collection as a value in slot 0
doc.add_value(0, collection)
# add the maker as a value in slot 1
doc.add_value(1, maker)
### End of example code.
# Store all the fields for display purposes.
doc.set_data(json.dumps(fields))
# We use the identifier to ensure each object ends up in the
# database only once no matter how many times we run the
# indexer.
idterm = u"Q" + identifier
doc.add_boolean_term(idterm)
db.replace_document(idterm, doc)
if len(sys.argv) != 3:
print("Usage: %s DATAPATH DBPATH" % sys.argv[0])
sys.exit(1)
index(datapath = sys.argv[1], dbpath = sys.argv[2])
| 33.114754 | 69 | 0.654455 |
794394713869e26b0d0b88070f8f1a200e0683f3 | 2,794 | py | Python | examples/trader/stock_traders.py | ringwraith/zvt | ff5844ff7991132bbf38d464f29f461dba5efa14 | [
"MIT"
] | 1 | 2019-08-24T02:26:51.000Z | 2019-08-24T02:26:51.000Z | examples/trader/stock_traders.py | ringwraith/zvt | ff5844ff7991132bbf38d464f29f461dba5efa14 | [
"MIT"
] | null | null | null | examples/trader/stock_traders.py | ringwraith/zvt | ff5844ff7991132bbf38d464f29f461dba5efa14 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from zvt.domain.common import TradingLevel
from zvt.factors.technical_factor import CrossMaFactor, BullFactor
from zvt.selectors.selector import TargetSelector
from zvt.settings import SAMPLE_STOCK_CODES
from zvt.trader.impls import StockTrader
# make sure run init_data_sample.py to init the data sample at first
# or you could change settings.DATA_PATH to your data path,and run the recorders for the data
class MyMaTrader(StockTrader):
def init_selectors(self, security_list, security_type, exchanges, codes, start_timestamp, end_timestamp):
myselector = TargetSelector(security_list=security_list, security_type=security_type, exchanges=exchanges,
codes=codes, start_timestamp=start_timestamp, end_timestamp=end_timestamp,
provider='joinquant')
myselector.add_filter_factor(
CrossMaFactor(security_list=security_list, security_type=security_type, exchanges=exchanges,
codes=codes, start_timestamp=start_timestamp, end_timestamp=end_timestamp))
self.selectors.append(myselector)
class MyBullTrader(StockTrader):
def init_selectors(self, security_list, security_type, exchanges, codes, start_timestamp, end_timestamp):
myselector = TargetSelector(security_list=security_list, security_type=security_type, exchanges=exchanges,
codes=codes, start_timestamp=start_timestamp, end_timestamp=end_timestamp,
provider='joinquant')
myselector.add_filter_factor(
BullFactor(security_list=security_list, security_type=security_type, exchanges=exchanges,
codes=codes, start_timestamp=start_timestamp, end_timestamp=end_timestamp))
self.selectors.append(myselector)
if __name__ == '__main__':
# single stock with cross ma factor
MyMaTrader(codes=['000338'], level=TradingLevel.LEVEL_1DAY, start_timestamp='2018-01-01',
end_timestamp='2019-06-30', trader_name='000338_ma_trader').run()
# single stock with bull factor
MyBullTrader(codes=['000338'], level=TradingLevel.LEVEL_1DAY, start_timestamp='2018-01-01',
end_timestamp='2019-06-30', trader_name='000338_bull_trader').run()
# multiple stocks with cross ma factor
MyMaTrader(codes=SAMPLE_STOCK_CODES, level=TradingLevel.LEVEL_1DAY, start_timestamp='2018-01-01',
end_timestamp='2019-06-30', trader_name='sample_stocks_ma_trader').run()
# multiple stocks with bull factor
MyBullTrader(codes=SAMPLE_STOCK_CODES, level=TradingLevel.LEVEL_1DAY, start_timestamp='2018-01-01',
end_timestamp='2019-06-30', trader_name='sample_stocks_bull_trader').run()
| 51.740741 | 114 | 0.719757 |
794395bff0077ab2aeee6e61c4575ee4acd4d76d | 12,575 | py | Python | .ipynb_checkpoints/augment-ignore-checkpoint.py | jkooy/darts_ignoring | 7ae7c769cffe81441af9e1a0e0b92552245ae1d1 | [
"MIT"
] | null | null | null | .ipynb_checkpoints/augment-ignore-checkpoint.py | jkooy/darts_ignoring | 7ae7c769cffe81441af9e1a0e0b92552245ae1d1 | [
"MIT"
] | null | null | null | .ipynb_checkpoints/augment-ignore-checkpoint.py | jkooy/darts_ignoring | 7ae7c769cffe81441af9e1a0e0b92552245ae1d1 | [
"MIT"
] | null | null | null | """ Training augmented model """
import os
import torch
import torch.nn as nn
import numpy as np
from tensorboardX import SummaryWriter
from config import AugmentConfig
import utils
from models.augment_cnn import AugmentCNN
import copy
config = AugmentConfig()
device = torch.device("cuda")
# tensorboard
writer = SummaryWriter(log_dir=os.path.join(config.path, "tb"))
writer.add_text('config', config.as_markdown(), 0)
logger = utils.get_logger(os.path.join(config.path, "{}.log".format(config.name)))
config.print_params(logger.info)
class Architect():
""" Compute gradients of alphas """
def __init__(self, net, w_momentum, w_weight_decay):
"""
Args:
net
w_momentum: weights momentum
"""
self.net = net
self.v_net = copy.deepcopy(net)
self.w_momentum = w_momentum
self.w_weight_decay = w_weight_decay
def virtual_step(self, trn_X, trn_y, xi, w_optim, model, Likelihood, batch_size, step):
"""
Compute unrolled weight w' (virtual step)
Step process:
1) forward
2) calc loss
3) compute gradient (by backprop)
4) update gradient
Args:
xi: learning rate for virtual gradient step (same as weights lr)
w_optim: weights optimizer
"""
# forward & calc loss
dataIndex = len(trn_y)+step*batch_size
ignore_crit = nn.CrossEntropyLoss(reduction='none').cuda()
# forward
logits,_ = self.net(trn_X)
# sigmoid loss
loss = torch.dot(torch.sigmoid(Likelihood[step*batch_size:dataIndex]), ignore_crit(logits, trn_y))/(torch.sigmoid(Likelihood[step*batch_size:dataIndex]).sum())
loss.backward()
dtloss_ll = Likelihood.grad
dtloss_w = []
# do virtual step (update gradient)
# below operations do not need gradient tracking
with torch.no_grad():
# dict key is not the value, but the pointer. So original network weight have to
# be iterated also.
for w, vw in zip(self.net.weights(), self.v_net.weights()):
m = w_optim.state[w].get('momentum_buffer', 0.) * self.w_momentum
if w.grad is not None:
vw.copy_(w - xi * (m + w.grad ))
dtloss_w.append(m + w.grad )
elif w.grad is None:
dtloss_w.append(w.grad )
return dtloss_w, dtloss_ll
# 1399:[48, 3, 3, 3], 1:25000
def unrolled_backward(self, trn_X, trn_y, val_X, val_y, xi, w_optim, model, likelihood, Likelihood_optim, batch_size, step):
""" Compute unrolled loss and backward its gradients
Args:
xi: learning rate for virtual gradient step (same as net lr)
w_optim: weights optimizer - for virtual step
"""
# do virtual step (calc w`)
dtloss_w, dtloss_ll = self.virtual_step(trn_X, trn_y, xi, w_optim, model, likelihood, batch_size, step)
logits, aux_logits = self.v_net(val_X)
# calc unrolled loss
ignore_crit = nn.CrossEntropyLoss(reduction='none').to(device)
dataIndex = len(trn_y)+step*batch_size
loss = torch.dot(torch.sigmoid(likelihood[step*batch_size:dataIndex]), ignore_crit(logits, trn_y))
loss = loss/(torch.sigmoid(likelihood[step*batch_size:dataIndex]).sum()) # L_val(w`)
# compute gradient
loss.backward()
dvloss_tloss = 0
for v, dt in zip(self.v_net.weights(), dtloss_w):
if v.grad is not None:
grad_valw_d_trainw = torch.div(v.grad, dt)
grad_valw_d_trainw[torch.isinf(grad_valw_d_trainw)] = 0
grad_valw_d_trainw[torch.isnan(grad_valw_d_trainw)] = 0
grad_val_train = torch.sum(grad_valw_d_trainw)
# print(grad_val_train)
dvloss_tloss += grad_val_train
dlikelihood = dvloss_tloss* dtloss_ll
vprec1, vprec5 = utils.accuracy(logits, val_y, topk=(1, 5))
Likelihood_optim.zero_grad()
likelihood.grad = dlikelihood
print(dvloss_tloss)
print(dtloss_ll)
print('likelihood gradient is:', likelihood.grad)
Likelihood_optim.step()
return likelihood, Likelihood_optim, loss, vprec1, vprec5
def main():
logger.info("Logger is set - training start")
# set default gpu device id
torch.cuda.set_device(config.gpus[0])
# set seed
np.random.seed(config.seed)
torch.manual_seed(config.seed)
torch.cuda.manual_seed_all(config.seed)
torch.backends.cudnn.benchmark = True
# get data with meta info
input_size, input_channels, n_classes, train_val_data, test_data = utils.get_data(
config.dataset, config.data_path, config.cutout_length, validation=True)
criterion = nn.CrossEntropyLoss().to(device)
use_aux = config.aux_weight > 0.
model = AugmentCNN(input_size, input_channels, config.init_channels, n_classes, config.layers,
use_aux, config.genotype).to(device) #single GPU
# model = nn.DataParallel(model, device_ids=config.gpus).to(device)
# model size
mb_params = utils.param_size(model)
logger.info("Model size = {:.3f} MB".format(mb_params))
# weights optimizer with SGD
optimizer = torch.optim.SGD(model.parameters(), config.lr, momentum=config.momentum,
weight_decay=config.weight_decay)
n_train = len(train_val_data)
split = n_train // 2
indices = list(range(n_train))
# each train data is endowed with a weight
Likelihood = torch.nn.Parameter(torch.ones(len(indices[:split])).cuda(),requires_grad=True)
Likelihood_optim = torch.optim.SGD({Likelihood}, config.lr)
# data split
train_data = torch.utils.data.Subset(train_val_data, indices[:split])
valid_data = torch.utils.data.Subset(train_val_data, indices[split:])
train_loader = torch.utils.data.DataLoader(train_data,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.workers,
pin_memory=False)
valid_loader = torch.utils.data.DataLoader(valid_data,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.workers,
pin_memory=False)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, config.epochs)
architect = Architect(model, 0.9, 3e-4)
best_top1 = 0.
# training loop
for epoch in range(config.epochs):
lr_scheduler.step()
lr = lr_scheduler.get_lr()[0]
drop_prob = config.drop_path_prob * epoch / config.epochs
model.drop_path_prob(drop_prob)
# training
train(train_loader, valid_loader, model, architect, optimizer, criterion, lr, epoch, Likelihood, Likelihood_optim, config.batch_size)
# validation
cur_step = (epoch+1) * len(train_loader)
top1 = validate(valid_loader, model, criterion, epoch, cur_step)
# save
if best_top1 < top1:
best_top1 = top1
is_best = True
else:
is_best = False
utils.save_checkpoint(model, config.path, is_best)
print("")
logger.info("Final best Prec@1 = {:.4%}".format(best_top1))
def train(train_loader, valid_loader, model, architect, optimizer, criterion, lr, epoch, Likelihood, Likelihood_optim, batch_size):
top1 = utils.AverageMeter()
top5 = utils.AverageMeter()
losses = utils.AverageMeter()
standard_losses = utils.AverageMeter()
valid_losses = utils.AverageMeter()
cur_step = epoch*len(train_loader)
cur_lr = optimizer.param_groups[0]['lr']
logger.info("Epoch {} LR {}".format(epoch, cur_lr))
writer.add_scalar('train/lr', cur_lr, cur_step)
model.train()
for step, ((trn_X, trn_y), (val_X, val_y)) in enumerate(zip(train_loader, valid_loader)):
trn_X, trn_y = trn_X.to(device, non_blocking=True), trn_y.to(device, non_blocking=True)
val_X, val_y = val_X.to(device, non_blocking=True), val_y.to(device, non_blocking=True)
N = trn_X.size(0)
M = val_X.size(0)
# phase 2. Likelihood step (Likelihood)
Likelihood_optim.zero_grad()
Likelihood, Likelihood_optim, valid_loss, vprec1, vprec5= architect.unrolled_backward(trn_X, trn_y, val_X, val_y, lr, optimizer, model, Likelihood, Likelihood_optim, batch_size, step)
# phase 1. network weight step (w)
optimizer.zero_grad()
logits, aux_logits = model(trn_X)
ignore_crit = nn.CrossEntropyLoss(reduction='none').to(device)
dataIndex = len(trn_y)+step*batch_size
loss = torch.dot(torch.sigmoid(Likelihood[step*batch_size:dataIndex]), ignore_crit(logits, trn_y))
loss = loss/(torch.sigmoid(Likelihood[step*batch_size:dataIndex]).sum())
'''
if config.aux_weight > 0.:
loss += config.aux_weight * criterion(aux_logits, y)
'''
loss.backward()
# gradient clipping
nn.utils.clip_grad_norm_(model.parameters(), config.grad_clip)
# update network weight on train data
optimizer.step()
#compare normal loss without weighted
standard_loss = criterion(logits, trn_y)
prec1, prec5 = utils.accuracy(logits, trn_y, topk=(1, 5))
losses.update(loss.item(), N)
standard_losses.update(standard_loss.item(), N)
valid_losses.update(valid_loss.item(), M)
top1.update(prec1.item(), N)
top5.update(prec5.item(), N)
if step % config.print_freq == 0 or step == len(train_loader)-1:
logger.info(
"Train: [{:3d}/{}] Step {:03d}/{:03d} Loss {losses.avg:.3f} standard Loss {slosses.avg:.3f} Valid Loss {vlosses.avg:.3f}"
" Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format(
epoch+1, config.epochs, step, len(train_loader)-1, losses=losses, slosses=standard_losses, vlosses=valid_losses,
top1=top1, top5=top5))
writer.add_scalar('train/loss', loss.item(), cur_step)
writer.add_scalar('train/top1', prec1.item(), cur_step)
writer.add_scalar('train/top5', prec5.item(), cur_step)
writer.add_scalar('val/loss', valid_loss.item(), cur_step)
writer.add_scalar('train/top1', vprec1.item(), cur_step)
writer.add_scalar('train/top5', vprec5.item(), cur_step)
cur_step += 1
logger.info("Train: [{:3d}/{}] Final Prec@1 {:.4%}".format(epoch+1, config.epochs, top1.avg))
def validate(valid_loader, model, criterion, epoch, cur_step):
top1 = utils.AverageMeter()
top5 = utils.AverageMeter()
losses = utils.AverageMeter()
model.eval()
with torch.no_grad():
for step,(X, y) in enumerate(valid_loader):
X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True)
N = X.size(0)
logits, _ = model(X)
loss = criterion(logits, y)
prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5))
losses.update(loss.item(), N)
top1.update(prec1.item(), N)
top5.update(prec5.item(), N)
if step % config.print_freq == 0 or step == len(valid_loader)-1:
logger.info(
"Test: [{:3d}/{}] Step {:03d}/{:03d} Loss {losses.avg:.3f} "
"Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format(
epoch+1, config.epochs, step, len(valid_loader)-1, losses=losses,
top1=top1, top5=top5))
writer.add_scalar('test/loss', losses.avg, cur_step)
writer.add_scalar('test/top1', top1.avg, cur_step)
writer.add_scalar('test/top5', top5.avg, cur_step)
logger.info("Test: [{:3d}/{}] Final Prec@1 {:.4%}".format(epoch+1, config.epochs, top1.avg))
return top1.avg
if __name__ == "__main__":
main()
| 38.455657 | 191 | 0.59833 |
79439689139bc78e7fafa036aa9680c5c06bf3ab | 3,646 | py | Python | src/zen/tests/gml.py | wangyiranamy/Testing | 2a729d1f73b6df69150807b965b8fedbb7661c04 | [
"BSD-3-Clause"
] | 41 | 2015-01-13T19:49:50.000Z | 2021-05-02T04:11:19.000Z | src/zen/tests/gml.py | wangyiranamy/Testing | 2a729d1f73b6df69150807b965b8fedbb7661c04 | [
"BSD-3-Clause"
] | 9 | 2015-01-28T10:46:27.000Z | 2022-03-12T06:32:39.000Z | src/zen/tests/gml.py | wangyiranamy/Testing | 2a729d1f73b6df69150807b965b8fedbb7661c04 | [
"BSD-3-Clause"
] | 19 | 2015-01-27T12:19:42.000Z | 2019-07-20T21:30:56.000Z | from zen import *
import unittest
import os
import os.path as path
import tempfile
class GMLReadTestCase(unittest.TestCase):
def test_read_directed_test1(self):
fname = path.join(path.dirname(__file__),'test1.gml')
G = gml.read(fname)
self.assertEqual(len(G),3)
self.assertEqual(G.size(),2)
self.assertEqual(type(G),DiGraph)
self.assertTrue(G.has_edge('N1','N2'))
self.assertTrue(G.has_edge('N2','N3'))
self.assertFalse(G.has_edge('N1','N3'))
self.assertFalse(G.has_edge('N3','N2'))
self.assertEqual(G.node_idx('N1'),1)
self.assertEqual(G.node_idx('N2'),2)
self.assertEqual(G.node_idx('N3'),3)
self.assertEqual(G.node_data('N1')['sampleOne'],42)
self.assertEqual(G.node_data('N2')['sampleTwo'],42.1)
self.assertEqual(G.node_data('N3')['sampleThree'],'HELLO WORLD')
self.assertEqual(G.edge_data('N1','N2')['label'],
'Edge from node 1 to node 2')
def test_read_undirected_test1(self):
fname = path.join(path.dirname(__file__),'test2.gml')
G = gml.read(fname)
self.assertEqual(len(G),3)
self.assertEqual(G.size(),2)
self.assertEqual(type(G),Graph)
self.assertTrue(G.has_edge('N1','N2'))
self.assertTrue(G.has_edge('N2','N3'))
self.assertFalse(G.has_edge('N1','N3'))
self.assertTrue(G.has_edge('N3','N2'))
self.assertEqual(G.node_idx('N1'),1)
self.assertEqual(G.node_idx('N2'),2)
self.assertEqual(G.node_idx('N3'),3)
self.assertEqual(G.node_data('N1')['sampleOne'],42)
self.assertEqual(G.node_data('N2')['sampleTwo'],42.1)
self.assertEqual(G.node_data('N3')['sampleThree'],'HELLO WORLD')
self.assertEqual(G.edge_data('N1','N2')['label'],
'Edge from node 1 to node 2')
def test_list_variables(self):
fname = path.join(path.dirname(__file__),'test3.gml')
G = gml.read(fname)
self.assertEqual(len(G),3)
self.assertEqual(G.size(),2)
self.assertEqual(G.node_data('N1')['listVar'],
[1,'a',3.2])
def test_weight_fxn(self):
fname = path.join(path.dirname(__file__),'test3.gml')
G = gml.read(fname,weight_fxn=lambda data:data['value'])
self.assertEqual(len(G),3)
self.assertEqual(G.size(),2)
self.assertEqual(G.weight('N1','N2'),2)
self.assertEqual(G.weight('N2','N3'),3)
def test_non_asci_char(self):
G = Graph()
G.add_node(u'\u2660')
G.add_node(u'\u2663')
G.add_node(u'\u2665')
G.add_node(u'\u2666')
G.add_edge(u'\u2663', u'\u2665')
G.add_edge(u'\u2660', u'\u2666')
G.add_edge(u'\u2665', u'\u2666')
G.add_edge(u'\u2660', u'\u2663')
gml.write(G, 'test4.gml')
H = gml.read('test4.gml')
for nobj in G.nodes():
self.assertEqual(H.node_idx(nobj), G.node_idx(nobj))
for nobj1, nobj2 in G.edges():
self.assertEqual(H.edge_idx(nobj1, nobj2),
G.edge_idx(nobj1, nobj2))
self.assertEqual(G.size(), H.size())
self.assertEqual(len(G), len(H))
def test_tuple_node_objects(self):
G = Graph()
G.add_node((1,2))
G.add_node((2,3))
G.add_edge((1,2),(2,3))
gml.write(G, 'test5.gml')
H = gml.read('test5.gml')
for nobj in G.nodes():
self.assertEqual(H.node_idx(nobj), G.node_idx(nobj))
for nobj1, nobj2 in G.edges():
self.assertEqual(H.edge_idx(nobj1, nobj2),
G.edge_idx(nobj1, nobj2))
self.assertEqual(G.size(), H.size())
self.assertEqual(len(G), len(H))
def test_no_node_data(self):
G = Graph()
G.add_node()
G.add_node()
G.add_edge_(0,1)
gml.write(G, 'test5.gml')
H = gml.read('test5.gml')
for edge_idx in G.edges_():
node_idx1, node_idx2 = H.endpoints_(edge_idx)
H.has_edge_(node_idx1, node_idx2)
self.assertEqual(G.size(), H.size())
self.assertEqual(len(G), len(H))
if __name__ == '__main__':
unittest.main()
| 24.469799 | 66 | 0.665661 |
794397b00537fc54e66f1719163df7e915b4f252 | 9,003 | py | Python | nuart/biclustering/bartmap.py | ACIL-Group/NuART-Py | 36011432f6da9b87452c25cb1911a742f353bc49 | [
"Apache-2.0"
] | 6 | 2018-12-09T21:03:06.000Z | 2021-09-06T09:28:53.000Z | nuart/biclustering/bartmap.py | ACIL-Group/NuART-Py | 36011432f6da9b87452c25cb1911a742f353bc49 | [
"Apache-2.0"
] | null | null | null | nuart/biclustering/bartmap.py | ACIL-Group/NuART-Py | 36011432f6da9b87452c25cb1911a742f353bc49 | [
"Apache-2.0"
] | 1 | 2019-12-14T07:25:31.000Z | 2019-12-14T07:25:31.000Z | """
Copyright 2019 Islam Elnabarawy
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# References:
# [1] R. Xu and D. C. Wunsch II, "BARTMAP: A viable structure for biclustering,"
# Neural Networks, vol. 24, no. 7, pp. 709-716, 2011.
# [2] I. Elnabarawy, D. C. Wunsch II, and A. M. Abdelbar, "Biclustering ARTMAP
# Collaborative Filtering Recommender System," in Proceedings of the 2016 International
# Joint Conference on Neural Networks (IJCNN ’16), 2016, pp. 2986-2991.
import multiprocessing
import random
import numpy as np
from sklearn import preprocessing
from nuart.common.linear_algebra import fuzzy_and, max_norm
__author__ = 'Islam Elnabarawy'
class FuzzyARTModule(object):
def __init__(self, rho, alpha, beta, num_features):
self.rho = rho
self.alpha = alpha
self.beta = beta
self.num_clusters = 0
self.num_features = num_features
self.w = np.ones((self.num_clusters, self.num_features * 2))
def train_dataset(self, dataset, max_epochs=np.inf, shuffle=False, random_seed=None):
# complement-code the data
dataset = np.concatenate((dataset, 1 - dataset), axis=1)
# initialize variables
labels = np.zeros(dataset.shape[0])
iterations = 0
w_old = None
indices = list(range(dataset.shape[0]))
if shuffle:
if random_seed is not None:
random.seed(random_seed)
random.shuffle(indices)
while not np.array_equal(self.w, w_old) and iterations < max_epochs:
w_old = self.w
for ix in indices:
labels[ix] = self.train_pattern(dataset[ix, :])
iterations += 1
return labels, iterations
def train_pattern(self, pattern):
# evaluate the pattern to get the winning category
winner = self.eval_pattern(pattern)
# commit the pattern to the winning category
self.commit_pattern(pattern, winner)
return winner
def commit_pattern(self, pattern, category):
# check if the uncommitted node was the winner
if (category + 1) > self.num_clusters:
self.num_clusters += 1
self.w = np.concatenate((self.w, np.ones((1, self.w.shape[1]))))
# update the weight of the winning neuron
self.w[category, :] = self.weight_update(pattern, self.w[category, :], self.beta)
def eval_pattern(self, pattern):
# initialize variables
matches = np.zeros(self.num_clusters)
# calculate the category match values
for jx in range(self.num_clusters):
matches[jx] = self.category_choice(pattern, self.w[jx, :], self.alpha)
# pick the winning category
match_attempts = 0
while match_attempts < self.num_clusters:
# winner-take-all selection
winner = np.argmax(matches)
# vigilance test
if self.vigilance_check(pattern, self.w[winner, :], self.rho):
# the winning category passed the vigilance test
return winner
else:
# shut off this category from further testing
matches[winner] = 0
match_attempts += 1
return self.num_clusters
@staticmethod
def category_choice(pattern, category_w, alpha):
return max_norm(fuzzy_and(pattern, category_w)) / (alpha + max_norm(category_w))
@staticmethod
def vigilance_check(pattern, category_w, rho):
return max_norm(fuzzy_and(pattern, category_w)) >= rho * max_norm(pattern)
@staticmethod
def weight_update(pattern, category_w, beta):
return beta * fuzzy_and(pattern, category_w) + (1 - beta) * category_w
class BARTMAP(object):
def __init__(self, arta_settings, artb_settings, corr_thresh, step_size):
"""
Create a Biclustering ARTMAP object
:param artb_settings: A 3-tuple containing the rho, alpha, and beta parameters of ARTa
:param arta_settings: A 3-tuple containing the rho, alpha, and beta parameters of ARTb
:param corr_thresh: A float specifying the correlation threshold to use for BARTMAP's inter-ART module
:param step_size: The step size parameter for BARTMAP's inter-ART module
"""
super(BARTMAP, self).__init__()
self.arta_settings = arta_settings
self.artb_settings = artb_settings
self.corr_thresh = corr_thresh
self.step_size = step_size
self.num_samples = None
self.num_features = None
self.ARTa = None
self.ARTb = None
self.sample_labels = None
self.num_sample_labels = 0
self.feature_labels = None
self.num_feature_labels = 0
self.map = map
def train(self, data):
sample_data = preprocessing.MinMaxScaler().fit_transform(data)
feature_data = preprocessing.MinMaxScaler().fit_transform(data.transpose())
return self.train_preprocessed(sample_data, feature_data)
def train_preprocessed(self, sample_data, feature_data):
pool = multiprocessing.Pool()
self.map = pool.map
self.num_samples, self.num_features = sample_data.shape
self.ARTa = FuzzyARTModule(*self.arta_settings, self.num_features)
self.ARTb = FuzzyARTModule(*self.artb_settings, self.num_samples)
self.feature_labels, _ = self.ARTb.train_dataset(feature_data)
self.num_feature_labels = self.ARTb.num_clusters
self.sample_labels = np.zeros(self.num_samples, dtype=np.int32)
self.num_sample_labels = 0
for ix in range(self.num_samples):
# re-initialize the ARTa vigilance parameter for each sample
self.ARTa.rho = self.arta_settings[0]
sample = np.concatenate([sample_data[ix, :], 1 - sample_data[ix, :]], axis=0)
while True:
sample_category = self.ARTa.eval_pattern(sample)
if sample_category == self.ARTa.num_clusters:
# new cluster created; always allow new clusters
self.ARTa.commit_pattern(sample, sample_category)
self.sample_labels[ix] = sample_category
self.num_sample_labels += 1
break
else:
# the sample was assigned to an existing cluster; check correlation threshold
sample_cluster = sample_data[np.nonzero(self.sample_labels[:ix] == sample_category)]
correlations = np.array([
self.get_bicluster_correlations(jx, sample, sample_cluster)
for jx in range(self.num_feature_labels)
])
# check the correlations against the threshold
if np.any(correlations > self.corr_thresh) or self.ARTa.rho >= 1:
# allow this sample to be committed into the bicluster
self.ARTa.commit_pattern(sample, sample_category)
self.sample_labels[ix] = sample_category
break
else:
# increase the ARTa vigilance threshold and try again
self.ARTa.rho += self.step_size
if self.ARTa.rho > 1:
self.ARTa.rho = 1
pool.close()
self.map = map
def get_bicluster_correlations(self, jx, sample, sample_cluster):
feature_ix = np.nonzero(self.feature_labels == jx)
return self.get_average_correlation(sample_cluster[:, feature_ix], sample[feature_ix])
def get_average_correlation(self, bicluster, sample):
# compute the average of the correlation between each pair of samples
return np.array(list(self.map(BARTMAP.get_correlation_args, [(row, sample) for row in bicluster]))).mean()
@staticmethod
def get_correlation_args(args):
return BARTMAP.get_correlation(*args)
@staticmethod
def get_correlation(x, y):
# compute the terms for all the item values
terms1 = x - np.mean(x)
terms2 = y - np.mean(y)
# compute the sums to find the pairwise correlation
numerator = np.sum(np.multiply(terms1, terms2))
root1 = np.sqrt(np.sum(np.multiply(terms1, terms1)))
root2 = np.sqrt(np.sum(np.multiply(terms2, terms2)))
return numerator / (root1 * root2) if root1 != 0 and root2 != 0 else 0
| 37.987342 | 114 | 0.634011 |
794397f22334c4db124c0decc963f5fbb527abf7 | 1,613 | py | Python | msdsl/expr/extras.py | sgherbst/msdsl | e38d5ecdb88b3574bda62f22a4f91ce3e4173d12 | [
"MIT"
] | 15 | 2019-05-14T10:12:23.000Z | 2022-03-29T15:29:52.000Z | msdsl/expr/extras.py | sgherbst/msdsl | e38d5ecdb88b3574bda62f22a4f91ce3e4173d12 | [
"MIT"
] | 19 | 2020-01-22T21:44:33.000Z | 2021-06-05T02:10:41.000Z | msdsl/expr/extras.py | sgherbst/msdsl | e38d5ecdb88b3574bda62f22a4f91ce3e4173d12 | [
"MIT"
] | 5 | 2019-10-21T09:53:17.000Z | 2021-08-10T17:32:20.000Z | from typing import Union, List
from numbers import Number, Integral
from msdsl.expr.expr import ModelExpr, concatenate, BitwiseAnd, array
def all_between(x: List[ModelExpr], lo: Union[Number, ModelExpr], hi: Union[Number, ModelExpr]) -> ModelExpr:
"""
Limit checking. Check if a list of ModelExpr objects provided in *x* is larger than *lo* and smaller than *hi*.
:param x: List of ModelExpr that are to be checked
:param lo: Lower limit
:param hi: Upper limit
:return: boolean, 1 if x is within limits, 0 otherwise
"""
return BitwiseAnd([between(elem, lo, hi) for elem in x])
def between(x: ModelExpr, lo: Union[Number, ModelExpr], hi: Union[Number, ModelExpr]) -> ModelExpr:
"""
Limit checking. Check if a ModelExpr object provided in *x* is larger than *lo* and smaller than *hi*.
:param x: ModelExpr that is to be checked
:param lo: Lower limit
:param hi: Upper limit
:return: boolean, 1 if x is within limits, 0 otherwise
"""
return (lo <= x) & (x <= hi)
def replicate(x: ModelExpr, n: Integral):
return concatenate([x]*n)
def if_(condition, then, else_):
"""
Conditional statement. Condition *condition* is evaluated and if result is true, action *then* is executed, otherwise
action *else_*.
:param condition: Conditional expression that is to be evaluated
:param then: Action to be executed for True case
:param else_: Action to be executed for False case
:return: Boolean
"""
return array([else_, then], condition) | 40.325 | 122 | 0.655301 |
794398b0194e39c4b3b063f02919562b6cff5a96 | 373 | py | Python | workout_tracker/users/urls.py | ympaik87/workout_tracker | 9f78e0ef7664b53868f43ccda7256bcfa4105405 | [
"MIT"
] | null | null | null | workout_tracker/users/urls.py | ympaik87/workout_tracker | 9f78e0ef7664b53868f43ccda7256bcfa4105405 | [
"MIT"
] | null | null | null | workout_tracker/users/urls.py | ympaik87/workout_tracker | 9f78e0ef7664b53868f43ccda7256bcfa4105405 | [
"MIT"
] | null | null | null | from django.urls import path
from workout_tracker.users.views import (
user_redirect_view,
user_update_view,
user_detail_view,
)
app_name = "users"
urlpatterns = [
path("~redirect/", view=user_redirect_view, name="redirect"),
path("~update/", view=user_update_view, name="update"),
path("<str:username>/", view=user_detail_view, name="detail"),
]
| 24.866667 | 66 | 0.707775 |
79439959fab8f2b4fea936c7985f79c23293d51d | 4,109 | py | Python | uts/zscore.py | Yifei-Liu/uts | 64c137d59fcd0c7c016082018d67a56abac0b28e | [
"MIT"
] | null | null | null | uts/zscore.py | Yifei-Liu/uts | 64c137d59fcd0c7c016082018d67a56abac0b28e | [
"MIT"
] | null | null | null | uts/zscore.py | Yifei-Liu/uts | 64c137d59fcd0c7c016082018d67a56abac0b28e | [
"MIT"
] | null | null | null | # coding: utf-8
__author__ = 'Mário Antunes'
__version__ = '0.1'
__email__ = '[email protected]'
__status__ = 'Development'
import math
import numpy as np
def weighted_avg_and_std(values: np.ndarray, weights: np.ndarray):
"""
Return the weighted average and standard deviation.
Args:
points (np.ndarray): numpy array with values
weights (np.ndarray): numpy array with weights
Returns:
tuple[float, float]: returns a tuple with the weighted average and standard deviation
"""
average = np.average(values, weights=weights)
# Fast and numerically precise:
variance = np.average((values-average)**2, weights=weights)
return (average, math.sqrt(variance))
def zscore(xi: float, mean: float, std: float) -> float:
"""
Return the z-score for a single value.
Args:
xi (float): the single value
mean (float): mean value from the sequence
std (float): standart deviation from the sequence
Returns:
float: the z-score for a single value
"""
if std != 0:
return (xi - mean)/std
else:
return xi - mean
def linear_delta_mapping_points(points: np.ndarray):
"""
Return a linear mapping from the sequence of points.
One way to estimate the z-score metric from a uneven sequence
is to map the values linearly and compute the weight of each new value.
The weight is proportional to the delta in the x axis.
Args:
points (np.ndarray): numpy array with the points (x, y)
Returns:
tuple[np.ndarray, np.ndarray]: the weight and the linear mapping
"""
x = points[:, 0]
y = points[:, 1]
return linear_delta_mapping(x, y)
def linear_delta_mapping(x: np.ndarray, y: np.ndarray):
"""
Return a linear mapping from the sequence of points.
One way to estimate the z-score metric from a uneven sequence
is to map the values linearly and compute the weight of each new value.
The weight is proportional to the delta in the x axis.
Args:
x (np.ndarray): values from the x axis
y (np.ndarray): values from the y axis
Returns:
tuple[np.ndarray, np.ndarray]: the weight and the linear mapping
"""
tdelta = x[1:] - x[:-1]
linear_values = (y[1:] + y[:-1]) / 2.0
return tdelta, linear_values
def zscore_linear(xi: float, points: np.ndarray) -> float:
"""
Return the z-score for a single value, using the linear mapping
to deal with the uneven sequence of values.
Args:
xi (float): the single value
points (np.ndarray): numpy array with the points (x, y)
Returns:
float: the z-score for a single value
Raises:
ValueError: If the lenght of points is smaller than 2.
"""
if len(points) <= 1:
raise ValueError('The number of points is smaller than 2')
weights, values = linear_delta_mapping_points(points)
mean, std = weighted_avg_and_std(values, weights)
return zscore(xi, mean, std)
def zscore_array_points(points: np.ndarray) -> np.ndarray:
"""
Returns the z-score value for all the values in the sequence.
It uses linear mapping to deal with the uneven sequence.
Args:
points (np.ndarray): numpy array with the points (x, y)
Returns:
np.ndarray: the z-score value for all the values in the sequence
"""
x = points[:, 0]
y = points[:, 1]
return zscore_array(x, y)
def zscore_array(x: np.ndarray, y: np.ndarray) -> np.ndarray:
"""
Returns the z-score value for all the values in the sequence.
It uses linear mapping to deal with the uneven sequence.
Args:
x (np.ndarray): values from the x axis
y (np.ndarray): values from the y axis
Returns:
np.ndarray: the z-score value for all the values in the sequence
"""
weights, values = linear_delta_mapping(x, y)
mean, std = weighted_avg_and_std(values, weights)
if std != 0.0:
return (y - mean)/std
else:
return y - mean
| 28.143836 | 93 | 0.640058 |
79439ac28bbfb080bb21b110a889781f1a504560 | 5,304 | py | Python | datasets/pose/data_loader.py | kangcheol/torchcv | 561ef4e662fff1b9b47060bb08842408a205e689 | [
"Apache-2.0"
] | 106 | 2020-09-08T11:30:28.000Z | 2022-03-23T03:07:09.000Z | datasets/pose/data_loader.py | shanhedian2017/torchcv | 6414f5acb41c2f35f8e79e477a57eaba65591c66 | [
"Apache-2.0"
] | 5 | 2020-09-09T09:45:11.000Z | 2022-02-18T03:07:20.000Z | datasets/pose/data_loader.py | shanhedian2017/torchcv | 6414f5acb41c2f35f8e79e477a57eaba65591c66 | [
"Apache-2.0"
] | 10 | 2020-09-09T08:06:36.000Z | 2021-11-01T08:27:15.000Z | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Donny You([email protected])
# Class for the Pose Data Loader.
from torch.utils import data
from datasets.pose.loader.default_loader import DefaultLoader
from datasets.pose.loader.openpose_loader import OpenPoseLoader
import datasets.tools.pil_aug_transforms as pil_aug_trans
import datasets.tools.cv2_aug_transforms as cv2_aug_trans
import datasets.tools.transforms as trans
from datasets.tools.collate import collate
from tools.util.logger import Logger as Log
class DataLoader(object):
def __init__(self, configer):
self.configer = configer
if self.configer.get('data', 'image_tool') == 'pil':
self.aug_train_transform = pil_aug_trans.PILAugCompose(self.configer, split='train')
elif self.configer.get('data', 'image_tool') == 'cv2':
self.aug_train_transform = cv2_aug_trans.CV2AugCompose(self.configer, split='train')
else:
Log.error('Not support {} image tool.'.format(self.configer.get('data', 'image_tool')))
exit(1)
if self.configer.get('data', 'image_tool') == 'pil':
self.aug_val_transform = pil_aug_trans.PILAugCompose(self.configer, split='val')
elif self.configer.get('data', 'image_tool') == 'cv2':
self.aug_val_transform = cv2_aug_trans.CV2AugCompose(self.configer, split='val')
else:
Log.error('Not support {} image tool.'.format(self.configer.get('data', 'image_tool')))
exit(1)
self.img_transform = trans.Compose([
trans.ToTensor(),
trans.Normalize(**self.configer.get('data', 'normalize')), ])
def get_trainloader(self):
if self.configer.get('train.loader', default=None) in [None, 'default']:
trainloader = data.DataLoader(
DefaultLoader(root_dir=self.configer.get('data', 'data_dir'), dataset='train',
aug_transform=self.aug_train_transform,
img_transform=self.img_transform,
configer=self.configer),
batch_size=self.configer.get('train', 'batch_size'), shuffle=True,
num_workers=self.configer.get('data', 'workers'), pin_memory=True,
drop_last=self.configer.get('data', 'drop_last'),
collate_fn=lambda *args: collate(
*args, trans_dict=self.configer.get('train', 'data_transformer')
)
)
return trainloader
elif self.configer.get('train', 'loader') == 'openpose':
trainloader = data.DataLoader(
OpenPoseLoader(root_dir=self.configer.get('data', 'data_dir'), dataset='train',
aug_transform=self.aug_train_transform,
img_transform=self.img_transform,
configer=self.configer),
batch_size=self.configer.get('train', 'batch_size'), shuffle=True,
num_workers=self.configer.get('data', 'workers'), pin_memory=True,
drop_last=self.configer.get('data', 'drop_last'),
collate_fn=lambda *args: collate(
*args, trans_dict=self.configer.get('train', 'data_transformer')
)
)
return trainloader
else:
Log.error('{} train loader is invalid.'.format(self.configer.get('train', 'loader')))
exit(1)
def get_valloader(self, dataset=None):
dataset = 'val' if dataset is None else dataset
if self.configer.get('val.loader', default=None) in [None, 'default']:
valloader = data.DataLoader(
DefaultLoader(root_dir=self.configer.get('data', 'data_dir'), dataset=dataset,
aug_transform=self.aug_val_transform,
img_transform=self.img_transform,
configer=self.configer),
batch_size=self.configer.get('val', 'batch_size'), shuffle=False,
num_workers=self.configer.get('data', 'workers'), pin_memory=True,
collate_fn=lambda *args: collate(
*args, trans_dict=self.configer.get('val', 'data_transformer')
)
)
return valloader
elif self.configer.get('val', 'loader') == 'openpose':
valloader = data.DataLoader(
OpenPoseLoader(root_dir=self.configer.get('data', 'data_dir'), dataset=dataset,
aug_transform=self.aug_val_transform,
img_transform=self.img_transform,
configer=self.configer),
batch_size=self.configer.get('val', 'batch_size'), shuffle=False,
num_workers=self.configer.get('data', 'workers'), pin_memory=True,
collate_fn=lambda *args: collate(
*args, trans_dict=self.configer.get('val', 'data_transformer')
)
)
return valloader
else:
Log.error('{} val loader is invalid.'.format(self.configer.get('val', 'loader')))
exit(1)
if __name__ == "__main__":
# Test data loader.
pass
| 44.2 | 99 | 0.584842 |
79439ad0ff1608862ace1645a2820a56cf0c8fe0 | 288 | py | Python | profq_data/helpers/nodes/binary_tree_node.py | ProfessorQu/ProfQ-Data | 13edf73c90ea2545a9a373fabb78a764f247e575 | [
"MIT"
] | null | null | null | profq_data/helpers/nodes/binary_tree_node.py | ProfessorQu/ProfQ-Data | 13edf73c90ea2545a9a373fabb78a764f247e575 | [
"MIT"
] | null | null | null | profq_data/helpers/nodes/binary_tree_node.py | ProfessorQu/ProfQ-Data | 13edf73c90ea2545a9a373fabb78a764f247e575 | [
"MIT"
] | null | null | null | class Node:
"""A class for most data structures
"""
def __init__(self, data: int) -> None:
"""The init function
Args:
data (int): what data to put in the node
"""
self.data = data
self.left = None
self.right = None
| 22.153846 | 52 | 0.510417 |
79439b18c3912597cd972753b041dc6ba5f3ca7f | 3,134 | py | Python | rcsb/app/chem/convertTools.py | rcsb/py-rcsb_app_chem | c2a2465fa12ecb66dfcaf5fdc352d8b824bd21b4 | [
"Apache-2.0"
] | null | null | null | rcsb/app/chem/convertTools.py | rcsb/py-rcsb_app_chem | c2a2465fa12ecb66dfcaf5fdc352d8b824bd21b4 | [
"Apache-2.0"
] | 1 | 2021-08-10T14:52:12.000Z | 2021-08-10T15:08:43.000Z | rcsb/app/chem/convertTools.py | rcsb/py-rcsb_app_chem | c2a2465fa12ecb66dfcaf5fdc352d8b824bd21b4 | [
"Apache-2.0"
] | null | null | null | ##
# File: convertTools.py
# Date: 10-Decmber-2020 jdw
#
# Updates:
#
##
# pylint: skip-file
__docformat__ = "restructuredtext en"
__author__ = "John Westbrook"
__email__ = "[email protected]"
__license__ = "Apache 2.0"
import logging
from enum import Enum
# from typing import List
from fastapi import APIRouter, Path, Query
from fastapi.encoders import jsonable_encoder
from fastapi.responses import FileResponse
# pylint disable=no-name-in-module
from pydantic import BaseModel, Field
from rcsb.utils.chem.ChemCompDepictWrapper import ChemCompDepictWrapper
logger = logging.getLogger(__name__)
router = APIRouter()
class ConvertIdentifierType(str, Enum):
smiles = "SMILES"
inchi = "InChI"
identifierPdb = "IdentifierPDB"
class MoleculeFormatType(str, Enum):
mol = "mol"
sdf = "sdf"
mol2 = "mol2"
mol2h = "mol2h"
class ConvertMoleculeIdentifier(BaseModel):
target: str = Field(None, title="Descriptor string", description="SMILES or InChI chemical descriptor", example="c1ccc(cc1)[C@@H](C(=O)O)N")
fmt: MoleculeFormatType = Field(None, title="Molecule format", description="Molecule format type (mol, sdf, mol2, mol2h)", example="mol")
@router.get("/to-molfile/{convertIdentifierType}", tags=["convert"])
def toMolFileGet(
target: str = Query(None, title="Target molecule identifier", description="SMILES, InChI or PDB identifier", example="c1ccc(cc1)[C@@H](C(=O)O)N"),
fmt: MoleculeFormatType = Query(None, title="Molecule format type", description="Molecule format type (mol, sdf, mol2, mol2h)", example="mol"),
convertIdentifierType: ConvertIdentifierType = Path(
..., title="Molecule identifier type", description="Molecule identifier type (SMILES, InChI or PDB identifier)", example="SMILES"
),
):
logger.debug("Got %r %r %r", convertIdentifierType, target, fmt)
# ---
fmt = fmt.lower() if fmt else "mol"
ccdw = ChemCompDepictWrapper()
molfilePath = ccdw.toMolFile(target, convertIdentifierType, fmt=fmt)
mimeTypeD = {"mol": "chemical/x-mdl-molfile", "sdf": "chemical/x-mdl-sdfile", "mol2": "chemical/x-mol2", "mol2h": "chemical/x-mol2"}
mType = mimeTypeD[fmt]
# ---
return FileResponse(molfilePath, media_type=mType)
@router.post("/to-molfile/{convertIdentifierType}", tags=["convert"])
def toMolFilePost(
target: ConvertMoleculeIdentifier,
convertIdentifierType: ConvertIdentifierType = Path(
..., title="Molecule identifier type", description="Type of molecule identifier (SMILES, InChI or PDB identifier)", example="SMILES"
),
):
qD = jsonable_encoder(target)
logger.debug("qD %r", qD)
fmt = qD["fmt"].lower() if "fmt" in qD and qD["fmt"] else "mol"
logger.debug("Got %r %r %r", convertIdentifierType, target, fmt)
# --
ccdw = ChemCompDepictWrapper()
molfilePath = ccdw.toMolFile(qD["target"], convertIdentifierType, fmt=fmt)
mimeTypeD = {"mol": "chemical/x-mdl-molfile", "sdf": "chemical/x-mdl-sdfile", "mol2": "chemical/x-mol2", "mol2h": "chemical/x-mol2"}
mType = mimeTypeD[fmt]
# ---
return FileResponse(molfilePath, media_type=mType)
| 35.213483 | 150 | 0.702936 |
79439b25270eae84af82de1bfab959e59655e80f | 25,586 | py | Python | lib/onigmo/onigmo.py | carrotop/fluent-bit | 7083a0edf480f09424f25c8e634e4996bf1e101b | [
"Apache-2.0"
] | 3,553 | 2015-01-29T21:43:36.000Z | 2022-03-31T08:41:59.000Z | lib/onigmo/onigmo.py | carrotop/fluent-bit | 7083a0edf480f09424f25c8e634e4996bf1e101b | [
"Apache-2.0"
] | 4,247 | 2015-05-20T15:59:38.000Z | 2022-03-31T23:19:12.000Z | lib/onigmo/onigmo.py | carrotop/fluent-bit | 7083a0edf480f09424f25c8e634e4996bf1e101b | [
"Apache-2.0"
] | 1,176 | 2015-05-20T08:31:11.000Z | 2022-03-31T22:40:08.000Z | # -*- coding: utf-8 -*-
"""Using Onigmo (Oniguruma-mod) regular expression library.
This is a low level wrapper for Onigmo regular expression DLL/shared object.
(This module does not support static link library.)
This provides almost same API as the original C API, so the API is not
object oriented.
Onigmo DLL (onigmo.dll, libonigmo.so, etc.) must be placed in the
default search path. The default search path depends on the system.
"""
import ctypes
import os
import sys
#__all__ = ["onig_new", "onig_free",
# "onig_search", "onig_match",
# "onig_region_new", "onig_region_free",
# "onig_version", "onig_copyright"]
#
# Onigmo API version
# (Must be synchronized with LTVERSION in configure.ac.)
#
_onig_api_version = 6
#
# Type Definitions
#
OnigCodePoint = ctypes.c_uint
class OnigRegexType(ctypes.Structure):
_fields_ = [
]
regex_t = OnigRegexType
OnigRegex = ctypes.POINTER(OnigRegexType)
try:
# Python 2.7
_c_ssize_t = ctypes.c_ssize_t
except AttributeError:
# Python 2.6
if ctypes.sizeof(ctypes.c_int) == ctypes.sizeof(ctypes.c_void_p):
_c_ssize_t = ctypes.c_int
elif ctypes.sizeof(ctypes.c_long) == ctypes.sizeof(ctypes.c_void_p):
_c_ssize_t = ctypes.c_long
elif ctypes.sizeof(ctypes.c_longlong) == ctypes.sizeof(ctypes.c_void_p):
_c_ssize_t = ctypes.c_longlong
class OnigRegion(ctypes.Structure):
_fields_ = [
("allocated", ctypes.c_int),
("num_regs", ctypes.c_int),
("beg", ctypes.POINTER(_c_ssize_t)),
("end", ctypes.POINTER(_c_ssize_t)),
("history_root",ctypes.c_void_p),
]
re_registers = OnigRegion
OnigOptionType = ctypes.c_int
class OnigEncodingType(ctypes.Structure):
_fields_ = [
("mbc_enc_len", ctypes.c_void_p),
("name", ctypes.c_char_p),
("max_enc_len", ctypes.c_int),
("min_enc_len", ctypes.c_int),
("is_mbc_newline", ctypes.c_void_p),
("mbc_to_code", ctypes.c_void_p),
("code_to_mbclen", ctypes.c_void_p),
("code_to_mbc", ctypes.c_void_p),
("mbc_case_fold", ctypes.c_void_p),
("apply_all_case_fold", ctypes.c_void_p),
("get_case_fold_codes_by_str", ctypes.c_void_p),
("property_name_to_ctype", ctypes.c_void_p),
("is_code_ctype", ctypes.c_void_p),
("get_ctype_code_range", ctypes.c_void_p),
("left_adjust_char_head", ctypes.c_void_p),
("is_allowed_reverse_match",ctypes.c_void_p),
("case_map", ctypes.c_void_p),
("ruby_encoding_index", ctypes.c_int),
("flags", ctypes.c_int),
]
OnigEncoding = ctypes.POINTER(OnigEncodingType)
class OnigMetaCharTableType(ctypes.Structure):
_fields_ = [
("esc", OnigCodePoint),
("anychar", OnigCodePoint),
("anytime", OnigCodePoint),
("zero_or_one_time",OnigCodePoint),
("one_or_one_time", OnigCodePoint),
("anychar_anytime", OnigCodePoint),
]
class OnigSyntaxType(ctypes.Structure):
_fields_ = [
("op", ctypes.c_uint),
("op2", ctypes.c_uint),
("behavior", ctypes.c_uint),
("options", OnigOptionType),
("meta_char_table", OnigMetaCharTableType),
]
class OnigErrorInfo(ctypes.Structure):
_fields_ = [
("enc", OnigEncoding),
("par", ctypes.c_char_p),
("par_end", ctypes.c_char_p),
]
# load the DLL or the shared library
if os.name in ("nt", "ce"):
# Win32
_libname = "onigmo.dll"
try:
libonig = ctypes.cdll.LoadLibrary(_libname)
except OSError:
# Sometimes MinGW version has a prefix "lib".
_libname = "libonigmo.dll"
try:
libonig = ctypes.cdll.LoadLibrary(_libname)
except OSError:
# Sometimes MinGW version has the API version.
_libname = "libonigmo-%d.dll" % _onig_api_version
libonig = ctypes.cdll.LoadLibrary(_libname)
elif sys.platform == "cygwin":
# Cygwin
_libname = "cygonigmo-%d.dll" % _onig_api_version
libonig = ctypes.cdll.LoadLibrary(_libname)
elif sys.platform == "msys":
# MSYS/MSYS2
_libname = "msys-onigmo-%d.dll" % _onig_api_version
libonig = ctypes.cdll.LoadLibrary(_libname)
elif sys.platform == "darwin":
# Mac
_libname = "libonigmo.dylib"
libonig = ctypes.cdll.LoadLibrary(_libname)
else:
# Unix
_libname = "libonigmo.so"
libonig = ctypes.cdll.LoadLibrary(_libname)
#
# Encodings
#
def _load_encoding(enc):
return ctypes.pointer(OnigEncodingType.in_dll(libonig, enc))
ONIG_ENCODING_ASCII = _load_encoding("OnigEncodingASCII")
ONIG_ENCODING_ISO_8859_1 = _load_encoding("OnigEncodingISO_8859_1")
ONIG_ENCODING_ISO_8859_2 = _load_encoding("OnigEncodingISO_8859_2")
ONIG_ENCODING_ISO_8859_3 = _load_encoding("OnigEncodingISO_8859_3")
ONIG_ENCODING_ISO_8859_4 = _load_encoding("OnigEncodingISO_8859_4")
ONIG_ENCODING_ISO_8859_5 = _load_encoding("OnigEncodingISO_8859_5")
ONIG_ENCODING_ISO_8859_6 = _load_encoding("OnigEncodingISO_8859_6")
ONIG_ENCODING_ISO_8859_7 = _load_encoding("OnigEncodingISO_8859_7")
ONIG_ENCODING_ISO_8859_8 = _load_encoding("OnigEncodingISO_8859_8")
ONIG_ENCODING_ISO_8859_9 = _load_encoding("OnigEncodingISO_8859_9")
ONIG_ENCODING_ISO_8859_10 = _load_encoding("OnigEncodingISO_8859_10")
ONIG_ENCODING_ISO_8859_11 = _load_encoding("OnigEncodingISO_8859_11")
ONIG_ENCODING_ISO_8859_13 = _load_encoding("OnigEncodingISO_8859_13")
ONIG_ENCODING_ISO_8859_14 = _load_encoding("OnigEncodingISO_8859_14")
ONIG_ENCODING_ISO_8859_15 = _load_encoding("OnigEncodingISO_8859_15")
ONIG_ENCODING_ISO_8859_16 = _load_encoding("OnigEncodingISO_8859_16")
ONIG_ENCODING_UTF_8 = _load_encoding("OnigEncodingUTF_8")
ONIG_ENCODING_UTF_16LE = _load_encoding("OnigEncodingUTF_16LE")
ONIG_ENCODING_UTF_16BE = _load_encoding("OnigEncodingUTF_16BE")
ONIG_ENCODING_UTF_32LE = _load_encoding("OnigEncodingUTF_32LE")
ONIG_ENCODING_UTF_32BE = _load_encoding("OnigEncodingUTF_32BE")
ONIG_ENCODING_UTF8 = ONIG_ENCODING_UTF_8
ONIG_ENCODING_UTF16_LE = ONIG_ENCODING_UTF_16LE
ONIG_ENCODING_UTF16_BE = ONIG_ENCODING_UTF_16BE
ONIG_ENCODING_UTF32_LE = ONIG_ENCODING_UTF_32LE
ONIG_ENCODING_UTF32_BE = ONIG_ENCODING_UTF_32BE
ONIG_ENCODING_EUC_JP = _load_encoding("OnigEncodingEUC_JP")
ONIG_ENCODING_EUC_TW = _load_encoding("OnigEncodingEUC_TW")
ONIG_ENCODING_EUC_KR = _load_encoding("OnigEncodingEUC_KR")
ONIG_ENCODING_EUC_CN = _load_encoding("OnigEncodingEUC_CN")
ONIG_ENCODING_SHIFT_JIS = _load_encoding("OnigEncodingShift_JIS")
ONIG_ENCODING_WINDOWS_31J = _load_encoding("OnigEncodingWindows_31J")
ONIG_ENCODING_SJIS = ONIG_ENCODING_SHIFT_JIS
ONIG_ENCODING_CP932 = ONIG_ENCODING_WINDOWS_31J
#ONIG_ENCODING_KOI8 = _load_encoding("OnigEncodingKOI8")
ONIG_ENCODING_KOI8_R = _load_encoding("OnigEncodingKOI8_R")
ONIG_ENCODING_KOI8_U = _load_encoding("OnigEncodingKOI8_U")
ONIG_ENCODING_WINDOWS_1250 = _load_encoding("OnigEncodingWindows_1250")
ONIG_ENCODING_WINDOWS_1251 = _load_encoding("OnigEncodingWindows_1251")
ONIG_ENCODING_WINDOWS_1252 = _load_encoding("OnigEncodingWindows_1252")
ONIG_ENCODING_WINDOWS_1253 = _load_encoding("OnigEncodingWindows_1253")
ONIG_ENCODING_WINDOWS_1254 = _load_encoding("OnigEncodingWindows_1254")
ONIG_ENCODING_WINDOWS_1257 = _load_encoding("OnigEncodingWindows_1257")
ONIG_ENCODING_CP1250 = ONIG_ENCODING_WINDOWS_1250
ONIG_ENCODING_CP1251 = ONIG_ENCODING_WINDOWS_1251
ONIG_ENCODING_CP1252 = ONIG_ENCODING_WINDOWS_1252
ONIG_ENCODING_CP1253 = ONIG_ENCODING_WINDOWS_1253
ONIG_ENCODING_CP1254 = ONIG_ENCODING_WINDOWS_1254
ONIG_ENCODING_CP1257 = ONIG_ENCODING_WINDOWS_1257
ONIG_ENCODING_BIG5 = _load_encoding("OnigEncodingBIG5")
ONIG_ENCODING_GB18030 = _load_encoding("OnigEncodingGB18030")
#ONIG_ENCODING_UNDEF = None
#
# Syntaxes
#
def _load_syntax(syn):
return ctypes.pointer(OnigSyntaxType.in_dll(libonig, syn))
ONIG_SYNTAX_ASIS = _load_syntax("OnigSyntaxASIS")
ONIG_SYNTAX_POSIX_BASIC = _load_syntax("OnigSyntaxPosixBasic")
ONIG_SYNTAX_POSIX_EXTENDED = _load_syntax("OnigSyntaxPosixExtended")
ONIG_SYNTAX_EMACS = _load_syntax("OnigSyntaxEmacs")
ONIG_SYNTAX_GREP = _load_syntax("OnigSyntaxGrep")
ONIG_SYNTAX_GNU_REGEX = _load_syntax("OnigSyntaxGnuRegex")
ONIG_SYNTAX_JAVA = _load_syntax("OnigSyntaxJava")
ONIG_SYNTAX_PERL = _load_syntax("OnigSyntaxPerl")
ONIG_SYNTAX_PERL58 = _load_syntax("OnigSyntaxPerl58")
ONIG_SYNTAX_PERL58_NG = _load_syntax("OnigSyntaxPerl58_NG")
ONIG_SYNTAX_RUBY = _load_syntax("OnigSyntaxRuby")
ONIG_SYNTAX_PYTHON = _load_syntax("OnigSyntaxPython")
ONIG_SYNTAX_DEFAULT = ctypes.POINTER(OnigSyntaxType).in_dll(
libonig, "OnigDefaultSyntax")
#
# Constants
#
ONIG_MAX_ERROR_MESSAGE_LEN = 90
# options
ONIG_OPTION_NONE = 0
ONIG_OPTION_IGNORECASE = 1
ONIG_OPTION_EXTEND = (ONIG_OPTION_IGNORECASE << 1)
ONIG_OPTION_MULTILINE = (ONIG_OPTION_EXTEND << 1)
ONIG_OPTION_DOTALL = ONIG_OPTION_MULTILINE
ONIG_OPTION_SINGLELINE = (ONIG_OPTION_MULTILINE << 1)
ONIG_OPTION_FIND_LONGEST = (ONIG_OPTION_SINGLELINE << 1)
ONIG_OPTION_FIND_NOT_EMPTY = (ONIG_OPTION_FIND_LONGEST << 1)
ONIG_OPTION_NEGATE_SINGLELINE = (ONIG_OPTION_FIND_NOT_EMPTY << 1)
ONIG_OPTION_DONT_CAPTURE_GROUP = (ONIG_OPTION_NEGATE_SINGLELINE << 1)
ONIG_OPTION_CAPTURE_GROUP = (ONIG_OPTION_DONT_CAPTURE_GROUP << 1)
# options (search time)
ONIG_OPTION_NOTBOL = (ONIG_OPTION_CAPTURE_GROUP << 1)
ONIG_OPTION_NOTEOL = (ONIG_OPTION_NOTBOL << 1)
ONIG_OPTION_NOTBOS = (ONIG_OPTION_NOTEOL << 1)
ONIG_OPTION_NOTEOS = (ONIG_OPTION_NOTBOS << 1)
# options (ctype range)
ONIG_OPTION_ASCII_RANGE = (ONIG_OPTION_NOTEOS << 1)
ONIG_OPTION_POSIX_BRACKET_ALL_RANGE = (ONIG_OPTION_ASCII_RANGE << 1)
ONIG_OPTION_WORD_BOUND_ALL_RANGE = (ONIG_OPTION_POSIX_BRACKET_ALL_RANGE << 1)
# options (newline)
ONIG_OPTION_NEWLINE_CRLF = (ONIG_OPTION_WORD_BOUND_ALL_RANGE << 1)
ONIG_OPTION_DEFAULT = ONIG_OPTION_NONE
# syntax (operators)
ONIG_SYN_OP_VARIABLE_META_CHARACTERS = (1<<0)
ONIG_SYN_OP_DOT_ANYCHAR = (1<<1)
ONIG_SYN_OP_ASTERISK_ZERO_INF = (1<<2)
ONIG_SYN_OP_ESC_ASTERISK_ZERO_INF = (1<<3)
ONIG_SYN_OP_PLUS_ONE_INF = (1<<4)
ONIG_SYN_OP_ESC_PLUS_ONE_INF = (1<<5)
ONIG_SYN_OP_QMARK_ZERO_ONE = (1<<6)
ONIG_SYN_OP_ESC_QMARK_ZERO_ONE = (1<<7)
ONIG_SYN_OP_BRACE_INTERVAL = (1<<8)
ONIG_SYN_OP_ESC_BRACE_INTERVAL = (1<<9)
ONIG_SYN_OP_VBAR_ALT = (1<<10)
ONIG_SYN_OP_ESC_VBAR_ALT = (1<<11)
ONIG_SYN_OP_LPAREN_SUBEXP = (1<<12)
ONIG_SYN_OP_ESC_LPAREN_SUBEXP = (1<<13)
ONIG_SYN_OP_ESC_AZ_BUF_ANCHOR = (1<<14)
ONIG_SYN_OP_ESC_CAPITAL_G_BEGIN_ANCHOR = (1<<15)
ONIG_SYN_OP_DECIMAL_BACKREF = (1<<16)
ONIG_SYN_OP_BRACKET_CC = (1<<17)
ONIG_SYN_OP_ESC_W_WORD = (1<<18)
ONIG_SYN_OP_ESC_LTGT_WORD_BEGIN_END = (1<<19)
ONIG_SYN_OP_ESC_B_WORD_BOUND = (1<<20)
ONIG_SYN_OP_ESC_S_WHITE_SPACE = (1<<21)
ONIG_SYN_OP_ESC_D_DIGIT = (1<<22)
ONIG_SYN_OP_LINE_ANCHOR = (1<<23)
ONIG_SYN_OP_POSIX_BRACKET = (1<<24)
ONIG_SYN_OP_QMARK_NON_GREEDY = (1<<25)
ONIG_SYN_OP_ESC_CONTROL_CHARS = (1<<26)
ONIG_SYN_OP_ESC_C_CONTROL = (1<<27)
ONIG_SYN_OP_ESC_OCTAL3 = (1<<28)
ONIG_SYN_OP_ESC_X_HEX2 = (1<<29)
ONIG_SYN_OP_ESC_X_BRACE_HEX8 = (1<<30)
ONIG_SYN_OP_ESC_O_BRACE_OCTAL = (1<<31)
ONIG_SYN_OP2_ESC_CAPITAL_Q_QUOTE = (1<<0)
ONIG_SYN_OP2_QMARK_GROUP_EFFECT = (1<<1)
ONIG_SYN_OP2_OPTION_PERL = (1<<2)
ONIG_SYN_OP2_OPTION_RUBY = (1<<3)
ONIG_SYN_OP2_PLUS_POSSESSIVE_REPEAT = (1<<4)
ONIG_SYN_OP2_PLUS_POSSESSIVE_INTERVAL = (1<<5)
ONIG_SYN_OP2_CCLASS_SET_OP = (1<<6)
ONIG_SYN_OP2_QMARK_LT_NAMED_GROUP = (1<<7)
ONIG_SYN_OP2_ESC_K_NAMED_BACKREF = (1<<8)
ONIG_SYN_OP2_ESC_G_SUBEXP_CALL = (1<<9)
ONIG_SYN_OP2_ATMARK_CAPTURE_HISTORY = (1<<10)
ONIG_SYN_OP2_ESC_CAPITAL_C_BAR_CONTROL = (1<<11)
ONIG_SYN_OP2_ESC_CAPITAL_M_BAR_META = (1<<12)
ONIG_SYN_OP2_ESC_V_VTAB = (1<<13)
ONIG_SYN_OP2_ESC_U_HEX4 = (1<<14)
ONIG_SYN_OP2_ESC_GNU_BUF_ANCHOR = (1<<15)
ONIG_SYN_OP2_ESC_P_BRACE_CHAR_PROPERTY = (1<<16)
ONIG_SYN_OP2_ESC_P_BRACE_CIRCUMFLEX_NOT = (1<<17)
#ONIG_SYN_OP2_CHAR_PROPERTY_PREFIX_IS = (1<<18)
ONIG_SYN_OP2_ESC_H_XDIGIT = (1<<19)
ONIG_SYN_OP2_INEFFECTIVE_ESCAPE = (1<<20)
ONIG_SYN_OP2_ESC_CAPITAL_R_LINEBREAK = (1<<21)
ONIG_SYN_OP2_ESC_CAPITAL_X_EXTENDED_GRAPHEME_CLUSTER = (1<<22)
ONIG_SYN_OP2_ESC_V_VERTICAL_WHITESPACE = (1<<23)
ONIG_SYN_OP2_ESC_H_HORIZONTAL_WHITESPACE = (1<<24)
ONIG_SYN_OP2_ESC_CAPITAL_K_KEEP = (1<<25)
ONIG_SYN_OP2_ESC_G_BRACE_BACKREF = (1<<26)
ONIG_SYN_OP2_QMARK_SUBEXP_CALL = (1<<27)
ONIG_SYN_OP2_QMARK_VBAR_BRANCH_RESET = (1<<28)
ONIG_SYN_OP2_QMARK_LPAREN_CONDITION = (1<<29)
ONIG_SYN_OP2_QMARK_CAPITAL_P_NAMED_GROUP = (1<<30)
ONIG_SYN_OP2_OPTION_JAVA = (1<<31)
# syntax (behavior)
ONIG_SYN_CONTEXT_INDEP_ANCHORS = (1<<31)
ONIG_SYN_CONTEXT_INDEP_REPEAT_OPS = (1<<0)
ONIG_SYN_CONTEXT_INVALID_REPEAT_OPS = (1<<1)
ONIG_SYN_ALLOW_UNMATCHED_CLOSE_SUBEXP = (1<<2)
ONIG_SYN_ALLOW_INVALID_INTERVAL = (1<<3)
ONIG_SYN_ALLOW_INTERVAL_LOW_ABBREV = (1<<4)
ONIG_SYN_STRICT_CHECK_BACKREF = (1<<5)
ONIG_SYN_DIFFERENT_LEN_ALT_LOOK_BEHIND = (1<<6)
ONIG_SYN_CAPTURE_ONLY_NAMED_GROUP = (1<<7)
ONIG_SYN_ALLOW_MULTIPLEX_DEFINITION_NAME = (1<<8)
ONIG_SYN_FIXED_INTERVAL_IS_GREEDY_ONLY = (1<<9)
ONIG_SYN_ALLOW_MULTIPLEX_DEFINITION_NAME_CALL = (1<<10)
ONIG_SYN_USE_LEFT_MOST_NAMED_GROUP = (1<<11)
# (behavior) in char class [...]
ONIG_SYN_NOT_NEWLINE_IN_NEGATIVE_CC = (1<<20)
ONIG_SYN_BACKSLASH_ESCAPE_IN_CC = (1<<21)
ONIG_SYN_ALLOW_EMPTY_RANGE_IN_CC = (1<<22)
ONIG_SYN_ALLOW_DOUBLE_RANGE_OP_IN_CC = (1<<23)
# syntax (behavior) warning
ONIG_SYN_WARN_CC_OP_NOT_ESCAPED = (1<<24)
ONIG_SYN_WARN_REDUNDANT_NESTED_REPEAT = (1<<25)
ONIG_SYN_WARN_CC_DUP = (1<<26)
# meta character specifiers (onig_set_meta_char())
ONIG_META_CHAR_ESCAPE = 0
ONIG_META_CHAR_ANYCHAR = 1
ONIG_META_CHAR_ANYTIME = 2
ONIG_META_CHAR_ZERO_OR_ONE_TIME = 3
ONIG_META_CHAR_ONE_OR_MORE_TIME = 4
ONIG_META_CHAR_ANYCHAR_ANYTIME = 5
ONIG_INEFFECTIVE_META_CHAR = 0
# error codes
def ONIG_IS_PATTERN_ERROR(ecode):
return ((ecode) <= -100 and (ecode) > -1000)
# normal return
ONIG_NORMAL = 0
ONIG_MISMATCH = -1
ONIG_NO_SUPPORT_CONFIG = -2
# internal error
ONIGERR_MEMORY = -5
ONIGERR_TYPE_BUG = -6
ONIGERR_PARSER_BUG = -11
ONIGERR_STACK_BUG = -12
ONIGERR_UNDEFINED_BYTECODE = -13
ONIGERR_UNEXPECTED_BYTECODE = -14
ONIGERR_MATCH_STACK_LIMIT_OVER = -15
ONIGERR_PARSE_DEPTH_LIMIT_OVER = -16
ONIGERR_DEFAULT_ENCODING_IS_NOT_SET = -21
ONIGERR_SPECIFIED_ENCODING_CANT_CONVERT_TO_WIDE_CHAR = -22
# general error
ONIGERR_INVALID_ARGUMENT = -30
# syntax error
ONIGERR_END_PATTERN_AT_LEFT_BRACE = -100
ONIGERR_END_PATTERN_AT_LEFT_BRACKET = -101
ONIGERR_EMPTY_CHAR_CLASS = -102
ONIGERR_PREMATURE_END_OF_CHAR_CLASS = -103
ONIGERR_END_PATTERN_AT_ESCAPE = -104
ONIGERR_END_PATTERN_AT_META = -105
ONIGERR_END_PATTERN_AT_CONTROL = -106
ONIGERR_META_CODE_SYNTAX = -108
ONIGERR_CONTROL_CODE_SYNTAX = -109
ONIGERR_CHAR_CLASS_VALUE_AT_END_OF_RANGE = -110
ONIGERR_CHAR_CLASS_VALUE_AT_START_OF_RANGE = -111
ONIGERR_UNMATCHED_RANGE_SPECIFIER_IN_CHAR_CLASS = -112
ONIGERR_TARGET_OF_REPEAT_OPERATOR_NOT_SPECIFIED = -113
ONIGERR_TARGET_OF_REPEAT_OPERATOR_INVALID = -114
ONIGERR_NESTED_REPEAT_OPERATOR = -115
ONIGERR_UNMATCHED_CLOSE_PARENTHESIS = -116
ONIGERR_END_PATTERN_WITH_UNMATCHED_PARENTHESIS = -117
ONIGERR_END_PATTERN_IN_GROUP = -118
ONIGERR_UNDEFINED_GROUP_OPTION = -119
ONIGERR_INVALID_POSIX_BRACKET_TYPE = -121
ONIGERR_INVALID_LOOK_BEHIND_PATTERN = -122
ONIGERR_INVALID_REPEAT_RANGE_PATTERN = -123
ONIGERR_INVALID_CONDITION_PATTERN = -124
# values error (syntax error)
ONIGERR_TOO_BIG_NUMBER = -200
ONIGERR_TOO_BIG_NUMBER_FOR_REPEAT_RANGE = -201
ONIGERR_UPPER_SMALLER_THAN_LOWER_IN_REPEAT_RANGE = -202
ONIGERR_EMPTY_RANGE_IN_CHAR_CLASS = -203
ONIGERR_MISMATCH_CODE_LENGTH_IN_CLASS_RANGE = -204
ONIGERR_TOO_MANY_MULTI_BYTE_RANGES = -205
ONIGERR_TOO_SHORT_MULTI_BYTE_STRING = -206
ONIGERR_TOO_BIG_BACKREF_NUMBER = -207
ONIGERR_INVALID_BACKREF = -208
ONIGERR_NUMBERED_BACKREF_OR_CALL_NOT_ALLOWED = -209
ONIGERR_TOO_MANY_CAPTURE_GROUPS = -210
ONIGERR_TOO_SHORT_DIGITS = -211
ONIGERR_TOO_LONG_WIDE_CHAR_VALUE = -212
ONIGERR_EMPTY_GROUP_NAME = -214
ONIGERR_INVALID_GROUP_NAME = -215
ONIGERR_INVALID_CHAR_IN_GROUP_NAME = -216
ONIGERR_UNDEFINED_NAME_REFERENCE = -217
ONIGERR_UNDEFINED_GROUP_REFERENCE = -218
ONIGERR_MULTIPLEX_DEFINED_NAME = -219
ONIGERR_MULTIPLEX_DEFINITION_NAME_CALL = -220
ONIGERR_NEVER_ENDING_RECURSION = -221
ONIGERR_GROUP_NUMBER_OVER_FOR_CAPTURE_HISTORY = -222
ONIGERR_INVALID_CHAR_PROPERTY_NAME = -223
ONIGERR_INVALID_CODE_POINT_VALUE = -400
ONIGERR_INVALID_WIDE_CHAR_VALUE = -400
ONIGERR_TOO_BIG_WIDE_CHAR_VALUE = -401
ONIGERR_NOT_SUPPORTED_ENCODING_COMBINATION = -402
ONIGERR_INVALID_COMBINATION_OF_OPTIONS = -403
# errors related to thread
#ONIGERR_OVER_THREAD_PASS_LIMIT_COUNT = -1001
OnigWarnFunc = ctypes.CFUNCTYPE(None, ctypes.c_char_p)
#
# Onigmo APIs
#
# onig_init
onig_init = libonig.onig_init
# onig_error_code_to_str
libonig.onig_error_code_to_str.argtypes = [ctypes.c_char_p, _c_ssize_t,
ctypes.POINTER(OnigErrorInfo)]
def onig_error_code_to_str(err_buf, err_code, err_info=None):
return libonig.onig_error_code_to_str(err_buf, err_code, err_info)
# onig_set_warn_func
libonig.onig_set_warn_func.argtypes = [OnigWarnFunc]
onig_set_warn_func = libonig.onig_set_warn_func
# onig_set_verb_warn_func
libonig.onig_set_verb_warn_func.argtypes = [OnigWarnFunc]
onig_set_verb_warn_func = libonig.onig_set_verb_warn_func
# onig_new
libonig.onig_new.argtypes = [ctypes.POINTER(OnigRegex),
ctypes.c_void_p, ctypes.c_void_p,
OnigOptionType, OnigEncoding, ctypes.POINTER(OnigSyntaxType),
ctypes.POINTER(OnigErrorInfo)]
onig_new = libonig.onig_new
# onig_reg_init
# onig_new_without_alloc
# onig_new_deluxe
# onig_free
libonig.onig_free.argtypes = [OnigRegex]
onig_free = libonig.onig_free
# onig_free_body
# onig_search
libonig.onig_search.argtypes = [OnigRegex,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p,
ctypes.POINTER(OnigRegion), OnigOptionType]
libonig.onig_search.restype = _c_ssize_t
onig_search = libonig.onig_search
# onig_search_gpos
libonig.onig_search_gpos.argtypes = [OnigRegex,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_void_p,
ctypes.POINTER(OnigRegion), OnigOptionType]
libonig.onig_search_gpos.restype = _c_ssize_t
onig_search_gpos = libonig.onig_search_gpos
# onig_match
libonig.onig_match.argtypes = [OnigRegex,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p,
ctypes.POINTER(OnigRegion), OnigOptionType]
libonig.onig_match.restype = _c_ssize_t
onig_match = libonig.onig_match
# onig_region_new
libonig.onig_region_new.argtypes = []
libonig.onig_region_new.restype = ctypes.POINTER(OnigRegion)
onig_region_new = libonig.onig_region_new
# onig_region_init
# onig_region_free
libonig.onig_region_free.argtypes = [ctypes.POINTER(OnigRegion), ctypes.c_int]
onig_region_free = libonig.onig_region_free
# onig_region_copy
# onig_region_clear
# onig_region_resize
# onig_region_set
# onig_name_to_group_numbers
# onig_name_to_backref_number
# onig_foreach_name
# onig_number_of_names
# onig_number_of_captures
# onig_number_of_capture_histories
# onig_get_capture_tree
# onig_capture_tree_traverse
# onig_noname_group_capture_is_active
# onig_get_encoding
# onig_get_options
# onig_get_case_fold_flag
# onig_get_syntax
# onig_set_default_syntax
libonig.onig_set_default_syntax.argtypes = [ctypes.POINTER(OnigSyntaxType)]
libonig.onig_set_default_syntax.restype = ctypes.c_int
onig_set_default_syntax = libonig.onig_set_default_syntax
# onig_copy_syntax
libonig.onig_copy_syntax.argtypes = [ctypes.POINTER(OnigSyntaxType),
ctypes.POINTER(OnigSyntaxType)]
onig_copy_syntax = libonig.onig_copy_syntax
# onig_get_syntax_op
libonig.onig_get_syntax_op.argtypes = [ctypes.POINTER(OnigSyntaxType)]
libonig.onig_get_syntax_op.restype = ctypes.c_int
onig_get_syntax_op = libonig.onig_get_syntax_op
# onig_get_syntax_op2
libonig.onig_get_syntax_op2.argtypes = [ctypes.POINTER(OnigSyntaxType)]
libonig.onig_get_syntax_op2.restype = ctypes.c_int
onig_get_syntax_op2 = libonig.onig_get_syntax_op2
# onig_get_syntax_behavior
libonig.onig_get_syntax_behavior.argtypes = [ctypes.POINTER(OnigSyntaxType)]
libonig.onig_get_syntax_behavior.restype = ctypes.c_int
onig_get_syntax_behavior = libonig.onig_get_syntax_behavior
# onig_get_syntax_options
libonig.onig_get_syntax_options.argtypes = [ctypes.POINTER(OnigSyntaxType)]
libonig.onig_get_syntax_options.restype = ctypes.c_int
onig_get_syntax_options = libonig.onig_get_syntax_options
# onig_set_syntax_op
libonig.onig_set_syntax_op.argtypes = [ctypes.POINTER(OnigSyntaxType),
ctypes.c_int]
onig_set_syntax_op = libonig.onig_set_syntax_op
# onig_set_syntax_op2
libonig.onig_set_syntax_op2.argtypes = [ctypes.POINTER(OnigSyntaxType),
ctypes.c_int]
onig_set_syntax_op2 = libonig.onig_set_syntax_op2
# onig_set_syntax_behavior
libonig.onig_set_syntax_behavior.argtypes = [ctypes.POINTER(OnigSyntaxType),
ctypes.c_int]
onig_set_syntax_behavior = libonig.onig_set_syntax_behavior
# onig_set_syntax_options
libonig.onig_set_syntax_options.argtypes = [ctypes.POINTER(OnigSyntaxType),
ctypes.c_int]
onig_set_syntax_options = libonig.onig_set_syntax_options
# onig_set_meta_char
# onig_copy_encoding
# onig_get_default_case_fold_flag
# onig_set_default_case_fold_flag
# onig_get_match_stack_limit_size
libonig.onig_get_match_stack_limit_size.argtypes = []
libonig.onig_get_match_stack_limit_size.restype = ctypes.c_int
onig_get_match_stack_limit_size = libonig.onig_get_match_stack_limit_size
# onig_set_match_stack_limit_size
libonig.onig_set_match_stack_limit_size.argtypes = [ctypes.c_int]
libonig.onig_set_match_stack_limit_size.restype = ctypes.c_int
onig_set_match_stack_limit_size = libonig.onig_set_match_stack_limit_size
# onig_get_parse_depth_limit
libonig.onig_get_parse_depth_limit.argtypes = []
libonig.onig_get_parse_depth_limit.restype = ctypes.c_int
onig_get_parse_depth_limit = libonig.onig_get_parse_depth_limit
# onig_set_parse_depth_limit
libonig.onig_set_parse_depth_limit.argtypes = [ctypes.c_int]
libonig.onig_set_parse_depth_limit.restype = ctypes.c_int
onig_set_parse_depth_limit = libonig.onig_set_parse_depth_limit
# onig_end
libonig.onig_end.argtypes = []
onig_end = libonig.onig_end
# onig_version
libonig.onig_version.argtypes = []
libonig.onig_version.restype = ctypes.c_char_p
def onig_version():
return libonig.onig_version().decode()
# onig_copyright
libonig.onig_copyright.argtypes = []
libonig.onig_copyright.restype = ctypes.c_char_p
def onig_copyright():
return libonig.onig_copyright().decode()
| 40.742038 | 80 | 0.70222 |
79439cc92631b22d0a5023cbac9a9ec9ecb29493 | 9,505 | py | Python | sanic_ext/extensions/openapi/builders.py | ChihweiLHBird/sanic-ext | f0193a0cc89650a43c50fe543b43d1832307896f | [
"MIT"
] | null | null | null | sanic_ext/extensions/openapi/builders.py | ChihweiLHBird/sanic-ext | f0193a0cc89650a43c50fe543b43d1832307896f | [
"MIT"
] | null | null | null | sanic_ext/extensions/openapi/builders.py | ChihweiLHBird/sanic-ext | f0193a0cc89650a43c50fe543b43d1832307896f | [
"MIT"
] | null | null | null | """
Builders for the oas3 object types
These are completely internal, so can be refactored if desired without concern
for breaking user experience
"""
from collections import defaultdict
from typing import Optional
from ...utils.route import remove_nulls, remove_nulls_from_kwargs
from .autodoc import YamlStyleParametersParser
from .definitions import (
Any,
Components,
Contact,
Dict,
ExternalDocumentation,
Info,
License,
List,
OpenAPI,
Operation,
Parameter,
PathItem,
RequestBody,
Response,
Server,
Tag,
)
class OperationBuilder:
summary: str
description: str
operationId: str
requestBody: RequestBody
externalDocs: ExternalDocumentation
tags: List[str]
security: List[Any]
parameters: List[Parameter]
responses: Dict[str, Response]
callbacks: List[str] # TODO
deprecated: bool = False
def __init__(self):
self.tags = []
self.security = []
self.parameters = []
self.responses = {}
self._default = {}
self._autodoc = None
self._exclude = False
self._allow_autodoc = True
def name(self, value: str):
self.operationId = value
def describe(self, summary: str = None, description: str = None):
if summary:
self.summary = summary
if description:
self.description = description
def document(self, url: str, description: str = None):
self.externalDocs = ExternalDocumentation.make(url, description)
def tag(self, *args: str):
for arg in args:
self.tags.append(arg)
def deprecate(self):
self.deprecated = True
def body(self, content: Any, **kwargs):
self.requestBody = RequestBody.make(content, **kwargs)
def parameter(
self, name: str, schema: Any, location: str = "query", **kwargs
):
self.parameters.append(
Parameter.make(name, schema, location, **kwargs)
)
def response(
self, status, content: Any = None, description: str = None, **kwargs
):
self.responses[status] = Response.make(content, description, **kwargs)
def secured(self, *args, **kwargs):
items = {**{v: [] for v in args}, **kwargs}
gates = {}
for name, params in items.items():
gate = name.__name__ if isinstance(name, type) else name
gates[gate] = params
self.security.append(gates)
def disable_autodoc(self):
self._allow_autodoc = False
def build(self):
operation_dict = self._build_merged_dict()
if "responses" not in operation_dict:
# todo -- look into more consistent default response format
operation_dict["responses"] = {"default": {"description": "OK"}}
return Operation(**operation_dict)
def _build_merged_dict(self):
defined_dict = self.__dict__.copy()
autodoc_dict = self._autodoc or {}
default_dict = self._default
merged_dict = {}
for d in (default_dict, autodoc_dict, defined_dict):
cleaned = {
k: v for k, v in d.items() if v and not k.startswith("_")
}
merged_dict.update(cleaned)
return merged_dict
def autodoc(self, docstring: str):
y = YamlStyleParametersParser(docstring)
self._autodoc = y.to_openAPI_3()
def exclude(self, flag: bool = True):
self._exclude = flag
class OperationStore(defaultdict):
_singleton = None
def __new__(cls) -> Any:
if not cls._singleton:
cls._singleton = super().__new__(cls)
return cls._singleton
def __init__(self):
super().__init__(OperationBuilder)
@classmethod
def reset(cls):
cls._singleton = None
class SpecificationBuilder:
_urls: List[str]
_title: str
_version: str
_description: Optional[str]
_terms: Optional[str]
_contact: Contact
_license: License
_paths: Dict[str, Dict[str, OperationBuilder]]
_tags: Dict[str, Tag]
_components: Dict[str, Any]
_servers: List[Server]
# _components: ComponentsBuilder
# deliberately not included
_singleton = None
def __new__(cls) -> Any:
if not cls._singleton:
cls._singleton = super().__new__(cls)
cls._setup_instance(cls._singleton)
return cls._singleton
@classmethod
def _setup_instance(cls, instance):
instance._components = defaultdict(dict)
instance._contact = None
instance._description = None
instance._external = None
instance._license = None
instance._paths = defaultdict(dict)
instance._servers = []
instance._tags = {}
instance._terms = None
instance._title = None
instance._urls = []
instance._version = None
@classmethod
def reset(cls):
cls._singleton = None
@property
def tags(self):
return self._tags
def url(self, value: str):
self._urls.append(value)
def describe(
self,
title: str,
version: str,
description: Optional[str] = None,
terms: Optional[str] = None,
):
self._title = title
self._version = version
self._description = description
self._terms = terms
def _do_describe(
self,
title: str,
version: str,
description: Optional[str] = None,
terms: Optional[str] = None,
):
if any([self._title, self._version, self._description, self._terms]):
return
self.describe(title, version, description, terms)
def tag(self, name: str, description: Optional[str] = None, **kwargs):
self._tags[name] = Tag(name, description=description, **kwargs)
def external(self, url: str, description: Optional[str] = None, **kwargs):
self._external = ExternalDocumentation(url, description=description)
def contact(self, name: str = None, url: str = None, email: str = None):
kwargs = remove_nulls_from_kwargs(name=name, url=url, email=email)
self._contact = Contact(**kwargs)
def _do_contact(
self, name: str = None, url: str = None, email: str = None
):
if self._contact:
return
self.contact(name, url, email)
def license(self, name: str = None, url: str = None):
if name is not None:
self._license = License(name, url=url)
def _do_license(self, name: str = None, url: str = None):
if self._license:
return
self.license(name, url)
def operation(self, path: str, method: str, operation: OperationBuilder):
for _tag in operation.tags:
if _tag in self._tags.keys():
continue
self._tags[_tag] = Tag(_tag)
self._paths[path][method.lower()] = operation
def add_component(self, location: str, name: str, obj: Any):
self._components[location].update({name: obj})
def has_component(self, location: str, name: str) -> bool:
return name in self._components.get(location, {})
def raw(self, data):
if "info" in data:
self.describe(
data["info"].get("title"),
data["info"].get("version"),
data["info"].get("description"),
data["info"].get("terms"),
)
if "servers" in data:
for server in data["servers"]:
self._servers.append(Server(**server))
if "paths" in data:
self._paths.update(data["paths"])
if "components" in data:
for location, component in data["components"].items():
self._components[location].update(component)
if "security" in data:
...
if "tags" in data:
for tag in data["tags"]:
self.tag(**tag)
if "externalDocs" in data:
self.external(**data["externalDocs"])
def build(self) -> OpenAPI:
info = self._build_info()
paths = self._build_paths()
tags = self._build_tags()
url_servers = getattr(self, "_urls", None)
servers = self._servers
if url_servers is not None:
for url_server in url_servers:
servers.append(Server(url=url_server))
components = (
Components(**self._components) if self._components else None
)
return OpenAPI(
info,
paths,
tags=tags,
servers=servers,
components=components,
externalDocs=self._external,
)
def _build_info(self) -> Info:
kwargs = remove_nulls(
{
"description": self._description,
"termsOfService": self._terms,
"license": self._license,
"contact": self._contact,
},
deep=False,
)
return Info(self._title, self._version, **kwargs)
def _build_tags(self):
return [self._tags[k] for k in self._tags]
def _build_paths(self) -> Dict:
paths = {}
for path, operations in self._paths.items():
paths[path] = PathItem(
**{
k: v if isinstance(v, dict) else v.build()
for k, v in operations.items()
}
)
return paths
| 27.550725 | 78 | 0.582851 |
79439d4238f37334534f020b74192aeb0ee908e0 | 4,635 | py | Python | yaaz/src/optimisation.py | swasun/Yet-Another-AlphaZero | dc9fc185ecb1ba345be1c2b79bd0898c820d4d0c | [
"MIT"
] | 2 | 2019-03-13T18:00:21.000Z | 2020-06-16T03:30:40.000Z | yaaz/src/optimisation.py | swasun/Yet-Another-AlphaZero | dc9fc185ecb1ba345be1c2b79bd0898c820d4d0c | [
"MIT"
] | null | null | null | yaaz/src/optimisation.py | swasun/Yet-Another-AlphaZero | dc9fc185ecb1ba345be1c2b79bd0898c820d4d0c | [
"MIT"
] | null | null | null | #####################################################################################
# MIT License #
# #
# Copyright (C) 2019 Charly Lamothe #
# #
# This file is part of Yet-Another-AlphaZero. #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy #
# of this software and associated documentation files (the "Software"), to deal #
# in the Software without restriction, including without limitation the rights #
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
# copies of the Software, and to permit persons to whom the Software is #
# furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
#####################################################################################
from chess_model import ChessModel
from dataset import Dataset
from error_handling.console_logger import ConsoleLogger
from chess_env import ChessEnv
import os
import numpy as np
import random
class Optimisation(object):
def __init__(self, model, dataset):
self._model = model
self._dataset = dataset
def start(self):
environment_batches = self._dataset.load_n_last_environment_batches(1)
losses = list()
for environment_batch in environment_batches:
mini_batch = []
values = []
policies = []
actual_policies = []
for environment in environment_batch:
env = ChessEnv()
result = environment['result']
if result == '1/2-1/2':
values += [[-1.0]]
elif result == '1-0':
values += [[1.0]]
else:
values += [[-1.0]]
policies += environment['policies']
actions = environment['actions']
if len(actions) % 2 > 0:
action = random.randint(0, (len(actions) - 3) / 2)
else:
action = random.randint(0, (len(actions) - 2) / 2)
for i in range(0, action):
env._board.push(actions[2 * i])
env._board.push(actions[(2 * i) + 1])
state = env.build_state(T=8)
mini_batch += [state]
probabilities = np.zeros((73, 8, 8))
actual_probabilities = env.filter_illegal_probabilities(probabilities, is_training=True, q=environment['policies'][action])
actual_probabilities = np.ndarray.flatten(actual_probabilities)
actual_policies += [actual_probabilities]
for i in range(len(environment_batch)):
labels = {'policy_head': np.reshape(actual_policies[i], (1, 8 * 8 * 73)), 'value_head': np.array(values[i])}
history = self._model.fit(mini_batch[i], labels)
losses += [history.history['loss']]
print(np.mean(losses))
if __name__ == "__main__":
dataset = Dataset(results_path='..' + os.sep + '..' + os.sep + 'results' + os.sep + 'chess')
model = dataset.load_best_model()
if model is None:
model = ChessModel()
optimisation = Optimisation(model, dataset)
optimisation.start()
| 50.380435 | 139 | 0.484574 |
79439d8a07e3e20e3a361afa525efba7ade7a0ae | 506 | py | Python | dotinstall/installer/util.py | TeeJayYang/dotinstall | d0ee99d264425fee7132623753717072e67a533b | [
"Apache-2.0",
"MIT"
] | 1 | 2019-09-04T02:52:51.000Z | 2019-09-04T02:52:51.000Z | dotinstall/installer/util.py | TeeJayYang/dotinstall | d0ee99d264425fee7132623753717072e67a533b | [
"Apache-2.0",
"MIT"
] | 3 | 2018-11-28T05:15:12.000Z | 2021-10-18T01:13:08.000Z | dotinstall/installer/util.py | TeeJayYang/dotinstall | d0ee99d264425fee7132623753717072e67a533b | [
"Apache-2.0",
"MIT"
] | 2 | 2017-10-30T23:14:36.000Z | 2018-11-27T03:46:24.000Z | from dotinstall.installer.apt_installer import AptInstaller
from dotinstall.installer.brew_installer import BrewInstaller
from dotinstall.installer.eopkg_installer import EopkgInstaller
from dotinstall.installer.pacman_installer import PacmanInstaller
installers = [AptInstaller(), BrewInstaller(), EopkgInstaller(), PacmanInstaller()]
def get_system_installer(): # pragma: no cover
for installer in installers:
if installer.installer_exists():
return installer
return None
| 36.142857 | 83 | 0.798419 |
79439db88c6791357f555f69796702bfef10fd70 | 20,102 | py | Python | evolution.py | brianwgoldman/LengthBiasCGP | a81cf7215b2dd0a06412cae4626d37a943db6b85 | [
"BSD-2-Clause-FreeBSD"
] | 3 | 2016-02-24T13:32:38.000Z | 2021-03-16T07:03:07.000Z | evolution.py | brianwgoldman/LengthBiasCGP | a81cf7215b2dd0a06412cae4626d37a943db6b85 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | evolution.py | brianwgoldman/LengthBiasCGP | a81cf7215b2dd0a06412cae4626d37a943db6b85 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | '''
Handles how to perform all of the actual evolution.
'''
import random
import sys
from copy import copy
from util import diff_count
from collections import defaultdict
class Individual(object):
'''
An individual object used to combine gene fitness with genomes, as
well methods for manipulating those genomes.
'''
def __init__(self, graph_length, input_length, output_length,
max_arity, function_list, **_):
'''
Create a new individual instance.
Parameters:
- ``graph_length``: The number of nodes in the CGP encoded graph.
- ``input_length``: The number of input variables.
- ``output_length``: The number of output variables.
- ``max_arity``: The maximum arity used by any function.
- ``function_list``: The list of functions a node can use.
'''
self.node_step = max_arity + 1
self.input_length = input_length
self.graph_length = graph_length
self.function_list = function_list
self.output_length = output_length
self.genes = None
self.genes = [self.random_gene(index) for index in
range(graph_length * self.node_step + output_length)]
self.determine_active_nodes()
# If memory problems arise, make this globally shared
self.scratch = [None] * (graph_length + self.input_length)
self.fitness = -sys.maxint
def random_gene(self, index, invalid=None):
'''
Determines a random gene value given a gene index. If optional
``invalid`` option is used, the returned value will only be ``invalid``
if the gene has no other valid values.
Parameters:
- ``index``: The gene index who's value is being set.
- ``invalid``: Value to avoid returning if possible
'''
node_number = index // self.node_step
gene_number = index % self.node_step
# If the gene is used to specify output locations
if node_number >= self.graph_length:
node_number = self.graph_length
gene_number = -1
# If the gene controls the function of a node
if gene_number == 0:
if len(self.function_list) == 1:
return self.function_list[0]
while True:
choice = random.choice(self.function_list)
if choice != invalid:
return choice
# If the gene controls a connection / output location
else:
if node_number + self.input_length == 1:
return -1
while True:
choice = random.randrange(-self.input_length, node_number)
if choice != invalid:
return choice
def dag_random_gene(self, index, invalid=None):
'''
Determines a random gene value given a gene index of a full DAG.
If optional ``invalid`` option is used, the returned value
will only be ``invalid`` if the gene has no other valid values.
Parameters:
- ``index``: The gene index who's value is being set.
- ``invalid``: Value to avoid returning if possible
'''
node_number = index // self.node_step
gene_number = index % self.node_step
if node_number >= self.graph_length:
node_number = self.graph_length
gene_number = -1
# If it is a function gene
if gene_number == 0:
if len(self.function_list) == 1:
return self.function_list[0]
while True:
choice = random.choice(self.function_list)
if choice != invalid:
return choice
# If you are dealing with output locations or individual initialization
elif gene_number < 0 or not self.genes:
if node_number + self.input_length == 1:
return -1
while True:
choice = random.randrange(-self.input_length, node_number)
if choice != invalid:
return choice
# If you are resetting a connection link on an existing individual
else:
return self.valid_reconnect(node_number, invalid)
def valid_reconnect(self, node_index, invalid=None):
'''
When using a DAG individual, find a random connection location that
does not depend on the current node.
Parameters:
- ``node_index``: The index of the node who's connection is being reset
- ``invalid``: Value to avoid returning if possible
'''
# Nodes always depend on themselves and inputs never depend on nodes
dependent = {node_index: True, invalid: False}
# Current inputs are not dependent on the mutating node
for conn in self.connections(node_index):
dependent[conn] = False
for index in range(-self.input_length, 0):
dependent[index] = False
def is_dependent(current):
'''
Internal recursive function to determine if a node index
is dependent on ``node_index``. Also updates the dependency
dictionary.
Parameters:
- ``current``: The current working node index to be checked for
dependency.
'''
if current in dependent:
return dependent[current]
for conn in self.connections(current):
if is_dependent(conn):
dependent[current] = True
return True
dependent[current] = False
return False
# Create the list of all possible connections
options = range(-self.input_length, self.graph_length)
for index in range(len(options)):
# Choose a random untried option and swap it to the next index
swapdown = random.randrange(index, len(options))
options[index], options[swapdown] = (options[swapdown],
options[index])
option = options[index]
# Test this option
if option != invalid and not is_dependent(option):
return option
return invalid
def copy(self):
'''
Return a copy of the individual. Note that individuals are shallow
copied except for their list of genes.
'''
# WARNING individuals are shallow copied except for things added here
new = copy(self)
new.genes = list(self.genes)
return new
def connections(self, node_index):
'''
Return the list of connections that a specified node has.
Parameters
- ``node_index``: The index of the node information is being requested
for. Note this is different from gene index.
'''
node_start = self.node_step * node_index
return self.genes[node_start + 1: node_start + self.node_step]
def determine_active_nodes(self):
'''
Determines which nodes are currently active and sets self.active
to the sorted list of active genes. Automatically called by gene
manipulating member functions.
'''
self.active = set(self.genes[-self.output_length:])
for node_index in reversed(range(self.graph_length)):
if node_index in self.active:
# add all of the connection genes for this node
self.active.update(self.connections(node_index))
self.active = sorted([acting for acting in self.active if acting >= 0])
def dag_determine_active_nodes(self):
'''
Determines which nodes are currently active and sets self.active
to the sorted list of active genes in DAG individuals.
Automatically called by gene manipulating member functions.
'''
depends_on = defaultdict(set)
feeds_to = defaultdict(set)
# The output locations start as 'connected'
connected = self.genes[-self.output_length:]
added = set(connected)
# Build a bi-directional dependency tree
while connected:
working = connected.pop()
# Ignore input locations
if working < 0:
continue
for conn in self.connections(working):
# Record that 'working' takes input from 'conn'
depends_on[working].add(conn)
# Record that 'conn' sends its output to 'working'
feeds_to[conn].add(working)
if conn not in added:
connected.append(conn)
added.add(conn)
# find the order in which to evaluate them
self.active = []
# All input locations start out addable
addable = [x for x in range(-self.input_length, 0)]
while addable:
working = addable.pop()
# Find everything that depends on 'working' for input
for conn in feeds_to[working]:
# Record that 'conn' is no longer waiting on 'working'
depends_on[conn].remove(working)
if len(depends_on[conn]) == 0:
addable.append(conn)
self.active.append(conn)
def all_active(self):
'''
Function that always makes all nodes in the genome active. Useful
when the fitness function analyzes nodes directly when combined with
Single mutation.
'''
self.active = range(self.graph_length)
def evaluate(self, inputs):
'''
Given a list of inputs, return a list of outputs from executing
this individual.
Parameters:
- ``inputs``: The list of input values for the individual to process.
'''
# Start by loading the input values into scratch
# NOTE: Input locations are given as negative values
self.scratch[-len(inputs):] = inputs[::-1]
# Loop through the active genes in order
for node_index in self.active:
function = self.genes[node_index * self.node_step]
args = [self.scratch[con] for con in self.connections(node_index)]
# Apply the function to the inputs from scratch, saving results
# back to the scratch
self.scratch[node_index] = function(*args)
# Extract outputs from the scratch space
return [self.scratch[output]
for output in self.genes[-self.output_length:]]
def mutate(self, mutation_rate):
'''
Return a mutated version of this individual using the specified
mutation rate.
Parameters:
- ``mutation_rate``: The probability that a specific gene will mutate.
'''
mutant = self.copy()
for index in range(len(mutant.genes)):
if random.random() < mutation_rate:
mutant.genes[index] = mutant.random_gene(index,
mutant.genes[index])
# Have the mutant recalculate its active genes
mutant.determine_active_nodes()
return mutant
def one_active_mutation(self, _):
'''
Return a mutated version of this individual using the ``Single``
mutation method.
'''
mutant = self.copy()
while True:
# Choose an index at random
index = random.randrange(len(mutant.genes))
# Get a new value for that gene
newval = mutant.random_gene(index)
# If that value is different than the current value
if newval != mutant.genes[index]:
mutant.genes[index] = newval
# Determine if that gene was part of an active node
node_number = index // self.node_step
if (node_number >= self.graph_length or
node_number in self.active):
break
# Have the mutant recalculate its active genes
mutant.determine_active_nodes()
return mutant
def reorder(self):
'''
Return an individual who's genes have been reordered randomly without
changing any of the actual connection information.
'''
# Build a list of dependencies
depends_on = defaultdict(set)
feeds_to = defaultdict(set)
for node_index in range(self.graph_length):
for conn in self.connections(node_index):
# Record that 'node_index' takes input from 'conn'
depends_on[node_index].add(conn)
# Record that 'conn' sends its output to 'node_index'
feeds_to[conn].add(node_index)
# Create a dictionary storing how to translate location information
new_order = {i: i for i in range(-self.input_length, 0)}
# Input locations start as addable
addable = new_order.keys()
counter = 0
while addable:
# Choose a node at random who's dependencies have already been met
working = random.choice(addable)
addable.remove(working)
# If 'working' is not an input location
if working >= 0:
# Assign this node to the next available index
new_order[working] = counter
counter += 1
# Update all dependencies now that this node has been added
for to_add in feeds_to[working]:
# Mark 'to_add' as having its requirement on 'working' complete
depends_on[to_add].remove(working)
if len(depends_on[to_add]) == 0:
addable.append(to_add)
# Create the new individual using the new ordering
mutant = self.copy()
for node_index in range(self.graph_length):
# Find the new starting location in the mutant for this node
start = new_order[node_index] * self.node_step
# Move over the function gene
mutant.genes[start] = self.genes[node_index * self.node_step]
# Translate connection genes to have new order information
connections = [new_order[conn]
for conn in self.connections(node_index)]
# Move over the connection genes
mutant.genes[start + 1:start + self.node_step] = connections
length = len(self.genes)
# Update the output locations
for index in range(length - self.output_length, length):
mutant.genes[index] = new_order[self.genes[index]]
# Have the mutant recalculate its active genes
mutant.determine_active_nodes()
return mutant
def asym_phenotypic_difference(self, other):
'''
Determine how many genes would have to be mutated in order to make
the ``other`` individual phenotypically identical to ``self``.
Parameters:
- ``other``: The individual to compare with.
'''
# Count the differences in the output locations
count = diff_count(self.genes[-self.output_length:],
other.genes[-self.output_length:])
# For each active node
for node_index in self.active:
index = node_index * self.node_step
# Count the number of different connection genes
count += diff_count(self.connections(node_index),
other.connections(node_index))
# Include differences in the function gene
count += (self.genes[index] !=
other.genes[index])
return count
def show_active(self):
'''
Prints the active portions of the individual in a somewhat readable
way.
'''
for node_index in self.active:
node_start = self.node_step * node_index
print node_index, self.genes[node_start],
print self.connections(node_index)
print self.genes[-self.output_length:]
def __lt__(self, other):
'''
Returns the result of self.fitness < other.fitness.
'''
return self.fitness < other.fitness
def __le__(self, other):
'''
Returns the result of self.fitness <= other.fitness
'''
return self.fitness <= other.fitness
def generate(config, frequencies):
'''
An ``Individual`` generator that will yield a never ending supply of
``Individual`` objects that need to have their fitness set before the
next ``Individual`` can be yielded.
Parameters:
- ``config``: A dictionary containing all configuration information
required to generate initial individuals. Should include values
for:
- All configuration information required to initialize an Individual.
- ``dag``: If DAG based individuals should be used.
- ``reorder``: If the parent should be reordered before making offspring.
- ``mutation_rate``: The probably to use for mutation.
- ``off_size``: The number of offspring to produce per generation.
- ``output_length``: The number of output variables.
- ``max_arity``: The maximum arity used by any function.
- ``speed``: String specifying the way to handle duplicate
individual creation, either ``normal'', ``skip'', ``accumulate``, or
``single``.
- ``active_push``: Determines if fitness should break ties depending on
number of active nodes.
Valid settings are ``none``, ``more``, or ``less``.
- ``problem``: The problem these individuals are solving. Used on in
the case where problems require unusual individual modification.
- ``frequencies``: Dictionary used to return information about how often
individuals of different lengths are evolved.
'''
if config['dag']:
# Override base functions with dag versions
Individual.determine_active_nodes = \
Individual.dag_determine_active_nodes
Individual.random_gene = \
Individual.dag_random_gene
if config['speed'] == 'single':
# Override normal mutation with Single
Individual.mutate = Individual.one_active_mutation
if config['problem'] == 'Flat':
# Override normal method for determining active genes
Individual.determine_active_nodes = Individual.all_active
parent = Individual(**config)
# Evaluate initial individual
yield parent
while True:
if config['reorder']:
# Replace the parent with a reordered version of itself
parent = parent.reorder()
# Create mutant offspring
mutants = [parent.mutate(config['mutation_rate'])
for _ in range(config['off_size'])]
# Determine how many active genes the parent has
for index, mutant in enumerate(mutants):
prev = mutant
if config['speed'] not in ['normal', 'single']:
change = parent.asym_phenotypic_difference(mutant)
if change == 0:
if config['speed'] == 'skip':
continue
if config['speed'] == 'accumulate':
while change == 0:
# As long as there have been no changes,
# keep mutating
prev = mutant
mutant = prev.mutate(config['mutation_rate'])
change = parent.asym_phenotypic_difference(mutant)
if 'frequency_results' in config:
# Records the length of the generated individual
frequencies[len(mutant.active)] += 1
# Send the offspring out to be evaluated
yield mutant
if config['speed'] == 'accumulate':
# If the mutant is strickly worse, use the last equivalent
mutants[index] = prev if mutant < parent else mutant
best_child = max(mutants)
if parent <= best_child:
parent = best_child
| 40.940937 | 79 | 0.593822 |
79439e40089d9b1cc00bb56d0d916cd55378fc77 | 2,644 | py | Python | url`s_and_templates/django101/settings.py | EmilianStoyanov/python-web | 60ddb1f0cc4c5bb1615317967c4da33c4171b27b | [
"MIT"
] | 3 | 2021-01-19T18:54:38.000Z | 2022-01-05T17:28:41.000Z | url`s_and_templates/django101/settings.py | EmilianStoyanov/python-web | 60ddb1f0cc4c5bb1615317967c4da33c4171b27b | [
"MIT"
] | null | null | null | url`s_and_templates/django101/settings.py | EmilianStoyanov/python-web | 60ddb1f0cc4c5bb1615317967c4da33c4171b27b | [
"MIT"
] | null | null | null | from os.path import join
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = 'yiamnr83kv$okon9j)d58t)(wr&_hb4f(yr#reec4$ae6s_t62'
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django101',
'django102',
'django101_admin',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django101.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates']
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django101.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = ''
STATICFILES_DIRS = (
join(BASE_DIR, 'static'),
)
| 24.943396 | 91 | 0.677383 |
79439e72705d2c1de958b119ecc0dfe3284f8c29 | 5,791 | py | Python | tests/unit/wrappers/ensembles/test_ensembles.py | pavelg087/hcrystalball | 25f186dc72d4e273c6696a5c822f601d54bab734 | [
"MIT"
] | 1 | 2021-04-12T17:08:17.000Z | 2021-04-12T17:08:17.000Z | tests/unit/wrappers/ensembles/test_ensembles.py | pavelg087/hcrystalball | 25f186dc72d4e273c6696a5c822f601d54bab734 | [
"MIT"
] | null | null | null | tests/unit/wrappers/ensembles/test_ensembles.py | pavelg087/hcrystalball | 25f186dc72d4e273c6696a5c822f601d54bab734 | [
"MIT"
] | 1 | 2022-01-03T16:02:35.000Z | 2022-01-03T16:02:35.000Z | import pytest
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from pandas.testing import assert_frame_equal
from hcrystalball.ensemble import StackingEnsemble, SimpleEnsemble
from hcrystalball.exceptions import DuplicatedModelNameError
@pytest.fixture(
scope="module",
params=["with_duplicates", "no_duplicates", "no_duplicates_with_pipeline"],
)
def base_learners(request):
class DummyModel:
def __init__(self, alpha, name):
self.alpha = alpha
self.name = name
self.fitted = False
def fit(self, X, y):
self.fitted = True
def predict(self, X):
return pd.DataFrame(np.ones(len(X)) * self.alpha, columns=["dummy"], index=X.index)
if request.param == "with_duplicates":
return [DummyModel(name="model", alpha=5), DummyModel(name="model", alpha=20)]
elif request.param == "no_duplicates":
return [
DummyModel(name="model_1", alpha=5),
DummyModel(name="model_2", alpha=20),
]
elif request.param == "no_duplicates_with_pipeline":
return [
Pipeline([("model", DummyModel(name="model_1", alpha=5))]),
DummyModel(name="model_2", alpha=20),
]
elif request.param == "with_duplicates_with_pipeline":
return [
Pipeline([("model", DummyModel(name="model_1", alpha=5))]),
DummyModel(name="model__model_1", alpha=20),
]
else:
return None
@pytest.mark.parametrize(
"base_learners, ensemble, kwargs, expected_error",
[
("no_duplicates", StackingEnsemble, {"meta_model": LinearRegression()}, None),
("no_duplicates", SimpleEnsemble, {}, None),
(
"with_duplicates",
StackingEnsemble,
{"meta_model": LinearRegression()},
DuplicatedModelNameError,
),
("with_duplicates", SimpleEnsemble, {}, DuplicatedModelNameError),
(
"no_duplicates_with_pipeline",
StackingEnsemble,
{"meta_model": LinearRegression()},
None,
),
("no_duplicates_with_pipeline", SimpleEnsemble, {}, None),
(
"with_duplicates_with_pipeline",
StackingEnsemble,
{"meta_model": LinearRegression()},
DuplicatedModelNameError,
),
("with_duplicates_with_pipeline", SimpleEnsemble, {}, DuplicatedModelNameError),
],
indirect=["base_learners"],
)
def test_check_base_learners_names(base_learners, ensemble, kwargs, expected_error):
if expected_error is None:
se = ensemble(base_learners=base_learners, **kwargs)
assert isinstance(se, ensemble)
else:
with pytest.raises(expected_error):
_ = ensemble(base_learners=base_learners, **kwargs)
@pytest.mark.parametrize(
"base_learners, ensemble_func, expected_error",
[
("no_duplicates", "mean", None),
("no_duplicates", "min", None),
("no_duplicates", "max", None),
("no_duplicates", "median", None),
("no_duplicates", "agg", ValueError), # pandas available func
("no_duplicates", "random_string", ValueError), # no real func
],
indirect=["base_learners"],
)
def test_ensemble_func(base_learners, ensemble_func, expected_error):
if expected_error is not None:
with pytest.raises(expected_error):
_ = SimpleEnsemble(base_learners=base_learners, ensemble_func=ensemble_func)
else:
model = SimpleEnsemble(base_learners=base_learners, ensemble_func=ensemble_func)
alphas = [bl.alpha for bl in model.base_learners]
X = pd.DataFrame(index=pd.date_range("2012", "2016", freq="Y"))
model.fit(X, y=np.ones(len(X)))
exp_result = pd.DataFrame(
(
pd.DataFrame(np.ones(len(X)) * alphas[0])
.assign(xx=np.ones(len(X)) * alphas[1])
.apply(ensemble_func, axis=1)
.values
),
columns=[model.name],
index=X.index,
)
assert_frame_equal(exp_result, model.predict(X))
@pytest.mark.parametrize("base_learners", [("no_duplicates")], indirect=["base_learners"])
def test_ensembles_stackingensemble_create_horizons_as_features(base_learners):
n_splits = 2
horizon = 3
model = StackingEnsemble(
meta_model=LinearRegression(),
base_learners=base_learners,
train_n_splits=n_splits,
train_horizon=horizon,
)
cross_result_index = np.arange(horizon * n_splits, dtype=int)
df = model._create_horizons_as_features(cross_result_index, horizon=horizon, n_splits=n_splits)
assert isinstance(df, pd.DataFrame)
assert df.shape == (n_splits * horizon, horizon)
@pytest.mark.parametrize("base_learners", [("no_duplicates")], indirect=["base_learners"])
def test_ensembles_stackingensemble_create_weekdays_as_features(base_learners):
n_splits = 2
horizon = 3
model = StackingEnsemble(
meta_model=LinearRegression(),
base_learners=base_learners,
train_n_splits=n_splits,
train_horizon=horizon,
)
cross_result_index = pd.DatetimeIndex(
["2020-01-01", "2020-01-02", "2020-01-03", "2020-01-04", "2020-01-05"]
)
df = model._create_weekdays_as_features(cross_result_index)
result = pd.DataFrame(
{
"Friday": [0, 0, 1, 0, 0],
"Saturday": [0, 0, 0, 1, 0],
"Sunday": [0, 0, 0, 0, 1],
"Thursday": [0, 1, 0, 0, 0],
"Wednesday": [1, 0, 0, 0, 0],
},
index=cross_result_index,
).astype("uint8")
assert_frame_equal(result, df)
| 32.903409 | 99 | 0.625453 |
79439f10d84c1ca42f995d4097fb9362e7243099 | 25 | py | Python | data/studio21_generated/introductory/4215/starter_code.py | vijaykumawat256/Prompt-Summarization | 614f5911e2acd2933440d909de2b4f86653dc214 | [
"Apache-2.0"
] | null | null | null | data/studio21_generated/introductory/4215/starter_code.py | vijaykumawat256/Prompt-Summarization | 614f5911e2acd2933440d909de2b4f86653dc214 | [
"Apache-2.0"
] | null | null | null | data/studio21_generated/introductory/4215/starter_code.py | vijaykumawat256/Prompt-Summarization | 614f5911e2acd2933440d909de2b4f86653dc214 | [
"Apache-2.0"
] | null | null | null | def count_number(n, x):
| 12.5 | 23 | 0.68 |
79439f72a06999080c58c993e1f36a2f819d96a0 | 4,231 | py | Python | backtest/algos/trash/algo_ema_v1.py | block1o1/CryptoPredicted | 7f660cdc456fb8252b3125028f31fd6f5a3ceea5 | [
"MIT"
] | 4 | 2021-10-14T21:22:25.000Z | 2022-03-12T19:58:48.000Z | backtest/algos/trash/algo_ema_v1.py | inevolin/CryptoPredicted | 7f660cdc456fb8252b3125028f31fd6f5a3ceea5 | [
"MIT"
] | null | null | null | backtest/algos/trash/algo_ema_v1.py | inevolin/CryptoPredicted | 7f660cdc456fb8252b3125028f31fd6f5a3ceea5 | [
"MIT"
] | 1 | 2022-03-15T22:52:53.000Z | 2022-03-15T22:52:53.000Z |
import json
import sys
sys.dont_write_bytecode = True
import numpy as np
import datetime
import random
import math
import core
def run(debug):
base = "BTC"
#base = "ETH"
#base = "LTC"
quote = "USDT"
historymins = 60*24*30*1 #60*24*30*4
interval = 60
dtend = datetime.datetime.strptime('2018-04-15 00:00', '%Y-%m-%d %H:%M')
dtstart = dtend - datetime.timedelta(minutes=historymins)
inp = core.getPriceExchange_v1('binance', interval, base, quote, historymins, dtend)
uncertainty_margin = 0.001
def sig(prev_len, prevY, prevs, price):
if len(prevY) == 0: return price
multiplier = (2 / float(1 + prev_len))
v = price*multiplier + prevY[-1]*(1-multiplier)
return v
def work(_1, _2):
portfolio = {}
dtit = dtstart
prevs = []
canBuy = True
canSell = False
traceA = core.createNewScatterTrace("traceA", "y")
traceA['prev_len'] = _1
traceB = core.createNewScatterTrace("traceB", "y")
traceB['prev_len'] = _2
while dtit <= dtend:
idx = datetime.datetime.strftime(dtit, '%Y-%m-%dT%H:%M')
if idx in inp:
c = inp[idx]['close']
o = inp[idx]['open']
l = inp[idx]['low']
h = inp[idx]['high']
#price = (o+c)/2 # ok
# price = c # ok
price = o + (c-o)*random.randint(0,10)/10 # ok
#price = random.uniform(o, c) if c > o else random.uniform(c, o)
# price = random.uniform(l, h) # much worse than [open, close]
buyprice = price
sellprice = price
core.portfolioPriceEntry(portfolio, dtit, price, o, c, l, h)
core.addToScatterTrace(traceA, dtit, sig(traceA['prev_len'], traceA['y'], prevs, price))
core.addToScatterTrace(traceB, dtit, sig(traceB['prev_len'], traceB['y'], prevs, price))
if len(traceA['y']) > 1:
if canBuy and (traceA['y'][-2] < traceB['y'][-2] and traceA['y'][-1] > traceB['y'][-1]):
core.portfolioBuy(portfolio, dtit, buyprice, uncertainty_margin)
canSell = True
canBuy = False
elif canSell and (traceA['y'][-2] > traceB['y'][-2] and traceA['y'][-1] < traceB['y'][-1]):
core.portfolioSell(portfolio, dtit, sellprice, uncertainty_margin)
canSell = False
canBuy = True
prevs.append(price)
dtit += datetime.timedelta(minutes=interval)
# beautify (replacing 0's by None )
for i,v in enumerate(traceB['y']):
if v == 0:
traceB['y'][i]=None
proc = core.processPortfolio(portfolio, 1)
return (proc, portfolio, [traceA, traceB])
if debug == 0: # computing ROI
A = 1
B = 2
avgs = []
for x in range(100):
(proc, portfolio, traces) = work(A, B)
print("%s ROI \t %f" % (str(x), proc['_']['ROI%']))
avgs.append(proc['_']['ROI%'])
print("avg ROI%: " + str(sum(avgs)/len(avgs)))
std = np.std(avgs)
print("std ROI%: " + str(std))
elif debug == 1: # brute-force searching for optimal parameters (A & B)
arr = []
for A in range(1, 30):
for B in range(2, 30):
if (B <= A): continue
rois = []
for x in range(5):
(proc, portfolio, traces) = work(A, B)
rois.append( proc['_']['ROI%'] )
arr.append({"ROI": np.average(rois), "A": A, "B": B})
print("ROI: %i %i %f" % (A, B, np.average(rois)))
print(sorted(arr, key=lambda x: x['ROI']))
else: # computing and plotting out
A = 8
B = 23
(proc, portfolio, traces) = work(A, B)
print("ROI: (%i, %i) %f" % (A, B, proc['_']['ROI%']))
core.portfolioToChart_OHLC(portfolio, traces)
if __name__ == '__main__':
debug = 2
run(debug) | 33.314961 | 111 | 0.491373 |
79439fd10a687de8f9739a63bb86e71c000f5a9b | 147 | py | Python | tests/expected/string_encoding.py | OctoPrint/codemods | 6c6cd4bd689582f906571951b0eb7729c4923b51 | [
"MIT"
] | 5 | 2020-10-06T12:02:23.000Z | 2021-04-26T00:31:55.000Z | tests/expected/string_encoding.py | OctoPrint/codemods | 6c6cd4bd689582f906571951b0eb7729c4923b51 | [
"MIT"
] | null | null | null | tests/expected/string_encoding.py | OctoPrint/codemods | 6c6cd4bd689582f906571951b0eb7729c4923b51 | [
"MIT"
] | 1 | 2020-10-10T17:18:39.000Z | 2020-10-10T17:18:39.000Z | temp_regex = rb"T:((\d*\.)\d+)"
temp_regex = rb"T:((\d*\.)\d+)"
"äöüß".encode()
"äöüß".encode()
b"Hello World"
b"Hello World"
b"Hello World"
| 10.5 | 31 | 0.564626 |
79439fd9b3c2f738920329a1f025d784fda4c140 | 12,711 | py | Python | coverage/results.py | nedbat/covcode | 59ce1f44c00b991c64efe8ecb0cf70c13dec5521 | [
"Apache-2.0"
] | null | null | null | coverage/results.py | nedbat/covcode | 59ce1f44c00b991c64efe8ecb0cf70c13dec5521 | [
"Apache-2.0"
] | null | null | null | coverage/results.py | nedbat/covcode | 59ce1f44c00b991c64efe8ecb0cf70c13dec5521 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Results of coverage measurement."""
import collections
from coverage.debug import SimpleReprMixin
from coverage.exceptions import ConfigError
from coverage.misc import contract, nice_pair
class Analysis:
"""The results of analyzing a FileReporter."""
def __init__(self, data, precision, file_reporter, file_mapper):
self.data = data
self.file_reporter = file_reporter
self.filename = file_mapper(self.file_reporter.filename)
self.statements = self.file_reporter.lines()
self.excluded = self.file_reporter.excluded_lines()
# Identify missing statements.
executed = self.data.lines(self.filename) or []
executed = self.file_reporter.translate_lines(executed)
self.executed = executed
self.missing = self.statements - self.executed
if self.data.has_arcs():
self._arc_possibilities = sorted(self.file_reporter.arcs())
self.exit_counts = self.file_reporter.exit_counts()
self.no_branch = self.file_reporter.no_branch_lines()
n_branches = self._total_branches()
mba = self.missing_branch_arcs()
n_partial_branches = sum(len(v) for k,v in mba.items() if k not in self.missing)
n_missing_branches = sum(len(v) for k,v in mba.items())
else:
self._arc_possibilities = []
self.exit_counts = {}
self.no_branch = set()
n_branches = n_partial_branches = n_missing_branches = 0
self.numbers = Numbers(
precision=precision,
n_files=1,
n_statements=len(self.statements),
n_excluded=len(self.excluded),
n_missing=len(self.missing),
n_branches=n_branches,
n_partial_branches=n_partial_branches,
n_missing_branches=n_missing_branches,
)
def missing_formatted(self, branches=False):
"""The missing line numbers, formatted nicely.
Returns a string like "1-2, 5-11, 13-14".
If `branches` is true, includes the missing branch arcs also.
"""
if branches and self.has_arcs():
arcs = self.missing_branch_arcs().items()
else:
arcs = None
return format_lines(self.statements, self.missing, arcs=arcs)
def has_arcs(self):
"""Were arcs measured in this result?"""
return self.data.has_arcs()
@contract(returns='list(tuple(int, int))')
def arc_possibilities(self):
"""Returns a sorted list of the arcs in the code."""
return self._arc_possibilities
@contract(returns='list(tuple(int, int))')
def arcs_executed(self):
"""Returns a sorted list of the arcs actually executed in the code."""
executed = self.data.arcs(self.filename) or []
executed = self.file_reporter.translate_arcs(executed)
return sorted(executed)
@contract(returns='list(tuple(int, int))')
def arcs_missing(self):
"""Returns a sorted list of the unexecuted arcs in the code."""
possible = self.arc_possibilities()
executed = self.arcs_executed()
missing = (
p for p in possible
if p not in executed
and p[0] not in self.no_branch
and p[1] not in self.excluded
)
return sorted(missing)
@contract(returns='list(tuple(int, int))')
def arcs_unpredicted(self):
"""Returns a sorted list of the executed arcs missing from the code."""
possible = self.arc_possibilities()
executed = self.arcs_executed()
# Exclude arcs here which connect a line to itself. They can occur
# in executed data in some cases. This is where they can cause
# trouble, and here is where it's the least burden to remove them.
# Also, generators can somehow cause arcs from "enter" to "exit", so
# make sure we have at least one positive value.
unpredicted = (
e for e in executed
if e not in possible
and e[0] != e[1]
and (e[0] > 0 or e[1] > 0)
)
return sorted(unpredicted)
def _branch_lines(self):
"""Returns a list of line numbers that have more than one exit."""
return [l1 for l1,count in self.exit_counts.items() if count > 1]
def _total_branches(self):
"""How many total branches are there?"""
return sum(count for count in self.exit_counts.values() if count > 1)
@contract(returns='dict(int: list(int))')
def missing_branch_arcs(self):
"""Return arcs that weren't executed from branch lines.
Returns {l1:[l2a,l2b,...], ...}
"""
missing = self.arcs_missing()
branch_lines = set(self._branch_lines())
mba = collections.defaultdict(list)
for l1, l2 in missing:
if l1 in branch_lines:
mba[l1].append(l2)
return mba
@contract(returns='dict(int: list(int))')
def executed_branch_arcs(self):
"""Return arcs that were executed from branch lines.
Returns {l1:[l2a,l2b,...], ...}
"""
executed = self.arcs_executed()
branch_lines = set(self._branch_lines())
eba = collections.defaultdict(list)
for l1, l2 in executed:
if l1 in branch_lines:
eba[l1].append(l2)
return eba
@contract(returns='dict(int: tuple(int, int))')
def branch_stats(self):
"""Get stats about branches.
Returns a dict mapping line numbers to a tuple:
(total_exits, taken_exits).
"""
missing_arcs = self.missing_branch_arcs()
stats = {}
for lnum in self._branch_lines():
exits = self.exit_counts[lnum]
missing = len(missing_arcs[lnum])
stats[lnum] = (exits, exits - missing)
return stats
class Numbers(SimpleReprMixin):
"""The numerical results of measuring coverage.
This holds the basic statistics from `Analysis`, and is used to roll
up statistics across files.
"""
def __init__(self,
precision=0,
n_files=0, n_statements=0, n_excluded=0, n_missing=0,
n_branches=0, n_partial_branches=0, n_missing_branches=0
):
assert 0 <= precision < 10
self._precision = precision
self._near0 = 1.0 / 10**precision
self._near100 = 100.0 - self._near0
self.n_files = n_files
self.n_statements = n_statements
self.n_excluded = n_excluded
self.n_missing = n_missing
self.n_branches = n_branches
self.n_partial_branches = n_partial_branches
self.n_missing_branches = n_missing_branches
def init_args(self):
"""Return a list for __init__(*args) to recreate this object."""
return [
self._precision,
self.n_files, self.n_statements, self.n_excluded, self.n_missing,
self.n_branches, self.n_partial_branches, self.n_missing_branches,
]
@property
def n_executed(self):
"""Returns the number of executed statements."""
return self.n_statements - self.n_missing
@property
def n_executed_branches(self):
"""Returns the number of executed branches."""
return self.n_branches - self.n_missing_branches
@property
def pc_covered(self):
"""Returns a single percentage value for coverage."""
if self.n_statements > 0:
numerator, denominator = self.ratio_covered
pc_cov = (100.0 * numerator) / denominator
else:
pc_cov = 100.0
return pc_cov
@property
def pc_covered_str(self):
"""Returns the percent covered, as a string, without a percent sign.
Note that "0" is only returned when the value is truly zero, and "100"
is only returned when the value is truly 100. Rounding can never
result in either "0" or "100".
"""
return self.display_covered(self.pc_covered)
def display_covered(self, pc):
"""Return a displayable total percentage, as a string.
Note that "0" is only returned when the value is truly zero, and "100"
is only returned when the value is truly 100. Rounding can never
result in either "0" or "100".
"""
if 0 < pc < self._near0:
pc = self._near0
elif self._near100 < pc < 100:
pc = self._near100
else:
pc = round(pc, self._precision)
return "%.*f" % (self._precision, pc)
def pc_str_width(self):
"""How many characters wide can pc_covered_str be?"""
width = 3 # "100"
if self._precision > 0:
width += 1 + self._precision
return width
@property
def ratio_covered(self):
"""Return a numerator and denominator for the coverage ratio."""
numerator = self.n_executed + self.n_executed_branches
denominator = self.n_statements + self.n_branches
return numerator, denominator
def __add__(self, other):
nums = Numbers(precision=self._precision)
nums.n_files = self.n_files + other.n_files
nums.n_statements = self.n_statements + other.n_statements
nums.n_excluded = self.n_excluded + other.n_excluded
nums.n_missing = self.n_missing + other.n_missing
nums.n_branches = self.n_branches + other.n_branches
nums.n_partial_branches = (
self.n_partial_branches + other.n_partial_branches
)
nums.n_missing_branches = (
self.n_missing_branches + other.n_missing_branches
)
return nums
def __radd__(self, other):
# Implementing 0+Numbers allows us to sum() a list of Numbers.
assert other == 0 # we only ever call it this way.
return self
def _line_ranges(statements, lines):
"""Produce a list of ranges for `format_lines`."""
statements = sorted(statements)
lines = sorted(lines)
pairs = []
start = None
lidx = 0
for stmt in statements:
if lidx >= len(lines):
break
if stmt == lines[lidx]:
lidx += 1
if not start:
start = stmt
end = stmt
elif start:
pairs.append((start, end))
start = None
if start:
pairs.append((start, end))
return pairs
def format_lines(statements, lines, arcs=None):
"""Nicely format a list of line numbers.
Format a list of line numbers for printing by coalescing groups of lines as
long as the lines represent consecutive statements. This will coalesce
even if there are gaps between statements.
For example, if `statements` is [1,2,3,4,5,10,11,12,13,14] and
`lines` is [1,2,5,10,11,13,14] then the result will be "1-2, 5-11, 13-14".
Both `lines` and `statements` can be any iterable. All of the elements of
`lines` must be in `statements`, and all of the values must be positive
integers.
If `arcs` is provided, they are (start,[end,end,end]) pairs that will be
included in the output as long as start isn't in `lines`.
"""
line_items = [(pair[0], nice_pair(pair)) for pair in _line_ranges(statements, lines)]
if arcs:
line_exits = sorted(arcs)
for line, exits in line_exits:
for ex in sorted(exits):
if line not in lines and ex not in lines:
dest = (ex if ex > 0 else "exit")
line_items.append((line, f"{line}->{dest}"))
ret = ', '.join(t[-1] for t in sorted(line_items))
return ret
@contract(total='number', fail_under='number', precision=int, returns=bool)
def should_fail_under(total, fail_under, precision):
"""Determine if a total should fail due to fail-under.
`total` is a float, the coverage measurement total. `fail_under` is the
fail_under setting to compare with. `precision` is the number of digits
to consider after the decimal point.
Returns True if the total should fail.
"""
# We can never achieve higher than 100% coverage, or less than zero.
if not (0 <= fail_under <= 100.0):
msg = f"fail_under={fail_under} is invalid. Must be between 0 and 100."
raise ConfigError(msg)
# Special case for fail_under=100, it must really be 100.
if fail_under == 100.0 and total != 100.0:
return True
return round(total, precision) < fail_under
| 35.11326 | 92 | 0.61907 |
7943a0391aef87a77f55197b3b70173744eb560f | 448 | py | Python | ex033.py | Nawaus/Ex-curso-em-video | 3517248a0ecc3669608a8023075c166b007eaeec | [
"Unlicense"
] | 1 | 2021-11-27T01:39:58.000Z | 2021-11-27T01:39:58.000Z | ex033.py | Nawaus/Ex-curso-em-video | 3517248a0ecc3669608a8023075c166b007eaeec | [
"Unlicense"
] | null | null | null | ex033.py | Nawaus/Ex-curso-em-video | 3517248a0ecc3669608a8023075c166b007eaeec | [
"Unlicense"
] | null | null | null | #033
a = int(input('Primeiro valor: '))
b = int(input('Segundo valor: '))
c = int(input('Terceiro valor: '))
# Verificando o quem é menor
menor = a
if b < a and b < c:
menor = a
if c < a and c < b:
menor = c
# Verificando quem é maior
maior = a
if b > a and b > c:
maior = b
if c > a and c > b:
maior = c
print('O menor valor digitado foi {}'.format(menor))
print('O maior valor digitado foi {}'.format(maior)) | 23.578947 | 53 | 0.578125 |
7943a0b6fcb8862938e558bdbbfe9bf903b36bec | 5,755 | py | Python | .github/scripts/deploy.py | rse-ops/proposals | a09790692c6b09dc7d1400b8f8dde49dd886cca8 | [
"MIT"
] | null | null | null | .github/scripts/deploy.py | rse-ops/proposals | a09790692c6b09dc7d1400b8f8dde49dd886cca8 | [
"MIT"
] | null | null | null | .github/scripts/deploy.py | rse-ops/proposals | a09790692c6b09dc7d1400b8f8dde49dd886cca8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# This script does the following.
# 1. Takes in a space separated list of changed files
# 2. For each changed file, adds a header (title) based on the filename
# 3. Sets output for the prepared files to move into the site
import argparse
import os
import json
import re
import sys
import tempfile
def read_file(filename):
with open(filename, "r") as fd:
content = fd.read()
return content
def read_json(filename):
with open(filename, "r") as fd:
content = json.loads(fd.read())
return content
# Templates
draft_template = """---
title: %s
layout: proposal
pr: %s
tags:
- %s
---"""
approved_template = """---
title: %s
layout: proposal
tags:
- %s
---"""
draft_label = os.environ.get("draft_label", "draft")
approved_label = os.environ.get("approved_label", "approved")
def get_parser():
parser = argparse.ArgumentParser(description="Proposals Parsing Client")
description = "Prepare proposal drafts"
subparsers = parser.add_subparsers(
help="actions",
title="actions",
description=description,
dest="command",
)
draft = subparsers.add_parser("draft", help="prepare drafts")
approved = subparsers.add_parser("approved", help="add approved proposals")
remove = subparsers.add_parser("remove", help="remove non-existing proposals")
for command in [draft, approved, remove]:
command.add_argument(
"files", help="the drafts to consider (changed files)", nargs="*"
)
return parser
def get_title(filename):
"""
Convert name-of-markdown.md to Name Of Markdown
"""
basename = os.path.basename(filename)
return " ".join([x.capitalize() for x in basename.split(".", 1)[0].split("-")])
def is_correct(filename):
"""
Formatting and sanity checks
"""
if not os.path.exists(filename):
print("%s does not exist, skipping!" % filename)
return False
dirname = os.path.basename(os.path.dirname(filename))
if dirname != "proposals":
print("%s is not a proposal, skipping." % filename)
return False
# Check that we end in markdown
if not filename.endswith("md"):
print("%s does not end in .md, skipping." % filename)
return False
# and only have lowercase and -
basename = os.path.basename(filename).replace(".md", "")
if not re.search("^[a-z0-9-]*$", basename):
print(
"%s contains invalid characters: only lowercase letters, numbers, and - are allowed!"
% basename
)
return False
return True
def find_removed(files):
"""
Only allow removed on merge into main, so it's approved by owners
"""
removed = []
for filename in files:
if not os.path.exists(filename):
removed.append(filename)
print("::set-output name=removed::%s" % " ".join(removed))
def prepare_preposals(files, template_string, template_tag, with_pr=False):
"""
Generic shared function to prepare proposal files
"""
tmpdir = tempfile.mkdtemp()
final_files = []
for filename in files:
if not is_correct(filename):
continue
# Prepare header
title = get_title(filename)
if with_pr:
pr = PullRequest()
# Default to custom tag on PR or just draft default
template = template_string % (title, pr.url, pr.get_tag() or template_tag)
else:
template = template_string % (title, template_tag)
content = template + "\n\n" + read_file(filename)
# Write to final location
tmppath = os.path.join(tmpdir, os.path.basename(filename))
with open(tmppath, "w") as fd:
fd.write(content)
final_files.append(tmppath)
# When we have final files, set in environment
print("::set-output name=proposals::%s" % " ".join(final_files))
def prepare_approved(files):
"""
Prepare approved (in progress) proposals
"""
prepare_preposals(files, approved_template, approved_label, with_pr=False)
def prepare_drafts(files):
"""
Prepare proposal drafts
"""
prepare_preposals(files, draft_template, draft_label, with_pr=True)
class PullRequest:
"""Helper class to get pull request and labels to indicate status"""
def __init__(self):
from github import Github
self.gh = Github(os.getenv("GITHUB_TOKEN"))
events_path = os.getenv("GITHUB_EVENT_PATH")
self.event = read_json(events_path)
self.repo = self.gh.get_repo(self.repo_name)
self.number = self.event["pull_request"]["number"]
@property
def repo_name(self):
return self.event["repository"]["full_name"]
@property
def url(self):
return "https://github.com/%s/pull/%s" % (self.repo_name, self.number)
def get_tag(self):
pr = self.repo.get_pull(self.number)
# Return the first status we find
for label in pr.get_labels():
if label.name.startswith("status-"):
name = label.name.replace("status-", "").strip()
return name
def main():
parser = get_parser()
def help(return_code=0):
parser.print_help()
sys.exit(return_code)
# If an error occurs while parsing the arguments, the interpreter will exit with value 2
args, extra = parser.parse_known_args()
if not args.command:
help()
print(args.files)
# Prepare drafts
if args.command == "draft":
prepare_drafts(args.files)
elif args.command == "approved":
prepare_approved(args.files)
elif args.command == "remove":
find_removed(args.files)
if __name__ == "__main__":
main()
| 26.278539 | 97 | 0.631972 |
7943a0d84e5541c5310d3b230ae410809c8ec659 | 2,479 | py | Python | util/compute_bootstrap.py | AnneBeyer/tgen | f7d7d13a85b8fd35919097c7d11345ddb9775d26 | [
"Apache-2.0"
] | 222 | 2015-06-15T14:39:41.000Z | 2022-03-12T03:45:32.000Z | util/compute_bootstrap.py | AnneBeyer/tgen | f7d7d13a85b8fd35919097c7d11345ddb9775d26 | [
"Apache-2.0"
] | 40 | 2015-12-02T10:42:44.000Z | 2021-12-05T17:31:11.000Z | util/compute_bootstrap.py | AnneBeyer/tgen | f7d7d13a85b8fd35919097c7d11345ddb9775d26 | [
"Apache-2.0"
] | 72 | 2015-07-27T08:11:48.000Z | 2022-03-24T14:25:37.000Z | #!/usr/bin/env python
# -"- coding: utf-8 -"-
from argparse import ArgumentParser
import os
import re
from subprocess import call
from tgen.logf import log_info
MY_PATH = os.path.dirname(os.path.abspath(__file__))
def lcall(arg_str):
log_info(arg_str)
return call(arg_str, shell=True)
def get_confidence(metric, lines):
for idx, line in enumerate(lines):
if line.startswith(metric):
lines = lines[idx:]
break
for idx, line in enumerate(lines):
if line.startswith('Confidence of [Sys1'):
return line.strip()
return '???'
def process_all(args):
join_sets = os.path.join(MY_PATH, 'join_sets.pl')
gen_log = os.path.join(MY_PATH, 'mteval-v13a-sig.pl')
bootstrap = os.path.join(MY_PATH, 'paired_bootstrap_resampling_bleu_v13a.pl')
# create the test and source files
lcall("%s %s/s*/test-conc.sgm > %s/test-conc.sgm" %
(join_sets, args.experiment_dirs[0], args.target_dir))
lcall("%s %s/s*/test-das.sgm > %s/test-das.sgm" %
(join_sets, args.experiment_dirs[0], args.target_dir))
exp_nums = []
for exp_dir in args.experiment_dirs:
exp_num = int(re.search(r'(?:^|/)([0-9]+)', exp_dir).group(1))
exp_nums.append(exp_num)
lcall("%s %s/s*/out-text.sgm > %s/%d.sgm" % (join_sets, exp_dir, args.target_dir, exp_num))
os.chdir(args.target_dir)
for exp_num in exp_nums:
lcall("%s -s test-das.sgm -r test-conc.sgm -t %d.sgm -f %d.log.txt > %d.score.txt" %
(gen_log, exp_num, exp_num, exp_num))
for skip, exp_num1 in enumerate(exp_nums):
for exp_num2 in exp_nums[skip + 1:]:
# recompute only if not done already (TODO switch for this)
out_file = 'bootstrap.%dvs%d.txt' % (exp_num1, exp_num2)
if not os.path.isfile(out_file) or os.stat(out_file).st_size == 0:
lcall("%s %s.log.txt %s.log.txt 1000 0.01 > %s" %
(bootstrap, exp_num1, exp_num2, out_file))
with open(out_file) as fh:
bootstrap_data = fh.readlines()
print "%dvs%d BLEU: %s" % (exp_num1, exp_num2, bootstrap_data[0].strip())
if __name__ == '__main__':
ap = ArgumentParser()
ap.add_argument('target_dir', type=str, help='Target directory for bootstrap logs')
ap.add_argument('experiment_dirs', nargs='+', type=str, help='Experiment directories to use')
args = ap.parse_args()
process_all(args)
| 34.915493 | 99 | 0.626059 |
7943a128bfb949a033d9c02de7e9e524c93c5a37 | 558 | py | Python | ogn_lib/constants.py | akolar/ogn-lib | 6b307cad9bf82316a69bb8c82ebfa734040e2689 | [
"MIT"
] | null | null | null | ogn_lib/constants.py | akolar/ogn-lib | 6b307cad9bf82316a69bb8c82ebfa734040e2689 | [
"MIT"
] | 17 | 2017-12-16T12:49:18.000Z | 2018-05-21T10:12:29.000Z | ogn_lib/constants.py | akolar/ogn-lib | 6b307cad9bf82316a69bb8c82ebfa734040e2689 | [
"MIT"
] | null | null | null | import enum
class AirplaneType(enum.Enum):
unknown = 0
glider = 1
tow_plane = 2
helicopter_rotorcraft = 3
parachute = 4
drop_plane = 5
hang_glider = 6
paraglider = 7
powered_aircraft = 8
jet_aircraft = 9
ufo = 10
baloon = 11
airship = 12
uav = 13
static_object = 15
class AddressType(enum.Enum):
unknown = 0b000
icao = 0b001
flarm = 0b010
ogn_tracker = 0b011
naviter = 0b100
class BeaconType(enum.Enum):
aircraft_beacon = 1
server_beacon = 2
server_status = 3
| 16.411765 | 30 | 0.623656 |
7943a185bdfa57c7369d17cea134abbeca2ae127 | 502 | py | Python | longes_sequence/tests/test_4.py | NikolayLutakov/FMI-Python-Basics | 1712ae4aec8371f1144aa83d579d2151e1ea7eaa | [
"MIT"
] | null | null | null | longes_sequence/tests/test_4.py | NikolayLutakov/FMI-Python-Basics | 1712ae4aec8371f1144aa83d579d2151e1ea7eaa | [
"MIT"
] | null | null | null | longes_sequence/tests/test_4.py | NikolayLutakov/FMI-Python-Basics | 1712ae4aec8371f1144aa83d579d2151e1ea7eaa | [
"MIT"
] | null | null | null | from solution_logic import solution
rows = 0
cols = 0
test_matrix_4 = []
def fill_test_matrix_4():
for row in range(rows):
str_arr = input().split(' ')
line = [str(num) for num in str_arr]
test_matrix_4.append(line)
def create_test_4():
str_arr = input().split(' ')
dimensions = [int(num) for num in str_arr]
global rows
rows = dimensions[0]
global cols
cols = dimensions[1]
fill_test_matrix_4()
def test_4():
solution(test_matrix_4)
| 17.928571 | 46 | 0.63745 |
7943a193cfe352ce2d878ea0ec30408da173f611 | 2,107 | py | Python | src/freemovr_engine/plot_utils.py | strawlab/flyvr | 335892cae740e53e82e07b526e1ba53fbd34b0ce | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 3 | 2015-01-29T14:09:25.000Z | 2016-04-24T04:25:49.000Z | src/freemovr_engine/plot_utils.py | strawlab/flyvr | 335892cae740e53e82e07b526e1ba53fbd34b0ce | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | src/freemovr_engine/plot_utils.py | strawlab/flyvr | 335892cae740e53e82e07b526e1ba53fbd34b0ce | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from pymvg.plot_utils import plot_camera
def get_3d_verts(geom):
allw = []
res_u = 32
res_v = 5
for tc1 in np.linspace(0,1,res_v):
tc = np.vstack( (
np.linspace(0,1.,res_u),
tc1*np.ones( (res_u,) ),
)).T
world = geom.model.texcoord2worldcoord(tc)
allw.append(world)
allw = np.concatenate(allw)
return allw
def plot_camera( ax, display, scale=0.2):
C = display.get_camcenter()
C.shape=(3,)
ax.plot( [C[0]], [C[1]], [C[2]], 'ko', ms=5 )
world_coords = display.project_camera_frame_to_3d( [[scale,0,0],
[0,scale,0],
[0,0,scale],
[0,0,-scale],
[0,0,0],
[0,scale,0],
[0,0,scale]] )
for i in range(3):
c = 'rgb'[i]
vv = world_coords[i]
v = np.vstack( ([C],[vv]) )
ax.plot( v[:,0], v[:,1], v[:,2], c+'-' )
uv_raw = np.array([[0,0],
[0,display.height],
[display.width, display.height],
[display.width, 0],
[0,0]])
pts3d_near = display.project_pixel_to_3d_ray( uv_raw, distorted=True, distance=0.1*scale)
pts3d_far = display.project_pixel_to_3d_ray( uv_raw, distorted=True, distance=scale)
# ring at near depth
ax.plot( pts3d_near[:,0], pts3d_near[:,1], pts3d_near[:,2], 'k-' )
# ring at far depth
ax.plot( pts3d_far[:,0], pts3d_far[:,1], pts3d_far[:,2], 'k-' )
# connectors
for i in range(len(pts3d_near)-1):
pts3d = np.vstack((pts3d_near[i,:],pts3d_far[i,:]))
ax.plot( pts3d[:,0], pts3d[:,1], pts3d[:,2], 'k-' )
ax.text( C[0], C[1], C[2], display.name )
ax.text( pts3d_far[0,0], pts3d_far[0,1], pts3d_far[0,2], 'UL' )
| 35.711864 | 93 | 0.456573 |
7943a1c5a5d0ce1cb84fd764615f9828b0d89135 | 1,050 | py | Python | boids/command.py | irinagrigorescu/bad_boids | 5508f1b246041c57df95af1f641b1c90c369befe | [
"MIT"
] | null | null | null | boids/command.py | irinagrigorescu/bad_boids | 5508f1b246041c57df95af1f641b1c90c369befe | [
"MIT"
] | null | null | null | boids/command.py | irinagrigorescu/bad_boids | 5508f1b246041c57df95af1f641b1c90c369befe | [
"MIT"
] | null | null | null | import sys
from argparse import ArgumentParser
from matplotlib import animation
from matplotlib import pyplot as plt
from flock import Flock
from animator import FlockAnimator
# Command line entry point
def process():
parser = ArgumentParser(description = \
"Simulate the motion of a flock of birds")
# Parameters
parser.add_argument('--file', '-f', dest = 'configFile')
# Print help message even if no flag is provided
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
# Catch exception if file does not exist
try:
# Create object
boids = Flock(args.configFile)
# Plot figures
animator = FlockAnimator((-500,1500), (-500,1500), "The Boids!", boids)
animator.animate_flock()
except IOError:
print "The file you provided does not exist.\n"
parser.print_help()
except:
print "Unexpected error.", sys.exc_info()[0], "\n"
raise
if __name__ == "__main__":
process()
| 25.609756 | 79 | 0.640952 |
7943a1fabc9a00f980fbaa46319c2d17ede4b6ff | 1,278 | py | Python | team_10/haiku.py | Donnyvdm/dojo19 | 3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400 | [
"BSD-3-Clause"
] | 1 | 2019-09-15T18:59:49.000Z | 2019-09-15T18:59:49.000Z | team_10/haiku.py | Donnyvdm/dojo19 | 3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400 | [
"BSD-3-Clause"
] | null | null | null | team_10/haiku.py | Donnyvdm/dojo19 | 3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400 | [
"BSD-3-Clause"
] | null | null | null | import wave
import os
import random
import sys
WORDS = {
1: [
'a',
'and',
'the',
'code',
'get',
'dance',
'will',
'fork',
'git',
'snake',
'plant',
'trees',
],
2: [
'python',
'dojo',
'dancing',
'pizza',
'cocos',
'Cardiff',
'London',
'Pycon',
'hurry',
'quickly',
],
3: [
'meditate',
'introspect',
'validate',
'optimist',
'realist',
'happiness',
'indulgence',
'decadence',
'unsponsored',
'reverted',
],
}
def generate_haiku():
lines = []
for syl_count in 5, 7, 5:
this_line = []
while syl_count > 3:
this_syl = random.randint(1, 3)
this_word = random.choice(
WORDS[this_syl]
)
syl_count -= this_syl
this_line.append(this_word)
if syl_count > 0:
this_line.append(random.choice(WORDS[syl_count]))
lines.append(' '.join(this_line))
return lines
def main():
haiku = generate_haiku()
for line in haiku:
print(line)
if __name__ == '__main__':
main() | 17.75 | 61 | 0.435837 |
7943a2a352a12510ccb18760d9656a6cd9e8f55b | 1,388 | py | Python | nagini/star.py | bmorris3/nagini | f020e5ec97b29274d6b4105c909efd64b86f0b85 | [
"BSD-3-Clause"
] | 1 | 2018-02-13T19:51:47.000Z | 2018-02-13T19:51:47.000Z | rms/star.py | bmorris3/rms | 4239e488f6fae9869782ffbac9f39747b58afdda | [
"MIT"
] | null | null | null | rms/star.py | bmorris3/rms | 4239e488f6fae9869782ffbac9f39747b58afdda | [
"MIT"
] | 1 | 2018-11-23T20:57:09.000Z | 2018-11-23T20:57:09.000Z | # Licensed under the MIT License - see LICENSE
from .planet import Planet
__all__ = ['Star']
class Star(object):
"""
An object for stellar parameters, to use as inputs for STSP.
"""
def __init__(self, planet=None, rotation_period=None, inc_stellar=None,
spot_contrast=0.7, u=[0.2, 0.1], rho_s=1.0):
"""
Parameters
----------
rotation_period : float
Stellar rotation period in days
inc_stellar : float
Stellar inclination (measured away from observer's line-of-sight)
in units of degrees
spot_contrast : float
Relative intensity of a spot to the photosphere (0==perfectly dark,
1==same as photosphere)
u : float
Quadratic limb darkening parameters
limb_dark : float
Limb darkening formula
planet : `~rms.Planet`
Planet parameters. If planet is None, a non-transiting planet will
be used for STSP computations.
rho_s : float
Stellar density in units of the solar density
"""
self.inc_stellar = inc_stellar
self.per_rot = rotation_period
self.spot_contrast = spot_contrast
if planet is None:
planet = Planet.non_transiting()
self.planet = planet
self.u = u
self.rho_s = rho_s
| 30.844444 | 79 | 0.592939 |
7943a424a3c3f3d5a9c8fab3ab65eaef86b9a686 | 29,070 | py | Python | tests/test_hooks/conan-center/test_conan-center.py | Minimonium/hooks | 92a4ade551dab17c497244f42dc51328cc7fee2e | [
"MIT"
] | null | null | null | tests/test_hooks/conan-center/test_conan-center.py | Minimonium/hooks | 92a4ade551dab17c497244f42dc51328cc7fee2e | [
"MIT"
] | null | null | null | tests/test_hooks/conan-center/test_conan-center.py | Minimonium/hooks | 92a4ade551dab17c497244f42dc51328cc7fee2e | [
"MIT"
] | null | null | null | import os
import platform
import textwrap
import pytest
from conans import tools
from conans.client.command import ERROR_INVALID_CONFIGURATION, SUCCESS
from conans.tools import Version
from conans import __version__ as conan_version
from tests.utils.test_cases.conan_client import ConanClientTestCase
class ConanCenterTests(ConanClientTestCase):
conanfile_base = textwrap.dedent("""\
from conans import ConanFile
class AConan(ConanFile):
url = "fake_url.com"
license = "fake_license"
description = "whatever"
homepage = "homepage.com"
topics = ("fake_topic", "another_fake_topic")
exports_sources = "header.h"
{placeholder}
def package(self):
self.copy("*", dst="include")
""")
conanfile_header_only_with_settings = textwrap.dedent("""\
from conans import ConanFile
class AConan(ConanFile):
url = "fake_url.com"
license = "fake_license"
description = "whatever"
homepage = "homepage.com"
exports_sources = "header.h"
settings = "os", "compiler", "arch", "build_type"
def package(self):
self.copy("*", dst="include")
def package_id(self):
self.info.header_only()
""")
conanfile_fpic = textwrap.dedent("""\
from conans import ConanFile
class Fpic(ConanFile):
url = "fake_url.com"
license = "fake_license"
description = "whatever"
settings = "os", "arch", "compiler", "build_type"
options = {'fPIC': [True, False]}
default_options = {'fPIC': True}
""")
conanfile_header_only = conanfile_base.format(placeholder='')
conanfile_installer = conanfile_base.format(placeholder='settings = "os_build"')
conanfile = conanfile_base.format(placeholder='settings = "os"')
def _get_environ(self, **kwargs):
kwargs = super(ConanCenterTests, self)._get_environ(**kwargs)
kwargs.update({'CONAN_HOOKS': os.path.join(os.path.dirname(__file__), '..', '..', '..',
'hooks', 'conan-center')})
return kwargs
def test_no_duplicated_messages(self):
tools.save('conanfile.py', content=self.conanfile)
output = self.conan(['create', '.', 'name/version@jgsogo/test'])
self.assertIn("ERROR: [PACKAGE LICENSE (KB-H012)] No 'licenses' folder found in package", output)
self.assertNotIn("[PACKAGE LICENSE (KB-H012)] OK", output)
def test_conanfile(self):
tools.save('conanfile.py', content=self.conanfile)
output = self.conan(['create', '.', 'name/version@jgsogo/test'])
self.assertIn("[RECIPE METADATA (KB-H003)] OK", output)
self.assertIn("[HEADER_ONLY, NO COPY SOURCE (KB-H005)] OK", output)
self.assertIn("[FPIC OPTION (KB-H006)] OK", output)
self.assertIn("[FPIC MANAGEMENT (KB-H007)] 'fPIC' option not found", output)
self.assertIn("[VERSION RANGES (KB-H008)] OK", output)
self.assertIn("[LIBCXX MANAGEMENT (KB-H011)] OK", output)
self.assertIn("ERROR: [MATCHING CONFIGURATION (KB-H014)] Empty package", output)
self.assertIn("ERROR: [PACKAGE LICENSE (KB-H012)] No 'licenses' folder found in package", output)
self.assertIn("[DEFAULT PACKAGE LAYOUT (KB-H013)] OK", output)
self.assertIn("[SHARED ARTIFACTS (KB-H015)] OK", output)
self.assertIn("[EXPORT LICENSE (KB-H023)] OK", output)
self.assertIn("ERROR: [TEST PACKAGE FOLDER (KB-H024)] There is no 'test_package' for this "
"recipe", output)
self.assertIn("[META LINES (KB-H025)] OK", output)
self.assertIn("ERROR: [CONAN CENTER INDEX URL (KB-H027)] The attribute 'url' should " \
"point to: https://github.com/conan-io/conan-center-index", output)
self.assertIn("[CMAKE MINIMUM VERSION (KB-H028)] OK", output)
self.assertIn("[SYSTEM REQUIREMENTS (KB-H032)] OK", output)
def test_conanfile_header_only(self):
tools.save('conanfile.py', content=self.conanfile_header_only)
tools.save('header.h', content="")
output = self.conan(['create', '.', 'name/version@jgsogo/test'])
self.assertIn("[RECIPE METADATA (KB-H003)] OK", output)
self.assertIn("[HEADER_ONLY, NO COPY SOURCE (KB-H005)] This recipe is a header only library", output)
self.assertIn("[FPIC OPTION (KB-H006)] OK", output)
self.assertIn("[FPIC MANAGEMENT (KB-H007)] 'fPIC' option not found", output)
self.assertIn("[VERSION RANGES (KB-H008)] OK", output)
self.assertIn("[LIBCXX MANAGEMENT (KB-H011)] OK", output)
self.assertIn("[MATCHING CONFIGURATION (KB-H014)] OK", output)
self.assertNotIn("ERROR: [MATCHING CONFIGURATION (KB-H014)]", output)
self.assertIn("ERROR: [PACKAGE LICENSE (KB-H012)] No 'licenses' folder found in package", output)
self.assertIn("[DEFAULT PACKAGE LAYOUT (KB-H013)] OK", output)
self.assertIn("[SHARED ARTIFACTS (KB-H015)] OK", output)
self.assertIn("[EXPORT LICENSE (KB-H023)] OK", output)
self.assertIn("ERROR: [TEST PACKAGE FOLDER (KB-H024)] There is no 'test_package' for this "
"recipe", output)
self.assertIn("[META LINES (KB-H025)] OK", output)
self.assertIn("[CMAKE MINIMUM VERSION (KB-H028)] OK", output)
self.assertIn("[SYSTEM REQUIREMENTS (KB-H032)] OK", output)
def test_conanfile_header_only_with_settings(self):
tools.save('conanfile.py', content=self.conanfile_header_only_with_settings)
tools.save('header.h', content="")
output = self.conan(['create', '.', 'name/version@jgsogo/test'])
self.assertIn("[RECIPE METADATA (KB-H003)] OK", output)
self.assertIn("[HEADER_ONLY, NO COPY SOURCE (KB-H005)] OK", output)
self.assertIn("[FPIC OPTION (KB-H006)] OK", output)
self.assertIn("[FPIC MANAGEMENT (KB-H007)] 'fPIC' option not found", output)
self.assertIn("[VERSION RANGES (KB-H008)] OK", output)
self.assertIn("[LIBCXX MANAGEMENT (KB-H011)] OK", output)
self.assertIn("[MATCHING CONFIGURATION (KB-H014)] OK", output)
self.assertIn("ERROR: [PACKAGE LICENSE (KB-H012)] No 'licenses' folder found in package", output)
self.assertIn("[DEFAULT PACKAGE LAYOUT (KB-H013)] OK", output)
self.assertIn("[SHARED ARTIFACTS (KB-H015)] OK", output)
self.assertIn("[EXPORT LICENSE (KB-H023)] OK", output)
self.assertIn("ERROR: [TEST PACKAGE FOLDER (KB-H024)] There is no 'test_package' for this "
"recipe", output)
self.assertIn("[META LINES (KB-H025)] OK", output)
self.assertIn("[CMAKE MINIMUM VERSION (KB-H028)] OK", output)
self.assertIn("[SYSTEM REQUIREMENTS (KB-H032)] OK", output)
def test_conanfile_installer(self):
tools.save('conanfile.py', content=self.conanfile_installer)
output = self.conan(['create', '.', 'name/version@jgsogo/test'])
self.assertIn("[RECIPE METADATA (KB-H003)] OK", output)
self.assertIn("[HEADER_ONLY, NO COPY SOURCE (KB-H005)] OK", output)
self.assertIn("[FPIC OPTION (KB-H006)] OK", output)
self.assertIn("[FPIC MANAGEMENT (KB-H007)] 'fPIC' option not found", output)
self.assertIn("[VERSION RANGES (KB-H008)] OK", output)
self.assertIn("[LIBCXX MANAGEMENT (KB-H011)] OK", output)
self.assertIn("ERROR: [MATCHING CONFIGURATION (KB-H014)] Empty package", output)
self.assertIn("ERROR: [MATCHING CONFIGURATION (KB-H014)] Packaged artifacts does not match",
output)
self.assertIn("ERROR: [PACKAGE LICENSE (KB-H012)] No 'licenses' folder found in package", output)
self.assertIn("[DEFAULT PACKAGE LAYOUT (KB-H013)] OK", output)
self.assertIn("[SHARED ARTIFACTS (KB-H015)] OK", output)
self.assertIn("ERROR: [TEST PACKAGE FOLDER (KB-H024)] There is no 'test_package' for this "
"recipe", output)
self.assertIn("[META LINES (KB-H025)] OK", output)
self.assertIn("[CMAKE MINIMUM VERSION (KB-H028)] OK", output)
def test_shebang(self):
conanfile = textwrap.dedent("""\
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import ConanFile, tools
import os
class AConan(ConanFile):
url = "fake_url.com"
license = "fake_license"
description = "whatever"
exports_sources = "header.h"
def package(self):
tools.save(os.path.join(self.package_folder, "__init__.py"),
content="#!/usr/bin/env python")
self.copy("*", dst="include")
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
""")
tools.save('conanfile.py', content=conanfile)
output = self.conan(['create', '.', 'name/version@user/test'])
self.assertIn("ERROR: [META LINES (KB-H025)] PEP 263 (encoding) is not allowed in the " \
"conanfile. Remove the line 2", output)
self.assertIn("ERROR: [META LINES (KB-H025)] vim editor configuration detected in your " \
"recipe. Remove the line 17", output)
self.assertIn("ERROR: [META LINES (KB-H025)] Shebang (#!) detected in your recipe. " \
"Remove the line 1", output)
def test_run_environment_test_package(self):
conanfile_tp = textwrap.dedent("""\
from conans import ConanFile, RunEnvironment, tools
class TestConan(ConanFile):
settings = "os", "arch"
def test(self):
env_build = RunEnvironment(self)
with tools.environment_append(env_build.vars):
self.run("echo bar")
""")
tools.save('test_package/conanfile.py', content=conanfile_tp)
tools.save('conanfile.py', content=self.conanfile)
output = self.conan(['create', '.', 'name/version@user/test'])
self.assertIn("[TEST PACKAGE FOLDER (KB-H024)] OK", output)
self.assertIn("ERROR: [TEST PACKAGE - RUN ENVIRONMENT (KB-H029)] The 'RunEnvironment()' "
"build helper is no longer needed. It has been integrated into the "
"self.run(..., run_environment=True)", output)
conanfile_tp = textwrap.dedent("""\
from conans import ConanFile, tools
class TestConan(ConanFile):
settings = "os", "arch"
def test(self):
self.run("echo bar", run_environment=True)
""")
tools.save('test_package/conanfile.py', content=conanfile_tp)
tools.save('conanfile.py', content=self.conanfile)
output = self.conan(['create', '.', 'name/version@user/test'])
self.assertIn("[TEST PACKAGE FOLDER (KB-H024)] OK", output)
self.assertIn("[TEST PACKAGE - RUN ENVIRONMENT (KB-H029)] OK", output)
self.assertIn("[EXPORT LICENSE (KB-H023)] OK", output)
self.assertIn("[TEST PACKAGE - NO IMPORTS() (KB-H034)] OK", output)
def test_exports_licenses(self):
tools.save('conanfile.py',
content=self.conanfile_base.format(placeholder='exports = "LICENSE"'))
output = self.conan(['create', '.', 'name/version@name/test'])
self.assertIn("ERROR: [EXPORT LICENSE (KB-H023)] This recipe is exporting a license file." \
" Remove LICENSE from `exports`", output)
tools.save('conanfile.py',
content=self.conanfile_base.format(placeholder='exports_sources = "LICENSE"'))
output = self.conan(['create', '.', 'name/version@name/test'])
self.assertIn("ERROR: [EXPORT LICENSE (KB-H023)] This recipe is exporting a license file." \
" Remove LICENSE from `exports_sources`", output)
tools.save('conanfile.py',
content=self.conanfile_base.format(placeholder='exports = ["foobar", "COPYING.md"]'))
output = self.conan(['create', '.', 'name/version@name/test'])
self.assertIn("ERROR: [EXPORT LICENSE (KB-H023)] This recipe is exporting a license file." \
" Remove COPYING.md from `exports`", output)
def test_fpic_remove(self):
conanfile = textwrap.dedent("""\
from conans import ConanFile
class LinuxOnly(ConanFile):
url = "fake_url.com"
license = "fake_license"
description = "whatever"
settings = "os", "arch", "compiler", "build_type"
options = {"fPIC": [True, False], "shared": [True, False]}
default_options = {"fPIC": True, "shared": False}
""")
tools.save('conanfile.py', content=conanfile)
output = self.conan(['create', '.', 'package/version@conan/test'])
self.assertIn("[FPIC OPTION (KB-H006)] OK", output)
if tools.os_info.is_windows:
self.assertIn("ERROR: [FPIC MANAGEMENT (KB-H007)] 'fPIC' option not managed " \
"correctly. Please remove it for Windows " \
"configurations: del self.options.fpic", output)
else:
self.assertIn("[FPIC MANAGEMENT (KB-H007)] OK. 'fPIC' option found and apparently " \
"well managed", output)
def test_fpic_remove_windows(self):
conanfile = textwrap.dedent("""\
from conans import ConanFile
class Conan(ConanFile):
url = "fake_url.com"
license = "fake_license"
description = "whatever"
settings = "os", "arch", "compiler", "build_type"
options = {"fPIC": [True, False], "shared": [True, False]}
default_options = {"fPIC": True, "shared": False}
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
""")
tools.save('conanfile.py', content=conanfile)
output = self.conan(['create', '.', 'package/version@conan/test'])
self.assertIn("[FPIC OPTION (KB-H006)] OK", output)
if platform.system() == "Windows":
self.assertIn("[FPIC MANAGEMENT (KB-H007)] 'fPIC' option not found", output)
else:
self.assertIn("[FPIC MANAGEMENT (KB-H007)] OK. 'fPIC' option found and apparently well "
"managed", output)
self.assertIn("[FPIC MANAGEMENT (KB-H007)] OK", output)
def test_fpic_remove_windows_configuration(self):
conanfile = textwrap.dedent("""\
from conans import ConanFile
from conans.errors import ConanInvalidConfiguration
class Conan(ConanFile):
url = "fake_url.com"
license = "fake_license"
description = "whatever"
settings = "os", "arch", "compiler", "build_type"
options = {"fPIC": [True, False], "shared": [True, False]}
default_options = {"fPIC": True, "shared": False}
def configure(self):
if self.settings.os == "Windows":
raise ConanInvalidConfiguration("Windows not supported")
""")
tools.save('conanfile.py', content=conanfile)
if platform.system() == "Windows":
expected_return_code = ERROR_INVALID_CONFIGURATION
else:
expected_return_code = SUCCESS
output = self.conan(['create', '.', 'package/version@conan/test'], expected_return_code)
if platform.system() == "Windows":
self.assertNotIn("[FPIC MANAGEMENT (KB-H007)] OK", output)
else:
self.assertIn("[FPIC MANAGEMENT (KB-H007)] OK. 'fPIC' option found and apparently well "
"managed", output)
def test_conanfile_cppstd(self):
content = textwrap.dedent("""\
from conans import ConanFile
class AConan(ConanFile):
url = "fake_url.com"
license = "fake_license"
description = "whatever"
exports_sources = "header.h", "test.c"
settings = "os", "compiler", "arch", "build_type"
def configure(self):
{configure}
def package(self):
self.copy("*", dst="include")
""")
tools.save('test.c', content="#define FOO 1")
tools.save('conanfile.py', content=content.format(
configure="pass"))
output = self.conan(['create', '.', 'name/version@user/test'])
self.assertIn("ERROR: [LIBCXX MANAGEMENT (KB-H011)] Can't detect C++ source files but " \
"recipe does not remove 'self.settings.compiler.libcxx'", output)
self.assertIn("ERROR: [CPPSTD MANAGEMENT (KB-H022)] Can't detect C++ source files but " \
"recipe does not remove 'self.settings.compiler.cppstd'", output)
tools.save('conanfile.py', content=content.format(configure="""
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd"""))
output = self.conan(['create', '.', 'name/version@user/test'])
self.assertIn("[LIBCXX MANAGEMENT (KB-H011)] OK", output)
self.assertIn("[CPPSTD MANAGEMENT (KB-H022)] OK", output)
def test_missing_attributes(self):
conanfile = textwrap.dedent("""\
from conans import ConanFile
class AConan(ConanFile):
pass
""")
bad_recipe_output = [
"ERROR: [RECIPE METADATA (KB-H003)] Conanfile doesn't have 'url' attribute.",
"ERROR: [RECIPE METADATA (KB-H003)] Conanfile doesn't have 'license' attribute.",
"ERROR: [RECIPE METADATA (KB-H003)] Conanfile doesn't have 'description' attribute.",
"ERROR: [RECIPE METADATA (KB-H003)] Conanfile doesn't have 'homepage' attribute.",
"WARN: [RECIPE METADATA (KB-H003)] Conanfile doesn't have 'topics' attribute."
]
tools.save('conanfile.py', content=conanfile)
output = self.conan(['create', '.', 'name/version@user/test'])
for msg in bad_recipe_output:
self.assertIn(msg, output)
self.assertNotIn("[RECIPE METADATA (KB-H003)] OK", output)
tools.save('conanfile.py', content=self.conanfile_base.format(placeholder=''))
output = self.conan(['create', '.', 'name/version@user/test'])
for msg in bad_recipe_output:
self.assertNotIn(msg, output)
self.assertIn("[RECIPE METADATA (KB-H003)] OK", output)
def test_cci_url(self):
conanfile = textwrap.dedent("""\
from conans import ConanFile
class AConan(ConanFile):
url = "https://github.com/conan-io/conan-center-index"
license = "fake_license"
description = "whatever"
exports_sources = "header.h"
def package(self):
self.copy("*", dst="include")
""")
tools.save('conanfile.py', content=conanfile)
output = self.conan(['create', '.', 'name/version@jgsogo/test'])
self.assertIn("[CONAN CENTER INDEX URL (KB-H027)] OK", output)
def test_cmake_minimum_version(self):
conanfile = self.conanfile_base.format(placeholder="exports_sources = \"CMakeLists.txt\"")
cmake = """project(test)
"""
tools.save('conanfile.py', content=conanfile)
tools.save('CMakeLists.txt', content=cmake)
output = self.conan(['create', '.', 'name/version@user/test'])
path = os.path.join(".", "CMakeLists.txt")
self.assertIn("ERROR: [CMAKE MINIMUM VERSION (KB-H028)] The CMake file '%s' must contain a "
"minimum version declared at the beginning "
"(e.g. cmake_minimum_required(VERSION 3.1.2))" % path,
output)
cmake = textwrap.dedent("""
# foobar.cmake
cmake_minimum_required(VERSION 2.8)
project(test)
""")
tools.save('CMakeLists.txt', content=cmake)
output = self.conan(['create', '.', 'name/version@user/test'])
self.assertIn("[CMAKE MINIMUM VERSION (KB-H028)] OK", output)
cmake = textwrap.dedent("""
cmake_minimum_required(VERSION 2.8)
project(test)
""")
tools.save('CMakeLists.txt', content=cmake)
output = self.conan(['create', '.', 'name/version@user/test'])
self.assertIn("[CMAKE MINIMUM VERSION (KB-H028)] OK", output)
def test_cmake_minimum_version_test_package(self):
conanfile = self.conanfile_base.format(placeholder="exports_sources = \"CMakeLists.txt\"")
conanfile_tp = textwrap.dedent("""\
from conans import ConanFile, tools, CMake
class TestConan(ConanFile):
settings = "os", "arch"
def build(self):
cmake = CMake(self)
def test(self):
self.run("echo bar", run_environment=True)
""")
cmake = """cmake_minimum_required(VERSION 2.8.11)
project(test)
"""
tools.save('conanfile.py', content=conanfile)
tools.save('CMakeLists.txt', content=cmake)
tools.save('test_package/CMakeLists.txt', content=cmake)
tools.save('test_package/conanfile.py', content=conanfile_tp)
output = self.conan(['create', '.', 'name/version@user/test'])
self.assertIn("[CMAKE MINIMUM VERSION (KB-H028)] OK", output)
# validate residual cmake files in test_package/build
output = self.conan(['create', '.', 'name/version@user/test'])
self.assertIn("[CMAKE MINIMUM VERSION (KB-H028)] OK", output)
self.assertNotIn("ERROR: [CMAKE MINIMUM VERSION (KB-H028)]", output)
cmake = textwrap.dedent("""CMAKE_MINIMUM_REQUIRED (VERSION 2.8.11)
project(test)
""")
tools.save('CMakeLists.txt', content=cmake)
output = self.conan(['create', '.', 'name/version@user/test'])
self.assertIn("[CMAKE MINIMUM VERSION (KB-H028)] OK", output)
cmake = textwrap.dedent("""cmake_minimum_required(VERSION 2.8.11)
project(test)
""")
tools.save('CMakeLists.txt', content=cmake)
output = self.conan(['create', '.', 'name/version@user/test'])
self.assertIn("[CMAKE MINIMUM VERSION (KB-H028)] OK", output)
self.assertNotIn("ERROR: [CMAKE MINIMUM VERSION (KB-H028)]", output)
cmake = textwrap.dedent("""project(test)
cmake_minimum_required(VERSION 2.8.11)
""")
tools.save('CMakeLists.txt', content=cmake)
output = self.conan(['create', '.', 'name/version@user/test'])
self.assertIn("ERROR: [CMAKE MINIMUM VERSION (KB-H028)]", output)
self.assertNotIn("[CMAKE MINIMUM VERSION (KB-H028)] OK", output)
cmake = """cmake_minimum_required(VERSION 2.8.11)
project(test)
"""
tools.save('CMakeLists.txt', content=cmake)
cmake = textwrap.dedent("""project(test)
cmake_minimum_required(VERSION 2.8.11)
""")
tools.save('test_package/CMakeLists.txt', content=cmake)
output = self.conan(['create', '.', 'name/version@user/test'])
self.assertIn("ERROR: [CMAKE MINIMUM VERSION (KB-H028)]", output)
self.assertNotIn("[CMAKE MINIMUM VERSION (KB-H028)] OK", output)
def test_system_requirements(self):
conanfile = textwrap.dedent("""\
from conans import ConanFile
from conans.tools import SystemPackageTool
class SystemReqConan(ConanFile):
url = "https://github.com/conan-io/conan-center-index"
license = "fake_license"
description = "whatever"
def system_requirements(self):
installer = SystemPackageTool()
""")
tools.save('conanfile.py', content=conanfile)
output = self.conan(['create', '.', 'name/version@user/test'])
self.assertIn("[SYSTEM REQUIREMENTS (KB-H032)] OK", output)
conanfile += " installer.install([])"
tools.save('conanfile.py', content=conanfile)
output = self.conan(['create', '.', 'name/version@user/test'])
self.assertIn("ERROR: [SYSTEM REQUIREMENTS (KB-H032)] The method " \
"'SystemPackageTool.install' is not allowed in the recipe.", output)
conanfile = conanfile.replace("installer.install([])", "SystemPackageTool().install([])")
tools.save('conanfile.py', content=conanfile)
output = self.conan(['create', '.', 'name/version@user/test'])
self.assertIn("ERROR: [SYSTEM REQUIREMENTS (KB-H032)] The method " \
"'SystemPackageTool.install' is not allowed in the recipe.", output)
output = self.conan(['create', '.', 'libusb/version@user/test'])
self.assertIn("[SYSTEM REQUIREMENTS (KB-H032)] 'libusb' is part of the allowlist.", output)
self.assertNotIn("ERROR: [SYSTEM REQUIREMENTS (KB-H032)]", output)
def test_imports_not_allowed(self):
conanfile_tp = textwrap.dedent("""\
from conans import ConanFile, tools
class TestConan(ConanFile):
settings = "os", "arch"
def imports(self):
self.copy("*.dll", "", "bin")
self.copy("*.dylib", "", "lib")
def test(self):
self.run("echo bar", run_environment=True)
""")
tools.save('test_package/conanfile.py', content=conanfile_tp)
tools.save('conanfile.py', content=self.conanfile)
output = self.conan(['create', '.', 'name/version@user/test'])
self.assertIn("[TEST PACKAGE FOLDER (KB-H024)] OK", output)
self.assertIn("[TEST PACKAGE - RUN ENVIRONMENT (KB-H029)] OK", output)
self.assertIn("ERROR: [TEST PACKAGE - NO IMPORTS() (KB-H034)] The method `imports` is not " \
"allowed in test_package/conanfile.py", output)
def test_no_author(self):
conanfile = textwrap.dedent("""\
from conans import ConanFile
class AConan(ConanFile):
{}
def configure(self):
pass
""")
tools.save('conanfile.py', content=conanfile.replace("{}", ""))
output = self.conan(['create', '.', 'name/version@user/test'])
self.assertIn("[NO AUTHOR (KB-H037)] OK", output)
tools.save('conanfile.py', content=conanfile.replace("{}", "author = 'foobar'"))
output = self.conan(['create', '.', 'name/version@user/test'])
self.assertIn('ERROR: [NO AUTHOR (KB-H037)] Conanfile should not contain author. '
'Remove \'author = "foobar"\'', output)
tools.save('conanfile.py', content=conanfile.replace("{}", "author = ('foo', 'bar')"))
output = self.conan(['create', '.', 'name/version@user/test'])
self.assertIn('ERROR: [NO AUTHOR (KB-H037)] Conanfile should not contain author. '
'Remove \'author = (\'foo\', \'bar\')', output)
@pytest.mark.skipif(Version(conan_version) < "1.21", reason="requires Conan 1.21 or higher")
def test_no_target_name(self):
conanfile = textwrap.dedent("""\
from conans import ConanFile
class AConan(ConanFile):
def package_info(self):
{}
""")
pkg_config = 'self.cpp_info.names["pkg_config"] = "foolib"'
regular = 'self.cpp_info.name = "Foo"'
cmake = 'self.cpp_info.names["cmake"] = "Foo"'
cmake_multi = 'self.cpp_info.names["cmake_multi"] = "Foo"'
cmake_find = 'self.cpp_info.names["cmake_find_package"] = "Foo"'
cmake_find_multi = 'self.cpp_info.names["cmake_find_package_multi"] = "Foo"'
tools.save('conanfile.py', content=conanfile.replace("{}", regular))
output = self.conan(['create', '.', 'name/version@user/test'])
self.assertIn("ERROR: [NO TARGET NAME (KB-H040)] "
"CCI uses the name of the package for cmake generator."
" Use 'cpp_info.names' instead.", output)
for line, gen in [(cmake, "cmake"), (cmake_multi, "cmake_multi")]:
tools.save('conanfile.py', content=conanfile.replace("{}", line))
output = self.conan(['create', '.', 'name/version@user/test'])
self.assertIn("ERROR: [NO TARGET NAME (KB-H040)] CCI uses the name of the package for "
"{0} generator. Conanfile should not contain "
"'self.cpp_info.names['{0}']'. "
" Use 'cmake_find_package' and 'cmake_find_package_multi' instead.".format(gen), output)
for it in [pkg_config, cmake_find, cmake_find_multi]:
tools.save('conanfile.py', content=conanfile.replace("{}", it))
output = self.conan(['create', '.', 'name/version@user/test'])
self.assertIn("[NO TARGET NAME (KB-H040)] OK", output)
| 48.45 | 114 | 0.595356 |
7943a57efa6eee80c17abe87f852e0d90ab24f29 | 2,211 | py | Python | misc/process_boundary_flux.py | diamondjems016/galaxy_analysis | fa1367085a6b9870de2546daf3163aaa41129ea0 | [
"MIT"
] | 1 | 2021-01-15T15:33:05.000Z | 2021-01-15T15:33:05.000Z | misc/process_boundary_flux.py | diamondjems016/galaxy_analysis | fa1367085a6b9870de2546daf3163aaa41129ea0 | [
"MIT"
] | null | null | null | misc/process_boundary_flux.py | diamondjems016/galaxy_analysis | fa1367085a6b9870de2546daf3163aaa41129ea0 | [
"MIT"
] | 1 | 2020-11-29T00:15:25.000Z | 2020-11-29T00:15:25.000Z | """
process_boundary_flux
Author: A. Emerick
Notes: script and functions to process output
from domain boundary mass flux computation
"""
import numpy as np
import os
import subprocess
__all__ = ['process_boundary_flux']
def process_boundary_flux(data = None, filename = None, wdir = '.'):
"""
Given a set of boundary mass flux data, loop through
and stitch this together so that there is no double
counting of timesteps. Processed file contains cumulative sum
of outflowing mass for each field.
"""
if data is None:
if filename is None:
filename = wdir + '/boundary_mass_flux.dat'
if not os.path.isfile(filename):
print('boundary mass flux file not found at ' + filename)
return False, 0
data = np.genfromtxt(filename)
with open(filename, 'r') as f:
header = f.readline()
data = data[data[:,1].argsort()]
unique_time = np.unique(data[:,1])
filtered_data = [None]*np.size(unique_time)
for i, t in enumerate(unique_time):
selection = (data[:,1] == t)
filtered_data[i] = np.mean(data[selection], axis= 0)
filtered_data = np.array(filtered_data)
for i in np.arange(2, np.size(filtered_data[0])):
filtered_data[:,i] = np.cumsum(filtered_data[:,i])
# output result
outfile = wdir + '/filtered_boundary_mass_flux.dat'
np.savetxt(outfile, filtered_data, fmt = ('%0.6E'), header = header)
# clean out some NAN's or # if present
with open(outfile) as oldfile, open(wdir + '/temp.txt', 'w') as newfile:
for line in oldfile:
if not any(badword in line for badword in ['NAN',"# "]):
newfile.write(line)
# final cleaning
with open(outfile,'w') as output, open(wdir + '/temp.txt') as tempfile:
for line in tempfile:
output.write(line)
# remove temporary file
bash_command = "rm " + wdir + "/temp.txt"
subprocess.call(bash_command, shell=True)
data = np.genfromtxt(outfile, names = True)
return True, data
if __name__ == '__main__':
file_exists, data = process_boundary_flux(filename = 'boundary_mass_flux.dat')
| 27.6375 | 82 | 0.632293 |
7943a6038687525beb95d2f8e78a37df30387a66 | 2,130 | py | Python | sarpy/utils/create_kmz.py | bombaci-vsc/sarpy | 3e31e9d7fca77612b60f2507f6f7068d1660a3e2 | [
"MIT"
] | 119 | 2018-07-12T22:08:17.000Z | 2022-03-24T12:11:39.000Z | sarpy/utils/create_kmz.py | bombaci-vsc/sarpy | 3e31e9d7fca77612b60f2507f6f7068d1660a3e2 | [
"MIT"
] | 72 | 2018-03-29T15:57:37.000Z | 2022-03-10T01:46:21.000Z | sarpy/utils/create_kmz.py | bombaci-vsc/sarpy | 3e31e9d7fca77612b60f2507f6f7068d1660a3e2 | [
"MIT"
] | 54 | 2018-03-27T19:57:20.000Z | 2022-03-09T20:53:11.000Z | """
Create kmz products based on SICD type reader.
For a basic help on the command-line, check
>>> python -m sarpy.utils.create_kmz --help
"""
import argparse
import logging
import os
from sarpy.io.complex.converter import open_complex
from sarpy.io.product.kmz_product_creation import create_kmz_view
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Create derived product is SIDD format from a SICD type file.",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'input_file', metavar='input_file',
help='Path input data file, or directory for radarsat, RCM, or sentinel.\n'
'* For radarsat or RCM, this can be the product.xml file, or parent directory\n'
' of product.xml or metadata/product.xml.\n'
'* For sentinel, this can be the manifest.safe file, or parent directory of\n'
' manifest.safe.\n')
parser.add_argument(
'output_directory', metavar='output_directory',
help='Path to the output directory where the product file(s) will be created.\n'
'This directory MUST exist.\n'
'* Depending on the input file, multiple product files may be produced.\n'
'* The name for the ouput file(s) will be chosen based on CoreName and\n '
' transmit/collect polarization.\n')
parser.add_argument(
'-s', '--size', default=3072, type=int, help='Maximum size for the interpolated image, put -1 for full size')
parser.add_argument(
'-v', '--verbose', action='store_true', help='Verbose (level="INFO") logging?')
args = parser.parse_args()
if args.verbose:
logger = logging.getLogger('sarpy')
logger.setLevel('INFO')
reader = open_complex(args.input_file)
file_stem = os.path.splitext(os.path.split(args.input_file)[1])[0]
pixel_limit = None if args.size == -1 else args.size
create_kmz_view(reader, args.output_directory, pixel_limit=pixel_limit, file_stem='View-{}'.format(file_stem))
| 39.444444 | 117 | 0.679343 |
7943a77ef8e29d915df5662636770bb2a29a4282 | 6,034 | py | Python | netanalysis/ooni/ooni_client.py | dharmaxbum1/net-analysis | 94c9b7fc68be56c17cc1bf076681e4260ba4728b | [
"Apache-2.0"
] | 1 | 2019-12-23T05:07:41.000Z | 2019-12-23T05:07:41.000Z | netanalysis/ooni/ooni_client.py | dharmaxbum1/net-analysis | 94c9b7fc68be56c17cc1bf076681e4260ba4728b | [
"Apache-2.0"
] | null | null | null | netanalysis/ooni/ooni_client.py | dharmaxbum1/net-analysis | 94c9b7fc68be56c17cc1bf076681e4260ba4728b | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Jigsaw Operations LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=yield-inside-async-function
import abc
import asyncio
from collections import deque
from concurrent.futures import Executor
from functools import singledispatch
import logging
import os
import os.path
from typing import Any, AsyncIterable, Dict, Iterable, List
from urllib.parse import urlencode, quote
import aiohttp
import ujson as json
class OoniClient(abc.ABC):
@abc.abstractmethod
async def get_measurement(self, measurement_id: str) -> Dict:
pass
@abc.abstractmethod
def list_measurements(self, country_code: str, url: str) -> AsyncIterable[Dict]:
pass
def _read_json_from_file(filename):
with open(filename, mode="r") as file:
return json.load(file)
def _write_json_to_file(json_object, filename):
with open(filename, mode="w+") as file:
return json.dump(json_object, file)
class CachedOoniClient(OoniClient):
def __init__(self, origin: OoniClient, cache_dir: str, executor: Executor) -> None:
self._origin = origin
self._cache_dir = cache_dir
self._executor = executor
os.makedirs(os.path.join(cache_dir, "measurement"), exist_ok=True)
async def _run_async(self, *args):
return await asyncio.get_event_loop().run_in_executor(self._executor, *args)
async def get_measurement(self, measurement_id: str):
measurement_filename = os.path.join(
self._cache_dir, "measurement", "%s.json" % measurement_id)
logging.debug("Look up measurement %s", measurement_id)
try:
measurement = await self._run_async(_read_json_from_file, measurement_filename)
logging.debug("Cache hit for measurement %s", measurement_id)
except (FileNotFoundError, json.decoder.JSONDecodeError):
logging.debug("Cache miss for measurement %s", measurement_id)
measurement = await self._origin.get_measurement(measurement_id)
await self._run_async(_write_json_to_file, measurement, measurement_filename)
return measurement
def list_measurements(self, *args, **kwargs) -> AsyncIterable[Dict]:
return self._origin.list_measurements(*args, **kwargs)
@singledispatch
def _trim_json(json_obj, max_string_size: int):
return json_obj
@_trim_json.register(dict)
def _(json_dict: dict, max_string_size: int):
keys_to_delete = [] # type: str
for key, value in json_dict.items():
if type(value) == str and len(value) > max_string_size:
keys_to_delete.append(key)
else:
_trim_json(value, max_string_size)
for key in keys_to_delete:
del json_dict[key]
return json_dict
@_trim_json.register(list)
def _(json_list: list, max_string_size: int):
for item in json_list:
_trim_json(item, max_string_size)
return json_list
# Documentation: https://api.ooni.io/api/
class ApiOoniClient(OoniClient):
def __init__(self, api_url: str, http_client: aiohttp.ClientSession, max_string_size=1000) -> None:
self._api_url = api_url
self._http_client = http_client
self._max_string_size = max_string_size
async def _get_json(self, url):
try:
logging.debug("Fetching %s", url)
async with self._http_client.get(url) as response:
json_obj = await response.json(encoding="utf8")
if self._max_string_size:
_trim_json(json_obj, self._max_string_size)
return json_obj
except Exception as error:
raise Exception("Failed to query url %s" % url, error)
def _api_query_url(self, path, params=None):
query_url = "%s/%s" % (self._api_url, quote(path))
if params:
query_url = query_url + "?" + urlencode(params)
return query_url
async def get_measurement(self, measurement_id: str):
logging.debug("Fetching measurement %s", measurement_id)
measurement = await self._get_json(self._api_query_url("measurement/%s" % measurement_id))
return measurement
async def list_measurements(self, country_code: str=None, url: str=None):
# Params order_by and input make the query *a lot* slower.
# TODO: Consider fetching without input.
# Unfortunately pagination breaks without order_by
params = {
"test_name": "web_connectivity",
"order_by": "test_start_time",
"order": "desc",
"limit": 1000,
}
if country_code:
params["probe_cc"] = country_code
if url:
params["input"] = url
params["limit"] = 100
next_page_url = self._api_query_url("measurements", params)
measurement_entries = deque()
while True:
if not measurement_entries:
if not next_page_url:
return
logging.debug("Fetching %s", next_page_url)
async with self._http_client.get(next_page_url) as response:
response_json = await response.json(encoding="utf8")
next_page_url = response_json["metadata"].get("next_url")
measurement_entries.extend(response_json["results"])
if measurement_entries:
yield measurement_entries.popleft()
def CreatePublicApiOoniClient(http_client: aiohttp.ClientSession):
return ApiOoniClient("https://api.ooni.io/api/v1", http_client)
| 36.349398 | 103 | 0.673682 |
7943a7c9b028e696b387ff15afb5ee2a225928cc | 3,318 | py | Python | examples/undocumented/python/evaluation_cross_validation_mkl_weight_storage.py | mrkarna/shogun | dc4b41d8e3cdaecf39c59d2414d68b424a448da7 | [
"BSD-3-Clause"
] | null | null | null | examples/undocumented/python/evaluation_cross_validation_mkl_weight_storage.py | mrkarna/shogun | dc4b41d8e3cdaecf39c59d2414d68b424a448da7 | [
"BSD-3-Clause"
] | null | null | null | examples/undocumented/python/evaluation_cross_validation_mkl_weight_storage.py | mrkarna/shogun | dc4b41d8e3cdaecf39c59d2414d68b424a448da7 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# This software is distributed under BSD 3-clause license (see LICENSE file).
#
# Authors: Heiko Strathmann
from numpy.random import randn
from numpy import *
# generate some overlapping training vectors
num_vectors=5
vec_distance=1
traindat=concatenate((randn(2,num_vectors)-vec_distance,
randn(2,num_vectors)+vec_distance), axis=1)
label_traindat=concatenate((-ones(num_vectors), ones(num_vectors)));
parameter_list = [[traindat,label_traindat]]
def evaluation_cross_validation_mkl_weight_storage(traindat=traindat, label_traindat=label_traindat):
from shogun import CrossValidation, CrossValidationResult
from shogun import ParameterObserverCV
from shogun import ContingencyTableEvaluation, ACCURACY
from shogun import StratifiedCrossValidationSplitting
from shogun import BinaryLabels
from shogun import RealFeatures, CombinedFeatures
from shogun import CombinedKernel
from shogun import MKLClassification
import shogun as sg
import numpy as np
# training data, combined features all on same data
features=RealFeatures(traindat)
comb_features=CombinedFeatures()
comb_features.append_feature_obj(features)
comb_features.append_feature_obj(features)
comb_features.append_feature_obj(features)
labels=BinaryLabels(label_traindat)
# kernel, different Gaussians combined
kernel=CombinedKernel()
kernel.append_kernel(sg.kernel("GaussianKernel", log_width=np.log(0.1)))
kernel.append_kernel(sg.kernel("GaussianKernel", log_width=np.log(1)))
kernel.append_kernel(sg.kernel("GaussianKernel", log_width=np.log(2)))
# create mkl using libsvm, due to a mem-bug, interleaved is not possible
svm=MKLClassification();
svm.put("svm", sg.as_svm(sg.machine("LibSVM")))
svm.set_interleaved_optimization_enabled(False);
svm.set_kernel(kernel);
# splitting strategy for 5 fold cross-validation (for classification its better
# to use "StratifiedCrossValidation", but the standard
# "StratifiedCrossValidationSplitting" is also available
splitting_strategy=StratifiedCrossValidationSplitting(labels, 5)
# evaluation method
evaluation_criterium=ContingencyTableEvaluation(ACCURACY)
# cross-validation instance
cross_validation=CrossValidation(svm, comb_features, labels,
splitting_strategy, evaluation_criterium)
cross_validation.set_autolock(False)
# append cross vlaidation output classes
mkl_storage=ParameterObserverCV()
cross_validation.subscribe_to_parameters(mkl_storage)
cross_validation.set_num_runs(3)
# perform cross-validation
result=cross_validation.evaluate()
# print mkl weights
weights = []
for obs_index in range(mkl_storage.get_num_observations()):
obs = mkl_storage.get_observation(obs_index)
for fold_index in range(obs.get_num_folds()):
fold = obs.get_fold(fold_index)
machine = MKLClassification.obtain_from_generic(fold.get_trained_machine())
w = machine.get_kernel().get_subkernel_weights()
weights.append(w)
print("mkl weights during cross--validation")
print(weights)
if __name__=='__main__':
print('Evaluation CrossValidationClassification')
evaluation_cross_validation_mkl_weight_storage(*parameter_list[0])
| 37.704545 | 101 | 0.768837 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.